+ ./ya make . -T --test-size=small --test-size=medium --stat --test-threads 52 --link-threads 12 -DUSE_EAT_MY_DATA --build release --sanitize=address -DDEBUGINFO_LINES_ONLY --bazel-remote-store --bazel-remote-base-uri http://cachesrv.internal:8081 --bazel-remote-username cache_user --bazel-remote-password-file /tmp/tmp.lgPdoCawLU --bazel-remote-put --dist-cache-max-file-size=209715200 -A --retest --stat -DCONSISTENT_DEBUG --no-dir-outputs --test-failure-code 0 --build-all --cache-size 2TB --force-build-depends --log-file /home/runner/actions_runner/_work/ydb/ydb/tmp/results/ya_log.txt --evlog-file /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/ya_evlog.jsonl --junit /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/junit.xml --build-results-report /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/report.json --output /home/runner/actions_runner/_work/ydb/ydb/tmp/out
Output root is subdirectory of Arcadia root, this may cause non-idempotent build
Configuring dependencies for platform default-linux-x86_64-release-asan
Configuring dependencies for platform tools
[3 ymakes processing] [7840/7841 modules configured]
Warn[-WPluginErr]: in $B/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium: Requirement ram is redefined 16 -> 28
[3 ymakes processing] [8444/8444 modules configured]
[3 ymakes processing] [8444/8444 modules configured] [144/144 modules rendered]
[2 ymakes processing] [8444/8444 modules configured] [4813/4838 modules rendered]
[2 ymakes processing] [8444/8444 modules configured] [4838/4838 modules rendered]
Configuring dependencies for platform test_tool_tc1-global
[0 ymakes processing] [8450/8450 modules configured] [4838/4838 modules rendered]
Configuring tests execution
Configuring local and dist store caches
Configuration done.
Preparing for execution |33.3%| CLEANING SYMRES |24.1%| [AR] {BAZEL_DOWNLOAD} $(B)/certs/libcerts.global.a |24.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/deprecated/yajl/libcontrib-deprecated-yajl.a |28.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/getopt/liblibrary-cpp-getopt.global.a |32.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/lower_case/libcpp-digest-lower_case.a |28.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/md5/libcpp-digest-md5.a |30.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/disjoint_sets/liblibrary-cpp-disjoint_sets.a |32.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libssh2/libcontrib-libs-libssh2.a |32.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/io/libcpp-http-io.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ipv6_address/liblibrary-cpp-ipv6_address.a |36.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/enum_codegen/libcpp-deprecated-enum_codegen.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/getopt/small/libcpp-getopt-small.a |37.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ipmath/liblibrary-cpp-ipmath.a |37.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nghttp2/libcontrib-libs-nghttp2.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/split/libcpp-deprecated-split.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/enumbitset/liblibrary-cpp-enumbitset.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lcs/liblibrary-cpp-lcs.a |37.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dbg_output/liblibrary-cpp-dbg_output.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/ref/libinternal-proxies-ref.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/sse41/libinternal-proxies-sse41.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cpuid_check/liblibrary-cpp-cpuid_check.global.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/common/libcpp-json-common.a |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/str_map/libcpp-containers-str_map.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/html/pcdata/libcpp-html-pcdata.a |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/accessors/libcpp-deprecated-accessors.a |43.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/sse2/libinternal-proxies-sse2.a |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/sorted_vector/libcpp-containers-sorted_vector.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/paged_vector/libcpp-containers-paged_vector.a |46.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/coroutine/listener/libcpp-coroutine-listener.a |46.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/diff/liblibrary-cpp-diff.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dot_product/liblibrary-cpp-dot_product.a |46.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/l2_distance/liblibrary-cpp-l2_distance.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/crc32c/libcpp-digest-crc32c.a |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lfalloc/dbg_info/libcpp-lfalloc-dbg_info.a |47.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/2d_array/libcpp-containers-2d_array.a |47.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/logger/global/libcpp-logger-global.a |48.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/stack_array/libcpp-containers-stack_array.a |48.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/yson/libcpp-json-yson.a |48.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/fast_sax/libcpp-json-fast_sax.a |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/neon64/liblibs-base64-neon64.a |49.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/http/server/libcpp-http-server.a |49.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/bitseq/libcpp-containers-bitseq.a |49.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/murmur/libcpp-digest-murmur.a |49.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/html/escape/libcpp-html-escape.a |49.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/kmp/libcpp-deprecated-kmp.a |49.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/writer/libcpp-json-writer.a |49.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/comptrie/libcpp-containers-comptrie.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/ssse3/libinternal-proxies-ssse3.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/disjoint_interval_tree/libcpp-containers-disjoint_interval_tree.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lfalloc/alloc_profiler/libcpp-lfalloc-alloc_profiler.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/misc/libcpp-http-misc.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/intrusive_avl_tree/libcpp-containers-intrusive_avl_tree.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/compact_vector/libcpp-containers-compact_vector.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cgiparam/liblibrary-cpp-cgiparam.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/iterator/liblibrary-cpp-iterator.a |25.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/simple/libcpp-http-simple.a |25.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/fetch/libcpp-http-fetch.a |25.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/ring_buffer/libcpp-containers-ring_buffer.a |25.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/logger/liblibrary-cpp-logger.a |25.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/codecs/greedy_dict/libcpp-codecs-greedy_dict.a |25.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/colorizer/liblibrary-cpp-colorizer.a |25.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/lz4/libblockcodecs-codecs-lz4.global.a |25.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/charset/lite/libcpp-charset-lite.a |25.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/lzma/libblockcodecs-codecs-lzma.global.a |25.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/bzip/libblockcodecs-codecs-bzip.global.a |25.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/coroutine/engine/libcpp-coroutine-engine.a |25.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cache/liblibrary-cpp-cache.a |26.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/fastlz/libblockcodecs-codecs-fastlz.global.a |26.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/avx2/libinternal-proxies-avx2.a |26.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/config/liblibrary-cpp-config.a |26.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/cityhash-1.0.2/libcontrib-restricted-cityhash-1.0.2.a |18.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/iostreams/librestricted-boost-iostreams.a |18.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/dragonbox/libdragonbox.a |18.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zstd/libblockcodecs-codecs-zstd.global.a |18.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |18.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/transfer/transfer |18.8%| PREPARE $(VCS) |19.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/core/libcpp-blockcodecs-core.a |21.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/restricted/boost/context/ucontext_impl/libboost-context-ucontext_impl.a |21.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/thrift/libcontrib-restricted-thrift.a |21.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/graph/librestricted-boost-graph.a |23.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/legacy_zstd06/libblockcodecs-codecs-legacy_zstd06.global.a |23.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/chrono/librestricted-boost-chrono.a |23.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |25.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-cal/librestricted-aws-aws-c-cal.a |25.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zlib/libcontrib-libs-zlib.a |26.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/yaml/libcontrib-libs-yaml.a |33.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/impl_common/libboost-context-impl_common.a |34.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/CFGuard/liblib-Transforms-CFGuard.a |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/thread/librestricted-boost-thread.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libiconv/static/liblibs-libiconv-static.a |35.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/double-conversion/libcontrib-libs-double-conversion.a |36.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/libllvm16-lib-Target.a |36.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/libllvm16-lib-ExecutionEngine.a |36.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Linker/libllvm16-lib-Linker.a |36.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cctz/tzdata/liblibs-cctz-tzdata.global.a |36.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/neon32/liblibs-base64-neon32.a |36.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/c/dec/libbrotli-c-dec.a |36.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/ssse3/liblibs-base64-ssse3.a |36.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clang18-rt/lib/asan_static/libclang_rt.asan_static-x86_64.a |36.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/plain64/liblibs-base64-plain64.a |36.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/c-ares/libcontrib-libs-c-ares.a |36.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libidn/static/liblibs-libidn-static.a |37.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/crcutil/libcontrib-libs-crcutil.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/libcontrib-libs-farmhash.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr3_cpp_runtime/libcontrib-libs-antlr3_cpp_runtime.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/deprecated/http-parser/libcontrib-deprecated-http-parser.a |36.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libunwind/libcontrib-libs-libunwind.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/api/libcpp-malloc-api.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/c/enc/libbrotli-c-enc.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/RuntimeDyld/liblib-ExecutionEngine-RuntimeDyld.a |37.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/builtins/liblibs-cxxsupp-builtins.a |37.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/libcontrib-restricted-abseil-cpp.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/address_sorting/libgrpc-third_party-address_sorting.a |37.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IRReader/libllvm16-lib-IRReader.a |37.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/libevent/event_thread/liblibs-libevent-event_thread.a |37.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/MCDisassembler/liblib-MC-MCDisassembler.a |37.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitcode/Writer/liblib-Bitcode-Writer.a |37.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libbz2/libcontrib-libs-libbz2.a |37.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse42_aesni/libfarmhash-arch-sse42_aesni.a |37.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clang18-rt/lib/asan_cxx/libclang_rt.asan_cxx-x86_64.a |37.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/fmt/libcontrib-libs-fmt.a |37.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/linuxvdso/libcontrib-libs-linuxvdso.a |38.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/plain32/liblibs-base64-plain32.a |38.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libaio/static/liblibs-libaio-static.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_extra/liblibs-libevent-event_extra.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/expat/libcontrib-libs-expat.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/actor/libmessagebus_actor.a |37.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_openssl/liblibs-libevent-event_openssl.a |37.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/GlobalISel/liblib-CodeGen-GlobalISel.a |37.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/liburing/libcontrib-libs-liburing.a |38.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/third_party/upb/libgrpc-third_party-upb.a |38.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitstream/Reader/liblib-Bitstream-Reader.a |38.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/protos/libcpp-lwtrace-protos.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clang18-rt/lib/asan/libclang_rt.asan-x86_64.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/Orc/Shared/libExecutionEngine-Orc-Shared.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libfyaml/libcontrib-libs-libfyaml.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IRPrinter/libllvm16-lib-IRPrinter.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libevent/event_core/liblibs-libevent-event_core.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/config/libcpp-messagebus-config.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/base64/avx2/liblibs-base64-avx2.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/system/libsystem_allocator.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/MSF/liblib-DebugInfo-MSF.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/pcre16/liblibs-pcre-pcre16.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hdr_histogram/libcontrib-libs-hdr_histogram.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/mon/libcpp-lwtrace-mon.global.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/PerfJITEvents/liblib-ExecutionEngine-PerfJITEvents.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/backtrace/libcontrib-libs-backtrace.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/PDB/liblib-DebugInfo-PDB.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxrt/liblibs-cxxsupp-libcxxrt.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/brotli/c/common/libbrotli-c-common.a |38.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse42/libfarmhash-arch-sse42.a |38.5%| [AR] 
{BAZEL_DOWNLOAD} $(B)/contrib/libs/fastlz/libcontrib-libs-fastlz.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/orc-format/liblibs-apache-orc-format.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libc_compat/libcontrib-libs-libc_compat.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_core2/liblibs-hyperscan-runtime_core2.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/Symbolize/liblib-DebugInfo-Symbolize.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/AggressiveInstCombine/liblib-Transforms-AggressiveInstCombine.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxxabi-parts/liblibs-cxxsupp-libcxxabi-parts.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/oldmodule/libcpp-messagebus-oldmodule.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Demangle/libllvm16-lib-Demangle.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr4_cpp_runtime/libcontrib-libs-antlr4_cpp_runtime.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/mon/libcpp-lwtrace-mon.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/linuxvdso/original/liblibs-linuxvdso-original.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/flatbuffers/libcontrib-libs-flatbuffers.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/signer/libfq-libs-signer.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/monitoring/libcpp-messagebus-monitoring.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/shared_resources/libfq-libs-shared_resources.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/CodeView/liblib-DebugInfo-CodeView.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/AsmParser/libllvm16-lib-AsmParser.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/Disassembler/libTarget-X86-Disassembler.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/farmhash/arch/sse41/libfarmhash-arch-sse41.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/events/libfq-libs-events.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/Orc/TargetProcess/libExecutionEngine-Orc-TargetProcess.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/TextAPI/libllvm16-lib-TextAPI.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_corei7/liblibs-hyperscan-runtime_corei7.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cctz/libcontrib-libs-cctz.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/liblibrary-cpp-messagebus.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libcolumnshard-common-protos.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Remarks/libllvm16-lib-Remarks.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/BinaryFormat/libllvm16-lib-BinaryFormat.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ExecutionEngine/MCJIT/liblib-ExecutionEngine-MCJIT.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Frontend/OpenMP/liblib-Frontend-OpenMP.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/DebugInfo/DWARF/liblib-DebugInfo-DWARF.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/curl/libcontrib-libs-curl.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/shared_resources/interface/liblibs-shared_resources-interface.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libxml/libcontrib-libs-libxml.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/portions/libreader-sys_view-portions.global.a |39.8%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/providers/stat/expr_nodes/libproviders-stat-expr_nodes.a |39.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/lambda_builder/libyt-lib-lambda_builder.a |40.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_avx2/liblibs-hyperscan-runtime_avx2.a |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/liblibs-aws-sdk-cpp-aws-cpp-sdk-core.a |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Bitcode/Reader/liblib-Bitcode-Reader.a |39.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/key_filter/libyt-lib-key_filter.a |39.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/libcontrib-libs-pcre.a |40.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/ProfileData/libllvm16-lib-ProfileData.a |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/AsmPrinter/liblib-CodeGen-AsmPrinter.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/counters/libtx-columnshard-counters.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/MCParser/liblib-MC-MCParser.a |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v0/lexer/libsql-v0-lexer.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/purecalc_no_pg_wrapper/liblibs-row_dispatcher-purecalc_no_pg_wrapper.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/libproviders-common-schema.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/MC/libllvm16-lib-MC.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/structured_token/libproviders-common-structured_token.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/mkql/libcommon-schema-mkql.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/expr/libcommon-schema-expr.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Passes/libllvm16-lib-Passes.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/parser/libcommon-schema-parser.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/config/libessentials-providers-config.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/schema/skiff/libcommon-schema-skiff.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/orc/liblibs-apache-orc.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/testlib/common/libactors-testlib-common.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/Columns/liblibrary-arrow_clickhouse-Columns.a |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/table/libschemeshard-olap-table.a |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/Common/liblibrary-arrow_clickhouse-Common.a |41.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/libcontrib-libs-googleapis-common-protos.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/wilson/liblibrary-actors-wilson.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/slide_limiter/usage/liblibrary-slide_limiter-usage.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/mock/liblibrary-folder_service-mock.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/DataStreams/liblibrary-arrow_clickhouse-DataStreams.a |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiohttp/libpy3contrib-python-aiohttp.global.a |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_parquet/libydb-library-arrow_parquet.a |40.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/conclusion/libydb-library-conclusion.a |41.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/chunks_limiter/libydb-library-chunks_limiter.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/testlib/liblibrary-actors-testlib.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/provider/libproviders-result-provider.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/protos/libtx-coordinator-protos.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Object/libllvm16-lib-Object.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/liblibrary-folder_service-proto.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/decimal/libessentials-public-decimal.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/pg/expr_nodes/libproviders-pg-expr_nodes.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_kernels/libydb-library-arrow_kernels.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/protos/liblibrary-db_pool-protos.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/db_pool/libydb-library-db_pool.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/provider/libproviders-common-provider.a |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/libydb-library-folder_service.a |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/transform/libproviders-common-transform.a |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/events/tables_erased/libsubscriber-events-tables_erased.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/hash/liblibrary-formats-arrow-hash.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/udf_resolve/libproviders-common-udf_resolve.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/resources/libtx-columnshard-resources.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/csv/converter/libarrow-csv-converter.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/normalization/libcpp-unicode-normalization.a |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/selector/abstract/libsession-selector-abstract.a |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/libydb-library-arrow_clickhouse.a |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/modifier/liblibrary-formats-arrow-modifier.a |42.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/liblibrary-formats-arrow-protos.a |42.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openssl/libcontrib-libs-openssl.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/granule/libcolumnshard-normalizer-granule.global.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/liblibs-aws-sdk-cpp-aws-cpp-sdk-s3.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/liblibrary-formats-arrow.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/tiling/libstorage-optimizer-tiling.global.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/scalar/liblibrary-formats-arrow-scalar.a |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/expr_nodes/libproviders-result-expr_nodes.a |42.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/selector/liblcbuckets-planner-selector.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/simple_builder/liblibrary-formats-arrow-simple_builder.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/switch/liblibrary-formats-arrow-switch.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/splitter/liblibrary-formats-arrow-splitter.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/pg/provider/libproviders-pg-provider.a |43.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/common/libscheme-defaults-common.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/validation/liblibrary-formats-arrow-validation.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/transformer/liblibrary-formats-arrow-transformer.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a |43.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/fyamlcpp/libydb-library-fyamlcpp.a |43.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/settings/libessentials-sql-settings.a |43.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/arrow/libpublic-udf-arrow.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |43.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/libpublic-issue-protos.a |43.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/global_plugins/libydb-library-global_plugins.a |43.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/grpc/server/liblibrary-grpc-server.a |44.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/grpc/server/actors/libgrpc-server-actors.a |43.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/result_format/libessentials-public-result_format.a |43.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/libessentials-public-issue.a |43.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/http_proxy/authorization/liblibrary-http_proxy-authorization.a |43.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/langver/libessentials-public-langver.a |43.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/events/libdata_sharing-source-events.a |43.7%| PREPARE $(YMAKE_PYTHON3-4256832079) |43.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/account_lockout/liblibrary-login-account_lockout.a |43.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/IR/libllvm16-lib-IR.a |43.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/http_proxy/error/liblibrary-http_proxy-error.a |44.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/SelectionDAG/liblib-CodeGen-SelectionDAG.a |44.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/common/libcolumnshard-blobs_action-common.a |44.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/balance_coverage/libcore-tx-balance_coverage.a |44.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/libessentials-public-types.a |44.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/events/libcolumnshard-blobs_action-events.a |44.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/transactions/libdata_sharing-source-transactions.a 
|44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/cache/liblibrary-login-cache.a |44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Analysis/libllvm16-lib-Analysis.a |44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a |44.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/logger/libydb-library-logger.a |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/abstract/libcolumnshard-bg_tasks-abstract.a |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/libcore-scheme-protos.a |44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/raw_socket/libydb-core-raw_socket.a |44.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/naming_conventions/libydb-library-naming_conventions.a |44.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/security/certificate_check/libcore-security-certificate_check.a |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/libydb-library-login.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/libydb-library-mkql_proto.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/password_checker/liblibrary-login-password_checker.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/libydb-core-public_http.global.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/protos/libcore-pgproxy-protos.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/purecalc/libcore-persqueue-purecalc.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/protos/libcore-public_http-protos.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/liblibrary-login-protos.a |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/libydb-core-kqp.global.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/libydb-core-public_http.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.global.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/support/libpublic-udf-support.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/pg_dummy/libessentials-sql-pg_dummy.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/query_data/libcore-kqp-query_data.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/quoter/libydb-core-quoter.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mon/libydb-core-mon.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/exception_policy/libudf-service-exception_policy.global.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/security/libydb-core-security.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/libessentials-public-udf.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/counter_time_keeper/liblibrary-persqueue-counter_time_keeper.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/liblibrary-mkql_proto-protos.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/tasks_packer/libfq-libs-tasks_packer.a |45.5%| PREPARE $(CLANG_FORMAT-3855767795) |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ncloud/impl/liblibrary-ncloud-impl.a |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pdisk_io/protos/liblibrary-pdisk_io-protos.a |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/writer/libcore-persqueue-writer.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/common/librow_dispatcher-format_handler-common.a 
|45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/deprecated/read_batch_converter/libpersqueue-deprecated-read_batch_converter.a |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/utils/liblibs-rate_limiter-utils.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/events/liblibs-rate_limiter-events.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pdisk_io/libydb-library-pdisk_io.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/parsers/librow_dispatcher-format_handler-parsers.a |46.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/proto/liblibs-control_plane_storage-proto.a |46.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/libyql-essentials-sql.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/protobuf_printer/libydb-library-protobuf_printer.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pretty_types_print/protobuf/liblibrary-pretty_types_print-protobuf.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/audit/events/liblibs-audit-events.a |46.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/events/liblibs-checkpoint_storage-events.a |46.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/libcore-kqp-common.a |46.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/libydb-library-schlab.a |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/version/libversion.a |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/debug/libydb-core-debug.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/splitter/libformats-arrow-splitter.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/mon/liblibrary-schlab-mon.a |45.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/private_client/libfq-libs-private_client.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/query_actor/libydb-library-query_actor.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/console/validators/libcms-console-validators.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/ingress/libblobstorage-vdisk-ingress.a |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/bulksst_add/libvdisk-hulldb-bulksst_add.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sub_columns/libarrow-accessor-sub_columns.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/version/libversion_definition.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/libvdisk-hulldb-base.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/util/draft/libutil-draft.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ytalloc/api/libcpp-ytalloc-api.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/generated/libcore-base-generated.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/threading/libcpp-yt-threading.a |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/threading/libessentials-utils-threading.a |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr4_ansi/libv1-proto_parser-antlr4_ansi.a |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/libcontrib-libs-hyperscan.a |45.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/sql/v1/lexer/antlr4_ansi/libv1-lexer-antlr4_ansi.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a |45.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kesus/tablet/libcore-kesus-tablet.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/probes/liblibrary-schlab-probes.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/huge/libblobstorage-vdisk-huge.a |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/mon/liblibrary-schlab-mon.global.a |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |45.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/misc/libcpp-yt-misc.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/logging/libcpp-yt-logging.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/logging/plain_text_formatter/libyt-logging-plain_text_formatter.a |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/format/libsql-v1-format.a |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/malloc/libcpp-yt-malloc.a |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/exception/libcpp-yt-exception.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/protos/liblibrary-schlab-protos.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/assert/libcpp-yt-assert.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schine/liblibrary-schlab-schine.a |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/backtrace/libcpp-yt-backtrace.a |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/cpu_clock/libcpp-yt-cpu_clock.a |45.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/future/libcpp-threading-future.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/node/libcpp-yson-node.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/error/libcpp-yt-error.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/tdigest/liblibrary-cpp-tdigest.a |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/url/libcpp-string_utils-url.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schemu/liblibrary-schlab-schemu.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/relaxed_escaper/libcpp-string_utils-relaxed_escaper.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/ztstrbuf/libcpp-string_utils-ztstrbuf.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/base64/libcpp-string_utils-base64.a |44.8%| PREPARE $(LLD_ROOT-3808007503) |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sse/liblibrary-cpp-sse.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/terminate_handler/liblibrary-cpp-terminate_handler.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/signal_backtrace/libydb-library-signal_backtrace.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/terminate_handler/liblibrary-cpp-terminate_handler.global.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/zstd/libcpp-streams-zstd.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/indent_text/libcpp-string_utils-indent_text.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/security/libydb-library-security.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/quote/libcpp-string_utils-quote.a |45.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |45.0%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schoot/liblibrary-schlab-schoot.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/tablesorter/libservice-pages-tablesorter.global.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/format/libsql-v1-format.global.a |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/exception/libcpp-monlib-exception.a |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/libmonlib-service-pages.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/big_integer/libcpp-openssl-big_integer.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr3_ansi/libv1-lexer-antlr3_ansi.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/pages/resources/libservice-pages-resources.global.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/method/libcpp-openssl-method.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/text/libmonlib-encode-text.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr3/libv1-lexer-antlr3.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/abstract/liblibrary-workload-abstract.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libydb-library-services.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/kv/liblibrary-workload-kv.global.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/io/libcpp-openssl-io.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/json/proto/libprotobuf-json-proto.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/spack/libmonlib-encode-spack.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/slide_limiter/service/liblibrary-slide_limiter-service.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/signals/libydb-library-signals.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/pcre/libcpp-regex-pcre.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/resource/liblibrary-cpp-resource.a |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/hyperscan/libcpp-regex-hyperscan.a |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/protos/libcpp-retry-protos.a |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/util/libcpp-protobuf-util.a |44.8%| {BAZEL_DOWNLOAD} $(B)/library/cpp/sanitizer/plugin/sanitizer.py.pyplugin |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/table_creator/libydb-library-table_creator.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/libcpp-monlib-encode.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/metrics/libcpp-monlib-metrics.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sliding_window/liblibrary-cpp-sliding_window.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/prometheus/libmonlib-encode-prometheus.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/brotli/libcpp-streams-brotli.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/skiff/liblibrary-cpp-skiff.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gmock_in_unittest/libcpp-testing-gmock_in_unittest.global.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/www/libcpp-messagebus-www.global.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/levenshtein_diff/libcpp-string_utils-levenshtein_diff.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/legacy_protobuf/protos/libencode-legacy_protobuf-protos.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/parse_size/libcpp-string_utils-parse_size.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a |45.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/string_utils/csv/libcpp-string_utils-csv.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/unittest_main/libcpp-testing-unittest_main.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/hook/libcpp-testing-hook.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/blocking_queue/libcpp-threading-blocking_queue.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/stock/liblibrary-workload-stock.global.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/json/libmonlib-encode-json.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr3_ansi/libv1-proto_parser-antlr3_ansi.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4/libv1-lexer-antlr4.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/dynamic_counters/libcpp-monlib-dynamic_counters.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/scheme/liblibrary-cpp-scheme.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/equeue/libcpp-threading-equeue.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/cron/libcpp-threading-cron.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/task_scheduler/libcpp-threading-task_scheduler.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/skip_list/libcpp-threading-skip_list.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/libsql-v1-lexer.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/thread_local/libcpp-threading-thread_local.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/kv/liblibrary-workload-kv.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/public/liblibrary-yaml_config-public.a |45.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/stock/liblibrary-workload-stock.a |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/libyaml-config-protos.a |44.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_json/libydb-library-yaml_json.a |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v0/libessentials-sql-v0.a |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/type_info/liblibrary-cpp-type_info.a |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/libydb-library-yaml_config.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/uri/liblibrary-cpp-uri.a |44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/proto/libcpp-unified_agent_client-proto.a |44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/liblibrary-cpp-unified_agent_client.a |44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |44.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/xml/init/libcpp-xml-init.a |44.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/comp_nodes/libyql-dq-comp_nodes.a |44.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/xml/document/libcpp-xml-document.a |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/json/libcpp-yson-json.a |44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/icu/libcontrib-libs-icu.a |44.3%| PREPARE $(FLAKE8_PY3-715603131) |44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ycloud/impl/liblibrary-ycloud-impl.a |44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/backtrace/cursors/libunwind/libbacktrace-cursors-libunwind.a |44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/yson/libcpp-yt-yson.a |44.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/system/libcpp-yt-system.a |44.4%| 
[AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson_pull/libyson_pull.a |44.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/grpc/libcontrib-libs-grpc.a |44.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/yson_string/libcpp-yt-yson_string.a |44.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/CodeGen/libllvm16-lib-CodeGen.a |44.1%| [AR] {BAZEL_DOWNLOAD} $(B)/tools/enum_parser/enum_serialization_runtime/libtools-enum_parser-enum_serialization_runtime.a |44.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/common/proto/libbackup-common-proto.a |44.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/common/libcore-backup-common.a |44.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/liblibrary-cpp-yson.a |44.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/controller/libcore-backup-controller.a |43.8%| [AR] {BAZEL_DOWNLOAD} $(B)/util/charset/libutil-charset.a |43.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/impl/libcore-backup-impl.a |43.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/libydb-library-ydb_issue.global.a |43.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr3/libv1-proto_parser-antlr3.a |43.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/libydb-library-ydb_issue.a |43.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/liblibrary-ydb_issue-proto.a |43.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/libcore-blobstorage-crypto.a |43.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/dq/comp_nodes/dq_hash_combine.cpp |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/groupinfo/libcore-blobstorage-groupinfo.a |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/dsproxy/mock/libblobstorage-dsproxy-mock.a |43.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/dq/comp_nodes/dq_program_builder.cpp |43.4%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/dq/comp_nodes/dq_hash_operator_serdes.cpp |43.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/dsproxy/bridge/libblobstorage-dsproxy-bridge.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/util/libyutil.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/lib/libcommon-unicode_base-lib.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/libmath_udf.global.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/backpressure/libcore-blobstorage-backpressure.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/lib/libcommon-math-lib.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/incrhuge/libcore-blobstorage-incrhuge.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/mock/libblobstorage-pdisk-mock.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/log/libessentials-utils-log.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/compute/libdq-actors-compute.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/libvdisk-hulldb-barriers.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/libsql-v1-proto_parser.a |42.5%| PREPARE $(TEST_TOOL_HOST-sbr:9029509511) |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/proto_parser/antlr4/libv1-proto_parser-antlr4.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/fetch/libessentials-utils-fetch.a |42.6%| PREPARE $(PYTHON) |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/common/libdq-actors-common.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/balance/libblobstorage-vdisk-balance.a |42.7%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/types/binary_json/libessentials-types-binary_json.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/libcore-blobstorage-vdisk.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/libblobstorage-vdisk-anubis_osiris.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/libyql-dq-actors.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/libunicode_udf.global.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/input_transforms/libdq-actors-input_transforms.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/control/libydb-core-control.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/types/uuid/libessentials-types-uuid.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/task_runner/libdq-actors-task_runner.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/validation/libcore-config-validation.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/backtrace/libessentials-utils-backtrace.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/docapi/libydb-core-docapi.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/string/libstring_udf.global.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/control/lib/libcore-control-lib.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/change_exchange/libydb-core-change_exchange.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/console/util/libcms-console-util.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/types/dynumber/libessentials-types-dynumber.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/common/libyql-dq-common.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/common/liblibrary-formats-arrow-accessor-common.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/object_storage/libcore-external_sources-object_storage.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/spilling/libdq-actors-spilling.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/composite/liblibrary-formats-arrow-accessor-composite.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/plain/libarrow-accessor-plain.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/minikql_compile/libcore-client-minikql_compile.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/object_storage/inference/libexternal_sources-object_storage-inference.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/cli_config_base/libcore-driver_lib-cli_config_base.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/tasks/libyql-dq-tasks.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/dictionary/libarrow-accessor-dictionary.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sparsed/libarrow-accessor-sparsed.global.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/pg/libessentials-sql-pg.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/log/proto/libutils-log-proto.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/abstract/libarrow-accessor-abstract.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sub_columns/libarrow-accessor-sub_columns.global.a |42.4%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/utils/failure_injector/libessentials-utils-failure_injector.a |42.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/init/libcore-config-init.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/cli_base/libcli_base.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/libyql-essentials-utils.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/state/libyql-dq-state.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/erasure/libydb-core-erasure.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libyql-dq-proto.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/transformer/libformats-arrow-transformer.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.global.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sparsed/libarrow-accessor-sparsed.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/rows/libformats-arrow-rows.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/transform/libyql-dq-transform.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/reader/libformats-arrow-reader.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libdq-actors-protos.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/libydb-core-external_sources.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/libydb-core-cms.global.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/libydb-core-formats.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing_common/libfq-libs-checkpointing_common.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/type_ann/libyql-dq-type_ann.a |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/arrow/interface/libcommon-arrow-interface.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/libcore-formats-arrow.a |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/proto/libproviders-clickhouse-proto.a |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/expr_nodes/libproviders-clickhouse-expr_nodes.a |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/libfq-libs-config.a |42.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |42.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/dq_block_hash_join.cpp |42.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/dictionary/libformats-arrow-dictionary.a |42.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing/libfq-libs-checkpointing.a |41.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/yql_common_dq_factory.cpp |41.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/dq_hash_aggregate.cpp |42.0%| [CP] {default-linux-x86_64, release, asan} $(B)/common_test.context |42.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/dq_hash_operator_common.cpp |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/run/librun.a |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/libfq-libs-checkpoint_storage.a |41.9%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/events/liblibs-control_plane_storage-events.a |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/control_plane/libcompute-ydb-control_plane.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/db_schema/libfq-libs-db_schema.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/deprecated/client/liblib-deprecated-client.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/libfq-libs-db_id_async_resolver_impl.a |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/hmac/libfq-libs-hmac.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/gateway/libfq-libs-gateway.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/liblibs-compute-ydb.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/lib/libyt-gateway-lib.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/graph_params/proto/liblibs-graph_params-proto.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/runtime/libyql-dq-runtime.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/nodewarden/libcore-blobstorage-nodewarden.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/metrics/libfq-libs-metrics.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/mock/libfq-libs-mock.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/proto/liblibs-quota_manager-proto.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/provider/libproviders-clickhouse-provider.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/result_formatter/libfq-libs-result_formatter.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/control_plane_service/liblibs-rate_limiter-control_plane_service.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/quoter_service/liblibs-rate_limiter-quoter_service.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/ydb_dump/libcore-io_formats-ydb_dump.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/libfq-libs-quota_manager.a |41.0%| [CF] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/sandbox.cpp |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/filters/librow_dispatcher-format_handler-filters.a |41.1%| [CF] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/build_info.cpp |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/libessentials-sql-v1.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/libfq-libs-protos.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/server/libcore-client-server.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/purecalc_compilation/liblibs-row_dispatcher-purecalc_compilation.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/token_accessor/client/libcommon-token_accessor-client.a |41.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |41.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/db_id_async_resolver/libproviders-common-db_id_async_resolver.a |41.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/arrow/libproviders-common-arrow.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/protos/libgrpc_services-cancelation-protos.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/http_gateway/libproviders-common-http_gateway.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_caching/libydb-core-grpc_caching.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/grpc/libdq-api-grpc.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/arrow/scheme/libio_formats-arrow-scheme.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/cancelation/libcore-grpc_services-cancelation.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/libfq-libs-row_dispatcher.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/libydb-core-grpc_streaming.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/events/liblibs-row_dispatcher-events.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/test_connection/libfq-libs-test_connection.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/pushdown/libproviders-common-pushdown.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/oss/ydb_sdk_import/libpy3tests-oss-ydb_sdk_import.global.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/libydb-core-cms.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/jaeger_tracing/libydb-core-jaeger_tracing.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/library/user_job_statistics/libmapreduce-library-user_job_statistics.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/events/libdq-actors-events.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/buffer/libkqp-common-buffer.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/base/libpublic-lib-base.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/compilation/libkqp-common-compilation.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/events/libkqp-common-events.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/token_accessor/grpc/libcommon-token_accessor-grpc.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/simple/libkqp-common-simple.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/expr_nodes/libcore-kqp-expr_nodes.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/shard/libcore-graph-shard.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kesus/proxy/libcore-kesus-proxy.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/executer_actor/shards_resolver/libkqp-executer_actor-shards_resolver.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/io/libcpp-mapreduce-io.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/common/libproviders-dq-common.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/common/libcpp-mapreduce-common.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/interface/libproviders-dq-interface.a |40.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool/libgateway-behaviour-resource_pool.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/http/libcpp-mapreduce-http.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/counters/libcore-kqp-counters.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/counters/libproviders-dq-counters.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/http_client/libcpp-mapreduce-http_client.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/proxy_service/proto/libkqp-proxy_service-proto.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/config/libproviders-dq-config.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/common/libkqp-workload_service-common.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/libpy3ydb-tests-library.global.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/utils/libkqp-gateway-utils.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/interface/logging/libmapreduce-interface-logging.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/service/libcpp-monlib-service.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/console/libcore-cms-console.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/helper/libproviders-dq-helper.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/metering/libydb-core-metering.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/config/libcore-persqueue-config.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/mkql/libproviders-dq-mkql.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/protos/libdq-api-protos.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/partition_key_range/libcore-persqueue-partition_key_range.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/arrow/liblibs-apache-arrow.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/peephole/libkqp-opt-peephole.a |40.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/actors/libproviders-generic-actors.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mon_alloc/libydb-core-mon_alloc.a |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/on_disk/chunks/libcpp-on_disk-chunks.a |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/libproviders-dq-actors.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/interface/libcpp-mapreduce-interface.a |40.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/kqp/query_compiler/libcore-kqp-query_compiler.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/libcore-kqp-gateway.a |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/proxy_service/libcore-kqp-proxy_service.a |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/out/libcore-protos-out.a |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/libydb-core-pgproxy.a |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/quoter/public/libcore-quoter-public.a |40.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/compute_actor/libcore-kqp-compute_actor.a |39.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/opt/libproviders-dq-opt.a |39.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/resource_pools/libydb-core-resource_pools.a |39.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/host/libcore-kqp-host.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/libydb-core-scheme.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/statistics/database/libcore-statistics-database.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/libydb-core-sys_view.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/init/libcpp-openssl-init.global.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/statistics/service/libcore-statistics-service.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/crypto/libcpp-openssl-crypto.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/holders/libcpp-openssl-holders.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/client/libcpp-mapreduce-client.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/rbo/libkqp-opt-rbo.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/packedtypes/liblibrary-cpp-packedtypes.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/protos/libcore-tablet_flat-protos.a |39.2%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/generic/actors/yql_generic_lookup_actor.cpp |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/packers/liblibrary-cpp-packers.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/auth/libcore-sys_view-auth.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/protos/libcolumnshard-bg_tasks-protos.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/provider/exec/libdq-provider-exec.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/session/libcolumnshard-bg_tasks-session.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/events/libcolumnshard-bg_tasks-events.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/transfer/libydb-core-transfer.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tracing/libydb-core-tracing.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/protos/libcolumnshard-blobs_action-protos.a |39.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/providers/dq/planner/libproviders-dq-planner.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/local/libcolumnshard-blobs_action-local.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/manager/libcolumnshard-bg_tasks-manager.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/load_test/libydb-core-load_test.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |39.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/transactions/libcolumnshard-bg_tasks-transactions.a |39.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/cache_policy/libcolumnshard-data_accessor-cache_policy.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/libtx-columnshard-data_accessor.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/oss/canonical/libpy3tests-oss-canonical.global.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.global.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/wardens/libpy3tests-library-wardens.global.a |39.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/clients/libpy3tests-library-clients.global.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/random_provider/liblibrary-cpp-random_provider.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/interop/libcpp-protobuf-interop.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_locks/manager/libcolumnshard-data_locks-manager.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/util/proto/libprotobuf-util-proto.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/context/libdata_sharing-common-context.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/session/libdata_sharing-destination-session.a |39.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/liblibrary-cpp-retry.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/status/libdata_sharing-initiator-status.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/controller/libdata_sharing-initiator-controller.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/events/libdata_sharing-destination-events.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/task_runner/libproviders-dq-task_runner.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sighandler/liblibrary-cpp-sighandler.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_reader/libtx-columnshard-data_reader.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |39.4%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/string_utils/scan/libcpp-string_utils-scan.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gmock_in_unittest/libcpp-testing-gmock_in_unittest.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/task_runner_actor/libproviders-dq-task_runner_actor.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/runtime/libproviders-dq-runtime.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/worker_manager/interface/libdq-worker_manager-interface.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest_extensions/libcpp-testing-gtest_extensions.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/libcolumnshard-data_sharing-protos.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/atomic/libcpp-threading-atomic.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/expr_nodes/libproviders-generic-expr_nodes.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/dictionary/libchanges-compaction-dictionary.global.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/worker_manager/libproviders-dq-worker_manager.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/cancellation/libcpp-threading-cancellation.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/libcpp/libgeneric-connector-libcpp.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/proto/libproviders-generic-proto.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/libconnector-api-service.a |39.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/queue/libcpp-threading-queue.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/unittest/libcpp-testing-unittest.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/hot_swap/libcpp-threading-hot_swap.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/libapi-service-protos.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/light_rw_lock/libcpp-threading-light_rw_lock.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/poor_man_openmp/libcpp-threading-poor_man_openmp.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/libpy3essentials-public-types.global.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libcolumnshard-engines-protos.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/libpy3core-file_storage-proto.global.a |39.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/k8s_api/libpy3tools-cfg-k8s_api.global.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/common/libproviders-pq-common.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/opt/libyql-dq-opt.a |39.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/provider/libproviders-dq-provider.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/cm_client/libproviders-pq-cm_client.a |39.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/pushdown/libproviders-generic-pushdown.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/time_provider/liblibrary-cpp-time_provider.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/liblibrary-cpp-unified_agent_client.global.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/type_info/tz/libcpp-type_info-tz.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/libtx-columnshard-engines.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/expr_nodes/libproviders-pq-expr_nodes.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/proto/libproviders-pq-proto.a |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/actors/yql_generic_token_provider.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/actors/yql_generic_provider_factories.cpp |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/global/libcpp-yt-global.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/gateway/native/libpq-gateway-native.a |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/actors/yql_generic_read_actor.cpp |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/string/libcpp-yt-string.a |37.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/memory/libcpp-yt-memory.a |37.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/walle/libpy3tools-cfg-walle.global.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.global.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/async_io/libproviders-pq-async_io.a |37.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |37.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |37.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/libreader-simple_reader-duplicates.a |37.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/libpy3ydb-tools-cfg.global.a |37.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/task_meta/libproviders-pq-task_meta.a |37.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/libcore-kqp-opt.a |37.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/actors_factory/libproviders-s3-actors_factory.a |37.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/ydbd_slice/libpy3ydbd_slice.global.a |37.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/common/libproviders-s3-common.a |37.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/audit/libydb-core-audit.a |37.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/providers/s3/events/libproviders-s3-events.a |37.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/provider/libproviders-generic-provider.a |37.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |37.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/credentials/libproviders-s3-credentials.a |38.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/abstract/libengines-scheme-abstract.a |37.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/common/libengines-scheme-common.a |37.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/chacha_512/libblobstorage-crypto-chacha_512.a |38.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/common/libcore-blobstorage-common.a |38.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/compressors/libproviders-s3-compressors.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/optimizer/libreader-sys_view-optimizer.global.a |38.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/libpy3yql-essentials-protos.global.a |38.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/libpy3providers-common-proto.global.a |38.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/llvm16/libcodec-codegen-llvm16.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/libreader-simple_reader-iterator.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/libpy3core-issue-protos.global.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/expr_nodes/libproviders-s3-expr_nodes.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/object_listers/libproviders-s3-object_listers.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/other/libcore-blobstorage-other.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/lwtrace_probes/libcore-blobstorage-lwtrace_probes.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/path_generator/libproviders-s3-path_generator.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/libpy3public-issue-protos.global.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/libproviders-s3-proto.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/range_helpers/libproviders-s3-range_helpers.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/serializations/libproviders-s3-serializations.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mind/bscontroller/libcore-mind-bscontroller.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/common/libproviders-solomon-common.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/libydb-core-persqueue.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_wrapper/libessentials-parser-pg_wrapper.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/protos/libblobstorage-vdisk-protos.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/statistics/libproviders-s3-statistics.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blockstore/core/libcore-blockstore-core.a |38.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/events/libproviders-solomon-events.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/defrag/libblobstorage-vdisk-defrag.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/metadata/libcore-client-metadata.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libpy3yql-dq-proto.global.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/scheme_cache_lib/libcore-client-scheme_cache_lib.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/provider/libproviders-pq-provider.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/llvm16/libcodec-codegen-llvm16.global.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/actors/libproviders-s3-actors.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/expr_nodes/libproviders-solomon-expr_nodes.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/events/liblibs-control_plane_proxy-events.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/gateway/libproviders-solomon-gateway.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/expr_nodes/libproviders-ydb-expr_nodes.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libcore-config-protos.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/libydb-core-base.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/job/libproviders-yt-job.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/proto/libproviders-ydb-proto.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/libpy3library-ydb_issue-proto.global.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/base_utils/libbase_utils.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/actors/libproviders-solomon-actors.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/libpy3library-mkql_proto-protos.global.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/discovery/libydb-core-discovery.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libpy3ydb-library-services.global.a |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/solomon_accessor/client/libsolomon-solomon_accessor-client.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/public/ydb_issue/libyql-public-ydb_issue.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/libvdisk-hulldb-generic.a |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/filestore/core/libcore-filestore-core.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/proto/libproviders-solomon-proto.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/solomon_accessor/grpc/libsolomon-solomon_accessor-grpc.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actor_log/libyql-utils-actor_log.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actors/libyql-utils-actors.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/composite_serial/libarrow-accessor-composite_serial.a |39.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/providers/s3/proto/libpy3providers-s3-proto.global.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libpy3dq-actors-protos.global.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/plain/libarrow-accessor-plain.global.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/plan/libyql-utils-plan.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/libapi-protos-annotations.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/dictionary/libarrow-accessor-dictionary.global.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/common/libformats-arrow-common.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/accessservice/libclient-nc_private-accessservice.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/common/libproviders-yt-common.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/hash/libformats-arrow-hash.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/libpy3api-service-protos.global.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/program/libformats-arrow-program.global.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/accessservice/libclient-yc_private-accessservice.a |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/operation/libclient-yc_private-operation.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/save_load/libformats-arrow-save_load.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/common/libclient-yc_public-common.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/counters/libstorage-actualizer-counters.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/switch/libformats-arrow-switch.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/engine/libydb-core-engine.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/audit/libfq-libs-audit.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/servicecontrol/libclient-yc_private-servicecontrol.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_config/events/liblibs-control_plane_config-events.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/proto/liblibs-checkpoint_storage-proto.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/iam/libclient-yc_public-iam.a |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/events/libclient-yc_public-events.a |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/DateLUT.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/getFQDNOrHostName.cpp |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/resourcemanager/libclient-yc_private-resourcemanager.a |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/libproviders-yt-codec.a |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/JSON.cpp |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/common/liblibs-compute-common.a |39.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/AggregateFunctions/AggregateFunctionCombinatorFactory.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnsCommon.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ISink.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserExplainQuery.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDropQuery.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserExternalDDLQuery.cpp |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/cloud_audit/libfq-libs-cloud_audit.a |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDictionaryAttributeDeclaration.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ExpressionListParsers.cpp |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/iam/libclient-yc_private-iam.a |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTWithAlias.cpp |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQualifiedAsterisk.cpp |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/provider/libproviders-solomon-provider.a |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQueryParameter.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSubquery.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ExpressionElementParsers.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSettingsProfileElement.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTProjectionSelectQuery.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTDatabaseOrNone.cpp |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/provider/libproviders-ydb-provider.a |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTDictionary.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/copyData.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteHelpers.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromPocoSocket.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromFileDescriptorDiscardOnFailure.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferValidUTF8.cpp |38.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/Progress.cpp |38.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/PeekableReadBuffer.cpp |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/provider/libproviders-s3-provider.a |38.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/verbosePrintString.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeUUID.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeNothing.cpp |38.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeString.cpp |38.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeTuple.cpp |38.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeNumberBase.cpp |38.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeMap.cpp |38.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeNested.cpp |38.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeLowCardinalityHelpers.cpp |38.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeInterval.cpp |38.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDecimalBase.cpp |38.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeFunction.cpp |38.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeFactory.cpp |38.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDateTime64.cpp |38.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeLowCardinality.cpp |38.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDate32.cpp |38.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDateTime.cpp |38.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/IBlockInputStream.cpp |38.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/NativeBlockInputStream.cpp |38.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/ExecutionSpeedLimits.cpp |38.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressedReadBufferFromFile.cpp |38.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/SettingsFields.cpp |38.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressedReadBufferBase.cpp |38.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressedReadBuffer.cpp |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/libapi-grpc-draft.a |38.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/thread_local_rng.cpp |38.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/randomSeed.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/setThreadName.cpp |38.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/quoteString.cpp |38.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/formatReadable.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/TaskStatsInfoGetter.cpp |37.8%| 
[AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/dsproxy/libcore-blobstorage-dsproxy.a |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/RemoteHostFilter.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ClickHouseRevision.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ProfileEvents.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnNullable.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/AlignedBuffer.cpp |37.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Allocator.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnConst.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnFixedString.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnMap.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/sleep.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnString.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/AggregateFunctions/IAggregateFunction.cpp |37.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator/recipe/solomon_recipe |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnCompressed.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/mremap.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/shift10.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/getResource.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnLowCardinality.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/errnoToString.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/AggregateFunctions/AggregateFunctionFactory.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/MaskOperations.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnFunction.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/demangle.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/preciseExp10.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/getThreadId.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnAggregateFunction.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/IColumn.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnDecimal.cpp |37.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/FilterDescription.cpp |37.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/DateLUTImpl.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/getPageSize.cpp |37.7%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/base/common/StringRef.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/OpenSSLHelpers.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/CurrentMemoryTracker.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ProcfsMetricsProvider.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnArray.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/CurrentMetrics.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnTuple.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Config/AbstractConfigurationComparison.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/IntervalKind.cpp |37.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator/bin/solomon_emulator |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/CurrentThread.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/FieldVisitorWriteBinary.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ErrorCodes.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/IPv6ToBinary.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Epoll.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Exception.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/DNSResolver.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/FieldVisitorToString.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/Throttler.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/PipeFDs.cpp |37.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ThreadStatus.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/PODArray.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/formatIPv6.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ThreadPool.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/createHardLink.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/MemoryTracker.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/checkStackSize.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/FieldVisitorDump.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ZooKeeper/IKeeper.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/getNumberOfPhysicalCPUCores.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/hasLinuxCapability.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/hex.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/ThreadProfileEvents.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/escapeForFileName.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/getMultipleKeysFromConfig.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/isLocalAddress.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/parseAddress.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressionCodecLZ4.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/BlockInfo.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressedWriteBuffer.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressionCodecMultiple.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressionFactory.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/Block.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/CompressionCodecNone.cpp |37.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Columns/ColumnVector.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/BaseSettings.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/LZ4_decompress_faster.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Compression/ICompressionCodec.cpp |37.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/ColumnWithTypeAndName.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/NamesAndTypes.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/Field.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/SettingsEnums.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/BlockStreamProfileInfo.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeDate.cpp |37.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/ColumnGathererStream.cpp |37.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeArray.cpp |37.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeAggregateFunction.cpp |37.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/SizeLimits.cpp |37.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/NativeBlockOutputStream.cpp |37.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataStreams/materializeBlock.cpp |37.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp |37.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeCustomIPv4AndIPv6.cpp |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/libydb-core-grpc_services.a 
|37.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeCustomGeo.cpp |37.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeFixedString.cpp |37.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Core/Settings.cpp |37.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeEnum.cpp |37.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/registerFormats.cpp |37.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationWrapper.cpp |37.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationFixedString.cpp |37.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/getLeastSupertype.cpp |37.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationString.cpp |37.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationEnum.cpp |37.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationArray.cpp |37.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/IDataType.cpp |37.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypesNumber.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationAggregateFunction.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/EnumValues.cpp |37.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypesDecimal.cpp |37.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/ISerialization.cpp |37.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/NestedUtils.cpp |37.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDateTime64.cpp |37.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDateTime.cpp |37.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationCustomSimpleText.cpp |37.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationNothing.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDate.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDecimalBase.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDecimal.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationDate32.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationMap.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationLowCardinality.cpp 
|36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationNumber.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationIP.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationNullable.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationUUID.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationTupleElement.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/NativeFormat.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/ProtobufWriter.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/Serializations/SerializationTuple.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/AsynchronousReadBufferFromFile.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/JSONEachRowUtils.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/ProtobufReader.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Formats/FormatFactory.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/registerDataTypeDateTime.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/OpenedFile.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/extractTimeZoneFromFunctionArguments.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/FunctionFactory.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/FunctionHelpers.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/toFixedString.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/IFunction.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMappedFile.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/DoubleConverter.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/CompressionMethod.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/SynchronousReader.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMapReadBufferFromFile.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromFileDescriptor.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMapReadBufferFromFileWithCache.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ThreadPoolReader.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromPocoSocket.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromFileBase.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMapReadBufferFromFileDescriptor.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadSettings.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromFile.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromFileBase.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/MMappedFileDescriptor.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/TimeoutSetter.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadBufferFromMemory.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromFile.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/WriteBufferFromFileDescriptor.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/UseSSL.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTAlterQuery.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/InternalTextLogsQueue.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/ProfileEventsExt.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/ClientInfo.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/createReadBufferFromFileBase.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/TablesStatus.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/parseDateTimeBestEffort.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTColumnsMatcher.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/ReadHelpers.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/IO/readFloatText.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTBackupQuery.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTInsertQuery.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/QueryThreadLog.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTProjectionDeclaration.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTConstraintDeclaration.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTAsterisk.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTColumnDeclaration.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTCreateQuery.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTIndexDeclaration.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTDropQuery.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/QueryLog.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTFunction.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTColumnsTransformers.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTDictionaryAttributeDeclaration.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTExpressionList.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTFunctionWithKeyValueArguments.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTOrderByElement.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTKillQueryQuery.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTNameTypePair.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTIdentifier.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTOptimizeQuery.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTLiteral.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTShowTablesQuery.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTPartition.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSetRoleQuery.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSampleRatio.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQueryWithOutput.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSetQuery.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQueryWithOnCluster.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTQueryWithTableAndOutput.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSelectWithUnionQuery.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSelectQuery.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTRolesOrUsersSet.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTWindowDefinition.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTTTLElement.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTTablesInSelectQuery.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTSystemQuery.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTUserNameWithHost.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTWithElement.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/CommonParsers.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserCase.cpp |36.5%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/InsertQuerySettingsPushDownVisitor.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserCreateQuery.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/IParserBase.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/IAST.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/Lexer.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDataType.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserBackupQuery.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDescribeTableQuery.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserAlterQuery.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDictionary.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserDatabaseOrNone.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserCheckQuery.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ISimpleTransform.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseIdentifierOrStringLiteral.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserShowPrivilegesQuery.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserRolesOrUsersSet.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserRenameQuery.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserInsertQuery.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserQuery.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserOptimizeQuery.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserProjectionSelectQuery.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserKillQueryQuery.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserPartition.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSettingsProfileElement.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserShowGrantsQuery.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSetQuery.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSelectQuery.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSampleRatio.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSelectWithUnionQuery.cpp |36.0%| PREPARE $(OS_SDK_ROOT-sbr:243881345) 
|36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSetRoleQuery.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseIntervalKind.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserWatchQuery.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/FunctionsConversion.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Interpreters/castColumn.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserSystemQuery.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserShowTablesQuery.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserTablePropertiesQuery.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserUnionQueryElement.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserUseQuery.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserUserNameWithHost.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseDatabaseAndTableName.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserTablesInSelectQuery.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/formatAST.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/QueryWithOutputSettingsPushDownVisitor.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ParserWithElement.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/formatSettingName.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/TokenIterator.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Executors/PollingQueue.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/IOutputFormat.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseUserName.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/CSVRowInputFormat.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/parseQuery.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/queryToString.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/IInputFormat.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Chunk.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/IProcessor.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/AvroRowInputFormat.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/IRowInputFormat.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ConcatProcessor.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ArrowBufferedStreams.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/TSKVRowOutputFormat.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/RawBLOBRowInputFormat.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/IRowOutputFormat.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/IAccumulatingTransform.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp |35.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/RowInputFormatWithDiagnosticInfo.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Common/TimerDescriptor.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/DataTypes/DataTypeNullable.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/OutputStreamToOutputFormat.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Parsers/ASTShowGrantsQuery.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Port.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ResizeProcessor.cpp |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/out/libapi-protos-out.a |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/LimitTransform.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Processors/ISource.cpp |35.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/extension_common/libsrc-client-extension_common.a |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/src/Functions/CastOverloadResolver.cpp |35.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/windows/libpy3library-python-windows.global.a |35.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/resource_pool_classifier/libgateway-behaviour-resource_pool_classifier.global.a |35.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/actors/libkqp-gateway-actors.a |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/udfs/common/clickhouse/client/clickhouse_client_udf.cpp |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/graph_reorder/libyt-lib-graph_reorder.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/llvm16/libyt-comp_nodes-llvm16.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/synchronization_service/libcompute-ydb-synchronization_service.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/expr_traits/libyt-lib-expr_traits.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/events/liblibs-quota_manager-events.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/shard/protos/libgraph-shard-protos.a |35.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/service/libcore-graph-service.a |35.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |35.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/hash/libyt-lib-hash.a |35.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/grpc/libfq-libs-grpc.a |35.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/common/libfq-libs-common.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/skip_index/libstorage-indexes-skip_index.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/discovery/libydb-services-discovery.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/value/libpublic-lib-value.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/scheme_types/libpublic-lib-scheme_types.a |35.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.a |35.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/deprecated/kicli/liblib-deprecated-kicli.a |35.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/json_value/libpublic-lib-json_value.a |35.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/liblibs-config-protos.a |35.6%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/python/moto/bin/moto_server |35.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/protos/liblibs-row_dispatcher-protos.a |35.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |35.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/read_rule/libfq-libs-read_rule.a |35.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yt/yql/providers/yt/gateway/qplayer/libyt-gateway-qplayer.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/files/libydb_cli-dump-files.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/command_base/libydb_cli_command_base.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/infer_schema/libyt-lib-infer_schema.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/dynamic_config/libydb-services-dynamic_config.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |35.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/liblibs-row_dispatcher-format_handler.a |35.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/ydb_discovery/libydb_cli_command_ydb_discovery.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/sdk_core_access/libydb_sdk_core_access.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/http/libyt-core-http.a |35.0%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/common/libcolumnshard-export-common.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/liblcbuckets-planner-level.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/test_connection/events/liblibs-test_connection-events.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/experimental/libpublic-lib-experimental.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/init_yt_api/libyt-lib-init_yt_api.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/protos/libcore-graph-protos.a |35.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/yson_value/libpublic-lib-yson_value.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/yql_parser/libydb_cli-common-yql_parser.a |35.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/config_clusters/libyt-lib-config_clusters.a |35.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/fq/libpublic-lib-fq.a |35.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/ydb/libfq-libs-ydb.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/dq/llvm16/libcomp_nodes-dq-llvm16.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_config/libfq-libs-control_plane_config.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/adapters/issue/libcpp-adapters-issue.a |35.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/common_client/impl/libclient-common_client-impl.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/util/libydb_cli-dump-util.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/keyvalue/protos/libcore-keyvalue-protos.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/batch/libkqp-common-batch.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/common_client/libsrc-client-common_client.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/include/ydb-cpp-sdk/client/topic/libydb-cpp-sdk-client-topic.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/io_formats/cell_maker/libcore-io_formats-cell_maker.a |35.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/common/shutdown/libkqp-common-shutdown.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/driver/libsrc-client-driver.a |35.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/tier/libsession-storage-tier.global.a |35.5%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/libcommon.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/config/libsrc-client-config.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/discovery/libsrc-client-discovery.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/coordination/libsrc-client-coordination.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Jinja2/py3/libpy3python-Jinja2-py3.global.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/health_check/libydb-core-health_check.a |35.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/compile_service/libcore-kqp-compile_service.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/expr_nodes/libproviders-yt-expr_nodes.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/federated_query/libcore-kqp-federated_query.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/operation/libclient-types-operation.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/codecs/libcore-persqueue-codecs.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Modules/_sqlite/libpy3python3-Modules-_sqlite.global.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/testlib/audit_helpers/libcore-testlib-audit_helpers.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/url_mapper/libyt-lib-url_mapper.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/integration/proto/libytflow-integration-proto.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/export/libsrc-client-export.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/llhttp/libcontrib-restricted-llhttp.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/libffi/libcontrib-restricted-libffi.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/common/libcolumnshard-operations-common.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/native/libyt-gateway-native.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/abstract/subscriber/libsubscriber-abstract-subscriber.a |35.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/local_rpc/libkqp-gateway-local_rpc.a |35.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/extensions/solomon_stats/libclient-extensions-solomon_stats.a |35.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/protos/libapi-protos-persqueue-deprecated.a |35.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/node_service/libcore-kqp-node_service.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/res_pull/libyt-lib-res_pull.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/events/tx_completed/libsubscriber-events-tx_completed.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/set/libcpp-unicode-set.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/libydb-core-protos.a |35.4%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_executer_impl.cpp |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_locks_helper.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_validate.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_planner_strategy.cpp |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |35.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |35.3%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/executer_actor/kqp_executer.h_serialized.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_partition_helper.cpp |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.global.a |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_planner.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_table_resolver.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_partitioned_executer.cpp |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/topics/libcore-kqp-topics.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/abstract/events/libsubscriber-abstract-events.a |35.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/MarkupSafe/py3/libpy3python-MarkupSafe-py3.global.a |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_literal_executer.cpp |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_executer_stats.cpp |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/log/libyt-lib-log.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/skiff/libyt-lib-skiff.a |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_scheme_executer.cpp |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/schema/libyt-lib-schema.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/impl/libclient-federated_topic-impl.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/tables/libkqp-workload_service-tables.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/keyvalue/libydb-core-keyvalue.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyHamcrest/py3/libpy3python-PyHamcrest-py3.global.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/draft/libsrc-client-draft.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/MarkupSafe/py3/libpy3python-MarkupSafe-py3.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/tx_reader/libtx-columnshard-tx_reader.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/transactions/libcolumnshard-transactions-transactions.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/libcore-protos-schemeshard.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/libsrc-client-federated_topic.a |34.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_scan_executer.cpp |34.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/sdk/cpp/src/client/iam/libsrc-client-iam.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/libpy3library-folder_service-proto.global.a |34.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyJWT/py3/libpy3python-PyJWT-py3.global.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme_types/libydb-core-scheme_types.a |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_data_executer.cpp |35.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a |35.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/datastreams/libsrc-client-datastreams.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/data_events/libcore-tx-data_events.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyYAML/py3/libpy3python-PyYAML-py3.global.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyYAML/py3/libpy3python-PyYAML-py3.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/general_cache/service/libtx-general_cache-service.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yson_helpers/libyt-lib-yson_helpers.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Lib/libpy3tools-python3-Lib.global.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/security/ldap_auth_provider/libcore-security-ldap_auth_provider.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.global.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/libpy3library-actors-protos.global.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_endpoints/libclient-impl-ydb_endpoints.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/iam_private/libsrc-client-iam_private.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yt_download/libyt-lib-yt_download.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/service/liblimiter-grouped_memory-service.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/kqp_session_common/libimpl-ydb_internal-kqp_session_common.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/libapi-grpc.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/common/libimpl-ydb_internal-common.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/sessions/libcore-sys_view-sessions.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/locks/libcore-tx-locks.a |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Pygments/py3/libpy3python-Pygments-py3.global.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/mkql_helpers/libyt-lib-mkql_helpers.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/libpy3library-formats-arrow-protos.global.a |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/logger/libimpl-ydb_internal-logger.a 
|34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/db_driver_state/libimpl-ydb_internal-db_driver_state.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/value_helpers/libimpl-ydb_internal-value_helpers.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/libsrc-client-topic.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/controller/libdata_sharing-initiator-controller.global.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/make_request/libimpl-ydb_internal-make_request.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/plain_status/libimpl-ydb_internal-plain_status.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/grpc_connections/libimpl-ydb_internal-grpc_connections.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/counters/libcolumnshard-blobs_action-counters.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/retry/libimpl-ydb_internal-retry.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_stats/libclient-impl-ydb_stats.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/libcore-tx-columnshard.a |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/session_pool/libimpl-ydb_internal-session_pool.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/thread_pool/libimpl-ydb_internal-thread_pool.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/mkql_dq/libproviders-yt-mkql_dq.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mind/libydb-core-mind.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/session/libdata_sharing-common-session.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/libcontrib-tools-python3.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/import/libsrc-client-import.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/include/libclient-persqueue_public-include.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/proto/libproviders-yt-proto.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/params/libsrc-client-params.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libpy3core-config-protos.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/opt/libproviders-yt-opt.a |34.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/sdk/cpp/src/client/proto/libsrc-client-proto.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/libpy3libs-config-protos.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/libpy3core-protos-schemeshard.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/operation/libsrc-client-operation.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/resources/libsrc-client-resources.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/result/libsrc-client-result.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/libpy3core-scheme-protos.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/destination/transactions/libdata_sharing-destination-transactions.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/resources/libsrc-client-resources.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/abstract/libchanges-compaction-abstract.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/query/impl/libclient-query-impl.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/ss_tasks/libsrc-client-ss_tasks.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libpy3columnshard-engines-protos.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libpy3columnshard-common-protos.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/common/libclient-topic-common.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/manager/libcolumnshard-data_sharing-manager.a |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/codecs/libclient-topic-codecs.global.a |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/counters/libengines-changes-counters.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/table/query_stats/libclient-table-query_stats.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/libsrc-client-types.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/scheme/libsrc-client-scheme.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/libclient-types-credentials.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/rate_limiter/libsrc-client-rate_limiter.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/abstract/libservices-bg_tasks-abstract.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/expr_nodes/libproviders-ytflow-expr_nodes.a |34.7%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.global.a |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/exceptions/libclient-types-exceptions.a |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/yatest_common/libpy3python-testing-yatest_common.global.a |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/query/libsrc-client-query.a |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/status/libclient-types-status.a |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/fatal_error_handlers/libclient-types-fatal_error_handlers.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/login/libtypes-credentials-login.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/row_spec/libyt-lib-row_spec.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/table/libsrc-client-table.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/build/libyt-yt-build.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/oauth2_token_exchange/libtypes-credentials-oauth2_token_exchange.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/pytest/libpy3library-python-pytest.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a |34.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/arrow/fbs/libclient-arrow-fbs.a |34.8%| PREPARE $(WITH_JDK17-sbr:7832760150) |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/decimal/libsrc-library-decimal.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/value/libsrc-client-value.a |34.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt.cpp |34.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_field_subset.cpp |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a |34.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_physical_optimize.cpp |34.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_weak_fields.cpp |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet/libydb-core-tablet.a |34.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource_exec.cpp |34.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_key_range.cpp |34.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_provider.cpp |34.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_lambda.cpp |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/testlib/libydb-core-testlib.a |34.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yql/providers/yt/provider/yql_yt_wide_flow.cpp |34.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink.cpp |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |34.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_cbo_helpers.cpp |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/impl/libclient-persqueue_public-impl.a |34.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_content.cpp |34.0%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/client/libyt-yt-client.a |33.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_merge.cpp |33.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_fuse.cpp |33.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_output.cpp |33.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/core/libyt-yt-core.a |33.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_join.cpp |33.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_ytql.cpp |33.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_io_filter.cpp |33.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_map.cpp |33.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_io_utils.cpp |33.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_block_input.cpp |33.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_intent_determination.cpp |33.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/table/impl/libclient-table-impl.a |33.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_trackable.cpp |33.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_write.cpp |33.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource_constraints.cpp |33.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_push.cpp |33.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_finalize.cpp |33.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_constraints.cpp |33.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_helper.cpp |33.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_forwarding_gateway.cpp |33.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_partition.cpp |33.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource_type_ann.cpp |33.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_misc.cpp |33.8%| PREPARE $(JDK_DEFAULT-472926544) |33.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_dq_optimize.cpp |33.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/phy_opt/yql_yt_phy_opt_sort.cpp |33.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_gateway.cpp |33.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_exec.cpp |33.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasink_type_ann.cpp |33.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_dq_hybrid.cpp |33.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_epoch.cpp |34.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_peephole.cpp 
|34.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_join_reorder.cpp |34.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_load_table_meta.cpp |34.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_key.cpp |34.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_datasource.cpp |34.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_load_columnar_stats.cpp |34.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/provider/yql_yt_op_settings.h_serialized.cpp |34.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_physical_finalizing.cpp |34.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_dq_integration.cpp |34.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_io_discovery_walk_folders.cpp |34.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_optimize.cpp |34.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_op_hash.cpp |34.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_table_desc.cpp |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libpy3scheme-defaults-protos.global.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/impl/libclient-topic-impl.a |34.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_provider_impl.cpp |34.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_ytflow_optimize.cpp |34.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/libsrc-library-operation_id.a |34.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_provider_context.cpp |34.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_op_settings.cpp |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/protos/liblibrary-operation_id-protos.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/persqueue/obfuscate/libsdk-library-persqueue-obfuscate-v3.a |34.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_io_discovery.cpp |34.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_mkql_compiler.cpp |34.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_ytflow_integration.cpp |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/jwt/libsrc-library-jwt.a |34.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_horizontal_join.cpp |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/defs/libcore-file_storage-defs.a |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/issue/libsrc-library-issue.a |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/uuid/libsrc-library-uuid.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/persqueue/topic_parser_public/libsdk-library-persqueue-topic_parser_public-v3.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/column/libengines-scheme-column.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/arrow/libyt-client-arrow.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libscheme-defaults-protos.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/string_utils/helpers/liblibrary-string_utils-helpers.a |34.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |34.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_helpers.cpp |34.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/tiering/libengines-scheme-tiering.a |34.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/protos/libservices-bg_tasks-protos.a |34.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/filter/libpy3python-testing-filter.global.a |34.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/abstract/libstorage-actualizer-abstract.a |34.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/pytest/plugins/libpy3python-pytest-plugins.global.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/common/libtx-replication-common.a |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/long_tx_service/public/libtx-long_tx_service-public.a |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/common/libstorage-actualizer-common.a |34.4%| PREPARE $(CLANG-1922233694) |34.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/priorities/service/libtx-priorities-service.a |34.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/auth/libydb-services-auth.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/chunks/libengines-storage-chunks.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/libpy3library-login-protos.global.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/libstorage-indexes-bits_storage.a |34.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bridge/libydb-services-bridge.a |34.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.a |34.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/libstorage-indexes-bits_storage.global.a |34.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |34.1%| PREPARE $(JDK17-472926544) |34.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/yatest_lib/libpy3python-testing-yatest_lib.global.a |34.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/common/libtx-schemeshard-common.a |34.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/extractor/libext_index-metadata-extractor.a |34.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/base32/libcpp-string_utils-base32.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/config/libydb-services-config.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/query_tracker_client/libyt-client-query_tracker_client.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/common/libservices-ext_index-common.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/grpc/libapi-grpc-persqueue-deprecated.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/auth/libyt-library-auth.a |34.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_table.cpp |34.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/libolap-bg_tasks-adapter.a |34.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/replication/ydb_proxy/local_proxy/libreplication-ydb_proxy-local_proxy.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a |34.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/protos/libolap-bg_tasks-protos.a |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/extractor/libext_index-metadata-extractor.global.a |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/cms/libydb-services-cms.a |34.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_join_impl.cpp |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/libindexes-portions-extractor.a |34.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a |34.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yql/providers/yt/provider/yql_yt_logical_optimize.cpp |34.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/libstorage-indexes-portions.a |34.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |34.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/https/libyt-core-https.a |34.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/portions/extractor/libindexes-portions-extractor.global.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/common/libalter-in_store-common.a |34.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/formats/libyt-client-formats.a |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/misc/isa_crc64/libisa-l_crc_yt_patch.a |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/liboperations-alter-in_store.a |34.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/constructor/liboptimizer-lbuckets-constructor.global.a |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |34.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/chaos_lease_base.cpp |34.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/size.cpp |34.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client_cache.cpp |34.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/data_statistics.cpp |34.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |34.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/file_writer.cpp |34.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limits.cpp |34.8%| PREPARE $(WITH_JDK-sbr:7832760150) |34.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/delegating_transaction.cpp |34.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/serialize.cpp |34.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/roaming_channel.cpp |34.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limiter.cpp |34.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/services/kesus/libydb-services-kesus.a |34.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service.cpp |34.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/overload_controlling_service_base.cpp |34.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/config.cpp |34.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/operation_client.cpp |34.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/retrying_channel.cpp |34.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/libapi-protos.a |34.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/null_channel.cpp |34.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/response_keeper.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/helpers.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/local_address.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/service_combiner.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/configurable_singleton_def.cpp |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/keyvalue/libydb-services-keyvalue.a |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/client.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/overload_controller.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/checksum.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/codicil.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/config.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bloom_filter.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/random_access_gzip.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/cache_config.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/delayed_executor.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_rw_lock.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_poller.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_semaphore.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/throughput_throttler.cpp |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream_pipe.cpp |35.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/libyt-yt-core.global.a |35.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_looper.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_barrier.cpp |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/controller/libtx-replication-controller.a |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zstd.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/action_queue.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zlib.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/public.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/snappy.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/bzip2.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/config.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/public.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/table_mount_cache.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_io_options.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/serialize.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/composite_compare.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/table_mount_cache_detail.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/columnar.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/columnar_statistics.cpp |35.1%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/client/table_client/column_sort_schema.cpp |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/fq/libydb-services-fq.a |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/generator.cpp |35.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/datastreams/libydb-services-datastreams.a |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/helpers.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/access_control.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/job_tracker_client/helpers.cpp |35.2%| [UN] {default-linux-x86_64, release, asan} $(B)/yql/essentials/tests/common/test_framework/udfs_deps/common-test_framework-udfs_deps.pkg.fake |35.2%| RESOURCE $(sbr:4966407557) |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/hydra/version.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/acl.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/file_client/config.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/hive/timestamp_map.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/infinite_entity.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/merge_complex_types.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/check_yson_token.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/helpers.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_batch_writer.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_stream.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/public.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/connection.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/address_helpers.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/chaos_lease.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/file_reader.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_mount_cache.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/queue_transaction_mixin.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rowset.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/public.cpp |35.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/options.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/journal_client.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/journal_writer.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/journal_reader.cpp |35.3%| PREPARE $(FLAKE8_PY2-2255386470) |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/private.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/connection_impl.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/query_tracker_client.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_batch_reader.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/helpers.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/etc_client.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/helpers.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/dynamic_table_transaction_mixin.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/config.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_writer.cpp |35.4%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/vector/libvector_udf.so |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/internal_client.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/client/api/rpc_proxy/target_cluster_injecting_channel.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/shuffle_client.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/wire_row_stream.cpp |35.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/config.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/timestamp_provider.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_reader.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/delegating_client.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_table_client.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/table_partition_reader.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/transaction.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/security_client.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/election/public.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/skynet.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/helpers.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/public.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/check_type_compatibility.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card_cache.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/transaction.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/cypress_client/public.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/bundle_controller_client/bundle_controller_client.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/sticky_transaction_pool.cpp |35.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/packet.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/table_client.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/chunk_replica.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/helpers.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/uuid_text.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/persistent_queue.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/ready_event_reader_base.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/time_text.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/journal_client/public.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/job_tracker_client/public.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/journal_client/config.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client_common.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_table_session.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/method_helpers.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/protocol.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/public.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/io_tags.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/common.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/query_client/query_statistics.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/config.cpp |35.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/object_client/public.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/bundle_controller_client/bundle_controller_settings.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/read_limit.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/object_client/helpers.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/client/query_client/query_builder.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/yson_format_conversion.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/config.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/workload.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/spec_patch.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card_serialization.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/operation_id_or_alias.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/config.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/operation_cache.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/partition_reader.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/queue_rowset.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/config.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/node_directory.cpp |35.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/helpers.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/public.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/transaction_impl.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/requests.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/consumer_client.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/producer_client.cpp |35.7%| PREPARE $(GDB) |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/client_base.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/signature.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/validator.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/adapters.cpp |35.7%| PREPARE $(FLAKE8_LINTER-sbr:6561765464) |35.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/lists/liblists_udf.so |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/chunk_stripe_statistics.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/blob_reader.cpp |35.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/libtopfreq_udf.so |35.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/test/simple/libsimple_udf.so |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/column_rename_descriptor.cpp |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/check_schema_compatibility.cpp |35.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/digest/libdigest_udf.so |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schemaless_row_reorderer.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schemaless_dynamic_table_writer.cpp |35.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/logs/dsv/libdsv_udf.so |35.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/pipe.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key_bound.cpp |35.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/dummylog/libdummylog.so |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key_bound_compressor.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/comparator.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schema_serialization_helpers.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/helpers.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/name_table.cpp |35.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperloglog/libhyperloglog_udf.so |35.8%| [LD] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/udfs/common/math/libmath_udf.so |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/record_helpers.cpp |35.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/libstat_udf.so |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/merge_table_schemas.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/public.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_batch.cpp |35.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/dicts/libdicts_udf.so |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_buffer.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_base.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/record_codegen_cpp.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_output.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/public.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unordered_schemaful_reader.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_consumer.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/value_consumer.cpp |35.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_reader.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/helpers.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unversioned_value.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/timestamped_schema_helpers.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_util.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/helpers.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/ypath/parser_detail.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_upload_options.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/logical_type.cpp |35.9%| PREPARE $(BLACK_LINTER-sbr:8415400280) |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/validate_logical_type.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelation_token.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_row.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/future.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/current_invoker.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/noop_timestamp_provider.cpp |35.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/yson2/libyson2_udf.so |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_detail.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/codicil_guarded_invoker.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/config.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_pool.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/packet.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/brotli.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/config.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelable_context.cpp |35.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/watermark_runtime_data.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/batching_timestamp_provider.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher.cpp |36.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/structs/libstructs_udf.so |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/timestamp_provider_base.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/compression/stream.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/wire_protocol.cpp |36.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/type_inspection/libtype_inspection_udf.so |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schema.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/config.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/local_bypass.cpp |36.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/callables/libcallables_udf.so |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/dictionary_codec.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lz.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lzma.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unversioned_row.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/nonblocking_batcher.cpp |36.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/histogram/libhistogram_udf.so |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/remote_timestamp_provider.cpp |36.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/set/libset_udf.so |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/lease_manager.cpp |36.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/coroutine.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_queue_scheduler_thread.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/client.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_queue.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/execution_stack.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/ypath/rich.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_action_queue.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_thread_pool.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/codec.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/server.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_detail.cpp |36.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/streaming/libstreaming_udf.so |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fls.cpp |36.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher_impl.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_manager.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_pool.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/retrying_periodic_executor.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_alarm.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_scheduler_thread.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/pollable_detail.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_yielder.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_queue.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/notify_manager.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/config.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_executor.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/propagating_storage.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/connection.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/concurrency/profiling_helpers.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/config.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_affinity.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/suspendable_action_queue.cpp |36.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/quantized_executor.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_throttler.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/client_impl.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduler_thread.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduled_executor.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/system_invokers.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/single_queue_scheduler_thread.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_callbacks.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/config.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/config.cpp |36.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/top/libtop_udf.so |36.3%| [ld] {default-linux-x86_64, release, asan} $(B)/tools/flake8_linter/flake8_linter |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/tls.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/config.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/crypto.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/two_level_fair_share_thread_pool.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/helpers.cpp |36.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/tools/astdiff/astdiff |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/dns_resolver.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/log_writer_detail.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/ares_dns_resolver.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/fluent_log.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_parser.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_writer.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/logger_owner.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/file_log_writer.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/backoff_strategy.cpp |36.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/blob_output.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/formatter.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/system_log_event_provider.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_output.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_log_writer.cpp |36.4%| [ld] {default-linux-x86_64, release, asan} $(B)/tools/black_linter/black_linter |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/serializable_logger.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/adjusted_exponential_moving_average.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bitmap.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/zstd_compression.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packed_unsigned_vector.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/public.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/linear_probe.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packing.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/id_generator.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pool_allocator.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/arithmetic_formula.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/misc/fair_share_hierarchical_queue.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hedging_manager.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/connection.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/histogram.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hazard_ptr.cpp |36.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/memory_usage_tracker.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/coro_pipe.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/parser_helpers.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/config.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pattern_formatter.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/process_exit_profiler.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/protobuf_helpers.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/relaxed_mpsc_queue.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/zerocopy_output_writer.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/digest.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/error.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/random.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_statistics_producer.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_profiler.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize_dump.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fs.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/utf8_decoder.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistic_path.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/shutdown.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_def.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/public.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/socket.cpp |36.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/dialer.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/proc.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/channel_detail.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/context.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/slab_allocator.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_registry.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/profiling/timing.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/descriptors.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authentication_identity.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authenticator.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistics.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/caching_channel_factory.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dispatcher.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/listener.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/load.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_options.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/schemas.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/hedging_channel.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_channel.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_server.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message_format.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/rpc/balancing_channel.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/address.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/peer_discovery.cpp |36.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/public.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/protocol_version.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/request_queue_provider.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/token.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/helpers.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/public.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/per_key_request_queue_provider.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/serialized_channel.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/allocation_tags.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/spin_wait_slow_path_logger.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/utilex/random.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/server.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/static_channel_factory.cpp |36.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/helpers.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/stack.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/throttling_channel.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/service_discovery/service_discovery.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dynamic_channel_pool.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/depth_limiting_yson_consumer.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/server_detail.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/channel.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/thread.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/tokenizer.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_consumer.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/viable_peer_registry.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attributes_stripper.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attribute_consumer.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/consumer.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/forwarding_consumer.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop.cpp |36.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/stream.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_writer.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_helpers.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/config.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/list_verb_lazy_yson_consumer.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/lexer.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service_detail.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/config.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/parser.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/null_consumer.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/producer.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/syntax_checker.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string.cpp |36.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/file/libfile_udf.so |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser_deserialize.cpp |36.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperscan/libhyperscan_udf.so |36.9%| [LD] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/udfs/common/python/python3_small/libpython3_udf.so |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_unknown_fields.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/stream.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_merger.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_filter.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_builder_stream.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_designated_consumer.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/bindings.cpp |36.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/tokenizer.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token_writer.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/yson_builder.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/writer.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/exception_helpers.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_filtering_consumer.cpp |37.0%| [SB] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/postgresql/psql/psql |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attributes.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/statistics_producer.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_consumer.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_attribute_owner.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/helpers.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/convert.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/permission.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_filter.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/interned_attributes.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key.cpp |37.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.a |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_update.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_node_factory.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/static_service_dispatcher.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_visitor.cpp |37.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/compression.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/system_attribute_provider.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_resolver.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_builder.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node_detail.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/virtual.cpp |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/lib/sharding/libservices-lib-sharding.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_detail.cpp |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_service.cpp |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/abstract/libsession-storage-abstract.a 
|37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a |37.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_detail.cpp |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |37.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_client.cpp |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/llvm16/libminikql-computation-llvm16.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceshard/public/libtx-sequenceshard-public.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/selector/liblcbuckets-constructor-selector.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/libydb-services-metadata.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/request/libservices-metadata-request.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/credentials/libessentials-core-credentials.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/selector/liblcbuckets-constructor-selector.global.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.global.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/protos/libcolumnshard-export-protos.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a |37.2%| PREPARE $(CLANG16-1380963495) |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/svn_version/libpy3library-python-svn_version.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor/usage/libtx-conveyor-usage.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/libtx-columnshard-common.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/stub/libudf-service-stub.global.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_cluster_discovery/cluster_ordering/libservices-persqueue_cluster_discovery-cluster_ordering.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/tx_chain/libolap-bg_tasks-tx_chain.a |37.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/services/local_discovery/libydb-services-local_discovery.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/view/libydb-services-view.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/monitoring/libydb-services-monitoring.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/reservoir_sampling/libpy3library-python-reservoir_sampling.global.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/splitter/abstract/libcolumnshard-splitter-abstract.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/maintenance/libydb-services-maintenance.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/attrs/py3/libpy3python-attrs-py3.global.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/replication/libydb-services-replication.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/facade/libessentials-core-facade.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/yarl/libpy3contrib-python-yarl.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/strings/libpy3library-python-strings.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiohttp/libpy3contrib-python-aiohttp.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/multidict/libpy3contrib-python-multidict.global.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/tablet/libydb-services-tablet.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/packaging/py3/libpy3python-packaging-py3.global.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/rate_limiter/libydb-services-rate_limiter.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/protobuf/py3/libpy3python-protobuf-py3.global.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/arrow_kernels/registry/libcore-arrow_kernels-registry.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/cbo/libessentials-core-cbo.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/ast/serialize/libessentials-ast-serialize.a |37.4%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/conveyor_composite/service/libtx-conveyor_composite-service.a |37.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/erasure/libyt-library-erasure.a |37.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/yarl/libpy3contrib-python-yarl.global.a |36.9%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |36.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.a |36.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/grpcio/py3/libpy3python-grpcio-py3.a |37.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/yaml/libcore-viewer-yaml.a |37.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/ast/libyql-essentials-ast.a |36.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/protobuf/py3/libpy3python-protobuf-py3.a |37.1%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |37.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/arrow_kernels/request/libcore-arrow_kernels-request.a |37.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/libyt-library-profiling.a |37.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Modules/_sqlite/libpy3python3-Modules-_sqlite.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/lib2/py/libpy3python3-lib2-py.global.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |38.1%| [UN] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/postgresql/psql/psql |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/protos/libcolumnshard-transactions-protos.a |38.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/liburl_udf.so |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/events/libcore-wrappers-events.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/backup/libydb-services-backup.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/ut_helpers/libcore-wrappers-ut_helpers.a |38.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/protobuf/libprotobuf_udf.so |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/proto/libcore-ymq-proto.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes_gen/libessentials-core-expr_nodes_gen.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/procfs/libyt-library-procfs.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/numeric/libyt-library-numeric.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/base/libcore-ymq-base.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/common/libymq-queues-common.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/protos/libcore-viewer-protos.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/dq_integration/libessentials-core-dq_integration.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/decimal/libyt-library-decimal.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ydb_convert/libydb-core-ydb_convert.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/libydb-library-aclib.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/dnscachelib/liblibrary-actors-dnscachelib.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/dq_integration/transform/libcore-dq_integration-transform.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |38.1%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor/service/libtx-conveyor-service.a |38.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/download/libcore-file_storage-download.a |38.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/libunicode_udf.so |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/log_backend/liblibrary-actors-log_backend.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/svn_version/libpy3library-python-svn_version.global.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes/libessentials-core-expr_nodes.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/strings/libpy3library-python-strings.global.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/helpers/liblibrary-actors-helpers.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor_composite/usage/libtx-conveyor_composite-usage.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/public/libtx-coordinator-public.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/resource_tracker/liblibrary-profiling-resource_tracker.global.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/resource/libpy3library-python-resource.global.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/integration/interface/libytflow-integration-interface.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/http_download/proto/libfile_storage-http_download-proto.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/general_cache/source/libtx-general_cache-source.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/http_download/libcore-file_storage-http_download.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/libydb-core-tx.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/general_cache/usage/libtx-general_cache-usage.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/column_converters/libyt-library-column_converters.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/histogram/libessentials-core-histogram.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ymq/libydb-services-ymq.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/resource_tracker/liblibrary-profiling-resource_tracker.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_reader/libtx-columnshard-blobs_reader.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/re2/libyt-library-re2.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/signals/libyt-library-signals.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/libessentials-core-issue.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/usage/liblimiter-grouped_memory-usage.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/data_events/common/libtx-data_events-common.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/libcore-file_storage-proto.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/quantile_digest/libyt-library-quantile_digest.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/priorities/usage/libtx-priorities-usage.a |38.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/libcore-issue-protos.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/libessentials-core-issue.global.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tvm/libyt-library-tvm.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/minsketch/libessentials-core-minsketch.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tracing/libyt-library-tracing.a |38.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/core/pg_settings/libessentials-core-pg_settings.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/langver/libessentials-core-langver.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/udf_resolver/libcore-qplayer-udf_resolver.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/extract_predicate/libessentials-core-extract_predicate.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/interface/libqplayer-storage-interface.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/libessentials-core-file_storage.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tz_types/libyt-library-tz_types.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/scheme_cache/libcore-tx-scheme_cache.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/events/libolap-bg_tasks-events.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/ytprof/api/liblibrary-ytprof-api.a |38.4%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json2/libjson2_udf.so |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/undumpable/libyt-library-undumpable.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a |38.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/mounts/libcore-services-mounts.global.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/skiff_ext/libyt-library-skiff_ext.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/grpc/client/libsdk-library-grpc-client-v3.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/user_data/libessentials-core-user_data.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/program/libcore-tx-program.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/abstract/liboperations-alter-abstract.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/mounts/libcore-services-mounts.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/config_shards/libalter-in_store-config_shards.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_preprocessing/interface/libcore-url_preprocessing-interface.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_lister/interface/libcore-url_lister-interface.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/formats/libyt_proto-yt-formats.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/sql_types/libessentials-core-sql_types.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/codegen/llvm16/libminikql-codegen-llvm16.a |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/arrow/libessentials-minikql-arrow.a |38.4%| PREPARE $(CLANG18-1866954364) |38.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/liblibrary-actors-core.a |38.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/services/lib/actors/libservices-lib-actors.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/parser/libminikql-jsonpath-parser.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/mediator/libcore-tx-mediator.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/datetime/libessentials-minikql-datetime.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/libessentials-core-services.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/ttl/libschemeshard-olap-ttl.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/libyt_proto-yt-core.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/libessentials-parser-pg_catalog.global.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/common/antlr4/libparser-common-antlr4.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/hyperscan/libjsonpath-rewrapper-hyperscan.global.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/dom/libessentials-minikql-dom.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/libessentials-minikql-jsonpath.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/libminikql-jsonpath-rewrapper.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceproxy/libcore-tx-sequenceproxy.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceproxy/public/libtx-sequenceproxy-public.a |38.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |38.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/operation_queue_timer.h_serialized.cpp |38.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__clean_pathes.cpp |38.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__background_cleaning.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import.cpp |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceshard/libcore-tx-sequenceshard.a |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp |38.5%| PREPARE $(CLANG-874354456) |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_table.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_index.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_replication.cpp |38.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/peephole_opt/libessentials-core-peephole_opt.a |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_resource_pool.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_sysview.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_subdomain.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_solomon.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__background_compaction.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_initiate_build_index.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_view.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__borrowed_compaction.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_unsafe.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_just_reject.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__list_users.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__sync_update_tenants.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_memory_changes.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_tables.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table_index.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__notify.cpp |38.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_side_effects.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_table.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__state_changed_reply.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_continuous_backup.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_move_sequence.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_restore_backup_collection.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_impl.cpp |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__serverless_storage_billing.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__publish_to_scheme_board.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__root_data_erasure_manager.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_replication.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_backup.cpp |38.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/type_ann/libessentials-core-type_ann.a |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_split_merge.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_part.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__list.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_access_database.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__unmark_restore_tables.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_backup_collection.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_bg_tasks__list.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/schemeshard_billing_helpers.cpp |38.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/libessentials-minikql-computation.a |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log_fragment.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__table_stats_histogram.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__tenant_data_erasure_manager.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__forget.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_audit_log.cpp |38.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__get.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_cdc_stream.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_identificators.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__upgrade_schema.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_resource_pool.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__cancel.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_effective_acl.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_sequence.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_common.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_count.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_cdc_stream_scan.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export.cpp |38.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__cancel.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_domain_links.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__create.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__get.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_just.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__forget.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export_uploaders.cpp |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/common_opt/libessentials-core-common_opt.a |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_build_index_tx_base.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export_flow_proposals.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__get.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_svp_migration.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_export__list.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_scheme_query_executor.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init_populator.cpp |38.9%| 
[CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__forget.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_check_args.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__cancel.cpp |38.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/libyt_proto-yt-client.a |38.9%| [BI] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/buildinfo_data.h |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_index.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_chain_map.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_flow_proposals.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_shard_deleter.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_info_types.h_serialized.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_collect.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__create.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import__list.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_path_element.cpp |38.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_dictitems.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_self_pinger.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_validate_ttl.cpp |39.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ydb/libydb-services-ydb.a |39.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/schemeshard_types.h_serialized.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_path.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_path_describer.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_import_getters.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_compress.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_types.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_info_types.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_coalesce.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_enumerate.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_bsv.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_xxport__helpers.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/user_attributes.cpp |39.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard_utils.cpp |39.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/formats/libyt-library-formats.a |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_lock.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_filter.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_solomon.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_decimal_mul.cpp |39.1%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__data_erasure_manager.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard.cpp |39.1%| [CC] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/build_info.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_grace_join.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__backup_collection_common.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__conditional_erase.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_guess.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__fix_bad_paths.cpp |39.1%| [CC] {default-linux-x86_64, release, asan} $(S)/library/cpp/svnversion/svn_interface.c |39.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_iterable.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__describe_scheme.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__delete_tablet_reply.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init_root.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__find_subdomain_path_id.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__make_access_database_no_inheritable.cpp |39.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_fs.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__login.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init_schema.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_logical.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_bsv.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_cdc_stream.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__monitoring.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_login.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_discard.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_external_table.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_apply_build_index.cpp |39.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/libyql-essentials-core.a |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_multihopping.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_prepend.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_reduce.cpp |39.2%| [CC] {default-linux-x86_64, release, asan} $(S)/library/cpp/build_info/build_info_static.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_sequence.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp |39.2%| [CC] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_reverse.cpp >> conftest.py::black [GOOD] |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_backup_incremental_backup_collection.cpp >> test_clickhouse.py::black [GOOD] >> test_greenplum.py::black [GOOD] >> test_join.py::black [GOOD] >> test_mysql.py::black [GOOD] >> test_postgresql.py::black [GOOD] >> test_ydb.py::black [GOOD] >> conftest.py::black [GOOD] >> test_join.py::black [GOOD] |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cancel_tx.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_cansel_build_index.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_bsv.cpp |39.2%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_map_join.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_assign_bsv.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_apply.cpp |39.3%| [CC] {default-linux-x86_64, release, asan} $(S)/library/cpp/svnversion/svnversion.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_data_source.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_external_table.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_blob_depot.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_append.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_consistent_copy_tables.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_aggrcount.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_subdomain.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_time_order_recover.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_sort.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_common_pq.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_build_index.cpp >> test.py::py2_flake8 [GOOD] |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_copy_table.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_factory.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_change_path_state.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_exists.cpp |39.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_decimal_div.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_container.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_visitall.cpp >> test.py::py2_flake8 [GOOD] |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_skiptake.cpp >> test.py::py2_flake8 [GOOD] |39.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup.cpp |39.4%| [CC] 
{default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/sandbox.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_decimal.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_func.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_top_sort.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_top.cpp >> test.py::flake8 [GOOD] >> test_update_script_tables.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |39.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__init.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_some.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_getelem.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_combine.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_decimal_mod.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_sum.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_if.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_logical.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_coalesce.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_todict.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg_minmax.cpp >> gen-report.py::flake8 [GOOD] |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_chain1_map.cpp |39.4%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_callable.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_map_join.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_indexed_table.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_chopper.cpp >> test.py::flake8 [GOOD] >> test_actorsystem.py::flake8 [GOOD] |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_contains.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_condense.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_index.cpp >> test.py::flake8 [GOOD] >> test_liveness_wardens.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> base.py::flake8 [GOOD] |39.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/generic/analytics/black >> test_ydb.py::black [GOOD] |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_condense1.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_length.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_source.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_blocks.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp |39.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/generic/streaming/black >> test_join.py::black [GOOD] |39.5%| [CC] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fromstring.cpp >> test_tpch_import.py::flake8 [GOOD] >> test_pdisk_format_info.py::flake8 [GOOD] >> test_disk.py::flake8 [GOOD] >> test_replication.py::flake8 [GOOD] >> test_self_heal.py::flake8 [GOOD] >> test_tablet_channel_migration.py::flake8 [GOOD] >> overlapping_portions.py::flake8 [GOOD] >> test_tablet.py::flake8 [GOOD] >> test_clickbench.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test_external.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_import_csv.py::flake8 [GOOD] >> test_tpcds.py::flake8 [GOOD] >> test_tpch.py::flake8 [GOOD] >> test_upload.py::flake8 [GOOD] >> test_workload_oltp.py::flake8 [GOOD] >> test_workload_simple_queue.py::flake8 [GOOD] >> test_schemeshard_limits.py::flake8 [GOOD] |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_frombytes.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_combine.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_element.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_ensure.cpp >> test_select.py::flake8 [GOOD] >> test_tpcds.py::flake8 [GOOD] >> common.py::flake8 [GOOD] >> test_tpch_spilling.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_rename.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> helpers.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> test_query.py::flake8 [GOOD] >> test_s3.py::flake8 [GOOD] >> test_secondary_index.py::flake8 [GOOD] |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_dynamic_variant.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_exists.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_extend.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fold.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp |39.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_db_changes.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fromyson.cpp |39.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part9/py2_flake8 >> test.py::py2_flake8 [GOOD] |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_fold1.cpp >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> tpc_tests.py::flake8 [GOOD] >> test_large_import.py::flake8 [GOOD] |39.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part2/py2_flake8 >> test.py::py2_flake8 [GOOD] |39.5%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_flow.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_factory.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_if.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_pickle.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_hasitems.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_group.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_lazy_list.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_listfromrange.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_heap.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_null.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_iterator.cpp >> test_dml.py::flake8 [GOOD] >> runner.py::flake8 [GOOD] >> test_ttl.py::flake8 [GOOD] >> test_cp_ic.py::flake8 [GOOD] >> test_dispatch.py::flake8 [GOOD] >> tablet_scheme_tests.py::flake8 [GOOD] >> test_retry.py::flake8 [GOOD] >> test_retry_high_rate.py::flake8 [GOOD] >> test_async_replication.py::flake8 [GOOD] >> test_vector_index.py::flake8 [GOOD] >> test_vector_index_large_levels_and_clusters.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_multinode_cluster.py::flake8 [GOOD] >> test_auditlog.py::flake8 [GOOD] |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_ifpresent.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_grace_join_imp.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize_list.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_hopping.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_join_dict.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_flatmap.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_skip.cpp |39.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part0/flake8 >> test.py::flake8 [GOOD] |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_now.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_mapnext.cpp |39.6%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_lookup.cpp |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize_measure_arg.cpp |39.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_next_value.cpp |39.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part10/py2_flake8 >> test.py::py2_flake8 [GOOD] |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_map.cpp |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize_rows_formatter.cpp |39.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/flake8 >> test_update_script_tables.py::flake8 [GOOD] |39.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/supp/ydb_supp >> test_recompiles_requests.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_rh_hash.cpp |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_multimap.cpp |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_match_recognize.cpp |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_size.cpp |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_random.cpp |39.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part8/py2_flake8 >> test.py::py2_flake8 [GOOD] |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_queue.cpp |39.7%| [CC] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_removemember.cpp |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_replicate.cpp |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_range.cpp |39.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part5/py2_flake8 >> test.py::py2_flake8 [GOOD] |39.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/olap_workload/tests/flake8 >> test_workload.py::flake8 [GOOD] >> run_tests.py::flake8 [GOOD] |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_seq.cpp |39.7%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_nop.cpp |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_safe_circular_buffer.cpp |39.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/result_convert/flake8 >> gen-report.py::flake8 [GOOD] |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_round.cpp |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_varitem.cpp |39.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_index.cpp |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_squeeze_to_list.cpp >> __main__.py::flake8 [GOOD] >> parser.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_vector_index.py::flake8 [GOOD] >> test_vector_index_negative.py::flake8 [GOOD] |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_timezone.cpp |39.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_cdc_stream.cpp |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_squeeze_state.cpp |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_scalar_apply.cpp |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_take.cpp |39.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part14/flake8 >> test.py::flake8 [GOOD] |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_switch.cpp |39.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/autoconfig/flake8 >> test_actorsystem.py::flake8 [GOOD] |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_tobytes.cpp |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_unwrap.cpp |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_udf.cpp |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_tostring.cpp |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_toindexdict.cpp |39.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part5/flake8 >> test.py::flake8 [GOOD] |39.8%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_tooptional.cpp |39.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/wardens/flake8 >> test_liveness_wardens.py::flake8 [GOOD] |39.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/s3_import/flake8 >> test_tpch_import.py::flake8 [GOOD] |39.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_way.cpp |39.9%| [TS] {asan, default-linux-x86_64, release} 
ydb/tests/fq/yt/kqp_yt_file/part17/flake8 >> test.py::flake8 [GOOD] |39.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part19/flake8 >> test.py::flake8 [GOOD] |39.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_join.cpp |39.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/oom/flake8 >> overlapping_portions.py::flake8 [GOOD] |39.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/flake8 >> test_tablet_channel_migration.py::flake8 [GOOD] |39.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/flake8 >> test_select.py::flake8 [GOOD] |39.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_block_agg.cpp |39.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_chain_map.cpp |39.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/load/flake8 >> test_workload_simple_queue.py::flake8 [GOOD] |39.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/log/tests/flake8 >> test_workload.py::flake8 [GOOD] |39.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_weakmember.cpp |39.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/flake8 >> test_rename.py::flake8 [GOOD] |39.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_filter.cpp |39.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/flake8 >> test_tablet.py::flake8 [GOOD] |39.9%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_map.cpp |40.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/flake8 >> test_schemeshard_limits.py::flake8 [GOOD] |40.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/invoke_builtins/llvm16/libminikql-invoke_builtins-llvm16.a |40.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_chopper.cpp |40.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_wide_condense.cpp |40.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/large/flake8 >> test_tpch_spilling.py::flake8 [GOOD] |40.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/node_broker/tests/flake8 >> test_workload.py::flake8 [GOOD] |40.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_while.cpp |40.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part4/flake8 >> test.py::flake8 [GOOD] |40.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/s3_import/large/flake8 >> test_large_import.py::flake8 [GOOD] |40.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/sql/lib/flake8 >> test_s3.py::flake8 [GOOD] |40.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part18/flake8 >> test.py::flake8 [GOOD] |40.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/flake8 >> test_secondary_index.py::flake8 [GOOD] |40.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_continuous_backup.cpp |40.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/dml/flake8 >> test_dml.py::flake8 [GOOD] |40.0%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_addmember.cpp |40.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_tests/flake8 >> tablet_scheme_tests.py::flake8 [GOOD] |40.0%| [CC] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_zip.cpp |40.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/flake8 >> test_retry_high_rate.py::flake8 [GOOD] |40.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_invoke.cpp |40.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/flake8 >> tpc_tests.py::flake8 [GOOD] |40.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_backup_collection.cpp |40.1%| [CC] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/mkql_withcontext.cpp |40.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/flake8 >> test_async_replication.py::flake8 [GOOD] |40.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_fs.cpp >> test.py::py2_flake8 [GOOD] >> allure_utils.py::flake8 [GOOD] >> remote_execution.py::flake8 [GOOD] >> results_processor.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> ydb_cli.py::flake8 [GOOD] >> ydb_cluster.py::flake8 [GOOD] |40.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/vector_index/large/flake8 >> test_vector_index_large_levels_and_clusters.py::flake8 [GOOD] |40.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/flake8 >> test_auditlog.py::flake8 [GOOD] |40.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_solomon.cpp |40.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/multinode/flake8 >> test_recompiles_requests.py::flake8 [GOOD] |40.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/runner/flake8 >> runner.py::flake8 [GOOD] |40.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_lock.cpp |40.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/flake8 >> test_ttl.py::flake8 [GOOD] |40.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_fs.cpp >> conftest.py::flake8 [GOOD] >> test_auth_system_views.py::flake8 [GOOD] >> test_create_users.py::flake8 [GOOD] >> test_create_users_strict_acl_checks.py::flake8 [GOOD] >> test_db_counters.py::flake8 [GOOD] >> test_dynamic_tenants.py::flake8 [GOOD] >> test_publish_into_schemeboard_with_common_ssring.py::flake8 [GOOD] >> test_storage_config.py::flake8 [GOOD] >> test_system_views.py::flake8 [GOOD] >> test_tenants.py::flake8 [GOOD] >> test_user_administration.py::flake8 [GOOD] >> test_users_groups_with_acl.py::flake8 [GOOD] |40.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp |40.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp |40.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp |40.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/flake8 >> test.py::flake8 [GOOD] |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp |40.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp |40.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp |40.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/run_tests/flake8 >> 
run_tests.py::flake8 [GOOD] |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore.cpp |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_rtmr.cpp |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_cdc_stream.cpp |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_restore_incremental_backup.cpp |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_sysview.cpp |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_view.cpp |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_continuous_backup.cpp |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/util/liblibrary-actors-util.a |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_backup_collection.cpp |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_data_source.cpp |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tiering/abstract/libtx-tiering-abstract.a |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp |40.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/mysql/flake8 >> test.py::flake8 [GOOD] |40.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/schemeshard__operation_drop_external_table.cpp |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/accessor/libydb-library-accessor.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tracing/usage/libtx-tracing-usage.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/multidict/libpy3contrib-python-multidict.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/std/libymq-queues-std.a |40.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tiering/tier/libtx-tiering-tier.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tracing/service/libtx-tracing-service.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/builtin_proto/protos_from_protoc/libpy3protobuf-builtin_proto-protos_from_protoc.global.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tx_allocator_client/libcore-tx-tx_allocator_client.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/lexer_common/libessentials-parser-lexer_common.a |40.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/vector_index/medium/flake8 >> test_vector_index_negative.py::flake8 [GOOD] |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/time_cast/libcore-tx-time_cast.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/builtin_proto/protos_from_protobuf/libpy3protobuf-builtin_proto-protos_from_protobuf.global.a |40.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/docs/generator/flake8 >> parser.py::flake8 [GOOD] |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiosignal/libpy3contrib-python-aiosignal.global.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/proto/libparser-pg_catalog-proto.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/antlr3/libparser-proto_ast-antlr3.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/grpcio/py3/libpy3python-grpcio-py3.global.a |40.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/python/frozenlist/libpy3contrib-python-frozenlist.global.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/frozenlist/libpy3contrib-python-frozenlist.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/idna/py3/libpy3python-idna-py3.global.a >> test.py::py2_flake8 [GOOD] |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/json/libcore-viewer-json.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/config/libproviders-common-config.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/six/py3/libpy3python-six-py3.global.a |40.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |40.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/jsonpath/libproto_ast-gen-jsonpath.a |40.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/libessentials-parser-pg_catalog.a >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |40.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |40.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/codec/arrow/libcommon-codec-arrow.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/libyql-essentials-protos.a >> test_ttl.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/activation/libproviders-common-activation.a |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ydb/py3/libpy3python-ydb-py3.global.a |40.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/queues/fifo/libymq-queues-fifo.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/util/libydb-core-util.a |40.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign >> test.py::flake8 [GOOD] >> test_cms_erasure.py::flake8 [GOOD] >> test_cms_restart.py::flake8 [GOOD] >> test_cms_state_storage.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/util/evlog/libcore-util-evlog.a |40.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/lib/flake8 >> ydb_cluster.py::flake8 [GOOD] |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/actor/cloud_events/libymq-actor-cloud_events.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/libyql-essentials-minikql.a |40.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part4/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/actor_type/liblibrary-actors-actor_type.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/gateway/libproviders-common-gateway.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/prof/liblibrary-actors-prof.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/gateways_utils/libproviders-common-gateways_utils.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/http/libcore-ymq-http.a |40.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/aclib/protos/liblibrary-aclib-protos.a >> base.py::flake8 [GOOD] >> data_correctness.py::flake8 [GOOD] >> data_migration_when_alter_ttl.py::flake8 [GOOD] >> tier_delete.py::flake8 [GOOD] >> ttl_delete_s3.py::flake8 [GOOD] >> ttl_unavailable_s3.py::flake8 [GOOD] >> unstable_connection.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> kikimr_config.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> scenario.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_case.py::flake8 [GOOD] |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/sqlite3/libcontrib-libs-sqlite3.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/codec/libproviders-common-codec.a |40.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/tenants/flake8 >> test_users_groups_with_acl.py::flake8 [GOOD] |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/harmonizer/libactors-core-harmonizer.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/dnsresolver/liblibrary-actors-dnsresolver.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/wrappers/libydb-core-wrappers.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/re2/libjsonpath-rewrapper-re2.global.a >> conftest.py::flake8 [GOOD] >> test_2_selects_limit.py::flake8 [GOOD] >> test_3_selects.py::flake8 [GOOD] >> test_bad_syntax.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> test_big_state.py::flake8 [GOOD] >> test_continue_mode.py::flake8 [GOOD] >> test_cpu_quota.py::flake8 [GOOD] >> test_delete_read_rules_after_abort_by_system.py::flake8 [GOOD] >> test_disposition.py::flake8 [GOOD] >> test_eval.py::flake8 [GOOD] >> test_invalid_consumer.py::flake8 [GOOD] >> test_kill_pq_bill.py::flake8 [GOOD] >> test_mem_alloc.py::flake8 [GOOD] >> test_metrics_cleanup.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_pq_read_write.py::flake8 [GOOD] >> test_public_metrics.py::flake8 [GOOD] >> test_read_rules_deletion.py::flake8 [GOOD] >> test_recovery.py::flake8 [GOOD] >> test_recovery_match_recognize.py::flake8 [GOOD] >> test_recovery_mz.py::flake8 [GOOD] >> test_restart_query.py::flake8 [GOOD] >> test_row_dispatcher.py::flake8 [GOOD] >> test_select_1.py::flake8 [GOOD] >> test_select_limit.py::flake8 [GOOD] >> test_select_limit_db_id.py::flake8 [GOOD] >> test_select_timings.py::flake8 [GOOD] >> test_stop.py::flake8 [GOOD] >> test_yds_bindings.py::flake8 [GOOD] >> test_yq_streaming.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |40.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/streaming_optimize/ydb-tests-fq-streaming_optimize |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/comp_nodes/libproviders-common-comp_nodes.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/mock/libactors-interconnect-mock.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/memory_log/liblibrary-actors-memory_log.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/liblibrary-actors-protos.a >> test.py::flake8 [GOOD] >> test_postgres.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_serverless.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> docker_wrapper_test.py::flake8 [GOOD] >> hive_matchers.py::flake8 [GOOD] >> test_create_tablets.py::flake8 [GOOD] >> test_drain.py::flake8 [GOOD] >> test_kill_tablets.py::flake8 [GOOD] |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/common/libessentials-parser-common.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |40.4%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v0/libproto_ast-gen-v0.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/antlr4/libparser-proto_ast-antlr4.a |40.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydbd/ydbd |40.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/arrow_resolve/libproviders-common-arrow_resolve.a |40.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache >> conftest.py::flake8 [GOOD] >> test_ydb_backup.py::flake8 [GOOD] >> test_ydb_flame_graph.py::flake8 [GOOD] >> test_ydb_impex.py::flake8 [GOOD] >> test_ydb_recursive_remove.py::flake8 [GOOD] >> test_ydb_scheme.py::flake8 [GOOD] >> test_ydb_scripting.py::flake8 [GOOD] >> test_ydb_sql.py::flake8 [GOOD] >> test_ydb_table.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> helpers.py::flake8 [GOOD] >> test_ctas.py::flake8 [GOOD] >> test_yt_reading.py::flake8 [GOOD] |40.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/dq/libproviders-common-dq.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/http/liblibrary-actors-http.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/proto/libjsonpath-rewrapper-proto.a |40.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/metrics/libproviders-common-metrics.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.global.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lua/libcontrib-libs-lua.a >> __main__.py::flake8 [GOOD] >> test_kqprun_recipe.py::flake8 [GOOD] >> test_mixed.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_parametrized_queries.py::flake8 [GOOD] >> reconfig_state_storage_workload_test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_board_workload.py::flake8 [GOOD] |40.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut >> test_scheme_board_workload.py::flake8 [GOOD] |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/int128/liblibrary-cpp-int128.a >> test_state_storage_workload.py::flake8 [GOOD] |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/mkql/libproviders-common-mkql.a >> conftest.py::flake8 [GOOD] |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jsonschema/py3/libpy3python-jsonschema-py3.global.a >> test_join.py::flake8 [GOOD] |40.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |40.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/metrics/protos/libcommon-metrics-protos.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/libpy3library-python-runtime_py3.global.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/common/libcpp-testing-common.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/mon/analytics/liblwtrace-mon-analytics.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-event-stream/librestricted-aws-aws-c-event-stream.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_wrapper/interface/libparser-pg_wrapper-interface.a >> test_query_cache.py::flake8 [GOOD] >> test_quota_exhaustion.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_copy_table.py::flake8 [GOOD] >> test_alter_compression.py::flake8 [GOOD] >> test_alter_tiering.py::flake8 [GOOD] >> test_insert.py::flake8 
[GOOD] >> test_read_update_write_load.py::flake8 [GOOD] >> test_scheme_load.py::flake8 [GOOD] >> test_simple.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_stats_mode.py::flake8 [GOOD] >> test_partitioning.py::flake8 [GOOD] |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sharding/libcore-tx-sharding.a |40.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zlib/libblockcodecs-codecs-zlib.global.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lzma/libcpp-streams-lzma.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/oauthlib/libpy3contrib-python-oauthlib.global.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/atomic/librestricted-boost-atomic.a |40.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |40.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |40.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/solomon/py2_flake8 >> test.py::py2_flake8 [GOOD] |40.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part5/py2_flake8 >> test.py::py2_flake8 [GOOD] |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.text/libpy3contrib-python-jaraco.text.global.a |40.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.functools/py3/libpy3python-jaraco.functools-py3.global.a |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jedi/py3/libpy3python-jedi-py3.global.a |40.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part10/flake8 >> test.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test_crud.py::flake8 [GOOD] >> test_inserts.py::flake8 [GOOD] >> test_kv.py::flake8 [GOOD] >> alter_compression.py::flake8 [GOOD] >> base.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> select_positive_with_schema.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/json/libcpp-protobuf-json.a |40.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part16/py2_flake8 >> test.py::py2_flake8 [GOOD] |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/parso/py3/libpy3python-parso-py3.global.a >> test_s3.py::flake8 [GOOD] >> test_quoting.py::flake8 [GOOD] |40.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/simdjson/libcontrib-libs-simdjson.a |40.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/flake8 >> test_ttl.py::flake8 [GOOD] |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1/libproto_ast-gen-v1.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tx_proxy/libcore-tx-tx_proxy.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/libproviders-common-proto.a |40.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part6/flake8 >> test.py::flake8 [GOOD] |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-auth/librestricted-aws-aws-c-auth.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/zc_memory_input/libcpp-streams-zc_memory_input.a |40.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part7/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |40.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/parser/proto_ast/gen/v1_ansi_antlr4/libproto_ast-gen-v1_ansi_antlr4.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/liblibrary-actors-interconnect.a |40.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/flake8 >> utils.py::flake8 [GOOD] |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd06/libcontrib-libs-zstd06.a |40.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/flake8 >> unstable_connection.py::flake8 [GOOD] |40.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_816e2dba53f55d924139cdb3c5.o |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/import_tracing/constructor/libpy3python-import_tracing-constructor.global.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/ut_helpers/liblibs-quota_manager-ut_helpers.a |40.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/provider/yql_dq_provider_ut.cpp |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_antlr4/libproto_ast-gen-v1_antlr4.a |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nghttp3/libcontrib-libs-nghttp3.a |40.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part12/py2_flake8 >> test.py::py2_flake8 [GOOD] |40.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v0_proto_split/libproto_ast-gen-v0_proto_split.a |40.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part17/py2_flake8 >> test.py::py2_flake8 [GOOD] |40.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/library/ut/flake8 >> kikimr_config.py::flake8 [GOOD] |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/prompt-toolkit/py3/libpy3python-prompt-toolkit-py3.global.a |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/import_tracing/lib/libpy3python-import_tracing-lib.global.a |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/random/librestricted-boost-random.a |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/typing-extensions/py3/libpy3python-typing-extensions-py3.global.a |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_ansi/libproto_ast-gen-v1_ansi.a |40.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yds/flake8 >> test_yq_streaming.py::flake8 [GOOD] |40.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part0/py2_flake8 >> test.py::py2_flake8 [GOOD] |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/common/libpy3functional-postgresql-common.global.a |40.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_b9aaa278b10ed44e5645b3ef2f.o |40.9%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/join/flake8 >> test_case.py::flake8 [GOOD] |40.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/http_router_ut.cpp |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/libydb-core-viewer.global.a |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/monlib/libpy3library-python-monlib.a |40.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/ut_sequence/datashard_ut_sequence.cpp |40.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/objcopy_e4166f3d104a6751b45e7e712f.o |40.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_compaction.cpp |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/kubernetes/libpy3contrib-python-kubernetes.global.a |40.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/s2n/librestricted-aws-s2n.a |41.0%| [TS] {asan, default-linux-x86_64, release} 
ydb/tests/fq/yt/kqp_yt_file/part1/flake8 >> test.py::flake8 [GOOD] |41.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/libpy3library-python-runtime_py3.a |40.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part3/flake8 >> test.py::flake8 [GOOD] |41.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_reassign.cpp |41.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part11/py2_flake8 >> test.py::py2_flake8 [GOOD] |41.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/test/test_import/libtest_import_udf.so |41.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/registry/libpython-symbols-registry.a |41.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/flake8 >> test_kill_tablets.py::flake8 [GOOD] |41.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/flake8 >> test_serverless.py::flake8 [GOOD] |41.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/postgresql/flake8 >> test_postgres.py::flake8 [GOOD] |41.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/ydb_cli/flake8 >> test_ydb_table.py::flake8 [GOOD] |41.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/postgres_integrations/go-libpq/flake8 >> docker_wrapper_test.py::flake8 [GOOD] |41.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/absl_flat_hash/libcpp-containers-absl_flat_hash.a |41.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/requests-oauthlib/libpy3contrib-python-requests-oauthlib.global.a |41.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/python/libpy3cpython-symbols-python.global.a >> test_commit.py::flake8 [GOOD] |41.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part18/py2_flake8 >> test.py::py2_flake8 [GOOD] |41.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_import/flake8 >> test_yt_reading.py::flake8 [GOOD] |41.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/protobuf/libmessagebus_protobuf.a |41.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/kqprun/recipe/flake8 >> __main__.py::flake8 [GOOD] |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/flake8 >> test_parametrized_queries.py::flake8 [GOOD] |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part8/flake8 >> test.py::flake8 [GOOD] |41.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/public-sdk-cpp-tests-integration-sessions_pool |41.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/reconfig_state_storage_workload/tests/flake8 >> test_state_storage_workload.py::flake8 [GOOD] |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/mixedpy/flake8 >> test_mixed.py::flake8 [GOOD] |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/kqprun/tests/flake8 >> test_kqprun_recipe.py::flake8 [GOOD] |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/generic/streaming/flake8 >> test_join.py::flake8 [GOOD] |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/flake8 >> test_copy_table.py::flake8 [GOOD] >> test_timeout.py::flake8 [GOOD] |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/plans/flake8 >> test_stats_mode.py::flake8 [GOOD] |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/query_cache/flake8 >> 
test_query_cache.py::flake8 [GOOD] |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/data_quotas/flake8 >> test_quota_exhaustion.py::flake8 [GOOD] |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/sql/flake8 >> test_kv.py::flake8 [GOOD] |41.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/flake8 >> test_simple.py::flake8 [GOOD] |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-mqtt/librestricted-aws-aws-c-mqtt.a |41.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/library/compatibility/binaries/downloader/flake8 >> __main__.py::flake8 [GOOD] |41.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/objcopy_60a4829fdc305e3a74a7ddcb41.o |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/t1ha/libcontrib-libs-t1ha.a |41.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/requests/py3/libpy3python-requests-py3.global.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Util/liblibs-poco-Util.a |41.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part15/flake8 >> test.py::flake8 [GOOD] |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-io/librestricted-aws-aws-c-io.a |41.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/flake8 >> test_partitioning.py::flake8 [GOOD] |41.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/cdc/tests/flake8 >> test_workload.py::flake8 [GOOD] |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/boto3/py3/libpy3python-boto3-py3.global.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/rsa/py3/libpy3python-rsa-py3.global.a >> column_table_helper.py::flake8 [GOOD] >> range_allocator.py::flake8 [GOOD] >> s3_client.py::flake8 [GOOD] >> thread_helper.py::flake8 [GOOD] >> time_histogram.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> ydb_client.py::flake8 [GOOD] |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/libc/libpython-symbols-libc.global.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/TargetParser/libllvm16-lib-TargetParser.a |41.2%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/flake8 >> test.py::flake8 [GOOD] |41.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/flake8 >> base.py::flake8 [GOOD] |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Coroutines/liblib-Transforms-Coroutines.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/adaptive/protos/libhistogram-adaptive-protos.a |41.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/flake8 >> test_s3.py::flake8 [GOOD] |41.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydbd/main.cpp |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json/libjson_udf.global.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/breakpad/src/client/linux/libsrc-client-linux.a |41.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/flake8 >> test_quoting.py::flake8 [GOOD] |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/sasl/libcontrib-libs-sasl.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/lib/libcommon-url_base-lib.a >> test.py::py2_flake8 [GOOD] >> test_restarts.py::flake8 [GOOD] |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/static/libcommon-stat-static.a |41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/sfh/libcpp-digest-sfh.a 
|41.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/breakpad/libydb-library-breakpad.global.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/roaring/libroaring.global.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/croaring/libcontrib-libs-croaring.a |41.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/show_create/view/tests/flake8 >> test_workload.py::flake8 [GOOD] |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/knn/libknn_udf.global.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/top_keeper/libcpp-containers-top_keeper.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/datetime/libdatetime_udf.global.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Vectorize/liblib-Transforms-Vectorize.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/hyperloglog/liblibrary-cpp-hyperloglog.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/punycode/libcpp-unicode-punycode.a >> test_fifo_messaging.py::flake8 [GOOD] >> test_generic_messaging.py::flake8 [GOOD] >> test_polling.py::flake8 [GOOD] |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/libtopfreq_udf.global.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/tld/liblibrary-cpp-tld.a |41.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/s3_backups/tests/flake8 >> test_workload.py::flake8 [GOOD] |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/timezone_conversion/liblibrary-cpp-timezone_conversion.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/xz/libcpp-streams-xz.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/ip_base/lib/libcommon-ip_base-lib.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/keys/libydb-library-keys.a |41.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydbd/export.cpp |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/adaptive/libcpp-histogram-adaptive.a |41.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/keyvalue/grpc_service_ut.cpp |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jmespath/py3/libpy3python-jmespath-py3.global.a |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/s3transfer/py3/libpy3python-s3transfer-py3.global.a |41.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/xmltodict/py3/libpy3python-xmltodict-py3.global.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/pire/libcpp-regex-pire.a |41.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/time_cast/time_cast_ut.cpp >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |41.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/static/libcommon-topfreq-static.a |41.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_9818d2b70aad7db98a0f9c044c.o |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/datastreams_helpers/libpy3tests-tools-datastreams_helpers.global.a |41.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_0035b673555f394234ae284e25.o |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/digest/libdigest_udf.global.a |41.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/fq_runner/libpy3tests-tools-fq_runner.global.a |41.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/ut/queue_id_ut.cpp >> test.py::flake8 [GOOD] |41.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_242486256e1af973cd1d5376d1.o |41.4%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/compress_base/lib/libcommon-compress_base-lib.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/sqs/libpy3tests-library-sqs.global.a |41.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_5d73baff4bb68923ddbe5f4fcd.o |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/libpy3functional-sqs-merge_split_common_table.global.a |41.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/objcopy_2efdf95387a81f55cf9c81071a.o |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/client/ydb_topic/include/libclient-ydb_topic-include.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/libydb-core-viewer.a >> test.py::flake8 [GOOD] |41.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/objcopy_45b6981aed17dda33d43217f52.o |41.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/compress_base/libcompress_udf.global.a |41.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/ydb-public-sdk-cpp-tests-integration-sessions |41.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |41.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperscan/libhyperscan_udf.global.a |41.4%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/pq_read/test/flake8 >> test_timeout.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/breakpad/src/liblibs-breakpad-src.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperloglog/libhyperloglog_udf.global.a |41.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/ip_base/libip_udf.global.a |41.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/re2/libre2_udf.global.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/pire/libpire_udf.global.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/retry/libpy3library-python-retry.global.a >> test.py::flake8 [GOOD] >> test_bridge.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/histogram/libhistogram_udf.global.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/libstat_udf.global.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json2/libjson2_udf.global.a |41.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_f8b2cbafb1fed0e25bf9683c2d.o |41.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_e31620202d3ba8df14ff2a18e1.o |41.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/objcopy_388aef0b6ac03d4f661ae7a30e.o |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/logs/dsv/libdsv_udf.global.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/set/libset_udf.global.a |41.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |41.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/common/flake8 >> ydb_client.py::flake8 [GOOD] |41.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_followers.cpp 
|41.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |41.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |41.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |41.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/ut/params_ut.cpp |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pure-eval/libpy3contrib-python-pure-eval.global.a |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-checksums/librestricted-aws-aws-checksums.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/urllib3/py3/libpy3python-urllib3-py3.global.a >> conftest.py::flake8 [GOOD] >> test_insert_restarts.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_workload.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |41.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/atomizer/libcpp-containers-atomizer.a |41.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part1/py2_flake8 >> test.py::py2_flake8 [GOOD] |41.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/datetime2/libdatetime2_udf.global.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/yson2/libyson2_udf.global.a |41.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/flake8 >> test_restarts.py::flake8 [GOOD] |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nayuki_md5/libcontrib-libs-nayuki_md5.a |41.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/flake8 >> test_polling.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml/py3/libpy3python-ruamel.yaml-py3.global.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/py/py3/libpy3python-py-py3.global.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/liburl_udf.global.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-sdkutils/librestricted-aws-aws-c-sdkutils.a |41.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part8/py2_flake8 >> test.py::py2_flake8 [GOOD] |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/stack-data/libpy3contrib-python-stack-data.global.a |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/hdr/libcpp-histogram-hdr.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/ObjCARC/liblib-Transforms-ObjCARC.a >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tenacity/py3/libpy3python-tenacity-py3.global.a |41.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |41.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyasn1-modules/py3/libpy3python-pyasn1-modules-py3.global.a |41.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/config/ut/ydb-services-config-ut |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/buffered/libmonlib-encode-buffered.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/utf8proc/libcontrib-libs-utf8proc.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/traitlets/py3/libpy3python-traitlets-py3.global.a >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> 
test_log_scenario.py::flake8 [GOOD] >> upgrade_to_internal_path_id.py::flake8 [GOOD] >> zip_bomb.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/TargetInfo/libTarget-X86-TargetInfo.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyrsistent/py3/libpy3python-pyrsistent-py3.global.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/googletest/googlemock/librestricted-googletest-googlemock.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pycparser/py3/libpy3python-pycparser-py3.global.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/mime/types/libcpp-mime-types.a |41.7%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part15/py2_flake8 >> test.py::py2_flake8 [GOOD] |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/libpy3api-grpc-draft.global.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/archive/liblibrary-cpp-archive.a |41.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load >> test_leader_start_inflight.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/python-dateutil/py3/libpy3python-python-dateutil-py3.global.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.global.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-crt-cpp/librestricted-aws-aws-crt-cpp.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/arch/sse41/libhighwayhash-arch-sse41.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/top/libtop_udf.global.a |41.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/merge_split_common_table/std/flake8 >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_unknown_data_source.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pytest/py3/libpy3python-pytest-py3.global.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-http/librestricted-aws-aws-c-http.a |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/libpy3api-protos-annotations.global.a >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime_with_service_name.py::flake8 [GOOD] >> select_positive_with_service_name.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> test_http_api.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openldap/libcontrib-libs-openldap.a |41.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/serialization/librestricted-boost-serialization.a |41.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/objcopy_b632f28ee823f938d14c0e85f9.o |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/snappy/libblockcodecs-codecs-snappy.global.a |41.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part7/flake8 >> test.py::flake8 [GOOD] |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/googletest/googletest/librestricted-googletest-googletest.a |41.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init >> test.py::flake8 [GOOD] >> test_bulkupserts_tpch.py::flake8 [GOOD] >> test_insert_delete_duplicate_records.py::flake8 [GOOD] >> test_insertinto_selectfrom.py::flake8 [GOOD] >> test_tiering.py::flake8 [GOOD] >> test_workload_manager.py::flake8 
[GOOD] >> test_split_merge.py::flake8 [GOOD] |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openldap/libraries/liblber/libopenldap-libraries-liblber.a |41.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers_ut.cpp |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/botocore/py3/libpy3python-botocore-py3.global.a >> test_dump_restore.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |41.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part3/py2_flake8 >> test.py::py2_flake8 [GOOD] |41.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/charset-normalizer/libpy3contrib-python-charset-normalizer.global.a |41.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part13/py2_flake8 >> test.py::py2_flake8 [GOOD] |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Instrumentation/liblib-Transforms-Instrumentation.a |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/google-auth/py3/libpy3python-google-auth-py3.global.a |41.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part16/flake8 >> test.py::flake8 [GOOD] |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/compproto/liblibrary-cpp-compproto.a |41.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/flake8 >> test_bridge.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_alloc_default.py::flake8 [GOOD] >> test_dc_local.py::flake8 [GOOD] >> test_result_limits.py::flake8 [GOOD] >> test_scheduling.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_clickhouse.py::flake8 [GOOD] >> test_greenplum.py::flake8 [GOOD] >> test_join.py::flake8 [GOOD] >> test_mysql.py::flake8 [GOOD] >> test_postgresql.py::flake8 [GOOD] >> test_ydb.py::flake8 [GOOD] >> test_cte.py::flake8 [GOOD] >> test_config_migration.py::flake8 [GOOD] |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/libcontrib-libs-highwayhash.a >> test_config_with_metadata.py::flake8 [GOOD] |41.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut >> test_configuration_version.py::flake8 [GOOD] |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/wheel/libpy3contrib-python-wheel.global.a >> test_distconf.py::flake8 [GOOD] |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml.clib/py3/libpy3python-ruamel.yaml.clib-py3.global.a >> test_generate_dynamic_config.py::flake8 [GOOD] |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/executing/libpy3contrib-python-executing.global.a |41.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/common/ydb-tests-fq-common >> test_account_actions.py::flake8 [GOOD] >> test_acl.py::flake8 [GOOD] >> test_counters.py::flake8 [GOOD] >> test_format_without_version.py::flake8 [GOOD] >> test_garbage_collection.py::flake8 [GOOD] >> test_multiplexing_tables_format.py::flake8 [GOOD] >> test_ping.py::flake8 [GOOD] >> test_queue_attributes_validation.py::flake8 [GOOD] >> test_queue_counters.py::flake8 [GOOD] >> test_queue_tags.py::flake8 [GOOD] >> test_queues_managing.py::flake8 [GOOD] >> test_throttling.py::flake8 [GOOD] >> test_encryption.py::flake8 [GOOD] |41.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp |41.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/libs/yaml-cpp/libcontrib-libs-yaml-cpp.a |41.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/objcopy_a926d3332cb769ac3e6c9e6e37.o |41.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |41.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_compute_actor_ut.cpp |41.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/pcre32/liblibs-pcre-pcre32.a |41.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |41.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/olap_workload/olap_workload |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cryptography/py3/libpy3python-cryptography-py3.global.a |41.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/mdb_endpoint_generator_ut.cpp |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/no_llvm/libminikql-computation-no_llvm.a |41.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/objcopy_854d6cc7a0cc5cdd793cfc1e6d.o |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/setuptools/py3/libpy3python-setuptools-py3.global.a |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/locale/librestricted-boost-locale.a >> test.py::py2_flake8 [GOOD] >> test.py::flake8 [GOOD] |41.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |41.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/objcopy_2f7ac0f750374152d13c6bfbcf.o |41.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_async_compute_actor_ut.cpp |41.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/objcopy_953328e5c3275a286b65dc3b1d.o |41.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_compute_issues_buffer_ut.cpp |41.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_compute_actor_async_input_helper_ut.cpp |41.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_source_watermark_tracker_ut.cpp |42.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/objcopy_d2d4e3343da9b011ee6a983244.o >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |41.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/objcopy_d0e1cde98d2ab34e72d18aae9c.o |41.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/common/libpy3tests-stress-common.global.a |42.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/restarts/flake8 >> test_insert_restarts.py::flake8 [GOOD] |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/workload/libpy3stress-node_broker-workload.global.a |42.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |42.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/transfer/tests/flake8 >> test_workload.py::flake8 [GOOD] |42.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/flake8 >> test.py::flake8 [GOOD] |42.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/simple_queue/tests/flake8 >> test_workload.py::flake8 [GOOD] |42.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part1/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/decorator/py3/libpy3python-decorator-py3.global.a |42.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/provider/ut/pushdown/pushdown_ut.cpp |42.0%| [AR] 
{BAZEL_DOWNLOAD} $(B)/contrib/python/cryptography/py3/libpy3python-cryptography-py3.a |42.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part7/py2_flake8 >> test.py::py2_flake8 [GOOD] |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/fcontext_impl/libboost-context-fcontext_impl.a |42.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part0/py2_flake8 >> test.py::py2_flake8 [GOOD] |42.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part9/py2_flake8 >> test.py::py2_flake8 [GOOD] |42.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part19/py2_flake8 >> test.py::py2_flake8 [GOOD] |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest/libcpp-testing-gtest.a |42.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/merge_split_common_table/fifo/flake8 >> test.py::flake8 [GOOD] |42.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |42.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest_main/libcpp-testing-gtest_main.a |42.0%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part12/flake8 >> test.py::flake8 [GOOD] |42.0%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part2/py2_flake8 >> test.py::py2_flake8 [GOOD] |42.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/flake8 >> zip_bomb.py::flake8 [GOOD] |42.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part6/py2_flake8 >> test.py::py2_flake8 [GOOD] |42.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_93dc3386250916dfae1ecb9b13.o |42.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/gateway/ut/metadata_conversion.cpp >> test.py::flake8 [GOOD] |42.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/common/flake8 >> test_unknown_data_source.py::flake8 [GOOD] |42.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part11/flake8 >> test.py::flake8 [GOOD] |42.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/large/flake8 >> test_leader_start_inflight.py::flake8 [GOOD] |42.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/oltp_workload/tests/flake8 >> test_workload.py::flake8 [GOOD] |42.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/solomon/flake8 >> test.py::flake8 [GOOD] |42.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/oracle/flake8 >> test.py::flake8 [GOOD] |42.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/ut/ydb-core-graph-ut >> test_batch_operations.py::flake8 [GOOD] >> test_compatibility.py::flake8 [GOOD] >> test_data_type.py::flake8 [GOOD] >> test_example.py::flake8 [GOOD] >> test_export_s3.py::flake8 [GOOD] >> test_followers.py::flake8 [GOOD] >> test_rolling.py::flake8 [GOOD] >> test_statistics.py::flake8 [GOOD] >> test_stress.py::flake8 [GOOD] >> test_vector_index.py::flake8 [GOOD] |42.1%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part13/flake8 >> test.py::flake8 [GOOD] |42.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_3d6916930a438b51675ef6dda7.o |42.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/objcopy_64cecb639c5f85fbf868097a08.o |42.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |42.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/sentinel_ut_unstable.cpp 
|42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/IPO/liblib-Transforms-IPO.a |42.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/http_api/flake8 >> test_http_api.py::flake8 [GOOD] |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/messagebus/libcpp-monlib-messagebus.a |42.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_background_compaction.cpp |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/libpy3transfer.global.a >> udf/test_datetime2.py::flake8 [GOOD] >> udf/test_digest.py::flake8 [GOOD] >> udf/test_digest_regression.py::flake8 [GOOD] >> test_common.py::flake8 [GOOD] >> test_yandex_audit.py::flake8 [GOOD] >> test_yandex_cloud_mode.py::flake8 [GOOD] >> test_yandex_cloud_queue_counters.py::flake8 [GOOD] |42.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/flake8 >> test_split_merge.py::flake8 [GOOD] |42.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/durationpy/libpy3contrib-python-durationpy.global.a |42.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/stress/kv/tests/flake8 >> test_workload.py::flake8 [GOOD] |42.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/actors/grouped_issues_ut.cpp |42.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |42.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/config/bsconfig_ut.cpp |42.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/sql/large/flake8 >> test_workload_manager.py::flake8 [GOOD] |42.2%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part2/flake8 >> test.py::flake8 [GOOD] |42.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_range_ops.cpp |42.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut |42.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/flake8 >> test_dump_restore.py::flake8 [GOOD] |42.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part6/py2_flake8 >> test.py::py2_flake8 [GOOD] |42.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/connector/tests/datasource/ydb/flake8 >> test.py::flake8 [GOOD] >> test_crud.py::flake8 [GOOD] >> test_discovery.py::flake8 [GOOD] >> test_execute_scheme.py::flake8 [GOOD] >> test_indexes.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_isolation.py::flake8 [GOOD] >> test_public_api.py::flake8 [GOOD] >> test_read_table.py::flake8 [GOOD] >> test_session_grace_shutdown.py::flake8 [GOOD] >> test_session_pool.py::flake8 [GOOD] >> test_clean.py::flake8 [GOOD] >> test_clickbench.py::flake8 [GOOD] >> test_diff_processing.py::flake8 [GOOD] >> test_external.py::flake8 [GOOD] >> test_import_csv.py::flake8 [GOOD] >> test_tpch.py::flake8 [GOOD] >> test_upload.py::flake8 [GOOD] >> test_workload_oltp.py::flake8 [GOOD] >> test_workload_simple_queue.py::flake8 [GOOD] >> test_sql_streaming.py::flake8 [GOOD] |42.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_proto_split/libproto_ast-gen-v1_proto_split.a |42.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/mem_alloc/flake8 >> test_scheduling.py::flake8 [GOOD] |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/NetSSL_OpenSSL/liblibs-poco-NetSSL_OpenSSL.a |42.3%| [TS] {asan, default-linux-x86_64, release} 
ydb/tests/fq/generic/analytics/flake8 >> test_ydb.py::flake8 [GOOD] |42.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/olap/high_load/read_update_write.cpp |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/program_options/librestricted-boost-program_options.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/liblibrary-cpp-blockcodecs.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/libcontrib-libs-opentelemetry-proto.a |42.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/kqp/plan2svg/flake8 >> test_cte.py::flake8 [GOOD] |42.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/config/flake8 >> test_generate_dynamic_config.py::flake8 [GOOD] |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/re2/libcontrib-libs-re2.a |42.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/liblib-Target-X86.a |42.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/common/flake8 >> test_throttling.py::flake8 [GOOD] |42.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part4/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test.py::flake8 [GOOD] >> base.py::flake8 [GOOD] >> test_delete_all_after_inserts.py::flake8 [GOOD] >> test_delete_by_explicit_row_id.py::flake8 [GOOD] |42.3%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/encryption/flake8 >> test_encryption.py::flake8 [GOOD] |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/websocket-client/libpy3contrib-python-websocket-client.global.a |42.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part14/py2_flake8 >> test.py::py2_flake8 [GOOD] |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Scalar/liblib-Transforms-Scalar.a >> test_alter_ops.py::flake8 [GOOD] >> test_copy_ops.py::flake8 [GOOD] >> test_scheme_shard_operations.py::flake8 [GOOD] |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Foundation/liblibs-poco-Foundation.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/regex/librestricted-boost-regex.a |42.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/viewer/tests/flake8 >> test.py::flake8 [GOOD] |42.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_init.cpp |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/malloc_extension/liblibs-tcmalloc-malloc_extension.a >> compare.py::flake8 [GOOD] |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd/libcontrib-libs-zstd.a |42.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |42.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |42.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/dq_file/part10/py2_flake8 >> test.py::py2_flake8 [GOOD] |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/fs/libpy3library-python-fs.global.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/coroutine/librestricted-boost-coroutine.a |42.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp >> test_example.py::flake8 [GOOD] >> test_sql.py::flake8 [GOOD] |42.4%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/tests/sql/hybrid_file/part3/py2_flake8 >> test.py::py2_flake8 [GOOD] |42.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_cca8dcd66462c9ca3c57fcb78e.o |42.4%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/fq/common/objcopy_b34c6a8a5501db208eebc5d8e4.o |42.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_9a3dabea847c21e0b4fa4cda26.o |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/recipe/libpy3python-testing-recipe.global.a |42.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/solomon/actors/ut/ut_helpers.cpp |42.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/objcopy_e32003454342267c2263935765.o |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Support/libllvm16-lib-Support.a |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/ut_helpers/libproviders-common-ut_helpers.a |42.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/aba998449c2518e3272d8e87fb_raw.auxcpp |42.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |42.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/ydb_recipe/libpy3ydb_recipe.global.a |42.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/solomon/actors/ut/dq_solomon_write_actor_ut.cpp |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/lib/cmds/libpy3tools-lib-cmds.global.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/libpy3olap_workload.global.a |42.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/ydb_recipe/objcopy_c55121179eeb3b5753498290c4.o |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/workload/libpy3stress-olap_workload-workload.global.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/workload/libpy3stress-transfer-workload.global.a |42.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/objcopy_9de271b22d7bcc64ef77cc3cde.o |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |42.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/serializable/flake8 >> test.py::flake8 [GOOD] |42.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/yt/actors/libproviders-yt-actors.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/container/librestricted-boost-container.a |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/logger/liblibrary-cpp-logger.global.a |42.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sequenceproxy/sequenceproxy_ut.cpp |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/binsaver/liblibrary-cpp-binsaver.a |42.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/actors/ut/yql_generic_lookup_actor_ut.cpp |42.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/datetime/libdatetime_udf.so |42.5%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/pire/libpire_udf.so |42.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |42.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/library/ut/ydb-tests-library-ut |42.5%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/re2/libre2_udf.so |42.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/libcontrib-restricted-abseil-cpp-tstring.a |42.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/compatibility/flake8 >> udf/test_digest_regression.py::flake8 [GOOD] |42.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |42.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/objcopy_278b1a63a14648a80c4b930adb.o |42.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/objcopy_5923b362516b6632b9769a5db2.o |42.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/objcopy_6b37760fb6a28054d0feafd61d.o |42.6%| 
[AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/uriparser/libcontrib-restricted-uriparser.a |42.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/flake8 >> test_yandex_cloud_queue_counters.py::flake8 [GOOD] |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/xxhash/libcontrib-libs-xxhash.a |42.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/comptable/liblibrary-cpp-comptable.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/intrusive_rb_tree/libcpp-containers-intrusive_rb_tree.a |42.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/api/flake8 >> test_session_pool.py::flake8 [GOOD] |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/case_insensitive_string/liblibrary-cpp-case_insensitive_string.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/certifi/libpy3library-python-certifi.global.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/responses/py3/libpy3python-responses-py3.global.a |42.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/flake8 >> test_workload_simple_queue.py::flake8 [GOOD] |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/exception/librestricted-boost-exception.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/psutil/py3/libpy3python-psutil-py3.a |42.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pytz/py3/libpy3python-pytz-py3.global.a |42.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/flake8 >> test_sql_streaming.py::flake8 [GOOD] |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/recipes/common/libpy3library-recipes-common.global.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/workload/type/libpy3oltp_workload-workload-type.global.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Werkzeug/py3/libpy3python-Werkzeug-py3.global.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/workload/libpy3stress-oltp_workload-workload.global.a |42.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_367e2bc5d83faa0907a06d2976.o |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/psutil/py3/libpy3python-psutil-py3.global.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/test_meta/libpy3tests-library-test_meta.global.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/lib/libpy3tests-sql-lib.global.a |42.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/lib/libpy3tests-datashard-lib.global.a |42.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_0446f521b26a2e8128f94ac50f.o |42.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/objcopy_49a1ca9559288648fba9cf7b65.o |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/find_root/libpy3library-python-find_root.global.a |42.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |42.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |42.7%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/library/yql/providers/yt/actors/yql_yt_lookup_actor.cpp |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/cores/libpy3library-python-cores.global.a |42.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/graph/ut/graph_ut.cpp |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/filelock/libpy3library-python-filelock.global.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/charset/liblibrary-cpp-charset.a 
|42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Crypto/liblibs-poco-Crypto.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/old_crc/libcpp-digest-old_crc.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/easy_parse/libcpp-json-easy_parse.a |42.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |42.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_file/part9/flake8 >> test.py::flake8 [GOOD] |42.7%| [TS] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/flake8 >> test_delete_by_explicit_row_id.py::flake8 [GOOD] |42.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/yt/actors/ut/yql_yt_lookup_actor_ut.cpp |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/no_llvm/libcodec-codegen-no_llvm.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/mkql_simple_file/libproviders-common-mkql_simple_file.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/JSON/liblibs-poco-JSON.a |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/codegen/no_llvm/libminikql-codegen-no_llvm.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/terminate_policy/libudf-service-terminate_policy.global.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/no_llvm/libyt-comp_nodes-no_llvm.a |42.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/flake8 >> test_scheme_shard_operations.py::flake8 [GOOD] |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |42.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/leader_election_ut.cpp |42.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/benchmarks/runner/result_compare/flake8 >> compare.py::flake8 [GOOD] |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-compression/librestricted-aws-aws-c-compression.a |42.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_ru_calculator/ut_ru_calculator.cpp |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/gateway/dummy/libpq-gateway-dummy.a |42.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/coordinator_ut.cpp |42.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |42.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/moto/py3/libpy3python-moto-py3.global.a |42.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dwarf_backtrace/liblibrary-cpp-dwarf_backtrace.a |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/pq_async_io/libtests-fq-pq_async_io.a |42.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |42.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/row_dispatcher_ut.cpp |42.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/topic_session_ut.cpp |42.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |42.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/ut_user_attributes_reboots.cpp |42.8%| [TS] {asan, default-linux-x86_64, release} ydb/tests/example/flake8 >> test_example.py::flake8 [GOOD] |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/asttokens/libpy3contrib-python-asttokens.global.a |42.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/sdk/python/enable_v3_new_behavior/libpy3sdk-python-enable_v3_new_behavior.global.a |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cxxsupp/libcxx/liblibs-cxxsupp-libcxx.a |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |42.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/actors/actors_ut.cpp |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cachetools/py3/libpy3python-cachetools-py3.global.a |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/module/libpy3python-symbols-module.global.a |42.9%| [TS] {asan, default-linux-x86_64, release} ydb/tests/functional/canonical/flake8 >> test_sql.py::flake8 [GOOD] |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/importlib-resources/libpy3contrib-python-importlib-resources.global.a |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/bit_io/liblibrary-cpp-bit_io.a |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |42.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/file/libyt-gateway-file.a |42.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/invoke_builtins/no_llvm/libminikql-invoke_builtins-no_llvm.a |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors >> conftest.py::flake8 [GOOD] >> s3_helpers.py::flake8 [GOOD] >> test_bindings_0.py::flake8 [GOOD] >> test_bindings_1.py::flake8 [GOOD] >> test_compressions.py::flake8 [GOOD] >> test_early_finish.py::flake8 [GOOD] >> test_explicit_partitioning_0.py::flake8 [GOOD] >> test_explicit_partitioning_1.py::flake8 [GOOD] >> test_format_setting.py::flake8 [GOOD] >> test_formats.py::flake8 [GOOD] >> test_inflight.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_public_metrics.py::flake8 [GOOD] >> test_push_down.py::flake8 [GOOD] >> test_s3_0.py::flake8 [GOOD] >> test_s3_1.py::flake8 [GOOD] >> test_size_limit.py::flake8 [GOOD] >> test_statistics.py::flake8 [GOOD] >> test_streaming_join.py::flake8 [GOOD] >> test_test_connection.py::flake8 [GOOD] >> test_validation.py::flake8 [GOOD] >> test_ydb_over_fq.py::flake8 [GOOD] >> test_yq_v2.py::flake8 [GOOD] |42.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/objcopy_22b5b8dd6ea05f4194f60e6181.o |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.collections/libpy3contrib-python-jaraco.collections.global.a |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |42.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/python/symbols/module/libpy3python-symbols-module.a |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |42.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/json_change_record_ut.cpp |42.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/bzip2/libcpp-streams-bzip2.a |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/www/libcpp-messagebus-www.a |43.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/rate_limiter/rate_limiter_ut.cpp |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ipdb/py3/libpy3python-ipdb-py3.global.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/matplotlib-inline/libpy3contrib-python-matplotlib-inline.global.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/brotli/libblockcodecs-codecs-brotli.global.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ptyprocess/py3/libpy3python-ptyprocess-py3.global.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/scheduler/libcpp-messagebus-scheduler.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lua/liblibrary-cpp-lua.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/iniconfig/libpy3contrib-python-iniconfig.global.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ipython/py3/libpy3python-ipython-py3.global.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cffi/py3/libpy3python-cffi-py3.a |43.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/http/ut/xml_builder_ut.cpp |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/libcpp-digest-argonish.a |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyasn1/py3/libpy3python-pyasn1-py3.global.a |43.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |43.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |43.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/execprofile/liblibrary-cpp-execprofile.a |42.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/arch/avx2/libhighwayhash-arch-avx2.a |43.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |43.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |43.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |43.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/stack_vector/libcpp-containers-stack_vector.a |43.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |43.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/codecs/liblibrary-cpp-codecs.a |43.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/AsmParser/libTarget-X86-AsmParser.a |43.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dns/liblibrary-cpp-dns.a |43.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/XML/liblibs-poco-XML.a |43.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/yt/actors/yql_yt_provider_factories.cpp |43.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lzma/libcontrib-libs-lzma.a |43.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/libpy3api-grpc.global.a |43.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_6508d12aaafde6f0a60fe8fff3.o |43.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-s3/librestricted-aws-aws-c-s3.a |43.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/api/protos/libpy3api-protos.global.a |43.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_cd9abca883cad9b25e20bf2f08.o |43.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |43.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/objcopy_bd84885c5c24478d181ba9e493.o |43.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/export_s3_buffer_ut.cpp |43.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/liblibrary-cpp-json.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/ngtcp2/libcontrib-libs-ngtcp2.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/liblibrary-cpp-lwtrace.a |43.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_4826ee2207124da1bc398e3bd8.o |43.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/coordinator/coordinator_ut.cpp |43.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_5b5c3367c789898aa5a6cae866.o |43.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/objcopy_8e57113197bb359e3999b04aab.o |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/future/py3/libpy3python-future-py3.global.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/func/libpy3library-python-func.global.a |43.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/coordinator/coordinator_volatile_ut.cpp |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/more-itertools/py3/libpy3python-more-itertools-py3.global.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.context/libpy3contrib-python-jaraco.context.global.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/unistat/libmonlib-encode-unistat.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/snappy/libcontrib-libs-snappy.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Net/liblibs-poco-Net.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Target/X86/MCTargetDesc/libTarget-X86-MCTargetDesc.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pexpect/py3/libpy3python-pexpect-py3.global.a |43.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |43.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |43.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |43.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |43.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml.clib/py3/libpy3python-ruamel.yaml.clib-py3.a |43.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/platformdirs/libpy3contrib-python-platformdirs.global.a |43.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pluggy/py3/libpy3python-pluggy-py3.global.a |43.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cffi/py3/libpy3python-cffi-py3.global.a |43.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_keys.cpp |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/typeguard/libpy3contrib-python-typeguard.global.a |43.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/python/wcwidth/py3/libpy3python-wcwidth-py3.global.a |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/monlib/libpy3library-python-monlib.global.a |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lzmasdk/libcontrib-libs-lzmasdk.a |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/deprecated/json/libmonlib-deprecated-json.a |43.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/objcopy_76cd981cf66123b7633d25b898.o |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/avro/liblibs-apache-avro.a |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/main/libpython-runtime_py3-main.a |43.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/quoter_service_ut.cpp |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/InstCombine/liblib-Transforms-InstCombine.a |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lz4/libcontrib-libs-lz4.a |43.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/query_actor/query_actor_ut.cpp |43.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_rs.cpp |43.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/kesus_quoter_ut.cpp |43.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema_ut.cpp |43.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet_ut.cpp |43.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/ut_helpers.cpp |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-common/librestricted-aws-aws-c-common.a |43.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql_ut.cpp |43.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_2492aafb6862566a2398c9f27e.o |43.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_e66920085df69f6f7e41547063.o |43.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |43.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |43.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/http_api_client/libpy3fq-libs-http_api_client.global.a |43.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/objcopy_3df021aac8504049c53286aea0.o |43.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut |43.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_7eade8c49389813f8c36b72b5b.o |43.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_3209cda00462f2963f3cbbc912.o |43.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_7bfd03a31f5e230607792f10cc.o |43.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_4f92526e13553482736b942b2c.o |43.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/workload/libpy3stress-simple_queue-workload.global.a |43.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |43.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/objcopy_1a1e300767b552f4c13c3295d0.o |43.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |43.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |43.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/accessor.pb.{h, cc} |43.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |43.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut 
|43.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |43.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/testing/group_overseer/libblobstorage-testing-group_overseer.a |43.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/snapshot.pb.{h, cc} |43.4%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/string/libstring_udf.so |43.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |43.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |43.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/kqprun |43.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |43.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_replication.cpp |43.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/long_tx_service/long_tx_service_ut.cpp |43.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |43.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |43.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_1aeeb50f676472f975830c135d.o |43.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql_compile_ut.cpp |43.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |43.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_bcbbd2d8f2367d5f3ed5199234.o |43.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |43.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/objcopy_f05ead59375a9db120b95dd730.o |43.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |43.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |43.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |43.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |43.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/ut_helpers/libtx-replication-ut_helpers.a |43.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |43.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/stream_creator_ut.cpp |43.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_pragma_ut.cpp |43.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ut/ydb-core-security-ut |43.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.{pb.h ... 
grpc.pb.h} |43.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |43.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |43.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/ut/utils_ut.cpp |43.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/chunk_queue/libcpp-threading-chunk_queue.a |43.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/objcopy_c96ef635306ccee8a5cf6359f1.o |43.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp |43.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |43.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/ut_external_data_source_reboots.cpp |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_datashard.cpp |43.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_bsvolume/ut_bsvolume.cpp |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_errors.cpp |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/describes_ut/ic_cache_ut.cpp |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_yql_ut.cpp |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_datashard.cpp |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/describes_ut/describe_topic_ut.cpp |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_columnshard.cpp |43.6%| [TS] {asan, default-linux-x86_64, release} ydb/tests/fq/s3/flake8 >> test_yq_v2.py::flake8 [GOOD] |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp |43.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_52e86d5ee8fadefdbb415ca379.o |43.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/allure-python-commons/libpy3contrib-python-allure-python-commons.global.a |43.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |43.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/stress/libpy3tests-library-stress.global.a |43.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_e68ca1a2fa9943132c020ae028.o |43.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_9be8b6745d0fa150928bab4206.o |43.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_scripting_ut.cpp |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/googleapis-common-protos/libpy3contrib-libs-googleapis-common-protos.global.a |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/iam/libpy3client-yc_public-iam.global.a |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/local_executor/libcpp-threading-local_executor.a |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/allure-pytest/libpy3contrib-python-allure-pytest.global.a |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/common/libpy3client-yc_public-common.global.a |43.7%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/datashard/s3/objcopy_8685c3ae88e5169a5acffc7bc4.o |43.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tbb/libcontrib-libs-tbb.a |43.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_ff581f3cff717ab223922f0cd8.o |43.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/cache_ut.cpp |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Utils/liblib-Transforms-Utils.a |43.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/rows_proto_splitter_ut.cpp |43.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/iceberg_processor_ut.cpp |43.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/objcopy_d191482d8b66f1c03ea8df56d3.o |43.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/entity_id_ut.cpp |43.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_secrets_ut.cpp |43.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp |43.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/objcopy_8e19d47784789c55156c57f816.o |43.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/util_ut.cpp |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/helpers/libpy3olap-scenario-helpers.global.a |43.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp |43.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_bac05c8b5a79735451f58d9322.o |43.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_703c8e1d9a9a2b271b8b995a29.o |43.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_2194854d9f8cbb3e0ba798b861.o |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/lib/libpy3olap-load-lib.global.a |43.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp |43.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/objcopy_912038ceef7de48e0e15c25307.o |43.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/lib/libpy3tests-olap-lib.global.a |43.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/lib/libpy3functional-tpc-lib.global.a |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_constraints_ut.cpp |43.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |43.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/common/ut/objcopy_caf222d14387d4810b5cb3e853.o |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_login_large/ut_login_large.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_acl_ut.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/compute/common/ut/config_ut.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ut_bsvolume_reboots.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_replication_reboots/ut_replication_reboots.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/scheduler/old/kqp_compute_scheduler_ut.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_minstep.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/compute/common/ut/utils_ut.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_scan_data_ut.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_minikql.cpp |43.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |43.7%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |43.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |43.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |43.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |43.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |43.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/s3_backups/s3_backups |43.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/datetime2/libdatetime2_udf.so |43.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp |43.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp |43.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_afdf6d60c4f76ae91a235d460b.o |43.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/proto/source.pb.{h, cc} |43.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_0359848ae21601186c5b0d9873.o |43.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/s3_recipe/s3_recipe |43.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/objcopy_277b7e8f79021687bec95be8db.o |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/bind_queue_ut.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |43.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/daf02fd86bb7e2296f1437ae1f_raw.auxcpp |43.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/fixtures/libpy3tests-library-fixtures.global.a |43.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_643fa2679e88d9b2d33558b050.o |43.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_53073eb93c76466fca8f474c5f.o |43.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/objcopy_fe15eb83a42d9d70d347bbba65.o |43.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/quoter_resource_tree_ut.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/format_handler_ut.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/make_config.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/user_info_ut.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/type_codecs_ut.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_filter_ut.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/utils_ut.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/metering_sink_ut.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/provider/yql_s3_listing_strategy_ut.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |43.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/tools/stress_tool/lib/libydb_device_test.a |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_parser_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/quota_tracker_ut.cpp |44.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/proto/libtools-stress_tool-proto.a |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/ydb/ut/ydb_ut.cpp |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/rpc_calls_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/pqrb_describes_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/counters_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/internals_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tablet_ut.cpp |44.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_5294a064c14cf5a49516321590.o |44.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_965640ca94893d27c182c611e2.o |44.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_08f7acdb6eb761b28bf6990862.o |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/sourceid_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/graph/shard/ut/shard_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_req_blockbs_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/dynamic_config/dynamic_config_ut.cpp |44.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/pipe_tracker_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipecache_ut.cpp |44.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/ut/helpers/libmkql_proto-ut-helpers.a |44.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/test/libs/rows/libtest-libs-rows.a |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_metrics_ut.cpp |44.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/objcopy_c7c229be41e9b028572ad1aab3.o |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_view/ut_view.cpp |44.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/test/libs/table/libtest-libs-table.a |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_engine_flat_host_ut.cpp |44.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/oauth/libclient-yc_private-oauth.a |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters_aggregator_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/pqtablet_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_proto_ut.cpp |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/mediator/mediator_ut.cpp |44.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/actors.pb.{h, cc} |44.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_value.pb.{h, cc} |44.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_c068ee86eb127df13256bfbe45.o |44.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/objcopy_0aefef587c181350d3a25f70e0.o |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_resolver_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_locks.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/ut_vector_index_build_reboots.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_move/ut_move.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/cost/kqp_cost_ut.cpp |44.0%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/clickhouse.pb.{h, cc} 
|44.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |44.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/ut/objcopy_1d0482d354dc270d18e7123281.o |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipe_ut.cpp |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/operation_helpers_ut.cpp |44.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/proto/libkqprun-src-proto.a |44.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |44.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_pq.{pb.h ... grpc.pb.h} |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/bootstrapper_ut.cpp |44.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/cache_eviction_ut.cpp |44.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.{pb.h ... grpc.pb.h} |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/resource_broker_ut.cpp |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/microseconds_sliding_window_ut.cpp |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/pqtablet_mock.cpp |44.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/node_broker/node_broker |44.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut.cpp |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/kikimr_program_builder_ut.cpp |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/pq_ut.cpp |44.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_tasks.pb.{h, cc} |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partitiongraph_ut.cpp |44.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |43.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |43.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/stress_tool/device_test_tool_ut.cpp |43.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |44.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli |44.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |44.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |44.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |44.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/tools/kqprun/kqprun.cpp |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |44.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/list_all_topics_ut.cpp |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |44.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/fetch_request_ut.cpp |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |44.1%| [PB] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/api/protos/ydb_query_stats.pb.{h, cc} |44.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |44.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_operation.pb.{h, cc} |44.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_engine_flat_ut.cpp |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |44.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partition_chooser_ut.cpp |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/s3_import/large/ydb-tests-olap-s3_import-large |44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a |44.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/query_replay/ydb_query_replay |44.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |44.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ticket_parser_ut.cpp |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |44.2%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/gateways_config.pb.{h, cc} |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select |44.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |44.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |44.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |44.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/comp_nodes/ut/ydb-library-yql-dq-comp_nodes-ut |44.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |44.2%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/udf_resolver.pb.{h, cc} |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |44.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/pathid.{pb.h ... 
grpc.pb.h} |44.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap |44.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a |44.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |44.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |44.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partition_ut.cpp |44.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/simple_queue/simple_queue |44.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |44.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |44.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |44.3%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/expr_nodes/yql_expr_nodes.{gen.h ... defs.inl.h} |44.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_table.pb.{h, cc} |44.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_common.pb.{h, cc} |44.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_error_codes.pb.{h, cc} |44.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_export.pb.{h, cc} |44.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.{pb.h ... grpc.pb.h} |44.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_background_cleaning/ut_background_cleaning.cpp |44.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/objcopy_4508aef343f36758ea760320db.o |44.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp |44.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/libpy3s3_backups.global.a |44.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/objcopy_4352b8b3e3cf61532c865b371b.o |44.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.{pb.h ... grpc.pb.h} |44.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/objcopy_95b3eecc97c453f0c55c456659.o |44.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_16842d72ae0dac1856818f841e.o |44.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view_types.{pb.h ... grpc.pb.h} |44.5%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/d78d0f74a3f72be1016c0cf8cf_raw.auxcpp |44.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/objcopy_3ddbad334a37a829b3772ddb05.o |44.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_a38b1580810a6e4b419da99dcf.o |44.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_9ea5b1fb7a4f8e1b0b8d7cf345.o |44.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/login.pb.{h, cc} |44.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/docker/libpy3contrib-python-docker.global.a |44.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_a5874452d3dbd6f6e49cd08be6.o |44.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.{pb.h ... 
grpc.pb.h} |44.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_b9fd5c62781ec3b78d111a0ba7.o |44.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/objcopy_7f9e816a97aaeee837ac316091.o |44.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |44.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp |44.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/plan2svg/ydb-tests-functional-kqp-plan2svg |44.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_790c6ea4aad5e761d21421b25d.o |44.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ext_tenant_ut.cpp |44.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/gc_ut.cpp |44.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/library/libpy3tests-postgres_integrations-library.global.a |44.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/objcopy_1c0f807c059fe226699115f242.o |44.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_checkpoint_storage_ut.cpp |44.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/storage_service_ydb_ut.cpp |44.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/minikql.{pb.h ... grpc.pb.h} |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/workload/libpy3stress-s3_backups-workload.global.a |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_state_storage_ut.cpp |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/no_llvm/libminikql-comp_nodes-no_llvm.a |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_update_ut.cpp |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_delete_ut.cpp |44.6%| [PK] {default-linux-x86_64, release, asan} $(B)/yql/essentials/tests/common/test_framework/udfs_deps/{common-test_framework-udfs_deps.final.pkg.fake ... 
yql/essentials/udfs/common/hyperscan/libhyperscan_udf.so} |44.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_query_session/ydb-tests-functional-kqp-kqp_query_session |44.6%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/issue_id.pb.{h, cc} |44.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_issue_message.pb.{h, cc} |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_large.cpp |44.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/objcopy_2a9fba044b5f98d2ff5f5c7f44.o |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/libpy3node_broker.global.a |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_common.cpp |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/slow/pq_ut.cpp |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/itsdangerous/py3/libpy3python-itsdangerous-py3.global.a |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/colorama/py3/libpy3python-colorama-py3.global.a |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/slow/autopartitioning_ut.cpp |44.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/5c5fdf614c3039a8dba94a4f38_raw.auxcpp |44.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_c98e5b95c64b8486a12f10d408.o |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_flip_join_ut.cpp |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/click/py3/libpy3python-click-py3.global.a |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Flask/py3/libpy3python-Flask-py3.global.a |44.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_7211c23d9494c46f0f60063e9e.o |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libf2c/libcontrib-libs-libf2c.a |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyarrow/libpy3contrib-python-pyarrow.global.a |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/cache_ut.cpp |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ut_helpers/libpublic-lib-ut_helpers.a |44.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/numpy/random/libpy3py3-numpy-random.global.a |44.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/objcopy_afb48e06933bdee6c5245db82e.o |44.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/objcopy_b783a1a2aacb855daa1e55fad6.o |44.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/recipe/objcopy_dcbdf62672440a626e79a64e14.o |44.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_791e2f78c18891d943ecce5e41.o |44.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/objcopy_93665db601a12d4842de4565e2.o |44.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/rbo/kqp_rbo_ut.cpp |44.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_ut.cpp |44.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_coordination.pb.{h, cc} |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/cblas/libcontrib-libs-cblas.a |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/recipe/libpy3kqprun_recipe.global.a |44.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp |44.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ut_export_reboots_s3.cpp |44.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_v1.pb.{h, cc} |44.7%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/datashard/parametrized_queries/objcopy_6d8369510b03c08a300f2e2657.o |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/python/yt/yson/libpy3python-yt-yson.global.a |44.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_split_merge.cpp |44.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncquorum_ut.cpp |44.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/fq.pb.{h, cc} |44.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter_ut.cpp |44.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay/query_replay.cpp |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/workload/libpy3show_create-view-workload.global.a |44.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.{pb.h ... grpc.pb.h} |44.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay/main.cpp |44.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/python/yt/type_info/libpy3python-yt-type_info.global.a |44.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.{pb.h ... grpc.pb.h} |44.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/tests/objcopy_59eb97971e5f83d3296e6c33b5.o |44.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_secondary_index.cpp |44.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/yc_search_ut/test_events_writer.cpp |44.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch |44.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/result_formatter/result_formatter_ut.cpp |44.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/server_restart/public-sdk-cpp-tests-integration-server_restart |44.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/ut/dq_factories.cpp |44.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.{pb.h ... grpc.pb.h} |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/fq/ut_integration/ut_utils.cpp |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_reshuffle_kmeans.cpp |44.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/objcopy_d709b1895f91108d9f51b703ea.o |44.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.{pb.h ... 
grpc.pb.h} |44.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/objcopy_6c8bedcdc8efb835a928b278ce.o |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/yc_search_ut/index_events_processor_ut.cpp |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/simplejson/py3/libpy3python-simplejson-py3.global.a |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/blobs_sharing_ut.cpp |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/prctl/libpy3library-python-prctl.global.a |44.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/tests/objcopy_3bb523a1011c0a7019f2684a90.o |44.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/tests/objcopy_e8c94c485e81b4b2899f52f594.o |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/utils/libpy3connector-tests-utils.global.a |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/compaction_ut.cpp |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/locks_ut.cpp |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/statistics_ut.cpp |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/decimal_ut.cpp |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/numpy/random/libpy3py3-numpy-random.a |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/tiering_ut.cpp |44.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/services.{pb.h ... grpc.pb.h} |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/dictionary_ut.cpp |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions_ut.cpp |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/libpy3simple_queue.global.a |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/utils/libpy3fq-generic-utils.global.a |44.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.{pb.h ... grpc.pb.h} |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/python/yt/libpy3yt-python-yt.global.a |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/sys_view_ut.cpp |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/compression_ut.cpp |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/delete_ut.cpp |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clapack/part2/liblibs-clapack-part2.a |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/indexes_ut.cpp |44.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/clickbench_ut.cpp |44.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/s3_backups/tests/objcopy_cd57da3671b96739ee73293fb1.o |44.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/tests/utils/types/libpy3tests-utils-types.global.a |44.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/type_info.{pb.h ... 
grpc.pb.h} |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/sparsed_ut.cpp |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_discoverer_ut.cpp |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/optimizer_ut.cpp |44.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/objcopy_49e9948af399bc60603a7d2db5.o |44.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/objcopy_181bdcd1743e9a1a78fafe4b60.o |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/aggregations_ut.cpp |44.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/objcopy_49bad8251d240ad7c49d384b91.o |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/json_ut.cpp |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_ut.cpp |44.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/kqp_olap_stats_ut.cpp |44.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/view/view_ut.cpp |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/range_treap_ut.cpp |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/datatime64_ut.cpp |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/ut/olap/combinatory/libut-olap-combinatory.a |44.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/control/ut/ydb-core-control-ut |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sharding/ut/ut_sharding.cpp |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/network/libessentials-utils-network.a |44.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a |44.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/objcopy_dfbd751fc64901b06ded4354c8.o |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/comp_nodes/ut/dq_block_hash_join_ut.cpp |45.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/write_ut.cpp |44.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |44.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/solomon/ydb-library-yql-tests-sql-solomon |44.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/objcopy_ec9bc627b6d56d1a941c2b7e4f.o |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ut_split_merge_reboots.cpp |44.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.{pb.h ... grpc.pb.h} |44.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.{pb.h ... grpc.pb.h} |44.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.{pb.h ... 
grpc.pb.h} |45.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |45.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_recompute_kmeans.cpp |45.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/idx_test/ydb_index_ut.cpp |44.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp |44.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |45.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/fq/ut_integration/fq_ut.cpp |45.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |45.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_indexes/ydb-tests-functional-kqp-kqp_indexes |45.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/tests/objcopy_60e08504076128d310212c6460.o |45.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/ut/ydb-core-client-ut |45.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |44.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer |44.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |44.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |44.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp |44.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/delete/ydb-tests-olap-delete |44.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |44.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |44.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/tests/objcopy_5acd2383ed2cd599cfd64f7c8a.o |45.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |45.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |45.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |45.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_local_kmeans.cpp |45.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/idx_test/libpublic-lib-idx_test.a |45.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.{pb.h ... 
grpc.pb.h} |45.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |45.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots |45.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |45.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp |45.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data_ut.cpp |45.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |45.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay/query_proccessor.cpp |45.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/streaming/4399546af28cb40e5d74ea4a4b_raw.auxcpp |45.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/large/objcopy_363cd92f1d4b79ca063627ba22.o |45.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/parametrized_queries/objcopy_7d0deb4120fbddf720c11b5358.o |45.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay/query_compiler.cpp |45.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_sample_k.cpp |45.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/large/objcopy_d305a8a4fbc1702039f0202072.o |45.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/large/objcopy_4943008ec342eed836b4112777.o |45.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/objcopy_48a08121f0a68da2f2666b0341.o |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/simplejson/py3/libpy3python-simplejson-py3.a |45.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker_ut.cpp |45.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/parametrized_queries/objcopy_e1e64d508ce59834ec0a40f731.o |45.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/objcopy_b306c2955ce13e6db6cae73363.o |45.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/objcopy_5accfe00d45fb7ebcc30e116b2.o |45.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_prefix_kmeans.cpp |45.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/objcopy_10b0cfa01297f7d7392eb4d9e4.o |45.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/spilling/spilling_file_ut.cpp |45.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/solomon/ydb-tests-fq-solomon |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/prctl/libpy3library-python-prctl.a |45.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_find_split_key.cpp |45.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/tests/objcopy_7c0098f27edc25092453a8033c.o |45.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_359d47616c1036f0865eb1e662.o |45.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/tests/objcopy_7f02665786b7523f76c02ad1dd.o |45.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/db_pool.pb.{h, cc} |45.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/row_dispatcher.pb.{h, cc} |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/library/libpy3tools-nemesis-library.global.a |45.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/compute.pb.{h, cc} |45.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_927a1f7611cf94fb1cd21ef8cf.o |45.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/gateways.pb.{h, cc} |45.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/rm_service/kqp_rm_ut.cpp |45.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/ut_cdc_stream/ut_cdc_stream.cpp |45.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/audit.pb.{h, cc} |45.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_903d4758faea71f1363e296b3f.o |45.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/objcopy_b06d27009e49b9ba3df883a226.o |45.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_c52ec5ba5ab0b788efaa5ed704.o |45.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/objcopy_c77713875cf17988efd8fc0fb3.o |45.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_index_lookup_join_ut.cpp |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/workload/libpy3stress-cdc-workload.global.a |45.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/tests/objcopy_b9fcf9641e3e569e88014f85ff.o |45.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/dst_creator_ut.cpp |45.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp |45.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/services_common.pb.{h, cc} |45.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/basic_example/public-sdk-cpp-tests-integration-basic_example |45.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/resource_manager.pb.{h, cc} |45.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/ssa.pb.{h, cc} |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/vector_index/large/ydb-tests-datashard-vector_index-large |45.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/pq_read |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/ydb-tests-olap |45.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/arrow/cpp/src/arrow/python/libpy3src-arrow-python.a |45.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |45.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/libpy3python-numpy-py3.global.a |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |45.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |45.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/health_config.pb.{h, cc} |45.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |45.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |45.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |45.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |45.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |45.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/storage.pb.{h, cc} |45.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/runtime/ut/file_cache_ut.cpp |45.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.{pb.h ... grpc.pb.h} |45.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/control/immediate_control_board_actor_ut.cpp |45.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |45.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |45.3%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/expr_nodes/kqp_expr_nodes.{gen.h ... defs.inl.h} |45.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/clapack/part1/liblibs-clapack-part1.a |45.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/read_actors_factory.pb.{h, cc} |45.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pending_fetcher.pb.{h, cc} |45.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/tests/liblibrary-persqueue-tests.a |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |45.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/replica_ut.cpp |45.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/inside_ydb_ut/inside_ydb_ut.cpp |45.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/objcopy_484246668d943fbae3b476ec7d.o |45.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/protos/aclib.pb.{h, cc} |45.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator_client/actor_client_ut.cpp |45.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |45.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/rate_limiter.pb.{h, cc} |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |45.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_storage.pb.{h, cc} |45.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator_client/ut_helpers.cpp |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |45.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/key_range.{pb.h ... grpc.pb.h} |45.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/dread_cache_service/ut/caching_proxy_ut.cpp |45.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.{pb.h ... grpc.pb.h} |45.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.{pb.h ... grpc.pb.h} |45.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.{pb.h ... grpc.pb.h} |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |45.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/replication/ydb-tests-functional-replication |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects |45.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.{pb.h ... 
grpc.pb.h} |45.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/delete/objcopy_e6184a39b8332c221c5cda3c2f.o |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/common/libpy3tests-olap-common.global.a |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_14c03c6aecffbe39cb01ddf2ed.o |45.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/delete/objcopy_ffc5f76f7501b8251738448541.o |45.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/delete/objcopy_609c2613d8f9c513602350c6a8.o |45.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/cancel_tx_ut.cpp |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/secret/ut/ut_secret.cpp |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/objcopy_27c0687ceeb7ce4ff5e4cea90a.o |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/table_creator/table_creator_ut.cpp |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/objcopy_d68e1e5b762e412afe6a534487.o |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_prefixed_vector_ut.cpp |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_vector_ut.cpp |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/ymq_ut.cpp |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/locks_ut.cpp |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.{pb.h ... grpc.pb.h} |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_40779f0570229cef213050a4fa.o |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service_ut.cpp |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/table_writer_ut.cpp |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.{pb.h ... grpc.pb.h} |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/objcopy_7eab954373d77ffb1fab95ca0d.o |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.{pb.h ... grpc.pb.h} |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_object_storage.pb.{h, cc} |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.{pb.h ... grpc.pb.h} |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/object_storage_listing_ut.cpp |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.{pb.h ... grpc.pb.h} |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.{pb.h ... grpc.pb.h} |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query.pb.{h, cc} |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.{pb.h ... grpc.pb.h} |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/activation.pb.{h, cc} |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/file_storage.pb.{h, cc} |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_board.{pb.h ... grpc.pb.h} |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_ut.cpp |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.{pb.h ... grpc.pb.h} |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup/ut_backup.cpp |45.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyarrow/libpy3contrib-python-pyarrow.a |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_ut.cpp |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/balancing_ut.cpp |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/ut_fat/blobstorage_node_warden_ut_fat.cpp |45.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.{pb.h ... grpc.pb.h} |45.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.{pb.h ... 
grpc.pb.h} |45.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/objcopy_5fddfa8f171a3216cad65e02ab.o |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/kinesis_ut.cpp |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_olap_reboots/ut_olap_reboots.cpp |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/objcopy_d52256d4fa9895f38df6030445.o |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.{pb.h ... grpc.pb.h} |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/flat_ut.cpp |45.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.{pb.h ... grpc.pb.h} |45.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.{pb.h ... grpc.pb.h} |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/compression_ut.cpp |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp |45.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.{pb.h ... grpc.pb.h} |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/table_description_ut.cpp |45.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.{pb.h ... grpc.pb.h} |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_re2_ut.cpp |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp |45.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.{pb.h ... grpc.pb.h} |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/json_proto_conversion_ut.cpp |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/ydb_convert_ut.cpp |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/load_test/ut_ycsb.cpp |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.{pb.h ... grpc.pb.h} |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/commitoffset_ut.cpp |45.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/bulk_upsert/ydb-public-sdk-cpp-tests-integration-bulk_upsert |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp |45.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.{pb.h ... grpc.pb.h} |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/stress_tool/ydb_stress_tool |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |45.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.{pb.h ... grpc.pb.h} |45.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |45.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.{pb.h ... grpc.pb.h} |45.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |45.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.{pb.h ... 
grpc.pb.h} |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.{pb.h ... grpc.pb.h} |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/persqueue_error_codes_v1.pb.{h, cc} |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.{pb.h ... grpc.pb.h} |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/pgwire/pgwire |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/tools/yqlrun/yqlrun |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dq/service_node/service_node |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |45.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.{pb.h ... grpc.pb.h} |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/show_create/view/show_create_view |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |45.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_query_svc/ydb-tests-functional-kqp-kqp_query_svc |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |45.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.{pb.h ... 
grpc.pb.h} |45.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |45.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |45.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |45.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/transfer/ut/functional/ydb-core-transfer-ut-functional |45.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |45.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/large/objcopy_04f2935f3ada8eb9d01ebaba6b.o |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/vector_index/medium/ydb-tests-datashard-vector_index-medium |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage |45.5%| [PR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/expr_nodes/yql_res_expr_nodes.{gen.h ... defs.inl.h} |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |45.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |45.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/large/objcopy_6af7a7ce8a1ee5e67d75a2978a.o |45.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/large/objcopy_28f172e1aa977d907bdfa0a81b.o |45.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config.{pb.h ... 
grpc.pb.h} |45.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_0664e2ab2eb37ae9f02538e483.o |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_4b767dce2ddf7a5424aef828d6.o |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/objcopy_e872ffee323253a62fe108f2f4.o |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dqrun/dqrun |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/objcopy_ac3c83156eb65915b12091966a.o |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/objcopy_2cc418e8604751e5b8f9029a81.o |45.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_19422d2b60428207055b4ed843.o |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore |45.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |45.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_a0543c2dc30365e9b2ad3d0ca6.o |45.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/partition_stats/partition_stats_ut.cpp |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/ut/ut_object.cpp |45.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_e0331f455507fe5ac3b71d0537.o |45.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/issue_id.{pb.h ... grpc.pb.h} |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp |45.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_d2e759e2d0ff1243166a3bc7d9.o |45.6%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.{pb.h ... 
grpc.pb.h} |45.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp |45.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/objcopy_df0cb3f315162a3110ee243ecd.o |45.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/ut/ut_tiers.cpp |45.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/conveyor_composite/ut/ut_simple.cpp |45.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_fe9c8c25e6c570097a9d0c06f9.o |45.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_48884f6b745ced4d3e78997cb1.o |45.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp |45.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_object_storage_listing.cpp |45.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ncloud/impl/access_service_ut.cpp |45.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_split_ut.cpp |45.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/objcopy_40226ff8497733c6e798ee3940.o |45.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/objcopy_265d7fd505d52534f38ea6fb7f.o |45.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/objcopy_a52eb3c900a84eaad86a211549.o |45.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |45.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/subscriber_ut.cpp |45.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_aebf7c73fcaf6a54715cc177c8.o |45.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_fd8d9957a06c9923c501e36fd9.o |45.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_replication/ut_replication.cpp |45.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_6e0da74b1512d0ffe19c5dc500.o |45.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_af18efc2f04dd1af5ca802c329.o |45.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_87b299e07b15c86f4f50f458ef.o |45.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/objcopy_8491a772a9425d10f304e6f0e9.o |45.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/objcopy_f3c323ef80ada193284f036d44.o |45.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/objcopy_5333c1912ecbac0f64ff97551f.o |45.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/objcopy_bd8a6d25e26a719f80141d0711.o |45.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/objcopy_51562f83ff52d1ceaac0c36a08.o |45.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/numpy/py3/libpy3python-numpy-py3.a |45.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_flowcontrol_ut.cpp |45.9%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/5a2f230528097042fdaf726fed_raw.auxcpp |45.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_b031a661ba244dffa03ab0c7ec.o |45.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_6a5c78aa9f679a0920be5264fe.o |45.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_96b8686cd075e874d95d4aa5c5.o |46.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/objcopy_d0255dda539959b69d421868a2.o |45.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_point_consolidation_ut.cpp |45.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_events.pb.{h, cc} |45.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_user_attributes/ut_user_attributes.cpp |45.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/grouper_ut.cpp |46.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sdk/cpp/sdk_credprovider/ydb-tests-functional-sdk-cpp-sdk_credprovider |46.0%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/mind/bscontroller/mv_object_map_ut.cpp |46.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp |46.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/memory_controller/memory_controller_ut.cpp |46.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp |46.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_import/ydb-tests-fq-yt-kqp_yt_import |46.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_write_ut.cpp |46.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/group_mapper_ut.cpp |46.1%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/issue_severity.pb.{h, cc} |46.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_effects_ut.cpp |46.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/memory_controller/memtable_collection_ut.cpp |46.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_inplace_update_ut.cpp |46.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_stats.pb.{h, cc} |46.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/objcopy_ce0222bab1634be9f9a52f715d.o |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/s3_recipe_helper/liblibrary-testlib-s3_recipe_helper.a |46.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/objcopy_ec94bbf9004678001f4c8195e3.o |46.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/s3_recipe_ut_helpers.cpp |46.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_scan_ut.cpp |46.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/objcopy_da2669c2228a88c83cd32d45da.o |46.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_immediate_effects_ut.cpp |46.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain/ut_extsubdomain.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/monitoring_ut.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_sequence_reboots/ut_sequence_reboots.cpp |46.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/objcopy_1574e8a5a6c530c7bfd6378c4d.o |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/stress_tool/device_test_tool.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_rtmr/ut_rtmr.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_s3_plan_ut.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/populator_ut.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_utils_ut.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_change_exchange.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/pg/pg_catalog_ut.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_scheme_ut.cpp |46.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_1555e67a3dd43a3e7f09bf8eee.o |46.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/objcopy_2aa1916d45dca98014edb3d732.o |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/text/libv1-complete-text.a |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ut_extsubdomain_reboots.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/cloud_events_ut.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/849c58233edc33539cbeb93a31_raw.auxcpp |46.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/objcopy_504b845d57f1a23561e970de61.o |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dq/service_node/main.cpp |46.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/backup/s3_path_style/ydb-tests-functional-backup-s3_path_style |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/syntax/libv1-complete-syntax.a |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/union/libname-service-union.a |46.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/backup/ydb-tests-functional-backup |46.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/objcopy_461999da7ba13deab5689c18ec.o |46.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/objcopy_17cef60c2dd0eb7ea46181ba87.o |46.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/cache/libcomplete-name-cache.a |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_query_ut.cpp |46.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ext_index/ut/ut_ext_index.cpp |46.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator_grpc/solomon_recipe_grpc |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/datastreams/datastreams_ut.cpp |46.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.{pb.h ... grpc.pb.h} |46.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.{pb.h ... grpc.pb.h} |46.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_938861be99a6cedecb22904193.o |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/topic_data_ut.cpp |46.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_6e536fb2c379a4ebe79c499de8.o |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic_ut.cpp |46.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_1dba5118ef0a485f3bf803be50.o |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp |46.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.{pb.h ... 
grpc.pb.h} |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor_ut.cpp |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_job_service/interface/libfmr-yt_job_service-interface.a |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_query_ut.cpp |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_monitoring_ut.cpp |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/gc_service/interface/libfmr-gc_service-interface.a |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_import_ut.cpp |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/fmr_tool_lib/libyt-fmr-fmr_tool_lib.a |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_olapstore_ut.cpp |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_object_storage_ut.cpp |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/yt_coordinator_service/interface/libcoordinator-yt_coordinator_service-interface.a |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_vector_index.cpp |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore/ut_incremental_restore.cpp |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/fmr/libyt-gateway-fmr.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/yt_coordinator_service/impl/libcoordinator-yt_coordinator_service-impl.a |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_logstore_ut.cpp |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/yt_coordinator_service/file/libcoordinator-yt_coordinator_service-file.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/libcoordinator-interface-proto_helpers.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/interface/libfmr-coordinator-interface.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/query/liblibrary-workload-query.global.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/clickbench/liblibrary-workload-clickbench.global.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/log/liblibrary-workload-log.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/clickbench/liblibrary-workload-clickbench.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpch-dbgen/libbenchmarks-gen-tpch-dbgen.a |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/pg/kqp_pg_ut.cpp |46.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_9beede1c5ddb1a5202bb8125bf.o |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/yql/libcomplete-analysis-yql.a |46.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_b7f5600f224f7d7aa608ada59e.o |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/compatibility/libpy3tests-library-compatibility.global.a |46.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/ut/ut_http_request.cpp |46.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_81b7879c9cfa37bdcf437f5ff4.o |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/ranking/libname-service-ranking.global.a |46.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_65ac58c27d43a55d0ea4eda626.o |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/binding/libname-service-binding.a |46.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_counters.cpp |46.4%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/sql/v1/complete/name/service/ranking/libname-service-ranking.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/libcomplete-name-service.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/queries/tpch/libbenchmarks-queries-tpch.global.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/snappy/libstreams-lz-snappy.a |46.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/lz4/libstreams-lz-lz4.a |46.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_common.cpp |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/cluster/libname-service-cluster.a |46.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_table_ut.cpp |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/factory/open_by_signature/libstreams-factory-open_by_signature.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/bucket_quoter/liblibrary-cpp-bucket_quoter.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/core/libv1-complete-core.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/antlr4/libv1-complete-antlr4.a |46.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/ut/ut_column_statistics.cpp |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/patched/replxx/librestricted-patched-replxx.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/pg_ext/libessentials-core-pg_ext.a |46.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/main.cpp |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/static/libobject-simple-static.a |46.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/ut/ut_basic_statistics.cpp |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/metrics/libproviders-dq-metrics.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/backup/libkikimr_backup.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_preprocessing/libessentials-core-url_preprocessing.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/memory/libqplayer-storage-memory.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_lister/libessentials-core-url_lister.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/file/libqplayer-storage-file.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/local/libcomplete-analysis-local.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_inference/libydb-library-arrow_inference.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/libcomplete-name-object.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr4-c3/libcontrib-libs-antlr4-c3.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/libcpp-streams-lz.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/cluster/static/libname-cluster-static.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/csv/table/libarrow-csv-table.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/check/libv1-complete-check.a |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/factory/open_common/libstreams-factory-open_common.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/libsql-v1-complete.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/libname-object-simple.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpch/liblibrary-workload-tpch.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpcds-dbgen/libbenchmarks-gen-tpcds-dbgen.a |46.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yt/yql/providers/yt/fmr/coordinator/client/libfmr-coordinator-client.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpcds-dbgen/libbenchmarks-gen-tpcds-dbgen.global.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/benchmark_base/liblibrary-workload-benchmark_base.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/mixed/liblibrary-workload-mixed.global.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/mixed/liblibrary-workload-mixed.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/transfer_workload/libtransfer_workload.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/highlight/libcommands-interactive-highlight.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpc_base/liblibrary-workload-tpc_base.global.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/highlight/color/libinteractive-highlight-color.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/stat_visualization/libpublic-lib-stat_visualization.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/libydb_cli-commands-interactive.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/local_gateway/libproviders-dq-local_gateway.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/complete/libcommands-interactive-complete.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/topic/libtopic.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpch/liblibrary-workload-tpch.global.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcds/liblibrary-workload-tpcds.global.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/bindings/libyql-utils-bindings.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/topic_workload/libtopic_workload.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actor_system/libyql-utils-actor_system.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/ftxui/libcontrib-libs-ftxui.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/monitoring/libsrc-client-monitoring.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/debug/libsrc-client-debug.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/import/liblib-ydb_cli-import.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/cms/libsrc-client-cms.a |46.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dqrun/dqrun.cpp |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/queries/tpcds/libbenchmarks-queries-tpcds.global.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/impl/libfmr-coordinator-impl.global.a |46.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/dqrun/lib/libtools-dqrun-lib.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/interface/libfmr-table_data_service-interface.a |46.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_kqp.cpp |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/query/liblibrary-workload-query.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpc_base/liblibrary-workload-tpc_base.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_factory/impl/libfmr-job_factory-impl.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/gc_service/impl/libfmr-gc_service-impl.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/liblib-ydb_cli-dump.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job/interface/libfmr-job-interface.a |46.6%| 
[AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job/impl/libfmr-job-impl.a |46.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_bulk_upsert_olap_ut.cpp |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_factory/interface/libfmr-job_factory-interface.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/request_options/proto_helpers/libfmr-request_options-proto_helpers.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/request_options/libyt-fmr-request_options.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/impl/libfmr-coordinator-impl.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/antlr_ast/gen/v1_ansi_antlr4/libantlr_ast-gen-v1_ansi_antlr4.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/antlr_ast/gen/v1_antlr4/libantlr_ast-gen-v1_antlr4.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/local/interface/libtable_data_service-local-interface.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/worker/impl/libfmr-worker-impl.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/local/impl/libtable_data_service-local-impl.a |46.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_erase_rows.cpp |46.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_async_index.cpp |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/utils/libyt-fmr-utils.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_job_service/impl/libfmr-yt_job_service-impl.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_job_service/file/libfmr-yt_job_service-file.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yt_url_lister/libyt-lib-yt_url_lister.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/proto/libyt-fmr-proto.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/secret_masker/dummy/liblib-secret_masker-dummy.a |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcds/liblibrary-workload-tpcds.a |46.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |46.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |46.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_unique_index.cpp |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/tools/ytrun/lib/libtools-ytrun-lib.a |46.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |46.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |46.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |46.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |46.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_ldap_login_ut.cpp |46.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_login_ut.cpp |46.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/objcopy_44fac4fe441507735704a000ad.o |46.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |46.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |46.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/ut/batch_slice.cpp |46.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |46.6%| [PY] 
{BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_2d296dfaf373f7f15e6312517a.o |46.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |46.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_bulk_upsert_ut.cpp |46.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcc/liblibrary-workload-tpcc.a |46.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |46.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_coordination_ut.cpp |46.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl_utility.cpp |46.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/medium/objcopy_71b7c7df3e7853e6e7cd11e484.o |46.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_index_table_ut.cpp |46.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/medium/objcopy_1583476a2a074be936cf5a393e.o |46.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |46.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |46.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |46.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection |46.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_read_rows_ut.cpp |46.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |46.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |46.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |46.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/yt/libdq-actors-yt.a |46.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/ut/ut_utils.cpp |46.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |46.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_register_node_ut.cpp |46.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |46.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |46.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/secondary_index/ydb-tests-datashard-secondary_index |46.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |46.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |46.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl.cpp |46.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_export/ut_export.cpp |46.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/cdc/cdc |46.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/worker_ut.cpp |46.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |46.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/medium/objcopy_cc203073bb2a03b31e52a78f24.o |46.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |46.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |46.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/objcopy_994fcbd53c4e2174c302bdb5ab.o |46.6%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |46.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/statestorage.cpp |46.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |46.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |46.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |46.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_view_ut.cpp |46.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/objcopy_a65a4fae8912a32233240d3c51.o |46.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/query_replay_yt/query_replay_yt |46.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/load/ydb-tests-olap-load |46.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |46.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |46.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |46.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_col_ut.cpp |46.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/objcopy_7c328c2741f9dd7697a2e0e8b1.o |46.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |46.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/ut/ut_splitter.cpp |46.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/viewer_ut.cpp |46.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_write.cpp |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/log/liblibrary-workload-log.global.a |46.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_stats_ut.cpp |46.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_scripting_ut.cpp |46.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/impl/table_writer_ut.cpp |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/cache/local/libname-cache-local.a |46.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/objcopy_065e9244d685c2b8f0ab66e414.o |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/highlight/libsql-v1-highlight.global.a |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/highlight/libsql-v1-highlight.a |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/libpy3show_create_view.global.a |46.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/show_create/view/objcopy_9ccdc4f01b578a43bc35d4d519.o |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/ut/grpc/libgrpc_streaming-ut-grpc.a |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/static/libname-service-static.global.a |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/cached/libobject-simple-cached.a |46.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_labeled.cpp |46.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_488333b1ebd4c1d6d8ec5bcb8f.o |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_pure/libv1-lexer-antlr4_pure.a |46.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/objcopy_3db6af291678d4ac330517956a.o |46.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_bf578b7161cc94bf18488d04ca.o |46.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_b8d63b589074145793d63c27a3.o |46.9%| [PB] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/dq/proto/dq_transport.pb.{h, cc} |46.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_f928a40774b17a9d6cd7cabd2c.o |46.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/objcopy_e7477203b27fa0321cf18fd7ee.o |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/static/libname-service-static.a |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/check/libv1-lexer-check.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/tools/yqlrun/http/libtools-yqlrun-http.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_pure_ansi/libv1-lexer-antlr4_pure_ansi.a |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/ut/ut_helper.cpp |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/reflect/libsql-v1-reflect.global.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/regex/libv1-lexer-regex.a |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_scan.cpp |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/cbo/simple/libcore-cbo-simple.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/reflect/libsql-v1-reflect.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/yt/dq_task_preprocessor/libproviders-yt-dq_task_preprocessor.a |46.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_streaming/grpc_streaming_ut.cpp |46.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/schema/libname-service-schema.a |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_output_channel_ut.cpp |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/scheduler/libproviders-dq-scheduler.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/service_mocks/ldap_mock/libtestlib-service_mocks-ldap_mock.a |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pg_ydb_proxy.cpp |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/tools/yqlrun/lib/libtools-yqlrun-lib.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/tools/yql_facade_run/libessentials-tools-yql_facade_run.a |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yql/tools/yqlrun/yqlrun.cpp |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_filestore_reboots/ut_filestore_reboots.cpp |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_ut.cpp |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pgwire.cpp |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/topic_reader_ut.cpp |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/stats_collector/libproviders-dq-stats_collector.a |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pg_ydb_connection.cpp |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/actors/libproviders-clickhouse-actors.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/actors/libproviders-ydb-actors.a |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/comp_nodes/libproviders-ydb-comp_nodes.a |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dq/worker_node/main.cpp |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/libclicommands.a |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_arrow_helpers_ut.cpp |47.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/global/libcomplete-analysis-global.a |47.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pinger.pb.{h, cc} |47.1%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp |47.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/common.pb.{h, cc} |47.1%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/yql_types.pb.{h, cc} |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/global_worker_manager/libproviders-dq-global_worker_manager.a |47.1%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/yql_mount.pb.{h, cc} |47.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_topic.pb.{h, cc} |47.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/fq_config.pb.{h, cc} |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_ut.cpp |47.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_1ab2a5a6dd84a6c9ff5d5c50b0.o |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/pq/provider/ut/yql_pq_ut.cpp |47.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_6887bde1dc99f5c5c2f0922842.o |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/service/libproviders-dq-service.a |47.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/test_connection.pb.{h, cc} |47.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_e2acb41e7099c0db4fe54a1587.o |47.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_7c81cbfa6b5ce112674cb0a849.o |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gbenchmark/libcpp-testing-gbenchmark.a |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tornado/tornado-4/libpy3python-tornado-tornado-4.a |47.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/objcopy_cf3971576aced18377e99f5367.o |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/kqp_mock.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/functions_executor_wrapper.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/13360e4ecdf34efe6c3a817a44_raw.auxcpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |47.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/sensitive.pb.{h, cc} |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/wrappers/s3_wrapper_ut.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/pqtablet_mock.cpp |47.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/task_controller.pb.{h, cc} |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/storage_tenant_ut.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_fixture.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup_collection/ut_backup_collection.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_ut.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/first_class_src_ids_ut.cpp |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tornado/tornado-4/libpy3python-tornado-tornado-4.global.a |47.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/secondary_index/objcopy_b83d9052e0bc89877bbe223294.o |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/query_stats/query_stats_ut.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_failure_injection/ut_failure_injection.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/topic_yql_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_compat_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/demo_tx.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/services/persqueue_v1/persqueue_common_ut.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/ut/database_resolver_ut.cpp |47.1%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/expr_nodes/dq_expr_nodes.{gen.h ... defs.inl.h} |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/topic_service_ut.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |47.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/objcopy_7d7339f4588397fc771e31030c.o |47.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/google/benchmark/librestricted-google-benchmark.a |47.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/cdc/libpy3cdc.global.a |47.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/connector.{pb.h ... grpc.pb.h} |47.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.{pb.h ... grpc.pb.h} |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_sqlin_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/benchmark/b_part.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/schemereq_ut.cpp |47.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.{pb.h ... grpc.pb.h} |47.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_8120ef49e7e653ed0601604313.o |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_column_stats.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_program.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_part_multi.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/log_settings_configurator_ut.cpp |47.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.{pb.h ... 
grpc.pb.h} |47.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/ut/objcopy_9f29b589555ed64086e5eadccf.o |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/net_classifier_updater_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/common_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/configs_dispatcher_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_pages.cpp |47.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/streaming/libstreaming_udf.global.a |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/configs_cache_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/jaeger_tracing_configurator_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_comp_gen.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/test_connection/ut/test_connection_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ut_rtmr_reboots.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/supported_codecs_fixture.cpp |47.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/objcopy_323a17e94d8d570989807d19d3.o |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_part.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/ydb-dump.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/modifications_validator_ut.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/immediate_controls_configurator_ut.cpp |47.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.{pb.h ... grpc.pb.h} |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay_yt/query_replay.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/workload-transfer-topic-to-table.cpp |47.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.{pb.h ... grpc.pb.h} |47.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/helpers/libclient-oauth2_token_exchange-helpers.a |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/parse_command_line.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/feature_flags_configurator_ut.cpp |47.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.{pb.h ... 
grpc.pb.h} |47.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/protobuf/libprotobuf_udf.global.a |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_extract_predicate_unpack_ut.cpp |47.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/protobuf_udf/libessentials-minikql-protobuf_udf.a |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_db_scheme.cpp |47.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_f93c60b04a0499f2ec6880591a.o |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/balance_coverage/balance_coverage_builder_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |47.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/objcopy_d3af02c7d57ea2cbbe5d381baa.o |47.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |47.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_other.cpp |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/console_ut_tenants.cpp |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_proto.cpp |47.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/objcopy_b91160bcee04ad1f57e80af064.o |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_self.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_locks_ut.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |47.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/objcopy_1326afc143d720f2af434cd836.o |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_datetime.cpp |47.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/objcopy_1007df29dec27b0b7a1587d49f.o |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_sausage.cpp |47.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.{pb.h ... 
grpc.pb.h} |47.4%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/generic/analytics/edaf602b2011baa1519a223d63_raw.auxcpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_charge.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_redo.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_s3fifo_ut.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_compaction.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_compaction_multi.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_tx_ut.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_slice.cpp |47.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/secondary_index/objcopy_6b62c1db41e3ebd0278a84dced.o |47.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_ae5b9f6e7a00f305f01a3dde87.o |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_data_cleanup.cpp |47.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_snapshot_isolation_ut.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_rename_table_column.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_memtable.cpp |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_slice_loader.cpp |47.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_51fb1403d79c2fadb9d2ea6ce4.o |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_decimal.cpp |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_forward.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_returning_ut.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_sort_ut.cpp |47.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_db_iface.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/console_ut_configs.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_ut.cpp |47.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_screen.cpp |47.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_bloom.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_iterator.cpp |47.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |47.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |47.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots |47.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_agg_ut.cpp |47.5%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |47.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |47.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |47.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |47.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_versions.cpp |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |47.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydb/ydb |47.4%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tools/combiner_perf/libkqp-tools-combiner_perf.a |47.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |47.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |47.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |47.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache.cpp |47.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_stat.cpp |47.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache_actor.cpp |47.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/objcopy_39cda017c3d5f0e18270b53881.o |46.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_not_null_ut.cpp |46.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |46.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |46.8%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/core/libyt-yt-core.a |46.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_script.cpp |46.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_ranges_ut.cpp |46.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots |46.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |46.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/objcopy_3bdea7737a87c43bfaa0aaf4c3.o |46.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |47.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |47.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/helper.cpp |47.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |47.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/objcopy_bfb03c74768170a0b82d2bf355.o |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_ut.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/run_ydb.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/supported_codecs.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/quoter_performance_test/main.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ut_topic_splitmerge.cpp |47.2%| [PY] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/datashard/secondary_index/objcopy_716263ce181e67161f84180281.o |47.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/objcopy_347676f1cbc0086a238f181b11.o |47.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/check_integrity.cpp |47.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/yql/libcpp-protobuf-yql.a |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_upload_rows.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |47.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/ut/workload-topic.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/flat_test_db.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_trace.cpp |47.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/objcopy_d78a45708fbb346ab43f2c1bb7.o |47.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/dynamic_prototype/libcpp-protobuf-dynamic_prototype.a |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base_reboots/ut_base_reboots.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_merge_ut.cpp |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay_yt/query_compiler.cpp |47.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/file/libfile_udf.global.a |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay_yt/main.cpp |47.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/quotas_manager.pb.{h, cc} |47.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_gclogic_ut.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_tx_ut.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_row_versions_ut.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_mvcc_ut.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_handle_ut.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_locks_ut.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_mvcc_ut.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_btree_index_iter_charge.cpp |47.3%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/kqp/tools/combiner_perf/simple_block.cpp |47.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_leases_ut.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_clock_pro_ut.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_named_expressions_ut.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_range_cache_ut.cpp |47.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_clickhouse_internal.pb.{h, cc} |47.5%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/generated/runtime_feature_flags.h |47.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_part_ut.cpp |47.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_database_ut.cpp |47.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_cxx_database_ut.cpp |47.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_switchable_ut.cpp |47.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/checkpoint_coordinator.pb.{h, cc} |47.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_btree_index_nodes.cpp |47.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/marker.pb.{h, cc} |47.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_federation_discovery.pb.{h, cc} |47.7%| [PB] 
{BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_discovery.pb.{h, cc} |47.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/fields.pb.{h, cc} |47.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_ne_ut.cpp |47.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/health_check/health_check_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/storagepoolmon/ut/storagepoolmon_ut.cpp |47.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/objcopy_1406195445f45d950dda89fcd8.o |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_index_build_reboots.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/in_memory_control_plane_storage_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_quotas_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_document_api_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_sysview_reboots/ut_sysview_reboots.cpp |47.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.{pb.h ... grpc.pb.h} |47.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a |47.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.{pb.h ... grpc.pb.h} |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_write_actor_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_service_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/federated_topic/ut/basic_usage_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_ut_trace.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_permissions_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ycloud/impl/access_service_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_permissions_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_data_source/ut_external_data_source.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ycloud/impl/service_account_service_ut.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ycloud/impl/user_account_service_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_qs_scripts_ut.cpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/38dcacd12926621ca72e30ce1b_raw.auxcpp |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ycloud/impl/folder_service_ut.cpp |47.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_4fdbe64ce62f955927d10364b5.o |47.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_c5a20cdd9533abc10e82efdd1a.o |47.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_6b8c453743f8fd2c5380af70c6.o |47.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |47.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_1339ee5ef04af3a5a49d43a6c9.o |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_ut.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/proxy/proxy_actor_ut.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/proxy/ut_helpers.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ut_data_erasure_reboots.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/actor_bootstrapped_ut.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/actor_tracker_ut.cpp |47.9%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.{pb.h ... grpc.pb.h} |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_column_build/ut_column_build.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/test_protocols_ut.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_ut.cpp |47.9%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/protos/lwtrace.pb.{h, cc} |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_table_split_ut.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_move_reboots/ut_move_reboots.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_permissions_ut.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/initializer/ut/ut_init.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/iceberg_ut_data.cpp |47.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_5865a174a6c25ca1a2d6386702.o |47.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Flask-Cors/py3/libpy3python-Flask-Cors-py3.global.a |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_ut.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_sequence/ut_sequence.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/test_interconnect_ut.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/subprocess.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/printout.cpp |47.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_36807918bd7a86c1ea37310c9c.o |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/converters.cpp |47.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_0ab925f82bbba07bf3b749dc3c.o |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/cms/cms_ut.cpp |47.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_00c87b13e2f685811a9825079d.o |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/simple.cpp |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_internal_ut.cpp |47.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_c02c3d9f840d02af9fad858a55.o |47.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.{pb.h ... grpc.pb.h} |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/simple_last.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attributes_stripper.cpp |48.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.{pb.h ... 
grpc.pb.h} |48.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dwarf_backtrace/registry/libcpp-dwarf_backtrace-registry.global.a |47.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/kqp_generic_provider_ut.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/config.cpp |48.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.{pb.h ... grpc.pb.h} |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/tpch_last.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_filter.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/throttling_channel.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/token.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/streams.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/serialized_channel.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/retrying_channel.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_types_arrow_ut.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_arrow_in_channels_ut.cpp |48.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.{pb.h ... grpc.pb.h} |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/overload_controlling_service_base.cpp |48.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.{pb.h ... grpc.pb.h} |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/roaming_channel.cpp |48.0%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/common/v1/common.{pb.h ... grpc.pb.h} |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/peer_discovery.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/response_keeper.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_common_pq.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/stream.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/server_detail.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_server.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/hedging_channel.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dispatcher.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_def.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service_detail.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/type_registry.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistic_path.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_statistics_producer.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/local_channel.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/dialer.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker_profiler.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pattern_formatter.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/ref_counted_tracker.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hazard_ptr.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/proc.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/helpers.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bloom_filter.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bitmap.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/null_channel.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/adjusted_exponential_moving_average.cpp |48.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/core/rpc/per_key_request_queue_provider.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_log_writer.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/message_format.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/public.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/protocol_version.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/packet.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/codicil_guarded_invoker.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/arithmetic_formula.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/future.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/client.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/request_queue_provider.cpp |48.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/large/objcopy_5f161468ff5322b803d4d0dc79.o |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelation_token.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/current_invoker.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/brotli.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/bzip2.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/static_channel_factory.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/local_bypass.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/service.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/overload_controller.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/provider/read_attributes_utils_ut.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lz.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/dispatcher_impl.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/lzma.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_provider_ut.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_rw_lock.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zlib.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/helpers.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/thread.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/ut_sequence/dsproxy_config_retrieval.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_volatile.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway_ut.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/stream.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/service_discovery/service_discovery.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_looper.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_semaphore.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/threading/spin_wait_slow_path_logger.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/utilex/random.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/viable_peer_registry.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/public.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/tracing/allocation_tags.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/delayed_executor.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_manager.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_consumer.cpp |48.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/attribute_consumer.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/stack.cpp |48.2%| [CC] 
{BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_queue.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fls.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_thread_pool.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/bin/main.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/config.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_alarm.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber_scheduler_thread.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ypath/tokenizer.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/lease_manager.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_yielder.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/notify_manager.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/propagating_storage.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/async_writer.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/null_consumer.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/periodic_executor.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attribute_consumer.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/retrying_periodic_executor.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/forwarding_consumer.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/consumer.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/system_invokers.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/suspendable_action_queue.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/depth_limiting_yson_consumer.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_detail.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/producer.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/lexer.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/config.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/dns_resolver.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/throughput_throttler.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/stream.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/parser.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_options.cpp |48.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/tls.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_parser.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_helpers.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/list_verb_lazy_yson_consumer.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/fluent_log.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_writer.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/compression.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/formatter.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/serializable_logger.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/random_access_gzip.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/stream_output.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop_unknown_fields.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/logger_owner.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/zstd_compression.cpp |48.3%| [CC] 
{BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/blob_output.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/pull_parser_deserialize.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/checksum.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/statistics_producer.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/coro_pipe.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_builder_stream.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/writer.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fair_share_hierarchical_queue.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_merger.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/error.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/config.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/cache_config.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/linear_probe.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/histogram.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/string_filter.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/fs.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/tokenizer.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/protobuf_interop.cpp |48.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/token_writer.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/protobuf_helpers.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/random.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/public.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/syntax_checker.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/serialize_dump.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/utf8_decoder.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/zerocopy_output_writer.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/slab_allocator.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_designated_consumer.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/yson_builder.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/yson/ypath_filtering_consumer.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/config.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/address.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/socket.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytalloc/bindings.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/connection.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/public.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/load.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/context.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_detail.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/profiling/timing.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_util.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/schemas.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authenticator.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/public.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/snappy.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/invoker_pool.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_attribute_owner.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/interned_attributes.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/size.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/attributes.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/caching_channel_factory.cpp 
|48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/convert.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/dictionary_codec.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/connection.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/ut_backup_collection_reboots.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/exception_helpers.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/codec.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/public.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/server.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_barrier.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/bus/tcp/config.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/compression/zstd.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limits.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/helpers.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/action_queue.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/coroutine.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/request_complexity_limiter.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ephemeral_node_factory.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/execution_stack.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_serverless_reboots/ut_serverless_reboots.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_queue_scheduler_thread.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_invoker_pool.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/serialize.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/async_stream_pipe.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_resolver.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fiber.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_share_action_queue.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_visitor.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/system_attribute_provider.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/static_service_dispatcher.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/tree_builder.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/node_detail.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/nonblocking_batcher.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/profiling_helpers.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/invoker_queue.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduler_thread.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/service_combiner.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/pollable_detail.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/single_queue_scheduler_thread.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/fair_throttler.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/scheduled_executor.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/permission.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/quantized_executor.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_service.cpp |48.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/essentials/tools/sql2yql/sql2yql |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/parser_helpers.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_affinity.cpp |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_update.cpp |48.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/sql/ydb-tests-sql |48.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |48.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/yson_struct_detail.cpp |48.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |48.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_detail.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/virtual.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/thread_pool_poller.cpp |48.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/config.cpp |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/crypto/crypto.cpp |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/helpers.cpp |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |48.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_5db899a01c2ec6f53648af6840.o |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/concurrency/two_level_fair_share_thread_pool.cpp |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/ytree/ypath_client.cpp |48.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |48.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |48.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/json_callbacks.cpp |48.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/large/objcopy_422ca1effff14e5a08952658d0.o |48.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/large/objcopy_8ac5034640eee44b1cd5fa5253.o |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_ut.cpp |48.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |48.5%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/dns/ares_dns_resolver.cpp |48.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |48.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/etcd_proxy/etcd_proxy |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/json/config.cpp |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/actions/cancelable_context.cpp 
|48.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/objcopy_bfa810e70cd1de18c5d4a18a62.o |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/log_writer_detail.cpp |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |48.7%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/dc048c91e67372877fc6ad2dfc_raw.auxcpp |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |48.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |48.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |48.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/file_log_writer.cpp |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/data/kqp_read_null_ut.cpp |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/system_log_event_provider.cpp |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/backoff_strategy.cpp |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_ut_helpers.cpp |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packed_unsigned_vector.cpp |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/example/ydb-tests-example |48.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/objcopy_774cbd1f10ee287899289ecb3f.o |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/main.cpp |48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/bit_packing.cpp |48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/factories.cpp |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/commands/libcommands.a |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |48.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_656baae3c1e24959f5bcc457d7.o |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/configurable_singleton_def.cpp |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/digest.cpp |48.6%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/709f125727d9ea4165df516509_raw.auxcpp |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/codicil.cpp |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_stats.cpp |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |48.7%| [CC] 
{default-linux-x86_64, release, asan} $(S)/ydb/library/yql/dq/comp_nodes/dq_hash_operator_serdes.cpp |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/hedging_manager.cpp |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |48.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/objcopy_5992d4831c5055a481712a2a80.o |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/id_generator.cpp |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |48.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/process_exit_profiler.cpp |48.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/memory_usage_tracker.cpp |48.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_445797246443360525d31550d1.o |48.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |48.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/pool_allocator.cpp |48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/lib/libpy3tools-ydb_serializable-lib.global.a |48.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/serializability/libpy3tests-library-serializability.global.a |48.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_7648c2519d02b8456f762efc4b.o |48.7%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/dq/comp_nodes/dq_program_builder.cpp |48.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_31d605682329607481eb568ed0.o |48.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/objcopy_245adf3e28f56e6467e034d9f2.o |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/relaxed_mpsc_queue.cpp |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/logging/config.cpp |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/shutdown.cpp |48.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_3ea8aa67e7c24c4f0e3b0406b9.o |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/misc/statistics.cpp |48.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/objcopy_51b071d7746089933668451b33.o |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/local_address.cpp |48.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_7a185a4b35de7733fde931d298.o |48.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_fdd48fc620c42f480ae38b77f5.o |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/helpers.cpp |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/listener.cpp |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_pq_reboots/ut_pq_reboots.cpp |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/net/config.cpp |48.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_b08299d456f3448b368e814cb8.o |48.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_dae5a42f53b4f98bf1b9fd8118.o |48.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/actor_activity_ut.cpp |48.9%| [PY] 
{BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/objcopy_e3bb1c534d69f237b55dd8dfe7.o |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/authentication_identity.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/phoenix/descriptors.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/server.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/balancing_channel.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/dynamic_channel_pool.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/channel_detail.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/client.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/bus/channel.cpp |48.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_api.pb.{h, cc} |48.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/blob_range.pb.{h, cc} |48.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_persqueue_cluster_discovery.pb.{h, cc} |48.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_cms.pb.{h, cc} |48.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/connector.pb.{h, cc} |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/core/rpc/config.cpp |48.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_status_codes.pb.{h, cc} |48.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/objcopy_2f0e0ac8198858b9ec9901778e.o |48.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/objcopy_83efacabe56767ae4f106a6d27.o |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yql/essentials/tools/sql2yql/sql2yql.cpp |48.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/objcopy_f738234258cd034cd5383f92ad.o |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup/ut_continuous_backup.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_kafka_functions.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_table_reboots/ut_external_table_reboots.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/large_results/kqp_scriptexec_results_ut.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/common_ut.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/metarequest_ut.cpp |48.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/objcopy_4b2e093abff756c97b675c0a31.o |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/main.cpp |48.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/objcopy_89b3e69f7cdba68b4eefcae48c.o |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_stream_lookup.cpp |48.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.global.a |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/kafka_test_client.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/ut_helpers.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_serialization.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp.cpp |48.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_common.pb.{h, cc} |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_tenants_ut.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_produce_actor.cpp |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/objcopy_08a4b5d38a76e21591db0c3424.o |48.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/objcopy_e2637cea0f2e4db109b364a246.o |48.9%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/cms/cms_maintenance_api_ut.cpp |48.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/objcopy_a6e393b6d53f4c73feac80b55c.o |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/actors_ut.cpp |48.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/objcopy_f4b44a5d280d0f27f5ffd278e8.o |48.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/objcopy_bcf2142e31bf537964dc063d11.o |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_reboots/ut_reboots.cpp |48.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/objcopy_cee1e02beaf827051149b5ca30.o |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_discover_ut.cpp |48.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/objcopy_15e284a8ecb30c90903e842e70.o |48.9%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/objcopy_12d01741952bd4afa836364d84.o |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/objcopy_9314464e3560b2511ac931acd9.o |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_actor.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/list_objects_in_s3_export_ut.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cluster_info_ut.cpp |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/objcopy_8db6616d40f8020d0632222fe3.o |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/proxy.cpp |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_e0aef87c4bf15cfdc957f4bdd1.o |49.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.{pb.h ... grpc.pb.h} |49.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_formats.pb.{h, cc} |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_2b682e146a665bfa19210b0fd9.o |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/libpy3oltp_workload.global.a |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/objcopy_8fca143a218b930f297b779e3a.o |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/objcopy_c623700776b43ee95ec93c56f9.o |49.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.{pb.h ... grpc.pb.h} |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/partition_end_watcher_ut.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_collector_ut.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/backup_path_ut.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_table.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ut_cdc_stream_reboots.cpp |49.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.{pb.h ... grpc.pb.h} |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_login/ut_login.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_patch_ut.cpp |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_253d734e8c901d319d84fcc6e9.o |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/board_subscriber_ut.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp |49.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/dummy.{pb.h ... 
grpc.pb.h} |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_951c70889c9404d1662da27090.o |49.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_363b5875cc5c5e5745458b16b8.o |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/impl/local_partition_reader_ut.cpp |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_303f7409bfab4277e367bbd11a.o |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sequenceshard/ut_helpers.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/encrypted_storage_ut.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/aae788a890ddcb1702c659c8aa_raw.auxcpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ut_subdomain_reboots.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_ut.cpp |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/objcopy_e2a089b95d9316f6e26025d3e3.o |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sequenceshard/ut_sequenceshard.cpp |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/perf/kqp_workload_ut.cpp |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator_ext_blobs.cpp |49.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.{pb.h ... grpc.pb.h} |49.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_ce073e3cc612363936bdd04210.o |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_pg_types.cpp |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp |49.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_c43ce24509a50b033fa4050a33.o |49.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_03f75cad4510fd9d018635026c.o |49.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_64bde13108f9284b2e9f0bbb7a.o |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_dc1e8788b8287c02880cfe2814.o |49.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.{pb.h ... 
grpc.pb.h} |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy_ut.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_commit_redo_limit.cpp |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/address_classification/net_classifier_ut.cpp |49.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_0c451aebc6dafbdf0d9da2ab02.o |49.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/bridge/ydb-tests-functional-bridge |49.0%| [CC] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/c664ef6ca80e747b410e1da324_raw.auxcpp |49.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |49.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/libetcd-grpc.a |49.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/object_distribution_ut.cpp |49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |49.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_6cfba3dbee97ec121b2f346459.o |49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |49.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/objcopy_52d3e6a0651990fc997ab40ba2.o |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_info_types.cpp |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_types_ut.cpp |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_decimal_types.cpp |49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |48.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/perf/kqp_query_perf_ut.cpp |48.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/actors/ut/yql_arrow_push_down_ut.cpp |48.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel |49.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |49.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |49.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/scale_recommender_policy_ut.cpp |49.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |49.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |49.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |49.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_ut.cpp |49.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |49.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_change_collector.cpp |49.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/hive_impl_ut.cpp |49.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/fqrun/fqrun |49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation 
|49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/sequencer_ut.cpp |49.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_params_ut.cpp |49.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |49.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |49.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |49.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/stress-reconfig_state_storage_workload-tests |49.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_protocol.cpp |49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |49.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/storage_pool_info_ut.cpp |49.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_stats_ut.cpp |49.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_limits_ut.cpp |49.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_common_new_schemecache_ut.cpp |49.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/docs/generator/generator |49.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |49.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_columnshard_read_write.cpp |49.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/operations.{pb.h ... grpc.pb.h} |49.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |49.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |49.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.{pb.h ... 
grpc.pb.h} |49.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_e317764e105a7e9e48b67a7b7e.o |49.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_new_schemecache_ut.cpp |49.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_86ad37399122e504f3e6d8378d.o |49.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_backup.cpp |49.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/objcopy_5a4a401f33f46c70417a65f584.o |49.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/flavours/libpy3tests-library-flavours.global.a |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_explain_ut.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_quorum_tracker_ut.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_normalizer.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_query_ut.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/nodes_manager.pb.{h, cc} |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_monitoring.pb.{h, cc} |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/common.pb.{h, cc} |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/hive_ut.cpp |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_status_codes.pb.{h, cc} |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_request_reporting_ut.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/downtime_ut.cpp |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/config.pb.{h, cc} |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_counters_ut.cpp |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/interconnect.pb.{h, cc} |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compression_ut.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/encrypted_backup_ut.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_base.cpp |49.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_state_load_plan.pb.{h, cc} |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_proxy.pb.{h, cc} |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/token_accessor.pb.{h, cc} |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scheme.pb.{h, cc} |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/validation.pb.{h, cc} |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_proxy.pb.{h, cc} |49.4%| [PB] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/data.pb.{h, cc} |49.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/dq/comp_nodes/dq_hash_operator_serdes.cpp |49.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/dq/comp_nodes/dq_program_builder.cpp |49.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/bridge/objcopy_4b2ec656f7e85bc05586d7e6fc.o |49.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/portion_info.pb.{h, cc} |49.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/bridge/objcopy_0adb3ed6d98cbd98d13d8a3085.o |49.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/objcopy_c114cbf6b820d92320c1e2c912.o |49.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/objcopy_589315062f5401a368910248f0.o |49.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/objcopy_61613f0bd98876f149d8574891.o |49.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/bridge/objcopy_de8e7bde61396640f718e89d07.o |49.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/proto/source.pb.{h, cc} |49.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_a51d334445f3e4e9170e666e7b.o |49.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_16997685291b6913e28a98236c.o |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_tables_ut.cpp |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_sysview/ut_sysview.cpp |49.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_e5d897582dc0fbda7c578cb53f.o |49.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/objcopy_562a790b75f22ca86b37e5623e.o |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_move_table.cpp |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_actors_ut.cpp |49.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.{pb.h ... grpc.pb.h} |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/database/ut/ut_database.cpp |49.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.{pb.h ... grpc.pb.h} |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/sentinel_ut.cpp |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/scheduler/ut/dq_scheduler_ut.cpp |49.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.{pb.h ... grpc.pb.h} |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |49.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_table/ut_external_table.cpp |49.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_0a1f127d9343562caddfbacf79.o |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp |49.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.{pb.h ... 
grpc.pb.h} |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/backpressure.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/common.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp |49.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/objcopy_988cc467d4da79de606ebf50ee.o |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp |49.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.{pb.h ... grpc.pb.h} |49.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/objcopy_f4efacd00293c5fe09c3f84a62.o |49.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.{pb.h ... grpc.pb.h} |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/tenant_ut_pool.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/tools/fqrun/fqrun.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp |49.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/objcopy_b1ab101896e634020e0c6ffeaf.o |49.6%| [AR] {default-linux-x86_64, release, asan, pic} $(B)/yt/yt/core/libyt-yt-core.a |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |49.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/objcopy_589d529f9477963cf67237781c.o |49.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/objcopy_b8aa61f402be805d2e3e9e75a2.o |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/reconfig_state_storage_workload/workload/libpy3stress-reconfig_state_storage_workload-workload.global.a |49.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/objcopy_1f78e7638ae0f2e308bd7331f9.o |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/direct_read_ut.cpp |49.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_178e64ce5db822fc6aa8b3e608.o |49.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/objcopy_82d6d29ac7be3798b7e748facc.o |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_fetcher_ut.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/ut/ut_aggregation/ut_aggregate_statistics.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_ut.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/group_size_in_units.cpp |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/docs/generator/libpy3olap-docs-generator.global.a |49.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_b866963286293af0b6f2139fed.o |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_write.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/driver_lib/run/auto_config_initializer_ut.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_write_ut.cpp |49.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/external_sources.{pb.h ... grpc.pb.h} |49.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/objcopy_c65a9d5efe13dc05c1466090ba.o |49.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/docs/generator/objcopy_ac8dbe7f54a2cb7efb6636f75f.o |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp |49.7%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/objcopy_d23500649301df2a8de48ba70d.o |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/assign_tx_id_ut.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/tenant_node_enumeration_ut.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_auditsettings/ut_auditsettings.cpp |49.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.{pb.h ... grpc.pb.h} |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_client_ut.cpp |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_read_ut.cpp |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/tenant_ut_local.cpp |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/load_test/ut/group_test_ut.cpp |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_olap/ut_olap.cpp |49.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/objcopy_4f055c289b3de8f2a1e827ae5c.o |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |49.8%| [AR] {RESULT} $(B)/yt/yt/core/libyt-yt-core.a |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_transfer/ut_transfer.cpp |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp |49.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.{pb.h ... grpc.pb.h} |49.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/feature_flags.{pb.h ... grpc.pb.h} |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |49.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.{pb.h ... grpc.pb.h} |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_compaction/ut_compaction.cpp |49.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/objcopy_f9b0feecd0e36f08cbf5c53562.o |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/certificate_check/cert_check_ut.cpp |49.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.{pb.h ... grpc.pb.h} |49.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp |49.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp |49.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.{pb.h ... 
grpc.pb.h} |49.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp |49.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/certificate_check/cert_utils_ut.cpp |49.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/phantom_blobs.cpp |49.9%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/core/libyt-yt-core.a |50.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_order.cpp |50.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |49.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_ext_blobs_multiple_channels.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/s3/ut/s3_aws_credentials_ut.cpp |49.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/hive_metastore/libcore-external_sources-hive_metastore.a |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |49.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |49.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp |49.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |49.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |49.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp |50.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |48.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp |48.7%| COMPACTING CACHE 464.0MiB |48.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/node_broker_ut.cpp |48.7%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part6/py2_flake8 |48.7%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/flake8 |48.7%| [TS] {RESULT} ydb/tests/datashard/dump_restore/flake8 |48.7%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part5/flake8 |48.7%| [TS] {RESULT} ydb/tests/stress/log/tests/flake8 |48.7%| [TS] {RESULT} ydb/tests/datashard/s3/flake8 |48.7%| [TS] {RESULT} ydb/tests/functional/tenants/flake8 |48.7%| [TS] {RESULT} ydb/tests/datashard/vector_index/large/flake8 |48.7%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part16/flake8 |48.7%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part2/py2_flake8 |48.7%| [TS] {RESULT} ydb/tests/datashard/secondary_index/flake8 |48.7%| [TS] {RESULT} ydb/tests/functional/kqp/plan2svg/flake8 |48.7%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/ydb/flake8 |48.7%| [TS] {RESULT} ydb/tests/datashard/split_merge/flake8 |48.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part8/flake8 |48.8%| [TS] {RESULT} ydb/tests/fq/mem_alloc/flake8 |48.8%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part9/py2_flake8 |48.8%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part5/py2_flake8 |48.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part1/flake8 |48.8%| [TS] {RESULT} ydb/tests/fq/s3/flake8 |48.8%| [TS] {RESULT} ydb/tests/datashard/select/flake8 |48.8%| [TS] {RESULT} ydb/tests/olap/oom/flake8 |48.8%| [TS] {RESULT} ydb/tests/olap/scenario/flake8 
|48.8%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part10/py2_flake8 |48.8%| [TS] {RESULT} ydb/tests/functional/blobstorage/flake8 |48.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part14/flake8 |48.8%| [TS] {RESULT} ydb/tests/datashard/vector_index/medium/flake8 |48.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part10/flake8 |48.8%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part0/py2_flake8 |48.8%| [TS] {RESULT} ydb/tests/datashard/copy_table/flake8 |48.8%| [TS] {RESULT} ydb/tests/functional/ydb_cli/flake8 |48.8%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part17/py2_flake8 |48.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_import/flake8 |48.8%| [TS] {RESULT} ydb/tests/postgres_integrations/go-libpq/flake8 |48.8%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part7/py2_flake8 |48.9%| [TS] {RESULT} ydb/tests/olap/docs/generator/flake8 |48.9%| [TS] {RESULT} ydb/tests/functional/scheme_shard/flake8 |48.9%| [TS] {RESULT} ydb/tests/functional/limits/flake8 |48.9%| [TS] {RESULT} ydb/tests/functional/serverless/flake8 |48.9%| [TS] {RESULT} ydb/tests/functional/restarts/flake8 |48.9%| [TS] {RESULT} ydb/tests/olap/common/flake8 |48.9%| [TS] {RESULT} ydb/tests/compatibility/flake8 |48.9%| [TS] {RESULT} ydb/tests/functional/script_execution/flake8 |48.9%| [TS] {RESULT} ydb/tests/tools/kqprun/tests/flake8 |48.9%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part9/flake8 |48.9%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part0/py2_flake8 |48.9%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part9/py2_flake8 |48.9%| [TS] {RESULT} ydb/tests/datashard/dml/flake8 |48.9%| [TS] {RESULT} ydb/tests/functional/tpc/large/flake8 |48.9%| [TS] {RESULT} ydb/library/benchmarks/runner/run_tests/flake8 |48.9%| [TS] {RESULT} ydb/tests/functional/tpc/medium/flake8 |48.9%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part13/py2_flake8 |48.9%| [TS] {RESULT} ydb/tests/functional/hive/flake8 |48.9%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/std/flake8 |48.9%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part18/flake8 |48.9%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part4/py2_flake8 |48.9%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part12/flake8 |49.0%| [TS] {RESULT} ydb/tests/functional/bridge/flake8 |49.0%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part1/py2_flake8 |49.0%| [TS] {RESULT} ydb/tests/olap/ttl_tiering/flake8 |49.0%| [TS] {RESULT} ydb/tests/tools/kqprun/recipe/flake8 |49.0%| [TS] {RESULT} ydb/tests/functional/api/flake8 |49.0%| [TS] {RESULT} ydb/tests/sql/large/flake8 |49.0%| [TS] {RESULT} ydb/tests/functional/serializable/flake8 |49.0%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part19/py2_flake8 |49.0%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part13/flake8 |49.0%| [TS] {RESULT} ydb/tests/fq/common/flake8 |49.0%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part11/flake8 |49.0%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part12/py2_flake8 |49.0%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part11/py2_flake8 |49.0%| [TS] {RESULT} ydb/tests/fq/restarts/flake8 |49.0%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part2/flake8 |49.0%| [TS] {RESULT} ydb/tests/stress/mixedpy/flake8 |49.0%| [TS] {RESULT} ydb/tests/datashard/partitioning/flake8 |49.0%| [TS] {RESULT} ydb/tests/tools/pq_read/test/flake8 |49.0%| [TS] {RESULT} ydb/tests/stress/cdc/tests/flake8 |49.0%| [TS] {RESULT} ydb/tests/fq/generic/analytics/black |49.0%| [TS] {RESULT} ydb/tests/fq/solomon/flake8 |49.0%| [TS] {RESULT} ydb/tests/functional/config/flake8 |49.1%| [TS] {RESULT} 
ydb/library/yql/tests/sql/dq_file/part15/py2_flake8 |49.1%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part1/py2_flake8 |49.1%| [TS] {RESULT} ydb/library/benchmarks/runner/flake8 |49.1%| [TS] {RESULT} ydb/tests/functional/sqs/large/flake8 |49.1%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part3/py2_flake8 |49.1%| [TS] {RESULT} ydb/tests/fq/generic/streaming/black |49.1%| [TS] {RESULT} ydb/tests/functional/sqs/common/flake8 |49.1%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part3/flake8 |49.1%| [TS] {RESULT} ydb/tests/olap/lib/flake8 |49.1%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part5/py2_flake8 |49.1%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part15/flake8 |49.1%| [TS] {RESULT} ydb/tests/fq/generic/analytics/flake8 |49.1%| [TS] {RESULT} ydb/tests/olap/column_family/compression/flake8 |49.1%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part8/py2_flake8 |49.1%| [TS] {RESULT} ydb/tests/fq/generic/streaming/flake8 |49.1%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part7/py2_flake8 |49.1%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/fifo/flake8 |49.1%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/oracle/flake8 |49.1%| [TS] {RESULT} ydb/tests/fq/http_api/flake8 |49.1%| [TS] {RESULT} ydb/tests/functional/sqs/cloud/flake8 |49.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/hive_metastore/hive_metastore_native/libexternal_sources-hive_metastore-hive_metastore_native.a |49.1%| [TS] {RESULT} ydb/tests/functional/autoconfig/flake8 |49.2%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part6/py2_flake8 |49.2%| [TS] {RESULT} ydb/tests/stress/olap_workload/tests/flake8 |49.2%| [TS] {RESULT} ydb/library/benchmarks/runner/result_convert/flake8 |49.2%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part3/py2_flake8 |49.2%| [TS] {RESULT} ydb/tests/olap/delete/flake8 |49.2%| [TS] {RESULT} ydb/tests/functional/query_cache/flake8 |49.2%| [TS] {RESULT} ydb/tests/functional/canonical/flake8 |49.2%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part18/py2_flake8 |49.2%| [TS] {RESULT} ydb/tests/sql/flake8 |49.2%| [TS] {RESULT} ydb/tests/fq/plans/flake8 |49.2%| [TS] {RESULT} ydb/tests/stress/reconfig_state_storage_workload/tests/flake8 |49.2%| [TS] {RESULT} ydb/tests/sql/lib/flake8 |49.2%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part4/py2_flake8 |49.2%| [TS] {RESULT} ydb/tests/library/ut/flake8 |49.2%| [TS] {RESULT} ydb/tests/functional/sqs/with_quotas/flake8 |49.2%| [TS] {RESULT} ydb/tests/functional/sqs/messaging/flake8 |49.2%| [TS] {RESULT} ydb/tests/functional/scheme_tests/flake8 |49.2%| [TS] {RESULT} ydb/tests/stress/kv/tests/flake8 |49.2%| [TS] {RESULT} ydb/library/yql/tests/sql/solomon/py2_flake8 |49.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part7/flake8 |49.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part6/flake8 |49.2%| [TS] {RESULT} ydb/tests/functional/cms/flake8 |49.3%| [TS] {RESULT} ydb/tests/stress/show_create/view/tests/flake8 |49.3%| [TS] {RESULT} ydb/tests/library/compatibility/binaries/downloader/flake8 |49.3%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/flake8 |49.3%| [TS] {RESULT} ydb/tests/stress/oltp_workload/tests/flake8 |49.3%| [TS] {RESULT} ydb/tests/stress/node_broker/tests/flake8 |49.3%| [TS] {RESULT} ydb/tests/stress/simple_queue/tests/flake8 |49.3%| [TS] {RESULT} ydb/tests/olap/flake8 |49.3%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part8/py2_flake8 |49.3%| [TS] {RESULT} 
ydb/library/benchmarks/runner/result_compare/flake8 |49.3%| [TS] {RESULT} ydb/tests/example/flake8 |49.3%| [TS] {RESULT} ydb/tests/fq/yds/flake8 |49.3%| [TS] {RESULT} ydb/tests/olap/data_quotas/flake8 |49.3%| [TS] {RESULT} ydb/tests/tools/nemesis/ut/flake8 |49.3%| [TS] {RESULT} ydb/tests/olap/s3_import/flake8 |49.3%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part17/flake8 |49.3%| [TS] {RESULT} ydb/tests/stress/s3_backups/tests/flake8 |49.3%| [TS] {RESULT} ydb/tests/fq/streaming_optimize/flake8 |49.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part10/py2_flake8 |49.3%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part0/flake8 |49.3%| [TS] {RESULT} ydb/core/viewer/tests/flake8 |49.3%| [TS] {RESULT} ydb/tests/stress/transfer/tests/flake8 |49.3%| [TS] {RESULT} ydb/library/benchmarks/runner/runner/flake8 |49.4%| [TS] {RESULT} ydb/tests/olap/load/flake8 |49.4%| [TS] {RESULT} ydb/tests/functional/ttl/flake8 |49.4%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part4/flake8 |49.4%| [TS] {RESULT} ydb/tests/datashard/async_replication/flake8 |49.4%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/join/flake8 |49.4%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part16/py2_flake8 |49.4%| [TS] {RESULT} ydb/tests/fq/multi_plane/flake8 |49.4%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/flake8 |49.4%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part14/py2_flake8 |49.4%| [TS] {RESULT} ydb/tests/datashard/parametrized_queries/flake8 |49.4%| [TS] {RESULT} ydb/tests/functional/sqs/multinode/flake8 |49.4%| [TS] {RESULT} ydb/tests/functional/postgresql/flake8 |49.5%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part19/flake8 |49.5%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/mysql/flake8 |49.5%| [TS] {RESULT} ydb/tests/functional/wardens/flake8 |49.7%| [TS] {RESULT} ydb/tests/functional/encryption/flake8 |49.7%| [TS] {RESULT} ydb/tests/functional/rename/flake8 |49.7%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part2/py2_flake8 |49.7%| [TS] {RESULT} ydb/tests/datashard/ttl/flake8 |49.8%| [TS] {RESULT} ydb/tests/functional/audit/flake8 |49.8%| [TS] {RESULT} ydb/tests/olap/s3_import/large/flake8 |53.0%| [AR] {default-linux-x86_64, release, asan} $(B)/library/cpp/build_info/liblibrary-cpp-build_info.a |53.1%| [AR] {default-linux-x86_64, release, asan} $(B)/library/cpp/svnversion/liblibrary-cpp-svnversion.a |55.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/tools/combiner_perf/simple_block.cpp |55.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/simple_block.cpp |55.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |55.2%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |55.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |55.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |55.2%| [LD] {RESULT} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |55.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |55.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |55.2%| [LD] {RESULT} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |55.2%| [LD] {default-linux-x86_64, release, 
asan} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |55.2%| [LD] {RESULT} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |55.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |55.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |55.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |55.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |55.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |55.3%| [LD] {RESULT} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane |55.3%| [LD] {RESULT} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |55.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |55.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |55.3%| [LD] {RESULT} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |55.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |55.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |55.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/bridge/ydb-tests-functional-bridge |55.3%| [LD] {RESULT} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |55.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |55.4%| [LD] {RESULT} $(B)/ydb/tests/functional/bridge/ydb-tests-functional-bridge |55.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/bridge/ydb-tests-functional-bridge |55.4%| [LD] {default-linux-x86_64, release, asan} $(B)/yql/essentials/tools/sql2yql/sql2yql |55.4%| [LD] {RESULT} $(B)/yql/essentials/tools/sql2yql/sql2yql |55.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/essentials/tools/sql2yql/sql2yql |55.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |55.4%| [LD] {RESULT} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |55.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario |55.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |55.4%| [LD] {RESULT} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |55.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |55.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut |55.4%| [LD] {RESULT} $(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut |55.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |55.5%| [LD] {RESULT} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |55.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |55.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |55.5%| [LD] {RESULT} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |55.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |55.5%| [LD] {RESULT} 
$(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |55.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |55.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |55.6%| [LD] {RESULT} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |55.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |55.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |55.6%| [LD] {RESULT} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |55.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore >> ConvertUnboxedValueToArrowAndBack::VariantOverTupleWithOptionals [GOOD] >> DqOutputChannelTests::BigRow >> DqOutputChannelTests::Overflow [GOOD] >> ConvertUnboxedValueToArrowAndBack::DictUtf8ToInterval |55.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |55.7%| [LD] {RESULT} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |55.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |55.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |55.7%| [LD] {RESULT} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |55.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries >> DqOutputWideChannelTests::BigRow >> DqOutputWideChannelTests::Overflow [GOOD] >> DqOutputChannelWithStorageTests::Spill [GOOD] >> DqUnboxedValueToNativeArrowConversion::VariantOverTupleWithOptionals [GOOD] >> DqUnboxedValueToNativeArrowConversion::VariantOverStruct [GOOD] >> TestArrowBlockSplitter::CheckLargeRows [GOOD] >> TestArrowBlockSplitter::CheckLargeScalarRows [GOOD] >> DqUnboxedValueToNativeArrowConversion::Struct [GOOD] >> DqUnboxedValueToNativeArrowConversion::Tuple [GOOD] >> DqUnboxedValueToNativeArrowConversion::DictUtf8ToInterval >> ConvertUnboxedValueToArrowAndBack::Struct [GOOD] >> ConvertUnboxedValueToArrowAndBack::Tuple [GOOD] >> ConvertUnboxedValueToArrowAndBack::VariantOverStruct [GOOD] >> ConvertUnboxedValueToArrowAndBack::OptionalOfOptional [GOOD] >> DqOutputChannelTests::PopAll [GOOD] >> DqOutputChannelWithStorageTests::Overflow [GOOD] >> DqOutputChannelTests::SingleRead [GOOD] >> DqOutputChannelTests::PartialRead [GOOD] |56.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering >> DqOutputWideChannelWithStorageTests::Spill [GOOD] >> DqOutputWideChannelTests::SingleRead [GOOD] >> DqOutputWideChannelTests::PartialRead [GOOD] >> DqOutputWideChannelTests::PopAll [GOOD] >> DqOutputWideChannelWithStorageTests::Overflow [GOOD] >> DqUnboxedValueDoNotFitToArrow::DictOptionalToTuple >> TestArrowBlockSplitter::SplitLargeBlock [GOOD] >> TestArrowBlockSplitter::SplitWithScalars [GOOD] >> TestArrowBlockSplitter::PassSmallBlock [GOOD] >> DqSpillingFileTests::ReadError [GOOD] >> DqOutputChannelTests::BigRow [GOOD] >> DqOutputChannelTests::ChunkSizeLimit [GOOD] >> DqOutputWideChannelTests::BigRow [GOOD] >> DqOutputWideChannelTests::ChunkSizeLimit [GOOD] >> DqSpillingFileTests::Write_TotalSizeLimitExceeded |56.1%| [LD] {RESULT} 
$(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |56.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |56.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> ConvertUnboxedValueToArrowAndBack::OptionalOfOptional [GOOD] >> DqSpillingFileTests::MultipleFileParts [GOOD] >> DqSpillingFileTests::Write_FileSizeLimitExceeded [GOOD] >> DqSpillingFileTests::SingleFilePart [GOOD] >> DqSpillingFileTests::Write_TotalSizeLimitExceeded [GOOD] >> DqSpillingFileTests::FdCounterMultiFile [GOOD] >> DqSpillingFileTests::FdCounterSingleFile [GOOD] |56.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> TestArrowBlockSplitter::PassSmallBlock [GOOD] |56.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> DqOutputWideChannelWithStorageTests::Overflow [GOOD] |56.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> TestArrowBlockSplitter::CheckLargeScalarRows [GOOD] |56.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |56.1%| [LD] {RESULT} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |56.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom |56.1%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> DqOutputChannelTests::ChunkSizeLimit [GOOD] >> DqSpillingFileTests::ThreadPoolQueueOverflow >> DqSpillingFileTests::Simple [GOOD] >> DqSpillingFileTests::StartError [GOOD] >> DqUnboxedValueDoNotFitToArrow::DictOptionalToTuple [GOOD] >> DqUnboxedValueDoNotFitToArrow::OptionalOfOptional [GOOD] >> DqUnboxedValueDoNotFitToArrow::LargeVariant >> DqSpillingFileTests::NoSpillingService [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::ReadError [GOOD] Test command err: 2025-06-25T14:23:58.501268Z :KQP_COMPUTE ERROR: spilling_file.cpp:968: [Read async] file: /home/runner/.ya/build/build_root/yft8/001a6f/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/testing_out_stuff/chunk3/dq_spilling_7080/node_1_5a94822f-353915a5-3c49b47c-ae18b761/1_test_0, blobId: 0, offset: 0, error: (Error 2: No such file or directory) util/system/file.cpp:936: can't open "/home/runner/.ya/build/build_root/yft8/001a6f/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/testing_out_stuff/chunk3/dq_spilling_7080/node_1_5a94822f-353915a5-3c49b47c-ae18b761/1_test_0" with mode RdOnly (0x00000008) |56.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select |56.1%| [LD] {RESULT} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::Write_TotalSizeLimitExceeded [GOOD] Test command err: 2025-06-25T14:23:58.577151Z :KQP_COMPUTE ERROR: spilling_file.cpp:425: [Write] Total size limit exceeded. 
From: [1:5:2052], blobId: 2, bytes: 50 |56.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> DqOutputChannelTests::PartialRead [GOOD] |56.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select |56.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> DqOutputWideChannelTests::ChunkSizeLimit [GOOD] >> DqSpillingFileTests::ThreadPoolQueueOverflow [GOOD] >> DqUnboxedValueDoNotFitToArrow::LargeVariant [GOOD] |56.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::MultipleFileParts [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::Write_FileSizeLimitExceeded [GOOD] Test command err: 2025-06-25T14:23:58.654704Z :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:5:2052], blobId: 2, bytes: 50 |56.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::Simple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::StartError [GOOD] Test command err: 2025-06-25T14:23:58.691087Z :KQP_COMPUTE ERROR: spilling_file.cpp:239: (TIoSystemError) (Error 13: Permission denied) util/folder/path.cpp:424: could not create directory /nonexistent 2025-06-25T14:23:58.691243Z :KQP_COMPUTE ERROR: spilling_file.cpp:278: Service is broken, send error to client [1:5:2052] 2025-06-25T14:23:58.691452Z :KQP_COMPUTE ERROR: spilling_file.cpp:278: Service is broken, send error to client [1:5:2052] 2025-06-25T14:23:58.691517Z :KQP_COMPUTE ERROR: spilling_file.cpp:278: Service is broken, send error to client [1:5:2052] |56.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::SingleFilePart [GOOD] |56.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::FdCounterSingleFile [GOOD] |56.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::NoSpillingService [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::ThreadPoolQueueOverflow [GOOD] Test command err: 2025-06-25T14:23:58.728688Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.728798Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.728924Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.728970Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729013Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729056Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729100Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729138Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729178Z :KQP_COMPUTE ERROR: 
spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729235Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729277Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729310Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2025-06-25T14:23:58.729335Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729373Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729425Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729467Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729535Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729583Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729625Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729673Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729715Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729756Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729813Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2025-06-25T14:23:58.729847Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.729950Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730013Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730043Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730101Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730138Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2025-06-25T14:23:58.730165Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730199Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730248Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730379Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730420Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730454Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2025-06-25T14:23:58.730521Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run 
operation [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730566Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730625Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730717Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730768Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730837Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730893Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.730924Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731028Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731088Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731134Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731163Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731218Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731265Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731313Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731375Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731408Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2025-06-25T14:23:58.731439Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731482Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731518Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731563Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731610Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731775Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2025-06-25T14:23:58.731805Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731844Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731898Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731933Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731963Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] 
Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.731996Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2025-06-25T14:23:58.732025Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.732361Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.732403Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.732451Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.732556Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2025-06-25T14:23:58.732594Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.732635Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.732681Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation 2025-06-25T14:23:58.732718Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.732779Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.732821Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.732864Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.732912Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.732958Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-25T14:23:58.733238Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733276Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733310Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733331Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733351Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733367Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733384Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733400Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733417Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733445Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733467Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733502Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733520Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733537Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733555Z :KQP_COMPUTE ERROR: 
spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733572Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733587Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733607Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.733623Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-25T14:23:58.736740Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation |56.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> DqUnboxedValueDoNotFitToArrow::LargeVariant [GOOD] |56.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |56.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |56.3%| [LD] {RESULT} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |56.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |56.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut |56.4%| [TA] $(B)/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/{meta.json ... results_accumulator.log} |56.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |56.4%| [TA] {RESULT} $(B)/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/{meta.json ... results_accumulator.log} |56.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |56.4%| [LD] {RESULT} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |56.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |56.4%| [LD] {RESULT} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |56.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |56.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/simple_queue/simple_queue >> ConvertUnboxedValueToArrowAndBack::DictUtf8ToInterval [GOOD] >> ConvertUnboxedValueToArrowAndBack::ListOfJsons [GOOD] >> ConvertUnboxedValueToArrowAndBack::DictOptionalToTuple |56.7%| [LD] {RESULT} $(B)/ydb/tests/stress/simple_queue/simple_queue |56.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/simple_queue/simple_queue |57.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |57.6%| [LD] {RESULT} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |57.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution >> ConvertUnboxedValueToArrowAndBack::DictOptionalToTuple [GOOD] >> ConvertUnboxedValueToArrowAndBack::LargeVariant |58.1%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/dq/comp_nodes/dq_hash_combine.cpp >> ConvertUnboxedValueToArrowAndBack::LargeVariant [GOOD] >> DqUnboxedValueToNativeArrowConversion::DictUtf8ToInterval [GOOD] >> DqUnboxedValueToNativeArrowConversion::ListOfJsons [GOOD] |58.3%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/dq/comp_nodes/dq_hash_combine.cpp |58.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/comp_nodes/libyql-dq-comp_nodes.a |58.6%| [AR] {RESULT} $(B)/ydb/library/yql/dq/comp_nodes/libyql-dq-comp_nodes.a |58.9%| [AR] {BAZEL_UPLOAD} 
$(B)/ydb/library/yql/dq/comp_nodes/libyql-dq-comp_nodes.a |59.2%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> DqUnboxedValueToNativeArrowConversion::ListOfJsons [GOOD] |59.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |59.3%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |59.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |59.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/runtime/ut/unittest >> ConvertUnboxedValueToArrowAndBack::LargeVariant [GOOD] |59.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |59.8%| [LD] {RESULT} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |59.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |59.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |59.9%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |60.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |60.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |60.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |60.1%| [LD] {RESULT} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |60.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |60.4%| [LD] {RESULT} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |60.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |60.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/delete/ydb-tests-olap-delete |60.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |60.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/delete/ydb-tests-olap-delete |60.7%| [LD] {RESULT} $(B)/ydb/tests/olap/delete/ydb-tests-olap-delete |60.7%| [LD] {RESULT} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |60.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits |61.0%| [TA] $(B)/ydb/library/yql/dq/runtime/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |62.5%| [LD] {RESULT} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |62.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |62.7%| [TA] {RESULT} $(B)/ydb/library/yql/dq/runtime/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |62.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |62.7%| [LD] {RESULT} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |62.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |62.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |62.7%| [LD] {RESULT} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |62.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |63.2%| [AR] {default-linux-x86_64, release, asan} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |63.2%| [AR] {RESULT} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |63.2%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |63.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |63.6%| [AR] {RESULT} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |63.6%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |63.6%| [AR] {default-linux-x86_64, release, asan} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |63.6%| [AR] {RESULT} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |63.7%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |64.7%| [AR] {default-linux-x86_64, release, asan} $(B)/yt/yt/core/libyt-yt-core.a |64.7%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/core/libyt-yt-core.a |64.8%| [AR] {RESULT} $(B)/yt/yt/core/libyt-yt-core.a |65.3%| [AR] {default-linux-x86_64, release, asan} $(B)/yt/yt/client/libyt-yt-client.a |65.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/tools/combiner_perf/libkqp-tools-combiner_perf.a |65.3%| [AR] {RESULT} $(B)/yt/yt/client/libyt-yt-client.a |65.3%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/client/libyt-yt-client.a |65.3%| [AR] {RESULT} $(B)/ydb/core/kqp/tools/combiner_perf/libkqp-tools-combiner_perf.a |66.0%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/tools/combiner_perf/libkqp-tools-combiner_perf.a |66.0%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |66.0%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |66.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/secondary_index/ydb-tests-datashard-secondary_index |66.0%| [AR] {RESULT} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |66.0%| [LD] {RESULT} $(B)/ydb/tests/datashard/secondary_index/ydb-tests-datashard-secondary_index |66.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/secondary_index/ydb-tests-datashard-secondary_index |66.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |66.0%| [LD] {RESULT} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |66.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |66.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |66.0%| [LD] {RESULT} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |66.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium |66.0%| [LD] {default-linux-x86_64, release, 
asan} $(B)/ydb/tests/stress/node_broker/node_broker |66.0%| [LD] {RESULT} $(B)/ydb/tests/stress/node_broker/node_broker |66.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/node_broker/node_broker |66.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |66.0%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |66.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |66.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |66.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |66.1%| [LD] {RESULT} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |66.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/cdc/cdc |66.1%| [LD] {RESULT} $(B)/ydb/tests/stress/cdc/cdc |66.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/cdc/cdc |66.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/ydb/ydb |66.1%| [LD] {RESULT} $(B)/ydb/apps/ydb/ydb |66.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydb/ydb |66.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |66.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |66.1%| [LD] {RESULT} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |66.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |66.1%| [LD] {RESULT} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |66.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable >> Config::IncludeScope [GOOD] |66.2%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/yt/actors/yql_yt_lookup_actor.cpp |66.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> StatsFormat::FullStat [GOOD] >> Config::ExcludeScope [GOOD] >> FormatTimes::DurationMs [GOOD] >> FormatTimes::DurationUs [GOOD] >> FormatTimes::ParseDuration [GOOD] >> StatsFormat::AggregateStat [GOOD] |66.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest |66.2%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/yt/actors/yql_yt_lookup_actor.cpp |66.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest |66.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::DurationMs [GOOD] |66.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::ParseDuration [GOOD] |66.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> StatsFormat::AggregateStat [GOOD] |66.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> Config::IncludeScope [GOOD] |66.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> StatsFormat::FullStat [GOOD] |66.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> Config::ExcludeScope [GOOD] |66.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::DurationUs [GOOD] |66.3%| [TA] 
$(B)/ydb/core/fq/libs/compute/common/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TAsyncComputeActorTest::Empty [GOOD] >> TAsyncComputeActorTest::Basic >> TAsyncComputeActorTest::Basic [GOOD] >> TComputeActorAsyncInputHelperTest::PollAsyncInput [GOOD] >> TComputeActorTest::Empty [GOOD] >> TComputeActorTest::ReceiveData [GOOD] >> TDqSourceWatermarkTrackerTest::StartWatermark1 [GOOD] >> TDqSourceWatermarkTrackerTest::StartWatermark2 [GOOD] >> TDqSourceWatermarkTrackerTest::StartWatermark3 [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement1 [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement2 [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement3 [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement4 [GOOD] >> TDqSourceWatermarkTrackerTest::IdleFirstShouldReturnStartWatermark [GOOD] >> TDqSourceWatermarkTrackerTest::Idle1 [GOOD] >> TDqSourceWatermarkTrackerTest::IdleNextCheckAt [GOOD] >> TIssuesBufferTest::TestEmpty [GOOD] >> TIssuesBufferTest::TestSimplePush [GOOD] >> TIssuesBufferTest::TestPushWithOverflow [GOOD] >> TIssuesBufferTest::TestSmallBuffer [GOOD] >> TIssuesBufferTest::TestUseAfterDump [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/dq/actors/compute/ut/unittest >> TIssuesBufferTest::TestUseAfterDump [GOOD] Test command err: 2025-06-25T14:24:07.131361Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380181:2059], TxId: Test for: packets=2 doWatermark=1 waitIntermediateAcks=0 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:01.000000Z 2025-06-25T14:24:07.132635Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380184:2060], TxId: Test for: packets=3 doWatermark=1 waitIntermediateAcks=0 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:01.000000Z 2025-06-25T14:24:07.132656Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380184:2060], TxId: Test for: packets=3 doWatermark=1 waitIntermediateAcks=0 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:02.000000Z 2025-06-25T14:24:07.134064Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380187:2061], TxId: Test for: packets=4 doWatermark=1 waitIntermediateAcks=0 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:01.000000Z 2025-06-25T14:24:07.134080Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380187:2061], TxId: Test for: packets=4 doWatermark=1 waitIntermediateAcks=0 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:02.000000Z 2025-06-25T14:24:07.134094Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380187:2061], TxId: Test for: packets=4 doWatermark=1 waitIntermediateAcks=0 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:03.000000Z 2025-06-25T14:24:07.135830Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380190:2062], TxId: Test for: packets=5 doWatermark=1 waitIntermediateAcks=0 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. 
Dropped watermark: 1970-01-01T00:00:01.000000Z 2025-06-25T14:24:07.135849Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380190:2062], TxId: Test for: packets=5 doWatermark=1 waitIntermediateAcks=0 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:02.000000Z 2025-06-25T14:24:07.135859Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380190:2062], TxId: Test for: packets=5 doWatermark=1 waitIntermediateAcks=0 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:03.000000Z 2025-06-25T14:24:07.135870Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380190:2062], TxId: Test for: packets=5 doWatermark=1 waitIntermediateAcks=0 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:04.000000Z 2025-06-25T14:24:07.145198Z :KQP_COMPUTE ERROR: dq_compute_actor_watermarks.cpp:86: SelfId: [2:7519892655113380211:2069], TxId: Test for: packets=2 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Watermarks. Output channel 2 notified about watermark '1970-01-01T00:00:01.000000Z' when '1970-01-01T00:00:02.000000Z' was expected 2025-06-25T14:24:07.145316Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380211:2069], TxId: Test for: packets=2 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:01.000000Z 2025-06-25T14:24:07.146387Z :KQP_COMPUTE ERROR: dq_compute_actor_watermarks.cpp:86: SelfId: [2:7519892655113380214:2070], TxId: Test for: packets=3 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Watermarks. Output channel 2 notified about watermark '1970-01-01T00:00:01.000000Z' when '1970-01-01T00:00:02.000000Z' was expected 2025-06-25T14:24:07.146538Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380214:2070], TxId: Test for: packets=3 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:01.000000Z 2025-06-25T14:24:07.146558Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380214:2070], TxId: Test for: packets=3 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:02.000000Z 2025-06-25T14:24:07.147723Z :KQP_COMPUTE ERROR: dq_compute_actor_watermarks.cpp:86: SelfId: [2:7519892655113380217:2071], TxId: Test for: packets=4 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Watermarks. Output channel 2 notified about watermark '1970-01-01T00:00:01.000000Z' when '1970-01-01T00:00:02.000000Z' was expected 2025-06-25T14:24:07.147898Z :KQP_COMPUTE ERROR: dq_compute_actor_watermarks.cpp:86: SelfId: [2:7519892655113380217:2071], TxId: Test for: packets=4 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Watermarks. Output channel 2 notified about watermark '1970-01-01T00:00:03.000000Z' when '1970-01-01T00:00:04.000000Z' was expected 2025-06-25T14:24:07.148007Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380217:2071], TxId: Test for: packets=4 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. 
Dropped watermark: 1970-01-01T00:00:01.000000Z 2025-06-25T14:24:07.148016Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380217:2071], TxId: Test for: packets=4 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:02.000000Z 2025-06-25T14:24:07.148021Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380217:2071], TxId: Test for: packets=4 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:03.000000Z 2025-06-25T14:24:07.149154Z :KQP_COMPUTE ERROR: dq_compute_actor_watermarks.cpp:86: SelfId: [2:7519892655113380220:2072], TxId: Test for: packets=5 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Watermarks. Output channel 2 notified about watermark '1970-01-01T00:00:01.000000Z' when '1970-01-01T00:00:02.000000Z' was expected 2025-06-25T14:24:07.149306Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380220:2072], TxId: Test for: packets=5 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:01.000000Z 2025-06-25T14:24:07.149325Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380220:2072], TxId: Test for: packets=5 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:02.000000Z 2025-06-25T14:24:07.149501Z :KQP_COMPUTE ERROR: dq_compute_actor_watermarks.cpp:86: SelfId: [2:7519892655113380220:2072], TxId: Test for: packets=5 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Watermarks. Output channel 2 notified about watermark '1970-01-01T00:00:04.000000Z' when '1970-01-01T00:00:05.000000Z' was expected 2025-06-25T14:24:07.149609Z :KQP_COMPUTE WARN: dq_compute_actor_impl.h:885: SelfId: [2:7519892655113380220:2072], TxId: Test for: packets=5 doWatermark=1 waitIntermediateAcks=1 , task: 0. Ctx: {}. Input channel 1 watermarks were collapsed. See YQ-1441. Dropped watermark: 1970-01-01T00:00:04.000000Z 2025-06-25T14:24:07.166622Z :Unused ERROR: dq_compute_actor_channels.cpp:133: TxId: TxId, task: 0. Unexpected input channelId: 0 seqNo: 0, expected: 1 |66.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut |66.3%| [TA] {RESULT} $(B)/ydb/core/fq/libs/compute/common/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |66.3%| [TS] {RESULT} ydb/library/yql/dq/actors/compute/ut/unittest |66.3%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut |66.3%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/yt/actors/libproviders-yt-actors.a |66.3%| [AR] {RESULT} $(B)/ydb/library/yql/providers/yt/actors/libproviders-yt-actors.a |66.3%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/yt/actors/libproviders-yt-actors.a >> TestFileCache::Find [GOOD] >> TestFileCache::Create [GOOD] >> TestFileCache::AddAfterRemoveAcquired [GOOD] >> TestFileCache::AcquireRelease [GOOD] >> TestFileCache::Add [GOOD] >> TestFileCache::ContainsReleased [GOOD] >> TestFileCache::Acquire [GOOD] >> TestFileCache::AcquireSingleFile2Times [GOOD] >> TestFileCache::Evict [GOOD] |66.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/stress-reconfig_state_storage_workload-tests |66.3%| [LD] {RESULT} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/stress-reconfig_state_storage_workload-tests |66.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/reconfig_state_storage_workload/tests/stress-reconfig_state_storage_workload-tests |66.3%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/dq/runtime/ut/unittest >> TestFileCache::Evict [GOOD] |66.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut |66.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |66.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |66.3%| [TS] {RESULT} ydb/library/yql/providers/dq/runtime/ut/unittest |66.3%| [LD] {RESULT} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |66.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |66.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |66.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart |66.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |66.4%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |66.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut >> TestFederatedQueryHelpers::TestCheckNestingDepth [GOOD] >> TestFederatedQueryHelpers::TestTruncateIssues [GOOD] >> TestFederatedQueryHelpers::TestValidateResultSetColumns [GOOD] |66.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/federated_query/ut/unittest >> TestFederatedQueryHelpers::TestValidateResultSetColumns [GOOD] |66.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> VDiskRestart::Simple [GOOD] |66.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |66.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |66.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |66.5%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |66.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |66.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |66.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |66.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |66.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> VDiskRestart::Simple [GOOD] >> TCollectingS3ListingStrategyTests::IfNoIssuesOccursShouldReturnCollectedPaths [GOOD] >> TCollectingS3ListingStrategyTests::IfThereAreMoreRecordsThanSpecifiedByLimitShouldReturnError [GOOD] >> TCollectingS3ListingStrategyTests::IfAnyIterationReturnIssueThanWholeStrategyShouldReturnIt [GOOD] >> TCollectingS3ListingStrategyTests::IfExceptionIsReturnedFromIteratorThanItShouldCovertItToIssue [GOOD] |66.5%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/library/yql/providers/generic/actors/yql_generic_lookup_actor.cpp |66.5%| [CC] {BAZEL_UPLOAD} $(S)/ydb/library/yql/providers/generic/actors/yql_generic_lookup_actor.cpp |66.5%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/provider/ut/unittest >> TCollectingS3ListingStrategyTests::IfExceptionIsReturnedFromIteratorThanItShouldCovertItToIssue [GOOD] |66.5%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/test-results/unittest/{meta.json ... results_accumulator.log} |66.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |66.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |66.5%| [TS] {RESULT} ydb/core/kqp/federated_query/ut/unittest |66.5%| [TS] {RESULT} ydb/library/yql/providers/s3/provider/ut/unittest |66.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/test-results/unittest/{meta.json ... 
results_accumulator.log} |66.5%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |66.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |66.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |66.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |66.6%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/actors/libproviders-generic-actors.a |66.6%| [AR] {RESULT} $(B)/ydb/library/yql/providers/generic/actors/libproviders-generic-actors.a |66.6%| [AR] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/generic/actors/libproviders-generic-actors.a |66.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |66.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |66.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |66.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |66.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |66.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |66.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |66.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |66.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk >> SelfHealActorTest::NoMoreThanOneReplicating [GOOD] >> BsControllerTest::TestLocalBrokenRelocation >> BsControllerTest::SelfHealBlock4Plus2 >> BsControllerTest::DecommitRejected >> BsControllerTest::SelfHealMirror3dc >> BSCReadOnlyPDisk::ReadOnlyOneByOne >> SelfHealActorTest::SingleErrorDisk [GOOD] |66.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |66.7%| [LD] {RESULT} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |66.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |66.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest |66.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest |66.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |66.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |66.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |66.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BsControllerTest::TestLocalSelfHeal >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive >> BSCReadOnlyPDisk::ReadOnlyNotAllowed |66.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |66.7%| 
[TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest |66.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |66.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlySlay >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed |67.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> SelfHealActorTest::SingleErrorDisk [GOOD] >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly |67.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage |67.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage |67.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> SelfHealActorTest::NoMoreThanOneReplicating [GOOD] |67.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/core-blobstorage-ut_blobstorage-ut_statestorage >> BsControllerTest::DecommitRejected [GOOD] >> BlobDepotWithTestShard::PlainGroup [GOOD] |67.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |67.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |67.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::DecommitRejected [GOOD] Test command err: 2025-06-25T14:24:16.845415Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-25T14:24:16.845470Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-25T14:24:16.845556Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-25T14:24:16.845578Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-25T14:24:16.845620Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-25T14:24:16.845645Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-06-25T14:24:16.845675Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-25T14:24:16.845702Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-25T14:24:16.845738Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-25T14:24:16.845762Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-25T14:24:16.845797Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-25T14:24:16.845815Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-25T14:24:16.845843Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-25T14:24:16.845863Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-25T14:24:16.845892Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-25T14:24:16.845911Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-25T14:24:16.845952Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-25T14:24:16.845975Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-25T14:24:16.846005Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-25T14:24:16.846034Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-25T14:24:16.846078Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-06-25T14:24:16.846098Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-25T14:24:16.846130Z 12 
00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-25T14:24:16.846158Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-25T14:24:16.846189Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-25T14:24:16.846208Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-25T14:24:16.846235Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-25T14:24:16.846255Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-25T14:24:16.846291Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-06-25T14:24:16.846311Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-06-25T14:24:16.864810Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:508:32] Status# ERROR ClientId# [1:508:32] ServerId# [0:0:0] PipeClient# [1:508:32] 2025-06-25T14:24:16.865548Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:509:20] Status# ERROR ClientId# [2:509:20] ServerId# [0:0:0] PipeClient# [2:509:20] 2025-06-25T14:24:16.865586Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:510:20] Status# ERROR ClientId# [3:510:20] ServerId# [0:0:0] PipeClient# [3:510:20] 2025-06-25T14:24:16.865703Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:511:20] Status# ERROR ClientId# [4:511:20] ServerId# [0:0:0] PipeClient# [4:511:20] 2025-06-25T14:24:16.865735Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:512:20] Status# ERROR ClientId# [5:512:20] ServerId# [0:0:0] PipeClient# [5:512:20] 2025-06-25T14:24:16.865807Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:513:20] Status# ERROR ClientId# [6:513:20] ServerId# [0:0:0] PipeClient# [6:513:20] 2025-06-25T14:24:16.865909Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:514:20] Status# ERROR ClientId# [7:514:20] ServerId# [0:0:0] PipeClient# [7:514:20] 2025-06-25T14:24:16.865988Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:515:20] Status# ERROR ClientId# [8:515:20] ServerId# [0:0:0] PipeClient# [8:515:20] 2025-06-25T14:24:16.866020Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:516:20] Status# ERROR ClientId# [9:516:20] ServerId# [0:0:0] PipeClient# [9:516:20] 2025-06-25T14:24:16.866046Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:517:20] Status# ERROR ClientId# [10:517:20] ServerId# [0:0:0] PipeClient# [10:517:20] 2025-06-25T14:24:16.866072Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:518:20] Status# ERROR ClientId# [11:518:20] ServerId# [0:0:0] PipeClient# [11:518:20] 2025-06-25T14:24:16.866140Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:519:20] Status# ERROR ClientId# [12:519:20] ServerId# [0:0:0] PipeClient# [12:519:20] 2025-06-25T14:24:16.866174Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:520:20] Status# ERROR ClientId# [13:520:20] ServerId# [0:0:0] PipeClient# [13:520:20] 2025-06-25T14:24:16.866265Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:521:20] Status# ERROR ClientId# [14:521:20] ServerId# [0:0:0] PipeClient# [14:521:20] 2025-06-25T14:24:16.866292Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:522:20] Status# ERROR ClientId# [15:522:20] ServerId# [0:0:0] PipeClient# [15:522:20] 2025-06-25T14:24:16.951334Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] Connect 2025-06-25T14:24:16.951426Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] Connect 2025-06-25T14:24:16.951465Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] Connect 2025-06-25T14:24:16.951503Z 4 
00h00m00.100000s :BS_NODE DEBUG: [4] Connect 2025-06-25T14:24:16.951558Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] Connect 2025-06-25T14:24:16.951601Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] Connect 2025-06-25T14:24:16.951645Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] Connect 2025-06-25T14:24:16.951685Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] Connect 2025-06-25T14:24:16.951726Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] Connect 2025-06-25T14:24:16.951769Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] Connect 2025-06-25T14:24:16.951808Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] Connect 2025-06-25T14:24:16.951854Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] Connect 2025-06-25T14:24:16.951911Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] Connect 2025-06-25T14:24:16.951951Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] Connect 2025-06-25T14:24:16.951992Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] Connect 2025-06-25T14:24:16.954293Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:581:60] Status# OK ClientId# [1:581:60] ServerId# [1:610:61] PipeClient# [1:581:60] 2025-06-25T14:24:16.954357Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] State switched from 0 to 1 2025-06-25T14:24:16.957908Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:582:21] Status# OK ClientId# [2:582:21] ServerId# [1:611:62] PipeClient# [2:582:21] 2025-06-25T14:24:16.957954Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] State switched from 0 to 1 2025-06-25T14:24:16.958003Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:583:21] Status# OK ClientId# [3:583:21] ServerId# [1:612:63] PipeClient# [3:583:21] 2025-06-25T14:24:16.958031Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] State switched from 0 to 1 2025-06-25T14:24:16.958069Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:584:21] Status# OK ClientId# [4:584:21] ServerId# [1:613:64] PipeClient# [4:584:21] 2025-06-25T14:24:16.958089Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] State switched from 0 to 1 2025-06-25T14:24:16.958121Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:585:21] Status# OK ClientId# [5:585:21] ServerId# [1:614:65] PipeClient# [5:585:21] 2025-06-25T14:24:16.958145Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] State switched from 0 to 1 2025-06-25T14:24:16.958180Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:586:21] Status# OK ClientId# [6:586:21] ServerId# [1:615:66] PipeClient# [6:586:21] 2025-06-25T14:24:16.958204Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] State switched from 0 to 1 2025-06-25T14:24:16.958251Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:587:21] Status# OK ClientId# [7:587:21] ServerId# [1:616:67] PipeClient# [7:587:21] 2025-06-25T14:24:16.958288Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] State switched from 0 to 1 2025-06-25T14:24:16.958326Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:588:21] Status# OK ClientId# [8:588:21] ServerId# [1:617:68] PipeClient# [8:588:21] 2025-06-25T14:24:16.958358Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] State switched from 0 to 1 2025-06-25T14:24:16.958404Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:589:21] Status# OK ClientId# [9:589:21] ServerId# [1:618:69] PipeClient# [9:589:21] 2025-06-25T14:24:16.958428Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] State switched from 0 to 1 2025-06-25T14:24:16.958464Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:590:21] Status# OK ClientId# [10:590:21] ServerId# [1:619:70] PipeClient# [10:590:21] 2025-06-25T14:24:16.958488Z 10 
00h00m00.100000s :BS_NODE DEBUG: [10] State switched from 0 to 1 2025-06-25T14:24:16.958528Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:591:21] Status# OK ClientId# [11:591:21] ServerId# [1:620:71] PipeClient# [11:591:21] 2025-06-25T14:24:16.958558Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] State switched from 0 to 1 2025-06-25T14:24:16.958628Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:592:21] Status# OK ClientId# [12:592:21] ServerId# [1:621:72] PipeClient# [12:592:21] 2025-06-25T14:24:16.958653Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] State switched from 0 to 1 2025-06-25T14:24:16.958689Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:593:21] Status# OK ClientId# [13:593:21] ServerId# [1:622:73] PipeClient# [13:593:21] 2025-06-25T14:24:16.958714Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] State switched from 0 to 1 2025-06-25T14:24:16.958750Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:594:21] Status# OK ClientId# [14:594:21] ServerId# [1:623:74] PipeClient# [14:594:21] 2025-06-25T14:24:16.958774Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] State switched from 0 to 1 2025-06-25T14:24:16.958810Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:595:21] Status# OK ClientId# [15:595:21] ServerId# [1:624:75] PipeClient# [15:595:21] 2025-06-25T14:24:16.958845Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] State switched from 0 to 1 2025-06-25T14:24:16.961427Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:16.961510Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] VDiskId# [80000000:1:0:0:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-25T14:24:16.980086Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] VDiskId# [80000000:1:0:0:0] status changed to INIT_PENDING 2025-06-25T14:24:16.981206Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2025-06-25T14:24:16.981271Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] VDiskId# [80000000:1:0:1:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-25T14:24:16.981364Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] VDiskId# [80000000:1:0:1:0] status changed to INIT_PENDING 2025-06-25T14:24:16.981484Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2025-06-25T14:24:16.981528Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] VDiskId# [80000000:1:0:2:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-25T14:24:16.981616Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] VDiskId# [80000000:1:0:2:0] status changed to INIT_PENDING 2025-06-25T14:24:16.981710Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-06-25T14:24:16.981745Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] VDiskId# [80000000:1:1:0:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-25T14:24:16.981785Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] VDiskId# [80000000:1:1:0:0] status changed to INIT_PENDING 2025-06-25T14:24:16.981877Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-25T14:24:16.981914Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] VDiskId# [80000000:1:1:1:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-25T14:24:16.981955Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] VDiskId# [80000000:1:1:1:0] status changed to INIT_PENDING 2025-06-25T1 ... 
eady},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-25T14:24:17.433011Z 13 00h01m22.763512s :BS_NODE DEBUG: [13] VDiskId# [80000001:1:1:0:0] status changed to READY 2025-06-25T14:24:17.435729Z 1 00h01m22.763512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-25T14:24:17.436219Z 15 00h01m23.296512s :BS_NODE DEBUG: [15] VDiskId# [80000001:1:1:2:0] status changed to READY 2025-06-25T14:24:17.437309Z 1 00h01m23.296512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-25T14:24:17.438255Z 1 00h01m24.563024s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] Ready},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-25T14:24:17.438660Z 1 00h01m25.759512s :BS_NODE DEBUG: [1] VDiskId# [80000001:1:2:0:0] status changed to READY 2025-06-25T14:24:17.439486Z 1 00h01m25.759512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] Ready},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-25T14:24:17.440454Z 1 00h01m26.145512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] Ready},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-25T14:24:17.440831Z 1 00h01m27.462512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] Ready},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-25T14:24:17.442896Z 1 00h01m30.000000s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] Ready},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready 
RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-25T14:24:17.443253Z 11 00h01m30.050512s :BS_NODE DEBUG: [11] VDiskId# [80000001:1:0:1:0] status changed to READY 2025-06-25T14:24:17.444173Z 1 00h01m30.050512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] Ready},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-25T14:24:17.453197Z 1 00h01m32.588512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] Ready},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-25T14:24:17.453497Z 3 00h01m33.614512s :BS_NODE DEBUG: [3] VDiskId# [80000001:1:2:2:0] status changed to READY 2025-06-25T14:24:17.454902Z 1 00h01m33.614512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] Ready},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-25T14:24:17.455659Z 12 00h01m35.266512s :BS_NODE DEBUG: [12] VDiskId# [80000001:1:0:2:0] status changed to READY 2025-06-25T14:24:17.456534Z 1 00h01m35.266512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] Ready},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-25T14:24:17.457026Z 14 00h01m37.333536s :BS_NODE DEBUG: [14] VDiskId# [80000000:3:2:1:0] status changed to READY 2025-06-25T14:24:17.457547Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483648 2025-06-25T14:24:17.459648Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:17.459686Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:0:0] DiskIsOk# true 2025-06-25T14:24:17.468598Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:17.468654Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:1:0] DiskIsOk# true 2025-06-25T14:24:17.468784Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:17.468813Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:2:0] DiskIsOk# true 2025-06-25T14:24:17.468961Z 1 
00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:17.469089Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:0:0] DiskIsOk# true 2025-06-25T14:24:17.469118Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:17.469231Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:1:0] DiskIsOk# true 2025-06-25T14:24:17.469386Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:17.469499Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:2:0] DiskIsOk# true 2025-06-25T14:24:17.469634Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:17.469661Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:2:0:0] DiskIsOk# true 2025-06-25T14:24:17.469687Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:17.469710Z 1 00h01m37.333536s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:2:1:0] DiskIsOk# true 2025-06-25T14:24:17.474755Z 1 00h01m37.334048s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:17.474949Z 1 00h01m37.334048s :BS_NODE DEBUG: [1] VDiskId# [80000000:3:0:0:0] -> [80000000:4:0:0:0] 2025-06-25T14:24:17.485137Z 1 00h01m37.334048s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483648 Items# [80000000:3:2:2:0]: 9:1000:1000 -> 15:1000:1001 ConfigTxSeqNo# 23 2025-06-25T14:24:17.485288Z 1 00h01m37.334048s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483648 Success# true 2025-06-25T14:24:17.485635Z 8 00h01m37.334048s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2025-06-25T14:24:17.485682Z 8 00h01m37.334048s :BS_NODE DEBUG: [8] VDiskId# [80000000:2:2:1:0] destroyed 2025-06-25T14:24:17.485860Z 2 00h01m37.334048s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2025-06-25T14:24:17.485910Z 2 00h01m37.334048s :BS_NODE DEBUG: [2] VDiskId# [80000000:3:0:1:0] -> [80000000:4:0:1:0] 2025-06-25T14:24:17.486207Z 3 00h01m37.334048s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2025-06-25T14:24:17.486254Z 3 00h01m37.334048s :BS_NODE DEBUG: [3] VDiskId# [80000000:3:0:2:0] -> [80000000:4:0:2:0] 2025-06-25T14:24:17.486544Z 4 00h01m37.334048s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-06-25T14:24:17.486585Z 4 00h01m37.334048s :BS_NODE DEBUG: [4] VDiskId# [80000000:3:1:0:0] -> [80000000:4:1:0:0] 2025-06-25T14:24:17.486743Z 5 00h01m37.334048s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-25T14:24:17.486783Z 5 00h01m37.334048s :BS_NODE DEBUG: [5] VDiskId# [80000000:3:1:1:0] -> [80000000:4:1:1:0] 2025-06-25T14:24:17.487159Z 6 00h01m37.334048s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2025-06-25T14:24:17.487203Z 6 00h01m37.334048s :BS_NODE 
DEBUG: [6] VDiskId# [80000000:3:1:2:0] -> [80000000:4:1:2:0] 2025-06-25T14:24:17.487347Z 9 00h01m37.334048s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2025-06-25T14:24:17.487525Z 13 00h01m37.334048s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-06-25T14:24:17.487564Z 13 00h01m37.334048s :BS_NODE DEBUG: [13] VDiskId# [80000000:3:2:0:0] -> [80000000:4:2:0:0] 2025-06-25T14:24:17.487643Z 14 00h01m37.334048s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:17.487684Z 14 00h01m37.334048s :BS_NODE DEBUG: [14] VDiskId# [80000000:3:2:1:0] -> [80000000:4:2:1:0] 2025-06-25T14:24:17.487760Z 15 00h01m37.334048s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2025-06-25T14:24:17.487795Z 15 00h01m37.334048s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] PDiskId# 1000 VSlotId# 1001 created 2025-06-25T14:24:17.487863Z 15 00h01m37.334048s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to INIT_PENDING 2025-06-25T14:24:17.498174Z 15 00h01m40.490048s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to REPLICATING 2025-06-25T14:24:17.511634Z 15 00h01m53.053048s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to READY 2025-06-25T14:24:17.513735Z 9 00h01m53.053560s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2025-06-25T14:24:17.513875Z 9 00h01m53.053560s :BS_NODE DEBUG: [9] VDiskId# [80000000:3:2:2:0] destroyed |67.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_testshard/unittest >> BlobDepotWithTestShard::PlainGroup [GOOD] >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob |67.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |67.4%| [TM] {RESULT} ydb/core/blobstorage/ut_testshard/unittest |67.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |67.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |67.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |67.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob >> VDiskBalancing::TestStopOneNode_Block42 >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 |67.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |67.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest |67.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> VDiskBalancing::TestRandom_Block42 >> BSCRestartPDisk::RestartOneByOneWithReconnects >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed >> BSCRestartPDisk::RestartNotAllowed >> VDiskBalancing::TestRandom_Mirror3dc |67.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> BSCRestartPDisk::RestartOneByOne >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed [GOOD] |67.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly [GOOD] |67.5%| [TM] 
{asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |67.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |67.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |67.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> BSCStopPDisk::PDiskStop |67.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |67.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |67.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest |67.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest |67.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |67.5%| [LD] {RESULT} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test |67.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed [GOOD] Test command err: RandomSeed# 3922969891009170174 2025-06-25T14:24:18.865137Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.865634Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.865932Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.865995Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.866269Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.866438Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.866607Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.867995Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:18.868076Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, 
PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:18.868351Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:18.868398Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:18.868558Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:18.868605Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:18.868881Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:18.869169Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.869422Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.869462Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.869702Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.869834Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.869868Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.869896Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:18.875302Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:18.875501Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:18.875548Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason 
Marker# BSVSF03 2025-06-25T14:24:18.875917Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:18.876083Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:18.876134Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:18.876180Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly [GOOD] Test command err: RandomSeed# 13938073542811491174 2025-06-25T14:24:19.119942Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.121381Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.121670Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.121840Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.121998Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.122286Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.122454Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.122613Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.124994Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.125318Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# 
BSVSF03 2025-06-25T14:24:19.125365Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.125407Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.125450Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.125489Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.125538Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.125580Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.126457Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.127338Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.127379Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.127650Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.127692Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.127723Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.127975Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.128119Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:19.135721Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 
2025-06-25T14:24:19.136380Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.136533Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.136574Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.136715Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.137075Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.137116Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.137155Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:19.646354Z 1 00h01m30.011024s :BS_LOCALRECOVERY CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "Some error reason" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR |67.6%| [TM] 
{asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest
|67.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest
|67.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest
|67.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest
|67.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestProxyConfigMismatch
|67.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestProxyNotifyReplicaConfigChanged1
>> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup [GOOD]
>> TStateStorageRingGroupState::TestBoardConfigMismatch
>> TStateStorageRingGroupState::TestProxyConfigMismatchNotSent
|67.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/library/ut/ydb-tests-library-ut
|67.6%| [LD] {RESULT} $(B)/ydb/tests/library/ut/ydb-tests-library-ut
>> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed [GOOD]
|67.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/library/ut/ydb-tests-library-ut
|67.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut
|67.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest
|67.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut
|67.6%| [LD] {RESULT} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut
>> TStateStorageRingGroupState::TestProxyConfigMismatchNotSent [GOOD]
>> Donor::MultipleEvicts
------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartBrokenDiskInBrokenGroup [GOOD]
Test command err: RandomSeed# 11534976844700880948
2025-06-25T14:24:20.981686Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2025-06-25T14:24:20.981939Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2025-06-25T14:24:20.982217Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2025-06-25T14:24:20.982264Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2025-06-25T14:24:20.982578Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason'
2025-06-25T14:24:20.982627Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags#
'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:20.982798Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:20.982970Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:20.985911Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.986102Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.986139Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.986300Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.986449Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.986725Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.986879Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.986914Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.987100Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:20.987256Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:20.987423Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:20.987565Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:20.987722Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error 
reason' 2025-06-25T14:24:20.987748Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:20.988016Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:20.988167Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:20.992490Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.992765Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.992801Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.992832Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.992865Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.993122Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.993156Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:20.993293Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 |67.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> TStateStorageRingGroupState::TestProxyNotifyReplicaConfigChanged1 [GOOD] >> TStateStorageRingGroupState::TestBoardConfigMismatch [GOOD] |67.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest |67.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> TStateStorageRingGroupState::TestProxyConfigMismatch [GOOD] >> CheckIntegrityMirror3dc::PlacementOkWithErrors >> BSCStopPDisk::PDiskStop [GOOD] |67.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |67.6%| [LD] {RESULT} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |67.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartGoodDiskInBrokenGroupNotAllowed [GOOD] 
Test command err: RandomSeed# 6257372458069913862 2025-06-25T14:24:21.546117Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.546284Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.546357Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.546432Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.546504Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.546563Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.546637Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.547596Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.547694Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.547756Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.547801Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.547853Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.547898Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.547944Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.548062Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 
'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.548113Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.548170Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.548248Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.548282Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.548331Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.548378Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-25T14:24:21.550079Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.550163Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.550219Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.550280Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.550329Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.550371Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-25T14:24:21.550452Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 |67.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/show_create/view/show_create_view |67.7%| [LD] {RESULT} $(B)/ydb/tests/stress/show_create/view/show_create_view |67.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/show_create/view/show_create_view >> CheckIntegrityMirror3of4::PlacementOk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> 
TStateStorageRingGroupState::TestProxyConfigMismatchNotSent [GOOD] Test command err: RandomSeed# 6458010032135524059 2025-06-25T14:24:21.743145Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [6:249:20] SessionId# [9:47:6] Cookie# 13454428406099881791 2025-06-25T14:24:21.743219Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 6 SessionId# [9:47:6] Inserted# false Subscription# {SessionId# [9:47:6] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-25T14:24:21.747746Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 6 Cookie# 13454428406099881791 SessionId# [9:47:6] Binding# {6.6/13454428406099881791@[9:47:6]} Record# {RootNodeId: 2 } 2025-06-25T14:24:21.747837Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {6.2/13454428406099881791@[9:47:6]} PrevRootNodeId# 6 ConfigUpdate# false 2025-06-25T14:24:21.747917Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [2:221:20] SessionId# [8:124:2] Cookie# 6046365763786666659 2025-06-25T14:24:21.747964Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 2 SessionId# [8:124:2] Inserted# false Subscription# {SessionId# [8:124:2] SubscriptionCookie# 0} NextSubscribeCookie# 3 2025-06-25T14:24:21.748132Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 2 Cookie# 6046365763786666659 SessionId# [8:124:2] Binding# {3.5/16791804614250908484@[8:101:3]} Record# {BoundNodes { NodeId { Host: "127.0.0.6" Port: 19001 NodeId: 6 } Meta { Fingerprint: "\3403\207\365\032>> TStateStorageRingGroupState::TestProxyNotifyReplicaConfigChanged1 [GOOD] Test command err: RandomSeed# 6054561169098198369 2025-06-25T14:24:21.839030Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639240 Sender# [4:235:20] SessionId# [5:71:4] Cookie# 11377924833534874517 2025-06-25T14:24:21.839088Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 4 SessionId# [5:71:4] Inserted# false Subscription# {SessionId# [5:71:4] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-25T14:24:21.839134Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC16@distconf_binding.cpp:529} TEvNodeConfigUnbind NodeId# 4 Cookie# 11377924833534874517 SessionId# [5:71:4] Binding# {7.4/10096616997367320968@[5:57:6]} 2025-06-25T14:24:21.839171Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC06@distconf_binding.cpp:538} UnbindNode NodeId# 4 Reason# explicit unbind request 2025-06-25T14:24:21.839227Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 4 NodeId# 127.0.0.6:19001/6 2025-06-25T14:24:21.839274Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 4 NodeId# 127.0.0.7:19001/7 2025-06-25T14:24:21.839299Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 4 NodeId# 127.0.0.9:19001/9 2025-06-25T14:24:21.839346Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 4 NodeId# 127.0.0.4:19001/4 2025-06-25T14:24:21.839383Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 4 NodeId# 127.0.0.1:19001/1 2025-06-25T14:24:21.839408Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound 
RefererNodeId# 4 NodeId# 127.0.0.8:19001/8 2025-06-25T14:24:21.839430Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 4 NodeId# 127.0.0.5:19001/5 2025-06-25T14:24:21.839452Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 4 NodeId# 127.0.0.2:19001/2 2025-06-25T14:24:21.839473Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 4 NodeId# 127.0.0.3:19001/3 2025-06-25T14:24:21.839521Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC55@distconf_binding.cpp:219} UnsubscribeInterconnect NodeId# 4 Subscription# {SessionId# [5:71:4] SubscriptionCookie# 0} 2025-06-25T14:24:21.839573Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [4:235:20] SessionId# [6:74:4] Cookie# 8161047459067277135 2025-06-25T14:24:21.839608Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 4 SessionId# [6:74:4] Inserted# false Subscription# {SessionId# [6:74:4] SubscriptionCookie# 0} NextSubscribeCookie# 5 2025-06-25T14:24:21.843865Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 4 Cookie# 8161047459067277135 SessionId# [6:74:4] Binding# {4.5/8161047459067277135@[6:74:4]} Record# {RootNodeId: 4 } 2025-06-25T14:24:21.843940Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {4.4/8161047459067277135@[6:74:4]} PrevRootNodeId# 5 ConfigUpdate# false 2025-06-25T14:24:21.844008Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [7:256:20] SessionId# [3:97:6] Cookie# 15725057812512817836 2025-06-25T14:24:21.844039Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 7 SessionId# [3:97:6] Inserted# false Subscription# {SessionId# [3:97:6] SubscriptionCookie# 0} NextSubscribeCookie# 5 2025-06-25T14:24:21.844132Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 7 Cookie# 15725057812512817836 SessionId# [3:97:6] Binding# {7.4/15725057812512817836@[3:97:6]} Record# {RootNodeId: 5 } 2025-06-25T14:24:21.844159Z 3 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {7.5/15725057812512817836@[3:97:6]} PrevRootNodeId# 4 ConfigUpdate# false 2025-06-25T14:24:21.844189Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [7:256:20] SessionId# [5:57:6] Cookie# 10096616997367320968 2025-06-25T14:24:21.844212Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 7 SessionId# [5:57:6] Inserted# false Subscription# {SessionId# [5:57:6] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-25T14:24:21.844236Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 7 Cookie# 10096616997367320968 SessionId# [5:57:6] Binding# {7.4/10096616997367320968@[5:57:6]} Record# {RootNodeId: 5 } 2025-06-25T14:24:21.844257Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC03@distconf_binding.cpp:271} AbortBinding Binding# {7.5/10096616997367320968@[5:57:6]} Reason# binding cycle 2025-06-25T14:24:21.844288Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC24@distconf_scatter_gather.cpp:90} AbortAllScatterTasks Binding# {7.5/10096616997367320968@[5:57:6]} 2025-06-25T14:24:21.844342Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC55@distconf_binding.cpp:219} 
UnsubscribeInterconnect NodeId# 7 Subscription# {SessionId# [5:57:6] SubscriptionCookie# 0} 2025-06-25T14:24:21.844386Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 8 SessionId# [0:0:0] Inserted# true Subscription# {SessionId# [0:0:0] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-25T14:24:21.844433Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC29@distconf_binding.cpp:80} Initiated bind NodeId# 8 Binding# {8.0/10096616997367320969@[0:0:0]} SessionId# [0:0:0] 2025-06-25T14:24:21.844743Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 131077 Sender# [5:60:7] SessionId# [0:0:0] Cookie# 6 2025-06-25T14:24:21.844774Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC14@distconf_binding.cpp:120} TEvNodeConnected NodeId# 8 SessionId# [5:60:7] Cookie# 6 CookieInFlight# true SubscriptionExists# true 2025-06-25T14:24:21.844805Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC09@distconf_binding.cpp:156} Continuing bind Binding# {8.0/10096616997367320969@[0:0:0]} 2025-06-25T14:24:21.845467Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [5:242:20] SessionId# [7:58:5] Cookie# 10096616997367320968 2025-06-25T14:24:21.845519Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 5 SessionId# [7:58:5] Inserted# false Subscription# {SessionId# [7:58:5] SubscriptionCookie# 0} NextSubscribeCookie# 4 2025-06-25T14:24:21.845689Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 5 Cookie# 10096616997367320968 SessionId# [7:58:5] Binding# {6.5/2792736948813494462@[7:41:6]} Record# {DeletedBoundNodeIds { Host: "127.0.0.6" Port: 19001 NodeId: 6 } DeletedBoundNodeIds { Host: "127.0.0.7" Port: 19001 NodeId: 7 } DeletedBoundNodeIds { Host: "127.0.0.9" Port: 19001 NodeId: 9 } DeletedBoundNodeIds { Host: "127.0.0.4" Port: 19001 NodeId: 4 } DeletedBoundNodeIds { Host: "127.0.0.1" Port: 19001 NodeId: 1 } DeletedBoundNodeIds { Host: "127.0.0.8" Port: 19001 NodeId: 8 } DeletedBoundNodeIds { Host: "127.0.0.2" Port: 19001 NodeId: 2 } DeletedBoundNodeIds { Host: "127.0.0.3" Port: 19001 NodeId: 3 } } 2025-06-25T14:24:21.845730Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 5 SessionId# [7:58:5] Inserted# false Subscription# {SessionId# [7:58:5] SubscriptionCookie# 0} NextSubscribeCookie# 4 2025-06-25T14:24:21.845765Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.6:19001/6 2025-06-25T14:24:21.845807Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.7:19001/7 2025-06-25T14:24:21.845833Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.9:19001/9 2025-06-25T14:24:21.845860Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.4:19001/4 2025-06-25T14:24:21.845887Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.1:19001/1 2025-06-25T14:24:21.845910Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.8:19001/8 2025-06-25T14:24:21.845931Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.2:19001/2 2025-06-25T14:24:21.845953Z 7 00h00m00.000000s 
:BS_NODE DEBUG: {NWDC34@distconf_binding.cpp:373} DeleteBound RefererNodeId# 5 NodeId# 127.0.0.3:19001/3 2025-06-25T14:24:21.846017Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [6:249:20] SessionId# [7:41:6] Cookie# 2792736948813494462 2025-06-25T14:24:21.846060Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 6 SessionId# [7:41:6] Inserted# false Subscription# {SessionId# [7:41:6] SubscriptionCookie# 0} NextSubscribeCookie# 4 2025-06-25T14:24:21.846109Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 6 Cookie# 2792736948813494462 SessionId# [7:41:6] Binding# {6.5/2792736948813494462@[7:41:6]} Record# {RootNodeId: 4 } 2025-06-25T14:24:21.846147Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {6.4/2792736948813494462@[7:41:6]} PrevRootNodeId# 5 ConfigUpdate# false 2025-06-25T14:24:21.846189Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [6:249:20] SessionId# [2:117:5] Cookie# 9104860676236580540 2025-06-25T14:24:21.846216Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 6 SessionId# [2:117:5] Inserted# false Subscription# {SessionId# [2:117:5] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-25T14:24:21.846254Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 6 Cookie# 9104860676236580540 SessionId# [2:117:5] Binding# {6.5/9104860676236580540@[2:117:5]} Record# {RootNodeId: 4 } 2025-06-25T14:24:21.846274Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {6.4/9104860676236580540@[2:117:5]} PrevRootNodeId# 5 ConfigUpdate# false 2025-06-25T14:24:21.846319Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [3:228:20] SessionId# [8:101:3] Cookie# 1224545840641418216 2025-06-25T14:24:21.846345Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 3 SessionId# [8:101:3] Inserted# false Subscription# {SessionId# [8:101:3] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-25T14:24:21.846387Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 3 Cookie# 1224545840641418216 SessionId# [8:101:3] Binding# {3.4/1224545840641418216@[8:101:3]} Record# {RootNodeId: 5 } 2025-06-25T14:24:21.846423Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {3.5/1224545840641418216@[8:101:3]} PrevRootNodeId# 4 ConfigUpdate# false 2025-06-25T14:24:21.846452Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [3:228:20] SessionId# [1:133:2] Cookie# 15177265468524969868 2025-06-25T14:24:21.846477Z 1 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 3 SessionId# [1:133:2] Inserted# false Subscription# {SessionId# [1:133:2] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-25T14:24:21.846518Z 1 00h00m ... 
D: 72057594037936131 Cookie: 0} 2025-06-25T14:24:21.984619Z 1 00h00m03.745236s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-25T14:24:21.984639Z 1 00h00m03.745236s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-25T14:24:21.984662Z 1 00h00m03.745236s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T14:24:21.984787Z 1 00h00m03.745236s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T14:24:21.984810Z 1 00h00m03.745236s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T14:24:21.992561Z 1 00h00m07.650140s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T14:24:21.992618Z 1 00h00m07.650140s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-25T14:24:21.992648Z 1 00h00m07.650140s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-25T14:24:21.992774Z 1 00h00m07.650140s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-25T14:24:21.992801Z 1 00h00m07.650140s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T14:24:21.992827Z 1 00h00m07.650140s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T14:24:21.992846Z 1 00h00m07.650140s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T14:24:21.997746Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T14:24:21.997855Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-25T14:24:21.997946Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-25T14:24:21.997967Z 1 00h00m10.002048s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-25T14:24:21.998045Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-25T14:24:21.998180Z 1 00h00m10.002048s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-25T14:24:22.002636Z 1 00h00m15.538046s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 
72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T14:24:22.002725Z 1 00h00m15.538046s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-25T14:24:22.002766Z 1 00h00m15.538046s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-25T14:24:22.002842Z 1 00h00m15.538046s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-25T14:24:22.002882Z 1 00h00m15.538046s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T14:24:22.002922Z 1 00h00m15.538046s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T14:24:22.002955Z 1 00h00m15.538046s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T14:24:22.006774Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T14:24:22.006858Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-25T14:24:22.006911Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-25T14:24:22.006943Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-25T14:24:22.006990Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-25T14:24:22.007049Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-25T14:24:22.022484Z 1 00h00m30.182433s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T14:24:22.022540Z 1 00h00m30.182433s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-25T14:24:22.022697Z 1 00h00m30.182433s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-25T14:24:22.022838Z 1 00h00m30.182433s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 msgGeneration=0 Info->ClusterStateGuid=2 msgGuid=0 2025-06-25T14:24:22.023230Z 1 00h00m30.182433s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-25T14:24:22.023264Z 1 00h00m30.182433s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-25T14:24:22.023416Z 1 00h00m30.182433s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} 
TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 0 2025-06-25T14:24:22.023695Z 1 00h00m30.182433s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 2 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-25T14:24:22.023714Z 1 00h00m30.182433s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=0 Info->ClusterStateGuid=0 clusterStateGuid=2 2025-06-25T14:24:22.023877Z 1 00h00m30.182433s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 0 ClusterStateGuid: 2 2025-06-25T14:24:22.027374Z 1 00h00m31.787132s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936131 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T14:24:22.027427Z 1 00h00m31.787132s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 0} 2025-06-25T14:24:22.027572Z 1 00h00m31.787132s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 1} 2025-06-25T14:24:22.027599Z 1 00h00m31.787132s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936131 Cookie: 2} 2025-06-25T14:24:22.027638Z 1 00h00m31.787132s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T14:24:22.027675Z 1 00h00m31.787132s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T14:24:22.027699Z 1 00h00m31.787132s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936131 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T14:24:22.040484Z 1 00h00m40.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T14:24:22.040576Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-25T14:24:22.040627Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-25T14:24:22.040667Z 1 00h00m40.200000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-25T14:24:22.040814Z 1 00h00m40.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-25T14:24:22.040860Z 1 00h00m40.200000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-25T14:24:22.051868Z 1 00h00m50.300000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T14:24:22.051980Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: 
{EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-25T14:24:22.052020Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-25T14:24:22.052046Z 1 00h00m50.300000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-25T14:24:22.052084Z 1 00h00m50.300000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-25T14:24:22.052163Z 1 00h00m50.300000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 1 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-25T14:24:22.052188Z 1 00h00m50.300000s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=1 Info->ClusterStateGuid=0 clusterStateGuid=0 2025-06-25T14:24:22.052277Z 1 00h00m50.300000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 0 >> CheckIntegrityBlock42::DataErrorAdditionalUnequalParts >> CheckIntegrityBlock42::PlacementOkWithErrors >> CheckIntegrityMirror3dc::PlacementOk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_statestorage/unittest >> TStateStorageRingGroupState::TestBoardConfigMismatch [GOOD] Test command err: RandomSeed# 10443012061158060813 2025-06-25T14:24:21.838662Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [9:270:20] SessionId# [8:18:8] Cookie# 14266082087359100869 2025-06-25T14:24:21.838734Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 9 SessionId# [8:18:8] Inserted# false Subscription# {SessionId# [8:18:8] SubscriptionCookie# 0} NextSubscribeCookie# 6 2025-06-25T14:24:21.843635Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 9 Cookie# 14266082087359100869 SessionId# [8:18:8] Binding# {9.9/14266082087359100869@[8:18:8]} Record# {RootNodeId: 7 } 2025-06-25T14:24:21.843727Z 8 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {9.7/14266082087359100869@[8:18:8]} PrevRootNodeId# 9 ConfigUpdate# false 2025-06-25T14:24:21.843798Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [9:270:20] SessionId# [5:63:8] Cookie# 5823903272523961575 2025-06-25T14:24:21.843845Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 9 SessionId# [5:63:8] Inserted# false Subscription# {SessionId# [5:63:8] SubscriptionCookie# 0} NextSubscribeCookie# 4 2025-06-25T14:24:21.843908Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 9 Cookie# 5823903272523961575 SessionId# [5:63:8] Binding# {9.9/5823903272523961575@[5:63:8]} Record# {RootNodeId: 7 } 2025-06-25T14:24:21.843944Z 5 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {9.7/5823903272523961575@[5:63:8]} PrevRootNodeId# 9 ConfigUpdate# false 2025-06-25T14:24:21.843979Z 7 00h00m00.000000s :BS_NODE DEBUG: 
{NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [1:214:30] SessionId# [7:146:1] Cookie# 3292134888175156527 2025-06-25T14:24:21.844008Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 1 SessionId# [7:146:1] Inserted# false Subscription# {SessionId# [7:146:1] SubscriptionCookie# 0} NextSubscribeCookie# 4 2025-06-25T14:24:21.844193Z 7 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 1 Cookie# 3292134888175156527 SessionId# [7:146:1] Binding# {3.9/4543741735667600825@[7:98:3]} Record# {BoundNodes { NodeId { Host: "127.0.0.9" Port: 19001 NodeId: 9 } Meta { Fingerprint: "\3403\207\365\032>> TStateStorageRingGroupState::TestProxyConfigMismatch [GOOD] Test command err: RandomSeed# 1158503251751795755 2025-06-25T14:24:21.835426Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [4:235:20] SessionId# [2:111:3] Cookie# 10702550184275318972 2025-06-25T14:24:21.835496Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 4 SessionId# [2:111:3] Inserted# false Subscription# {SessionId# [2:111:3] SubscriptionCookie# 0} NextSubscribeCookie# 2 2025-06-25T14:24:21.840275Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 4 Cookie# 10702550184275318972 SessionId# [2:111:3] Binding# {4.4/10702550184275318972@[2:111:3]} Record# {RootNodeId: 8 } 2025-06-25T14:24:21.840376Z 2 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {4.8/10702550184275318972@[2:111:3]} PrevRootNodeId# 4 ConfigUpdate# false 2025-06-25T14:24:21.840436Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639239 Sender# [4:235:20] SessionId# [6:74:4] Cookie# 5669951387869610033 2025-06-25T14:24:21.840479Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 4 SessionId# [6:74:4] Inserted# false Subscription# {SessionId# [6:74:4] SubscriptionCookie# 0} NextSubscribeCookie# 4 2025-06-25T14:24:21.840544Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC17@distconf_binding.cpp:300} TEvNodeConfigReversePush NodeId# 4 Cookie# 5669951387869610033 SessionId# [6:74:4] Binding# {4.4/5669951387869610033@[6:74:4]} Record# {RootNodeId: 8 } 2025-06-25T14:24:21.840580Z 6 00h00m00.000000s :BS_NODE DEBUG: {NWDC13@distconf_binding.cpp:319} Binding updated Binding# {4.8/5669951387869610033@[6:74:4]} PrevRootNodeId# 4 ConfigUpdate# false 2025-06-25T14:24:21.840623Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639238 Sender# [8:263:20] SessionId# [9:19:8] Cookie# 5245357728673788352 2025-06-25T14:24:21.840667Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:241} SubscribeToPeerNode NodeId# 8 SessionId# [9:19:8] Inserted# false Subscription# {SessionId# [9:19:8] SubscriptionCookie# 0} NextSubscribeCookie# 4 2025-06-25T14:24:21.840856Z 9 00h00m00.000000s :BS_NODE DEBUG: {NWDC02@distconf_binding.cpp:407} TEvNodeConfigPush NodeId# 8 Cookie# 5245357728673788352 SessionId# [9:19:8] Binding# {6.4/12717044095074548890@[9:47:6]} Record# {BoundNodes { NodeId { Host: "127.0.0.6" Port: 19001 NodeId: 6 } Meta { Fingerprint: "\3403\207\365\032>ClusterStateGeneration=0 clusterStateGeneration=1 Info->ClusterStateGuid=0 clusterStateGuid=2 2025-06-25T14:24:22.033750Z 1 00h00m14.321244s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 
ClusterStateGeneration: 1 ClusterStateGuid: 2 2025-06-25T14:24:22.038887Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T14:24:22.038970Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-25T14:24:22.039014Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-25T14:24:22.039047Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-25T14:24:22.039095Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-25T14:24:22.039148Z 1 00h00m20.100000s :STATESTORAGE DEBUG: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72057594037932033 ClusterStateGeneration: 1 ClusterStateGuid: 2 CurrentLeader: [1:300:34] CurrentLeaderTablet: [1:304:36] CurrentGeneration: 2 CurrentStep: 0} 2025-06-25T14:24:22.039171Z 1 00h00m20.100000s :STATESTORAGE DEBUG: StateStorageProxy TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=0 clusterStateGeneration=1 Info->ClusterStateGuid=0 clusterStateGuid=2 2025-06-25T14:24:22.039247Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 1 ClusterStateGuid: 2 2025-06-25T14:24:22.039340Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-25T14:24:22.039376Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=1 msgGeneration=3 Info->ClusterStateGuid=2 msgGuid=4 2025-06-25T14:24:22.039437Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 3 ClusterStateGuid: 4 2025-06-25T14:24:22.039516Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: {EvReplicaUpdate TabletID: 72057594037932033} 2025-06-25T14:24:22.039544Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=1 msgGeneration=3 Info->ClusterStateGuid=2 msgGuid=4 2025-06-25T14:24:22.039588Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 3 ClusterStateGuid: 4 2025-06-25T14:24:22.044150Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica::Handle ev: NKikimrStateStorage.TEvCleanup TabletID: 72057594037932033 ProposedLeader { RawX1: 0 RawX2: 0 } ClusterStateGeneration: 3 ClusterStateGuid: 4 2025-06-25T14:24:22.044336Z 1 00h00m20.100000s :STATESTORAGE DEBUG: Replica TEvNodeWardenNotifyConfigMismatch: Info->ClusterStateGeneration=1 msgGeneration=3 Info->ClusterStateGuid=2 msgGuid=4 2025-06-25T14:24:22.044826Z 1 00h00m20.100000s :BS_NODE INFO: {NW51@node_warden_resource.cpp:338} TEvNodeWardenNotifyConfigMismatch: NodeId: 1 ClusterStateGeneration: 3 ClusterStateGuid: 4 |67.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |67.7%| [LD] {RESULT} 
$(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut >> CheckIntegrityBlock42::PlacementBlobIsLost |67.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut >> CheckIntegrityBlock42::PlacementOk >> BsControllerTest::TestLocalSelfHeal [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> BSCStopPDisk::PDiskStop [GOOD] Test command err: RandomSeed# 12758463747416976492 >> CheckIntegrityMirror3dc::PlacementBlobIsLost >> CheckIntegrityBlock42::DataOk >> Donor::SkipBadDonor >> Donor::ConsistentWritesWhenSwitchingToDonorMode >> Donor::ContinueWithFaultyDonor >> CheckIntegrityBlock42::PlacementWrongDisks |68.1%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/test-results/unittest/{meta.json ... results_accumulator.log} |68.1%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> Donor::CheckOnlineReadRequestToDonor >> Donor::SlayAfterWiping ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::TestLocalSelfHeal [GOOD] Test command err: 2025-06-25T14:24:17.135508Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-25T14:24:17.135561Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-25T14:24:17.135654Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-25T14:24:17.135679Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-25T14:24:17.135736Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-25T14:24:17.135759Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-06-25T14:24:17.135809Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-25T14:24:17.135833Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-25T14:24:17.135868Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-25T14:24:17.135887Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-25T14:24:17.135921Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-25T14:24:17.135939Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-25T14:24:17.135970Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-25T14:24:17.135989Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-25T14:24:17.136022Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-25T14:24:17.136039Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-25T14:24:17.136069Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-25T14:24:17.136091Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-25T14:24:17.136124Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-25T14:24:17.136144Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-25T14:24:17.136217Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-06-25T14:24:17.136240Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-25T14:24:17.136274Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-25T14:24:17.136295Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-25T14:24:17.136350Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-25T14:24:17.136371Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-25T14:24:17.136422Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-25T14:24:17.136454Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-25T14:24:17.136498Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-06-25T14:24:17.136519Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] 
Connect 2025-06-25T14:24:17.136549Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-06-25T14:24:17.136569Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-06-25T14:24:17.136599Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-06-25T14:24:17.136618Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-06-25T14:24:17.136662Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-06-25T14:24:17.136685Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-06-25T14:24:17.136738Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-06-25T14:24:17.136765Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-06-25T14:24:17.136796Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-06-25T14:24:17.136817Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-06-25T14:24:17.136862Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-06-25T14:24:17.136885Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-06-25T14:24:17.136924Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-06-25T14:24:17.136944Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-06-25T14:24:17.136982Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-06-25T14:24:17.137003Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-06-25T14:24:17.137038Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-06-25T14:24:17.137058Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-06-25T14:24:17.137091Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-06-25T14:24:17.137124Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-06-25T14:24:17.137164Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-06-25T14:24:17.137187Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-06-25T14:24:17.137244Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-06-25T14:24:17.137268Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-06-25T14:24:17.137303Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-06-25T14:24:17.137324Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-06-25T14:24:17.137357Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-06-25T14:24:17.137380Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-06-25T14:24:17.137416Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-06-25T14:24:17.137437Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-06-25T14:24:17.137470Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-06-25T14:24:17.137490Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-06-25T14:24:17.137523Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-06-25T14:24:17.137543Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-06-25T14:24:17.137575Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2025-06-25T14:24:17.137595Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 2025-06-25T14:24:17.137638Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2025-06-25T14:24:17.137661Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2025-06-25T14:24:17.137714Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2025-06-25T14:24:17.137736Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2025-06-25T14:24:17.137788Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2025-06-25T14:24:17.137809Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Connect 2025-06-25T14:24:17.153233Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2713:53] Status# ERROR ClientId# [1:2713:53] ServerId# [0:0:0] PipeClient# [1:2713:53] 2025-06-25T14:24:17.154579Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected 
Sender# [2:2714:41] Status# ERROR ClientId# [2:2714:41] ServerId# [0:0:0] PipeClient# [2:2714:41] 2025-06-25T14:24:17.154647Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2715:41] Status# ERROR ClientId# [3:2715:41] ServerId# [0:0:0] PipeClient# [3:2715:41] 2025-06-25T14:24:17.154704Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2716:41] Status# ERROR ClientId# [4:2716:41] ServerId# [0:0:0] PipeClient# [4:2716:41] 2025-06-25T14:24:17.154756Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2717:41] Status# ERROR ClientId# [5:2717:41] ServerId# [0:0:0] PipeClient# [5:2717:41] 2025-06-25T14:24:17.154806Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2718:41] Status# ERROR ClientId# [6:2718:41] ServerId# [0:0:0] PipeClient# [6:2718:41] 2025-06-25T14:24:17.154855Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2719:41] Status# ERROR ClientId# [7:2719:41] ServerId# [0:0:0] PipeClient# [7:2719:41] 2025-06-25T14:24:17.154896Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2720:41] Status# ERROR ClientId# [8:2720:41] ServerId# [0:0:0] PipeClient# [8:2720:41] 2025-06-25T14:24:17.154938Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2721:41] Status# ERROR ClientId# [9:2721:41] ServerId# [0:0:0] PipeClient# [9:2721:41] 2025-06-25T14:24:17.154976Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2722:41] Status# ERROR ClientId# [10:2722:41] ServerId# [0:0:0] PipeClient# [10:2722:41] 2025-06-25T14:24:17.155018Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2723:41] Status# ERROR ClientId# [11:2723:41] ServerId# [0:0:0] PipeClient# [11:2723:41] 2025-06-25T14:24:17.155056Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2724:41] Status# ERROR ClientId# [12:2724:41] ServerId# [0:0:0] PipeClient# [12:2724:41] 2025-06-25T14:24:17.155097Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2725:41] Status# ERROR ClientId# [13:2725:41] ServerId# [0:0:0] PipeClient# [13:2725:41] 2025-06-25T14:24:17.155134Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2726:41] Status# ERROR ClientId# [14:2726:41] ServerId# [0:0:0] PipeClient# [14:2726:41] 2025-06-25T14:24:17.155174Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2727:41] Status# ERROR ClientId# [15:2727:41] ServerId# [0:0:0] PipeClient# [15:2727:41] 2025-06-25T14:24:17.155211Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2728:41] Status# ERROR ClientId# [16:2728:41] ServerId# [0:0:0] PipeClient# [16:2728:41] 2025-06-25T14:24:17.155247Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2729:41] Status# ERROR ClientId# [17:2729:41] ServerId# [0:0:0] PipeClient# [17:2729:41] 2025-06-25T14:24:17.155284Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2730:41] Status# ERROR ClientId# [18:2730:41] ServerId# [0:0:0] PipeClient# [18:2730:41] 2025-06-25T14:24:17.155321Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2731:41] Status# ERROR ClientId# [19:2731:41] ServerId# [0:0:0] PipeClient# [19:2731:41] 2025-06-25T14:24:17.155379Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2732:41] Status# ERROR ClientId# [20:2732:41] ServerId# [0:0:0] PipeClient# [20:2732:41] 2025-06-25T14:24:17.155449Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2733:41] Status# ERROR ClientId# [21:2733:41] 
ServerId# [0:0:0] PipeClient# [21:2733:41] 2025-06-25T14:24:17.155492Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2734:41] Status# ERROR ClientId# [22:2734:41] ServerId# [0:0:0] PipeClient# [22:2734:41] 2025-06-25T14:24:17.155527Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2735:41] Status# ERROR ClientId# [23:2735:41] ServerId# [0:0:0] PipeClient# [23:2735:41] 2025-06-25T14:24:17.155567Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2736:41] Status# ERROR ClientId# [24:2736:41] ServerId# [0:0:0] PipeClient# [24:2736:41] 2025-06-25T14:24:17.155604Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2737:41] Status# ERROR ClientId# [25:2737:41] ServerId# [0:0:0] PipeClient# [25:2737:41] 2025-06-25T14:24:17.155642Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2738:41] Status# ERROR ClientId# [26:2738:41] ServerId# [0:0:0] PipeClient# [26:2738:41] 2025-06-25T14:24:17.155693Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2739:41] Status# ERROR ClientId# [27:2739:41] ServerId# [0:0:0] PipeClient# [27:2739:41] 2025-06-25T14:24:17.155739Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2740:41] Status# ERROR ClientId# [28:2740:41] ServerId# [0:0:0] PipeClient# [28:2740:41] 2025-06-25T14:24:17.155778Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2741:41] Status# ERROR ClientId# [29:2741:41] ServerId# [0:0:0] PipeClient# [29:2741:41] 2025-06-25T14:24:17.155813Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2742:41] Status# ERROR ClientId# [30:2742:41] ServerId# [0:0:0] PipeClient# [30:2742:41] 2025-06-25T14:24:17.155851Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2743:41] Status# ERROR ClientId# [31:2743:41] ServerId# [0:0:0] PipeClient# [31:2743:41] 2025-06-25T14:24:17.155890Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2744:41] Status# ERROR ClientId# [32:2744:41] ServerId# [0:0:0] PipeClient# [32:2744:41] 2025-06-25T14:24:17.155929Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2745:41] Status# ERROR ClientId# [33:2745:41] ServerId# [0:0:0] PipeClient# [33:2745:41] 2025-06-25T14:24:17.155964Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] ClientConnected Sender# [34:2746:41] Status# ERROR ClientId# [34:2746:41] ServerId# [0:0:0] PipeClient# [34:2746:41] 2025-06-25T14:24:17.156000Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2747:41] Status# ERROR ClientId# [35:2747:41 ... 
true Replicated# true 2025-06-25T14:24:21.209647Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483773 VDiskId# [8000007d:1:2:1:0] DiskIsOk# true 2025-06-25T14:24:21.209690Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483773 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:21.209715Z 1 00h05m00.104608s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483773 VDiskId# [8000007d:1:2:2:0] DiskIsOk# true 2025-06-25T14:24:21.214832Z 1 00h05m00.105120s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483773 Items# [8000007d:1:1:2:0]: 18:1002:1007 -> 18:1000:1010 ConfigTxSeqNo# 48 2025-06-25T14:24:21.214875Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483773 Success# true 2025-06-25T14:24:21.215027Z 18 00h05m00.105120s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2025-06-25T14:24:21.215073Z 18 00h05m00.105120s :BS_NODE DEBUG: [18] VDiskId# [8000007d:2:1:2:0] PDiskId# 1000 VSlotId# 1010 created 2025-06-25T14:24:21.215136Z 18 00h05m00.105120s :BS_NODE DEBUG: [18] VDiskId# [8000007d:2:1:2:0] status changed to INIT_PENDING 2025-06-25T14:24:21.215279Z 36 00h05m00.105120s :BS_NODE DEBUG: [36] NodeServiceSetUpdate 2025-06-25T14:24:21.215350Z 36 00h05m00.105120s :BS_NODE DEBUG: [36] VDiskId# [8000007d:1:2:0:0] -> [8000007d:2:2:0:0] 2025-06-25T14:24:21.215438Z 3 00h05m00.105120s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2025-06-25T14:24:21.215486Z 3 00h05m00.105120s :BS_NODE DEBUG: [3] VDiskId# [8000007d:1:0:1:0] -> [8000007d:2:0:1:0] 2025-06-25T14:24:21.215576Z 6 00h05m00.105120s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2025-06-25T14:24:21.215617Z 6 00h05m00.105120s :BS_NODE DEBUG: [6] VDiskId# [8000007d:1:0:2:0] -> [8000007d:2:0:2:0] 2025-06-25T14:24:21.215708Z 24 00h05m00.105120s :BS_NODE DEBUG: [24] NodeServiceSetUpdate 2025-06-25T14:24:21.215766Z 24 00h05m00.105120s :BS_NODE DEBUG: [24] VDiskId# [8000007d:1:1:0:0] -> [8000007d:2:1:0:0] 2025-06-25T14:24:21.215844Z 27 00h05m00.105120s :BS_NODE DEBUG: [27] NodeServiceSetUpdate 2025-06-25T14:24:21.215891Z 27 00h05m00.105120s :BS_NODE DEBUG: [27] VDiskId# [8000007d:1:2:1:0] -> [8000007d:2:2:1:0] 2025-06-25T14:24:21.215976Z 12 00h05m00.105120s :BS_NODE DEBUG: [12] NodeServiceSetUpdate 2025-06-25T14:24:21.216021Z 12 00h05m00.105120s :BS_NODE DEBUG: [12] VDiskId# [8000007d:1:0:0:0] -> [8000007d:2:0:0:0] 2025-06-25T14:24:21.216146Z 30 00h05m00.105120s :BS_NODE DEBUG: [30] NodeServiceSetUpdate 2025-06-25T14:24:21.216193Z 30 00h05m00.105120s :BS_NODE DEBUG: [30] VDiskId# [8000007d:1:2:2:0] -> [8000007d:2:2:2:0] 2025-06-25T14:24:21.216282Z 15 00h05m00.105120s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2025-06-25T14:24:21.216343Z 15 00h05m00.105120s :BS_NODE DEBUG: [15] VDiskId# [8000007d:1:1:1:0] -> [8000007d:2:1:1:0] 2025-06-25T14:24:21.216634Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483757 2025-06-25T14:24:21.217750Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:21.217799Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:0:0:0] DiskIsOk# true 2025-06-25T14:24:21.217838Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} 
Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:21.217873Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:0:1:0] DiskIsOk# true 2025-06-25T14:24:21.217911Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:21.217942Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:0:2:0] DiskIsOk# true 2025-06-25T14:24:21.217971Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:21.217995Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:1:0:0] DiskIsOk# true 2025-06-25T14:24:21.218044Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:21.218076Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:1:1:0] DiskIsOk# true 2025-06-25T14:24:21.218104Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:21.218131Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:2:0:0] DiskIsOk# true 2025-06-25T14:24:21.218169Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:21.218237Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:2:1:0] DiskIsOk# true 2025-06-25T14:24:21.218277Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483757 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:21.218309Z 1 00h05m00.105120s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483757 VDiskId# [8000006d:1:2:2:0] DiskIsOk# true 2025-06-25T14:24:21.223601Z 1 00h05m00.105632s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483757 Items# [8000006d:1:1:2:0]: 18:1002:1006 -> 18:1001:1010 ConfigTxSeqNo# 49 2025-06-25T14:24:21.223645Z 1 00h05m00.105632s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483757 Success# true 2025-06-25T14:24:21.224096Z 18 00h05m00.105632s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2025-06-25T14:24:21.224233Z 18 00h05m00.105632s :BS_NODE DEBUG: [18] VDiskId# [8000006d:2:1:2:0] PDiskId# 1001 VSlotId# 1010 created 2025-06-25T14:24:21.224303Z 18 00h05m00.105632s :BS_NODE DEBUG: [18] VDiskId# [8000006d:2:1:2:0] status changed to INIT_PENDING 2025-06-25T14:24:21.224514Z 36 00h05m00.105632s :BS_NODE DEBUG: [36] NodeServiceSetUpdate 2025-06-25T14:24:21.224655Z 36 00h05m00.105632s :BS_NODE DEBUG: [36] VDiskId# [8000006d:1:2:0:0] -> [8000006d:2:2:0:0] 2025-06-25T14:24:21.224747Z 3 00h05m00.105632s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2025-06-25T14:24:21.224945Z 3 
00h05m00.105632s :BS_NODE DEBUG: [3] VDiskId# [8000006d:1:0:1:0] -> [8000006d:2:0:1:0] 2025-06-25T14:24:21.225096Z 6 00h05m00.105632s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2025-06-25T14:24:21.225224Z 6 00h05m00.105632s :BS_NODE DEBUG: [6] VDiskId# [8000006d:1:0:2:0] -> [8000006d:2:0:2:0] 2025-06-25T14:24:21.225370Z 24 00h05m00.105632s :BS_NODE DEBUG: [24] NodeServiceSetUpdate 2025-06-25T14:24:21.225415Z 24 00h05m00.105632s :BS_NODE DEBUG: [24] VDiskId# [8000006d:1:1:0:0] -> [8000006d:2:1:0:0] 2025-06-25T14:24:21.225626Z 27 00h05m00.105632s :BS_NODE DEBUG: [27] NodeServiceSetUpdate 2025-06-25T14:24:21.225739Z 27 00h05m00.105632s :BS_NODE DEBUG: [27] VDiskId# [8000006d:1:2:1:0] -> [8000006d:2:2:1:0] 2025-06-25T14:24:21.225904Z 12 00h05m00.105632s :BS_NODE DEBUG: [12] NodeServiceSetUpdate 2025-06-25T14:24:21.225949Z 12 00h05m00.105632s :BS_NODE DEBUG: [12] VDiskId# [8000006d:1:0:0:0] -> [8000006d:2:0:0:0] 2025-06-25T14:24:21.226091Z 30 00h05m00.105632s :BS_NODE DEBUG: [30] NodeServiceSetUpdate 2025-06-25T14:24:21.226136Z 30 00h05m00.105632s :BS_NODE DEBUG: [30] VDiskId# [8000006d:1:2:2:0] -> [8000006d:2:2:2:0] 2025-06-25T14:24:21.226499Z 15 00h05m00.105632s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2025-06-25T14:24:21.226626Z 15 00h05m00.105632s :BS_NODE DEBUG: [15] VDiskId# [8000006d:1:1:1:0] -> [8000006d:2:1:1:0] 2025-06-25T14:24:21.228802Z 18 00h05m01.225072s :BS_NODE DEBUG: [18] VDiskId# [8000003d:2:1:2:0] status changed to REPLICATING 2025-06-25T14:24:21.229857Z 18 00h05m01.295560s :BS_NODE DEBUG: [18] VDiskId# [8000004d:2:1:2:0] status changed to REPLICATING 2025-06-25T14:24:21.256729Z 18 00h05m01.418608s :BS_NODE DEBUG: [18] VDiskId# [8000000d:2:1:2:0] status changed to REPLICATING 2025-06-25T14:24:21.258978Z 18 00h05m01.667048s :BS_NODE DEBUG: [18] VDiskId# [8000005d:2:1:2:0] status changed to REPLICATING 2025-06-25T14:24:21.263091Z 18 00h05m02.198632s :BS_NODE DEBUG: [18] VDiskId# [8000006d:2:1:2:0] status changed to REPLICATING 2025-06-25T14:24:21.265592Z 18 00h05m02.372584s :BS_NODE DEBUG: [18] VDiskId# [8000002d:2:1:2:0] status changed to REPLICATING 2025-06-25T14:24:21.267696Z 18 00h05m04.163096s :BS_NODE DEBUG: [18] VDiskId# [8000001d:2:1:2:0] status changed to REPLICATING 2025-06-25T14:24:21.271840Z 18 00h05m06.033120s :BS_NODE DEBUG: [18] VDiskId# [8000007d:2:1:2:0] status changed to REPLICATING 2025-06-25T14:24:21.275070Z 18 00h05m11.089632s :BS_NODE DEBUG: [18] VDiskId# [8000006d:2:1:2:0] status changed to READY 2025-06-25T14:24:21.280283Z 18 00h05m11.090144s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2025-06-25T14:24:21.280410Z 18 00h05m11.090144s :BS_NODE DEBUG: [18] VDiskId# [8000006d:1:1:2:0] destroyed 2025-06-25T14:24:21.280931Z 18 00h05m13.440608s :BS_NODE DEBUG: [18] VDiskId# [8000000d:2:1:2:0] status changed to READY 2025-06-25T14:24:21.286037Z 18 00h05m13.441120s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2025-06-25T14:24:21.286183Z 18 00h05m13.441120s :BS_NODE DEBUG: [18] VDiskId# [8000000d:1:1:2:0] destroyed 2025-06-25T14:24:21.286627Z 18 00h05m13.454072s :BS_NODE DEBUG: [18] VDiskId# [8000003d:2:1:2:0] status changed to READY 2025-06-25T14:24:21.291256Z 18 00h05m13.454584s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2025-06-25T14:24:21.291299Z 18 00h05m13.454584s :BS_NODE DEBUG: [18] VDiskId# [8000003d:1:1:2:0] destroyed 2025-06-25T14:24:21.294240Z 18 00h05m15.681584s :BS_NODE DEBUG: [18] VDiskId# [8000002d:2:1:2:0] status changed to READY 2025-06-25T14:24:21.301723Z 18 00h05m15.682096s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2025-06-25T14:24:21.301897Z 18 
00h05m15.682096s :BS_NODE DEBUG: [18] VDiskId# [8000002d:1:1:2:0] destroyed 2025-06-25T14:24:21.304018Z 18 00h05m23.409096s :BS_NODE DEBUG: [18] VDiskId# [8000001d:2:1:2:0] status changed to READY 2025-06-25T14:24:21.309958Z 18 00h05m23.409608s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2025-06-25T14:24:21.310017Z 18 00h05m23.409608s :BS_NODE DEBUG: [18] VDiskId# [8000001d:1:1:2:0] destroyed 2025-06-25T14:24:21.314517Z 18 00h05m30.125120s :BS_NODE DEBUG: [18] VDiskId# [8000007d:2:1:2:0] status changed to READY 2025-06-25T14:24:21.320336Z 18 00h05m30.125632s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2025-06-25T14:24:21.320389Z 18 00h05m30.125632s :BS_NODE DEBUG: [18] VDiskId# [8000007d:1:1:2:0] destroyed 2025-06-25T14:24:21.320776Z 18 00h05m30.381560s :BS_NODE DEBUG: [18] VDiskId# [8000004d:2:1:2:0] status changed to READY 2025-06-25T14:24:21.327155Z 18 00h05m30.382072s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2025-06-25T14:24:21.327345Z 18 00h05m30.382072s :BS_NODE DEBUG: [18] VDiskId# [8000004d:1:1:2:0] destroyed 2025-06-25T14:24:21.329388Z 18 00h05m34.961048s :BS_NODE DEBUG: [18] VDiskId# [8000005d:2:1:2:0] status changed to READY 2025-06-25T14:24:21.335576Z 18 00h05m34.961560s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2025-06-25T14:24:21.335618Z 18 00h05m34.961560s :BS_NODE DEBUG: [18] VDiskId# [8000005d:1:1:2:0] destroyed |68.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |68.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std >> ParseStats::ParseWithSources [GOOD] >> ParseStats::ParseJustOutput |68.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_statestorage/test-results/unittest/{meta.json ... results_accumulator.log} |68.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} >> ParseStats::ParseJustOutput [GOOD] >> ParseStats::ParseMultipleGraphsV1 [GOOD] >> ParseStats::ParseMultipleGraphsV2 |68.1%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std |68.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/example/ydb-tests-example |68.2%| [LD] {RESULT} $(B)/ydb/tests/example/ydb-tests-example |68.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/example/ydb-tests-example >> ParseStats::ParseMultipleGraphsV2 [GOOD] |69.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/control_plane_storage/internal/ut/unittest >> ParseStats::ParseMultipleGraphsV2 [GOOD] >> TGenerateQueueIdTests::MakeQueueIdBasic [GOOD] >> TParseParamsTests::CreateUser [GOOD] >> TParseParamsTests::ChangeMessageVisibilityBatchRequest [GOOD] >> TParseParamsTests::DeleteMessageBatchRequest [GOOD] >> TParseParamsTests::MessageBody [GOOD] >> TParseParamsTests::SendMessageBatchRequest [GOOD] >> TParseParamsTests::DeleteQueueBatchRequest [GOOD] >> TParseParamsTests::PurgeQueueBatchRequest [GOOD] >> TParseParamsTests::GetQueueAttributesBatchRequest [GOOD] >> TParseParamsTests::UnnumberedAttribute [GOOD] >> TParseParamsTests::UnnumberedAttributeName [GOOD] >> TParseParamsTests::FailsOnInvalidDeduplicationId [GOOD] >> TParseParamsTests::FailsOnInvalidGroupId [GOOD] >> TParseParamsTests::FailsOnInvalidReceiveRequestAttemptId [GOOD] >> TParseParamsTests::FailsOnInvalidMaxNumberOfMessages [GOOD] >> TParseParamsTests::FailsOnInvalidWaitTime [GOOD] >> TParseParamsTests::FailsOnInvalidDelaySeconds >> TParseParamsTests::FailsOnInvalidDelaySeconds [GOOD] >> TestIssuesGrouping::ShouldCountEveryIssue [GOOD] >> TestIssuesGrouping::ShouldRemoveOldIssues [GOOD] >> TestIssuesGrouping::ShouldRemoveIfMoreThanMaxIssues [GOOD] >> TestIssuesGrouping::ShouldRemoveTheOldestIfMoreThanMaxIssues [GOOD] >> TestIssuesGrouping::ShouldSaveSubIssues [GOOD] >> ResultReceiver::ReceiveStatus [GOOD] >> ResultReceiver::ReceiveError [GOOD] >> ResultReceiver::WriteQueue [GOOD] |69.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |69.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |69.9%| [TM] {RESULT} ydb/core/fq/libs/control_plane_storage/internal/ut/unittest |69.9%| [LD] {RESULT} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |70.0%| [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/ut/unittest >> TParseParamsTests::FailsOnInvalidDelaySeconds [GOOD] |70.0%| [TS] {RESULT} ydb/core/ymq/ut/unittest >> CheckIntegrityMirror3of4::PlacementOk [GOOD] >> CheckIntegrityMirror3of4::PlacementMissingParts |70.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |70.0%| [LD] {RESULT} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |70.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |70.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |70.0%| [LD] {RESULT} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |70.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |70.1%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/dq/actors/ut/unittest >> ResultReceiver::WriteQueue [GOOD] >> BsControllerTest::TestLocalBrokenRelocation [GOOD] >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 [GOOD] >> 
CheckIntegrityMirror3dc::PlacementOkWithErrors [GOOD] >> CheckIntegrityMirror3dc::PlacementOkWithErrorsOnBlobDisks >> VDiskBalancing::TestStopOneNode_Block42 [GOOD] >> CheckIntegrityBlock42::PlacementOk [GOOD] >> CheckIntegrityBlock42::PlacementOkHandoff >> CheckIntegrityBlock42::PlacementOkWithErrors [GOOD] >> CheckIntegrityBlock42::PlacementWithErrorsOnBlobDisks >> CheckIntegrityBlock42::PlacementBlobIsLost [GOOD] >> CheckIntegrityBlock42::PlacementAllOnHandoff >> CheckIntegrityBlock42::DataErrorAdditionalUnequalParts [GOOD] >> CheckIntegrityBlock42::DataErrorSixPartsOneBroken >> CheckIntegrityBlock42::DataOk [GOOD] >> CheckIntegrityBlock42::DataOkAdditionalEqualParts >> CheckIntegrityMirror3dc::PlacementOk [GOOD] >> CheckIntegrityMirror3dc::PlacementOkHandoff ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 [GOOD] Test command err: RandomSeed# 16307410242406553339 SEND TEvPut with key [1:1:1:0:0:100:0] 2025-06-25T14:24:22.364084Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 6 2025-06-25T14:24:22.364651Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 Node 4: Node 5: Node 6: 2 Node 7: 3 2025-06-25T14:24:22.494537Z 1 00h01m00.011024s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 7 Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 2 Node 4: Node 5: 1 Node 6: Node 7: 3 Start compaction 1 Finish compaction 1 |70.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob [GOOD] >> CheckIntegrityBlock42::PlacementWrongDisks [GOOD] >> CheckIntegrityMirror3dc::DataErrorOneCopy |70.1%| [TS] {RESULT} ydb/library/yql/providers/dq/actors/ut/unittest |70.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |70.1%| [LD] {RESULT} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |70.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut |70.1%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 [GOOD] >> CheckIntegrityMirror3dc::PlacementBlobIsLost [GOOD] >> CheckIntegrityMirror3dc::PlacementDisintegrated >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob [GOOD] >> VDiskBalancing::TestStopOneNode_Mirror3dc [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Block42 [GOOD] Test command err: RandomSeed# 12702689204773455595 SEND TEvPut with key [1:1:1:0:0:100:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:100:0] 2025-06-25T14:24:22.464936Z 3 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:188:17] ServerId# [1:296:58] TabletId# 72057594037932033 PipeClientId# [3:188:17] 2025-06-25T14:24:22.465129Z 8 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:223:17] ServerId# [1:301:63] TabletId# 
72057594037932033 PipeClientId# [8:223:17] 2025-06-25T14:24:22.465247Z 6 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:209:17] ServerId# [1:299:61] TabletId# 72057594037932033 PipeClientId# [6:209:17] 2025-06-25T14:24:22.465372Z 5 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:202:17] ServerId# [1:298:60] TabletId# 72057594037932033 PipeClientId# [5:202:17] 2025-06-25T14:24:22.465457Z 4 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:195:17] ServerId# [1:297:59] TabletId# 72057594037932033 PipeClientId# [4:195:17] 2025-06-25T14:24:22.465566Z 2 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:181:17] ServerId# [1:295:57] TabletId# 72057594037932033 PipeClientId# [2:181:17] 2025-06-25T14:24:22.465663Z 7 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:216:17] ServerId# [1:300:62] TabletId# 72057594037932033 PipeClientId# [7:216:17] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::TestLocalBrokenRelocation [GOOD] Test command err: 2025-06-25T14:24:16.917100Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-25T14:24:16.917157Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-25T14:24:16.917247Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-25T14:24:16.917270Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-25T14:24:16.917335Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-25T14:24:16.917358Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-06-25T14:24:16.917415Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-25T14:24:16.917435Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-25T14:24:16.917469Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-25T14:24:16.917490Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-25T14:24:16.917525Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-25T14:24:16.917547Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-25T14:24:16.917582Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-25T14:24:16.917603Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-25T14:24:16.917636Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-25T14:24:16.917656Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-25T14:24:16.917689Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-25T14:24:16.917713Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-25T14:24:16.917753Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-25T14:24:16.917774Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-25T14:24:16.917855Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-06-25T14:24:16.917881Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-25T14:24:16.917916Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-25T14:24:16.917937Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-25T14:24:16.917976Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-25T14:24:16.918000Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-25T14:24:16.918049Z 14 
00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-25T14:24:16.918084Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-25T14:24:16.918126Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-06-25T14:24:16.918148Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-06-25T14:24:16.918182Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-06-25T14:24:16.918204Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-06-25T14:24:16.918249Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-06-25T14:24:16.918275Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-06-25T14:24:16.918309Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-06-25T14:24:16.918329Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-06-25T14:24:16.918389Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-06-25T14:24:16.918412Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-06-25T14:24:16.918450Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-06-25T14:24:16.918470Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-06-25T14:24:16.918515Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-06-25T14:24:16.918543Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-06-25T14:24:16.918578Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-06-25T14:24:16.918600Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-06-25T14:24:16.918641Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-06-25T14:24:16.918662Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-06-25T14:24:16.918695Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-06-25T14:24:16.918718Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-06-25T14:24:16.918765Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-06-25T14:24:16.918791Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-06-25T14:24:16.918834Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-06-25T14:24:16.918859Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-06-25T14:24:16.918917Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-06-25T14:24:16.918943Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-06-25T14:24:16.918978Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-06-25T14:24:16.918999Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-06-25T14:24:16.919040Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-06-25T14:24:16.919061Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-06-25T14:24:16.919095Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-06-25T14:24:16.919116Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-06-25T14:24:16.919149Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-06-25T14:24:16.919174Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-06-25T14:24:16.919209Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-06-25T14:24:16.919230Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-06-25T14:24:16.919283Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2025-06-25T14:24:16.919308Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 2025-06-25T14:24:16.919343Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2025-06-25T14:24:16.919365Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2025-06-25T14:24:16.919447Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2025-06-25T14:24:16.919471Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2025-06-25T14:24:16.919528Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2025-06-25T14:24:16.919549Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] 
Connect 2025-06-25T14:24:16.935416Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2713:53] Status# ERROR ClientId# [1:2713:53] ServerId# [0:0:0] PipeClient# [1:2713:53] 2025-06-25T14:24:16.936875Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2714:41] Status# ERROR ClientId# [2:2714:41] ServerId# [0:0:0] PipeClient# [2:2714:41] 2025-06-25T14:24:16.936947Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2715:41] Status# ERROR ClientId# [3:2715:41] ServerId# [0:0:0] PipeClient# [3:2715:41] 2025-06-25T14:24:16.937004Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2716:41] Status# ERROR ClientId# [4:2716:41] ServerId# [0:0:0] PipeClient# [4:2716:41] 2025-06-25T14:24:16.937063Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2717:41] Status# ERROR ClientId# [5:2717:41] ServerId# [0:0:0] PipeClient# [5:2717:41] 2025-06-25T14:24:16.937112Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2718:41] Status# ERROR ClientId# [6:2718:41] ServerId# [0:0:0] PipeClient# [6:2718:41] 2025-06-25T14:24:16.937157Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2719:41] Status# ERROR ClientId# [7:2719:41] ServerId# [0:0:0] PipeClient# [7:2719:41] 2025-06-25T14:24:16.937196Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2720:41] Status# ERROR ClientId# [8:2720:41] ServerId# [0:0:0] PipeClient# [8:2720:41] 2025-06-25T14:24:16.937233Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2721:41] Status# ERROR ClientId# [9:2721:41] ServerId# [0:0:0] PipeClient# [9:2721:41] 2025-06-25T14:24:16.937271Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2722:41] Status# ERROR ClientId# [10:2722:41] ServerId# [0:0:0] PipeClient# [10:2722:41] 2025-06-25T14:24:16.937309Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2723:41] Status# ERROR ClientId# [11:2723:41] ServerId# [0:0:0] PipeClient# [11:2723:41] 2025-06-25T14:24:16.937344Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2724:41] Status# ERROR ClientId# [12:2724:41] ServerId# [0:0:0] PipeClient# [12:2724:41] 2025-06-25T14:24:16.937377Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2725:41] Status# ERROR ClientId# [13:2725:41] ServerId# [0:0:0] PipeClient# [13:2725:41] 2025-06-25T14:24:16.937412Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2726:41] Status# ERROR ClientId# [14:2726:41] ServerId# [0:0:0] PipeClient# [14:2726:41] 2025-06-25T14:24:16.937463Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2727:41] Status# ERROR ClientId# [15:2727:41] ServerId# [0:0:0] PipeClient# [15:2727:41] 2025-06-25T14:24:16.937503Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2728:41] Status# ERROR ClientId# [16:2728:41] ServerId# [0:0:0] PipeClient# [16:2728:41] 2025-06-25T14:24:16.937537Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2729:41] Status# ERROR ClientId# [17:2729:41] ServerId# [0:0:0] PipeClient# [17:2729:41] 2025-06-25T14:24:16.937572Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2730:41] Status# ERROR ClientId# [18:2730:41] ServerId# [0:0:0] PipeClient# [18:2730:41] 2025-06-25T14:24:16.937609Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2731:41] Status# ERROR ClientId# [19:2731:41] ServerId# [0:0:0] PipeClient# [19:2731:41] 2025-06-25T14:24:16.937672Z 20 00h00m00.000000s :BS_NODE 
DEBUG: [20] ClientConnected Sender# [20:2732:41] Status# ERROR ClientId# [20:2732:41] ServerId# [0:0:0] PipeClient# [20:2732:41] 2025-06-25T14:24:16.937734Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2733:41] Status# ERROR ClientId# [21:2733:41] ServerId# [0:0:0] PipeClient# [21:2733:41] 2025-06-25T14:24:16.937773Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2734:41] Status# ERROR ClientId# [22:2734:41] ServerId# [0:0:0] PipeClient# [22:2734:41] 2025-06-25T14:24:16.937808Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2735:41] Status# ERROR ClientId# [23:2735:41] ServerId# [0:0:0] PipeClient# [23:2735:41] 2025-06-25T14:24:16.937845Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2736:41] Status# ERROR ClientId# [24:2736:41] ServerId# [0:0:0] PipeClient# [24:2736:41] 2025-06-25T14:24:16.937881Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2737:41] Status# ERROR ClientId# [25:2737:41] ServerId# [0:0:0] PipeClient# [25:2737:41] 2025-06-25T14:24:16.937920Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2738:41] Status# ERROR ClientId# [26:2738:41] ServerId# [0:0:0] PipeClient# [26:2738:41] 2025-06-25T14:24:16.937974Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2739:41] Status# ERROR ClientId# [27:2739:41] ServerId# [0:0:0] PipeClient# [27:2739:41] 2025-06-25T14:24:16.938010Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2740:41] Status# ERROR ClientId# [28:2740:41] ServerId# [0:0:0] PipeClient# [28:2740:41] 2025-06-25T14:24:16.938046Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2741:41] Status# ERROR ClientId# [29:2741:41] ServerId# [0:0:0] PipeClient# [29:2741:41] 2025-06-25T14:24:16.938081Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2742:41] Status# ERROR ClientId# [30:2742:41] ServerId# [0:0:0] PipeClient# [30:2742:41] 2025-06-25T14:24:16.938114Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2743:41] Status# ERROR ClientId# [31:2743:41] ServerId# [0:0:0] PipeClient# [31:2743:41] 2025-06-25T14:24:16.938151Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2744:41] Status# ERROR ClientId# [32:2744:41] ServerId# [0:0:0] PipeClient# [32:2744:41] 2025-06-25T14:24:16.938188Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2745:41] Status# ERROR ClientId# [33:2745:41] ServerId# [0:0:0] PipeClient# [33:2745:41] 2025-06-25T14:24:16.938225Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] ClientConnected Sender# [34:2746:41] Status# ERROR ClientId# [34:2746:41] ServerId# [0:0:0] PipeClient# [34:2746:41] 2025-06-25T14:24:16.938261Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2747:41] Status# ERROR ClientId# [35:2747:41 ... 
25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000001:2:2:2:0] -> [80000001:3:2:2:0] 2025-06-25T14:24:23.943774Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000021:2:2:2:0] -> [80000021:3:2:2:0] 2025-06-25T14:24:23.943926Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000031:2:2:2:0] -> [80000031:3:2:2:0] 2025-06-25T14:24:23.943960Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000051:2:2:2:0] -> [80000051:3:2:2:0] 2025-06-25T14:24:23.944224Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000061:2:2:2:0] -> [80000061:3:2:2:0] 2025-06-25T14:24:23.946143Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-06-25T14:24:23.946187Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000010:2:1:0:0] -> [80000010:3:1:0:0] 2025-06-25T14:24:23.946220Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000040:2:1:0:0] -> [80000040:3:1:0:0] 2025-06-25T14:24:23.946481Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000070:2:1:0:0] -> [80000070:3:1:0:0] 2025-06-25T14:24:23.946514Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000001:2:1:1:0] -> [80000001:3:1:1:0] 2025-06-25T14:24:23.946543Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000021:2:1:1:0] -> [80000021:3:1:1:0] 2025-06-25T14:24:23.946707Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000031:2:1:1:0] -> [80000031:3:1:1:0] 2025-06-25T14:24:23.946863Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000051:2:1:1:0] -> [80000051:3:1:1:0] 2025-06-25T14:24:23.947131Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000061:2:1:1:0] -> [80000061:3:1:1:0] 2025-06-25T14:24:23.947161Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000002:1:1:2:0] -> [80000002:2:1:2:0] 2025-06-25T14:24:23.947190Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000012:1:1:2:0] -> [80000012:2:1:2:0] 2025-06-25T14:24:23.947220Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000022:1:1:2:0] -> [80000022:2:1:2:0] 2025-06-25T14:24:23.947378Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000032:1:1:2:0] -> [80000032:2:1:2:0] 2025-06-25T14:24:23.947521Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000042:1:1:2:0] -> [80000042:2:1:2:0] 2025-06-25T14:24:23.947661Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000052:1:1:2:0] -> [80000052:2:1:2:0] 2025-06-25T14:24:23.947799Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000062:1:1:2:0] -> [80000062:2:1:2:0] 2025-06-25T14:24:23.947831Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000072:1:1:2:0] -> [80000072:2:1:2:0] 2025-06-25T14:24:23.949596Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] NodeServiceSetUpdate 2025-06-25T14:24:23.949640Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000010:2:2:2:0] -> [80000010:3:2:2:0] 2025-06-25T14:24:23.949672Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000040:2:2:2:0] -> [80000040:3:2:2:0] 2025-06-25T14:24:23.949832Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000070:2:2:2:0] -> [80000070:3:2:2:0] 2025-06-25T14:24:23.949864Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000002:1:2:0:0] -> [80000002:2:2:0:0] 2025-06-25T14:24:23.950136Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000012:1:2:0:0] -> [80000012:2:2:0:0] 2025-06-25T14:24:23.950172Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000022:1:2:0:0] -> [80000022:2:2:0:0] 2025-06-25T14:24:23.950302Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000032:1:2:0:0] -> [80000032:2:2:0:0] 2025-06-25T14:24:23.950331Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] 
VDiskId# [80000042:1:2:0:0] -> [80000042:2:2:0:0] 2025-06-25T14:24:23.950461Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000052:1:2:0:0] -> [80000052:2:2:0:0] 2025-06-25T14:24:23.950491Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000062:1:2:0:0] -> [80000062:2:2:0:0] 2025-06-25T14:24:23.950657Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000072:1:2:0:0] -> [80000072:2:2:0:0] 2025-06-25T14:24:23.952671Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] NodeServiceSetUpdate 2025-06-25T14:24:23.952723Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000010:2:1:1:0] -> [80000010:3:1:1:0] 2025-06-25T14:24:23.953018Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000040:2:1:1:0] -> [80000040:3:1:1:0] 2025-06-25T14:24:23.953053Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000070:2:1:1:0] -> [80000070:3:1:1:0] 2025-06-25T14:24:23.953084Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000001:2:1:2:0] -> [80000001:3:1:2:0] 2025-06-25T14:24:23.953348Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000021:2:1:2:0] -> [80000021:3:1:2:0] 2025-06-25T14:24:23.953386Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000031:2:1:2:0] -> [80000031:3:1:2:0] 2025-06-25T14:24:23.953416Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000051:2:1:2:0] -> [80000051:3:1:2:0] 2025-06-25T14:24:23.953447Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000061:2:1:2:0] -> [80000061:3:1:2:0] 2025-06-25T14:24:23.963821Z 2 01h25m01.252560s :BS_NODE DEBUG: [2] VDiskId# [80000062:2:0:2:0] status changed to REPLICATING 2025-06-25T14:24:23.965606Z 10 01h25m01.720560s :BS_NODE DEBUG: [10] VDiskId# [80000040:3:0:0:0] status changed to REPLICATING 2025-06-25T14:24:23.967042Z 7 01h25m01.929560s :BS_NODE DEBUG: [7] VDiskId# [80000051:3:0:1:0] status changed to REPLICATING 2025-06-25T14:24:23.968706Z 10 01h25m02.458560s :BS_NODE DEBUG: [10] VDiskId# [80000010:3:0:0:0] status changed to REPLICATING 2025-06-25T14:24:23.970060Z 5 01h25m02.772560s :BS_NODE DEBUG: [5] VDiskId# [80000052:2:0:2:0] status changed to REPLICATING 2025-06-25T14:24:23.971187Z 10 01h25m02.796560s :BS_NODE DEBUG: [10] VDiskId# [80000070:3:0:0:0] status changed to REPLICATING 2025-06-25T14:24:23.972054Z 4 01h25m03.188560s :BS_NODE DEBUG: [4] VDiskId# [80000022:2:0:2:0] status changed to REPLICATING 2025-06-25T14:24:23.973146Z 7 01h25m03.258560s :BS_NODE DEBUG: [7] VDiskId# [80000001:3:0:1:0] status changed to REPLICATING 2025-06-25T14:24:23.974162Z 4 01h25m04.708560s :BS_NODE DEBUG: [4] VDiskId# [80000002:2:0:2:0] status changed to REPLICATING 2025-06-25T14:24:23.976699Z 4 01h25m04.881560s :BS_NODE DEBUG: [4] VDiskId# [80000012:2:0:2:0] status changed to REPLICATING 2025-06-25T14:24:23.978166Z 7 01h25m04.985560s :BS_NODE DEBUG: [7] VDiskId# [80000021:3:0:1:0] status changed to REPLICATING 2025-06-25T14:24:23.982589Z 4 01h25m05.106560s :BS_NODE DEBUG: [4] VDiskId# [80000032:2:0:2:0] status changed to REPLICATING 2025-06-25T14:24:23.984121Z 5 01h25m05.176560s :BS_NODE DEBUG: [5] VDiskId# [80000072:2:0:2:0] status changed to REPLICATING 2025-06-25T14:24:23.985522Z 2 01h25m05.612560s :BS_NODE DEBUG: [2] VDiskId# [80000042:2:0:2:0] status changed to REPLICATING 2025-06-25T14:24:23.986516Z 8 01h25m05.662560s :BS_NODE DEBUG: [8] VDiskId# [80000061:3:0:1:0] status changed to REPLICATING 2025-06-25T14:24:23.988519Z 7 01h25m06.056560s :BS_NODE DEBUG: [7] VDiskId# [80000031:3:0:1:0] status changed to REPLICATING 2025-06-25T14:24:23.989280Z 5 01h25m07.881560s :BS_NODE DEBUG: [5] VDiskId# [80000052:2:0:2:0] 
status changed to READY 2025-06-25T14:24:23.992508Z 1 01h25m07.882072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:23.992676Z 1 01h25m07.882072s :BS_NODE DEBUG: [1] VDiskId# [80000052:1:0:2:0] destroyed 2025-06-25T14:24:23.993971Z 8 01h25m12.000560s :BS_NODE DEBUG: [8] VDiskId# [80000061:3:0:1:0] status changed to READY 2025-06-25T14:24:23.995966Z 1 01h25m12.001072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:23.996013Z 1 01h25m12.001072s :BS_NODE DEBUG: [1] VDiskId# [80000061:2:0:1:0] destroyed 2025-06-25T14:24:23.996094Z 10 01h25m12.324560s :BS_NODE DEBUG: [10] VDiskId# [80000010:3:0:0:0] status changed to READY 2025-06-25T14:24:23.998178Z 1 01h25m12.325072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:23.998216Z 1 01h25m12.325072s :BS_NODE DEBUG: [1] VDiskId# [80000010:2:0:0:0] destroyed 2025-06-25T14:24:23.998442Z 4 01h25m13.384560s :BS_NODE DEBUG: [4] VDiskId# [80000002:2:0:2:0] status changed to READY 2025-06-25T14:24:24.000242Z 1 01h25m13.385072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:24.000407Z 1 01h25m13.385072s :BS_NODE DEBUG: [1] VDiskId# [80000002:1:0:2:0] destroyed 2025-06-25T14:24:24.002637Z 4 01h25m16.118560s :BS_NODE DEBUG: [4] VDiskId# [80000022:2:0:2:0] status changed to READY 2025-06-25T14:24:24.005958Z 1 01h25m16.119072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:24.005994Z 1 01h25m16.119072s :BS_NODE DEBUG: [1] VDiskId# [80000022:1:0:2:0] destroyed 2025-06-25T14:24:24.008019Z 2 01h25m24.123560s :BS_NODE DEBUG: [2] VDiskId# [80000062:2:0:2:0] status changed to READY 2025-06-25T14:24:24.010703Z 1 01h25m24.124072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:24.010746Z 1 01h25m24.124072s :BS_NODE DEBUG: [1] VDiskId# [80000062:1:0:2:0] destroyed 2025-06-25T14:24:24.011063Z 4 01h25m24.847560s :BS_NODE DEBUG: [4] VDiskId# [80000032:2:0:2:0] status changed to READY 2025-06-25T14:24:24.013416Z 1 01h25m24.848072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:24.013452Z 1 01h25m24.848072s :BS_NODE DEBUG: [1] VDiskId# [80000032:1:0:2:0] destroyed 2025-06-25T14:24:24.013999Z 5 01h25m25.872560s :BS_NODE DEBUG: [5] VDiskId# [80000072:2:0:2:0] status changed to READY 2025-06-25T14:24:24.015833Z 1 01h25m25.873072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:24.015868Z 1 01h25m25.873072s :BS_NODE DEBUG: [1] VDiskId# [80000072:1:0:2:0] destroyed 2025-06-25T14:24:24.018115Z 10 01h25m29.270560s :BS_NODE DEBUG: [10] VDiskId# [80000040:3:0:0:0] status changed to READY 2025-06-25T14:24:24.020676Z 1 01h25m29.271072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:24.020820Z 1 01h25m29.271072s :BS_NODE DEBUG: [1] VDiskId# [80000040:2:0:0:0] destroyed 2025-06-25T14:24:24.023791Z 7 01h25m30.532560s :BS_NODE DEBUG: [7] VDiskId# [80000031:3:0:1:0] status changed to READY 2025-06-25T14:24:24.026035Z 1 01h25m30.533072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:24.026073Z 1 01h25m30.533072s :BS_NODE DEBUG: [1] VDiskId# [80000031:2:0:1:0] destroyed 2025-06-25T14:24:24.026149Z 4 01h25m30.633560s :BS_NODE DEBUG: [4] VDiskId# [80000012:2:0:2:0] status changed to READY 2025-06-25T14:24:24.029043Z 1 01h25m30.634072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:24.029195Z 1 01h25m30.634072s :BS_NODE DEBUG: [1] VDiskId# [80000012:1:0:2:0] destroyed 2025-06-25T14:24:24.029382Z 7 01h25m30.812560s :BS_NODE DEBUG: [7] VDiskId# [80000001:3:0:1:0] status changed to READY 2025-06-25T14:24:24.031473Z 1 01h25m30.813072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 
2025-06-25T14:24:24.031509Z 1 01h25m30.813072s :BS_NODE DEBUG: [1] VDiskId# [80000001:2:0:1:0] destroyed 2025-06-25T14:24:24.032510Z 10 01h25m31.373560s :BS_NODE DEBUG: [10] VDiskId# [80000070:3:0:0:0] status changed to READY 2025-06-25T14:24:24.034685Z 1 01h25m31.374072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:24.034722Z 1 01h25m31.374072s :BS_NODE DEBUG: [1] VDiskId# [80000070:2:0:0:0] destroyed 2025-06-25T14:24:24.035329Z 2 01h25m34.260560s :BS_NODE DEBUG: [2] VDiskId# [80000042:2:0:2:0] status changed to READY 2025-06-25T14:24:24.037505Z 1 01h25m34.261072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:24.037660Z 1 01h25m34.261072s :BS_NODE DEBUG: [1] VDiskId# [80000042:1:0:2:0] destroyed 2025-06-25T14:24:24.037737Z 7 01h25m34.626560s :BS_NODE DEBUG: [7] VDiskId# [80000051:3:0:1:0] status changed to READY 2025-06-25T14:24:24.038786Z 1 01h25m34.627072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:24.038824Z 1 01h25m34.627072s :BS_NODE DEBUG: [1] VDiskId# [80000051:2:0:1:0] destroyed 2025-06-25T14:24:24.042043Z 7 01h25m39.378560s :BS_NODE DEBUG: [7] VDiskId# [80000021:3:0:1:0] status changed to READY 2025-06-25T14:24:24.044582Z 1 01h25m39.379072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:24.044619Z 1 01h25m39.379072s :BS_NODE DEBUG: [1] VDiskId# [80000021:2:0:1:0] destroyed >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob [GOOD] >> CheckIntegrityMirror3of4::PlacementMissingParts [GOOD] >> CheckIntegrityMirror3of4::PlacementDisintegrated ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob [GOOD] Test command err: RandomSeed# 1102498188433041909 SEND TEvPut with key [1:1:1:0:0:3201024:0] 2025-06-25T14:24:22.379895Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 6 2025-06-25T14:24:22.380459Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 Node 4: Node 5: Node 6: 2 Node 7: 3 2025-06-25T14:24:22.489740Z 1 00h01m00.011024s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 7 Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 2 Node 4: Node 5: 1 Node 6: Node 7: 3 Start compaction 1 Finish compaction 1 >> Donor::CheckOnlineReadRequestToDonor [GOOD] >> CheckIntegrityBlock42::PlacementOkHandoff [GOOD] >> CheckIntegrityBlock42::PlacementMissingParts >> CheckIntegrityBlock42::PlacementWithErrorsOnBlobDisks [GOOD] >> CheckIntegrityBlock42::PlacementStatusUnknown |70.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |70.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |70.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 [GOOD] Test command err: RandomSeed# 13490569624403868633 SEND TEvPut with key [1:1:1:0:0:100:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 0 
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:2:0:0:100:0] 2025-06-25T14:24:22.917512Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:6339:830] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Start compaction Finish compaction >> CheckIntegrityBlock42::PlacementAllOnHandoff [GOOD] >> CheckIntegrityBlock42::PlacementDisintegrated >> Donor::MultipleEvicts [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob [GOOD] Test command err: RandomSeed# 1128719588126243120 SEND TEvPut with key [1:1:1:0:0:533504:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:533504:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:533504:0] 2025-06-25T14:24:23.688003Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:533504:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob [GOOD] Test command err: RandomSeed# 779574786965575979 SEND TEvPut with key [1:1:1:0:0:3201024:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:3201024:0] 2025-06-25T14:24:23.335233Z 3 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:188:17] ServerId# [1:296:58] TabletId# 72057594037932033 PipeClientId# [3:188:17] 2025-06-25T14:24:23.335685Z 8 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:223:17] ServerId# [1:301:63] TabletId# 72057594037932033 PipeClientId# [8:223:17] 2025-06-25T14:24:23.335776Z 6 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:209:17] ServerId# [1:299:61] TabletId# 72057594037932033 PipeClientId# [6:209:17] 2025-06-25T14:24:23.335862Z 5 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:202:17] ServerId# [1:298:60] TabletId# 72057594037932033 PipeClientId# [5:202:17] 2025-06-25T14:24:23.336049Z 4 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:195:17] ServerId# [1:297:59] TabletId# 72057594037932033 PipeClientId# [4:195:17] 2025-06-25T14:24:23.336290Z 2 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:181:17] ServerId# [1:295:57] TabletId# 72057594037932033 PipeClientId# [2:181:17] 2025-06-25T14:24:23.336397Z 7 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:216:17] ServerId# [1:300:62] TabletId# 72057594037932033 PipeClientId# [7:216:17] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction >> 
CheckIntegrityMirror3dc::PlacementOkWithErrorsOnBlobDisks [GOOD] >> CheckIntegrityMirror3of4::PlacementBlobIsLost ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc [GOOD] Test command err: RandomSeed# 10406788690817909976 SEND TEvPut with key [1:1:1:0:0:100:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:100:0] 2025-06-25T14:24:23.691565Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction |70.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/olap_workload/olap_workload |70.2%| [LD] {RESULT} $(B)/ydb/tests/stress/olap_workload/olap_workload |70.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/olap_workload/olap_workload |70.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/transfer/transfer |70.2%| [LD] {RESULT} $(B)/ydb/tests/stress/transfer/transfer |70.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/transfer/transfer >> CheckIntegrityBlock42::DataOkAdditionalEqualParts [GOOD] >> CheckIntegrityBlock42::DataErrorSixPartsTwoBroken >> TestCommon::CollectTaskRunnerStatisticsByStage [GOOD] >> TestCommon::CollectTaskRunnerStatisticsByTask [GOOD] >> TestCommon::Empty [GOOD] >> TestCommon::ParseCounterName [GOOD] >> CheckIntegrityMirror3dc::PlacementOkHandoff [GOOD] >> CheckIntegrityMirror3dc::PlacementMissingParts ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::CheckOnlineReadRequestToDonor [GOOD] Test command err: RandomSeed# 3935858431369846947 2025-06-25T14:24:27.787564Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:27.789459Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 12015657308627227238] 2025-06-25T14:24:27.808364Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:0:0:0:2097152:1] 2025-06-25T14:24:27.808577Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 1 PartsResurrected# 1 >> CheckIntegrityBlock42::DataErrorSixPartsOneBroken [GOOD] |70.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption >> CheckIntegrityBlock42::DataErrorFivePartsOneBroken |70.2%| [LD] {RESULT} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |70.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |70.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |70.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |70.4%| [LD] {RESULT} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api |70.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |70.5%| [LD] {RESULT} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |70.5%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests >> CheckIntegrityMirror3dc::PlacementDisintegrated [GOOD] >> CheckIntegrityMirror3dc::DataOk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::MultipleEvicts [GOOD] Test command err: RandomSeed# 10989443450570679125 0 donors: 2025-06-25T14:24:27.384508Z 4 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:27.391937Z 4 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 126813823252564330] 2025-06-25T14:24:27.408133Z 4 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 2:1000 2025-06-25T14:24:27.494981Z 2 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:27.502246Z 2 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 126813823252564330] 2025-06-25T14:24:27.520399Z 2 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 4:1000 2025-06-25T14:24:27.582673Z 4 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:27.590035Z 4 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 126813823252564330] 2025-06-25T14:24:27.602128Z 4 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 2:1000 2025-06-25T14:24:27.669266Z 2 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:27.676761Z 2 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 126813823252564330] 2025-06-25T14:24:27.689006Z 2 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 4:1000 2025-06-25T14:24:27.751545Z 4 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:27.759832Z 4 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 126813823252564330] 2025-06-25T14:24:27.772369Z 4 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 2:1000 2025-06-25T14:24:27.846414Z 2 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:27.853464Z 2 00h00m20.016144s :BS_SYNCER 
ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 126813823252564330] 2025-06-25T14:24:27.866046Z 2 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 4:1000 2025-06-25T14:24:27.937191Z 4 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:27.944695Z 4 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 126813823252564330] 2025-06-25T14:24:27.957165Z 4 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 2:1000 2025-06-25T14:24:28.031623Z 2 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:28.039967Z 2 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 126813823252564330] 2025-06-25T14:24:28.054252Z 2 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 4:1000 2025-06-25T14:24:28.107924Z 4 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:28.115173Z 4 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 126813823252564330] 2025-06-25T14:24:28.127179Z 4 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 2:1000 >> CheckIntegrityMirror3dc::DataErrorOneCopy [GOOD] >> CheckIntegrityMirror3dc::DataErrorManyCopies >> Donor::SkipBadDonor [GOOD] |70.8%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/dq/provider/ut/unittest >> TestCommon::ParseCounterName [GOOD] |70.8%| [TS] {RESULT} ydb/library/yql/providers/dq/provider/ut/unittest >> Donor::ContinueWithFaultyDonor [GOOD] >> CheckIntegrityMirror3of4::PlacementDisintegrated [GOOD] >> CheckIntegrityBlock42::PlacementDisintegrated [GOOD] >> CheckIntegrityBlock42::DataStatusUnknown >> Donor::SlayAfterWiping [GOOD] >> CheckIntegrityBlock42::PlacementMissingParts [GOOD] >> CheckIntegrityBlock42::PlacementStatusUnknown [GOOD] >> CheckIntegrityMirror3of4::PlacementBlobIsLost [GOOD] >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::SkipBadDonor [GOOD] Test command err: RandomSeed# 8732622258278457440 2025-06-25T14:24:27.946774Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:27.948730Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16992773752109288485] 2025-06-25T14:24:27.966908Z 1 00h01m14.361024s :BS_SYNCER ERROR: 
PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> CheckIntegrityBlock42::DataErrorSixPartsTwoBroken [GOOD] >> CheckIntegrityBlock42::DataOkErasureFiveParts >> BSCReadOnlyPDisk::ReadOnlyNotAllowed [GOOD] |71.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::ContinueWithFaultyDonor [GOOD] Test command err: RandomSeed# 7971213434203871121 2025-06-25T14:24:28.111548Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:28.113571Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 13421191360045257637] 2025-06-25T14:24:28.138843Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> CheckIntegrityBlock42::DataErrorFivePartsOneBroken [GOOD] >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken >> BlobDepot::BasicPutAndGet >> CheckIntegrityMirror3dc::PlacementMissingParts [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3of4::PlacementDisintegrated [GOOD] Test command err: RandomSeed# 13570248867246646016 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** Group is disintegrated or has network problems ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::SlayAfterWiping [GOOD] Test command err: RandomSeed# 2663247056958645568 2025-06-25T14:24:28.100365Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:28.101594Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 6896371296246389150] 2025-06-25T14:24:28.118220Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::PlacementMissingParts [GOOD] Test command err: RandomSeed# 13848545866960669791 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:4:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::PlacementStatusUnknown [GOOD] Test command err: RandomSeed# 453079598865568062 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::RestartAndReadOnlyConsecutive [GOOD] Test command err: RandomSeed# 3196076689330242928 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> 
CheckIntegrityMirror3of4::PlacementBlobIsLost [GOOD] Test command err: RandomSeed# 4550107583280660859 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:2:0] FINISHED WITH OK *** |71.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |71.4%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |71.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |71.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |71.4%| [LD] {RESULT} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |71.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts >> CheckIntegrityMirror3dc::DataOk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::PlacementMissingParts [GOOD] Test command err: RandomSeed# 17788685194076844887 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyNotAllowed [GOOD] Test command err: RandomSeed# 11710205566390752 >> CheckIntegrityMirror3dc::DataErrorManyCopies [GOOD] >> CheckIntegrityBlock42::DataStatusUnknown [GOOD] |71.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/dq/comp_nodes/ut/ydb-library-yql-dq-comp_nodes-ut |71.5%| [LD] {RESULT} $(B)/ydb/library/yql/dq/comp_nodes/ut/ydb-library-yql-dq-comp_nodes-ut >> BsControllerTest::SelfHealBlock4Plus2 [GOOD] |71.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/dq/comp_nodes/ut/ydb-library-yql-dq-comp_nodes-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::DataOk [GOOD] Test command err: RandomSeed# 16622176195287372430 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO 
[82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** Group is disintegrated or has network problems *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0] Layout info: ver0 disks [ 0 1 2 ] >> CheckIntegrityBlock42::DataOkErasureFiveParts [GOOD] >> BlobDepot::BasicPutAndGet [GOOD] >> BlobDepot::TestBlockedEvGetRequest >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataStatusUnknown [GOOD] Test command err: RandomSeed# 13394672105910890655 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Group is disintegrated or has network problems *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: part 2: part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: |71.6%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests >> TArrowPushDown::MatchSeveralRowGroups [GOOD] >> TArrowPushDown::SimplePushDown [GOOD] >> TArrowPushDown::FilterEverything [GOOD] |71.6%| [LD] {RESULT} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::DataErrorManyCopies [GOOD] Test command err: RandomSeed# 9630375889747488680 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0] Layout info: ver0 disks [ 0 1 ], ver1 disks [ 2 ] ERROR: There are unequal parts *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0] Layout info: ver0 disks [ 0 1 2 ], ver1 disks [ 3 4 5 ] ERROR: There are unequal parts |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |71.6%| [LD] {RESULT} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataOkErasureFiveParts [GOOD] Test command err: RandomSeed# 3219645202225364878 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] 
FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:4:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 0 ] part 2: ver0 disks [ 7 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 6 0 ]; part 2 disks [ 7 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 0 ]; part 2 disks [ 7 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK >> BlobDepot::TestBlockedEvGetRequest [GOOD] >> BlobDepot::BasicRange >> BSCReadOnlyPDisk::ReadOnlySlay [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken [GOOD] Test command err: RandomSeed# 13828473619095796383 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:4:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 ], ver1 disks [ 7 ], ver2 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] ERROR: There are unequal parts Erasure info: { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } 
CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] 
-> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: Erasure info: ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:3:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 ], ver1 disks [ 0 ] part 2: ver0 disks [ 6 ], ver1 disks [ 1 ] part 3: ver0 disks [ 6 ], ver1 disks [ 2 ] part 4: ver0 disks [ 3 ], ver1 disks [ 6 ] part 5: ver0 disks [ 4 ], ver1 disks [ 6 ] part 6: ver0 disks [ 5 ], ver1 disks [ 6 ] ERROR: There are unequal parts Erasure info: { part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 
disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> 
OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK ERROR: There are erasure restore fails |71.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/s3/actors/ut/unittest >> TArrowPushDown::FilterEverything [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::SelfHealBlock4Plus2 [GOOD] Test command err: 2025-06-25T14:24:16.973403Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-25T14:24:16.973523Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-25T14:24:16.973694Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-25T14:24:16.973712Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-25T14:24:16.973815Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-25T14:24:16.973832Z 3 00h00m00.000000s :BS_NODE DEBUG: 
[3] Connect 2025-06-25T14:24:16.973890Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-25T14:24:16.973907Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-25T14:24:16.974003Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-25T14:24:16.974020Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-25T14:24:16.974085Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-25T14:24:16.974100Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-25T14:24:16.974124Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-25T14:24:16.974139Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-25T14:24:16.974164Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-25T14:24:16.974180Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-25T14:24:16.974232Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-25T14:24:16.974247Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-25T14:24:16.974317Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-25T14:24:16.974413Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-25T14:24:16.974758Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-06-25T14:24:16.974778Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-25T14:24:16.974835Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-25T14:24:16.974851Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-25T14:24:16.974910Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-25T14:24:16.974927Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-25T14:24:16.974990Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-25T14:24:16.975006Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-25T14:24:16.975032Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-06-25T14:24:16.975047Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-06-25T14:24:16.975131Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-06-25T14:24:16.975146Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-06-25T14:24:16.975171Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-06-25T14:24:16.975187Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-06-25T14:24:16.975295Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-06-25T14:24:16.975343Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-06-25T14:24:16.975423Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-06-25T14:24:16.975479Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-06-25T14:24:16.975507Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-06-25T14:24:16.975525Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-06-25T14:24:16.975582Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-06-25T14:24:16.975600Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-06-25T14:24:16.975628Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-06-25T14:24:16.975643Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-06-25T14:24:16.975666Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-06-25T14:24:16.975681Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-06-25T14:24:16.975712Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-06-25T14:24:16.975730Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-06-25T14:24:16.975761Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-06-25T14:24:16.975778Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-06-25T14:24:16.975830Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-06-25T14:24:16.975857Z 26 00h00m00.000000s 
:BS_NODE DEBUG: [26] Connect 2025-06-25T14:24:16.976016Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-06-25T14:24:16.976078Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-06-25T14:24:16.976112Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-06-25T14:24:16.976130Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-06-25T14:24:16.976186Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-06-25T14:24:16.976202Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-06-25T14:24:16.976263Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-06-25T14:24:16.976278Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-06-25T14:24:16.976302Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-06-25T14:24:16.976331Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-06-25T14:24:16.976359Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-06-25T14:24:16.976374Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-06-25T14:24:17.026331Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2157:49] Status# ERROR ClientId# [1:2157:49] ServerId# [0:0:0] PipeClient# [1:2157:49] 2025-06-25T14:24:17.027862Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2158:37] Status# ERROR ClientId# [2:2158:37] ServerId# [0:0:0] PipeClient# [2:2158:37] 2025-06-25T14:24:17.027910Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2159:37] Status# ERROR ClientId# [3:2159:37] ServerId# [0:0:0] PipeClient# [3:2159:37] 2025-06-25T14:24:17.027986Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2160:37] Status# ERROR ClientId# [4:2160:37] ServerId# [0:0:0] PipeClient# [4:2160:37] 2025-06-25T14:24:17.028069Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2161:37] Status# ERROR ClientId# [5:2161:37] ServerId# [0:0:0] PipeClient# [5:2161:37] 2025-06-25T14:24:17.028104Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2162:37] Status# ERROR ClientId# [6:2162:37] ServerId# [0:0:0] PipeClient# [6:2162:37] 2025-06-25T14:24:17.028178Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2163:37] Status# ERROR ClientId# [7:2163:37] ServerId# [0:0:0] PipeClient# [7:2163:37] 2025-06-25T14:24:17.028260Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2164:37] Status# ERROR ClientId# [8:2164:37] ServerId# [0:0:0] PipeClient# [8:2164:37] 2025-06-25T14:24:17.032384Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2165:37] Status# ERROR ClientId# [9:2165:37] ServerId# [0:0:0] PipeClient# [9:2165:37] 2025-06-25T14:24:17.032453Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2166:37] Status# ERROR ClientId# [10:2166:37] ServerId# [0:0:0] PipeClient# [10:2166:37] 2025-06-25T14:24:17.032490Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2167:37] Status# ERROR ClientId# [11:2167:37] ServerId# [0:0:0] PipeClient# [11:2167:37] 2025-06-25T14:24:17.032526Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2168:37] Status# ERROR ClientId# [12:2168:37] ServerId# [0:0:0] PipeClient# [12:2168:37] 2025-06-25T14:24:17.032562Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2169:37] Status# ERROR ClientId# [13:2169:37] ServerId# [0:0:0] PipeClient# [13:2169:37] 2025-06-25T14:24:17.032594Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2170:37] Status# ERROR ClientId# [14:2170:37] ServerId# [0:0:0] PipeClient# [14:2170:37] 2025-06-25T14:24:17.032626Z 15 
00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2171:37] Status# ERROR ClientId# [15:2171:37] ServerId# [0:0:0] PipeClient# [15:2171:37] 2025-06-25T14:24:17.032659Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2172:37] Status# ERROR ClientId# [16:2172:37] ServerId# [0:0:0] PipeClient# [16:2172:37] 2025-06-25T14:24:17.032786Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2173:37] Status# ERROR ClientId# [17:2173:37] ServerId# [0:0:0] PipeClient# [17:2173:37] 2025-06-25T14:24:17.032824Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2174:37] Status# ERROR ClientId# [18:2174:37] ServerId# [0:0:0] PipeClient# [18:2174:37] 2025-06-25T14:24:17.032858Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2175:37] Status# ERROR ClientId# [19:2175:37] ServerId# [0:0:0] PipeClient# [19:2175:37] 2025-06-25T14:24:17.032891Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2176:37] Status# ERROR ClientId# [20:2176:37] ServerId# [0:0:0] PipeClient# [20:2176:37] 2025-06-25T14:24:17.032940Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2177:37] Status# ERROR ClientId# [21:2177:37] ServerId# [0:0:0] PipeClient# [21:2177:37] 2025-06-25T14:24:17.032990Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2178:37] Status# ERROR ClientId# [22:2178:37] ServerId# [0:0:0] PipeClient# [22:2178:37] 2025-06-25T14:24:17.033030Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2179:37] Status# ERROR ClientId# [23:2179:37] ServerId# [0:0:0] PipeClient# [23:2179:37] 2025-06-25T14:24:17.033068Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2180:37] Status# ERROR ClientId# [24:2180:37] ServerId# [0:0:0] PipeClient# [24:2180:37] 2025-06-25T14:24:17.033104Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2181:37] Status# ERROR ClientId# [25:2181:37] ServerId# [0:0:0] PipeClient# [25:2181:37] 2025-06-25T14:24:17.033156Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2182:37] Status# ERROR ClientId# [26:2182:37] ServerId# [0:0:0] PipeClient# [26:2182:37] 2025-06-25T14:24:17.033191Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2183:37] Status# ERROR ClientId# [27:2183:37] ServerId# [0:0:0] PipeClient# [27:2183:37] 2025-06-25T14:24:17.033233Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2184:37] Status# ERROR ClientId# [28:2184:37] ServerId# [0:0:0] PipeClient# [28:2184:37] 2025-06-25T14:24:17.033276Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2185:37] Status# ERROR ClientId# [29:2185:37] ServerId# [0:0:0] PipeClient# [29:2185:37] 2025-06-25T14:24:17.033323Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2186:37] Status# ERROR ClientId# [30:2186:37] ServerId# [0:0:0] PipeClient# [30:2186:37] 2025-06-25T14:24:17.033363Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2187:37] Status# ERROR ClientId# [31:2187:37] ServerId# [0:0:0] PipeClient# [31:2187:37] 2025-06-25T14:24:17.033398Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2188:37] Status# ERROR ClientId# [32:2188:37] ServerId# [0:0:0] PipeClient# [32:2188:37] 2025-06-25T14:24:17.279469Z 1 00h00m00.002048s :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.174691s 2025-06-25T14:24:17.279739Z 1 00h00m00.002048s 
:BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.174975s 2025-06-25T14:24:17.308603Z 1 00h00m00.002560s :BS_NODE DEBUG: [1] CheckState from [1:2257:73] expected 1 current 0 2025-06-25T14:24:17.308756Z 2 00h00m00.002560s :BS_NODE DEBUG: [2] CheckState from [2:2258:38] expected 1 current 0 2025-06-25T14:24:17.308783Z 3 00h00m00.002560s :BS_NODE DEBUG: [3] CheckState from [3:2259:38] expected 1 current 0 2025-06-25T14:24:17.308829Z 4 00h00m00.002560s :BS_NODE DEBUG: [4] CheckState from [4:2260:38] expected 1 current 0 2025-06-25T14:24:17.308857Z 5 00h00m00.002560s :BS_NODE DEBUG: [5] CheckState from [5:2261:38] expected 1 current 0 2025-06-25T14:24:17.308880Z 6 00h00m00.002560s :BS_NODE DEBUG: [6] CheckState from [6:2262:38] expected 1 current 0 2025-06-25T14:24:17.308912Z 7 00h00m00.002560s :BS_NODE DEBUG: [7] CheckState from [7 ... 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483653 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:30.170501Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483653 VDiskId# [80000005:2:0:7:0] DiskIsOk# true 2025-06-25T14:24:30.173940Z 1 05h15m00.122016s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483653 Items# [80000005:2:0:5:0]: 14:1001:1000 -> 6:1001:1015 ConfigTxSeqNo# 488 2025-06-25T14:24:30.173980Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483653 Success# true 2025-06-25T14:24:30.174098Z 3 05h15m00.122016s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2025-06-25T14:24:30.174156Z 3 05h15m00.122016s :BS_NODE DEBUG: [3] VDiskId# [80000005:2:0:6:0] -> [80000005:3:0:6:0] 2025-06-25T14:24:30.174241Z 6 05h15m00.122016s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2025-06-25T14:24:30.174278Z 6 05h15m00.122016s :BS_NODE DEBUG: [6] VDiskId# [80000005:3:0:5:0] PDiskId# 1001 VSlotId# 1015 created 2025-06-25T14:24:30.174339Z 6 05h15m00.122016s :BS_NODE DEBUG: [6] VDiskId# [80000005:3:0:5:0] status changed to INIT_PENDING 2025-06-25T14:24:30.174417Z 9 05h15m00.122016s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2025-06-25T14:24:30.174462Z 9 05h15m00.122016s :BS_NODE DEBUG: [9] VDiskId# [80000005:2:0:0:0] -> [80000005:3:0:0:0] 2025-06-25T14:24:30.174532Z 10 05h15m00.122016s :BS_NODE DEBUG: [10] NodeServiceSetUpdate 2025-06-25T14:24:30.174572Z 10 05h15m00.122016s :BS_NODE DEBUG: [10] VDiskId# [80000005:2:0:1:0] -> [80000005:3:0:1:0] 2025-06-25T14:24:30.174634Z 11 05h15m00.122016s :BS_NODE DEBUG: [11] NodeServiceSetUpdate 2025-06-25T14:24:30.174674Z 11 05h15m00.122016s :BS_NODE DEBUG: [11] VDiskId# [80000005:2:0:2:0] -> [80000005:3:0:2:0] 2025-06-25T14:24:30.174740Z 12 05h15m00.122016s :BS_NODE DEBUG: [12] NodeServiceSetUpdate 2025-06-25T14:24:30.174783Z 12 05h15m00.122016s :BS_NODE DEBUG: [12] VDiskId# [80000005:2:0:3:0] -> [80000005:3:0:3:0] 2025-06-25T14:24:30.174853Z 13 05h15m00.122016s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-06-25T14:24:30.174896Z 13 05h15m00.122016s :BS_NODE DEBUG: [13] VDiskId# [80000005:2:0:4:0] -> [80000005:3:0:4:0] 2025-06-25T14:24:30.174955Z 14 05h15m00.122016s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:30.175024Z 16 05h15m00.122016s :BS_NODE DEBUG: [16] NodeServiceSetUpdate 2025-06-25T14:24:30.175065Z 16 05h15m00.122016s :BS_NODE DEBUG: [16] VDiskId# [80000005:2:0:7:0] -> [80000005:3:0:7:0] 2025-06-25T14:24:30.175337Z 1 05h15m00.122016s 
:BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483655 2025-06-25T14:24:30.176023Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:30.176061Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:4:0:0:0] DiskIsOk# true 2025-06-25T14:24:30.176096Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:30.176123Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:4:0:1:0] DiskIsOk# true 2025-06-25T14:24:30.176152Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:30.176180Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:4:0:2:0] DiskIsOk# true 2025-06-25T14:24:30.176208Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:30.176236Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:4:0:3:0] DiskIsOk# true 2025-06-25T14:24:30.176268Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:30.176296Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:4:0:5:0] DiskIsOk# true 2025-06-25T14:24:30.176389Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:30.176419Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:4:0:6:0] DiskIsOk# true 2025-06-25T14:24:30.176453Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483655 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:30.176483Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483655 VDiskId# [80000007:4:0:7:0] DiskIsOk# true 2025-06-25T14:24:30.180040Z 1 05h15m00.122528s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483655 Items# [80000007:4:0:4:0]: 14:1001:1009 -> 7:1001:1015 ConfigTxSeqNo# 489 2025-06-25T14:24:30.180077Z 1 05h15m00.122528s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483655 Success# true 2025-06-25T14:24:30.180198Z 24 05h15m00.122528s :BS_NODE DEBUG: [24] NodeServiceSetUpdate 2025-06-25T14:24:30.180256Z 24 05h15m00.122528s :BS_NODE DEBUG: [24] VDiskId# [80000007:4:0:0:0] -> [80000007:5:0:0:0] 2025-06-25T14:24:30.180369Z 7 05h15m00.122528s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2025-06-25T14:24:30.180407Z 7 05h15m00.122528s :BS_NODE DEBUG: [7] VDiskId# [80000007:5:0:4:0] PDiskId# 1001 VSlotId# 1015 created 2025-06-25T14:24:30.180471Z 7 05h15m00.122528s :BS_NODE DEBUG: [7] 
VDiskId# [80000007:5:0:4:0] status changed to INIT_PENDING 2025-06-25T14:24:30.180547Z 26 05h15m00.122528s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-25T14:24:30.180587Z 26 05h15m00.122528s :BS_NODE DEBUG: [26] VDiskId# [80000007:4:0:1:0] -> [80000007:5:0:1:0] 2025-06-25T14:24:30.180658Z 11 05h15m00.122528s :BS_NODE DEBUG: [11] NodeServiceSetUpdate 2025-06-25T14:24:30.180698Z 11 05h15m00.122528s :BS_NODE DEBUG: [11] VDiskId# [80000007:4:0:2:0] -> [80000007:5:0:2:0] 2025-06-25T14:24:30.180767Z 28 05h15m00.122528s :BS_NODE DEBUG: [28] NodeServiceSetUpdate 2025-06-25T14:24:30.180810Z 28 05h15m00.122528s :BS_NODE DEBUG: [28] VDiskId# [80000007:4:0:3:0] -> [80000007:5:0:3:0] 2025-06-25T14:24:30.180884Z 30 05h15m00.122528s :BS_NODE DEBUG: [30] NodeServiceSetUpdate 2025-06-25T14:24:30.180923Z 30 05h15m00.122528s :BS_NODE DEBUG: [30] VDiskId# [80000007:4:0:5:0] -> [80000007:5:0:5:0] 2025-06-25T14:24:30.180991Z 31 05h15m00.122528s :BS_NODE DEBUG: [31] NodeServiceSetUpdate 2025-06-25T14:24:30.181036Z 31 05h15m00.122528s :BS_NODE DEBUG: [31] VDiskId# [80000007:4:0:6:0] -> [80000007:5:0:6:0] 2025-06-25T14:24:30.181085Z 14 05h15m00.122528s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:30.181149Z 32 05h15m00.122528s :BS_NODE DEBUG: [32] NodeServiceSetUpdate 2025-06-25T14:24:30.181187Z 32 05h15m00.122528s :BS_NODE DEBUG: [32] VDiskId# [80000007:4:0:7:0] -> [80000007:5:0:7:0] 2025-06-25T14:24:30.182046Z 6 05h15m02.008944s :BS_NODE DEBUG: [6] VDiskId# [8000001d:3:0:5:0] status changed to REPLICATING 2025-06-25T14:24:30.182576Z 6 05h15m02.424920s :BS_NODE DEBUG: [6] VDiskId# [8000003d:3:0:5:0] status changed to REPLICATING 2025-06-25T14:24:30.183182Z 6 05h15m02.557432s :BS_NODE DEBUG: [6] VDiskId# [8000002d:3:0:5:0] status changed to REPLICATING 2025-06-25T14:24:30.183899Z 7 05h15m03.052504s :BS_NODE DEBUG: [7] VDiskId# [80000015:3:0:5:0] status changed to REPLICATING 2025-06-25T14:24:30.184256Z 7 05h15m03.186528s :BS_NODE DEBUG: [7] VDiskId# [80000007:5:0:4:0] status changed to REPLICATING 2025-06-25T14:24:30.184614Z 6 05h15m03.627992s :BS_NODE DEBUG: [6] VDiskId# [80000025:4:0:5:0] status changed to REPLICATING 2025-06-25T14:24:30.185240Z 6 05h15m03.849016s :BS_NODE DEBUG: [6] VDiskId# [80000005:3:0:5:0] status changed to REPLICATING 2025-06-25T14:24:30.185879Z 6 05h15m04.384456s :BS_NODE DEBUG: [6] VDiskId# [8000000d:3:0:5:0] status changed to REPLICATING 2025-06-25T14:24:30.186526Z 9 05h15m04.431968s :BS_NODE DEBUG: [9] VDiskId# [80000020:5:0:1:0] status changed to REPLICATING 2025-06-25T14:24:30.187891Z 6 05h15m05.522480s :BS_NODE DEBUG: [6] VDiskId# [80000035:3:0:5:0] status changed to REPLICATING 2025-06-25T14:24:30.188583Z 6 05h15m08.084944s :BS_NODE DEBUG: [6] VDiskId# [8000001d:3:0:5:0] status changed to READY 2025-06-25T14:24:30.189624Z 14 05h15m08.085456s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:30.189674Z 14 05h15m08.085456s :BS_NODE DEBUG: [14] VDiskId# [8000001d:2:0:5:0] destroyed 2025-06-25T14:24:30.189997Z 6 05h15m10.262432s :BS_NODE DEBUG: [6] VDiskId# [8000002d:3:0:5:0] status changed to READY 2025-06-25T14:24:30.190947Z 14 05h15m10.262944s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:30.190989Z 14 05h15m10.262944s :BS_NODE DEBUG: [14] VDiskId# [8000002d:2:0:5:0] destroyed 2025-06-25T14:24:30.191117Z 7 05h15m10.386504s :BS_NODE DEBUG: [7] VDiskId# [80000015:3:0:5:0] status changed to READY 2025-06-25T14:24:30.191754Z 14 05h15m10.387016s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:30.191794Z 14 05h15m10.387016s :BS_NODE 
DEBUG: [14] VDiskId# [80000015:2:0:5:0] destroyed 2025-06-25T14:24:30.192467Z 7 05h15m19.067528s :BS_NODE DEBUG: [7] VDiskId# [80000007:5:0:4:0] status changed to READY 2025-06-25T14:24:30.193103Z 14 05h15m19.068040s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:30.193144Z 14 05h15m19.068040s :BS_NODE DEBUG: [14] VDiskId# [80000007:4:0:4:0] destroyed 2025-06-25T14:24:30.193243Z 6 05h15m19.171920s :BS_NODE DEBUG: [6] VDiskId# [8000003d:3:0:5:0] status changed to READY 2025-06-25T14:24:30.194122Z 14 05h15m19.172432s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:30.194162Z 14 05h15m19.172432s :BS_NODE DEBUG: [14] VDiskId# [8000003d:2:0:5:0] destroyed 2025-06-25T14:24:30.194451Z 6 05h15m20.463992s :BS_NODE DEBUG: [6] VDiskId# [80000025:4:0:5:0] status changed to READY 2025-06-25T14:24:30.195303Z 14 05h15m20.464504s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:30.195342Z 14 05h15m20.464504s :BS_NODE DEBUG: [14] VDiskId# [80000025:3:0:5:0] destroyed 2025-06-25T14:24:30.196003Z 6 05h15m25.296480s :BS_NODE DEBUG: [6] VDiskId# [80000035:3:0:5:0] status changed to READY 2025-06-25T14:24:30.196905Z 14 05h15m25.296992s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:30.196945Z 14 05h15m25.296992s :BS_NODE DEBUG: [14] VDiskId# [80000035:2:0:5:0] destroyed 2025-06-25T14:24:30.197725Z 9 05h15m32.975968s :BS_NODE DEBUG: [9] VDiskId# [80000020:5:0:1:0] status changed to READY 2025-06-25T14:24:30.198516Z 14 05h15m32.976480s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:30.198556Z 14 05h15m32.976480s :BS_NODE DEBUG: [14] VDiskId# [80000020:4:0:1:0] destroyed 2025-06-25T14:24:30.199041Z 6 05h15m34.488016s :BS_NODE DEBUG: [6] VDiskId# [80000005:3:0:5:0] status changed to READY 2025-06-25T14:24:30.199954Z 14 05h15m34.488528s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:30.199992Z 14 05h15m34.488528s :BS_NODE DEBUG: [14] VDiskId# [80000005:2:0:5:0] destroyed 2025-06-25T14:24:30.200537Z 6 05h15m37.186456s :BS_NODE DEBUG: [6] VDiskId# [8000000d:3:0:5:0] status changed to READY 2025-06-25T14:24:30.201410Z 14 05h15m37.186968s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-25T14:24:30.201454Z 14 05h15m37.186968s :BS_NODE DEBUG: [14] VDiskId# [8000000d:2:0:5:0] destroyed |71.6%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TDqBlockHashJoinBasicTest::TestBasicPassthrough [GOOD] >> TDqBlockHashJoinBasicTest::TestEmptyStreams >> TDqBlockHashJoinBasicTest::TestEmptyStreams [GOOD] |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |71.6%| [TS] {RESULT} ydb/library/yql/providers/s3/actors/ut/unittest |71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |71.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlySlay [GOOD] Test command err: RandomSeed# 6871912810270190544 2025-06-25T14:24:21.828644Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:21.830640Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 2945600917073217672] 2025-06-25T14:24:21.875354Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |71.6%| [LD] {RESULT} $(B)/ydb/tests/stress/show_create/view/tests/ydb-tests-stress-show_create-view-tests |71.6%| [LD] {RESULT} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests |71.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/test-results/unittest/{meta.json ... results_accumulator.log} |71.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |71.7%| [LD] {RESULT} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |71.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |71.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |71.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |71.7%| [LD] {RESULT} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |71.9%| [TM] {asan, default-linux-x86_64, release} ydb/library/yql/dq/comp_nodes/ut/unittest >> TDqBlockHashJoinBasicTest::TestEmptyStreams [GOOD] |71.9%| [TM] {RESULT} ydb/library/yql/dq/comp_nodes/ut/unittest >> BlobDepot::BasicRange [GOOD] >> BlobDepot::BasicDiscover |72.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |72.0%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |72.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |72.0%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |72.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |72.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |72.1%| [LD] {RESULT} 
$(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |72.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql >> BSCRestartPDisk::RestartNotAllowed [GOOD] >> BlobDepot::BasicDiscover [GOOD] >> BlobDepot::BasicBlock |72.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |72.1%| [LD] {RESULT} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |72.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |72.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/test_connection/ut/unittest |72.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/sql/ydb-tests-sql |72.1%| [TS] {RESULT} ydb/core/fq/libs/test_connection/ut/unittest |72.1%| [LD] {RESULT} $(B)/ydb/tests/sql/ydb-tests-sql |72.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/sql/ydb-tests-sql >> TSchedulerTest::NewbieFirst [GOOD] >> TSchedulerTest::DoNotReserveForSmall [GOOD] >> TSchedulerTest::FifoAfterOneHour [GOOD] >> TSchedulerTest::ReserveForSmall [GOOD] >> TSchedulerTest::OneUserForCluster [GOOD] >> TSchedulerTest::HalfWorkersForSmall >> TSchedulerTest::SimpleFifo [GOOD] >> TSchedulerTest::HalfWorkersForSmall [GOOD] >> TSchedulerTest::Use75PercentForLargeInNonOverload [GOOD] >> TSchedulerTest::UseOnlyHalfForLargeInOverload [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartNotAllowed [GOOD] Test command err: RandomSeed# 10721962519207248880 |72.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |72.5%| [LD] {RESULT} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests |72.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/s3_backups/tests/ydb-tests-stress-s3_backups-tests >> BlobDepot::BasicBlock [GOOD] >> BlobDepot::BasicCollectGarbage |72.6%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |72.6%| [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/dq/scheduler/ut/unittest >> TSchedulerTest::UseOnlyHalfForLargeInOverload [GOOD] >> YtLookupActor::Lookup |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |72.6%| [TS] {RESULT} ydb/library/yql/providers/dq/scheduler/ut/unittest |72.6%| [LD] {RESULT} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut >> YtLookupActor::Lookup [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/yt/actors/ut/unittest >> YtLookupActor::Lookup [GOOD] Test command err: 2025-06-25 14:24:33.677 INFO ydb-library-yql-providers-yt-actors-ut(pid=11914, tid=0x00007F9632387B40) [default] storage.cpp:178: FileStorage initialized in "/home/runner/.ya/build/build_root/yft8/0007fb/r3tmp/tmppYThb9/", temporary dir: "/home/runner/.ya/build/build_root/yft8/0007fb/r3tmp/tmppYThb9/11914", files: 0, total size: 0 2025-06-25 14:24:33.909 INFO ydb-library-yql-providers-yt-actors-ut(pid=11914, tid=0x00007F9632387B40) [YT] yql_yt_lookup_actor.cpp:103: New Yt proivider lookup source actor(ActorId=[1:4:2051]) for cluster=Plato, table=Lookup 2025-06-25 14:24:33.910 DEBUG ydb-library-yql-providers-yt-actors-ut(pid=11914, tid=0x00007F9632387B40) [YT] 
yql_yt_lookup_actor.cpp:172: ActorId=[1:4:2051] Got LookupRequest for 4 keys |72.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |72.6%| [TS] {RESULT} ydb/library/yql/providers/yt/actors/ut/unittest |72.6%| [LD] {RESULT} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |72.6%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |72.7%| [LD] {RESULT} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/cdc/tests/ydb-tests-stress-cdc-tests |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |72.7%| [LD] {RESULT} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut >> PushdownTest::NoFilter [GOOD] |72.7%| [LD] {RESULT} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut >> PushdownTest::Equal [GOOD] >> XmlBuilderTest::WritesProperly [GOOD] >> XmlBuilderTest::MacroBuilder [GOOD] >> PushdownTest::NotEqualInt32Int64 [GOOD] >> BlobDepot::BasicCollectGarbage [GOOD] >> BlobDepot::VerifiedRandom >> PushdownTest::TrueCoalesce [GOOD] |72.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/s3_backups/s3_backups |72.7%| [LD] {RESULT} $(B)/ydb/tests/stress/s3_backups/s3_backups >> PushdownTest::CmpInt16AndInt32 [GOOD] |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/s3_backups/s3_backups |72.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/http/ut/unittest >> XmlBuilderTest::MacroBuilder [GOOD] |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/s3_import/large/ydb-tests-olap-s3_import-large >> PushdownTest::PartialAnd [GOOD] |72.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/s3_import/large/ydb-tests-olap-s3_import-large |72.8%| [TS] {RESULT} ydb/core/ymq/http/ut/unittest |72.8%| [LD] {RESULT} $(B)/ydb/tests/olap/s3_import/large/ydb-tests-olap-s3_import-large |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |72.8%| [LD] {RESULT} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests >> PushdownTest::PartialAndOneBranchPushdownable >> PushdownTest::PartialAndOneBranchPushdownable [GOOD] >> BsControllerTest::SelfHealMirror3dc [GOOD] >> PushdownTest::NotNull >> PushdownTest::NotNull [GOOD] |72.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut >> MdbEndpoingGenerator::Legacy [GOOD] >> MdbEndpoingGenerator::Generic_NoTransformHost [GOOD] >> MdbEndpoingGenerator::Generic_WithTransformHost 
[GOOD] >> PushdownTest::NotNullForDatetime [GOOD] >> PushdownTest::IsNull [GOOD] >> PushdownTest::StringFieldsNotSupported >> PushdownTest::StringFieldsNotSupported [GOOD] >> PushdownTest::StringFieldsNotSupported2 [GOOD] |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |72.8%| [LD] {RESULT} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |72.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/db_id_async_resolver_impl/ut/unittest >> MdbEndpoingGenerator::Generic_WithTransformHost [GOOD] |72.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/vector_index/large/ydb-tests-datashard-vector_index-large >> PushdownTest::RegexpPushdown [GOOD] |72.8%| [TS] {RESULT} ydb/core/fq/libs/db_id_async_resolver_impl/ut/unittest |72.8%| [LD] {RESULT} $(B)/ydb/tests/datashard/vector_index/large/ydb-tests-datashard-vector_index-large |72.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/vector_index/large/ydb-tests-datashard-vector_index-large ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::SelfHealMirror3dc [GOOD] Test command err: 2025-06-25T14:24:17.130892Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-25T14:24:17.130943Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-25T14:24:17.131027Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-25T14:24:17.131049Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-25T14:24:17.131149Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-25T14:24:17.131172Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-06-25T14:24:17.131255Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-25T14:24:17.131307Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-25T14:24:17.131347Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-25T14:24:17.131368Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-25T14:24:17.131412Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-25T14:24:17.131431Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-25T14:24:17.131523Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-25T14:24:17.131546Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-25T14:24:17.131616Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-25T14:24:17.131635Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-25T14:24:17.131666Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-25T14:24:17.131716Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-25T14:24:17.131774Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-25T14:24:17.131795Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-25T14:24:17.131932Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-06-25T14:24:17.131955Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-25T14:24:17.131985Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-25T14:24:17.132005Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-25T14:24:17.132059Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-25T14:24:17.132079Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-25T14:24:17.132108Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-25T14:24:17.132152Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-25T14:24:17.132243Z 15 00h00m00.000000s :BS_NODE DEBUG: 
[15] Bootstrap 2025-06-25T14:24:17.132266Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-06-25T14:24:17.132297Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-06-25T14:24:17.132350Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-06-25T14:24:17.132415Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-06-25T14:24:17.132437Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-06-25T14:24:17.132471Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-06-25T14:24:17.132491Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-06-25T14:24:17.132584Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-06-25T14:24:17.132607Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-06-25T14:24:17.132662Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-06-25T14:24:17.132711Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-06-25T14:24:17.132791Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-06-25T14:24:17.132815Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-06-25T14:24:17.132875Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-06-25T14:24:17.132896Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-06-25T14:24:17.132973Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-06-25T14:24:17.132993Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-06-25T14:24:17.133023Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-06-25T14:24:17.133041Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-06-25T14:24:17.133072Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-06-25T14:24:17.133107Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-06-25T14:24:17.133156Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-06-25T14:24:17.133193Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-06-25T14:24:17.133301Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-06-25T14:24:17.133325Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-06-25T14:24:17.133359Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-06-25T14:24:17.133380Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-06-25T14:24:17.133460Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-06-25T14:24:17.133481Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-06-25T14:24:17.133536Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-06-25T14:24:17.133555Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-06-25T14:24:17.133586Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-06-25T14:24:17.133607Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-06-25T14:24:17.133663Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-06-25T14:24:17.133685Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-06-25T14:24:17.133739Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2025-06-25T14:24:17.133761Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 2025-06-25T14:24:17.133796Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2025-06-25T14:24:17.133816Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2025-06-25T14:24:17.133893Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2025-06-25T14:24:17.133916Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2025-06-25T14:24:17.133969Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2025-06-25T14:24:17.134014Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Connect 2025-06-25T14:24:17.154974Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2713:53] Status# ERROR ClientId# [1:2713:53] ServerId# [0:0:0] PipeClient# [1:2713:53] 
2025-06-25T14:24:17.156437Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2714:41] Status# ERROR ClientId# [2:2714:41] ServerId# [0:0:0] PipeClient# [2:2714:41] 2025-06-25T14:24:17.156522Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2715:41] Status# ERROR ClientId# [3:2715:41] ServerId# [0:0:0] PipeClient# [3:2715:41] 2025-06-25T14:24:17.156587Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2716:41] Status# ERROR ClientId# [4:2716:41] ServerId# [0:0:0] PipeClient# [4:2716:41] 2025-06-25T14:24:17.156645Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2717:41] Status# ERROR ClientId# [5:2717:41] ServerId# [0:0:0] PipeClient# [5:2717:41] 2025-06-25T14:24:17.156721Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2718:41] Status# ERROR ClientId# [6:2718:41] ServerId# [0:0:0] PipeClient# [6:2718:41] 2025-06-25T14:24:17.156794Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2719:41] Status# ERROR ClientId# [7:2719:41] ServerId# [0:0:0] PipeClient# [7:2719:41] 2025-06-25T14:24:17.156835Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2720:41] Status# ERROR ClientId# [8:2720:41] ServerId# [0:0:0] PipeClient# [8:2720:41] 2025-06-25T14:24:17.156871Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2721:41] Status# ERROR ClientId# [9:2721:41] ServerId# [0:0:0] PipeClient# [9:2721:41] 2025-06-25T14:24:17.156928Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2722:41] Status# ERROR ClientId# [10:2722:41] ServerId# [0:0:0] PipeClient# [10:2722:41] 2025-06-25T14:24:17.156986Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2723:41] Status# ERROR ClientId# [11:2723:41] ServerId# [0:0:0] PipeClient# [11:2723:41] 2025-06-25T14:24:17.157025Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2724:41] Status# ERROR ClientId# [12:2724:41] ServerId# [0:0:0] PipeClient# [12:2724:41] 2025-06-25T14:24:17.157063Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2725:41] Status# ERROR ClientId# [13:2725:41] ServerId# [0:0:0] PipeClient# [13:2725:41] 2025-06-25T14:24:17.157099Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2726:41] Status# ERROR ClientId# [14:2726:41] ServerId# [0:0:0] PipeClient# [14:2726:41] 2025-06-25T14:24:17.157159Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2727:41] Status# ERROR ClientId# [15:2727:41] ServerId# [0:0:0] PipeClient# [15:2727:41] 2025-06-25T14:24:17.157200Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2728:41] Status# ERROR ClientId# [16:2728:41] ServerId# [0:0:0] PipeClient# [16:2728:41] 2025-06-25T14:24:17.157239Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2729:41] Status# ERROR ClientId# [17:2729:41] ServerId# [0:0:0] PipeClient# [17:2729:41] 2025-06-25T14:24:17.157275Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2730:41] Status# ERROR ClientId# [18:2730:41] ServerId# [0:0:0] PipeClient# [18:2730:41] 2025-06-25T14:24:17.157312Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2731:41] Status# ERROR ClientId# [19:2731:41] ServerId# [0:0:0] PipeClient# [19:2731:41] 2025-06-25T14:24:17.157396Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2732:41] Status# ERROR ClientId# [20:2732:41] ServerId# [0:0:0] PipeClient# [20:2732:41] 2025-06-25T14:24:17.157485Z 21 00h00m00.000000s :BS_NODE 
DEBUG: [21] ClientConnected Sender# [21:2733:41] Status# ERROR ClientId# [21:2733:41] ServerId# [0:0:0] PipeClient# [21:2733:41] 2025-06-25T14:24:17.157541Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2734:41] Status# ERROR ClientId# [22:2734:41] ServerId# [0:0:0] PipeClient# [22:2734:41] 2025-06-25T14:24:17.157583Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2735:41] Status# ERROR ClientId# [23:2735:41] ServerId# [0:0:0] PipeClient# [23:2735:41] 2025-06-25T14:24:17.157641Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2736:41] Status# ERROR ClientId# [24:2736:41] ServerId# [0:0:0] PipeClient# [24:2736:41] 2025-06-25T14:24:17.157678Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2737:41] Status# ERROR ClientId# [25:2737:41] ServerId# [0:0:0] PipeClient# [25:2737:41] 2025-06-25T14:24:17.157715Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2738:41] Status# ERROR ClientId# [26:2738:41] ServerId# [0:0:0] PipeClient# [26:2738:41] 2025-06-25T14:24:17.157750Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2739:41] Status# ERROR ClientId# [27:2739:41] ServerId# [0:0:0] PipeClient# [27:2739:41] 2025-06-25T14:24:17.157867Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2740:41] Status# ERROR ClientId# [28:2740:41] ServerId# [0:0:0] PipeClient# [28:2740:41] 2025-06-25T14:24:17.157906Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2741:41] Status# ERROR ClientId# [29:2741:41] ServerId# [0:0:0] PipeClient# [29:2741:41] 2025-06-25T14:24:17.157943Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2742:41] Status# ERROR ClientId# [30:2742:41] ServerId# [0:0:0] PipeClient# [30:2742:41] 2025-06-25T14:24:17.157979Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2743:41] Status# ERROR ClientId# [31:2743:41] ServerId# [0:0:0] PipeClient# [31:2743:41] 2025-06-25T14:24:17.158031Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2744:41] Status# ERROR ClientId# [32:2744:41] ServerId# [0:0:0] PipeClient# [32:2744:41] 2025-06-25T14:24:17.158069Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2745:41] Status# ERROR ClientId# [33:2745:41] ServerId# [0:0:0] PipeClient# [33:2745:41] 2025-06-25T14:24:17.158113Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] ClientConnected Sender# [34:2746:41] Status# ERROR ClientId# [34:2746:41] ServerId# [0:0:0] PipeClient# [34:2746:41] 2025-06-25T14:24:17.158151Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2747:41] Status# ERROR ClientId# [35:2747:41 ... 
:BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:34.782710Z 1 05h45m00.123040s :BS_NODE DEBUG: [1] VDiskId# [80000012:2:0:2:0] -> [80000012:3:0:2:0] 2025-06-25T14:24:34.783233Z 1 05h45m00.123040s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483666 Items# [80000012:2:2:2:0]: 25:1002:1001 -> 26:1000:1012 ConfigTxSeqNo# 553 2025-06-25T14:24:34.783267Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483666 Success# true 2025-06-25T14:24:34.783415Z 34 05h45m00.123040s :BS_NODE DEBUG: [34] NodeServiceSetUpdate 2025-06-25T14:24:34.783470Z 34 05h45m00.123040s :BS_NODE DEBUG: [34] VDiskId# [80000012:2:2:1:0] -> [80000012:3:2:1:0] 2025-06-25T14:24:34.783577Z 19 05h45m00.123040s :BS_NODE DEBUG: [19] NodeServiceSetUpdate 2025-06-25T14:24:34.783625Z 19 05h45m00.123040s :BS_NODE DEBUG: [19] VDiskId# [80000012:2:1:0:0] -> [80000012:3:1:0:0] 2025-06-25T14:24:34.783709Z 23 05h45m00.123040s :BS_NODE DEBUG: [23] NodeServiceSetUpdate 2025-06-25T14:24:34.783756Z 23 05h45m00.123040s :BS_NODE DEBUG: [23] VDiskId# [80000012:2:1:1:0] -> [80000012:3:1:1:0] 2025-06-25T14:24:34.783866Z 7 05h45m00.123040s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2025-06-25T14:24:34.783914Z 7 05h45m00.123040s :BS_NODE DEBUG: [7] VDiskId# [80000012:2:0:0:0] -> [80000012:3:0:0:0] 2025-06-25T14:24:34.783977Z 25 05h45m00.123040s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-06-25T14:24:34.784054Z 26 05h45m00.123040s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-25T14:24:34.784099Z 26 05h45m00.123040s :BS_NODE DEBUG: [26] VDiskId# [80000012:3:2:2:0] PDiskId# 1000 VSlotId# 1012 created 2025-06-25T14:24:34.784165Z 26 05h45m00.123040s :BS_NODE DEBUG: [26] VDiskId# [80000012:3:2:2:0] status changed to INIT_PENDING 2025-06-25T14:24:34.784261Z 10 05h45m00.123040s :BS_NODE DEBUG: [10] NodeServiceSetUpdate 2025-06-25T14:24:34.784324Z 10 05h45m00.123040s :BS_NODE DEBUG: [10] VDiskId# [80000012:2:0:1:0] -> [80000012:3:0:1:0] 2025-06-25T14:24:34.784407Z 13 05h45m00.123040s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-06-25T14:24:34.784452Z 13 05h45m00.123040s :BS_NODE DEBUG: [13] VDiskId# [80000012:2:1:2:0] -> [80000012:3:1:2:0] 2025-06-25T14:24:34.784537Z 31 05h45m00.123040s :BS_NODE DEBUG: [31] NodeServiceSetUpdate 2025-06-25T14:24:34.784584Z 31 05h45m00.123040s :BS_NODE DEBUG: [31] VDiskId# [80000012:2:2:0:0] -> [80000012:3:2:0:0] 2025-06-25T14:24:34.784889Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483650 2025-06-25T14:24:34.785431Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:34.785473Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:3:0:2:0] DiskIsOk# true 2025-06-25T14:24:34.785699Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:34.785722Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:3:0:0:0] DiskIsOk# true 2025-06-25T14:24:34.785741Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:34.785759Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: 
{BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:3:0:1:0] DiskIsOk# true 2025-06-25T14:24:34.785782Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:34.785800Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:3:1:0:0] DiskIsOk# true 2025-06-25T14:24:34.785816Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:34.785832Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:3:1:1:0] DiskIsOk# true 2025-06-25T14:24:34.785850Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:34.785867Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:3:1:2:0] DiskIsOk# true 2025-06-25T14:24:34.785883Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:34.785896Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:3:2:0:0] DiskIsOk# true 2025-06-25T14:24:34.785910Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483650 Status# OK JoinedGroup# true Replicated# true 2025-06-25T14:24:34.785924Z 1 05h45m00.123040s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483650 VDiskId# [80000002:3:2:1:0] DiskIsOk# true 2025-06-25T14:24:34.789980Z 1 05h45m00.123552s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-25T14:24:34.790052Z 1 05h45m00.123552s :BS_NODE DEBUG: [1] VDiskId# [80000002:3:0:2:0] -> [80000002:4:0:2:0] 2025-06-25T14:24:34.790558Z 1 05h45m00.123552s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483650 Items# [80000002:3:2:2:0]: 25:1002:1000 -> 29:1003:1013 ConfigTxSeqNo# 554 2025-06-25T14:24:34.790590Z 1 05h45m00.123552s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483650 Success# true 2025-06-25T14:24:34.790736Z 34 05h45m00.123552s :BS_NODE DEBUG: [34] NodeServiceSetUpdate 2025-06-25T14:24:34.790787Z 34 05h45m00.123552s :BS_NODE DEBUG: [34] VDiskId# [80000002:3:2:1:0] -> [80000002:4:2:1:0] 2025-06-25T14:24:34.790887Z 19 05h45m00.123552s :BS_NODE DEBUG: [19] NodeServiceSetUpdate 2025-06-25T14:24:34.790934Z 19 05h45m00.123552s :BS_NODE DEBUG: [19] VDiskId# [80000002:3:1:0:0] -> [80000002:4:1:0:0] 2025-06-25T14:24:34.791017Z 24 05h45m00.123552s :BS_NODE DEBUG: [24] NodeServiceSetUpdate 2025-06-25T14:24:34.791066Z 24 05h45m00.123552s :BS_NODE DEBUG: [24] VDiskId# [80000002:3:1:1:0] -> [80000002:4:1:1:0] 2025-06-25T14:24:34.791165Z 7 05h45m00.123552s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2025-06-25T14:24:34.791214Z 7 05h45m00.123552s :BS_NODE DEBUG: [7] VDiskId# [80000002:3:0:0:0] -> [80000002:4:0:0:0] 2025-06-25T14:24:34.791278Z 25 05h45m00.123552s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-06-25T14:24:34.791347Z 10 05h45m00.123552s :BS_NODE DEBUG: [10] 
NodeServiceSetUpdate 2025-06-25T14:24:34.791393Z 10 05h45m00.123552s :BS_NODE DEBUG: [10] VDiskId# [80000002:3:0:1:0] -> [80000002:4:0:1:0] 2025-06-25T14:24:34.791475Z 29 05h45m00.123552s :BS_NODE DEBUG: [29] NodeServiceSetUpdate 2025-06-25T14:24:34.791516Z 29 05h45m00.123552s :BS_NODE DEBUG: [29] VDiskId# [80000002:4:2:2:0] PDiskId# 1003 VSlotId# 1013 created 2025-06-25T14:24:34.791587Z 29 05h45m00.123552s :BS_NODE DEBUG: [29] VDiskId# [80000002:4:2:2:0] status changed to INIT_PENDING 2025-06-25T14:24:34.791679Z 13 05h45m00.123552s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-06-25T14:24:34.791733Z 13 05h45m00.123552s :BS_NODE DEBUG: [13] VDiskId# [80000002:3:1:2:0] -> [80000002:4:1:2:0] 2025-06-25T14:24:34.791803Z 31 05h45m00.123552s :BS_NODE DEBUG: [31] NodeServiceSetUpdate 2025-06-25T14:24:34.791834Z 31 05h45m00.123552s :BS_NODE DEBUG: [31] VDiskId# [80000002:3:2:0:0] -> [80000002:4:2:0:0] 2025-06-25T14:24:34.792591Z 26 05h45m01.226016s :BS_NODE DEBUG: [26] VDiskId# [80000032:3:2:2:0] status changed to REPLICATING 2025-06-25T14:24:34.792882Z 26 05h45m01.504040s :BS_NODE DEBUG: [26] VDiskId# [80000012:3:2:2:0] status changed to REPLICATING 2025-06-25T14:24:34.793123Z 29 05h45m01.529480s :BS_NODE DEBUG: [29] VDiskId# [80000062:4:2:2:0] status changed to REPLICATING 2025-06-25T14:24:34.793431Z 29 05h45m01.873504s :BS_NODE DEBUG: [29] VDiskId# [80000042:3:2:2:0] status changed to REPLICATING 2025-06-25T14:24:34.793776Z 29 05h45m02.908528s :BS_NODE DEBUG: [29] VDiskId# [80000022:3:2:2:0] status changed to REPLICATING 2025-06-25T14:24:34.794092Z 34 05h45m03.022968s :BS_NODE DEBUG: [34] VDiskId# [80000040:5:2:0:0] status changed to REPLICATING 2025-06-25T14:24:34.794554Z 26 05h45m04.004992s :BS_NODE DEBUG: [26] VDiskId# [80000052:3:2:2:0] status changed to REPLICATING 2025-06-25T14:24:34.795737Z 29 05h45m05.144552s :BS_NODE DEBUG: [29] VDiskId# [80000002:4:2:2:0] status changed to REPLICATING 2025-06-25T14:24:34.796102Z 29 05h45m05.294456s :BS_NODE DEBUG: [29] VDiskId# [80000072:3:2:2:0] status changed to REPLICATING 2025-06-25T14:24:34.796508Z 29 05h45m08.801528s :BS_NODE DEBUG: [29] VDiskId# [80000022:3:2:2:0] status changed to READY 2025-06-25T14:24:34.797331Z 25 05h45m08.802040s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-06-25T14:24:34.797371Z 25 05h45m08.802040s :BS_NODE DEBUG: [25] VDiskId# [80000022:2:2:2:0] destroyed 2025-06-25T14:24:34.797718Z 34 05h45m10.660968s :BS_NODE DEBUG: [34] VDiskId# [80000040:5:2:0:0] status changed to READY 2025-06-25T14:24:34.798768Z 25 05h45m10.661480s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-06-25T14:24:34.798817Z 25 05h45m10.661480s :BS_NODE DEBUG: [25] VDiskId# [80000040:4:2:0:0] destroyed 2025-06-25T14:24:34.799727Z 29 05h45m18.058480s :BS_NODE DEBUG: [29] VDiskId# [80000062:4:2:2:0] status changed to READY 2025-06-25T14:24:34.800689Z 25 05h45m18.058992s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-06-25T14:24:34.800736Z 25 05h45m18.058992s :BS_NODE DEBUG: [25] VDiskId# [80000062:3:2:2:0] destroyed 2025-06-25T14:24:34.801378Z 26 05h45m24.762040s :BS_NODE DEBUG: [26] VDiskId# [80000012:3:2:2:0] status changed to READY 2025-06-25T14:24:34.802184Z 25 05h45m24.762552s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-06-25T14:24:34.802227Z 25 05h45m24.762552s :BS_NODE DEBUG: [25] VDiskId# [80000012:2:2:2:0] destroyed 2025-06-25T14:24:34.802810Z 26 05h45m25.728992s :BS_NODE DEBUG: [26] VDiskId# [80000052:3:2:2:0] status changed to READY 2025-06-25T14:24:34.803559Z 25 05h45m25.729504s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 
2025-06-25T14:24:34.803602Z 25 05h45m25.729504s :BS_NODE DEBUG: [25] VDiskId# [80000052:2:2:2:0] destroyed 2025-06-25T14:24:34.803730Z 29 05h45m26.346552s :BS_NODE DEBUG: [29] VDiskId# [80000002:4:2:2:0] status changed to READY 2025-06-25T14:24:34.804515Z 25 05h45m26.347064s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-06-25T14:24:34.804557Z 25 05h45m26.347064s :BS_NODE DEBUG: [25] VDiskId# [80000002:3:2:2:0] destroyed 2025-06-25T14:24:34.804680Z 29 05h45m28.960504s :BS_NODE DEBUG: [29] VDiskId# [80000042:3:2:2:0] status changed to READY 2025-06-25T14:24:34.805444Z 25 05h45m28.961016s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-06-25T14:24:34.805485Z 25 05h45m28.961016s :BS_NODE DEBUG: [25] VDiskId# [80000042:2:2:2:0] destroyed 2025-06-25T14:24:34.806727Z 26 05h45m35.084016s :BS_NODE DEBUG: [26] VDiskId# [80000032:3:2:2:0] status changed to READY 2025-06-25T14:24:34.807491Z 25 05h45m35.084528s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-06-25T14:24:34.807540Z 25 05h45m35.084528s :BS_NODE DEBUG: [25] VDiskId# [80000032:2:2:2:0] destroyed 2025-06-25T14:24:34.807687Z 29 05h45m37.792456s :BS_NODE DEBUG: [29] VDiskId# [80000072:3:2:2:0] status changed to READY 2025-06-25T14:24:34.808566Z 25 05h45m37.792968s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-06-25T14:24:34.808611Z 25 05h45m37.792968s :BS_NODE DEBUG: [25] VDiskId# [80000072:2:2:2:0] destroyed ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/provider/ut/pushdown/unittest >> PushdownTest::RegexpPushdown [GOOD] Test command err: Initial program: ( (let $data_source (DataSource '"generic" '"test_cluster")) (let $empty_lambda (lambda '($arg) (Bool '"true"))) (let $table (MrTableConcat (Key '('table (String '"test_table")))) ) (let $read (Read! world $data_source $table)) (let $map_lambda (lambda '($row) (OptionalIf (Bool '"true") $row ) )) (let $filtered_data (FlatMap (Right! $read) $map_lambda)) (let $resulte_data_sink (DataSink '"result")) (let $result (ResWrite! (Left! $read) $resulte_data_sink (Key) $filtered_data '('('type)))) (return (Commit! $result $resulte_data_sink)) ) 2025-06-25 14:24:34.723 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (Bool '"true") $4))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-25 14:24:34.727 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (Bool '"true") $4))) '('('type)))) (return (Commit! 
$3 $2)) ) 2025-06-25 14:24:34.728 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [generic] yql_generic_io_discovery.cpp:55: discovered cluster name: test_cluster 2025-06-25 14:24:34.728 INFO yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [generic] yql_generic_load_meta.cpp:91: Loading table meta for: `test_cluster`.`test_table` 2025-06-25 14:24:34.731 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-25 14:24:34.735 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-25 14:24:34.736 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-25 14:24:34.736 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (Bool '"true")) (let $2 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($5) $1))) (let $3 (DataSink '"result")) (let $4 (ResWrite! (Left! $2) $3 (Key) (FlatMap (Right! $2) (lambda '($6) (OptionalIf $1 $6))) '('('type)))) (return (Commit! $4 $3)) ) 2025-06-25 14:24:34.737 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_co_simple1.cpp:986: OptionalIf over Bool 'true 2025-06-25 14:24:34.738 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-25 14:24:34.738 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-25 14:24:34.739 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (GenReadTable! 
world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-25 14:24:34.739 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_co_simple1.cpp:2040: FlatMap with Just 2025-06-25 14:24:34.739 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (Right! $1) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-25 14:24:34.740 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (Right! $1) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-25 14:24:34.742 INFO yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [generic] yql_optimize.cpp:135: PhysicalOptimizer-TrimReadWorld 2025-06-25 14:24:34.742 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (ResWrite! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)))) (return (Commit! $2 $1)) ) 2025-06-25 14:24:34.743 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (ResWrite! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)))) (return (Commit! $2 $1)) ) 2025-06-25 14:24:34.744 INFO yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [RESULT] yql_result_provider.cpp:773: ResPull 2025-06-25 14:24:34.744 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) 2025-06-25 14:24:34.745 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) 2025-06-25 14:24:34.746 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Optimized expr: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! 
world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) 2025-06-25 14:24:34.747 INFO yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [generic] yql_generic_dq_integration.cpp:193: Filling source settings: cluster: test_cluster, table: test_table, endpoint: host: "host" port: 42 2025-06-25 14:24:34.755 INFO yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [generic] yql_optimize.cpp:135: BuildGenericDqSourceSettings 2025-06-25 14:24:34.757 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Built settings: ( (let $1 (DataSink '"result")) (let $2 '('"col_bool" '"col_date" '"col_datetime" '"col_double" '"col_dynumber" '"col_float" '"col_int16" '"col_int32" '"col_int64" '"col_int8" '"col_interval" '"col_json" '"col_json_document" '"col_optional_bool" '"col_optional_date" '"col_optional_datetime" '"col_optional_double" '"col_optional_dynumber" '"col_optional_float" '"col_optional_int16" '"col_optional_int32" '"col_optional_int64" '"col_optional_int8" '"col_optional_interval" '"col_optional_json" '"col_optional_json_document" '"col_optional_string" '"col_optional_timestamp" '"col_optional_tz_date" '"col_optional_tz_datetime" '"col_optional_tz_timestamp" '"col_optional_uint16" '"col_optional_uint32" '"col_optional_uint64" '"col_optional_uint8" '"col_optional_utf8" '"col_optional_uuid" '"col_optional_yson" '"col_string" '"col_timestamp" '"col_tz_date" '"col_tz_datetime" '"col_tz_timestamp" '"col_uint16" '"col_uint32" '"col_uint64" '"col_uint8" '"col_utf8" '"col_uuid" '"col_yson")) (let $3 (GenSourceSettings world '"test_cluster" '"test_table" (SecureParam '"cluster:default_test_cluster") $2 (lambda '($32) (Bool '"true")))) (let $4 (DataType 'Bool)) (let $5 (DataType 'Date)) (let $6 (DataType 'Datetime)) (let $7 (DataType 'Double)) (let $8 (DataType 'DyNumber)) (let $9 (DataType 'Float)) (let $10 (DataType 'Int16)) (let $11 (DataType 'Int32)) (let $12 (DataType 'Int64)) (let $13 (DataType 'Int8)) (let $14 (DataType 'Interval)) (let $15 (DataType 'Json)) (let $16 (DataType 'JsonDocument)) (let $17 (DataType 'String)) (let $18 (DataType 'Timestamp)) (let $19 (DataType 'TzDate)) (let $20 (DataType 'TzDatetime)) (let $21 (DataType 'TzTimestamp)) (let $22 (DataType 'Uint16)) (let $23 (DataType 'Uint32)) (let $24 (DataType 'Uint64)) (let $25 (DataType 'Uint8)) (let $26 (DataType 'Utf8)) (let $27 (DataType 'Uuid)) (let $28 (DataType 'Yson)) (let $29 (StructType '('"col_bool" $4) '('"col_date" $5) '('"col_datetime" $6) '('"col_double" $7) '('"col_dynumber" $8) '('"col_float" $9) '('"col_int16" $10) '('"col_int32" $11) '('"col_int64" $12) '('"col_int8" $13) '('"col_interval" $14) '('"col_json" $15) '('"col_json_document" $16) '('"col_optional_bool" (OptionalType $4)) '('"col_optional_date" (OptionalType $5)) '('"col_optional_datetime" (OptionalType $6)) '('"col_optional_double" (OptionalType $7)) '('"col_optional_dynumber" (OptionalType $8)) '('"col_optional_float" (OptionalType $9)) '('"col_optional_int16" (OptionalType $10)) '('"col_optional_int32" (OptionalType $11)) '('"col_optional_int64" (OptionalType $12)) '('"col_optional_int8" (OptionalType $13)) '('"col_optional_interval" (OptionalType $14)) '('"col_optional_json" (OptionalType $15)) '('"col_optional_json_document" (OptionalType $16)) '('"col_optional_string" (OptionalType $17)) '('"col_optional_timestamp" (OptionalType 
$18)) '('"col_optional_tz_date" (OptionalT ... sitive" $6) '('"DotNl" $6) '('"Literal" $6) '('"LogErrors" $6) '('"LongestMatch" $6) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $6) '('"NeverNl" $6) '('"OneLine" $6) '('"PerlClasses" $6) '('"PosixSyntax" $6) '('"Utf8" $6) '('"WordBoundary" $6)))) '"" '())) (return (OptionalIf (Apply $9 (Just (Member $5 '"col_string"))) $5)) )))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-25 14:24:36.259 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (block '( (let $6 (DataType 'Bool)) (let $7 (OptionalType (StructType '('"CaseSensitive" $6) '('"DotNl" $6) '('"Literal" $6) '('"LogErrors" $6) '('"LongestMatch" $6) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $6) '('"NeverNl" $6) '('"OneLine" $6) '('"PerlClasses" $6) '('"PosixSyntax" $6) '('"Utf8" $6) '('"WordBoundary" $6)))) (let $8 (DataType 'String)) (let $9 (CallableType '() '($6) '((OptionalType $8)))) (let $10 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $7)) (VoidType) '"" $9 (TupleType $8 $7) '"" '())) (return (OptionalIf (Apply $10 (Just (Member $5 '"col_string"))) $5)) )))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-25 14:24:36.261 INFO yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [generic] yql_optimize.cpp:135: PhysicalOptimizer-TrimReadWorld 2025-06-25 14:24:36.262 INFO yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [default] physical_opt.cpp:76: Push filter lambda: ( (return (lambda '($1) (block '( (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (return (Apply $6 (Just (Member $1 '"col_string")))) )))) ) 2025-06-25 14:24:36.262 INFO yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [generic] yql_optimize.cpp:135: PhysicalOptimizer-PushFilterToReadTable 2025-06-25 14:24:36.262 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (let $7 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! 
world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($8) (Apply $6 (Just (Member $8 '"col_string")))))) (lambda '($9) (OptionalIf (Apply $6 (Just (Member $9 '"col_string"))) $9))) '('('type)))) (return (Commit! $7 $1)) ) 2025-06-25 14:24:36.263 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (let $7 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($8) (Apply $6 (Just (Member $8 '"col_string")))))) (lambda '($9) (OptionalIf (Apply $6 (Just (Member $9 '"col_string"))) $9))) '('('type)))) (return (Commit! $7 $1)) ) 2025-06-25 14:24:36.264 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (let $7 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($8) (Apply $6 (Just (Member $8 '"col_string")))))) (lambda '($9) (OptionalIf (Apply $6 (Just (Member $9 '"col_string"))) $9))) '('('type)))) (return (Commit! $7 $1)) ) 2025-06-25 14:24:36.266 TRACE yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [generic] yql_generic_physical_opt.cpp:142: Push filter. Lambda is already not empty 2025-06-25 14:24:36.267 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Optimized expr: ( (let $1 (DataSink '"result")) (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (let $7 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($8) (Apply $6 (Just (Member $8 '"col_string")))))) (lambda '($9) (OptionalIf (Apply $6 (Just (Member $9 '"col_string"))) $9))) '('('type)))) (return (Commit! 
$7 $1)) ) 2025-06-25 14:24:36.267 INFO yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [generic] yql_generic_dq_integration.cpp:193: Filling source settings: cluster: test_cluster, table: test_table, endpoint: host: "host" port: 42 2025-06-25 14:24:36.272 INFO yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [generic] yql_optimize.cpp:135: BuildGenericDqSourceSettings 2025-06-25 14:24:36.274 DEBUG yql-providers-generic-provider-ut-pushdown(pid=12014, tid=0x00007F8748A31F40) [core] yql_out_transformers.cpp:62: Built settings: ( (let $1 (DataSink '"result")) (let $2 '('"col_bool" '"col_date" '"col_datetime" '"col_double" '"col_dynumber" '"col_float" '"col_int16" '"col_int32" '"col_int64" '"col_int8" '"col_interval" '"col_json" '"col_json_document" '"col_optional_bool" '"col_optional_date" '"col_optional_datetime" '"col_optional_double" '"col_optional_dynumber" '"col_optional_float" '"col_optional_int16" '"col_optional_int32" '"col_optional_int64" '"col_optional_int8" '"col_optional_interval" '"col_optional_json" '"col_optional_json_document" '"col_optional_string" '"col_optional_timestamp" '"col_optional_tz_date" '"col_optional_tz_datetime" '"col_optional_tz_timestamp" '"col_optional_uint16" '"col_optional_uint32" '"col_optional_uint64" '"col_optional_uint8" '"col_optional_utf8" '"col_optional_uuid" '"col_optional_yson" '"col_string" '"col_timestamp" '"col_tz_date" '"col_tz_datetime" '"col_tz_timestamp" '"col_uint16" '"col_uint32" '"col_uint64" '"col_uint8" '"col_utf8" '"col_uuid" '"col_yson")) (let $3 (DataType 'Bool)) (let $4 (OptionalType (StructType '('"CaseSensitive" $3) '('"DotNl" $3) '('"Literal" $3) '('"LogErrors" $3) '('"LongestMatch" $3) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $3) '('"NeverNl" $3) '('"OneLine" $3) '('"PerlClasses" $3) '('"PosixSyntax" $3) '('"Utf8" $3) '('"WordBoundary" $3)))) (let $5 (DataType 'String)) (let $6 (OptionalType $5)) (let $7 (CallableType '() '($3) '($6))) (let $8 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $4)) (VoidType) '"" $7 (TupleType $5 $4) '"" '())) (let $9 (GenSourceSettings world '"test_cluster" '"test_table" (SecureParam '"cluster:default_test_cluster") $2 (lambda '($37) (Apply $8 (Just (Member $37 '"col_string")))))) (let $10 (DataType 'Bool)) (let $11 (DataType 'Date)) (let $12 (DataType 'Datetime)) (let $13 (DataType 'Double)) (let $14 (DataType 'DyNumber)) (let $15 (DataType 'Float)) (let $16 (DataType 'Int16)) (let $17 (DataType 'Int32)) (let $18 (DataType 'Int64)) (let $19 (DataType 'Int8)) (let $20 (DataType 'Interval)) (let $21 (DataType 'Json)) (let $22 (DataType 'JsonDocument)) (let $23 (DataType 'Timestamp)) (let $24 (DataType 'TzDate)) (let $25 (DataType 'TzDatetime)) (let $26 (DataType 'TzTimestamp)) (let $27 (DataType 'Uint16)) (let $28 (DataType 'Uint32)) (let $29 (DataType 'Uint64)) (let $30 (DataType 'Uint8)) (let $31 (DataType 'Utf8)) (let $32 (DataType 'Uuid)) (let $33 (DataType 'Yson)) (let $34 (StructType '('"col_bool" $10) '('"col_date" $11) '('"col_datetime" $12) '('"col_double" $13) '('"col_dynumber" $14) '('"col_float" $15) '('"col_int16" $16) '('"col_int32" $17) '('"col_int64" $18) '('"col_int8" $19) '('"col_interval" $20) '('"col_json" $21) '('"col_json_document" $22) '('"col_optional_bool" (OptionalType $10)) '('"col_optional_date" (OptionalType $11)) '('"col_optional_datetime" (OptionalType $12)) '('"col_optional_double" (OptionalType $13)) '('"col_optional_dynumber" (OptionalType $14)) '('"col_optional_float" (OptionalType $15)) 
'('"col_optional_int16" (OptionalType $16)) '('"col_optional_int32" (OptionalType $17)) '('"col_optional_int64" (OptionalType $18)) '('"col_optional_int8" (OptionalType $19)) '('"col_optional_interval" (OptionalType $20)) '('"col_optional_json" (OptionalType $21)) '('"col_optional_json_document" (OptionalType $22)) '('"col_optional_string" $6) '('"col_optional_timestamp" (OptionalType $23)) '('"col_optional_tz_date" (OptionalType $24)) '('"col_optional_tz_datetime" (OptionalType $25)) '('"col_optional_tz_timestamp" (OptionalType $26)) '('"col_optional_uint16" (OptionalType $27)) '('"col_optional_uint32" (OptionalType $28)) '('"col_optional_uint64" (OptionalType $29)) '('"col_optional_uint8" (OptionalType $30)) '('"col_optional_utf8" (OptionalType $31)) '('"col_optional_uuid" (OptionalType $32)) '('"col_optional_yson" (OptionalType $33)) '('"col_string" $5) '('"col_timestamp" $23) '('"col_tz_date" $24) '('"col_tz_datetime" $25) '('"col_tz_timestamp" $26) '('"col_uint16" $27) '('"col_uint32" $28) '('"col_uint64" $29) '('"col_uint8" $30) '('"col_utf8" $31) '('"col_uuid" $32) '('"col_yson" $33))) (let $35 (DqSourceWrap $9 (DataSource '"generic" '"test_cluster") $34)) (let $36 (ResWrite! world $1 (Key) (FlatMap $35 (lambda '($38) (OptionalIf (Apply $8 (Just (Member $38 '"col_string"))) $38))) '('('type)))) (return (Commit! $36 $1)) ) Dq source filter settings: filter_typed { regexp { value { column: "col_string" } pattern { typed_value { type { type_id: STRING } value { bytes_value: "\\\\d+" } } } } } |73.0%| [TA] $(B)/ydb/core/mind/bscontroller/ut_selfheal/test-results/unittest/{meta.json ... results_accumulator.log} |72.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |73.0%| [TS] {RESULT} ydb/library/yql/providers/generic/provider/ut/pushdown/unittest |72.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |73.0%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |73.0%| [LD] {RESULT} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |73.0%| [TA] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_selfheal/test-results/unittest/{meta.json ... 
results_accumulator.log} |73.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |73.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |73.0%| [LD] {RESULT} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |73.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |73.1%| [LD] {RESULT} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas |73.2%| [LD] {RESULT} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas |73.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |73.2%| [LD] {RESULT} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy >> BlobDepot::VerifiedRandom [GOOD] >> BlobDepot::LoadPutAndRead |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |73.3%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dq/service_node/service_node |73.3%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dq/service_node/service_node |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dq/service_node/service_node |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |73.3%| [LD] {RESULT} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |73.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |73.3%| [LD] {RESULT} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/compatibility/ydb-tests-compatibility >> BlobDepot::LoadPutAndRead [GOOD] >> BlobDepot::DecommitPutAndRead |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dqrun/dqrun |73.4%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dqrun/dqrun |73.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |73.4%| [LD] {RESULT} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dqrun/dqrun ------- [LD] {default-linux-x86_64, release, asan} $(B)/yql/tools/yqlrun/yqlrun ld.lld: warning: version script assignment of 'global' to symbol '__after_morecore_hook' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'daylight' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'environ' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '_environ' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__free_hook' failed: symbol not defined 
ld.lld: warning: version script assignment of 'global' to symbol '__malloc_hook' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__malloc_initialize_hook' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__memalign_hook' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'program_invocation_name' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'program_invocation_short_name' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__realloc_hook' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'timezone' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'tzname' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__libc_start_main' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateHappensAfter' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateHappensBefore' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateIgnoreWritesBegin' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateIgnoreWritesEnd' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateIgnoreReadsBegin' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateIgnoreReadsEnd' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'abort' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'bind' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'close' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__close' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'closedir' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'connect' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'creat' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'creat64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dl_iterate_phdr' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dup' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dup2' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dup3' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'epoll_create' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'epoll_create1' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'epoll_ctl' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'epoll_pwait' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'epoll_wait' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'eventfd' failed: symbol not defined 
ld.lld: warning: version script assignment of 'global' to symbol 'fork' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__fxstat' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__fxstat64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gettimeofday' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'inotify_init' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'inotify_init1' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'kill' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'listen' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'nanosleep' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'on_exit' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'open' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'open64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pipe' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pipe2' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_barrier_destroy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_barrier_init' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_barrier_wait' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_cond_broadcast' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_cond_destroy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_cond_init' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_cond_signal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_cond_timedwait' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_cond_wait' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_kill' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_destroy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_init' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_lock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_timedlock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_trylock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_unlock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_once' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_destroy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to 
symbol 'pthread_rwlock_init' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_rdlock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_timedrdlock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_timedwrlock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_tryrdlock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_trywrlock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_unlock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_rwlock_wrlock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_spin_destroy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_spin_init' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_spin_lock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_spin_trylock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_spin_unlock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'raise' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__res_iclose' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'rmdir' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'setjmp' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '_setjmp' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'signalfd' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'sigsetjmp' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__sigsetjmp' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'sigsuspend' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'sleep' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'socket' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'socketpair' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'tmpfile' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'tmpfile64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'unlink' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'usleep' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'bcopy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dladdr' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dlerror' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dl_iterate_phdr' failed: symbol not defined ld.lld: warning: version script assignment of 
'global' to symbol 'epoll_pwait' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'epoll_wait' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fcvt' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fgets_unlocked' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fork' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'forkpty' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fread_unlocked' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__fxstat' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__fxstat64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__fxstatat' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__fxstatat64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gcvt' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getenv' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gethostname' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getrlimit' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getrlimit64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getrusage' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gettimeofday' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'mbrtowc' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'mbtowc' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'memccpy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'mempcpy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'openpty' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pipe' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pipe2' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'prlimit' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'prlimit64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_key_create' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_lock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'pthread_mutex_unlock' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'putenv' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'setenv' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'shmat' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'socketpair' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to 
symbol 'stpcpy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strftime' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strftime_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strftime_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtod' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtod_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtod_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtod_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtof' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtof_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtof_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtof_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtold' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtold_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtold_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtold_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtol_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtol_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtol_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtoll_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtoll_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtoll_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtoul' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtoul_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtoull' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtoul_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtoul_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtoull_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__strtoull_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'strtoull_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'swprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'tzset' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'vswprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcschr' failed: symbol not defined ld.lld: warning: version 
script assignment of 'global' to symbol 'wcscmp' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcscpy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcsftime' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcsftime_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcsftime_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstod' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstod_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstod_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstod_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstof' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstof_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstof_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstof_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstol' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstold' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstold_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstold_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstold_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstol_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoll' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstol_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstol_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoll_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoll_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoll_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoul' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoul_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoull' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoul_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoul_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoull_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoull_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoull_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wmemcpy' failed: symbol not 
defined ld.lld: warning: version script assignment of 'global' to symbol 'wmemmove' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wmempcpy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wmemset' failed: symbol not defined |73.5%| [LD] {RESULT} $(B)/yql/tools/yqlrun/yqlrun |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/tools/yqlrun/yqlrun |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |73.5%| [LD] {RESULT} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/generic/analytics/ydb-tests-fq-generic-analytics |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |73.5%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |73.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |73.5%| [LD] {RESULT} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |73.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |73.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |73.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |73.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |73.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/load/ydb-tests-olap-load |73.6%| [LD] {RESULT} $(B)/ydb/tests/olap/load/ydb-tests-olap-load |73.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/load/ydb-tests-olap-load |73.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |73.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |73.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage >> test.py::test[solomon-BadDownsamplingAggregation-] >> BlobDepot::DecommitPutAndRead [GOOD] >> BlobDepot::DecommitVerifiedRandom |73.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |73.7%| [LD] {RESULT} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |73.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |73.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |73.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |73.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |73.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |73.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |73.7%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |73.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |73.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |73.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |73.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |73.8%| [LD] {RESULT} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |73.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |73.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/datashard/vector_index/medium/ydb-tests-datashard-vector_index-medium |73.8%| [LD] {RESULT} $(B)/ydb/tests/datashard/vector_index/medium/ydb-tests-datashard-vector_index-medium |73.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/datashard/vector_index/medium/ydb-tests-datashard-vector_index-medium |73.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli |73.8%| [LD] {RESULT} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/ydb-tests-olap |73.9%| [LD] {RESULT} $(B)/ydb/tests/olap/ydb-tests-olap |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/ydb-tests-olap |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |73.9%| [LD] {RESULT} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |73.9%| [LD] {RESULT} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |73.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/docs/generator/generator |73.9%| [LD] {RESULT} $(B)/ydb/tests/olap/docs/generator/generator |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/docs/generator/generator |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |73.9%| [LD] {RESULT} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |73.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |73.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |74.0%| [LD] {RESULT} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |74.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large >> test.py::test[solomon-BadDownsamplingAggregation-] [GOOD] >> test.py::test[solomon-BadDownsamplingDisabled-] |74.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |74.0%| [LD] {RESULT} 
$(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |74.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |74.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |74.0%| [LD] {RESULT} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants |74.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants >> BlobDepot::DecommitVerifiedRandom [GOOD] |74.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |74.0%| [LD] {RESULT} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |74.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |74.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming |74.0%| [LD] {RESULT} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming ------- [TS] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/unittest >> BlobDepot::DecommitVerifiedRandom [GOOD] Test command err: Mersenne random seed 3409645028 RandomSeed# 11640447752943630053 Mersenne random seed 698333460 Mersenne random seed 756016375 Mersenne random seed 1598982477 Mersenne random seed 743428749 2025-06-25T14:24:33.225315Z 1 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.225418Z 3 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.225455Z 4 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.225489Z 7 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.225524Z 8 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.225557Z 2 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:2] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.225596Z 6 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.225629Z 5 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:1:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.225804Z 1 00h00m25.012048s :BS_PROXY_PUT ERROR: [e3b3e64630f1f1bb] Result# TEvPutResult {Id# [15:1:1:0:1:100:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000000:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 
2181038080 Marker# BPP12 2025-06-25T14:24:33.226532Z 1 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:2] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.226632Z 2 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.226665Z 3 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.226696Z 6 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.226727Z 7 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.226765Z 5 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.226795Z 8 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.226825Z 4 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [15:3:1:0:1:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.239599Z 1 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.239805Z 5 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.239862Z 6 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.239921Z 4 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:2] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.239975Z 3 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.240026Z 8 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.240076Z 7 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# [16:2:2:0:2:100:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.240125Z 2 00h00m25.012048s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: failed to pass the Hull check; id# 
[16:2:2:0:2:100:3] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:24:33.240359Z 1 00h00m25.012048s :BS_PROXY_PUT ERROR: [60de5c2f353c5d88] Result# TEvPutResult {Id# [16:2:2:0:2:100:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000000:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038080 Marker# BPP12 Mersenne random seed 523663981 Read over the barrier, blob id# [15:1:1:0:1:100:0] Read over the barrier, blob id# [15:1:2:0:1:100:0] 2025-06-25T14:24:34.300157Z 1 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-25T14:24:34.300376Z 2 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-25T14:24:34.300428Z 3 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-25T14:24:34.300475Z 4 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-25T14:24:34.300524Z 5 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-25T14:24:34.300571Z 6 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-25T14:24:34.300618Z 7 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 2025-06-25T14:24:34.300669Z 8 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 1 soft] barrier# 1:2 new key# [15 0 2 2 soft] barrier# 1:1 Put over the barrier, blob id# [15:1:1:0:99:100:0] Put over the barrier, blob id# [15:1:3:0:99:100:0] 2025-06-25T14:24:34.318122Z 1 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-25T14:24:34.318315Z 2 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-25T14:24:34.318365Z 3 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-25T14:24:34.318411Z 4 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] 
barrier# 1:1 2025-06-25T14:24:34.318456Z 5 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-25T14:24:34.318500Z 6 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-25T14:24:34.318546Z 7 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 2025-06-25T14:24:34.318594Z 8 00h00m25.012048s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [15 0 2 3 hard] barrier# 1:3 new key# [15 0 2 4 hard] barrier# 1:1 Read over the barrier, blob id# [15:1:5:0:1:100:0] Read over the barrier, blob id# [15:1:6:0:1:100:0] Read over the barrier, blob id# [15:1:19:0:1:100:0] Read over the barrier, blob id# [15:2:1:0:1:100:0] Read over the barrier, blob id# [15:2:2:0:1:100:0] TEvRange returned collected blob with id# [15:1:17:0:1:100:0] TEvRange returned collected blob with id# [15:1:19:0:1:100:0] TEvRange returned collected blob with id# [15:2:1:0:1:100:0] TEvRange returned collected blob with id# [15:2:2:0:1:100:0] TEvRange returned collected blob with id# [15:2:3:0:1:100:0] TEvRange returned collected blob with id# [15:2:4:0:1:100:0] TEvRange returned collected blob with id# [15:2:5:0:1:100:0] TEvRange returned collected blob with id# [15:2:6:0:1:100:0] Read over the barrier, blob id# [100:1:3:0:1:100:0] Read over the barrier, blob id# [100:1:5:0:1:100:0] Read over the barrier, blob id# [100:1:6:0:1:100:0] Read over the barrier, blob id# [100:2:1:0:1:100:0] Read over the barrier, blob id# [100:2:2:0:1:100:0] TEvRange returned collected blob with id# [100:2:2:0:1:100:0] TEvRange returned collected blob with id# [100:2:3:0:1:100:0] TEvRange returned collected blob with id# [100:2:4:0:1:100:0] TEvRange returned collected blob with id# [100:2:5:0:1:100:0] TEvRange returned collected blob with id# [100:2:6:0:1:100:0] Mersenne random seed 2530616200 Read over the barrier, blob id# [102:1:1:1:5206391:824:0] TEvRange returned collected blob with id# [102:1:1:1:5206391:824:0] Read over the barrier, blob id# [102:1:1:1:5206391:824:0] Read over the barrier, blob id# [102:1:1:1:5206391:824:0] Read over the barrier, blob id# [100:1:1: ... 
5.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 16 key# [16 0 14 0 hard] existing barrier# 1:1 new barrier# 0:1 2025-06-25T14:24:57.473939Z 2 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 16 key# [16 0 14 0 hard] existing barrier# 1:1 new barrier# 0:1 2025-06-25T14:24:57.474021Z 3 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 16 key# [16 0 14 0 hard] existing barrier# 1:1 new barrier# 0:1 2025-06-25T14:24:57.474083Z 4 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 16 key# [16 0 14 0 hard] existing barrier# 1:1 new barrier# 0:1 2025-06-25T14:24:57.474143Z 5 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 16 key# [16 0 14 0 hard] existing barrier# 1:1 new barrier# 0:1 2025-06-25T14:24:57.474226Z 7 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 16 key# [16 0 14 0 hard] existing barrier# 1:1 new barrier# 0:1 2025-06-25T14:24:57.474301Z 8 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: incorrect collect cmd: tabletID# 16 key# [16 0 14 0 hard] existing barrier# 1:1 new barrier# 0:1 TEvRange returned collected blob with id# [15:1:1:1:2694683:352:0] TEvRange returned collected blob with id# [15:1:1:1:2694683:352:0] Read over the barrier, blob id# [15:1:2:0:12459178:592:0] TEvRange returned collected blob with id# [17:1:1:1:9464913:493:0] TEvRange returned collected blob with id# [17:1:1:1:14381361:99:0] TEvRange returned collected blob with id# [15:1:1:1:2694683:352:0] TEvRange returned collected blob with id# [15:1:1:2:3551935:660:0] TEvRange returned collected blob with id# [17:1:1:1:9464913:493:0] TEvRange returned collected blob with id# [17:1:1:1:14381361:99:0] TEvRange returned collected blob with id# [17:4:4:2:445612:390:0] Read over the barrier, blob id# [15:1:1:1:2694683:352:0] Read over the barrier, blob id# [15:1:2:0:12459178:592:0] Read over the barrier, blob id# [15:1:1:1:2694683:352:0] Read over the barrier, blob id# [15:1:1:2:3551935:660:0] TEvRange returned collected blob with id# [15:1:1:2:3551935:660:0] TEvRange returned collected blob with id# [15:1:1:1:2694683:352:0] Read over the barrier, blob id# [17:1:1:1:9464913:493:0] Read over the barrier, blob id# [17:4:4:2:445612:390:0] Read over the barrier, blob id# [17:4:4:2:445612:390:0] 2025-06-25T14:24:57.816599Z 4 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 8 1 soft] barrier# 1:0 new key# [17 1 23 2 soft] barrier# 0:0 2025-06-25T14:24:57.817185Z 1 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 8 1 soft] barrier# 1:0 new key# [17 1 23 2 soft] barrier# 0:0 2025-06-25T14:24:57.817421Z 2 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 8 1 soft] 
barrier# 1:0 new key# [17 1 23 2 soft] barrier# 0:0 2025-06-25T14:24:57.817506Z 3 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 8 1 soft] barrier# 1:0 new key# [17 1 23 2 soft] barrier# 0:0 2025-06-25T14:24:57.817783Z 5 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 8 1 soft] barrier# 1:0 new key# [17 1 23 2 soft] barrier# 0:0 2025-06-25T14:24:57.817967Z 6 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 8 1 soft] barrier# 1:0 new key# [17 1 23 2 soft] barrier# 0:0 2025-06-25T14:24:57.818057Z 7 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 8 1 soft] barrier# 1:0 new key# [17 1 23 2 soft] barrier# 0:0 2025-06-25T14:24:57.818262Z 8 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 8 1 soft] barrier# 1:0 new key# [17 1 23 2 soft] barrier# 0:0 Read over the barrier, blob id# [17:4:4:2:445612:390:0] Read over the barrier, blob id# [15:1:1:2:3551935:660:0] Read over the barrier, blob id# [15:1:1:1:2694683:352:0] 2025-06-25T14:24:58.140085Z 3 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 20 1 soft] new key# [16 2 20 0 soft] new barrier# 3:0 2025-06-25T14:24:58.141334Z 1 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 20 1 soft] new key# [16 2 20 0 soft] new barrier# 3:0 2025-06-25T14:24:58.141420Z 2 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 20 1 soft] new key# [16 2 20 0 soft] new barrier# 3:0 2025-06-25T14:24:58.141569Z 4 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 20 1 soft] new key# [16 2 20 0 soft] new barrier# 3:0 2025-06-25T14:24:58.141794Z 5 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 20 1 soft] new key# [16 2 20 0 soft] new barrier# 3:0 2025-06-25T14:24:58.141918Z 6 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 20 1 soft] new key# [16 2 20 0 soft] new barrier# 3:0 2025-06-25T14:24:58.141992Z 7 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 20 1 soft] new key# [16 2 20 0 soft] new barrier# 3:0 2025-06-25T14:24:58.142149Z 8 00h00m25.013072s :BS_HULLRECS ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: out-of-order requests: existing key# [16 2 20 1 soft] new key# [16 2 20 0 soft] new barrier# 3:0 Read over the barrier, blob id# [15:1:1:1:2694683:352:0] Read over the barrier, blob id# [15:1:2:0:12459178:592:0] TEvRange returned 
collected blob with id# [17:4:4:2:445612:390:0] Read over the barrier, blob id# [16:4:11:0:11863428:71:0] Read over the barrier, blob id# [15:1:2:0:12459178:592:0] Read over the barrier, blob id# [15:1:2:0:12459178:592:0] Read over the barrier, blob id# [15:1:1:2:3551935:660:0] Read over the barrier, blob id# [15:1:1:2:3551935:660:0] Read over the barrier, blob id# [15:1:2:0:12459178:592:0] Read over the barrier, blob id# [17:4:4:2:445612:390:0] Read over the barrier, blob id# [15:1:1:1:2694683:352:0] Read over the barrier, blob id# [15:1:1:1:2694683:352:0] Read over the barrier, blob id# [16:3:10:0:10751410:791:0] Read over the barrier, blob id# [16:5:14:0:10616902:427:0] Read over the barrier, blob id# [16:4:11:0:11863428:71:0] 2025-06-25T14:24:58.891032Z 6 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 30 5 soft] barrier# 0:2 2025-06-25T14:24:58.891608Z 1 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 30 5 soft] barrier# 0:2 2025-06-25T14:24:58.891921Z 2 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 30 5 soft] barrier# 0:2 2025-06-25T14:24:58.892112Z 3 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 30 5 soft] barrier# 0:2 2025-06-25T14:24:58.892544Z 4 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 30 5 soft] barrier# 0:2 2025-06-25T14:24:58.892627Z 5 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 30 5 soft] barrier# 0:2 2025-06-25T14:24:58.892835Z 7 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 30 5 soft] barrier# 0:2 2025-06-25T14:24:58.893148Z 8 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 30 5 soft] barrier# 0:2 2025-06-25T14:24:59.026525Z 1 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 32 1 soft] barrier# 1:1 2025-06-25T14:24:59.027181Z 2 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 32 1 soft] barrier# 1:1 2025-06-25T14:24:59.027274Z 3 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 32 1 soft] barrier# 1:1 2025-06-25T14:24:59.027358Z 4 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 
1000 VDISK[82000000:_:0:3:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 32 1 soft] barrier# 1:1 2025-06-25T14:24:59.027442Z 5 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 32 1 soft] barrier# 1:1 2025-06-25T14:24:59.027552Z 6 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 32 1 soft] barrier# 1:1 2025-06-25T14:24:59.031383Z 7 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 32 1 soft] barrier# 1:1 2025-06-25T14:24:59.031476Z 8 00h00m25.013072s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# Barriers ValidateGCCmd: decreasing barrier: existing key# [17 1 27 2 soft] barrier# 1:2 new key# [17 1 32 1 soft] barrier# 1:1 Read over the barrier, blob id# [16:4:11:0:11863428:71:0] Read over the barrier, blob id# [17:4:4:2:445612:390:0] TEvRange returned collected blob with id# [15:1:1:1:2694683:352:0] |74.0%| [TS] {RESULT} ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/unittest |74.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/generic/streaming/ydb-tests-fq-generic-streaming >> BSCRestartPDisk::RestartOneByOne [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartOneByOne [GOOD] Test command err: RandomSeed# 536572006144606791 >> BSCRestartPDisk::RestartOneByOneWithReconnects [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/unittest >> BSCRestartPDisk::RestartOneByOneWithReconnects [GOOD] Test command err: RandomSeed# 9873346823583404222 |74.0%| [CC] {default-linux-x86_64, release, asan} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp |74.0%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> test.py::test[solomon-BadDownsamplingDisabled-] [GOOD] >> test.py::test[solomon-BadDownsamplingFill-] |74.0%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} |74.1%| [AR] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a |74.1%| [AR] {RESULT} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a |74.1%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/kqp/executer_actor/kqp_tasks_graph.cpp |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/etcd_proxy/etcd_proxy |74.1%| [LD] {RESULT} $(B)/ydb/apps/etcd_proxy/etcd_proxy |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/etcd_proxy/etcd_proxy |74.1%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |74.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat >> test.py::test[solomon-BadDownsamplingFill-] [GOOD] >> test.py::test[solomon-BadDownsamplingInterval-] >> test.py::test[solomon-BadDownsamplingInterval-] [GOOD] >> test.py::test[solomon-Basic-default.txt] >> test.py::test[solomon-Basic-default.txt] [GOOD] >> test.py::test[solomon-BasicExtractMembers-default.txt] >> BSCReadOnlyPDisk::ReadOnlyOneByOne [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyOneByOne [GOOD] Test command err: RandomSeed# 3547142228693051062 |74.1%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> test.py::test[solomon-BasicExtractMembers-default.txt] [GOOD] >> test.py::test[solomon-Downsampling-default.txt] |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |74.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} |74.1%| [LD] {RESULT} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |74.1%| [LD] {RESULT} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |74.1%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |74.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |74.1%| [LD] {RESULT} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |74.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |74.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest |74.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest |74.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldSuccess [GOOD] |74.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest >> OperationMapping::IndexBuildCanceled |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest >> OperationMapping::IndexBuildCanceled [GOOD] >> OperationMapping::IndexBuildRejected [GOOD] >> SplitPathTests::WithoutDatabaseShouldSuccess [GOOD] |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldSuccess [GOOD] |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest >> OperationMapping::IndexBuildSuccess |74.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> OperationMapping::IndexBuildSuccess [GOOD] >> SplitPathTests::WithDatabaseShouldFail [GOOD] |74.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest |74.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> TGRpcConsoleTest::SimpleConfigTest [GOOD] |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildCanceled [GOOD] |74.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithoutDatabaseShouldSuccess 
[GOOD] |74.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildRejected [GOOD] |74.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildSuccess [GOOD] |74.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest |74.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldFail [GOOD] |74.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/dynamic_config/ut/unittest >> TGRpcConsoleTest::SimpleConfigTest [GOOD] >> JsonChangeRecord::DataChangeVersion [GOOD] |74.3%| [TA] $(B)/ydb/core/grpc_services/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> JsonChangeRecord::DataChange [GOOD] |74.3%| [TA] $(B)/ydb/services/dynamic_config/ut/test-results/unittest/{meta.json ... results_accumulator.log} |74.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> JsonChangeRecord::DataChangeVersion [GOOD] |74.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> JsonChangeRecord::DataChange [GOOD] >> JsonChangeRecord::Heartbeat [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateQuery |74.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_json_change_record/unittest >> JsonChangeRecord::Heartbeat [GOOD] |74.3%| [TA] $(B)/ydb/core/tx/replication/service/ut_json_change_record/test-results/unittest/{meta.json ... results_accumulator.log} >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListQueries >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeQuery >> test.py::test[solomon-Downsampling-default.txt] [GOOD] >> test.py::test[solomon-DownsamplingValidSettings-default.txt] |74.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |74.3%| [TA] {RESULT} $(B)/ydb/core/grpc_services/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendGetQueryStatus |74.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |74.3%| [TA] {RESULT} $(B)/ydb/services/dynamic_config/ut/test-results/unittest/{meta.json ... results_accumulator.log} |74.3%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_json_change_record/test-results/unittest/{meta.json ... 
results_accumulator.log} |74.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyQuery |74.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteQuery |74.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |74.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |74.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |74.3%| [LD] {RESULT} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |74.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |74.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |74.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |74.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendControlQuery >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendGetResultData >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListJobs >> TSequence::CreateTableWithDefaultFromSequence >> TGRpcRateLimiterTest::CreateResource >> DataCleanup::ForceDataCleanup >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListJobs [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeJob >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateConnection >> test.py::test[solomon-DownsamplingValidSettings-default.txt] [GOOD] >> test.py::test[solomon-HistResponse-default.txt] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateConnectionWithServiceAccount |74.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |74.3%| [LD] {RESULT} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |74.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListConnections |74.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/ydbd/ydbd |74.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydbd/ydbd |74.3%| [LD] {RESULT} $(B)/ydb/apps/ydbd/ydbd |74.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |74.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |74.3%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeConnection |74.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |74.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |74.4%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyConnection |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::FoldersStyleDeduction [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyConnectionWithServiceAccount >> ColumnShardTiers::DSConfigs |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::Basic [GOOD] >> S3SettingsConversion::Port [GOOD] |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::TTLUsage |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TGRpcRateLimiterTest::CreateResource [GOOD] >> TGRpcRateLimiterTest::UpdateResource |74.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::FoldersStyleDeduction [GOOD] |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteConnection >> ColumnShardTiers::DSConfigsStub >> ColumnShardTiers::DSConfigsWithQueryServiceDdl |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, 
release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::Port [GOOD] |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::Basic [GOOD] |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::StyleDeduction [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendTestConnection >> S3SettingsConversion::FoldersStrictStyle [GOOD] |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::StyleDeduction [GOOD] |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::FoldersStrictStyle [GOOD] >> DataCleanup::ForceDataCleanup [GOOD] >> DataCleanup::ForceDataCleanupWithoutCompaction |74.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendTestConnectionWithServiceAccount >> TSequence::CreateTableWithDefaultFromSequence [GOOD] >> TSequence::SequencesIndex |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest |74.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |74.6%| [LD] {RESULT} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |74.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest |74.6%| [TM] 
{asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendTestConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateBinding >> StreamCreator::WithResolvedTimestamps >> StreamCreator::Basic >> test.py::test[solomon-HistResponse-default.txt] [GOOD] >> test.py::test[solomon-InvalidProject-] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListBindings >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendListBindings [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeBinding >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1_UNIQUE_SYNC-pk_types17-all_types17-index17-DyNumber-UNIQUE-SYNC] >> DataCleanup::ForceDataCleanupWithoutCompaction [GOOD] >> DataCleanup::MultipleDataCleanups |74.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |74.6%| [LD] {RESULT} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |74.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut >> test_select.py::TestDML::test_select[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__SYNC-pk_types12-all_types12-index12-DyNumber--SYNC] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyBinding >> TSequence::SequencesIndex [GOOD] >> TSequence::CreateTableWithDefaultFromSequenceFromSelect >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteBinding >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v0] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] >> TGRpcRateLimiterTest::UpdateResource [GOOD] >> TGRpcRateLimiterTest::DropResource >> TControlPlaneProxyCheckNegativePermissionsFailed::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateQuery >> DataCleanup::MultipleDataCleanups [GOOD] >> DataCleanup::MultipleDataCleanupsWithOldGenerations |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListQueries >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v0] |74.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |74.6%| [LD] {RESULT} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |74.6%| [LD] 
{BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> StreamCreator::WithResolvedTimestamps [GOOD] |74.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeQuery >> StreamCreator::Basic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> StreamCreator::WithResolvedTimestamps [GOOD] Test command err: 2025-06-25T14:26:09.916358Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893182307311863:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:26:09.917275Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c88/r3tmp/tmpuHXULI/pdisk_1.dat 2025-06-25T14:26:13.586647Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:26:14.916557Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893182307311863:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:26:14.916609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:26:15.435294Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:16.284065Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:16.703623Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:17.716761Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:17.780589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:18.617101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:26:18.617227Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:26:18.621447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:26:18.661114Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:18.665779Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893182307311844:2080] 1750861569893060 != 1750861569893063 TClient is connected to server localhost:8696 TServer::EnableGrpc on GrpcPort 29045, node 1 2025-06-25T14:26:23.446100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:26:23.446117Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:26:23.446366Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:26:23.446447Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8696 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:26:32.383633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:26:32.997613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:26:33.623675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:26:33.623690Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861593782 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750861592592 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861593782 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-25T14:26:34.697753Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:26:34.698077Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:26:34.698087Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:26:34.704413Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:26:40.180509Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750861593782, tx_id: 281474976710658 } } } 2025-06-25T14:26:40.180822Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:26:40.182207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:26:40.183396Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-25T14:26:40.183409Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-25T14:26:40.228733Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-25T14:26:40.228758Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-25T14:26:40.231718Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:57: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::NController::TEvPrivate::TEvAllowCreateStream 
2025-06-25T14:26:40.356840Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][1:7519893315451299006:2331] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:5:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-25T14:26:40.406657Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:85: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvAlterTableResponse { Result: { status: SUCCESS, issues: } } 2025-06-25T14:26:40.406678Z node 1 :REPLICATION_CONTROLLER INFO: stream_creator.cpp:100: [StreamCreator][rid 1][tid 1] Success: issues# 2025-06-25T14:26:40.431196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) TClient::Ls request: /Root/Table 2025-06-25T14:26:40.459473Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:137: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvAlterTopicResponse { Result: { status: SUCCESS, issues: } } 2025-06-25T14:26:40.459496Z node 1 :REPLICATION_CONTROLLER INFO: stream_creator.cpp:155: [StreamCreator][rid 1][tid 1] Success: issues# TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861593782 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyC... 
(TRUNCATED) >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] |74.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |74.6%| [LD] {RESULT} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |74.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut >> TSequence::CreateTableWithDefaultFromSequenceFromSelect [GOOD] >> TSequence::CreateTableWithDefaultFromSequenceBadRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> StreamCreator::Basic [GOOD] Test command err: 2025-06-25T14:26:08.561969Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893175649834788:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:26:08.562260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:26:12.204514Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c7d/r3tmp/tmp3haHTx/pdisk_1.dat 2025-06-25T14:26:13.489640Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893175649834788:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:26:13.489956Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:26:14.241292Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:14.888387Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:15.284572Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:15.967931Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:17.808553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:18.188546Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:18.659148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:26:18.659785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Disconnected -> Connecting 2025-06-25T14:26:18.693822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:26:18.774015Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:18.776584Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893171354867392:2080] 1750861568420599 != 1750861568420602 TClient is connected to server localhost:25877 TServer::EnableGrpc on GrpcPort 23124, node 1 2025-06-25T14:26:26.252512Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:26:26.252529Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:26:26.252548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:26:26.252630Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25877 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:26:32.744121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:26:32.874846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:26:32.874863Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded waiting... 2025-06-25T14:26:35.634968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:26:36.064948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861596729 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750861592991 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861596729 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... (TRUNCATED) 2025-06-25T14:26:37.602101Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:26:37.602690Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:26:37.602698Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:26:37.604435Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:26:40.274547Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: 
[DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750861596729, tx_id: 281474976710658 } } } 2025-06-25T14:26:40.275544Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:26:40.283391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:26:40.286786Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-25T14:26:40.286799Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-25T14:26:40.424244Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-25T14:26:40.424268Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-25T14:26:40.432687Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:57: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::NController::TEvPrivate::TEvAllowCreateStream 2025-06-25T14:26:40.703730Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][1:7519893313088789146:2330] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:5:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-25T14:26:40.752796Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:85: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvAlterTableResponse { Result: { status: SUCCESS, issues: } } 2025-06-25T14:26:40.752819Z node 1 :REPLICATION_CONTROLLER INFO: stream_creator.cpp:100: [StreamCreator][rid 1][tid 1] Success: issues# 2025-06-25T14:26:40.830702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T14:26:41.870454Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:137: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvAlterTopicResponse { Result: { status: SUCCESS, issues: } } 2025-06-25T14:26:41.870485Z node 1 :REPLICATION_CONTROLLER INFO: stream_creator.cpp:155: [StreamCreator][rid 1][tid 1] Success: issues# TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 
1750861596729 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyC... (TRUNCATED) >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendGetQueryStatus |74.7%| [TA] $(B)/ydb/core/tx/replication/controller/ut_stream_creator/test-results/unittest/{meta.json ... results_accumulator.log} >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyQuery >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteQuery >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendControlQuery |74.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |74.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |74.7%| [TA] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/test-results/unittest/{meta.json ... results_accumulator.log} |74.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |74.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |74.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |74.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning >> DataCleanup::MultipleDataCleanupsWithOldGenerations [GOOD] >> DataCleanup::ForceDataCleanupWithRestart >> TSequence::CreateTableWithDefaultFromSequenceBadRequest [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] >> GroupWriteTest::WriteHardRateDispatcher >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendGetResultData >> test.py::test[solomon-InvalidProject-] [GOOD] >> test.py::test[solomon-LabelColumns-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_sequence/unittest >> TSequence::CreateTableWithDefaultFromSequenceBadRequest [GOOD] Test command err: 2025-06-25T14:25:54.365357Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:25:54.365511Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:25:54.365570Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000845/r3tmp/tmpkgDeGm/pdisk_1.dat 2025-06-25T14:25:54.698761Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:25:54.702044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:25:54.735073Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:25:54.739148Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861551416844 != 1750861551416848 2025-06-25T14:25:54.786687Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:25:54.786822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:25:54.798224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:25:54.885400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:25:55.304749Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:727:2599], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:25:55.304888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:737:2604], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:25:55.304963Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:25:55.309983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:25:55.364437Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:25:55.547968Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:741:2607], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:25:55.868720Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:811:2646] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:25:58.644428Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykqp9k62dpw577pnnw8bm5k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTlmZjc5OWQtNzI5MmFmNWUtZmE4ZTk2ZC1hZWJlZDJhNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:25:59.494792Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykqpd767zwsw669rnjec962, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjA5ZmI3NjQtMmNlYWZkMmUtNThjYzI2NmEtNWEzNWU2MTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:26:00.558317Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykqpdw2fpm1wferzdmjbe5x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzQ0NGY1N2UtZDRmZGVlZTAtYTk3YzVmZTEtZjI3MmFiNjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:26:01.354603Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykqpeve1xh0hza1g0sz5n0e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODk0ZjA4ODYtZDVlOWZhMTItNTI5ZGE5YmMtNTlhN2UyMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { int64_value: 1 } items { uint32_value: 1 } }, { items { int64_value: 2 } items { uint32_value: 2 } }, { items { int64_value: 3 } items { uint32_value: 3 } }, { items { int64_value: 4 } items { uint32_value: 4 } }, { items { int64_value: 5 } items { uint32_value: 5 } }, { items { int64_value: 6 } items { uint32_value: 6 } }, { items { int64_value: 7 } items { uint32_value: 7 } }, { items { int64_value: 8 } items { uint32_value: 8 } }, { items { int64_value: 9 } items { uint32_value: 9 } } 2025-06-25T14:26:10.358529Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:26:10.359184Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:10.360269Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000845/r3tmp/tmpRI2lfB/pdisk_1.dat 2025-06-25T14:26:12.226168Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T14:26:12.248186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:26:12.382173Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:12.393307Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750861562551031 != 1750861562551035 2025-06-25T14:26:12.508374Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:26:12.508941Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:26:12.523594Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:26:12.760101Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:26:14.594653Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:775:2636], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:14.601188Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:785:2641], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:14.602115Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:14.672278Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:26:14.891588Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:26:15.169148Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:789:2644], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:26:15.221804Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:860:2684] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:26:16.770013Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykqpwccdn81xzpnxg7zn8jf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmI1NTAwMmYtOTRiYjg5YTYtMmRkNGUwNjMtY2JhMzhhNjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:26:16.886590Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykqpwccdn81xzpnxg7zn8jf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmI1NTAwMmYtOTRiYjg5YTYtMmRkNGUwNjMtY2JhMzhhNjA=, CurrentExecutionId: ... /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:26:39.074360Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:39.080990Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:33:2080] 1750861584046471 != 1750861584046474 2025-06-25T14:26:39.140191Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:26:39.144913Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:26:39.157774Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:26:39.360687Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:26:40.774636Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:727:2599], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:40.774750Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:737:2604], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:40.774819Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:40.824957Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:26:41.043966Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:26:41.257693Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:741:2607], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:26:41.304753Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:811:2646] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:26:42.021770Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykqqp05d0vjpj0nsq6s9h62, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzMxODZiZjAtZTk3OGNhZjYtZjNmNDRlOTYtODU0ZjM2YWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:26:43.157459Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykqqqd70bvcd8z0807dng9x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDNiMmVlZTEtNDcwZmVkZjItNDRmYWZhM2MtNWFmNmViOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { int64_value: 1 } items { uint32_value: 303 } } 2025-06-25T14:26:44.003438Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykqqrgw64jzgp6fq6xtgm1k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjNiY2Q0ZTgtNTY4MzAwODUtOGM5OTM4ZWMtM2IyMjcxNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:26:44.592707Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykqqsap52csmcasyn7sw74f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OWNjY2QzNDEtNzVkZTY4MTktODNiNDljZTctNzBiMjQ2NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { int64_value: 1 } items { uint32_value: 303 } }, { items { int64_value: 2 } items { uint32_value: 303 } } 2025-06-25T14:26:45.269706Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykqqstd7x2k9yedhp80fwpc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjNkZDJkZS0yZjYyZjFhZi00ZmJmNGNmMy1mNTllMWU2ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:26:45.903293Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykqqtg1fh3m7g6g2ce9s074, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2ZhMzQxZi1hNGEwNDg4ZC0xNDM4ODQ5Ni02MTQ3NWQ1NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { int64_value: 1 } items { uint32_value: 303 } }, { items { int64_value: 2 } items { uint32_value: 303 } }, { items { int64_value: 3 } items { uint32_value: 303 } } 2025-06-25T14:27:00.771315Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:27:00.771620Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:00.771771Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000845/r3tmp/tmpGZ5Oo5/pdisk_1.dat 2025-06-25T14:27:01.493767Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-25T14:27:01.527952Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:01.692518Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:01.700453Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:33:2080] 1750861611259936 != 1750861611259940 2025-06-25T14:27:01.834759Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:01.835382Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:01.862355Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:02.000570Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:03.457998Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:727:2599], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:03.458918Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:737:2604], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:03.459263Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:03.546902Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:27:03.769736Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:04.059924Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:741:2607], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:27:04.149902Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:811:2646] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:27:04.723697Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:821:2655], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:1:98: Error: Key columns are not specified., code: 2017
: Error: Execution, code: 1060
:1:98: Error: Key columns are not specified., code: 2017 2025-06-25T14:27:04.737573Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=ZTU2ODAwMTMtN2MxZWY4NDgtMWExNzIyNDMtZTYyZDdkYw==, ActorId: [4:724:2596], ActorState: ExecuteState, TraceId: 01jykqrc442xhps20fd8291p0k, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T14:27:04.965210Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:843:2671], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:1:103: Error: Key columns are not specified., code: 2017
: Error: Execution, code: 1060
:1:103: Error: Key columns are not specified., code: 2017 2025-06-25T14:27:04.974516Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=NDVkY2JmMTEtMjIyOTZkN2QtYWU3N2IyYi1kZTAzZDBlOQ==, ActorId: [4:835:2663], ActorState: ExecuteState, TraceId: 01jykqrde6av2vr872ten12c8m, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: >> TGRpcRateLimiterTest::DropResource [GOOD] >> TGRpcRateLimiterTest::DescribeResource >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-fifo] >> YdbSdkSessionsPool1Session::FailTest/0 >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListJobs |74.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |74.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> YdbSdkSessionsPool1Session::FailTest/0 [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListJobs [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeJob >> YdbSdkSessionsPool::WaitQueue/1 >> YdbSdkSessions::CloseSessionAfterDriverDtorWithoutSessionPool ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsStub Test command err: 2025-06-25T14:26:08.512215Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:26:08.513947Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:08.514202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b99/r3tmp/tmpDln0NK/pdisk_1.dat 2025-06-25T14:26:10.507975Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 2913, node 1 TClient is connected to server localhost:21096 2025-06-25T14:26:16.103818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:26:16.384345Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:16.437836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:26:16.438109Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:26:16.438343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:26:16.449133Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:26:16.452798Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861555423337 != 1750861555423341 2025-06-25T14:26:16.534492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:26:16.535892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:26:16.558336Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:26:16.966380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) 2025-06-25T14:26:18.083780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:26:18.085371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:26:18.087558Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:26:18.087945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:26:18.089062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:26:18.089636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:26:18.090206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:26:18.090738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:26:18.091378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:26:18.091708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:26:18.092028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:26:18.273941Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:128;event=start_subscribing_metadata; 2025-06-25T14:26:18.301809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:26:18.302572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:26:18.303634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:26:18.303911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:26:18.313876Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:26:18.313960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:26:18.315000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:26:18.315268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:26:18.315538Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:26:18.316284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:26:18.318418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:26:18.319296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:26:18.326313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:26:18.326817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:26:18.328384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:26:18.328694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:26:18.329964Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:26:18.330304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:26:18.330355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:26:18.334665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:26:18.335187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:26:18.595146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:694:2574];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:26:18.595244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:694:2574];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:26:18.595444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:694:2574];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:26:18.595529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:694:2574];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;d ... fline=manager.cpp:62;event=TEvRefreshSubscriberData;snapshot=secrets; 2025-06-25T14:26:44.051720Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:271;event=update_secrets;tablet=72075186224037888; 2025-06-25T14:26:44.074925Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={}; 2025-06-25T14:26:44.075022Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:62;event=TEvRefreshSubscriberData;snapshot=secrets; 2025-06-25T14:26:44.075048Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:271;event=update_secrets;tablet=72075186224037889; 2025-06-25T14:26:44.075086Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={}; 2025-06-25T14:26:44.075110Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:62;event=TEvRefreshSubscriberData;snapshot=secrets; 2025-06-25T14:26:44.075128Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:271;event=update_secrets;tablet=72075186224037891; 2025-06-25T14:26:44.075147Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={}; 2025-06-25T14:26:44.075166Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:62;event=TEvRefreshSubscriberData;snapshot=secrets; 2025-06-25T14:26:44.075184Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:271;event=update_secrets;tablet=72075186224037890; 2025-06-25T14:26:44.075454Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={}; 2025-06-25T14:26:44.093153Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-25T14:26:44.093538Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:694:2574];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-25T14:26:44.093603Z node 1 :TX_TIERING DEBUG: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:701:2578];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-25T14:26:44.113844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:26:46.054932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:26:47.647434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:26:53.659356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=
: Error: GRpc error: (1): Cancelled on the server side
: Error: Grpc error response on endpoint localhost:2913 ;EXPECTATION=1 VERIFY failed (2025-06-25T14:26:54.114937Z): assertion failed in non-unittest thread with message: assertion failed at ydb/core/testlib/common_helper.cpp:157, auto NKikimr::Tests::NCommon::THelper::StartSchemaRequestTableServiceImpl(const TString &, const bool, const bool)::(anonymous class)::operator()(NThreading::TFuture)::(anonymous class)::operator()(NYdb::TAsyncStatus) const: (expectation == f.GetValueSync().IsSuccess()) library/cpp/testing/unittest/registar.cpp:36 RaiseError(): requirement UnittestThread failed NPrivate::InternalPanicImpl(int, char const*, char const*, int, int, int, TBasicStringBuf>, char const*, unsigned long)+873 (0x19731AB9) NPrivate::Panic(NPrivate::TStaticBuf const&, int, char const*, char const*, char const*, ...)+571 (0x1972014B) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+1218 (0x19BA9D82) ??+0 (0x36B2BA31) ??+0 (0x36B2CCD3) NThreading::NImpl::TFutureState::RunCallbacks()+444 (0x20F9A79C) void NThreading::NImpl::TFutureState::SetValue(NYdb::Dev::TStatus&&)+521 (0x20F9A4C9) NThreading::TFuture NYdb::Dev::NSessionPool::InjectSessionStatusInterception(std::__y1::shared_ptr, NThreading::TFuture, bool, TDuration, std::__y1::function)::'lambda'(NThreading::TFuture)::operator()(NThreading::TFuture)+857 (0x34F713F9) std::__y1::__function::__func NYdb::Dev::NSessionPool::InjectSessionStatusInterception(std::__y1::shared_ptr, NThreading::TFuture, bool, TDuration, std::__y1::function)::'lambda'(NThreading::TFuture), std::__y1::allocator NYdb::Dev::NSessionPool::InjectSessionStatusInterception(std::__y1::shared_ptr, NThreading::TFuture, bool, TDuration, std::__y1::function)::'lambda'(NThreading::TFuture)>, void (NThreading::TFuture const&)>::operator()(NThreading::TFuture const&)+67 (0x34F72EE3) NThreading::NImpl::TFutureState::RunCallbacks()+444 (0x20F9A79C) void NThreading::NImpl::TFutureState::SetValue(NYdb::Dev::TStatus&&)+521 (0x20F9A4C9) decltype(std::declval()(std::declval(), std::declval())) std::__y1::__invoke[abi:fe200000] NYdb::Dev::TClientImplCommon::RunSimple(Ydb::Table::ExecuteSchemeQueryRequest&&, NYdbGrpc::Dev::TSimpleRequestProcessor::TAsyncRequest, NYdb::Dev::TRpcRequestSettings const&)::'lambda'(google::protobuf::Any*, NYdb::Dev::TPlainStatus)&, google::protobuf::Any*, NYdb::Dev::TPlainStatus>(Ydb::Table::V1::TableService&&, google::protobuf::Any*&&, NYdb::Dev::TPlainStatus&&)+280 (0x35186128) void NYdb::Dev::TGRpcConnectionsImpl::RunDeferred(NYdb::Dev::TGRpcConnectionsImpl::TRequestWrapper&&, std::__y1::function&&, NYdbGrpc::Dev::TSimpleRequestProcessor::TAsyncRequest, std::__y1::shared_ptr, TDuration, NYdb::Dev::TRpcRequestSettings const&, std::__y1::shared_ptr)::'lambda'(Ydb::Operations::Operation*, NYdb::Dev::TPlainStatus)::operator()(Ydb::Operations::Operation*, NYdb::Dev::TPlainStatus)+576 (0x351857D0) decltype(std::declval()(std::declval(), std::declval())) std::__y1::__invoke[abi:fe200000](NYdb::Dev::TGRpcConnectionsImpl::TRequestWrapper&&, std::__y1::function&&, NYdbGrpc::Dev::TSimpleRequestProcessor::TAsyncRequest, std::__y1::shared_ptr, TDuration, NYdb::Dev::TRpcRequestSettings const&, std::__y1::shared_ptr)::'lambda'(Ydb::Operations::Operation*, NYdb::Dev::TPlainStatus)&, Ydb::Operations::Operation*, NYdb::Dev::TPlainStatus>(Ydb::Table::V1::TableService&&, Ydb::Operations::Operation*&&, NYdb::D+218 (0x351853CA) void NYdb::Dev::TGRpcConnectionsImpl::RunDeferred(NYdb::Dev::TGRpcConnectionsImpl::TRequestWrapper&&, std::__y1::function&&, 
NYdbGrpc::Dev::TSimpleRequestProcessor::TAsyncRequest, std::__y1::shared_ptr, TDuration, NYdb::Dev::TRpcRequestSettings const&, bool, std::__y1::shared_ptr)::'lambda'(Ydb::Table::ExecuteSchemeQueryResponse*, NYdb::Dev::TPlainStatus)::operator()(Ydb::Table::ExecuteSchemeQueryResponse*, NYdb::Dev::TPlainStatus)+1075 (0x35184933) decltype(std::declval()(std::declval(), std::declval())) std::__y1::__invoke[abi:fe200000](NYdb::Dev::TGRpcConnectionsImpl::TRequestWrapper&&, std::__y1::function&&, NYdbGrpc::Dev::TSimpleRequestProcessor::TAsyncRequest, std::__y1::shared_ptr, TDuration, NYdb::Dev::TRpcRequestSettings const&, bool, std::__y1::shared_ptr)::'lambda'(Ydb::Table::ExecuteSchemeQueryResponse*, NYdb::Dev::TPlainStatus)&, Ydb::Table::ExecuteSchemeQueryResponse*, NYdb::Dev::TPlainStatus>(Ydb::Table::V1::TableSe+218 (0x3518433A) NYdb::Dev::TGRpcErrorResponse::Process(void*)+1678 (0x3517E88E) TAdaptiveThreadPool::TImpl::TThread::DoExecute()+966 (0x1A84B646) ??+0 (0x1A847E4D) ??+0 (0x19735F05) ??+0 (0x193EA4C9) ??+0 (0x7F58BE573AC3) ??+0 (0x7F58BE605850) >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateConnection >> GroupWriteTest::TwoTables >> YdbSdkSessionsPool::WaitQueue/0 >> YdbSdkSessionsPool::WaitQueue/1 [GOOD] >> ColumnShardTiers::DSConfigsWithQueryServiceDdl [FAIL] >> YdbSdkSessionsPool::PeriodicTask/0 |74.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |74.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |74.7%| [TM] {RESULT} ydb/core/tx/datashard/ut_sequence/unittest |74.7%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigs Test command err: 2025-06-25T14:26:04.461430Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:26:04.462243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:04.462742Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a71/r3tmp/tmpbK9jaD/pdisk_1.dat 2025-06-25T14:26:06.324882Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 19661, node 1 TClient is connected to server localhost:3304 2025-06-25T14:26:10.269394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:26:10.451799Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:10.499038Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:26:10.499526Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:26:10.499743Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:26:10.501888Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:26:10.502865Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861554738730 != 1750861554738734 2025-06-25T14:26:10.595893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:26:10.598916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:26:10.617022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:26:11.014058Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 2025-06-25T14:26:26.566472Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:720:2598], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:26.567645Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:26.657718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:26:29.496210Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:843:2682], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:29.496946Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:29.498326Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:848:2687], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:29.611584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:26:29.891603Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:850:2689], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:26:32.363666Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:942:2752] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:26:37.487368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:26:38.483610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:26:40.588564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:26:43.188050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:26:45.074196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:26:51.375980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=
: Error: GRpc error: (1): Cancelled on the server side
: Error: Grpc error response on endpoint localhost:19661 ;EXPECTATION=1 VERIFY failed (2025-06-25T14:26:52.652695Z): assertion failed in non-unittest thread with message: assertion failed at ydb/core/testlib/common_helper.cpp:157, auto NKikimr::Tests::NCommon::THelper::StartSchemaRequestTableServiceImpl(const TString &, const bool, const bool)::(anonymous class)::operator()(NThreading::TFuture)::(anonymous class)::operator()(NYdb::TAsyncStatus) const: (expectation == f.GetValueSync().IsSuccess()) library/cpp/testing/unittest/registar.cpp:36 RaiseError(): requirement UnittestThread failed NPrivate::InternalPanicImpl(int, char const*, char const*, int, int, int, TBasicStringBuf>, char const*, unsigned long)+873 (0x19731AB9) NPrivate::Panic(NPrivate::TStaticBuf const&, int, char const*, char const*, char const*, ...)+571 (0x1972014B) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+1218 (0x19BA9D82) ??+0 (0x36B2BA31) ??+0 (0x36B2CCD3) NThreading::NImpl::TFutureState::RunCallbacks()+444 (0x20F9A79C) void NThreading::NImpl::TFutureState::SetValue(NYdb::Dev::TStatus&&)+521 (0x20F9A4C9) NThreading::TFuture NYdb::Dev::NSessionPool::InjectSessionStatusInterception(std::__y1::shared_ptr, NThreading::TFuture, bool, TDuration, std::__y1::function)::'lambda'(NThreading::TFuture)::operator()(NThreading::TFuture)+857 (0x34F713F9) std::__y1::__function::__func NYdb::Dev::NSessionPool::InjectSessionStatusInterception(std::__y1::shared_ptr, NThreading::TFuture, bool, TDuration, std::__y1::function)::'lambda'(NThreading::TFuture), std::__y1::allocator NYdb::Dev::NSessionPool::InjectSessionStatusInterception(std::__y1::shared_ptr, NThreading::TFuture, bool, TDuration, std::__y1::function)::'lambda'(NThreading::TFuture)>, void (NThreading::TFuture const&)>::operator()(NThreading::TFuture const&)+67 (0x34F72EE3) NThreading::NImpl::TFutureState::RunCallbacks()+444 (0x20F9A79C) void NThreading::NImpl::TFutureState::SetValue(NYdb::Dev::TStatus&&)+521 (0x20F9A4C9) decltype(std::declval()(std::declval(), std::declval())) std::__y1::__invoke[abi:fe200000] NYdb::Dev::TClientImplCommon::RunSimple(Ydb::Table::ExecuteSchemeQueryRequest&&, NYdbGrpc::Dev::TSimpleRequestProcessor::TAsyncRequest, NYdb::Dev::TRpcRequestSettings const&)::'lambda'(google::protobuf::Any*, NYdb::Dev::TPlainStatus)&, google::protobuf::Any*, NYdb::Dev::TPlainStatus>(Ydb::Table::V1::TableService&&, google::protobuf::Any*&&, NYdb::Dev::TPlainStatus&&)+280 (0x35186128) void NYdb::Dev::TGRpcConnectionsImpl::RunDeferred(NYdb::Dev::TGRpcConnectionsImpl::TRequestWrapper&&, std::__y1::function&&, NYdbGrpc::Dev::TSimpleRequestProcessor::TAsyncRequest, std::__y1::shared_ptr, TDuration, NYdb::Dev::TRpcRequestSettings const&, std::__y1::shared_ptr)::'lambda'(Ydb::Operations::Operation*, NYdb::Dev::TPlainStatus)::operator()(Ydb::Operations::Operation*, NYdb::Dev::TPlainStatus)+576 (0x351857D0) decltype(std::declval()(std::declval(), std::declval())) std::__y1::__invoke[abi:fe200000](NYdb::Dev::TGRpcConnectionsImpl::TRequestWrapper&&, std::__y1::function&&, NYdbGrpc::Dev::TSimpleRequestProcessor::TAsyncRequest, std::__y1::shared_ptr, TDuration, NYdb::Dev::TRpcRequestSettings const&, std::__y1::shared_ptr)::'lambda'(Ydb::Operations::Operation*, NYdb::Dev::TPlainStatus)&, Ydb::Operations::Operation*, NYdb::Dev::TPlainStatus>(Ydb::Table::V1::TableService&&, Ydb::Operations::Operation*&&, NYdb::D+218 (0x351853CA) void NYdb::Dev::TGRpcConnectionsImpl::RunDeferred(NYdb::Dev::TGRpcConnectionsImpl::TRequestWrapper&&, std::__y1::function&&, 
NYdbGrpc::Dev::TSimpleRequestProcessor::TAsyncRequest, std::__y1::shared_ptr, TDuration, NYdb::Dev::TRpcRequestSettings const&, bool, std::__y1::shared_ptr)::'lambda'(Ydb::Table::ExecuteSchemeQueryResponse*, NYdb::Dev::TPlainStatus)::operator()(Ydb::Table::ExecuteSchemeQueryResponse*, NYdb::Dev::TPlainStatus)+1075 (0x35184933) decltype(std::declval()(std::declval(), std::declval())) std::__y1::__invoke[abi:fe200000](NYdb::Dev::TGRpcConnectionsImpl::TRequestWrapper&&, std::__y1::function&&, NYdbGrpc::Dev::TSimpleRequestProcessor::TAsyncRequest, std::__y1::shared_ptr, TDuration, NYdb::Dev::TRpcRequestSettings const&, bool, std::__y1::shared_ptr)::'lambda'(Ydb::Table::ExecuteSchemeQueryResponse*, NYdb::Dev::TPlainStatus)&, Ydb::Table::ExecuteSchemeQueryResponse*, NYdb::Dev::TPlainStatus>(Ydb::Table::V1::TableSe+218 (0x3518433A) NYdb::Dev::TGRpcErrorResponse::Process(void*)+1678 (0x3517E88E) TAdaptiveThreadPool::TImpl::TThread::DoExecute()+966 (0x1A84B646) ??+0 (0x1A847E4D) ??+0 (0x19735F05) ??+0 (0x193EA4C9) ??+0 (0x7F9143017AC3) ??+0 (0x7F91430A9850) >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateConnectionWithServiceAccount >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncTableClient >> YdbSdkSessions::TestActiveSessionCountAfterBadSession >> YdbSdkSessionsPool::WaitQueue/0 [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListConnections >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeConnection >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::HandlesTimeout >> YdbSdkSessions::TestActiveSessionCountAfterBadSession [GOOD] >> YdbSdkSessions::SessionsServerLimitWithSessionPool [SKIPPED] >> YdbSdkSessionsPool1Session::RunSmallPlan/0 >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyConnection >> YdbSdkSessionsPool1Session::RunSmallPlan/0 [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyConnectionWithServiceAccount >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest |74.7%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::FailTest/0 [GOOD] |74.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |74.7%| [LD] {RESULT} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |74.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut >> YdbSdkSessions::TestMultipleSessions >> DataCleanup::ForceDataCleanupWithRestart [GOOD] >> DataCleanup::OutReadSetsCleanedAfterCopyTable >> YdbSdkSessionsPool::StressTestSync/1 >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteConnection >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-fifo] >> 
TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest >> YdbSdkSessions::TestSessionPool >> YdbSdkSessionsPool::StressTestAsync/0 >> YdbSdkSessions::TestMultipleSessions [GOOD] >> YdbSdkSessions::TestActiveSessionCountAfterTransportError >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendTestConnection >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::SuccessfullyReplies >> YdbSdkSessionsPool::StressTestSync/0 >> YdbSdkSessions::CloseSessionWithSessionPoolExplicitDriverStopOnly |74.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |74.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |74.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendTestConnectionWithServiceAccount >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryServiceStreamCall [SKIPPED] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesTimeout >> TGRpcRateLimiterTest::DescribeResource [GOOD] >> TGRpcRateLimiterTest::ListResources >> YdbSdkSessions::MultiThreadSync >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailesOnNotATopic >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryService [SKIPPED] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::SuccessfullyReplies [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendTestConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateBinding >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::SuccessfullyPassesResponsesFromTablets |74.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut >> YdbSdkSessions::MultiThreadSync [GOOD] |74.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |74.8%| [LD] {RESULT} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut >> YdbSdkSessions::SessionsServerLimit [SKIPPED] >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsTableClient ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::SuccessfullyReplies [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailesOnNotATopic [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> 
TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailesOnNotATopic >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListBindings ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::SessionsServerLimitWithSessionPool [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:588: Enable after accepting a pull request with merging configs >> YdbSdkSessionsPool1Session::CustomPlan/0 >> YdbSdkSessions::CloseSessionAfterDriverDtorWithoutSessionPool [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit >> Donor::ConsistentWritesWhenSwitchingToDonorMode [GOOD] >> YdbSdkSessions::TestSessionPool [GOOD] |74.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::RunSmallPlan/0 [GOOD] |74.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::WaitQueue/1 [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendListBindings [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeBinding >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedPartition >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailesOnNotATopic [GOOD] >> YdbSdkSessionsPool1Session::GetSession/0 >> test.py::test[solomon-LabelColumns-default.txt] [GOOD] >> test.py::test[solomon-Subquery-default.txt] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyBinding |74.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::WaitQueue/0 [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] >> YdbSdkSessionsPool1Session::GetSession/0 [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnFailedGetAllTopicsRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailesOnNotATopic [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } 2025-06-25T14:27:35.324990Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:27:35.327810Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:27:35.327995Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928037] doesn't have tx info 2025-06-25T14:27:35.328029Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:27:35.328053Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-25T14:27:35.328081Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:27:35.328118Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:27:35.328174Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-25T14:27:35.328642Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:265:2255], now have 1 active actors on pipe 2025-06-25T14:27:35.328722Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:27:35.344226Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T14:27:35.350014Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T14:27:35.350174Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:27:35.351116Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928037] Config applied version 1 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T14:27:35.351273Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:27:35.351678Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:27:35.351966Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:273:2261] 2025-06-25T14:27:35.354117Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-06-25T14:27:35.354170Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:273:2261] 2025-06-25T14:27:35.354206Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:27:35.354245Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:27:35.354502Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:27:35.354978Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:276:2263], now have 1 active actors on pipe 2025-06-25T14:27:35.404453Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:27:35.410492Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928138] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:27:35.410753Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928138] doesn't have tx info 2025-06-25T14:27:35.410790Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:27:35.410817Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-25T14:27:35.410845Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:27:35.410881Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:27:35.410917Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-25T14:27:35.411471Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [2:408:2361], now have 1 active actors on pipe 2025-06-25T14:27:35.411551Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:27:35.411669Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928138] Config update version 2(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T14:27:35.413139Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T14:27:35.413228Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:27:35.413745Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: 
[PQ: 72057594037928138] Config applied version 2 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T14:27:35.413827Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:27:35.414049Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:27:35.414183Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [2:416:2367] 2025-06-25T14:27:35.415327Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 2025-06-25T14:27:35.415366Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [2:416:2367] 2025-06-25T14:27:35.415400Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:27:35.415430Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-25T14:27:35.415582Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928138, Partition: 1, State: StateIdle] no data for compaction 2025-06-25T14:27:35.415899Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [2:419:2369], now have 1 active actors on pipe 2025-06-25T14:27:35.429815Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:27:35.432957Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:27:35.433224Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T14:27:35.433268Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:27:35.433301Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-25T14:27:35.433339Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:27:35.433380Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:27:35.433431Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T14:27:35.434005Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [2:468:2406], now have 1 active actors on pipe 2025-06-25T14:27:35.434095Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:27:35.434257Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928139] Config update version 3(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T14:27:35.436488Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T14:27:35.436609Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:27:35.437366Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928139] Config applied version 3 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T14:27:35.437469Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:27:35.437771Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:27:35.438016Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:476:2412] 2025-06-25T14:27:35.439905Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
2025-06-25T14:27:35.439977Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:476:2412] 2025-06-25T14:27:35.440029Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:27:35.440077Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-25T14:27:35.440382Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T14:27:35.440867Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [2:479:2414], now have 1 active actors on pipe REQUEST MetaRequest { CmdGetReadSessionsInfo { ClientId: "client_id" Topic: "rt3.dc1--topic1" Topic: "rt3.dc1--topic2" } } Ticket: "client_id@builtin" 2025-06-25T14:27:35.456000Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:486:2417], now have 1 active actors on pipe 2025-06-25T14:27:35.456666Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [2:489:2418], now have 1 active actors on pipe 2025-06-25T14:27:35.456884Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [2:490:2418], now have 1 active actors on pipe 2025-06-25T14:27:35.457308Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928037] server disconnected, pipe [2:486:2417] destroyed 2025-06-25T14:27:35.457911Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928138] server disconnected, pipe [2:489:2418] destroyed 2025-06-25T14:27:35.458109Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928139] server disconnected, pipe [2:490:2418] destroyed RESULT Status: 1 ErrorCode: OK MetaResponse { CmdGetReadSessionsInfoResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 2 ErrorCode: OK } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 0 ErrorCode: INITIALIZING ErrorReason: "tablet for partition is not running" } PartitionResult { Partition: 1 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 2 ErrorCode: OK } PartitionResult { Partition: 2 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 2 ErrorCode: OK } ErrorCode: OK } } } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } |74.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |74.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |74.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteBinding ------- [TM] 
{asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "multiple partition 2 in TopicRequest for topic \'rt3.dc1--topic2\'" ErrorCode: BAD_REQUEST } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::ConsistentWritesWhenSwitchingToDonorMode [GOOD] Test command err: RandomSeed# 8597955652045564358 Reassign# 7 -- VSlotId { NodeId: 8 PDiskId: 1000 VSlotId: 1000 } GroupId: 2181038080 GroupGeneration: 1 VDiskKind: "Default" FailDomainIdx: 7 VDiskMetrics { SatisfactionRank: 0 VSlotId { NodeId: 8 PDiskId: 1000 VSlotId: 1000 } State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 1000 } Status: "READY" Ready: true Put# [1:1:1:0:0:71:0] Put# [1:1:2:0:0:27:0] Put# [1:1:3:0:0:43:0] Put# [1:1:4:0:0:78:0] 2025-06-25T14:24:26.995728Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:27.008414Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 18287315788390283495] 2025-06-25T14:24:27.076862Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:1:0:0:71:3] 2025-06-25T14:24:27.076928Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:2:0:0:27:4] 2025-06-25T14:24:27.076961Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:3:0:0:43:5] 2025-06-25T14:24:27.076994Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:4:0:0:78:5] 2025-06-25T14:24:27.077224Z 9 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 4 PartsResurrected# 4 Put# [1:1:5:0:0:96:0] Put# [1:1:6:0:0:43:0] Put# [1:1:7:0:0:32:0] Put# [1:1:8:0:0:5:0] Put# [1:1:9:0:0:87:0] Put# [1:1:10:0:0:93:0] Put# [1:1:11:0:0:68:0] Put# [1:1:12:0:0:83:0] Put# [1:1:13:0:0:44:0] Put# [1:1:14:0:0:33:0] Put# [1:1:15:0:0:40:0] Put# [1:1:16:0:0:50:0] Put# [1:1:17:0:0:45:0] Put# [1:1:18:0:0:92:0] Put# [1:1:19:0:0:77:0] Put# [1:1:20:0:0:1:0] Put# [1:1:21:0:0:36:0] Put# [1:1:22:0:0:55:0] Put# [1:1:23:0:0:34:0] Put# [1:1:24:0:0:87:0] Put# [1:1:25:0:0:56:0] Put# [1:1:26:0:0:86:0] Put# [1:1:27:0:0:10:0] Put# [1:1:28:0:0:95:0] Put# [1:1:29:0:0:63:0] Put# [1:1:30:0:0:2:0] Put# [1:1:31:0:0:45:0] Put# [1:1:32:0:0:60:0] Put# [1:1:33:0:0:81:0] Put# [1:1:34:0:0:27:0] Put# [1:1:35:0:0:33:0] Put# [1:1:36:0:0:20:0] Put# [1:1:37:0:0:13:0] Put# [1:1:38:0:0:36:0] Put# [1:1:39:0:0:11:0] Put# [1:1:40:0:0:86:0] Put# [1:1:41:0:0:65:0] Put# 
[1:1:42:0:0:40:0] Put# [1:1:43:0:0:58:0] Put# [1:1:44:0:0:78:0] Put# [1:1:45:0:0:67:0] Put# [1:1:46:0:0:12:0] Put# [1:1:47:0:0:28:0] Put# [1:1:48:0:0:34:0] Put# [1:1:49:0:0:3:0] Put# [1:1:50:0:0:61:0] Put# [1:1:51:0:0:29:0] Put# [1:1:52:0:0:32:0] Put# [1:1:53:0:0:71:0] Put# [1:1:54:0:0:68:0] Put# [1:1:55:0:0:98:0] Put# [1:1:56:0:0:21:0] Put# [1:1:57:0:0:87:0] Put# [1:1:58:0:0:45:0] Put# [1:1:59:0:0:78:0] Put# [1:1:60:0:0:88:0] Put# [1:1:61:0:0:45:0] Put# [1:1:62:0:0:36:0] Put# [1:1:63:0:0:34:0] Put# [1:1:64:0:0:19:0] Put# [1:1:65:0:0:93:0] Put# [1:1:66:0:0:89:0] Put# [1:1:67:0:0:54:0] Put# [1:1:68:0:0:17:0] Put# [1:1:69:0:0:49:0] Put# [1:1:70:0:0:1:0] Put# [1:1:71:0:0:79:0] Put# [1:1:72:0:0:67:0] Put# [1:1:73:0:0:29:0] Put# [1:1:74:0:0:28:0] Put# [1:1:75:0:0:14:0] Put# [1:1:76:0:0:9:0] Put# [1:1:77:0:0:13:0] Put# [1:1:78:0:0:6:0] Put# [1:1:79:0:0:97:0] Put# [1:1:80:0:0:12:0] Put# [1:1:81:0:0:25:0] Put# [1:1:82:0:0:24:0] Put# [1:1:83:0:0:71:0] Put# [1:1:84:0:0:92:0] Put# [1:1:85:0:0:34:0] Put# [1:1:86:0:0:81:0] Put# [1:1:87:0:0:46:0] Put# [1:1:88:0:0:54:0] Put# [1:1:89:0:0:27:0] Put# [1:1:90:0:0:9:0] Put# [1:1:91:0:0:86:0] Put# [1:1:92:0:0:52:0] Put# [1:1:93:0:0:92:0] Put# [1:1:94:0:0:39:0] Put# [1:1:95:0:0:65:0] Put# [1:1:96:0:0:24:0] Put# [1:1:97:0:0:7:0] Put# [1:1:98:0:0:94:0] Put# [1:1:99:0:0:34:0] Put# [1:1:100:0:0:74:0] Put# [1:1:101:0:0:65:0] Put# [1:1:102:0:0:79:0] Put# [1:1:103:0:0:98:0] Put# [1:1:104:0:0:21:0] Put# [1:1:105:0:0:76:0] Put# [1:1:106:0:0:3:0] Put# [1:1:107:0:0:63:0] Put# [1:1:108:0:0:38:0] Put# [1:1:109:0:0:24:0] Put# [1:1:110:0:0:59:0] Put# [1:1:111:0:0:92:0] Put# [1:1:112:0:0:67:0] Put# [1:1:113:0:0:93:0] Put# [1:1:114:0:0:17:0] Put# [1:1:115:0:0:76:0] Put# [1:1:116:0:0:47:0] Put# [1:1:117:0:0:46:0] Put# [1:1:118:0:0:91:0] Put# [1:1:119:0:0:37:0] Put# [1:1:120:0:0:52:0] Put# [1:1:121:0:0:42:0] Put# [1:1:122:0:0:11:0] Put# [1:1:123:0:0:89:0] Put# [1:1:124:0:0:22:0] Put# [1:1:125:0:0:88:0] Put# [1:1:126:0:0:48:0] Put# [1:1:127:0:0:77:0] Put# [1:1:128:0:0:74:0] Put# [1:1:129:0:0:15:0] Put# [1:1:130:0:0:12:0] Put# [1:1:131:0:0:55:0] Put# [1:1:132:0:0:9:0] Put# [1:1:133:0:0:79:0] Put# [1:1:134:0:0:99:0] Put# [1:1:135:0:0:48:0] Put# [1:1:136:0:0:21:0] Put# [1:1:137:0:0:53:0] Put# [1:1:138:0:0:56:0] Put# [1:1:139:0:0:33:0] Put# [1:1:140:0:0:8:0] Put# [1:1:141:0:0:45:0] Put# [1:1:142:0:0:52:0] Put# [1:1:143:0:0:22:0] Put# [1:1:144:0:0:11:0] Put# [1:1:145:0:0:23:0] Put# [1:1:146:0:0:18:0] Put# [1:1:147:0:0:83:0] Put# [1:1:148:0:0:93:0] Put# [1:1:149:0:0:25:0] Put# [1:1:150:0:0:26:0] Put# [1:1:151:0:0:71:0] Put# [1:1:152:0:0:46:0] Put# [1:1:153:0:0:54:0] Put# [1:1:154:0:0:66:0] Put# [1:1:155:0:0:68:0] Put# [1:1:156:0:0:12:0] Put# [1:1:157:0:0:71:0] Put# [1:1:158:0:0:48:0] Put# [1:1:159:0:0:56:0] Put# [1:1:160:0:0:60:0] Put# [1:1:161:0:0:90:0] Put# [1:1:162:0:0:49:0] Put# [1:1:163:0:0:27:0] Put# [1:1:164:0:0:23:0] Put# [1:1:165:0:0:29:0] Put# [1:1:166:0:0:100:0] Put# [1:1:167:0:0:45:0] Put# [1:1:168:0:0:50:0] Put# [1:1:169:0:0:13:0] Put# [1:1:170:0:0:38:0] Put# [1:1:171:0:0:74:0] Put# [1:1:172:0:0:37:0] Put# [1:1:173:0:0:99:0] Put# [1:1:174:0:0:55:0] Put# [1:1:175:0:0:43:0] Put# [1:1:176:0:0:33:0] Put# [1:1:177:0:0:39:0] Put# [1:1:178:0:0:28:0] Put# [1:1:179:0:0:98:0] Put# [1:1:180:0:0:74:0] Put# [1:1:181:0:0:80:0] Put# [1:1:182:0:0:25:0] Put# [1:1:183:0:0:74:0] Put# [1:1:184:0:0:88:0] Put# [1:1:185:0:0:96:0] Put# [1:1:186:0:0:67:0] Put# [1:1:187:0:0:23:0] Put# [1:1:188:0:0:42:0] Put# [1:1:189:0:0:100:0] Put# [1:1:190:0:0:51:0] Put# [1:1:191:0:0:15:0] Put# [1:1:192:0:0:82:0] 
Put# [1:1:193:0:0:46:0] Put# [1:1:194:0:0:37:0] Put# [1:1:195:0:0:51:0] Put# [1:1:196:0:0:29:0] Put# [1:1:197:0:0:17:0] Put# [1:1:198:0:0:19:0] Put# [1:1:199:0:0:37:0] Put# [1:1:200:0:0:4:0] Put# [1:1:201:0:0:61:0] Put# [1:1:202:0:0:57:0] Put# [1:1:203:0:0:87:0] Put# [1:1:204:0:0:17:0] Put# [1:1:205:0:0:86:0] Put# [1:1:206:0:0:65:0] Put# [1:1:207:0:0:75:0] Put# [1:1:208:0:0:61:0] Put# [1:1:209:0:0:6:0] Put# [1:1:210:0:0:21:0] Put# [1:1:211:0:0:43:0] Put# [1:1:212:0:0:27:0] Put# [1:1:213:0:0:85:0] Put# [1:1:214:0:0:72:0] Put# [1:1:215:0:0:11:0] Put# [1:1:216:0:0:11:0] Put# [1:1:217:0:0:28:0] Put# [1:1:218:0:0:22:0] Put# [1:1:219:0:0:5:0] Put# [1:1:220:0:0:47:0] Put# [1:1:221:0:0:72:0] Put# [1:1:222:0:0:51:0] Put# [1:1:223:0:0:15:0] Put# [1:1:224:0:0:66:0] Put# [1:1:225:0:0:92:0] Put# [1:1:226:0:0:50:0] Put# [1:1:227:0:0:43:0] Put# [1:1:228:0:0:6:0] Put# [1:1:229:0:0:29:0] Put# [1:1:230:0:0:21:0] Put# [1:1:231:0:0:39:0] Put# [1:1:232:0:0:31:0] Put# [1:1:233:0:0:35:0] Put# [1:1:234:0:0:3:0] Put# [1:1:235:0:0:27:0] Put# [1:1:236:0:0:80:0] Put# [1:1:237:0:0:62:0] Put# [1:1:238:0:0:100:0] Put# [1:1:239:0:0:42:0] Put# [1:1:240:0:0:81:0] Put# [1:1:241:0:0:53:0] Put# [1:1:242:0:0:67:0] Put# [1:1:243:0:0:28:0] Put# [1:1:244:0:0:83:0] Put# [1:1:245:0:0:8:0] Put# [1:1:246:0:0:57:0] Put# [1:1:247:0:0:46:0] Put# [1:1:248:0:0:6:0] Put# [1:1:249:0:0:25:0] Put# [1:1:250:0:0:20:0] Put# [1:1:251:0:0:82:0] Put# [1:1:252:0:0:48:0] Put# [1:1:253:0:0:74:0] Put# [1:1:254:0:0:53:0] Put# [1:1:255:0:0:35:0] Put# [1:1:256:0:0:12:0] Put# [1:1:257:0:0:28:0] Put# [1:1:258:0:0:11:0] Put# [1:1:259:0:0:4:0] Put# [1:1:260:0:0:80:0] Put# [1:1:261:0:0:89:0] Put# [1:1:262:0:0:79:0] Put# [1:1:263:0:0:57:0] Put# [1:1:264:0:0:18:0] Put# [1:1:265:0:0:20:0] Put# [1:1:266:0:0:60:0] Put# [1:1:267:0:0:80:0] Put# [1:1:268:0:0:51:0] Put# [1:1:269:0:0:18:0] Put# [1:1:270:0:0:43:0] Put# [1:1:271:0:0:28:0] Put# [1:1:272:0:0:25:0] Put# [1:1:273:0:0:79:0] Put# [1:1:274:0:0:27:0] Put# [1:1:275:0:0:21:0] Put# [1:1:276:0:0:79:0] Put# [1:1:277:0:0:66:0] Put# [1:1:278:0:0:5:0] Put# [1:1:279:0:0:71:0] Put# [1:1:280:0:0:7:0] Put# [1:1:281:0:0:33:0] Put# [1:1:282:0:0:49:0] Put# [1:1:283:0:0:98:0] Put# [1:1:284:0:0:18:0] Put# [1:1:285:0:0:70:0] Put# [1:1:286:0:0:34:0] Put# [1:1:287:0:0:75:0] Put# [1:1:288:0:0:93:0] Put# [1:1:289:0:0:9:0] Put# [1:1:290:0:0:35:0] Put# [1:1:291:0:0:67:0] Put# [1:1:292:0:0:45:0] Put# [1:1:293:0:0:68:0] Put# [1:1:294:0:0:14:0] Put# [1:1:295:0:0:79:0] Put# [1:1:296:0:0:90:0] Put# [1:1:297:0:0:92:0] Put# [1:1:298:0:0:1:0] Put# [1:1:299:0:0:85:0] Put# [1:1:300:0:0:10:0] Put# [1:1:301:0:0:85:0] Put# [1:1:302:0:0:6:0] Put# [1:1:303:0:0:61:0] Put# [1:1:304:0:0:71:0] Put# [1:1:305:0:0:75:0] Put# [1:1:306:0:0:36:0] Put# [1:1:307:0:0:42:0] Put# [1:1:308:0:0:1:0] Put# [1:1:309:0:0:56:0] Put# [1:1:310:0:0:8:0] Put# [1:1:311:0:0:79:0] Put# [1:1:312:0:0:51:0] Put# [1:1:313:0:0:17:0] Put# [1:1:314:0:0:62:0] Put# [1:1:315:0:0:86:0] Put# [1:1:316:0:0:47:0] Put# [1:1:317:0:0:33:0] Put# [1:1:318:0:0:5:0] Put# [1:1:319:0:0:79:0] Put# [1:1:320:0:0:8:0] Put# [1:1:321:0:0:53:0] Put# [1:1:322:0:0:52:0] Put# [1:1:323:0:0:68:0] Put# [1:1:324:0:0:87:0] Put# [1:1:325:0:0:88:0] Put# [1:1:326:0:0:65:0] Put# [1:1:327:0:0:60:0] Put# [1:1:328:0:0:70:0] Put# [1:1:329:0:0:70:0] Put# [1:1:330:0:0:88:0] Put# [1:1:331:0:0:89:0] Put# [1:1:332:0:0:33:0] Put# [1:1:333:0:0:97:0] Put# [1:1:334:0:0:97:0] Put# [1:1:335:0:0:91:0] Put# [1:1:336:0:0:68:0] Put# [1:1:337:0:0:79:0] Put# [1:1:338:0:0:98:0] Put# [1:1:339:0:0:54:0] Put# [1:1:340:0:0:92:0] Put# 
[1:1:341:0:0:7:0] Put# [1:1:342:0:0:14:0] Put# [1:1:343:0:0:23:0] Put# [1:1:344:0:0:1:0] Put# [1:1:345:0:0:56:0] Put# [1:1:346:0:0:37:0] Put# [1:1:347:0:0:76:0] Put# [1:1:348:0:0:87:0] Put# [1:1:349:0:0:66:0] Put# [1:1:350:0:0:85:0] Put# [1:1:351:0:0:54:0] Put# [1:1:352:0:0:48:0] Put# [1:1:353:0:0:8:0] Put# [1:1:354:0:0:65:0] Put# [1:1:355:0:0:38:0] Put# [1:1:356:0:0:82:0] Put# [1:1:357:0:0:2:0] Put# [1:1:358:0:0:75:0] Put# [1:1:359:0:0:81:0] Put# [1:1:360:0:0:8:0] Put# [1:1:361:0:0:15:0] Put# [1:1:362:0:0:10:0] Put# [1:1:363:0:0:4:0] Put# [1:1:364:0:0:60:0] Put# [1:1:365:0:0:99:0] Put# [1:1:366:0:0:62:0] Put# [1:1:367:0:0:33:0] Put# [1:1:368:0:0:94:0] Put# [1:1:369:0:0:48:0] Put# [1:1:370:0:0:91:0] Put# [1:1:371:0:0:49:0] Put# [1:1:372:0:0:79:0] Put# [1:1:373:0:0:26:0] Put# [1:1:374:0:0:49:0] Put# [1:1:375:0:0:100:0] Put# [1:1:376:0:0:89:0] Put# [1:1:377:0:0:20:0] Put# [1:1:378:0:0:47:0] Put# [1:1:379:0:0:68:0] Put# [1:1:380:0:0:66:0] Put# [1:1:381:0:0:30:0] Put# [1:1:382:0:0:48:0] Put# [1:1:383:0:0:32:0] Put# [1:1:384:0:0:87:0] Put# [1:1:385:0:0:41:0] Put# [1:1:386:0:0:42:0] Put# [1:1:387:0:0:72:0] Put# [1:1:388:0:0:34:0] Put# [1:1:389:0:0:31:0] Put# [1:1:390:0:0:10:0] Put# [1:1:391:0:0:26:0] Put# [1:1:392:0:0:96:0] Put# [1:1:393:0:0:67:0] Put# [1:1:394:0:0:59:0] Put# [1:1:395:0:0:59:0] Put# [1:1:396:0:0:2:0] Put# [1:1:397:0:0:40:0] Put# [1:1:398:0:0:79:0] Put# [1:1:399:0:0:30:0] Put# [1:1:400:0:0:30:0] Put# [1:1:401:0:0:58:0] Put# [1:1:402:0:0:48:0] Put# [1:1:403:0:0:81:0] Put# [1:1:404:0:0:40:0] Put# [1:1:405:0:0:91:0] Put# [1:1:406:0:0:81:0] Put# [1:1:407:0:0:40:0] Put# [1:1:408:0:0:3:0] Put# [1:1:409:0:0:52:0] Put# [1:1:410:0:0:22:0] Put# [1:1:411:0:0:37:0] Put# [1:1:412:0:0:78:0] Put# [1:1:413:0:0:37:0] Put# [1:1:414:0:0:76:0] Put# [1:1:415:0:0:22:0] Put# [1:1:416:0:0:68:0] Put# [1:1:417:0:0:10:0] Put# [1:1:418:0:0:61:0] Put# [1:1:419:0:0:12:0] Put# [1:1:420:0:0:67:0] Put# [1:1:421:0:0:24:0] Put# [1:1:422:0:0:35:0] Put# [1:1:423:0:0:70:0] Put# [1:1:424:0:0:18:0] Put# [1:1:425:0:0:89:0] Put# [1:1:426:0:0:98:0] Put# [1:1:427:0:0:11:0] Put# [1:1:428:0:0:48:0] Put# [1:1:429:0:0:48:0] Put# [1:1:430:0:0:20:0] Put# [1:1:431:0:0:30:0] Put# [1:1:432:0:0:48:0] Put# [1:1:433:0:0:54:0] Put# [1:1:434:0:0:25:0] Put# [1:1:435:0:0:36:0] Put# [1:1:436:0:0:63:0] Put# [1:1:437:0:0:17:0] Put# [1:1:438:0:0:24:0] Put# [1:1:4 ... 
19:0:0:87:0] Put# [1:3:9520:0:0:74:0] Put# [1:3:9521:0:0:46:0] Put# [1:3:9522:0:0:28:0] Put# [1:3:9523:0:0:55:0] Put# [1:3:9524:0:0:70:0] Put# [1:3:9525:0:0:80:0] Put# [1:3:9526:0:0:57:0] Put# [1:3:9527:0:0:82:0] Put# [1:3:9528:0:0:47:0] Put# [1:3:9529:0:0:21:0] Put# [1:3:9530:0:0:75:0] Put# [1:3:9531:0:0:100:0] Put# [1:3:9532:0:0:75:0] Put# [1:3:9533:0:0:17:0] Put# [1:3:9534:0:0:6:0] Put# [1:3:9535:0:0:46:0] Put# [1:3:9536:0:0:75:0] Put# [1:3:9537:0:0:39:0] Put# [1:3:9538:0:0:59:0] Put# [1:3:9539:0:0:54:0] Put# [1:3:9540:0:0:33:0] Put# [1:3:9541:0:0:30:0] Put# [1:3:9542:0:0:92:0] Put# [1:3:9543:0:0:39:0] Put# [1:3:9544:0:0:82:0] Put# [1:3:9545:0:0:13:0] Put# [1:3:9546:0:0:19:0] Put# [1:3:9547:0:0:88:0] Put# [1:3:9548:0:0:2:0] Put# [1:3:9549:0:0:65:0] Put# [1:3:9550:0:0:11:0] Put# [1:3:9551:0:0:91:0] Put# [1:3:9552:0:0:29:0] Put# [1:3:9553:0:0:78:0] Put# [1:3:9554:0:0:30:0] Put# [1:3:9555:0:0:12:0] Put# [1:3:9556:0:0:24:0] Put# [1:3:9557:0:0:49:0] Put# [1:3:9558:0:0:21:0] Put# [1:3:9559:0:0:99:0] Put# [1:3:9560:0:0:86:0] Put# [1:3:9561:0:0:62:0] Put# [1:3:9562:0:0:55:0] Put# [1:3:9563:0:0:4:0] Put# [1:3:9564:0:0:74:0] Put# [1:3:9565:0:0:44:0] Put# [1:3:9566:0:0:10:0] Put# [1:3:9567:0:0:36:0] Put# [1:3:9568:0:0:58:0] Put# [1:3:9569:0:0:41:0] Put# [1:3:9570:0:0:39:0] Put# [1:3:9571:0:0:70:0] Put# [1:3:9572:0:0:72:0] Put# [1:3:9573:0:0:11:0] Put# [1:3:9574:0:0:68:0] Put# [1:3:9575:0:0:59:0] Put# [1:3:9576:0:0:63:0] Put# [1:3:9577:0:0:13:0] Put# [1:3:9578:0:0:34:0] Put# [1:3:9579:0:0:47:0] Put# [1:3:9580:0:0:4:0] Put# [1:3:9581:0:0:65:0] Put# [1:3:9582:0:0:99:0] Put# [1:3:9583:0:0:53:0] Put# [1:3:9584:0:0:26:0] Put# [1:3:9585:0:0:97:0] Put# [1:3:9586:0:0:68:0] Put# [1:3:9587:0:0:94:0] Put# [1:3:9588:0:0:5:0] Put# [1:3:9589:0:0:63:0] Put# [1:3:9590:0:0:93:0] Put# [1:3:9591:0:0:4:0] Put# [1:3:9592:0:0:85:0] Put# [1:3:9593:0:0:43:0] Put# [1:3:9594:0:0:42:0] Put# [1:3:9595:0:0:54:0] Put# [1:3:9596:0:0:7:0] Put# [1:3:9597:0:0:9:0] Put# [1:3:9598:0:0:79:0] Put# [1:3:9599:0:0:69:0] Put# [1:3:9600:0:0:39:0] Put# [1:3:9601:0:0:73:0] Put# [1:3:9602:0:0:74:0] Put# [1:3:9603:0:0:17:0] Put# [1:3:9604:0:0:26:0] Put# [1:3:9605:0:0:67:0] Put# [1:3:9606:0:0:1:0] Put# [1:3:9607:0:0:64:0] Put# [1:3:9608:0:0:47:0] Put# [1:3:9609:0:0:33:0] Put# [1:3:9610:0:0:64:0] Put# [1:3:9611:0:0:25:0] Put# [1:3:9612:0:0:29:0] Put# [1:3:9613:0:0:87:0] Put# [1:3:9614:0:0:32:0] Put# [1:3:9615:0:0:1:0] Put# [1:3:9616:0:0:81:0] Put# [1:3:9617:0:0:39:0] Put# [1:3:9618:0:0:46:0] Put# [1:3:9619:0:0:59:0] Put# [1:3:9620:0:0:85:0] Put# [1:3:9621:0:0:69:0] Put# [1:3:9622:0:0:3:0] Put# [1:3:9623:0:0:78:0] Put# [1:3:9624:0:0:8:0] Put# [1:3:9625:0:0:41:0] Put# [1:3:9626:0:0:76:0] Put# [1:3:9627:0:0:91:0] Put# [1:3:9628:0:0:71:0] Put# [1:3:9629:0:0:72:0] Put# [1:3:9630:0:0:80:0] Put# [1:3:9631:0:0:62:0] Put# [1:3:9632:0:0:36:0] Put# [1:3:9633:0:0:68:0] Put# [1:3:9634:0:0:65:0] Put# [1:3:9635:0:0:13:0] Put# [1:3:9636:0:0:84:0] Put# [1:3:9637:0:0:74:0] Put# [1:3:9638:0:0:46:0] Put# [1:3:9639:0:0:65:0] Put# [1:3:9640:0:0:89:0] Put# [1:3:9641:0:0:73:0] Put# [1:3:9642:0:0:44:0] Put# [1:3:9643:0:0:4:0] Put# [1:3:9644:0:0:78:0] Put# [1:3:9645:0:0:40:0] Put# [1:3:9646:0:0:43:0] Put# [1:3:9647:0:0:47:0] Put# [1:3:9648:0:0:54:0] Put# [1:3:9649:0:0:60:0] Put# [1:3:9650:0:0:56:0] Put# [1:3:9651:0:0:85:0] Put# [1:3:9652:0:0:47:0] Put# [1:3:9653:0:0:63:0] Put# [1:3:9654:0:0:70:0] Put# [1:3:9655:0:0:98:0] Put# [1:3:9656:0:0:90:0] Put# [1:3:9657:0:0:2:0] Put# [1:3:9658:0:0:97:0] Put# [1:3:9659:0:0:57:0] Put# [1:3:9660:0:0:27:0] Put# [1:3:9661:0:0:24:0] Put# 
[1:3:9662:0:0:55:0] Put# [1:3:9663:0:0:60:0] Put# [1:3:9664:0:0:65:0] Put# [1:3:9665:0:0:94:0] Put# [1:3:9666:0:0:92:0] Put# [1:3:9667:0:0:59:0] Put# [1:3:9668:0:0:69:0] Put# [1:3:9669:0:0:89:0] Put# [1:3:9670:0:0:66:0] Put# [1:3:9671:0:0:9:0] Put# [1:3:9672:0:0:74:0] Put# [1:3:9673:0:0:67:0] Put# [1:3:9674:0:0:52:0] Put# [1:3:9675:0:0:99:0] Put# [1:3:9676:0:0:48:0] Put# [1:3:9677:0:0:63:0] Put# [1:3:9678:0:0:9:0] Put# [1:3:9679:0:0:52:0] Put# [1:3:9680:0:0:74:0] Put# [1:3:9681:0:0:44:0] Put# [1:3:9682:0:0:97:0] Put# [1:3:9683:0:0:36:0] Put# [1:3:9684:0:0:61:0] Put# [1:3:9685:0:0:85:0] Put# [1:3:9686:0:0:39:0] Put# [1:3:9687:0:0:29:0] Put# [1:3:9688:0:0:74:0] Put# [1:3:9689:0:0:93:0] Put# [1:3:9690:0:0:73:0] Put# [1:3:9691:0:0:93:0] Put# [1:3:9692:0:0:54:0] Put# [1:3:9693:0:0:40:0] Put# [1:3:9694:0:0:47:0] Put# [1:3:9695:0:0:92:0] Put# [1:3:9696:0:0:53:0] Put# [1:3:9697:0:0:18:0] Put# [1:3:9698:0:0:42:0] Put# [1:3:9699:0:0:49:0] Put# [1:3:9700:0:0:49:0] Put# [1:3:9701:0:0:10:0] Put# [1:3:9702:0:0:38:0] Put# [1:3:9703:0:0:57:0] Put# [1:3:9704:0:0:20:0] Put# [1:3:9705:0:0:23:0] Put# [1:3:9706:0:0:7:0] Put# [1:3:9707:0:0:11:0] Put# [1:3:9708:0:0:39:0] Put# [1:3:9709:0:0:43:0] Put# [1:3:9710:0:0:22:0] Put# [1:3:9711:0:0:63:0] Put# [1:3:9712:0:0:61:0] Put# [1:3:9713:0:0:6:0] Put# [1:3:9714:0:0:88:0] Put# [1:3:9715:0:0:8:0] Put# [1:3:9716:0:0:55:0] Put# [1:3:9717:0:0:94:0] Put# [1:3:9718:0:0:87:0] Put# [1:3:9719:0:0:42:0] Put# [1:3:9720:0:0:40:0] Put# [1:3:9721:0:0:33:0] Put# [1:3:9722:0:0:50:0] Put# [1:3:9723:0:0:53:0] Put# [1:3:9724:0:0:80:0] Put# [1:3:9725:0:0:37:0] Put# [1:3:9726:0:0:30:0] Put# [1:3:9727:0:0:27:0] Put# [1:3:9728:0:0:6:0] Put# [1:3:9729:0:0:48:0] Put# [1:3:9730:0:0:100:0] Put# [1:3:9731:0:0:83:0] Put# [1:3:9732:0:0:7:0] Put# [1:3:9733:0:0:63:0] Put# [1:3:9734:0:0:66:0] Put# [1:3:9735:0:0:64:0] Put# [1:3:9736:0:0:50:0] Put# [1:3:9737:0:0:42:0] Put# [1:3:9738:0:0:12:0] Put# [1:3:9739:0:0:92:0] Put# [1:3:9740:0:0:50:0] Put# [1:3:9741:0:0:98:0] Put# [1:3:9742:0:0:50:0] Put# [1:3:9743:0:0:24:0] Put# [1:3:9744:0:0:56:0] Put# [1:3:9745:0:0:53:0] Put# [1:3:9746:0:0:48:0] Put# [1:3:9747:0:0:90:0] Put# [1:3:9748:0:0:50:0] Put# [1:3:9749:0:0:38:0] Put# [1:3:9750:0:0:92:0] Put# [1:3:9751:0:0:64:0] Put# [1:3:9752:0:0:69:0] Put# [1:3:9753:0:0:52:0] Put# [1:3:9754:0:0:2:0] Put# [1:3:9755:0:0:21:0] Put# [1:3:9756:0:0:77:0] Put# [1:3:9757:0:0:10:0] Put# [1:3:9758:0:0:42:0] Put# [1:3:9759:0:0:41:0] Put# [1:3:9760:0:0:8:0] Put# [1:3:9761:0:0:77:0] Put# [1:3:9762:0:0:42:0] Put# [1:3:9763:0:0:43:0] Put# [1:3:9764:0:0:3:0] Put# [1:3:9765:0:0:42:0] Put# [1:3:9766:0:0:72:0] Put# [1:3:9767:0:0:14:0] Put# [1:3:9768:0:0:11:0] Put# [1:3:9769:0:0:29:0] Put# [1:3:9770:0:0:93:0] Put# [1:3:9771:0:0:92:0] Put# [1:3:9772:0:0:55:0] Put# [1:3:9773:0:0:25:0] Put# [1:3:9774:0:0:51:0] Put# [1:3:9775:0:0:21:0] Put# [1:3:9776:0:0:44:0] Put# [1:3:9777:0:0:28:0] Put# [1:3:9778:0:0:96:0] Put# [1:3:9779:0:0:44:0] Put# [1:3:9780:0:0:12:0] Put# [1:3:9781:0:0:12:0] Put# [1:3:9782:0:0:41:0] Put# [1:3:9783:0:0:89:0] Put# [1:3:9784:0:0:74:0] Put# [1:3:9785:0:0:81:0] Put# [1:3:9786:0:0:82:0] Put# [1:3:9787:0:0:52:0] Put# [1:3:9788:0:0:34:0] Put# [1:3:9789:0:0:72:0] Put# [1:3:9790:0:0:39:0] Put# [1:3:9791:0:0:66:0] Put# [1:3:9792:0:0:64:0] Put# [1:3:9793:0:0:49:0] Put# [1:3:9794:0:0:74:0] Put# [1:3:9795:0:0:28:0] Put# [1:3:9796:0:0:35:0] Put# [1:3:9797:0:0:2:0] Put# [1:3:9798:0:0:68:0] Put# [1:3:9799:0:0:43:0] Put# [1:3:9800:0:0:90:0] Put# [1:3:9801:0:0:51:0] Put# [1:3:9802:0:0:78:0] Put# [1:3:9803:0:0:40:0] Put# 
[1:3:9804:0:0:93:0] Put# [1:3:9805:0:0:68:0] Put# [1:3:9806:0:0:74:0] Put# [1:3:9807:0:0:8:0] Put# [1:3:9808:0:0:5:0] Put# [1:3:9809:0:0:11:0] Put# [1:3:9810:0:0:8:0] Put# [1:3:9811:0:0:12:0] Put# [1:3:9812:0:0:20:0] Put# [1:3:9813:0:0:22:0] Put# [1:3:9814:0:0:69:0] Put# [1:3:9815:0:0:27:0] Put# [1:3:9816:0:0:6:0] Put# [1:3:9817:0:0:15:0] Put# [1:3:9818:0:0:61:0] Put# [1:3:9819:0:0:70:0] Put# [1:3:9820:0:0:92:0] Put# [1:3:9821:0:0:86:0] Put# [1:3:9822:0:0:21:0] Put# [1:3:9823:0:0:62:0] Put# [1:3:9824:0:0:64:0] Put# [1:3:9825:0:0:47:0] Put# [1:3:9826:0:0:45:0] Put# [1:3:9827:0:0:73:0] Put# [1:3:9828:0:0:58:0] Put# [1:3:9829:0:0:56:0] Put# [1:3:9830:0:0:56:0] Put# [1:3:9831:0:0:35:0] Put# [1:3:9832:0:0:27:0] Put# [1:3:9833:0:0:83:0] Put# [1:3:9834:0:0:32:0] Put# [1:3:9835:0:0:30:0] Put# [1:3:9836:0:0:79:0] Put# [1:3:9837:0:0:76:0] Put# [1:3:9838:0:0:79:0] Put# [1:3:9839:0:0:47:0] Put# [1:3:9840:0:0:81:0] Put# [1:3:9841:0:0:62:0] Put# [1:3:9842:0:0:74:0] Put# [1:3:9843:0:0:76:0] Put# [1:3:9844:0:0:6:0] Put# [1:3:9845:0:0:5:0] Put# [1:3:9846:0:0:54:0] Put# [1:3:9847:0:0:18:0] Put# [1:3:9848:0:0:78:0] Put# [1:3:9849:0:0:10:0] Put# [1:3:9850:0:0:49:0] Put# [1:3:9851:0:0:40:0] Put# [1:3:9852:0:0:76:0] Put# [1:3:9853:0:0:31:0] Put# [1:3:9854:0:0:78:0] Put# [1:3:9855:0:0:3:0] Put# [1:3:9856:0:0:2:0] Put# [1:3:9857:0:0:65:0] Put# [1:3:9858:0:0:86:0] Put# [1:3:9859:0:0:18:0] Put# [1:3:9860:0:0:86:0] Put# [1:3:9861:0:0:27:0] Put# [1:3:9862:0:0:31:0] Put# [1:3:9863:0:0:66:0] Put# [1:3:9864:0:0:74:0] Put# [1:3:9865:0:0:87:0] Put# [1:3:9866:0:0:27:0] Put# [1:3:9867:0:0:82:0] Put# [1:3:9868:0:0:22:0] Put# [1:3:9869:0:0:42:0] Put# [1:3:9870:0:0:33:0] Put# [1:3:9871:0:0:25:0] Put# [1:3:9872:0:0:96:0] Put# [1:3:9873:0:0:78:0] Put# [1:3:9874:0:0:82:0] Put# [1:3:9875:0:0:77:0] Put# [1:3:9876:0:0:41:0] Put# [1:3:9877:0:0:77:0] Put# [1:3:9878:0:0:45:0] Put# [1:3:9879:0:0:25:0] Put# [1:3:9880:0:0:87:0] Put# [1:3:9881:0:0:86:0] Put# [1:3:9882:0:0:35:0] Put# [1:3:9883:0:0:74:0] Put# [1:3:9884:0:0:44:0] Put# [1:3:9885:0:0:24:0] Put# [1:3:9886:0:0:64:0] Put# [1:3:9887:0:0:86:0] Put# [1:3:9888:0:0:49:0] Put# [1:3:9889:0:0:64:0] Put# [1:3:9890:0:0:23:0] Put# [1:3:9891:0:0:30:0] Put# [1:3:9892:0:0:13:0] Put# [1:3:9893:0:0:74:0] Put# [1:3:9894:0:0:44:0] Put# [1:3:9895:0:0:70:0] Put# [1:3:9896:0:0:31:0] Put# [1:3:9897:0:0:5:0] Put# [1:3:9898:0:0:85:0] Put# [1:3:9899:0:0:56:0] Put# [1:3:9900:0:0:63:0] Put# [1:3:9901:0:0:53:0] Put# [1:3:9902:0:0:85:0] Put# [1:3:9903:0:0:83:0] Put# [1:3:9904:0:0:82:0] Put# [1:3:9905:0:0:48:0] Put# [1:3:9906:0:0:66:0] Put# [1:3:9907:0:0:66:0] Put# [1:3:9908:0:0:95:0] Put# [1:3:9909:0:0:11:0] Put# [1:3:9910:0:0:40:0] Put# [1:3:9911:0:0:91:0] Put# [1:3:9912:0:0:57:0] Put# [1:3:9913:0:0:18:0] Put# [1:3:9914:0:0:90:0] Put# [1:3:9915:0:0:21:0] Put# [1:3:9916:0:0:96:0] Put# [1:3:9917:0:0:57:0] Put# [1:3:9918:0:0:41:0] Put# [1:3:9919:0:0:97:0] Put# [1:3:9920:0:0:58:0] Put# [1:3:9921:0:0:74:0] Put# [1:3:9922:0:0:15:0] Put# [1:3:9923:0:0:78:0] Put# [1:3:9924:0:0:43:0] Put# [1:3:9925:0:0:82:0] Put# [1:3:9926:0:0:98:0] Put# [1:3:9927:0:0:39:0] Put# [1:3:9928:0:0:40:0] Put# [1:3:9929:0:0:15:0] Put# [1:3:9930:0:0:9:0] Put# [1:3:9931:0:0:96:0] Put# [1:3:9932:0:0:2:0] Put# [1:3:9933:0:0:85:0] Put# [1:3:9934:0:0:65:0] Put# [1:3:9935:0:0:61:0] Put# [1:3:9936:0:0:20:0] Put# [1:3:9937:0:0:13:0] Put# [1:3:9938:0:0:60:0] Put# [1:3:9939:0:0:7:0] Put# [1:3:9940:0:0:59:0] Put# [1:3:9941:0:0:12:0] Put# [1:3:9942:0:0:61:0] Put# [1:3:9943:0:0:12:0] Put# [1:3:9944:0:0:71:0] Put# [1:3:9945:0:0:72:0] Put# 
[1:3:9946:0:0:74:0] Put# [1:3:9947:0:0:69:0] Put# [1:3:9948:0:0:53:0] Put# [1:3:9949:0:0:65:0] Put# [1:3:9950:0:0:49:0] Put# [1:3:9951:0:0:44:0] Put# [1:3:9952:0:0:60:0] Put# [1:3:9953:0:0:41:0] Put# [1:3:9954:0:0:48:0] Put# [1:3:9955:0:0:100:0] Put# [1:3:9956:0:0:56:0] Put# [1:3:9957:0:0:16:0] Put# [1:3:9958:0:0:15:0] Put# [1:3:9959:0:0:42:0] Put# [1:3:9960:0:0:74:0] Put# [1:3:9961:0:0:67:0] Put# [1:3:9962:0:0:12:0] Put# [1:3:9963:0:0:52:0] Put# [1:3:9964:0:0:55:0] Put# [1:3:9965:0:0:78:0] Put# [1:3:9966:0:0:84:0] Put# [1:3:9967:0:0:22:0] Put# [1:3:9968:0:0:4:0] Put# [1:3:9969:0:0:18:0] Put# [1:3:9970:0:0:15:0] Put# [1:3:9971:0:0:48:0] Put# [1:3:9972:0:0:51:0] Put# [1:3:9973:0:0:52:0] Put# [1:3:9974:0:0:75:0] Put# [1:3:9975:0:0:94:0] Put# [1:3:9976:0:0:12:0] Put# [1:3:9977:0:0:46:0] Put# [1:3:9978:0:0:25:0] Put# [1:3:9979:0:0:20:0] Put# [1:3:9980:0:0:34:0] Put# [1:3:9981:0:0:64:0] Put# [1:3:9982:0:0:52:0] Put# [1:3:9983:0:0:36:0] Put# [1:3:9984:0:0:62:0] Put# [1:3:9985:0:0:3:0] Put# [1:3:9986:0:0:8:0] Put# [1:3:9987:0:0:73:0] Put# [1:3:9988:0:0:23:0] Put# [1:3:9989:0:0:97:0] Put# [1:3:9990:0:0:4:0] Put# [1:3:9991:0:0:21:0] Put# [1:3:9992:0:0:12:0] Put# [1:3:9993:0:0:18:0] Put# [1:3:9994:0:0:9:0] Put# [1:3:9995:0:0:71:0] Put# [1:3:9996:0:0:99:0] Put# [1:3:9997:0:0:51:0] Put# [1:3:9998:0:0:36:0] Put# [1:3:9999:0:0:19:0] Put# [1:3:10000:0:0:89:0] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnNoBalancerInGetNodeRequest >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDrop >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnEmptyTopicName >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanWithRetry |74.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |74.8%| [LD] {RESULT} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |74.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut >> DataStreams::TestGetShardIterator >> TControlPlaneProxyCheckNegativePermissionsSuccess::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateQuery |74.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |74.8%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/test-results/unittest/{meta.json ... results_accumulator.log} |74.9%| [LD] {RESULT} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |74.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnEmptyTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedTopicName >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCreateClean |74.9%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/test-results/unittest/{meta.json ... 
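The Put# entries above are the raw write log of the blob-storage load test. Each bracketed tuple appears to be a logo-blob id printed as TabletId:Generation:Step:Channel:Cookie:BlobSize:PartId — the field names are an assumption inferred from the visible values (the third field increments once per write, the sixth stays in the 1–100 range of the generated blob sizes). The sketch below shows one way to split such a token when grepping these logs; ParseBlobId is a hypothetical helper, not part of the test harness.

#include <cstdio>

// Assumed field order for a token such as "[1:1:42:0:0:40:0]"; names are illustrative.
struct TBlobIdFields {
    unsigned long long TabletId, Generation, Step, Channel, Cookie, BlobSize, PartId;
};

static bool ParseBlobId(const char* token, TBlobIdFields& out) {
    // sscanf returns the number of converted fields; all seven must match.
    return std::sscanf(token, "[%llu:%llu:%llu:%llu:%llu:%llu:%llu]",
                       &out.TabletId, &out.Generation, &out.Step, &out.Channel,
                       &out.Cookie, &out.BlobSize, &out.PartId) == 7;
}

int main() {
    TBlobIdFields f{};
    if (ParseBlobId("[1:1:42:0:0:40:0]", f)) {
        std::printf("step=%llu size=%llu\n", f.Step, f.BlobSize);  // prints: step=42 size=40
    }
    return 0;
}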
results_accumulator.log} |74.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |74.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |74.9%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |74.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedTopicName [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryServiceStreamCall [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:248: Test is failing right now ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::SessionsServerLimit [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:548: Enable after accepting a pull request with merging configs >> TColumnShardTestReadWrite::WriteOverload-InStore >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedTopicName [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "TopicRequest must have Topic field." 
ErrorCode: BAD_REQUEST } Assert failed: Check response: { Status: 128 ErrorReason: "multiple TopicRequest for topic \'rt3.dc1--topic1\'" ErrorCode: BAD_REQUEST } >> TCutHistoryRestrictions::BasicTest [GOOD] >> TCutHistoryRestrictions::EmptyAllowList [GOOD] >> TCutHistoryRestrictions::EmptyDenyList [GOOD] >> TCutHistoryRestrictions::BothListsEmpty [GOOD] >> ObjectDistribution::TestImbalanceCalcualtion [GOOD] >> ObjectDistribution::TestAllowedDomainsAndDown >> ObjectDistribution::TestAllowedDomainsAndDown [GOOD] >> ObjectDistribution::TestAddSameNode [GOOD] >> ObjectDistribution::TestManyIrrelevantNodes |74.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSessionPool [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListQueries >> TGRpcRateLimiterTest::ListResources [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredGrpcApi >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncTableClient [GOOD] >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt64_Reboot ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryService [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:200: Test is failing right now >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeQuery |74.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::GetSession/0 [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] [GOOD] >> TraverseDatashard::TraverseTwoTables >> GroupWriteTest::TwoTables [GOOD] |74.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::TwoTables [GOOD] Test command err: RandomSeed# 2296299050220888898 2025-06-25T14:27:28.808847Z 1 00h00m00.002048s :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.305903s 2025-06-25T14:27:28.810033Z 1 00h00m00.002048s :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.333364s 2025-06-25T14:27:30.589149Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 72058679074007041 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-25T14:27:30.589251Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 72058502699329537 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-25T14:27:30.615158Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: 
TabletId# 72058679074007041 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-25T14:27:30.615251Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 1 going to send TEvBlock {TabletId# 72058679074007041 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-25T14:27:30.615360Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-25T14:27:30.615389Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 1 going to send TEvBlock {TabletId# 72058502699329537 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-25T14:27:30.619349Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-25T14:27:30.619442Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-25T14:27:30.638634Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 2 going to send TEvCollectGarbage {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T14:27:30.638732Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 2 going to send TEvCollectGarbage {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T14:27:30.643204Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-25T14:27:30.643295Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-25T14:27:40.951466Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-25T14:27:40.951566Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 22 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T14:27:40.951627Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 22 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T14:27:40.951663Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-25T14:27:40.951702Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 23 Channel# 0 Deadline# 18446744073709551 Collect# 
true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T14:27:40.951748Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 23 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T14:27:40.951777Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-25T14:27:40.951811Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058679074007041 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 24 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T14:27:40.951851Z 1 00h01m20.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058502699329537 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 24 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T14:27:41.055220Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 22 Channel# 0 Status# OK} 2025-06-25T14:27:41.055326Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 22 Channel# 0 Status# OK} 2025-06-25T14:27:41.055375Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 23 Channel# 0 Status# OK} 2025-06-25T14:27:41.055422Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 23 Channel# 0 Status# OK} 2025-06-25T14:27:41.055469Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058679074007041 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058679074007041 RecordGeneration# 2 PerGenerationCounter# 24 Channel# 0 Status# OK} 2025-06-25T14:27:41.055522Z 1 00h01m20.010512s :BS_LOAD_TEST INFO: TabletId# 72058502699329537 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058502699329537 RecordGeneration# 2 PerGenerationCounter# 24 Channel# 0 Status# OK} >> AnalyzeColumnshard::AnalyzeSameOperationId |74.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteQuery >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeSave >> AsyncIndexChangeCollector::CoveredIndexUpdateCoveredColumn >> TraverseColumnShard::TraverseColumnTableRebootSaTabletInAggregate >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendControlQuery >> THiveTest::TestHiveBalancerWithPrefferedDC1 >> 
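The GroupWriteTest::TwoTables trace above follows the usual tablet fencing sequence: each simulated tablet discovers existing data (NODATA on a fresh group), blocks the previous generation, issues a hard TEvCollectGarbage at step 0 for its own generation, runs the Put workload, and finishes with a hard collect at step 4294967295 on shutdown. A compilable sketch of that ordering is below; Discover, Block and CollectGarbage are illustrative stubs standing in for the actor-event round-trips, not the actual load-test API.

#include <cstdio>

// Stubs standing in for TEvDiscover / TEvBlock / TEvCollectGarbage round-trips.
static void Discover(unsigned long long tablet) {
    std::printf("Discover tablet %llu (expect NODATA on a fresh group)\n", tablet);
}
static void Block(unsigned long long tablet, unsigned gen) {
    std::printf("Block tablet %llu up to generation %u\n", tablet, gen);
}
static void CollectGarbage(unsigned long long tablet, unsigned gen, unsigned step, bool hard) {
    std::printf("CollectGarbage tablet %llu gen %u step %u hard=%d\n", tablet, gen, step, hard);
}

int main() {
    const unsigned long long tablet = 72058502699329537ULL;  // one of the two tablet ids in the trace
    Discover(tablet);                              // TEvDiscover
    Block(tablet, 1);                              // TEvBlock: fence generation 1 before writing as generation 2
    CollectGarbage(tablet, 2, 0, true);            // hard collect at step 0 before the workload
    // ... the Put# writes happen here for the duration of the test ...
    CollectGarbage(tablet, 2, 0xFFFFFFFFu, true);  // final hard collect on PoisonPill / shutdown
    return 0;
}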
DataCleanup::OutReadSetsCleanedAfterCopyTable [GOOD] >> DataCleanup::BorrowerDataCleanedAfterCopyTable >> YdbSdkSessionsPool::StressTestSync/1 [GOOD] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v1] >> CdcStreamChangeCollector::UpsertIntoTwoStreams >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendGetResultData >> test.py::test[solomon-Subquery-default.txt] [GOOD] >> test.py::test[solomon-UnknownSetting-] >> YdbSdkSessionsPool::StressTestSync/0 [GOOD] >> TPersQueueCommonTest::TestWriteWithRateLimiterWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListJobs >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v1] [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListJobs [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeJob >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsTableClient [GOOD] >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsQueryClient [SKIPPED] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] >> YdbSdkSessionsPool1Session::CustomPlan/0 [GOOD] >> DataStreams::TestGetShardIterator [GOOD] >> DataStreams::TestGetRecordsWithoutPermission >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateConnection |74.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |74.9%| [LD] {RESULT} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |74.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut >> TGRpcRateLimiterTest::AcquireResourceManyRequiredGrpcApi [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredActorApi >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateConnectionWithServiceAccount >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListConnections >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeConnection 
|74.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestSync/1 [GOOD] >> YdbSdkSessionsPool::PeriodicTask/0 [GOOD] >> YdbSdkSessionsPool::PeriodicTask/1 >> YdbSdkSessions::TestActiveSessionCountAfterTransportError [GOOD] |74.9%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestSync/0 [GOOD] >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyConnection |74.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |74.9%| [LD] {RESULT} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |74.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |74.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |74.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |75.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |75.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |75.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |75.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap >> test.py::test[solomon-UnknownSetting-] [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyConnectionWithServiceAccount >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] >> THiveTest::TestHiveBalancerWithPrefferedDC1 [GOOD] >> THiveTest::TestHiveBalancerWithPrefferedDC2 ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsQueryClient [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:539: Enable after interactive tx support >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteConnection >> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest >> TPersQueueNewSchemeCacheTest::CheckGrpcWriteNoDC |75.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::CustomPlan/0 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsWithQueryServiceDdl [FAIL] Test command err: 2025-06-25T14:26:13.565023Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:26:13.565555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:13.565799Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c06/r3tmp/tmphTOQ7n/pdisk_1.dat 2025-06-25T14:26:15.671398Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 63665, node 1 TClient is connected to server localhost:61006 2025-06-25T14:26:18.786478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:26:18.850257Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:18.876378Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:26:18.876640Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:26:18.876794Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:26:18.878148Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:26:18.878821Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861556120891 != 1750861556120895 2025-06-25T14:26:18.952494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:26:18.952849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:26:18.966845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:26:19.250127Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 2025-06-25T14:26:31.957063Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:711:2591], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:31.957682Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:721:2596], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:31.958575Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:32.037095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:26:32.560188Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:725:2599], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-25T14:26:33.272075Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:777:2632] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:26:36.442628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:26:40.834037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:26:42.370849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:26:45.569473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:26:47.773729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:26:50.355101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:26:56.075188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=
: Error: GRpc error: (1): Cancelled on the server side ;EXPECTATION=1 GRpc shutdown warning: left infly: 1, spent: 3.733411 sec GRpc shutdown warning: left infly: 1, spent: 7.524753 sec GRpc shutdown warning: left infly: 1, spent: 11.349619 sec GRpc shutdown warning: left infly: 1, spent: 15.094961 sec GRpc shutdown warning: left infly: 1, spent: 18.956814 sec GRpc shutdown warning: left infly: 1, spent: 22.770795 sec GRpc shutdown warning: left infly: 1, spent: 26.50289 sec GRpc shutdown warning: failed to shutdown all connections, left infly: 1, spent: 30.007878 sec assertion failed at ydb/core/testlib/common_helper.cpp:191, void NKikimr::Tests::NCommon::THelper::StartSchemaRequestQueryServiceImpl(const TString &, const bool, const bool) const: (*rrPtr) TBackTrace::Capture()+28 (0x196EA28C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x19BA9B10) NKikimr::Tests::NCommon::THelper::StartSchemaRequestQueryServiceImpl(TBasicString> const&, bool, bool) const+4144 (0x36B17010) NKikimr::TLocalHelper::CreateSecrets() const+387 (0x19304063) NKikimr::NTestSuiteColumnShardTiers::DSConfigsImpl(bool)+2955 (0x193071FB) std::__y1::__function::__func, void ()>::operator()()+280 (0x19327E18) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x19BE0D16) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19BB0699) NKikimr::NTestSuiteColumnShardTiers::TCurrentTest::Execute()+1204 (0x19326DC4) NUnitTest::TTestFactory::Execute()+2438 (0x19BB1F66) NUnitTest::RunMain(int, char**)+5213 (0x19BDB28D) ??+0 (0x7F2B26546D90) __libc_start_main+128 (0x7F2B26546E40) _start+41 (0x16BD2029) ================================================================= ==15821==ERROR: LeakSanitizer: detected memory leaks Indirect leak of 26240 byte(s) in 1 object(s) allocated from: #0 0x1942050d in operator new(unsigned long) /-S/contrib/libs/clang18-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x20ebece8 in __libcpp_operator_new /-S/contrib/libs/cxxsupp/libcxx/include/new:271:10 #2 0x20ebece8 in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10 #3 0x20ebece8 in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32 #4 0x20ebece8 in __allocate_at_least > /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocate_at_least.h:41:19 #5 0x20ebece8 in __vallocate /-S/contrib/libs/cxxsupp/libcxx/include/vector:807:25 #6 0x20ebece8 in vector /-S/contrib/libs/cxxsupp/libcxx/include/vector:461:7 #7 0x20ebece8 in make_unique >, unsigned long &> /-S/contrib/libs/cxxsupp/libcxx/include/__memory/unique_ptr.h:642:30 #8 0x20ebece8 in grpc_core::Server::ChannelData::InitTransport(grpc_core::RefCountedPtr, grpc_core::RefCountedPtr, unsigned long, grpc_transport*, long) /-S/contrib/libs/grpc/src/core/lib/surface/server.cc:1155:9 #9 0x20ebe55d in grpc_core::Server::SetupTransport(grpc_transport*, grpc_pollset*, grpc_core::ChannelArgs const&, grpc_core::RefCountedPtr const&) /-S/contrib/libs/grpc/src/c ... 
NKikimr::NMetadata::NProvider::TInitializationSnapshotOwner> const&) /-S/ydb/services/metadata/initializer/accessor_init.cpp:109:30 #8 0x49a58323 in NKikimr::NMetadata::NProvider::TBehaviourRegistrator::Handle(TAutoPtr, TDelete>&) /-S/ydb/services/metadata/ds_table/behaviour_registrator_actor.cpp:45:5 #9 0x49a59994 in NKikimr::NMetadata::NProvider::TBehaviourRegistrator::StateMain(TAutoPtr&) /-S/ydb/services/metadata/ds_table/behaviour_registrator_actor.h:47:13 #10 0x1acc2dac in NActors::IActor::Receive(TAutoPtr&) /-S/ydb/library/actors/core/actor.cpp:280:13 #11 0x368dba44 in NActors::TTestActorRuntimeBase::SendInternal(TAutoPtr, unsigned int, bool) /-S/ydb/library/actors/testlib/test_runtime.cpp:1702:33 #12 0x368d42b9 in NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant) /-S/ydb/library/actors/testlib/test_runtime.cpp:1295:45 #13 0x368de633 in NActors::TTestActorRuntimeBase::WaitForEdgeEvents(std::__y1::function&)>, TSet, std::__y1::allocator> const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.cpp:1554:22 #14 0x36ab62e3 in NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventIf(TSet, std::__y1::allocator> const&, std::__y1::function const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:477:13 #15 0x36ab5402 in GrabEdgeEvent /-S/ydb/library/actors/testlib/test_runtime.h:526:20 #16 0x36ab5402 in NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEvent(NActors::TActorId const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:532:20 #17 0x36aad1a2 in NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventRethrow(NActors::TActorId const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:577:24 #18 0x36aacd7a in NActors::TTestActorRuntime::SimulateSleep(TDuration) /-S/ydb/core/testlib/actors/test_runtime.cpp:327:9 #19 0x36b16cf1 in NKikimr::Tests::NCommon::THelper::StartSchemaRequestQueryServiceImpl(TBasicString> const&, bool, bool) const /-S/ydb/core/testlib/common_helper.cpp:189:34 #20 0x19304062 in NKikimr::TLocalHelper::CreateSecrets() const /-S/ydb/core/tx/tiering/ut/ut_tiers.cpp:125:9 #21 0x193071fa in NKikimr::NTestSuiteColumnShardTiers::DSConfigsImpl(bool) /-S/ydb/core/tx/tiering/ut/ut_tiers.cpp:308:17 #22 0x19327e17 in operator() /-S/ydb/core/tx/tiering/ut/ut_tiers.cpp:146:1 #23 0x19327e17 in __invoke<(lambda at /-S/ydb/core/tx/tiering/ut/ut_tiers.cpp:146:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #24 0x19327e17 in __call<(lambda at /-S/ydb/core/tx/tiering/ut/ut_tiers.cpp:146:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #25 0x19327e17 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #26 0x19327e17 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #27 0x19be0d15 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #28 0x19be0d15 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #29 0x19be0d15 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #30 0x19bb0698 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #31 0x19326dc3 in NKikimr::NTestSuiteColumnShardTiers::TCurrentTest::Execute() 
/-S/ydb/core/tx/tiering/ut/ut_tiers.cpp:146:1 #32 0x19bb1f65 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #33 0x19bdb28c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #34 0x7f2b26546d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) Indirect leak of 8 byte(s) in 1 object(s) allocated from: #0 0x1942050d in operator new(unsigned long) /-S/contrib/libs/clang18-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x49a66a26 in __libcpp_operator_new /-S/contrib/libs/cxxsupp/libcxx/include/new:271:10 #2 0x49a66a26 in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10 #3 0x49a66a26 in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32 #4 0x49a66a26 in __allocate_at_least *> > /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocate_at_least.h:41:19 #5 0x49a66a26 in __split_buffer /-S/contrib/libs/cxxsupp/libcxx/include/__split_buffer:354:25 #6 0x49a66a26 in std::__y1::deque, std::__y1::allocator>>::__add_back_capacity() /-S/contrib/libs/cxxsupp/libcxx/include/deque:2186:51 #7 0x49a613f5 in emplace_back &> /-S/contrib/libs/cxxsupp/libcxx/include/deque:1611:5 #8 0x49a613f5 in NKikimr::NMetadata::NInitializer::TDSAccessorInitialized::OnPreparationFinished(TVector, std::__y1::allocator>> const&) /-S/ydb/services/metadata/initializer/accessor_init.cpp:70:19 #9 0x249c374b in NKikimr::NMetadata::NSecret::TSecretInitializer::DoPrepare(std::__y1::shared_ptr) const /-S/ydb/services/metadata/secret/initializer.cpp:49:17 #10 0x49a6442d in Prepare /-S/ydb/services/metadata/abstract/initialization.h:14:16 #11 0x49a6442d in NKikimr::NMetadata::NInitializer::TDSAccessorInitialized::Execute(NKikimr::NMetadata::NRequest::TConfig const&, TBasicString> const&, std::__y1::shared_ptr, std::__y1::shared_ptr, std::__y1::shared_ptr const&) /-S/ydb/services/metadata/initializer/accessor_init.cpp:109:30 #12 0x49a58323 in NKikimr::NMetadata::NProvider::TBehaviourRegistrator::Handle(TAutoPtr, TDelete>&) /-S/ydb/services/metadata/ds_table/behaviour_registrator_actor.cpp:45:5 #13 0x49a59994 in NKikimr::NMetadata::NProvider::TBehaviourRegistrator::StateMain(TAutoPtr&) /-S/ydb/services/metadata/ds_table/behaviour_registrator_actor.h:47:13 #14 0x1acc2dac in NActors::IActor::Receive(TAutoPtr&) /-S/ydb/library/actors/core/actor.cpp:280:13 #15 0x368dba44 in NActors::TTestActorRuntimeBase::SendInternal(TAutoPtr, unsigned int, bool) /-S/ydb/library/actors/testlib/test_runtime.cpp:1702:33 #16 0x368d42b9 in NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant) /-S/ydb/library/actors/testlib/test_runtime.cpp:1295:45 #17 0x368de633 in NActors::TTestActorRuntimeBase::WaitForEdgeEvents(std::__y1::function&)>, TSet, std::__y1::allocator> const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.cpp:1554:22 #18 0x36ab62e3 in NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventIf(TSet, std::__y1::allocator> const&, std::__y1::function const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:477:13 #19 0x36ab5402 in GrabEdgeEvent /-S/ydb/library/actors/testlib/test_runtime.h:526:20 #20 0x36ab5402 in NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEvent(NActors::TActorId const&, TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:532:20 #21 0x36aad1a2 in NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventRethrow(NActors::TActorId const&, 
TDuration) /-S/ydb/library/actors/testlib/test_runtime.h:577:24 #22 0x36aacd7a in NActors::TTestActorRuntime::SimulateSleep(TDuration) /-S/ydb/core/testlib/actors/test_runtime.cpp:327:9 #23 0x36b16cf1 in NKikimr::Tests::NCommon::THelper::StartSchemaRequestQueryServiceImpl(TBasicString> const&, bool, bool) const /-S/ydb/core/testlib/common_helper.cpp:189:34 #24 0x19304062 in NKikimr::TLocalHelper::CreateSecrets() const /-S/ydb/core/tx/tiering/ut/ut_tiers.cpp:125:9 #25 0x193071fa in NKikimr::NTestSuiteColumnShardTiers::DSConfigsImpl(bool) /-S/ydb/core/tx/tiering/ut/ut_tiers.cpp:308:17 #26 0x19327e17 in operator() /-S/ydb/core/tx/tiering/ut/ut_tiers.cpp:146:1 #27 0x19327e17 in __invoke<(lambda at /-S/ydb/core/tx/tiering/ut/ut_tiers.cpp:146:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #28 0x19327e17 in __call<(lambda at /-S/ydb/core/tx/tiering/ut/ut_tiers.cpp:146:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #29 0x19327e17 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #30 0x19327e17 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #31 0x19be0d15 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #32 0x19be0d15 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #33 0x19be0d15 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #34 0x19bb0698 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #35 0x19326dc3 in NKikimr::NTestSuiteColumnShardTiers::TCurrentTest::Execute() /-S/ydb/core/tx/tiering/ut/ut_tiers.cpp:146:1 #36 0x19bb1f65 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #37 0x19bdb28c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #38 0x7f2b26546d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) SUMMARY: AddressSanitizer: 415967 byte(s) leaked in 5108 allocation(s). 
|75.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |75.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |75.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v1] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendTestConnection >> DataStreams::TestGetRecordsWithoutPermission [GOOD] >> DataStreams::TestGetRecordsWithCount [GOOD] >> DataStreams::TestInvalidRetentionCombinations >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendTestConnectionWithServiceAccount >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] |75.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestActiveSessionCountAfterTransportError [GOOD] >> CdcStreamChangeCollector::UpsertIntoTwoStreams [GOOD] >> CdcStreamChangeCollector::PageFaults >> TPersqueueControlPlaneTestSuite::SetupReadLockSessionWithDatabase >> DataCleanup::BorrowerDataCleanedAfterCopyTable [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit >> TColumnShardTestReadWrite::WriteOverload-InStore [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendTestConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateBinding >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_3 >> TPersQueueCommonTest::Auth_CreateGrpcStreamWithInvalidTokenInInitialMetadata_SessionClosedWithUnauthenticatedError ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteOverload-InStore [GOOD] Test command err: 2025-06-25T14:27:40.182698Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:27:40.217262Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:27:40.217452Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:27:40.223921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 
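The LeakSanitizer report above attributes the leaked bytes to allocations reachable from grpc_core::Server::ChannelData::InitTransport, i.e. gRPC server state that was still alive when the test aborted after the failed assertion. When an allocation is known to outlive the test intentionally, the standard LSan interface (a generic ASan/LSan runtime facility, not a YDB-specific API) can exclude it from such reports; a minimal sketch, assuming a -fsanitize=address build:

#include <cstddef>
#include <sanitizer/lsan_interface.h>  // shipped with the ASan/LSan runtime

// Mark a deliberately process-lifetime allocation so LeakSanitizer skips it
// in reports like the one above.
void* AllocateProcessSingleton(std::size_t bytes) {
    void* p = ::operator new(bytes);
    __lsan_ignore_object(p);  // this allocation will not be reported as a leak
    return p;
}

int main() {
    AllocateProcessSingleton(64);  // without __lsan_ignore_object this block would be reported as leaked
    return 0;
}

Alternatively, known third-party frames can be silenced without code changes by running the test with LSAN_OPTIONS=suppressions=<file>, which keeps genuine leaks in the report while hiding the suppressed stacks.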
2025-06-25T14:27:40.224145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:27:40.224394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:27:40.224502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:27:40.224624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:27:40.224742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:27:40.224853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:27:40.224969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:27:40.225067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:27:40.225176Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:27:40.225287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:27:40.255526Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:27:40.255706Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:27:40.255785Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:27:40.255984Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:27:40.256173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:27:40.256262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:27:40.256338Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:27:40.256428Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:27:40.256489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:27:40.256531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:27:40.256592Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:27:40.256756Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:27:40.256822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:27:40.256860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:27:40.256901Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:27:40.256992Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:27:40.257041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:27:40.257082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:27:40.257111Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:27:40.257154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:27:40.257186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:27:40.257215Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-25T14:27:40.257413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:27:40.257456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:27:40.257484Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:27:40.257668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:27:40.257728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:27:40.257766Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:27:40.257914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:27:40.257965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:27:40.257994Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:27:40.258067Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:27:40.258145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:27:40.258186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:27:40.258227Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:27:40.258422Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=42; 2025-06-25T14:27:40.258510Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=43; 2025-06-25T14:27:40.258607Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=50; 2025-06-25T14:27:40.258682Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=32; 
2025-06-25T14:27:40.258777Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:27:40.258881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:27:40.258929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:27:40.258996Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... e::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=4428;count=36;size_of_meta=136; 2025-06-25T14:27:54.264399Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=5724;count=18;size_of_portion=208; 2025-06-25T14:27:54.265900Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:18 Blob count: 1 2025-06-25T14:27:54.267152Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=19;operation_id=18; 2025-06-25T14:27:54.285522Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:18 Blob count: 1 2025-06-25T14:27:54.287597Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=92cc100e-51d011f0-a09e7b74-77bde5b5; 2025-06-25T14:27:54.287824Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=1786;count=37; 2025-06-25T14:27:54.287913Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=4674;count=38;size_of_meta=136; 2025-06-25T14:27:54.287980Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6042;count=19;size_of_portion=208; 2025-06-25T14:27:54.289445Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:19 Blob count: 1 2025-06-25T14:27:54.289573Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=20;operation_id=19; 2025-06-25T14:27:54.302379Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:19 Blob count: 1 2025-06-25T14:27:54.316166Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=933ab89c-51d011f0-b63a6206-91920bfe; 2025-06-25T14:27:54.316448Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=1880;count=39; 2025-06-25T14:27:54.316526Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=4920;count=40;size_of_meta=136; 2025-06-25T14:27:54.316591Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6360;count=20;size_of_portion=208; 2025-06-25T14:27:54.317618Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:20 Blob count: 1 2025-06-25T14:27:54.317731Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=21;operation_id=20; 2025-06-25T14:27:54.330123Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:20 Blob count: 1 2025-06-25T14:27:54.331967Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=937ac05e-51d011f0-8d0a01b3-bfdd30f1; 2025-06-25T14:27:54.332184Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=1974;count=41; 2025-06-25T14:27:54.332255Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=5166;count=42;size_of_meta=136; 2025-06-25T14:27:54.332337Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6678;count=21;size_of_portion=208; 2025-06-25T14:27:54.333492Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:21 Blob count: 1 2025-06-25T14:27:54.333719Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=22;operation_id=21; 2025-06-25T14:27:54.347128Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:21 Blob count: 1 2025-06-25T14:27:54.363864Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:210;event=register_operation;operation_id=22;last=22; 2025-06-25T14:27:54.363958Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=write_queue.cpp:14;writing_size=6330728;operation_id=950493dc-51d011f0-b2b390ad-c353e947;in_flight=1;size_in_flight=6330728; 2025-06-25T14:27:55.148158Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:128:2158];write_id=22;path_id={internal: 9438184000001, ss: 1};fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=8246112;count=1;actions=__DEFAULT,;waiting=1;; 2025-06-25T14:27:55.251135Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=950493dc-51d011f0-b2b390ad-c353e947; 2025-06-25T14:27:55.251458Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=2068;count=43; 2025-06-25T14:27:55.251551Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=5412;count=44;size_of_meta=136; 2025-06-25T14:27:55.251631Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6996;count=22;size_of_portion=208; 2025-06-25T14:27:55.252840Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:22 Blob count: 1 2025-06-25T14:27:55.252978Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=23;operation_id=22; 2025-06-25T14:27:55.265689Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:22 Blob count: 1 >> AsyncIndexChangeCollector::CoveredIndexUpdateCoveredColumn [GOOD] >> AsyncIndexChangeCollector::CoveredIndexUpsert >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListBindings ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_data_cleanup/unittest >> DataCleanup::BorrowerDataCleanedAfterCopyTable [GOOD] Test command err: 2025-06-25T14:25:53.685411Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:287:2329], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:25:53.685561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:25:53.685621Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00086d/r3tmp/tmpd7ZzUQ/pdisk_1.dat 2025-06-25T14:25:54.008959Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:25:54.011774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:25:54.086210Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:25:54.102987Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:35:2082] 1750861551489491 != 1750861551489495 2025-06-25T14:25:54.160291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:25:54.160952Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:25:54.178602Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:25:54.284824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:25:54.748011Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:727:2608], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:25:54.748095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:736:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:25:54.748157Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:25:54.751760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:25:54.782273Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:25:54.932056Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:741:2616], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:25:55.003613Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:811:2655] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:25:56.866775Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykqp91t3sc2dbh8rfctgqnv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGRmMGZkN2YtYjk4ZDVlNTUtNmEwZWM3NWEtZjZmNGE0NmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:26:11.714303Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:267:2311], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:26:11.715130Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:11.716666Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00086d/r3tmp/tmp0Rt7Om/pdisk_1.dat 2025-06-25T14:26:12.864465Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T14:26:12.880863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:26:13.008756Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:13.046968Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:35:2082] 1750861562487110 != 1750861562487114 2025-06-25T14:26:13.142512Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:26:13.143355Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:26:13.157913Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:26:13.345874Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:26:15.462072Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:720:2602], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:15.462631Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:730:2607], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:15.463716Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:26:15.595072Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:26:15.725998Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:26:16.028761Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:734:2610], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:26:16.100260Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:803:2648] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:26:17.625129Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykqpx7y2w8t9pq4p9fsvbx8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWRjYWM4YzQtM2E0ZDRlMTQtZWU4YjQ2MWUtMmY3MTNlYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:26:27.576713Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:277:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:26:27.577385Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:27.578036Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00086d/r3tmp/tmpHMYXCO/pdisk_1.dat 2025-06-25T14:26:28.835913Z node 3 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 3 Type# 268639257 2025-06-25T14:26:28.842132Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:26:28.933891Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:28.955661Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:35:2082] 1750861580160065 != 1750861580160068 2025-06-25T14:26:29.015784Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025 ... node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:740:2616], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:27:30.689087Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:811:2656] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:27:31.183098Z node 5 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykqs6e9cpxw7hkkwhjxmzrq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=NDEyYTMwNzUtYjYxZTgwMjAtY2Q1Mjg4MTctYTY4NjdjMzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:27:38.859335Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:277:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:27:38.859556Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:38.859720Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00086d/r3tmp/tmpbVFrT0/pdisk_1.dat 2025-06-25T14:27:39.227483Z node 6 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 6 Type# 268639257 2025-06-25T14:27:39.229566Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:39.281891Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:39.284769Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:35:2082] 1750861653419657 != 1750861653419660 2025-06-25T14:27:39.338486Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:39.338657Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:39.350834Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:39.451021Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:40.102320Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:40.391133Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:830:2681], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:40.391261Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:840:2686], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:40.391353Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:40.405825Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:27:40.590606Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:844:2689], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:27:40.640876Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:901:2727] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:27:41.384657Z node 6 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykqsg7515czn752yycdwcj5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=Y2Y4ZDIzNDMtMzI2YjE2YzAtYTNjNjdiNDgtYTYzMjhlZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:27:41.938207Z node 6 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykqsh7444bxkq3w1w1a1hfh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=NmY0YzhkNTktNGI2ZGVjM2UtYTkxNmI2MTUtNGIzMjZkNTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:27:50.205227Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:287:2329], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:27:50.205507Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:50.205692Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00086d/r3tmp/tmpZhWBsy/pdisk_1.dat 2025-06-25T14:27:50.697552Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 2025-06-25T14:27:50.699731Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:50.747260Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:50.762256Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:35:2082] 1750861663703402 != 1750861663703406 2025-06-25T14:27:50.823414Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:50.823605Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:50.837830Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:50.953499Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:51.676361Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:51.928665Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:830:2681], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:51.928794Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:840:2686], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:51.928895Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:51.935495Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:27:52.097797Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:844:2689], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:27:52.137709Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:901:2727] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:27:52.815500Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykqsvfk02c2rtqnc4fvg4tj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OTQ3Y2ZlYzEtZDUxMWVmMGUtOTU5MDNmODAtOTQ1MGJhYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:27:53.504275Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykqswcp9e42gakv57x38g42, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NTIwOGFkZmMtNjZmN2YxNy01M2JlYzgyMi04ZjViMmRm, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:27:54.012241Z node 7 :TX_DATASHARD WARN: datashard__data_cleanup.cpp:37: DataCleanup of tablet# 72075186224037888: has borrowed parts, requested from [7:555:2481] >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError |75.0%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendListBindings [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeBinding >> TGRpcRateLimiterTest::AcquireResourceManyRequiredActorApi [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredGrpcApiWithCancelAfter >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-std] >> TPersqueueControlPlaneTestSuite::TestAddRemoveReadRule >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyBinding |75.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |75.0%| [TM] {RESULT} ydb/core/tx/datashard/ut_data_cleanup/unittest |75.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |75.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |75.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |75.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteBinding >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClass |75.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction >> TraverseDatashard::TraverseTwoTables [GOOD] >> THiveTest::TestHiveBalancerWithPrefferedDC2 [GOOD] >> 
THiveTest::TestHiveBalancerWithPreferredDC3 |75.0%| [TM] {asan, default-linux-x86_64, pic, release} ydb/library/yql/tests/sql/solomon/pytest >> test.py::test[solomon-UnknownSetting-] [GOOD] >> TControlPlaneProxyCheckPermissionsControlPlaneStorageSuccess::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateQuery >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v0] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListQueries ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseTwoTables [GOOD] Test command err: 2025-06-25T14:27:44.778932Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:27:44.779297Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:44.779400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0020b9/r3tmp/tmpPvrZjZ/pdisk_1.dat 2025-06-25T14:27:45.234064Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7796, node 1 2025-06-25T14:27:45.525889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:27:45.525942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:27:45.525971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:27:45.526530Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:45.528894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:45.627009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:45.627138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:45.642219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61588 2025-06-25T14:27:46.368174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:27:51.960697Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:27:52.002755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:52.002882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:52.046019Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:27:52.057316Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:52.439761Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:52.482228Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:52.482909Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:52.483458Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:52.483586Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:52.483839Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:52.483919Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:52.484007Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:52.484081Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:52.484154Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:52.713717Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:52.713833Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:52.733898Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:53.012507Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:53.073069Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:27:53.073197Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:27:53.143126Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:27:53.143361Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:27:53.143590Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:27:53.143647Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:27:53.143715Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:27:53.143771Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:27:53.143820Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:27:53.143875Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:27:53.144460Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:27:53.179512Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:27:53.179636Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:27:53.195456Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:27:53.217490Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T14:27:53.217866Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T14:27:53.260427Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:27:53.310092Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:27:53.310145Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:27:53.310217Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:27:53.490690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:53.544957Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:27:53.545118Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:27:53.931986Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:27:54.187599Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:27:54.257064Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:27:54.929773Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:55.295973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:55.296118Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:55.329649Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:56.093367Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2452:3074], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:56.093568Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:56.094655Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2457:3078]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:27:56.094860Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:27:56.094924Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2459:3080] 2025-06-25T14:27:56.094971Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2459:3080] 2025-06-25T14:27:56.095407Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2460:2949] 2025-06-25T14:27:56.095553Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2459:3080], server id = [2:2460:2949], tablet id = 72075186224037894, status = OK 2025-06-25T14:27:56.095659Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2460:2949], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T14:27:56.095704Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-25T14:27:56.095850Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:27:56.095899Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2457:3078], StatRequests.size() = 1 2025-06-25T14:27:56.117970Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2464:3084], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:56.118103Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:56.118571Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2469:3089], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:56.131646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:27:56.352897Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T14:27:56.352992Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T14:27:56.432712Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2459:3080], schemeshard count = 1 2025-06-25T14:27:56.838149Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2471:3091], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T14:27:57.045100Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2583:3161] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:27:57.067610Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2606:3177]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:27:57.067773Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:27:57.067809Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2606:3177], StatRequests.size() = 1 2025-06-25T14:27:57.146214Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykqszfb5kwapngppmknd4kn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDBhMjkwZmYtZTgyOTY0MGItZGVkZTI1OGUtMmI0YTJmNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:27:57.316928Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:57.770444Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:2944:3239]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:27:57.770596Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T14:27:57.770627Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:2944:3239], StatRequests.size() = 1 2025-06-25T14:27:57.798969Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 4 ], ReplyToActorId[ [1:2953:3248]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:27:57.799233Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 4 ] 2025-06-25T14:27:57.799278Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 4, ReplyToActorId = [1:2953:3248], StatRequests.size() = 1 2025-06-25T14:27:57.846917Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykqt15wc578g6pbhsqwvxdf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjNhZjFmMzUtZGFlOWMwZmQtNDM5ZjA5N2YtMTU3MDg4ODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:27:57.918935Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:2997:3203]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T14:27:57.924477Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:27:57.924550Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T14:27:57.924972Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:27:57.925020Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T14:27:57.925075Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T14:27:58.012176Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-25T14:27:58.012947Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 2025-06-25T14:27:58.013379Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:3021:3215]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T14:27:58.016225Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:27:58.016284Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T14:27:58.017213Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:27:58.017271Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T14:27:58.017317Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 5] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T14:27:58.020000Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 2 ], RowsCount[ 0 ] 2025-06-25T14:27:58.020370Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] [GOOD] >> DataStreams::TestInvalidRetentionCombinations [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v0] >> AsyncIndexChangeCollector::CoveredIndexUpsert [GOOD] >> AsyncIndexChangeCollector::AllColumnsInPk >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeQuery >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendGetQueryStatus >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v1] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestInvalidRetentionCombinations [GOOD] Test command err: 2025-06-25T14:27:39.421881Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893565907336627:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:39.422088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016cc/r3tmp/tmpE77tSU/pdisk_1.dat 2025-06-25T14:27:40.328163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:40.328417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:40.333164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:40.407295Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:40.444861Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 29633, node 1 2025-06-25T14:27:40.753645Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:27:40.753669Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:27:40.753676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:27:40.753810Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11368 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:27:41.535504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:27:41.730359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:11368 2025-06-25T14:27:42.054050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T14:27:47.026416Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519893595411430838:2171];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:47.031291Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016cc/r3tmp/tmpl0bl3y/pdisk_1.dat 2025-06-25T14:27:47.491285Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:47.518516Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:47.518599Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:47.530374Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61809, node 4 2025-06-25T14:27:48.026739Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:48.051216Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:27:48.051236Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:27:48.051244Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:27:48.051427Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31107 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:27:49.023441Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:27:49.165295Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:31107 2025-06-25T14:27:49.416919Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T14:27:49.735563Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:27:49.785394Z node 4 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [4:7519893608296333884:2834], for# user2@builtin, access# DescribeSchema 2025-06-25T14:27:49.797834Z node 4 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [4:7519893608296333887:2835], for# user2@builtin, access# DescribeSchema 2025-06-25T14:27:49.808108Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:27:54.809402Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519893632843521413:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:54.809518Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016cc/r3tmp/tmpIwPrbt/pdisk_1.dat 2025-06-25T14:27:55.291335Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:55.312707Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:55.312817Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:55.331785Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6277, node 7 2025-06-25T14:27:55.534825Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:27:55.534874Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:27:55.534885Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:27:55.535109Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:55.930693Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7381 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:27:56.028161Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:27:56.221108Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:7381 2025-06-25T14:27:56.669613Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T14:27:56.697328Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 168, storage 10, code: 500080
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 144, storage 0, code: 500080
: Error: write_speed per second in partition must have values from set {131072,524288,1048576}, got 130048, code: 500080
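The two retention errors above and the write_speed errors around this point (a second one for 1049600 follows below) quote the stream-configuration limits verbatim. A minimal standalone C++ sketch of those quoted rules, illustrative only: the limits are taken exactly as printed in the error text, the function names are hypothetical, and this is not the actual YDB datastreams validation code.

    #include <cstdint>
    #include <set>

    // Illustrative re-statement of the limits quoted in the error messages (code 500080);
    // names are hypothetical, this is not YDB's implementation.
    bool RetentionAllowed(uint32_t hours, uint64_t storageMb) {
        // { hours : [0, 24], storage : [0, 0] }  or  { hours : [0, 168], storage : [51200, 1048576] }
        return (hours <= 24 && storageMb == 0) ||
               (hours <= 168 && storageMb >= 51200 && storageMb <= 1048576);
    }

    bool WriteSpeedAllowed(uint64_t bytesPerSecond) {
        // write_speed per second in a partition must be one of these exact values
        static const std::set<uint64_t> kAllowed{131072, 524288, 1048576};
        return kAllowed.count(bytesPerSecond) > 0;
    }

    int main() {
        // The combinations rejected in this test run:
        //   hours=168, storage=10; hours=144, storage=0; write_speed=130048; write_speed=1049600
        bool anyAccepted = RetentionAllowed(168, 10) || RetentionAllowed(144, 0) ||
                           WriteSpeedAllowed(130048) || WriteSpeedAllowed(1049600);
        return anyAccepted ? 1 : 0;  // expected 0: all four combinations are invalid
    }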
: Error: write_speed per second in partition must have values from set {131072,524288,1048576}, got 1049600, code: 500080 >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::TTLUsage Test command err: 2025-06-25T14:26:05.439648Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:26:05.440668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:05.441138Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c14/r3tmp/tmp108xTG/pdisk_1.dat 2025-06-25T14:26:06.790815Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 18393, node 1 TClient is connected to server localhost:6090 2025-06-25T14:26:10.266107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:26:10.478225Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:10.486269Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:26:10.486315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:26:10.486341Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:26:10.486706Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:26:10.486966Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861555166150 != 1750861555166154 2025-06-25T14:26:10.585851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:26:10.586794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:26:10.603586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:26:11.028813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) 2025-06-25T14:26:11.754036Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:649:2541], Recipient [1:688:2572]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:26:11.764305Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:649:2541], Recipient [1:688:2572]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:26:11.765947Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:688:2572];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:26:11.992370Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:26:11.994916Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037888 2025-06-25T14:26:12.103664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:26:12.105915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:26:12.107780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:26:12.108633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:26:12.109184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:26:12.109529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:26:12.110437Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:26:12.111279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:26:12.111853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:26:12.112359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:26:12.112935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:688:2572];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:26:12.134238Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:649:2541], Recipient [1:688:2572]: 
NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:26:12.373462Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 72075186224037888 2025-06-25T14:26:12.373671Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:26:12.373725Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:26:12.374409Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:26:12.374521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:26:12.374573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:26:12.374616Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:26:12.374692Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:26:12.374749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:26:12.374794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:26:12.374818Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:26:12.374943Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:26:12.375249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:26:12.375538Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:26:12.375794Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:26:12.377084Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:26:12.377359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:26:12.378117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:26:12.378413Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:26:12.378987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:26:12.379024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:26:12.379287Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;se ... ;count=1;size=2818;count=21;;1:size=90;count=1;size=47874;count=10;;2:size=0;count=0;;3:size=1466448;count=1;;4:size=1479208;count=1;;5:size=1458600;count=1;;6:size=1445608;count=1;;7:size=1445920;count=1;;8:size=808584;count=1;;9:size=3299472;count=4;;10:size=1445744;count=1;;11:size=1445408;count=1;;12:size=1445360;count=1;;13:size=1746088;count=2;;14:size=2558640;count=6;;15:size=1445928;count=1;;16:size=1445448;count=1;;17:size=1445400;count=1;;18:size=1746648;count=2;;19:size=2601424;count=6;;20:size=1445376;count=1;;21:size=1445528;count=1;;22:size=1445360;count=1;;23:size=1745704;count=2;;24:size=0;count=0;;25:size=0;count=0;;26:size=0;count=0;;27:size=0;count=0;;28:size=0;count=0;;29:size=0;count=0;;30:size=0;count=0;;31:size=0;count=0;;32:size=0;count=0;;33:size=0;count=0;;34:size=0;count=0;;35:size=0;count=0;;36:size=0;count=0;;37:size=0;count=0;;38:size=0;count=0;;39:size=0;count=0;;40:size=0;count=0;;41:size=0;count=0;;42:size=0;count=0;;43:size=0;count=0;;44:size=0;count=0;;45:size=0;count=0;;46:size=0;count=0;;47:size=0;count=0;;48:size=0;count=0;;49:size=0;count=0;;50:size=0;count=0;;51:size=0;count=0;;52:size=0;count=0;;53:size=0;count=0;;54:size=0;count=0;;55:size=0;count=0;;56:size=0;count=0;;57:size=0;count=0;;58:size=0;count=0;;59:size=0;count=0;;60:size=0;count=0;;61:size=0;count=0;;62:size=0;count=0;;63:size=0;count=0;;64:size=0;count=0;;65:size=0;count=0;; TEvBlobStorage::TEvPut 
tId=72075186224037888;c=0;:74/0:size=69;count=1;size=2887;count=22;;1:size=90;count=1;size=47874;count=10;;2:size=0;count=0;;3:size=1466448;count=1;;4:size=1479208;count=1;;5:size=1458600;count=1;;6:size=1445608;count=1;;7:size=1445920;count=1;;8:size=808584;count=1;;9:size=3299472;count=4;;10:size=1445744;count=1;;11:size=1445408;count=1;;12:size=1445360;count=1;;13:size=1746088;count=2;;14:size=2558640;count=6;;15:size=1445928;count=1;;16:size=1445448;count=1;;17:size=1445400;count=1;;18:size=1746648;count=2;;19:size=2601424;count=6;;20:size=1445376;count=1;;21:size=1445528;count=1;;22:size=1445360;count=1;;23:size=1745704;count=2;;24:size=0;count=0;;25:size=0;count=0;;26:size=0;count=0;;27:size=0;count=0;;28:size=0;count=0;;29:size=0;count=0;;30:size=0;count=0;;31:size=0;count=0;;32:size=0;count=0;;33:size=0;count=0;;34:size=0;count=0;;35:size=0;count=0;;36:size=0;count=0;;37:size=0;count=0;;38:size=0;count=0;;39:size=0;count=0;;40:size=0;count=0;;41:size=0;count=0;;42:size=0;count=0;;43:size=0;count=0;;44:size=0;count=0;;45:size=0;count=0;;46:size=0;count=0;;47:size=0;count=0;;48:size=0;count=0;;49:size=0;count=0;;50:size=0;count=0;;51:size=0;count=0;;52:size=0;count=0;;53:size=0;count=0;;54:size=0;count=0;;55:size=0;count=0;;56:size=0;count=0;;57:size=0;count=0;;58:size=0;count=0;;59:size=0;count=0;;60:size=0;count=0;;61:size=0;count=0;;62:size=0;count=0;;63:size=0;count=0;;64:size=0;count=0;;65:size=0;count=0;; 2025-06-25T14:27:57.615010Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-25T14:27:57.615124Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;fline=with_appended.cpp:65;portions=36,37,;task_id=94fa1344-51d011f0-8674e22f-577531ff; 2025-06-25T14:27:57.615595Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;fline=granule.cpp:17;event=upsert_portion;portion=(portion_id:36;path_id:3757128055669836289;records_count:61797;schema_version:1;level:1;;column_size:2172608;index_size:0;meta:(()););path_id=3757128055669836289; 2025-06-25T14:27:57.615769Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;path_id=3757128055669836289;fline=common_level.h:121;from=412174000000,uid_412174,;to=515168000000,uid_515168,; 2025-06-25T14:27:57.615861Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;path_id=3757128055669836289;fline=common_level.h:141;itFrom=0;itTo=1;raw=104801457;count=2;packed=3094120; 2025-06-25T14:27:57.615968Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;fline=tiering.cpp:49;tiering_info=__DEFAULT/0.000000s;$$DELETE/515168.000000s;; 2025-06-25T14:27:57.616117Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;fline=granule.cpp:17;event=upsert_portion;portion=(portion_id:37;path_id:3757128055669836289;records_count:61797;schema_version:1;level:1;;column_size:2172304;index_size:0;meta:(()););path_id=3757128055669836289; 2025-06-25T14:27:57.616219Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;path_id=3757128055669836289;fline=common_level.h:121;from=412174000000,uid_412174,;to=576965000000,uid_576965,; 2025-06-25T14:27:57.616284Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;path_id=3757128055669836289;fline=common_level.h:141;itFrom=0;itTo=1;raw=104801457;count=2;packed=3094120; 2025-06-25T14:27:57.616937Z node 1 :TX_COLUMNSHARD TRACE: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;fline=tiering.cpp:49;tiering_info=__DEFAULT/0.000000s;$$DELETE/576965.000000s;; 2025-06-25T14:27:57.617071Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::94fa1344-51d011f0-8674e22f-577531ff; 2025-06-25T14:27:57.617187Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:3757128055669836289;path_id:3757128055669836289;size:21160728;portions_count:37;); 2025-06-25T14:27:57.617266Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;tablet_id=72075186224037888;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T14:27:57.617414Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;tablet_id=72075186224037888;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-25T14:27:57.617517Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;tablet_id=72075186224037888;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=0;tx_id=18446744073709551615;;current_snapshot_ts=21000; 2025-06-25T14:27:57.617583Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;tablet_id=72075186224037888;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T14:27:57.617667Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;tablet_id=72075186224037888;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T14:27:57.617730Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;tablet_id=72075186224037888;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T14:27:57.617845Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;tablet_id=72075186224037888;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.400000s; 2025-06-25T14:27:57.617926Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;tablet_id=72075186224037888;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T14:27:57.618217Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 72075186224037888 Save Batch GenStep: 1:21 Blob count: 2 VERIFY failed (2025-06-25T14:27:57.618442Z): 
tablet_id=72075186224037888;task_id=94fa1344-51d011f0-8674e22f-577531ff;verification=CompactionsLimit.Dec() >= 0;fline=ro_controller.cpp:35; ydb/library/actors/core/log.cpp:800 ~TVerifyFormattedRecordWriter(): requirement false failed NPrivate::InternalPanicImpl(int, char const*, char const*, int, int, int, TBasicStringBuf>, char const*, unsigned long)+873 (0x19731AB9) NPrivate::Panic(NPrivate::TStaticBuf const&, int, char const*, char const*, char const*, ...)+571 (0x1972014B) NActors::TVerifyFormattedRecordWriter::~TVerifyFormattedRecordWriter()+326 (0x1AD95EB6) NKikimr::NYDBTest::NColumnShard::TReadOnlyController::DoOnWriteIndexComplete(NKikimr::NOlap::TColumnEngineChanges const&, NKikimr::NColumnShard::TColumnShard const&)+3892 (0x49D8E6E4) NKikimr::NColumnShard::TTxWriteIndex::Complete(NActors::TActorContext const&)+4781 (0x3105872D) NKikimr::NTabletFlatExecutor::TSeat::Complete(NActors::TActorContext const&, bool)+899 (0x1F5DA2B3) NKikimr::NTabletFlatExecutor::TLogicRedo::Confirm(unsigned int, NActors::TActorContext const&, NActors::TActorId const&)+3856 (0x1F4BE8A0) NKikimr::NTabletFlatExecutor::TExecutor::Handle(TAutoPtr, TDelete>&, NActors::TActorContext const&)+1521 (0x1F309B41) NKikimr::NTabletFlatExecutor::TExecutor::StateWork(TAutoPtr&)+3039 (0x1F2A5E7F) NActors::IActor::Receive(TAutoPtr&)+237 (0x1ACC2DAD) NActors::TTestActorRuntimeBase::SendInternal(TAutoPtr, unsigned int, bool)+3557 (0x368DBA45) NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant)+12602 (0x368D42BA) NActors::TTestActorRuntimeBase::WaitForEdgeEvents(std::__y1::function&)>, TSet, std::__y1::allocator> const&, TDuration)+1076 (0x368DE634) NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventIf(TSet, std::__y1::allocator> const&, std::__y1::function const&, TDuration)+292 (0x36AB62E4) NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEvent(NActors::TActorId const&, TDuration)+419 (0x36AB5403) NActors::TEvents::TEvWakeup::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventRethrow(NActors::TActorId const&, TDuration)+307 (0x36AAD1A3) NActors::TTestActorRuntime::SimulateSleep(TDuration)+1115 (0x36AACD7B) NKikimr::NTestSuiteColumnShardTiers::TTestCaseTTLUsage::Execute_(NUnitTest::TTestContext&)+4913 (0x19315CE1) std::__y1::__function::__func, void ()>::operator()()+280 (0x19327E18) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x19BE0D16) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19BB0699) NKikimr::NTestSuiteColumnShardTiers::TCurrentTest::Execute()+1204 (0x19326DC4) NUnitTest::TTestFactory::Execute()+2438 (0x19BB1F66) NUnitTest::RunMain(int, char**)+5213 (0x19BDB28D) ??+0 (0x7F2C6B37AD90) __libc_start_main+128 (0x7F2C6B37AE40) _start+41 (0x16BD2029) |75.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |75.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view >> THiveTest::TestHiveBalancerWithPreferredDC3 [GOOD] >> THiveTest::TestHiveBalancerWithFollowers >> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest [GOOD] >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse |75.1%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyQuery |75.1%| [TM] {RESULT} ydb/library/yql/tests/sql/solomon/pytest |75.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] [GOOD] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v0] |75.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] [GOOD] >> TPersqueueControlPlaneTestSuite::SetupReadLockSessionWithDatabase [GOOD] >> TPersqueueControlPlaneTestSuite::SetupWriteLockSessionWithDatabase >> TPersQueueCommonTest::Auth_CreateGrpcStreamWithInvalidTokenInInitialMetadata_SessionClosedWithUnauthenticatedError [GOOD] >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteQuery |75.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |75.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |75.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit [GOOD] Test command err: === Server->StartServer(false); 2025-06-25T14:27:45.070996Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893592659920325:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:45.071636Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:45.127506Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893594153196121:2072];send_to=[0:7307199536658146131:7762515]; 
2025-06-25T14:27:45.127558Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:45.432053Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00199b/r3tmp/tmpO3ql4U/pdisk_1.dat 2025-06-25T14:27:45.449942Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:45.854957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:45.855058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:45.857940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:45.858011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:45.874618Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:27:45.874774Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:45.878791Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:45.914872Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19522, node 1 2025-06-25T14:27:46.084840Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:46.145044Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:46.372692Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/00199b/r3tmp/yandexcudYfK.tmp 2025-06-25T14:27:46.372719Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/00199b/r3tmp/yandexcudYfK.tmp 2025-06-25T14:27:46.448724Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/00199b/r3tmp/yandexcudYfK.tmp 2025-06-25T14:27:46.448920Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:46.627896Z INFO: TTestServer started on Port 31611 GrpcPort 19522 TClient is connected to server localhost:31611 PQClient connected to localhost:19522 === TenantModeEnabled() = 1 === Init PQ - start server on port 19522 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:27:48.449579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:27:48.449810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:48.450063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:27:48.450088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:27:48.450471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:27:48.450526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:48.465418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:27:48.466452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:27:48.466653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:48.466706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:27:48.466727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: 
TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-25T14:27:48.466739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 2 -> 3 2025-06-25T14:27:48.469540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:48.469600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:27:48.469614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 3 -> 128 2025-06-25T14:27:48.473402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:48.473451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:48.473490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:27:48.473516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 waiting... 2025-06-25T14:27:48.479081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:27:48.482966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:27:48.483000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-25T14:27:48.483031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:27:48.485728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-25T14:27:48.485859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:27:48.492717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750861668535, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:27:48.492896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750861668535 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 
2025-06-25T14:27:48.492936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:27:48.493306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 128 -> 240 2025-06-25T14:27:48.493354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:27:48.493522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:27:48.493572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, Loc ... 0 part blob processing sourceId '\0001234' seqNo 1 partNo 0 2025-06-25T14:28:02.720168Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001234' seqNo 1 partNo 1 2025-06-25T14:28:02.720782Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001234' seqNo 1 partNo 2 2025-06-25T14:28:02.725178Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob complete sourceId '\0001234' seqNo 1 partNo 2 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 6001297 count 5 nextOffset 5 batches 11 2025-06-25T14:28:02.726959Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001235' seqNo 1 partNo 0 2025-06-25T14:28:02.728233Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001235' seqNo 1 partNo 1 2025-06-25T14:28:02.728852Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001235' seqNo 1 partNo 2 2025-06-25T14:28:02.792746Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob complete sourceId '\0001235' seqNo 1 partNo 2 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 7201550 count 6 nextOffset 6 batches 13 2025-06-25T14:28:02.793698Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001236' seqNo 1 partNo 0 2025-06-25T14:28:02.797355Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001236' seqNo 1 partNo 1 2025-06-25T14:28:02.797954Z node 1 :PERSQUEUE DEBUG: 
partition_compaction.cpp:57: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001236' seqNo 1 partNo 2 2025-06-25T14:28:02.806745Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519893665674367622:2601], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:28:02.816797Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OTcxOWJiM2UtNWU5ZjBlOTQtZjBjMzFjYmQtNjI1MjM2OGY=, ActorId: [1:7519893665674367620:2600], ActorState: ExecuteState, TraceId: 01jykqt62e3q9gx2rvwzb8jy17, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:28:02.817121Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:28:02.910393Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:96: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob sourceId '\0001236' seqNo 1 partNo 2 result is x0000000000_00000000000000000000_00000_0000000006_00014 size 8225586 2025-06-25T14:28:02.910458Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1117: [PQ: 72075186224037899, Partition: 0, State: StateIdle] writing blob: topic 'PQ/account3/folder1/folder2/topic' partition 0 old key x0000000000_00000000000000000000_00000_0000000006_00014 new key d0000000000_00000000000000000000_00000_0000000006_00014 size 8225586 WTime 1750861682909 2025-06-25T14:28:02.914335Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob complete sourceId '\0001236' seqNo 1 partNo 2 FormedBlobsCount 1 NewHead: Offset 6 PartNo 2 PackedSize 176227 count 1 nextOffset 7 batches 1 2025-06-25T14:28:02.915593Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:401: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Add new write blob: topic 'PQ/account3/folder1/folder2/topic' partition 0 compactOffset 6,1 HeadOffset 0 endOffset 0 curOffset 7 d0000000000_00000000000000000006_00002_0000000001_00000| size 176217 WTime 1750861682909 2025-06-25T14:28:02.915904Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:28:02.915962Z node 1 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 0 partNo 0 count 6 size 8225586 2025-06-25T14:28:02.915999Z node 1 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 6 partNo 2 count 1 size 176217 2025-06-25T14:28:02.916049Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037899, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:28:02.920071Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037893, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:28:02.940541Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. 
Partition 0 offset 0 count 6 size 8225586 actorID [1:7519893652789465244:2525] 2025-06-25T14:28:02.940582Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 6 count 1 size 176217 actorID [1:7519893652789465244:2525] 2025-06-25T14:28:02.940644Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:323: [PQ: 72075186224037899, Partition: 0, State: StateIdle] compaction completed 2025-06-25T14:28:02.940795Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037899' partition 0 offset 0 partno 0 count 6 parts 14 suffix '0' size 8225586 2025-06-25T14:28:02.940827Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037899' partition 0 offset 6 partno 2 count 1 parts 0 suffix '124' size 176217 2025-06-25T14:28:02.941169Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037899, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:28:02.941203Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:28:02.941224Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000006_00000_0000000001_00002?(+) to d0000000000_00000000000000000006_00000_0000000001_00002?(+) 2025-06-25T14:28:02.941232Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000005_00000_0000000001_00002?(+) to d0000000000_00000000000000000005_00000_0000000001_00002?(+) 2025-06-25T14:28:02.941241Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000004_00000_0000000001_00002?(+) to d0000000000_00000000000000000004_00000_0000000001_00002?(+) 2025-06-25T14:28:02.941250Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000003_00000_0000000001_00002?(+) to d0000000000_00000000000000000003_00000_0000000001_00002?(+) 2025-06-25T14:28:02.941258Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000002_00000_0000000001_00002?(+) to d0000000000_00000000000000000002_00000_0000000001_00002?(+) 2025-06-25T14:28:02.941265Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000001_00000_0000000001_00002?(+) to d0000000000_00000000000000000001_00000_0000000001_00002?(+) 2025-06-25T14:28:02.941272Z node 1 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000000_00000_0000000001_00002?(+) to d0000000000_00000000000000000000_00000_0000000001_00002?(+) 2025-06-25T14:28:02.946803Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 6 count 1 actorID [1:7519893652789465244:2525] 2025-06-25T14:28:02.946844Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 5 count 1 actorID [1:7519893652789465244:2525] 2025-06-25T14:28:02.946874Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 4 count 1 actorID [1:7519893652789465244:2525] 2025-06-25T14:28:02.946902Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 3 count 1 actorID [1:7519893652789465244:2525] 2025-06-25T14:28:02.946933Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 2 count 1 actorID [1:7519893652789465244:2525] 2025-06-25T14:28:02.946959Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. 
Partition 0 offset 1 count 1 actorID [1:7519893652789465244:2525] 2025-06-25T14:28:02.946982Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 0 count 1 actorID [1:7519893652789465244:2525] 2025-06-25T14:28:02.947071Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037899, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:28:02.947117Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037899, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:28:02.947815Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 6 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-25T14:28:02.948043Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 5 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-25T14:28:02.948273Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 4 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-25T14:28:02.948560Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 3 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-25T14:28:02.948878Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 2 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-25T14:28:02.949085Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72075186224037899' partition 0 offset 1 partno 0 count 1 parts 2 suffix '63' size 1200275 2025-06-25T14:28:02.949295Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. 
Tablet '72075186224037899' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1200275 |75.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |75.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |75.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError [GOOD] >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendControlQuery >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-fifo] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredGrpcApiWithCancelAfter [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyRequiredActorApiWithCancelAfter >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] [GOOD] >> TOlap::StoreStats >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendGetResultData >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] >> KqpScripting::QueryStats >> TOlapNaming::AlterColumnTableOk >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v0] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] Test command err: === Server->StartServer(false); 2025-06-25T14:27:45.523333Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893592283673079:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:45.523387Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:45.618330Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893595212150057:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:45.618497Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001962/r3tmp/tmptXMm31/pdisk_1.dat 2025-06-25T14:27:46.018074Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:46.043426Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:46.570515Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:46.618731Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:46.644484Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:46.698335Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:46.769072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:46.769183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:46.770086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:46.770133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:46.784706Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:46.835552Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:27:46.836726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:46.836931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3387, node 1 2025-06-25T14:27:47.145720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001962/r3tmp/yandexaJHysM.tmp 2025-06-25T14:27:47.145740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001962/r3tmp/yandexaJHysM.tmp 2025-06-25T14:27:47.145876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001962/r3tmp/yandexaJHysM.tmp 2025-06-25T14:27:47.145985Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:47.465886Z INFO: TTestServer started on Port 23308 GrpcPort 3387 TClient is connected to server localhost:23308 PQClient connected to localhost:3387 === TenantModeEnabled() = 1 === Init PQ - start server on port 3387 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:27:49.058393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:27:49.058641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:49.058893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:27:49.058919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:27:49.059170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:27:49.059256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:49.063178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:27:49.063516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:27:49.063693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:49.063730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:27:49.063750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-25T14:27:49.063766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 2 -> 3 2025-06-25T14:27:49.072577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:49.072641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:27:49.072660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 3 -> 128 2025-06-25T14:27:49.077568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: 
TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:49.077604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:49.077639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 waiting... 2025-06-25T14:27:49.077663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-25T14:27:49.101291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:27:49.101788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:27:49.101810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-25T14:27:49.101830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:27:49.106454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-25T14:27:49.106643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:27:49.121049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750861669151, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:27:49.121243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750861669151 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:27:49.121283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:27:49.121560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 128 -> 240 2025-06-25T14:27:49.121594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleRepl ... 
ion AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-25T14:28:03.344973Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519893669560154980:2403] (SourceId=123, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-25T14:28:03.344992Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 4 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T14:28:03.345765Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037893 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037893, NodeId 3, Generation: 1 2025-06-25T14:28:03.345816Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037893] server connected, pipe [3:7519893669560154983:2403], now have 1 active actors on pipe 2025-06-25T14:28:03.345950Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:28:03.345973Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-25T14:28:03.346039Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 123|85cee536-5c0738c8-657a63e5-ea5f2b30_0 generated for partition 0 topic 'PQ/account/topic' owner 123 2025-06-25T14:28:03.346131Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-25T14:28:03.346173Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:28:03.346419Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:28:03.346437Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-25T14:28:03.347305Z :INFO: [] MessageGroupId [123] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750861683347 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:28:03.346502Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:28:03.346580Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 4 partition: 0 MaxSeqNo: 2 sessionId: 123|85cee536-5c0738c8-657a63e5-ea5f2b30_0 2025-06-25T14:28:03.347412Z :INFO: [] MessageGroupId [123] SessionId [] Write session established. Init response: last_sequence_number: 2 session_id: "123|85cee536-5c0738c8-657a63e5-ea5f2b30_0" topic: "PQ/account/topic" 2025-06-25T14:28:03.347656Z :DEBUG: [] MessageGroupId [123] SessionId [123|85cee536-5c0738c8-657a63e5-ea5f2b30_0] Write 1 messages with Id from 1 to 1 2025-06-25T14:28:03.347768Z :DEBUG: [] MessageGroupId [123] SessionId [123|85cee536-5c0738c8-657a63e5-ea5f2b30_0] Write session: try to update token 2025-06-25T14:28:03.347804Z :DEBUG: [] MessageGroupId [123] SessionId [123|85cee536-5c0738c8-657a63e5-ea5f2b30_0] Send 1 message(s) (0 left), first sequence number is 3 2025-06-25T14:28:03.347991Z :INFO: [] MessageGroupId [123] SessionId [123|85cee536-5c0738c8-657a63e5-ea5f2b30_0] Write session: close. 
Timeout = 10000 ms 2025-06-25T14:28:03.348233Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: 123|85cee536-5c0738c8-657a63e5-ea5f2b30_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-25T14:28:03.348613Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037893 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-25T14:28:03.348794Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:28:03.348817Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-25T14:28:03.348905Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 1 2025-06-25T14:28:03.348943Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037893 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:28:03.349185Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:28:03.349201Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-25T14:28:03.349243Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72075186224037893] got client message topic: PQ/account/topic partition: 0 SourceId: '\000123' SeqNo: 3 partNo : 0 messageNo: 1 size 372 offset: -1 2025-06-25T14:28:03.349312Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1843: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Send write quota request. Topic: "PQ/account/topic". Partition: 0. Amount: 376. Cookie: 3 2025-06-25T14:28:03.349368Z node 3 :PERSQUEUE DEBUG: partition.cpp:3720: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Got quota. Topic: "PQ/account/topic". Partition: 0: Cookie: 3 2025-06-25T14:28:03.349499Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'PQ/account/topic' partition 0 part blob processing sourceId '\000123' seqNo 3 partNo 0 2025-06-25T14:28:03.402981Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'PQ/account/topic' partition 0 part blob complete sourceId '\000123' seqNo 3 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 443 count 1 nextOffset 3 batches 1 2025-06-25T14:28:03.403421Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Add new write blob: topic 'PQ/account/topic' partition 0 compactOffset 2,1 HeadOffset 2 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000? size 431 WTime 1750861683403 2025-06-25T14:28:03.403578Z node 3 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:28:03.403647Z node 3 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 431 2025-06-25T14:28:03.415433Z node 3 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. 
Partition 0 offset 2 count 1 size 431 actorID [3:7519893665265187368:2367] 2025-06-25T14:28:03.415558Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 376 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:28:03.415611Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:28:03.415650Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Answering for message sourceid: '\000123', Topic: 'PQ/account/topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-25T14:28:03.415848Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037893, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=1293, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:28:03.415894Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-25T14:28:03.415968Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037893 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:28:03.416729Z node 3 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037893' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' size 431 2025-06-25T14:28:03.420566Z :DEBUG: [] MessageGroupId [123] SessionId [123|85cee536-5c0738c8-657a63e5-ea5f2b30_0] Write session got write response: sequence_numbers: 3 offsets: 2 already_written: false write_statistics { persist_duration_ms: 12 queued_in_partition_duration_ms: 53 } 2025-06-25T14:28:03.420626Z :DEBUG: [] MessageGroupId [123] SessionId [123|85cee536-5c0738c8-657a63e5-ea5f2b30_0] Write session: acknoledged message 1 2025-06-25T14:28:03.452378Z :INFO: [] MessageGroupId [123] SessionId [123|85cee536-5c0738c8-657a63e5-ea5f2b30_0] Write session will now close 2025-06-25T14:28:03.452462Z :DEBUG: [] MessageGroupId [123] SessionId [123|85cee536-5c0738c8-657a63e5-ea5f2b30_0] Write session: aborting 2025-06-25T14:28:03.452901Z :INFO: [] MessageGroupId [123] SessionId [123|85cee536-5c0738c8-657a63e5-ea5f2b30_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:28:03.452950Z :DEBUG: [] MessageGroupId [123] SessionId [123|85cee536-5c0738c8-657a63e5-ea5f2b30_0] Write session: destroy 2025-06-25T14:28:03.456681Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: 123|85cee536-5c0738c8-657a63e5-ea5f2b30_0 grpc read done: success: 0 data: 2025-06-25T14:28:03.456704Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: 123|85cee536-5c0738c8-657a63e5-ea5f2b30_0 grpc read failed 2025-06-25T14:28:03.456727Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: 123|85cee536-5c0738c8-657a63e5-ea5f2b30_0 grpc closed 2025-06-25T14:28:03.456739Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: 123|85cee536-5c0738c8-657a63e5-ea5f2b30_0 is DEAD 2025-06-25T14:28:03.457372Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037893 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:28:03.457527Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037893] server disconnected, pipe [3:7519893669560154983:2403] 
destroyed 2025-06-25T14:28:03.457564Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:28:03.863151Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519893669560154996:2407], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:28:03.864831Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=MmJhNTA1YWEtM2U3MDlmMDUtYTI0NWEzZGUtZjc0N2IwODA=, ActorId: [3:7519893669560154994:2406], ActorState: ExecuteState, TraceId: 01jykqt73jejhmz4kmmhv2gvw8, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:28:03.865217Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-std] >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListJobs >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v1] >> TPersqueueControlPlaneTestSuite::TestAddRemoveReadRule [GOOD] >> TPersqueueDataPlaneTestSuite::WriteSession >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListJobs [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeJob >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] >> TPersQueueNewSchemeCacheTest::CheckGrpcWriteNoDC [GOOD] >> TPersQueueNewSchemeCacheTest::CheckGrpcReadNoDC >> KqpYql::TestUuidPrimaryKeyPrefixSearch >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-fifo] >> AsyncIndexChangeCollector::AllColumnsInPk [GOOD] >> AsyncIndexChangeCollector::CoverIndexedColumn >> KqpYql::RefSelect >> ObjectDistribution::TestManyIrrelevantNodes [GOOD] >> Sequencer::Basic1 [GOOD] >> StoragePool::TestDistributionRandomProbability |75.1%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |75.1%| [LD] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |75.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateConnection >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] >> CdcStreamChangeCollector::PageFaults [GOOD] >> CdcStreamChangeCollector::OldImage |75.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateConnectionWithServiceAccount >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCreateClean [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestReboot |75.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation |75.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation |75.1%| [LD] {RESULT} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListConnections >> YdbSdkSessionsPool::PeriodicTask/1 [GOOD] >> TPersqueueControlPlaneTestSuite::SetupWriteLockSessionWithDatabase [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-fifo] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeConnection >> KqpYql::InsertCV+useSink |75.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyConnection >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDrop [GOOD] >> 
TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse [GOOD] |75.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |75.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |75.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDropIndex >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-fifo] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersqueueControlPlaneTestSuite::SetupWriteLockSessionWithDatabase [GOOD] Test command err: === Server->StartServer(false); 2025-06-25T14:27:55.634943Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893635580100199:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:55.634977Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:55.812645Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893634754070428:2099];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00191b/r3tmp/tmp2SX5z5/pdisk_1.dat 2025-06-25T14:27:56.384143Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:56.461785Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:56.462200Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:56.740503Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:56.741691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:56.811818Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:57.004967Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:57.008718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:57.008798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:57.037286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2025-06-25T14:27:57.037357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:57.049306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:57.076517Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:27:57.084028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5387, node 1 2025-06-25T14:27:57.285277Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/00191b/r3tmp/yandexNxa04i.tmp 2025-06-25T14:27:57.285315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/00191b/r3tmp/yandexNxa04i.tmp 2025-06-25T14:27:57.285482Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/00191b/r3tmp/yandexNxa04i.tmp 2025-06-25T14:27:57.285619Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:57.471723Z INFO: TTestServer started on Port 4512 GrpcPort 5387 TClient is connected to server localhost:4512 PQClient connected to localhost:5387 === TenantModeEnabled() = 1 === Init PQ - start server on port 5387 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:27:58.213929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:27:58.214195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:58.214373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:27:58.214405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:27:58.214600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:27:58.214652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:58.220250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:27:58.220474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:27:58.220640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:58.220667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:27:58.220687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-25T14:27:58.220699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 2 -> 3 2025-06-25T14:27:58.224824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:58.224899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:27:58.224921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 3 -> 128 2025-06-25T14:27:58.226581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 
281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:58.226613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:58.226643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:27:58.226678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 waiting... 2025-06-25T14:27:58.237888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:27:58.238236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:27:58.238250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-25T14:27:58.238267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:27:58.244151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-25T14:27:58.244271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:27:58.254070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750861678293, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:27:58.254219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750861678293 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:27:58.254247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:27:58.254474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 128 -> 240 2025-06-25T14:27:58.254501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:27:58.254623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 7205 ... 
2057594046644480, txId: 281474976710664 2025-06-25T14:28:10.185690Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710664, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], version: 2 2025-06-25T14:28:10.185699Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 4 2025-06-25T14:28:10.185741Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710664, subscribers: 1 2025-06-25T14:28:10.185754Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [3:7519893699457999986:2327] 2025-06-25T14:28:10.189730Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710664 2025-06-25T14:28:10.189775Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710664 Create topic result: 1 === EnablePQLogs === CreateChannel === NewStub === InitializeWritePQService === InitializeWritePQService start iteration === InitializeWritePQService create streamingWriter 2025-06-25T14:28:10.306110Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-25T14:28:10.306139Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 === InitializeWritePQService Write 2025-06-25T14:28:10.306831Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "Root/acc/topic1" message_group_id: "12345678" } 2025-06-25T14:28:10.306921Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "Root/acc/topic1" message_group_id: "12345678" from ipv6:[::1]:45858 2025-06-25T14:28:10.306936Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:45858 proto=v1 topic=Root/acc/topic1 durationSec=0 2025-06-25T14:28:10.306943Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:28:10.309416Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-25T14:28:10.309541Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-25T14:28:10.309554Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, 
$SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T14:28:10.309562Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-25T14:28:10.309591Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519893699458000155:2331] (SourceId=12345678, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-25T14:28:10.309608Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T14:28:10.311165Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 4, Generation: 1 2025-06-25T14:28:10.311553Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 12345678|dbd4aa06-5d7a234f-f7e36fd3-5fd7e409_0 generated for partition 0 topic 'acc/topic1' owner 12345678 2025-06-25T14:28:10.312461Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: 12345678|dbd4aa06-5d7a234f-f7e36fd3-5fd7e409_0 2025-06-25T14:28:10.315158Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: 12345678|dbd4aa06-5d7a234f-f7e36fd3-5fd7e409_0 grpc read done: success: 0 data: 2025-06-25T14:28:10.315179Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: 12345678|dbd4aa06-5d7a234f-f7e36fd3-5fd7e409_0 grpc read failed 2025-06-25T14:28:10.315349Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 1 sessionId: 12345678|dbd4aa06-5d7a234f-f7e36fd3-5fd7e409_0 2025-06-25T14:28:10.315361Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: 12345678|dbd4aa06-5d7a234f-f7e36fd3-5fd7e409_0 is DEAD 2025-06-25T14:28:10.315625Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison Finish: 0 === InitializeWritePQService done === PersQueueClient === InitializePQ completed 2025-06-25T14:28:10.326511Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-25T14:28:10.326534Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-25T14:28:10.327779Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { topic: "topic1" message_group_id: "12345678" } 2025-06-25T14:28:10.327885Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 2 topic: "topic1" message_group_id: "12345678" from ipv6:[::1]:45858 2025-06-25T14:28:10.327898Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=2 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:45858 proto=v1 topic=topic1 durationSec=0 2025-06-25T14:28:10.327908Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:28:10.330245Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 
2 sessionId: describe result for acl check 2025-06-25T14:28:10.330378Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-25T14:28:10.330386Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T14:28:10.330395Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-25T14:28:10.330424Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519893699458000174:2340] (SourceId=12345678, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-25T14:28:10.330441Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 2 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T14:28:10.332167Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 4, Generation: 1 2025-06-25T14:28:10.332606Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 12345678|7350609a-f3e7aa48-79c9dc32-daadf62a_0 generated for partition 0 topic 'acc/topic1' owner 12345678 2025-06-25T14:28:10.333695Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: 12345678|7350609a-f3e7aa48-79c9dc32-daadf62a_0 2025-06-25T14:28:10.335752Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: 12345678|7350609a-f3e7aa48-79c9dc32-daadf62a_0 grpc read done: success: 0 data: 2025-06-25T14:28:10.335773Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: 12345678|7350609a-f3e7aa48-79c9dc32-daadf62a_0 grpc read failed 2025-06-25T14:28:10.335796Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: 12345678|7350609a-f3e7aa48-79c9dc32-daadf62a_0 grpc closed 2025-06-25T14:28:10.335807Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: 12345678|7350609a-f3e7aa48-79c9dc32-daadf62a_0 is DEAD 2025-06-25T14:28:10.336942Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:28:10.737117Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519893699458000191:2347], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:28:10.738764Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=YmQyMzRiZWItOTc4ZTUwMDctNzRkMjc1Zi0yMGVlODVmMw==, ActorId: [3:7519893699458000189:2346], ActorState: ExecuteState, TraceId: 01jykqtdts7favxgyew9sst9pv, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:28:10.739120Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanWithRetry [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanManyTables >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-std] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyConnectionWithServiceAccount |75.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |75.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |75.1%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |75.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] >> KqpScripting::QueryStats [GOOD] >> KqpScripting::Pure >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteConnection >> TGRpcRateLimiterTest::AcquireResourceManyRequiredActorApiWithCancelAfter [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyUsedGrpcApi >> TSchemeshardCompactionQueueTest::EnqueueEmptyShard [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueSinglePartedShard [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueSinglePartedShardWhenEnabled [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse [GOOD] Test command err: === Server->StartServer(false); 2025-06-25T14:27:53.455959Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893628887911259:2073];send_to=[0:7307199536658146131:7762515]; 
2025-06-25T14:27:53.456006Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:53.476975Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893627603782798:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:53.537435Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001957/r3tmp/tmpCVLuMw/pdisk_1.dat 2025-06-25T14:27:54.084710Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:54.255884Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:54.399430Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:54.424604Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:54.551622Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:54.554165Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:54.888685Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:54.987718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:54.987803Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:55.004378Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:27:55.004865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:55.004921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:55.006725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:55.009264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26310, node 1 2025-06-25T14:27:55.408953Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001957/r3tmp/yandexb8ZFpx.tmp 2025-06-25T14:27:55.408974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001957/r3tmp/yandexb8ZFpx.tmp 2025-06-25T14:27:55.409116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: 
/home/runner/.ya/build/build_root/yft8/001957/r3tmp/yandexb8ZFpx.tmp 2025-06-25T14:27:55.409221Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:55.561453Z INFO: TTestServer started on Port 9072 GrpcPort 26310 TClient is connected to server localhost:9072 PQClient connected to localhost:26310 === TenantModeEnabled() = 1 === Init PQ - start server on port 26310 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:27:56.717216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:27:56.725875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:56.726161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:27:56.726186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:27:56.726423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:27:56.726513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:56.730811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:27:56.730978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:27:56.731171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:56.731212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:27:56.731235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-25T14:27:56.731247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 2 -> 3 waiting... 2025-06-25T14:27:56.737068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:56.737106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:27:56.737127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 3 -> 128 2025-06-25T14:27:56.740757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:27:56.740785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-25T14:27:56.740802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:27:56.743260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:56.743297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:56.743331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:27:56.743365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-25T14:27:56.748379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:27:56.753150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-25T14:27:56.753387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:27:56.762502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: 
TTxOperationPlanStep Execute, stepId: 1750861676802, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:27:56.762642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750861676802 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:27:56.762681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:27:56.762946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 128 -> 240 2025-06-25T14:27:56.763003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleRe ... actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: test-group-id|ce871086-95dab89f-c9d624e3-ce6a87af_0 2025-06-25T14:28:10.228504Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-group-id|ce871086-95dab89f-c9d624e3-ce6a87af_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-25T14:28:10.228806Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-25T14:28:10.232604Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse ===ModifyAcl BEFORE MODIFY PERMISSIONS 2025-06-25T14:28:10.241241Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:28:10.253665Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root/acc" OperationType: ESchemeOpModifyACL ModifyACL { Name: "topic1" DiffACL: "\n\031\010\001\022\025\032\023test_user_0@builtin" } } TxId: 281474976720666 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:35246" , at schemeshard: 72057594046644480 2025-06-25T14:28:10.253818Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_modify_acl.cpp:33: TModifyACL Propose, path: /Root/acc/topic1, operationId: 281474976720666:0, at schemeshard: 72057594046644480 2025-06-25T14:28:10.253934Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5302: ExamineTreeVFS visit path id [OwnerId: 72057594046644480, LocalPathId: 10] name: topic1 type: EPathTypePersQueueGroup state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046644480, LocalPathId: 9] 2025-06-25T14:28:10.253943Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-25T14:28:10.254082Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976720666:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046644480 2025-06-25T14:28:10.254131Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976720666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:28:10.254210Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976720666:0 progress is 1/1 2025-06-25T14:28:10.254219Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976720666 ready parts: 1/1 2025-06-25T14:28:10.254236Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976720666:0 progress is 1/1 2025-06-25T14:28:10.254245Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976720666 ready parts: 1/1 2025-06-25T14:28:10.254278Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 3 2025-06-25T14:28:10.254316Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976720666, ready parts: 1/1, is published: false 2025-06-25T14:28:10.254335Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 10], at schemeshard: 72057594046644480 2025-06-25T14:28:10.254345Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976720666 ready parts: 1/1 2025-06-25T14:28:10.254356Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976720666:0 2025-06-25T14:28:10.254368Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976720666, publications: 1, subscribers: 0 2025-06-25T14:28:10.254378Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976720666, [OwnerId: 72057594046644480, LocalPathId: 10], 4 2025-06-25T14:28:10.262380Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976720666, response: Status: StatusSuccess TxId: 281474976720666 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:28:10.262723Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976720666, database: /Root, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /Root/acc/topic1, remove access: -():test_user_0@builtin:- 2025-06-25T14:28:10.262889Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:28:10.262904Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976720666, path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-25T14:28:10.263062Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:28:10.263080Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7519893679558869022:2374], at schemeshard: 72057594046644480, txId: 281474976720666, path id: 10 2025-06-25T14:28:10.264264Z node 3 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976720666 2025-06-25T14:28:10.264373Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976720666 2025-06-25T14:28:10.264386Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976720666 2025-06-25T14:28:10.264404Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976720666, pathId: [OwnerId: 72057594046644480, LocalPathId: 10], version: 4 2025-06-25T14:28:10.264424Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 4 2025-06-25T14:28:10.264521Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976720666, subscribers: 0 ===Wait for session created with token with removed ACE to die2025-06-25T14:28:10.269269Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976720666 2025-06-25T14:28:10.718504Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519893701033706632:2361], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:28:10.718873Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=YjkxNTMzZjQtMTMwNjM3ODUtMzBjYmQ3YzUtZjU3MTJiMmQ=, ActorId: [3:7519893701033706630:2360], ActorState: ExecuteState, TraceId: 01jykqtdt7dbcxh60rdvhqzyqc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:28:10.719302Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } status: UNAUTHORIZED issues { message: "access to topic \'Topic /Root/acc/topic1 in database: /Root\' denied for \'test_user_0@builtin\' due to \'no WriteTopic rights\', Marker# PQ1125" issue_code: 500018 severity: 1 } 2025-06-25T14:28:11.224808Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:28:11.225827Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: test-group-id|ce871086-95dab89f-c9d624e3-ce6a87af_0 describe result for acl check 2025-06-25T14:28:11.225939Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 2 reason: access to topic 'Topic /Root/acc/topic1 in database: /Root' denied for 'test_user_0@builtin' due to 'no WriteTopic rights', Marker# PQ1125 sessionId: test-group-id|ce871086-95dab89f-c9d624e3-ce6a87af_0 2025-06-25T14:28:11.226192Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: test-group-id|ce871086-95dab89f-c9d624e3-ce6a87af_0 is DEAD 2025-06-25T14:28:11.226482Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:28:11.785899Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519893705328673953:2369], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:28:11.786236Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=ZjlhYWU1MTMtOTE1NTU1Y2QtYjM5ZmViYTMtM2I0NzI1ZGY=, ActorId: [3:7519893705328673951:2368], ActorState: ExecuteState, TraceId: 01jykqtetb9863n7b268ffazzt, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:28:11.786690Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError [GOOD] Test command err: === Server->StartServer(false); 2025-06-25T14:27:56.920742Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893640392822318:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:56.928717Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:56.981582Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893639423537859:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:56.981633Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001932/r3tmp/tmpIHf54L/pdisk_1.dat 2025-06-25T14:27:57.332224Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:57.345572Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:57.738876Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:57.800096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:57.800188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:57.802153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:57.802220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:57.810665Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle 
TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:27:57.810815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:57.812015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16875, node 1 2025-06-25T14:27:57.938743Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:57.991212Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001932/r3tmp/yandexNqHxBp.tmp 2025-06-25T14:27:57.991244Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001932/r3tmp/yandexNqHxBp.tmp 2025-06-25T14:27:57.991441Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001932/r3tmp/yandexNqHxBp.tmp 2025-06-25T14:27:57.991569Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:58.001893Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:58.052894Z INFO: TTestServer started on Port 15835 GrpcPort 16875 TClient is connected to server localhost:15835 PQClient connected to localhost:16875 === TenantModeEnabled() = 1 === Init PQ - start server on port 16875 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:27:58.495242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:27:58.495441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:58.495623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:27:58.495645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:27:58.495853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:27:58.495926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:58.498269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:27:58.498449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:27:58.498624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:58.498668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:27:58.498690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-25T14:27:58.498701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 2 -> 3 waiting... 
2025-06-25T14:27:58.500782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:58.500820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:27:58.500832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 3 -> 128 2025-06-25T14:27:58.502505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:58.502540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:58.502568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:27:58.502587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-25T14:27:58.506545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:27:58.506800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:27:58.506811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-25T14:27:58.506841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:27:58.508273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-25T14:27:58.508450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:27:58.510486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750861678552, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:27:58.510636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750861678552 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:27:58.510675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:27:58.510949Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 128 -> 240 2025-06-25T14:27:58.510979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:27:58.511132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:27:58.511183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, Loc ... : schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715665:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046644480 2025-06-25T14:28:11.133985Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:28:11.134055Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715665:0 progress is 1/1 2025-06-25T14:28:11.134113Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715665 ready parts: 1/1 2025-06-25T14:28:11.134138Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715665:0 progress is 1/1 2025-06-25T14:28:11.134149Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715665 ready parts: 1/1 2025-06-25T14:28:11.134185Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 3 2025-06-25T14:28:11.134238Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715665, ready parts: 1/1, is published: false 2025-06-25T14:28:11.134270Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 10], at schemeshard: 72057594046644480 2025-06-25T14:28:11.134282Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715665 ready parts: 1/1 2025-06-25T14:28:11.134293Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715665:0 2025-06-25T14:28:11.134303Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976715665, publications: 1, subscribers: 0 2025-06-25T14:28:11.134316Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976715665, [OwnerId: 72057594046644480, LocalPathId: 10], 3 2025-06-25T14:28:11.136153Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715665, response: Status: StatusSuccess TxId: 281474976715665 SchemeshardId: 72057594046644480, at schemeshard: 
72057594046644480 2025-06-25T14:28:11.175791Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715665, database: /Root, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /Root/acc/topic1, add access: +W:test_user_0@builtin, add access: +W:test_user_1@builtin, add access: +W:test_user_2@builtin, remove access: -():test_user_0@builtin:-, remove access: -():test_user_1@builtin:-, remove access: -():test_user_2@builtin:- 2025-06-25T14:28:11.176618Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:28:11.176646Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715665, path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-25T14:28:11.176877Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:28:11.176907Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7519893680277652359:2381], at schemeshard: 72057594046644480, txId: 281474976715665, path id: 10 2025-06-25T14:28:11.178145Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976715665 2025-06-25T14:28:11.178300Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976715665 2025-06-25T14:28:11.178344Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715665 2025-06-25T14:28:11.178370Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715665, pathId: [OwnerId: 72057594046644480, LocalPathId: 10], version: 3 2025-06-25T14:28:11.178387Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 4 2025-06-25T14:28:11.178484Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715665, subscribers: 0 2025-06-25T14:28:11.181491Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715665 2025-06-25T14:28:11.187574Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-25T14:28:11.187602Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-25T14:28:11.187902Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { topic: "/Root/acc/topic1" message_group_id: "test-group-id" } 2025-06-25T14:28:11.187984Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 2 topic: 
"/Root/acc/topic1" message_group_id: "test-group-id" from ipv6:[::1]:57896 2025-06-25T14:28:11.188004Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=2 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:57896 proto=v1 topic=/Root/acc/topic1 durationSec=0 2025-06-25T14:28:11.188011Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:28:11.189046Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: describe result for acl check 2025-06-25T14:28:11.189217Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-25T14:28:11.189226Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T14:28:11.189233Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-25T14:28:11.189263Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519893706047457118:2343] (SourceId=test-group-id, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-25T14:28:11.189281Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 2 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T14:28:11.191454Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 4, Generation: 1 2025-06-25T14:28:11.194710Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie test-group-id|34a00672-93cbe60e-4310c367-5e6f123c_0 generated for partition 0 topic 'acc/topic1' owner test-group-id 2025-06-25T14:28:11.196193Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: test-group-id|34a00672-93cbe60e-4310c367-5e6f123c_0 2025-06-25T14:28:11.198294Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-group-id|34a00672-93cbe60e-4310c367-5e6f123c_0 grpc read done: success: 1 data: update_token_request [content omitted] 2025-06-25T14:28:11.198518Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-group-id|34a00672-93cbe60e-4310c367-5e6f123c_0 grpc read done: success: 1 data: update_token_request [content omitted] 2025-06-25T14:28:11.198559Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 2 reason: got another 'update_token_request' while previous still in progress, only single token update is allowed at a time sessionId: test-group-id|34a00672-93cbe60e-4310c367-5e6f123c_0 2025-06-25T14:28:11.198722Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: test-group-id|34a00672-93cbe60e-4310c367-5e6f123c_0 is DEAD 2025-06-25T14:28:11.198999Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:28:11.708543Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519893706047457136:2348], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:28:11.709057Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=ZTMzZDg1ZjQtZWIwODMyOTItYWZjNzYxNy1iNmE4MTdkMA==, ActorId: [3:7519893706047457134:2347], ActorState: ExecuteState, TraceId: 01jykqterh9cc4jfdxx7s5xes3, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:28:11.709464Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> KqpYql::TestUuidPrimaryKeyPrefixSearch [GOOD] >> AsyncIndexChangeCollector::CoverIndexedColumn [GOOD] >> StoragePool::TestDistributionRandomProbability [GOOD] >> StoragePool::TestDistributionRandomProbabilityWithOverflow [GOOD] >> StoragePool::TestDistributionExactMin >> TPersqueueDataPlaneTestSuite::WriteSession [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError [GOOD] Test command err: === Server->StartServer(false); 2025-06-25T14:27:57.707760Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893645127384610:2218];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:57.707963Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:58.067894Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:58.078137Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00190c/r3tmp/tmpFSR9Mq/pdisk_1.dat 2025-06-25T14:27:58.156648Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:58.336851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:58.336928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:58.338119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:58.338182Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:58.344022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Connecting -> Connected 2025-06-25T14:27:58.349742Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:27:58.357001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:58.376866Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11272, node 1 2025-06-25T14:27:58.401414Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:27:58.401445Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:27:58.486514Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/00190c/r3tmp/yandexu7Ylno.tmp 2025-06-25T14:27:58.486539Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/00190c/r3tmp/yandexu7Ylno.tmp 2025-06-25T14:27:58.486678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/00190c/r3tmp/yandexu7Ylno.tmp 2025-06-25T14:27:58.486798Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:58.573964Z INFO: TTestServer started on Port 64240 GrpcPort 11272 2025-06-25T14:27:58.711397Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:58.732425Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64240 PQClient connected to localhost:11272 === TenantModeEnabled() = 1 === Init PQ - start server on port 11272 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:27:59.421311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:27:59.426624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:59.427031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:27:59.427054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:27:59.427300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:27:59.429739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:27:59.434030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:27:59.434296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:27:59.434503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:59.434562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:27:59.434591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-25T14:27:59.434607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 2 -> 3 2025-06-25T14:27:59.437393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:59.437460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:27:59.437478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 3 -> 128 2025-06-25T14:27:59.441145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, 
operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:59.441184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:27:59.441214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:27:59.441243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-25T14:27:59.462313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:27:59.462668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:27:59.462681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-25T14:27:59.462699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:27:59.473004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-25T14:27:59.473167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:27:59.493521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750861679518, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:27:59.493664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750861679518 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:27:59.493709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:27:59.493936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 128 -> 240 2025-06-25T14:27:59.493965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:27:59.494112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:27:59.494166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no 
IsExternalSubDomainRoot, pathId: : [OwnerId: 720575940 ... propose status:StatusSuccess, reason: , at schemeshard: 72057594046644480 2025-06-25T14:28:11.779897Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:28:11.779961Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715665:0 progress is 1/1 2025-06-25T14:28:11.779970Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715665 ready parts: 1/1 2025-06-25T14:28:11.779986Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715665:0 progress is 1/1 2025-06-25T14:28:11.779996Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715665 ready parts: 1/1 2025-06-25T14:28:11.780031Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 3 2025-06-25T14:28:11.780117Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715665, ready parts: 1/1, is published: false 2025-06-25T14:28:11.780137Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 10], at schemeshard: 72057594046644480 2025-06-25T14:28:11.781092Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715665 ready parts: 1/1 2025-06-25T14:28:11.781116Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715665:0 2025-06-25T14:28:11.781127Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976715665, publications: 1, subscribers: 0 2025-06-25T14:28:11.781138Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976715665, [OwnerId: 72057594046644480, LocalPathId: 10], 3 2025-06-25T14:28:11.785353Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715665, response: Status: StatusSuccess TxId: 281474976715665 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:28:11.785651Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715665, database: /Root, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /Root/acc/topic1, add access: +W:test_user@builtin, remove access: -():test_user@builtin:- 2025-06-25T14:28:11.785800Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:28:11.785814Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715665, path id: [OwnerId: 72057594046644480, LocalPathId: 10] 
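Note the AUDIT entry above: the test modifies the ACL of /Root/acc/topic1 via ESchemeOpModifyACL, adding write access (+W) for test_user@builtin and removing the previous deny entry. A roughly equivalent standalone YQL statement is sketched below as an illustration only; the log records the resulting ACL diff rather than the statement that produced it, and the permission name is an assumption:

    -- Hedged sketch: grant topic write access to the test user.
    -- 'ydb.generic.write' is an assumed permission name; only the path and the
    -- subject (test_user@builtin) are taken from the audit entry above.
    GRANT 'ydb.generic.write' ON `/Root/acc/topic1` TO `test_user@builtin`;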
2025-06-25T14:28:11.785995Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:28:11.786012Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7519893683687610747:2376], at schemeshard: 72057594046644480, txId: 281474976715665, path id: 10 2025-06-25T14:28:11.787067Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976715665 2025-06-25T14:28:11.787163Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976715665 2025-06-25T14:28:11.787175Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715665 2025-06-25T14:28:11.787189Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715665, pathId: [OwnerId: 72057594046644480, LocalPathId: 10], version: 3 2025-06-25T14:28:11.787205Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 4 2025-06-25T14:28:11.787283Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715665, subscribers: 0 2025-06-25T14:28:11.792944Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-25T14:28:11.792969Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-25T14:28:11.793257Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715665 2025-06-25T14:28:11.793285Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { topic: "/Root/acc/topic1" message_group_id: "test-message-group" } 2025-06-25T14:28:11.793354Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 2 topic: "/Root/acc/topic1" message_group_id: "test-message-group" from ipv6:[::1]:38884 2025-06-25T14:28:11.793368Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=2 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:38884 proto=v1 topic=/Root/acc/topic1 durationSec=0 2025-06-25T14:28:11.793376Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:28:11.809001Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: describe result for acl check 2025-06-25T14:28:11.809199Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId 
== $SourceId; 2025-06-25T14:28:11.809208Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T14:28:11.809215Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-25T14:28:11.809247Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519893705162448302:2355] (SourceId=test-message-group, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-25T14:28:11.809267Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 2 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T14:28:11.810380Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 3, Generation: 1 2025-06-25T14:28:11.810610Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie test-message-group|18304864-a7444ed6-2a76a722-ea9393f9_0 generated for partition 0 topic 'acc/topic1' owner test-message-group 2025-06-25T14:28:11.810984Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: test-message-group|18304864-a7444ed6-2a76a722-ea9393f9_0 2025-06-25T14:28:11.817160Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-message-group|18304864-a7444ed6-2a76a722-ea9393f9_0 grpc read done: success: 1 data: update_token_request [content omitted] 2025-06-25T14:28:11.817464Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1346: updating token 2025-06-25T14:28:11.817508Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:28:11.818329Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: test-message-group|18304864-a7444ed6-2a76a722-ea9393f9_0 describe result for acl check 2025-06-25T14:28:11.818419Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 2 reason: access to topic 'Topic /Root/acc/topic1 in database: /Root' denied for 'test_user_2@builtin' due to 'no WriteTopic rights', Marker# PQ1125 sessionId: test-message-group|18304864-a7444ed6-2a76a722-ea9393f9_0 2025-06-25T14:28:11.818680Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: test-message-group|18304864-a7444ed6-2a76a722-ea9393f9_0 is DEAD 2025-06-25T14:28:11.818924Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:28:12.189241Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: 
[3:7519893709457415624:2364], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:28:12.191266Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NTMyYmI1MzYtYTI0NWM5OTMtYmUwYTFlZTgtMWQxMTY5ODg=, ActorId: [3:7519893709457415617:2360], ActorState: ExecuteState, TraceId: 01jykqtf7n57fpe5wnesergtt7, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:28:12.192191Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } |75.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::EnqueueSinglePartedShardWhenEnabled [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendTestConnection >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-std] [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactBorrowedAfterSplitMergeWhenDisabled >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-fifo] >> YdbSdkSessions::CloseSessionWithSessionPoolExplicitDriverStopOnly [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors >> KqpYql::RefSelect [GOOD] >> KqpYql::PgIntPrimaryKey >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendTestConnectionWithServiceAccount ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::TestUuidPrimaryKeyPrefixSearch [GOOD] Test command err: Trying to start YDB, gRPC: 26327, MsgBus: 17716 2025-06-25T14:28:08.740034Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893692701687555:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:08.740116Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000df7/r3tmp/tmpWjDETt/pdisk_1.dat 2025-06-25T14:28:09.227124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:09.228384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:09.229793Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:09.264300Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
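The SCHEME_ERROR above appears twice in this section with the same pattern: PERSQUEUE_CLUSTER_TRACKER reads the cluster list from /Root/PQ/Config/V2/Cluster, that table is never created in these freshly started test databases, so KQP rejects the query with issue 2003 and the tracker logs "failed to list clusters". Judging by the KiReadTable issue at position 3:16, the rejected statement is a plain read of that table, roughly like the hypothetical sketch below (only the table path comes from the log; the select list is an assumption):

    --!syntax_v1
    -- Hypothetical reconstruction; only the table path appears in the log.
    SELECT * FROM `/Root/PQ/Config/V2/Cluster`;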
TServer::EnableGrpc on GrpcPort 26327, node 1 2025-06-25T14:28:09.384926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:09.384954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:09.384970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:09.385106Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17716 2025-06-25T14:28:09.756299Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17716 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:10.065848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:10.080992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:28:12.225284Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893709881557343:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:12.225430Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:12.539844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:12.704370Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893709881557445:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:12.704467Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:12.704648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893709881557450:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:12.710404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:12.724360Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893709881557452:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:28:12.808388Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893709881557505:2395] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:13.743897Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893692701687555:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:13.743977Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersqueueDataPlaneTestSuite::WriteSession [GOOD] Test command err: === Server->StartServer(false); 2025-06-25T14:27:58.974433Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893647355365565:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:58.974490Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:59.046652Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893652522056274:2189];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:59.049693Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:59.234954Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:59.245002Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018f9/r3tmp/tmpnemSkK/pdisk_1.dat 2025-06-25T14:27:59.997674Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:00.000021Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:00.068953Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:00.107539Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:28:00.133203Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:00.133301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:00.134573Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:28:00.137310Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:00.137385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:00.143937Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:28:00.144128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:00.147331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13020, node 1 2025-06-25T14:28:00.402007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0018f9/r3tmp/yandexOEGEnK.tmp 2025-06-25T14:28:00.402028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0018f9/r3tmp/yandexOEGEnK.tmp 2025-06-25T14:28:00.402155Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0018f9/r3tmp/yandexOEGEnK.tmp 2025-06-25T14:28:00.402264Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:00.545815Z INFO: TTestServer started on Port 21608 GrpcPort 13020 TClient is connected to server localhost:21608 PQClient connected to localhost:13020 === TenantModeEnabled() = 1 === Init PQ - start server on port 13020 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:01.726740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:28:01.726933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:28:01.727124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:28:01.727146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:28:01.727371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:28:01.727436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:01.745084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:28:01.745368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:28:01.745545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:28:01.745596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:28:01.745619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-25T14:28:01.745633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 2 -> 3 waiting... 
2025-06-25T14:28:01.749561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:28:01.749583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-25T14:28:01.749615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:28:01.753726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:28:01.753780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:28:01.753794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 3 -> 128 2025-06-25T14:28:01.761337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:28:01.761374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:28:01.761402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:28:01.761444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-25T14:28:01.767220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:01.769777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-25T14:28:01.769927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:28:01.775563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750861681814, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:28:01.775743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750861681814 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:28:01.775795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:28:01.776045Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 128 -> 240 2025-06-25T14:28:01.776072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose Handle ... t_auth_actor.cpp:41: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 auth for : consumer_aba 2025-06-25T14:28:13.022019Z node 3 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 Handle describe topics response 2025-06-25T14:28:13.022097Z node 3 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 auth is DEAD 2025-06-25T14:28:13.022155Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 auth ok: topics# 1, initDone# 0 2025-06-25T14:28:13.023182Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1196: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 register session: topic# /Root/account1/write_topic 2025-06-25T14:28:13.023825Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037894][write_topic] pipe [3:7519893715313412739:2378] connected; active server actors: 1 2025-06-25T14:28:13.024089Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1699: [72075186224037894][write_topic] consumer "consumer_aba" register session for pipe [3:7519893715313412739:2378] session consumer_aba_3_2_3669967975899000915_v1 2025-06-25T14:28:13.024140Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:635: [72075186224037894][write_topic] consumer consumer_aba register readable partition 0 2025-06-25T14:28:13.024165Z :INFO: [/Root] [/Root] [3969ebae-c875346c-a1e6c6f8-4f87f5b8] [null] Server session id: consumer_aba_3_2_3669967975899000915_v1 2025-06-25T14:28:13.024204Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:665: [72075186224037894][write_topic] consumer consumer_aba family created family=1 (Status=Free, Partitions=[0]) 2025-06-25T14:28:13.024449Z :DEBUG: [/Root] [/Root] [3969ebae-c875346c-a1e6c6f8-4f87f5b8] [null] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:28:13.024252Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:867: [72075186224037894][write_topic] consumer consumer_aba register reading session ReadingSession "consumer_aba_3_2_3669967975899000915_v1" (Sender=[3:7519893715313412736:2378], Pipe=[3:7519893715313412739:2378], Partitions=[], ActiveFamilyCount=0) 2025-06-25T14:28:13.024278Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037894][write_topic] consumer consumer_aba rebalancing was scheduled 2025-06-25T14:28:13.024337Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037894][write_topic] consumer consumer_aba balancing. 
Sessions=1, Families=1, UnradableFamilies=1 [1 (0), ], RequireBalancing=0 [] 2025-06-25T14:28:13.024387Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1302: [72075186224037894][write_topic] consumer consumer_aba balancing family=1 (Status=Free, Partitions=[0]) for ReadingSession "consumer_aba_3_2_3669967975899000915_v1" (Sender=[3:7519893715313412736:2378], Pipe=[3:7519893715313412739:2378], Partitions=[], ActiveFamilyCount=0) 2025-06-25T14:28:13.024444Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:545: [72075186224037894][write_topic] consumer consumer_aba family 1 status Active partitions [0] session "consumer_aba_3_2_3669967975899000915_v1" sender [3:7519893715313412736:2378] lock partition 0 for ReadingSession "consumer_aba_3_2_3669967975899000915_v1" (Sender=[3:7519893715313412736:2378], Pipe=[3:7519893715313412739:2378], Partitions=[], ActiveFamilyCount=1) generation 1 step 1 2025-06-25T14:28:13.024493Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037894][write_topic] consumer consumer_aba start rebalancing. familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-25T14:28:13.024518Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037894][write_topic] consumer consumer_aba balancing duration: 0.000160s 2025-06-25T14:28:13.024822Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 grpc read done: success# 1, data# { read { } } 2025-06-25T14:28:13.024950Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 got read request: guid# 681de995-edb7dc03-c2075385-202976f2 2025-06-25T14:28:13.025442Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1315: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 assign: record# { Partition: 0 TabletId: 72075186224037893 Topic: "write_topic" Generation: 1 Step: 1 Session: "consumer_aba_3_2_3669967975899000915_v1" ClientId: "consumer_aba" PipeClient { RawX1: 7519893715313412739 RawX2: 4503612512274762 } Path: "/Root/account1/write_topic" } 2025-06-25T14:28:13.025520Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:1132: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 INITING TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) 2025-06-25T14:28:13.025777Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:972: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037893 Generation: 1, pipe: [3:7519893715313412741:2381] 2025-06-25T14:28:13.025866Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: consumer_aba_3_2_3669967975899000915_v1:1 with generation 1 2025-06-25T14:28:13.051816Z node 3 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 1 WriteTimestampMS: 1750861692891 CreateTimestampMS: 1750861692885 SizeLag: 165 WriteTimestampEstimateMS: 1750861692891 
ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-06-25T14:28:13.051885Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 INIT DONE TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) EndOffset 1 readOffset 0 committedOffset 0 2025-06-25T14:28:13.052006Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 sending to client partition status Got new read session event: CreatePartitionStream { PartitionStreamId: 1 TopicPath: account1/write_topic Cluster: PartitionId: 0 CommittedOffset: 0 EndOffset: 1 } 2025-06-25T14:28:13.067558Z :INFO: [/Root] [/Root] [3969ebae-c875346c-a1e6c6f8-4f87f5b8] Closing read session. Close timeout: 0.000000s 2025-06-25T14:28:13.067631Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): null:account1/write_topic:0:1:0:0 2025-06-25T14:28:13.067679Z :INFO: [/Root] [/Root] [3969ebae-c875346c-a1e6c6f8-4f87f5b8] Counters: { Errors: 0 CurrentSessionLifetimeMs: 51 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:28:13.067773Z :NOTICE: [/Root] [/Root] [3969ebae-c875346c-a1e6c6f8-4f87f5b8] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:28:13.067812Z :DEBUG: [/Root] [/Root] [3969ebae-c875346c-a1e6c6f8-4f87f5b8] [null] Abort session to cluster 2025-06-25T14:28:13.068337Z :NOTICE: [/Root] [/Root] [3969ebae-c875346c-a1e6c6f8-4f87f5b8] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:28:13.069635Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 grpc read done: success# 0, data# { } 2025-06-25T14:28:13.069665Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 grpc read failed 2025-06-25T14:28:13.069707Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 grpc closed 2025-06-25T14:28:13.069771Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer consumer_aba session consumer_aba_3_2_3669967975899000915_v1 is DEAD 2025-06-25T14:28:13.069980Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: consumer_aba_3_2_3669967975899000915_v1 2025-06-25T14:28:13.071937Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037894][write_topic] pipe [3:7519893715313412739:2378] disconnected; active server actors: 1 2025-06-25T14:28:13.071969Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037894][write_topic] pipe [3:7519893715313412739:2378] client consumer_aba disconnected session consumer_aba_3_2_3669967975899000915_v1 2025-06-25T14:28:13.234517Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519893693838574211:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:13.234590Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:13.270749Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519893715313412749:2383], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:28:13.273131Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=YmRjZmZmY2QtMWYzOTJhOGUtM2Y4MTk0ZmEtZjJjMGI0MmQ=, ActorId: [3:7519893715313412747:2382], ActorState: ExecuteState, TraceId: 01jykqtg8pb0nj1yj30d9naxg2, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:28:13.273524Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:28:13.274724Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519893693040637844:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:13.274810Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::CoverIndexedColumn [GOOD] Test command err: 2025-06-25T14:27:47.034855Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:27:47.035153Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:47.035242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013c8/r3tmp/tmpgTKvyh/pdisk_1.dat 2025-06-25T14:27:48.220925Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.236233s 2025-06-25T14:27:48.221100Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.236439s 2025-06-25T14:27:48.245278Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:27:48.296048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:48.518805Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:48.537038Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861663223809 != 1750861663223813 2025-06-25T14:27:48.655661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:48.655925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:48.682097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:48.944373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:49.148072Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:638:2539] 2025-06-25T14:27:49.151076Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:27:49.268997Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:27:49.269208Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:27:49.302250Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:27:49.302393Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:27:49.302464Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 
72075186224037888 2025-06-25T14:27:49.328601Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:27:49.330059Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:27:49.330157Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:665:2539] in generation 1 2025-06-25T14:27:49.330907Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:642:2541] 2025-06-25T14:27:49.331132Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:27:49.359603Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:27:49.359780Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:27:49.376124Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T14:27:49.376236Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T14:27:49.376284Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T14:27:49.376697Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:27:49.376885Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:27:49.376992Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:672:2541] in generation 1 2025-06-25T14:27:49.388019Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:27:49.517522Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:27:49.532563Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:27:49.544798Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:675:2560] 2025-06-25T14:27:49.544906Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:27:49.544950Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:27:49.544993Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:27:49.559105Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:27:49.559215Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T14:27:49.559317Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:27:49.559411Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:676:2561] 2025-06-25T14:27:49.559446Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T14:27:49.559489Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 
72075186224037889, state: WaitScheme 2025-06-25T14:27:49.559524Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:27:49.572431Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:27:49.583466Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:27:49.583859Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:27:49.583927Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:27:49.583993Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:27:49.584050Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:27:49.584105Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-25T14:27:49.584171Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-25T14:27:49.584761Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:631:2535], serverId# [1:649:2545], sessionId# [0:0:0] 2025-06-25T14:27:49.584858Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:27:49.584887Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:27:49.584914Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-25T14:27:49.584961Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:27:49.596674Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:27:49.609099Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:27:49.609277Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:27:49.620831Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:632:2536], serverId# [1:657:2551], sessionId# [0:0:0] 2025-06-25T14:27:49.621198Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:27:49.621465Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-25T14:27:49.621549Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-25T14:27:49.632185Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 
0 at datashard 72075186224037888 2025-06-25T14:27:49.632328Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T14:27:49.644553Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:27:49.644722Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:27:49.645439Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-25T14:27:49.645513Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-25T14:27:49.826958Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:694:2573], serverId# [1:697:2576], sessionId# [0:0:0] 2025-06-25T14:27:49.827123Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet ... 28:13.327614Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T14:28:13.327667Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-06-25T14:28:13.327763Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:28:13.327851Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:28:13.327963Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:28:13.328860Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:28:13.328924Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:13.331190Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:13.331237Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:28:13.331278Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:28:13.331359Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:28:13.331410Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:28:13.331484Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:13.331580Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037890 time 0 2025-06-25T14:28:13.331614Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at 
tablet# 72075186224037890 2025-06-25T14:28:13.332192Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 1000} 2025-06-25T14:28:13.332249Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:28:13.335142Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:13.335247Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T14:28:13.335302Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-25T14:28:13.335574Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:28:13.335617Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-25T14:28:13.335658Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037890 2025-06-25T14:28:13.335719Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037890 at tablet 72075186224037890 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:28:13.335770Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:28:13.335841Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:28:13.345310Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:28:13.346100Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-25T14:28:13.346197Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-25T14:28:13.347323Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:28:13.347687Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037890 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:28:13.347894Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:28:13.347946Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:28:13.351348Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037890 state Ready 2025-06-25T14:28:13.351415Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 
2025-06-25T14:28:13.399851Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:791:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:13.399969Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:802:2655], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:13.400055Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:13.406192Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:13.412414Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:13.412564Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T14:28:13.412624Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-25T14:28:13.511119Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:13.668733Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:13.668901Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T14:28:13.668968Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-25T14:28:13.677540Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:805:2658], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:28:13.716087Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:877:2699] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:13.811914Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykqtgen6xwvv8a5xzwm1a8y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZjEyNGEzNzgtOWNiMjZjZDItYzg1ZTIxNTQtNDAxNDY0Y2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:13.814518Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:982:2742], serverId# [4:983:2743], sessionId# [0:0:0] 2025-06-25T14:28:13.814905Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037889 2025-06-25T14:28:13.815197Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750861693815080 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 38b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-25T14:28:13.815373Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1750861693815080 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-25T14:28:13.815499Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-25T14:28:13.828365Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 38 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-25T14:28:13.828474Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:28:13.862726Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:989:2748], serverId# [4:990:2749], sessionId# [0:0:0] 2025-06-25T14:28:13.870459Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:991:2750], serverId# [4:992:2751], sessionId# [0:0:0] >> CdcStreamChangeCollector::OldImage [GOOD] >> CdcStreamChangeCollector::SchemaChanges >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedBeforeSplit >> TSchemeShardViewTest::EmptyQueryText |75.2%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::PeriodicTask/1 [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendTestConnectionWithServiceAccount [GOOD] >> 
TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateBinding >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] >> TOlapNaming::AlterColumnTableOk [GOOD] >> TOlapNaming::AlterColumnTableFailed >> TSchemeShardViewTest::AsyncDropSameView |75.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListBindings >> TSchemeShardViewTest::EmptyQueryText [GOOD] >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_3 [GOOD] >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_10 |75.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> YdbProxy::MakeDirectory >> TSchemeShardViewTest::AsyncDropSameView [GOOD] >> TSchemeShardMoveTest::Chain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::EmptyQueryText [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:16.826620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:16.826712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:16.826751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:16.826782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:16.826822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:16.826849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:16.826899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:16.826961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:16.827639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:16.827943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:16.903264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:16.903331Z node 1 
:IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:16.917758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:16.920694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:16.920832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:16.930533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:16.930870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:16.931454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:16.932493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:16.936148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:16.936342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:16.937568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:16.937629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:16.937676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:16.937718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:16.937757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:16.937975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.944724Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:17.071726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:17.071957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.072210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, 
LocalPathId: 1] was 0 2025-06-25T14:28:17.072262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:17.072498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:17.072572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:17.075941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:17.076163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:17.076394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.076464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:17.076511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:17.076577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:17.079212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.079269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:17.079312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:17.081493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.081548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.081604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:17.081683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:17.085360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-06-25T14:28:17.087665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:17.087883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:17.088894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:17.089030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:17.089090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:17.089404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:17.089457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:17.089623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:17.089729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:17.092329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:17.092375Z node 1 :FLAT_TX_SCHEMESHARD ... 
: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:28:17.123220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "MyView" QueryText: "" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:17.123533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/MyView, opId: 101:0 2025-06-25T14:28:17.123608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/MyView, opId: 101:0, viewDescription: Name: "MyView" QueryText: "" 2025-06-25T14:28:17.123746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: MyView, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:28:17.123805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-25T14:28:17.123834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 101:0 type: TxCreateView target path: [OwnerId: 72057594046678944, LocalPathId: 2] source path: 2025-06-25T14:28:17.123884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:17.125542Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:28:17.140955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusAccepted TxId: 101 SchemeshardId: 72057594046678944 PathId: 2, at schemeshard: 72057594046678944 2025-06-25T14:28:17.141246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE VIEW, path: /MyRoot/MyView 2025-06-25T14:28:17.141502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.141555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:30: [72057594046678944] TCreateView::TPropose, opId: 101:0 ProgressState 2025-06-25T14:28:17.141607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 101 ready parts: 1/1 2025-06-25T14:28:17.141726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 101 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:17.142610Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:28:17.145147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send 
tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-25T14:28:17.145371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-06-25T14:28:17.145801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:17.145923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:17.145979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000002 2025-06-25T14:28:17.146146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 128 -> 240 2025-06-25T14:28:17.146337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:17.146418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:28:17.152888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:17.152952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:17.153152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:28:17.153309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:17.153363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-25T14:28:17.153432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-25T14:28:17.153775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.153824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:28:17.153924Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:28:17.153950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:28:17.153980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:28:17.154024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:28:17.154065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:28:17.154109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:28:17.154155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:28:17.154179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:28:17.154243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:28:17.154268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T14:28:17.154290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-25T14:28:17.154310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-25T14:28:17.155034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:28:17.155122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:28:17.155150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:28:17.155178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-25T14:28:17.155209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:28:17.155826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:28:17.155896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:28:17.155927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:28:17.155955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-25T14:28:17.155975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:28:17.156035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:28:17.159732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:28:17.159869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendListBindings [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeBinding >> Cdc::KeysOnlyLogDebezium >> KqpYql::InsertCV+useSink [GOOD] >> KqpYql::InsertCV-useSink |75.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncDropSameView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:17.315421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:17.315530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:17.315599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:17.315636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:17.315681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:17.315714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:17.315772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: 
BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:17.315847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:17.316901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:17.317267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:17.406824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:17.406885Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:17.443650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:17.444091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:17.444244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:17.451586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:17.451828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:17.452343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:17.452593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:17.458084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:17.458345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:17.459514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:17.459583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:17.459736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:17.459783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:17.459829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:17.459938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.490498Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for 
TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:17.693368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:17.693576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.693767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:17.693814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:17.694033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:17.694105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:17.703884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:17.704091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:17.704273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.704378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:17.704433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:17.704499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:17.706631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.706707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:17.706759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:17.708700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.708751Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.708801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:17.708858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:17.712250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:17.714456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:17.714648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:17.715472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:17.715603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:17.715654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:17.715933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:17.715987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:17.716157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:17.716257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:17.718322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:17.718364Z node 1 :FLAT_TX_SCHEMESHARD ... 
th for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:17.784299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:28:17.786247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:17.786287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:17.786435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:28:17.786563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:17.786597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-25T14:28:17.786659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:28:17.786928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:28:17.786969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:28:17.787064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:28:17.787099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:28:17.787156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:28:17.787189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:28:17.787217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-25T14:28:17.787250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:28:17.787282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:28:17.787315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:28:17.787388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:28:17.787420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-25T14:28:17.787447Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:28:17.787474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-25T14:28:17.788183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:28:17.788294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:28:17.788360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:28:17.788404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:28:17.788438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:28:17.796167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:28:17.796326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:28:17.796371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:28:17.796403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:28:17.796434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:28:17.796539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:28:17.796863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:28:17.796924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:28:17.796997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, 
LocalPathId: 1] was 1 2025-06-25T14:28:17.799984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:28:17.801636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:28:17.801750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 102 2025-06-25T14:28:17.802088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:28:17.802137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-25T14:28:17.802218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:28:17.802237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 TestWaitNotification wait txId: 104 2025-06-25T14:28:17.802279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-25T14:28:17.802297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-25T14:28:17.802813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:28:17.802935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:28:17.802968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:334:2323] 2025-06-25T14:28:17.803158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:28:17.803209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:28:17.803228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:334:2323] 2025-06-25T14:28:17.803276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-25T14:28:17.803354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:28:17.803372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:334:2323] TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 2025-06-25T14:28:17.803874Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:17.804051Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 191us result status StatusPathDoesNotExist 2025-06-25T14:28:17.804207Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyView\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyView" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v0] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyBinding >> StoragePool::TestDistributionExactMin [GOOD] >> StoragePool::TestDistributionExactMinWithOverflow >> StoragePool::TestDistributionExactMinWithOverflow [GOOD] >> StoragePool::TestDistributionRandomMin7p >> TMonitoringTests::InvalidActorId >> TTxDataShardMiniKQL::WriteEraseRead >> TTxDataShardMiniKQL::Write |75.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |75.2%| [LD] {RESULT} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |75.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut >> TTxDataShardMiniKQL::ReadSpecialColumns >> TTxDataShardMiniKQL::CrossShard_1_Cycle |75.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TMonitoringTests::InvalidActorId [GOOD] >> THiveTest::TestHiveBalancerWithFollowers [GOOD] >> THiveTest::TestHiveBalancerWithLimit >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v1] >> KqpScripting::Pure [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteBinding >> TMonitoringTests::ValidActorId >> TSchemeShardMoveTest::Chain [GOOD] >> TSchemeShardMoveTest::Index |75.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest |75.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TMonitoringTests::InvalidActorId [GOOD] >> TMonitoringTests::ValidActorId [GOOD] >> TControlPlaneProxyCheckPermissionsFailed::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateQuery >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v1] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::Pure [GOOD] Test command err: Trying to start YDB, gRPC: 24297, MsgBus: 9453 2025-06-25T14:28:06.813984Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893685235669707:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:06.814028Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e18/r3tmp/tmpiEwkIG/pdisk_1.dat 2025-06-25T14:28:07.114400Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24297, node 1 2025-06-25T14:28:07.181192Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:07.181218Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:07.181231Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:07.181416Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:07.188360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:07.188503Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:07.192450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9453 TClient is connected to server localhost:9453 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:07.746812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-25T14:28:07.765486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:07.824778Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:07.918574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:08.071002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:08.132212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:09.702210Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893698120573199:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:09.702342Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:10.047667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:10.136334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:10.227478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:10.318296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:10.410031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:10.467972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:10.560578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:10.669514Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893702415541164:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:10.669622Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:10.669949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893702415541169:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:10.678906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:10.703577Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893702415541171:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:28:10.786007Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893702415541224:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:11.816602Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893685235669707:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:11.816668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:13.205577Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861693245, txId: 281474976710674] shutting down Trying to start YDB, gRPC: 27438, MsgBus: 19633 2025-06-25T14:28:14.139578Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893717825772657:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:14.139674Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e18/r3tmp/tmpGW0ojO/pdisk_1.dat 2025-06-25T14:28:14.389562Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:14.391328Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893717825772637:2080] 1750861694138798 != 1750861694138801 2025-06-25T14:28:14.398389Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:14.398472Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:14.400211Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27438, node 2 2025-06-25T14:28:14.444018Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:14.444044Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:14.444066Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:14.444174Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19633 TClient is connected to server localhost:19633 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:28:14.891706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:14.954084Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:15.060727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:15.178782Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:15.281899Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:15.416974Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:17.516454Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893730710676153:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:17.516574Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:17.582024Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:17.621800Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:17.657337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:17.717727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:17.774290Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:17.846879Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:17.918176Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:18.018663Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893735005644110:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:18.018808Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:18.019457Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893735005644115:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:18.022989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:18.033277Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893735005644117:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:28:18.115936Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893735005644168:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:19.142771Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893717825772657:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:19.142865Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |75.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TMonitoringTests::ValidActorId [GOOD] |75.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TSchemeShardMoveTest::Index [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyUsedGrpcApi [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApi >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] [GOOD] >> KqpYql::PgIntPrimaryKey [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateQuery [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListQueries >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] >> YdbProxy::MakeDirectory [GOOD] >> YdbProxy::OAuthToken |75.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::Index [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:18.590848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:18.590981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:18.591029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:18.591076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:18.591127Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:18.591153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:18.591203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:18.591268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:18.592022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:18.592740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:18.676290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:18.676372Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:18.699563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:18.699949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:18.700140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:18.706786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:18.707159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:18.707817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:18.708113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:18.716706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:18.716937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:18.718256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:18.718325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:18.718503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:18.718556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:18.718609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:18.718700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:18.724165Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary 
subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:18.858603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:18.858861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:18.859095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:18.859142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:18.859446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:18.859537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:18.874616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:18.874832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:18.875052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:18.875106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:18.875150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:18.875205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:18.883112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:18.883187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:18.883250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:18.891752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:28:18.891839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:18.891926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:18.892023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:18.895637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:18.907676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:18.907893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:18.908949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:18.909091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:18.909136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:18.909457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:18.909520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:18.909702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:18.909790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:18.917445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:18.917505Z node 1 :FLAT_TX_SCHEMESHARD ... 
57594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:21.533818Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove/Sync" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:28:21.534030Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove/Sync" took 219us result status StatusSuccess 2025-06-25T14:28:21.534593Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove/Sync" PathDescription { Self { Name: "Sync" PathId: 10 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 11 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 10 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 
ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Sync" LocalPathId: 10 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value0" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 10 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:21.535123Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove/Async" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:28:21.535280Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove/Async" took 174us result status StatusSuccess 2025-06-25T14:28:21.535718Z node 
2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove/Async" PathDescription { Self { Name: "Async" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 9 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 8 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Async" LocalPathId: 8 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value1" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { 
Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |75.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> CdcStreamChangeCollector::SchemaChanges [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListQueries [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeQuery >> Cdc::KeysOnlyLogDebezium [GOOD] >> Cdc::DocApi[PqRunner] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-fifo] [GOOD] |75.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-std] |75.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_monitoring/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::PgIntPrimaryKey [GOOD] Test command err: Trying to start YDB, gRPC: 11056, MsgBus: 28355 2025-06-25T14:28:08.962613Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893692246910435:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:08.968693Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000de9/r3tmp/tmpa9BkHa/pdisk_1.dat 2025-06-25T14:28:09.655172Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:09.659002Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893692246910235:2080] 1750861688924031 != 1750861688924034 2025-06-25T14:28:09.667887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:09.668002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:09.681310Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11056, node 1 2025-06-25T14:28:09.852869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:09.852894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:09.852900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:09.853005Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:09.960450Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28355 TClient is connected to server localhost:28355 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:10.488064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:10.517043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:28:10.541676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:10.671584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:28:10.840150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:10.911918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:13.031278Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893713721748379:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:13.031411Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:13.462119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:13.504096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:13.585214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:13.625335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:13.680768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:13.750974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:13.831842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:13.938033Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893713721749047:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:13.938125Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:13.938361Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893713721749052:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:13.942556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:13.955479Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893713721749054:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:28:13.959119Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893692246910435:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:13.959203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:14.056580Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893718016716403:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Error: Optimization, code: 1070
:4:20: Error: RefSelect mode isn't supported by provider: kikimr Trying to start YDB, gRPC: 32163, MsgBus: 13063 2025-06-25T14:28:15.965665Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893720736287262:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:15.965716Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000de9/r3tmp/tmpS4KKQc/pdisk_1.dat 2025-06-25T14:28:16.222354Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:16.222436Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:16.227304Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:16.247369Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:16.248501Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893720736287241:2080] 1750861695965017 != 1750861695965020 TServer::EnableGrpc on GrpcPort 32163, node 2 2025-06-25T14:28:16.449210Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:16.449234Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:16.449241Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:16.449352Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13063 2025-06-25T14:28:17.020454Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13063 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:17.158354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:17.165841Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:28:19.710504Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893737916157062:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:19.710590Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:19.740587Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:19.805738Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893737916157163:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:19.805830Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:19.806130Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893737916157168:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:19.809724Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:19.821345Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893737916157170:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:28:19.901749Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893737916157221:2392] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:20.968583Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893720736287262:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:20.968668Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> THiveTest::TestHiveBalancerWithLimit [GOOD] >> THiveTest::TestHiveBalancerIgnoreTablet >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendGetQueryStatus >> Cdc::KeysOnlyLog[PqRunner] |75.3%| [TA] $(B)/ydb/core/tx/scheme_board/ut_monitoring/test-results/unittest/{meta.json ... results_accumulator.log} |75.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |75.3%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_monitoring/test-results/unittest/{meta.json ... results_accumulator.log} |75.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |75.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks >> StoragePool::TestDistributionRandomMin7p [GOOD] >> StoragePool::TestDistributionRandomMin7pWithOverflow [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::SchemaChanges [GOOD] Test command err: 2025-06-25T14:27:49.377672Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:27:49.377800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:49.377853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013b3/r3tmp/tmp71cmYx/pdisk_1.dat 2025-06-25T14:27:49.737178Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:27:49.754104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:49.835242Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:49.847677Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-25T14:27:49.848460Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861664101859 != 1750861664101863 2025-06-25T14:27:49.896146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:49.896288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:49.908709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:49.995900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:50.057463Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:27:50.057657Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:27:50.117898Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:27:50.118167Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:27:50.120048Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:27:50.120124Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:27:50.120173Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:27:50.121875Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:27:50.122074Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:27:50.122165Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:27:50.133071Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:27:50.182959Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:27:50.183205Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:27:50.183320Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:27:50.183388Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:27:50.183442Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:27:50.183479Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:27:50.184038Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:27:50.184175Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:27:50.184264Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:27:50.184485Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:27:50.184536Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:27:50.184588Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:27:50.185052Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:27:50.185245Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:27:50.185513Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:27:50.185610Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:27:50.187113Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:27:50.201095Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:27:50.201217Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:27:50.391173Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], 
sessionId# [0:0:0] 2025-06-25T14:27:50.403443Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:27:50.403549Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:27:50.405016Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:27:50.405097Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:27:50.405154Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:27:50.405461Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:27:50.405654Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:27:50.406241Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:27:50.406310Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:27:50.421853Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:27:50.422368Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:27:50.424370Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:27:50.424444Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:27:50.425033Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:27:50.425120Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:27:50.426004Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:27:50.426063Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:27:50.426109Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:27:50.426176Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:27:50.426228Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:27:50.426321Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:27:50.442514Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:27:50.448558Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:27:50.448681Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:27:50.449777Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:27:50.632079Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:27:50.632252Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-25T14:27:5 ... ransaction::Execute at 72075186224037888 2025-06-25T14:28:21.197696Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1822: Add schema snapshot: pathId# [OwnerId: 72057594046644480, LocalPathId: 2], version# 2, step# 1500, txId# 281474976715658, at tablet# 72075186224037888 2025-06-25T14:28:21.198084Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:21.276969Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-25T14:28:21.277084Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:21.277125Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:21.277178Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:21.277264Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:28:21.277329Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-25T14:28:21.277441Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:21.279551Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-25T14:28:21.279640Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:28:21.323850Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:839:2677], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:21.323985Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:848:2682], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:21.324072Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:21.329792Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:21.336259Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:21.535920Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:21.545972Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:853:2685], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:28:21.574335Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:909:2722] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:21.645263Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykqtr695mqhfhcbq59rpyxv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NThkMGU3ZWItODE5ZDczMDUtOTk2YzNlNTktOGMyNWJhMTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:21.648129Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:940:2739], serverId# [4:941:2740], sessionId# [0:0:0] 2025-06-25T14:28:21.648666Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:3] at 72075186224037888 2025-06-25T14:28:21.648906Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750861701648795 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 32b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-25T14:28:21.649099Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-25T14:28:21.663916Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 32 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-25T14:28:21.664025Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:21.976648Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:28:21.979530Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:28:21.979782Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715662 ssId 72057594046644480 seqNo 2:3 2025-06-25T14:28:21.979857Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 3 current version# 2 expected version# 3 at tablet# 72075186224037888 txId# 281474976715662 2025-06-25T14:28:21.979909Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715662 at tablet 72075186224037888 2025-06-25T14:28:21.991489Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:28:22.114662Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715662 at step 2500 at 
tablet 72075186224037888 { Transactions { TxId: 281474976715662 AckTo { RawX1: 0 RawX2: 0 } } Step: 2500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:28:22.114748Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:22.114981Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:22.115037Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:28:22.115095Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2500:281474976715662] in PlanQueue unit at 72075186224037888 2025-06-25T14:28:22.115427Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 2500:281474976715662 keys extracted: 0 2025-06-25T14:28:22.115590Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:28:22.115868Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:22.115993Z node 4 :TX_DATASHARD INFO: alter_table_unit.cpp:145: Trying to ALTER TABLE at 72075186224037888 version 3 2025-06-25T14:28:22.118548Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1822: Add schema snapshot: pathId# [OwnerId: 72057594046644480, LocalPathId: 2], version# 3, step# 2500, txId# 281474976715662, at tablet# 72075186224037888 2025-06-25T14:28:22.118739Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 0 Step: 2500 TxId: 281474976715662 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcSchemaChange Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 3 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-25T14:28:22.119192Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:22.122322Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2500} 2025-06-25T14:28:22.122425Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:22.123536Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:22.123614Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 3 } 2025-06-25T14:28:22.123722Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2500 : 281474976715662] from 72075186224037888 at tablet 72075186224037888 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:28:22.123789Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715662 state Ready TxInFly 0 2025-06-25T14:28:22.123978Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, 
LocalPathId: 3] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 3 } 2025-06-25T14:28:22.124038Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:22.130757Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-06-25T14:28:22.130865Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:28:22.137287Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:983:2777], serverId# [4:984:2778], sessionId# [0:0:0] 2025-06-25T14:28:22.158218Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:986:2780], serverId# [4:987:2781], sessionId# [0:0:0] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyQuery >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWoIndexes >> Cdc::UuidExchange[PqRunner] >> AggregateStatistics::ShouldBePings >> TPersQueueNewSchemeCacheTest::CheckGrpcReadNoDC [GOOD] >> TOlapNaming::AlterColumnTableFailed [GOOD] >> AggregateStatistics::ShouldBePings [GOOD] |75.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest |75.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::ClientCertAuthorizationParamsMatch [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBePings [GOOD] Test command err: 2025-06-25T14:28:24.544370Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-25T14:28:24.549320Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-25T14:28:24.656951Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-25T14:28:24.657056Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 2 2025-06-25T14:28:24.657089Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-25T14:28:24.657789Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:16:2056], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-25T14:28:24.657834Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:24.657896Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:19:2055], server id = [0:0:0], tablet id = 2, status = ERROR 2025-06-25T14:28:24.657918Z node 2 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:24.658002Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 2 2025-06-25T14:28:24.658042Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> 
StoragePool::TestDistributionRandomMin7pWithOverflow [GOOD] Test command err: Took 9.575994 seconds >> VDiskBalancing::TestRandom_Mirror3dc [GOOD] |75.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::ClientCertAuthorizationParamsMatch [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlapNaming::AlterColumnTableFailed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:07.312782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:07.312893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:07.312932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:07.312963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:07.313013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:07.313044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:07.313111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:07.313168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:07.313899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:07.314648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:07.402457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:07.402545Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:07.419698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:07.420093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:07.420261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:07.425551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 
2025-06-25T14:28:07.425843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:07.426425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:07.426658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:07.430185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:07.430377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:07.431402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:07.431450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:07.431600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:07.431644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:07.431680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:07.431757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:07.439365Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:07.657691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:07.657929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:07.658147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:07.658196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:07.658433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:07.658503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:07.661645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:07.661845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:07.662065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:07.662119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:07.662153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:07.662199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:07.667989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:07.668068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:07.668116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:07.673934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:07.673996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:07.674049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:07.674111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:07.678057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:07.680473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:07.680664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 
72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:07.681625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:07.681763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:07.681809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:07.682068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:07.682115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:07.682269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:07.682328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:07.684868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:07.684909Z node 1 :FLAT_TX_SCHEMESHARD ... 
2025-06-25T14:28:23.930773Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.930834Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.930892Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.930950Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.931013Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.931081Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.935901Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.936073Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.936165Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.936316Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.936412Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.936497Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.936577Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.936661Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.940527Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.940936Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.941049Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.941261Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.941348Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.941456Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 
2025-06-25T14:28:23.941529Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.941613Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.942365Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.942453Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.942514Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.942574Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.942640Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.942700Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.942758Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.942835Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.942888Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.942963Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.943007Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:28:23.943143Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:28:23.943175Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:28:23.943216Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:28:23.943251Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:28:23.943288Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-25T14:28:23.943368Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:2710:3909] message: TxId: 101 2025-06-25T14:28:23.943420Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:28:23.943483Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation 
id: 101:0 2025-06-25T14:28:23.943514Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:28:23.944703Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-06-25T14:28:23.948071Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:28:23.948138Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:2711:3910] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-25T14:28:23.950996Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnTable AlterColumnTable { Name: "TestTable" AlterSchema { AddColumns { Name: "New Column" Type: "Int32" } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:23.951214Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: alter_table.cpp:282: TAlterColumnTable Propose, path: /MyRoot/TestTable, opId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:28:23.951467Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: update parse error: Invalid name for column 'New Column'. in alter constructor STANDALONE_UPDATE, at schemeshard: 72057594046678944 2025-06-25T14:28:23.954520Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "update parse error: Invalid name for column \'New Column\'. in alter constructor STANDALONE_UPDATE" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:23.954751Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: update parse error: Invalid name for column 'New Column'. 
in alter constructor STANDALONE_UPDATE, operation: ALTER COLUMN TABLE, path: /MyRoot/TestTable TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:28:23.955075Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:28:23.955119Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:28:23.955527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:28:23.955633Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:28:23.955668Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:3511:4637] TestWaitNotification: OK eventTxId 102 >> KqpYql::InsertCV-useSink [GOOD] |75.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueNewSchemeCacheTest::CheckGrpcReadNoDC [GOOD] Test command err: 2025-06-25T14:27:53.285176Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893625624745507:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:53.285210Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:53.274260Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893625521767733:2164];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:53.274322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:53.652491Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:53.662114Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00193c/r3tmp/tmpzPZeuM/pdisk_1.dat 2025-06-25T14:27:53.943680Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:53.961010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:53.961094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:53.968186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14633, node 1 2025-06-25T14:27:54.043280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:54.044119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:54.078294Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 
Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:27:54.091354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:54.133455Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/00193c/r3tmp/yandexxSwsjK.tmp 2025-06-25T14:27:54.133484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/00193c/r3tmp/yandexxSwsjK.tmp 2025-06-25T14:27:54.133652Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/00193c/r3tmp/yandexxSwsjK.tmp 2025-06-25T14:27:54.133842Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:54.237685Z INFO: TTestServer started on Port 30537 GrpcPort 14633 2025-06-25T14:27:54.380089Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:54.385513Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30537 PQClient connected to localhost:14633 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:27:54.910611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:27:55.034370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-25T14:27:57.930137Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893642701637875:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:57.930291Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:57.930687Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893642701637887:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:57.935100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:27:57.990791Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893642701637889:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:27:58.248471Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893646996605272:2764] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:27:58.288553Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893625624745507:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:58.288704Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:27:58.302296Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893625521767733:2164];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:58.302339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:27:58.362185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:58.466080Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519893646996605285:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:27:58.466309Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NmUwNmVhNTctNTQzMjVlYmMtMzkxNDMzZGEtNjBjMTQwOTU=, ActorId: [1:7519893642701637873:2299], ActorState: ExecuteState, TraceId: 01jykqt1b2bwm80t4dj8xtzq1x, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:27:58.468227Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:27:58.494587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:58.689805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:28:00.419734Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jykqt26qb6zwz5t3db53xxa8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWMwZmYyMmQtYzE2MjJlZGQtMTg3NDFlYjQtOWU3YjI0MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7519893655586540386:3134] === CheckClustersList. Ok WaitRootIsUp 'Root'... TClient::Ls request: ... 
ikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root/PQ/Config/V2/Cluster PathId: Partial: 0 } 2025-06-25T14:28:23.304277Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519893696669943182:2129], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root/PQ/Config/V2/Cluster PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [3:7519893713849813314:2757] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 20 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861693945 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, by pathId# nullptr 2025-06-25T14:28:23.304341Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519893696669943182:2129], cacheItem# { Subscriber: { Subscriber: [3:7519893713849813314:2757] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 20 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861693945 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: Root/PQ/Config/V2/Cluster TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpUnknown RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 20 IsSync: true Partial: 0 } 2025-06-25T14:28:23.304396Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519893696669943182:2129], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root/PQ/Config/V2/Versions PathId: Partial: 0 } 2025-06-25T14:28:23.304456Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519893696669943182:2129], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root/PQ/Config/V2/Versions PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [3:7519893713849813429:2826] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 20 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861694274 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, by pathId# nullptr 2025-06-25T14:28:23.304505Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519893696669943182:2129], cacheItem# { Subscriber: { Subscriber: [3:7519893713849813429:2826] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 20 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861694274 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: Root/PQ/Config/V2/Versions TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpUnknown RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 20 IsSync: true Partial: 0 } 2025-06-25T14:28:23.304692Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519893756799489046:4834], recipient# [3:7519893756799489044:2575], result# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: 
Root/PQ/Config/V2/Cluster TableId: [72057594046644480:10:1] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } },{ Path: Root/PQ/Config/V2/Cluster TableId: [72057594046644480:10:1] RequestType: ByPath Operation: OpUnknown RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:28:23.304712Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519893756799489047:4835], recipient# [3:7519893756799489045:2576], result# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } },{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByPath Operation: OpUnknown RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:28:23.305539Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519893696669943182:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:10:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:28:23.305623Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519893696669943182:2129], cacheItem# { Subscriber: { Subscriber: [3:7519893713849813314:2757] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 20 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861693945 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: TableId: [72057594046644480:10:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, 
props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:23.305700Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519893696669943182:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:12:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:28:23.305744Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519893696669943182:2129], cacheItem# { Subscriber: { Subscriber: [3:7519893713849813429:2826] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 20 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861694274 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: TableId: [72057594046644480:12:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:23.305882Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519893756799489051:4837], recipient# [3:7519893696669943164:2248], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:28:23.305885Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519893756799489050:4836], recipient# [3:7519893696669943164:2248], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Cluster TableId: [72057594046644480:10:1] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:28:23.307288Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519893696669943182:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:28:23.307360Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519893696669943182:2129], cacheItem# { Subscriber: { Subscriber: [3:7519893700964910980:2453] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 29 } Filled: 1 Status: 
StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750861690445 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:23.307498Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519893756799489052:4838], recipient# [3:7519893696669943164:2248], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendControlQuery >> YdbProxy::OAuthToken [GOOD] >> TOlap::StoreStats [GOOD] >> TOlap::StoreStatsQuota >> TTxDataShardMiniKQL::WriteEraseRead [GOOD] >> TTxDataShardMiniKQL::WriteAndReadMultipleShards >> TTxDataShardMiniKQL::ReadSpecialColumns [GOOD] >> TTxDataShardMiniKQL::SelectRange >> TTxDataShardMiniKQL::Write [GOOD] >> TTxDataShardMiniKQL::TableStats >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v1] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendGetResultData |75.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest |75.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest |75.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::InsertCV-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 14193, MsgBus: 26926 2025-06-25T14:28:12.097849Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893710594289078:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:12.114340Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d5c/r3tmp/tmprn5jXA/pdisk_1.dat 2025-06-25T14:28:12.662735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:12.662851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:12.673854Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table 
profiles were not loaded 2025-06-25T14:28:12.674868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:12.679902Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893710594289059:2080] 1750861692090271 != 1750861692090274 TServer::EnableGrpc on GrpcPort 14193, node 1 2025-06-25T14:28:12.847042Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:12.847079Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:12.847089Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:12.847216Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26926 2025-06-25T14:28:13.120259Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26926 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:13.419561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:13.454569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:28:13.469162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:28:13.668117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:28:13.896537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:13.979895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:15.895019Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893723479192568:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:15.895118Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:16.218131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:16.315039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:16.357427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:16.400526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:16.450397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:16.490223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:16.533292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:16.598892Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893727774160523:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:16.598964Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:16.598998Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893727774160528:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:16.602926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:16.654173Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893727774160530:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:28:16.750036Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893727774160581:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:17.098047Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893710594289078:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:17.098154Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:17.917551Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-25T14:28:17.927464Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 7207518622403791 ... (2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:19.001506Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28757, node 2 2025-06-25T14:28:19.180770Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:19.180792Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:19.180799Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:19.180928Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3750 TClient is connected to server localhost:3750 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:28:19.785781Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:19.801072Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:19.810355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:28:19.844590Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:19.919298Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:20.060083Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:28:20.145580Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:22.710928Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893752919370336:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:22.711022Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:22.790250Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:22.838604Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:22.928663Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:23.029968Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:23.075209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:23.125422Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:23.215120Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:23.305960Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893757214338298:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:23.306049Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:23.306312Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893757214338303:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:23.310753Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:23.332690Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893757214338305:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:28:23.401692Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893757214338356:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:23.756509Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893735739499588:2126];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:23.756567Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:25.084775Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7519893765804273227:2482], TxId: 281474976715673, task: 1. Ctx: { TraceId : 01jykqtve1ampnyptwns5mv334. SessionId : ydb://session/3?node_id=2&id=ZjRjMTEzNGQtMzg2ZGFhMTItNzJmYmViZTItMTYzMGEyYzk=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-25T14:28:25.085288Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519893765804273228:2483], TxId: 281474976715673, task: 2. Ctx: { TraceId : 01jykqtve1ampnyptwns5mv334. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=ZjRjMTEzNGQtMzg2ZGFhMTItNzJmYmViZTItMTYzMGEyYzk=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519893765804273224:2470], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-25T14:28:25.085789Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZjRjMTEzNGQtMzg2ZGFhMTItNzJmYmViZTItMTYzMGEyYzk=, ActorId: [2:7519893761509305894:2470], ActorState: ExecuteState, TraceId: 01jykqtve1ampnyptwns5mv334, Create QueryResponse for error on request, msg:
: Error: Execution, code: 1060
: Error: Conflict with existing key., code: 2012 >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::OAuthToken [GOOD] Test command err: 2025-06-25T14:28:18.290357Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893733887954834:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:18.290420Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c43/r3tmp/tmpG6Nto5/pdisk_1.dat 2025-06-25T14:28:18.806834Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:18.807730Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893733887954732:2080] 1750861698274018 != 1750861698274021 2025-06-25T14:28:18.852738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:18.852847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:18.857494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8523 TServer::EnableGrpc on GrpcPort 24751, node 1 2025-06-25T14:28:19.286881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:19.286912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:19.286920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:19.287026Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:19.312514Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8523 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:19.728675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:22.308954Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893753303777971:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:22.325174Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c43/r3tmp/tmp5D5AUX/pdisk_1.dat 2025-06-25T14:28:22.537397Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:22.537744Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:22.556124Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:22.564726Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8766 TServer::EnableGrpc on GrpcPort 28433, node 2 2025-06-25T14:28:23.000946Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:23.000965Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:23.000971Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:23.001082Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:23.316488Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8766 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:23.553681Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... |75.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] |75.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TTxDataShardMiniKQL::SelectRange [GOOD] >> TTxDataShardMiniKQL::SelectRangeWithNotFullKey >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanManyTables [GOOD] >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp >> TCertificateCheckerTest::CheckSubjectDns >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListJobs >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v1] [GOOD] |75.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest |75.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v1] [GOOD] >> AggregateStatistics::ChildNodesShouldBeInvalidateByTimeout >> AggregateStatistics::ChildNodesShouldBeInvalidateByTimeout [GOOD] >> TTxDataShardMiniKQL::TableStats [GOOD] >> TTxDataShardMiniKQL::TableStatsHistograms >> TTxDataShardMiniKQL::WriteAndReadMultipleShards [GOOD] >> TTxDataShardMiniKQL::WriteAndReadMany |75.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ChildNodesShouldBeInvalidateByTimeout [GOOD] Test command err: 2025-06-25T14:28:27.583670Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-25T14:28:27.584874Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [1:38:2058], tablet id = 1, status = OK 2025-06-25T14:28:27.585200Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:38:2058], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:27.585342Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-25T14:28:27.585568Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-25T14:28:27.585698Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-25T14:28:27.585721Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:27.585834Z node 3 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current 
Round: 0 2025-06-25T14:28:27.586035Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 1, current Round: 0 2025-06-25T14:28:27.586104Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:44:2057], server id = [3:44:2057], tablet id = 3, status = OK 2025-06-25T14:28:27.586152Z node 3 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [3:44:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:27.586212Z node 3 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-25T14:28:27.586239Z node 3 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-25T14:28:27.586326Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [4:47:2057], tablet id = 4, status = OK 2025-06-25T14:28:27.586362Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:47:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:27.586415Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:44:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-25T14:28:27.586447Z node 3 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:27.586504Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 4 2025-06-25T14:28:27.586530Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T14:28:27.586617Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [0:0:0], tablet id = 4, status = ERROR 2025-06-25T14:28:27.586637Z node 4 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:27.586743Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 3 2025-06-25T14:28:27.586835Z node 2 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 4 2025-06-25T14:28:27.597875Z node 4 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-25T14:28:27.597937Z node 4 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T14:28:27.598009Z node 3 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-25T14:28:27.598042Z node 3 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T14:28:27.612552Z node 2 :STATISTICS DEBUG: service_impl.cpp:401: Skip TEvKeepAliveTimeout 2025-06-25T14:28:27.612636Z node 1 :STATISTICS INFO: service_impl.cpp:416: Node 2 is unavailable 2025-06-25T14:28:27.612681Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-25T14:28:27.612794Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-25T14:28:27.612827Z node 1 :STATISTICS DEBUG: service_impl.cpp:393: Skip TEvKeepAliveTimeout 2025-06-25T14:28:27.612874Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-25T14:28:27.612897Z node 1 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T14:28:27.613038Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-25T14:28:27.613084Z node 1 :STATISTICS 
DEBUG: service_impl.cpp:428: Skip TEvAggregateKeepAlive >> Cdc::KeysOnlyLog[PqRunner] [GOOD] >> Cdc::KeysOnlyLog[YdsRunner] >> TTxDataShardMiniKQL::SelectRangeWithNotFullKey [GOOD] |75.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListJobs [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeJob >> TDataShardLocksTest::MvccTestWriteBreaksLocks [GOOD] >> TDataShardLocksTest::Points_ManyTx >> AggregateStatistics::RootNodeShouldBeInvalidateByTimeout [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApi [GOOD] >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClass [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyUsedGrpcApiWithCancelAfter >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI >> TDataShardLocksTest::Points_OneTx |75.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::SelectRangeWithNotFullKey [GOOD] Test command err: 2025-06-25T14:28:23.808755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:23.808819Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:23.811873Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:28:23.916907Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:28:23.917365Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T14:28:23.937309Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:28:24.041452Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:28:24.272877Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:28:24.273083Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:28:24.274779Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T14:28:24.274855Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T14:28:24.274955Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T14:28:24.291679Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:28:24.291775Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:28:24.291831Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T14:28:24.512929Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:28:24.558924Z node 1 :TX_DATASHARD INFO: 
datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T14:28:24.559195Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:28:24.559401Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T14:28:24.559458Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T14:28:24.559496Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T14:28:24.559562Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:28:24.559770Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.559828Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.591915Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T14:28:24.592014Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T14:28:24.592063Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T14:28:24.621818Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:24.621908Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T14:28:24.621949Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T14:28:24.622002Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T14:28:24.622048Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T14:28:24.638463Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T14:28:24.638679Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.638735Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.638789Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T14:28:24.667492Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nx\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\016\n\010__tablet\030\004 9\032\023\n\r__updateEpoch\030\004 :\032\020\n\n__updateNo\030\004 ;(\"J\014/Root/table1\222\002\013\th\020\000\000\000\000\000\000\020\r" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T14:28:24.667572Z node 
1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:28:24.667689Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:28:24.679679Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T14:28:24.679774Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T14:28:24.689616Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T14:28:24.689745Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T14:28:24.689793Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T14:28:24.689844Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T14:28:24.689914Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T14:28:24.690291Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T14:28:24.690337Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T14:28:24.690375Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T14:28:24.690416Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T14:28:24.690495Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T14:28:24.690531Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T14:28:24.690569Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T14:28:24.690604Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T14:28:24.690631Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T14:28:24.715949Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T14:28:24.716018Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T14:28:24.716044Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T14:28:24.716088Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T14:28:24.730084Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T14:28:24.743352Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient 
[1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.743425Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.743483Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T14:28:24.743631Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T14:28:24.743666Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T14:28:24.743826Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T14:28:24.743873Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T14:28:24.743923Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T14:28:24.743973Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T14:28:24.748057Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T14:28:24.760431Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:28:24.760812Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.760874Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.760941Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T14:28:24.760990Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:28:24.761026Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T14:28:24.761087Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T14:28:24 ... 004\206\203\014\203\014,SelectRange\000\003?* h\020\000\000\000\000\000\000\016\000\000\000\000\000\000\000?\014\005?2\003?,D\003?.F\003?0p\007\013?:\003?4e\005\'?8\003\013?>\003?\000\003?@\000\003?B\000\006\004?F\003\203\014\000\003\203\014\000\003\003?H\000\377\007\002\000\005?\032\005?\026?r\000\005?\030\003\005? 
\005?\034?r\000\006\000?\036\003?x\005?&\006\ 2025-06-25T14:28:28.089803Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:28:28.089906Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:28:28.090722Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit CheckDataTx 2025-06-25T14:28:28.090826Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-25T14:28:28.090874Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit CheckDataTx 2025-06-25T14:28:28.090914Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-25T14:28:28.090956Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit BuildAndWaitDependencies 2025-06-25T14:28:28.091018Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-25T14:28:28.091072Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:8] at 9437184 2025-06-25T14:28:28.091125Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-25T14:28:28.091153Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-25T14:28:28.091183Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit ExecuteDataTx 2025-06-25T14:28:28.091208Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit ExecuteDataTx 2025-06-25T14:28:28.091845Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:8] at tablet 9437184 with status COMPLETE 2025-06-25T14:28:28.091947Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:8] at 9437184: {NSelectRow: 0, NSelectRange: 1, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 2, SelectRangeBytes: 31, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T14:28:28.092007Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-25T14:28:28.092040Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit ExecuteDataTx 2025-06-25T14:28:28.092070Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit FinishPropose 2025-06-25T14:28:28.092099Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit FinishPropose 2025-06-25T14:28:28.092139Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 8 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-25T14:28:28.092212Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is DelayComplete 2025-06-25T14:28:28.092244Z node 3 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit FinishPropose 2025-06-25T14:28:28.092283Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit CompletedOperations 2025-06-25T14:28:28.095651Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit CompletedOperations 2025-06-25T14:28:28.095732Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-25T14:28:28.095765Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit CompletedOperations 2025-06-25T14:28:28.095802Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 9437184 has finished 2025-06-25T14:28:28.095905Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T14:28:28.095950Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:8] at 9437184 on unit FinishPropose 2025-06-25T14:28:28.096007Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWoIndexes [GOOD] >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithSyncIndex >> TDataShardLocksTest::MvccTestOooTxDoesntBreakPrecedingReadersLocks [GOOD] >> TDataShardLocksTest::MvccTestOutdatedLocksRemove [GOOD] >> TDataShardLocksTest::MvccTestBreakEdge [GOOD] >> TDataShardLocksTest::MvccTestAlreadyBrokenLocks [GOOD] >> AggregateStatistics::ShouldBeCcorrectProcessingOfLocalTablets [GOOD] >> AggregateStatistics::ShouldBeCcorrectProcessingTabletTimeout >> TDataShardLocksTest::Points_OneTx [GOOD] >> TDataShardLocksTest::Points_ManyTx_RemoveAll >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateConnection >> TDataShardLocksTest::Points_ManyTx [GOOD] >> TDataShardLocksTest::Points_ManyTx_BreakAll >> AggregateStatistics::ShouldBeCcorrectProcessingTabletTimeout [GOOD] |75.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::RootNodeShouldBeInvalidateByTimeout [GOOD] Test command err: 2025-06-25T14:28:28.832052Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-25T14:28:28.833273Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [1:38:2058], tablet id = 1, status = OK 2025-06-25T14:28:28.833575Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:38:2058], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:28.833713Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-25T14:28:28.833922Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-25T14:28:28.834062Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-25T14:28:28.834087Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 
2025-06-25T14:28:28.834187Z node 3 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-25T14:28:28.834408Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 1, current Round: 0 2025-06-25T14:28:28.834484Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:44:2057], server id = [3:44:2057], tablet id = 3, status = OK 2025-06-25T14:28:28.834530Z node 3 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [3:44:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:28.834592Z node 3 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-25T14:28:28.834619Z node 3 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-25T14:28:28.834704Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [4:47:2057], tablet id = 4, status = OK 2025-06-25T14:28:28.834742Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:47:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:28.834783Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:44:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-25T14:28:28.834821Z node 3 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:28.834881Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 4 2025-06-25T14:28:28.834903Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T14:28:28.834994Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [0:0:0], tablet id = 4, status = ERROR 2025-06-25T14:28:28.835014Z node 4 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:28.835129Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 3 2025-06-25T14:28:28.835206Z node 2 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 4 2025-06-25T14:28:28.845719Z node 4 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-25T14:28:28.845774Z node 4 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T14:28:28.845845Z node 3 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-25T14:28:28.845878Z node 3 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T14:28:28.856462Z node 2 :STATISTICS DEBUG: service_impl.cpp:401: Skip TEvKeepAliveTimeout 2025-06-25T14:28:28.856543Z node 1 :STATISTICS INFO: service_impl.cpp:416: Node 2 is unavailable 2025-06-25T14:28:28.856580Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-25T14:28:28.856685Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-25T14:28:28.856714Z node 1 :STATISTICS DEBUG: service_impl.cpp:393: Skip TEvKeepAliveTimeout 2025-06-25T14:28:28.856758Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-25T14:28:28.856780Z node 1 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T14:28:28.856906Z 
node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-25T14:28:28.856947Z node 1 :STATISTICS DEBUG: service_impl.cpp:428: Skip TEvAggregateKeepAlive >> Cdc::UuidExchange[PqRunner] [GOOD] >> Cdc::UuidExchange[YdsRunner] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestRandom_Mirror3dc [GOOD] Test command err: RandomSeed# 15500996989514523862 Step = 0 SEND TEvPut with key [1:1:0:0:0:51943:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:51943:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 1 SEND TEvPut with key [1:1:1:0:0:37868:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:37868:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 2 SEND TEvPut with key [1:1:2:0:0:85877:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:85877:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 3 SEND TEvPut with key [1:1:3:0:0:192081:0] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:192081:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 4 SEND TEvPut with key [1:1:4:0:0:267203:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:267203:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 3 2025-06-25T14:24:23.760386Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 5 SEND TEvPut with key [1:1:5:0:0:502135:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:502135:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 6 SEND TEvPut with key [1:1:6:0:0:377427:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:377427:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 4 2025-06-25T14:24:24.558832Z 1 00h01m10.060512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 7 SEND TEvPut with key [1:1:7:0:0:48850:0] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:48850:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 8 SEND TEvPut with key [1:1:8:0:0:411812:0] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:411812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 9 SEND TEvPut with key [1:1:9:0:0:293766:0] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:293766:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start node 3 Step = 10 SEND TEvPut with key [1:1:10:0:0:127358:0] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:127358:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 11 SEND TEvPut with key [1:1:11:0:0:282945:0] TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:282945:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 12 SEND TEvPut with key [1:1:12:0:0:34864:0] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:34864:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 13 SEND TEvPut with key [1:1:13:0:0:363096:0] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:363096:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 14 SEND TEvPut with key [1:1:14:0:0:179270:0] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:179270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 15 SEND TEvPut with key [1:1:15:0:0:358611:0] TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:358611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 16 SEND TEvPut with key [1:1:16:0:0:136892:0] TEvPutResult: TEvPutResult {Id# 
[1:1:16:0:0:136892:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 17 SEND TEvPut with key [1:1:17:0:0:517733:0] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:517733:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 18 SEND TEvPut with key [1:1:18:0:0:250802:0] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:250802:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 19 SEND TEvPut with key [1:1:19:0:0:199490:0] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:199490:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 20 SEND TEvPut with key [1:1:20:0:0:244269:0] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:244269:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 21 SEND TEvPut with key [1:1:21:0:0:329606:0] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:329606:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 22 SEND TEvPut with key [1:1:22:0:0:322338:0] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:322338:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 23 SEND TEvPut with key [1:1:23:0:0:519258:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:519258:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 24 SEND TEvPut with key [1:1:24:0:0:56036:0] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:56036:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 25 SEND TEvPut with key [1:1:25:0:0:514591:0] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:514591:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Stop node 7 2025-06-25T14:24:27.249797Z 1 00h01m30.100512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 26 SEND TEvPut with key [1:1:26:0:0:5927:0] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:5927:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 27 SEND TEvPut with key [1:1:27:0:0:148482:0] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:148482:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 28 SEND TEvPut with key [1:1:28:0:0:6043:0] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:6043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 29 SEND TEvPut with key [1:1:29:0:0:265170:0] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:265170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 30 SEND TEvPut with key [1:1:30:0:0:264716:0] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:264716:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Compact vdisk 3 Step = 31 SEND TEvPut with key [1:1:31:0:0:168116:0] TEvPutResult: TEvPutResult {Id# [1:1:31:0:0:168116:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 32 SEND TEvPut with key [1:1:32:0:0:444749:0] TEvPutResult: TEvPutResult {Id# [1:1:32:0:0:444749:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 33 SEND TEvPut with key [1:1:33:0:0:350254:0] TEvPutResult: TEvPutResult {Id# [1:1:33:0:0:350254:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 34 SEND TEvPut with key [1:1:34:0:0:145950:0] TEvPutResult: TEvPutResult {Id# [1:1:34:0:0:145950:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 35 SEND TEvPut with key [1:1:35:0:0:358611:0] TEvPutResult: TEvPutResult {Id# [1:1:35:0:0:358611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 36 SEND TEvPut with key [1:1:36:0:0:139148:0] TEvPutResult: 
TEvPutResult {Id# [1:1:36:0:0:139148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 37 SEND TEvPut with key [1:1:37:0:0:200198:0] TEvPutResult: TEvPutResult {Id# [1:1:37:0:0:200198:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 38 SEND TEvPut with key [1:1:38:0:0:185170:0] TEvPutResult: TEvPutResult {Id# [1:1:38:0:0:185170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 39 SEND TEvPut with key [1:1:39:0:0:297271:0] TEvPutResult: TEvPutResult {Id# [1:1:39:0:0:297271:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 40 SEND TEvPut with key [1:1:40:0:0:419670:0] TEvPutResult: TEvPutResult {Id# [1:1:40:0:0:419670:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 41 SEND TEvPut with key [1:1:41:0:0:218956:0] TEvPutResult: TEvPutResult {Id# [1:1:41:0:0:218956:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 42 SEND TEvPut with key [1:1:42:0:0:154723:0] TEvPutResult: TEvPutResult {Id# [1:1:42:0:0:154723:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 43 SEND TEvPut with key [1:1:43:0:0:13332:0] TEvPutResult: TEvPutResult {Id# [1:1:43:0:0:13332:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 44 SEND TEvPut with key [1:1:44:0:0:448892:0] TEvPutResult: TEvPutResult {Id# [1:1:44:0:0:448892:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 45 SEND TEvPut with key [1:1:45:0:0:103231:0] TEvPutResult: TEvPutResult {Id# [1:1:45:0:0:103231:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 46 SEND TEvPut with key [1:1:46:0:0:295973:0] TEvPutResult: TEvPutResult {Id# [1:1:46:0:0:295973:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 47 SEND TEvPut with key [1:1:47:0:0:402799:0] TEvPutResult: TEvPutResult {Id# [1:1:47:0:0:402799:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 48 SEND TEvPut with key [1:1:48:0:0:165045:0] TEvPutResult: TEvPutResult {Id# [1:1:48:0:0:165045:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 49 SEND TEvPut with key [1:1:49:0:0:360099:0] TEvPutResult: TEvPutResult {Id# [1:1:49:0:0:360099:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 50 SEND TEvPut with key [1:1:50:0:0:97222:0] TEvPutResult: TEvPutResult {Id# [1:1:50:0:0:97222:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 51 SEND TEvPut with key [1:1:51:0:0:303396:0] TEvPutResult: TEvPutResult {Id# [1:1:51:0:0:303396:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 52 SEND TEvPut with key [1:1:52:0:0:304876:0] TEvPutResult: TEvPutResult {Id# [1:1:52:0:0:304876:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 53 SEND TEvPut with key [1:1:53:0:0:375063:0] TEvPutResult: TEvPutResult {Id# [1:1:53:0:0:375063:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Start node 4 Step = 54 SEND TEvPut with key [1:1:54:0:0:288044:0] TEvPutResult: TEvPutResult {Id# [1:1:54:0:0:288044:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999866} Step = 55 SEND TEvPut with key [1:1:55:0:0:181559:0] TEvPutResult: TEvPutResult {Id# [1:1:55:0:0:181559:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 56 SEND TEvPut with key [1:1:56:0:0:42993:0] TEvPutResult: TEvPutResult {Id# [1:1:56:0:0:42993:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999866} 
Step = 57 SEND TEvPut with key [1:1:57:0:0:424399:0] TEvPutResult: TEvPutResult {Id# [1:1:57:0:0:424399:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 58 SEND TEvPut with key [1:1:58:0:0:169341:0] TEvPutResult: TEvPutResult {Id# [1:1:58:0:0:169341:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999902} Step = 59 SEND TEvPut with key [1:1:59:0:0:405932:0] TEvPutResult: TEvPutResult {Id# [1:1:59:0:0:405932:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999902} Step = 60 SEND TEvPut with key [1:1:60:0:0:190148:0] TEvPutResult: TEvPutResult {Id# [1:1:60:0:0:190148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Stop node 3 2025-06-25T14:24:28.742804Z 1 00h02m00.150512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Wipe node 0 2025-06-25T14:24:28.881573Z 1 00h02m10.161024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-25T14:24:28.883480Z 1 00h02m10.161024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 10995219182819201018] Step = 61 SEND TEvPut with key [1:1:61:0:0:500240:0] 2025-06-25T14:24:30.018633Z 1 00h03m50.161024s :BS_PROXY ERROR: Group# 2181038080 StateEstablishingSessions Wakeup TIMEOUT Marker# DSP12 TEvPutResult: TEvPutResult {Id# [1:1:61:0:0:500240:0] Status# ERROR StatusFlags# { } ErrorReason# "Timeout while establishing sessions (DSPE4)." ApproximateFreeSpaceShare# 0} Step = 62 SEND TEvPut with key [1:1:62:0:0:354994:0] TEvPutResult: TEvPutResult {Id# [1:1:62:0:0:354994:0] Status# ERROR StatusFlags# { } ErrorReason# "Timeout while establishing sessions (DSPE4)." ApproximateFreeSpa ... 
d# [1:1:945:0:0:76599:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 946 SEND TEvPut with key [1:1:946:0:0:24822:0] TEvPutResult: TEvPutResult {Id# [1:1:946:0:0:24822:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Compact vdisk 2 Step = 947 SEND TEvPut with key [1:1:947:0:0:100167:0] TEvPutResult: TEvPutResult {Id# [1:1:947:0:0:100167:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 948 SEND TEvPut with key [1:1:948:0:0:112126:0] TEvPutResult: TEvPutResult {Id# [1:1:948:0:0:112126:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 949 SEND TEvPut with key [1:1:949:0:0:525378:0] TEvPutResult: TEvPutResult {Id# [1:1:949:0:0:525378:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 950 SEND TEvPut with key [1:1:950:0:0:410875:0] TEvPutResult: TEvPutResult {Id# [1:1:950:0:0:410875:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 951 SEND TEvPut with key [1:1:951:0:0:113503:0] TEvPutResult: TEvPutResult {Id# [1:1:951:0:0:113503:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 952 SEND TEvPut with key [1:1:952:0:0:431140:0] TEvPutResult: TEvPutResult {Id# [1:1:952:0:0:431140:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 953 SEND TEvPut with key [1:1:953:0:0:509293:0] TEvPutResult: TEvPutResult {Id# [1:1:953:0:0:509293:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Stop node 3 2025-06-25T14:27:57.292143Z 1 00h28m00.802048s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 954 SEND TEvPut with key [1:1:954:0:0:286395:0] TEvPutResult: TEvPutResult {Id# [1:1:954:0:0:286395:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Stop node 1 2025-06-25T14:27:57.601050Z 1 00h28m10.803584s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 955 SEND TEvPut with key [1:1:955:0:0:219270:0] TEvPutResult: TEvPutResult {Id# [1:1:955:0:0:219270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Start node 1 Step = 956 SEND TEvPut with key [1:1:956:0:0:274971:0] TEvPutResult: TEvPutResult {Id# [1:1:956:0:0:274971:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Step = 957 SEND TEvPut with key [1:1:957:0:0:487884:0] TEvPutResult: TEvPutResult {Id# [1:1:957:0:0:487884:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Start node 3 Step = 958 SEND TEvPut with key [1:1:958:0:0:327302:0] TEvPutResult: TEvPutResult {Id# [1:1:958:0:0:327302:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 959 SEND TEvPut with key [1:1:959:0:0:385917:0] TEvPutResult: TEvPutResult {Id# [1:1:959:0:0:385917:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 960 SEND TEvPut with key [1:1:960:0:0:200998:0] TEvPutResult: TEvPutResult {Id# [1:1:960:0:0:200998:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 961 SEND TEvPut with key [1:1:961:0:0:61147:0] TEvPutResult: TEvPutResult {Id# [1:1:961:0:0:61147:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 962 SEND TEvPut with key [1:1:962:0:0:237906:0] TEvPutResult: TEvPutResult {Id# [1:1:962:0:0:237906:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 963 SEND TEvPut with key [1:1:963:0:0:347273:0] TEvPutResult: TEvPutResult {Id# [1:1:963:0:0:347273:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 964 SEND TEvPut 
with key [1:1:964:0:0:181317:0] TEvPutResult: TEvPutResult {Id# [1:1:964:0:0:181317:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 965 SEND TEvPut with key [1:1:965:0:0:456096:0] TEvPutResult: TEvPutResult {Id# [1:1:965:0:0:456096:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 966 SEND TEvPut with key [1:1:966:0:0:93776:0] TEvPutResult: TEvPutResult {Id# [1:1:966:0:0:93776:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 967 SEND TEvPut with key [1:1:967:0:0:447659:0] TEvPutResult: TEvPutResult {Id# [1:1:967:0:0:447659:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 968 SEND TEvPut with key [1:1:968:0:0:14298:0] TEvPutResult: TEvPutResult {Id# [1:1:968:0:0:14298:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 969 SEND TEvPut with key [1:1:969:0:0:92781:0] TEvPutResult: TEvPutResult {Id# [1:1:969:0:0:92781:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 970 SEND TEvPut with key [1:1:970:0:0:334566:0] TEvPutResult: TEvPutResult {Id# [1:1:970:0:0:334566:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Stop node 0 2025-06-25T14:27:59.118611Z 9 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [9:127528:351] ServerId# [1:128582:175] TabletId# 72057594037932033 PipeClientId# [9:127528:351] 2025-06-25T14:27:59.118787Z 8 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:158120:17] ServerId# [1:158127:4097] TabletId# 72057594037932033 PipeClientId# [8:158120:17] 2025-06-25T14:27:59.118894Z 7 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:157071:17] ServerId# [1:157078:3970] TabletId# 72057594037932033 PipeClientId# [7:157071:17] 2025-06-25T14:27:59.119023Z 6 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:134172:17] ServerId# [1:134179:1014] TabletId# 72057594037932033 PipeClientId# [6:134172:17] 2025-06-25T14:27:59.119144Z 5 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:154159:17] ServerId# [1:154167:3591] TabletId# 72057594037932033 PipeClientId# [5:154159:17] 2025-06-25T14:27:59.119264Z 4 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:163087:17] ServerId# [1:163097:4695] TabletId# 72057594037932033 PipeClientId# [4:163087:17] 2025-06-25T14:27:59.119455Z 3 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:153070:17] ServerId# [1:153080:3468] TabletId# 72057594037932033 PipeClientId# [3:153070:17] 2025-06-25T14:27:59.119595Z 2 00h28m40.805120s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:162122:17] ServerId# [1:162129:4586] TabletId# 72057594037932033 PipeClientId# [2:162122:17] Step = 971 SEND TEvPut with key [1:1:971:0:0:439384:0] TEvPutResult: TEvPutResult {Id# [1:1:971:0:0:439384:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99978} Step = 972 SEND TEvPut with key [1:1:972:0:0:252551:0] TEvPutResult: TEvPutResult {Id# [1:1:972:0:0:252551:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 973 SEND TEvPut with key [1:1:973:0:0:39982:0] 
TEvPutResult: TEvPutResult {Id# [1:1:973:0:0:39982:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Stop node 2 Step = 974 SEND TEvPut with key [1:1:974:0:0:526796:0] TEvPutResult: TEvPutResult {Id# [1:1:974:0:0:526796:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Start node 0 Step = 975 SEND TEvPut with key [1:1:975:0:0:337763:0] TEvPutResult: TEvPutResult {Id# [1:1:975:0:0:337763:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Stop node 2 Step = 976 SEND TEvPut with key [1:1:976:0:0:475740:0] TEvPutResult: TEvPutResult {Id# [1:1:976:0:0:475740:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 977 SEND TEvPut with key [1:1:977:0:0:169780:0] TEvPutResult: TEvPutResult {Id# [1:1:977:0:0:169780:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 978 SEND TEvPut with key [1:1:978:0:0:481535:0] TEvPutResult: TEvPutResult {Id# [1:1:978:0:0:481535:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 979 SEND TEvPut with key [1:1:979:0:0:24668:0] TEvPutResult: TEvPutResult {Id# [1:1:979:0:0:24668:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 980 SEND TEvPut with key [1:1:980:0:0:159890:0] TEvPutResult: TEvPutResult {Id# [1:1:980:0:0:159890:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 981 SEND TEvPut with key [1:1:981:0:0:111300:0] TEvPutResult: TEvPutResult {Id# [1:1:981:0:0:111300:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 982 SEND TEvPut with key [1:1:982:0:0:355914:0] TEvPutResult: TEvPutResult {Id# [1:1:982:0:0:355914:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99978} Step = 983 SEND TEvPut with key [1:1:983:0:0:399106:0] TEvPutResult: TEvPutResult {Id# [1:1:983:0:0:399106:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 984 SEND TEvPut with key [1:1:984:0:0:347759:0] TEvPutResult: TEvPutResult {Id# [1:1:984:0:0:347759:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 985 SEND TEvPut with key [1:1:985:0:0:261994:0] TEvPutResult: TEvPutResult {Id# [1:1:985:0:0:261994:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99978} Step = 986 SEND TEvPut with key [1:1:986:0:0:101043:0] TEvPutResult: TEvPutResult {Id# [1:1:986:0:0:101043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 987 SEND TEvPut with key [1:1:987:0:0:138774:0] TEvPutResult: TEvPutResult {Id# [1:1:987:0:0:138774:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99978} Step = 988 SEND TEvPut with key [1:1:988:0:0:441913:0] TEvPutResult: TEvPutResult {Id# [1:1:988:0:0:441913:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 989 SEND TEvPut with key [1:1:989:0:0:134469:0] TEvPutResult: TEvPutResult {Id# [1:1:989:0:0:134469:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99978} Step = 990 SEND TEvPut with key [1:1:990:0:0:123825:0] TEvPutResult: TEvPutResult {Id# [1:1:990:0:0:123825:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99978} Step = 991 SEND TEvPut with key [1:1:991:0:0:40387:0] TEvPutResult: TEvPutResult {Id# [1:1:991:0:0:40387:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 992 SEND TEvPut with key [1:1:992:0:0:193000:0] TEvPutResult: TEvPutResult {Id# [1:1:992:0:0:193000:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99978} Stop node 7 2025-06-25T14:28:01.618213Z 1 00h29m20.817168s :PIPE_SERVER ERROR: 
[72057594037932033] NodeDisconnected NodeId# 8 Step = 993 SEND TEvPut with key [1:1:993:0:0:455894:0] TEvPutResult: TEvPutResult {Id# [1:1:993:0:0:455894:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Compact vdisk 0 Step = 994 SEND TEvPut with key [1:1:994:0:0:54378:0] TEvPutResult: TEvPutResult {Id# [1:1:994:0:0:54378:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Compact vdisk 6 Step = 995 SEND TEvPut with key [1:1:995:0:0:487669:0] TEvPutResult: TEvPutResult {Id# [1:1:995:0:0:487669:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999829} Step = 996 SEND TEvPut with key [1:1:996:0:0:194641:0] TEvPutResult: TEvPutResult {Id# [1:1:996:0:0:194641:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 997 SEND TEvPut with key [1:1:997:0:0:74188:0] TEvPutResult: TEvPutResult {Id# [1:1:997:0:0:74188:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 998 SEND TEvPut with key [1:1:998:0:0:136082:0] TEvPutResult: TEvPutResult {Id# [1:1:998:0:0:136082:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 999 SEND TEvPut with key [1:1:999:0:0:145518:0] TEvPutResult: TEvPutResult {Id# [1:1:999:0:0:145518:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Starting nodes Start compaction 1 Start checking |75.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_locks/unittest >> TDataShardLocksTest::MvccTestAlreadyBrokenLocks [GOOD] >> TCertificateCheckerTest::CheckSubjectDns [GOOD] >> TDataShardLocksTest::Points_ManyTx_RemoveAll [GOOD] >> TDataShardLocksTest::UseLocksCache ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCcorrectProcessingOfLocalTablets [GOOD] Test command err: 2025-06-25T14:28:28.980489Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-25T14:28:28.987310Z node 1 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 1, client id = [1:9:2056], server id = [1:9:2056], tablet id = 2 2025-06-25T14:28:28.987361Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 2 is not local. 2025-06-25T14:28:28.987475Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-25T14:28:28.988087Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:8:2055], server id = [1:8:2055], tablet id = 1, status = ERROR 2025-06-25T14:28:28.988125Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 1 is not local. 2025-06-25T14:28:28.988233Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:11:2058], server id = [1:11:2058], tablet id = 4, status = ERROR 2025-06-25T14:28:28.988254Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 4 is not local. 2025-06-25T14:28:28.988305Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:10:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-25T14:28:28.988347Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:28.988400Z node 1 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 1, client id = [1:12:2059], server id = [1:12:2059], tablet id = 5 2025-06-25T14:28:28.988422Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 5 is not local. 
2025-06-25T14:28:28.988488Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 6 2025-06-25T14:28:28.988556Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:14:2061], server id = [1:14:2061], tablet id = 7, status = ERROR 2025-06-25T14:28:28.988591Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 7 is not local. 2025-06-25T14:28:28.988624Z node 1 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 1, client id = [1:15:2062], server id = [1:15:2062], tablet id = 8 2025-06-25T14:28:28.988640Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 8 is not local. 2025-06-25T14:28:28.988671Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-25T14:28:28.988756Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:13:2060], server id = [0:0:0], tablet id = 6, status = ERROR 2025-06-25T14:28:28.988775Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCcorrectProcessingTabletTimeout [GOOD] Test command err: 2025-06-25T14:28:29.034619Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-25T14:28:29.042893Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:10:2057], server id = [1:10:2057], tablet id = 3, status = OK 2025-06-25T14:28:29.043267Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:10:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.043384Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:11:2058], server id = [1:11:2058], tablet id = 4, status = OK 2025-06-25T14:28:29.043428Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:11:2058], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.043484Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:12:2059], server id = [1:12:2059], tablet id = 5, status = OK 2025-06-25T14:28:29.043518Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:12:2059], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.043589Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-25T14:28:29.043728Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:8:2055], server id = [1:8:2055], tablet id = 1, status = OK 2025-06-25T14:28:29.043764Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:8:2055], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.043807Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 5 2025-06-25T14:28:29.043918Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:9:2056], server id = [1:9:2056], tablet id = 2, status = OK 2025-06-25T14:28:29.043965Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:9:2056], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.044018Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:10:2057], server id = 
[0:0:0], tablet id = 3, status = ERROR 2025-06-25T14:28:29.044036Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:29.044080Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-25T14:28:29.044127Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:13:2060], server id = [1:13:2060], tablet id = 6, status = OK 2025-06-25T14:28:29.044160Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:13:2060], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.044217Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:12:2059], server id = [0:0:0], tablet id = 5, status = ERROR 2025-06-25T14:28:29.044236Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:29.044273Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:14:2061], server id = [1:14:2061], tablet id = 7, status = OK 2025-06-25T14:28:29.046720Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:14:2061], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.046850Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:8:2055], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-25T14:28:29.046877Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:29.046915Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 7 2025-06-25T14:28:29.046991Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:14:2061], server id = [0:0:0], tablet id = 7, status = ERROR 2025-06-25T14:28:29.047026Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:29.060605Z node 1 :STATISTICS DEBUG: service_impl.cpp:1028: Tablet 3 has already been processed 2025-06-25T14:28:29.060686Z node 1 :STATISTICS ERROR: service_impl.cpp:1032: No result was received from the tablet 4 2025-06-25T14:28:29.060726Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 4 is not local. 2025-06-25T14:28:29.060847Z node 1 :STATISTICS DEBUG: service_impl.cpp:1028: Tablet 5 has already been processed 2025-06-25T14:28:29.060878Z node 1 :STATISTICS DEBUG: service_impl.cpp:1028: Tablet 1 has already been processed 2025-06-25T14:28:29.060900Z node 1 :STATISTICS ERROR: service_impl.cpp:1032: No result was received from the tablet 2 2025-06-25T14:28:29.060931Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 2 is not local. 2025-06-25T14:28:29.060982Z node 1 :STATISTICS ERROR: service_impl.cpp:1032: No result was received from the tablet 6 2025-06-25T14:28:29.061002Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 6 is not local. 
2025-06-25T14:28:29.061024Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-25T14:28:29.061118Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-25T14:28:29.061150Z node 1 :STATISTICS DEBUG: service_impl.cpp:1021: Skip TEvStatisticsRequestTimeout 2025-06-25T14:28:29.061218Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:11:2058], server id = [0:0:0], tablet id = 4, status = ERROR 2025-06-25T14:28:29.061248Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:29.061309Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:9:2056], server id = [0:0:0], tablet id = 2, status = ERROR 2025-06-25T14:28:29.061329Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:29.061354Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:13:2060], server id = [0:0:0], tablet id = 6, status = ERROR 2025-06-25T14:28:29.061373Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected >> AggregateStatistics::ShouldBeCorrectlyAggregateStatisticsFromAllNodes [GOOD] >> TDataShardLocksTest::Points_ManyTx_BreakAll [GOOD] >> TDataShardLocksTest::Points_ManyTx_BreakHalf_RemoveHalf >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateConnectionWithServiceAccount |75.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/certificate_check/ut/unittest >> TCertificateCheckerTest::CheckSubjectDns [GOOD] >> YdbProxy::CreateTable >> TDataShardLocksTest::Points_ManyTx_BreakHalf_RemoveHalf [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCorrectlyAggregateStatisticsFromAllNodes [GOOD] Test command err: 2025-06-25T14:28:29.555924Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-25T14:28:29.556955Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [1:38:2058], tablet id = 1, status = OK 2025-06-25T14:28:29.557242Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:38:2058], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.557464Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:39:2059], server id = [1:39:2059], tablet id = 2, status = OK 2025-06-25T14:28:29.557508Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:39:2059], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.557811Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-25T14:28:29.557946Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-25T14:28:29.558195Z node 3 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-25T14:28:29.558296Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:40:2060], server id = [1:40:2060], tablet id = 3, status = OK 2025-06-25T14:28:29.558364Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: 
TEvStatisticsRequest send, client id = [1:40:2060], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.558425Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 2 2025-06-25T14:28:29.558587Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:44:2057], server id = [2:44:2057], tablet id = 4, status = OK 2025-06-25T14:28:29.558638Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:44:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.558681Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-25T14:28:29.558700Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:29.558851Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 1, current Round: 0 2025-06-25T14:28:29.558914Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:46:2057], server id = [3:46:2057], tablet id = 5, status = OK 2025-06-25T14:28:29.558946Z node 3 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [3:46:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.558991Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 4 2025-06-25T14:28:29.559088Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:39:2059], server id = [0:0:0], tablet id = 2, status = ERROR 2025-06-25T14:28:29.559117Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:29.559163Z node 3 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 5 2025-06-25T14:28:29.559217Z node 3 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-25T14:28:29.559294Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:44:2057], server id = [0:0:0], tablet id = 4, status = ERROR 2025-06-25T14:28:29.559310Z node 2 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:29.559350Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-25T14:28:29.559422Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:49:2057], server id = [4:49:2057], tablet id = 6, status = OK 2025-06-25T14:28:29.559460Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:49:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-25T14:28:29.559505Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:40:2060], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-25T14:28:29.559521Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:29.559543Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:46:2057], server id = [0:0:0], tablet id = 5, status = ERROR 2025-06-25T14:28:29.559560Z node 3 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:29.559659Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 6 2025-06-25T14:28:29.559702Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 
2025-06-25T14:28:29.559789Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:49:2057], server id = [0:0:0], tablet id = 6, status = ERROR 2025-06-25T14:28:29.559820Z node 4 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-25T14:28:29.559952Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 3 2025-06-25T14:28:29.560139Z node 2 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 4 2025-06-25T14:28:29.560179Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-25T14:28:29.560381Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 2 2025-06-25T14:28:29.560461Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 >> PartitionEndWatcher::EmptyPartition [GOOD] >> PartitionEndWatcher::AfterCommit >> PartitionEndWatcher::AfterCommit [GOOD] >> YdbProxy::AlterTable >> THiveTest::TestHiveBalancerIgnoreTablet [GOOD] >> THiveTest::TestHiveBalancerNodeRestarts |75.5%| [TA] $(B)/ydb/core/security/certificate_check/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> YdbProxy::DescribePath >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListConnections |75.5%| [TA] $(B)/ydb/core/statistics/service/ut/ut_aggregation/test-results/unittest/{meta.json ... results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] |75.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_locks/unittest >> TDataShardLocksTest::Points_ManyTx_BreakHalf_RemoveHalf [GOOD] >> YdbProxy::CreateTopic >> YdbProxy::DropTable >> YdbProxy::RemoveDirectory >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListConnections [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeConnection >> YdbProxy::ReadTopic >> YdbProxy::ListDirectory >> YdbProxy::CopyTable >> TSchemeShardMoveTest::MoveMigratedTable >> TSchemeShardMoveTest::Replace >> Cdc::DocApi[PqRunner] [GOOD] >> Cdc::DocApi[YdsRunner] >> TSchemeShardMoveTest::MoveTableForBackup >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyConnection >> TSchemeShardMoveTest::MoveIndexSameDst >> Cdc::KeysOnlyLog[YdsRunner] [GOOD] >> Cdc::KeysOnlyLog[TopicRunner] >> TSchemeShardMoveTest::Boot >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyConnectionWithServiceAccount >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithSyncIndex [GOOD] >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithAsyncIndex >> TSchemeShardMoveTest::MoveTableForBackup [GOOD] >> TSchemeShardMoveTest::MoveTableWithSequence >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors [GOOD] >> Cdc::UuidExchange[YdsRunner] [GOOD] >> Cdc::UuidExchange[TopicRunner] >> TSchemeShardMoveTest::MoveMigratedTable [GOOD] >> TSchemeShardMoveTest::MoveOldTableWithIndex >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteConnection >> YdbProxy::DescribePath [GOOD] >> YdbProxy::DescribeTable >> 
TSchemeShardMoveTest::Boot [GOOD] >> TSchemeShardMoveTest::AsyncIndexWithSyncInFly >> YdbProxy::CreateTable [GOOD] >> YdbProxy::CreateCdcStream >> TSchemeShardMoveTest::MoveIndexSameDst [GOOD] >> TSchemeShardMoveTest::MoveIntoBuildingIndex >> YdbProxy::AlterTable [GOOD] >> TSchemeShardMoveTest::MoveTableWithSequence [GOOD] >> TSchemeShardMoveTest::Replace [GOOD] >> TSchemeShardMoveTest::MoveOldTableWithIndex [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteConnection [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendTestConnection >> YdbProxy::CreateTopic [GOOD] >> YdbProxy::DescribeConsumer >> YdbProxy::RemoveDirectory [GOOD] >> YdbProxy::StaticCreds >> YdbProxy::ListDirectory [GOOD] >> YdbProxy::DropTopic |75.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::AlterTable [GOOD] Test command err: 2025-06-25T14:28:30.451486Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893785155822336:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:30.451860Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c36/r3tmp/tmpaPR1VR/pdisk_1.dat 2025-06-25T14:28:30.843865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:30.843968Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:30.848035Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:30.851707Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:30.872410Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893785155822173:2080] 1750861710413650 != 1750861710413653 TClient is connected to server localhost:29118 TServer::EnableGrpc on GrpcPort 26986, node 1 2025-06-25T14:28:31.168130Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:31.168154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:31.168161Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:31.168280Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29118 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:28:31.455387Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:31.543405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:33.308909Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893798040724699:2301] txid# 281474976710658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-25T14:28:33.321737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:33.436203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:28:33.456573Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893798040724816:2381] txid# 281474976710661, issues: { message: "Can\'t drop unknown column: \'extra\'" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::Replace [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:32.138137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:32.138238Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:32.138293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:32.138345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:32.138399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:32.138432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:32.138492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:32.138613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:32.139495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:32.139859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:32.229424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:32.229511Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:32.252071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:32.252587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:32.252765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:32.262814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:32.263226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:32.263929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:32.264217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:32.276925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:32.277094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:32.278059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:32.278118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:32.278245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:32.278287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:32.278320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:32.278388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.284630Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:32.446919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:32.447163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.447394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:32.447438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:32.447684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:32.447754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:32.450456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:32.450672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:32.450861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.450917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:32.450958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:32.450993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:32.453140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.453211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:32.453251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:32.455068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.455115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.455165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:32.455249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:32.458825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:32.461570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:32.461734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:32.462692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:32.462830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:32.462874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:32.463141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:32.463206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T14:28:32.463373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:32.463449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:32.465786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:32.465833Z node 1 :FLAT_TX_SCHEMESHARD ... one PersistRemovePath for 5 paths, skipped 0, left 2 candidates, at schemeshard: 72057594046678944 2025-06-25T14:28:34.242507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 2 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:28:34.242563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 16], at schemeshard: 72057594046678944 2025-06-25T14:28:34.242654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 15] was 1 2025-06-25T14:28:34.242701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 15], at schemeshard: 72057594046678944 2025-06-25T14:28:34.242746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 2 2025-06-25T14:28:34.242790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 14], at schemeshard: 72057594046678944 2025-06-25T14:28:34.242836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 1 2025-06-25T14:28:34.242864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 13], at schemeshard: 72057594046678944 2025-06-25T14:28:34.242901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 1 2025-06-25T14:28:34.242930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 12], at schemeshard: 72057594046678944 2025-06-25T14:28:34.242965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:28:34.247060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:28:34.247125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409546 2025-06-25T14:28:34.247224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:28:34.247250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:28:34.247358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:28:34.247396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409548 2025-06-25T14:28:34.247528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 5 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted 2025-06-25T14:28:34.248112Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-25T14:28:34.248175Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-25T14:28:34.248236Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 2025-06-25T14:28:34.248874Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Src" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:34.249087Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Src" took 244us result status StatusPathDoesNotExist 2025-06-25T14:28:34.249268Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Src\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Src" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:28:34.249834Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dst" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:34.250086Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dst" took 251us result status StatusSuccess 2025-06-25T14:28:34.250572Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess 
Path: "/MyRoot/Dst" PathDescription { Self { Name: "Dst" PathId: 22 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000006 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Async" LocalPathId: 23 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableIndexes { Name: "Sync" LocalPathId: 25 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 22 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:34.251485Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:34.251685Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 174us result status StatusSuccess 2025-06-25T14:28:34.252113Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 28 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 28 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 26 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 22 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000006 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> YdbProxy::DropTable [GOOD] >> YdbProxy::DescribeTopic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveTableWithSequence [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:32.426766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:32.426858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:32.426891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:32.426923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:32.426961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:32.426987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:32.427031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:32.427089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:32.427834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:32.428140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:32.494538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:32.494604Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:32.509083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:32.509457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:32.509591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:32.513752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:32.514008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:32.514557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:32.514789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:32.517494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:32.517628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:32.518692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:32.518742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:32.518892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:32.518940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:32.518979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:32.519049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.524636Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:32.654098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:32.654314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.654509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:32.654547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:32.654790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:32.654854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:32.660501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:32.660690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:32.660873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.660923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:32.660959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:32.661007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:32.663609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.663690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:32.663744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:32.665997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.666057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.666112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:32.666173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:32.669572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:32.671610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:32.671777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:32.672802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:32.672933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:32.672975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:32.673241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:32.673293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:32.673432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:32.673533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:32.675681Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:32.675724Z node 1 :FLAT_TX_SCHEMESHARD ... DoNotify send TEvNotifyTxCompletionResult to actorId: [2:374:2341] message: TxId: 102 2025-06-25T14:28:34.369746Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-25T14:28:34.369793Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:28:34.369829Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:28:34.369973Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:28:34.370014Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:28:34.370052Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:1 2025-06-25T14:28:34.370072Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:1 2025-06-25T14:28:34.370119Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-25T14:28:34.370144Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:28:34.370573Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:28:34.370623Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:28:34.370693Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:28:34.370733Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:28:34.370770Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:28:34.373040Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:28:34.373102Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:472:2426] 2025-06-25T14:28:34.373499Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-25T14:28:34.403017Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme 
DoExecute, record: Path: "/MyRoot/Table/myseq" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:34.403286Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/myseq" took 314us result status StatusPathDoesNotExist 2025-06-25T14:28:34.403457Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/myseq\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Table/myseq" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:28:34.403896Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:34.404076Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 191us result status StatusPathDoesNotExist 2025-06-25T14:28:34.404239Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Table" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:28:34.404653Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:34.404885Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove" took 233us result status StatusSuccess 2025-06-25T14:28:34.405360Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove" PathDescription { Self { Name: "TableMove" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: true } Table { Name: "TableMove" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 DefaultFromSequence: "myseq" NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 IsBackup: false Sequences { Name: "myseq" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 2 SequenceShard: 72075186233409546 MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 Cache: 1 Increment: 1 Cycle: false DataType: "Int64" } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:34.405918Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove/myseq" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:28:34.406094Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove/myseq" took 194us result status StatusSuccess 2025-06-25T14:28:34.406382Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove/myseq" PathDescription { Self { Name: "myseq" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeSequence CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 
PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SequenceVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SequenceDescription { Name: "myseq" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 2 SequenceShard: 72075186233409546 MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 Cache: 1 Increment: 1 Cycle: false DataType: "Int64" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardMoveTest::AsyncIndexWithSyncInFly [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveOldTableWithIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:32.023376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:32.023463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:32.023503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:32.023537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:32.023575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:32.023609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:32.023657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:32.023711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:32.024473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:32.024785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:32.098992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:32.099058Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:32.121290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:32.121771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:32.121931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:32.129129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:32.129477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:32.130099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:32.130362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:32.137080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:32.137249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:32.138428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:32.138494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:32.138637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:32.138688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:32.138727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:32.138808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.147737Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:32.287752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: 
Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:32.288000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.288221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:32.288266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:32.288535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:32.288605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:32.292239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:32.292426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:32.292603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.292657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:32.292689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:32.292738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:32.294704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.294749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:32.294787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:32.296300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.296403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:32.296459Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:32.296515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:32.299607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:32.301418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:32.301574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:32.302629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:32.302747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:32.302790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:32.303047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:32.303094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:32.303264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:32.303348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:32.305482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:32.305521Z node 1 :FLAT_TX_SCHEMESHARD ... 
ESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:28:34.469982Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 323 RawX2: 8589936897 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:28:34.470041Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:2, shardIdx: 72057594046678944:2, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:34.470070Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-25T14:28:34.470099Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:2, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:28:34.470127Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:2 129 -> 240 2025-06-25T14:28:34.470917Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 330 RawX2: 8589936902 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:28:34.470947Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-25T14:28:34.471026Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 330 RawX2: 8589936902 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:28:34.471061Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:28:34.471105Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 330 RawX2: 8589936902 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:28:34.471135Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:34.471154Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:28:34.471172Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:28:34.471193Z 
node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:28:34.472814Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-25T14:28:34.479247Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:28:34.479635Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-25T14:28:34.480613Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-25T14:28:34.480677Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 102:2 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:34.480732Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 102:2 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 4], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-25T14:28:34.480860Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:2 progress is 2/3 2025-06-25T14:28:34.480908Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 2/3 2025-06-25T14:28:34.480964Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:2 progress is 2/3 2025-06-25T14:28:34.481014Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 2/3 2025-06-25T14:28:34.481060Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/3, is published: true 2025-06-25T14:28:34.481866Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:28:34.482116Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:28:34.482157Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:34.482195Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 102:0 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 2], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-25T14:28:34.482266Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 3/3 2025-06-25T14:28:34.482297Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 3/3 2025-06-25T14:28:34.482330Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 3/3 2025-06-25T14:28:34.482357Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 
3/3 2025-06-25T14:28:34.482387Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/3, is published: true 2025-06-25T14:28:34.482475Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:379:2345] message: TxId: 102 2025-06-25T14:28:34.482533Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 3/3 2025-06-25T14:28:34.482583Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:28:34.482628Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:28:34.482757Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-25T14:28:34.482798Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:28:34.482836Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:1 2025-06-25T14:28:34.482857Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:1 2025-06-25T14:28:34.482889Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-25T14:28:34.482913Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:28:34.482939Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:2 2025-06-25T14:28:34.482962Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:2 2025-06-25T14:28:34.483010Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-25T14:28:34.483036Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-25T14:28:34.483430Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:28:34.483483Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-25T14:28:34.483556Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:28:34.483598Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:28:34.483632Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: 
DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:28:34.483667Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:28:34.483702Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:28:34.487214Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:28:34.487273Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:476:2435] 2025-06-25T14:28:34.487448Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 3 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendTestConnectionWithServiceAccount >> TSchemeShardMoveTest::MoveIntoBuildingIndex [GOOD] >> YdbProxy::CopyTable [GOOD] >> YdbProxy::CopyTables >> TGRpcRateLimiterTest::AcquireResourceManyUsedGrpcApiWithCancelAfter [GOOD] >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApiWithCancelAfter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::AsyncIndexWithSyncInFly [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:33.176975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:33.177060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:33.177103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:33.177140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:33.177181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:33.177204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:33.177251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:33.177353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, 
Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:33.178079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:33.178413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:33.265075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:33.265126Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:33.281588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:33.282019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:33.282190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:33.288984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:33.289312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:33.289933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:33.290170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:33.293334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:33.293481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:33.294625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:33.294688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:33.294836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:33.294882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:33.294919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:33.294997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:33.302955Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:33.461413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:33.461631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:33.461836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:33.461879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:33.462103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:33.462173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:33.464305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:33.464510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:33.464675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:33.464723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:33.464764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:33.464820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:33.467060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:33.467113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:33.467153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:33.468893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:33.468934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:33.469004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:33.469065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:33.481515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:33.483722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:33.483906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:33.484908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:33.485039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:33.485097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:33.485372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:33.485419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:33.485588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:33.485701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:33.488143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:33.488180Z node 1 :FLAT_TX_SCHEMESHARD ... 
0 } Origin: 72075186233409546 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-25T14:28:34.984417Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 103:2, shardIdx: 72057594046678944:2, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:34.984453Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 103:2, at schemeshard: 72057594046678944 2025-06-25T14:28:34.984489Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 103:2, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:28:34.984529Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 103:2 129 -> 240 2025-06-25T14:28:34.985593Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 329 RawX2: 8589936902 } Origin: 72075186233409547 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-25T14:28:34.985626Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 103, tablet: 72075186233409547, partId: 0 2025-06-25T14:28:34.985720Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: Source { RawX1: 329 RawX2: 8589936902 } Origin: 72075186233409547 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-25T14:28:34.985768Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:28:34.985830Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 329 RawX2: 8589936902 } Origin: 72075186233409547 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-25T14:28:34.985862Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 103:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:34.985880Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:28:34.985898Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 103:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:28:34.985920Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 103:0 129 -> 240 2025-06-25T14:28:34.987848Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:2, at schemeshard: 72057594046678944 2025-06-25T14:28:34.990292Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 
72057594046678944 2025-06-25T14:28:34.992025Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:2, at schemeshard: 72057594046678944 2025-06-25T14:28:34.992423Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:2, at schemeshard: 72057594046678944 2025-06-25T14:28:34.992458Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 103:2 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:34.992496Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 103:2 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 4], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-25T14:28:34.992575Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:2 progress is 2/3 2025-06-25T14:28:34.992608Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 2/3 2025-06-25T14:28:34.992668Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:2 progress is 2/3 2025-06-25T14:28:34.992694Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 2/3 2025-06-25T14:28:34.992733Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 2/3, is published: true 2025-06-25T14:28:34.992981Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:28:34.993183Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:28:34.993214Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:34.993240Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 103:0 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 2], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-25T14:28:34.993293Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 3/3 2025-06-25T14:28:34.993312Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 3/3 2025-06-25T14:28:34.993337Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 3/3 2025-06-25T14:28:34.993354Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 3/3 2025-06-25T14:28:34.993373Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 3/3, is published: true 2025-06-25T14:28:34.993404Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 3/3 2025-06-25T14:28:34.993443Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: 
Operation and all the parts is done, operation id: 103:0 2025-06-25T14:28:34.993480Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:28:34.993593Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-25T14:28:34.993626Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:28:34.993661Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:1 2025-06-25T14:28:34.993680Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:1 2025-06-25T14:28:34.993702Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-25T14:28:34.993719Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:28:34.993737Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:2 2025-06-25T14:28:34.993752Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:2 2025-06-25T14:28:34.993790Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-25T14:28:34.993810Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-25T14:28:34.994286Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:28:34.994329Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-25T14:28:34.994385Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:28:34.994420Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:28:34.994446Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:28:34.994468Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:28:34.994493Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:28:35.003019Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: 
TTxCleanDroppedPaths Complete, done PersistRemovePath for 3 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:28:35.003555Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [2:271:2260] Handle TEvGetProxyServicesRequest TestWaitNotification wait txId: 103 2025-06-25T14:28:35.053246Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:28:35.053312Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:28:35.053690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:28:35.053765Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:28:35.053798Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:674:2556] TestWaitNotification: OK eventTxId 103 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveIntoBuildingIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:32.911148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:32.911226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:32.911286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:32.911324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:32.911365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:32.911395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:32.911451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:32.911513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:32.912341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:32.912691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:32.980534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:32.980614Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:33.002953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:33.003344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:33.003483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:33.012497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:33.012874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:33.013403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:33.013629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:33.018163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:33.018343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:33.019620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:33.019682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:33.019868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:33.019926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:33.019975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:33.020069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:33.025958Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:33.179057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:33.179285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at 
schemeshard: 72057594046678944 2025-06-25T14:28:33.179486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:33.179530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:33.179725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:33.179808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:33.185350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:33.185537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:33.185706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:33.185771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:33.185808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:33.185862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:33.189380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:33.189459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:33.189506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:33.191560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:33.191615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:33.191673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:33.191762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:33.195309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to 
coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:33.197071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:33.197241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:33.198042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:33.198165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:33.198219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:33.198481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:33.198529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:33.198682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:33.198753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:33.200555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:33.200597Z node 1 :FLAT_TX_SCHEMESHARD ... 
saction is registered, txId: 281474976710760, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-06-25T14:28:35.014475Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:35.014541Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 8589936750 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:35.014573Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-25T14:28:35.014610Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710760:0 128 -> 240 2025-06-25T14:28:35.016049Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-06-25T14:28:35.016101Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-06-25T14:28:35.016154Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-25T14:28:35.016171Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:28:35.016190Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-25T14:28:35.016204Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:28:35.016222Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-25T14:28:35.016252Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:128:2152] message: TxId: 281474976710760 2025-06-25T14:28:35.016277Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:28:35.016302Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-25T14:28:35.016342Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710760:0 2025-06-25T14:28:35.016380Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-25T14:28:35.017428Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-25T14:28:35.017483Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:6832: Message: TxId: 281474976710760 2025-06-25T14:28:35.017537Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710760 2025-06-25T14:28:35.017587Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: Sync, IndexColumn: value0, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:450:2409], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710760 2025-06-25T14:28:35.018520Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking 2025-06-25T14:28:35.018575Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: Sync, IndexColumn: value0, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:450:2409], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:28:35.018613Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-25T14:28:35.019812Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-25T14:28:35.019880Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: Sync, IndexColumn: value0, State: Done, IsBroken: 0, 
IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:450:2409], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:28:35.019915Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:333: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-25T14:28:35.020036Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:28:35.020070Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:628:2575] TestWaitNotification: OK eventTxId 102 2025-06-25T14:28:35.020624Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:35.020845Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 254us result status StatusSuccess 2025-06-25T14:28:35.021293Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "SomeIndex" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableIndexes { Name: "Sync" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendTestConnectionWithServiceAccount [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateBinding >> Cdc::KeysOnlyLog[TopicRunner] [GOOD] >> Cdc::NewAndOldImagesLog[PqRunner] >> TSchemeShardMoveTest::MoveIndex >> TSchemeShardMoveTest::TwoTables >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListBindings >> TSchemeShardMoveTest::ResetCachedPath >> ExternalBlobsMultipleChannels::Simple >> TSchemeShardMoveTest::Reject >> THiveTest::TestHiveBalancerNodeRestarts [GOOD] >> THiveTest::TestHiveBalancerDifferentResources >> YdbProxy::CreateCdcStream [GOOD] >> ExternalBlobsMultipleChannels::WithNewColumnFamilyAndCompaction >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithAsyncIndex [GOOD] >> AsyncIndexChangeExchange::SenderShouldShakeHandsOnce >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] >> Cdc::UuidExchange[TopicRunner] [GOOD] >> Cdc::UpdatesLog[PqRunner] |75.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors [GOOD] >> YdbProxy::DescribeTable [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendListBindings [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeBinding >> ExternalBlobsMultipleChannels::SingleChannel >> YdbProxy::StaticCreds [GOOD] >> YdbProxy::DropTopic [GOOD] >> YdbProxy::DescribeConsumer [GOOD] >> YdbSdkSessionsPool::StressTestAsync/0 [GOOD] >> YdbSdkSessionsPool::StressTestAsync/1 >> TSchemeShardMoveTest::TwoTables [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::CreateCdcStream [GOOD] Test command err: 2025-06-25T14:28:30.179741Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893788187816455:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:30.180013Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c38/r3tmp/tmpvpKIiq/pdisk_1.dat 2025-06-25T14:28:30.609183Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893788187816333:2080] 1750861710139977 != 1750861710139980 2025-06-25T14:28:30.616043Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:30.658892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:30.659006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:30.664656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4481 TServer::EnableGrpc on GrpcPort 6897, node 1 2025-06-25T14:28:30.968864Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:30.968893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:30.968902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:30.969041Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4481 2025-06-25T14:28:31.197796Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:31.342679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:31.358393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:28:33.205532Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893801072718870:2304] txid# 281474976710658, issues: { message: "Column key has wrong key type Float" severity: 1 } 2025-06-25T14:28:33.223652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:33.343693Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893801072718958:2364] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/table\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:34.015024Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893804060683236:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:34.015099Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c38/r3tmp/tmpQzuxoH/pdisk_1.dat 2025-06-25T14:28:34.126589Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:34.162914Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:34.163007Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:34.164510Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8930 TServer::EnableGrpc on GrpcPort 2131, node 2 2025-06-25T14:28:34.309342Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:34.309365Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:34.309372Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:34.309483Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8930 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:28:34.594108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:35.019258Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:36.713138Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:36.834698Z node 2 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][2:7519893812650618608:2302] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:4:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-25T14:28:36.906247Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893812650618674:2445] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/table/updates\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeCdcStream, state: EPathStateNoChanges)" severity: 1 } >> TSchemeShardMoveTest::MoveIndex [GOOD] >> TSchemeShardMoveTest::MoveIndexDoesNonExisted >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyBinding >> YdbProxy::DescribeTopic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DescribeTable [GOOD] Test command err: 2025-06-25T14:28:30.688000Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893785768171177:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:30.688044Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c2a/r3tmp/tmp89F8Ds/pdisk_1.dat 2025-06-25T14:28:31.038751Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893785768170987:2080] 1750861710666916 != 1750861710666919 2025-06-25T14:28:31.041931Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:31.079770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:31.079879Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:31.098094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3323 TServer::EnableGrpc on GrpcPort 19591, node 1 2025-06-25T14:28:31.374963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:31.374980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:31.374984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:31.375089Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3323 2025-06-25T14:28:31.684487Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:31.808929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:33.983969Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893799950323705:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:33.998943Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c2a/r3tmp/tmp3jLuru/pdisk_1.dat 2025-06-25T14:28:34.154857Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:34.168413Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:34.168491Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:34.170275Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23844 TServer::EnableGrpc on GrpcPort 61094, node 2 2025-06-25T14:28:34.359963Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:34.359983Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:34.359990Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:34.360073Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23844 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:34.648693Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:34.983688Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:37.012419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) |75.5%| [TA] $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::TwoTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:37.461257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:37.461343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:37.461380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:37.461431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:37.461471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:37.461496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:37.461543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:37.461600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:37.462301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:37.462587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:37.549615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:37.549686Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:37.567787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:37.568288Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:37.568472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:37.574965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:37.575380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:37.576112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:37.576416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:37.580080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:37.580285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:37.581650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:37.581722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:37.581893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:37.581944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:37.581975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:37.582043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.587964Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:37.725621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:37.725831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.726060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:37.726104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 
2025-06-25T14:28:37.726358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:37.726424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:37.729215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:37.729359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:37.729488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.729520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:37.729545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:37.729571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:37.732184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.732230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:37.732258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:37.735254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.735299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.735342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:37.735423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:37.738728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:37.740651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 
cookie: 0:1 msg type: 269090816 2025-06-25T14:28:37.740822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:37.741659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:37.741772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:37.741820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:37.742067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:37.742124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:37.742276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:37.742347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:37.744237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:37.744278Z node 1 :FLAT_TX_SCHEMESHARD ... 
33845Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:38.233978Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table1" took 145us result status StatusPathDoesNotExist 2025-06-25T14:28:38.234133Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Table1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:28:38.234450Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:38.234604Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove1" took 137us result status StatusSuccess 2025-06-25T14:28:38.234861Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove1" PathDescription { Self { Name: "TableMove1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "TableMove1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 
Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:38.235369Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:38.235509Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table2" took 153us result status StatusPathDoesNotExist 2025-06-25T14:28:38.235607Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table2\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Table2" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:28:38.235990Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:38.236110Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove2" took 121us result status StatusSuccess 2025-06-25T14:28:38.236420Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove2" PathDescription { Self { Name: "TableMove2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } 
ChildrenExist: false } Table { Name: "TableMove2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:38.236907Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:38.237030Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 138us result status StatusSuccess 2025-06-25T14:28:38.237312Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 15 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 15 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 13 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "TableMove1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "TableMove2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 
ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardMoveTest::ResetCachedPath [GOOD] >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_10 [GOOD] >> YdbProxy::CopyTables [GOOD] >> YdbProxy::AlterTopic >> TSchemeShardMoveTest::Reject [GOOD] >> TSchemeShardMoveTest::OneTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::StaticCreds [GOOD] Test command err: 2025-06-25T14:28:31.520635Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893791773328617:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:31.526616Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c0f/r3tmp/tmpAzmUIk/pdisk_1.dat 2025-06-25T14:28:31.977425Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893791773328513:2080] 1750861711500815 != 1750861711500818 2025-06-25T14:28:31.998294Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:32.003768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:32.003901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:32.006918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5681 TServer::EnableGrpc on GrpcPort 13564, node 1 2025-06-25T14:28:32.312520Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:32.312548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:32.312555Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:28:32.312704Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:32.528516Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5681 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:32.693807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:32.750153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:28:32.775195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) 2025-06-25T14:28:32.796946Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893796068296448:2323] txid# 281474976710660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-25T14:28:35.041170Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893808132282422:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:35.041243Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c0f/r3tmp/tmpSDFKet/pdisk_1.dat 2025-06-25T14:28:35.171966Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:35.196906Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:35.197033Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:35.199799Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, 
(0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61958 TServer::EnableGrpc on GrpcPort 1443, node 2 2025-06-25T14:28:35.439713Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:35.439740Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:35.439749Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:35.439875Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61958 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:35.758414Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:35.764993Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:28:35.779565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750861715806 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user1" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 1 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 ... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750861715806 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user1" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 ... 
(TRUNCATED) 2025-06-25T14:28:36.057115Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DropTopic [GOOD] Test command err: 2025-06-25T14:28:31.716788Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893789713561604:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:31.721451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bfc/r3tmp/tmpNwgEQ9/pdisk_1.dat 2025-06-25T14:28:32.179885Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:32.234127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:32.234216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:32.244700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1620 TServer::EnableGrpc on GrpcPort 20953, node 1 2025-06-25T14:28:32.497524Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:32.497546Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:32.497553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:32.497679Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1620 2025-06-25T14:28:32.722290Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:32.914318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:34.984691Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893801940756293:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:34.984814Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bfc/r3tmp/tmpT5jONG/pdisk_1.dat 2025-06-25T14:28:35.080509Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:35.120245Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:35.120344Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:35.122273Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28389 TServer::EnableGrpc on GrpcPort 24803, node 2 2025-06-25T14:28:35.314628Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:35.314649Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:35.314656Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:35.314755Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28389 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:35.603372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:35.611166Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:28:35.791299Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-25T14:28:35.822221Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-25T14:28:35.823388Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-25T14:28:35.835985Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893806235724314:2394] txid# 281474976715660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteBinding >> TSchemeShardMoveTest::MoveIndexDoesNonExisted [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DescribeConsumer [GOOD] Test command err: 2025-06-25T14:28:31.284055Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893792761417058:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:31.286516Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c19/r3tmp/tmp05XWSM/pdisk_1.dat 2025-06-25T14:28:31.689394Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:31.723694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:31.723802Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:31.727219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21333 TServer::EnableGrpc on GrpcPort 10558, node 1 2025-06-25T14:28:31.925882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:31.925912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:31.925920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty 
maybe) 2025-06-25T14:28:31.926067Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21333 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:32.293564Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:32.300360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:32.319672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:28:32.369890Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893797056384906:2292] txid# 281474976710658, issues: { message: "Invalid retention period: specified: 31536000s, min: 1s, max: 2678400s" severity: 1 } 2025-06-25T14:28:34.958328Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893802605368005:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:34.958403Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c19/r3tmp/tmpmn7Z0E/pdisk_1.dat 2025-06-25T14:28:35.057459Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:35.061422Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893802605367982:2080] 1750861714957814 != 1750861714957817 2025-06-25T14:28:35.089212Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:35.089310Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:35.093804Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21926 TServer::EnableGrpc on GrpcPort 31038, node 2 
2025-06-25T14:28:35.270182Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:35.270209Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:35.270216Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:35.270310Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21926 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:35.596179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:35.606623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 |75.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> YdbProxy::ReadTopic [GOOD] >> YdbProxy::ReadNonExistentTopic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::ResetCachedPath [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:37.524918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:37.524998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:37.525037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:37.525090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:37.525132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:37.525157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:37.525198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:37.525268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:37.526009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:37.526301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:37.604298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:37.604373Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:37.622575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:37.623021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:37.623183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:37.628944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: 
TTxUpgradeSchema.Complete 2025-06-25T14:28:37.629321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:37.629911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:37.630179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:37.633664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:37.633835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:37.634949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:37.635032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:37.635180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:37.635224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:37.635265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:37.635368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.642611Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:37.793225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:37.793456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.793692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:37.793743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:37.793914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:37.793963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:37.795956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:37.796113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:37.796231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.796263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:37.796292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:37.796339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:37.800341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.800401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:37.800441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:37.804510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.804569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.804610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:37.804691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:37.817675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:37.820181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:37.820397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send 
Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:37.821447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:37.821584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:37.821635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:37.821944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:37.822022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:37.822202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:37.822304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:37.824666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:37.824729Z node 1 :FLAT_TX_SCHEMESHARD ... 
ose operationId# 105:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:38.676875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 105 ready parts: 1/1 2025-06-25T14:28:38.676981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409549 Flags: 2 } ExecLevel: 0 TxId: 105 MinStep: 1 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:38.678451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 105:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:105 msg type: 269090816 2025-06-25T14:28:38.678585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 105, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 105 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 105 at step: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72075186233409549 for txId: 105 at step: 5000004 2025-06-25T14:28:38.678994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:38.679103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 105 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:38.679177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:357: TAlterTable TPropose operationId# 105:0 HandleReply TEvOperationPlan, operationId: 105:0, stepId: 5000004, at schemeshard: 72057594046678944 2025-06-25T14:28:38.679530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 105:0 128 -> 129 2025-06-25T14:28:38.679672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000004 2025-06-25T14:28:38.689547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:38.689607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:28:38.689846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:38.689914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-06-25T14:28:38.690320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 
2025-06-25T14:28:38.690382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 105:0 ProgressState at tablet: 72057594046678944 FAKE_COORDINATOR: Erasing txId 105 2025-06-25T14:28:38.691384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:28:38.691536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:28:38.691585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:28:38.691634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 8 2025-06-25T14:28:38.691682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:28:38.691796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 105, ready parts: 0/1, is published: true 2025-06-25T14:28:38.693826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 105 Step: 5000004 OrderId: 105 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1274 } } CommitVersion { Step: 5000004 TxId: 105 } 2025-06-25T14:28:38.693892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409549, partId: 0 2025-06-25T14:28:38.694043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 105 Step: 5000004 OrderId: 105 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1274 } } CommitVersion { Step: 5000004 TxId: 105 } 2025-06-25T14:28:38.694150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 105 Step: 5000004 OrderId: 105 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1274 } } CommitVersion { Step: 5000004 TxId: 105 } 2025-06-25T14:28:38.695530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 674 RawX2: 4294969906 } Origin: 72075186233409549 State: 2 TxId: 105 Step: 0 Generation: 2 2025-06-25T14:28:38.695589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409549, partId: 0 2025-06-25T14:28:38.695742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: Source { RawX1: 674 RawX2: 4294969906 } Origin: 72075186233409549 State: 2 TxId: 105 Step: 0 Generation: 2 2025-06-25T14:28:38.695818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 105:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:28:38.695946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 105:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 674 RawX2: 4294969906 } Origin: 72075186233409549 State: 2 TxId: 105 Step: 0 Generation: 2 2025-06-25T14:28:38.696044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 105:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:38.696083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-25T14:28:38.696121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 105:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-25T14:28:38.696162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 105:0 129 -> 240 2025-06-25T14:28:38.697501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:28:38.698746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-25T14:28:38.698900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-25T14:28:38.699181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-25T14:28:38.699249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 105:0 ProgressState 2025-06-25T14:28:38.699361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 1/1 2025-06-25T14:28:38.699409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-25T14:28:38.699469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 1/1 2025-06-25T14:28:38.699501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-25T14:28:38.699540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: true 
2025-06-25T14:28:38.699606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 105 2025-06-25T14:28:38.699660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-25T14:28:38.699702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:0 2025-06-25T14:28:38.699745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 105:0 2025-06-25T14:28:38.699871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:28:38.701591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:28:38.701659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:844:2761] TestWaitNotification: OK eventTxId 105 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::DescribeTopic [GOOD] Test command err: 2025-06-25T14:28:31.405690Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893790901276686:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:31.405749Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c20/r3tmp/tmpP3z0w6/pdisk_1.dat 2025-06-25T14:28:31.848592Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:31.915826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:31.915931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:31.917936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6334 TServer::EnableGrpc on GrpcPort 10393, node 1 2025-06-25T14:28:32.117004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:32.117023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:32.117031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:32.117131Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6334 2025-06-25T14:28:32.425816Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:32.612495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:32.626899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:28:34.453986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:34.600377Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:28:34.603543Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893803786179301:2395] txid# 281474976710660, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-25T14:28:35.265145Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893808542389596:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:35.265231Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c20/r3tmp/tmpwpQloT/pdisk_1.dat 2025-06-25T14:28:35.411481Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:35.413256Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893808542389574:2080] 1750861715264749 != 1750861715264752 2025-06-25T14:28:35.452176Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:35.452249Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:35.454029Z node 2 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9797 TServer::EnableGrpc on GrpcPort 26504, node 2 2025-06-25T14:28:35.644911Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:35.644936Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:35.644944Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:35.645072Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9797 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:35.992614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:35.998061Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:28:36.027574Z node 2 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-25T14:28:36.212787Z node 2 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-25T14:28:36.293469Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TControlPlaneProxyCheckPermissionsSuccess::ShouldSendDeleteBinding [GOOD] >> TControlPlaneProxyShouldPassHids::ShouldCheckScenario |75.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveIndexDoesNonExisted [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:37.343035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:37.343117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:37.343158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:37.343191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:37.343232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:37.343259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:37.343308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:37.343370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:37.344138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:37.344484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:37.422907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:37.422962Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:37.439482Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:37.439923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:37.440088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:37.445749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:37.446068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:37.446685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:37.446956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:37.450171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:37.450336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:37.451502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:37.451566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:37.451904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:37.451955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:37.451999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:37.452083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.457640Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:37.573713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:37.573902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.574121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:37.574161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 
type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:37.574373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:37.574438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:37.576173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:37.576300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:37.576438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.576473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:37.576500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:37.576539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:37.578064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.578119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:37.578157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:37.579693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.579752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.579813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:37.579866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:37.588721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:37.590578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly 
msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:37.590734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:37.591558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:37.591670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:37.591713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:37.592004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:37.592057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:37.592234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:37.592336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:37.594328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:37.594365Z node 1 :FLAT_TX_SCHEMESHARD ... 
DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:39.220410Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Sync" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:28:39.220620Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Sync" took 224us result status StatusSuccess 2025-06-25T14:28:39.221310Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Sync" PathDescription { Self { Name: "Sync" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 
ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Sync" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value0" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:39.221897Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Async" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:28:39.222038Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Async" took 152us result status StatusSuccess 2025-06-25T14:28:39.222547Z node 2 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Async" PathDescription { Self { Name: "Async" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 5 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Async" LocalPathId: 5 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value1" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 
101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_10 [GOOD] Test command err: 2025-06-25T14:27:57.087810Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893644776752493:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:57.088074Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:57.149530Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893645372021399:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:27:57.177436Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:27:57.555634Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001933/r3tmp/tmpTAv3Pa/pdisk_1.dat 2025-06-25T14:27:57.589509Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:27:58.051445Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:58.152868Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:58.174277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:58.177767Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:58.185358Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:58.206311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:58.206401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:58.212788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:58.212867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:58.333835Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:58.338145Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:27:58.347607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8631, node 1 2025-06-25T14:27:58.588740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001933/r3tmp/yandexc2TBFG.tmp 2025-06-25T14:27:58.588762Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001933/r3tmp/yandexc2TBFG.tmp 2025-06-25T14:27:58.588921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001933/r3tmp/yandexc2TBFG.tmp 2025-06-25T14:27:58.589037Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:58.711491Z INFO: TTestServer started on Port 7049 GrpcPort 8631 TClient is connected to server localhost:7049 PQClient connected to localhost:8631 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:27:59.534658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:27:59.574895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:27:59.637719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:28:00.185853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:28:02.056470Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893644776752493:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:02.056550Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:02.120119Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893645372021399:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:02.131105Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:03.940408Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893670546557133:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:03.940572Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:03.942819Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893670546557146:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:03.947041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:03.965453Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893670546557180:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:03.965860Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:03.988229Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893670546557148:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:28:04.300652Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893674841524527:2748] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:04.356172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:04.458435Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519893674841524541:2316], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:28:04.460471Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjVmZDRiYjYtMTFhOTc2NmUtYmQ2NzJjNGItYjcyNGMyNzM=, ActorId: [1:7519893670546557131:2302], ActorState: ExecuteState, TraceId: 01jykqt76h3jeg5s6vxxb80dq3, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:28:04.462810Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:28:04.488192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__oper ... ionId: account2/topic2|f62c2bef-6eb99cf9-5ee1bc3-2f88d81a_0 grpc read failed 2025-06-25T14:28:37.862100Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 1 sessionId: account2/topic2|f62c2bef-6eb99cf9-5ee1bc3-2f88d81a_0 2025-06-25T14:28:37.863312Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: account2/topic2|f62c2bef-6eb99cf9-5ee1bc3-2f88d81a_0 is DEAD 2025-06-25T14:28:37.864081Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037902 (partition=1) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:28:37.868728Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037902] server disconnected, pipe [3:7519893788296790430:2495] destroyed 2025-06-25T14:28:37.868788Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037902, Partition: 1, State: StateIdle] TPartition::DropOwner. 
2025-06-25T14:28:37.951613Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519893736757179723:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:28:37.951731Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519893736757179723:2129], cacheItem# { Subscriber: { Subscriber: [3:7519893736757180216:2450] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 28 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750861699020 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:37.951958Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519893818361562731:4932], recipient# [3:7519893736757179597:2146], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:28:37.990793Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2823: Handle TEvTxProxySchemeCache::TEvResolveKeySet: self# [3:7519893736757179723:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:28:37.990906Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2062: FillEntry for TResolve: self# [3:7519893736757179723:2129], cacheItem# { Subscriber: { Subscriber: [3:7519893753937050040:2885] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 28 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861702821 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:37.990974Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2062: FillEntry for TResolve: self# [3:7519893736757179723:2129], cacheItem# { Subscriber: { Subscriber: 
[3:7519893753937049893:2779] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 28 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861702534 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:37.991206Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519893818361562738:4936], recipient# [3:7519893818361562737:2838], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:28:37.993934Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2823: Handle TEvTxProxySchemeCache::TEvResolveKeySet: self# [4:7519893735498463143:2107], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : Cluster) IncFrom: 1 To: (Utf8 : Cluster) IncTo: 1 }] } 2025-06-25T14:28:37.994050Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2062: FillEntry for TResolve: self# [4:7519893735498463143:2107], cacheItem# { Subscriber: { Subscriber: [4:7519893756973299847:2219] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861702821 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:37.994361Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519893817102843510:3330], recipient# [4:7519893817102843508:2414], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 
7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : Cluster) IncFrom: 1 To: (Utf8 : Cluster) IncTo: 1 }] } 2025-06-25T14:28:38.204431Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519893736757179723:2129], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:28:38.204576Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519893736757179723:2129], cacheItem# { Subscriber: { Subscriber: [3:7519893741052147573:2498] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:38.204687Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519893822656530044:4938], recipient# [3:7519893822656530043:2882], result# { ErrorCount: 1 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:28:38.418152Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519893736757179723:2129], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:28:38.418334Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519893736757179723:2129], cacheItem# { Subscriber: { Subscriber: [3:7519893753937049791:2735] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:38.418471Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519893822656530048:4941], recipient# [3:7519893822656530047:2883], result# { ErrorCount: 1 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: 
Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> ExternalBlobsMultipleChannels::WithCompaction >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__SYNC-pk_types12-all_types12-index12-DyNumber--SYNC] [GOOD] >> TDataShardLocksTest::UseLocksCache [GOOD] |75.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest |75.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> TSchemeShardMoveTest::OneTable [GOOD] >> THiveTest::TestHiveBalancerDifferentResources [GOOD] >> THiveTest::TestHiveBalancerDifferentResources2 |75.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest |75.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut >> ExternalBlobsMultipleChannels::ExtBlobsMultipleColumns >> Cdc::NewAndOldImagesLog[PqRunner] [GOOD] >> Cdc::NewAndOldImagesLog[YdsRunner] |75.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |75.6%| [TA] {RESULT} $(B)/ydb/core/security/certificate_check/ut/test-results/unittest/{meta.json ... results_accumulator.log} |75.6%| [TA] {RESULT} $(B)/ydb/core/statistics/service/ut/ut_aggregation/test-results/unittest/{meta.json ... results_accumulator.log} |75.6%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... results_accumulator.log} |75.6%| [LD] {RESULT} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut >> TSchemeShardViewTest::AsyncCreateDifferentViews >> TSchemeShardViewTest::ReadOnlyMode >> Cdc::DocApi[YdsRunner] [GOOD] >> Cdc::DocApi[TopicRunner] >> TSchemeShardViewTest::DropView |75.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::OneTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:37.634068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:37.634177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:37.634221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:37.634261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:37.634303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:37.634329Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:37.634373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:37.634429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:37.635148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:37.635455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:37.720779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:37.720848Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:37.741197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:37.741583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:37.741721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:37.747390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:37.747704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:37.748290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:37.748554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:37.751563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:37.751740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:37.752884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:37.752943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:37.753105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:37.753156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:37.753213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:37.753297Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.765554Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:37.886906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:37.887154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.887358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:37.887404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:37.887635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:37.887703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:37.889956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:37.890133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:37.890302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.890372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:37.890408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:37.890463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:37.892504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.892562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:37.892599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for 
txid 1:0 3 -> 128 2025-06-25T14:28:37.894459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.894503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:37.894562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:37.894622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:37.898030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:37.899977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:37.900176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:37.901143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:37.901266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:37.901307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:37.901565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:37.901608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:37.901772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:37.901840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:37.903915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:37.903964Z node 1 :FLAT_TX_SCHEMESHARD ... 
-25T14:28:40.524160Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:208:2208], at schemeshard: 72057594046678944, txId: 108, path id: 4 2025-06-25T14:28:40.524548Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:28:40.524607Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 108:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:28:40.524692Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:28:40.524757Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 108:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:28:40.524802Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 108:0 129 -> 240 2025-06-25T14:28:40.525647Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 23 PathOwnerId: 72057594046678944, cookie: 108 2025-06-25T14:28:40.525746Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 23 PathOwnerId: 72057594046678944, cookie: 108 2025-06-25T14:28:40.525791Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-06-25T14:28:40.525833Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 23 2025-06-25T14:28:40.525875Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:28:40.526507Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 108 2025-06-25T14:28:40.526574Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 108 2025-06-25T14:28:40.526599Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-06-25T14:28:40.526627Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-25T14:28:40.526656Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:28:40.526720Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 108, ready parts: 0/1, is published: true 2025-06-25T14:28:40.529504Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:28:40.529565Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 108:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:40.529791Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:28:40.529925Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#108:0 progress is 1/1 2025-06-25T14:28:40.529962Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-25T14:28:40.530008Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#108:0 progress is 1/1 2025-06-25T14:28:40.530038Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-25T14:28:40.530078Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 108, ready parts: 1/1, is published: true 2025-06-25T14:28:40.530139Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:337:2314] message: TxId: 108 2025-06-25T14:28:40.530189Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-25T14:28:40.530228Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 108:0 2025-06-25T14:28:40.530267Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 108:0 2025-06-25T14:28:40.530360Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:28:40.531318Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-06-25T14:28:40.531623Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-06-25T14:28:40.532641Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-25T14:28:40.532679Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [2:830:2785] TestWaitNotification: OK eventTxId 108 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted 2025-06-25T14:28:40.533246Z node 2 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-25T14:28:40.533307Z node 2 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409547 2025-06-25T14:28:40.549161Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: 
Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 8589936886 } TabletId: 72075186233409546 State: 4 2025-06-25T14:28:40.549268Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-06-25T14:28:40.550784Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:28:40.551193Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-25T14:28:40.553563Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:40.553901Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 Forgetting tablet 72075186233409546 2025-06-25T14:28:40.556024Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:28:40.556080Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-25T14:28:40.556178Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:40.559246Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:28:40.559318Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:28:40.559824Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 Deleted tabletId 72075186233409546 2025-06-25T14:28:40.560541Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:40.560751Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 234us result status StatusSuccess 2025-06-25T14:28:40.561156Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 23 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 23 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 21 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> Cdc::UpdatesLog[PqRunner] [GOOD] >> Cdc::UpdatesLog[YdsRunner] >> TSchemeShardViewTest::AsyncCreateSameView ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_locks/unittest >> TDataShardLocksTest::UseLocksCache [GOOD] Test command err: 2025-06-25T14:28:32.701168Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:28:32.701328Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:28:32.701402Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018fa/r3tmp/tmpyyLZYP/pdisk_1.dat 2025-06-25T14:28:33.026956Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:28:33.030704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:33.082983Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:33.088358Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861709662408 != 1750861709662412 2025-06-25T14:28:33.135274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:33.135463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:33.147083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:33.233882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:33.277118Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:28:33.278520Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:619:2526], Recipient [1:635:2536]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:28:33.279340Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:28:33.279823Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:633:2534] 2025-06-25T14:28:33.280100Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:28:33.331623Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:28:33.331739Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:619:2526], 
Recipient [1:635:2536]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:28:33.332232Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:635:2536] 2025-06-25T14:28:33.332462Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:28:33.341228Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:619:2526], Recipient [1:635:2536]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:28:33.342103Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:28:33.342295Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:28:33.344148Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:28:33.344254Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:28:33.344335Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:28:33.344763Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:28:33.345010Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:28:33.345105Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:666:2534] in generation 1 2025-06-25T14:28:33.345580Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:28:33.345675Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:28:33.347161Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T14:28:33.347249Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T14:28:33.347301Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T14:28:33.347639Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:28:33.347762Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:28:33.347859Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:667:2536] in generation 1 2025-06-25T14:28:33.358764Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:28:33.384848Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:28:33.385079Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:28:33.385198Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:670:2555] 2025-06-25T14:28:33.385235Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:28:33.385289Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:28:33.385333Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037888 2025-06-25T14:28:33.385620Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:633:2534], Recipient [1:633:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:33.385662Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:33.385749Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:28:33.385782Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T14:28:33.385831Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:28:33.385880Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:671:2556] 2025-06-25T14:28:33.385900Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T14:28:33.385920Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T14:28:33.385939Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:28:33.386231Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:635:2536], Recipient [1:635:2536]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:33.386266Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:33.386495Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:28:33.386590Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:28:33.387051Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:33.387099Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:33.387141Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:28:33.387183Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:28:33.387212Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:28:33.387240Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:28:33.387289Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:33.387341Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-25T14:28:33.387397Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-25T14:28:33.387514Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:650:2544], Recipient 
[1:633:2534]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:33.387556Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:33.387607Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:626:2530], serverId# [1:650:2544], sessionId# [0:0:0] 2025-06-25T14:28:33.387646Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:28:33.387669Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:33.387684Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037889 2025-06-25T14:28:33.387700Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-25T14:28:33.387715Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-25T14:28:33.387738Z node 1 :TX_DATASHARD ... node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2025-06-25T14:28:40.198074Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T14:28:40.198123Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2025-06-25T14:28:40.198172Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T14:28:40.198216Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T14:28:40.198281Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2025-06-25T14:28:40.198326Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T14:28:40.198355Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T14:28:40.198380Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T14:28:40.198404Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2025-06-25T14:28:40.198521Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-25T14:28:40.198786Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v2500/18446744073709551615 2025-06-25T14:28:40.198847Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is DelayComplete 2025-06-25T14:28:40.198881Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing 
on unit ExecuteRead 2025-06-25T14:28:40.198920Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T14:28:40.198958Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2025-06-25T14:28:40.199008Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T14:28:40.199031Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T14:28:40.199061Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037888 has finished 2025-06-25T14:28:40.199100Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T14:28:40.209906Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:40.209977Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2500:281474976715661] at 72075186224037888 on unit CompleteWrite 2025-06-25T14:28:40.210034Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2500 : 281474976715661] from 72075186224037888 at tablet 72075186224037888 send result to client [2:887:2674] 2025-06-25T14:28:40.210103Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037888 {TEvReadSet step# 2500 txid# 281474976715661 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-25T14:28:40.210156Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:40.210241Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:40.210291Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T14:28:40.210338Z node 2 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 2 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-25T14:28:40.210445Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:40.210530Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T14:28:40.210573Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:3] at 72075186224037888 on unit ExecuteRead 2025-06-25T14:28:40.210623Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[2:953:2741], 0} after executionsCount# 1 2025-06-25T14:28:40.210670Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[2:953:2741], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T14:28:40.210761Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[2:953:2741], 0} finished in read 2025-06-25T14:28:40.211008Z node 2 
:TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:925:2726], Recipient [2:635:2536]: {TEvReadSet step# 2500 txid# 281474976715661 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-25T14:28:40.211046Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T14:28:40.211081Z node 2 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715661 2025-06-25T14:28:40.212115Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:953:2741], Recipient [2:925:2726]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T14:28:40.212182Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-25T14:28:40.212386Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [2:953:2741], Recipient [2:635:2536]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 RangesSize: 1 2025-06-25T14:28:40.212492Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-06-25T14:28:40.212547Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037889 on unit CheckRead 2025-06-25T14:28:40.212609Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037889 is Executed 2025-06-25T14:28:40.212636Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037889 executing on unit CheckRead 2025-06-25T14:28:40.212666Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-25T14:28:40.212693Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-25T14:28:40.212737Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037889 2025-06-25T14:28:40.212768Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037889 is Executed 2025-06-25T14:28:40.212791Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-25T14:28:40.212814Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037889 to execution unit ExecuteRead 2025-06-25T14:28:40.212834Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037889 on unit ExecuteRead 2025-06-25T14:28:40.212922Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 } 2025-06-25T14:28:40.213136Z node 2 
:TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v2500/18446744073709551615 2025-06-25T14:28:40.213185Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[2:953:2741], 1} after executionsCount# 1 2025-06-25T14:28:40.213263Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[2:953:2741], 1} sends rowCount# 2, bytes# 64, quota rows left# 997, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T14:28:40.213346Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[2:953:2741], 1} finished in read 2025-06-25T14:28:40.213411Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037889 is Executed 2025-06-25T14:28:40.213435Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037889 executing on unit ExecuteRead 2025-06-25T14:28:40.213459Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037889 to execution unit CompletedOperations 2025-06-25T14:28:40.213481Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037889 on unit CompletedOperations 2025-06-25T14:28:40.213524Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037889 is Executed 2025-06-25T14:28:40.213546Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037889 executing on unit CompletedOperations 2025-06-25T14:28:40.213567Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037889 has finished 2025-06-25T14:28:40.213593Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-25T14:28:40.213686Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-25T14:28:40.214389Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:953:2741], Recipient [2:635:2536]: NKikimrTxDataShard.TEvReadCancel ReadId: 1 2025-06-25T14:28:40.214432Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 1 } >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApiWithCancelAfter [GOOD] >> TSchemeShardViewTest::AsyncCreateDifferentViews [GOOD] >> TSchemeShardViewTest::EmptyName >> TSchemeShardViewTest::DropView [GOOD] >> TSchemeShardViewTest::CreateView >> YdbProxy::AlterTopic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncCreateDifferentViews [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:41.487169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:41.487226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:41.487255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:41.487277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:41.487311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:41.487335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:41.487374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:41.487415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:41.487890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:41.488151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:41.546150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:41.546199Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:41.559354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:41.559756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:41.559887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:41.567017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:41.567336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:41.567882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:41.568097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:41.571869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:41.572090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:41.573395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:41.573468Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:41.573619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:41.573669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:41.573719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:41.573817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.583445Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:41.705926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:41.706179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.706410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:41.706457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:41.706698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:41.706767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:41.709541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:41.709742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:41.709946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.710011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-06-25T14:28:41.710071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:41.710129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:41.712246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.712335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:41.712393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:41.714236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.714282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.714354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:41.714412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:41.718259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:41.726471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:41.726752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:41.727720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:41.727875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:41.727942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:41.728285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:41.728367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:41.728577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:41.728673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:41.731223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:41.731279Z node 1 :FLAT_TX_SCHEMESHARD ... X_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:28:41.824556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:28:41.824606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T14:28:41.824639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:28:41.824701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:28:41.826795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:28:41.826886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 101 2025-06-25T14:28:41.827178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:28:41.827222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-25T14:28:41.827328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:28:41.827357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-25T14:28:41.827414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:28:41.827437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:28:41.827949Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:28:41.828092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:28:41.828133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:28:41.828168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:339:2328] 2025-06-25T14:28:41.828426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:28:41.828528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:28:41.828555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:339:2328] 2025-06-25T14:28:41.828654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:28:41.828684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:339:2328] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-25T14:28:41.829184Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:41.829398Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir" took 199us result status StatusSuccess 2025-06-25T14:28:41.829890Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir" PathDescription { Self { Name: "SomeDir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "FirstView" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "SecondView" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 103 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 
ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:41.830409Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir/FirstView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:41.830573Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir/FirstView" took 183us result status StatusSuccess 2025-06-25T14:28:41.830906Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir/FirstView" PathDescription { Self { Name: "FirstView" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "FirstView" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 QueryText: "First query" CapturedContext { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:41.831354Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir/SecondView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at 
schemeshard: 72057594046678944 2025-06-25T14:28:41.831504Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir/SecondView" took 143us result status StatusSuccess 2025-06-25T14:28:41.831799Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir/SecondView" PathDescription { Self { Name: "SecondView" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 103 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "SecondView" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 QueryText: "Second query" CapturedContext { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |75.6%| [TA] $(B)/ydb/core/tx/datashard/ut_locks/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerless >> YdbProxy::ReadNonExistentTopic [GOOD] >> TSchemeshardCompactionQueueTest::ShouldNotEnqueueSinglePartedShardWithMemData [GOOD] >> TSchemeshardCompactionQueueTest::ShouldPopWhenOnlyLastCompactionQueue [GOOD] >> TSchemeShardViewTest::AsyncCreateSameView [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::DropView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:41.770493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:41.770596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:41.770662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:41.770699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:41.770737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:41.770767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:41.770830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:41.770917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:41.771636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:41.772025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:41.834882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:41.834932Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:41.847643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:41.848011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:41.848127Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:41.852766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: 
TTxUpgradeSchema.Complete 2025-06-25T14:28:41.852999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:41.853452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:41.853627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:41.856046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:41.856175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:41.857059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:41.857122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:41.857255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:41.857303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:41.857345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:41.857417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.862191Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:41.979147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:41.979374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.979574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:41.979613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:41.979793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:41.979860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:41.981961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:41.982090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:41.982231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.982286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:41.982325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:41.982375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:41.985437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.985513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:41.985563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:41.987331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.987372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.987443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:41.987491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:41.990036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:41.991548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:41.991750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send 
Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:41.992461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:41.992591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:41.992649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:41.992964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:41.993015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:41.993179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:41.993263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:41.994986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:41.995020Z node 1 :FLAT_TX_SCHEMESHARD ... 
hard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:42.039668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-25T14:28:42.039767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-25T14:28:42.039982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.040046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:42.040092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_view.cpp:43: [72057594046678944] TDropView TPropose, opId: 102:0 HandleReply TEvOperationPlan, step: 5000003 2025-06-25T14:28:42.040212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 240 2025-06-25T14:28:42.040370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:42.040427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:28:42.041691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:42.041722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:42.041839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:28:42.041937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.041961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-25T14:28:42.041995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:28:42.042218Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.042248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:28:42.042325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:28:42.042351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:28:42.042386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:28:42.042410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:28:42.042437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-25T14:28:42.042462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:28:42.042487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:28:42.042509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:28:42.042543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:28:42.042565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-25T14:28:42.042589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:28:42.042608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-25T14:28:42.043151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:28:42.043232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:28:42.043261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:28:42.043284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:28:42.043312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:28:42.043839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: 
Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:28:42.043904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:28:42.043926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:28:42.043945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:28:42.043968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:28:42.044042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:28:42.044295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:28:42.044349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:28:42.044400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:42.048208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:28:42.048778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:28:42.048856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:28:42.049033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:28:42.049069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:28:42.049505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:28:42.049602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:28:42.049636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:328:2317] TestWaitNotification: OK eventTxId 102 2025-06-25T14:28:42.050127Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:42.050324Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 218us result status StatusPathDoesNotExist 2025-06-25T14:28:42.050487Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyView\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyView" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> AsyncIndexChangeExchange::SenderShouldShakeHandsOnce [GOOD] >> AsyncIndexChangeExchange::SenderShouldShakeHandsTwice >> TSchemeShardViewTest::EmptyName [GOOD] >> TSchemeShardViewTest::ReadOnlyMode [GOOD] |75.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::ShouldPopWhenOnlyLastCompactionQueue [GOOD] >> TSchemeShardViewTest::CreateView [GOOD] >> TSchemeshardCompactionQueueTest::ShouldNotEnqueueEmptyShard [GOOD] >> TSchemeshardCompactionQueueTest::RemoveLastShardFromSubQueues [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsSchemeshardRestart ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncCreateSameView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:42.034701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:42.034772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:42.034808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:42.034836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:42.034871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:42.034894Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:42.034940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:42.035012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:42.035672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:42.035939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:42.111335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:42.111400Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:42.128808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:42.129361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:42.129525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:42.136032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:42.136423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:42.137200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.137446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:42.140662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.140837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:42.141687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:42.141729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.141855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:42.141902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:42.141937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:42.142000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: 
TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.148195Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:42.272520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:42.272708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.272890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:42.272927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:42.273072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:42.273117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:42.275221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.275347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:42.275489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.275530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:42.275579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:42.275622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:42.277043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.277087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:42.277134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 
2025-06-25T14:28:42.278339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.278368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.278410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:42.278454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:42.280822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:42.282108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:42.282271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:42.283015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.283121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:42.283161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:42.283383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:42.283424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:42.283539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:42.283601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:42.285239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:42.285280Z node 1 :FLAT_TX_SCHEMESHARD ... 
TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:28:42.322612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:42.322653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:42.322769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:28:42.322854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.322883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-25T14:28:42.322916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-25T14:28:42.323118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.323156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:28:42.323236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:28:42.323266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:28:42.323301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:28:42.323324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:28:42.323363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:28:42.323394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:28:42.323418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:28:42.323443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:28:42.323501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:28:42.323529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T14:28:42.323554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, 
[OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-25T14:28:42.323575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-25T14:28:42.324063Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:28:42.324165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:28:42.324208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:28:42.324237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-25T14:28:42.324286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:28:42.325057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:28:42.325129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:28:42.325157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:28:42.325183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-25T14:28:42.325216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:28:42.325285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:28:42.327917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:28:42.328799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 101 2025-06-25T14:28:42.329112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- 
TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:28:42.329159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-25T14:28:42.329265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:28:42.329286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-25T14:28:42.329328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:28:42.329346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:28:42.329870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:28:42.329972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:28:42.330030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:28:42.330067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:308:2297] 2025-06-25T14:28:42.330220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:28:42.330247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:308:2297] 2025-06-25T14:28:42.330330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:28:42.330434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:28:42.330450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:308:2297] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-25T14:28:42.330815Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:42.331028Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 214us result status StatusSuccess 2025-06-25T14:28:42.331377Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyView" PathDescription { Self { Name: "MyView" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "MyView" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/rate_limiter/ut/unittest >> TGRpcRateLimiterTest::AcquireResourceManyUsedActorApiWithCancelAfter [GOOD] Test command err: 2025-06-25T14:25:51.578974Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893104488661933:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:25:51.579055Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000708/r3tmp/tmp21Wiis/pdisk_1.dat 2025-06-25T14:25:51.819846Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21571, node 1 2025-06-25T14:25:51.868076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:25:51.868093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:25:51.868098Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:25:51.868197Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:25:51.927945Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:25:51.928086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:25:51.930373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10527 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:25:52.101798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:25:52.159177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:25:57.219915Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519893127784370113:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:25:57.242002Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000708/r3tmp/tmp5ErMKr/pdisk_1.dat 2025-06-25T14:25:59.411022Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:25:59.716443Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:01.204646Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:02.026141Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:02.220442Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519893127784370113:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:26:02.220493Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:02.220514Z node 4 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:26:02.321352Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:26:02.321701Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:26:02.364178Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2670, node 4 2025-06-25T14:26:03.064917Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:26:03.064945Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:26:03.064953Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:26:03.065320Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23357 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:26:12.145519Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:26:12.885969Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:26:14.644441Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:26:16.016466Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:26:16.016504Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:45.380632Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519893334342322681:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:26:45.380698Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000708/r3tmp/tmpIrapuJ/pdisk_1.dat 2025-06-25T14:26:46.473539Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:26:47.243411Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:26:47.364707Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:26:47.446116Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:26:47.446205Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:26:47.608621Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18971, node 7 2025-06-25T14:26:50.198968Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:26:50.224509Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:26:50.224527Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:26:50.225287Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:26:50.313154Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519893334342322681:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:26:50.313210Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:21924 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: ... schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:28:22.181926Z node 31 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[31:7519893753492286212:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:22.182054Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000708/r3tmp/tmp4EGJ5D/pdisk_1.dat 2025-06-25T14:28:22.513981Z node 31 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:22.556049Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:22.556187Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:22.570272Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65182, node 31 2025-06-25T14:28:22.757225Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:22.757258Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:22.757270Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:22.757433Z node 31 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:23.218289Z node 31 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10603 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:23.403653Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:23.529008Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:28:29.251772Z node 34 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[34:7519893780880021574:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:29.251859Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000708/r3tmp/tmpH2PnIX/pdisk_1.dat 2025-06-25T14:28:29.621133Z node 34 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:29.656365Z node 34 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:29.656484Z node 34 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:29.670915Z node 34 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19047, node 34 2025-06-25T14:28:29.740903Z node 34 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:29.740923Z node 34 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:29.740932Z node 34 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:29.741075Z node 34 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30259 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:29.991873Z node 34 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:30.105958Z node 34 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:28:30.276473Z node 34 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:35.918049Z node 37 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[37:7519893806829695265:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:35.918118Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000708/r3tmp/tmpdaMgYd/pdisk_1.dat 2025-06-25T14:28:36.128511Z node 37 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:36.154902Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:36.155037Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:36.162275Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7688, node 37 2025-06-25T14:28:36.274278Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:36.274306Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:36.274317Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:36.274508Z node 37 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21157 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:36.494517Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:36.607010Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:28:36.936398Z node 37 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::AlterTopic [GOOD] Test command err: 2025-06-25T14:28:31.673117Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893788591517270:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:31.677106Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bea/r3tmp/tmppDusjC/pdisk_1.dat 2025-06-25T14:28:32.185307Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:32.185428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:32.190218Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:32.194862Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893788591517060:2080] 1750861711615780 != 1750861711615783 2025-06-25T14:28:32.208766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25562 TServer::EnableGrpc on GrpcPort 14709, node 1 2025-06-25T14:28:32.529124Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:32.529148Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:32.529156Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:32.529302Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:32.663931Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25562 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:32.893618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:32.908535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:28:34.673454Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893801476419583:2300] txid# 281474976710658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-25T14:28:34.689570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:35.537470Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893806169848479:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:35.537537Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bea/r3tmp/tmpPeyea3/pdisk_1.dat 2025-06-25T14:28:35.758713Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:35.760888Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893806169848449:2080] 1750861715534310 != 1750861715534313 2025-06-25T14:28:35.781670Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:35.781742Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:35.784024Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5324 TServer::EnableGrpc on GrpcPort 1684, node 2 2025-06-25T14:28:35.975144Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:35.975163Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:35.975169Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:35.975269Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5324 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:36.362529Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:36.552067Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:38.388200Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:38.426256Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:39.232102Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519893825665665035:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:39.232170Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bea/r3tmp/tmpzICYCV/pdisk_1.dat 2025-06-25T14:28:39.336457Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519893825665665016:2080] 1750861719231643 != 1750861719231646 2025-06-25T14:28:39.355052Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:39.364102Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:39.364180Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:39.365462Z node 3 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22243 TServer::EnableGrpc on GrpcPort 26798, node 3 2025-06-25T14:28:39.524983Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:39.525006Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:39.525015Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:39.525155Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22243 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:39.836773Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:39.992176Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T14:28:40.013751Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519893829960633073:2393] txid# 281474976715660, issues: { message: "Invalid retention period: specified: 31536000s, min: 1s, max: 2678400s" severity: 1 } |75.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/ut/ydb-core-security-ut >> TTxDataShardMiniKQL::CrossShard_1_Cycle [GOOD] >> TTxDataShardMiniKQL::CrossShard_2_SwapAndCopy >> TSchemeshardCompactionQueueTest::EnqueueBelowSearchHeightThreshold [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueBelowRowDeletesThreshold [GOOD] >> TSchemeshardCompactionQueueTest::CheckOrderWhenAllQueues [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::EmptyName [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:42.485187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:42.485255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:42.485286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:42.485313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:42.485345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:42.485367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:42.485403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:42.485455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:42.485979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:42.486207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:42.545283Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:42.545333Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:42.558985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:42.559276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:42.559391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:42.564289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:42.564530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:42.565129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.565364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:42.567865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.567995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:42.569105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:42.569173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.569329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:42.569375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:42.569412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:42.569481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.574714Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:42.675100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:42.675342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.675559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:42.675606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:42.675816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:42.675890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:42.678073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.678232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:42.678430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.678503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:42.678554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:42.678604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:42.680412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.680455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:42.680489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:42.681918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.681959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.682007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:42.682045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:42.685245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 
2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:42.687214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:42.687418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:42.688355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.688496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:42.688554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:42.688874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:42.688933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:42.689096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:42.689203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:42.691296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:42.691342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:42.691583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.691625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:28:42.691967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.692027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:28:42.692134Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:28:42.692174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:28:42.692213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:28:42.692245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:28:42.692281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:28:42.692342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:28:42.692384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:28:42.692416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:28:42.692513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:28:42.692554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:28:42.692586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:28:42.700278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:28:42.700455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:28:42.700501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:28:42.700545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:28:42.700588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:42.700730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:28:42.710203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:28:42.710737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T14:28:42.711263Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# 
[1:274:2263] Bootstrap 2025-06-25T14:28:42.730555Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:28:42.733229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "" QueryText: "Some query" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:42.733415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/, opId: 101:0 2025-06-25T14:28:42.733524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/, opId: 101:0, viewDescription: Name: "" QueryText: "Some query" 2025-06-25T14:28:42.733603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/', error: path part shouldn't be empty, at schemeshard: 72057594046678944 2025-06-25T14:28:42.734675Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:28:42.737393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/\', error: path part shouldn\'t be empty" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:42.737622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/', error: path part shouldn't be empty, operation: CREATE VIEW, path: /MyRoot/ 2025-06-25T14:28:42.738035Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 |75.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/ut/ydb-core-security-ut |75.7%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_locks/test-results/unittest/{meta.json ... 
results_accumulator.log} |75.7%| [TM] {RESULT} ydb/services/rate_limiter/ut/unittest |75.7%| [LD] {RESULT} $(B)/ydb/core/security/ut/ydb-core-security-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:41.686065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:41.686164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:41.686205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:41.686238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:41.686276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:41.686304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:41.686361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:41.686439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:41.687157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:41.687479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:41.760101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:41.760152Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:41.774333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:41.774649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:41.774756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:41.779735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:41.780018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:41.780713Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:41.780964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:41.783985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:41.784112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:41.784924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:41.784971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:41.785068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:41.785102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:41.785134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:41.785214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.790185Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:41.914311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:41.914519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.914739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:41.914793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:41.914998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:41.915061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:41.917456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:41.917654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:41.917850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.917917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:41.917959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:41.918017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:41.920095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.920157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:41.920202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:41.921991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.922035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:41.922097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:41.922149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:41.925947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:41.928061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:41.928283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:41.929211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:41.929347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:41.929407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:41.929722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:41.929788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:41.929960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:41.930055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:41.932226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:41.932277Z node 1 :FLAT_TX_SCHEMESHARD ... meshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:42.591334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 Leader for TabletID 72057594046678944 is [1:385:2352] sender: [1:443:2058] recipient: [1:15:2062] 2025-06-25T14:28:42.636717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "ThirdView" QueryText: "Some query" } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:42.636945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/ThirdView, opId: 103:0 2025-06-25T14:28:42.637021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/ThirdView, opId: 103:0, viewDescription: Name: "ThirdView" QueryText: "Some query" 2025-06-25T14:28:42.637159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: ThirdView, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:28:42.637234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-25T14:28:42.637271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 103:0 type: TxCreateView 
target path: [OwnerId: 72057594046678944, LocalPathId: 3] source path: 2025-06-25T14:28:42.637315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:42.639986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944 PathId: 3, at schemeshard: 72057594046678944 2025-06-25T14:28:42.640202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE VIEW, path: /MyRoot/ThirdView 2025-06-25T14:28:42.640432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.640478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:30: [72057594046678944] TCreateView::TPropose, opId: 103:0 ProgressState 2025-06-25T14:28:42.640530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 103 ready parts: 1/1 2025-06-25T14:28:42.640659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 103 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:42.642575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816 2025-06-25T14:28:42.642721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 103 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000003 2025-06-25T14:28:42.643405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.643506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:42.643584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 103:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000003 2025-06-25T14:28:42.643728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 103:0 128 -> 240 2025-06-25T14:28:42.643891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:28:42.643953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 FAKE_COORDINATOR: 
Erasing txId 103 2025-06-25T14:28:42.645869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:42.645919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:42.646113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:28:42.646209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.646241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:434:2390], at schemeshard: 72057594046678944, txId: 103, path id: 1 2025-06-25T14:28:42.646283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:434:2390], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-25T14:28:42.646466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.646508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-25T14:28:42.646622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:28:42.646657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:28:42.646697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:28:42.646726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:28:42.646759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-25T14:28:42.646789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:28:42.646816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:28:42.646839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:28:42.646923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:28:42.646958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 103, publications: 2, subscribers: 0 2025-06-25T14:28:42.646982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:28:42.647003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: 
tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-25T14:28:42.647583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:28:42.647675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:28:42.647713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:28:42.647754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:28:42.647785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:28:42.648544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:28:42.648605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:28:42.648627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:28:42.648655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T14:28:42.648677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:28:42.648730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-25T14:28:42.653115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:28:42.653624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::CreateView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:42.652910Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:42.653010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:42.653055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:42.653092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:42.653145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:42.653172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:42.653232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:42.653301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:42.654025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:42.654359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:42.734405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:42.734475Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:42.752790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:42.753263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:42.753405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:42.759567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:42.759901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:42.760508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.760740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:42.763816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.763939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 
2025-06-25T14:28:42.764775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:42.764833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.764928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:42.764960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:42.764990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:42.765059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.771005Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:42.888482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:42.888638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.888779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:42.888808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:42.888960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:42.889005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:42.891207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.891367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:42.891536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:28:42.891596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:42.891635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:42.891705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:42.894058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.894123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:42.894161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:42.895938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.895984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.896042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:42.896091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:42.899348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:42.901610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:42.901797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:42.902582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.902694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:42.902744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:42.903046Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:42.903102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:42.903259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:42.903345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:42.905397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:42.905443Z node 1 :FLAT_TX_SCHEMESHARD ... 42.941620Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:28:42.943250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-25T14:28:42.943426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-06-25T14:28:42.943761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.943861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:42.943909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000002 2025-06-25T14:28:42.944041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 128 -> 240 2025-06-25T14:28:42.944196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:42.944273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:28:42.946043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:42.946089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-06-25T14:28:42.946201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:28:42.946260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.946291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-25T14:28:42.946343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-25T14:28:42.946571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.946603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:28:42.946674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:28:42.946699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:28:42.946733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:28:42.946753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:28:42.946783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:28:42.946815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:28:42.946845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:28:42.946871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:28:42.946919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:28:42.946945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T14:28:42.946975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-25T14:28:42.946996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-25T14:28:42.947480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:28:42.947553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: 
TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:28:42.947585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:28:42.947623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-25T14:28:42.947668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:28:42.948090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:28:42.948136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:28:42.948160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:28:42.948176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-25T14:28:42.948198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:28:42.948254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:28:42.950827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:28:42.951933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:28:42.952123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:28:42.952168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:28:42.952554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:28:42.952669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:28:42.952704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:304:2293] TestWaitNotification: OK eventTxId 101 2025-06-25T14:28:42.953084Z node 1 :SCHEMESHARD_DESCRIBE 
DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:28:42.953292Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 179us result status StatusSuccess 2025-06-25T14:28:42.953650Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyView" PathDescription { Self { Name: "MyView" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "MyView" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |75.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::RemoveLastShardFromSubQueues [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/ydb_proxy/ut/unittest >> YdbProxy::ReadNonExistentTopic [GOOD] Test command err: 2025-06-25T14:28:31.580112Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893790697496617:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:31.580879Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c0e/r3tmp/tmpFjRA0x/pdisk_1.dat 2025-06-25T14:28:32.113535Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:32.114454Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893790697496437:2080] 1750861711573338 != 1750861711573341 
2025-06-25T14:28:32.131979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:32.132077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:32.133347Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21041 TServer::EnableGrpc on GrpcPort 10767, node 1 2025-06-25T14:28:32.357103Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:32.357126Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:32.357139Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:32.357295Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:32.580465Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21041 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:32.806111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:32.820974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:28:33.056462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:34.771671Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893803582399233:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:34.771748Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893803582399236:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:34.773310Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893803582399223:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:34.773485Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:34.776100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:34.787329Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893803582399245:2442] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:28:34.789435Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893803582399244:2336], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:28:34.789623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-25T14:28:34.789778Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893803582399243:2335], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:28:34.885167Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893803582399293:2474] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:34.885317Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893803582399294:2475] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:35.998770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:28:36.422892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:36.768390Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893790697496617:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:36.778724Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:36.948448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:28:37.360407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:28:37.834912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710682:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:28:39.595845Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893826032672992:2062];send_to=[0:7307199536658146131:7762515]; 
2025-06-25T14:28:39.595915Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c0e/r3tmp/tmpzZGlEm/pdisk_1.dat 2025-06-25T14:28:39.729010Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:39.730552Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893826032672962:2080] 1750861719595376 != 1750861719595379 2025-06-25T14:28:39.743775Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:39.743852Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:39.748741Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13934 TServer::EnableGrpc on GrpcPort 21361, node 2 2025-06-25T14:28:39.962713Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:39.962740Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:39.962748Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:39.962878Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13934 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:40.271584Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp [GOOD] |75.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::CheckOrderWhenAllQueues [GOOD] |75.7%| [TA] $(B)/ydb/core/tx/replication/ydb_proxy/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |75.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_view/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBackups >> THiveTest::TestHiveBalancerDifferentResources2 [GOOD] >> THiveTest::TestHiveBalancerUselessNeighbourMoves |75.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v1] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:234:2060] recipient: [1:228:2144] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:234:2060] recipient: [1:228:2144] Leader for TabletID 72057594046678944 is [1:245:2155] sender: [1:246:2060] recipient: [1:228:2144] 2025-06-25T14:27:38.889099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:27:38.889184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:27:38.889224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:27:38.889253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:27:38.889305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:27:38.889329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:27:38.889387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:27:38.889464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:27:38.890097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:27:38.890373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:27:38.997498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:27:38.997550Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:39.009344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:27:39.009679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:27:39.009823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:27:39.023995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:27:39.024427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:27:39.025044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:27:39.025284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:27:39.029075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:27:39.029235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:27:39.030290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:27:39.030364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:27:39.030547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:27:39.030590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:27:39.030632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:27:39.030757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.037213Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:245:2155] sender: [1:358:2060] recipient: [1:17:2064] 2025-06-25T14:27:39.161026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:27:39.161180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.161344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:27:39.161374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:27:39.161513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, 
reason: , at schemeshard: 72057594046678944 2025-06-25T14:27:39.161571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:39.163258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:27:39.163375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:27:39.163525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.163563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:27:39.163605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:27:39.163627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:27:39.164949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.164982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:27:39.165009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:27:39.166066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.166102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.166136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:27:39.166172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:27:39.173385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:27:39.176142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:27:39.176265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation 
RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:27:39.176989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:27:39.177091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 253 RawX2: 4294969456 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:27:39.177122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:27:39.177283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:27:39.177314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:27:39.177421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:27:39.177473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:27:39.178846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:27:39.178877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_s ... 
ady parts: 2/3 2025-06-25T14:28:43.090805Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:2 progress is 2/3 2025-06-25T14:28:43.090845Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 2/3 2025-06-25T14:28:43.090880Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 2/3, is published: true 2025-06-25T14:28:43.091076Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.091093Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:28:43.091108Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 104:0 2025-06-25T14:28:43.091144Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [7:989:2746] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 104 at schemeshard: 72057594046678944 2025-06-25T14:28:43.091208Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [7:244:2154], Recipient [7:989:2746]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 104 2025-06-25T14:28:43.091230Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-25T14:28:43.091247Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 104 datashard 72075186233409551 state Ready 2025-06-25T14:28:43.091284Z node 7 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186233409551 Got TEvSchemaChangedResult from SS at 72075186233409551 2025-06-25T14:28:43.091398Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [7:244:2154], Recipient [7:244:2154]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:28:43.091417Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:28:43.091449Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.091472Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-25T14:28:43.091521Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:28:43.091543Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 3/3 2025-06-25T14:28:43.091561Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-25T14:28:43.091579Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 3/3 2025-06-25T14:28:43.091592Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-25T14:28:43.091615Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: true 
2025-06-25T14:28:43.091670Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:591:2406] message: TxId: 104 2025-06-25T14:28:43.091706Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-25T14:28:43.091738Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:28:43.091761Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:28:43.091854Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 4 2025-06-25T14:28:43.091885Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:1 2025-06-25T14:28:43.091906Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:1 2025-06-25T14:28:43.091926Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 11] was 3 2025-06-25T14:28:43.091938Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:2 2025-06-25T14:28:43.091948Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:2 2025-06-25T14:28:43.091969Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 3 2025-06-25T14:28:43.093673Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:28:43.093774Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:28:43.093839Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [7:591:2406] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 104 at schemeshard: 72057594046678944 2025-06-25T14:28:43.093955Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:28:43.093997Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [7:1042:2784] 2025-06-25T14:28:43.094220Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [7:1044:2786], Recipient [7:244:2154]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:28:43.094247Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:28:43.094265Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-25T14:28:43.094937Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [8:565:2104], Recipient [7:244:2154] 2025-06-25T14:28:43.094974Z node 7 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:28:43.096763Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/tmp" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "NotTempTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Utf8" } KeyColumnNames: "key" } IndexDescription { Name: "ValueIndex" KeyColumnNames: "value" } } AllowCreateInTempDir: false } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:43.097204Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-25T14:28:43.097242Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 105:1, propose status:StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-25T14:28:43.125669Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:28:43.128238Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 105, response: Status: StatusPreconditionFailed Reason: "Check failed: path: \'/MyRoot/tmp\', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges)" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:43.128581Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), operation: CREATE TABLE WITH INDEXES, path: /MyRoot/tmp/NotTempTable 2025-06-25T14:28:43.128659Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-25T14:28:43.129087Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-25T14:28:43.129139Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-25T14:28:43.129530Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [7:1112:2854], Recipient [7:244:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:43.129580Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:43.129618Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:28:43.129750Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 
271124996, Sender [7:591:2406], Recipient [7:244:2154]: NKikimrScheme.TEvNotifyTxCompletion TxId: 105 2025-06-25T14:28:43.129783Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-25T14:28:43.129854Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-25T14:28:43.129949Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:28:43.129982Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [7:1110:2852] 2025-06-25T14:28:43.130146Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [7:1112:2854], Recipient [7:244:2154]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:28:43.130194Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:28:43.130229Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] >> KqpYql::FlexibleTypes >> AutoConfig::GetServicePoolsWith4AndMoreCPUs [GOOD] >> AutoConfig::GetASPoolsWith3CPUs [GOOD] >> AutoConfig::GetServicePoolsWith3CPUs [GOOD] >> KqpScripting::StreamExecuteYqlScriptScanClientTimeoutBruteForce |75.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> KqpYql::EvaluateIf >> AutoConfig::GetASPoolsWith2CPUs [GOOD] |75.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsWith3CPUs [GOOD] |75.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith4AndMoreCPUs [GOOD] >> AutoConfig::GetASPoolsith1CPU [GOOD] >> Cdc::UpdatesLog[YdsRunner] [GOOD] >> Cdc::UpdatesLog[TopicRunner] |75.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith3CPUs [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDropIndex [GOOD] >> TSchemeshardBackgroundCleaningTest::TempInTemp |75.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsWith2CPUs [GOOD] |75.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] |75.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsith1CPU [GOOD] >> Cdc::NewAndOldImagesLog[YdsRunner] [GOOD] >> Cdc::NewAndOldImagesLog[TopicRunner] |75.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_move/test-results/unittest/{meta.json ... 
results_accumulator.log} >> AutoConfig::GetASPoolsWith4AndMoreCPUs [GOOD] >> AutoConfig::GetServicePoolsWith1CPU [GOOD] |75.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsWith4AndMoreCPUs [GOOD] |75.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith1CPU [GOOD] >> AutoConfig::GetServicePoolsWith2CPUs [GOOD] >> TTicketParserTest::LoginGood >> TTicketParserTest::BulkAuthorizationRetryError |75.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetServicePoolsWith2CPUs [GOOD] >> TTicketParserTest::LoginRefreshGroupsWithError >> TTicketParserTest::AuthenticationWithUserAccount >> TTicketParserTest::TicketFromCertificateCheckIssuerGood >> TTicketParserTest::LoginBad >> THiveTest::TestHiveBalancerUselessNeighbourMoves [GOOD] >> THiveTest::TestHiveBalancerWithImmovableTablets >> AsyncIndexChangeExchange::SenderShouldShakeHandsTwice [GOOD] >> AsyncIndexChangeExchange::SenderShouldShakeHandsAfterAddingIndex >> TTicketParserTest::NebiusAuthenticationUnavailable >> TTicketParserTest::TicketFromCertificateWithValidationGood >> TTicketParserTest::AccessServiceAuthenticationOk |75.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] >> ExternalBlobsMultipleChannels::WithNewColumnFamilyAndCompaction [GOOD] >> TTicketParserTest::LoginGood [GOOD] >> TTicketParserTest::LoginGoodWithGroups >> KqpYql::FlexibleTypes [GOOD] >> KqpYql::FromBytes >> ExternalBlobsMultipleChannels::Simple [GOOD] >> Cdc::UpdatesLog[TopicRunner] [GOOD] >> Cdc::VirtualTimestamps[PqRunner] >> ExternalBlobsMultipleChannels::SingleChannel [GOOD] >> Cdc::NewAndOldImagesLog[TopicRunner] [GOOD] >> Cdc::NewAndOldImagesLogDebezium >> KqpYql::EvaluateIf [GOOD] >> KqpYql::EvaluateFor >> TTicketParserTest::AuthenticationWithUserAccount [GOOD] >> TTicketParserTest::AuthenticationUnsupported >> Cdc::DocApi[TopicRunner] [GOOD] >> Cdc::HugeKey[PqRunner] >> TTicketParserTest::LoginBad [GOOD] >> TTicketParserTest::BulkAuthorizationWithRequiredPermissions >> TTicketParserTest::NebiusAuthenticationUnavailable [GOOD] >> TTicketParserTest::NebiusAuthorizationRetryError >> TTicketParserTest::TicketFromCertificateCheckIssuerGood [GOOD] >> TTicketParserTest::TicketFromCertificateCheckIssuerBad ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::WithNewColumnFamilyAndCompaction [GOOD] Test command err: 2025-06-25T14:28:40.693676Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:28:40.693807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:28:40.693858Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001502/r3tmp/tmpw9asFr/pdisk_1.dat 2025-06-25T14:28:41.120423Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:28:41.123496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:41.195148Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:41.200030Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861717867785 != 1750861717867789 2025-06-25T14:28:41.261021Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:41.261119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:41.272468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:41.380848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:41.852197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 100:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:28:41.980893Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:42.137520Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:771:2629], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:42.137662Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:782:2634], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:42.138023Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:42.141857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:42.284577Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:785:2637], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:28:42.334681Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:841:2674] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:42.881334Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykqvcgq5yw3bhjd8cpq95xq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTlmNjQxZWItMjA0Yjc0NGItN2I1MjI0MjQtZTJmYmY4YmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:42.995924Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykqvda2f46879ye9m8shqte, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzQzYTAzYWMtOGUwMTdmNWMtZDEwNWUzYWYtMjlkMzk4ZjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.052129Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykqvdbtbhr2pf34qby22y9s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzhjYzJmMGQtYjZiMTVhM2QtZmNmYjVmMDQtODZlN2JhMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.106936Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykqvddh7kaztw91zzh7fjgm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTljMjA4OWEtMmJhOTc2YzctMzQ0MzNjNDItMzBiYjlkN2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.162139Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykqvdf8ammxwbnb319gtges, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjhhYjFiMWQtMjliZWZkM2EtOWQ0OTU3OGMtMWNmZmNmNTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.218335Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykqvdgzfadyyght5xxp6nn0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWQyNzYyMTMtMjAxYmU1NzgtYjllOGYwMDgtOWY4ZjgyMTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.272088Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jykqvdjq8xtx7k0ds6ezsbxz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Nzg0MDEzMjktNDhjMzcwOTAtYjA4YjU5OWYtN2VmMDhlN2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.325778Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykqvdmd4vyy358922hd68bh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDc0YjM3NjAtY2Y2ZmUwNWUtODg5YzEwZjMtZTY0YTcwZGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.380302Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jykqvdp30ew6bk28mv2b47t6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGU5ZmUxOTItZjhkMDVmMzAtYWY3ZDM0NTktNzRjNzhjYjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.435567Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jykqvdqt80pxkh4fkxw9k55z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDg4OWMzNmMtZDBlYmQyNWYtM2JmNDYzYzQtYmVjZjkzNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.495963Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jykqvdsh1e2jvnbdwvk2129r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTQ4OWU2MjMtNjZiYjhkMGQtNjQzNjAxZmUtNzE1ZGFkYzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.555851Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jykqvdvd7jnwy5vn29awxc8k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzM3OGUzMjMtZjNjNTYtZDJjY2Y3Yy0xMTQ5YjIw, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.611951Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jykqvdx911hw55h3f4geyw13, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzMzZTc5NzQtNDhhYjU0YjgtOWJhMDI1OTgtYjg0MjgxMTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.675960Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jykqvdz2b31a05b6jk8p7q7w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2VkMDEzNC0zMjAzZGMwNC0yNTc0ODU2Ny0yOTEwZjczYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.735392Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jykqve1287ct80eyxnsgbths, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDgwMzNhM2YtNzY5NGE3NWQtY2QxYmViZjYtZjEwN2IzMzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.796356Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jykqve2y3th85p1wcja58q21, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWI2OWE1NDUtYzc0MTA0YTYtNjFhNjMwNC02ZmI4ZWYyMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.853978Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715676. Ctx: { TraceId: 01jykqve4tddqjvn45hqkv1y2t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjZmYzQ2MWQtYWQ3YTVlZjAtMjZlMjJjNGEtNGI3ZmUzZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.911546Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jykqve6kbn9h2fdc5vexjcrd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTFmYjkxNWQtMTY4Zjk3MWItZGZhYTg2NmQtZjU0NWM2YzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.967335Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. 
Ctx: { TraceId: 01jykqve8d2wymyxx3089wd8sz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2Y3NzdmNmQtY2EzYWQ4NC1lNGUxN2M4Ni04ZmY0N2Ri, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:44.022508Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jykqvea5b3xvyfmg643z9hah, D ... anner.cpp:120: TxId: 281474976715727. Ctx: { TraceId: 01jykqvh1qbzxn05tzpae2rbvs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTZiYzNmZTItNjExZTQzYTQtNDViZDY1ZGUtZGVjYjQ5NDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:46.855647Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715728. Ctx: { TraceId: 01jykqvh30es6qgwc7bpgvr72w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTk0OTdkM2YtNDZmYWY3YjMtNmM3NzQyZDAtNWEwN2ZlZTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:46.910054Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715729. Ctx: { TraceId: 01jykqvh4d6h8x86he2mc6mmwp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGJiYjlhNWItYzgxNzA0NjAtNjhiYTI3MmMtOGIwMGMyZTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:46.966589Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715730. Ctx: { TraceId: 01jykqvh64ds4nbjc12v6yek86, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDFlODk0ODgtYWZmZWU0MC1mMjRhNDc1Ni0yZGEyOTEw, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.047949Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715731. Ctx: { TraceId: 01jykqvh7wej72awwf1v7b8vs8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjEyZDFlZDgtNmNhOTM5ZGItZThlOGQ4MjEtNGIwNDM0Y2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.092847Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715732. Ctx: { TraceId: 01jykqvhad4f5fkf39b47235zd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDc1OTQ4ZGUtN2Y0MWE1NTctZjJiMDQwYWMtY2QxNDA1ODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.139122Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715733. Ctx: { TraceId: 01jykqvhbs1dej3zqzd7a0td1j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjI3NGM4MWYtY2RlMDI0ZTEtYjdiN2U5OWItODFiYWE3ZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.186789Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715734. Ctx: { TraceId: 01jykqvhd9b95sctbhp3n4pyvq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmUwNTUwOWYtOGM3ZjJhZjgtNmJkYTI5ODMtMTFhNjM4ZjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.237114Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715735. Ctx: { TraceId: 01jykqvher8xmtzf9hpy79phdh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzFiMTk3MWItMjUwOWFhNDctYjM1NDVlMzMtZDYxZjM4NWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:47.282869Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715736. Ctx: { TraceId: 01jykqvhga8gdxz3r823ynyfh1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2U5YTRmYmMtY2Q0N2I4NjctNDAzMThjNGItZGI2MTVmNjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.340778Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715737. Ctx: { TraceId: 01jykqvhht108vvdeefmdnb49z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWYyZGUwNzYtZjQ0Mjg5NC1kY2Q4NTJjLWNhMmViZGU5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.398629Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715738. Ctx: { TraceId: 01jykqvhkk4dm14pww9eeafq2e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDlkOTIzODktYzY3MWE0NjktYjIxNzkzOTYtOWEwZDJlNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.452903Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715739. Ctx: { TraceId: 01jykqvhndagyc1emb4gwtsysh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTM3YWEzNDMtMzg1YzAzYTItODE2ODM0OGItYWY4YmZkZGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.507681Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715740. Ctx: { TraceId: 01jykqvhq36n5c98z4deb9jjqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGM2YmQ2ZDEtMzhlZjY4NDEtODFiN2UyOGQtNTg0MDA5Zjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.561873Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715741. Ctx: { TraceId: 01jykqvhrt0dexnrqwf8cjywv6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzBhOTA2MDgtMTc4NmMwOTUtNjJkZmMzNjEtNDlkMmI5Njc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.622183Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715742. Ctx: { TraceId: 01jykqvhtg9st2j1q15gyk0kjb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzY0ZTJjNTMtNWE0NzIyN2ItOGM3ZTE4NzEtZTYyZWE4ZTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.667777Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715743. Ctx: { TraceId: 01jykqvhwa076d335gbw63c8hh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWI1NWVlNDktNjY1MGM5YzgtYmFiMzdhMGMtZjk3MWFjOGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.716387Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715744. Ctx: { TraceId: 01jykqvhxr6ka158rvms74egcm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmVjZDkxZWMtZDUyZTNhOWItYWIzMTkzN2EtNDBjMDFlZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.766300Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715745. Ctx: { TraceId: 01jykqvhz99h4g57csmwex8dv5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGUwZTY0NGEtZmJjMmQ1ZGMtYjY3ZDM0YjktZDA4ZjJhMTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:47.816782Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715746. Ctx: { TraceId: 01jykqvj0w69e4cy7v0v53jta6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTBmZTI2YjQtNWU1MzRlZjAtNDk5ZTQ3ZTEtMzg4Njk0MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.871061Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715747. Ctx: { TraceId: 01jykqvj2feg9bx09rdyhp6s70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTM5ZWJhYTEtNTJiODllZTktODRjN2M4ZGQtYzAxZDU3MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.924917Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715748. Ctx: { TraceId: 01jykqvj457whsq8g25n38exmw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThmYTU4NGEtN2Q0NmViODItYjdjNzUwNmQtZDViZThkOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.977571Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715749. Ctx: { TraceId: 01jykqvj5s3510raqyca175yx3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2FkMDdkMzEtNWVjZGI2YzAtODE5MzliOTUtNGFjY2QwYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.027128Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715750. Ctx: { TraceId: 01jykqvj7g9yn6zhphtptx78ag, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDE4ZTlhMWItNzdjODQxMC1kMTcxOWUxNS0xNDQ2ODY0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.075696Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715751. Ctx: { TraceId: 01jykqvj918jcne6fxpfbvcqjs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmZhOGY2ZTItNzg2Zjk3ZjMtN2NjMjI5OGQtZmE3M2Y3Mjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.143869Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715752. Ctx: { TraceId: 01jykqvjah644dyrvffy2pt1rc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTliYTM2MGMtODY3OWRmM2QtZjVmN2JjYmQtYmUxZTU5NmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.186232Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715753. Ctx: { TraceId: 01jykqvjcn2v186c8sebpktart, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGM5ZTE0ZjMtYTkxNDM4N2YtZWJjYWJhNGEtNWY5MjVjNjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.234623Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715754. Ctx: { TraceId: 01jykqvjdzdcqfve9xs78yzh92, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDdlMThjNzQtMzljMjc0MmItNmVlZGQxYWItNmQzNzY0NmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.278694Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715755. Ctx: { TraceId: 01jykqvjfg10zw9nqz4j4yv15g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmQzZGY3ZDctNThiNDNiMmMtZTZhYTE4ODItNjQ0Y2MyNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:48.324332Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715756. Ctx: { TraceId: 01jykqvjgvfkx70nv0rgh9c98j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzZkMDA2YzQtNzAwOTYzNDItYWNkN2ViYmQtN2E2NjYzOTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.367087Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715757. Ctx: { TraceId: 01jykqvjj9ap2y5zjxmrted5ag, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzZkOTFiMC05OTNlZWIyZi1lYTBlODZhOS0xMDc3OTZhZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.412553Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715758. Ctx: { TraceId: 01jykqvjkm0kvckewgyh289pq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThiZjE0NC04NTA1NTY0Ny05N2ZkOWU2MC1kZDgwNTMxMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.463599Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715759. Ctx: { TraceId: 01jykqvjn20p822h9w7j6p19nh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzVlMWEzZTAtNDk5NDU0YTgtYzIwNzViZWMtNWViN2RmZjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.158389Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715760. Ctx: { TraceId: 01jykqvjxx368px6y2rzkmr0zz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzkxZjk4Y2MtNTcxNDgyZDctM2IwNzQ2YWEtYjY3YzZjYWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::Simple [GOOD] Test command err: 2025-06-25T14:28:40.538634Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:28:40.538786Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:28:40.538842Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001509/r3tmp/tmprHzEsi/pdisk_1.dat 2025-06-25T14:28:41.122567Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:28:41.125475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:41.196221Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:41.201202Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861717574017 != 1750861717574021 2025-06-25T14:28:41.262167Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:41.262285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:41.273701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:41.381310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:41.844191Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:699:2580], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:41.844302Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:708:2585], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:41.844431Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:41.849324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:41.902010Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:42.018208Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:713:2588], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:28:42.083855Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:783:2627] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:42.881369Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykqvc6w02rz92y7egd22mbb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmIxMTE5YTgtNjE2ZWNmNzQtYmQ0NGI4ZDMtYjA1YTgzYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.001176Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykqvda2fxpyb7ytjsvhz0dj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjAwM2M5Yy1mMjUyYmZjNy1kNWVkNjU1MC04ZWI2MjhiOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.051676Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykqvdby9qwgnqg5n85zdjtk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWQ2MjFmZjQtMjQwYTFiOTgtMjc2YjkxMGMtN2ExY2E3Mjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.106940Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykqvddh26rcfk1vef7g1qm5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzAzYTJiYTMtMzExZWJiMzMtYjQ2Y2U4YzItZWFhNjJjMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.158705Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykqvdf8179gbvpsmnmt1r70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjU1ZTMyZmQtOWZiYzRkOTYtM2M2Y2Y0ODktM2E4OTQ1N2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.213904Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykqvdgw70pvtxwnwbz8daz9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmM3YTQ0MWYtZjk1Y2E1YjctZDhiNTBhNmUtNjE3ZjllZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.263291Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jykqvdjk9af7sa9dn0stszg7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTc5NTcyMmQtNmE1ZjU2NGItZjkxZmZiYWUtOTExNzVmMjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.319009Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykqvdm4b6k31fh7g43nvp32, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2YyZGE4MDctYjhlOThhNTAtNmQxZjZlMWYtZjFhNTYwOWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.370932Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jykqvdnv260jqtb84dr8y67d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNkYjc5YWItOTY0OTg2YjgtZDFjZTgzNjMtNjQyMTFlMjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.420330Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jykqvdqf9d12pdc2n2237pk5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODg1M2FhYWMtOTQ4NDgxMjgtNDk5MzMxMzktZDVmNjJlNTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.465867Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jykqvds1594statg9hxvvt3j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzkzNTU2YjItNmFlMWUzMzctNGI4MGMzZTItZGZmYmI2MzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.512220Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jykqvdte653mpjeyaee32g93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGVmMDE0ZTMtYjkxYWE5MGItZGU3MTRjNzktZTllNjE1OWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.567622Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jykqvdvxcd3q0bqfdyc8ttw4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjIyYTk2OGQtZWY4M2Q4NDctNjYzNTNmOTItMmI5ZjkyYjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.617262Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jykqvdxm69d6f7b5pd5wsm0z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjljZDJiNy01NDMwNjc0ZC1jN2M1MDM5Mi1jZmM1NDhhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.671928Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jykqvdz78mxrpzf5f1whfdaw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYzODgwMDktN2UzYTdmYTYtZmNiZDk4NjEtZTg1Mzg2Y2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.762861Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jykqve1t8z8mn9h1q19vyf9d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWJmNmYwODctNzVlYzBiYjMtMTBhNTBkNzEtZDAxNDYxN2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.815683Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715676. Ctx: { TraceId: 01jykqve3sfa6yykk1fjaex4ah, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmMxNGIwM2ItNjE5ODg0OGUtOWMzYmQ0NDgtODMyN2RhMDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.870503Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jykqve5c94xzvf5e76vxva4c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yjg2NmU5MDAtYTBkYmYwZDgtZTBkODdkNjItNDJkMDIxYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.920788Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. 
Ctx: { TraceId: 01jykqve73361g2f1k29ssncg1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmU4MDhmOTctMWY1MjA5ZjMtOGJiM2RhY2ItOTlkNTU2M2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.968358Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jykqve8p7sfkrxc975q1h7jh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTliZjM1MmMtOWM0YmFmZGQtYmZkNTBkMTYtYzM2MWVhYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:44.013241Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jykqvea4b32vajfg9m9nr9w8, Database: , DatabaseId: /Root, Sessio ... anner.cpp:120: TxId: 281474976715727. Ctx: { TraceId: 01jykqvgv05n0mn7vaw0gczbhq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmM1YzUzOTQtYzM4M2RjMTgtNDQ0YzU2NWEtYzliYzNkNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:46.669295Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715728. Ctx: { TraceId: 01jykqvgwt8svyz23z37c2xg0z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2NkODA3OWYtYWI2NDJiOTEtZjUwOGYzZjctOWVlYjFhYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:46.727639Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715729. Ctx: { TraceId: 01jykqvgyk4akfg14jqn3yv5xq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTMzOGMxMGMtMjI0ZTM3ZGYtZWYzZjJmYzQtNDNmNGZjNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:46.785031Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715730. Ctx: { TraceId: 01jykqvh0d980yyhrnvwm4rt57, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTY1MDlhYjAtN2Q1YjMwMTMtOTIzMTU3M2QtY2MzMGIxMjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:46.841960Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715731. Ctx: { TraceId: 01jykqvh26cwm9dzb1nbn5phwt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTQ2NmQ4Mi01NTBiNzAtMzI1NGNmYS1kNjhjNzliNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:46.916952Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715732. Ctx: { TraceId: 01jykqvh4kcc12k7gtjf0n8df6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTBmYzZlMzEtMjIxMDAxMGQtYmNhZWZhY2EtZjhmNDgzMDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:46.974452Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715733. Ctx: { TraceId: 01jykqvh6a37krpvvn24qer9pa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjYyODg1NDktMTE5ODJhNzEtZjU3ZGY1NzMtNzY1MDQ5M2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.031925Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715734. Ctx: { TraceId: 01jykqvh844ckde2z7h06k2983, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTBmZjhlZDYtODk4Y2ZjMzYtNjAwYzQ4OTAtNzY5N2E0OTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:47.090492Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715735. Ctx: { TraceId: 01jykqvh9xbfcxp4a1d0fz263q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWQwMTMxYWMtM2U5MjI4ZGUtMTUzZjVlMmYtZmE2ZmUzOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.148642Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715736. Ctx: { TraceId: 01jykqvhbr39kemh2sw0envcsk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzQzNmM4NzEtODk0ZjE1ZjctZTMyMDcyM2ItZWJiMDVkM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.206218Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715737. Ctx: { TraceId: 01jykqvhdkbj2cjs6vq1vzt4je, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2YwZTg4ZjItNzA0NDEyZDEtOWExMWU2LTg1MmI2ZTA3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.264332Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715738. Ctx: { TraceId: 01jykqvhfbc2wsfmtpx862dcm0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjQxNzYzOTktM2FiYTAwMjgtNjA1ZWVkNWMtMzBjMGU5YmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.325972Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715739. Ctx: { TraceId: 01jykqvhh66brfmjtpcp33f11h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2JiYWQ0NGUtNTRlNGFjMDktYTNjZjkxOTEtN2JhZTJlMjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.387918Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715740. Ctx: { TraceId: 01jykqvhk4d0chwpc5wpakvk6q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzI1ODM5NDEtM2JkYjAyYWQtODAwOTM3ZjQtOWVhY2EyOTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.469921Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715741. Ctx: { TraceId: 01jykqvhn215wrvwbzagr9g4fg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yzc5NGRmNzEtNzAzMzdjZmEtYTgzMzllMTUtZTliMGNmYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.528527Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715742. Ctx: { TraceId: 01jykqvhqk3eyb6xc5p7zfgq24, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzA3NjU0MDAtOWU1MzA1ZWYtZDhhZjQwY2UtN2FkNGJjNjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.586749Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715743. Ctx: { TraceId: 01jykqvhse90eyqagsm1z2fay1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yzg3YjE5ZjQtZGJmYzA2NjUtNTkzOGFlMDAtMmVhYTI2MDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.644552Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715744. Ctx: { TraceId: 01jykqvhv81hn6phahxjnk3tqz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWEwMmE2MzgtYjQ5MzdkZGItZjBhYzY4YjgtOGQzNTg3NDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:47.704816Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715745. Ctx: { TraceId: 01jykqvhx244hy0x7ce5pp2xmx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2JlZDg0NGMtNWIxMmQ0NzktM2I0YzZhMDAtOWVhMmZkM2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.763362Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715746. Ctx: { TraceId: 01jykqvhyy5b1e6bphe19xqme0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWE5YWRlOWEtNzMyZmZkZDYtNjY4M2VjOC0xNDM1MWVkNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.820464Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715747. Ctx: { TraceId: 01jykqvj0se4v5pea2m89djb8k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2RlYzE1MjctYWZkZGMzYmYtNzc0ZmU4ODItMzg3MmRjZjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.874935Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715748. Ctx: { TraceId: 01jykqvj2j3q2w0avbwr6r2b78, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGYyYmFjNC0zYzVhYzA3My0yM2JkODczMy1hYmE2MDU4MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.947660Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715749. Ctx: { TraceId: 01jykqvj49fxgkzp8cbh05x3cr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDU3Yjg5ZDgtNjZhYzliMDctODgxYzJjZWYtNmM3YWUxOTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.997149Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715750. Ctx: { TraceId: 01jykqvj6jd8j6bp5p57rbsz6g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWJkZjc0MGQtOWNhNjNiODYtZjE4NjhkZDUtMmMyODQwNDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.046664Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715751. Ctx: { TraceId: 01jykqvj833y46ab5c26rnh8vd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTc2NGY5MzItZjg5YTQ0ZmItNThlMTZmYmUtYTNlZWRmNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.102570Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715752. Ctx: { TraceId: 01jykqvj9mameg6k16nt4zws7p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjA1OGIzNzYtZmUxMThjODAtMmNlOTFjNTktNTVhN2YxOTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.149534Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715753. Ctx: { TraceId: 01jykqvjbd7ndtmjbbnsbcck52, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmQ2ZDViZmMtYWE1ZDVjNjYtNTUyOWZkMS03ZDU1Y2Q1MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.195921Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715754. Ctx: { TraceId: 01jykqvjctfyys5qv0w8pb1hbn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzA3OGM1NWEtYzQxZjM3MGQtNDUxODI0ZDMtYTVkZjliYjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:48.244634Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715755. Ctx: { TraceId: 01jykqvje95evanxbyzvttm2kh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDM5MGRlNi02YjQzMWFjZC0zZWNmMDg5Ni03NTRmYjgwZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.301345Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715756. Ctx: { TraceId: 01jykqvjftey0x3s33jf4xhgyb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU3YjM3ZTYtNDNkNGEyZDQtMmU5OGUyNi1hZWEzYWU1NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.358504Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715757. Ctx: { TraceId: 01jykqvjhk4maxwr7tt89jfb0z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWQ3ODJkOWYtM2Q4Nzg3ZTEtMjUyMGJlOTYtNzM2MGVkMmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.412475Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715758. Ctx: { TraceId: 01jykqvjkccpkdq61dgkjb3z72, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTAyZTE2YTgtZTQ5ODk4Y2EtMmRmZjE1OGEtZmRjZTMwYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.479578Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715759. Ctx: { TraceId: 01jykqvjn1a3wcx2jx5rtf4b3g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTRkMzUzZDktN2VhYWJkN2QtNTQ2MTZlOWEtOGZjODM4ZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.158308Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715760. Ctx: { TraceId: 01jykqvjxe2rm1xvvksfjc6btr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Mjk4ZGZlMmUtMWJlZDdiYzItYjdmMGE2ZWMtNTVmMTJkZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::SingleChannel [GOOD] Test command err: 2025-06-25T14:28:40.585999Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:28:40.586112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:28:40.586147Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014fe/r3tmp/tmpBsejv5/pdisk_1.dat 2025-06-25T14:28:41.122799Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:28:41.126003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:41.195210Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:41.200229Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861718115945 != 1750861718115949 2025-06-25T14:28:41.262123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:41.262251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:41.273699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:41.380847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:41.844025Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:699:2580], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:41.844147Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:708:2585], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:41.844233Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:41.849214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:41.902997Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:42.017468Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:713:2588], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:28:42.092917Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:783:2627] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:42.881341Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykqvc6w9kjrzwkm8hbmb82a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU0MzZkM2MtNTJmYmI1M2EtM2Y4MDE2ZGMtYmU5YmM4MGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:42.999729Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykqvda2d1ct5121fkb6szk5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWJmMTZiNGQtOGEzODgwOS0yZmJkNDU4Ni1kYThiNDUzNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.056408Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykqvdbx0efk3dryda6kgkes, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmEyMmZiZDctNDUwYjk2YzQtNjJlNTA1YTktMTkwMzU3OWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.114230Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykqvddn3zfmxq7eycq13419, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWFjZmRmZWItMzcwMjI3OWEtZDlkY2Q2NjktZTRlYjQzNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.170415Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykqvdfffchpytry45re7bwh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg3MTJkMGEtOTI5YWIyNDQtZDA1YjM3ZGMtOGMzZDIzYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.227551Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykqvdh7bwzarhbeje39rrgd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWQwYzliMjQtOGZkZmNlYTEtNWFjYmU3MTEtMzNhOTZmZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.281749Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jykqvdk02fz56e900s2vpvpx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmRkMTRkYi0yNzc2YmQ5MC05ZDgwZDkzYy1kOGZjMWI4Ng==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.333167Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykqvdmp2nj0341as4j6hk93, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yjg5ODNjMzUtMTUwYmQ4ZDctNTVjYzBlYmItOTFlNTU1OWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.386224Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jykqvdp9fqj14h74akd4xcs6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTRiNmFiZDktZGFhNDE0ODItYjRlNGRhNDYtNWI5ZGRlYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.447599Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jykqvdqz6f077xvy3p17s0hy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWI1NWE0ODYtYzViZjU1NjMtYjM1N2UzMzMtOGVmMzhiYmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.499912Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jykqvdsx16r1f48xvtxa7sq9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWU0YTY3YmMtMTZmYWUyNjItZDNlOTc4ZTEtYzhmNmE4N2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.550797Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jykqvdvhedb5pw2emfn0cbva, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDYyZmVkNDMtNGNjZDE4NDktYmM3MDkxYjYtNjIzYjlkZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.612187Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jykqvdx4at57gyqqns0trdwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmRiMGFkYTctNzM4MWRiMzgtODA3YmI2YTQtMWQxZTdiMzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.677980Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jykqvdz22n0axgjmvfq4mygj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDAxOWJhYWItNzUwNzVjM2ItZjJhYjlmYjUtZmY5MzEwNmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.742807Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jykqve142nq40e0frzstbm5t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmIyNmY4MGItZDU1Njc5YzQtYWJmZWE1NTEtYWUxZWEzN2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.809509Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jykqve35b4576ne0yf9m3fag, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Nzg1NjNmNzgtMzEyMTZhMzQtZTEwYTcyMTMtY2MwODUzYjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.862011Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715676. Ctx: { TraceId: 01jykqve572aqwcmb1tvhxgemy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzA5MTA3NDktYTk2MzAzZmQtOTA5NzVjMzItOTQyMGU4NDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.910982Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jykqve6t3xs3s06gxz4rjept, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTk4N2VhZC0xOGUxNTg2Zi03MTcyODBlZS03Yjk1MThmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:43.959416Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. 
Ctx: { TraceId: 01jykqve8bd8wp42xzsn6prcjp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2ZlMzY1ZjktMjcyYTQ0MmEtNTNjZWYyYzQtMzRmOTUwYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:44.022763Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jykqve9x28582advy9zt6q63, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWJiMDBhOGQtODFlMjY2MzAtNmE3ZTg5YTYtM2JiZTI4YWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:44.085375Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jykqvebwc4sc549dz9syvnjn, Database: , DatabaseId: /Root, Sessio ... r.cpp:120: TxId: 281474976715727. Ctx: { TraceId: 01jykqvhgafqn58hscd4k3mznv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWE5NWFkODYtNGFiMzNjYTktOGU2YTlmZWUtMjE0YzMyM2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.358724Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715728. Ctx: { TraceId: 01jykqvhjbeycpsf8p6t8gcj7w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2IxMmI5ZS1iMWQwNjdmYi0yODUzZjg3Mi01YjQ4MjRmNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.416610Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715729. Ctx: { TraceId: 01jykqvhm621mw35zhyekr5qg0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzVmZjBlYzAtOTBlNGVhOC05N2QwZmE1YS0yZTBjMTBjZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.473945Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715730. Ctx: { TraceId: 01jykqvhnxc8gzkz25fnb6gj7v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2Y2ZDM1YzQtM2EzOTU1ZTktMmZhMGUwMjctYWQ1M2M2ODU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.561670Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715731. Ctx: { TraceId: 01jykqvhqr4k85a6wbqhjz28pf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTcxN2E5ZjItOTM2ZDVkMmUtNDkxYjIzYTUtNTYyNWMyYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.626400Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715732. Ctx: { TraceId: 01jykqvhtg9561tj29bcv72twx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODc3MjZlYzMtZTlhY2U5ZTMtNjA4YjczOGQtOTI1MWEwMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.695149Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715733. Ctx: { TraceId: 01jykqvhwgb375h21sxzcftnna, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2ZkMThkODctY2I5Mzk4Y2MtYjk4NjcxN2ItMTdjY2U1OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.756407Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715734. Ctx: { TraceId: 01jykqvhyn91x7j1ge2r2v4nsr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmZiNTNlOWQtN2JlMzU1YWEtZmE3NWExNWQtNTVjOTVlOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:47.823988Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715735. Ctx: { TraceId: 01jykqvj0n14f4fgas4wmjektb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGJlYTQxYTAtYTE1Y2FjNDItN2Q4ZWMwNjctZWEwNTI0MmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.878523Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715736. Ctx: { TraceId: 01jykqvj2n4k9587mf31xsze2z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzcyZjc1MzctZDIzNjJmOTYtZTU3NmZiNjYtNjQ5MmI3YTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:47.939347Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715737. Ctx: { TraceId: 01jykqvj4cdk4h9699v84x29rb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjdkM2U1Mi03ODVkOTFmMC1iZTdiNDY3Zi1lZTM4MjQwMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.001826Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715738. Ctx: { TraceId: 01jykqvj697383n2xxdacg44zw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTIzY2NiYzctNjg0MDQ5ZTgtYjczM2ZiMTAtNmZhM2JkOGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.063196Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715739. Ctx: { TraceId: 01jykqvj88a96t589fsc8mha2t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2IwNjMyMzAtODU3YTgzMTQtM2IzYTE3NGMtYWI1ZTZlZjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.145630Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715740. Ctx: { TraceId: 01jykqvja4akrmb4m619r2pv8e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWViNmVlOGItMjBkNjE3OWYtOTk0NTdjNjItNzFmMzIxNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.206480Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715741. Ctx: { TraceId: 01jykqvjcq5y47xa42tzn57rp4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2E2NjgxZTMtODNiYTU2MWUtN2UxMjM3NWItNGNlNjc5MGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.266435Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715742. Ctx: { TraceId: 01jykqvjemcm5fmfa0ypd665yy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODk4ZjgxZmQtMzhkNTI0MDQtMmExNTFkYmUtYWRmNTI3MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.328881Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715743. Ctx: { TraceId: 01jykqvjgg0e544bre9nxmypmn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWQ1MThkYTYtZWRmNjFmNTMtOWE5ODFiOWEtNWE5YTZkMWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.391133Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715744. Ctx: { TraceId: 01jykqvjjf1wgezt1w3pyaeydh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTMwN2UyLTQwM2I0YjNkLTg5YWRmOWNiLTllOTU2OGM4, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:48.449279Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715745. Ctx: { TraceId: 01jykqvjmdaytrcrrm3mg5j3br, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzIzMjg0YjItYTcwNWZiYmQtZWM1ZjAxOTctZTlkZTc0NWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.504233Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715746. Ctx: { TraceId: 01jykqvjp72q6jtabyd1bftage, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDY4N2QzYTgtMTlhZDEyMmUtZGQ0MjgyOGEtY2UwMjlhMWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.560275Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715747. Ctx: { TraceId: 01jykqvjqw2ws8gx9gt5nacpa4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmM3OGRkMWEtNjc5YWMyMjgtYjg0NDM4MmYtNzhkMzFmYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.634016Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715748. Ctx: { TraceId: 01jykqvjsndr9tnng0m8d8zzkr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTg2MTQ2Y2MtNjI3YzAxMDYtNTUzNTQ0ZWMtZjFmMTVhNzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.682010Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715749. Ctx: { TraceId: 01jykqvjvy36xnzreexxqvmsz9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzIyYzYxOTQtMjk5Nzg2Y2ItZGExM2I4ZWYtN2VmMDlmNjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.731324Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715750. Ctx: { TraceId: 01jykqvjxed09jmt7vxk5qvfmx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThlYTNjZTEtMjYyYWVlMzYtNjk2YzE4YzgtNjc0MDhhOTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.783377Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715751. Ctx: { TraceId: 01jykqvjz01faz99fvsygrvs4r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2NkMGQ5MGYtYTRlNjAzMy03MmJmMGUyMS03NDA5YjE0Mg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.837253Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715752. Ctx: { TraceId: 01jykqvk0natw3h6etng7yy22q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTMwMzQ1YmItNTcyZDAxYTUtZTNmMDlmM2UtZjE4NmFlODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.891991Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715753. Ctx: { TraceId: 01jykqvk2c27h73mkj56xetvx6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTI5Yzg1NmItYjE2OTA4OTAtMzU4OTUxMGItNzc2Yzg2YzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.932538Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715754. Ctx: { TraceId: 01jykqvk4034zkr3nkx9cd7cmh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjE2OGNhMzAtODcyMjk3NmUtM2M5OTAzMzctZTI2Zjk4OWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:48.989438Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715755. Ctx: { TraceId: 01jykqvk584zcwnc1833f7qepw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjAzYTg4ZWMtZDEzMTJiNC03MWZkODI0ZC1lMGY4ZjE3NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.046978Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715756. Ctx: { TraceId: 01jykqvk73frcahqw7hsws2nq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcwNmYzYTItNjM0ZmIzMzYtY2NkODVlZS1hN2Q5YjI0Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.109795Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715757. Ctx: { TraceId: 01jykqvk8x9tt6e48g87zah6az, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTlhY2ZiYTYtMWJiNWU1ZWItNDg5ZGY3NDUtZDNiMzE2MWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.195276Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715758. Ctx: { TraceId: 01jykqvkawa3ntsvhwnf5ye158, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzBiMjQ3ODMtNjVhMGRlNTYtNDhhMGM3MDItZWVmMDEwNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.250333Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715759. Ctx: { TraceId: 01jykqvkdh3q45pse35sy2vydq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmU0YzcyMDEtODYxMzlhZGUtYmUxODcwZDAtNWM5MTI1ZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.390748Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715760. Ctx: { TraceId: 01jykqvkfkar5j9hzgjrz30sf4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjUxOTc4MDUtMWQ0MTFjMjktZjY2NjY3ZGYtNjZhNTFjOGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TTicketParserTest::AuthorizationRetryError >> TTicketParserTest::TicketFromCertificateWithValidationGood [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersGood >> TTicketParserTest::AccessServiceAuthenticationOk [GOOD] >> TTicketParserTest::AccessServiceAuthenticationApiKeyOk >> ExternalBlobsMultipleChannels::WithCompaction [GOOD] |75.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/driver_lib/run/ut/unittest >> ExternalBlobsMultipleChannels::ExtBlobsMultipleColumns [GOOD] >> THiveTest::TestHiveBalancerWithImmovableTablets [GOOD] >> THiveTest::TestHiveBalancerHighUsage >> TTxDataShardMiniKQL::TableStatsHistograms [GOOD] |75.8%| [TA] $(B)/ydb/core/driver_lib/run/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpScripting::StreamExecuteYqlScriptData >> KqpYql::ColumnNameConflict >> TTicketParserTest::LoginGoodWithGroups [GOOD] >> TTicketParserTest::LoginRefreshGroupsGood >> TTicketParserTest::AuthenticationUnsupported [GOOD] >> TTicketParserTest::AuthenticationUnknown ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::WithCompaction [GOOD] Test command err: 2025-06-25T14:28:43.208908Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:28:43.209102Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:28:43.209172Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014e6/r3tmp/tmpNzcygx/pdisk_1.dat 2025-06-25T14:28:43.522677Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:28:43.530476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:43.564248Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:43.572304Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861720625402 != 1750861720625406 2025-06-25T14:28:43.618498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:43.618630Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:43.630318Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:43.712506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:44.075669Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:699:2580], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:44.075814Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:708:2585], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:44.075895Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:44.080494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:44.130771Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:44.246154Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:713:2588], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:28:44.311733Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:783:2627] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:44.562279Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykqved98102ry727t4y8d33, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Mzc5YTlmNi1hYzg3MDZiLTI2Y2QzMjUyLTJhN2M4MTQw, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:44.630986Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykqvex66ez0kdnrp2keq2ky, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjYyYTk4ZGUtMTg0YTVjZGYtMzY0NWFlNDktY2ViN2RmOTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:44.688016Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykqvez6fsghbfzt9wa227jq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODFmY2I5MzktNmYxZGNhYmItMjlkNTNhYTktN2M1NThiOTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:44.742980Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykqvf0zfmte2jvrrhkka9bv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDZmMjNjYTctYTI1ZGFlYzctODI1NjJhYTItZWUyZjcyZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:44.804031Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykqvf2nf7r5dd75f8q65jcv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTBhOTM5OTgtMzFiOWRiZjQtODgwZjY4YTYtNDYzYTUxNWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:44.862382Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykqvf4mde1qk80h3b3xhf5t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTc5ZDk2M2EtZTJmYTk5ZDktMjE3ZmFjZjMtNjU5ZDYxMWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:44.921007Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jykqvf6e8sr61dd1frxk1zs5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTc0OWMyNGItMzEwNTYzOGQtZjVhZTEwNGItNTUwZTFiMTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:44.971316Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykqvf89ftahnrdkfv80qean, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODU2M2FmZC00ODcxNjVmNy00Nzg0OWM2MC1lN2UzMmY4NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.026846Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jykqvf9sdd2kd31qtfp7p6hx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTZhN2Y0OGItMTgwZDJjNTctMmU0OTRhNmQtOTY2N2VjYjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.084270Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jykqvfbjbbwp6fdvc20jxv0j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDk2ZmVjMzUtZmY3NjI3MDMtNmFiYzM4ZTUtOGVmZGI4YTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.140126Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jykqvfdc50aqpp9xt1g6ny07, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjM4YzAwNC1mMjNmM2NmYy1hZmIwZTBjMC05Y2YyMDA4Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.195950Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jykqvff44sxa378kwkakz6pc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2Y0YmFhNzMtNTlhMDk3MjUtYjFlZDJlNjYtOGE5NDU0OGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.270823Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jykqvfgy34jrev2cc2jn70rh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTkwZWY1OTctNGY0YWIyZmItZTBhZTVkMWItNjg2NjE4NTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.328248Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jykqvfk7btwvvjcy6c8kjs2q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDVkYzBlYWEtODczMTNiMjEtM2QwNTAyMzgtM2Y3YzM2MzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.382845Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jykqvfmzawvgb0fd3px0akvn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTZjZjUzYWYtNmYxNzFlMGYtNWZiMzNmYzgtYTMxYWUzYTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.448368Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jykqvfpqfn748zmk0yv5nhs8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjRhZTdkNzMtZDVhY2FhOTAtNmJjMjU1MWYtNzJiZTBjMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.507274Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715676. Ctx: { TraceId: 01jykqvfrqbap644p9xdqxchat, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmJlODBhNGEtYmI0MjgwYjEtZjExZWU5OGMtZjkwMmYzNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.566858Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jykqvftj65p1qw0r85bx46rf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2MwZWM5ZTYtMzVhZDRlNTUtZjJiNzM1ODMtODNhMjVhMzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.628807Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. 
Ctx: { TraceId: 01jykqvfweecpbwsdrdcpe3bng, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGNiYjgwMDctOGQwMDYxMjYtZWI0NGM2OGYtNWI3NTJlMzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.702786Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jykqvfyddmzsbc5vrebv5g4e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Nzg1YjE5ODYtNTNmNTI3MjQtMTI3ZTU2ZDUtMjQ0ZDBhM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.767739Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jykqvg0rfb448b0nwnc76ehg, Database: , DatabaseId: /Root, SessionId: ... : 01jykqvjzpaymzdsxf0jenp5dx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGM2NjZkZjItMzAyNTIwZGUtZWIxOGJlNDktNjNkNjY4NTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.872635Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715729. Ctx: { TraceId: 01jykqvk1q3td51vey94e93ye1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjE3ZjQzZDMtZGU5ZDBkMGYtOWQ1ZTYwN2MtNDEyMWQ2N2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.926447Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715730. Ctx: { TraceId: 01jykqvk3s9fpda78xfmav0mzh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjgxYmI3NC0zZmE5YjdkZi01MDYyNGFhNy04Y2I2YjFjZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:48.989808Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715731. Ctx: { TraceId: 01jykqvk5e9nppjwsztk2zm39g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDU2MWFmZTEtNmZiY2ZhZi05NGY3YWU5NC1mZDRiYzE4Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.057596Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715732. Ctx: { TraceId: 01jykqvk7e8jrbe1dg4drvw9xb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg2MjY3NjUtYjVmMjNmZTMtYTQyYzA4OC0zZDAwZmI0, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.126361Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715733. Ctx: { TraceId: 01jykqvk9jf7z6e2rmy2cmy2d7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjljOGYxNDEtMmQ5MjZhYTItNjE5MWY4ODQtOWUyNWYzNjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.231046Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715734. Ctx: { TraceId: 01jykqvkbq3ekee1dm4ktsb6bz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTQ1ODExMDYtM2VkZDU0MWYtYTA0NWFiY2MtNTJiZmU5Y2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.298711Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715735. Ctx: { TraceId: 01jykqvkez5kmn6j8aevaj58vw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yzc0YjBkNGYtZDU3ZWFiMDYtN2RhOTVlMzgtMmQzZmNjYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:49.353841Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715736. Ctx: { TraceId: 01jykqvkh243f8k9g839yqk54z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWE1OWEzZjQtNmUxMTg1YmUtNTg0ZTU5MWEtOGY1YWI5ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.406447Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715737. Ctx: { TraceId: 01jykqvkjtd218q1er4cctga6m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzY4ZTg4MGUtMmNiOGNhNTctYzI1ZTgxNDYtNWY1Zjk3NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.461459Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715738. Ctx: { TraceId: 01jykqvkmg2k9ajpdbnr5sjp6j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY2MDdjNGUtNGY3NTM4Y2UtODVlYmNlMmQtODVmMWE4Yjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.516081Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715739. Ctx: { TraceId: 01jykqvkp40gz5enfq2m49y7an, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmJmOTE3M2EtNDUxNTQ3NDMtZjBlYjhhNGQtYTE4M2Q0ZWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.580142Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715740. Ctx: { TraceId: 01jykqvkqw363jkwfkv722eyx5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWJmMmUzZTItYTlkZDU2M2ItOGI5NDBkOGEtNzdlNTZmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.643691Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715741. Ctx: { TraceId: 01jykqvksw38ntne24ndvs14mh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjE3ODRiZjAtODE1MDg1M2MtNjVhODY0YzMtMjM5NGY5MDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.699882Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715742. Ctx: { TraceId: 01jykqvkvw1ysf0dzq8yrh9784, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzYzOGRiN2QtNjNjYjNkM2QtYjM4NDJiOWYtYmEyM2NhYTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.768109Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715743. Ctx: { TraceId: 01jykqvkxk3j7f37jgjmn2jb8m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjQ0OTQxNjgtNzgwOTI4OTYtYTgwYTE0YzEtYWI4YmJhMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.871814Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715744. Ctx: { TraceId: 01jykqvkzv23gsx99q08hfbypz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWNiNmRjNWEtMzU0ZjE3MTYtNTU5NTc2NjItYjgzNjdiZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.937760Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715745. Ctx: { TraceId: 01jykqvm334hkn0zmgwkrn3stw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjBiMWY4NTAtZDJmN2FhMjMtMzc3YWVmMTgtZmU0OTMyMmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:50.001950Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715746. Ctx: { TraceId: 01jykqvm511tentyvsfy8caxh8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Mjc1YjY4MWEtZTgyZDVlMDktOTVmZjViMDItZDRmY2Y2YWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.064692Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715747. Ctx: { TraceId: 01jykqvm72fa2802kcm263ypkk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWMzYzE5YjgtZDRjOTBkNzAtZDkwNWI4NDYtM2NmZjdkMmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.128194Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715748. Ctx: { TraceId: 01jykqvm905p6zjejbfgj88mhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzFjNjQyOWEtYmJlZDNlYjAtODM5OWZjYjItN2NjOTA1YzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.191285Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715749. Ctx: { TraceId: 01jykqvmb01wgnxn42j5s41e22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzYzOGE0YjctNjA2NmViM2EtMjI0YzYxNGYtZjA2NjE0MTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.261280Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715750. Ctx: { TraceId: 01jykqvmczcwx26daz0rsd2qq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTk3NmNhNzItNDRkZTcwMjAtNzFkMzQzYmMtYTM2YTQ2ZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.335582Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715751. Ctx: { TraceId: 01jykqvmf91z6sf5qff0rrheha, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzYzNmYwNzQtZGU3MWJlNGItYjc4NzQ0NTktY2I4YTg5YmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.399690Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715752. Ctx: { TraceId: 01jykqvmhh0gk9cz4n3pye3rg9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODFjZjY3NWUtM2I2NjYxNWYtMjQ2MTNiMGQtZjM5ZWFmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.460736Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715753. Ctx: { TraceId: 01jykqvmkf0tqr82ey2s600qfp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmYyYWMyYzYtNGJiZmNlNDEtYjdiOWU3NzItNDc0NzMyMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.556528Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715754. Ctx: { TraceId: 01jykqvmne3d4p2wh6y0k3kws6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTcyOWZhOS05YzE3YzMzYy03ODc0ZGNiNi1jYzcwN2M2MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.623737Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715755. Ctx: { TraceId: 01jykqvmre2nb3n6svm8jtqzjt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmJjZmM4YWQtYTdhYjA5MDAtN2RlYzk3NGItYmRjMDU0OTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:50.690705Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715756. Ctx: { TraceId: 01jykqvmtj049nwpp42jawz3qg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGJkYWZhZTAtODQzN2ZjOTMtMTYxYTk3N2ItNGUzMGYyN2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.756331Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715757. Ctx: { TraceId: 01jykqvmwkbsyewzr2btjchpay, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGVhY2ZmY2ItMzM3N2E4OTctYzQxNWZkOGQtYzJiZjE0OWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.811451Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715758. Ctx: { TraceId: 01jykqvmyp1cdsb84vkf0v8h3z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2ZlNmUwNzMtNzBiZmVkZjMtM2ZmZTE5Y2QtM2UzYzQwZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.867765Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715759. Ctx: { TraceId: 01jykqvn0c1apt8spskcp6e84f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWFkMzliMDYtMmYyMTAyZmUtZTgyMzQ0MzAtNGIyNDk5NmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.904299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 100:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:28:51.308498Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715760. Ctx: { TraceId: 01jykqvnb576n1nzm5d7m9q7cn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWJjNGU4ZjAtODhkNzVhYmYtNmM0ZTBlMWEtNThkOTQ4OWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> TTicketParserTest::BulkAuthorizationWithRequiredPermissions [GOOD] >> TTicketParserTest::BulkAuthorizationWithUserAccount >> AsyncIndexChangeExchange::SenderShouldShakeHandsAfterAddingIndex [GOOD] >> AsyncIndexChangeExchange::ShouldDeliverChangesOnFreshTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::TableStatsHistograms [GOOD] Test command err: 2025-06-25T14:28:23.803687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:23.803745Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:23.808097Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:28:23.917816Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:28:23.918255Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T14:28:23.936467Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:28:24.051187Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:28:24.274277Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:28:24.274482Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:28:24.276091Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T14:28:24.276207Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T14:28:24.276258Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T14:28:24.290234Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:28:24.290607Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:28:24.290743Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T14:28:24.509504Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:28:24.550263Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T14:28:24.559265Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:28:24.559544Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T14:28:24.559599Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T14:28:24.559641Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T14:28:24.559675Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:28:24.560064Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, 
received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.560112Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.591060Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T14:28:24.591177Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T14:28:24.591252Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T14:28:24.621444Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:24.621524Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T14:28:24.621568Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T14:28:24.621620Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T14:28:24.621676Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T14:28:24.637936Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T14:28:24.638151Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.638209Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.638276Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T14:28:24.658819Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T14:28:24.658924Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:28:24.659043Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:28:24.679636Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T14:28:24.679761Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T14:28:24.691254Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T14:28:24.691352Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T14:28:24.691401Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on 
unit CheckSchemeTx 2025-06-25T14:28:24.691441Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T14:28:24.691481Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T14:28:24.691762Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T14:28:24.691788Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T14:28:24.691846Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T14:28:24.691892Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T14:28:24.691958Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T14:28:24.691987Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T14:28:24.692020Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T14:28:24.692046Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T14:28:24.692065Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T14:28:24.714497Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T14:28:24.714566Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T14:28:24.714600Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T14:28:24.714669Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T14:28:24.728929Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T14:28:24.740706Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.740798Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.740873Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T14:28:24.741057Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T14:28:24.741103Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T14:28:24.741275Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T14:28:24.741326Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution 
status for [1000001:1] at 9437184 is Executed 2025-06-25T14:28:24.741376Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T14:28:24.741412Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T14:28:24.748704Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T14:28:24.763362Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:28:24.763726Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.763782Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.763851Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T14:28:24.763896Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:28:24.763956Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T14:28:24.764000Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T14:28:24.764041Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
pp:1862: Execution status for [0:1002] at 9437184 is Executed 2025-06-25T14:28:51.909161Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1002] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-25T14:28:51.909181Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1002] at 9437184 to execution unit ExecuteDataTx 2025-06-25T14:28:51.909203Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1002] at 9437184 on unit ExecuteDataTx 2025-06-25T14:28:51.909239Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-25T14:28:51.909566Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:1002] at tablet 9437184 with status COMPLETE 2025-06-25T14:28:51.909614Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:1002] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 109, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T14:28:51.909666Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1002] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T14:28:51.909694Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1002] at 9437184 executing on unit ExecuteDataTx 2025-06-25T14:28:51.909720Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1002] at 9437184 to execution unit FinishPropose 2025-06-25T14:28:51.909747Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1002] at 9437184 on unit FinishPropose 2025-06-25T14:28:51.909778Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1002] at 9437184 is DelayComplete 2025-06-25T14:28:51.909800Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1002] at 9437184 executing on unit FinishPropose 2025-06-25T14:28:51.909822Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1002] at 9437184 to execution unit CompletedOperations 2025-06-25T14:28:51.909846Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1002] at 9437184 on unit CompletedOperations 2025-06-25T14:28:51.909887Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1002] at 9437184 is Executed 2025-06-25T14:28:51.909909Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1002] at 9437184 executing on unit CompletedOperations 2025-06-25T14:28:51.909932Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:1002] at 9437184 has finished 2025-06-25T14:28:51.925614Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T14:28:51.925669Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1002] at 9437184 on unit FinishPropose 2025-06-25T14:28:51.925705Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1002 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: COMPLETE 2025-06-25T14:28:51.925775Z node 3 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 .2025-06-25T14:28:51.929626Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269551617, Sender [3:103:2136], Recipient [3:239:2230]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 103 RawX2: 12884904024 } 2025-06-25T14:28:51.929676Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-25T14:28:51.930457Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:4554:6468], Recipient [3:239:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:51.930494Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:51.930527Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:4553:6467], serverId# [3:4554:6468], sessionId# [0:0:0] 2025-06-25T14:28:51.930800Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [3:103:2136], Recipient [3:239:2230]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 103 RawX2: 12884904024 } TxBody: "\032\265\002\037\000\005\205\n\205\000\205\004?\000\205\002\202\0041\034MyReads MyWrites\205\004?\000\206\202\024Reply\024Write?\000?\000 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\000\005?\004?\014\005?\002)\211\006\202\203\005\004\213\002\203\004\205\002\203\001H\01056$UpdateRow\000\003?\016 h\020\000\000\000\000\000\000\r\000\000\000\000\000\000\000\013?\022\003?\020\235\017\001\005?\026\003?\024\322ImInShard111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111\001\007\002\000\003\005?\010?\014\006\002?\006?$\000\003?\014?\014\037/ \0018\000" TxId: 1003 ExecLevel: 0 Flags: 0 2025-06-25T14:28:51.930838Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:28:51.930903Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:28:51.931365Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit CheckDataTx 2025-06-25T14:28:51.931416Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is Executed 2025-06-25T14:28:51.931440Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit CheckDataTx 2025-06-25T14:28:51.931465Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1003] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-25T14:28:51.931489Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit BuildAndWaitDependencies 2025-06-25T14:28:51.931539Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-25T14:28:51.931584Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:1003] at 9437184 2025-06-25T14:28:51.931613Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is Executed 2025-06-25T14:28:51.931634Z node 3 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-25T14:28:51.931653Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1003] at 9437184 to execution unit ExecuteDataTx 2025-06-25T14:28:51.931674Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit ExecuteDataTx 2025-06-25T14:28:51.931708Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-25T14:28:51.931998Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:1003] at tablet 9437184 with status COMPLETE 2025-06-25T14:28:51.932040Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:1003] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 109, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T14:28:51.932086Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T14:28:51.932108Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit ExecuteDataTx 2025-06-25T14:28:51.932135Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1003] at 9437184 to execution unit FinishPropose 2025-06-25T14:28:51.932158Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit FinishPropose 2025-06-25T14:28:51.932188Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is DelayComplete 2025-06-25T14:28:51.932209Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit FinishPropose 2025-06-25T14:28:51.932230Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1003] at 9437184 to execution unit CompletedOperations 2025-06-25T14:28:51.932251Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit CompletedOperations 2025-06-25T14:28:51.932289Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is Executed 2025-06-25T14:28:51.932337Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit CompletedOperations 2025-06-25T14:28:51.932360Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:1003] at 9437184 has finished 2025-06-25T14:28:51.942235Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 9437184, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-25T14:28:51.942283Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 9437184, table# 1001, finished edge# 0, front# 0 2025-06-25T14:28:51.943427Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T14:28:51.943467Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1003] at 9437184 on unit FinishPropose 
2025-06-25T14:28:51.943501Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1003 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 3 ms, status: COMPLETE 2025-06-25T14:28:51.943579Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:28:51.946585Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:236:2229], Recipient [3:239:2230]: NKikimr::TEvTablet::TEvFollowerGcApplied .2025-06-25T14:28:51.949083Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:4568:6481], Recipient [3:239:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:51.949128Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:51.949170Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:4567:6480], serverId# [3:4568:6481], sessionId# [0:0:0] 2025-06-25T14:28:51.949495Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553160, Sender [3:4566:6479], Recipient [3:239:2230]: NKikimrTxDataShard.TEvGetTableStats TableId: 13 { InMemSize: 0 LastAccessTime: 1719 LastUpdateTime: 1719 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::ExtBlobsMultipleColumns [GOOD] Test command err: 2025-06-25T14:28:43.566087Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:28:43.566204Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:28:43.566253Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014cf/r3tmp/tmp3qqlkJ/pdisk_1.dat 2025-06-25T14:28:43.837415Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:28:43.841013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:43.875277Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:43.879203Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861721119559 != 1750861721119563 2025-06-25T14:28:43.924786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:43.924937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:43.936219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:44.014952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:44.345159Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:699:2580], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:44.345261Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:708:2585], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:44.345324Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:44.349850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:44.400728Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:44.514221Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:713:2588], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:28:44.602099Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:783:2627] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:44.894511Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykqvenq75b035j3z7rayk87, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjFlZmY0M2YtYjU3OTg4M2ItZjA5ZDJkY2EtMzczYmM3MzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:44.970252Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykqvf7a9fvt4sbxqzebr1tc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmI0MDdiY2QtNGEyM2EzMWQtNTUyMmZiZDgtYWU5NmE1YWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.030802Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykqvf9f8vvy36daws4t9rd6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODc4ODIxNzQtYWFkM2JkYTYtNGJlNTI3Mi0zNmY1YTMzMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.088421Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykqvfbb69jkmq7x6f6z2pb1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWVhM2I2ZjktNjdiZGU2ZTctNjg5MmRlNGItNzEwYTM2NGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.146297Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykqvfd4cv5ks0h9f1zhg86n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjE5NzZkODgtZTNiZDY4YWQtNzk5NTdhMjMtYjY1NDg4ZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.216448Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykqvff060a7dc0rv0n36j9d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjRiZjBmZjgtMjRhZjg0MDMtYWUxZTFiNzItM2U5Yzk3Nzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.286445Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jykqvfh6aj0jw5ab05y6qkf8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjY1NTM1MjUtMzUyYjZhNWUtNjM2ZjA5ZWEtNzdlZmM2NmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.347851Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykqvfkbdj4aq0t6fpmm9jxj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWJmNGEzOTEtOWMwMjYwNzktMzc1YjVjNGMtNzk0NDMzODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.408147Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jykqvfn9b9j2nmmb55b1x40m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGE3YWRhYTUtN2FiYzdlMGItYTczMGIwMDQtNzc1NDE4OGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.465199Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jykqvfq57182x1mys2a5cy19, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDA2MzJhNjMtZDc1M2Y0MS03MzBiNGNiLThiMzkyYjdm, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.524453Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jykqvfry9bzy6bkpx7zfvneq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzNiOTY1YzAtN2E5ZjkzNWUtYzUyMmU1YTktNThlZjRmZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.587426Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jykqvftt158sa302742xz3xe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmVkMTAxNzAtM2U5ZWRlY2QtZTU3NTE0YzAtZWFhMDAyNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.653208Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jykqvfws7mbndb0v02yn39pm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjNhMzRmNjAtMzEzNTczNDUtYTNhNjE1OTItZWNmODVmZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.725811Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jykqvfyvba428rne71dheypy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDYwNTY2NS0xODhjNDE1NC04ZTlkMTI1Mi05NGFhNjIyZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.789771Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jykqvg1358jmf7983xmrns6q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTBkZGI5NjAtOTE1M2Y3M2UtZDliNjBkYTItYTc3ZDE1OTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.853455Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jykqvg347436epqg760afs8j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODQyYzM4ZTktYWI2YmYxYTYtNTg4ZjVjNTUtYjQ5MTQwOWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.919784Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715676. Ctx: { TraceId: 01jykqvg53b4p5pap19knq5gds, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2U0NzQ2NjMtNDFiNTJjZjctMTg5OTJhMjUtMjM4YzMxMTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:45.991445Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715677. Ctx: { TraceId: 01jykqvg74abffy8afebskr1f4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2RlN2FhOWMtOWUzNjA1YjAtMjU4MTJhOS05M2ZiNzVkMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:46.055482Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. 
Ctx: { TraceId: 01jykqvg9dc25h9k6ywpkyagkg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzJiZGMxOGQtNTJiZDhkMDMtNDhiMTNhNy1hYjQ1NmMyNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:46.120960Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jykqvgbeb0je9sqz6bn4565j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDBjYTk0MTgtZWJjNTc0N2QtOTM3ZmExMzgtMTA1M2YxMmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:46.190727Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jykqvgdfb8ps9ej110ra9wd1, Database: , DatabaseId: /Root, SessionId: ... p:120: TxId: 281474976715727. Ctx: { TraceId: 01jykqvkjr2q0dmpt5hbg0qgby, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzhhODc5NmQtZDA2ZTdkM2QtZGU4ZGUwOGYtZmE4OGY2MjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.508512Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715728. Ctx: { TraceId: 01jykqvkn72nq6d0rj7etqdevc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWQ3N2VmMWMtZmI3ODRlZmQtY2I0MDllYTktZGZiZDQ0ZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.575083Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715729. Ctx: { TraceId: 01jykqvkqa3gyyrr09r69r5fd1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjhlMTUzYzgtYjE4OTkxYWEtZjViYTNiODUtNTA2YzMxYjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.641426Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715730. Ctx: { TraceId: 01jykqvksd9wkth4kez4mesc5b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTFkMmVmOWEtMTc1OGRjYzAtZGI0MWRiMDgtMmFjNDcxNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.710654Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715731. Ctx: { TraceId: 01jykqvkvf7de7ewx4qp5hpvsb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjcyZGQ5YjYtNDY2YWE3NWYtMTc2MjUzMi03NDJjZWRkZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.778751Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715732. Ctx: { TraceId: 01jykqvkxmen4ycmnj3qsqqqs4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWM1YmNlMGMtMzQ5YTE2MWUtNjdiZmNhMGYtNTJiMTlmM2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.855013Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715733. Ctx: { TraceId: 01jykqvkzs45x7r5vcmk2ec9v8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWNiNzhlYzEtNDE5NjhhNmItNWI4NjAyZGQtM2QzNTdhZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:49.954308Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715734. Ctx: { TraceId: 01jykqvm269m84r35b5whqspqh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjgyOTZiZWQtZDhjZjA4NGItMzlkOWM3ZDYtNWI0YTIwZjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:50.018133Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715735. Ctx: { TraceId: 01jykqvm598y593px6d2d9a1h0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmFhNThjNzMtNTJmYTFjNTYtNTU1Y2JlOTMtMzNiYTA5ZjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.089172Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715736. Ctx: { TraceId: 01jykqvm786zx877wa8fae1nfy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2RiMWQzZTEtOTc2ZDVkMWYtNjhmMDkwMTctOGIxOWVkMmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.160525Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715737. Ctx: { TraceId: 01jykqvm9getwgp8pr76mg41vp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjY3ZjAwMGMtNTI3ODk0NzItM2M2MzA4ZmEtMjE2NzJmMTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.231777Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715738. Ctx: { TraceId: 01jykqvmbq5cpdfqx8yj1zhp8c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzBkYWMzNzMtZmVjOGQ4ZGQtNGQ0M2I4ZTctYmViZGE2NDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.303728Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715739. Ctx: { TraceId: 01jykqvmdy6gs1rqn6az7svjhs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTM2Nzk2ZTMtMmQ5YTAzNjYtZTYwNzQxNC02M2M5Y2Y1Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.375851Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715740. Ctx: { TraceId: 01jykqvmg60cb6m9h80w9jfxfd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGI5MzZiZmYtZTVlYTc3OTktZDI0YTg2OGYtZDdlNTAyYWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.464243Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715741. Ctx: { TraceId: 01jykqvmje8yw79dz7bt7yvaw8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjBlZjIzMDctMzYxZjJmM2YtZjE1NmUwNTctNDMyYzcxOTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.532364Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715742. Ctx: { TraceId: 01jykqvmn661ek7twmav7xbw7w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTZjMTcxZmUtZTZhNWMxODktMzA4Yzk4YjctYjhiMmRlZjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.602215Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715743. Ctx: { TraceId: 01jykqvmqb1cdsb5wmjndpj8rx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWQ4NzI0ZDMtZGIxZTc4OC0zMmFkN2U0Ni03MjljMWNmMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.663715Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715744. Ctx: { TraceId: 01jykqvmsg0mwh9vcsb2mxwq18, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzQyYWJlODMtNDk0ZDM4MzUtNGI2MGQzMTgtYWMxM2VmOGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:50.727698Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715745. Ctx: { TraceId: 01jykqvmvefznbs9dmmc8k5fzq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTQ4Y2FmN2YtMzI5YzExY2QtOWJiMTNmNTMtMWU2ZDljOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.782041Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715746. Ctx: { TraceId: 01jykqvmxddzgzgz46nvce3q7e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2ZkYTViYzgtYTMxYjgxNWYtNjM0ODJhOTQtNTFlNmM5ZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.838957Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715747. Ctx: { TraceId: 01jykqvmz4002zp1pq1j0vzzgj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODNiNjliZjgtZTYzMjEwY2ItNmVhNTlhMDItYmI0NmNjMDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.897493Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715748. Ctx: { TraceId: 01jykqvn0w2az72dtfaxdnk82s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjVkZjQ4YmEtNTdjOWI1MWItNDlkNWM1MTItMWMzNGE5ZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:50.978093Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715749. Ctx: { TraceId: 01jykqvn3a1357t71356rcdfy3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzU2MWY3ZDItOThmMjA3ZmEtOWQxYTRiNC05NTg3M2IwMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:51.036412Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715750. Ctx: { TraceId: 01jykqvn575t0741pevxtk7dqa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzQ5NmM5NDMtYzZjN2JlMzItODc1ZGE3MTAtYmNmY2EwM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:51.089436Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715751. Ctx: { TraceId: 01jykqvn72bgfqdsmj6352rccm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGNkZmYxZDItNzhkZThhYjAtODgyYmEwMDYtYWM3YTgwN2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:51.143610Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715752. Ctx: { TraceId: 01jykqvn8paay00snd9nsbdacr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGUyZjY5M2MtNWIzM2NhMzItNmE2MjE3YzMtOWEwZDcwYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:51.199153Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715753. Ctx: { TraceId: 01jykqvnae061hrdj8r30v39qr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTQ1NGNkNDItMzU1ZjQ3MDUtMjk0NmMwYWYtNGNkZDZhZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:51.259589Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715754. Ctx: { TraceId: 01jykqvnc44cdcc0z7v6z6bfdx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODE2MmJhYWItOGJmMzhlNzAtN2JlYzllZDItZTkzMWM0OWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:28:51.316074Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715755. Ctx: { TraceId: 01jykqvne22ntbbdeman5wg64d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTZhYWE5YzgtNWEwY2FlYTctMzljZWNlMjctNDU5YWVhY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:51.390695Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715756. Ctx: { TraceId: 01jykqvnfse5mgzrc91yw1qntf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzRmMjc1YS00YTBmMGY2Yi1lODJiYjdmMy1lZGE3ODNkZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:51.447672Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715757. Ctx: { TraceId: 01jykqvnj5d3jf29rp4hzf4z0n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTM4ZTdiZWEtODMzNGRmMi1hY2I4MDAxOS1kN2VlMmZmYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:51.507752Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715758. Ctx: { TraceId: 01jykqvnkxebjbsx238kdn403f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjY1NmViNS03MDFhYmQxNi02YjU3ZjFmMS01MTllYjVlOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:51.569600Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715759. Ctx: { TraceId: 01jykqvnnt43xrz7sd10pz1tk0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWRlYTM0ZjUtY2U4OGVlY2MtN2UxOGNkOGYtNzg1ZjMxZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:28:51.732220Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715760. Ctx: { TraceId: 01jykqvnrc6qa441zds58rdr78, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWE3YmZlYzYtMTZhZDA1YTctYTA0NWQyNmMtNWQ0MjE5OWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TTicketParserTest::TicketFromCertificateCheckIssuerBad [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationBad >> KqpScripting::StreamScanQuery |75.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__SYNC-pk_types12-all_types12-index12-DyNumber--SYNC] [GOOD] |75.8%| [TA] $(B)/ydb/core/tx/datashard/ut_external_blobs/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerless [GOOD] >> TSchemeshardBackgroundCompactionTest::ShouldCompactServerless >> Cdc::VirtualTimestamps[PqRunner] [GOOD] >> Cdc::VirtualTimestamps[YdsRunner] >> TTicketParserTest::AccessServiceAuthenticationApiKeyOk [GOOD] >> TTicketParserTest::AuthenticationUnavailable >> THiveTest::TestHiveBalancerHighUsage [GOOD] >> THiveTest::TestHiveBalancerHighUsageAndColumnShards >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersGood [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersBad >> Cdc::NewAndOldImagesLogDebezium [GOOD] >> Cdc::OldImageLogDebezium >> KqpYql::UpdateBadType >> KqpYql::FromBytes [GOOD] >> KqpYql::EvaluateFor [GOOD] >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI [GOOD] >> KqpScripting::LimitOnShard >> KqpScripting::ScanQueryInvalid >> KqpYql::EvaluateExprPgNull >> KqpScripting::StreamExecuteYqlScriptScanClientTimeoutBruteForce [GOOD] >> KqpScripting::StreamExecuteYqlScriptScanOperationTmeoutBruteForce |75.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::FromBytes [GOOD] Test command err: Trying to start YDB, gRPC: 13522, MsgBus: 28276 2025-06-25T14:28:44.962257Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893847523914695:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:44.962380Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d56/r3tmp/tmp4vz0sA/pdisk_1.dat 2025-06-25T14:28:45.235468Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:45.235772Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893847523914673:2080] 1750861724961633 != 1750861724961636 TServer::EnableGrpc on GrpcPort 13522, node 1 2025-06-25T14:28:45.332440Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:45.332473Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:45.332481Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:45.332626Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:45.333891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:45.334062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:45.335740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28276 TClient is connected to server localhost:28276 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:45.847788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:45.874144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:45.970706Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:46.007767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:28:46.153500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:46.230246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:47.513371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893860408818209:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:47.513508Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:47.797146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:47.840812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:47.868752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:47.895265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:47.921905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:47.953609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:48.026882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:48.085648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893864703786165:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:48.085743Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:48.085780Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893864703786170:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:48.089482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:48.101939Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893864703786172:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:28:48.159310Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893864703786223:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 14607, MsgBus: 18674 2025-06-25T14:28:49.832472Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893869831370643:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:49.832552Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d56/r3tmp/tmpPr5HqJ/pdisk_1.dat 2025-06-25T14:28:49.957348Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14607, node 2 2025-06-25T14:28:49.975869Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:49.975969Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:49.978425Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:50.021356Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:50.021391Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:50.021398Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:50.021519Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18674 TClient is connected to server localhost:18674 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:50.430368Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:50.443231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:50.513783Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:50.673029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:50.746383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:50.875723Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:52.889278Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893882716274145:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:52.889371Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:52.933199Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:52.964934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:52.988809Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:53.017334Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:53.044466Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:53.117892Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:53.187012Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:53.273995Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893887011242113:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:53.274101Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:53.274473Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893887011242118:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:53.278428Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:53.288563Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893887011242120:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:28:53.352611Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893887011242171:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::EvaluateFor [GOOD] Test command err: Trying to start YDB, gRPC: 20896, MsgBus: 14334 2025-06-25T14:28:45.073846Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893849792147409:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:45.073927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d46/r3tmp/tmp85TPJT/pdisk_1.dat 2025-06-25T14:28:45.373646Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893849792147389:2080] 1750861725071977 != 1750861725071980 2025-06-25T14:28:45.382703Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20896, node 1 2025-06-25T14:28:45.445853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:45.445872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:45.445880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:45.445998Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:45.479336Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:45.479475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:45.481105Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14334 TClient is connected to server localhost:14334 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:45.960949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:45.977086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:28:45.984060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:46.080868Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:28:46.128781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:46.259995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:46.336303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:47.772349Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893858382083639:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:47.772454Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:48.088172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:48.116145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:48.138032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:48.164097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:48.187821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:48.217600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:48.248800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:48.298224Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893862677051590:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:48.298297Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893862677051595:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:48.298299Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:48.301774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:48.311610Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893862677051597:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:28:48.372364Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893862677051648:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 14828, MsgBus: 1196 2025-06-25T14:28:50.140384Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893873620536563:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:50.140478Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d46/r3tmp/tmph6leIE/pdisk_1.dat 2025-06-25T14:28:50.254182Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:50.266152Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893873620536533:2080] 1750861730139923 != 1750861730139926 TServer::EnableGrpc on GrpcPort 14828, node 2 2025-06-25T14:28:50.283774Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:50.283867Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:50.286239Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:50.302719Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:50.302743Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:50.302755Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:50.302872Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1196 TClient is connected to server localhost:1196 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:50.703591Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:50.714053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:50.761828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:50.899971Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:28:50.950258Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:51.147651Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:52.850746Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893882210472767:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:52.850831Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:52.898384Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:52.938813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:52.969515Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:52.995447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:53.024930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:53.093335Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:53.128657Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:53.214149Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893886505440726:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:53.214275Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:53.214488Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893886505440731:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:53.217706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:53.225987Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893886505440733:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:28:53.304398Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893886505440784:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TTicketParserTest::BulkAuthorizationWithUserAccount [GOOD] >> TTicketParserTest::BulkAuthorizationWithUserAccount2 >> TTicketParserTest::AuthenticationUnknown [GOOD] >> TTicketParserTest::Authorization ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI [GOOD] Test command err: 2025-06-25T14:28:00.509125Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893659038210045:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:00.509170Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:28:01.127797Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893656843953644:2250];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:01.128087Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:28:01.128147Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018f4/r3tmp/tmpNaNvk4/pdisk_1.dat 2025-06-25T14:28:01.133989Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:28:01.604457Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:01.697124Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:01.712883Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:28:01.769961Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:01.821666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:01.821826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:01.832212Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:28:01.833632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26538, node 1 2025-06-25T14:28:01.902368Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-25T14:28:01.902426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:02.042007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:02.153289Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0018f4/r3tmp/yandexJNEQHG.tmp 2025-06-25T14:28:02.153312Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0018f4/r3tmp/yandexJNEQHG.tmp 2025-06-25T14:28:02.153466Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0018f4/r3tmp/yandexJNEQHG.tmp 2025-06-25T14:28:02.153564Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:02.287085Z INFO: TTestServer started on Port 62829 GrpcPort 26538 TClient is connected to server localhost:62829 PQClient connected to localhost:26538 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:03.047585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:28:03.168590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 
2025-06-25T14:28:05.509882Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893659038210045:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:05.509941Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:05.706513Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893656843953644:2250];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:05.706562Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:06.368065Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893684808014953:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:06.368212Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:06.368978Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893684808014968:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:06.374720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:06.414100Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893684808014970:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:28:06.664476Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893684808015058:2804] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:06.713974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:06.865534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:06.878708Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519893684808015070:2317], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:28:06.880575Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NWFiNjllMjAtYjFlNjc3MDMtNDg2MDNjZjQtYTY1MmRiOGM=, ActorId: [1:7519893684808014950:2304], ActorState: ExecuteState, TraceId: 01jykqt9j6f17371r256ptwh7m, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:28:06.883180Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:28:07.051363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:28:07.470069Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykqtaex9pqce8a8j7j9d4bv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTEzNjcyZDctNWRmYmQ5ZDEtOWMwOTZmMjItMzE5NDFkYjM=, CurrentEx ... 
661Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [7:7519893838462306708:2129], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root/PQ/Config/V2/Cluster PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [7:7519893855642176822:2751] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 18 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861726208 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, by pathId# nullptr 2025-06-25T14:28:54.041715Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519893838462306708:2129], cacheItem# { Subscriber: { Subscriber: [7:7519893855642176822:2751] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 18 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861726208 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: Root/PQ/Config/V2/Cluster TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpUnknown RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 18 IsSync: true Partial: 0 } 2025-06-25T14:28:54.041864Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519893890001917116:4157], recipient# [7:7519893890001917115:2515], result# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } },{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByPath Operation: OpUnknown RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:28:54.041893Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519893890001917117:4158], recipient# [7:7519893890001917114:2514], result# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Cluster TableId: [72057594046644480:10:1] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } },{ 
Path: Root/PQ/Config/V2/Cluster TableId: [72057594046644480:10:1] RequestType: ByPath Operation: OpUnknown RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:28:54.042214Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519893838462306708:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:10:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:28:54.042293Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519893838462306708:2129], cacheItem# { Subscriber: { Subscriber: [7:7519893855642176822:2751] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 18 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861726208 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: TableId: [72057594046644480:10:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:54.042373Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519893838462306708:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:12:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:28:54.042414Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519893838462306708:2129], cacheItem# { Subscriber: { Subscriber: [7:7519893855642177034:2900] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 18 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1750861726390 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: TableId: [72057594046644480:12:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:54.042487Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519893890001917120:4159], recipient# [7:7519893838462306660:2217], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Cluster TableId: [72057594046644480:10:1] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 
PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:28:54.042527Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519893890001917121:4160], recipient# [7:7519893838462306660:2217], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:28:54.042582Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519893838462306708:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:28:54.042628Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519893838462306708:2129], cacheItem# { Subscriber: { Subscriber: [7:7519893842757274493:2449] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 27 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750861723289 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:54.042733Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519893890001917122:4161], recipient# [7:7519893838462306660:2217], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:28:54.042850Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519893838462306708:2129], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:28:54.042971Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: 
cache.cpp:1842: FillEntry for TNavigate: self# [7:7519893838462306708:2129], cacheItem# { Subscriber: { Subscriber: [7:7519893855642176725:2694] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:28:54.043026Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519893890001917124:4162], recipient# [7:7519893890001917123:2518], result# { ErrorCount: 1 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } |75.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/fqrun/fqrun >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsSchemeshardRestart [GOOD] >> Cdc::HugeKey[PqRunner] [GOOD] |75.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/fqrun/fqrun >> TTicketParserTest::AuthenticationUnavailable [GOOD] >> KqpYql::ScriptUdf >> TTicketParserTest::AuthenticationRetryError >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsConfigRequest |75.8%| [TA] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> Cdc::HugeKey[YdsRunner] |75.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_view/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TTicketParserTest::TicketFromCertificateWithValidationBad [GOOD] >> KqpYql::TableConcat >> KqpYql::EvaluateExpr1 >> TTicketParserTest::NebiusAuthorizationWithRequiredPermissions >> KqpYql::ColumnNameConflict [GOOD] >> KqpYql::ColumnTypeMismatch >> KqpScripting::StreamExecuteYqlScriptData [GOOD] >> KqpScripting::StreamExecuteYqlScriptEmptyResults >> KqpScripting::StreamExecuteYqlScriptScanWriteCancelAfterBruteForced |75.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |75.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersBad [GOOD] >> Cdc::VirtualTimestamps[YdsRunner] [GOOD] >> TTicketParserTest::LoginRefreshGroupsWithError [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDefaultGroupGood >> AsyncIndexChangeExchange::ShouldDeliverChangesOnFreshTable [GOOD] >> Cdc::VirtualTimestamps[TopicRunner] >> TTicketParserTest::NebiusAccessServiceAuthenticationOk >> TTxDataShardMiniKQL::CrossShard_2_SwapAndCopy [GOOD] >> AsyncIndexChangeExchange::ShouldDeliverChangesOnAlteredTable >> TTxDataShardMiniKQL::CrossShard_3_AllToOne >> TTicketParserTest::Authorization [GOOD] >> TTransferTests::Create >> TTransferTests::Create_Disabled >> TTicketParserTest::AuthorizationModify >> TTicketParserTest::BulkAuthorizationWithUserAccount2 [GOOD] >> TTicketParserTest::BulkAuthorizationUnavailable |75.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |75.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move/test-results/unittest/{meta.json ... results_accumulator.log} |75.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |75.8%| [LD] {RESULT} $(B)/ydb/tests/tools/fqrun/fqrun |75.9%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |75.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |75.9%| [TA] {RESULT} $(B)/ydb/core/driver_lib/run/ut/test-results/unittest/{meta.json ... results_accumulator.log} |75.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_external_blobs/test-results/unittest/{meta.json ... 
results_accumulator.log} >> Cdc::OldImageLogDebezium [GOOD] >> Cdc::NewImageLogDebezium >> KqpScripting::StreamScanQuery [GOOD] >> KqpScripting::SyncExecuteYqlScriptSeveralQueries >> KqpYql::UpdateBadType [GOOD] >> KqpScripting::ScanQueryInvalid [GOOD] >> KqpScripting::ScanQueryTruncate >> TTicketParserTest::BulkAuthorizationRetryError [GOOD] >> TTicketParserTest::BulkAuthorizationRetryErrorImmediately >> KqpScripting::LimitOnShard [GOOD] >> KqpScripting::NoAstSizeLimit >> TSchemeshardBackgroundCleaningTest::TempInTemp [GOOD] >> TTicketParserTest::NebiusAuthorizationWithRequiredPermissions [GOOD] >> TTicketParserTest::NebiusAuthorizationUnavailable >> THiveTest::TestHiveBalancerHighUsageAndColumnShards [GOOD] >> THiveTest::TestHiveBalancerOneTabletHighUsage ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::UpdateBadType [GOOD] Test command err: Trying to start YDB, gRPC: 9745, MsgBus: 63869 2025-06-25T14:28:54.953245Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893887947299962:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:54.953323Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d2e/r3tmp/tmpldW7KF/pdisk_1.dat 2025-06-25T14:28:55.318580Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9745, node 1 2025-06-25T14:28:55.392491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:55.392625Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:55.396109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:55.403730Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:55.403750Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:55.403756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:55.403864Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63869 TClient is connected to server localhost:63869 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:55.875122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:55.898288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:55.962350Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:56.010258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:56.137709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:56.214095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:57.798986Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893900832203451:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:57.799084Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.070192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.099162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.124026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.148145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.177284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.205396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.250656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.305712Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893905127171407:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.305791Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.305951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893905127171412:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.309621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:58.318861Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893905127171414:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:28:58.394988Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893905127171465:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
<main>: Error: Type annotation, code: 1030
<main>:4:26: Error: At function: KiUpdateTable!
<main>:3:20: Error: Failed to convert type: Struct<'Amount':String?> to Struct<'Amount':Uint64?>
<main>:3:20: Error: Failed to convert 'Amount': Optional<String> to Optional<Uint64>
:3:20: Error: Row type mismatch for table: db.[/Root/Test] >> KqpYql::EvaluateExprPgNull [GOOD] >> KqpYql::EvaluateExprYsonAndType ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::TempInTemp [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:234:2060] recipient: [1:228:2144] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:234:2060] recipient: [1:228:2144] Leader for TabletID 72057594046678944 is [1:245:2155] sender: [1:246:2060] recipient: [1:228:2144] 2025-06-25T14:27:39.055540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:27:39.055616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:27:39.055660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:27:39.055697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:27:39.055735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:27:39.055765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:27:39.055811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:27:39.055868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:27:39.056632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:27:39.056904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:27:39.136528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:27:39.136602Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:39.149578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:27:39.149699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:27:39.150078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:27:39.172799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:27:39.173044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:27:39.173637Z node 
1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:27:39.173912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:27:39.179274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:27:39.179487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:27:39.180647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:27:39.180699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:27:39.180815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:27:39.180856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:27:39.180890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:27:39.180988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.187919Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:245:2155] sender: [1:357:2060] recipient: [1:17:2064] 2025-06-25T14:27:39.361878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:27:39.362117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.362309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:27:39.362360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:27:39.362566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:27:39.362698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:39.366120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:27:39.366292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:27:39.366497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.366559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:27:39.366595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:27:39.366628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:27:39.372718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.372809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:27:39.372865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:27:39.375129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.375175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.375224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:27:39.375267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:27:39.384919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:27:39.387029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:27:39.387189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:27:39.388149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:27:39.388290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 254 RawX2: 4294969457 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:27:39.388363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:27:39.388713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:27:39.388769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:27:39.388933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:27:39.389026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:27:39.391019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:27:39.391080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_s ... 46678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-25T14:29:00.104559Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-25T14:29:00.104598Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-25T14:29:00.104922Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [7:692:2507], Recipient [7:245:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:00.104971Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:00.105006Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:29:00.105130Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124996, Sender [7:587:2402], Recipient [7:245:2155]: NKikimrScheme.TEvNotifyTxCompletion TxId: 106 2025-06-25T14:29:00.105165Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-25T14:29:00.105223Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-25T14:29:00.105309Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T14:29:00.105344Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [7:690:2505] 2025-06-25T14:29:00.105490Z node 7 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [7:692:2507], Recipient [7:245:2155]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:29:00.105519Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:29:00.105548Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 TestModificationResults wait txId: 107 2025-06-25T14:29:00.105942Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [8:565:2104], Recipient [7:245:2155] 2025-06-25T14:29:00.105982Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:29:00.108139Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/test/tmp/a/b" OperationType: ESchemeOpMkDir MkDir { Name: "tmp2" } TempDirOwnerActorId { RawX1: 565 RawX2: 34359740472 } AllowCreateInTempDir: false } TxId: 107 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:00.108427Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/test/tmp/a/b/tmp2, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-25T14:29:00.108545Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 107:1, propose status:StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/test/tmp/a/b', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-25T14:29:00.108747Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:29:00.110501Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 107, response: Status: StatusPreconditionFailed Reason: "Check failed: path: \'/MyRoot/test/tmp/a/b\', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges)" TxId: 107 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:00.110762Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 107, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/test/tmp/a/b', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges), operation: CREATE DIRECTORY, path: /MyRoot/test/tmp/a/b/tmp2 2025-06-25T14:29:00.110816Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 2025-06-25T14:29:00.111154Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-06-25T14:29:00.111193Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-06-25T14:29:00.111544Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, 
received event# 269877761, Sender [7:698:2513], Recipient [7:245:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:00.111592Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:00.111624Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:29:00.111718Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124996, Sender [7:587:2402], Recipient [7:245:2155]: NKikimrScheme.TEvNotifyTxCompletion TxId: 107 2025-06-25T14:29:00.111752Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-25T14:29:00.111808Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-06-25T14:29:00.111887Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-25T14:29:00.111920Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [7:696:2511] 2025-06-25T14:29:00.112070Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [7:698:2513], Recipient [7:245:2155]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:29:00.112102Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:29:00.112134Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-06-25T14:29:00.112516Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [8:565:2104], Recipient [7:245:2155] 2025-06-25T14:29:00.112555Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:29:00.114721Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/test/tmp/a/b" OperationType: ESchemeOpMkDir MkDir { Name: "tmp2" } TempDirOwnerActorId { RawX1: 565 RawX2: 34359740472 } AllowCreateInTempDir: true } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:00.115002Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/test/tmp/a/b/tmp2, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:29:00.115062Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 108:1, propose status:StatusPreconditionFailed, reason: Can't create temporary directory while flag AllowCreateInTempDir is set. 
Temporary directory can't be created in another temporary directory., at schemeshard: 72057594046678944 2025-06-25T14:29:00.115249Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:29:00.117717Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 108, response: Status: StatusPreconditionFailed Reason: "Can\'t create temporary directory while flag AllowCreateInTempDir is set. Temporary directory can\'t be created in another temporary directory." TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:00.117993Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Can't create temporary directory while flag AllowCreateInTempDir is set. Temporary directory can't be created in another temporary directory., operation: CREATE DIRECTORY, path: /MyRoot/test/tmp/a/b/tmp2 2025-06-25T14:29:00.118051Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-25T14:29:00.118419Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-25T14:29:00.118461Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-25T14:29:00.118770Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [7:704:2519], Recipient [7:245:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:00.118821Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:00.118859Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:29:00.118973Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124996, Sender [7:587:2402], Recipient [7:245:2155]: NKikimrScheme.TEvNotifyTxCompletion TxId: 108 2025-06-25T14:29:00.119002Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-25T14:29:00.119061Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-25T14:29:00.119147Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-25T14:29:00.119184Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [7:702:2517] 2025-06-25T14:29:00.119326Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [7:704:2519], Recipient [7:245:2155]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:29:00.119361Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:29:00.119393Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: 
OK eventTxId 108 >> TTicketParserTest::NebiusAccessServiceAuthenticationOk [GOOD] >> TTicketParserTest::NebiusAuthenticationRetryError >> TTicketParserTest::TicketFromCertificateWithValidationDefaultGroupGood [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad >> KqpYql::UuidPrimaryKeyBulkUpsert >> KqpYql::ColumnTypeMismatch [GOOD] >> KqpYql::ScriptUdf [GOOD] >> KqpYql::SelectNoAsciiValue >> KqpYql::EvaluateExpr1 [GOOD] >> KqpYql::Discard >> TTicketParserTest::AuthorizationModify [GOOD] >> KqpYql::TableConcat [GOOD] >> KqpYql::TableNameConflict >> KqpScripting::StreamExecuteYqlScriptEmptyResults [GOOD] >> KqpScripting::StreamExecuteYqlScriptMixed >> Cdc::VirtualTimestamps[TopicRunner] [GOOD] >> Cdc::Write[PqRunner] >> TTicketParserTest::NebiusAuthorizationRetryError [GOOD] >> TTicketParserTest::NebiusAuthorizationRetryErrorImmediately >> TTicketParserTest::BulkAuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::BulkAuthorization >> TTicketParserTest::BulkAuthorizationUnavailable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::ColumnTypeMismatch [GOOD] Test command err: Trying to start YDB, gRPC: 20774, MsgBus: 11843 2025-06-25T14:28:52.874902Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893881384820069:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:52.877004Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d36/r3tmp/tmpM8eiBy/pdisk_1.dat 2025-06-25T14:28:53.193682Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:53.194232Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893881384820044:2080] 1750861732871136 != 1750861732871139 TServer::EnableGrpc on GrpcPort 20774, node 1 2025-06-25T14:28:53.248751Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:53.248839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:53.250510Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:53.277571Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:53.277599Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:53.277608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:53.277713Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11843 TClient is connected to server localhost:11843 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:53.808392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:53.838119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:53.894685Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:53.950900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:54.072515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:54.150992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:55.685107Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893894269723586:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:55.685248Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:56.036086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:56.059663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:56.086796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:56.113395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:56.141842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:56.216605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:56.286934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:56.369976Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893898564691550:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:56.370072Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:56.370208Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893898564691555:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:56.373425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:56.384705Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893898564691557:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:28:56.460892Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893898564691608:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
<main>: Error: Type annotation, code: 1030
<main>:7:30: Error: At function: KiCreateTable!
:7:30: Error: Duplicate column: Value. Trying to start YDB, gRPC: 63029, MsgBus: 3169 2025-06-25T14:28:58.058340Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893908594086791:2098];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:58.059347Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d36/r3tmp/tmpog9Uw1/pdisk_1.dat 2025-06-25T14:28:58.170615Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63029, node 2 2025-06-25T14:28:58.209971Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:58.210047Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:58.211722Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:58.234887Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:58.234912Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:58.234923Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:58.235035Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3169 TClient is connected to server localhost:3169 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:58.664051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:58.679490Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:58.758376Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:58.902525Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:28:58.974659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:59.108480Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:00.869484Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893917184022943:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:00.869551Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:00.924263Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:00.954593Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:00.978443Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.010841Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.035212Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.099311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.130337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.208951Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893921478990904:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.209025Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893921478990909:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.209031Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.212693Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:01.222993Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893921478990911:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:01.290961Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893921478990962:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:02.072262Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519893925773958531:2477], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:5:26: Error: At function: KiWriteTable!
:6:27: Error: Failed to convert type: Struct<'Key':Uint64,'Value':Uint64> to Struct<'Key':Uint64?,'Value':String?>
:6:27: Error: Failed to convert 'Value': Uint64 to Optional
:6:27: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T14:29:02.072586Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=Yzg3ZTk4OTYtNzUxNTI1ZmYtODhkMWRmNTAtZTE3OGVmM2Q=, ActorId: [2:7519893925773958523:2472], ActorState: ExecuteState, TraceId: 01jykqvzyy69e1zn9nj15yf230, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:5:26: Error: At function: KiWriteTable!
:6:27: Error: Failed to convert type: Struct<'Key':Uint64,'Value':Uint64> to Struct<'Key':Uint64?,'Value':String?>
:6:27: Error: Failed to convert 'Value': Uint64 to Optional
:6:27: Error: Failed to convert input columns types to scheme types, code: 2031 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::AuthorizationModify [GOOD] Test command err: 2025-06-25T14:28:47.380229Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893858506857566:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:47.381971Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018d2/r3tmp/tmpfnKEQq/pdisk_1.dat 2025-06-25T14:28:47.697066Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893858506857543:2080] 1750861727379041 != 1750861727379044 2025-06-25T14:28:47.698458Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7304, node 1 2025-06-25T14:28:47.776461Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:47.776489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:47.776498Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:47.776637Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:47.779395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:47.779493Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:47.782040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29005 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:48.089487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
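Note on the KQP compile error quoted above (code 2031): it is a plain column-type mismatch. The query supplies a row of type Struct<'Key':Uint64,'Value':Uint64> while the target table expects Struct<'Key':Uint64?,'Value':String?>, and Uint64 cannot be implicitly converted to Optional<String>. The test's actual table and query are not shown in the log; the sketch below is a hypothetical YQL reconstruction (table name and literal values are assumptions) that would trigger the same error, followed by a variant that compiles.

-- Hypothetical reconstruction; `/Root/Test` and the literal values are assumptions.
CREATE TABLE `/Root/Test` (
    Key Uint64,
    Value String,
    PRIMARY KEY (Key)
);

-- The row type here is Struct<'Key':Uint64,'Value':Uint64>: Key converts to Uint64?,
-- but Value cannot be converted Uint64 -> String?, so compilation fails with code 2031.
UPSERT INTO `/Root/Test` (Key, Value) VALUES
    (CAST(1 AS Uint64), CAST(2 AS Uint64));

-- A variant that compiles: pass Value as a String literal.
UPSERT INTO `/Root/Test` (Key, Value) VALUES
    (CAST(1 AS Uint64), "2");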
2025-06-25T14:28:48.106008Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:28:48.106061Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:48.106076Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:48.106532Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-25T14:28:48.106599Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000020008] Connect to grpc://localhost:15300 2025-06-25T14:28:48.109364Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000020008] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-25T14:28:48.120482Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000020008] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:28:48.120624Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-25T14:28:48.121668Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000020708] Connect to grpc://localhost:11067 2025-06-25T14:28:48.122291Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000020708] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-25T14:28:48.130355Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000020708] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-25T14:28:48.130690Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-25T14:28:50.119702Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893870721736359:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:50.119784Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018d2/r3tmp/tmpIAscTA/pdisk_1.dat 2025-06-25T14:28:50.317096Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:50.317994Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:50.318052Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:50.334043Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11523, node 2 2025-06-25T14:28:50.377532Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:50.377548Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:50.377553Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:50.377624Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1800 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:50.600444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:50.605965Z node 2 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (8E120919): Token is not supported 2025-06-25T14:28:53.102495Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519893883338718175:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:53.102674Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018d2/r3tmp/tmpzP1sVz/pdisk_1.dat 2025-06-25T14:28:53.273046Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:53.283956Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519893883338718159:2080] 1750861733100438 != 1750861733100441 2025-06-25T14:28:53.285572Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:53.285676Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:53.287810Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21309, node 3 2025-06-25T14:28:53.340009Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:53.340032Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:53.340040Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:53.340168Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24944 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:53.596489Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called ... erviceAuthorization(something.read) 2025-06-25T14:28:56.856652Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000018c88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "XXXXXXXX" type: "ydb.database" } resource_path { id: "XXXXXXXX" type: "resource-manager.folder" } } 2025-06-25T14:28:56.857777Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000018c88] Status 16 Access Denied 2025-06-25T14:28:56.857855Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.read now has a permanent error "Access Denied" retryable:0 2025-06-25T14:28:56.857884Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'Access Denied' 2025-06-25T14:28:56.858216Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:28:56.858230Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:56.858235Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:56.858248Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-25T14:28:56.858318Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000018c88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "XXXXXXXX" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:28:56.859338Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000018c88] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:28:56.859391Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-25T14:28:56.859460Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:28:56.859773Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 
2025-06-25T14:28:56.859790Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:56.859797Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:56.859814Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-25T14:28:56.859918Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000018c88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "XXXXXXXX" type: "resource-manager.folder" } } 2025-06-25T14:28:56.860972Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000018c88] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:28:56.861028Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-25T14:28:56.861066Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:28:56.861354Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:28:56.861371Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:56.861377Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:56.861394Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(monitoring.view) 2025-06-25T14:28:56.861475Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000018c88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "monitoring.view" resource_path { id: "gizmo" type: "iam.gizmo" } } 2025-06-25T14:28:56.862413Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000018c88] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:28:56.862497Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission monitoring.view now has a valid subject "user1@as" 2025-06-25T14:28:56.862555Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:28:59.393332Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519893908950753466:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:59.393520Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018d2/r3tmp/tmpvkOxBX/pdisk_1.dat 2025-06-25T14:28:59.529620Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:59.532435Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519893908950753448:2080] 1750861739380381 != 1750861739380384 TServer::EnableGrpc on GrpcPort 24128, node 5 2025-06-25T14:28:59.567432Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2025-06-25T14:28:59.567559Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:59.570609Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:59.582062Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:59.582088Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:59.582095Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:59.582216Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24115 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:59.817754Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:59.823015Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:28:59.823048Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:59.823057Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:59.823084Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-25T14:28:59.823135Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000111108] Connect to grpc://localhost:23700 2025-06-25T14:28:59.823811Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000111108] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:28:59.831445Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000111108] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:28:59.831579Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-25T14:28:59.831653Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:28:59.832065Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:28:59.832088Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:59.832097Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:59.832119Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-25T14:28:59.832155Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-25T14:28:59.832272Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000111108] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:28:59.832745Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000111108] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:28:59.836835Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000111108] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:28:59.836875Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000111108] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:28:59.836940Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-25T14:28:59.836976Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 
2025-06-25T14:28:59.837050Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as >> KqpScripting::NoAstSizeLimit [GOOD] |75.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |75.9%| [LD] {RESULT} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |75.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut >> TTicketParserTest::NebiusAuthorizationUnavailable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptEmptyResults [GOOD] Test command err: Trying to start YDB, gRPC: 19963, MsgBus: 61870 2025-06-25T14:28:52.704665Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893878760502428:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:52.705331Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d3f/r3tmp/tmpZ6hjRb/pdisk_1.dat 2025-06-25T14:28:53.083013Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893878760502328:2080] 1750861732681990 != 1750861732681993 2025-06-25T14:28:53.083159Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:53.092874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:53.092969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:53.095507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19963, node 1 2025-06-25T14:28:53.163286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:53.163315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:53.163323Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:53.163455Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61870 TClient is connected to server localhost:61870 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:53.672467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:28:53.703965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:53.707795Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:28:53.821427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:53.965669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:54.041515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:55.450257Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893891645405866:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:55.450367Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:55.759331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:55.785676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:55.849638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:55.878238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:55.903859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:55.928226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:55.956293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:56.039615Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893895940373824:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:56.039682Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:56.039689Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893895940373829:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:56.042777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:56.050135Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893895940373831:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:28:56.121653Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893895940373882:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:57.496073Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861737520, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 27141, MsgBus: 15443 2025-06-25T14:28:58.148324Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893905929384859:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:58.148403Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d3f/r3tmp/tmpKRiRNe/pdisk_1.dat 2025-06-25T14:28:58.282602Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:58.295170Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893905929384832:2080] 1750861738147876 != 1750861738147879 2025-06-25T14:28:58.300636Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:58.300719Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:58.304210Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27141, node 2 2025-06-25T14:28:58.346515Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:58.346540Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:58.346547Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:58.346657Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15443 TClient is connected to server localhost:15443 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:58.783759Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:58.793140Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:28:58.803509Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:58.913076Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:28:59.053067Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:59.139710Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:28:59.160549Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:00.862569Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893914519321053:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:00.862642Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:00.914226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:00.940275Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:00.966035Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:00.993474Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.020290Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.050755Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.080948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.138225Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893918814289001:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.138317Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.138555Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893918814289006:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.142596Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:01.156042Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893918814289008:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:01.231239Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893918814289061:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:02.314168Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861742343, txId: 281474976715672] shutting down 2025-06-25T14:29:02.461664Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861742490, txId: 281474976715674] shutting down >> TTicketParserTest::AuthorizationRetryError [GOOD] >> TTicketParserTest::AuthorizationRetryErrorImmediately >> AsyncIndexChangeExchange::ShouldDeliverChangesOnAlteredTable [GOOD] >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterDroppingIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::BulkAuthorizationUnavailable [GOOD] Test command err: 2025-06-25T14:28:47.471609Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893861467027494:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:47.471708Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018b9/r3tmp/tmpoCc2NA/pdisk_1.dat 2025-06-25T14:28:47.815268Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16821, node 1 2025-06-25T14:28:47.853964Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:47.854172Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:47.859879Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:47.880981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:47.881008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:47.881018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:47.881176Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24032 WaitRootIsUp 'Root'... 
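Note on the recurring "Resource pool default not found" warnings above: together with the "Scheduled retry ... doublechecking" message, the ESchemeOpCreateResourcePool operation, and the TX_PROXY "path exist, request accepts it" error, they are the normal bootstrap sequence. The workload service notices the default pool is missing, creates it under /Root/.metadata/workload_manager/pools/default, and a second racing creator simply finds the path already present, so no manual action is needed. Purely as a hedged illustration of the feature, a pool can also be declared explicitly in YQL roughly as below; the setting names are assumptions about the workload-manager syntax and may differ between YDB versions.

-- Hypothetical sketch only; the default pool in the log is created automatically.
-- CONCURRENT_QUERY_LIMIT and QUEUE_SIZE are assumed setting names.
CREATE RESOURCE POOL my_pool WITH (
    CONCURRENT_QUERY_LIMIT = 10,
    QUEUE_SIZE = 100
);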
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:48.161596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:48.178192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:28:48.231172Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:28:48.231438Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:28:48.231475Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:48.231956Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (5DAB89DE) () has now permanent error message 'Token is not in correct format' 2025-06-25T14:28:48.231972Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Token is not in correct format 2025-06-25T14:28:48.231993Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (5DAB89DE): Token is not in correct format 2025-06-25T14:28:50.247690Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893873642613334:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:50.247821Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018b9/r3tmp/tmpqtad5L/pdisk_1.dat 2025-06-25T14:28:50.355602Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22728, node 2 2025-06-25T14:28:50.391936Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:50.392020Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:50.393547Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: 
Connecting -> Connected 2025-06-25T14:28:50.418799Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:50.418827Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:50.418837Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:50.418947Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7568 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:50.639794Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:50.648051Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:28:50.648089Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:50.648097Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:50.648241Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-25T14:28:50.648335Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000032608] Connect to grpc://localhost:12893 2025-06-25T14:28:50.651870Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000032608] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-25T14:28:50.661102Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000032608] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } } } 2025-06-25T14:28:50.661342Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-25T14:28:50.661450Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:28:50.661911Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:28:50.661939Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:50.661947Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:50.662000Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-25T14:28:50.662231Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000032608] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-25T14:28:50.664157Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000032608] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } } } 2025-06-25T14:28:50.664273Z node 2 :TICKET_PARSER TRACE: 
ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-25T14:28:50.664345Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'something.write for folder_id aaaa1234 - Access Denied' 2025-06-25T14:28:53.221968Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519893885169773355:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:53.222027Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root ... CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:56.652844Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:56.652979Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16501 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:56.941799Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:56.949411Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:28:56.949446Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:56.949454Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:56.949597Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read somewhere.sleep something.list something.write something.eat) 2025-06-25T14:28:56.949644Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000133308] Connect to grpc://localhost:30175 2025-06-25T14:28:56.950715Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000133308] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "somewhere.sleep" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.list" ...(truncated) } 2025-06-25T14:28:56.960948Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000133308] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } items { permission: "somewhere.sleep" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } items { permission: "something.list" r...(truncated) } 2025-06-25T14:28:56.962582Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.read access denied for subject "user1@as" 2025-06-25T14:28:56.962617Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission somewhere.sleep access denied for subject "user1@as" 2025-06-25T14:28:56.962629Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.list access denied for subject "user1@as" 2025-06-25T14:28:56.962657Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.eat access denied for subject "user1@as" 2025-06-25T14:28:56.962677Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-25T14:28:56.962876Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000133688] Connect to grpc://localhost:13983 2025-06-25T14:28:56.963655Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000133688] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-25T14:28:56.974755Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000133688] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-25T14:28:56.975066Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-25T14:28:59.855389Z 
node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519893910337586690:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:59.855485Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018b9/r3tmp/tmp7tclti/pdisk_1.dat 2025-06-25T14:28:59.978424Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2644, node 5 2025-06-25T14:29:00.002468Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:00.002554Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:00.003745Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:00.035562Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:00.035585Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:00.035594Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:00.035728Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25024 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:00.349792Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:29:00.356961Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:00.356993Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:00.357002Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:00.357097Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-25T14:29:00.357140Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000045308] Connect to grpc://localhost:4284 2025-06-25T14:29:00.358077Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000045308] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-25T14:29:00.377312Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000045308] Status 14 Service Unavailable 2025-06-25T14:29:00.377848Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-25T14:29:00.377869Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "Service Unavailable" retryable: 1 2025-06-25T14:29:00.377904Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-25T14:29:00.378027Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-25T14:29:00.378367Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000045308] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-25T14:29:00.386663Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000045308] Status 1 CANCELLED 2025-06-25T14:29:00.387153Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "CANCELLED" retryable: 1 2025-06-25T14:29:00.387170Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "CANCELLED" retryable: 1 2025-06-25T14:29:00.387195Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' >> Cdc::HugeKey[YdsRunner] [GOOD] >> Cdc::HugeKey[TopicRunner] >> KqpScripting::ScanQueryTruncate [GOOD] >> KqpScripting::SyncExecuteYqlScriptSeveralQueries [GOOD] >> Cdc::NewImageLogDebezium [GOOD] >> Cdc::NaN[PqRunner] ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/kqp/ut/yql/unittest >> KqpScripting::NoAstSizeLimit [GOOD] Test command err: Trying to start YDB, gRPC: 11598, MsgBus: 3852 2025-06-25T14:28:55.278233Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893892027824457:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:55.278387Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d2a/r3tmp/tmpjZ7xyw/pdisk_1.dat 2025-06-25T14:28:55.608450Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:55.608750Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893892027824434:2080] 1750861735277501 != 1750861735277504 TServer::EnableGrpc on GrpcPort 11598, node 1 2025-06-25T14:28:55.667194Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:55.667219Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:55.667230Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:55.667342Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:55.708664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:55.708811Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:55.710684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3852 TClient is connected to server localhost:3852 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:56.136004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:56.164648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:28:56.171663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:56.286581Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:28:56.318349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:56.468513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:56.545178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:58.044725Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893904912727969:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.044833Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.352011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.379412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.410399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.437314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.483284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.520818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.553939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.647547Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893904912728633:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.647623Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.647740Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893904912728638:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.651359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:58.669642Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893904912728640:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:28:58.745274Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893904912728691:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:59.774736Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861739809, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 61227, MsgBus: 22771 2025-06-25T14:29:00.441589Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893913768372820:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:00.441864Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d2a/r3tmp/tmpEeViCT/pdisk_1.dat 2025-06-25T14:29:00.567057Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:00.572103Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893913768372800:2080] 1750861740440095 != 1750861740440098 2025-06-25T14:29:00.587332Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:00.587431Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:00.588988Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61227, node 2 2025-06-25T14:29:00.621088Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:00.621112Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:00.621121Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:00.621215Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22771 TClient is connected to server localhost:22771 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:01.057444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:01.452614Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:03.177451Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893926653275304:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.177530Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.188636Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:03.236575Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893926653275422:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.236643Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.265997Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893926653275433:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.266070Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.266158Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893926653275438:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.269706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:03.280808Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893926653275440:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:29:03.369668Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893926653275491:2395] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TSchemeshardBackgroundCompactionTest::ShouldCompactServerless [GOOD] >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable >> TTransferTests::Create_Disabled [GOOD] >> TTransferTests::CreateWithoutCredentials ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAuthorizationUnavailable [GOOD] Test command err: 2025-06-25T14:28:47.775217Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893857655851911:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:47.775288Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018dd/r3tmp/tmpMTqCCs/pdisk_1.dat 2025-06-25T14:28:48.068657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:48.068757Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:48.085434Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:48.104150Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:48.104389Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893857655851893:2080] 1750861727772227 != 1750861727772230 TServer::EnableGrpc on GrpcPort 2783, node 1 2025-06-25T14:28:48.148878Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:48.148908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:48.148922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:48.149034Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13608 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:48.436760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:48.452367Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket B20143E8C5A87521E8FCB688AF71D581F4C55287F505DFB84F37170A54638CEA () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-25T14:28:51.010123Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893877915584177:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:51.010196Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018dd/r3tmp/tmpjdQmiI/pdisk_1.dat 2025-06-25T14:28:51.115127Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:51.116153Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893877915584158:2080] 1750861731009552 != 1750861731009555 TServer::EnableGrpc on GrpcPort 1518, node 2 2025-06-25T14:28:51.147893Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:51.147968Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:51.149999Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:51.162154Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:51.162182Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:51.162189Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:51.162299Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4065 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:51.412936Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:51.420494Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 7031560AC96A52E9CE06982D1CFD9724F6865F8BCCEB504A8184F7CC7941E02D () has now permanent error message 'Cannot create token from certificate. Client`s certificate and server`s certificate have different issuers' 2025-06-25T14:28:51.421084Z node 2 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 7031560AC96A52E9CE06982D1CFD9724F6865F8BCCEB504A8184F7CC7941E02D: Cannot create token from certificate. 
Client`s certificate and server`s certificate have different issuers 2025-06-25T14:28:54.208622Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519893888427708148:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:54.208687Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018dd/r3tmp/tmpibEKkh/pdisk_1.dat 2025-06-25T14:28:54.357038Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:54.360420Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519893888427708128:2080] 1750861734208174 != 1750861734208177 2025-06-25T14:28:54.371546Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:54.371842Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:54.374230Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25029, node 3 2025-06-25T14:28:54.419180Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:54.419203Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:54.419210Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:54.419350Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7402 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:28:54.632266Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:54.639164Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 51506AF128393A628B232212404906EAD60F5DA9F651C1E7BC9DE36A7F598043 () has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-25T14:28:54.639633Z node 3 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 51506AF128393A628B232212404906EAD60F5DA9F651C1E7BC9DE36A7F598043: Cannot create token from certificate. Client certificate failed verification 2025-06-25T14:28:57.364953Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519893904210321660:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:57.365058Z node 4 ... :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:28:57.858829Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:57.858837Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:57.859247Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-25T14:28:57.859310Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5170000e4a88] Connect to grpc://localhost:10791 2025-06-25T14:28:57.862742Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000e4a88] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } 0: "OK" 2025-06-25T14:28:57.871771Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000e4a88] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } } 2025-06-25T14:28:57.871985Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1219: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-25T14:28:57.872131Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:28:57.872766Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , 
token db , DomainLoginOnly 1 2025-06-25T14:28:57.872785Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:57.872795Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:57.872860Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-25T14:28:57.873079Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000e4a88] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } 0: "OK" 2025-06-25T14:28:57.875117Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000e4a88] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } } 2025-06-25T14:28:57.875230Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1219: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-25T14:28:57.875281Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'something.write for aaaa1234 bbbb4554 - PERMISSION_DENIED' 2025-06-25T14:29:00.791521Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519893914569097212:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:00.791571Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018dd/r3tmp/tmp2YbB73/pdisk_1.dat 2025-06-25T14:29:00.885859Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:00.886864Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519893914569097192:2080] 1750861740790892 != 1750861740790895 TServer::EnableGrpc on GrpcPort 12847, node 5 2025-06-25T14:29:00.927728Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:00.927830Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:00.929443Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:00.938346Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: (empty maybe) 2025-06-25T14:29:00.938370Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:00.938380Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:00.938488Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10011 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:01.180148Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:29:01.187499Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:01.187523Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:01.187529Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:01.187578Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-25T14:29:01.187615Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000011208] Connect to grpc://localhost:1380 2025-06-25T14:29:01.188405Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000011208] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 14: "Service Unavailable" 2025-06-25T14:29:01.198595Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000011208] Status 14 Service Unavailable 2025-06-25T14:29:01.198732Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-25T14:29:01.198754Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "Service Unavailable" retryable: 1 2025-06-25T14:29:01.198787Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-25T14:29:01.198855Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-25T14:29:01.199128Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000011208] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } 2025-06-25T14:29:01.203108Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000011208] Status 1 CANCELLED 2025-06-25T14:29:01.203232Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "CANCELLED" retryable: 1 2025-06-25T14:29:01.203256Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "CANCELLED" retryable: 1 2025-06-25T14:29:01.203276Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** 
(8E120919) () has now retryable error message 'CANCELLED' >> KqpYql::TableRange >> KqpPragma::OrderedColumns >> TTransferTests::Create [GOOD] >> TTransferTests::CreateSequential >> KqpYql::EvaluateExprYsonAndType [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::ScanQueryTruncate [GOOD] Test command err: Trying to start YDB, gRPC: 27550, MsgBus: 26273 2025-06-25T14:28:55.298905Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893894085570102:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:55.298961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d24/r3tmp/tmpkBOKe8/pdisk_1.dat 2025-06-25T14:28:55.655499Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893894085570082:2080] 1750861735298252 != 1750861735298255 2025-06-25T14:28:55.662099Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27550, node 1 2025-06-25T14:28:55.755481Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:55.755504Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:55.755525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:55.755682Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:55.772753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:55.772876Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:55.774628Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26273 TClient is connected to server localhost:26273 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:56.259441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:56.279594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:56.324470Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:56.379753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:56.515844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:56.577696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:57.765095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893902675506315:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:57.765192Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.059703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.083381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.109981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.132747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.159642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.189600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.225695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.291707Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893906970474265:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.291773Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.291844Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893906970474270:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.295017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:58.311003Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893906970474272:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:28:58.413905Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893906970474325:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:59.515324Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519893911265441906:2481], status: PRECONDITION_FAILED, issues:
: Error: Default error
:1:746: Error: Scan query should have a single result set., code: 2029
: Error: Default error
:1:746: Error: Scan query should have a single result set., code: 2029 2025-06-25T14:28:59.515518Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTYyMjQ0MjMtNDQ5N2RmNmQtNzIxYzQ3YzEtNjBkYzkzYjE=, ActorId: [1:7519893911265441904:2480], ActorState: ExecuteState, TraceId: 01jykqvxf5a3npqttw5cmvc2ks, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id:
: Error: Execution, code: 1060
: Error: Default error
:1:746: Error: Scan query should have a single result set., code: 2029 2025-06-25T14:28:59.637720Z node 1 :KQP_COMP ... modifications., code: 2029 2025-06-25T14:28:59.639750Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YjkzNWNhYS0yMjk4MjAxYS1lYjc2Yjc1LWVjNDE3ZmYy, ActorId: [1:7519893911265441933:2493], ActorState: ExecuteState, TraceId: 01jykqvxkdah4nxs0bpt967e8j, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id:
: Error: Execution, code: 1060
: Error: Default error
:1:375: Error: Scan query cannot have data modifications., code: 2029 Trying to start YDB, gRPC: 19918, MsgBus: 7870 2025-06-25T14:29:00.235384Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893914435170714:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:00.235456Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d24/r3tmp/tmpO2cvRK/pdisk_1.dat 2025-06-25T14:29:00.340601Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:00.340972Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893914435170692:2080] 1750861740235030 != 1750861740235033 TServer::EnableGrpc on GrpcPort 19918, node 2 2025-06-25T14:29:00.376253Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:00.376322Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:00.378308Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:00.400842Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:00.400865Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:00.400870Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:00.400981Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7870 TClient is connected to server localhost:7870 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:00.787662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:29:00.796985Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:00.845221Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:00.971029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:01.045743Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:01.244981Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:02.845536Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893923025106924:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:02.845644Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:02.874706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:02.904097Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:02.931246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:02.958545Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:02.984914Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:03.012365Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:03.047333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:03.114831Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893927320074875:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.114906Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.115134Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893927320074880:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.117874Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:03.125595Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893927320074882:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:03.202827Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893927320074933:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:04.260860Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7519893931615042595:2057], tablet: [2:7519893914435171483:2285], scanId: 4, table: /Root/EightShard 2025-06-25T14:29:04.269839Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861744289, txId: 281474976715672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::SyncExecuteYqlScriptSeveralQueries [GOOD] Test command err: Trying to start YDB, gRPC: 15733, MsgBus: 64313 2025-06-25T14:28:54.084088Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893891006260244:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:54.084148Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d32/r3tmp/tmp7WTgyx/pdisk_1.dat 2025-06-25T14:28:54.413011Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15733, node 1 2025-06-25T14:28:54.477283Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:54.477474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:54.478763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:54.478783Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:54.478809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:54.478903Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:54.479000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64313 TClient is connected to server localhost:64313 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:54.992144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:55.054985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:55.105696Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:55.212450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:28:55.355653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:55.427507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:57.082978Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893903891163740:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:57.083084Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:57.416076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:57.438646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:57.461309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:57.483056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:57.507722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:57.552293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:57.575785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:57.620092Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893903891164395:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:57.620162Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:57.620169Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893903891164400:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:57.623400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:57.631583Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893903891164402:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:28:57.723962Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893903891164453:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:59.115823Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893891006260244:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:59.115915Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:59.166141Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861739193, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 29183, MsgBus: 10819 2025-06-25T14:28:59.887274Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893911966711910:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:59.887325Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d32/r3tmp/tmp3W1MDP/pdisk_1.dat 2025-06-25T14:28:59.978160Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:59.987155Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893911966711891:2080] 1750861739886865 != 1750861739886868 TServer::EnableGrpc on GrpcPort 29183, node 2 2025-06-25T14:29:00.022826Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:00.022897Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:00.024327Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:00.024697Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:00.024706Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:00.024713Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:00.024827Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10819 TClient is connected to server localhost:10819 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:00.436186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:00.451153Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:29:00.520408Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:00.665304Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:00.739367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:00.910131Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:02.942385Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893924851615406:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:02.942466Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.021613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:03.049808Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:03.076071Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:03.099675Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:03.124750Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:03.154696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:03.188814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:03.250985Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893929146583356:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.251026Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893929146583361:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.251085Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:03.254431Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:03.265032Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893929146583363:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:03.326963Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893929146583414:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestReboot [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCleanIndex >> KqpScripting::StreamExecuteYqlScriptScan >> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad [GOOD] >> KqpPragma::Auth >> KqpYql::UuidPrimaryKeyBulkUpsert [GOOD] >> KqpYql::InsertIgnore >> TTicketParserTest::BulkAuthorization [GOOD] >> TTicketParserTest::AuthorizationWithUserAccount2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::EvaluateExprYsonAndType [GOOD] Test command err: Trying to start YDB, gRPC: 28898, MsgBus: 20561 2025-06-25T14:28:55.948740Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893892939128816:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:55.948885Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d1b/r3tmp/tmpgiuj2m/pdisk_1.dat 2025-06-25T14:28:56.244908Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893892939128793:2080] 1750861735941831 != 1750861735941834 2025-06-25T14:28:56.246105Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28898, node 1 2025-06-25T14:28:56.318755Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:56.318786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:56.318797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:56.318944Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:56.325979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:56.326120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:56.327698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20561 TClient is connected to server localhost:20561 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:56.870148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:56.891557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:56.960776Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:57.034398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:57.165667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:57.239949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:58.760416Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893905824032325:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:58.760527Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:59.152808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:59.185724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:59.225220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:59.248182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:59.275257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:59.302143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:59.382279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:59.443758Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893910119000288:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:59.443815Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:59.443950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893910119000293:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:59.447635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:59.456696Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893910119000295:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:28:59.540336Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893910119000346:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 25848, MsgBus: 9122 2025-06-25T14:29:01.129977Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893917680859452:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:01.130046Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d1b/r3tmp/tmphmOzZP/pdisk_1.dat 2025-06-25T14:29:01.225281Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:01.226599Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893917680859429:2080] 1750861741129596 != 1750861741129599 TServer::EnableGrpc on GrpcPort 25848, node 2 2025-06-25T14:29:01.265399Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:01.265470Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:01.267678Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:01.290412Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:01.290447Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:01.290455Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:01.290577Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9122 TClient is connected to server localhost:9122 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:29:01.677953Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:01.696072Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:01.791901Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:01.926433Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:01.999106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:02.150405Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:04.021292Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893930565762965:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:04.021384Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:04.082252Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:04.115703Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:04.152370Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:04.180704Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:04.205937Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:04.234117Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:04.265006Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:04.326235Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893930565763619:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:04.326316Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:04.326376Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893930565763624:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:04.329429Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:04.337886Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893930565763626:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:04.403709Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893930565763677:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TTicketParserTest::NebiusAuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::NebiusAuthorization >> KqpScripting::EndOfQueryCommit >> TTransferTests::CreateWithoutCredentials [GOOD] >> TTransferTests::CreateWrongConfig >> TTicketParserTest::AuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::AuthorizationWithRequiredPermissions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad [GOOD] Test command err: 2025-06-25T14:28:48.643609Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893863433684788:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:48.643686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00188a/r3tmp/tmpvHoOQs/pdisk_1.dat 2025-06-25T14:28:48.902836Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893863433684768:2080] 1750861728642696 != 1750861728642699 2025-06-25T14:28:48.909966Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:48.913124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:48.913262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:48.916106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6991, node 1 2025-06-25T14:28:48.974170Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:48.974199Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:48.974210Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:48.974329Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32016 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:49.212330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:49.231729Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 7781B73CD7F4AA2C56CDD36E6F356B667BBA2592314C5D5459E1B8CEF0EF8FE9 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-25T14:28:52.036479Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893881614974779:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:52.036544Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00188a/r3tmp/tmpU60a4a/pdisk_1.dat 2025-06-25T14:28:52.139350Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:52.141084Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893881614974760:2080] 1750861732035821 != 1750861732035824 TServer::EnableGrpc on GrpcPort 7273, node 2 2025-06-25T14:28:52.176629Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:52.176711Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:52.178483Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:52.191755Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:52.191781Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:52.191791Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:52.191916Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10401 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:52.473994Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:52.481319Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:28:52.485120Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 2B676D59DDB4FAA12142028A7DD38451C67E0EA7AAC6889539DF9A865BEE4031 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-25T14:28:55.415446Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519893893724404234:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:55.415953Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00188a/r3tmp/tmpZCY1cs/pdisk_1.dat 2025-06-25T14:28:55.548566Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:55.557847Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519893893724404207:2080] 1750861735413350 != 1750861735413353 TServer::EnableGrpc on GrpcPort 13702, node 3 2025-06-25T14:28:55.587716Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:55.587821Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:55.589552Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:55.596095Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:55.596122Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:55.596132Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:28:55.596268Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18689 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:55.822106Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:55.828336Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 41EC225C9DE507451315603305F6353D63D58822298703DCC8E2896A6855CC72 () has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-25T14:28:55.828968Z node 3 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 41EC225C9DE507451315603305F6353D63D58822298703DCC8E2896A6855CC72: Cannot create token from certificate. 
Client certificate failed verification 2025-06-25T14:28:58.728463Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519893905106089120:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:58.728597Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00188a/r3tmp/tmp1ctq35/pdisk_1.dat 2025-06-25T14:28:58.880910Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:58.895330Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:58.895430Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:58.898961Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21021, node 4 2025-06-25T14:28:58.945882Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:58.945912Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:58.945922Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:58.946099Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12261 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:59.212185Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:59.224421Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 51C1654299E352F7E354C1091B57FC667159291A1FB4A63D1684880ADAA045D4 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-25T14:29:02.827422Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519893925884038782:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:02.827463Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00188a/r3tmp/tmp2gx5aq/pdisk_1.dat 2025-06-25T14:29:02.930714Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:02.932246Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519893925884038761:2080] 1750861742826701 != 1750861742826704 2025-06-25T14:29:02.972919Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:02.973009Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 17460, node 5 2025-06-25T14:29:02.974433Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:03.003212Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:03.003238Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:03.003248Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:03.003412Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15036 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:03.267918Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:29:03.276328Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 24630CD21645F683243E308E158C5154A5F6B460CF32D55684871B7AEDB11E7D () has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-25T14:29:03.276851Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 24630CD21645F683243E308E158C5154A5F6B460CF32D55684871B7AEDB11E7D: Cannot create token from certificate. Client certificate failed verification >> Cdc::Write[PqRunner] [GOOD] >> Cdc::Write[YdsRunner] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::UuidPrimaryKeyBulkUpsert [GOOD] Test command err: Trying to start YDB, gRPC: 3378, MsgBus: 7386 2025-06-25T14:29:02.780397Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893925531080012:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:02.780456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000cf6/r3tmp/tmpyAGjx2/pdisk_1.dat 2025-06-25T14:29:03.075441Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3378, node 1 2025-06-25T14:29:03.150858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:03.150884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:03.150895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:03.151029Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:03.165553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:03.165679Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:03.167636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7386 TClient is connected to server localhost:7386 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
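Editor's note: the KqpYql::UuidPrimaryKeyBulkUpsert output that begins above only shows cluster bootstrap; the test itself exercises a table keyed by a Uuid column. A minimal YQL sketch of that shape follows (table path, column names, and values are hypothetical, not the test's actual schema; the real test drives bulk upsert through the SDK rather than plain YQL):

-- Hypothetical schema and upsert illustrating a Uuid primary key.
CREATE TABLE `/Root/UuidPk` (
    id Uuid NOT NULL,
    payload Utf8,
    PRIMARY KEY (id)
);

UPSERT INTO `/Root/UuidPk` (id, payload) VALUES
    (Uuid("5b99a330-04ef-4f1a-9b64-ba6d5f44ea01"), "first"),
    (Uuid("3c33d53e-9e1c-4c7a-ae0f-8a1f7c4b2d10"), "second");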
2025-06-25T14:29:03.774230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:03.787520Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:05.727001Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893938415982509:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:05.727080Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:05.995999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.184672Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893942710949918:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.184742Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.185020Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893942710949923:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.191771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:06.223008Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893942710949925:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:29:06.285658Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893942710949976:2400] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TExternalTableTest::SchemeErrors >> KqpYql::UuidPrimaryKeyDisabled >> KqpYql::Discard [GOOD] >> KqpYql::TableNameConflict [GOOD] >> TTransferTests::CreateSequential [GOOD] >> TTransferTests::CreateInParallel >> THiveTest::TestHiveBalancerOneTabletHighUsage [GOOD] >> THiveTest::TestHiveBalancerWithSpareNodes |75.9%| [TA] $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpYql::SelectNoAsciiValue [GOOD] >> TExternalTableTest::ParallelCreateExternalTable >> TExternalTableTest::SchemeErrors [GOOD] >> TTransferTests::CreateWrongConfig [GOOD] >> TTransferTests::CreateWrongBatchSize >> KqpScripting::StreamExecuteYqlScriptScanOperationTmeoutBruteForce [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::Discard [GOOD] Test command err: Trying to start YDB, gRPC: 7750, MsgBus: 12922 2025-06-25T14:28:57.920787Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893900524161064:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:57.920946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d03/r3tmp/tmpsaxW3r/pdisk_1.dat 2025-06-25T14:28:58.258845Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893900524161046:2080] 1750861737919898 != 1750861737919901 2025-06-25T14:28:58.272376Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7750, node 1 2025-06-25T14:28:58.309231Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:58.309263Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:58.309272Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:58.309392Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:58.310658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:58.310944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:58.312878Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12922 TClient is connected to server localhost:12922 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:58.806911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:28:58.842106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:58.938112Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:58.999287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:59.158842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:59.240168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:00.729600Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893913409064592:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:00.729713Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.017058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.045297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.071000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.098243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.123889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.155855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.183161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.232416Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893917704032545:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.232472Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.232669Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893917704032550:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.235626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:01.245473Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893917704032552:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:01.347152Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893917704032603:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 32373, MsgBus: 12099 2025-06-25T14:29:03.118819Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893926837394233:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:03.118925Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d03/r3tmp/tmpsp9Yzp/pdisk_1.dat 2025-06-25T14:29:03.248998Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:03.249282Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893926837394202:2080] 1750861743118001 != 1750861743118004 2025-06-25T14:29:03.264243Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:03.264345Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 32373, node 2 2025-06-25T14:29:03.265755Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:03.312862Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:03.312896Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:03.312911Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:03.313018Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12099 TClient is connected to server localhost:12099 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
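Editor's note: this block is the KqpYql::Discard test; the compile error it asserts ("DISCARD not supported in YDB queries, code: 2008") appears further below in this same output. A minimal sketch of the kind of statement that triggers it (table path is hypothetical):

-- DISCARD evaluates a query and drops its result set in generic YQL;
-- YDB's KQP compiler rejects it, which is exactly what this test checks.
DISCARD SELECT Key, Value FROM `/Root/KeyValue`;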
2025-06-25T14:29:03.782843Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:03.790500Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:03.799565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:03.851926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:03.993371Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:04.071271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:04.196633Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:06.094269Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893939722297731:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.094369Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.149021Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.181266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.251305Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.319533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.347284Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.420676Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.473353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.530050Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893939722298395:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.530122Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.530149Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893939722298400:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.533157Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:06.542009Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893939722298402:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:06.612646Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893939722298453:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:07.613004Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519893944017266025:2477], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:2:13: Error: DISCARD not supported in YDB queries, code: 2008 2025-06-25T14:29:07.615648Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MjdlZmRjMzEtYzc0MGEyMmQtZDllODhkMmEtODY0ZmYwYzU=, ActorId: [2:7519893944017266018:2473], ActorState: ExecuteState, TraceId: 01jykqw5a61fae0n5hm9jeazcg, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> TExternalTableTest::DropTableTwice ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::SchemeErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:29:08.511263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:08.511367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:08.511414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:08.511462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:08.511525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:08.511568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:08.511631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:08.511732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:08.512543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:08.512914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:08.596911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:29:08.596991Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:08.597723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:08.615692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:08.616121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:08.616329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:08.628185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:08.628449Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:08.629186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:08.629500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:08.632435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:08.632633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:08.633910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:08.633986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:08.634042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:08.634088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:08.634130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:08.634337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:08.641529Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:08.783762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:08.784037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:08.784265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:08.784324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:08.784559Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:08.784663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:08.787139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:08.787343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:08.787527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:08.787584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:08.787639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:08.787678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:08.789810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:08.789873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:08.789925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:08.791899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:08.791951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:08.791994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:08.792076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:08.795859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:08.797758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 
2025-06-25T14:29:08.797945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:08.798903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:08.799036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:08.799103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:08.799430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:08.799516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:08.799682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:08.799759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29: ... 
025-06-25T14:29:08.886596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 126:0, path# /MyRoot/DirA/Table2 2025-06-25T14:29:08.886794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 126:1, propose status:StatusSchemeError, reason: Type 'BlaBlaType' specified for column 'RowId' is not supported by storage, at schemeshard: 72057594046678944 2025-06-25T14:29:08.888801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 126, response: Status: StatusSchemeError Reason: "Type \'BlaBlaType\' specified for column \'RowId\' is not supported by storage" TxId: 126 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:08.889011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 126, database: /MyRoot, subject: , status: StatusSchemeError, reason: Type 'BlaBlaType' specified for column 'RowId' is not supported by storage, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 126, wait until txId: 126 TestModificationResults wait txId: 127 2025-06-25T14:29:08.891559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "" Type: "Uint64" } } } TxId: 127 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:08.892237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 127:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "" Type: "Uint64" } } 2025-06-25T14:29:08.892354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 127:0, path# /MyRoot/DirA/Table2 2025-06-25T14:29:08.892501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 127:1, propose status:StatusSchemeError, reason: Columns cannot have an empty name, at schemeshard: 72057594046678944 2025-06-25T14:29:08.895153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 127, response: Status: StatusSchemeError Reason: "Columns cannot have an empty name" TxId: 127 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:08.895437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 127, database: /MyRoot, subject: , status: StatusSchemeError, reason: Columns cannot have an empty name, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 127, wait until txId: 127 TestModificationResults wait txId: 128 2025-06-25T14:29:08.898364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" 
DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" TypeId: 27 } } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:08.898673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 128:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" TypeId: 27 } } 2025-06-25T14:29:08.898772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 128:0, path# /MyRoot/DirA/Table2 2025-06-25T14:29:08.898909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 128:1, propose status:StatusSchemeError, reason: Cannot set TypeId for column 'RowId', use Type, at schemeshard: 72057594046678944 2025-06-25T14:29:08.902338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 128, response: Status: StatusSchemeError Reason: "Cannot set TypeId for column \'RowId\', use Type" TxId: 128 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:08.902586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, subject: , status: StatusSchemeError, reason: Cannot set TypeId for column 'RowId', use Type, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 128, wait until txId: 128 TestModificationResults wait txId: 129 2025-06-25T14:29:08.905257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" } } } TxId: 129 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:08.905557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 129:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" } } 2025-06-25T14:29:08.905637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 129:0, path# /MyRoot/DirA/Table2 2025-06-25T14:29:08.905780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 129:1, propose status:StatusSchemeError, reason: Missing Type for column 'RowId', at schemeshard: 72057594046678944 2025-06-25T14:29:08.908127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 129, response: Status: StatusSchemeError Reason: "Missing Type for column \'RowId\'" TxId: 129 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:08.908385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 
129, database: /MyRoot, subject: , status: StatusSchemeError, reason: Missing Type for column 'RowId', operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 129, wait until txId: 129 TestModificationResults wait txId: 130 2025-06-25T14:29:08.910611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" Id: 2 } Columns { Name: "RowId2" Type: "Uint64" Id: 2 } } } TxId: 130 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:08.910959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 130:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" Id: 2 } Columns { Name: "RowId2" Type: "Uint64" Id: 2 } } 2025-06-25T14:29:08.911042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 130:0, path# /MyRoot/DirA/Table2 2025-06-25T14:29:08.911212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 130:1, propose status:StatusSchemeError, reason: Duplicate column id: 2, at schemeshard: 72057594046678944 2025-06-25T14:29:08.913805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 130, response: Status: StatusSchemeError Reason: "Duplicate column id: 2" TxId: 130 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:08.913982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 130, database: /MyRoot, subject: , status: StatusSchemeError, reason: Duplicate column id: 2, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 130, wait until txId: 130 TestModificationResults wait txId: 131 2025-06-25T14:29:08.916371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource1" Location: "/" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } } } TxId: 131 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:08.916650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 131:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource1" Location: "/" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } } 2025-06-25T14:29:08.916718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] 
TCreateExternalTable Propose: opId# 131:0, path# /MyRoot/DirA/Table2 2025-06-25T14:29:08.916852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 131:1, propose status:StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/ExternalDataSource1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), at schemeshard: 72057594046678944 2025-06-25T14:29:08.918775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 131, response: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" TxId: 131 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:08.919016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 131, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/ExternalDataSource1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 131, wait until txId: 131 >> TExternalTableTest::ReadOnlyMode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::TableNameConflict [GOOD] Test command err: Trying to start YDB, gRPC: 24361, MsgBus: 16581 2025-06-25T14:28:57.886836Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893900865797871:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:57.887119Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d0d/r3tmp/tmpH07JVy/pdisk_1.dat 2025-06-25T14:28:58.203851Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893900865797841:2080] 1750861737886102 != 1750861737886105 2025-06-25T14:28:58.211407Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24361, node 1 2025-06-25T14:28:58.266691Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:58.266844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:58.268984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:58.284889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:58.284920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:58.284928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:58.285051Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16581 TClient is connected to server 
localhost:16581 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:58.861311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:58.893069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:58.911085Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:59.050620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:59.199195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:59.274706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:00.654179Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893913750701373:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:00.654306Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.039125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.103494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.133121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.164427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.198687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.231782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.297879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.374764Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893918045669337:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.374858Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.374888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893918045669342:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.377948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:01.386287Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893918045669344:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:01.472344Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893918045669395:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Error: Table intent determination, code: 1040
:3:27: Error: CONCAT is not supported on Kikimr clusters. Trying to start YDB, gRPC: 17513, MsgBus: 6308 2025-06-25T14:29:03.307527Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893926575776798:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:03.307676Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d0d/r3tmp/tmpAzPVVG/pdisk_1.dat 2025-06-25T14:29:03.429216Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:03.430872Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893926575776775:2080] 1750861743306659 != 1750861743306662 TServer::EnableGrpc on GrpcPort 17513, node 2 2025-06-25T14:29:03.454999Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:03.455080Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:03.456726Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:03.518888Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:03.518918Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:03.518926Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:03.519061Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6308 TClient is connected to server localhost:6308 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:03.981748Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
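Context for the CONCAT error above (an editor's illustrative sketch, not taken from this log; the table paths are hypothetical): the YQL CONCAT table function merges several tables in a FROM clause and is a YT-side feature, so the YDB (Kikimr) provider rejects it during table intent determination with exactly this kind of issue. A minimal query that would trigger it looks roughly like:

    -- hypothetical paths; CONCAT over table paths is not supported in YDB queries
    SELECT * FROM CONCAT(`/Root/Test`, `/Root/Logs`);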
2025-06-25T14:29:03.995497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:04.075114Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:29:04.215013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:04.280558Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:04.383258Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:06.313342Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893939460680281:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.313425Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.376518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.411880Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.438052Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.476907Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.510810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.540106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.632406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.721851Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893939460680945:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.721915Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.721998Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893939460680950:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.724817Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:06.735118Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893939460680952:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:06.793672Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893939460681003:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Error: Type annotation, code: 1030
:12:30: Error: At function: KiCreateTable!
:12:30: Error: Table name conflict: db.[/Root/Test] is used to reference multiple tables. ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::SelectNoAsciiValue [GOOD] Test command err: Trying to start YDB, gRPC: 62871, MsgBus: 23600 2025-06-25T14:28:57.832986Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893900878788513:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:57.833087Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d13/r3tmp/tmplx0VEM/pdisk_1.dat 2025-06-25T14:28:58.135126Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:58.135496Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893900878788488:2080] 1750861737832079 != 1750861737832082 TServer::EnableGrpc on GrpcPort 62871, node 1 2025-06-25T14:28:58.185643Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:58.185663Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:58.185671Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:58.185812Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:58.188956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:58.189102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:58.191897Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23600 TClient is connected to server localhost:23600 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:58.727295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:58.741465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:28:58.755788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:58.864078Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:58.915250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:59.076011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:59.153934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:00.729391Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893913763692022:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:00.729497Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.073274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.101880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.125524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.154877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.179842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.222882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.251866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.328754Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893918058659977:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.328840Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.328857Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893918058659982:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.331730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:01.339608Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893918058659984:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:01.405397Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893918058660035:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Error: Type annotation, code: 1030
:10:13: Error: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At tuple, At function: SqlProjectItem, At lambda
:10:20: Error: At function: Apply
:8:28: Error: At function: ScriptUdf
:8:28: Error: Module not loaded for script type: Python3 Trying to start YDB, gRPC: 25381, MsgBus: 19439 2025-06-25T14:29:03.195679Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893927836597929:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:03.197685Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d13/r3tmp/tmpunAle6/pdisk_1.dat 2025-06-25T14:29:03.326414Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:03.341240Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:03.341321Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:03.342949Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25381, node 2 2025-06-25T14:29:03.396555Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:03.396578Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:03.396587Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:03.396697Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19439 TClient is connected to server localhost:19439 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:03.868549Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:29:03.883045Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:03.969836Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:04.127633Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:04.208809Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:29:04.214000Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.220596Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893940721501383:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.220671Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.281162Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.313726Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.344350Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.382037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.410270Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.479799Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.566613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.623199Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893940721502043:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.623291Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.623377Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893940721502048:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.626953Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:06.637862Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893940721502050:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:06.736135Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893940721502101:3411] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:07.783989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:08.184638Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861748216, txId: 281474976715674] shutting down 2025-06-25T14:29:08.194378Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893927836597929:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:08.194435Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TExternalTableTest::ParallelCreateExternalTable [GOOD] >> KqpScripting::StreamExecuteYqlScriptMixed [GOOD] >> KqpScripting::StreamExecuteYqlScriptLeadingEmptyScan >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterDroppingIndex [GOOD] >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterCancelIndexBuild >> TTransferTests::CreateWrongBatchSize [GOOD] >> TTransferTests::CreateWrongFlushIntervalIsSmall >> TExternalTableTest::DropTableTwice [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBackups [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBorrowed >> TTicketParserTest::AuthenticationRetryError [GOOD] >> TTicketParserTest::AuthenticationRetryErrorImmediately >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptScanOperationTmeoutBruteForce [GOOD] Test command err: Trying to start YDB, gRPC: 21144, MsgBus: 17558 2025-06-25T14:28:44.991541Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893847047932514:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:44.991657Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d4c/r3tmp/tmp8lsKXZ/pdisk_1.dat 2025-06-25T14:28:45.257829Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:45.261278Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893847047932495:2080] 1750861724990999 != 1750861724991002 TServer::EnableGrpc on GrpcPort 21144, node 1 2025-06-25T14:28:45.331538Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:45.331562Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:45.331568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:45.331710Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:45.340947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:45.341068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:45.343216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17558 TClient is connected to server localhost:17558 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:45.849638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:45.890847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:46.000936Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:46.027282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:28:46.167019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:46.249519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:47.470824Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893859932836017:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:47.470947Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:47.755336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:47.784838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:47.812551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:47.840002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:47.872634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:47.941437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:48.011747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:48.070897Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893864227803977:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:48.070997Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:48.071256Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893864227803982:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:28:48.074693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:28:48.083712Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893864227803984:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:28:48.146406Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893864227804035:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:28:48.920197Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [1:7519893864227804299:2471] 2025-06-25T14:28:48.991361Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [1:7519893864227804312:2476] 2025-06-25T14:28:49.003037Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [1:7519893864227804317:2478] 2025-06-25T14:28:49.018506Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [1:7519893868522771626:2483] 2025-06-25T14:28:49.037560Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [1:7519893868522771643:2489] 2025-06-25T14:28:49.089024Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [1:7519893868522771653:2493] 2025-06-25T14:28:49.092253Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_yql_script. ... te_actor_impl.h:1210: SelfId: [2:7519893937637796438:2951], TxId: 281474976715684, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=MWZjMWFkMWItYzFhZWM5OTMtZWFlOTA4NTItNTcwZWYwZGY=. TraceId : 01jykqw3c791v20vnax9nknpwe. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519893937637796433:2947], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:29:05.718748Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519893937637796440:2953], TxId: 281474976715684, task: 3. Ctx: { SessionId : ydb://session/3?node_id=2&id=MWZjMWFkMWItYzFhZWM5OTMtZWFlOTA4NTItNTcwZWYwZGY=. TraceId : 01jykqw3c791v20vnax9nknpwe. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519893937637796433:2947], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:29:06.264924Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861746291, txId: 281474976715686] shutting down 2025-06-25T14:29:06.621799Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861746655, txId: 281474976715688] shutting down 2025-06-25T14:29:06.827277Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519893941932764137:3033] TxId: 281474976715691. Ctx: { TraceId: 01jykqw4ed0h16981g3q6gwja0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTRkYzY4NDItOTAwZTUzOGYtNGYwODVjYWYtODJkOGYzMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-25T14:29:06.827506Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=OTRkYzY4NDItOTAwZTUzOGYtNGYwODVjYWYtODJkOGYzMDk=, ActorId: [2:7519893941932764101:3033], ActorState: ExecuteState, TraceId: 01jykqw4ed0h16981g3q6gwja0, Create QueryResponse for error on request, msg: 2025-06-25T14:29:06.828039Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861746858, txId: 281474976715690] shutting down 2025-06-25T14:29:06.828153Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519893941932764146:3042], TxId: 281474976715691, task: 5. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=OTRkYzY4NDItOTAwZTUzOGYtNGYwODVjYWYtODJkOGYzMDk=. TraceId : 01jykqw4ed0h16981g3q6gwja0. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519893941932764137:3033], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:29:06.828657Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519893941932764145:3041], TxId: 281474976715691, task: 4. Ctx: { CustomerSuppliedId : . TraceId : 01jykqw4ed0h16981g3q6gwja0. SessionId : ydb://session/3?node_id=2&id=OTRkYzY4NDItOTAwZTUzOGYtNGYwODVjYWYtODJkOGYzMDk=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519893941932764137:3033], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:29:07.027724Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=N2Y0NDRkNzMtMzM3ZmNmMWQtZTkzMzUxZjEtNzczMzgwYjU=, ActorId: [2:7519893941932764220:3054], ActorState: ExecuteState, TraceId: 01jykqw4mm1g8g5wyckr2y6kpa, Create QueryResponse for error on request, msg: 2025-06-25T14:29:07.028545Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519893946227731568:3060] TxId: 281474976715694. Ctx: { TraceId: 01jykqw4mm1g8g5wyckr2y6kpa, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2E5Y2ZlOS02YzQyZGM2MC05MzAzMDQ4OC00ZjkzY2M4Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-25T14:29:07.031506Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519893946227731578:3071], TxId: 281474976715694, task: 5. Ctx: { TraceId : 01jykqw4mm1g8g5wyckr2y6kpa. SessionId : ydb://session/3?node_id=2&id=N2E5Y2ZlOS02YzQyZGM2MC05MzAzMDQ4OC00ZjkzY2M4Yw==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519893946227731568:3060], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:29:07.032011Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=N2E5Y2ZlOS02YzQyZGM2MC05MzAzMDQ4OC00ZjkzY2M4Yw==, ActorId: [2:7519893941932764232:3060], ActorState: ExecuteState, TraceId: 01jykqw4mm1g8g5wyckr2y6kpa, Create QueryResponse for error on request, msg: 2025-06-25T14:29:07.032678Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861747061, txId: 281474976715693] shutting down 2025-06-25T14:29:07.233878Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZjgzMThlYzctM2FmZjM4MjYtNDZiOGU2NjQtYTFhZDliMTU=, ActorId: [2:7519893946227731638:3077], ActorState: ExecuteState, TraceId: 01jykqw4tzcgxjec5e8dymbqf1, Create QueryResponse for error on request, msg: 2025-06-25T14:29:07.656516Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519893946227731749:3111] TxId: 281474976715697. Ctx: { TraceId: 01jykqw57wccnrhvpmdw6zdyr5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODVhY2RjZDctMTU3YzFlZmEtOGYwMDI4OGQtZmIyYjg4MDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-25T14:29:07.656750Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ODVhY2RjZDctMTU3YzFlZmEtOGYwMDI4OGQtZmIyYjg4MDQ=, ActorId: [2:7519893946227731715:3111], ActorState: ExecuteState, TraceId: 01jykqw57wccnrhvpmdw6zdyr5, Create QueryResponse for error on request, msg: 2025-06-25T14:29:07.657288Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861747684, txId: 281474976715696] shutting down 2025-06-25T14:29:07.657403Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519893946227731754:3116], TxId: 281474976715697, task: 2. Ctx: { TraceId : 01jykqw57wccnrhvpmdw6zdyr5. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=ODVhY2RjZDctMTU3YzFlZmEtOGYwMDI4OGQtZmIyYjg4MDQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519893946227731749:3111], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:29:07.657810Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519893946227731755:3117], TxId: 281474976715697, task: 3. Ctx: { TraceId : 01jykqw57wccnrhvpmdw6zdyr5. SessionId : ydb://session/3?node_id=2&id=ODVhY2RjZDctMTU3YzFlZmEtOGYwMDI4OGQtZmIyYjg4MDQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519893946227731749:3111], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:29:07.658618Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519893946227731753:3115], TxId: 281474976715697, task: 1. Ctx: { TraceId : 01jykqw57wccnrhvpmdw6zdyr5. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=ODVhY2RjZDctMTU3YzFlZmEtOGYwMDI4OGQtZmIyYjg4MDQ=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519893946227731749:3111], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:29:07.659185Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519893946227731756:3118], TxId: 281474976715697, task: 4. Ctx: { TraceId : 01jykqw57wccnrhvpmdw6zdyr5. SessionId : ydb://session/3?node_id=2&id=ODVhY2RjZDctMTU3YzFlZmEtOGYwMDI4OGQtZmIyYjg4MDQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519893946227731749:3111], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:29:07.659320Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519893946227731757:3119], TxId: 281474976715697, task: 5. Ctx: { SessionId : ydb://session/3?node_id=2&id=ODVhY2RjZDctMTU3YzFlZmEtOGYwMDI4OGQtZmIyYjg4MDQ=. TraceId : 01jykqw57wccnrhvpmdw6zdyr5. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519893946227731749:3111], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:29:07.659740Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7519893946227731797:2160], tablet: [2:7519893898983087457:2282], scanId: 82, table: /Root/EightShard 2025-06-25T14:29:07.659765Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [2:7519893946227731800:2161], tablet: [2:7519893898983087463:2286], scanId: 83, table: /Root/EightShard 2025-06-25T14:29:07.851361Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861747873, txId: 281474976715699] shutting down 2025-06-25T14:29:08.070983Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861748104, txId: 281474976715701] shutting down 2025-06-25T14:29:08.287981Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MmQ2Nzk2MTUtZmZmMWJkNy1hMWVmNjg5OS0yZTNlNDU5Yg==, ActorId: [2:7519893950522699336:3162], ActorState: ExecuteState, TraceId: 01jykqw5vc4dnsm1dv7999a8we, Create QueryResponse for error on request, msg: 2025-06-25T14:29:08.288813Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519893950522699381:3167] TxId: 281474976715704. Ctx: { TraceId: 01jykqw5vc4dnsm1dv7999a8we, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzRmZjhlNmQtZTY1YTEzODktMzcwZGUzZDUtOWU4OTRmYjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-25T14:29:08.289003Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MzRmZjhlNmQtZTY1YTEzODktMzcwZGUzZDUtOWU4OTRmYjc=, ActorId: [2:7519893950522699346:3167], ActorState: ExecuteState, TraceId: 01jykqw5vc4dnsm1dv7999a8we, Create QueryResponse for error on request, msg: 2025-06-25T14:29:08.289586Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861748314, txId: 281474976715703] shutting down 2025-06-25T14:29:08.289689Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519893950522699390:3176], TxId: 281474976715704, task: 5. Ctx: { TraceId : 01jykqw5vc4dnsm1dv7999a8we. SessionId : ydb://session/3?node_id=2&id=MzRmZjhlNmQtZTY1YTEzODktMzcwZGUzZDUtOWU4OTRmYjc=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519893950522699381:3167], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:29:08.482460Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861748510, txId: 281474976715706] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ParallelCreateExternalTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:29:09.502908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:09.502967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:09.503007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:09.503040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:09.503096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:09.503115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:09.503150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:09.503211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:09.503718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:09.503938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:09.563364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:29:09.563418Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:09.564134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:09.574941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:09.575235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:09.575370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 
72057594046678944 2025-06-25T14:29:09.582147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:09.582392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:09.582862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:09.583065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:09.585051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:09.585201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:09.586096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:09.586165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:09.586201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:09.586229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:09.586257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:09.586401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:09.590991Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:09.718340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:09.718605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:09.718837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:09.718882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:09.719095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:09.719175Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:09.722578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:09.722760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:09.722986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:09.723052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:09.723103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:09.723145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:09.726366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:09.726426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:09.726477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:09.728563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:09.728634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:09.728678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:09.728756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:09.732876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:09.735831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:09.736027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:09.737090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:09.737231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:09.737287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:09.737582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:09.737652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:09.737810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:09.737895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29: ... 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable1" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Content: "" } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:09.858839Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable2" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:09.859003Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable2" took 154us result status StatusSuccess 2025-06-25T14:29:09.859301Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable2" PathDescription { Self { Name: "ExternalTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 127 
CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable2" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key1" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false } Columns { Name: "key2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 3 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 4 NotNull: false } Content: "" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:09.859871Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:09.859974Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 101us result status StatusSuccess 2025-06-25T14:29:09.860242Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: true } Children { Name: "ExternalTable1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 126 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "ExternalTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 127 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: 
"root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:09.860608Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:09.860771Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable1" took 122us result status StatusSuccess 2025-06-25T14:29:09.860969Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable1" PathDescription { Self { Name: "ExternalTable1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 126 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable1" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" 
DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Content: "" } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:09.861282Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:09.861423Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable2" took 139us result status StatusSuccess 2025-06-25T14:29:09.861683Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable2" PathDescription { Self { Name: "ExternalTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 127 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable2" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key1" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false } Columns { Name: "key2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 3 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 4 NotNull: false } Content: "" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TBalanceCoverageBuilderTest::TestSplitWithMergeBack [GOOD] >> TExternalTableTest::ReadOnlyMode [GOOD] >> TBalanceCoverageBuilderTest::TestSplitWithPartialMergeAll [GOOD] >> TTicketParserTest::AuthorizationWithUserAccount2 [GOOD] >> TTicketParserTest::BulkAuthorizationModify ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::DropTableTwice [GOOD] 
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:29:09.983227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:09.983326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:09.983364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:09.983396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:09.983450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:09.983487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:09.983548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:09.983615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:09.984257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:09.984579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:10.057248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:29:10.057310Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:10.058000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:10.071464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:10.071833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:10.071993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:10.077708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:10.077881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:10.078391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't 
been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:10.078618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:10.080887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:10.081053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:10.082125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:10.082189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:10.082235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:10.082274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:10.082334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:10.082515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.088065Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:10.196945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:10.197113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.197283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:10.197322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:10.197531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:10.197581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:10.199485Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:10.199645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:10.199816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.199857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:10.199907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:10.199950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:10.201836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.201893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:10.201932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:10.203647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.203694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.203737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:10.203800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:10.207117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:10.208943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:10.209122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:10.209953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 
2025-06-25T14:29:10.210069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:10.210122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:10.210401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:10.210464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:10.210605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:10.210675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29: ... .316865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:10.316908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:10.317034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:29:10.317138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:29:10.317232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:10.317263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 103, path id: 1 2025-06-25T14:29:10.317297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-25T14:29:10.317336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-25T14:29:10.317601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.317638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-25T14:29:10.317743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part 
operation is done id#103:0 progress is 1/1 2025-06-25T14:29:10.317775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:29:10.317811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:29:10.317841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:29:10.317873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-25T14:29:10.317911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:29:10.317971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:29:10.318002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:29:10.318062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:29:10.318101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:29:10.318141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 103, publications: 3, subscribers: 0 2025-06-25T14:29:10.318170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-25T14:29:10.318209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-25T14:29:10.318232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-25T14:29:10.318625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:10.318699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:10.318737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:29:10.318770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-25T14:29:10.318823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:29:10.319301Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:29:10.319349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:29:10.319452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:29:10.319777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:10.319834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:10.319866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:29:10.319892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-25T14:29:10.319916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:29:10.320622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:10.320687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:10.320722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:29:10.320760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-25T14:29:10.320786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:29:10.320840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-25T14:29:10.324585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:29:10.324778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 
2025-06-25T14:29:10.325200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:29:10.325510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:29:10.325703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:29:10.325735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:29:10.326010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:29:10.326077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:29:10.326098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:369:2358] TestWaitNotification: OK eventTxId 103 2025-06-25T14:29:10.326477Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:10.326644Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 196us result status StatusPathDoesNotExist 2025-06-25T14:29:10.326793Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsConfigRequest [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotRequestCompactionsAfterDisable >> TTicketParserTest::AuthorizationWithRequiredPermissions [GOOD] >> TBalanceCoverageBuilderTest::TestComplexSplit [GOOD] >> TTicketParserTest::AuthorizationWithUserAccount |76.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestSplitWithMergeBack [GOOD] >> TTicketParserTest::NebiusAuthorization [GOOD] >> TTicketParserTest::NebiusAuthorizationModify >> KqpPragma::OrderedColumns [GOOD] >> KqpPragma::MatchRecognizeWithoutTimeOrderRecoverer >> KqpYql::TableRange [GOOD] >> TBalanceCoverageBuilderTest::TestZeroTracks [GOOD] |76.0%| [TM] {asan, default-linux-x86_64, 
release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestSplitWithPartialMergeAll [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:29:10.248680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:10.248734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:10.248762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:10.248787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:10.248833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:10.248859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:10.248899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:10.248966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:10.249529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:10.249742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:10.311728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:29:10.311783Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:10.312489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:10.325221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:10.325589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:10.325757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:10.332731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: 
TTxUpgradeSchema.Complete 2025-06-25T14:29:10.332966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:10.333562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:10.333821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:10.336757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:10.336932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:10.337999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:10.338080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:10.338153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:10.338191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:10.338224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:10.338410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.344565Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:10.459369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:10.459557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.459746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:10.459788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:10.459968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:10.460026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:10.462471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:10.462654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:10.462800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.462844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:10.462884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:10.462921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:10.466157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.466212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:10.466266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:10.467862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.467910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.467949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:10.468009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:10.470819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:10.472493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:10.472670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send 
Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:10.473414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:10.473524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:10.473591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:10.473877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:10.473949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:10.474104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:10.474174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29: ... ogressState, at schemeshard: 72057594046678944 2025-06-25T14:29:10.866842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 129 ready parts: 1/1 2025-06-25T14:29:10.867055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 129 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:10.868036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 129 2025-06-25T14:29:10.868149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 129 2025-06-25T14:29:10.868186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 129 2025-06-25T14:29:10.868246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 10 2025-06-25T14:29:10.868296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2025-06-25T14:29:10.869512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 
LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 129 2025-06-25T14:29:10.869604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 129 2025-06-25T14:29:10.869639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 129 2025-06-25T14:29:10.869674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 2 2025-06-25T14:29:10.869728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-25T14:29:10.869824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 129, ready parts: 0/1, is published: true 2025-06-25T14:29:10.877814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 129:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:129 msg type: 269090816 2025-06-25T14:29:10.878011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 129, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 129 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 129 at step: 5000005 2025-06-25T14:29:10.880717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:10.880880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 129 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:10.881125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 129:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000005, at schemeshard: 72057594046678944 2025-06-25T14:29:10.881284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 129:0 128 -> 240 2025-06-25T14:29:10.881464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-25T14:29:10.881547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-25T14:29:10.881947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-25T14:29:10.882121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 FAKE_COORDINATOR: Erasing txId 129 2025-06-25T14:29:10.884487Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:10.884538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:10.884805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-25T14:29:10.884937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:10.884974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:489:2444], at schemeshard: 72057594046678944, txId: 129, path id: 1 2025-06-25T14:29:10.885026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:489:2444], at schemeshard: 72057594046678944, txId: 129, path id: 5 2025-06-25T14:29:10.885294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 129:0, at schemeshard: 72057594046678944 2025-06-25T14:29:10.885339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 129:0 ProgressState 2025-06-25T14:29:10.885444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#129:0 progress is 1/1 2025-06-25T14:29:10.885480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-25T14:29:10.885521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#129:0 progress is 1/1 2025-06-25T14:29:10.885572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-25T14:29:10.885611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 129, ready parts: 1/1, is published: false 2025-06-25T14:29:10.885658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-25T14:29:10.885714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 129:0 2025-06-25T14:29:10.885752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 129:0 2025-06-25T14:29:10.885854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-25T14:29:10.885899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 129, publications: 2, subscribers: 0 2025-06-25T14:29:10.885932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-25T14:29:10.885967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 5], 3 
2025-06-25T14:29:10.886872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 129 2025-06-25T14:29:10.886975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 129 2025-06-25T14:29:10.887013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 129 2025-06-25T14:29:10.887053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-25T14:29:10.887108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2025-06-25T14:29:10.888019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 3 PathOwnerId: 72057594046678944, cookie: 129 2025-06-25T14:29:10.888112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 3 PathOwnerId: 72057594046678944, cookie: 129 2025-06-25T14:29:10.888156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 129 2025-06-25T14:29:10.888190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 3 2025-06-25T14:29:10.888233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-25T14:29:10.888361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 129, subscribers: 0 2025-06-25T14:29:10.891395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-25T14:29:10.891543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 TestModificationResult got TxId: 129, wait until txId: 129 |76.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |76.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot >> KqpPragma::Auth [GOOD] >> TTransferTests::CreateWrongFlushIntervalIsSmall [GOOD] >> TTxDataShardMiniKQL::CrossShard_3_AllToOne [GOOD] >> KqpPragma::MatchRecognizeWithTimeOrderRecoverer >> TTransferTests::CreateWrongFlushIntervalIsBig >> TTxDataShardMiniKQL::CrossShard_4_OneToAll |76.0%| [TM] {asan, default-linux-x86_64, 
release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestComplexSplit [GOOD] >> TTransferTests::CreateInParallel [GOOD] >> TTransferTests::CreateDropRecreate >> TBalanceCoverageBuilderTest::TestComplexSplitWithDuplicates [GOOD] >> KqpYql::UuidPrimaryKeyDisabled [GOOD] >> KqpScripting::StreamExecuteYqlScriptScan [GOOD] >> KqpYql::InsertIgnore [GOOD] >> Cdc::HugeKey[TopicRunner] [GOOD] >> TBalanceCoverageBuilderTest::TestOneSplit [GOOD] >> KqpYql::JsonCast >> KqpScripting::StreamExecuteYqlScriptScanCancelAfterBruteForce >> Cdc::HugeKeyDebezium |76.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestZeroTracks [GOOD] |76.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |76.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |76.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest |76.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |76.0%| [LD] {RESULT} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |76.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestComplexSplitWithDuplicates [GOOD] >> TBalanceCoverageBuilderTest::TestEmpty [GOOD] >> TBalanceCoverageBuilderTest::TestSimpleSplit [GOOD] >> Cdc::Write[YdsRunner] [GOOD] >> Cdc::Write[TopicRunner] >> TBalanceCoverageBuilderTest::TestSplitWithPartialMergeOne [GOOD] >> KqpScripting::EndOfQueryCommit [GOOD] |76.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestOneSplit [GOOD] >> TTransferTests::CreateWrongFlushIntervalIsBig [GOOD] >> TTicketParserTest::LoginRefreshGroupsGood [GOOD] >> KqpScripting::ExecuteYqlScriptPg >> TTicketParserTest::LoginCheckRemovedUser >> TExternalTableTest::ParallelCreateSameExternalTable >> TTicketParserTest::AuthenticationRetryErrorImmediately [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::UuidPrimaryKeyDisabled [GOOD] Test command err: Trying to start YDB, gRPC: 7499, MsgBus: 32745 2025-06-25T14:29:08.349732Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893949710272859:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:08.350063Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0008ec/r3tmp/tmp1wExk4/pdisk_1.dat 2025-06-25T14:29:08.772643Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:08.778798Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893949710272763:2080] 1750861748323185 != 1750861748323188 2025-06-25T14:29:08.843720Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:08.843840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:08.845779Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7499, node 1 2025-06-25T14:29:08.909897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:08.909927Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:08.909942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:08.910074Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32745 2025-06-25T14:29:09.349574Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32745 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:09.504411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:11.232980Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893962595175294:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:11.233103Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:11.528204Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893962595175317:2307] txid# 281474976710658, issues: { message: "Uuid as primary key is forbiden by configuration: key" severity: 1 } 2025-06-25T14:29:11.568666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893962595175326:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:11.568732Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:11.587386Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893962595175332:2315] txid# 281474976710659, issues: { message: "Uuid as primary key is forbiden by configuration: key" severity: 1 } 2025-06-25T14:29:11.600291Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893962595175340:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:11.600422Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:11.633357Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893962595175347:2323] txid# 281474976710660, issues: { message: "Uuid as primary key is forbiden by configuration: val" severity: 1 } 2025-06-25T14:29:11.645229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893962595175355:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:11.645304Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:11.661298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:11.816615Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893962595175444:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:11.816688Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } |76.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestSimpleSplit [GOOD] |76.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestEmpty [GOOD] |76.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/balance_coverage/ut/unittest >> TBalanceCoverageBuilderTest::TestSplitWithPartialMergeOne [GOOD] |76.0%| [TA] {RESULT} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::TableRange [GOOD] Test command err: Trying to start YDB, gRPC: 20700, MsgBus: 32487 2025-06-25T14:29:06.099190Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893942132771754:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:06.099276Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00092b/r3tmp/tmpXKTB7F/pdisk_1.dat 2025-06-25T14:29:06.432500Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893942132771732:2080] 1750861746097055 != 1750861746097058 2025-06-25T14:29:06.443502Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20700, node 1 2025-06-25T14:29:06.489129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:06.489162Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:06.489174Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:06.489291Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:06.508809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:06.508946Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:06.510139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:32487 TClient is connected to server localhost:32487 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:07.004177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:07.037350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:07.106508Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:07.189139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:07.319270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:29:07.427395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.237149Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893955017675267:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:09.237246Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:09.532912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.556161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.578300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.602819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.629007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.659034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.717384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.796590Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893955017675930:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:09.796675Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:09.800550Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893955017675935:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:09.804151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:09.812907Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893955017675937:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:09.907166Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893955017675988:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Error: Table intent determination, code: 1040
:3:27: Error: RANGE is not supported on Kikimr clusters. ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_transfer/unittest >> TTransferTests::CreateWrongFlushIntervalIsBig [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:02.966214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:02.966351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:02.966396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:02.966436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:02.966491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:02.966521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:02.966585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:02.966661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:02.977049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:02.987050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:03.439046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:03.439124Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:03.540024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:03.554134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:03.590342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:03.663234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:03.663736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:03.688385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been 
configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:03.712718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:03.784060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:03.804187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:03.852711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:03.852812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:03.864576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:03.864653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:03.864723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:03.864827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:03.871724Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:29:04.044302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:04.072079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:04.087920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:04.088047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:04.095768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:04.095938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:04.100398Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:04.107839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:04.108136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:04.108242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:04.108304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:04.108384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:04.112836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:04.112924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:04.112967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:04.129712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:04.129781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:04.129844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:04.129892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:04.142374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:04.145366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:04.154481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:04.156467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 
2025-06-25T14:29:04.156685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:04.160023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:04.164855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:04.164947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:04.165167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:04.165253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:04.168418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:04.168487Z node 1 :FLAT_TX_SCHEMESHARD ... :312:2297], Recipient [6:136:2157]: NKikimrTxColumnShard.TEvNotifyTxCompletionResult Origin: 72075186233409546 TxId: 101 2025-06-25T14:29:13.454567Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4998: StateWork, processing event TEvColumnShard::TEvNotifyTxCompletionResult 2025-06-25T14:29:13.454639Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6230: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-06-25T14:29:13.454689Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T14:29:13.454839Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-06-25T14:29:13.455018Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:29:13.457771Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:29:13.457875Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:29:13.457934Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 101:0 2025-06-25T14:29:13.458111Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [6:136:2157], Recipient [6:136:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:29:13.458149Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:29:13.458208Z 
node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:29:13.458255Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:29:13.458382Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:29:13.458420Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:29:13.458459Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:29:13.458504Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:29:13.458548Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:29:13.458590Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-25T14:29:13.458674Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [6:345:2321] message: TxId: 101 2025-06-25T14:29:13.458738Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:29:13.458784Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:29:13.458832Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:29:13.459002Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:29:13.462044Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:29:13.462190Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [6:345:2321] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 101 at schemeshard: 72057594046678944 2025-06-25T14:29:13.462403Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:29:13.462469Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [6:346:2322] 2025-06-25T14:29:13.462700Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [6:348:2324], Recipient [6:136:2157]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:29:13.462758Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:29:13.505829Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-25T14:29:13.506932Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender 
[6:392:2361], Recipient [6:136:2157]: {TEvModifySchemeTransaction txid# 102 TabletId# 72057594046678944} 2025-06-25T14:29:13.507001Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:29:13.509963Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTransfer Replication { Name: "Transfer" Config { TransferSpecific { Target { SrcPath: "/MyRoot1/Table" DstPath: "/MyRoot/Table" } Batching { FlushIntervalMilliSeconds: 86400001 } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:13.510244Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_replication.cpp:349: [72057594046678944] TCreateReplication Propose: opId# 102:0, path# /MyRoot/Transfer 2025-06-25T14:29:13.510362Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Flush interval must be less than or equal to 24 hours, at schemeshard: 72057594046678944 2025-06-25T14:29:13.510671Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:29:13.520698Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Flush interval must be less than or equal to 24 hours" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:13.521048Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Flush interval must be less than or equal to 24 hours, operation: CREATE TRANSFER, path: /MyRoot/Transfer 2025-06-25T14:29:13.521129Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:29:13.521553Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:29:13.521625Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:29:13.522129Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [6:398:2367], Recipient [6:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:13.522199Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:13.522246Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:29:13.522378Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124996, Sender [6:345:2321], Recipient [6:136:2157]: NKikimrScheme.TEvNotifyTxCompletion TxId: 102 2025-06-25T14:29:13.522446Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-25T14:29:13.522534Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at 
schemeshard: 72057594046678944 2025-06-25T14:29:13.522674Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:29:13.522749Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [6:396:2365] 2025-06-25T14:29:13.522988Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [6:398:2367], Recipient [6:136:2157]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:29:13.523029Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:29:13.523077Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-25T14:29:13.523538Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [6:399:2368], Recipient [6:136:2157]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Transfer" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-25T14:29:13.523611Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:29:13.523738Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Transfer" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:13.524009Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Transfer" took 262us result status StatusPathDoesNotExist 2025-06-25T14:29:13.524231Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Transfer\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Transfer" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TTicketParserTest::NebiusAuthenticationRetryError [GOOD] >> TTicketParserTest::NebiusAuthenticationRetryErrorImmediately >> TExternalTableTest::ReplaceExternalTableIfNotExists >> TExternalTableTest::DropExternalTable |76.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |76.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |76.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |76.1%| [TA] $(B)/ydb/core/tx/balance_coverage/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> THiveTest::TestHiveBalancerWithSpareNodes [GOOD] >> TTicketParserTest::NebiusAuthorizationModify [GOOD] |76.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |76.1%| [TA] {RESULT} $(B)/ydb/core/tx/balance_coverage/ut/test-results/unittest/{meta.json ... results_accumulator.log} |76.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |76.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order >> TTransferTests::CreateDropRecreate [GOOD] >> TTransferTests::ConsistencyLevel >> TExternalTableTest::ReplaceExternalTableIfNotExistsShouldFailIfFeatureFlagIsNotSet >> TTicketParserTest::AuthorizationWithUserAccount [GOOD] >> TTicketParserTest::AuthorizationUnavailable |76.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |76.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |76.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost >> TTicketParserTest::BulkAuthorizationModify [GOOD] >> TExternalTableTest::ParallelCreateSameExternalTable [GOOD] >> TExternalTableTest::CreateExternalTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::AuthenticationRetryErrorImmediately [GOOD] Test command err: 2025-06-25T14:28:48.834668Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893862341601075:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:48.834754Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001888/r3tmp/tmpgdu4fv/pdisk_1.dat 2025-06-25T14:28:49.122487Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:49.123927Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893862341601055:2080] 1750861728833884 != 1750861728833887 TServer::EnableGrpc on GrpcPort 27434, node 1 2025-06-25T14:28:49.153009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:49.153032Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:49.153040Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:49.153165Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:49.168184Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:49.168347Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:49.169991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21475 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:49.397338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:49.411422Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-25T14:28:49.411487Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010408] Connect to grpc://localhost:22373 2025-06-25T14:28:49.414211Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-25T14:28:49.421290Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000010408] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:28:49.421484Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:28:51.519609Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893878308893672:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:51.519676Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001888/r3tmp/tmpiqaE02/pdisk_1.dat 2025-06-25T14:28:51.606806Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:51.608258Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893878308893653:2080] 1750861731519232 != 1750861731519235 TServer::EnableGrpc on GrpcPort 26324, node 2 2025-06-25T14:28:51.651504Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:51.651539Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:51.651547Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:51.651651Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2025-06-25T14:28:51.655688Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:51.655760Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:51.657453Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22419 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:51.868837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:51.874425Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket ApiK****alid (AB5B5EA8) asking for AccessServiceAuthentication 2025-06-25T14:28:51.874489Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5170000c1a88] Connect to grpc://localhost:17361 2025-06-25T14:28:51.875320Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000c1a88] Request AuthenticateRequest { api_key: "ApiK****alid (AB5B5EA8)" } 2025-06-25T14:28:51.882051Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000c1a88] Response AuthenticateResponse { subject { user_account { id: "ApiKey-value-valid" } } } 2025-06-25T14:28:51.882248Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket ApiK****alid (AB5B5EA8) () has now valid token of ApiKey-value-valid@as 2025-06-25T14:28:54.586053Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519893890757142486:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:54.586111Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001888/r3tmp/tmp1qcTwP/pdisk_1.dat 2025-06-25T14:28:54.702655Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:54.704777Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519893890757142467:2080] 1750861734585725 != 1750861734585728 TServer::EnableGrpc on GrpcPort 14427, node 3 2025-06-25T14:28:54.736538Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:54.736648Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:54.738267Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:54.760994Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:54.761017Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:54.761026Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:54.761166Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8230 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:54.981355Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06 ... WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:57.843329Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21941, node 4 2025-06-25T14:28:57.883309Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:57.883330Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:57.883338Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:57.883485Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17339 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:58.103786Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:58.111352Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-25T14:28:58.111435Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000100e08] Connect to grpc://localhost:22937 2025-06-25T14:28:58.112602Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000100e08] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-25T14:28:58.123602Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000100e08] Status 14 Service Unavailable 2025-06-25T14:28:58.123721Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-25T14:28:58.123761Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-25T14:28:58.123922Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000100e08] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-25T14:28:58.126382Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000100e08] Status 14 Service Unavailable 2025-06-25T14:28:58.126510Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-25T14:28:58.740605Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-25T14:28:58.740647Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-25T14:28:58.740949Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:58.741073Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000100e08] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-25T14:28:58.743272Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000100e08] Status 14 Service Unavailable 2025-06-25T14:28:58.743511Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-25T14:28:59.741983Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-25T14:28:59.742027Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-25T14:28:59.742532Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000100e08] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-25T14:28:59.744829Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000100e08] Status 14 Service Unavailable 2025-06-25T14:28:59.744979Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: 
Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-25T14:29:02.697747Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519893903893700178:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:02.697861Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:02.743305Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-25T14:29:02.743355Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-25T14:29:02.743575Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000100e08] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-25T14:29:02.745494Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000100e08] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:29:02.745733Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-25T14:29:10.879905Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519893958811642584:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:10.880022Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001888/r3tmp/tmpaJscui/pdisk_1.dat 2025-06-25T14:29:11.002697Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27170, node 5 2025-06-25T14:29:11.028696Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:11.029402Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:11.042589Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:11.145026Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:11.145055Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:11.145072Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:11.145309Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5956 WaitRootIsUp 'Root'... 
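The entries above show the ticket parser re-sending AuthenticateRequest each time the access service answers with gRPC status 14 (Service Unavailable), keeping the ticket in a retryable-error state, and only marking it a valid token once a real AuthenticateResponse arrives. Below is a minimal, self-contained sketch of that retry-until-available pattern; it is not the YDB ticket parser, and the function names, result type, and delay schedule are illustrative assumptions.

```cpp
#include <chrono>
#include <functional>
#include <iostream>
#include <optional>
#include <string>
#include <thread>

// Result of one authentication attempt: either a resolved subject
// (e.g. "user1@as") or a retryable error such as "Service Unavailable".
struct TAuthResult {
    std::optional<std::string> Subject;
    std::string Error;
    bool Retryable = false;
};

// Retry the supplied attempt until it succeeds, fails permanently,
// or the attempt budget runs out. The linear backoff here is only a
// placeholder; the real service may use a different schedule.
std::optional<std::string> AuthenticateWithRetry(
        const std::function<TAuthResult()>& attempt,
        int maxAttempts = 5,
        std::chrono::milliseconds baseDelay = std::chrono::milliseconds(200)) {
    for (int i = 1; i <= maxAttempts; ++i) {
        TAuthResult res = attempt();
        if (res.Subject) {
            return res.Subject;                      // valid token obtained
        }
        if (!res.Retryable || i == maxAttempts) {
            return std::nullopt;                     // permanent failure
        }
        std::cerr << "attempt " << i << " failed: " << res.Error
                  << ", retrying\n";
        std::this_thread::sleep_for(baseDelay * i);  // back off before retrying
    }
    return std::nullopt;
}

int main() {
    int calls = 0;
    // Stand-in for the gRPC AuthenticateRequest: unavailable twice, then OK.
    auto attempt = [&calls]() -> TAuthResult {
        if (++calls < 3) {
            return {std::nullopt, "Service Unavailable", true};
        }
        return {std::string("user1@as"), "", false};
    };
    if (auto subject = AuthenticateWithRetry(attempt)) {
        std::cout << "ticket now valid for " << *subject << "\n";
    }
    return 0;
}
```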
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:11.539200Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:11.553026Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:11.555503Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-25T14:29:11.555596Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000081c88] Connect to grpc://localhost:24100 2025-06-25T14:29:11.556603Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000081c88] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-25T14:29:11.566840Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000081c88] Status 14 Service Unavailable 2025-06-25T14:29:11.567018Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-25T14:29:11.567049Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-25T14:29:11.567283Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000081c88] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-25T14:29:11.569174Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000081c88] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:29:11.569381Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-25T14:29:11.898332Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TExternalTableTest::ReplaceExternalTableIfNotExists [GOOD] >> Cdc::NaN[PqRunner] [GOOD] >> Cdc::NaN[YdsRunner] >> TExternalTableTest::DropExternalTable [GOOD] >> TExternalTableTest::Decimal |76.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |76.1%| [LD] 
{RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |76.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ParallelCreateSameExternalTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:29:15.105635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:15.105719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:15.105761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:15.105795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:15.105869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:15.105917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:15.105974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:15.106052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:15.106750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:15.107061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:15.265130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:29:15.265197Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:15.265906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:15.294302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:15.294733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:15.294901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as 
Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:15.318107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:15.318336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:15.318974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:15.319265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:15.321905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:15.322092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:15.323303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:15.323378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:15.323432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:15.323475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:15.323508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:15.323693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.330020Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:15.455086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:15.455349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.455579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:15.455627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:15.455845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 
2025-06-25T14:29:15.455916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:15.465307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:15.465502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:15.465716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.465771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:15.465829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:15.465880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:15.469266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.469355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:15.469398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:15.477772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.477844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.477890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:15.477958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:15.491253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:15.498008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:15.498213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 
72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:15.498921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:15.499025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:15.499067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:15.499290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:15.499333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:15.499447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:15.499522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29: ... 
ated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:15.620509Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/NilNoviSubLuna" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:15.620670Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/NilNoviSubLuna" took 161us result status StatusSuccess 2025-06-25T14:29:15.620918Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 
MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 125 2025-06-25T14:29:15.621140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 125: send EvNotifyTxCompletion 2025-06-25T14:29:15.621167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 125 TestWaitNotification wait txId: 126 2025-06-25T14:29:15.621220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 126: send EvNotifyTxCompletion 2025-06-25T14:29:15.621232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 126 TestWaitNotification wait txId: 127 2025-06-25T14:29:15.621267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 127: send EvNotifyTxCompletion 2025-06-25T14:29:15.621288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 127 2025-06-25T14:29:15.621699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 125, at schemeshard: 72057594046678944 2025-06-25T14:29:15.621809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 125: got EvNotifyTxCompletionResult 2025-06-25T14:29:15.621836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 125: satisfy waiter [1:347:2336] 2025-06-25T14:29:15.621956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 126, at schemeshard: 72057594046678944 2025-06-25T14:29:15.622013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 127, at schemeshard: 72057594046678944 2025-06-25T14:29:15.622059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 126: got EvNotifyTxCompletionResult 2025-06-25T14:29:15.622078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 126: satisfy waiter [1:347:2336] 2025-06-25T14:29:15.622099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 127: got EvNotifyTxCompletionResult 2025-06-25T14:29:15.622110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 127: satisfy waiter [1:347:2336] TestWaitNotification: OK eventTxId 125 TestWaitNotification: OK eventTxId 126 TestWaitNotification: OK eventTxId 127 2025-06-25T14:29:15.622571Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/NilNoviSubLuna" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:15.622698Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/NilNoviSubLuna" took 130us result status StatusSuccess 2025-06-25T14:29:15.622975Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 128 2025-06-25T14:29:15.625354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "NilNoviSubLuna" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:15.625559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 128:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "NilNoviSubLuna" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } 2025-06-25T14:29:15.625628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 128:0, path# /MyRoot/NilNoviSubLuna 
2025-06-25T14:29:15.625711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 128:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/NilNoviSubLuna', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-25T14:29:15.627489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 128, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/NilNoviSubLuna\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges)" TxId: 128 SchemeshardId: 72057594046678944 PathId: 3 PathCreateTxId: 125, at schemeshard: 72057594046678944 2025-06-25T14:29:15.627684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/NilNoviSubLuna', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), operation: CREATE EXTERNAL TABLE, path: /MyRoot/NilNoviSubLuna TestModificationResult got TxId: 128, wait until txId: 128 >> KqpScripting::StreamExecuteYqlScriptLeadingEmptyScan [GOOD] >> TExternalTableTest::ReplaceExternalTableShouldFailIfEntityOfAnotherTypeWithSameNameExists |76.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> TExternalTableTest::ReplaceExternalTableIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] >> DataShardSnapshots::MvccSnapshotTailCleanup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAuthorizationModify [GOOD] Test command err: 2025-06-25T14:28:48.096120Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893865734634651:2166];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:48.102642Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001898/r3tmp/tmpo4bHzH/pdisk_1.dat 2025-06-25T14:28:48.379657Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893865734634523:2080] 1750861728078350 != 1750861728078353 2025-06-25T14:28:48.381517Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10310, node 1 2025-06-25T14:28:48.415675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:48.415694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:48.415702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:48.415789Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:48.481053Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:48.481157Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:48.482753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11191 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:48.700272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:48.711689Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:28:48.711740Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:48.711756Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:48.712189Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-25T14:28:48.712289Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010408] Connect to grpc://localhost:30755 2025-06-25T14:28:48.714818Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response 14: "Service Unavailable" 2025-06-25T14:28:48.726632Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010408] Status 14 Service Unavailable 2025-06-25T14:28:48.726788Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-25T14:28:48.726834Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-25T14:28:48.727013Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-25T14:28:48.729695Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010408] Status 1 CANCELLED 2025-06-25T14:28:48.729854Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' 2025-06-25T14:28:50.610048Z 
node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893870753563135:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:50.610140Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001898/r3tmp/tmpFOd0pU/pdisk_1.dat 2025-06-25T14:28:50.711074Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13838, node 2 2025-06-25T14:28:50.761771Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:50.761947Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:50.763646Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:50.778476Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:50.778497Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:50.778510Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:50.778600Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20075 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:50.966023Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:50.972558Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:28:50.972602Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:50.972616Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:28:50.972711Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-25T14:28:50.972753Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5170000a1808] Connect to grpc://localhost:7819 2025-06-25T14:28:50.973948Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000a1808] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-06-25T14:28:50.983211Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000a1808] Status 14 Service Unavailable 2025-06-25T14:28:50.983387Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-25T14:28:50.983436Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-25T14:28:50.983511Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-25T14:28:50.983776Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000a1808] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-06-25T14:28:50.985499Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000a1808] Status 14 Service Unavailable 2025-06-25T14:28:50.985597Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-25T14:28:50.985614Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-25T14:28:51.615909Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket **** (8E120919) 2025-06-25T14:28:51.615995Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-25T14:28:51.616244Z node 2 :GRPC_CLIENT DEBU ... 
db , DomainLoginOnly 1 2025-06-25T14:29:08.128727Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:08.128735Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:08.128773Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-25T14:29:08.128921Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700006fd88] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "XXXXXXXX" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "XXXXXXXX" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-25T14:29:08.130307Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700006fd88] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-25T14:29:08.130498Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:29:08.132742Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:08.132763Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:08.132770Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:08.132811Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-25T14:29:08.132955Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700006fd88] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "XXXXXXXX" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "XXXXXXXX" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-25T14:29:08.134431Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700006fd88] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-25T14:29:08.134650Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:29:08.135099Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:08.135115Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:08.135122Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:08.135154Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( monitoring.view) 2025-06-25T14:29:08.135277Z node 4 :GRPC_CLIENT 
DEBUG: grpc_service_client.h:120: [51700006fd88] Request AuthorizeRequest { checks { key: 0 value { permission { name: "monitoring.view" } container_id: "folder" iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "monitoring.view" } container_id: "folder" iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-25T14:29:08.136887Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700006fd88] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-25T14:29:08.137104Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:29:11.672140Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519893962273597395:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:11.672280Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001898/r3tmp/tmpacXc8J/pdisk_1.dat 2025-06-25T14:29:11.815707Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:11.819887Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519893962273597373:2080] 1750861751670909 != 1750861751670912 2025-06-25T14:29:11.833847Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:11.833951Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:11.841698Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25882, node 5 2025-06-25T14:29:11.916622Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:11.916651Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:11.916662Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:11.916809Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12379 WaitRootIsUp 'Root'... 
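The AuthorizeRequest/AuthorizeResponse exchanges above carry each permission check under an integer key and return one result per key; later in this block a two-permission request (something.read and something.write) is batched the same way, and the ticket only becomes a valid token when every requested check resolves to a subject. A minimal sketch of that keyed, all-or-nothing batch check follows; the types and the stubbed Authorize call are illustrative assumptions, not the actual access-service client.

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

// One permission check, mirroring the shape of the AuthorizeRequest
// entries in the log (permission name + container + resource path).
// These types are illustrative, not the service protobufs.
struct TCheck {
    std::string Permission;
    std::string ContainerId;
    std::string ResourcePath;
};

// Hypothetical transport: returns the resolved subject per check key,
// or an empty string when a check is denied. A real client would send
// one request with all checks and read the keyed results back.
std::map<int, std::string> Authorize(const std::vector<TCheck>& checks) {
    std::map<int, std::string> results;
    for (size_t i = 0; i < checks.size(); ++i) {
        results[static_cast<int>(i)] = "user1";  // stubbed "allowed" answer
    }
    return results;
}

// The ticket is accepted only if every requested permission is granted.
bool AllPermissionsGranted(const std::vector<TCheck>& checks) {
    auto results = Authorize(checks);
    for (size_t i = 0; i < checks.size(); ++i) {
        auto it = results.find(static_cast<int>(i));
        if (it == results.end() || it->second.empty()) {
            return false;
        }
    }
    return true;
}

int main() {
    std::vector<TCheck> checks = {
        {"something.read",  "aaaa1234", "bbbb4554"},
        {"something.write", "aaaa1234", "bbbb4554"},
    };
    std::cout << (AllPermissionsGranted(checks) ? "valid token" : "denied") << "\n";
    return 0;
}
```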
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:12.223074Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:12.232685Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:12.232723Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:12.232732Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:12.232796Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-25T14:29:12.232838Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000012388] Connect to grpc://localhost:25802 2025-06-25T14:29:12.233849Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000012388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-25T14:29:12.243370Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000012388] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-25T14:29:12.245182Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:29:12.246461Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:12.246494Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:12.246505Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:12.246570Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for 
AccessServiceAuthorization( something.read something.write) 2025-06-25T14:29:12.246908Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000012388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-25T14:29:12.249788Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000012388] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { account { user_account { id: "user1" } } } } } 2025-06-25T14:29:12.250886Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as |76.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReplaceExternalTableIfNotExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:29:15.643352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:15.643439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:15.643484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:15.643522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:15.643599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:15.643635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:15.643686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:15.643782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:15.644817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:15.645151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:15.726872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:29:15.726938Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:15.727981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:15.743191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:15.743627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:15.743800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:15.751336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:15.751557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:15.752242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:15.752555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:15.755146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:15.755351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:15.756472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:15.756539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:15.756590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:15.756628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:15.756665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:15.756873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.762838Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] 
started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:15.910427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:15.910667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.910900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:15.910946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:15.911182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:15.911268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:15.914377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:15.914571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:15.914753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.914810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:15.914872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:15.914923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:15.924383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.924489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:15.924538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:15.927395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-25T14:29:15.927462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.927511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:15.927591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:15.931348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:15.933666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:15.933878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:15.934896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:15.935035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:15.935115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:15.935440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:15.935507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:15.935677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:15.935756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29: ... 
MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:16.057690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_external_table.cpp:58: [72057594046678944] TAlterExternalTable TPropose, operationId: 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-06-25T14:29:16.057824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 128 -> 240 2025-06-25T14:29:16.058044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:29:16.058115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:29:16.059021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:29:16.059145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 FAKE_COORDINATOR: Erasing txId 104 2025-06-25T14:29:16.063417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:16.063465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:16.063613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:29:16.063694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:29:16.063796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:16.063832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-06-25T14:29:16.063867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-25T14:29:16.063892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-25T14:29:16.064156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.064208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-25T14:29:16.064332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:29:16.064375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:29:16.064414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:29:16.064445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:29:16.064497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-25T14:29:16.064556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:29:16.064594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:29:16.064632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:29:16.064710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:29:16.064793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:29:16.064838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-25T14:29:16.064872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-25T14:29:16.064904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 4 2025-06-25T14:29:16.065711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:29:16.065794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:29:16.065844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:29:16.065892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-25T14:29:16.065942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:29:16.066602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:29:16.066662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 
72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:29:16.066693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:29:16.066727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-25T14:29:16.066765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:29:16.066828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-25T14:29:16.081684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:29:16.083122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-25T14:29:16.083410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-25T14:29:16.083486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-25T14:29:16.083998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-25T14:29:16.084120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:29:16.084164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:391:2380] TestWaitNotification: OK eventTxId 104 2025-06-25T14:29:16.084746Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:16.084971Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 239us result status StatusSuccess 2025-06-25T14:29:16.085371Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 3 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 
PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 3 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/other_location" Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::BulkAuthorizationModify [GOOD] Test command err: 2025-06-25T14:28:47.110838Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893860062312458:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:47.110935Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019f8/r3tmp/tmpmZU6na/pdisk_1.dat 2025-06-25T14:28:47.394841Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893860062312440:2080] 1750861727110133 != 1750861727110136 2025-06-25T14:28:47.401442Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4654, node 1 2025-06-25T14:28:47.480368Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:47.480406Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:47.480413Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:47.480562Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:47.482097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:47.482220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:47.484078Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8210 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:47.808235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:47.827165Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-25T14:28:47.827231Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010788] Connect to grpc://localhost:20415 2025-06-25T14:28:47.830910Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010788] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-25T14:28:47.847226Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010788] Status 14 Service Unavailable 2025-06-25T14:28:47.849270Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-25T14:28:47.849323Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-25T14:28:47.849411Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-25T14:28:47.849749Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010788] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-25T14:28:47.852159Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010788] Status 14 Service Unavailable 2025-06-25T14:28:47.852403Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-25T14:28:47.852450Z node 1 :TICKET_PARSER DEBUG: 
ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-25T14:28:48.121430Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:49.122262Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-25T14:28:49.122378Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-25T14:28:49.122682Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010788] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-25T14:28:49.125507Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010788] Status 14 Service Unavailable 2025-06-25T14:28:49.125611Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-25T14:28:49.125660Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-25T14:28:51.123252Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-25T14:28:51.123361Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-25T14:28:51.123618Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010788] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-25T14:28:51.125745Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000010788] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:28:51.125926Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-25T14:28:52.110936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893860062312458:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:52.111013Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:00.349067Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893916537104876:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:00.349472Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019f8/r3tmp/tmpSQ6sVR/pdisk_1.dat 2025-06-25T14:29:00.498392Z node 2 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:00.500169Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893916537104853:2080] 1750861740347848 != 1750861740347851 TServer::EnableGrpc on GrpcPort 12693, node 2 2025-06-25T14:29:00.526939Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:00.527019Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:00.532679Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:00.551383Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:00.551419Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:00.551426Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:00.551505Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27762 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:00.805013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner ... sed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:29:08.000324Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:08.005072Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:08.005109Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:08.005117Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:08.005160Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-25T14:29:08.005240Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(somewhere.sleep) 2025-06-25T14:29:08.005263Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.list) 2025-06-25T14:29:08.005285Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-25T14:29:08.005307Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.eat) 2025-06-25T14:29:08.005362Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700010dc88] Connect to grpc://localhost:21567 2025-06-25T14:29:08.008045Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700010dc88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:29:08.011737Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700010dc88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "somewhere.sleep" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:29:08.012067Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700010dc88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.list" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:29:08.012260Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700010dc88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:29:08.012462Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700010dc88] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.eat" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:29:08.029800Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700010dc88] Status 16 Access Denied 2025-06-25T14:29:08.030145Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700010dc88] Status 16 Access Denied 2025-06-25T14:29:08.032456Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.eat now has a permanent error "Access Denied" retryable:0 2025-06-25T14:29:08.041626Z node 4 :TICKET_PARSER TRACE: 
ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.read now has a permanent error "Access Denied" retryable:0 2025-06-25T14:29:08.044750Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700010dc88] Status 16 Access Denied 2025-06-25T14:29:08.047776Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700010dc88] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:29:08.047899Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700010dc88] Status 16 Access Denied 2025-06-25T14:29:08.051521Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission somewhere.sleep now has a permanent error "Access Denied" retryable:0 2025-06-25T14:29:08.051632Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-06-25T14:29:08.055157Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.list now has a permanent error "Access Denied" retryable:0 2025-06-25T14:29:08.055537Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-25T14:29:08.059201Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700010e388] Connect to grpc://localhost:21861 2025-06-25T14:29:08.060092Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700010e388] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-25T14:29:08.068221Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700010e388] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-25T14:29:08.068619Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-25T14:29:11.344167Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519893961506177462:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:11.344368Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019f8/r3tmp/tmpZP7fSX/pdisk_1.dat 2025-06-25T14:29:11.480821Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:11.480912Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:11.481168Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:11.508807Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14848, node 5 2025-06-25T14:29:11.600950Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:11.600972Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:11.600978Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:11.601102Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:26159 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:11.936005Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:11.940838Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:11.942542Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:11.942572Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:11.942581Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:11.942635Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read) 2025-06-25T14:29:11.942665Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000072e88] Connect to grpc://localhost:2253 2025-06-25T14:29:11.943391Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000072e88] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-25T14:29:11.952596Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000072e88] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:29:11.952841Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:29:11.953423Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:11.953440Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:11.953448Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:11.953526Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( 
something.read something.write) 2025-06-25T14:29:11.953759Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000072e88] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-25T14:29:11.955611Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000072e88] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:29:11.955806Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as >> DataShardSnapshots::LockedWriteBulkUpsertConflict+UseSink >> TExternalTableTest::Decimal [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactBorrowedAfterSplitMergeWhenDisabled [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleDataShardReboot >> TExternalTableTest::CreateExternalTable [GOOD] >> TExternalTableTest::CreateExternalTableShouldFailIfSuchEntityAlreadyExists ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReplaceExternalTableIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:29:16.018380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:16.018443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:16.018470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:16.018492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:16.018538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:16.018562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:16.018605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:16.018671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:16.019269Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:16.019505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:16.096731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:29:16.096788Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:16.097594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:16.118218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:16.118664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:16.118849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:16.129584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:16.129814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:16.130521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:16.130797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:16.133473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:16.133641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:16.134796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:16.134894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:16.134950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:16.134991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:16.135030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:16.135260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.142350Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:16.295097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:16.295337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.295555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:16.295602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:16.295818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:16.295892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:16.298716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:16.298905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:16.299096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.299150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:16.299211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:16.299249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:16.302458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.302520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:16.302566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:16.304744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.304794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.304835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:16.304894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:16.308513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:16.310555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:16.310745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:16.311693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:16.311811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:16.311873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:16.312159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:16.312225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:16.312419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:16.312492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29: ... 
schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:29:16.362596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:29:16.362634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T14:29:16.362666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:29:16.362693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-25T14:29:16.364049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:29:16.364127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:29:16.364161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:29:16.364189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:29:16.364227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:29:16.365181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:29:16.365257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:29:16.365284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:29:16.365314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-25T14:29:16.365344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:29:16.365406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:29:16.371431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 
2025-06-25T14:29:16.371718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:29:16.371899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:29:16.371945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:29:16.372333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:29:16.372431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:29:16.372470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:307:2296] TestWaitNotification: OK eventTxId 101 2025-06-25T14:29:16.372983Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:16.373200Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 229us result status StatusSuccess 2025-06-25T14:29:16.373609Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalDataSource" PathDescription { Self { Name: "ExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "ExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: 
"https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 102 2025-06-25T14:29:16.376606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:16.376920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } 2025-06-25T14:29:16.377007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 102:0, explain: Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-25T14:29:16.377055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusPreconditionFailed, reason: Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-25T14:29:16.379448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusPreconditionFailed Reason: "Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:16.379660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, operation: CREATE EXTERNAL TABLE, path: /MyRoot/ExternalTable TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:29:16.379999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:29:16.380046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:29:16.380478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:29:16.380577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:29:16.380614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:315:2304] TestWaitNotification: OK eventTxId 102 2025-06-25T14:29:16.381143Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:16.381332Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 203us result status StatusPathDoesNotExist 2025-06-25T14:29:16.381547Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterCancelIndexBuild [GOOD] >> AsyncIndexChangeExchange::ShouldDeliverChangesOnSplitMerge >> TExternalTableTest::ReplaceExternalTableShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::Decimal [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:29:15.576240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:15.576327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:15.576383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:15.576435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:15.576479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:15.576507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:15.576557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:15.576623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:15.577159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:15.577421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:15.657917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:29:15.657981Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:15.658679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:15.682929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:15.683464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:15.683668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:15.692422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:15.692683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:15.693428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:15.693734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:15.704620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:15.704854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:15.706227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:15.706288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:15.706334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:15.706375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:15.706412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:15.706602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.717773Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 
72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:15.891518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:15.891811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.892049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:15.892100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:15.892345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:15.892438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:15.895356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:15.895535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:15.895739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.895793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:15.895845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:15.895901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:15.898183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.898248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:15.898294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:15.900235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.900284Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:15.900358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:15.900414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:15.903711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:15.907337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:15.907567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:15.908671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:15.908827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:15.908888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:15.909190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:15.909247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:15.909415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:15.909505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29: ... 
at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:29:16.724916Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:29:16.725028Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:16.725064Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-25T14:29:16.725117Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 101, path id: 3 2025-06-25T14:29:16.725150Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 101, path id: 3 2025-06-25T14:29:16.725176Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-25T14:29:16.725567Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.725613Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:29:16.725722Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:29:16.725760Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:29:16.725805Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:29:16.725837Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:29:16.725878Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:29:16.725916Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:29:16.725952Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:29:16.725985Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:29:16.726074Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:29:16.726125Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:29:16.726170Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 3, subscribers: 0 
2025-06-25T14:29:16.726205Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:29:16.726237Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-25T14:29:16.726258Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-25T14:29:16.727323Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:29:16.727421Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:29:16.727480Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:29:16.727525Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:29:16.727587Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:29:16.729395Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:29:16.729476Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:29:16.729507Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:29:16.729539Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T14:29:16.729586Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:29:16.730417Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:29:16.730499Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:29:16.730528Z node 2 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:29:16.730557Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-25T14:29:16.730591Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:29:16.730658Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:29:16.733952Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:29:16.734865Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:29:16.734932Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:29:16.735203Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:29:16.735253Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:29:16.735665Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:29:16.735769Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:29:16.735809Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:336:2325] TestWaitNotification: OK eventTxId 101 2025-06-25T14:29:16.736337Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:16.736570Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 311us result status StatusSuccess 2025-06-25T14:29:16.736933Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 
PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Decimal(35,9)" TypeId: 4865 Id: 1 NotNull: false TypeInfo { DecimalPrecision: 35 DecimalScale: 9 } } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |76.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |76.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview >> TExternalTableTest::CreateExternalTableShouldFailIfSuchEntityAlreadyExists [GOOD] |76.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptLeadingEmptyScan [GOOD] Test command err: Trying to start YDB, gRPC: 16093, MsgBus: 7618 2025-06-25T14:29:03.479908Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893927274235512:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:03.480011Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000935/r3tmp/tmpYpv1ZM/pdisk_1.dat 2025-06-25T14:29:03.882016Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:03.882302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:03.894335Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16093, node 1 2025-06-25T14:29:03.899244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:03.949485Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:03.949522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:03.949534Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize 
from file: (empty maybe) 2025-06-25T14:29:03.949630Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7618 TClient is connected to server localhost:7618 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:04.479328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:04.489018Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:04.501428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:04.638444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:04.771901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:04.852864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:29:06.330011Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893940159139007:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.330128Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.611177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.639403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.665320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.690524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.724448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.802751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.835637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:06.911647Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893940159139672:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.911745Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.911821Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893940159139677:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:06.914938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:06.923823Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893940159139679:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:06.986326Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893940159139730:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:08.480422Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893927274235512:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:08.480495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:08.652674Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861748671, txId: 281474976710672] shutting down 2025-06-25T14:29:09.015846Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861749035, txId: 281474976710674] shutting down 2025-06-25T14:29:09.799075Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861749833, txId: 281474976710678] shutting down Trying to start YDB, gRPC: 1892, MsgBus: 11253 2025-06-25T14:29:10.361191Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893958055088475:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:10.361245Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000935/r3tmp/tmpbiIpZe/pdisk_1.dat 2025-06-25T14:29:10.474801Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:10.487288Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:10.487387Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:10.490483Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1892, node 2 2025-06-25T14:29:10.543988Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:10.544013Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:10.544020Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:10.544140Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11253 TClient is connected to server localhost:11253 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:11.060238Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:11.070860Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:11.079836Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:11.158289Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:11.315419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:11.416272Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:11.467152Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:29:13.737578Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893970939991968:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:13.737670Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:13.818830Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:13.857100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:13.887890Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:13.956992Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:13.988706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:14.026457Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:14.060459Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:14.138245Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893975234959925:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:14.138335Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:14.138378Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893975234959930:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:14.141364Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:14.150828Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893975234959932:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:14.204140Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893975234959983:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:15.361386Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893958055088475:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:15.361465Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:15.486289Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861755510, txId: 281474976715672] shutting down 2025-06-25T14:29:15.888322Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861755895, txId: 281474976715674] shutting down |76.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest |76.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |76.2%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber |76.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReplaceExternalTableShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:29:17.091395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:17.091518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:17.091569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:17.091616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:17.091677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:17.091709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:17.091761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: 
BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:17.091841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:17.092693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:17.093051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:17.180029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:29:17.180095Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:17.180854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:17.198660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:17.199098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:17.199275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:17.226902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:17.227143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:17.228045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:17.228347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:17.231067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:17.231296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:17.232618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:17.232692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:17.232742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:17.232787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:17.232830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:17.233161Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:17.239576Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:17.387251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:17.387513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:17.387731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:17.387777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:17.388013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:17.388093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:17.391373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:17.391574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:17.391783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:17.391844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:17.391893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:17.391937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:17.394242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:17.394306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:17.394350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for 
txid 1:0 3 -> 128 2025-06-25T14:29:17.396507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:17.396563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:17.396607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:17.396655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:17.400270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:17.402691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:17.402904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:17.403919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:17.404084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:17.404158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:17.404492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:17.404549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:17.404718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:17.404803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29: ... 
# 102:0 ProgressState 2025-06-25T14:29:17.480715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:29:17.480751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:29:17.480805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:29:17.480853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:29:17.480896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-25T14:29:17.480935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:29:17.480970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:29:17.481008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:29:17.481081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:29:17.481117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-25T14:29:17.481155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 6 2025-06-25T14:29:17.481186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-25T14:29:17.482096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:29:17.482184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:29:17.482218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:29:17.482256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 6 2025-06-25T14:29:17.482298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:29:17.483071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:29:17.483162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard 
Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:29:17.483190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:29:17.483222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T14:29:17.483251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:29:17.483314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:29:17.485921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:29:17.487073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:29:17.487307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:29:17.487351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:29:17.487861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:29:17.487973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:29:17.488015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:332:2321] TestWaitNotification: OK eventTxId 102 2025-06-25T14:29:17.488512Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:17.488716Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 232us result status StatusSuccess 2025-06-25T14:29:17.489092Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalDataSource" PathDescription { Self { Name: "ExternalDataSource" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "ExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 2025-06-25T14:29:17.492344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "UniqueName" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:17.492674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 103:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "UniqueName" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } 2025-06-25T14:29:17.492843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_external_table.cpp:312: [72057594046678944] TAlterExternalTable Propose: opId# 103:0, path# /MyRoot/UniqueName, ReplaceIfExists: 1 2025-06-25T14:29:17.493010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 103:1, propose status:StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalTable, at schemeshard: 72057594046678944 2025-06-25T14:29:17.495312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 103, response: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/UniqueName\', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalTable" TxId: 103 SchemeshardId: 72057594046678944 PathId: 2 PathCreateTxId: 101, at schemeshard: 72057594046678944 2025-06-25T14:29:17.495548Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalTable, operation: CREATE EXTERNAL TABLE, path: /MyRoot/UniqueName TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:29:17.495850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:29:17.495891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:29:17.496303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:29:17.496437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:29:17.496489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:340:2329] TestWaitNotification: OK eventTxId 103 |76.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest |76.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestHiveBalancerWithSpareNodes [GOOD] Test command err: 2025-06-25T14:27:44.938915Z node 4 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T14:27:44.969344Z node 4 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } PDisks { NodeID: 4 PDiskID: 1 Path: "SectorMap:3:3200" PDiskGuid: 4 } PDisks { NodeID: 5 PDiskID: 1 Path: "SectorMap:4:3200" PDiskGuid: 5 } PDisks { NodeID: 6 PDiskID: 1 Path: "SectorMap:5:3200" PDiskGuid: 6 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T14:27:44.969633Z node 4 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 4 PDiskId# 1 Path# "SectorMap:3:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T14:27:44.970491Z node 4 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:27:44.970770Z node 4 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T14:27:44.971572Z node 4 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [4:159:2079] ControllerId# 72057594037932033 2025-06-25T14:27:44.971609Z node 4 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T14:27:44.971695Z node 4 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} 
StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T14:27:44.971805Z node 4 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T14:27:44.980854Z node 4 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T14:27:44.980928Z node 4 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T14:27:44.983006Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:158:2078] Create Queue# [4:166:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:44.983165Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:158:2078] Create Queue# [4:167:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:44.983291Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:158:2078] Create Queue# [4:168:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:44.983406Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:158:2078] Create Queue# [4:169:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:44.983562Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:158:2078] Create Queue# [4:170:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:44.983698Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:158:2078] Create Queue# [4:171:2088] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:44.983821Z node 4 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [4:158:2078] Create Queue# [4:172:2089] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:44.983847Z node 4 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T14:27:44.983929Z node 4 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [4:159:2079] 2025-06-25T14:27:44.983964Z node 4 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [4:159:2079] 2025-06-25T14:27:44.984020Z node 4 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T14:27:44.984069Z node 4 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T14:27:44.984766Z node 4 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T14:27:44.984878Z node 5 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T14:27:44.986812Z node 5 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } PDisks { NodeID: 4 PDiskID: 1 Path: "SectorMap:3:3200" PDiskGuid: 4 } PDisks { NodeID: 5 PDiskID: 1 Path: "SectorMap:4:3200" PDiskGuid: 5 } PDisks { NodeID: 6 PDiskID: 1 Path: "SectorMap:5:3200" PDiskGuid: 6 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T14:27:44.986913Z node 5 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 5 PDiskId# 1 Path# "SectorMap:4:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT 
Kind# 0} Temporary# false 2025-06-25T14:27:44.987195Z node 5 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:27:44.987419Z node 5 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T14:27:44.988153Z node 5 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [5:182:2080] ControllerId# 72057594037932033 2025-06-25T14:27:44.988194Z node 5 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T14:27:44.988262Z node 5 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T14:27:44.988393Z node 5 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T14:27:44.997309Z node 5 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T14:27:44.997377Z node 5 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T14:27:44.999252Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:181:2079] Create Queue# [5:189:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:44.999413Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:181:2079] Create Queue# [5:190:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:44.999563Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:181:2079] Create Queue# [5:191:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:44.999694Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:181:2079] Create Queue# [5:192:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:44.999837Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:181:2079] Create Queue# [5:193:2088] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:45.000001Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:181:2079] Create Queue# [5:194:2089] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:45.000145Z node 5 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [5:181:2079] Create Queue# [5:195:2090] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:45.000177Z node 5 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T14:27:45.000244Z node 5 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [5:182:2080] 2025-06-25T14:27:45.000273Z node 5 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [5:182:2080] 2025-06-25T14:27:45.000345Z node 5 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T14:27:45.000411Z node 5 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T14:27:45.000983Z node 5 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T14:27:45.001055Z node 6 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T14:27:45.003727Z node 6 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } PDisks { NodeID: 4 PDiskID: 1 Path: "SectorMap:3:3200" 
PDiskGuid: 4 } PDisks { NodeID: 5 PDiskID: 1 Path: "SectorMap:4:3200" PDiskGuid: 5 } PDisks { NodeID: 6 PDiskID: 1 Path: "SectorMap:5:3200" PDiskGuid: 6 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T14:27:45.003897Z node 6 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 6 PDiskId# 1 Path# "SectorMap:5:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T14:27:45.004394Z node 6 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:27:45.004618Z node 6 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T14:27:45.005453Z node 6 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [6:205:2080] ControllerId# 72057594037932033 2025-06-25T14:27:45.005490Z node 6 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T14:27:45.005557Z node 6 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T14:27:45.005656Z node 6 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T14:27:45.014805Z node 6 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T14:27:45.014868Z node 6 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T14:27:45.016553Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:204:2079] Create Queue# [6:212:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:45.016731Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:204:2079] Create Queue# [6:213:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:45.016887Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:204:2079] Create Queue# [6:214:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:45.017010Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:204:2079] Create Queue# [6:215:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:45.017143Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:204:2079] Create Queue# [6:216:2088] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:45.017299Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:204:2079] Create Queue# [6:217:2089] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:45.017437Z node 6 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [6:204:2079] Create Queue# [6:218:2090] targetNodeId# 1 Marker# DSP01 2025-06-25T14:27:45.017464Z node 6 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions ... 
llptr Flags: 1:2:0} 2025-06-25T14:29:13.813548Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037893 followers: 0 countLeader 1 allowFollowers 0 winner: [63:1321:2101] 2025-06-25T14:29:13.813643Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037893] forward result remote node 63 [58:2123:2503] 2025-06-25T14:29:13.813763Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037893] remote node connected [58:2123:2503] 2025-06-25T14:29:13.813811Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037893]::SendEvent [58:2123:2503] 2025-06-25T14:29:13.814054Z node 63 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037893] Accept Connect Originator# [58:2123:2503] 2025-06-25T14:29:13.814824Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037893] connected with status OK role: Leader [58:2123:2503] 2025-06-25T14:29:13.814878Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037893] send queued [58:2123:2503] 2025-06-25T14:29:13.815884Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037894] ::Bootstrap [58:2126:2505] 2025-06-25T14:29:13.815923Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037894] lookup [58:2126:2505] 2025-06-25T14:29:13.815987Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037894 entry.State: StNormal ev: {EvForward TabletID: 72075186224037894 Ev: nullptr Flags: 1:2:0} 2025-06-25T14:29:13.816036Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037894 followers: 0 countLeader 1 allowFollowers 0 winner: [62:1328:2141] 2025-06-25T14:29:13.816130Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037894] forward result remote node 62 [58:2126:2505] 2025-06-25T14:29:13.816229Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037894] remote node connected [58:2126:2505] 2025-06-25T14:29:13.816268Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037894]::SendEvent [58:2126:2505] 2025-06-25T14:29:13.816632Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:349: TClient[72075186224037894] connect request undelivered [58:2126:2505] 2025-06-25T14:29:13.816689Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:559: TClient[72075186224037894] immediate retry [58:2126:2505] 2025-06-25T14:29:13.816720Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037894] lookup [58:2126:2505] 2025-06-25T14:29:13.816770Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:536: Handle TEvTabletProblem tabletId: 72075186224037894 entry.State: StNormal 2025-06-25T14:29:13.816923Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037894 entry.State: StProblemResolve ev: {EvForward TabletID: 72075186224037894 Ev: nullptr Flags: 1:2:0} 2025-06-25T14:29:13.817010Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037894 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T14:29:13.817144Z node 58 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037894 Cookie: 0} 
2025-06-25T14:29:13.817201Z node 58 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037894 Cookie: 1} 2025-06-25T14:29:13.817241Z node 58 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037894 Cookie: 2} 2025-06-25T14:29:13.817301Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037894 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [63:1992:2267] CurrentLeaderTablet: [63:1997:2270] CurrentGeneration: 3 CurrentStep: 0} 2025-06-25T14:29:13.817408Z node 58 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037894 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [63:1992:2267] CurrentLeaderTablet: [63:1997:2270] CurrentGeneration: 3 CurrentStep: 0} 2025-06-25T14:29:13.817507Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037894 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037894 Cookie: 0 CurrentLeader: [63:1992:2267] CurrentLeaderTablet: [63:1997:2270] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[58:24343667:0] : 7}, {[58:1099535971443:0] : 10}}}} 2025-06-25T14:29:13.817541Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037894 followers: 0 2025-06-25T14:29:13.817587Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037894 followers: 0 countLeader 1 allowFollowers 0 winner: [63:1992:2267] 2025-06-25T14:29:13.817699Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037894] forward result remote node 63 [58:2126:2505] 2025-06-25T14:29:13.817825Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037894] remote node connected [58:2126:2505] 2025-06-25T14:29:13.817865Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037894]::SendEvent [58:2126:2505] 2025-06-25T14:29:13.818106Z node 63 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037894] Accept Connect Originator# [58:2126:2505] 2025-06-25T14:29:13.818409Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037894] connected with status OK role: Leader [58:2126:2505] 2025-06-25T14:29:13.818459Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037894] send queued [58:2126:2505] 2025-06-25T14:29:13.819509Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037895] ::Bootstrap [58:2130:2507] 2025-06-25T14:29:13.819554Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037895] lookup [58:2130:2507] 2025-06-25T14:29:13.819629Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037895 entry.State: StNormal ev: {EvForward TabletID: 72075186224037895 Ev: nullptr Flags: 1:2:0} 2025-06-25T14:29:13.819680Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037895 followers: 0 countLeader 1 allowFollowers 0 winner: [63:1835:2196] 2025-06-25T14:29:13.819778Z node 58 :PIPE_CLIENT DEBUG: 
tablet_pipe_client.cpp:195: TClient[72075186224037895] forward result remote node 63 [58:2130:2507] 2025-06-25T14:29:13.821105Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037895] remote node connected [58:2130:2507] 2025-06-25T14:29:13.821164Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037895]::SendEvent [58:2130:2507] 2025-06-25T14:29:13.821434Z node 63 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037895] Accept Connect Originator# [58:2130:2507] 2025-06-25T14:29:13.821799Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037895] connected with status OK role: Leader [58:2130:2507] 2025-06-25T14:29:13.821844Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037895] send queued [58:2130:2507] 2025-06-25T14:29:13.822884Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037896] ::Bootstrap [58:2133:2509] 2025-06-25T14:29:13.822931Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037896] lookup [58:2133:2509] 2025-06-25T14:29:13.822999Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037896 entry.State: StNormal ev: {EvForward TabletID: 72075186224037896 Ev: nullptr Flags: 1:2:0} 2025-06-25T14:29:13.823052Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037896 followers: 0 countLeader 1 allowFollowers 0 winner: [63:1839:2198] 2025-06-25T14:29:13.823184Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037896] forward result remote node 63 [58:2133:2509] 2025-06-25T14:29:13.823337Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037896] remote node connected [58:2133:2509] 2025-06-25T14:29:13.823388Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037896]::SendEvent [58:2133:2509] 2025-06-25T14:29:13.823606Z node 63 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037896] Accept Connect Originator# [58:2133:2509] 2025-06-25T14:29:13.824078Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037896] connected with status OK role: Leader [58:2133:2509] 2025-06-25T14:29:13.824128Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037896] send queued [58:2133:2509] 2025-06-25T14:29:13.825342Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [58:2135:2510] 2025-06-25T14:29:13.825443Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [58:2135:2510] 2025-06-25T14:29:13.825567Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-25T14:29:13.825654Z node 58 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 58 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [58:617:2179] 2025-06-25T14:29:13.825791Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [58:2135:2510] 2025-06-25T14:29:13.825911Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [58:2135:2510] 
2025-06-25T14:29:13.826016Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [58:2135:2510] 2025-06-25T14:29:13.826121Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [58:2135:2510] 2025-06-25T14:29:13.826279Z node 58 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [58:2135:2510] 2025-06-25T14:29:13.826569Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [58:2135:2510] 2025-06-25T14:29:13.826653Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [58:2135:2510] 2025-06-25T14:29:13.826721Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [58:2135:2510] 2025-06-25T14:29:13.826819Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [58:2135:2510] 2025-06-25T14:29:13.826891Z node 58 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [58:2135:2510] 2025-06-25T14:29:13.826989Z node 58 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [58:588:2174] EventType# 268697616 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::CreateExternalTableShouldFailIfSuchEntityAlreadyExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:29:16.512142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:16.512228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:16.512281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:16.512347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:16.512400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:16.512427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:16.512490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:16.512571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-06-25T14:29:16.513320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:16.513656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:16.587730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:29:16.587790Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:16.588513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:16.599799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:16.600201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:16.600401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:16.607074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:16.607240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:16.607676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:16.607871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:16.609968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:16.610146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:16.611302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:16.611349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:16.611384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:16.611414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:16.611439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:16.611590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.618225Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:16.728205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: 
TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:16.728469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.728717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:16.728766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:16.729004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:16.729080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:16.731273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:16.731455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:16.731631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.731691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:16.731743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:16.731781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:16.733608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.733653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:16.733689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:16.735133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:16.735190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:29:16.735241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:16.735293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:16.738223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:16.740104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:16.740260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:16.741099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:16.741208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:16.741250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:16.741509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:16.741563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:16.741691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:16.741752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29: ... 
RD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T14:29:17.565252Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:29:17.565905Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:29:17.565986Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:29:17.566027Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:29:17.566049Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-25T14:29:17.566070Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:29:17.566119Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:29:17.567894Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:29:17.568741Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:29:17.568830Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:29:17.568993Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:29:17.569026Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:29:17.569421Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:29:17.569496Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:29:17.569531Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:336:2325] TestWaitNotification: OK eventTxId 102 2025-06-25T14:29:17.569920Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false 
ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:17.570101Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 204us result status StatusSuccess 2025-06-25T14:29:17.570395Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 2025-06-25T14:29:17.573052Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/new_location" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:17.573325Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 103:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/new_location" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } 2025-06-25T14:29:17.573411Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] 
TCreateExternalTable Propose: opId# 103:0, path# /MyRoot/ExternalTable 2025-06-25T14:29:17.573527Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 103:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/ExternalTable', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-25T14:29:17.575355Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 103, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges)" TxId: 103 SchemeshardId: 72057594046678944 PathId: 3 PathCreateTxId: 102, at schemeshard: 72057594046678944 2025-06-25T14:29:17.575569Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/ExternalTable', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), operation: CREATE EXTERNAL TABLE, path: /MyRoot/ExternalTable TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:29:17.575779Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:29:17.575807Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:29:17.576181Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:29:17.576286Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:29:17.576344Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:344:2333] TestWaitNotification: OK eventTxId 103 2025-06-25T14:29:17.576710Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:17.576876Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 166us result status StatusSuccess 2025-06-25T14:29:17.577150Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpYql::JsonCast [GOOD] |76.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::Delete >> StatisticsSaveLoad::ForbidAccess >> KqpPragma::MatchRecognizeWithTimeOrderRecoverer [GOOD] >> KqpPragma::MatchRecognizeWithoutTimeOrderRecoverer [GOOD] >> TTransferTests::ConsistencyLevel [GOOD] >> TTransferTests::Alter >> StatisticsSaveLoad::Simple |76.2%| [TA] $(B)/ydb/core/tx/schemeshard/ut_external_table/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TTicketParserTest::NebiusAuthenticationRetryErrorImmediately [GOOD] >> TTicketParserTest::NebiusAccessKeySignatureUnsupported >> Cdc::Write[TopicRunner] [GOOD] >> Cdc::UpdateStream |76.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> KqpScripting::StreamExecuteYqlScriptScanCancelAfterBruteForce [GOOD] >> TTicketParserTest::AuthorizationUnavailable [GOOD] >> KqpScripting::ExecuteYqlScriptPg [GOOD] |76.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::JsonCast [GOOD] Test command err: Trying to start YDB, gRPC: 28181, MsgBus: 28056 2025-06-25T14:29:07.306663Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893944716517546:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:07.306727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0008fb/r3tmp/tmpDNGijt/pdisk_1.dat 2025-06-25T14:29:07.690063Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:07.713632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:07.713730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 28181, node 1 2025-06-25T14:29:07.717395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:07.766273Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:07.766306Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:07.766316Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:07.766452Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28056 TClient is connected to server localhost:28056 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:29:08.280783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:08.296230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:08.316628Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:08.318473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:08.501863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:08.690498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:29:08.758845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.256674Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893957601421049:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.256793Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.556983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.632331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.703056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.741104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.764996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.804787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.834579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.900663Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893957601421712:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.900779Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.901042Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893957601421717:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.906219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:10.918680Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893957601421719:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:11.012495Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893961896389066:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Error: Table intent determination, code: 1040
:3:35: Error: INSERT OR IGNORE is not yet supported for Kikimr. Trying to start YDB, gRPC: 31334, MsgBus: 30255 2025-06-25T14:29:12.842301Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893966689242167:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:12.843200Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0008fb/r3tmp/tmpILd9ar/pdisk_1.dat 2025-06-25T14:29:12.999096Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:12.999754Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893966689241977:2080] 1750861752708169 != 1750861752708172 2025-06-25T14:29:13.013043Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:13.013128Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:13.014910Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31334, node 2 2025-06-25T14:29:13.119527Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:13.119551Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:13.119557Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:13.119659Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30255 TClient is connected to server localhost:30255 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:13.649976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:29:13.661167Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:29:13.680533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:13.737165Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:29:13.766781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:13.898421Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:13.975132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:16.090721Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893983869112806:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:16.090789Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:16.150817Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.220822Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.292025Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.318392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.343492Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.410355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.451986Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.546429Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893983869113471:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:16.546539Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:16.546983Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893983869113476:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:16.550771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:16.561346Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893983869113478:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:16.653538Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893983869113529:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:17.724445Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893966689242167:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:17.724517Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; [[#]] >> KqpCost::OltpWriteRow+isSink >> ReadSessionImplTest::UsesOnRetryStateDuringRetries [GOOD] >> RetryPolicy::TWriteSession_TestPolicy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpPragma::MatchRecognizeWithoutTimeOrderRecoverer [GOOD] Test command err: Trying to start YDB, gRPC: 28191, MsgBus: 62412 2025-06-25T14:29:06.050581Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893939136006509:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:06.050922Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000934/r3tmp/tmpc1aDje/pdisk_1.dat 2025-06-25T14:29:06.330128Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:06.330480Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893939136006478:2080] 1750861746049337 != 1750861746049340 TServer::EnableGrpc on GrpcPort 28191, node 1 2025-06-25T14:29:06.417170Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:06.417195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:06.417215Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:06.417324Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:06.424172Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:06.424290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:06.426042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62412 TClient is connected to server localhost:62412 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:06.858232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:06.885675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:06.897727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:29:07.022436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:07.130039Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:29:07.182391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:07.254980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:29:08.924564Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893947725942715:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:08.924681Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:09.257607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.295359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.334226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.368869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.414850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.452863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.522500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.563672Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893952020910671:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:09.563735Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:09.563850Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893952020910676:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:09.567401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:09.575984Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893952020910678:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:09.671394Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893952020910729:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:10.628992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.817261Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861750855, txId: 281474976715674] shutting down 2025-06-25T14:29:11.051062Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeo ... p:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8810, MsgBus: 18820 2025-06-25T14:29:11.750408Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893962777987380:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:11.750479Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000934/r3tmp/tmp8pYcF0/pdisk_1.dat 2025-06-25T14:29:11.894599Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:11.894685Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:11.900144Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:11.915469Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8810, node 2 2025-06-25T14:29:11.978218Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:11.978242Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:11.978252Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:11.978378Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18820 TClient is connected to server localhost:18820 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:12.481908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:12.496910Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:12.569666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:12.735386Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:12.806407Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:12.833744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:15.105524Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893979957858170:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:15.105631Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:15.202179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.277204Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.323104Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.399909Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.451140Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.514235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.556788Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.619719Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893979957858830:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:15.619784Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:15.619944Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893979957858835:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:15.623614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:15.632988Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893979957858837:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:15.696536Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893979957858888:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:16.751435Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893962777987380:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:16.751520Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:17.021907Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:18.065413Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861758058, txId: 281474976715674] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpPragma::MatchRecognizeWithTimeOrderRecoverer [GOOD] Test command err: Trying to start YDB, gRPC: 15928, MsgBus: 24899 2025-06-25T14:29:06.779923Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893941556385440:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:06.779987Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00090e/r3tmp/tmpVJUG3n/pdisk_1.dat 2025-06-25T14:29:07.099903Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:07.100164Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893941556385417:2080] 1750861746771352 != 1750861746771355 TServer::EnableGrpc on GrpcPort 15928, node 1 2025-06-25T14:29:07.175682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:07.176759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:07.179728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:07.198561Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:07.198585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:07.198592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:07.198742Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got 
bad distributable configuration TClient is connected to server localhost:24899 TClient is connected to server localhost:24899 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:07.759652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:07.788357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:07.824831Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:07.930440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:08.078913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:08.148160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:29:09.716733Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893954441288956:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:09.716845Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:09.973638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.999859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.025155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.053970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.081715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.110405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.156730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.203750Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893958736256913:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.203809Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.203821Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893958736256918:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.207085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:10.217544Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893958736256920:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:10.315519Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893958736256971:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:11.265999Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519893963031224544:2478], status: GENERIC_ERROR, issues:
: Error: Pre type annotation, code: 1020
:2:34: Error: Pragma auth not supported inside Kikimr query., code: 2016 2025-06-25T14:29:11.266200Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MzRhY2M5MzctZDYxOGNjMDYtYmMzOGJiMGEtNDMzN2E4Mzc=, ActorId: [1:7519893963031224536:2473], ActorState: ExecuteState, TraceId: 01jykqw8xbfhdaepxvqc98zg2v, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 10879, MsgBus: 26981 2025-06-25T14:29:12.162738Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893968576445127:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:12.162798Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00090e/r3tmp/tmpDnuyVq/pdisk_1.dat 2025-06-25T14:29:12.304118Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:12.320409Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893968576445109:2080] 1750861752162162 != 1750861752162165 2025-06-25T14:29:12.323130Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:12.323232Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:12.324639Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10879, node 2 2025-06-25T14:29:12.406374Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:12.406406Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:12.406413Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:12.406528Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26981 TClient is connected to server localhost:26981 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:29:12.933533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:12.950520Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:12.965708Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:13.061865Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:13.174623Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:29:13.306498Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:13.396540Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:15.462823Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893981461348645:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:15.462935Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:15.517199Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.550652Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.598872Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.631897Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.708413Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.779012Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.816096Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:15.876234Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893981461349308:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:15.876333Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:15.876588Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893981461349313:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:15.879868Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:15.889262Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893981461349315:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:15.963227Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893981461349366:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:17.134387Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:17.166784Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893968576445127:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:17.166942Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:18.064847Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861758030, txId: 281474976715674] shutting down >> ReadSessionImplTest::ForcefulDestroyPartitionStream [GOOD] >> ReadSessionImplTest::DestroyPartitionStreamRequest |76.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |76.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |76.2%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table/test-results/unittest/{meta.json ... 
results_accumulator.log} |76.2%| [LD] {RESULT} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut >> ReadSessionImplTest::DestroyPartitionStreamRequest [GOOD] >> ReadSessionImplTest::DecompressZstdEmptyMessage [GOOD] >> ReadSessionImplTest::PacksBatches_BatchABitBiggerThanLimit [GOOD] >> ReadSessionImplTest::PacksBatches_BatchesEqualToServerBatches [GOOD] >> ReadSessionImplTest::HoleBetweenOffsets [GOOD] >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] >> ReadSessionImplTest::ReconnectOnTmpError [GOOD] >> ReadSessionImplTest::ReconnectOnTmpErrorAndThenTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeoutAndThenCreate [GOOD] >> ReadSessionImplTest::ReconnectsAfterFailure [GOOD] >> ReadSessionImplTest::SimpleDataHandlers >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression >> ReadSessionImplTest::SuccessfulInit [GOOD] >> ReadSessionImplTest::SuccessfulInitAndThenTimeoutCallback [GOOD] >> ReadSessionImplTest::StopsRetryAfterFailedAttempt [GOOD] >> ReadSessionImplTest::StopsRetryAfterTimeout [GOOD] >> ReadSessionImplTest::UnpackBigBatchWithTwoPartitions [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease >> ReadSessionImplTest::ProperlyOrdersDecompressedData [GOOD] >> ReadSessionImplTest::PacksBatches_ExactlyTwoMessagesInBatch [GOOD] >> ReadSessionImplTest::PacksBatches_OneMessageInEveryBatch [GOOD] >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks >> ReadSessionImplTest::DecompressRaw >> ReadSessionImplTest::SimpleDataHandlers [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithCommit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptScanCancelAfterBruteForce [GOOD] Test command err: Trying to start YDB, gRPC: 18623, MsgBus: 17799 2025-06-25T14:29:06.733907Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893942584117025:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:06.735039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000917/r3tmp/tmp7LXj6Z/pdisk_1.dat 2025-06-25T14:29:07.085687Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18623, node 1 2025-06-25T14:29:07.167703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:07.167812Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:07.169460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:07.174432Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:07.174452Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:07.174458Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:07.174558Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:17799 TClient is connected to server localhost:17799 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:29:07.741102Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:07.814320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:07.835094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:29:07.844959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:07.989686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:08.141594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:08.216252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:29:09.722180Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893955469020490:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:09.722250Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.045240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.074626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.102241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.128376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.167403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.234997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.262845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.316178Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893959763988452:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.316248Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893959763988457:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.316253Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.319279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:10.328709Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893959763988459:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:10.395307Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893959763988510:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:11.732408Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893942584117025:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:11.732481Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:11.859448Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861751884, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 9656, MsgBus: 10279 2025-06-25T14:29:12.782079Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893966726685060:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:12.782180Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000917/r3tmp/tmpnDkW3R/pdisk_1.dat 2025-06-25T14:29:13.005659Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:13.012271Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893966726685038:2080] 1750861752743768 != 1750861752743771 2025-06-25T14:29:13.015964Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:13.016044Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:13.017678Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9656, node 2 2025-06-25T14:29:13.163675Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:13.163696Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:13.163702Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:13.163813Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10279 TClient is connected to server localhost:10279 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:29:13.811009Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:13.844465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:13.862699Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:13.932485Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:14.076862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:14.136708Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:16.392589Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893983906555854:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:16.392693Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:16.474607Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.508977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.541754Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.576862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.608452Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.640279Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.680383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:16.768007Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893983906556516:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:16.768106Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:16.768301Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893983906556521:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:16.773610Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:16.786714Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893983906556523:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:16.882384Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893983906556576:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:17.784416Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893966726685060:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:17.784494Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:18.093904Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861758128, txId: 281474976715672] shutting down 2025-06-25T14:29:18.268385Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861758303, txId: 281474976715674] shutting down 2025-06-25T14:29:18.494720Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861758527, txId: 281474976715676] shutting down >> ReadSessionImplTest::DecompressRaw [GOOD] >> ReadSessionImplTest::DecompressGzip [GOOD] >> ReadSessionImplTest::DecompressZstd [GOOD] >> ReadSessionImplTest::DecompressRawEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressGzipEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressWithSynchronousExecutor [GOOD] >> ReadSessionImplTest::DataReceivedCallbackReal ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::AuthorizationUnavailable [GOOD] Test command err: 2025-06-25T14:28:51.298363Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893875705897597:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:51.298449Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00187d/r3tmp/tmpvaWNgd/pdisk_1.dat 2025-06-25T14:28:51.611114Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893875705897578:2080] 1750861731297658 != 1750861731297661 2025-06-25T14:28:51.621745Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4398, node 1 2025-06-25T14:28:51.657947Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:51.657995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:51.658005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:51.658139Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:51.670720Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:51.670834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:51.672616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22599 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:51.953386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:51.965837Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-25T14:28:51.965939Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000010408] Connect to grpc://localhost:28042 2025-06-25T14:28:51.968732Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:28:51.975223Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010408] Status 14 Service Unavailable 2025-06-25T14:28:51.975341Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-25T14:28:51.975394Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-25T14:28:51.975418Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-25T14:28:51.975651Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:28:51.976856Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010408] Status 14 
Service Unavailable 2025-06-25T14:28:51.976927Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-25T14:28:51.976951Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-25T14:28:52.306616Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:53.311491Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-25T14:28:53.311556Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-25T14:28:53.312019Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:28:53.314103Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000010408] Status 14 Service Unavailable 2025-06-25T14:28:53.314380Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-25T14:28:53.314405Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-25T14:28:54.312251Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-25T14:28:54.312295Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-25T14:28:54.313231Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000010408] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:28:54.325410Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000010408] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:28:54.325753Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a valid subject "user1@as" 2025-06-25T14:28:54.325893Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-25T14:28:56.298683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893875705897597:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:56.298800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:04.595911Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893930822481992:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:04.595975Z node 2 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00187d/r3tmp/tmpygFsma/pdisk_1.dat 2025-06-25T14:29:04.743706Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:04.745136Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893930822481973:2080] 1750861744595592 != 1750861744595595 TServer::EnableGrpc on GrpcPort 15557, node 2 2025-06-25T14:29:04.762142Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:04.762214Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:04.764414Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:04.819085Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:04.819107Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:04.819114Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:04.819238Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61000 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:05.093017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__ope ... 
ACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-25T14:29:12.106468Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-25T14:29:12.108031Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000089388] Connect to grpc://localhost:20141 2025-06-25T14:29:12.109052Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000089388] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-25T14:29:12.128672Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000089388] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-25T14:29:12.129215Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-25T14:29:12.129669Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:12.129690Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:12.129700Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:12.129727Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-25T14:29:12.129965Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000f5108] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:29:12.134587Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000f5108] Status 16 Access Denied 2025-06-25T14:29:12.134873Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.write now has a permanent error "Access Denied" retryable:0 2025-06-25T14:29:12.134908Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'Access Denied' 2025-06-25T14:29:12.135768Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:12.135789Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:12.135798Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:12.135836Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-25T14:29:12.135886Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-25T14:29:12.136095Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000f5108] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:29:12.136697Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000f5108] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: 
"resource-manager.folder" } } 2025-06-25T14:29:12.138425Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000f5108] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:29:12.140618Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000f5108] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:29:12.140770Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-25T14:29:12.140807Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-06-25T14:29:12.140829Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-25T14:29:12.140987Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-25T14:29:15.604604Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519893978606906710:2147];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00187d/r3tmp/tmpErZzVn/pdisk_1.dat 2025-06-25T14:29:15.609593Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:29:15.726562Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:15.728236Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519893978606906588:2080] 1750861755581092 != 1750861755581095 TServer::EnableGrpc on GrpcPort 27301, node 5 2025-06-25T14:29:15.752430Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:15.752641Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:15.755220Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:15.780481Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:15.780505Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:15.780514Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:15.780666Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3510 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:16.174183Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:16.186723Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:16.186756Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:16.186766Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:16.186803Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-25T14:29:16.186863Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-25T14:29:16.186920Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000150488] Connect to grpc://localhost:16085 2025-06-25T14:29:16.187866Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000150488] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:29:16.191417Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000150488] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:29:16.196579Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000150488] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-25T14:29:16.196774Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000150488] Status 14 Service Unavailable 2025-06-25T14:29:16.197177Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-25T14:29:16.197251Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.write now has a permanent error "Service Unavailable" retryable:1 2025-06-25T14:29:16.197292Z node 5 :TICKET_PARSER DEBUG: 
ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-25T14:29:16.197323Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-25T14:29:16.197380Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-25T14:29:16.197621Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000150488] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:29:16.198274Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000150488] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-25T14:29:16.201010Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000150488] Status 1 CANCELLED 2025-06-25T14:29:16.201039Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000150488] Status 1 CANCELLED 2025-06-25T14:29:16.201483Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.write now has a permanent error "CANCELLED" retryable:1 2025-06-25T14:29:16.201548Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1415: Ticket **** (8E120919) permission something.read now has a retryable error "CANCELLED" 2025-06-25T14:29:16.201582Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::ExecuteYqlScriptPg [GOOD] Test command err: Trying to start YDB, gRPC: 27782, MsgBus: 5028 2025-06-25T14:29:07.626410Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893947463537890:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:07.626462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0008ef/r3tmp/tmpPWoLvH/pdisk_1.dat 2025-06-25T14:29:07.984006Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:08.020350Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:08.020428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 27782, node 1 2025-06-25T14:29:08.021607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:08.070753Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:08.070776Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:08.070782Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:08.070872Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5028 TClient is connected to server localhost:5028 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:08.620904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:08.632548Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:29:08.661330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:08.674458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:08.807813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:29:08.966198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:09.035911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:10.652862Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893960348441375:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.652986Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:10.960007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:10.988670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:11.024692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:11.067882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:11.123129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:11.153286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:11.223701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:11.285636Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893964643409333:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:11.285714Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:11.285896Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893964643409338:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:11.289268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:11.299191Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893964643409340:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:11.373236Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893964643409391:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:12.519195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:12.627538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893947463537890:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:12.627599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:13.041117Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861753074, txId: 281474976710674] shutting down Trying to start YDB, gRPC: 23215, MsgBus: 8265 2025-06-25T14:29:13.873905Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893970123613347:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:13.874028Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0008ef/r3tmp/tmpl3XcWk/pdisk_1.dat 2025-06-25T14:29:13.978586Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519893970123613328:2080] 1750861753873539 != 1750861753873542 2025-06-25T14:29:13.986419Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23215, node 2 2025-06-25T14:29:14.009597Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:14.009694Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:14.013091Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:14.035166Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:14.035194Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:14.035209Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:14.035330Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8265 TClient is connected to server localhost:8265 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting...2025-06-25T14:29:14.473192Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:14.487977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:14.580132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:14.718500Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:14.800210Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:14.949853Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:16.955455Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893983008516869:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:16.955796Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:17.010720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:17.043716Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:17.074411Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:17.109367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:17.144911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:17.221688Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:17.280647Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:17.379360Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893987303484828:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:17.379453Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:17.379696Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519893987303484833:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:17.383918Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:17.397158Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519893987303484835:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:17.479644Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519893987303484886:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:18.874018Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519893970123613347:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:18.874114Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |76.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |76.3%| [LD] {RESULT} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |76.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] Test command err: 2025-06-25T14:29:20.139264Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.139332Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.139359Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.148721Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.152624Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.175469Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.178051Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.181399Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.181420Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.181439Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.192589Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.193176Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.193427Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.196534Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.196922Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 2025-06-25T14:29:20.202159Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.202214Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.202251Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.202649Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.203625Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.203756Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.204012Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.204845Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.205256Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:20.205374Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.205432Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-25T14:29:20.206761Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.206806Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.206838Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.207214Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.207644Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.207800Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.208197Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 11 Compressed message data size: 31 2025-06-25T14:29:20.209348Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:29:20.209634Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-25T14:29:20.210496Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-25T14:29:20.210758Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-25T14:29:20.210943Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.211008Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:29:20.211047Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:29:20.211244Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 GOT RANGE 0 3 Getting new event 2025-06-25T14:29:20.211394Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T14:29:20.211422Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-25T14:29:20.211451Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:29:20.211603Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 GOT RANGE 3 5 Getting new event 2025-06-25T14:29:20.211692Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-25T14:29:20.211722Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-25T14:29:20.211748Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:29:20.211868Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 GOT RANGE 5 7 Getting new event 2025-06-25T14:29:20.211936Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-25T14:29:20.211982Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-25T14:29:20.212024Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:29:20.212142Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 GOT RANGE 7 9 2025-06-25T14:29:20.213745Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.213774Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.213805Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.214168Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.214764Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.214938Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.215166Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-25T14:29:20.216431Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:29:20.216687Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-25T14:29:20.216984Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-25T14:29:20.217186Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-25T14:29:20.217325Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.217383Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:29:20.217409Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T14:29:20.217428Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-25T14:29:20.217457Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:29:20.217707Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 5). Partition stream id: 1 GOT RANGE 0 5 Getting new event 2025-06-25T14:29:20.217885Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-25T14:29:20.217908Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-25T14:29:20.217932Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-25T14:29:20.217954Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-25T14:29:20.217980Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:29:20.218148Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 9). Partition stream id: 1 GOT RANGE 5 9 2025-06-25T14:29:20.219533Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.219568Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.219596Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.219943Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.220354Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.220519Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.221523Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. 
Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.222552Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:29:20.223483Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:29:20.223968Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (10-11) 2025-06-25T14:29:20.224103Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-25T14:29:20.224241Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.224276Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:29:20.224299Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (10-10) 2025-06-25T14:29:20.224338Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (11-11) 2025-06-25T14:29:20.224379Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes 2025-06-25T14:29:20.224416Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes got data event: DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 11 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-25T14:29:20.224605Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 Got commit req { offset_ranges { assign_id: 1 end_offset: 3 } } RANGE 0 3 2025-06-25T14:29:20.224792Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 12). 
Partition stream id: 1 Got commit req { offset_ranges { assign_id: 1 start_offset: 3 end_offset: 12 } } RANGE 3 12 >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] Test command err: 2025-06-25T14:29:20.270518Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.270548Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.270571Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.271816Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-25T14:29:20.271879Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.271908Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.273856Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.005099s 2025-06-25T14:29:20.274586Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.275017Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-25T14:29:20.275101Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.304378Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.304421Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.304440Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.304815Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-25T14:29:20.304856Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.304880Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.304947Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.006771s 2025-06-25T14:29:20.307156Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.307811Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-25T14:29:20.307914Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.308943Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.308965Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.308984Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.309343Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-25T14:29:20.309382Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.309401Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.309549Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.285490s 2025-06-25T14:29:20.309955Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.310615Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-25T14:29:20.310693Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.311682Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.311703Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.311722Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.312104Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-25T14:29:20.312168Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.312233Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.312304Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.238277s 2025-06-25T14:29:20.312727Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.313112Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-25T14:29:20.313186Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.314594Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.314700Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.314737Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.315249Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.315728Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.328782Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.329214Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TRANSPORT_UNAVAILABLE. Description:
: Error: GRpc error: (14): 2025-06-25T14:29:20.329253Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.329297Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.329372Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.206881s 2025-06-25T14:29:20.330096Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-25T14:29:20.331496Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.331525Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.331578Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.331966Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.332443Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.332611Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.333282Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.434740Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.435004Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-25T14:29:20.435064Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.435132Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-25T14:29:20.435209Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-25T14:29:20.535842Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-25T14:29:20.540400Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-25T14:29:20.545406Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.545431Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.552416Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.552875Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.557104Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.557309Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.560821Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.665389Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.665614Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-25T14:29:20.665703Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.665764Z :DEBUG: Take Data. Partition 1. 
Read: {1, 0} (2-2) 2025-06-25T14:29:20.665858Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2025-06-25T14:29:20.665976Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-25T14:29:20.669677Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-25T14:29:20.669749Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-25T14:29:20.669954Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> TTransferTests::Alter [GOOD] >> TSchemeShardSysViewTest::CreateSysView ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] Test command err: 2025-06-25T14:29:20.353288Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.353322Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.353357Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.353734Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.360561Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-25T14:29:20.360649Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.365282Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.365305Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.365324Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.365631Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.370817Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-25T14:29:20.370910Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.384238Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.384281Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.384325Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.384776Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-25T14:29:20.384829Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.384862Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.385008Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: INTERNAL_ERROR Issues: "
: Error: Failed to establish connection to server "" ( cluster cluster). Attempts done: 1 " } 2025-06-25T14:29:20.389431Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.389458Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.389479Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.396859Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-25T14:29:20.396917Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.396944Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.397027Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: TIMEOUT Issues: "
: Error: Failed to establish connection to server. Attempts done: 1 " } 2025-06-25T14:29:20.398534Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-25T14:29:20.398566Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-25T14:29:20.398655Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.402992Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.403542Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.418435Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-25T14:29:20.418872Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.419261Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 2. Cluster: "TestCluster". Topic: "TestTopic". Partition: 2. Read offset: (NULL) 2025-06-25T14:29:20.424372Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-50) 2025-06-25T14:29:20.424640Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.424679Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:29:20.424711Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T14:29:20.424731Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-25T14:29:20.424765Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-25T14:29:20.424784Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-25T14:29:20.424801Z :DEBUG: Take Data. Partition 1. Read: {0, 6} (7-7) 2025-06-25T14:29:20.424817Z :DEBUG: Take Data. Partition 1. Read: {0, 7} (8-8) 2025-06-25T14:29:20.424863Z :DEBUG: Take Data. Partition 1. Read: {0, 8} (9-9) 2025-06-25T14:29:20.424884Z :DEBUG: Take Data. Partition 1. Read: {0, 9} (10-10) 2025-06-25T14:29:20.424902Z :DEBUG: Take Data. Partition 1. Read: {0, 10} (11-11) 2025-06-25T14:29:20.424920Z :DEBUG: Take Data. Partition 1. Read: {0, 11} (12-12) 2025-06-25T14:29:20.424938Z :DEBUG: Take Data. Partition 1. Read: {0, 12} (13-13) 2025-06-25T14:29:20.424956Z :DEBUG: Take Data. Partition 1. Read: {0, 13} (14-14) 2025-06-25T14:29:20.424973Z :DEBUG: Take Data. Partition 1. Read: {0, 14} (15-15) 2025-06-25T14:29:20.424990Z :DEBUG: Take Data. Partition 1. Read: {0, 15} (16-16) 2025-06-25T14:29:20.425038Z :DEBUG: Take Data. Partition 1. Read: {0, 16} (17-17) 2025-06-25T14:29:20.425067Z :DEBUG: Take Data. Partition 1. Read: {0, 17} (18-18) 2025-06-25T14:29:20.425087Z :DEBUG: Take Data. Partition 1. Read: {0, 18} (19-19) 2025-06-25T14:29:20.425116Z :DEBUG: Take Data. Partition 1. Read: {0, 19} (20-20) 2025-06-25T14:29:20.425136Z :DEBUG: Take Data. Partition 1. Read: {0, 20} (21-21) 2025-06-25T14:29:20.425163Z :DEBUG: Take Data. Partition 1. Read: {0, 21} (22-22) 2025-06-25T14:29:20.425183Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (23-23) 2025-06-25T14:29:20.425200Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (24-24) 2025-06-25T14:29:20.425217Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (25-25) 2025-06-25T14:29:20.425233Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (26-26) 2025-06-25T14:29:20.425249Z :DEBUG: Take Data. Partition 1. Read: {1, 4} (27-27) 2025-06-25T14:29:20.425266Z :DEBUG: Take Data. Partition 1. 
Read: {1, 5} (28-28) 2025-06-25T14:29:20.425285Z :DEBUG: Take Data. Partition 1. Read: {1, 6} (29-29) 2025-06-25T14:29:20.425302Z :DEBUG: Take Data. Partition 1. Read: {1, 7} (30-30) 2025-06-25T14:29:20.425318Z :DEBUG: Take Data. Partition 1. Read: {1, 8} (31-31) 2025-06-25T14:29:20.425348Z :DEBUG: Take Data. Partition 1. Read: {1, 9} (32-32) 2025-06-25T14:29:20.425423Z :DEBUG: Take Data. Partition 1. Read: {1, 10} (33-33) 2025-06-25T14:29:20.425443Z :DEBUG: Take Data. Partition 1. Read: {1, 11} (34-34) 2025-06-25T14:29:20.425472Z :DEBUG: Take Data. Partition 1. Read: {1, 12} (35-35) 2025-06-25T14:29:20.425490Z :DEBUG: Take Data. Partition 1. Read: {1, 13} (36-36) 2025-06-25T14:29:20.425508Z :DEBUG: Take Data. Partition 1. Read: {1, 14} (37-37) 2025-06-25T14:29:20.425524Z :DEBUG: Take Data. Partition 1. Read: {1, 15} (38-38) 2025-06-25T14:29:20.425541Z :DEBUG: Take Data. Partition 1. Read: {1, 16} (39-39) 2025-06-25T14:29:20.425559Z :DEBUG: Take Data. Partition 1. Read: {1, 17} (40-40) 2025-06-25T14:29:20.425577Z :DEBUG: Take Data. Partition 1. Read: {1, 18} (41-41) 2025-06-25T14:29:20.425594Z :DEBUG: Take Data. Partition 1. Read: {1, 19} (42-42) 2025-06-25T14:29:20.425618Z :DEBUG: Take Data. Partition 1. Read: {1, 20} (43-43) 2025-06-25T14:29:20.425639Z :DEBUG: Take Data. Partition 1. Read: {1, 21} (44-44) 2025-06-25T14:29:20.425657Z :DEBUG: Take Data. Partition 1. Read: {1, 22} (45-45) 2025-06-25T14:29:20.425679Z :DEBUG: Take Data. Partition 1. Read: {1, 23} (46-46) 2025-06-25T14:29:20.425696Z :DEBUG: Take Data. Partition 1. Read: {1, 24} (47-47) 2025-06-25T14:29:20.425724Z :DEBUG: Take Data. Partition 1. Read: {1, 25} (48-48) 2025-06-25T14:29:20.425749Z :DEBUG: Take Data. Partition 1. Read: {1, 26} (49-49) 2025-06-25T14:29:20.425772Z :DEBUG: Take Data. Partition 1. Read: {1, 27} (50-50) 2025-06-25T14:29:20.425840Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-25T14:29:20.428402Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 2 (51-100) 2025-06-25T14:29:20.428603Z :DEBUG: Take Data. Partition 2. Read: {0, 0} (51-51) 2025-06-25T14:29:20.428643Z :DEBUG: Take Data. Partition 2. Read: {0, 1} (52-52) 2025-06-25T14:29:20.428678Z :DEBUG: Take Data. Partition 2. Read: {0, 2} (53-53) 2025-06-25T14:29:20.428699Z :DEBUG: Take Data. Partition 2. Read: {0, 3} (54-54) 2025-06-25T14:29:20.428723Z :DEBUG: Take Data. Partition 2. Read: {0, 4} (55-55) 2025-06-25T14:29:20.428743Z :DEBUG: Take Data. Partition 2. Read: {0, 5} (56-56) 2025-06-25T14:29:20.428762Z :DEBUG: Take Data. Partition 2. Read: {0, 6} (57-57) 2025-06-25T14:29:20.428782Z :DEBUG: Take Data. Partition 2. Read: {0, 7} (58-58) 2025-06-25T14:29:20.428826Z :DEBUG: Take Data. Partition 2. Read: {0, 8} (59-59) 2025-06-25T14:29:20.428847Z :DEBUG: Take Data. Partition 2. Read: {0, 9} (60-60) 2025-06-25T14:29:20.428866Z :DEBUG: Take Data. Partition 2. Read: {0, 10} (61-61) 2025-06-25T14:29:20.428889Z :DEBUG: Take Data. Partition 2. Read: {0, 11} (62-62) 2025-06-25T14:29:20.428916Z :DEBUG: Take Data. Partition 2. Read: {0, 12} (63-63) 2025-06-25T14:29:20.428938Z :DEBUG: Take Data. Partition 2. Read: {0, 13} (64-64) 2025-06-25T14:29:20.428964Z :DEBUG: Take Data. Partition 2. Read: {0, 14} (65-65) 2025-06-25T14:29:20.428986Z :DEBUG: Take Data. Partition 2. Read: {0, 15} (66-66) 2025-06-25T14:29:20.429040Z :DEBUG: Take Data. Partition 2. Read: {0, 16} (67-67) 2025-06-25T14:29:20.429062Z :DEBUG: Take Data. Partition 2. 
Read: {0, 17} (68-68) 2025-06-25T14:29:20.429080Z :DEBUG: Take Data. Partition 2. Read: {0, 18} (69-69) 2025-06-25T14:29:20.429097Z :DEBUG: Take Data. Partition 2. Read: {0, 19} (70-70) 2025-06-25T14:29:20.429122Z :DEBUG: Take Data. Partition 2. Read: {0, 20} (71-71) 2025-06-25T14:29:20.429144Z :DEBUG: Take Data. Partition 2. Read: {0, 21} (72-72) 2025-06-25T14:29:20.429164Z :DEBUG: Take Data. Partition 2. Read: {1, 0} (73-73) 2025-06-25T14:29:20.429181Z :DEBUG: Take Data. Partition 2. Read: {1, 1} (74-74) 2025-06-25T14:29:20.429198Z :DEBUG: Take Data. Partition 2. Read: {1, 2} (75-75) 2025-06-25T14:29:20.429215Z :DEBUG: Take Data. Partition 2. Read: {1, 3} (76-76) 2025-06-25T14:29:20.429233Z :DEBUG: Take Data. Partition 2. Read: {1, 4} (77-77) 2025-06-25T14:29:20.429257Z :DEBUG: Take Data. Partition 2. Read: {1, 5} (78-78) 2025-06-25T14:29:20.429282Z :DEBUG: Take Data. Partition 2. Read: {1, 6} (79-79) 2025-06-25T14:29:20.429314Z :DEBUG: Take Data. Partition 2. Read: {1, 7} (80-80) 2025-06-25T14:29:20.429350Z :DEBUG: Take Data. Partition 2. Read: {1, 8} (81-81) 2025-06-25T14:29:20.429385Z :DEBUG: Take Data. Partition 2. Read: {1, 9} (82-82) 2025-06-25T14:29:20.429448Z :DEBUG: Take Data. Partition 2. Read: {1, 10} (83-83) 2025-06-25T14:29:20.429468Z :DEBUG: Take Data. Partition 2. Read: {1, 11} (84-84) 2025-06-25T14:29:20.429487Z :DEBUG: Take Data. Partition 2. Read: {1, 12} (85-85) 2025-06-25T14:29:20.429504Z :DEBUG: Take Data. Partition 2. Read: {1, 13} (86-86) 2025-06-25T14:29:20.429520Z :DEBUG: Take Data. Partition 2. Read: {1, 14} (87-87) 2025-06-25T14:29:20.429538Z :DEBUG: Take Data. Partition 2. Read: {1, 15} (88-88) 2025-06-25T14:29:20.429555Z :DEBUG: Take Data. Partition 2. Read: {1, 16} (89-89) 2025-06-25T14:29:20.429573Z :DEBUG: Take Data. Partition 2. Read: {1, 17} (90-90) 2025-06-25T14:29:20.429590Z :DEBUG: Take Data. Partition 2. Read: {1, 18} (91-91) 2025-06-25T14:29:20.429610Z :DEBUG: Take Data. Partition 2. Read: {1, 19} (92-92) 2025-06-25T14:29:20.429669Z :DEBUG: Take Data. Partition 2. Read: {1, 20} (93-93) 2025-06-25T14:29:20.429744Z :DEBUG: Take Data. Partition 2. Read: {1, 21} (94-94) 2025-06-25T14:29:20.429770Z :DEBUG: Take Data. Partition 2. Read: {1, 22} (95-95) 2025-06-25T14:29:20.429792Z :DEBUG: Take Data. Partition 2. Read: {1, 23} (96-96) 2025-06-25T14:29:20.429817Z :DEBUG: Take Data. Partition 2. Read: {1, 24} (97-97) 2025-06-25T14:29:20.429839Z :DEBUG: Take Data. Partition 2. Read: {1, 25} (98-98) 2025-06-25T14:29:20.429858Z :DEBUG: Take Data. Partition 2. Read: {1, 26} (99-99) 2025-06-25T14:29:20.429874Z :DEBUG: Take Data. Partition 2. Read: {1, 27} (100-100) 2025-06-25T14:29:20.429930Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-25T14:29:20.430091Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-25T14:29:20.431610Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.431639Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.431660Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.436434Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-25T14:29:20.437366Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.437560Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.438029Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.541354Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.541557Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-25T14:29:20.541616Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.541649Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-25T14:29:20.541711Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-25T14:29:20.750450Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2025-06-25T14:29:20.853350Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-25T14:29:20.853519Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-25T14:29:20.853690Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-25T14:29:20.854938Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.854976Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.854995Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.855398Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.855858Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.856004Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.856455Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.980441Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.981705Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-25T14:29:20.981792Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.981847Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-25T14:29:20.981949Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2025-06-25T14:29:20.982066Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-25T14:29:20.982316Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-25T14:29:20.982563Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 2025-06-25T14:29:20.982685Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> TSchemeShardSysViewTest::DropSysView >> TSubscriberTest::NotifyUpdate >> Cdc::HugeKeyDebezium [GOOD] >> Cdc::Drop[PqRunner] >> TSubscriberTest::ReconnectOnFailure >> TSubscriberTest::ReconnectOnFailure [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_transfer/unittest >> TTransferTests::Alter [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:02.966218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:02.966352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:02.966395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:02.966442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:02.966508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:02.966540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:02.966608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:02.966672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:02.977052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:02.987039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:03.456483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:03.456573Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:03.539865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:03.554134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:03.590354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:03.669705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:03.670122Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:03.688647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:03.712696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:03.793133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:03.804182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:03.850558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:03.850659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:03.863390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:03.863485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:03.863578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:03.863705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:03.876295Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:29:04.042535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:04.071730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:04.087916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:04.088036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:04.095744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:04.095935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at 
schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:04.099860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:04.107830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:04.108152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:04.108240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:04.108328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:04.108385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:04.113887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:04.113956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:04.114008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:04.130717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:04.130777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:04.130845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:04.130902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:04.135016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:04.146846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:04.154487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 
5000001 2025-06-25T14:29:04.156467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:04.156645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:04.159959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:04.164853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:04.164952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:04.165142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:04.165233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:04.168124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:04.168203Z node 1 :FLAT_TX_SCHEMESHARD ... 
lterReplication TConfigureParts opId# 104:0 HandleReply NKikimrReplication.TEvAlterReplicationResult OperationId { TxId: 104 PartId: 0 } Origin: 72075186233409547 Status: SUCCESS 2025-06-25T14:29:20.897993Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 3 -> 128 2025-06-25T14:29:20.898122Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:29:20.898188Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:695: Ack tablet strongly msg opId: 104:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:3 2025-06-25T14:29:20.909693Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:29:20.909773Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:29:20.909829Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 104:0 2025-06-25T14:29:20.910026Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [6:136:2157], Recipient [6:136:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:29:20.910067Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:29:20.910133Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:29:20.910188Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_replication.cpp:189: [72057594046678944] TAlterReplication TPropose opId# 104:0 ProgressState 2025-06-25T14:29:20.910241Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:29:20.910294Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 104 ready parts: 1/1 2025-06-25T14:29:20.910484Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 104 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:20.983742Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:29:20.983824Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 msg type: 269090816 2025-06-25T14:29:20.983938Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-06-25T14:29:20.984295Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269287424, 
Sender [6:131:2154], Recipient [6:262:2251] 2025-06-25T14:29:20.984373Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4975: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T14:29:20.984491Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:20.984651Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 25769805930 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:20.984725Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_replication.cpp:203: [72057594046678944] TAlterReplication TPropose opId# 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-06-25T14:29:20.984884Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 128 -> 240 2025-06-25T14:29:20.985126Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:29:20.985206Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:29:20.985284Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:695: Ack tablet strongly msg opId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 2025-06-25T14:29:20.993135Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:29:20.993222Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:385: Ack coordinator stepId#5000005 first txId#104 countTxs#1 2025-06-25T14:29:20.993295Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:355: Ack mediator stepId#5000005 2025-06-25T14:29:20.993349Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 104:0 2025-06-25T14:29:20.993630Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [6:136:2157], Recipient [6:136:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:29:20.993672Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation FAKE_COORDINATOR: Erasing txId 104 2025-06-25T14:29:20.993783Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:20.993839Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:29:20.994146Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:20.994206Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [6:211:2211], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-25T14:29:20.994836Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:29:20.994906Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-25T14:29:20.995043Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:29:20.995092Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:29:20.995150Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:29:20.995205Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:29:20.995247Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:29:20.995298Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-25T14:29:20.995355Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:29:20.995406Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:29:20.995450Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:29:20.995613Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:29:20.995666Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 1, subscribers: 0 2025-06-25T14:29:20.995712Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 4 2025-06-25T14:29:20.996370Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274137603, Sender [6:211:2211], Recipient [6:136:2157]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Version: 4 } 2025-06-25T14:29:20.996426Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-25T14:29:20.996550Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:29:20.996683Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:29:21.085365Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:29:21.085477Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at 
schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-25T14:29:21.085555Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:29:21.085703Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-25T14:29:21.085772Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:29:21.100829Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:29:21.101704Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:29:21.101764Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 >> TSubscriberTest::NotifyUpdate [GOOD] >> TSubscriberCombinationsTest::MigratedPathRecreation >> TSubscriberTest::Sync >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::ReconnectOnFailure [GOOD] Test command err: 2025-06-25T14:29:21.989581Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][2:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:29:21.992790Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-25T14:29:21.992922Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-25T14:29:21.993100Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:37:2066] 2025-06-25T14:29:21.993227Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-25T14:29:21.993298Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:38:2066] 2025-06-25T14:29:21.993348Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][2:36:2066][path] Set up state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:21.993741Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:39:2066] 2025-06-25T14:29:21.993839Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements 
} 2025-06-25T14:29:21.994559Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:37:2066] 2025-06-25T14:29:21.994630Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:21.994695Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:38:2066] 2025-06-25T14:29:21.994735Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:21.994782Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:39:2066] 2025-06-25T14:29:21.994818Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:22.009385Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:47:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-25T14:29:22.009496Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:37:2066] 2025-06-25T14:29:22.009584Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:22.009708Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:48:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-25T14:29:22.009755Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:49:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-25T14:29:22.009889Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:38:2066] 2025-06-25T14:29:22.009926Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:22.010007Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:39:2066] 2025-06-25T14:29:22.010041Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: 
subscriber.cpp:870: [main][2:36:2066][path] Ignore empty state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:22.010572Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:47:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2025-06-25T14:29:22.010648Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][2:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [2:37:2066] 2025-06-25T14:29:22.010721Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][2:36:2066][path] Update to strong state: owner# [2:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } |76.3%| [TA] $(B)/ydb/core/tx/schemeshard/ut_transfer/test-results/unittest/{meta.json ... results_accumulator.log} >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime >> DataShardSnapshots::LockedWriteBulkUpsertConflict+UseSink [GOOD] >> DataShardSnapshots::LockedWriteBulkUpsertConflict-UseSink >> TSubscriberTest::InvalidNotification >> TSchemeShardSysViewTest::CreateSysView [GOOD] >> TSubscriberTest::SyncPartial >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime [GOOD] >> ReadSessionImplTest::PartitionStreamStatus [GOOD] >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] >> TSubscriberCombinationsTest::MigratedPathRecreation [GOOD] >> TSubscriberTest::Boot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::NotifyUpdate [GOOD] Test command err: 2025-06-25T14:29:21.929156Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:29:21.930585Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-25T14:29:21.930663Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-25T14:29:21.930690Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-25T14:29:21.930733Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-25T14:29:21.930809Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-25T14:29:21.930871Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:21.930917Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path 
Version: 0 }: sender# [1:39:2066] 2025-06-25T14:29:21.930963Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:21.931360Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2025-06-25T14:29:21.931424Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:37:2066] 2025-06-25T14:29:21.931467Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][1:36:2066][path] Update to strong state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } >> TSchemeShardSysViewTest::DropSysView [GOOD] >> TTicketParserTest::NebiusAccessKeySignatureUnsupported [GOOD] >> TSubscriberTest::SyncWithOutdatedReplica >> TSubscriberTest::Sync [GOOD] >> TSubscriberTest::InvalidNotification [GOOD] >> KqpDataIntegrityTrails::Upsert-LogEnabled-UseSink [GOOD] >> TSubscriberTest::SyncPartial [GOOD] >> TSubscriberTest::Boot [GOOD] >> TSubscriberCombinationsTest::CombinationsRootDomain ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] Test command err: 2025-06-25T14:29:20.418255Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.418291Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.418327Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.418965Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.420458Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.432223Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.434448Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.435376Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:29:20.435806Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:29:20.435967Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-25T14:29:20.436074Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:20.436147Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.436181Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (2-2) 2025-06-25T14:29:20.436214Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-25T14:29:20.436235Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-25T14:29:20.437687Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.437716Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.437735Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.438369Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.438860Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.438971Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.439256Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-25T14:29:20.440103Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:29:20.440288Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-25T14:29:20.440600Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-25T14:29:20.440856Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-25T14:29:20.440985Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.441023Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:29:20.441058Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:29:20.441191Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 GOT RANGE 0 3 Getting new event 2025-06-25T14:29:20.441284Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T14:29:20.441315Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-25T14:29:20.441346Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:29:20.441471Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 GOT RANGE 3 5 Getting new event 2025-06-25T14:29:20.441526Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-25T14:29:20.441545Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-25T14:29:20.441564Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:29:20.441649Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 GOT RANGE 5 7 Getting new event 2025-06-25T14:29:20.441696Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-25T14:29:20.441717Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-25T14:29:20.441739Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:29:20.441830Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 GOT RANGE 7 9 2025-06-25T14:29:20.443407Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.443451Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.443477Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.444181Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-25T14:29:20.446042Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.446215Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.446496Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 100 Compressed message data size: 91 2025-06-25T14:29:20.447615Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:29:20.447897Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-25T14:29:20.448264Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-25T14:29:20.448507Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-25T14:29:20.448654Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.448711Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:29:20.448832Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 2). Partition stream id: 1 GOT RANGE 0 2 Getting new event 2025-06-25T14:29:20.448924Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:29:20.448958Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:29:20.449028Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [2, 3). Partition stream id: 1 GOT RANGE 2 3 Getting new event 2025-06-25T14:29:20.449087Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T14:29:20.449111Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:29:20.449170Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 4). Partition stream id: 1 GOT RANGE 3 4 Getting new event 2025-06-25T14:29:20.449216Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-25T14:29:20.449232Z :DEBUG: [db] [sessionid] [cluster] The application data ... er". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 190 SeqNo: 231 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 191 SeqNo: 232 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 192 SeqNo: 233 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 193 SeqNo: 234 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 194 SeqNo: 235 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 195 SeqNo: 236 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 196 SeqNo: 237 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 197 SeqNo: 238 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 198 SeqNo: 239 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 199 SeqNo: 240 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 200 SeqNo: 241 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:29:22.694721Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 201). Partition stream id: 1 GOT RANGE 0 201 2025-06-25T14:29:22.764504Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-25T14:29:22.764606Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-25T14:29:22.764680Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:22.766615Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:22.767412Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:22.767678Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-25T14:29:22.768134Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 1000000 Compressed message data size: 3028 Post function Getting new event 2025-06-25T14:29:22.875372Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-10) 2025-06-25T14:29:22.877406Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:22.885156Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:29:22.887764Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T14:29:22.888657Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-25T14:29:22.892914Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-25T14:29:22.893799Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-25T14:29:22.894678Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (7-7) 2025-06-25T14:29:22.895575Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (8-8) 2025-06-25T14:29:22.904153Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (9-9) 2025-06-25T14:29:22.905171Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (10-10) 2025-06-25T14:29:22.905304Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 10, size 10000000 bytes 2025-06-25T14:29:22.910055Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 9 SeqNo: 50 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 51 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:29:22.913784Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 11). Partition stream id: 1 GOT RANGE 0 11 2025-06-25T14:29:22.925001Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:22.925056Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:22.925084Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:22.925404Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-25T14:29:22.940795Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:22.940976Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:22.941348Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:22.941996Z :DEBUG: [db] [sessionid] [cluster] Requesting status for partition stream id: 1 2025-06-25T14:29:22.950516Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:22.950562Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:22.950591Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:22.951050Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:22.965872Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:22.966056Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:22.966809Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:22.966983Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:22.972430Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:22.972521Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-25T14:29:22.972757Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::CreateSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:22.153117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:22.153218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:22.153258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:22.153299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:22.153344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:22.153374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:22.153419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:22.153497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:22.154372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:22.154715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:22.244695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:22.244774Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:22.262023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:22.262529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:22.262709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:22.269100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:22.269482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:22.270252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 
72057594046678944 2025-06-25T14:29:22.270547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:22.274498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:22.274693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:22.275883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:22.275953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:22.276100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:22.276145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:22.276188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:22.276275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.288103Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:29:22.465031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:22.465296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.465518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:22.465567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:22.465840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:22.465925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:22.468850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:22.469097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:22.469335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.469392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:22.469439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:22.469512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:22.473704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.473784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:22.473828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:22.476213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.476284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.476377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:22.476462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:22.480398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:22.483123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:22.483360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:22.484552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:22.484711Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:22.484768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:22.485217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:22.485284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:22.485503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:22.485594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:22.488512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:22.488580Z node 1 :FLAT_TX_SCHEMESHARD ... ient: [1:15:2062] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:342:2058] recipient: [1:340:2327] Leader for TabletID 72057594046678944 is [1:343:2328] sender: [1:344:2058] recipient: [1:340:2327] 2025-06-25T14:29:22.620460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:22.620630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:22.620691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:22.620737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:22.620783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:22.620811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:22.620876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:22.620961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:22.621873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:22.622369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:22.638844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:22.640229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:22.640449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:22.640642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:22.640679Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:22.640800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:22.641614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-25T14:29:22.641704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: .sys, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:29:22.641752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: .sys, child name: new_sys_view, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:29:22.641825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.642080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.642740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.642822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:29:22.643095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-25T14:29:22.643322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.643475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.643639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.643855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.643982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 
0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.644115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.644443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.644611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.645047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.645120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.645313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.645403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.645510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.645727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.645824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.646013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.646295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.646399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.646546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.646601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.646653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.655119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:22.657761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:22.657838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:22.658344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:22.658422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, 
domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:22.658479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:22.664283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:343:2328] sender: [1:404:2058] recipient: [1:15:2062] 2025-06-25T14:29:22.737509Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:22.737780Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 310us result status StatusSuccess 2025-06-25T14:29:22.738155Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::DropSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:22.289260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, 
WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:22.289356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:22.289395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:22.289438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:22.289482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:22.289509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:22.289557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:22.289632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:22.290379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:22.290733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:22.376667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:22.376742Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:22.420625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:22.421159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:22.421343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:22.428422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:22.428925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:22.429671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:22.429979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:22.434270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:22.434471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:22.435764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:22.435844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:22.436018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:22.436071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:22.436119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:22.436220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.443922Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:29:22.590571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:22.590817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.591020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:22.591069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:22.591343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:22.591414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:22.599695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:22.599945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:22.600198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.600255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 
ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:22.600336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:22.600402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:22.607846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.607928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:22.607996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:22.617323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.617399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.617466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:22.617539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:22.627726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:22.637125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:22.637401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:22.638457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:22.638616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:22.638670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:22.639015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:22.639089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:22.639279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:22.639372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:22.651944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:22.651997Z node 1 :FLAT_TX_SCHEMESHARD ... se BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:22.788068Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 268us result status StatusPathDoesNotExist 2025-06-25T14:29:22.788259Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.sys/new_sys_view\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.sys\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/.sys/new_sys_view" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.sys" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: ".sys" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:29:22.789622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:363:2058] recipient: [1:106:2139] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:366:2058] recipient: [1:15:2062] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:367:2058] recipient: [1:365:2352] Leader for TabletID 72057594046678944 is [1:368:2353] sender: [1:369:2058] recipient: [1:365:2352] 2025-06-25T14:29:22.851087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:22.851215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:22.851260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:22.851296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:22.851337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:22.851367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:22.851428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:22.851541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:22.852339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:22.852655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:22.867943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:22.869329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:22.869544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:22.869772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:22.869806Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:22.869916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:22.870704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 2, at schemeshard: 72057594046678944 2025-06-25T14:29:22.870798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: .sys, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:29:22.870895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.870973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.871431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.871526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:29:22.871824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.871984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.872090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 
72057594046678944 2025-06-25T14:29:22.872195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.872291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.872428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.872714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.872846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.873298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.873377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.873558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.873660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.873770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.873970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.874060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.874233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.874463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.874564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.874695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.874762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.874821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:22.880057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:22.882440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:22.882522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:22.883858Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:22.883928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:22.883985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:22.884855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:368:2353] sender: [1:429:2058] recipient: [1:15:2062] 2025-06-25T14:29:22.953422Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:22.953702Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 305us result status StatusPathDoesNotExist 2025-06-25T14:29:22.953900Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.sys/new_sys_view\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.sys\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/.sys/new_sys_view" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.sys" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: ".sys" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSubscriberTest::SyncWithOutdatedReplica [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::Sync [GOOD] Test command err: 2025-06-25T14:29:22.848241Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:37:2067][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:29:22.850415Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2025-06-25T14:29:22.850523Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:6:2053] 2025-06-25T14:29:22.850597Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:9:2056] 2025-06-25T14:29:22.850673Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:38:2067] 
2025-06-25T14:29:22.850727Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:39:2067] 2025-06-25T14:29:22.850779Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:37:2067][path] Set up state: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:22.850866Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:40:2067] 2025-06-25T14:29:22.850917Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:22.851034Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:37:2067][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 1 2025-06-25T14:29:22.851149Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2067], cookie# 1 2025-06-25T14:29:22.851267Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2067], cookie# 1 2025-06-25T14:29:22.851330Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:40:2067], cookie# 1 2025-06-25T14:29:22.851387Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:3:2050], cookie# 1 2025-06-25T14:29:22.851419Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:6:2053], cookie# 1 2025-06-25T14:29:22.851448Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:9:2056], cookie# 1 2025-06-25T14:29:22.851495Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:38:2067], cookie# 1 2025-06-25T14:29:22.851533Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:29:22.851569Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:39:2067], cookie# 1 2025-06-25T14:29:22.851603Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 
2025-06-25T14:29:22.851650Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:40:2067], cookie# 1 2025-06-25T14:29:22.851693Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:37:2067][path] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::Boot [GOOD] Test command err: 2025-06-25T14:29:22.536501Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2025-06-25T14:29:22.536576Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:3:2050] Successful handshake: owner# 800, generation# 1 2025-06-25T14:29:22.536742Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2025-06-25T14:29:22.536773Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:3:2050] Commit generation: owner# 800, generation# 1 2025-06-25T14:29:22.536824Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [1:37:2067] 2025-06-25T14:29:22.536854Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 900, generation# 1 2025-06-25T14:29:22.537111Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [1:37:2067] 2025-06-25T14:29:22.537145Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:6:2053] Commit generation: owner# 900, generation# 1 2025-06-25T14:29:22.537236Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:39:2069][/root/db/dir_inside] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:29:22.537695Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 1 }: sender# [1:43:2069] 2025-06-25T14:29:22.537731Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:3:2050] Upsert description: path# /root/db/dir_inside 2025-06-25T14:29:22.537835Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:3:2050] Subscribe: subscriber# [1:43:2069], path# /root/db/dir_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:29:22.538022Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 1 }: sender# [1:44:2069] 2025-06-25T14:29:22.538045Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:6:2053] Upsert description: path# /root/db/dir_inside 2025-06-25T14:29:22.538080Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:44:2069], path# /root/db/dir_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:29:22.538249Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 1 }: sender# [1:45:2069] 2025-06-25T14:29:22.538270Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:9:2056] Upsert description: path# /root/db/dir_inside 2025-06-25T14:29:22.538301Z node 1 
:SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:9:2056] Subscribe: subscriber# [1:45:2069], path# /root/db/dir_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:29:22.538378Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:3:2050] 2025-06-25T14:29:22.538444Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:43:2069] 2025-06-25T14:29:22.538488Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:44:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:6:2053] 2025-06-25T14:29:22.538531Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:44:2069] 2025-06-25T14:29:22.538571Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:45:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:9:2056] 2025-06-25T14:29:22.538610Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:45:2069] 2025-06-25T14:29:22.538685Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:40:2069] 2025-06-25T14:29:22.538776Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:41:2069] 2025-06-25T14:29:22.538816Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:39:2069][/root/db/dir_inside] Set up state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:22.538868Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside Version: 0 }: sender# [1:42:2069] 2025-06-25T14:29:22.538902Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:39:2069][/root/db/dir_inside] Ignore empty state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== !argsLeft.IsDeletion 2025-06-25T14:29:22.539138Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:3:2050] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:36:2066], cookie# 0, event size# 118 2025-06-25T14:29:22.539212Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:3:2050] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], deletion# false 2025-06-25T14:29:22.547755Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:3:2050] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 800, LocalPathId: 1111], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-06-25T14:29:22.547971Z node 1 
:SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 800, LocalPathId: 1111] Version: 1 }: sender# [1:3:2050] 2025-06-25T14:29:22.548033Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [1:43:2069] 2025-06-25T14:29:22.548092Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 800, LocalPathId: 1111] Version: 1 }: sender# [1:40:2069] 2025-06-25T14:29:22.548150Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][1:39:2069][/root/db/dir_inside] Update to strong state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 800, LocalPathId: 1111], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() < argsRight.GetSuperId() =========== !argsRight.IsDeletion 2025-06-25T14:29:22.548399Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:6:2053] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 900 Generation: 1 }: sender# [1:37:2067], cookie# 0, event size# 117 2025-06-25T14:29:22.548464Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:6:2053] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], deletion# false 2025-06-25T14:29:22.548534Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:6:2053] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 900, LocalPathId: 11], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-06-25T14:29:22.548688Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:44:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 900, LocalPathId: 11] Version: 1 }: sender# [1:6:2053] 2025-06-25T14:29:22.548751Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [1:44:2069] 2025-06-25T14:29:22.548808Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/db/dir_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/db/dir_inside PathId: [OwnerId: 900, LocalPathId: 11] Version: 1 }: sender# [1:41:2069] 2025-06-25T14:29:22.548896Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][1:39:2069][/root/db/dir_inside] Path was updated to new version: owner# [1:38:2068], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 800, LocalPathId: 1111], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 900, LocalPathId: 11], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:23.049408Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][3:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:29:23.049972Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: 
[replica][3:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:3:2050] 2025-06-25T14:29:23.050030Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:6:2053] 2025-06-25T14:29:23.050096Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:9:2056] 2025-06-25T14:29:23.050164Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:37:2066] 2025-06-25T14:29:23.050239Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:38:2066] 2025-06-25T14:29:23.050291Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:36:2066][path] Set up state: owner# [3:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:23.050337Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [3:39:2066] 2025-06-25T14:29:23.050378Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:36:2066][path] Ignore empty state: owner# [3:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::InvalidNotification [GOOD] Test command err: 2025-06-25T14:29:22.971194Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:29:22.973444Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-25T14:29:22.973556Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-25T14:29:22.973601Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-25T14:29:22.973665Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-25T14:29:22.973769Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-25T14:29:22.973863Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:22.973940Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-25T14:29:22.974004Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# 
[1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:22.974169Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { PathId: [OwnerId: 1, LocalPathId: 1] Version: 0 }: sender# [1:35:2065] 2025-06-25T14:29:22.974220Z node 1 :SCHEME_BOARD_SUBSCRIBER ERROR: subscriber.cpp:821: [main][1:36:2066][path] Suspicious NKikimrSchemeBoard.TEvNotify { PathId: [OwnerId: 1, LocalPathId: 1] Version: 0 }: sender# [1:35:2065] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::SyncPartial [GOOD] Test command err: 2025-06-25T14:29:23.046129Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:29:23.049131Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-25T14:29:23.049241Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-25T14:29:23.049282Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-25T14:29:23.049348Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-25T14:29:23.049446Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-25T14:29:23.049521Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:23.049597Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-25T14:29:23.049652Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:23.049930Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:36:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 1 2025-06-25T14:29:23.050071Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:37:2066], cookie# 1 2025-06-25T14:29:23.050134Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2066], cookie# 1 2025-06-25T14:29:23.050221Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# 
[1:39:2066], cookie# 1 2025-06-25T14:29:23.050300Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:6:2053], cookie# 1 2025-06-25T14:29:23.050351Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:9:2056], cookie# 1 2025-06-25T14:29:23.050405Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:37:2066], cookie# 1 2025-06-25T14:29:23.050459Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:36:2066][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-25T14:29:23.050513Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-25T14:29:23.050565Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:23.050627Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:38:2066], cookie# 1 2025-06-25T14:29:23.050657Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:36:2066][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 1 2025-06-25T14:29:23.050685Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:39:2066], cookie# 1 2025-06-25T14:29:23.050729Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:36:2066][path] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 1, partial# 0 2025-06-25T14:29:23.050842Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:36:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 2 2025-06-25T14:29:23.050958Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:37:2066], cookie# 2 2025-06-25T14:29:23.050987Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:36:2066][path] Sync is in progress: cookie# 2, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-25T14:29:23.051017Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2066], cookie# 2 2025-06-25T14:29:23.051049Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2066], cookie# 2 2025-06-25T14:29:23.051134Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:9:2056], cookie# 2 
2025-06-25T14:29:23.051182Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:38:2066], cookie# 2 2025-06-25T14:29:23.051218Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:36:2066][path] Sync is in progress: cookie# 2, ring group# 0, size# 3, half# 1, successes# 0, failures# 2 2025-06-25T14:29:23.051255Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-25T14:29:23.051297Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:23.051332Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 0 }: sender# [1:39:2066], cookie# 2 2025-06-25T14:29:23.051367Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:986: [main][1:36:2066][path] Sync is done in the ring group: cookie# 2, ring group# 0, size# 3, half# 1, successes# 1, failures# 2, partial# 1 2025-06-25T14:29:23.051402Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:991: [main][1:36:2066][path] Sync is incomplete in one of the ring groups: cookie# 2 2025-06-25T14:29:23.051493Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:36:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 3 2025-06-25T14:29:23.051601Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:37:2066], cookie# 3 2025-06-25T14:29:23.051629Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:36:2066][path] Sync is in progress: cookie# 3, ring group# 0, size# 3, half# 1, successes# 0, failures# 1 2025-06-25T14:29:23.051661Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:38:2066], cookie# 3 2025-06-25T14:29:23.051685Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:36:2066][path] Sync is in progress: cookie# 3, ring group# 0, size# 3, half# 1, successes# 0, failures# 2 2025-06-25T14:29:23.051730Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2066], cookie# 3 2025-06-25T14:29:23.051802Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 0 Partial: 1 }: sender# [1:39:2066], cookie# 3 2025-06-25T14:29:23.051844Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:986: [main][1:36:2066][path] Sync is done in the ring group: cookie# 3, ring group# 0, size# 3, half# 1, successes# 0, failures# 3, partial# 1 2025-06-25T14:29:23.051905Z node 1 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:991: [main][1:36:2066][path] Sync is incomplete in one of the ring groups: cookie# 3 2025-06-25T14:29:23.051956Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle 
NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-25T14:29:23.051995Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } >> TSubscriberTest::StrongNotificationAfterCommit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::SyncWithOutdatedReplica [GOOD] Test command err: 2025-06-25T14:29:23.227546Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:37:2067][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:29:23.229880Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 2] Version: 2 }: sender# [1:3:2050] 2025-06-25T14:29:23.229991Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:6:2053] 2025-06-25T14:29:23.230050Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:9:2056] 2025-06-25T14:29:23.230120Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 2] Version: 2 }: sender# [1:38:2067] 2025-06-25T14:29:23.230173Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:39:2067] 2025-06-25T14:29:23.230234Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:37:2067][path] Set up state: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:23.230360Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:40:2067] 2025-06-25T14:29:23.230419Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:23.230547Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:37:2067][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:35:2065], cookie# 1 2025-06-25T14:29:23.230653Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2067], cookie# 1 2025-06-25T14:29:23.230787Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: 
[replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:39:2067], cookie# 1 2025-06-25T14:29:23.230839Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:40:2067], cookie# 1 2025-06-25T14:29:23.230908Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:3:2050], cookie# 1 2025-06-25T14:29:23.230956Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:6:2053], cookie# 1 2025-06-25T14:29:23.230985Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:9:2056], cookie# 1 2025-06-25T14:29:23.231035Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:38:2067], cookie# 1 2025-06-25T14:29:23.231097Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:29:23.231154Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:39:2067], cookie# 1 2025-06-25T14:29:23.231191Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:37:2067][path] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:29:23.231228Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:40:2067], cookie# 1 2025-06-25T14:29:23.231269Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:37:2067][path] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAccessKeySignatureUnsupported [GOOD] Test command err: 2025-06-25T14:28:47.198260Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893858896372778:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:47.198325Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018f6/r3tmp/tmpnAfhMP/pdisk_1.dat 2025-06-25T14:28:47.505595Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:47.506601Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893858896372759:2080] 1750861727197593 != 1750861727197596 TServer::EnableGrpc on GrpcPort 16871, node 1 2025-06-25T14:28:47.584589Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:47.584714Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:47.586399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:47.596688Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:47.596714Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:47.596724Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:47.596861Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26328 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:47.887775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:47.958677Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 2025-06-25T14:28:47.967252Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:28:47.967287Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:47.967996Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****WtSw (B7788DBC) () has now retryable error message 'Security state is empty' 2025-06-25T14:28:47.968216Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:28:47.968232Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:47.968467Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****WtSw (B7788DBC) () has now retryable error message 'Security state is empty' 2025-06-25T14:28:47.968481Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-06-25T14:28:47.968495Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-06-25T14:28:47.968519Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket eyJh****WtSw (B7788DBC): Security state is empty 2025-06-25T14:28:48.206916Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:49.212491Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****WtSw (B7788DBC) 2025-06-25T14:28:49.212796Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:28:49.212820Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:49.213003Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****WtSw (B7788DBC) () has now retryable error message 'Security state is empty' 2025-06-25T14:28:49.213019Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-06-25T14:28:50.969032Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:28:52.198723Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893858896372778:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:52.198848Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:53.214521Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****WtSw (B7788DBC) 2025-06-25T14:28:53.214755Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:28:53.214770Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:53.215767Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****WtSw (B7788DBC) () has now valid token of 
user1 2025-06-25T14:28:53.215783Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-25T14:28:58.222332Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****WtSw (B7788DBC) 2025-06-25T14:28:58.222708Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****WtSw (B7788DBC) () has now valid token of user1 2025-06-25T14:28:58.628524Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893907912394174:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:58.628573Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018f6/r3tmp/tmpKJODBt/pdisk_1.dat 2025-06-25T14:28:58.757996Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11954, node 2 2025-06-25T14:28:58.783565Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:58.783940Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:58.792521Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:58.852811Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:58.852836Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:58.852851Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:58.853009Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19964 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:59.129862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:59.143876Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-25T14:28:59.143964Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000048b08] Connect to grpc://localhost:21669 2025-06-25T14:28:59.146978Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000048b08] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-06-25T14:28:59.159232Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000048b08] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-06-25T14:28:59.159465Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:29:01.806473Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519893918924635486:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:01.806538Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;pa ... line=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:02.816863Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000f0408] Status 14 Service Unavailable 2025-06-25T14:29:02.817304Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' NebiusAccessService::Authenticate request iam_token: "user1" 2025-06-25T14:29:03.812462Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket **** (8E120919) 2025-06-25T14:29:03.812510Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-25T14:29:03.812866Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000f0408] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-25T14:29:03.817070Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000f0408] Status 14 Service Unavailable 2025-06-25T14:29:03.817353Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-25T14:29:06.807154Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519893918924635486:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:06.807230Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:06.817620Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket **** (8E120919) 2025-06-25T14:29:06.817667Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-25T14:29:06.817849Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000f0408] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-06-25T14:29:06.821571Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000f0408] Response 
AuthenticateResponse { account { user_account { id: "user1" } } } 2025-06-25T14:29:06.821823Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:29:14.958720Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519893975768897846:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:14.958949Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018f6/r3tmp/tmpOOVMsH/pdisk_1.dat 2025-06-25T14:29:15.159815Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:15.159912Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:15.165514Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:15.195030Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24340, node 4 2025-06-25T14:29:15.347010Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:15.347036Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:15.347047Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:15.347228Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61553 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:15.687213Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:29:15.696300Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:15.705482Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-25T14:29:15.705510Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:15.705518Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:29:15.705547Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-25T14:29:15.705591Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5170000f4688] Connect to grpc://localhost:62909 2025-06-25T14:29:15.706363Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000f4688] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response 14: "Service Unavailable" 2025-06-25T14:29:15.729570Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5170000f4688] Status 14 Service Unavailable NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-06-25T14:29:15.731157Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-25T14:29:15.731200Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-25T14:29:15.731344Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5170000f4688] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-25T14:29:15.733813Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5170000f4688] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-06-25T14:29:15.734195Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-25T14:29:15.966275Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:19.112767Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519893998982202768:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:19.112833Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018f6/r3tmp/tmpvhFb4C/pdisk_1.dat 2025-06-25T14:29:19.265420Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:19.267853Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519893998982202747:2080] 1750861759111969 != 1750861759111972 2025-06-25T14:29:19.277841Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:19.277928Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-25T14:29:19.279598Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15607, node 5 2025-06-25T14:29:19.324936Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:19.324961Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:19.324970Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:19.325091Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5188 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:19.655724Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:19.666671Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:19.670388Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:908: Ticket AKIA****MPLE (B3EDC139): Access key signature is not supported >> TSubscriberTest::StrongNotificationAfterCommit [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 10113, MsgBus: 7664 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00177e/r3tmp/tmp4X9skp/pdisk_1.dat TServer::EnableGrpc on GrpcPort 10113, node 1 TClient is connected to server localhost:7664 TClient is connected to server localhost:7664 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> TSubscriberTest::NotifyDelete >> TSchemeShardSysViewTest::AsyncDropSameSysView |76.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |76.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |76.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_transfer/test-results/unittest/{meta.json ... results_accumulator.log} |76.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base >> Cdc::UpdateStream [GOOD] >> Cdc::UpdateShardCount >> TSubscriberTest::NotifyDelete [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::StrongNotificationAfterCommit [GOOD] Test command err: 2025-06-25T14:29:24.160706Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:36:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:29:24.162617Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-25T14:29:24.162709Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-25T14:29:24.162746Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-25T14:29:24.162804Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-25T14:29:24.162889Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-25T14:29:24.162966Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:36:2066][path] Set up state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:24.163022Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:39:2066] 2025-06-25T14:29:24.163090Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: 
AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:24.163460Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-25T14:29:24.163530Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2066] 2025-06-25T14:29:24.163626Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][1:36:2066][path] Update to strong state: owner# [1:35:2065], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:24.163782Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-25T14:29:24.163845Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:36:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:38:2066] 2025-06-25T14:29:24.163886Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:36:2066][path] Ignore empty state: owner# [1:35:2065], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } >> TTicketParserTest::LoginCheckRemovedUser [GOOD] >> TTicketParserTest::LoginEmptyTicketBad |76.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::NotifyDelete [GOOD] Test command err: 2025-06-25T14:29:24.701376Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:37:2067][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:29:24.704175Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2025-06-25T14:29:24.704341Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:6:2053] 2025-06-25T14:29:24.704413Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:9:2056] 2025-06-25T14:29:24.704478Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:38:2067] 2025-06-25T14:29:24.704540Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:39:2067] 2025-06-25T14:29:24.704599Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:37:2067][path] Set up state: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: 
AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:24.704707Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:40:2067] 2025-06-25T14:29:24.704769Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:24.705101Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:9:2056] 2025-06-25T14:29:24.705226Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:3:2050] 2025-06-25T14:29:24.705353Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:42:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:6:2053] 2025-06-25T14:29:24.705426Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:40:2067] 2025-06-25T14:29:24.705532Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][1:37:2067][path] Path was updated to new version: owner# [1:35:2065], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:24.705616Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:38:2067] 2025-06-25T14:29:24.705673Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:24.705723Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:37:2067][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 18446744073709551615 }: sender# [1:39:2067] 2025-06-25T14:29:24.705770Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:37:2067][path] Path was already updated: owner# [1:35:2065], state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { 
Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } |76.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest |76.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> DataShardSnapshots::MvccSnapshotTailCleanup [GOOD] >> DataShardSnapshots::MvccSnapshotReadWithLongPlanQueue >> TSchemeShardSysViewTest::AsyncDropSameSysView [GOOD] >> TTopicWriterTests::TestEnterMessage_EmptyInput [GOOD] >> TTopicWriterTests::TestEnterMessage_No_Base64_Transform [GOOD] >> TTopicWriterTests::TestEnterMessage_OnlyDelimiters [GOOD] >> TTopicWriterTests::TestEnterMessage_SomeBinaryData [GOOD] >> TTopicWriterTests::TestTopicWriterParams_No_Delimiter [GOOD] >> TTopicWriterTests::TestTopicWriterParams_InvalidDelimiter [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_Invalid_Encode [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform [GOOD] >> TTopicReaderTests::TestRun_ReadOneMessage ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::AsyncDropSameSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:25.299831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:25.299928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:25.299964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:25.299999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:25.300040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:25.300067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:25.300113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:25.300186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:25.300922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:25.301253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 
2025-06-25T14:29:25.369218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:25.369313Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:25.390406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:25.390941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:25.391140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:25.398884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:25.399316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:25.399971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:25.400285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:25.404374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:25.404586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:25.405791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:25.405854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:25.406006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:25.406052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:25.406093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:25.406184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:25.413831Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:29:25.541223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:25.541465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:25.541675Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:25.541725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:25.542065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:25.542157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:25.545241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:25.545426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:25.545618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:25.545672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:25.545720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:25.545775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:25.548220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:25.548289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:25.548344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:25.550090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:25.550129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:25.550190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:25.550245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:25.559319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { 
TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:25.561943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:25.562161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:25.563221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:25.563367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:25.563424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:25.563769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:25.563824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:25.564017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:25.564097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:25.566548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:25.566597Z node 1 :FLAT_TX_SCHEMESHARD ... 
78944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:29:25.664539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:25.664583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 1 2025-06-25T14:29:25.664628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-25T14:29:25.664653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 3 FAKE_COORDINATOR: Erasing txId 103 2025-06-25T14:29:25.665033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:29:25.665094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-25T14:29:25.665186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:29:25.665217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:29:25.665251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:29:25.665281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:29:25.665315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-25T14:29:25.665348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:29:25.665380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:29:25.665426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:29:25.665515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:29:25.665555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 103, publications: 3, subscribers: 0 2025-06-25T14:29:25.665587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:29:25.665613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-25T14:29:25.665635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-25T14:29:25.666922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle 
TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:25.667046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:25.667097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:29:25.667144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:29:25.667187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:29:25.667968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:25.668058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:25.668089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:29:25.668124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T14:29:25.668156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:29:25.669596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:25.669707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:25.669735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:29:25.669763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-25T14:29:25.669800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:29:25.669882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-25T14:29:25.670366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:29:25.670412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:29:25.670483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:29:25.674636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:29:25.674842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:29:25.676621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:29:25.676752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 2025-06-25T14:29:25.677083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:29:25.677129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 TestWaitNotification wait txId: 104 2025-06-25T14:29:25.677246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-25T14:29:25.677274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-25T14:29:25.677759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:29:25.677859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-25T14:29:25.677900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:29:25.677933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:362:2351] 2025-06-25T14:29:25.678045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:29:25.678065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:362:2351] TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 2025-06-25T14:29:25.678568Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:25.678812Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 247us result status StatusPathDoesNotExist 2025-06-25T14:29:25.678991Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/.sys/new_sys_view\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/.sys\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/.sys/new_sys_view" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/.sys" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: ".sys" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TTopicWriterTests::TestEnterMessage_ZeroSymbol_Delimited [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_NewlineDelimited [GOOD] >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NewlineDelimited |76.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestTopicWriterParams_InvalidDelimiter [GOOD] |76.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_No_Base64_Transform [GOOD] |76.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_SomeBinaryData [GOOD] >> KqpCost::OltpWriteRow+isSink [GOOD] |76.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform [GOOD] |76.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query |76.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query |76.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query >> TTopicWriterTests::TestTopicWriterParams_Format_NewlineDelimited [GOOD] >> TTopicWriterTests::TestTopicWriterParams_Format_Concatenated [GOOD] >> Cdc::Drop[PqRunner] [GOOD] >> Cdc::Drop[YdsRunner] >> AsyncIndexChangeExchange::ShouldDeliverChangesOnSplitMerge [GOOD] >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowByCount |76.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_NewlineDelimited [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleDataShardReboot [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactAfterDrop |76.4%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TTopicWriterTests::TestEnterMessage_1KiB_Newline_Delimiter [GOOD] >> TTopicWriterTests::TestEnterMessage_1KiB_Newline_Delimited_With_Two_Delimiters_In_A_Row [GOOD] |76.4%| [TM] {asan, default-linux-x86_64, release} 
ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestTopicWriterParams_Format_Concatenated [GOOD] >> TTopicWriterTests::TestEnterMessage_1KiB_No_Delimiter [GOOD] >> TTopicWriterTests::TestEnterMessage_Custom_Delimiter_Delimited [GOOD] >> TNebiusAccessServiceTest::Authorize >> DataShardSnapshots::LockedWriteBulkUpsertConflict-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitAborted+UseSink >> Cdc::NaN[YdsRunner] [GOOD] >> Cdc::NaN[TopicRunner] >> TNebiusAccessServiceTest::Authorize [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OltpWriteRow+isSink [GOOD] Test command err: Trying to start YDB, gRPC: 11328, MsgBus: 12070 2025-06-25T14:29:19.869457Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893997253422576:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:19.869521Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001692/r3tmp/tmpSGaBE7/pdisk_1.dat 2025-06-25T14:29:20.271380Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893997253422556:2080] 1750861759868534 != 1750861759868537 2025-06-25T14:29:20.285409Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11328, node 1 2025-06-25T14:29:20.359000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:20.359131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:20.382644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:20.440935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:20.440968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:20.440979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:20.441092Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12070 2025-06-25T14:29:20.925565Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12070 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:21.269882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:21.288052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:21.306691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:21.487022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:21.679588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:21.757843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:23.546613Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894014433293371:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:23.546729Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:23.878481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:23.959002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:24.029262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:24.115540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:24.190774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:24.235016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:24.282141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:24.356682Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894018728261335:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:24.356753Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:24.357174Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894018728261340:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:24.361727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:24.377086Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894018728261342:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:24.459933Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894018728261393:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:24.869601Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893997253422576:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:24.869662Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:25.388715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) query_phases { duration_us: 4048 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1467 affected_shards: 1 } compilation { duration_us: 54656 cpu_time_us: 52194 } process_cpu_time_us: 424 total_duration_us: 62247 total_cpu_time_us: 54085 query_phases { duration_us: 5763 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1339 affected_shards: 1 } compilation { duration_us: 75571 cpu_time_us: 72906 } process_cpu_time_us: 508 total_duration_us: 87714 total_cpu_time_us: 74753 2025-06-25T14:29:25.839938Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=5; 2025-06-25T14:29:25.846674Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 5 at tablet 72075186224037922 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:29:25.846845Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037922 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:29:25.847006Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [1:7519894023023229119:2474], Table: `/Root/TestTable` ([72057594046644480:17:1]), SessionActorId: [1:7519894023023228958:2474]Got CONSTRAINT VIOLATION for table `/Root/TestTable`. ShardID=72075186224037922, Sink=[1:7519894023023229119:2474].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:29:25.847610Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519894023023229112:2474], SessionActorId: [1:7519894023023228958:2474], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[1:7519894023023228958:2474]. isRollback=0 2025-06-25T14:29:25.847822Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=ZjAzMWQ3OWYtMTg3MmU2ZjItZDgxMzM0NTQtY2I2MDIwOGE=, ActorId: [1:7519894023023228958:2474], ActorState: ExecuteState, TraceId: 01jykqwq4gcyah5bw1wm2j399q, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [1:7519894023023229113:2474] from: [1:7519894023023229112:2474] 2025-06-25T14:29:25.847897Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519894023023229113:2474] TxId: 281474976710676. Ctx: { TraceId: 01jykqwq4gcyah5bw1wm2j399q, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjAzMWQ3OWYtMTg3MmU2ZjItZDgxMzM0NTQtY2I2MDIwOGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:29:25.848085Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZjAzMWQ3OWYtMTg3MmU2ZjItZDgxMzM0NTQtY2I2MDIwOGE=, ActorId: [1:7519894023023228958:2474], ActorState: ExecuteState, TraceId: 01jykqwq4gcyah5bw1wm2j399q, Create QueryResponse for error on request, msg: query_phases { duration_us: 9781 cpu_time_us: 870 } compilation { duration_us: 58706 cpu_time_us: 56488 } process_cpu_time_us: 389 total_duration_us: 71056 total_cpu_time_us: 57747 query_phases { duration_us: 3293 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1105 affected_shards: 1 } compilation { duration_us: 58620 cpu_time_us: 55138 } process_cpu_time_us: 447 total_duration_us: 65914 total_cpu_time_us: 56690 query_phases { duration_us: 11742 cpu_time_us: 1401 affected_shards: 1 } compilation { duration_us: 83293 cpu_time_us: 80904 } process_cpu_time_us: 482 total_duration_us: 97306 total_cpu_time_us: 82787 query_phases { duration_us: 4548 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1737 affected_shards: 1 } compilation { duration_us: 68593 cpu_time_us: 66306 } process_cpu_time_us: 400 total_duration_us: 83061 total_cpu_time_us: 68443 query_phases { duration_us: 3798 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 1260 affected_shards: 1 } compilation { duration_us: 48396 cpu_time_us: 45977 } process_cpu_time_us: 433 total_duration_us: 54831 total_cpu_time_us: 47670 query_phases { duration_us: 6391 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 1572 affected_shards: 1 } compilation { duration_us: 78051 cpu_time_us: 75521 } process_cpu_time_us: 539 total_duration_us: 88066 total_cpu_time_us: 77632 |76.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_1KiB_Newline_Delimited_With_Two_Delimiters_In_A_Row [GOOD] |76.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest |76.5%| [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_Custom_Delimiter_Delimited [GOOD] |76.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest |76.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TSchemeShardTest::CacheEffectiveACL [GOOD] >> TSchemeShardTest::ConsistentCopyTable ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::Authorize [GOOD] Test command err: 2025-06-25T14:29:28.573528Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000004388] Connect to grpc://localhost:12933 2025-06-25T14:29:28.657052Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000004388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "path_id" } } iam_token: "**** (717F937C)" } } } 2025-06-25T14:29:28.691873Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000004388] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user_id" } } } } } 2025-06-25T14:29:28.692866Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000004388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "path_id" 
} } iam_token: "**** (79225CA9)" } } } 2025-06-25T14:29:28.696945Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000004388] Status 7 Permission Denied 2025-06-25T14:29:28.697957Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000004388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "denied" } resource_path { path { id: "path_id" } } iam_token: "**** (717F937C)" } } } 2025-06-25T14:29:28.700796Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000004388] Status 7 Permission Denied 2025-06-25T14:29:28.701350Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000004388] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "p" } } iam_token: "**** (717F937C)" } } } 2025-06-25T14:29:28.702801Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000004388] Status 7 Permission Denied >> TTxDataShardMiniKQL::CrossShard_4_OneToAll [GOOD] >> TNebiusAccessServiceTest::Authenticate [GOOD] |76.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] >> TSchemeShardTest::PathName >> TSchemeShardTest::Boot >> TTicketParserTest::LoginEmptyTicketBad [GOOD] >> TSchemeShardTest::CreateTable ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::Authenticate [GOOD] Test command err: 2025-06-25T14:29:29.547711Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000002b08] Connect to grpc://localhost:17503 2025-06-25T14:29:29.562869Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000002b08] Request AuthenticateRequest { iam_token: "**** (3C4833B6)" } 2025-06-25T14:29:29.571889Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000002b08] Status 7 Permission Denied 2025-06-25T14:29:29.576246Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000002b08] Request AuthenticateRequest { iam_token: "**** (86DDB286)" } 2025-06-25T14:29:29.579348Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000002b08] Response AuthenticateResponse { account { user_account { id: "1234" } } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::CrossShard_4_OneToAll [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:118:2057] recipient: [1:112:2142] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:118:2057] recipient: [1:112:2142] Leader for TabletID 9437184 is [1:135:2156] sender: [1:138:2057] recipient: [1:112:2142] 2025-06-25T14:28:23.779821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:23.779899Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:23.796676Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:28:23.984035Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:28:23.984594Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T14:28:23.984871Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: 
TxInitSchema.Execute 2025-06-25T14:28:24.042347Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:28:24.278611Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:28:24.279262Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:28:24.280936Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T14:28:24.281000Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T14:28:24.281070Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T14:28:24.292919Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:28:24.293180Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:28:24.293253Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:208:2156] in generation 2 Leader for TabletID 9437184 is [1:135:2156] sender: [1:216:2057] recipient: [1:14:2061] 2025-06-25T14:28:24.508997Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:28:24.546342Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T14:28:24.559270Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:28:24.559503Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:221:2217] 2025-06-25T14:28:24.559557Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T14:28:24.559613Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T14:28:24.559660Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:28:24.559890Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.560043Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.589026Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T14:28:24.589252Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T14:28:24.589352Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T14:28:24.620959Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:24.621084Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T14:28:24.621134Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T14:28:24.621195Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue 
has no ready operations at 9437184 2025-06-25T14:28:24.621234Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T14:28:24.637236Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T14:28:24.637529Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:217:2214], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.637595Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.637661Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:215:2213], serverId# [1:217:2214], sessionId# [0:0:0] 2025-06-25T14:28:24.658812Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T14:28:24.658906Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:28:24.659012Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:28:24.679643Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T14:28:24.679754Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T14:28:24.690979Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T14:28:24.691092Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T14:28:24.691151Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T14:28:24.691200Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T14:28:24.691238Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T14:28:24.691571Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T14:28:24.691610Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T14:28:24.691650Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T14:28:24.691688Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T14:28:24.691756Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T14:28:24.691793Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 
2025-06-25T14:28:24.691838Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T14:28:24.691880Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T14:28:24.691903Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T14:28:24.721648Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T14:28:24.721739Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T14:28:24.721787Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T14:28:24.721847Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T14:28:24.731201Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T14:28:24.744047Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:227:2223], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.744155Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.744215Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:226:2222], serverId# [1:227:2223], sessionId# [0:0:0] 2025-06-25T14:28:24.744359Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-25T14:28:24.744403Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T14:28:24.744550Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-25T14:28:24.744622Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-25T14:28:24.744673Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-25T14:28:24.744716Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-06-25T14:28:24.751508Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-25T14:28:24.763876Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:28:24.764250Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.764343Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.764429Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: 
TTxProgressTransaction::Execute at 9437184 2025-06-25T14:28:24.764482Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:28:24.764523Z node 1 :TX_DATASHARD TRACE: datashard_pipelin ... as no attached operations 2025-06-25T14:29:29.008139Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-25T14:29:29.008365Z node 39 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [39:459:2398], Recipient [39:459:2398]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:29:29.008404Z node 39 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:29:29.008456Z node 39 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437186 2025-06-25T14:29:29.008482Z node 39 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:29:29.008502Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-25T14:29:29.008525Z node 39 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [7:6] in PlanQueue unit at 9437186 2025-06-25T14:29:29.008545Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit PlanQueue 2025-06-25T14:29:29.008571Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-25T14:29:29.008596Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit PlanQueue 2025-06-25T14:29:29.008620Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit LoadTxDetails 2025-06-25T14:29:29.008645Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit LoadTxDetails 2025-06-25T14:29:29.009411Z node 39 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437186 loaded tx from db 7:6 keys extracted: 1 2025-06-25T14:29:29.009454Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-25T14:29:29.009481Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit LoadTxDetails 2025-06-25T14:29:29.009507Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit FinalizeDataTxPlan 2025-06-25T14:29:29.009532Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit FinalizeDataTxPlan 2025-06-25T14:29:29.009571Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-25T14:29:29.009596Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit FinalizeDataTxPlan 2025-06-25T14:29:29.009621Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit BuildAndWaitDependencies 2025-06-25T14:29:29.009645Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit BuildAndWaitDependencies 2025-06-25T14:29:29.009690Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [7:6] is the new 
logically complete end at 9437186 2025-06-25T14:29:29.009720Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [7:6] is the new logically incomplete end at 9437186 2025-06-25T14:29:29.009747Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [7:6] at 9437186 2025-06-25T14:29:29.009788Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-25T14:29:29.009810Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit BuildAndWaitDependencies 2025-06-25T14:29:29.009831Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit BuildDataTxOutRS 2025-06-25T14:29:29.009859Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit BuildDataTxOutRS 2025-06-25T14:29:29.009907Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-25T14:29:29.009980Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit BuildDataTxOutRS 2025-06-25T14:29:29.010003Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit StoreAndSendOutRS 2025-06-25T14:29:29.010028Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit StoreAndSendOutRS 2025-06-25T14:29:29.010054Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-25T14:29:29.010078Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit StoreAndSendOutRS 2025-06-25T14:29:29.010102Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit PrepareDataTxInRS 2025-06-25T14:29:29.010126Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit PrepareDataTxInRS 2025-06-25T14:29:29.010156Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-25T14:29:29.010178Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit PrepareDataTxInRS 2025-06-25T14:29:29.010200Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit LoadAndWaitInRS 2025-06-25T14:29:29.010225Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit LoadAndWaitInRS 2025-06-25T14:29:29.010252Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-25T14:29:29.010278Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit LoadAndWaitInRS 2025-06-25T14:29:29.010304Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit ExecuteDataTx 2025-06-25T14:29:29.010328Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit ExecuteDataTx 2025-06-25T14:29:29.010758Z node 39 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [7:6] at tablet 9437186 with status COMPLETE 2025-06-25T14:29:29.010813Z node 39 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [7:6] at 9437186: 
{NSelectRow: 1, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 1, SelectRowBytes: 10, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T14:29:29.010862Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-25T14:29:29.010895Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit ExecuteDataTx 2025-06-25T14:29:29.010917Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit CompleteOperation 2025-06-25T14:29:29.010944Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit CompleteOperation 2025-06-25T14:29:29.011137Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is DelayComplete 2025-06-25T14:29:29.011166Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit CompleteOperation 2025-06-25T14:29:29.011193Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit CompletedOperations 2025-06-25T14:29:29.011224Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit CompletedOperations 2025-06-25T14:29:29.011258Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-25T14:29:29.011283Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit CompletedOperations 2025-06-25T14:29:29.011308Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [7:6] at 9437186 has finished 2025-06-25T14:29:29.011337Z node 39 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:29:29.011363Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-25T14:29:29.011389Z node 39 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437186 has no attached operations 2025-06-25T14:29:29.011415Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-06-25T14:29:29.028288Z node 39 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 7 txid# 6} 2025-06-25T14:29:29.028380Z node 39 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 7} 2025-06-25T14:29:29.028437Z node 39 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T14:29:29.028473Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [7:6] at 9437184 on unit CompleteOperation 2025-06-25T14:29:29.028529Z node 39 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [7 : 6] from 9437184 at tablet 9437184 send result to client [39:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T14:29:29.028574Z node 39 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:29:29.028862Z node 39 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437186 step# 7 txid# 6} 2025-06-25T14:29:29.028909Z node 39 :TX_DATASHARD DEBUG: 
datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437186 step# 7} 2025-06-25T14:29:29.028952Z node 39 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T14:29:29.028982Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [7:6] at 9437186 on unit CompleteOperation 2025-06-25T14:29:29.029024Z node 39 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [7 : 6] from 9437186 at tablet 9437186 send result to client [39:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T14:29:29.029059Z node 39 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T14:29:29.029312Z node 39 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437185 step# 7 txid# 6} 2025-06-25T14:29:29.029355Z node 39 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437185 step# 7} 2025-06-25T14:29:29.029398Z node 39 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-25T14:29:29.029427Z node 39 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [7:6] at 9437185 on unit CompleteOperation 2025-06-25T14:29:29.029469Z node 39 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [7 : 6] from 9437185 at tablet 9437185 send result to client [39:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T14:29:29.029499Z node 39 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 >> TSchemeShardCheckProposeSize::CopyTable >> TSchemeShardTest::InitRootAgain >> TSchemeShardTest::PathName [GOOD] >> TSchemeShardTest::PathName_SetLocale >> TSchemeShardTest::RmDirTwice >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-false >> TSchemeShardTest::Boot [GOOD] >> TSchemeShardTest::AlterTableKeyColumns >> TSchemeShardTest::CreateIndexedTable >> KqpQuery::PreparedQueryInvalidate >> TSchemeShardTest::DropIndexedTableAndForceDropSimultaneously >> TSchemeShardTest::PathName_SetLocale [GOOD] >> TSchemeShardTest::ModifyACL >> Cdc::UpdateShardCount [GOOD] >> Cdc::UpdateRetentionPeriod ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ut/unittest >> TTicketParserTest::LoginEmptyTicketBad [GOOD] Test command err: 2025-06-25T14:28:46.952030Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893856088336280:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:46.952116Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00191d/r3tmp/tmpbp8cMC/pdisk_1.dat 2025-06-25T14:28:47.177500Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893856088336262:2080] 1750861726951267 != 1750861726951270 2025-06-25T14:28:47.196050Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23238, node 1 2025-06-25T14:28:47.259624Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:47.259643Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:47.259649Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:47.259726Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:47.298721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:47.298884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:47.301040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15690 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:47.561039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:47.663680Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:28:47.677131Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:28:47.677182Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:47.678459Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****UOXg (3F5BDB72) () has now valid token of user1 2025-06-25T14:28:47.678489Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-25T14:28:49.705820Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519893869720576913:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:49.705901Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00191d/r3tmp/tmpBGhVgD/pdisk_1.dat 2025-06-25T14:28:49.803871Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25647, node 2 2025-06-25T14:28:49.834043Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:49.834133Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:49.838992Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:49.862662Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:49.862685Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:49.862692Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:49.862798Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16743 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:28:50.095849Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:50.164376Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:28:50.170638Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:28:50.170670Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:50.171306Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****255g (5CBEA5E4) () has now valid token of user1 2025-06-25T14:28:50.171318Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-25T14:28:52.887879Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519893882626719328:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:52.888236Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00191d/r3tmp/tmpJNRHEu/pdisk_1.dat 2025-06-25T14:28:53.050949Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:53.078117Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:53.078206Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 64106, node 3 2025-06-25T14:28:53.079710Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:53.109973Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:53.109997Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:53.110004Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:53.110136Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23773 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:53.323609Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:28:53.393922Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:28:53.401638Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:28:53.401669Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:28:53.402224Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****o87g (99FF52B7) () has now valid token of user1 2025-06-25T14:28:53.402245Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-25T14:28:53.402572Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:28:53.909076Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:57.887747Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519893882626719328:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:57.887824Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:28:57.905574Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****o87g (99FF52B7) 2025-06-25T14:28:57.905918Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****o87g (99FF52B7) () has now valid token of user1 2025-06-25T14:29:02.907946Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****o87g (99FF52B7) 2025-06-25T14:29:02.908250Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****o87g (99FF52B7) () has now valid token of user1 2025-06-25T14:29:03.405219Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:29:07.920457Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****o87g (99FF52B7) 
2025-06-25T14:29:07.920744Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****o87g (99FF52B7) () has now valid token of user1 2025-06-25T14:29:08.028459Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:29:08.028489Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:12.924232Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****o87g (99FF52B7) 2025-06-25T14:29:12.924647Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****o87g (99FF52B7) () has now valid token of user1 2025-06-25T14:29:14.247653Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519893976394550916:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:14.247718Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00191d/r3tmp/tmpuxBllt/pdisk_1.dat 2025-06-25T14:29:14.387230Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:14.390862Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519893976394550897:2080] 1750861754247303 != 1750861754247306 2025-06-25T14:29:14.404247Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:14.404635Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:14.411708Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12841, node 4 2025-06-25T14:29:14.467551Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:14.467575Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:14.467583Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:14.467716Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5989 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:29:14.785315Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:14.795291Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:14.928146Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:29:14.934833Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:29:14.934863Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:29:14.935628Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****h4DQ (3F37A00A) () has now valid token of user1 2025-06-25T14:29:14.935654Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-25T14:29:14.936001Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:29:15.260756Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:18.264372Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****h4DQ (3F37A00A) 2025-06-25T14:29:18.264614Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****h4DQ (3F37A00A) () has now permanent error message 'User not found' 2025-06-25T14:29:19.249167Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519893976394550916:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:19.249255Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:23.271282Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****h4DQ (3F37A00A) 2025-06-25T14:29:25.663237Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519894023663047423:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:25.663279Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00191d/r3tmp/tmpo5emQe/pdisk_1.dat 2025-06-25T14:29:25.908783Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:25.910288Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519894023663047386:2080] 1750861765662054 != 1750861765662057 2025-06-25T14:29:25.929403Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:25.929515Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:25.936259Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20984, node 5 2025-06-25T14:29:26.030006Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:26.030028Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:26.030054Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:26.030196Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4308 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:26.382662Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:26.390700Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:26.412448Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:29:26.443662Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:916: Ticket **** (00000000): Ticket is empty >> KqpTypes::QuerySpecialTypes >> StatisticsSaveLoad::Delete [GOOD] >> TSchemeShardTest::InitRootAgain [GOOD] >> TSchemeShardTest::InitRootWithOwner >> StatisticsSaveLoad::Simple [GOOD] >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-false [GOOD] >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-true >> TSchemeShardTest::RmDirTwice [GOOD] >> TSchemeShardTest::TopicMeteringMode |76.5%| [TA] $(B)/ydb/core/security/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeShardTest::ModifyACL [GOOD] >> TSchemeShardTest::NameFormat >> TSchemeShardTest::CreateTable [GOOD] >> TSchemeShardTest::CreateTableWithDate >> TSchemeShardCheckProposeSize::CopyTable [GOOD] >> TSchemeShardCheckProposeSize::CopyTables >> TSchemeShardTest::ConsistentCopyTable [GOOD] >> TSchemeShardTest::ConsistentCopyTableAwait >> TSchemeShardTest::AlterTableKeyColumns [GOOD] >> TSchemeShardTest::AlterTableFollowers >> KqpExplain::UpdateSecondaryConditionalPrimaryKey+UseSink >> TSchemeShardTest::InitRootWithOwner [GOOD] >> TSchemeShardTest::MkRmDir >> KqpLimits::DatashardProgramSize+useSink >> TSchemeShardTest::DropIndexedTableAndForceDropSimultaneously [GOOD] >> TSchemeShardTest::DependentOps >> TSchemeShardTest::TopicMeteringMode [GOOD] >> TSchemeShardTest::Restart >> TSchemeShardTest::CreateIndexedTable [GOOD] >> TSchemeShardTest::CreateIndexedTableAndForceDrop ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::Simple [GOOD] Test command err: 2025-06-25T14:29:22.315419Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:29:22.315781Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:29:22.315884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f86/r3tmp/tmp7y9FbH/pdisk_1.dat 2025-06-25T14:29:22.693565Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16657, node 1 2025-06-25T14:29:22.954216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:22.954273Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:22.954327Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:22.954914Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:22.960888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:23.064172Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:23.064329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:23.084412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4398 2025-06-25T14:29:23.715421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:29:27.269648Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:29:27.323708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:27.323840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:27.365848Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:29:27.369271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:27.653964Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:27.689991Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.690643Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.691273Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.691420Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.691669Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.691755Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.691828Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.691903Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.691987Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.925360Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:27.925488Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:27.960755Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:28.301171Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:28.367865Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:29:28.368012Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:29:28.412036Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:29:28.412275Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:29:28.412540Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:29:28.412607Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:29:28.412660Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:29:28.412721Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:29:28.412780Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:29:28.412835Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:29:28.413440Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:29:28.439022Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:29:28.439116Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:29:28.453752Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:29:28.469622Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T14:29:28.470038Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T14:29:28.482328Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:29:28.504633Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:29:28.504708Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:29:28.504803Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:29:28.518601Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:28.545340Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:29:28.545525Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:29:28.773145Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:29:29.074728Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:29:29.183623Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:29:29.719832Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:29.722222Z node 1 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:29:29.722683Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T14:29:29.743983Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T14:29:29.748116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2190:3045], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:29.748259Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2173:3040], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:29.748617Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:29.754753Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:29.814396Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2193:3048], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:29:30.063021Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2280:3077] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:30.418599Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2302:3089]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:29:30.418804Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:29:30.418907Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2304:3091] 2025-06-25T14:29:30.418992Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2304:3091] 2025-06-25T14:29:30.419589Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2305:2800] 2025-06-25T14:29:30.419831Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2304:3091], server id = [2:2305:2800], tablet id = 72075186224037894, status = OK 2025-06-25T14:29:30.419986Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2305:2800], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T14:29:30.420052Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-25T14:29:30.420264Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:29:30.420362Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2302:3089], StatRequests.size() = 1 2025-06-25T14:29:30.617669Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=OWIyMTVhNDUtN2Y5N2ZhZDQtMjM4ZjgzMmMtZmI3OWNkMTI=, TxId: 2025-06-25T14:29:30.617755Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=OWIyMTVhNDUtN2Y5N2ZhZDQtMjM4ZjgzMmMtZmI3OWNkMTI=, TxId: 2025-06-25T14:29:30.622434Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T14:29:30.626067Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-06-25T14:29:30.755956Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2338:3112]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:29:30.756190Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:29:30.756245Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2338:3112], StatRequests.size() = 1 2025-06-25T14:29:30.931561Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=OTFkYWMzMGQtMmE5NzZkMDgtZmFlZGJhODItZDg0OWU0MzI=, TxId: 01jykqww4v0935gvvs3exnwc55 2025-06-25T14:29:30.931735Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=OTFkYWMzMGQtMmE5NzZkMDgtZmFlZGJhODItZDg0OWU0MzI=, TxId: 01jykqww4v0935gvvs3exnwc55 2025-06-25T14:29:30.934049Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T14:29:30.937529Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-06-25T14:29:30.963162Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=OTljZWU0OWUtNDczMjI3Y2UtNTI4Y2NiMGQtYjE0Zjg5NmQ=, TxId: 01jykqww5t2zes4v2e46ezjbpj 2025-06-25T14:29:30.963319Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=OTljZWU0OWUtNDczMjI3Y2UtNTI4Y2NiMGQtYjE0Zjg5NmQ=, TxId: 01jykqww5t2zes4v2e46ezjbpj >> TSchemeShardPgTypesInTables::CreateTableWithPgTypeColumn-EnableTablePgTypes-true [GOOD] >> TSchemeShardTest::AlterTableAndConcurrentSplit >> StatisticsSaveLoad::ForbidAccess [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::Delete [GOOD] Test command err: 2025-06-25T14:29:22.045468Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:29:22.045813Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:29:22.045939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000fa8/r3tmp/tmp40CB15/pdisk_1.dat 2025-06-25T14:29:22.456912Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27699, node 1 2025-06-25T14:29:22.799903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:22.799966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:22.800009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:22.800548Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:22.803266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:22.909106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:22.909240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:22.926102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31740 2025-06-25T14:29:23.507274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:29:27.051952Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:29:27.104284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:27.104442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:27.146337Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:29:27.149978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:27.416092Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:27.451513Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.452104Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.452794Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.452943Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.453161Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.453217Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.453267Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.453316Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.453364Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.658572Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:27.658697Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:27.673878Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:27.856582Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:27.928984Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:29:27.929098Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:29:27.995499Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:29:27.995751Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:29:27.995997Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:29:27.996063Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:29:27.996125Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:29:27.996187Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:29:27.996254Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:29:27.996328Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:29:27.996937Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:29:28.032104Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:29:28.032228Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:29:28.065286Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:29:28.073598Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T14:29:28.073957Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T14:29:28.085815Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:29:28.108143Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:29:28.108220Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:29:28.108320Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:29:28.133399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:28.145663Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:29:28.145827Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:29:28.449667Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:29:28.693221Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:29:28.772993Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:29:29.416678Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:29.425407Z node 1 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:29:29.425878Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T14:29:29.456129Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T14:29:29.476874Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2190:3045], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:29.477035Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2173:3040], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:29.477444Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:29.484257Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:29.588171Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2193:3048], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:29:29.881878Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2280:3077] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:30.166690Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2302:3089]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:29:30.166923Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:29:30.167042Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2304:3091] 2025-06-25T14:29:30.167111Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2304:3091] 2025-06-25T14:29:30.167617Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2305:2800] 2025-06-25T14:29:30.167987Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2304:3091], server id = [2:2305:2800], tablet id = 72075186224037894, status = OK 2025-06-25T14:29:30.168176Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2305:2800], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T14:29:30.168247Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-25T14:29:30.168533Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:29:30.168615Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2302:3089], StatRequests.size() = 1 2025-06-25T14:29:30.325436Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=ZDJlZDdiNDQtMTBhNzk4ZGUtZmRlNDlmN2ItNjY2MmMxNjk=, TxId: 2025-06-25T14:29:30.325547Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=ZDJlZDdiNDQtMTBhNzk4ZGUtZmRlNDlmN2ItNjY2MmMxNjk=, TxId: 2025-06-25T14:29:30.333774Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T14:29:30.336857Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:29:30.388014Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2338:3112]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:29:30.388216Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:29:30.388260Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2338:3112], StatRequests.size() = 1 2025-06-25T14:29:30.557370Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=ZWY5ZDdlZDgtMTM1ZTExMjktNjk3N2I2MDgtNmZmYzIzYw==, TxId: 2025-06-25T14:29:30.557459Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=ZWY5ZDdlZDgtMTM1ZTExMjktNjk3N2I2MDgtNmZmYzIzYw==, TxId: 2025-06-25T14:29:30.558445Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T14:29:30.561409Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-06-25T14:29:30.609931Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:2370:3127]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:29:30.610132Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T14:29:30.610176Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:2370:3127], StatRequests.size() = 1 2025-06-25T14:29:30.775081Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=YjU4MTM2MDctMzYzYTQ1NjItNWE0MTE3ZWQtNWJjNDgxNjE=, TxId: 01jykqwvzc4nhqc7v9hw2j0e48 2025-06-25T14:29:30.775248Z node 1 :STATISTICS WARN: query_actor.cpp:372: [TQueryBase] Finish with BAD_REQUEST, Issues: {
: Error: No data }, SessionId: ydb://session/3?node_id=1&id=YjU4MTM2MDctMzYzYTQ1NjItNWE0MTE3ZWQtNWJjNDgxNjE=, TxId: 01jykqwvzc4nhqc7v9hw2j0e48 |76.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |76.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |76.5%| [TA] {RESULT} $(B)/ydb/core/security/ut/test-results/unittest/{meta.json ... results_accumulator.log} |76.5%| [LD] {RESULT} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |76.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema >> KqpExplain::UpdateSecondaryConditional+UseSink |76.5%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |76.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema >> TSchemeShardTest::NameFormat [GOOD] >> TSchemeShardTest::ParallelCreateTable >> TSchemeShardTest::MkRmDir [GOOD] >> TSchemeShardTest::DropTableTwice >> TSchemeShardTest::ConsistentCopyTableAwait [GOOD] >> TSchemeShardTest::ConsistentCopyTableRejects >> TSchemeShardTest::DependentOps [GOOD] >> TSchemeShardTest::DefaultColumnFamiliesWithNonCanonicName >> Cdc::Drop[YdsRunner] [GOOD] >> Cdc::Drop[TopicRunner] >> TSchemeShardTest::CreateIndexedTableAndForceDrop [GOOD] >> TSchemeShardTest::CreateAlterTableWithCodec >> TSchemeShardTest::CreateTableWithDate [GOOD] >> TSchemeShardTest::CreateIndexedTableRejects |76.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |76.5%| [LD] {RESULT} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |76.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut >> TSchemeShardTest::Restart [GOOD] >> TSchemeShardTest::SchemeErrors >> TSchemeShardTest::DropTableTwice [GOOD] >> TSchemeShardTest::IgnoreUserColumnIds >> TSchemeShardTest::AlterTableFollowers [GOOD] >> TSchemeShardTest::AlterTableSizeToSplit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::ForbidAccess [GOOD] Test command err: 2025-06-25T14:29:22.058616Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:29:22.058984Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:29:22.059103Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f8d/r3tmp/tmp6G6sQM/pdisk_1.dat 2025-06-25T14:29:22.488211Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61752, node 1 2025-06-25T14:29:22.803751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:22.803791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:22.803825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:22.804184Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:22.806686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:22.916648Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:22.916794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:22.939570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4146 2025-06-25T14:29:23.550385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:29:27.137367Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:29:27.184099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:27.184213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:27.238113Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:29:27.241184Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:27.492779Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:27.527762Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.528878Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.529463Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.529618Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.529867Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.529993Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.530113Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.530217Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.530330Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:29:27.731921Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:27.732059Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:27.746475Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:27.945424Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:28.001273Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:29:28.001379Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:29:28.038755Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:29:28.038988Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:29:28.039237Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:29:28.039305Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:29:28.039364Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:29:28.039411Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:29:28.039477Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:29:28.039525Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:29:28.040052Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:29:28.065974Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:29:28.066077Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:29:28.076078Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:29:28.082876Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T14:29:28.083173Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T14:29:28.097694Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:29:28.131440Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:29:28.131520Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:29:28.131603Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:29:28.146450Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:28.155128Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:29:28.155292Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:29:28.442069Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:29:28.683507Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:29:28.769051Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:29:29.331092Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:29.544029Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:29.544190Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:29.562543Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:30.188577Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2452:3074], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:30.188773Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:30.190361Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2457:3078]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:29:30.190574Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:29:30.190673Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2459:3080] 2025-06-25T14:29:30.190741Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2459:3080] 2025-06-25T14:29:30.191314Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2460:2949] 2025-06-25T14:29:30.191548Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2459:3080], server id = [2:2460:2949], tablet id = 72075186224037894, status = OK 2025-06-25T14:29:30.191875Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2460:2949], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T14:29:30.191947Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-25T14:29:30.192189Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:29:30.192278Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2457:3078], StatRequests.size() = 1 2025-06-25T14:29:30.212109Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2464:3084], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:30.212246Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:30.212786Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2469:3089], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:30.219301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:30.404073Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T14:29:30.404178Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T14:29:30.510250Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2459:3080], schemeshard count = 1 2025-06-25T14:29:30.935710Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2471:3091], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T14:29:31.106149Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2583:3161] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:31.122620Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2606:3177]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:29:31.122822Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:29:31.122859Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2606:3177], StatRequests.size() = 1 2025-06-25T14:29:31.212813Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykqwvba3hns4a41rwr4dt9f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzdlNDcxNWMtNGI0YzlmNzMtZGE4NjllYjEtMTgyYTlhMGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:29:31.495294Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:2687:3207], for# user@builtin, access# DescribeSchema 2025-06-25T14:29:31.495362Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:2687:3207], for# user@builtin, access# DescribeSchema 2025-06-25T14:29:31.507625Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:2677:3203], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/Database/.metadata/_statistics]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:29:31.509779Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MzU3MjI3ZTQtM2NiOTFkMWItMzFjZGRlMjUtNjk5YTkwN2Y=, ActorId: [1:2666:3195], ActorState: ExecuteState, TraceId: 01jykqwwnqes5cq83p7pgf8bse, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: >> TSchemeShardTest::AlterTableAndConcurrentSplit [GOOD] >> TSchemeShardTest::AlterTable >> TSchemeShardTest::ParallelCreateTable [GOOD] >> TSchemeShardTest::ParallelCreateSameTable >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly [GOOD] >> PersQueueSdkReadSessionTest::StopResumeReadingData >> KqpQuery::QueryClientTimeout >> TSchemeShardTest::SchemeErrors [GOOD] >> TSchemeShardTest::SerializedCellVec [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldNotUpdate >> DataShardSnapshots::LockedWriteDistributedCommitAborted+UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitAborted-UseSink >> VDiskBalancing::TestRandom_Block42 [GOOD] >> TSchemeShardTest::CreateAlterTableWithCodec [GOOD] >> TSchemeShardTest::CopyTableTwiceSimultaneously >> KqpLimits::BigParameter >> TSchemeShardTest::DefaultColumnFamiliesWithNonCanonicName [GOOD] >> TSchemeShardTest::DropPQ >> TSchemeShardTest::IgnoreUserColumnIds [GOOD] >> TSchemeShardTest::DropTableAndConcurrentSplit >> TSchemeShardTest::AlterTableSizeToSplit [GOOD] >> TSchemeShardTest::AlterTableSplitSchema |76.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |76.6%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |76.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut >> TSchemeShardTest::AlterTable [GOOD] >> TSchemeShardTest::AlterTableDropColumnReCreateSplit >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowByCount [GOOD] >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowBySize >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldNotUpdate [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldUpdate >> TSchemeShardTest::ParallelCreateSameTable [GOOD] >> TSchemeShardTest::MultipleColumnFamilies >> TSchemeShardTest::ConsistentCopyTableRejects [GOOD] >> TSchemeShardTest::ConsistentCopyTableToDeletedPath |76.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |76.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |76.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure >> KqpExplain::LimitOffset >> TSchemeShardTest::CopyTableTwiceSimultaneously [GOOD] >> TSchemeShardTest::CopyTableWithAlterConfig >> TSchemeShardTest::DropTableAndConcurrentSplit [GOOD] >> TSchemeShardTest::DropTable >> TSchemeShardTest::AlterTableSplitSchema [GOOD] >> TSchemeShardTest::AlterTableSettings >> TSchemeShardTest::UpdateChannelsBindingSolomonShouldUpdate [GOOD] >> TSchemeShardTest::UpdateChannelsBindingSolomonStorageConfig >> Cdc::UpdateRetentionPeriod [GOOD] >> 
Cdc::SupportedTypes >> TSchemeShardTest::AlterTableDropColumnReCreateSplit [GOOD] >> TSchemeShardTest::AlterTableDropColumnSplitThenReCreate >> KqpQuery::PreparedQueryInvalidate [GOOD] >> KqpQuery::QueryCache >> TSchemeShardTest::CreateIndexedTableRejects [GOOD] >> TSchemeShardTest::CreateIndexedTableAndForceDropSimultaneously >> DataShardSnapshots::MvccSnapshotReadWithLongPlanQueue [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestRandom_Block42 [GOOD] Test command err: RandomSeed# 17899334700531420418 Step = 0 SEND TEvPut with key [1:1:0:0:0:585447:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:585447:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 1 SEND TEvPut with key [1:1:1:0:0:37868:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:37868:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 2 SEND TEvPut with key [1:1:2:0:0:619381:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:619381:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 3 SEND TEvPut with key [1:1:3:0:0:725585:0] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:725585:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 4 SEND TEvPut with key [1:1:4:0:0:2934723:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:2934723:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 4 2025-06-25T14:24:22.628222Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 5 SEND TEvPut with key [1:1:5:0:0:502135:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:502135:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} Step = 6 SEND TEvPut with key [1:1:6:0:0:3044947:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:3044947:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} Stop node 7 2025-06-25T14:24:23.447744Z 1 00h01m10.060512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 7 SEND TEvPut with key [1:1:7:0:0:582354:0] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:582354:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 8 SEND TEvPut with key [1:1:8:0:0:1478820:0] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:1478820:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 9 SEND TEvPut with key [1:1:9:0:0:1360774:0] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:1360774:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Start node 4 Step = 10 SEND TEvPut with key [1:1:10:0:0:1727870:0] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:1727870:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 11 SEND TEvPut with key [1:1:11:0:0:1883457:0] TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:1883457:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 12 SEND TEvPut with key [1:1:12:0:0:568368:0] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:568368:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 13 SEND TEvPut with key [1:1:13:0:0:896600:0] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:896600:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 14 SEND TEvPut with key [1:1:14:0:0:179270:0] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:179270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 15 SEND TEvPut with key [1:1:15:0:0:3026131:0] 
TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:3026131:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 16 SEND TEvPut with key [1:1:16:0:0:670396:0] TEvPutResult: TEvPutResult {Id# [1:1:16:0:0:670396:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 17 SEND TEvPut with key [1:1:17:0:0:1584741:0] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:1584741:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 18 SEND TEvPut with key [1:1:18:0:0:2384818:0] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:2384818:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 19 SEND TEvPut with key [1:1:19:0:0:2867010:0] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:2867010:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 20 SEND TEvPut with key [1:1:20:0:0:2911789:0] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:2911789:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 21 SEND TEvPut with key [1:1:21:0:0:2463622:0] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:2463622:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 22 SEND TEvPut with key [1:1:22:0:0:322338:0] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:322338:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 23 SEND TEvPut with key [1:1:23:0:0:2119770:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:2119770:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 24 SEND TEvPut with key [1:1:24:0:0:56036:0] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:56036:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 25 SEND TEvPut with key [1:1:25:0:0:2648607:0] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:2648607:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Stop node 0 2025-06-25T14:24:27.561230Z 3 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:188:17] ServerId# [1:296:58] TabletId# 72057594037932033 PipeClientId# [3:188:17] 2025-06-25T14:24:27.561472Z 6 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:209:17] ServerId# [1:299:61] TabletId# 72057594037932033 PipeClientId# [6:209:17] 2025-06-25T14:24:27.561573Z 5 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:7670:16] ServerId# [1:7679:1093] TabletId# 72057594037932033 PipeClientId# [5:7670:16] 2025-06-25T14:24:27.561679Z 4 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:195:17] ServerId# [1:297:59] TabletId# 72057594037932033 PipeClientId# [4:195:17] 2025-06-25T14:24:27.561789Z 2 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:181:17] ServerId# [1:295:57] TabletId# 72057594037932033 PipeClientId# [2:181:17] 2025-06-25T14:24:27.561881Z 7 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:216:17] ServerId# [1:300:62] TabletId# 72057594037932033 PipeClientId# [7:216:17] Step = 26 SEND TEvPut with key [1:1:26:0:0:539431:0] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:539431:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 27 SEND TEvPut with key [1:1:27:0:0:148482:0] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:148482:0] Status# OK 
StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 28 SEND TEvPut with key [1:1:28:0:0:2673563:0] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:2673563:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 29 SEND TEvPut with key [1:1:29:0:0:265170:0] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:265170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 30 SEND TEvPut with key [1:1:30:0:0:2398732:0] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:2398732:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Compact vdisk 2 Step = 31 SEND TEvPut with key [1:1:31:0:0:2302132:0] TEvPutResult: TEvPutResult {Id# [1:1:31:0:0:2302132:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 32 SEND TEvPut with key [1:1:32:0:0:3112269:0] TEvPutResult: TEvPutResult {Id# [1:1:32:0:0:3112269:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 33 SEND TEvPut with key [1:1:33:0:0:883758:0] TEvPutResult: TEvPutResult {Id# [1:1:33:0:0:883758:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 34 SEND TEvPut with key [1:1:34:0:0:1212958:0] TEvPutResult: TEvPutResult {Id# [1:1:34:0:0:1212958:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 35 SEND TEvPut with key [1:1:35:0:0:3026131:0] TEvPutResult: TEvPutResult {Id# [1:1:35:0:0:3026131:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 36 SEND TEvPut with key [1:1:36:0:0:139148:0] TEvPutResult: TEvPutResult {Id# [1:1:36:0:0:139148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 37 SEND TEvPut with key [1:1:37:0:0:200198:0] TEvPutResult: TEvPutResult {Id# [1:1:37:0:0:200198:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 38 SEND TEvPut with key [1:1:38:0:0:1252178:0] TEvPutResult: TEvPutResult {Id# [1:1:38:0:0:1252178:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 39 SEND TEvPut with key [1:1:39:0:0:1897783:0] TEvPutResult: TEvPutResult {Id# [1:1:39:0:0:1897783:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 40 SEND TEvPut with key [1:1:40:0:0:1486678:0] TEvPutResult: TEvPutResult {Id# [1:1:40:0:0:1486678:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 41 SEND TEvPut with key [1:1:41:0:0:1285964:0] TEvPutResult: TEvPutResult {Id# [1:1:41:0:0:1285964:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 42 SEND TEvPut with key [1:1:42:0:0:1221731:0] TEvPutResult: TEvPutResult {Id# [1:1:42:0:0:1221731:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 43 SEND TEvPut with key [1:1:43:0:0:1613844:0] TEvPutResult: TEvPutResult {Id# [1:1:43:0:0:1613844:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 44 SEND TEvPut with key [1:1:44:0:0:2582908:0] TEvPutResult: TEvPutResult {Id# [1:1:44:0:0:2582908:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 45 SEND TEvPut with key [1:1:45:0:0:1703743:0] TEvPutResult: TEvPutResult {Id# [1:1:45:0:0:1703743:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 46 SEND TEvPut with key [1:1:46:0:0:1362981:0] TEvPutResult: TEvPutResult {Id# [1:1:46:0:0:1362981:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 47 SEND TEvPut with key [1:1:47:0:0:1469807:0] TEvPutResult: TEvPutResult {Id# [1:1:47:0:0:1469807:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 48 SEND TEvPut with key 
[1:1:48:0:0:2832565:0] TEvPutResult: TEvPutResult {Id# [1:1:48:0:0:2832565:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 49 SEND TEvPut with key [1:1:49:0:0:1960611:0] TEvPutResult: TEvPutResult {Id# [1:1:49:0:0:1960611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 50 SEND TEvPut with key [1:1:50:0:0:1164230:0] TEvPutResult: TEvPutResult {Id# [1:1:50:0:0:1164230:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 51 SEND TEvPut with key [1:1:51:0:0:836900:0] TEvPutResult: TEvPutResult {Id# [1:1:51:0:0:836900:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 52 SEND TEvPut with key [1:1:52:0:0:838380:0] TEvPutResult: TEvPutResult {Id# [1:1:52:0:0:838380:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 53 SEND TEvPut with key [1:1:53:0:0:1975575:0] TEvPutResult: TEvPutResult {Id# [1:1:53:0:0:1975575:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Start node 0 Step = 54 SEND TEvPut with key [1:1:54:0:0:1888556:0] TEvPutResult: TEvPutResult {Id# [1:1:54:0:0:1888556:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 55 SEND TEvPut with key [1:1:55:0:0:715063:0] TEvPutResult: TEvPutResult {Id# [1:1:55:0:0:715063:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 56 SEND TEvPut with key [1:1:56:0:0:42993:0] TEvPutResult: TEvPutResult {Id# [1:1:56:0:0:42993:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 57 SEND TEvPut with key [1:1:57:0:0:1491407:0] TEvPutResult: TEvPutResult {Id# [1:1:57:0:0:1491407:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 58 SEND TEvPut with key [1:1:58:0:0:702845:0] TEvPutResult: TEvPutResult {Id# [1:1:58:0:0:702845:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 59 SEND TEvPut with key [1:1:59:0:0:2539948:0] TEvPutResult: TEvPutResult {Id# [1:1:59:0:0:2539948:0] Statu ... 
3} Step = 936 SEND TEvPut with key [1:1:936:0:0:2748248:0] TEvPutResult: TEvPutResult {Id# [1:1:936:0:0:2748248:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Step = 937 SEND TEvPut with key [1:1:937:0:0:112302:0] TEvPutResult: TEvPutResult {Id# [1:1:937:0:0:112302:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 938 SEND TEvPut with key [1:1:938:0:0:800417:0] TEvPutResult: TEvPutResult {Id# [1:1:938:0:0:800417:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 939 SEND TEvPut with key [1:1:939:0:0:2336442:0] TEvPutResult: TEvPutResult {Id# [1:1:939:0:0:2336442:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 940 SEND TEvPut with key [1:1:940:0:0:982070:0] TEvPutResult: TEvPutResult {Id# [1:1:940:0:0:982070:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Start node 4 Step = 941 SEND TEvPut with key [1:1:941:0:0:713632:0] TEvPutResult: TEvPutResult {Id# [1:1:941:0:0:713632:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 942 SEND TEvPut with key [1:1:942:0:0:1644191:0] TEvPutResult: TEvPutResult {Id# [1:1:942:0:0:1644191:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 943 SEND TEvPut with key [1:1:943:0:0:254634:0] TEvPutResult: TEvPutResult {Id# [1:1:943:0:0:254634:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 944 SEND TEvPut with key [1:1:944:0:0:1141270:0] TEvPutResult: TEvPutResult {Id# [1:1:944:0:0:1141270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 945 SEND TEvPut with key [1:1:945:0:0:610103:0] TEvPutResult: TEvPutResult {Id# [1:1:945:0:0:610103:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Step = 946 SEND TEvPut with key [1:1:946:0:0:24822:0] TEvPutResult: TEvPutResult {Id# [1:1:946:0:0:24822:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999756} Compact vdisk 6 Step = 947 SEND TEvPut with key [1:1:947:0:0:100167:0] TEvPutResult: TEvPutResult {Id# [1:1:947:0:0:100167:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999585} Step = 948 SEND TEvPut with key [1:1:948:0:0:645630:0] TEvPutResult: TEvPutResult {Id# [1:1:948:0:0:645630:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999585} Step = 949 SEND TEvPut with key [1:1:949:0:0:2125890:0] TEvPutResult: TEvPutResult {Id# [1:1:949:0:0:2125890:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999585} Step = 950 SEND TEvPut with key [1:1:950:0:0:2544891:0] TEvPutResult: TEvPutResult {Id# [1:1:950:0:0:2544891:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999585} Step = 951 SEND TEvPut with key [1:1:951:0:0:647007:0] TEvPutResult: TEvPutResult {Id# [1:1:951:0:0:647007:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999585} Step = 952 SEND TEvPut with key [1:1:952:0:0:2031652:0] TEvPutResult: TEvPutResult {Id# [1:1:952:0:0:2031652:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999585} Step = 953 SEND TEvPut with key [1:1:953:0:0:2109805:0] TEvPutResult: TEvPutResult {Id# [1:1:953:0:0:2109805:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999585} Stop node 3 2025-06-25T14:28:52.929455Z 1 00h28m30.703072s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 954 SEND TEvPut with key [1:1:954:0:0:1353403:0] TEvPutResult: TEvPutResult {Id# [1:1:954:0:0:1353403:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Stop node 4 2025-06-25T14:28:54.034659Z 1 00h28m40.715818s 
:PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 955 SEND TEvPut with key [1:1:955:0:0:1286278:0] TEvPutResult: TEvPutResult {Id# [1:1:955:0:0:1286278:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999658} Start node 3 Step = 956 SEND TEvPut with key [1:1:956:0:0:1875483:0] TEvPutResult: TEvPutResult {Id# [1:1:956:0:0:1875483:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 957 SEND TEvPut with key [1:1:957:0:0:1021388:0] TEvPutResult: TEvPutResult {Id# [1:1:957:0:0:1021388:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Start node 4 Step = 958 SEND TEvPut with key [1:1:958:0:0:860806:0] TEvPutResult: TEvPutResult {Id# [1:1:958:0:0:860806:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 959 SEND TEvPut with key [1:1:959:0:0:385917:0] TEvPutResult: TEvPutResult {Id# [1:1:959:0:0:385917:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 960 SEND TEvPut with key [1:1:960:0:0:200998:0] TEvPutResult: TEvPutResult {Id# [1:1:960:0:0:200998:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 961 SEND TEvPut with key [1:1:961:0:0:1661659:0] TEvPutResult: TEvPutResult {Id# [1:1:961:0:0:1661659:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 962 SEND TEvPut with key [1:1:962:0:0:771410:0] TEvPutResult: TEvPutResult {Id# [1:1:962:0:0:771410:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 963 SEND TEvPut with key [1:1:963:0:0:1414281:0] TEvPutResult: TEvPutResult {Id# [1:1:963:0:0:1414281:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 964 SEND TEvPut with key [1:1:964:0:0:2848837:0] TEvPutResult: TEvPutResult {Id# [1:1:964:0:0:2848837:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 965 SEND TEvPut with key [1:1:965:0:0:989600:0] TEvPutResult: TEvPutResult {Id# [1:1:965:0:0:989600:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 966 SEND TEvPut with key [1:1:966:0:0:2761296:0] TEvPutResult: TEvPutResult {Id# [1:1:966:0:0:2761296:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 967 SEND TEvPut with key [1:1:967:0:0:981163:0] TEvPutResult: TEvPutResult {Id# [1:1:967:0:0:981163:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 968 SEND TEvPut with key [1:1:968:0:0:14298:0] TEvPutResult: TEvPutResult {Id# [1:1:968:0:0:14298:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 969 SEND TEvPut with key [1:1:969:0:0:626285:0] TEvPutResult: TEvPutResult {Id# [1:1:969:0:0:626285:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Step = 970 SEND TEvPut with key [1:1:970:0:0:334566:0] TEvPutResult: TEvPutResult {Id# [1:1:970:0:0:334566:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Stop node 7 2025-06-25T14:28:56.053170Z 1 00h29m10.741104s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 971 SEND TEvPut with key [1:1:971:0:0:972888:0] TEvPutResult: TEvPutResult {Id# [1:1:971:0:0:972888:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 972 SEND TEvPut with key [1:1:972:0:0:786055:0] TEvPutResult: TEvPutResult {Id# [1:1:972:0:0:786055:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 973 SEND TEvPut with key [1:1:973:0:0:2707502:0] TEvPutResult: TEvPutResult {Id# [1:1:973:0:0:2707502:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Stop 
node 1 2025-06-25T14:28:56.426773Z 1 00h29m20.742168s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 974 SEND TEvPut with key [1:1:974:0:0:2660812:0] TEvPutResult: TEvPutResult {Id# [1:1:974:0:0:2660812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Start node 1 Step = 975 SEND TEvPut with key [1:1:975:0:0:3005283:0] TEvPutResult: TEvPutResult {Id# [1:1:975:0:0:3005283:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Stop node 1 2025-06-25T14:28:56.908864Z 1 00h29m40.743192s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 976 SEND TEvPut with key [1:1:976:0:0:1542748:0] TEvPutResult: TEvPutResult {Id# [1:1:976:0:0:1542748:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 977 SEND TEvPut with key [1:1:977:0:0:2837300:0] TEvPutResult: TEvPutResult {Id# [1:1:977:0:0:2837300:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 978 SEND TEvPut with key [1:1:978:0:0:481535:0] TEvPutResult: TEvPutResult {Id# [1:1:978:0:0:481535:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 979 SEND TEvPut with key [1:1:979:0:0:24668:0] TEvPutResult: TEvPutResult {Id# [1:1:979:0:0:24668:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 980 SEND TEvPut with key [1:1:980:0:0:1760402:0] TEvPutResult: TEvPutResult {Id# [1:1:980:0:0:1760402:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 981 SEND TEvPut with key [1:1:981:0:0:1711812:0] TEvPutResult: TEvPutResult {Id# [1:1:981:0:0:1711812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 982 SEND TEvPut with key [1:1:982:0:0:1422922:0] TEvPutResult: TEvPutResult {Id# [1:1:982:0:0:1422922:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 983 SEND TEvPut with key [1:1:983:0:0:2533122:0] TEvPutResult: TEvPutResult {Id# [1:1:983:0:0:2533122:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 984 SEND TEvPut with key [1:1:984:0:0:347759:0] TEvPutResult: TEvPutResult {Id# [1:1:984:0:0:347759:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 985 SEND TEvPut with key [1:1:985:0:0:1862506:0] TEvPutResult: TEvPutResult {Id# [1:1:985:0:0:1862506:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 986 SEND TEvPut with key [1:1:986:0:0:101043:0] TEvPutResult: TEvPutResult {Id# [1:1:986:0:0:101043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 987 SEND TEvPut with key [1:1:987:0:0:672278:0] TEvPutResult: TEvPutResult {Id# [1:1:987:0:0:672278:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 988 SEND TEvPut with key [1:1:988:0:0:2042425:0] TEvPutResult: TEvPutResult {Id# [1:1:988:0:0:2042425:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 989 SEND TEvPut with key [1:1:989:0:0:1201477:0] TEvPutResult: TEvPutResult {Id# [1:1:989:0:0:1201477:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 990 SEND TEvPut with key [1:1:990:0:0:1724337:0] TEvPutResult: TEvPutResult {Id# [1:1:990:0:0:1724337:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 991 SEND TEvPut with key [1:1:991:0:0:2174403:0] TEvPutResult: TEvPutResult {Id# [1:1:991:0:0:2174403:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 992 SEND TEvPut with key [1:1:992:0:0:193000:0] TEvPutResult: TEvPutResult {Id# [1:1:992:0:0:193000:0] Status# OK 
StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 993 SEND TEvPut with key [1:1:993:0:0:618508:0] TEvPutResult: TEvPutResult {Id# [1:1:993:0:0:618508:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 994 SEND TEvPut with key [1:1:994:0:0:2278246:0] TEvPutResult: TEvPutResult {Id# [1:1:994:0:0:2278246:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 995 SEND TEvPut with key [1:1:995:0:0:2001881:0] TEvPutResult: TEvPutResult {Id# [1:1:995:0:0:2001881:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 996 SEND TEvPut with key [1:1:996:0:0:1759634:0] TEvPutResult: TEvPutResult {Id# [1:1:996:0:0:1759634:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 997 SEND TEvPut with key [1:1:997:0:0:2469234:0] TEvPutResult: TEvPutResult {Id# [1:1:997:0:0:2469234:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 998 SEND TEvPut with key [1:1:998:0:0:1329395:0] TEvPutResult: TEvPutResult {Id# [1:1:998:0:0:1329395:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Step = 999 SEND TEvPut with key [1:1:999:0:0:1243807:0] TEvPutResult: TEvPutResult {Id# [1:1:999:0:0:1243807:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Starting nodes Start compaction 1 Start checking >> TSchemeShardTest::ConsistentCopyTableToDeletedPath [GOOD] >> TSchemeShardTest::CopyIndexedTable >> TSchemeShardTest::UpdateChannelsBindingSolomonStorageConfig [GOOD] >> TSchemeShardTest::RejectAlterSolomon >> TSchemeShardTest::AlterTableSettings [GOOD] >> TSchemeShardTest::AssignBlockStoreVolume >> KqpTypes::QuerySpecialTypes [GOOD] >> KqpTypes::DyNumberCompare >> TSchemeShardTest::CreateIndexedTableAndForceDropSimultaneously [GOOD] >> TSchemeShardTest::CreateTableWithUniformPartitioning >> TSchemeShardTest::AlterTableDropColumnSplitThenReCreate [GOOD] >> TSchemeShardTest::AlterTableById >> TSchemeShardTest::RejectAlterSolomon [GOOD] >> TSchemeShardTest::SimultaneousDropForceDrop >> TSchemeShardTest::CopyTableWithAlterConfig [GOOD] >> TSchemeShardTest::CopyTableOmitFollowers >> TSchemeShardTest::DropTable [GOOD] >> TSchemeShardTest::DropTableById >> TSchemeShardTest::AssignBlockStoreVolume [GOOD] >> TSchemeShardTest::AssignBlockStoreVolumeDuringAlter >> TSchemeShardTest::DropPQ [GOOD] >> TSchemeShardTest::DropPQAbort >> TSchemeShardTest::MultipleColumnFamilies [GOOD] >> TSchemeShardTest::MultipleColumnFamiliesWithStorage >> Cdc::Drop[TopicRunner] [GOOD] >> Cdc::DropColumn >> KqpLimits::DatashardProgramSize+useSink [GOOD] >> KqpLimits::DatashardProgramSize-useSink >> KqpStats::RequestUnitForSuccessExplicitPrepare >> TSchemeShardTest::AlterTableById [GOOD] >> TSchemeShardTest::AlterTableConfig >> TSchemeShardTest::CopyIndexedTable [GOOD] >> TSchemeShardTest::CopyTable >> TSchemeShardTest::AssignBlockStoreVolumeDuringAlter [GOOD] >> TSchemeShardTest::AssignBlockStoreCheckVersionInAlter >> TSchemeShardTest::SimultaneousDropForceDrop [GOOD] >> TSchemeShardTest::RejectSystemViewPath >> TSchemeShardTest::DropTableById [GOOD] >> TSchemeShardTest::DropPQFail |76.6%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeShardTest::CopyTableOmitFollowers [GOOD] >> TSchemeShardTest::CreateIndexedTableAfterBackup >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactAfterDrop [GOOD] >> TSchemeShardTest::CreateTableWithUniformPartitioning [GOOD] >> TSchemeShardTest::CreateTableWithSplitBoundaries >> KqpExplain::UpdateSecondaryConditionalPrimaryKey+UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalPrimaryKey-UseSink >> TSchemeShardTest::RejectSystemViewPath [GOOD] >> TSchemeShardTest::SplitKey [GOOD] >> TSchemeShardTest::SplitAlterCopy >> TTopicReaderTests::TestRun_ReadOneMessage [GOOD] >> TTopicReaderTests::TestRun_ReadTwoMessages_With_Limit_1 >> TSchemeShardTest::AssignBlockStoreCheckVersionInAlter [GOOD] >> TSchemeShardTest::AssignBlockStoreCheckFillGenerationInAlter >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCleanIndex [GOOD] >> TSchemeShardTest::CopyTable [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentChanges >> Cdc::NaN[TopicRunner] [GOOD] >> Cdc::RacyRebootAndSplitWithTxInflight >> TSchemeShardTest::MultipleColumnFamiliesWithStorage [GOOD] >> TSchemeShardTest::ParallelModifying >> DataShardSnapshots::LockedWriteDistributedCommitAborted-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict+UseSink >> KqpExplain::UpdateSecondaryConditional+UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditional-UseSink >> TSchemeShardTest::AlterTableConfig [GOOD] >> TSchemeShardTest::AlterTableCompactionPolicy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactAfterDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:16.192711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:16.192807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:16.192844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:16.192879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:16.192924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:16.192953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:16.193008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:16.193112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: 
Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:16.193848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:16.194226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:16.298023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:16.298107Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:16.324000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:16.324545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:16.324724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:16.331712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:16.332278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:16.333012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:16.333317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:16.337069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:16.337272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:16.338508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:16.338571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:16.338712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:16.338759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:16.338805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:16.338902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.346205Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:16.497462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" 
OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:16.497679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.497904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:16.497958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:16.498178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:16.498273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:16.501814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:16.502016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:16.502223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.502275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:16.502310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:16.502345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:16.504853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.504917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:16.504957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:16.509929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.510007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.510051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:16.510121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:16.513297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:16.515857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:16.516065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:16.517016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:16.517146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:16.517200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:16.517539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:16.517596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:16.517796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:16.517891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:16.520193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:16.520258Z node 1 :FLAT_TX_SCHEMESHARD ... 
rId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940 2025-06-25T14:29:42.231393Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-25T14:29:42.231476Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:1 with partCount# 1, rowCount# 100, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:18.000000Z at schemeshard 72057594046678944 2025-06-25T14:29:42.231552Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 1 out of 1 partitions 2025-06-25T14:29:42.231641Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:29:42.243443Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:29:42.243523Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:29:42.243553Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:29:42.603416Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:314:2299]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-25T14:29:42.603578Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:42.603611Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:42.603696Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:129:2153], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:42.603728Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:42.649051Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:92: Operation queue wakeup 2025-06-25T14:29:42.649157Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_compaction.cpp:31: [BackgroundCompaction] [Start] Compacting for pathId# [OwnerId: 72057594046678944, LocalPathId: 2], datashard# 72075186233409546, compactionInfo# {72057594046678944:1, SH# 1, Rows# 100, Deletes# 0, Compaction# 1970-01-01T00:00:18.000000Z}, next wakeup in# 0.000000s, rate# 1, in queue# 1 shards, waiting after compaction# 0 shards, running# 0 shards at schemeshard 72057594046678944 2025-06-25T14:29:42.649250Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:84: Operation queue set wakeup after delta# 30 seconds 2025-06-25T14:29:42.649407Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553210, Sender [3:129:2153], Recipient [3:314:2299]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046678944 LocalId: 2 } CompactSinglePartedShards: true 
2025-06-25T14:29:42.649542Z node 3 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 7 of 72075186233409546 tableId# 2 localTid# 1001, requested from [3:129:2153], partsCount# 1, memtableSize# 0, memtableWaste# 0, memtableRows# 0 2025-06-25T14:29:42.650286Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186233409546, table# 1001, finished edge# 6, ts 1970-01-01T00:00:19.151000Z 2025-06-25T14:29:42.650340Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186233409546, table# 1001, finished edge# 6, front# 7 2025-06-25T14:29:42.659174Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435080, Sender [3:1265:3198], Recipient [3:314:2299]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvAsyncTableStats 2025-06-25T14:29:42.659281Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409546, FollowerId 0, tableId 2 2025-06-25T14:29:42.664706Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:314:2299], Recipient [3:129:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409546 TableLocalId: 2 Generation: 2 Round: 6 TableStats { DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 19 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 24160 Memory: 124232 Storage: 14156 } ShardState: 2 UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 41 TableOwnerId: 72057594046678944 FollowerId: 0 2025-06-25T14:29:42.664778Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:29:42.664831Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 2.416 2025-06-25T14:29:42.664957Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 19 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:29:42.665009Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-25T14:29:42.667104Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:304:2291], 
Recipient [3:314:2299]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-25T14:29:42.673950Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186233409546, table# 1001, finished edge# 7, ts 1970-01-01T00:00:20.152000Z 2025-06-25T14:29:42.674035Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186233409546, table# 1001, finished edge# 7, front# 7 2025-06-25T14:29:42.674076Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186233409546, table# 1001 sending TEvCompactTableResult to# [3:129:2153]pathId# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:29:42.674331Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553211, Sender [3:314:2299], Recipient [3:129:2153]: NKikimrTxDataShard.TEvCompactTableResult TabletId: 72075186233409546 PathId { OwnerId: 72057594046678944 LocalId: 2 } Status: OK 2025-06-25T14:29:42.674379Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5031: StateWork, processing event TEvDataShard::TEvCompactTableResult 2025-06-25T14:29:42.674451Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:84: Operation queue set wakeup after delta# 0 seconds 2025-06-25T14:29:42.674762Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_compaction.cpp:112: [BackgroundCompaction] [Finished] Compaction completed for pathId# [OwnerId: 72057594046678944, LocalPathId: 2], datashard# 72075186233409546, shardIdx# 72057594046678944:1 in# 4 ms, with status# 0, next wakeup in# 0.996000s, rate# 1, in queue# 1 shards, waiting after compaction# 0 shards, running# 0 shards at schemeshard 72057594046678944 2025-06-25T14:29:42.677432Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:304:2291], Recipient [3:314:2299]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-25T14:29:42.692934Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:189: Updated last full compaction of tablet# 72075186233409546, tableId# 2, last full compaction# 1970-01-01T00:00:20.152000Z 2025-06-25T14:29:42.746365Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:29:42.746444Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:29:42.746476Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 1 2025-06-25T14:29:42.746546Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-25T14:29:42.746592Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-25T14:29:42.746703Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 13940 row count 100 2025-06-25T14:29:42.746779Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940 
2025-06-25T14:29:42.746821Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-25T14:29:42.746912Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:1 with partCount# 1, rowCount# 100, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:19.000000Z at schemeshard 72057594046678944 2025-06-25T14:29:42.747006Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 1 out of 1 partitions 2025-06-25T14:29:42.747097Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:29:42.757662Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:29:42.757753Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:29:42.757783Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 >> TSchemeShardTest::AssignBlockStoreCheckFillGenerationInAlter [GOOD] >> TSchemeShardTest::BlockStoreVolumeLimits >> TSchemeShardTest::CreateIndexedTableAfterBackup [GOOD] >> TSchemeShardTest::CreateFinishedInDescription >> TSchemeShardTest::CreateTableWithSplitBoundaries [GOOD] >> TSchemeShardTest::CreateTableWithConfig ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCleanIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:234:2060] recipient: [1:228:2144] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:234:2060] recipient: [1:228:2144] Leader for TabletID 72057594046678944 is [1:245:2155] sender: [1:246:2060] recipient: [1:228:2144] 2025-06-25T14:27:39.537924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:27:39.538008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:27:39.538046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:27:39.538369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:27:39.538425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:27:39.538463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:27:39.538521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 
0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:27:39.538594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:27:39.539310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:27:39.539672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:27:39.618352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:27:39.618414Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:39.630288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:27:39.630774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:27:39.631151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:27:39.650273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:27:39.650769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:27:39.651489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:27:39.651781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:27:39.656001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:27:39.656156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:27:39.657283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:27:39.657348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:27:39.657530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:27:39.657571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:27:39.657614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:27:39.657761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.665196Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:245:2155] sender: [1:358:2060] 
recipient: [1:17:2064] 2025-06-25T14:27:39.807461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:27:39.807697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.807911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:27:39.807963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:27:39.808170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:27:39.808283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:39.813896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:27:39.814162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:27:39.814400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.814474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:27:39.814561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:27:39.814598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:27:39.817501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.817592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:27:39.817631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:27:39.821468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.821538Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:27:39.821613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:27:39.821664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:27:39.825081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:27:39.828884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:27:39.829093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:27:39.830122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:27:39.830293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 253 RawX2: 4294969456 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:27:39.830341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:27:39.830619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:27:39.830667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:27:39.830824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:27:39.830897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:27:39.833828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:27:39.833884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_s ... 
impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:40.192602Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:40.192676Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [7:253:2160], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:40.192706Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:40.598618Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:40.598690Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:40.598762Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [7:253:2160], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:40.598789Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:41.007192Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:41.007257Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:41.007320Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [7:253:2160], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:41.007347Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:41.395143Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:41.395221Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:41.395303Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [7:253:2160], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:41.395337Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:41.798348Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:41.798423Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event 
TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:41.798496Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [7:253:2160], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:41.798524Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:42.183271Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:42.183352Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:42.183431Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [7:253:2160], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:42.183459Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:42.539944Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:42.540011Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:42.540066Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [7:253:2160], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:42.540086Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:42.927204Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:42.927254Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:42.927349Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [7:253:2160], Recipient [7:253:2160]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:42.927373Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:42.961206Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [7:1098:2845], Recipient [7:253:2160]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/tmp/TempTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-25T14:29:42.961276Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:29:42.961404Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/tmp/TempTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:29:42.961639Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/tmp/TempTable" took 227us result status StatusPathDoesNotExist 2025-06-25T14:29:42.961805Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/tmp/TempTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/tmp/TempTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:29:42.962221Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [7:1099:2846], Recipient [7:253:2160]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/tmp" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-25T14:29:42.962267Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:29:42.962406Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/tmp" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:29:42.962571Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/tmp" took 167us result status StatusPathDoesNotExist 2025-06-25T14:29:42.962688Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/tmp\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/tmp" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:29:42.963092Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [7:1100:2847], Recipient [7:253:2160]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/tmp/TempTable/ValueIndex" Options { ShowPrivateTable: true } 2025-06-25T14:29:42.963137Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event 
TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:29:42.963217Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/tmp/TempTable/ValueIndex" Options { ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:29:42.963366Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/tmp/TempTable/ValueIndex" took 153us result status StatusPathDoesNotExist 2025-06-25T14:29:42.963489Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/tmp/TempTable/ValueIndex\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/tmp/TempTable/ValueIndex" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NewlineDelimited [GOOD] >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NoDelimiter >> TSchemeShardTest::SplitAlterCopy [GOOD] >> TSchemeShardTest::TopicReserveSize >> KqpLimits::BigParameter [GOOD] >> KqpLimits::AffectedShardsLimit >> KqpExplain::LimitOffset [GOOD] >> KqpExplain::MultiUsedStage >> KqpQuery::QueryCache [GOOD] >> KqpQuery::QueryCacheInvalidate >> TSchemeShardTest::AlterTableCompactionPolicy [GOOD] >> TSchemeShardTest::AlterPersQueueGroup >> TSchemeShardTest::CreateFinishedInDescription [GOOD] >> TSchemeShardTest::CreateBlockStoreVolume >> TSchemeShardTest::CreateTableWithConfig [GOOD] >> TSchemeShardTest::CreateTableWithNamedConfig >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowBySize [GOOD] >> AsyncIndexChangeExchange::ShouldNotReorderChangesOnRace >> KqpStats::JoinNoStatsYql >> TSchemeShardTest::CopyTableAndConcurrentChanges [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentSplit >> TSchemeShardTest::BlockStoreVolumeLimits [GOOD] >> TSchemeShardTest::BlockStoreNonreplVolumeLimits >> TSchemeShardTest::DropPQFail [GOOD] >> TSchemeShardTest::ManyDirs >> TSchemeShardTest::CreateBlockStoreVolume [GOOD] >> TSchemeShardTest::CreateBlockStoreVolumeWithVolumeChannelsProfiles >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotReadLockedWrites+UseSink >> KqpQuery::UdfTerminate >> KqpTypes::DyNumberCompare [GOOD] >> KqpTypes::SelectNull >> KqpQuery::QueryClientTimeout [GOOD] >> KqpQuery::QueryClientTimeoutPrecompiled >> TSchemeShardTest::TopicReserveSize [GOOD] >> TSchemeShardTest::TopicWithAutopartitioningReserveSize >> TSchemeShardTest::CreateBlockStoreVolumeWithVolumeChannelsProfiles [GOOD] >> TSchemeShardTest::CreateBlockStoreVolumeWithNonReplicatedPartitions >> KqpLimits::DatashardProgramSize-useSink [GOOD] >> KqpLimits::DatashardReplySize >> TSchemeShardTest::CreateTableWithNamedConfig [GOOD] >> TSchemeShardTest::CreateTableWithUnknownNamedConfig >> TSchemeShardTest::BlockStoreNonreplVolumeLimits [GOOD] >> TSchemeShardTest::BlockStoreSystemVolumeLimits 
>> TSchemeShardTest::CreateBlockStoreVolumeWithNonReplicatedPartitions [GOOD] >> TSchemeShardTest::CreateAlterBlockStoreVolumeWithInvalidPoolKinds >> TSchemeShardTest::CreateTableWithUnknownNamedConfig [GOOD] >> TSchemeShardTest::CreatePersQueueGroup >> TSchemeShardTest::CopyTableAndConcurrentSplit [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentMerge >> KqpStats::RequestUnitForSuccessExplicitPrepare [GOOD] >> KqpStats::RequestUnitForExecute >> TSchemeShardTest::TopicWithAutopartitioningReserveSize [GOOD] >> TSchemeShardTest::BlockStoreSystemVolumeLimits [GOOD] >> TSchemeShardTest::AlterTableWithCompactionStrategies >> TSchemeShardTest::CreateAlterBlockStoreVolumeWithInvalidPoolKinds [GOOD] >> TSchemeShardTest::CreateDropKesus >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedBeforeSplit [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedAfterSplitMerge |76.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::TopicWithAutopartitioningReserveSize [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:32.233656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:32.233759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:32.233801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:32.233833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:32.233874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:32.233903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:32.243443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:32.243607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:32.244388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:32.244781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:32.464992Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:32.465047Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:32.480326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:32.480702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:32.480847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:32.486311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:32.486602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:32.487214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.487444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:32.490591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:32.490765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:32.491824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:32.491884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:32.492038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:32.492085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:32.492121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:32.492193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.499916Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:29:32.684799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:32.684987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.685148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:32.685199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:32.685423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:32.685506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:32.693167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.693338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:32.693493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.693546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:32.693596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:32.693630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:32.695249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.695312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:32.695352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:32.696866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.696906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.696944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:32.696989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:32.704266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 
2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:32.712995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:32.713167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:32.714021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.714138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:32.714201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:32.714449Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:32.714496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:32.714643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:32.714716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:32.716733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:32.716778Z node 1 :FLAT_TX_SCHEMESHARD ... 
075186233409549, partId: 0 2025-06-25T14:29:48.568626Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409549 Status: COMPLETE TxId: 104 Step: 5000005 2025-06-25T14:29:48.568669Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:623: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409549 Status: COMPLETE TxId: 104 Step: 5000005 2025-06-25T14:29:48.568705Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:264: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-25T14:29:48.568729Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:628: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-25T14:29:48.568904Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 128 -> 240 2025-06-25T14:29:48.569129Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-25T14:29:48.576734Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:29:48.576859Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:29:48.576964Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:29:48.577029Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:29:48.577080Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:29:48.577130Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:29:48.577243Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:48.577268Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:29:48.577465Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:48.577502Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [13:208:2208], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-25T14:29:48.577724Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 
72057594046678944 2025-06-25T14:29:48.577764Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-25T14:29:48.577936Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:29:48.577993Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:29:48.578049Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:29:48.578091Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:29:48.578134Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-25T14:29:48.578180Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:29:48.578234Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:29:48.578271Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:29:48.578482Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 10 2025-06-25T14:29:48.578544Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 1, subscribers: 0 2025-06-25T14:29:48.578595Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-25T14:29:48.579076Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:29:48.579134Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:29:48.579161Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:29:48.579212Z node 13 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T14:29:48.579261Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-25T14:29:48.579361Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-25T14:29:48.586220Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 
TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-25T14:29:48.610220Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-25T14:29:48.610283Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-25T14:29:48.610771Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-25T14:29:48.610881Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:29:48.610935Z node 13 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [13:1455:3252] TestWaitNotification: OK eventTxId 104 2025-06-25T14:29:48.611517Z node 13 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:48.611728Z node 13 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 248us result status StatusSuccess 2025-06-25T14:29:48.612374Z node 13 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 4 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 6 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 7 PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "A" } Status: Inactive ParentPartitionIds: 0 ChildPartitionIds: 3 ChildPartitionIds: 4 } Partitions { PartitionId: 2 TabletId: 72075186233409549 KeyRange { FromBound: "A" } Status: Inactive ParentPartitionIds: 0 ChildPartitionIds: 5 } Partitions { PartitionId: 3 TabletId: 72075186233409550 KeyRange { ToBound: "0" } Status: Active ParentPartitionIds: 1 } Partitions { PartitionId: 4 TabletId: 72075186233409551 KeyRange { FromBound: "0" ToBound: "A" } Status: Inactive ParentPartitionIds: 1 ChildPartitionIds: 5 } Partitions { PartitionId: 5 TabletId: 72075186233409552 KeyRange { FromBound: "0" } Status: Active ParentPartitionIds: 2 ParentPartitionIds: 4 } AlterVersion: 4 BalancerTabletID: 72075186233409547 NextPartitionId: 6 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 
50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 494 AccountSize: 494 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 6 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTest::ParallelModifying [GOOD] >> TSchemeShardTest::PQGroupExplicitChannels >> Cdc::DropColumn [GOOD] >> Cdc::DropIndex >> Cdc::SupportedTypes [GOOD] >> Cdc::SplitTopicPartition_TopicAutoPartitioning >> TSchemeShardTest::AlterTableWithCompactionStrategies [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-false >> TSchemeShardTest::CreateDropKesus [GOOD] >> TSchemeShardTest::CreateAlterKesus >> TSchemeShardTest::CopyTableAndConcurrentMerge [GOOD] >> TSchemeShardTest::CopyTableAndConcurrentSplitMerge >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression >> TSchemeShardTest::CreatePersQueueGroup [GOOD] >> TSchemeShardTest::CreatePersQueueGroupWithKeySchema >> KqpQuery::QueryCacheInvalidate [GOOD] >> KqpQuery::Pure >> TSchemeShardTest::PQGroupExplicitChannels [GOOD] >> TSchemeShardTest::ReadOnlyMode >> TSchemeShardTest::CreateAlterKesus [GOOD] >> TSchemeShardTest::CreateDropSolomon >> KqpExplain::UpdateOn+UseSink >> KqpExplain::MultiUsedStage [GOOD] >> KqpExplain::MergeConnection >> TSchemeShardTest::AlterPersQueueGroup [GOOD] >> TSchemeShardTest::AlterPersQueueGroupWithKeySchema >> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict+UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnSplit+UseSink >> ReadSessionImplTest::DataReceivedCallbackReal [GOOD] >> KqpLimits::AffectedShardsLimit [GOOD] >> KqpLimits::CancelAfterRoTx >> KqpExplain::UpdateSecondaryConditionalPrimaryKey-UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalSecondaryKey+UseSink >> KqpStats::JoinNoStatsYql [GOOD] >> KqpStats::JoinNoStatsScan >> TSchemeShardTest::BackupBackupCollection-WithIncremental-false [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true >> KqpExplain::UpdateSecondaryConditional-UseSink [GOOD] >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey+UseSink >> DataShardSnapshots::MvccSnapshotReadLockedWrites+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotReadLockedWrites-UseSink >> TSchemeShardTest::AlterPersQueueGroupWithKeySchema [GOOD] >> TSchemeShardTest::AlterBlockStoreVolume >> KqpTypes::SelectNull [GOOD] >> KqpTypes::MultipleCurrentUtcTimestamp >> TSchemeShardTest::CopyTableAndConcurrentSplitMerge [GOOD] >> TSchemeShardTest::CopyTableForBackup >> TSchemeShardTest::ReadOnlyMode [GOOD] >> TSchemeShardTest::PathErrors >> TSchemeShardTest::CreatePersQueueGroupWithKeySchema [GOOD] >> TSchemeShardTest::CreateTableWithCompactionStrategies >> KqpQuery::UdfTerminate [GOOD] >> KqpQuery::UdfMemoryLimit ------- 
[TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::DataReceivedCallbackReal [GOOD] Test command err: 2025-06-25T14:29:20.533373Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.533396Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.533414Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.533760Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.544427Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.544615Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.545232Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.545604Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.545708Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:20.545798Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.545849Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-25T14:29:20.546560Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.546582Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.546603Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.546915Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.547473Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.547579Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.547737Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.548098Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.549501Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:20.549594Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.549647Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-25T14:29:20.551141Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.551159Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.551182Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.551494Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-25T14:29:20.552028Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.552140Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.552339Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.553042Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.553184Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:20.553280Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.553322Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-25T14:29:20.554165Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.554186Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.554203Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.564530Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.565259Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.565373Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.568492Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.569982Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.572010Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:20.572272Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.572338Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-25T14:29:20.573741Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.573761Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.573779Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.574041Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.574469Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.574578Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.580016Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.580578Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.581460Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:20.582129Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2025-06-25T14:29:20.582172Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-25T14:29:20.583454Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.583472Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.583486Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.583828Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.584467Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.584584Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.584963Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.585346Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.585544Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:20.585609Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.585652Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-25T14:29:20.586485Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.586523Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.586543Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.586837Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:20.587359Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.587656Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.587890Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.588767Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.588959Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:20.589055Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.589092Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-25T14:29:20.589978Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.590049Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.590107Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:20.590359Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-25T14:29:20.604617Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:20.604936Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.605252Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:20.606738Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:20.607152Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:20.607229Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:20.607263Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-25T14:29:20.647311Z :ReadSession INFO: Random seed for debugging is 1750861760647285 2025-06-25T14:29:21.200585Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894003886835139:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:21.200657Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:29:21.340356Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894006379139748:2163];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:21.340410Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;p ... ed/user_1_1_1842903698426148069_v1 grpc read done: success# 1, data# { read { } } 2025-06-25T14:29:40.585680Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/user session shared/user_1_1_1842903698426148069_v1 got read request: guid# 6f35e760-d6c961b2-7e758ef7-fa892a46 GOT MESSAGE: Message { Data: "message3" Partition stream id: 1 Cluster: "dc1". Topic: "test-topic" Partition: 0 PartitionKey: "" Information: { Offset: 2 SeqNo: 3 MessageGroupId: "test-message-group-id" CreateTime: 2025-06-25T14:29:40.572000Z WriteTime: 2025-06-25T14:29:40.574000Z Ip: "ipv6:[::1]:55756" UncompressedSize: 8 Meta: { "logtype": "unknown", "ident": "unknown", "server": "ipv6:[::1]:55756" } } } 2025-06-25T14:29:40.588472Z :DEBUG: [/Root] [/Root] [3c0f5861-4c0e8733-8bf9bd5-ecdfecef] [dc1] Commit offsets [2, 3). Partition stream id: 1 2025-06-25T14:29:40.588726Z :DEBUG: [/Root] [/Root] [3c0f5861-4c0e8733-8bf9bd5-ecdfecef] [dc1] The application data is transferred to the client. 
Number of messages 1, size 8 bytes 2025-06-25T14:29:40.589061Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_1842903698426148069_v1 grpc read done: success# 1, data# { commit { offset_ranges { assign_id: 1 start_offset: 2 end_offset: 3 } } } 2025-06-25T14:29:40.589254Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer shared/user session shared/user_1_1_1842903698426148069_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) committing to position 3 prev 2 end 3 by cookie 4 2025-06-25T14:29:40.589673Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:29:40.589710Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:29:40.589836Z node 2 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user offset is set to 3 (startOffset 0) session shared/user_1_1_1842903698426148069_v1 2025-06-25T14:29:40.589982Z node 2 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:29:40.593669Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 3 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:29:40.593731Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:29:40.593784Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:29:40.593816Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 4 2025-06-25T14:29:40.594075Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/user session shared/user_1_1_1842903698426148069_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 4 } 2025-06-25T14:29:40.594141Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer shared/user session shared/user_1_1_1842903698426148069_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) commit done to position 3 endOffset 3 with cookie 4 2025-06-25T14:29:40.594193Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer shared/user session shared/user_1_1_1842903698426148069_v1 replying for commits: assignId# 1, from# 4, to# 4, offset# 3 2025-06-25T14:29:40.594651Z :DEBUG: [/Root] [/Root] [3c0f5861-4c0e8733-8bf9bd5-ecdfecef] [dc1] Committed response: offset_ranges { assign_id: 1 start_offset: 2 end_offset: 3 } 2025-06-25T14:29:40.676401Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|b081fb31-8b7e1ce5-f4ed481-ad84ef36_0] Write session will now close 2025-06-25T14:29:40.676487Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|b081fb31-8b7e1ce5-f4ed481-ad84ef36_0] Write session: aborting 2025-06-25T14:29:40.677045Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|b081fb31-8b7e1ce5-f4ed481-ad84ef36_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:29:40.677097Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|b081fb31-8b7e1ce5-f4ed481-ad84ef36_0] Write session: destroy 2025-06-25T14:29:40.682183Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message-group-id|b081fb31-8b7e1ce5-f4ed481-ad84ef36_0 grpc read done: success: 0 data: 2025-06-25T14:29:40.682213Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message-group-id|b081fb31-8b7e1ce5-f4ed481-ad84ef36_0 grpc read failed 2025-06-25T14:29:40.682244Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message-group-id|b081fb31-8b7e1ce5-f4ed481-ad84ef36_0 grpc closed 2025-06-25T14:29:40.682264Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message-group-id|b081fb31-8b7e1ce5-f4ed481-ad84ef36_0 is DEAD 2025-06-25T14:29:40.682917Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:29:40.683311Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [1:7519894085491216455:2540] destroyed 2025-06-25T14:29:40.683404Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:29:42.544233Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:29:43.027045Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/user session shared/user_1_1_1842903698426148069_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 5 from offset 3 2025-06-25T14:29:47.544612Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:29:50.588441Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/user session shared/user_1_1_1842903698426148069_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 6 from offset 3 2025-06-25T14:29:50.684692Z :INFO: [/Root] [/Root] [3c0f5861-4c0e8733-8bf9bd5-ecdfecef] Closing read session. Close timeout: 0.000000s 2025-06-25T14:29:50.684776Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:3 2025-06-25T14:29:50.684837Z :INFO: [/Root] [/Root] [3c0f5861-4c0e8733-8bf9bd5-ecdfecef] Counters: { Errors: 0 CurrentSessionLifetimeMs: 16764 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:29:50.686177Z :NOTICE: [/Root] [/Root] [3c0f5861-4c0e8733-8bf9bd5-ecdfecef] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:29:50.686241Z :DEBUG: [/Root] [/Root] [3c0f5861-4c0e8733-8bf9bd5-ecdfecef] [dc1] Abort session to cluster 2025-06-25T14:29:50.687371Z :NOTICE: [/Root] [/Root] [3c0f5861-4c0e8733-8bf9bd5-ecdfecef] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:29:50.687401Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_1842903698426148069_v1 grpc read done: success# 0, data# { } 2025-06-25T14:29:50.687452Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_1_1_1842903698426148069_v1 grpc read failed 2025-06-25T14:29:50.687493Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_1_1_1842903698426148069_v1 grpc closed 2025-06-25T14:29:50.687544Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_1_1_1842903698426148069_v1 is DEAD 2025-06-25T14:29:50.692871Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_1_1_1842903698426148069_v1 2025-06-25T14:29:50.692919Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [1:7519894059721412245:2470] destroyed 2025-06-25T14:29:50.692969Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_1842903698426148069_v1 2025-06-25T14:29:50.692394Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [1:7519894059721412242:2467] disconnected; active server actors: 1 2025-06-25T14:29:50.700435Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [1:7519894059721412242:2467] client user disconnected session shared/user_1_1_1842903698426148069_v1 2025-06-25T14:29:51.142047Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [1:7519894132735857176:2619] TxId: 281474976715720. Ctx: { TraceId: 01jykqxfje9ss43naxf9c4edyh, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Njg3NmJkYS03MGYzMjg1My0yNzNjMWE2Ny00MGZhMmVjZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 2 2025-06-25T14:29:51.142050Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715719, task: 1, CA Id [1:7519894132735857181:2625]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-25T14:29:51.142549Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519894132735857183:2619], TxId: 281474976715720, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=Njg3NmJkYS03MGYzMjg1My0yNzNjMWE2Ny00MGZhMmVjZg==. TraceId : 01jykqxfje9ss43naxf9c4edyh. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7519894132735857176:2619], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2025-06-25T14:29:51.170492Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715719, task: 1, CA Id [1:7519894132735857181:2625]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:29:51.221229Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715719, task: 1, CA Id [1:7519894132735857181:2625]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:29:51.294550Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715719, task: 1, CA Id [1:7519894132735857181:2625]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> TSchemeShardTest::CreateDropSolomon [GOOD] >> TSchemeShardTest::CreateAlterDropSolomon >> Cdc::RacyRebootAndSplitWithTxInflight [GOOD] >> Cdc::RacyActivateAndEnqueue >> PersQueueSdkReadSessionTest::StopResumeReadingData [GOOD] >> ReadSessionImplTest::CreatePartitionStream [GOOD] >> ReadSessionImplTest::BrokenCompressedData [GOOD] >> ReadSessionImplTest::CommitOffsetTwiceIsError [GOOD] >> ReadSessionImplTest::DataReceivedCallback >> TSchemeShardTest::CreateTableWithCompactionStrategies [GOOD] >> TSchemeShardTest::PathErrors [GOOD] >> TSchemeShardTest::NestedDirs >> TSchemeShardTest::CreateWithIntermediateDirs >> TSchemeShardTest::AlterBlockStoreVolume [GOOD] >> TSchemeShardTest::AlterBlockStoreVolumeWithNonReplicatedPartitions >> KqpStats::RequestUnitForExecute [GOOD] >> KqpStats::StatsProfile >> TSchemeShardTest::CreateAlterDropSolomon [GOOD] >> AsyncIndexChangeExchange::ShouldNotReorderChangesOnRace [GOOD] >> Cdc::AreJsonsEqualReturnsTrueOnEqual [GOOD] >> Cdc::AreJsonsEqualReturnsFalseOnDifferent [GOOD] >> Cdc::AreJsonsEqualFailsOnWildcardInArray [GOOD] >> Cdc::AlterViaTopicService >> KqpStats::DeferredEffects+UseSink >> TSchemeShardTest::AlterBlockStoreVolumeWithNonReplicatedPartitions [GOOD] >> TSchemeShardTest::AdoptDropSolomon ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::CreateAlterDropSolomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:32.581121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:32.581227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:32.581382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:32.581415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:32.581467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:32.581495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type 
TxSplitTablePartition, limit 10000 2025-06-25T14:29:32.581552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:32.581621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:32.582371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:32.582733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:32.696464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:32.696523Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:32.732830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:32.733224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:32.733367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:32.750117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:32.750430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:32.751017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.751255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:32.759005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:32.759188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:32.760200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:32.760252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:32.760408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:32.762865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:32.762944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:32.763029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 
2025-06-25T14:29:32.777085Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:29:32.970342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:32.970583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.970787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:32.970837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:32.971057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:32.971142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:32.978608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.978790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:32.978997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.979055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:32.979090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:32.979124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:32.984698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.984776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:32.984832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:32.986497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.986542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.986580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:32.986628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:33.006585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:33.013036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:33.013223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:33.014357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:33.014499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:33.014550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:33.014842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:33.014889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:33.015046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:33.015117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:33.018955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:33.019009Z node 1 :FLAT_TX_SCHEMESHARD ... 
p:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:29:54.556588Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-25T14:29:54.559521Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:29:54.559584Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:29:54.559611Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:29:54.559637Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:29:54.559778Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:29:54.560035Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:29:54.560436Z node 16 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 Forgetting tablet 72075186233409548 2025-06-25T14:29:54.561556Z node 16 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-25T14:29:54.561768Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:29:54.562086Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:29:54.563046Z node 16 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-25T14:29:54.563255Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:54.563484Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409546 2025-06-25T14:29:54.563728Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:29:54.563880Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:29:54.564760Z node 16 :HIVE INFO: 
tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409549 2025-06-25T14:29:54.564959Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:29:54.565123Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 Forgetting tablet 72075186233409547 2025-06-25T14:29:54.566004Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:29:54.566070Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:29:54.566170Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:54.568199Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:29:54.568283Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:29:54.568999Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:29:54.569035Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:29:54.570180Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:29:54.570220Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-25T14:29:54.570292Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:29:54.570342Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:29:54.570407Z node 16 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:29:54.570857Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:29:54.570927Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:29:54.571448Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:29:54.571567Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: 
got EvNotifyTxCompletionResult 2025-06-25T14:29:54.571626Z node 16 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [16:536:2488] TestWaitNotification: OK eventTxId 103 2025-06-25T14:29:54.572262Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Solomon" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:54.572543Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Solomon" took 326us result status StatusPathDoesNotExist 2025-06-25T14:29:54.572743Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Solomon\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Solomon" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted wait until 72075186233409549 is deleted 2025-06-25T14:29:54.573247Z node 16 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-25T14:29:54.573313Z node 16 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-25T14:29:54.573358Z node 16 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 2025-06-25T14:29:54.573414Z node 16 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409549 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 Deleted tabletId 72075186233409549 2025-06-25T14:29:54.573926Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:54.574160Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 252us result status StatusSuccess 2025-06-25T14:29:54.574694Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> ReadSessionImplTest::DataReceivedCallback [GOOD] >> ReadSessionImplTest::CommonHandler [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true [GOOD] >> TSchemeShardTest::CreateWithIntermediateDirs [GOOD] >> TSchemeShardTest::CreateTopicOverDiskSpaceQuotas >> TSchemeShardTest::NestedDirs [GOOD] >> TSchemeShardTest::NewOwnerOnDatabase >> KqpQuery::QueryClientTimeoutPrecompiled [GOOD] >> KqpQuery::QueryExplain >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt64_Reboot [GOOD] >> TSchemeShardTest::AdoptDropSolomon [GOOD] >> TSchemeShardTest::AlterTableAndAfterSplit >> TSchemeShardTest::CreateTopicOverDiskSpaceQuotas [GOOD] >> TSchemeShardTest::CreateSystemColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:30.839668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:30.839758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:30.839798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:30.839854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:30.839890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 
10000 2025-06-25T14:29:30.839926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:30.839979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:30.840046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:30.840768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:30.841098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:30.929277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:30.929328Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:30.936913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:30.937125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:30.937306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:30.944139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:30.944439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:30.945119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:30.945305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:30.947871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:30.948018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:30.949140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:30.949199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:30.949370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:30.949427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:30.949469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:30.949572Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:30.957914Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:31.094100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:31.094306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:31.094493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:31.094557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:31.094761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:31.094830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:31.101074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:31.101261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:31.101470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:31.101523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:31.101559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:31.101592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:31.107404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:31.107469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:31.107512Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:31.109510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:31.109573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:31.109612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:31.109656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:31.117686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:31.119652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:31.119855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:31.120796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:31.120929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:31.120969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:31.121227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:31.121273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:31.121427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:31.121515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:31.123468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-25T14:29:31.123540Z node 1 :FLAT_TX_SCHEMESHARD ... "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 } ChildrenExist: true } Children { Name: "DirB" PathId: 30 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "Table2" PathId: 32 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 29 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:55.555664Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:55.555932Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" took 300us result status StatusSuccess 2025-06-25T14:29:55.556295Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" PathDescription { Self { Name: "Table2" PathId: 32 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false 
IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 4 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 32 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:55.557469Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:55.557684Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" took 240us result status StatusSuccess 2025-06-25T14:29:55.558070Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" PathDescription { Self { Name: "DirB" PathId: 30 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "Table3" PathId: 33 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: 
true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 30 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 30 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:55.558955Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:55.559223Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" took 300us result status StatusSuccess 2025-06-25T14:29:55.559590Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" PathDescription { Self { Name: "Table3" PathId: 33 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 30 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false 
LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 33 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::CommonHandler [GOOD] Test command err: 2025-06-25T14:29:21.195734Z :SpecifyClustersExplicitly INFO: Random seed for debugging is 1750861761195701 2025-06-25T14:29:21.889871Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894004425379249:2169];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:21.890270Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:29:21.998854Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894006139342630:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:22.003861Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:29:22.216540Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:29:22.234006Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017c3/r3tmp/tmpOgw7Yw/pdisk_1.dat 2025-06-25T14:29:22.531433Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:22.531524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:22.553609Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:22.554764Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:22.554818Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:22.582445Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:29:22.582574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:22.586637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10235, node 1 2025-06-25T14:29:22.796941Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0017c3/r3tmp/yandexOo91Sv.tmp 2025-06-25T14:29:22.796968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0017c3/r3tmp/yandexOo91Sv.tmp 2025-06-25T14:29:22.797117Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0017c3/r3tmp/yandexOo91Sv.tmp 2025-06-25T14:29:22.797238Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:22.893968Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:22.928537Z INFO: TTestServer started on Port 4389 GrpcPort 10235 2025-06-25T14:29:23.024551Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4389 PQClient connected to localhost:10235 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:23.281334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 
2025-06-25T14:29:23.398697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-25T14:29:25.843919Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894023319212084:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:25.844495Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894023319212071:2270], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:25.844604Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:25.844643Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894021605249292:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:25.844740Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:25.848585Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894021605249296:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:25.858163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:25.860453Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894021605249328:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:25.860527Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:25.867718Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894021605249307:2617] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:29:25.916278Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894021605249306:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:29:25.919618Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894023319212100:2274], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:29:25.977783Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894021605249394:2680] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:26.032857Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894027614179424:2132] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:26.121647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:26.126769Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894021605249413:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:29:26.127084Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTZiYzMzNzAtOWUzNzUwZDUtYzVjNjIwMGUtNTBkM2QwNzM=, ActorId: [1:7519894021605249287:2297], ActorState: ExecuteState, TraceId: 01jykqwq4yfaf1rg3pq75f7r5a, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:29:26.127436Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 c ... 2_v1 grpc closed 2025-06-25T14:29:52.217342Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_3_1_6454060720168013782_v1 is DEAD 2025-06-25T14:29:52.217872Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_3_1_6454060720168013782_v1 2025-06-25T14:29:52.217926Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7519894129442564835:2501] destroyed 2025-06-25T14:29:52.217979Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_1_6454060720168013782_v1 2025-06-25T14:29:52.218068Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519894129442564832:2498] disconnected; active server actors: 1 2025-06-25T14:29:52.218097Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519894129442564832:2498] client user disconnected session shared/user_3_1_6454060720168013782_v1 2025-06-25T14:29:52.681229Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [3:7519894138032499606:2530] TxId: 281474976720698. Ctx: { TraceId: 01jykqxh2k2dn064d0xe7a25s0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDE2MTZiYzktNWFiMGZmNTAtNjIxNWNjZTQtYmFiMGM1MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 4 2025-06-25T14:29:52.681380Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519894138032499610:2530], TxId: 281474976720698, task: 3. Ctx: { TraceId : 01jykqxh2k2dn064d0xe7a25s0. SessionId : ydb://session/3?node_id=3&id=ZDE2MTZiYzktNWFiMGZmNTAtNjIxNWNjZTQtYmFiMGM1MGQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [3:7519894138032499606:2530], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2025-06-25T14:29:52.748784Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:29:52.748817Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:52.923620Z node 3 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720699. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T14:29:52.923768Z node 3 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [3:7519894138032499615:2537] TxId: 281474976720699. Ctx: { TraceId: 01jykqxhes3myzn0y1cpkxa8n9, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVkYzIyZGQtZWU2MTM4YzAtODgyYjJiMi1kYzc2ZWVmOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T14:29:52.924063Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=YWVkYzIyZGQtZWU2MTM4YzAtODgyYjJiMi1kYzc2ZWVmOA==, ActorId: [3:7519894138032499612:2537], ActorState: ExecuteState, TraceId: 01jykqxhes3myzn0y1cpkxa8n9, Create QueryResponse for error on request, msg: 2025-06-25T14:29:52.925568Z node 3 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jykqxhew1gt7kmb5sgga5trx" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-25T14:29:53.809851Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.809898Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.809951Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:53.810324Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:53.816298Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:53.816649Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.817395Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: 13. Commit offset: 31 2025-06-25T14:29:53.820926Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.820962Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.820995Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:53.821489Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:53.821933Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:53.822086Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.822351Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:53.823566Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:29:53.824280Z :INFO: Error decompressing data: (TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check) 2025-06-25T14:29:53.824398Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-3) 2025-06-25T14:29:53.824599Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:53.824644Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:29:53.824665Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T14:29:53.824721Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 3, size 16 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { DataDecompressionError: "(TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check)" Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-25T14:29:53.828418Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.828451Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.829779Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:53.830185Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:53.830717Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:53.830832Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.831064Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:29:53.831856Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.832046Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:53.832176Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:53.832244Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-25T14:29:53.833819Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 2). Partition stream id: 1 2025-06-25T14:29:53.835880Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.835922Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.835966Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:53.836331Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:53.836738Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:53.836861Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:53.840686Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) 2025-06-25T14:29:53.841514Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:29:53.842124Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:29:53.842314Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-25T14:29:53.842383Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:53.842435Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:53.842478Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-06-25T14:29:53.842632Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-25T14:29:53.842673Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-25T14:29:55.843757Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:55.843787Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:55.843820Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:55.867323Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:29:55.867780Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:29:55.868079Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:55.876793Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:55.876982Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:29:55.877072Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:29:55.877172Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 8 bytes >> KqpStats::MultiTxStatsFullYql >> TTopicReaderTests::TestRun_ReadTwoMessages_With_Limit_1 [GOOD] >> TTopicReaderTests::TestRun_Read_Less_Messages_Than_Sent >> KqpExplain::UpdateOn+UseSink [GOOD] >> KqpExplain::UpdateOn-UseSink >> KqpQuery::Pure [GOOD] >> TSchemeShardTest::NewOwnerOnDatabase [GOOD] >> TSchemeShardTest::PreserveColumnOrder >> TSchemeShardCheckProposeSize::CopyTables [GOOD] >> TSchemeShardDecimalTypesInTables::Parameterless >> KqpExplain::MergeConnection [GOOD] >> KqpExplain::IdxFullscan >> TSchemeShardTest::CreateSystemColumn [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnSplit+UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnSplit-UseSink >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable [GOOD] >> TSchemeShardTest::PreserveColumnOrder [GOOD] >> Cdc::SplitTopicPartition_TopicAutoPartitioning [GOOD] >> Cdc::ShouldDeliverChangesOnSplitMerge >> KqpQuery::QueryTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::Pure [GOOD] Test command err: Trying to start YDB, gRPC: 21600, MsgBus: 11611 2025-06-25T14:29:31.949560Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894049806069602:2170];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:31.961639Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001508/r3tmp/tmpsHKr3g/pdisk_1.dat 2025-06-25T14:29:32.705898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:32.706016Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:32.709900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:32.757863Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21600, node 1 2025-06-25T14:29:32.908418Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:33.008834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:33.008883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:33.008891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:33.008993Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11611 TClient is connected to server localhost:11611 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:34.175378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:34.205213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:34.218856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:34.573006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:34.749728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:34.875163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:36.613965Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894071280907572:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:36.614101Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:36.876083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:36.917360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:36.955319Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894049806069602:2170];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:36.955539Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:36.960265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:36.999321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:37.030428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:37.090519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:37.153648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:37.236624Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519894075575875528:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:37.236707Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:37.236914Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894075575875533:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:37.240544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:37.258489Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894075575875535:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:37.316206Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894075575875587:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:38.526504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) Trying to start YDB, gRPC: 9672, MsgBus: 62130 2025-06-25T14:29:39.687567Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fl ... r_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:50.368838Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) Trying to start YDB, gRPC: 22447, MsgBus: 22615 2025-06-25T14:29:51.693950Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894133037359798:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:51.694019Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001508/r3tmp/tmpUVt78L/pdisk_1.dat 2025-06-25T14:29:51.811063Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22447, node 4 2025-06-25T14:29:51.848209Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:51.848290Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:51.849134Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:51.863802Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:51.863829Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:51.863837Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:51.863951Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22615 TClient is connected to server localhost:22615 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:52.399281Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:52.403641Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:52.416323Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:52.493018Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:52.687525Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:52.727080Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:52.752867Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:29:55.007532Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894145922263281:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:55.007621Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:55.058323Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.087684Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.116362Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.183320Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.211115Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.239196Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.310955Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.403818Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894150217231236:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:55.403913Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:55.406972Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894150217231241:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:55.410791Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:55.421148Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894150217231243:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:55.477528Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894150217231294:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:56.694035Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894133037359798:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:56.694093Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TSchemeShardTest::AlterTableAndAfterSplit [GOOD] >> TSchemeShardTest::AlterIndexTableDirectly >> DataShardSnapshots::MvccSnapshotReadLockedWrites-UseSink [GOOD] >> DataShardSnapshots::ReadIteratorLocalSnapshotThenRestart >> Cdc::DropIndex [GOOD] >> Cdc::DisableStream >> TSchemeShardDecimalTypesInTables::Parameterless [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-false >> KqpQuery::YqlSyntaxV0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::CreateSystemColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:31.136250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:31.136365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:31.136403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:31.136438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:31.136478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:31.136504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:31.136550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:31.136621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:31.137284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:31.137592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:31.233708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:31.233778Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:31.253937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:31.254152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:31.254309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:31.275086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:31.275402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:31.276083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:31.276299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:31.279338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:31.279496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:31.280578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:31.280639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:31.280786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:31.280825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:31.280875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:31.280970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:31.290805Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:31.444919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-06-25T14:29:31.445154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:31.445352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:31.445395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:31.445651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:31.445726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:31.449813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:31.450041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:31.450285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:31.450347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:31.450407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:31.450452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:31.452948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:31.453025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:31.453100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:31.456128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:31.456204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:31.456269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:31.456345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , 
TxId: 1 ready parts: 1/1 2025-06-25T14:29:31.460292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:31.462523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:31.462735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:31.463909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:31.464058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:31.464122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:31.464436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:31.464506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:31.464783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:31.464876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:31.467532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:31.467592Z node 1 :FLAT_TX_SCHEMESHARD ... 
ached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: SystemColumnInCopyAllowed, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:29:57.741755Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-25T14:29:57.741806Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:29:57.741873Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 103:0 type: TxCopyTable target path: [OwnerId: 72057594046678944, LocalPathId: 3] source path: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:29:57.741949Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:29:57.742130Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:29:57.742348Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:57.743112Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:29:57.743192Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:29:57.750146Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944 PathId: 3, at schemeshard: 72057594046678944 2025-06-25T14:29:57.750534Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /MyRoot/SystemColumnInCopyAllowed 2025-06-25T14:29:57.750872Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:57.750933Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:57.751175Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:29:57.751306Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:57.751383Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:210:2210], at schemeshard: 72057594046678944, txId: 103, path id: 1 2025-06-25T14:29:57.751462Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:210:2210], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-25T14:29:57.752070Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:29:57.752157Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 103:0 ProgressState, operation type: TxCopyTable, at tablet# 72057594046678944 2025-06-25T14:29:57.752487Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 103:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046678944 OwnerIdx: 2 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } 2025-06-25T14:29:57.753606Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:57.753749Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:57.753809Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:29:57.753873Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 6 2025-06-25T14:29:57.753943Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:29:57.754817Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 1 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:57.754915Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 1 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:29:57.754944Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:29:57.754990Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 1 2025-06-25T14:29:57.755031Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:29:57.755100Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 
2025-06-25T14:29:57.757443Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 103:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:2 msg type: 268697601 2025-06-25T14:29:57.757629Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 103, partId: 0, tablet: 72057594037968897 2025-06-25T14:29:57.757696Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1806: TOperation RegisterRelationByShardIdx, TxId: 103, shardIdx: 72057594046678944:2, partId: 0 2025-06-25T14:29:57.758100Z node 15 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72057594046678944 OwnerIdx: 2 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 1 } 2025-06-25T14:29:57.758390Z node 15 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72057594046678944, OwnerIdx 2, type DataShard, boot OK, tablet id 72075186233409547 2025-06-25T14:29:57.758590Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5951: Handle TEvCreateTabletReply at schemeshard: 72057594046678944 message: Status: OK Owner: 72057594046678944 OwnerIdx: 2 TabletID: 72075186233409547 Origin: 72057594037968897 2025-06-25T14:29:57.758664Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1820: TOperation FindRelatedPartByShardIdx, TxId: 103, shardIdx: 72057594046678944:2, partId: 0 2025-06-25T14:29:57.758828Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: Status: OK Owner: 72057594046678944 OwnerIdx: 2 TabletID: 72075186233409547 Origin: 72057594037968897 2025-06-25T14:29:57.758905Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 103:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046678944 2025-06-25T14:29:57.759007Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:180: TCreateParts opId# 103:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72057594046678944 OwnerIdx: 2 TabletID: 72075186233409547 Origin: 72057594037968897 2025-06-25T14:29:57.759126Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 103:0 2 -> 3 2025-06-25T14:29:57.760367Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:29:57.760571Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:29:57.766018Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:29:57.766413Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:29:57.766528Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_copy_table.cpp:71: TCopyTable TConfigureParts operationId# 103:0 ProgressState at tablet# 72057594046678944 
2025-06-25T14:29:57.766648Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_copy_table.cpp:103: TCopyTable TConfigureParts operationId# 103:0 Propose modify scheme on dstDatashard# 72075186233409547 idx# 72057594046678944:2 srcDatashard# 72075186233409546 idx# 72057594046678944:1 operationId# 103:0 seqNo# 2:2 at tablet# 72057594046678944 2025-06-25T14:29:57.771611Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 103:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2025-06-25T14:29:57.771789Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 103:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 269549568 2025-06-25T14:29:57.771888Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 103, partId: 0, tablet: 72075186233409547 2025-06-25T14:29:57.771919Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 103, partId: 0, tablet: 72075186233409546 TestModificationResult got TxId: 103, wait until txId: 103 >> KqpTypes::MultipleCurrentUtcTimestamp [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:42.874027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:42.874113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:42.874144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:42.874178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:42.874220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:42.874243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:42.874304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:42.874382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# 
false 2025-06-25T14:28:42.875066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:42.875422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:42.950563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:42.950625Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:42.967030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:42.967430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:42.967573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:42.972808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:42.973147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:42.973752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:42.974004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:42.977050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.977209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:42.978260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:42.978312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:42.978438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:42.978479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:42.978531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:42.978619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:42.985336Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:43.112803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 
1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:43.113007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.113227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:43.113284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:43.113513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:43.113590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:43.115799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:43.115982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:43.116159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.116221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:43.116256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:43.116289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:43.118233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.118282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:43.118331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:43.119928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.119976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.120017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:43.120085Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:43.123358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:43.125050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:43.125228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:43.126159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:43.126296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:43.126350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:43.126637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:43.126694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:43.126861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:43.126930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:43.128816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:43.128854Z node 1 :FLAT_TX_SCHEMESHARD ... 
4:29:57.736394Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvRunConditionalErase 2025-06-25T14:29:57.736425Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-25T14:29:57.736501Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-25T14:29:57.736570Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-25T14:29:57.836378Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:769:2653]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-25T14:29:57.836467Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-25T14:29:57.836563Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409552 outdated step 200 last cleanup 0 2025-06-25T14:29:57.836632Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409552 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:29:57.836667Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409552 2025-06-25T14:29:57.836698Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409552 has no attached operations 2025-06-25T14:29:57.836726Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409552 2025-06-25T14:29:57.836863Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:773:2656]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-25T14:29:57.836894Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-25T14:29:57.836945Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409553 outdated step 200 last cleanup 0 2025-06-25T14:29:57.836990Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409553 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:29:57.837014Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409553 2025-06-25T14:29:57.837053Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409553 has no attached operations 2025-06-25T14:29:57.837073Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409553 2025-06-25T14:29:57.837157Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:769:2653]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-25T14:29:57.837286Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409552, FollowerId 0, tableId 2 2025-06-25T14:29:57.837360Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:773:2656]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-25T14:29:57.837450Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats 
from datashard 72075186233409553, FollowerId 0, tableId 2 2025-06-25T14:29:57.837773Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:769:2653], Recipient [3:900:2756]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409552 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 28 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186233409552 NodeId: 3 StartTime: 120 TableOwnerId: 72075186233409549 FollowerId: 0 2025-06-25T14:29:57.837814Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:29:57.837868Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409552 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0028 2025-06-25T14:29:57.837968Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409552 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:29:57.838002Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-25T14:29:57.838196Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:773:2656], Recipient [3:900:2756]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409553 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 15 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186233409553 NodeId: 3 StartTime: 120 TableOwnerId: 72075186233409549 FollowerId: 0 2025-06-25T14:29:57.838229Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:29:57.838252Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409553 
followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0015 2025-06-25T14:29:57.838306Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409553 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:29:57.850521Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:900:2756]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:57.850601Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:29:57.850678Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:900:2756], Recipient [3:900:2756]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:57.850706Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:29:57.861333Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435096, Sender [0:0:0], Recipient [3:900:2756]: NKikimr::NSchemeShard::TEvPrivate::TEvSendBaseStatsToSA 2025-06-25T14:29:57.861410Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5140: StateWork, processing event TEvPrivate::TEvSendBaseStatsToSA 2025-06-25T14:29:57.861645Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435076, Sender [0:0:0], Recipient [3:900:2756]: NKikimr::NSchemeShard::TEvPrivate::TEvRunConditionalErase 2025-06-25T14:29:57.861680Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5015: StateWork, processing event TEvPrivate::TEvRunConditionalErase 2025-06-25T14:29:57.861730Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-06-25T14:29:57.861822Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-06-25T14:29:57.861895Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-06-25T14:29:57.862082Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269746180, Sender [3:2016:3832], Recipient [3:900:2756]: NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult 2025-06-25T14:29:57.862127Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5139: StateWork, processing event TEvTxProxySchemeCache::TEvNavigateKeySetResult 2025-06-25T14:29:57.883604Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:2019:3835], Recipient [3:769:2653]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:57.883689Z node 3 :TX_DATASHARD TRACE: 
datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:57.883750Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409552, clientId# [3:2018:3834], serverId# [3:2019:3835], sessionId# [0:0:0] 2025-06-25T14:29:57.883979Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553213, Sender [3:2017:3833], Recipient [3:769:2653]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72075186233409549 LocalId: 2 } 2025-06-25T14:29:57.884688Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:2022:3838], Recipient [3:773:2656]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:57.884725Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:57.884757Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409553, clientId# [3:2021:3837], serverId# [3:2022:3838], sessionId# [0:0:0] 2025-06-25T14:29:57.884869Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553213, Sender [3:2020:3836], Recipient [3:773:2656]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72075186233409549 LocalId: 2 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::PreserveColumnOrder [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:30.411984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:30.412086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:30.412134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:30.412183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:30.412244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:30.412274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:30.412437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:30.412535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:30.413254Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:30.413652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:30.520576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:30.520651Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:30.568660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:30.569197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:30.569377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:30.580592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:30.581008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:30.581615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:30.581883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:30.591632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:30.591863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:30.593123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:30.593205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:30.593367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:30.593423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:30.593463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:30.593547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:30.609185Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:29:30.815032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T14:29:30.815292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:30.815528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:30.815581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:30.815840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:30.815928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:30.818406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:30.818588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:30.818815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:30.818868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:30.818907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:30.818944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:30.821089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:30.821168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:30.821223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:30.823079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:30.823119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:30.823167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:30.823215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:30.826638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:30.828684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:30.828865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:30.829902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:30.830019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:30.830070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:30.830357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:30.830409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:30.830568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:30.830633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:30.832987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:30.833031Z node 1 :FLAT_TX_SCHEMESHARD ... 
09546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1937 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:29:58.185011Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1937 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:29:58.189235Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 64424511738 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:29:58.189323Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T14:29:58.189573Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 64424511738 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:29:58.189696Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:29:58.189880Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 313 RawX2: 64424511738 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:29:58.190006Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:58.190085Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:29:58.190160Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:29:58.190227Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 129 -> 240 2025-06-25T14:29:58.191562Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:29:58.193807Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:29:58.193949Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 
2025-06-25T14:29:58.194082Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:29:58.194472Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:29:58.194530Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:29:58.194760Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:29:58.194820Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:29:58.194889Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:29:58.194940Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:29:58.195008Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-25T14:29:58.195124Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [15:341:2318] message: TxId: 101 2025-06-25T14:29:58.195214Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:29:58.195285Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:29:58.195346Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:29:58.195495Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:29:58.200615Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:29:58.200701Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [15:342:2319] TestWaitNotification: OK eventTxId 101 2025-06-25T14:29:58.201442Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:29:58.201893Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 479us result status StatusSuccess 2025-06-25T14:29:58.202549Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "col01" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "col02" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "col03" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "col04" Type: "Utf8" TypeId: 4608 Id: 4 NotNull: false IsBuildInProgress: false } Columns { Name: "col05" Type: "Utf8" TypeId: 4608 Id: 5 NotNull: false IsBuildInProgress: false } Columns { Name: "col06" Type: "Utf8" TypeId: 4608 Id: 6 NotNull: false IsBuildInProgress: false } Columns { Name: "col07" Type: "Utf8" TypeId: 4608 Id: 7 NotNull: false IsBuildInProgress: false } Columns { Name: "col08" Type: "Utf8" TypeId: 4608 Id: 8 NotNull: false IsBuildInProgress: false } Columns { Name: "col09" Type: "Utf8" TypeId: 4608 Id: 9 NotNull: false IsBuildInProgress: false } Columns { Name: "col10" Type: "Utf8" TypeId: 4608 Id: 10 NotNull: false IsBuildInProgress: false } Columns { Name: "col11" Type: "Utf8" TypeId: 4608 Id: 11 NotNull: false IsBuildInProgress: false } Columns { Name: "col12" Type: "Utf8" TypeId: 4608 Id: 12 NotNull: false IsBuildInProgress: false } Columns { Name: "col13" Type: "Utf8" TypeId: 4608 Id: 13 NotNull: false IsBuildInProgress: false } Columns { Name: "col14" Type: "Utf8" TypeId: 4608 Id: 14 NotNull: false IsBuildInProgress: false } Columns { Name: "col15" Type: "Utf8" TypeId: 4608 Id: 15 NotNull: false IsBuildInProgress: false } Columns { Name: "col16" Type: "Utf8" TypeId: 4608 Id: 16 NotNull: false IsBuildInProgress: false } Columns { Name: "col17" Type: "Utf8" TypeId: 4608 Id: 17 NotNull: false IsBuildInProgress: false } Columns { Name: "col18" Type: "Utf8" TypeId: 4608 Id: 18 NotNull: false IsBuildInProgress: false } Columns { Name: "col19" Type: "Utf8" TypeId: 4608 Id: 19 NotNull: false IsBuildInProgress: false } Columns { Name: "col20" Type: "Utf8" TypeId: 4608 Id: 20 NotNull: false IsBuildInProgress: false } Columns { Name: "col21" Type: "Utf8" TypeId: 4608 Id: 21 NotNull: false IsBuildInProgress: false } Columns { Name: "col22" Type: "Utf8" TypeId: 4608 Id: 22 NotNull: false IsBuildInProgress: false } Columns { Name: "col23" Type: "Utf8" TypeId: 4608 Id: 23 NotNull: false IsBuildInProgress: false } Columns { Name: "col24" Type: "Utf8" TypeId: 4608 Id: 24 NotNull: false IsBuildInProgress: false } Columns { Name: "col25" Type: "Utf8" TypeId: 4608 Id: 25 NotNull: false IsBuildInProgress: false } Columns { Name: "col26" Type: "Utf8" TypeId: 4608 Id: 26 NotNull: false IsBuildInProgress: false } Columns { Name: "col27" Type: "Utf8" TypeId: 4608 Id: 27 NotNull: false IsBuildInProgress: false } Columns { Name: "col28" Type: "Utf8" TypeId: 4608 Id: 28 NotNull: false IsBuildInProgress: false } Columns { Name: "col29" Type: "Utf8" TypeId: 4608 Id: 29 NotNull: false IsBuildInProgress: false } KeyColumnNames: "col01" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 
Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NoDelimiter [GOOD] >> TTopicReaderTests::TestRun_ReadMessages_Output_Base64 >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-false [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-true >> Cdc::AlterViaTopicService [GOOD] >> Cdc::Alter >> ConvertMiniKQLValueToYdbValueTest::SimpleBool >> ConvertMiniKQLValueToYdbValueTest::SimpleBool [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalString [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalEmpty [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalOptionalEmpty [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalOptionalEmpty2 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::List [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Dict [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpTypes::MultipleCurrentUtcTimestamp [GOOD] Test command err: Trying to start YDB, gRPC: 20512, MsgBus: 23343 2025-06-25T14:29:32.860915Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894053472261975:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:32.861276Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014dc/r3tmp/tmpEoeEL4/pdisk_1.dat 2025-06-25T14:29:33.557125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:33.557224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:33.572674Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894053472261790:2080] 1750861772790741 != 1750861772790744 2025-06-25T14:29:33.604496Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:33.606115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
TServer::EnableGrpc on GrpcPort 20512, node 1 2025-06-25T14:29:33.861501Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:33.896982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:33.897004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:33.897022Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:33.897148Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23343 TClient is connected to server localhost:23343 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:34.958673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:35.013289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:29:35.229868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:35.483724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:29:35.595858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:37.583603Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894074947099920:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:37.583730Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:37.850782Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894053472261975:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:37.850846Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:38.029869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.079055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.175047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.239560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.305795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.367553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.444370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.516489Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519894079242067888:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:38.516565Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:38.516926Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894079242067893:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:38.522449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:38.533146Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894079242067895:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:38.613940Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894079242067947:3432] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 11868, MsgBus: 63729 2025-06-25T14:29:40.932988Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894087546143840:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:40.957185Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... 644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:51.814348Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519894114586807201:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:51.814427Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 30341, MsgBus: 65118 2025-06-25T14:29:53.187219Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894142130498350:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:53.190301Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014dc/r3tmp/tmpnSWuqy/pdisk_1.dat 2025-06-25T14:29:53.342490Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:53.343361Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519894142130498254:2080] 1750861793177854 != 1750861793177857 TServer::EnableGrpc on GrpcPort 30341, node 4 2025-06-25T14:29:53.360873Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:53.360956Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:53.363371Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:53.428957Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:53.428987Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:53.428996Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:53.429135Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65118 TClient is connected to server localhost:65118 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:53.988823Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:54.002469Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:54.068633Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:54.190336Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:54.236790Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:54.320080Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:56.515400Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894155015401781:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:56.515477Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:56.569919Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:56.639709Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:56.705946Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:56.742979Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:56.775214Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:56.847111Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:56.879071Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:56.960903Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894155015402444:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:56.960984Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:56.961015Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894155015402449:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:56.964511Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:56.973281Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894155015402451:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:57.039787Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894159310369798:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:58.182219Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894142130498350:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:58.182314Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpStats::JoinNoStatsScan [GOOD] >> KqpStats::JoinStatsBasicScan |76.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLValueToYdbValueTest::Dict [GOOD] >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey+UseSink [GOOD] >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey-UseSink >> KqpStats::StatsProfile [GOOD] >> KqpStats::SelfJoin >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-true [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-false >> TestDataErasure::SimpleTestForTopic >> TestDataErasure::DataErasureWithSplit >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBorrowed [GOOD] >> TestDataErasure::SchemeShardCounterDoesNotConsistWithBscCounter >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldHandleCompactionTimeouts >> KqpExplain::UpdateSecondaryConditionalSecondaryKey+UseSink [GOOD] >> KqpExplain::UpdateSecondaryConditionalSecondaryKey-UseSink >> KqpLimits::DatashardReplySize [GOOD] >> KqpLimits::DataShardReplySizeExceeded >> KqpStats::DeferredEffects+UseSink [GOOD] >> KqpStats::DataQueryWithEffects+UseSink >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-false [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-true >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotRequestCompactionsAfterDisable [GOOD] >> KqpQuery::QueryExplain [GOOD] >> KqpQuery::QueryFromSqs >> KqpQuery::UdfMemoryLimit [GOOD] >> KqpQuery::TryToUpdateNonExistentColumn >> TestDataErasure::SimpleTestForAllSupportedObjects >> TestDataErasure::SimpleTestForTables >> Cdc::RacyActivateAndEnqueue [GOOD] >> Cdc::RacyCreateAndSend >> KqpStats::MultiTxStatsFullYql [GOOD] >> KqpStats::MultiTxStatsFullScan >> KqpExplain::UpdateOn-UseSink [GOOD] >> KqpExplain::UpdateOnSecondary+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotRequestCompactionsAfterDisable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:43.442363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:43.442453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:43.442498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:43.442531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:43.442574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:43.442602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:43.442667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:43.442751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:43.443515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:43.443922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:43.514598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:43.514659Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:43.529409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:43.529740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:43.529888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:43.534788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:43.535161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:43.535888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:43.536156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:43.539923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:43.540054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:43.540898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:43.540936Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:43.541020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:43.541053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:43.541083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:43.541137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.546299Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:43.682170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:43.682420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.682661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:43.682709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:43.682989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:43.683073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:43.685675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:43.685914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:43.686137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.686211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-06-25T14:28:43.686263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:43.686308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:43.688584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.688664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:43.688710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:43.690739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.690798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:43.690857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:43.690923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:43.694882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:43.697394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:43.697615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:43.698602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:43.698750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:43.698811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:43.699179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:43.699235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:43.699407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:43.699503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:43.702970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:43.703032Z node 1 :FLAT_TX_SCHEMESHARD ... ard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409546 2025-06-25T14:30:01.185567Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:326:2308]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-25T14:30:01.185701Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409546, FollowerId 0, tableId 2 2025-06-25T14:30:01.186066Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:326:2308], Recipient [3:129:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409546 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 29 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 31 Memory: 124232 Storage: 14156 } ShardState: 2 UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 42 TableOwnerId: 72057594046678944 FollowerId: 0 2025-06-25T14:30:01.186109Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:30:01.186163Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 0.0031 2025-06-25T14:30:01.186266Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 29 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:30:01.186313Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-25T14:30:01.196803Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:329:2309]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-25T14:30:01.196871Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-25T14:30:01.196947Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409547 outdated step 5000002 last cleanup 0 2025-06-25T14:30:01.197007Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409547 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:30:01.197055Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409547 2025-06-25T14:30:01.197087Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409547 has no attached operations 2025-06-25T14:30:01.197116Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409547 2025-06-25T14:30:01.197245Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:329:2309]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-25T14:30:01.197370Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409547, FollowerId 0, tableId 2 2025-06-25T14:30:01.197709Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:329:2309], Recipient [3:129:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409547 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 25 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186233409547 NodeId: 3 StartTime: 42 TableOwnerId: 72057594046678944 FollowerId: 0 2025-06-25T14:30:01.197747Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:30:01.197790Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0025 2025-06-25T14:30:01.197896Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 
HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:30:01.244684Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:30:01.244765Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:30:01.244796Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-25T14:30:01.244927Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 2 2025-06-25T14:30:01.244962Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 2 2025-06-25T14:30:01.245078Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 13940 row count 100 2025-06-25T14:30:01.245153Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940 2025-06-25T14:30:01.245187Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-25T14:30:01.245268Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:1 with partCount# 1, rowCount# 100, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:29.000000Z at schemeshard 72057594046678944 2025-06-25T14:30:01.245354Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 2 out of 2 partitions 2025-06-25T14:30:01.245432Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-25T14:30:01.245480Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:30:01.245502Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409547, followerId 0 2025-06-25T14:30:01.245557Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:2 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046678944 2025-06-25T14:30:01.245594Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409547 by size, its table already has 2 out of 2 partitions 2025-06-25T14:30:01.245671Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 
2025-06-25T14:30:01.256180Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:30:01.256259Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:30:01.256291Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:30:01.289752Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:1334:3251], Recipient [3:326:2308]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:01.289828Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:01.289876Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409546, clientId# [3:1333:3250], serverId# [3:1334:3251], sessionId# [0:0:0] 2025-06-25T14:30:01.290077Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553213, Sender [3:1332:3249], Recipient [3:326:2308]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72057594046678944 LocalId: 2 } 2025-06-25T14:30:01.291977Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:1337:3254], Recipient [3:329:2309]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:01.292018Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:01.292046Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409547, clientId# [3:1336:3253], serverId# [3:1337:3254], sessionId# [0:0:0] 2025-06-25T14:30:01.292163Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553213, Sender [3:1335:3252], Recipient [3:329:2309]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72057594046678944 LocalId: 2 } >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-true [GOOD] >> TSchemeShardDecimalTypesInTables::CopyTableShouldNotFailOnDisabledFeatureFlag >> KqpQuery::QueryTimeout [GOOD] >> KqpQuery::QueryResultsTruncated >> TSchemeShardDecimalTypesInTables::CopyTableShouldNotFailOnDisabledFeatureFlag [GOOD] >> TSchemeShardDecimalTypesInTables::CreateWithWrongParameters >> TControlPlaneProxyShouldPassHids::ShouldCheckScenario [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnSplit-UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnCopyTable+UseSink >> TControlPlaneProxyTest::ShouldSendCreateQuery >> TestDataErasure::DataErasureManualLaunch |76.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |76.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |76.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpExplain::IdxFullscan [GOOD] >> KqpQuery::YqlSyntaxV0 [GOOD] >> DataShardSnapshots::ReadIteratorLocalSnapshotThenRestart [GOOD] >> KqpExplain::MultiJoinCteLinks >> TSchemeShardDecimalTypesInTables::CreateWithWrongParameters [GOOD] >> KqpQuery::YqlTableSample ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt64_Reboot [GOOD] Test command err: 2025-06-25T14:27:41.329228Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:27:41.359529Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:27:41.359772Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:27:41.391701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:27:41.391954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:27:41.392210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:27:41.398034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:27:41.398296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:27:41.398421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:27:41.398583Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:27:41.398731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:27:41.398879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:27:41.399006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:27:41.399121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:27:41.432885Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:27:41.433050Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:27:41.433102Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:27:41.433277Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:27:41.433458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:27:41.433545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:27:41.433591Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:27:41.433677Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:27:41.433734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:27:41.433776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:27:41.433828Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:27:41.434004Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:27:41.434060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:27:41.434097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:27:41.434124Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:27:41.434237Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:27:41.434291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:27:41.434327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:27:41.434355Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:27:41.434399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:27:41.434433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:27:41.434462Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:27:41.434667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:27:41.434718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:27:41.434773Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:27:41.434943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:27:41.434991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:27:41.435018Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:27:41.435163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:27:41.435206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:27:41.435237Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:27:41.435320Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:27:41.435379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:27:41.435415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:27:41.435458Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:27:41.435679Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=50; 2025-06-25T14:27:41.435764Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=42; 2025-06-25T14:27:41.435836Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-25T14:27:41.435914Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=36; 2025-06-25T14:27:41.436001Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:27:41.436071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:27:41.436125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:27:41.436183Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
tions;fline=constructor_meta.cpp:71;memory_size=26214;data_size=26188;sum=13173016;count=14328;size_of_meta=136; 2025-06-25T14:29:54.634120Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=26286;data_size=26260;sum=13688824;count=7164;size_of_portion=208; 2025-06-25T14:29:54.635128Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=85377; 2025-06-25T14:29:54.635206Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=11; 2025-06-25T14:29:54.637127Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1865; 2025-06-25T14:29:54.637174Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=87550; 2025-06-25T14:29:54.637216Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=87655; 2025-06-25T14:29:54.637274Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=10; 2025-06-25T14:29:54.638149Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=829; 2025-06-25T14:29:54.638192Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=88999; 2025-06-25T14:29:54.638333Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=94; 2025-06-25T14:29:54.638441Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=66; 2025-06-25T14:29:54.638788Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=307; 2025-06-25T14:29:54.639039Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=209; 2025-06-25T14:29:54.663568Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=24456; 2025-06-25T14:29:54.696296Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=32606; 2025-06-25T14:29:54.696466Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=15; 2025-06-25T14:29:54.696528Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=10; 2025-06-25T14:29:54.696586Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T14:29:54.696673Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=49; 2025-06-25T14:29:54.696719Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=8; 2025-06-25T14:29:54.696820Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=60; 2025-06-25T14:29:54.696869Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=8; 2025-06-25T14:29:54.696941Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=33; 2025-06-25T14:29:54.697025Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=50; 2025-06-25T14:29:54.697101Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=42; 2025-06-25T14:29:54.697134Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=154143; 2025-06-25T14:29:54.697278Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=22538992;raw_bytes=22128150;count=3;records=225200} inactive {blob_bytes=147791880;raw_bytes=143975050;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T14:29:54.697397Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T14:29:54.697455Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T14:29:54.697522Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T14:29:54.697572Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T14:29:54.697747Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T14:29:54.697821Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T14:29:54.697889Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750861366918;tx_id=18446744073709551615;;current_snapshot_ts=1750861663121; 2025-06-25T14:29:54.697933Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T14:29:54.697975Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T14:29:54.698012Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T14:29:54.698103Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T14:29:54.704356Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T14:29:54.705171Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T14:29:54.705225Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T14:29:54.705255Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T14:29:54.705298Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T14:29:54.705377Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T14:29:54.705443Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750861366918;tx_id=18446744073709551615;;current_snapshot_ts=1750861663121; 2025-06-25T14:29:54.705489Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T14:29:54.705535Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T14:29:54.705573Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T14:29:54.705650Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=1.000000s; 2025-06-25T14:29:54.705699Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> DataShardSnapshots::ReadIteratorLocalSnapshotThenWrite >> TSchemeShardDecimalTypesInTables::AlterWithWrongParameters >> TestDataErasure::SimpleTestForTopic [GOOD] >> Cdc::Alter [GOOD] >> Cdc::DescribeStream >> TControlPlaneProxyTest::ShouldSendCreateQuery [GOOD] |76.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id >> TSchemeShardDecimalTypesInTables::AlterWithWrongParameters [GOOD] >> TControlPlaneProxyTest::FailsOnCreateQueryWhenRateLimiterResourceNotCreated >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression [GOOD] >> TControlPlaneProxyTest::FailsOnCreateQueryWhenRateLimiterResourceNotCreated [GOOD] >> Cdc::DisableStream [GOOD] >> KqpStats::DataQueryWithEffects+UseSink [GOOD] >> Cdc::ShouldDeliverChangesOnSplitMerge [GOOD] >> TestDataErasure::SchemeShardCounterDoesNotConsistWithBscCounter [GOOD] >> TSchemeShardInfoTypesTest::EmptyFamilies [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages >> Cdc::InitialScan >> TControlPlaneProxyTest::ShouldSendListQueries >> TSchemeShardInfoTypesTest::LostId [GOOD] >> 
Cdc::ShouldBreakLocksOnConcurrentAlterTable >> KqpStats::DataQueryWithEffects-UseSink >> TSchemeShardInfoTypesTest::DeduplicationOrder [GOOD] >> TSchemeShardInfoTypesTest::MultipleDeduplications [GOOD] >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-false |76.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |76.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SimpleTestForTopic [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:01.287937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:01.288039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:01.288078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:01.288111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:01.288174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:01.288204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:01.288255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:01.288344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:01.289116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:01.289498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:01.380106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:01.380178Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:01.398248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:01.398709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:01.398902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: 
UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:01.405224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:01.405600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:01.406230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:01.406530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:01.409815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:01.410025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:01.411127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:01.411188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:01.411332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:01.411377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:01.411418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:01.411505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.417848Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:01.559223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:01.559453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.559628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:01.559675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:01.559919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 
72057594046678944 2025-06-25T14:30:01.559989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:01.562822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:01.563012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:01.563179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.563245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:01.563297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:01.563337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:01.565689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.565749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:01.565787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:01.567552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.567612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.567670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:01.567712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:01.571270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:01.573268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:01.573478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 
4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:01.574405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:01.574563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:01.574620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:01.574936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:01.575000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:01.575200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:01.575281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:01.578574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:01.578632Z node 1 :FLAT_TX_SCHEMESHARD ... 
7: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-25T14:30:04.377297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-25T14:30:04.377345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-25T14:30:04.444332Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:04.444416Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:04.444561Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:463:2414], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:04.444605Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:04.444758Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:643:2557], Recipient [1:463:2414]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409546 2025-06-25T14:30:04.444806Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:30:04.444897Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:30:04.445120Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 2 took 134us result status StatusSuccess 2025-06-25T14:30:04.445570Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409550 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409549 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409550 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 
DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:30:04.497165Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:883:2758]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:04.497255Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:04.497387Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:883:2758], Recipient [1:883:2758]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:04.497414Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:04.497552Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:1067:2903], Recipient [1:883:2758]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409551 2025-06-25T14:30:04.497579Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:30:04.497701Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409551, at schemeshard: 72075186233409551 2025-06-25T14:30:04.497885Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409551 describe pathId 2 took 160us result status StatusSuccess 2025-06-25T14:30:04.498330Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409551 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409555 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409554 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409555 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409552 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409553 SchemeShard: 
72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409551, at schemeshard: 72075186233409551 2025-06-25T14:30:04.827140Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:04.827212Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:04.827297Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:297:2279], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:04.827325Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:04.847954Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:04.848034Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:04.848067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-25T14:30:04.848301Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:297:2279]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-25T14:30:04.848359Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:04.848392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:04.848462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:04.848492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-25T14:30:04.848563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.981000s, Timestamp# 1970-01-01T00:00:05.064000Z 2025-06-25T14:30:04.848619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 2 s 2025-06-25T14:30:04.853671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-25T14:30:04.854258Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:1354:3157], Recipient [1:297:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:04.854321Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:04.854356Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:30:04.854536Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:297:2279]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-25T14:30:04.854578Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5153: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-25T14:30:04.854629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7834: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 |76.7%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SchemeShardCounterDoesNotConsistWithBscCounter [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:01.573446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:01.573520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:01.573550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:01.573582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:01.573624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:01.573657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:01.573712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:01.573798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 
10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:01.574505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:01.574882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:01.653189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:01.653260Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:01.678690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:01.679218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:01.679419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:01.692707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:01.693104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:01.693817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:01.694113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:01.699719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:01.699934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:01.701148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:01.701209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:01.701353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:01.701402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:01.701448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:01.701535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.710715Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:01.850040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain 
SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:01.850314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.850575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:01.850621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:01.850820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:01.850891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:01.854752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:01.854954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:01.855156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.855238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:01.855311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:01.855360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:01.856901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.856950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:01.856990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:01.858312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.858358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.858424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:01.858470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:01.861552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:01.863131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:01.863268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:01.863926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:01.864026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:01.864066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:01.864340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:01.864394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:01.864551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:01.864608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:01.866217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:01.866254Z node 1 :FLAT_TX_SCHEMESHARD ... 
HEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:04.811767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:646: TTxCompleteDataErasureBSC Unknown generation#47, Expected gen# 1 at schemestard: 72057594046678944 2025-06-25T14:30:04.811859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 48 2025-06-25T14:30:04.812179Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:306:2284], Recipient [1:298:2278]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 48 Completed: false Progress10k: 0 2025-06-25T14:30:04.812214Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:04.812236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:04.812269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:04.812297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 0% 2025-06-25T14:30:04.813886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-25T14:30:04.813963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-25T14:30:04.814024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-25T14:30:05.252056Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:463:2412]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:05.252144Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:05.252231Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:837:2714]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:05.252271Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:05.252337Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:05.252371Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:05.252431Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:463:2412], Recipient [1:463:2412]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 
2025-06-25T14:30:05.252458Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:05.252565Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:837:2714], Recipient [1:837:2714]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:05.252594Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:05.252646Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:298:2278], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:05.252669Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:05.293878Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:05.293970Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:05.294026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 48 2025-06-25T14:30:05.294350Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:306:2284], Recipient [1:298:2278]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 48 Completed: false Progress10k: 5000 2025-06-25T14:30:05.294390Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:05.294439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:05.294531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:05.294582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-25T14:30:05.294650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-25T14:30:05.294722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-25T14:30:05.651140Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:05.651239Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:05.651312Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:463:2412]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:05.651339Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:05.651385Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:837:2714]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:05.651411Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:05.651496Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:298:2278], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:05.651529Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:05.651601Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:463:2412], Recipient [1:463:2412]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:05.651627Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:05.651675Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:837:2714], Recipient [1:837:2714]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:05.651700Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:05.696444Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:298:2278]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:05.696513Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:05.696554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 48 2025-06-25T14:30:05.696751Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:306:2284], Recipient [1:298:2278]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 48 Completed: true Progress10k: 10000 2025-06-25T14:30:05.696776Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:05.696799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:05.696855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:05.696882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-25T14:30:05.696936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.935000s, Timestamp# 1970-01-01T00:00:05.110000Z 2025-06-25T14:30:05.696973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 48, duration# 2 s 2025-06-25T14:30:05.698595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-25T14:30:05.699039Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:1985:3648], Recipient [1:298:2278]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:05.699089Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:05.699121Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:30:05.699237Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:298:2278]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-25T14:30:05.699264Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5153: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-25T14:30:05.699306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7834: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> TestDataErasure::Run3CyclesForTopics >> KqpStats::SelfJoin [GOOD] >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-false [GOOD] >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true >> TestDataErasure::SimpleTestForTables [GOOD] >> KqpQuery::TryToUpdateNonExistentColumn [GOOD] >> KqpQuery::UpdateThenDelete+UseSink >> KqpStats::MultiTxStatsFullScan [GOOD] >> TControlPlaneProxyTest::ShouldSendListQueries [GOOD] >> TestDataErasure::DataErasureWithSplit [GOOD] >> TestDataErasure::SimpleTestForAllSupportedObjects [GOOD] >> TControlPlaneProxyTest::ShouldSendDescribeQuery >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true [GOOD] >> TestDataErasure::ManualLaunch3Cycles >> KqpQuery::QueryFromSqs [GOOD] >> TestDataErasure::DataErasureWithMerge >> TControlPlaneProxyTest::ShouldSendDescribeQuery [GOOD] >> TControlPlaneProxyTest::ShouldSendGetQueryStatus >> KqpStats::OneShardLocalExec+UseSink >> TestDataErasure::DataErasureManualLaunch [GOOD] >> TestDataErasure::DataErasureWithCopyTable |76.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SimpleTestForAllSupportedObjects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:02.539530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:02.539601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:02.539644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:02.539670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:02.539711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:02.539740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:02.539792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:02.539863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:02.540465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:02.540763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:02.614748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:02.614816Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:02.630451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:02.630894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:02.631065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:02.638726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:02.639104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:02.639799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, 
at schemeshard: 72057594046678944 2025-06-25T14:30:02.640067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:02.643710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:02.643898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:02.645062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:02.645122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:02.645268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:02.645315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:02.645364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:02.645466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:02.651632Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:02.783389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:02.783568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:02.783723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:02.783765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:02.783944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:02.783994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:02.793334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:02.793514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:02.793721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:02.793794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:02.793852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:02.793895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:02.801299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:02.801378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:02.801440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:02.804887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:02.804953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:02.805012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:02.805087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:02.811806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:02.814034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:02.814222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:02.815111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:02.815253Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:02.815307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:02.815578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:02.815623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:02.815811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:02.815901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:02.819270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:02.819320Z node 1 :FLAT_TX_SCHEMESHARD ... 7: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-25T14:30:07.224551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-25T14:30:07.224585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-25T14:30:07.265874Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:07.265951Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:07.266002Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:463:2414], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:07.266021Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:07.266138Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:802:2688], Recipient [1:463:2414]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409546 2025-06-25T14:30:07.266156Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:30:07.266234Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:30:07.266387Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe 
pathId 3 took 120us result status StatusSuccess 2025-06-25T14:30:07.266740Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:30:07.339571Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:962:2820]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:07.339648Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:07.339810Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:962:2820], Recipient [1:962:2820]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:07.339850Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:07.340007Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:1386:3171], Recipient [1:962:2820]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409553 2025-06-25T14:30:07.340034Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:30:07.340114Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme 
DoExecute, record: PathId: 3 SchemeshardId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-25T14:30:07.340276Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409553 describe pathId 3 took 131us result status StatusSuccess 2025-06-25T14:30:07.340733Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409553 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 108 CreateStep: 350 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409559 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409558 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409559 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 SchemeShard: 72075186233409553 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-25T14:30:07.755108Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:07.755194Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:07.755281Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:297:2279], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:07.755312Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:07.807607Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:07.807691Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:07.807730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-25T14:30:07.807965Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:297:2279]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-25T14:30:07.808025Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:07.808061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:07.808139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:07.808179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-25T14:30:07.808267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.921000s, Timestamp# 1970-01-01T00:00:05.124000Z 2025-06-25T14:30:07.808373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 2 s 2025-06-25T14:30:07.812135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-25T14:30:07.812846Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:2426:4030], Recipient [1:297:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:07.812905Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:07.812951Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:30:07.813101Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:297:2279]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-25T14:30:07.813140Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5153: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-25T14:30:07.813186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7834: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::SelfJoin [GOOD] Test command err: Trying to start YDB, gRPC: 25061, MsgBus: 29655 2025-06-25T14:29:42.439010Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894097548020581:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:42.439268Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/001487/r3tmp/tmpAvgplb/pdisk_1.dat 2025-06-25T14:29:42.825717Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894097548020561:2080] 1750861782437224 != 1750861782437227 2025-06-25T14:29:42.848629Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25061, node 1 2025-06-25T14:29:42.922236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:42.922348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:42.924115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:42.925285Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:42.925332Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:42.925339Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:42.925482Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29655 TClient is connected to server localhost:29655 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:43.444849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:43.448676Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:29:43.464041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:29:43.579564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:43.724913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:43.804859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:45.398155Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894110432924104:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:45.398270Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:45.697733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:45.737947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:45.771541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:45.842692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:45.876057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:45.917183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:45.952549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:46.054457Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894114727892069:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:46.054541Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:46.054684Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894114727892074:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:46.058993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:46.071080Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894114727892076:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:46.158353Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894114727892127:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:47.439572Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894097548020581:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:47.439623Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 11529, MsgBus: 14772 2025-06-25T14:29:48.631683Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894121656532966:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:48.631787Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... Shard","Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["TwoShard"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/TwoShard","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"TwoShard","ReadColumns":["Key"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage","Stats":{"UseLlvm":"undefined","Output":[{"Pop":{"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"FirstMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"Bytes":{"Count":2,"Sum":48,"Max":36,"Min":12,"History":[4,48]}},"Name":"RESULT","Push":{"WaitTimeUs":{"Count":2,"Sum":5079,"Max":2553,"Min":2526,"History":[4,5079]},"WaitPeriods":{"Count":2,"Sum":2,"Max":1,"Min":1},"Chunks":{"Count":2,"Sum":6,"Max":3,"Min":3},"ResumeMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"FirstMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3}}}],"MaxMemoryUsage":{"Count":2,"Sum":2097152,"Max":1048576,"Min":1048576,"History":[4,2097152]},"ResultRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Tasks":2,"ResultBytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"OutputRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"FinishedTasks":2,"IngressRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"PhysicalStageId":0,"StageDurationUs":0,"Table":[{"Path":"\/Root\/TwoShard","ReadRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"ReadBytes":{"Count":2,"Sum":48,"Max":24,"Min":24}}],"BaseTimeMs":1750861806070,"OutputBytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"CpuTimeUs":{"Count":2,"Sum":792,"Max":633,"Min":159,"History":[4,792]},"Ingress":[{"Pop":{"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"FirstMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"Bytes":{"Count":2,"Sum":96,"Max":48,"Min":48,"History":[4,96]}},"External":{},"Name":"KqpReadRangesSource","Ingress":{},"Push":{"LastMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"Rows":{"Count":2,"Sum":6,"Max":3,"Mi
n":3},"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"ResumeMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"FirstMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"Bytes":{"Count":2,"Sum":96,"Max":48,"Min":48,"History":[4,96]},"WaitTimeUs":{"Count":2,"Sum":5104,"Max":2572,"Min":2532,"History":[4,5104]},"WaitPeriods":{"Count":2,"Sum":2,"Max":1,"Min":1}}}],"UpdateTimeMs":3}}],"PlanNodeType":"Connection","E-Cost":"0"}],"Node Type":"Collect","Stats":{"UseLlvm":"undefined","Table":[{"Path":"\/Root\/TwoShard","ReadRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"ReadBytes":{"Count":2,"Sum":24,"Max":12,"Min":12}}],"OutputRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"PhysicalStageId":1,"FinishedTasks":2,"InputBytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"DurationUs":{"Count":2,"Sum":5000,"Max":3000,"Min":2000},"MaxMemoryUsage":{"Count":2,"Sum":2097152,"Max":1048576,"Min":1048576,"History":[5,1048576,6,2097152]},"BaseTimeMs":1750861806070,"Output":[{"Pop":{"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":9,"Max":5,"Min":4},"ActiveMessageMs":{"Count":2,"Max":5,"Min":4},"FirstMessageMs":{"Count":2,"Sum":9,"Max":5,"Min":4},"Bytes":{"Count":2,"Sum":48,"Max":36,"Min":12,"History":[5,12,6,48]}},"Name":"6","Push":{"LastMessageMs":{"Count":2,"Sum":9,"Max":5,"Min":4},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Chunks":{"Count":2,"Sum":6,"Max":3,"Min":3},"ResumeMessageMs":{"Count":2,"Sum":9,"Max":5,"Min":4},"FirstMessageMs":{"Count":2,"Sum":9,"Max":5,"Min":4},"ActiveMessageMs":{"Count":2,"Max":5,"Min":4},"PauseMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"WaitTimeUs":{"Count":2,"Sum":6724,"Max":3526,"Min":3198,"History":[5,3198,6,6724]},"WaitPeriods":{"Count":2,"Sum":2,"Max":1,"Min":1},"WaitMessageMs":{"Count":2,"Max":5,"Min":3}}}],"CpuTimeUs":{"Count":2,"Sum":616,"Max":438,"Min":178,"History":[5,438,6,616]},"StageDurationUs":3000,"WaitInputTimeUs":{"Count":2,"Sum":6526,"Max":3446,"Min":3080,"History":[5,3080,6,6526]},"OutputBytes":{"Count":2,"Sum":48,"Max":36,"Min":12},"Input":[{"Pop":{"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"LastMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"FirstMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"Bytes":{"Count":2,"Sum":48,"Max":36,"Min":12,"History":[5,12,6,48]}},"Name":"2","Push":{"LastMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"Rows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Chunks":{"Count":2,"Sum":2,"Max":1,"Min":1},"ResumeMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"FirstMessageMs":{"Count":2,"Sum":6,"Max":3,"Min":3},"Bytes":{"Count":2,"Sum":48,"Max":36,"Min":12,"History":[5,12,6,48]},"PauseMessageMs":{"Count":2,"Sum":2,"Max":1,"Min":1},"WaitTimeUs":{"Count":2,"Sum":3436,"Max":1782,"Min":1654,"History":[5,1782,6,3436]},"WaitPeriods":{"Count":2,"Sum":2,"Max":1,"Min":1},"WaitMessageMs":{"Count":2,"Max":3,"Min":1}}}],"UpdateTimeMs":6,"InputRows":{"Count":2,"Sum":6,"Max":3,"Min":3},"Tasks":2}}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":5}],"Name":"Limit","Limit":"1001"}],"Node 
Type":"Limit","Stats":{"UseLlvm":"undefined","Output":[{"Pop":{"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":6,"Max":6,"Min":6},"ActiveMessageMs":{"Count":1,"Max":6,"Min":5},"FirstMessageMs":{"Count":1,"Sum":5,"Max":5,"Min":5},"Bytes":{"Count":1,"Sum":27,"Max":27,"Min":27,"History":[6,27]},"ActiveTimeUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000}},"Name":"8","Push":{"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":6,"Max":6,"Min":6},"Chunks":{"Count":1,"Sum":6,"Max":6,"Min":6},"ResumeMessageMs":{"Count":1,"Sum":6,"Max":6,"Min":6},"FirstMessageMs":{"Count":1,"Sum":5,"Max":5,"Min":5},"ActiveMessageMs":{"Count":1,"Max":6,"Min":5},"PauseMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"ActiveTimeUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000},"WaitTimeUs":{"Count":1,"Sum":3307,"Max":3307,"Min":3307,"History":[6,3307]},"WaitPeriods":{"Count":1,"Sum":1,"Max":1,"Min":1},"WaitMessageMs":{"Count":1,"Max":6,"Min":2}}}],"MaxMemoryUsage":{"Count":1,"Sum":1048576,"Max":1048576,"Min":1048576,"History":[6,1048576]},"DurationUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000},"InputBytes":{"Count":1,"Sum":48,"Max":48,"Min":48},"Tasks":1,"OutputRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"FinishedTasks":1,"InputRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"PhysicalStageId":2,"StageDurationUs":1000,"BaseTimeMs":1750861806070,"WaitInputTimeUs":{"Count":1,"Sum":609,"Max":609,"Min":609,"History":[6,609]},"OutputBytes":{"Count":1,"Sum":27,"Max":27,"Min":27},"CpuTimeUs":{"Count":1,"Sum":811,"Max":811,"Min":811,"History":[6,811]},"UpdateTimeMs":6,"Input":[{"Pop":{"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":5,"Max":5,"Min":5},"FirstMessageMs":{"Count":1,"Sum":5,"Max":5,"Min":5},"Bytes":{"Count":1,"Sum":48,"Max":48,"Min":48,"History":[6,48]}},"Name":"4","Push":{"LastMessageMs":{"Count":1,"Sum":5,"Max":5,"Min":5},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"ResumeMessageMs":{"Count":1,"Sum":5,"Max":5,"Min":5},"FirstMessageMs":{"Count":1,"Sum":5,"Max":5,"Min":5},"Bytes":{"Count":1,"Sum":48,"Max":48,"Min":48,"History":[6,48]},"PauseMessageMs":{"Count":1,"Sum":2,"Max":2,"Min":2},"WaitTimeUs":{"Count":1,"Sum":2842,"Max":2842,"Min":2842,"History":[6,2842]},"WaitPeriods":{"Count":1,"Sum":2,"Max":2,"Min":2},"WaitMessageMs":{"Count":1,"Max":5,"Min":2}}}]}}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":7}],"Name":"Limit","Limit":"1001"}],"Node 
Type":"Limit","Stats":{"UseLlvm":"undefined","OutputRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"PhysicalStageId":3,"FinishedTasks":1,"InputBytes":{"Count":1,"Sum":27,"Max":27,"Min":27},"DurationUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000},"MaxMemoryUsage":{"Count":1,"Sum":1048576,"Max":1048576,"Min":1048576,"History":[6,1048576]},"BaseTimeMs":1750861806070,"Output":[{"Pop":{"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":6,"Max":6,"Min":6},"ActiveMessageMs":{"Count":1,"Max":6,"Min":5},"FirstMessageMs":{"Count":1,"Sum":5,"Max":5,"Min":5},"Bytes":{"Count":1,"Sum":27,"Max":27,"Min":27,"History":[6,27]},"ActiveTimeUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000}},"Name":"RESULT","Push":{"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":6,"Max":6,"Min":6},"Chunks":{"Count":1,"Sum":6,"Max":6,"Min":6},"ResumeMessageMs":{"Count":1,"Sum":6,"Max":6,"Min":6},"FirstMessageMs":{"Count":1,"Sum":5,"Max":5,"Min":5},"ActiveMessageMs":{"Count":1,"Max":6,"Min":5},"PauseMessageMs":{"Count":1,"Sum":3,"Max":3,"Min":3},"ActiveTimeUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000},"WaitTimeUs":{"Count":1,"Sum":2989,"Max":2989,"Min":2989,"History":[6,2989]},"WaitPeriods":{"Count":1,"Sum":1,"Max":1,"Min":1},"WaitMessageMs":{"Count":1,"Max":6,"Min":3}}}],"CpuTimeUs":{"Count":1,"Sum":530,"Max":530,"Min":530,"History":[6,530]},"StageDurationUs":1000,"ResultRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"ResultBytes":{"Count":1,"Sum":27,"Max":27,"Min":27},"OutputBytes":{"Count":1,"Sum":27,"Max":27,"Min":27},"Input":[{"Pop":{"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":6,"Max":6,"Min":6},"ActiveMessageMs":{"Count":1,"Max":6,"Min":5},"FirstMessageMs":{"Count":1,"Sum":5,"Max":5,"Min":5},"Bytes":{"Count":1,"Sum":27,"Max":27,"Min":27,"History":[6,27]},"ActiveTimeUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000}},"Name":"6","Push":{"Rows":{"Count":1,"Sum":6,"Max":6,"Min":6},"LastMessageMs":{"Count":1,"Sum":6,"Max":6,"Min":6},"Chunks":{"Count":1,"Sum":2,"Max":2,"Min":2},"ResumeMessageMs":{"Count":1,"Sum":6,"Max":6,"Min":6},"FirstMessageMs":{"Count":1,"Sum":5,"Max":5,"Min":5},"ActiveMessageMs":{"Count":1,"Max":6,"Min":5},"Bytes":{"Count":1,"Sum":27,"Max":27,"Min":27,"History":[6,27]},"PauseMessageMs":{"Count":1,"Sum":3,"Max":3,"Min":3},"ActiveTimeUs":{"Count":1,"Sum":1000,"Max":1000,"Min":1000},"WaitTimeUs":{"Count":1,"Sum":2833,"Max":2833,"Min":2833,"History":[6,2833]},"WaitPeriods":{"Count":1,"Sum":2,"Max":2,"Min":2},"WaitMessageMs":{"Count":1,"Max":6,"Min":3}}}],"UpdateTimeMs":6,"InputRows":{"Count":1,"Sum":6,"Max":6,"Min":6},"Tasks":1}}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"Compilation":{"FromCache":false,"DurationUs":261211,"CpuTimeUs":255497},"ProcessCpuTimeUs":348,"TotalDurationUs":307494,"ResourcePoolId":"default","QueuedTimeUs":470},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":7,"Plans":[{"PlanNodeId":9,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/TwoShard","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"TwoShard","ReadColumns":["Key"],"E-Cost":"0"}],"Node 
Type":"TableFullScan"},{"Operators":[{"E-Rows":"0","Columns":["Key"],"E-Size":"0","E-Cost":"0","Name":"TableLookup","Table":"TwoShard","LookupKeyColumns":["Key"]}],"Node Type":"TableLookup","PlanNodeType":"TableLookup"}],"Operators":[{"Name":"LookupJoin","LookupKeyColumns":["Key"]}],"Node Type":"LookupJoin","PlanNodeType":"Connection"}],"Operators":[{"A-Rows":6,"A-SelfCpu":0.811,"A-Cpu":0.811,"A-Size":27,"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Operators":[{"A-Rows":6,"A-SelfCpu":0.53,"A-Cpu":1.341,"A-Size":27,"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","PlanNodeType":"Query"}} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::SimpleTestForTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:02.839931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:02.840017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:02.840071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:02.840109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:02.840165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:02.840199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:02.840247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:02.840333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:02.841102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:02.841471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:02.913512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:02.913574Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:02.931698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:02.932079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:02.932267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:02.938837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:02.939219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:02.940040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:02.940360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:02.945975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:02.946222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:02.947579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:02.947658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:02.947831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:02.947888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:02.947955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:02.948059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:02.956610Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:03.095216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:03.095480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:03.095688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:03.095737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:03.096003Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:03.096084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:03.101328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:03.101550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:03.101757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:03.101842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:03.101900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:03.101968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:03.104040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:03.104101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:03.104160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:03.105958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:03.106016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:03.106077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:03.106125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:03.109855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:03.111749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 
2025-06-25T14:30:03.111931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:03.112859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:03.112988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:03.113046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:03.113380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:03.113437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:03.113623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:03.113691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:03.115539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:03.115589Z node 1 :FLAT_TX_SCHEMESHARD ... 
[RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-25T14:30:05.950599Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877760, Sender [1:1967:3634], Recipient [1:297:2279]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037932033 Status: OK ServerId: [1:1968:3635] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T14:30:05.950628Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5050: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:30:05.950655Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5787: Handle TEvClientConnected, tabletId: 72057594037932033, status: OK, at schemeshard: 72057594046678944 2025-06-25T14:30:05.950772Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:297:2279]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: false Progress10k: 0 2025-06-25T14:30:05.950803Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:05.950834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:05.950890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:05.950944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 0% 2025-06-25T14:30:05.951006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-25T14:30:05.951057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-25T14:30:06.356148Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:06.356216Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:06.356294Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:06.356350Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:06.356401Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:838:2719]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:06.356424Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:06.356516Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:297:2279], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 
2025-06-25T14:30:06.356553Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:06.356619Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:463:2414], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:06.356643Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:06.356710Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:838:2719], Recipient [1:838:2719]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:06.356733Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:06.401194Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:06.401294Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:06.401348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-25T14:30:06.401674Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:297:2279]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: false Progress10k: 5000 2025-06-25T14:30:06.401726Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:06.401771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:06.401844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:06.401895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-25T14:30:06.401959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-25T14:30:06.402006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-25T14:30:06.775646Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:838:2719]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:06.775729Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:06.775802Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:06.775828Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:06.775881Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:06.775908Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:06.775970Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:297:2279], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:06.776000Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:06.776065Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:463:2414], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:06.776090Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:06.776138Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:838:2719], Recipient [1:838:2719]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:06.776164Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:06.817413Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:06.817500Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:06.817537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-25T14:30:06.817810Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:297:2279]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-25T14:30:06.817844Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:06.817875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:06.817942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:06.817989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-25T14:30:06.818066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.933000s, Timestamp# 1970-01-01T00:00:05.112000Z 2025-06-25T14:30:06.818130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 2 s 2025-06-25T14:30:06.820222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-25T14:30:06.820917Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:1989:3656], Recipient [1:297:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:06.820980Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:06.821023Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:30:06.821112Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:297:2279]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-25T14:30:06.821145Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5153: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-25T14:30:06.821187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7834: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> TestDataErasure::Run3CyclesForTables ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:31.580215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:31.580295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:31.580422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:31.580466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:31.580520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:31.580548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:31.580612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:31.580691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:31.581374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:31.581734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:31.686721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:31.686779Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:31.703891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:31.704275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:31.704449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:31.710185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:31.710508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:31.711138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:31.711384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:31.714616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:31.714793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:31.716054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:31.716113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:31.716261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:31.716330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:31.716371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:31.716458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:31.723255Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:29:32.082961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } 
StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:32.083168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.083368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:32.083412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:32.083661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:32.083729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:32.102197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.188550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:32.188805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.188877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:32.188933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:32.188967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:32.195544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.195650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:32.195693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:32.197652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.208533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.208613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T14:29:32.208663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:32.212041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:32.214529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:32.214718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:32.215686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.215812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:32.215857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:32.216174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:32.216226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:32.216414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:32.216492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:32.221674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:32.221722Z node 1 :FLAT_TX_SCHEMESHARD ... 
6678944 2025-06-25T14:30:07.868285Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 102 ready parts: 1/1 2025-06-25T14:30:07.868492Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409546 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 5000003 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:07.870287Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-25T14:30:07.870468Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 102 at step: 5000003 2025-06-25T14:30:07.871188Z node 12 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:07.871335Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 51539609711 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:07.871420Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:357: TAlterTable TPropose operationId# 102:0 HandleReply TEvOperationPlan, operationId: 102:0, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-25T14:30:07.871755Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 129 2025-06-25T14:30:07.871920Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-25T14:30:07.894487Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:07.894571Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:30:07.895188Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:07.895263Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [12:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:30:07.895786Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:07.895868Z node 12 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:30:07.897057Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:30:07.897223Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:30:07.897371Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:30:07.897436Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-25T14:30:07.897501Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:30:07.897619Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:30:07.899646Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1855 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:30:07.899689Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:30:07.899853Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1855 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:30:07.899992Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1855 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:30:07.900949Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 51539609846 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:30:07.901016Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation 
FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:30:07.901185Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 51539609846 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:30:07.901263Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:30:07.901410Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 309 RawX2: 51539609846 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:30:07.901509Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:07.901572Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:07.901631Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:30:07.901692Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:30:07.906972Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:30:07.907199Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:07.908790Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:07.908965Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:07.909020Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:30:07.909216Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:07.909275Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:07.909332Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:07.909375Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:07.909433Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:30:07.909525Z node 12 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [12:337:2314] message: TxId: 102 2025-06-25T14:30:07.909605Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:07.909662Z node 12 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:30:07.909710Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:30:07.909882Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:30:07.913244Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:30:07.913316Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [12:397:2367] TestWaitNotification: OK eventTxId 102 >> KqpStats::JoinStatsBasicScan [GOOD] >> KqpStats::DeferredEffects-UseSink >> KqpQuery::QueryResultsTruncated [GOOD] >> KqpQuery::QueryStats+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::QueryFromSqs [GOOD] Test command err: Trying to start YDB, gRPC: 13405, MsgBus: 7367 2025-06-25T14:29:36.984218Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894069547225713:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:36.984517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001496/r3tmp/tmpKJCwoa/pdisk_1.dat 2025-06-25T14:29:37.743822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:37.743919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:37.746320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:37.766592Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894069547225515:2080] 1750861776920391 != 1750861776920394 2025-06-25T14:29:37.779187Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13405, node 1 2025-06-25T14:29:37.832818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:37.832843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:37.832850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:37.832996Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:37.964528Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient 
is connected to server localhost:7367 TClient is connected to server localhost:7367 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:38.768768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:38.804533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:38.943528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:39.248878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:39.394953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:41.399305Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894091022063633:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:41.399418Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:41.759523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:41.796708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:41.834301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:41.863933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:41.893755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:41.931619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:41.981550Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894069547225713:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:41.981615Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:42.004282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:42.089031Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519894095317031590:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:42.089096Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:42.089396Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894095317031595:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:42.092914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:42.105231Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894095317031597:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:42.196485Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894095317031650:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:43.165805Z node 1 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0002e0c80] received request Name# ExecuteDataQuery ok# true data# session_id: "ydb://session/3?node_id=1&id=ZjcwOGU1OTEtMTg1ZjUyZDUtYzBkZTZlYzgtZmJmMDY0NA==" tx_control { begin_tx { serializable_read_write { } } commit_tx: true } query { yql_text: "\n SELECT * FROM `/Root/TwoShard`;\n " } query_cache_policy { } operation_params { } peer# ipv6:%5B::1%5D:54946 2025-06-25T14:29:43.165883Z node 1 :GRPC_SERVER DEBUG: logger.cpp:36: ... :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894175744057339:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:01.943089Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001496/r3tmp/tmpHTQL6V/pdisk_1.dat 2025-06-25T14:30:02.102294Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:02.108434Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519894175744057310:2080] 1750861801942351 != 1750861801942354 2025-06-25T14:30:02.115039Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:02.115139Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:02.117378Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14842, node 4 2025-06-25T14:30:02.203835Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:02.203859Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:02.203868Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:02.203984Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14861 TClient is connected to server localhost:14861 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:02.724775Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:02.733053Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:30:02.747576Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:02.821829Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:02.961273Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:03.008844Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:03.114107Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:30:05.306852Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894192923928127:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.306938Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.349695Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.376866Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.404650Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.431943Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.461930Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.495621Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.568338Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.669591Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894192923928791:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.669684Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.669879Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894192923928796:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.675426Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:05.685985Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894192923928798:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:05.747351Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894192923928849:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:06.782009Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:06.943087Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894175744057339:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:06.943162Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TControlPlaneProxyTest::ShouldSendGetQueryStatus [GOOD] >> TControlPlaneProxyTest::ShouldSendModifyQuery >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey-UseSink [GOOD] >> KqpQuery::YqlTableSample [GOOD] >> KqpQuery::UpdateWhereInSubquery >> Cdc::RacyCreateAndSend [GOOD] >> Cdc::RacySplitAndDropTable >> TSchemeShardTest::CopyTableForBackup [GOOD] >> TSchemeShardTest::ConsistentCopyTablesForBackup >> KqpExplain::UpdateSecondaryConditionalSecondaryKey-UseSink [GOOD] >> KqpLimits::DataShardReplySizeExceeded [GOOD] >> Cdc::DescribeStream [GOOD] >> Cdc::DecimalKey |76.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest |76.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut >> TestDataErasure::Run3CyclesForAllSupportedObjects >> TTopicApiDescribes::DescribeConsumer >> TControlPlaneProxyTest::ShouldSendModifyQuery [GOOD] >> TControlPlaneProxyTest::ShouldSendDeleteQuery |76.7%| [LD] {RESULT} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |76.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut >> KqpExplain::UpdateOnSecondary+UseSink [GOOD] >> KqpExplain::UpdateOnSecondary-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateOnSecondaryWithoutSecondaryKey-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 24566, MsgBus: 1423 2025-06-25T14:29:35.291706Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894065664636514:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:35.293612Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001498/r3tmp/tmpHF7ZQC/pdisk_1.dat 2025-06-25T14:29:35.844060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:35.844172Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:35.853101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:35.872424Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:35.876571Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894065664636406:2080] 1750861775280842 != 1750861775280845 TServer::EnableGrpc on GrpcPort 24566, node 1 2025-06-25T14:29:36.071036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:36.071061Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:36.071068Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:36.071196Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1423 2025-06-25T14:29:36.309096Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1423 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:36.675616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:36.692545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:36.699647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:29:36.833158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:37.075136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:37.212138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:39.344435Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894082844507221:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:39.344524Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:39.758623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:39.812100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:39.854488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:39.900589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:39.980674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:40.032611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:40.104418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:40.171359Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894087139475179:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:40.171433Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:40.171781Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894087139475184:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:40.176034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:40.189193Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894087139475186:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:40.264195Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894087139475239:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:40.297181Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894065664636514:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:40.297261Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:41.340651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... epricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:01.620233Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:01.633127Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:01.702647Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:01.865016Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-25T14:30:01.909079Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:01.980805Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:04.615449Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894188986013194:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:04.615530Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:04.680364Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:04.710464Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:04.741518Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:04.773974Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:04.810499Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:04.886565Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:04.920942Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.015126Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894193280981157:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.015234Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.015407Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894193280981162:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.018917Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:05.029342Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894193280981164:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:05.131487Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894193280981215:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:05.823318Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894171806142449:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:05.823387Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:06.459194Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:06.508745Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:06.558072Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {"Plan":{"Plans":[{"PlanNodeId":14,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":13,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"},{"Inputs":[],"Iterator":"precompute_2_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_2_0"}],"Node Type":"Effect"},{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Iterator":"Filter","Name":"Iterator"},{"E-Rows":"2","Inputs":[],"Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"ConstantExpr-Filter"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_2_0","Node Type":"Precompute_2","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"E-Size":"0","LookupKeyColumns":["Key"],"Node Type":"TableLookup","PlanNodeId":2,"Path":"\/Root\/SecondaryKeys","Columns":["Key"],"E-Rows":"2","Plans":[{"PlanNodeId":1,"Operators":[{"Inputs":[],"Iterator":"precompute_0_1","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_1"}],"Table":"SecondaryKeys","PlanNodeType":"Connection","E-Cost":"0"}],"Node Type":"Stage"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node 
Type":"Stage"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/SecondaryKeys","reads":[{"lookup_by":["Key"],"columns":["Key"],"type":"Lookup"}],"writes":[{"columns":["Key","Value"],"type":"MultiUpsert"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"}],"Plans":[{"PlanNodeId":8,"Operators":[{"E-Rows":"2","Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"Filter"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> TIcNodeCache::GetNodesInfoTest >> TTopicApiDescribes::GetLocalDescribe >> DataShardSnapshots::ReadIteratorLocalSnapshotThenWrite [GOOD] >> DataShardSnapshots::RepeatableReadAfterSplitRace >> TTopicReaderTests::TestRun_Read_Less_Messages_Than_Sent [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateSecondaryConditionalSecondaryKey-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 6272, MsgBus: 24477 2025-06-25T14:29:34.174116Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894062166725886:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:34.174757Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014ae/r3tmp/tmpGUEgJV/pdisk_1.dat 2025-06-25T14:29:34.801832Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:34.847733Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:34.848023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:34.851648Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6272, node 1 2025-06-25T14:29:34.983556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:34.983586Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:34.983593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:34.983717Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:35.167904Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24477 TClient is connected to server localhost:24477 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:35.659246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:35.681535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:35.705950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:35.874852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:36.040279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:36.118013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:37.924302Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894075051629300:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:37.924411Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:38.422573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.463761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.507427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.558669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.599164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.678653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.719086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:38.786445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894079346597256:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:38.786570Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:38.787819Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894079346597261:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:38.792305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:38.818778Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894079346597263:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:38.912317Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894079346597314:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:39.170420Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894062166725886:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:39.170475Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:40.230238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:40.290306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, ... NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:04.904210Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:04.960708Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:04.995352Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.025701Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.059133Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.102904Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.140703Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.210511Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.266516Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894195984324334:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.266605Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894195984324339:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.266616Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.270454Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:05.281789Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894195984324341:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:05.376245Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894195984324392:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:06.345473Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894178804452893:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:06.345546Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:06.731142Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:06.821115Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:06.896556Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {"Plan":{"Plans":[{"PlanNodeId":18,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":17,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"},{"Inputs":[],"Iterator":"precompute_1_2","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_1_2"}],"Node Type":"Effect"},{"PlanNodeId":16,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":15,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_1_1","Name":"Iterator"}],"Node Type":"Delete-ConstantExpr","CTE Name":"precompute_1_1"}],"Node Type":"Effect"},{"PlanNodeId":14,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":13,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_1_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_1_0"}],"Node Type":"Effect"},{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":7,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Fk [1, 
4)"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","E-Rows":"0","ReadRangesPointPrefixLen":"1","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Subplan Name":"CTE Stage_5","Node Type":"Stage","Parent Relationship":"InitPlan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Node Type":"UnionAll","CTE Name":"Stage_5","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_1_1","Node Type":"Precompute_1_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":1,"Node Type":"UnionAll","CTE Name":"Stage_5","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_1_2","Node Type":"Precompute_1_2","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/SecondaryKeys","writes":[{"columns":["Key","Fk"],"type":"MultiUpsert"}]},{"name":"\/Root\/SecondaryKeys\/Index\/indexImplTable","reads":[{"columns":["Fk","Key"],"scan_by":["Fk [1, 4)"],"type":"Scan"}],"writes":[{"columns":["Key","Fk"],"type":"MultiUpsert"},{"type":"MultiErase"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"}],"Plans":[{"PlanNodeId":7,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Fk [1, 4)"],"Name":"TableRangeScan","Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","ReadRangesPointPrefixLen":"1","E-Rows":"0","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Upsert"}],"Node Type":"Effect"},{"PlanNodeId":8,"Plans":[{"PlanNodeId":9,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":14,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Fk [1, 4)"],"Name":"TableRangeScan","Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","ReadRangesPointPrefixLen":"1","E-Rows":"0","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Delete"}],"Node Type":"Effect"},{"PlanNodeId":15,"Plans":[{"PlanNodeId":16,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":22,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Fk [1, 4)"],"Name":"TableRangeScan","Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","ReadRangesPointPrefixLen":"1","E-Rows":"0","ReadRangesKeys":["Fk"],"Table":"SecondaryKeys\/Index\/indexImplTable","ReadColumns":["Fk","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> 
TSchemeShardTest::ConsistentCopyTablesForBackup [GOOD] >> TSchemeShardTest::CopyLockedTableForBackup >> KqpExplain::MultiJoinCteLinks [GOOD] >> TControlPlaneProxyTest::ShouldSendDeleteQuery [GOOD] >> TControlPlaneProxyTest::ShouldSendControlQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::DataShardReplySizeExceeded [GOOD] Test command err: Trying to start YDB, gRPC: 14763, MsgBus: 13791 2025-06-25T14:29:34.387730Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894061912796265:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:34.389444Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014cb/r3tmp/tmpKld597/pdisk_1.dat 2025-06-25T14:29:34.911847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:34.911968Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:34.918999Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:34.968761Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:34.969943Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894061912796131:2080] 1750861774318632 != 1750861774318635 TServer::EnableGrpc on GrpcPort 14763, node 1 2025-06-25T14:29:35.141525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:35.141546Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:35.141553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:35.141667Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:35.388556Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13791 TClient is connected to server localhost:13791 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
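(Aside on KqpLimits::DataShardReplySizeExceeded, whose captured output begins above: judging by its name, the test drives a query whose datashard reply outgrows the allowed size, and the "Create QueryResponse for error on request" warning near the end of this block is the session actor turning that overflow into an error status. Below is a minimal YQL sketch of the kind of statement that typically hits such a limit; the table and column names are assumptions for illustration, not taken from the test source.

    -- Hypothetical illustration only; KeyValueLarge, Key and Value are assumed names.
    -- An unbounded scan over rows holding multi-megabyte string values can exceed
    -- the datashard reply / result size limit, so the query is answered with an
    -- error status instead of a truncated result set.
    SELECT Key, Value
    FROM KeyValueLarge;
)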
2025-06-25T14:29:36.044100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:36.085834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:39.384766Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894061912796265:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:39.384834Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:39.395771Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894083387633613:2322], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:39.399962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:39.400942Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894083387633602:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:39.401074Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:39.412565Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894083387633616:2323], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:29:39.510725Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894083387633670:2565] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 23836, MsgBus: 18745 2025-06-25T14:29:42.242287Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894095443412470:2057];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014cb/r3tmp/tmp5xwa4p/pdisk_1.dat 2025-06-25T14:29:42.297993Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:29:42.403656Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519894095443412454:2080] 1750861782228022 != 1750861782228025 2025-06-25T14:29:42.405655Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:42.415696Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:42.415767Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:42.416927Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23836, node 2 2025-06-25T14:29:42.468136Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:42.468159Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:42.468167Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:42.468285Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18745 TClient is connected to server localhost:18745 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:29:42.903216Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:42.909911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:42.923421Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:43.242721Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:45.594022Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894108328315335:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:45.594127Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:45.594374Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894108328315347:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:45.598189Z nod ... tart YDB, gRPC: 23823, MsgBus: 4390 2025-06-25T14:30:01.408202Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894176514249276:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:01.408446Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014cb/r3tmp/tmpq9fNw1/pdisk_1.dat 2025-06-25T14:30:01.549948Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:01.564202Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:01.564495Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:01.567121Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23823, node 4 2025-06-25T14:30:01.636846Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:01.636872Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:01.636883Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:01.637024Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4390 TClient is connected to server localhost:4390 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:02.223926Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:30:02.243638Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:02.337925Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:02.490755Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:02.538251Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:02.621681Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:05.001478Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894193694120073:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.001592Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.065565Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.134938Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.168123Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.201648Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.238032Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.311619Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.348473Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:05.409293Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894193694120741:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.409395Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.409623Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894193694120746:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:05.418077Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:05.426480Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894193694120748:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:05.483176Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894193694120799:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:06.409215Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894176514249276:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:06.411803Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:06.500090Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:09.627222Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=NDllZDU5ZmUtOGE0NjQxNDEtYzc5MTI5YTgtYTEzY2RiNw==, ActorId: [4:7519894197989088364:2473], ActorState: ExecuteState, TraceId: 01jykqy1m00b73c9j165kzgr96, Create QueryResponse for error on request, msg: >> DataShardSnapshots::LockedWriteCleanupOnCopyTable+UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnCopyTable-UseSink |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TSubscriberCombinationsTest::CombinationsRootDomain [GOOD] >> TSubscriberCombinationsTest::CombinationsMigratedPath >> KqpStats::DataQueryWithEffects-UseSink [GOOD] >> KqpStats::DataQueryMulti >> TControlPlaneProxyTest::ShouldSendControlQuery [GOOD] >> TControlPlaneProxyTest::ShouldSendGetResultData ------- [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicReaderTests::TestRun_Read_Less_Messages_Than_Sent [GOOD] Test command err: === Starting PQ server === Server->StartServer(false); 2025-06-25T14:29:26.913610Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894027341749594:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:26.988673Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:29:26.975157Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894025186325779:2249];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b6e/r3tmp/tmp2MgybK/pdisk_1.dat 2025-06-25T14:29:27.410572Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:29:27.412973Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:29:27.447020Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:29:27.591677Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:27.606070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:27.606202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:27.611301Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:29:27.612055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:27.638583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:27.638671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:27.647304Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63667, node 1 2025-06-25T14:29:27.759898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b6e/r3tmp/yandex48UQOJ.tmp 2025-06-25T14:29:27.759923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b6e/r3tmp/yandex48UQOJ.tmp 2025-06-25T14:29:27.760113Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b6e/r3tmp/yandex48UQOJ.tmp 2025-06-25T14:29:27.760253Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:27.936774Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:27.936296Z INFO: TTestServer started on Port 23080 GrpcPort 63667 2025-06-25T14:29:27.953374Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23080 PQClient connected to localhost:63667 === TenantModeEnabled() = 0 === Init PQ - start server on port 63667 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:29:28.554200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:29:28.554423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:29:28.554652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:29:28.554683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:29:28.554971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:29:28.555041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:28.561829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:29:28.562353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:29:28.562562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:29:28.562617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:29:28.562631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-25T14:29:28.562650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-06-25T14:29:28.569479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:29:28.569522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:29:28.569538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 3 -> 128 2025-06-25T14:29:28.571860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:29:28.571892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:29:28.571930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:29:28.571961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-25T14:29:28.577349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:28.578154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:28.578181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-25T14:29:28.578204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:28.579752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-25T14:29:28.579916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:29:28.582935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750861768628, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:29:28.584001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750861768628 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:29:28.584045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:29:28.584393Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 128 -> 240 2025-06-25T14:29:28.584431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:29:28.584615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:29:28.584697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: ... alancing. familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-25T14:30:09.767785Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037898][rt3.dc1--topic1] consumer cli balancing duration: 0.000211s 2025-06-25T14:30:09.769468Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:1315: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 assign: record# { Partition: 0 TabletId: 72075186224037897 Topic: "rt3.dc1--topic1" Generation: 1 Step: 1 Session: "shared/cli_5_1_639847772563310540_v1" ClientId: "cli" PipeClient { RawX1: 7519894210251408327 RawX2: 4503621102209586 } Path: "/Root/PQ/rt3.dc1--topic1" } 2025-06-25T14:30:09.769614Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:1132: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 INITING TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) 2025-06-25T14:30:09.770057Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:972: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037897 Generation: 1, pipe: [5:7519894210251408330:2613] 2025-06-25T14:30:09.770241Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: shared/cli_5_1_639847772563310540_v1:1 with generation 1 2025-06-25T14:30:09.778186Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 3 SizeLag: 280 WriteTimestampEstimateMS: 1750861809750 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-06-25T14:30:09.778241Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 INIT DONE TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 2025-06-25T14:30:09.778300Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 sending to client partition status 2025-06-25T14:30:09.779195Z :INFO: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] [] Confirm partition stream create. Partition stream id: 1. Cluster: "-". Topic: "/topic1". Partition: 0. 
Read offset: (NULL) 2025-06-25T14:30:09.783905Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 grpc read done: success# 1, data# { start_partition_session_response { partition_session_id: 1 } } 2025-06-25T14:30:09.784062Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:533: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 got StartRead from client: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, commitOffset# (empty maybe) 2025-06-25T14:30:09.784142Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:1012: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 Start reading TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 clientCommitOffset (empty maybe) clientReadOffset 0 2025-06-25T14:30:09.784174Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:958: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 0 endOffset 3 2025-06-25T14:30:09.784229Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, endOffset# 3, WTime# 0, sizeLag# 280 2025-06-25T14:30:09.784248Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1TEvPartitionReady. Aval parts: 1 2025-06-25T14:30:09.784290Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 performing read request: guid# 83fc74bd-29e42b0c-3bcd4f12-6763deee, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 3, size# 336, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-25T14:30:09.784430Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 3 maxSize 336 maxTimeLagMs 0 readTimestampMs 0 readOffset 0 EndOffset 3 ClientCommitOffset 0 committedOffset 0 Guid 83fc74bd-29e42b0c-3bcd4f12-6763deee 2025-06-25T14:30:09.787803Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 3 Result { Offset: 0 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 1 WriteTimestampMS: 1750861809644 CreateTimestampMS: 1750861809642 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 2 WriteTimestampMS: 1750861809649 CreateTimestampMS: 1750861809642 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 79 bytes ..." 
SourceId: "\000source1" SeqNo: 3 WriteTimestampMS: 1750861809650 CreateTimestampMS: 1750861809643 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 2 SizeLag: 18446744073709551530 RealReadOffset: 2 WaitQuotaTimeMs: 0 EndOffset: 3 StartOffset: 0 } Cookie: 0 } 2025-06-25T14:30:09.787970Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 3 2025-06-25T14:30:09.788017Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid 83fc74bd-29e42b0c-3bcd4f12-6763deee has messages 1 2025-06-25T14:30:09.788093Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 read done: guid# 83fc74bd-29e42b0c-3bcd4f12-6763deee, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 490 2025-06-25T14:30:09.788147Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 response to read: guid# 83fc74bd-29e42b0c-3bcd4f12-6763deee 2025-06-25T14:30:09.789285Z :DEBUG: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] [] Got ReadResponse, serverBytesSize = 490, now ReadSizeBudget = 0, ReadSizeServerDelta = 52428310 2025-06-25T14:30:09.788452Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 Process answer. Aval parts: 0 2025-06-25T14:30:09.789417Z :DEBUG: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52428310 2025-06-25T14:30:09.789691Z :DEBUG: [] Decompression task done. Partition/PartitionSessionId: 1 (0-2) 2025-06-25T14:30:09.789763Z :DEBUG: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] [] Returning serverBytesSize = 490 to budget 2025-06-25T14:30:09.789799Z :DEBUG: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] [] In ContinueReadingDataImpl, ReadSizeBudget = 490, ReadSizeServerDelta = 52428310 2025-06-25T14:30:09.790071Z :DEBUG: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-25T14:30:09.791245Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-06-25T14:30:09.791297Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (1-1) 2025-06-25T14:30:09.791238Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 grpc read done: success# 1, data# { read_request { bytes_size: 490 } } 2025-06-25T14:30:09.791323Z :DEBUG: [] Take Data. Partition 0. Read: {2, 0} (2-2) 2025-06-25T14:30:09.791365Z :DEBUG: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] [] The application data is transferred to the client. 
Number of messages 3, size 24 bytes 2025-06-25T14:30:09.791415Z :DEBUG: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] [] Returning serverBytesSize = 0 to budget 2025-06-25T14:30:09.791359Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 got read request: guid# cebc8aab-bddaed-4d632ed8-6d48c2dc 2025-06-25T14:30:09.791577Z :INFO: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] Closing read session. Close timeout: 0.000000s 2025-06-25T14:30:09.791622Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:2:0 2025-06-25T14:30:09.791677Z :INFO: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] Counters: { Errors: 0 CurrentSessionLifetimeMs: 41 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:30:09.791774Z :NOTICE: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:30:09.791818Z :DEBUG: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] [] Abort session to cluster 2025-06-25T14:30:09.792591Z :NOTICE: [] [] [6140e8ef-6fe44427-fc553cd8-7b1c9c70] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:30:09.792741Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 grpc read done: success# 0, data# { } 2025-06-25T14:30:09.792775Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 grpc read failed 2025-06-25T14:30:09.792798Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 grpc closed 2025-06-25T14:30:09.792840Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/cli session shared/cli_5_1_639847772563310540_v1 is DEAD 2025-06-25T14:30:09.793092Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/cli_5_1_639847772563310540_v1 2025-06-25T14:30:09.794023Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037898][rt3.dc1--topic1] pipe [5:7519894210251408327:2610] disconnected; active server actors: 1 2025-06-25T14:30:09.794055Z node 6 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037898][rt3.dc1--topic1] pipe [5:7519894210251408327:2610] client cli disconnected session shared/cli_5_1_639847772563310540_v1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::MultiJoinCteLinks [GOOD] Test command err: Trying to start YDB, gRPC: 28793, MsgBus: 10793 2025-06-25T14:29:38.847525Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894079913715227:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:38.847586Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00148b/r3tmp/tmpZYAqRI/pdisk_1.dat 2025-06-25T14:29:39.453366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:39.453467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:39.457738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:39.513958Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28793, node 1 2025-06-25T14:29:39.709627Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:39.709663Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:39.709673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:39.709820Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:39.869429Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10793 TClient is connected to server localhost:10793 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:40.405331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:40.429629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:40.561667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:29:40.709740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:40.791882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:42.571054Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894097093586018:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:42.571192Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:42.896525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:42.928251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:42.967721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:43.010431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:43.041746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:43.070843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:43.103018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:43.197120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894101388553971:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:43.197201Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894101388553976:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:43.197202Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:43.200669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:43.217476Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894101388553978:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:43.274588Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894101388554029:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:43.847778Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894079913715227:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:43.847880Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"TopSort","Limit":"SUM(10,15)","TopSortBy":"row.Text"},{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"0","Table":"EightShard","ReadColumns":["Data","Key","Text"],"E-Cost":"0"}],"Node Type":"TopSort-TableFullScan"}],"Node Type":"Merge","SortColumns":["Text (Asc)"],"PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":2}],"Name":"Limit","Limit":"Min(If,SUM(10,15))"}],"Node Ty ... Id_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:05.664483Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:05.677897Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:05.755796Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:30:05.872590Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:05.911239Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:05.977161Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:08.675992Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519894206023754057:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:08.676101Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:08.742247Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:08.819933Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:08.861089Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:08.916457Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:08.989399Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:09.026695Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:09.100532Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:09.190599Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519894210318722028:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:09.190704Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:09.190978Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519894210318722033:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:09.194800Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:09.206213Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519894210318722035:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:09.304152Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519894210318722086:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:09.865039Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519894188843883250:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:09.865110Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":12,"Plans":[{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"E-Size":"0","PlanNodeId":8,"LookupKeyColumns":["Key"],"Node Type":"TableLookup","Path":"\/Root\/EightShard","Columns":["Data","Key","Text"],"E-Rows":"0","Table":"EightShard","Plans":[{"PlanNodeId":7,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Iterator":"PartitionByKey","Name":"Iterator"},{"Inputs":[],"Name":"PartitionByKey","Input":"precompute_0_0"}],"Node Type":"ConstantExpr-Aggregate","CTE Name":"precompute_0_0"}],"PlanNodeType":"Connection","E-Cost":"0"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Limit","Limit":"1001"},{"Inputs":[{"InternalOperatorId":3},{"InternalOperatorId":2}],"E-Rows":"0","Condition":"es.Key = kv.Key","Name":"InnerJoin (MapJoin)","E-Size":"0","E-Cost":"0"},{"Inputs":[],"ToFlow":"precompute_0_0","Name":"ToFlow"},{"Inputs":[{"ExternalPlanNodeId":8}],"E-Rows":"0","Predicate":"Exist(item.Key)","Name":"Filter","E-Size":"0","E-Cost":"0"}],"Node Type":"Limit-InnerJoin (MapJoin)-ConstantExpr-Filter","CTE Name":"precompute_0_0"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":10}],"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"},{"PlanNodeId":5,"Subplan Name":"CTE precompute_0_0","Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["KeyValue"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/KeyValue","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"KeyValue","ReadColumns":["Key","Value"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Collect"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"Precompute_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","reads":[{"lookup_by":["Key"],"columns":["Data","Key","Text"],"type":"Lookup"}]},{"name":"\/Root\/KeyValue","reads":[{"columns":["Key","Value"],"scan_by":["Key (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":7,"Operators":[{"E-Rows":"0","Columns":["Data","Key","Text"],"E-Size":"0","E-Cost":"0","Name":"TableLookup","Table":"EightShard","LookupKeyColumns":["Key"]}],"Node 
Type":"TableLookup","PlanNodeType":"Connection"}],"Operators":[{"E-Rows":"0","Predicate":"Exist(item.Key)","Name":"Filter","E-Size":"0","E-Cost":"0"}],"Node Type":"Filter"},{"PlanNodeId":13,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/KeyValue","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"KeyValue","ReadColumns":["Key","Value"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Operators":[{"E-Rows":"0","Condition":"es.Key = kv.Key","Name":"InnerJoin (MapJoin)","E-Size":"0","E-Cost":"0"}],"Node Type":"InnerJoin (MapJoin)"}],"Operators":[{"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Operators":[{"Name":"Limit","Limit":"1001"}],"Node Type":"Limit"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> TSchemeShardTest::CopyLockedTableForBackup [GOOD] >> TSchemeShardTest::ConfigColumnFamily |76.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |76.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |76.8%| [LD] {RESULT} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut >> TTopicReaderTests::TestRun_ReadMessages_Output_Base64 [GOOD] |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> KqpStats::OneShardLocalExec+UseSink [GOOD] >> KqpStats::OneShardLocalExec-UseSink >> TControlPlaneProxyTest::ShouldSendGetResultData [GOOD] >> TControlPlaneProxyTest::ShouldSendListJobs >> KqpQuery::UpdateThenDelete+UseSink [GOOD] >> Cdc::InitialScan [GOOD] >> Cdc::InitialScan_WithTopicSchemeTx >> TestDataErasure::Run3CyclesForTopics [GOOD] >> TestDataErasure::DataErasureWithMerge [GOOD] |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> Cdc::ShouldBreakLocksOnConcurrentAlterTable [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentMoveTable [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentMoveIndex >> TControlPlaneProxyTest::ShouldSendListJobs [GOOD] >> TControlPlaneProxyTest::ShouldSendDescribeJob |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> TSchemeShardTest::ConfigColumnFamily [GOOD] >> TSchemeShardTest::ConsistentCopyAfterDropIndexes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::UpdateThenDelete+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 63842, MsgBus: 27770 2025-06-25T14:29:46.836686Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894111251915066:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:46.836768Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/001485/r3tmp/tmpayGYbF/pdisk_1.dat 2025-06-25T14:29:47.277201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:47.277334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:47.282039Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:47.333279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63842, node 1 2025-06-25T14:29:47.409232Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:47.409255Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:47.409282Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:47.409425Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27770 2025-06-25T14:29:47.884494Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27770 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:48.227235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:48.257890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:48.288057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:29:48.491832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:48.666591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:48.741950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:50.672724Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894128431785874:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:50.672809Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:51.026180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:51.067858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:51.140909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:51.166519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:51.199741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:51.268016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:51.299324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:51.349226Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894132726753835:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:51.349311Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:51.349376Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894132726753840:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:51.353320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:51.361791Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894132726753842:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:51.464589Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894132726753893:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:51.840422Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894111251915066:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:51.840498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:52.690997Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519894137021721480:2483], TxId: 281474976710672, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=YzgyZmU5ZDYtZWU3Zjk5MmEtYzczMzdjODAtZTYzNjc1ZjY=. TraceId : 01jykqxh5cafrp7hkj37b6v3kj. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED DEFAULT_ERROR: {
: Error: Terminate was called, reason(17): Bad filter value. }. 2025-06-25T14: ... : 1 } 2025-06-25T14:30:06.579401Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519894199894502938:2478], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:3:84: Error: At function: KiUpdateTable!
:3:84: Error: Column 'NonExistentColumn' does not exist in table '/Root/KeyValue'., code: 2017 2025-06-25T14:30:06.579632Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=ZDkyNWZhOTQtOTI3YzdlOTQtNTJlMjM5ZDAtM2UyOGYxMWI=, ActorId: [3:7519894199894502930:2473], ActorState: ExecuteState, TraceId: 01jykqxyyv3qg789bavwnt2s1b, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: Trying to start YDB, gRPC: 18766, MsgBus: 10477 2025-06-25T14:30:07.358596Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894204439819647:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:07.358640Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001485/r3tmp/tmpaN6iBc/pdisk_1.dat 2025-06-25T14:30:07.501088Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:07.515268Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:07.515354Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:07.520264Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18766, node 4 2025-06-25T14:30:07.561733Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:07.561760Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:07.561772Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:07.561903Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10477 TClient is connected to server localhost:10477 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:30:08.153536Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:08.166323Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:08.273331Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:08.385097Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:08.433089Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:08.511763Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:11.220868Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894221619690435:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:11.220946Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:11.283214Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:11.315506Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:11.347477Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:11.386525Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:11.419994Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:11.489863Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:11.529649Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:11.591874Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894221619691097:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:11.591934Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:11.592053Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894221619691102:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:11.595909Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:11.607472Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894221619691104:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:11.701904Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894221619691155:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:12.358983Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894204439819647:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:12.359066Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; [] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::Run3CyclesForTopics [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:07.413583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:07.413695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:07.413750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:07.413790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:07.413845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:07.413878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:07.413938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:07.414020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:07.414803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:07.415214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:07.496817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:07.496895Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles 
were not loaded 2025-06-25T14:30:07.519864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:07.520351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:07.520551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:07.530601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:07.530986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:07.531694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:07.531982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:07.535643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:07.535862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:07.537107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:07.537180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:07.537337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:07.537388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:07.537437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:07.537532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:07.544834Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:07.675989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:07.676244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:07.676485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:07.676531Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:07.676789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:07.676862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:07.680177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:07.680388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:07.680590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:07.680670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:07.680725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:07.680767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:07.682923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:07.682984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:07.683024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:07.684811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:07.684871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:07.684932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:07.684980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:07.688641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:07.690755Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:07.690951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:07.691859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:07.691998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:07.692055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:07.692432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:07.692494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:07.692706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:07.692802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:07.701517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:07.701582Z node 1 :FLAT_TX_SCHEMESHARD ... 
7: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-25T14:30:13.638624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-25T14:30:13.638762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-25T14:30:13.733882Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:13.733969Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:13.734115Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:463:2414], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:13.734148Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:13.734330Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:643:2557], Recipient [1:463:2414]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409546 2025-06-25T14:30:13.734381Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:30:13.734602Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:30:13.734905Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 2 took 297us result status StatusSuccess 2025-06-25T14:30:13.735632Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409550 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409549 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409550 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 
DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:30:13.819323Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:883:2758]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:13.819399Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:13.819487Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:883:2758], Recipient [1:883:2758]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:13.819532Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:13.819704Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:1067:2903], Recipient [1:883:2758]: NKikimrSchemeOp.TDescribePath PathId: 2 SchemeshardId: 72075186233409551 2025-06-25T14:30:13.819739Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:30:13.819831Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72075186233409551, at schemeshard: 72075186233409551 2025-06-25T14:30:13.820010Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409551 describe pathId 2 took 144us result status StatusSuccess 2025-06-25T14:30:13.820504Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72075186233409551 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409555 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409554 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409555 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409552 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409553 SchemeShard: 
72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409551, at schemeshard: 72075186233409551 2025-06-25T14:30:14.150591Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.150671Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.150760Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:297:2279], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.150798Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.164605Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:14.164684Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:14.164725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-25T14:30:14.164966Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:297:2279]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: true Progress10k: 10000 2025-06-25T14:30:14.165027Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:14.165068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:14.165151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:14.165186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-25T14:30:14.165288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.981000s, Timestamp# 1970-01-01T00:00:11.064000Z 2025-06-25T14:30:14.165348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s 2025-06-25T14:30:14.168180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-25T14:30:14.169012Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:1534:3321], Recipient [1:297:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:14.169143Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:14.169205Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:30:14.169473Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:297:2279]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-25T14:30:14.169532Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5153: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-25T14:30:14.169588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7834: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicReaderTests::TestRun_ReadMessages_Output_Base64 [GOOD] Test command err: === Starting PQ server === Server->StartServer(false); 2025-06-25T14:29:27.276564Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894032675413936:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:27.276622Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:29:27.300815Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894032729646617:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:27.307494Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b0c/r3tmp/tmptCbfx8/pdisk_1.dat 2025-06-25T14:29:27.501095Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:29:27.509669Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:29:27.753608Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:27.768119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:27.768213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:27.769222Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:27.769280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:27.777028Z node 
1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:29:27.777191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:27.778609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18479, node 1 2025-06-25T14:29:28.109159Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b0c/r3tmp/yandexnI6bEw.tmp 2025-06-25T14:29:28.109185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b0c/r3tmp/yandexnI6bEw.tmp 2025-06-25T14:29:28.109346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b0c/r3tmp/yandexnI6bEw.tmp 2025-06-25T14:29:28.109462Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:28.302132Z INFO: TTestServer started on Port 26150 GrpcPort 18479 2025-06-25T14:29:28.304736Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:28.321833Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26150 PQClient connected to localhost:18479 === TenantModeEnabled() = 0 === Init PQ - start server on port 18479 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:29:29.015269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:29:29.015695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:29:29.015931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:29:29.015957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:29:29.016359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:29:29.016428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:29.026010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:29:29.026214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:29:29.026421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:29:29.026464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:29:29.026486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-25T14:29:29.026500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-06-25T14:29:29.033327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:29.033375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-25T14:29:29.033394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:29.034732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:29:29.034787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:29:29.034804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 3 -> 128 2025-06-25T14:29:29.039711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:29:29.039747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:29:29.039784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:29:29.039826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-25T14:29:29.043426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:29.046891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-25T14:29:29.047116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:29:29.050176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750861769097, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:29:29.050430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750861769097 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:29:29.050470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:29:29.050808Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 128 -> 240 2025-06-25T14:29:29.050842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:29:29.051042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:29:29.051098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: ... cli_5_1_5459548686121778862_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037897 Generation: 1, pipe: [5:7519894223867526202:2560] 2025-06-25T14:30:12.200552Z node 6 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: shared/cli_5_1_5459548686121778862_v1:1 with generation 1 2025-06-25T14:30:12.206291Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 3 SizeLag: 280 WriteTimestampEstimateMS: 1750861812186 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-06-25T14:30:12.206342Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 INIT DONE TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 2025-06-25T14:30:12.206424Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 sending to client partition status 2025-06-25T14:30:12.207352Z :INFO: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] [] Confirm partition stream create. Partition stream id: 1. Cluster: "-". Topic: "/topic1". Partition: 0. 
Read offset: (NULL) 2025-06-25T14:30:12.210037Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 grpc read done: success# 1, data# { start_partition_session_response { partition_session_id: 1 } } 2025-06-25T14:30:12.210186Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:533: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 got StartRead from client: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, commitOffset# (empty maybe) 2025-06-25T14:30:12.210245Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:1012: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 Start reading TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 clientCommitOffset (empty maybe) clientReadOffset 0 2025-06-25T14:30:12.210273Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:958: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 0 endOffset 3 2025-06-25T14:30:12.210330Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, endOffset# 3, WTime# 0, sizeLag# 280 2025-06-25T14:30:12.210352Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1TEvPartitionReady. Aval parts: 1 2025-06-25T14:30:12.210408Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 performing read request: guid# 2d966a8a-f6564b24-a449c3b5-6d4f6b53, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 3, size# 336, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-25T14:30:12.210502Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 3 maxSize 336 maxTimeLagMs 0 readTimestampMs 0 readOffset 0 EndOffset 3 ClientCommitOffset 0 committedOffset 0 Guid 2d966a8a-f6564b24-a449c3b5-6d4f6b53 2025-06-25T14:30:12.213226Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 3 Result { Offset: 0 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 1 WriteTimestampMS: 1750861812082 CreateTimestampMS: 1750861812080 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 2 WriteTimestampMS: 1750861812090 CreateTimestampMS: 1750861812080 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 79 bytes ..." 
SourceId: "\000source1" SeqNo: 3 WriteTimestampMS: 1750861812113 CreateTimestampMS: 1750861812080 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 2 SizeLag: 18446744073709551530 RealReadOffset: 2 WaitQuotaTimeMs: 0 EndOffset: 3 StartOffset: 0 } Cookie: 0 } 2025-06-25T14:30:12.213434Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 3 2025-06-25T14:30:12.213488Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid 2d966a8a-f6564b24-a449c3b5-6d4f6b53 has messages 1 2025-06-25T14:30:12.213615Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 read done: guid# 2d966a8a-f6564b24-a449c3b5-6d4f6b53, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 484 2025-06-25T14:30:12.213651Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 response to read: guid# 2d966a8a-f6564b24-a449c3b5-6d4f6b53 2025-06-25T14:30:12.213849Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 Process answer. Aval parts: 0 2025-06-25T14:30:12.214165Z :DEBUG: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] [] Got ReadResponse, serverBytesSize = 484, now ReadSizeBudget = 0, ReadSizeServerDelta = 52428316 2025-06-25T14:30:12.214281Z :DEBUG: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52428316 2025-06-25T14:30:12.214518Z :DEBUG: [] Decompression task done. Partition/PartitionSessionId: 1 (0-2) 2025-06-25T14:30:12.214583Z :DEBUG: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] [] Returning serverBytesSize = 484 to budget 2025-06-25T14:30:12.214622Z :DEBUG: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] [] In ContinueReadingDataImpl, ReadSizeBudget = 484, ReadSizeServerDelta = 52428316 2025-06-25T14:30:12.214847Z :DEBUG: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-25T14:30:12.215009Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-06-25T14:30:12.215065Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (1-1) 2025-06-25T14:30:12.215100Z :DEBUG: [] Take Data. Partition 0. Read: {2, 0} (2-2) 2025-06-25T14:30:12.215145Z :DEBUG: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] [] The application data is transferred to the client. 
Number of messages 3, size 24 bytes 2025-06-25T14:30:12.215197Z :DEBUG: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] [] Returning serverBytesSize = 0 to budget 2025-06-25T14:30:12.215174Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 grpc read done: success# 1, data# { read_request { bytes_size: 484 } } 2025-06-25T14:30:12.215276Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 got read request: guid# b01585df-2431ae3f-f7b35fd7-731a572 2025-06-25T14:30:12.215398Z :INFO: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] Closing read session. Close timeout: 0.000000s 2025-06-25T14:30:12.215445Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:2:0 2025-06-25T14:30:12.215490Z :INFO: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] Counters: { Errors: 0 CurrentSessionLifetimeMs: 32 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:30:12.215595Z :NOTICE: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:30:12.215632Z :DEBUG: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] [] Abort session to cluster 2025-06-25T14:30:12.216069Z :NOTICE: [] [] [cebf2f8-d4bd5a00-11f85fd3-bb76c6f9] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:30:12.216974Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 grpc read done: success# 0, data# { } 2025-06-25T14:30:12.217009Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 grpc read failed 2025-06-25T14:30:12.217045Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 closed 2025-06-25T14:30:12.217577Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/cli session shared/cli_5_1_5459548686121778862_v1 is DEAD 2025-06-25T14:30:12.218196Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037898][rt3.dc1--topic1] pipe [5:7519894223867526199:2557] disconnected; active server actors: 1 2025-06-25T14:30:12.218223Z node 5 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037898][rt3.dc1--topic1] pipe [5:7519894223867526199:2557] client cli disconnected session shared/cli_5_1_5459548686121778862_v1 2025-06-25T14:30:12.222183Z node 6 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/cli_5_1_5459548686121778862_v1 2025-06-25T14:30:12.685101Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710699, task: 1, CA Id [5:7519894223867526226:2561]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 0 2025-06-25T14:30:12.720402Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710699, task: 1, CA Id [5:7519894223867526226:2561]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:30:12.769962Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710699, task: 1, CA Id [5:7519894223867526226:2561]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:30:12.833753Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710699, task: 1, CA Id [5:7519894223867526226:2561]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:30:12.937861Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710699, task: 1, CA Id [5:7519894223867526226:2561]. 
Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 >> KqpQuery::QueryStats+UseSink [GOOD] >> KqpQuery::QueryStats-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureWithMerge [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:68:2058] recipient: [1:61:2102] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:68:2058] recipient: [1:61:2102] Leader for TabletID 72057594046678944 is [1:72:2106] sender: [1:76:2058] recipient: [1:61:2102] 2025-06-25T14:30:01.254566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:01.254656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:01.254686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:01.254711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:01.254746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:01.254766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:01.254825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:01.254880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:01.255433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:01.255695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:01.322631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:01.322688Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:01.326928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:01.327153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:01.327319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:01.328535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:01.328664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:01.329101Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:01.329272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:01.329823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:01.329964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:01.330850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:01.330901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:01.331122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:01.331164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:01.331201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:01.331276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.333402Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:72:2106] sender: [1:151:2058] recipient: [1:16:2063] 2025-06-25T14:30:01.449982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:01.450184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.450342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:01.450394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:01.450617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:01.450672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:01.451308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:01.451452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:01.451583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.451633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:01.451668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:01.451693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:01.451997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.452024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:01.452052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:01.452363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.452393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:01.452427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:01.452465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:01.460086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:01.460608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:01.460763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:01.461582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:01.461680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 77 RawX2: 4294969406 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:01.461724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:01.461924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:01.461957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:01.462083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:01.462135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:01.462612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:01.462652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: s ... 5: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.279311Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.279435Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:278:2240], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.279470Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.311623Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.311696Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.311778Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:188:2180], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.311813Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.322205Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.322309Z node 2 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.322385Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:278:2240], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.322408Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.358580Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.358653Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.358726Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:188:2180], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.358756Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.370502Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.370583Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.370680Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:278:2240], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.370709Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.408358Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.408437Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.408523Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:188:2180], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.408552Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.419128Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.419210Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.419322Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:278:2240], Recipient [2:278:2240]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.419368Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.452814Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.452881Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:14.452985Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:188:2180], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.453013Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:14.464006Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [2:1205:3017], Recipient [2:278:2240]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409551 TableLocalId: 2 Generation: 2 Round: 1 TableStats { DataSize: 10141461 RowCount: 99 IndexSize: 4463 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 50 HasLoanedParts: false Channels { Channel: 1 DataSize: 10141461 IndexSize: 4463 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 2687 Memory: 90317 Storage: 10149823 } ShardState: 2 UserTablePartOwners: 72075186233409551 NodeId: 2 StartTime: 50000 TableOwnerId: 72075186233409546 IsDstSplit: true FollowerId: 0 2025-06-25T14:30:14.464076Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:30:14.464127Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72075186233409546 from shard 72075186233409551 followerId 0 pathId [OwnerId: 72075186233409546, LocalPathId: 2] state 'Ready' dataSize 10141461 rowCount 99 cpuUsage 0.2687 2025-06-25T14:30:14.464231Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72075186233409546 from shard 72075186233409551 followerId 0 pathId [OwnerId: 72075186233409546, LocalPathId: 2] raw table stats: DataSize: 10141461 RowCount: 99 IndexSize: 4463 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 50 HasLoanedParts: false Channels { Channel: 1 DataSize: 10141461 IndexSize: 4463 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:30:14.464268Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-25T14:30:14.474805Z node 2 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:14.474879Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:14.474914Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-25T14:30:14.475154Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [2:187:2179], Recipient [2:188:2180]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-25T14:30:14.475192Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:14.475235Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:14.475300Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:14.475341Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-25T14:30:14.475413Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 29.998000s, Timestamp# 1970-01-01T00:01:10.002000Z 2025-06-25T14:30:14.475453Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 20 s 2025-06-25T14:30:14.476033Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-25T14:30:14.481128Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [2:1523:3276], Recipient [2:188:2180]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:14.481206Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:14.481252Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:30:14.481438Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125519, Sender [2:172:2171], Recipient [2:188:2180]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-25T14:30:14.481474Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5153: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-25T14:30:14.481513Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7834: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> AssignTxId::Basic >> KqpQuery::UpdateWhereInSubquery [GOOD] >> KqpQuery::UpdateThenDelete-UseSink >> TSchemeShardTest::DropPQAbort [GOOD] >> TSchemeShardTest::DropBlockStoreVolume >> TControlPlaneProxyTest::ShouldSendDescribeJob [GOOD] >> TControlPlaneProxyTest::ShouldSendCreateConnection >> TestDataErasure::ManualLaunch3Cycles [GOOD] >> TestDataErasure::ManualLaunch3CyclesWithNotConsistentCountersInSchemeShardAndBSC >> 
TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch >> TableCreator::CreateTables |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> Cdc::DecimalKey [GOOD] >> Cdc::AddColumn |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest |76.8%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> TestDataErasure::DataErasureWithCopyTable [GOOD] >> TSchemeShardTest::DropBlockStoreVolume [GOOD] >> TSchemeShardTest::DropBlockStoreVolumeWithNonReplicatedPartitions >> KqpScripting::StreamExecuteYqlScriptScanWriteCancelAfterBruteForced [GOOD] >> KqpScripting::StreamExecuteYqlScriptScanScalar >> TControlPlaneProxyTest::ShouldSendCreateConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendListConnections >> KqpStats::DeferredEffects-UseSink [GOOD] |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> TestDataErasure::Run3CyclesForTables [GOOD] >> Cdc::RacySplitAndDropTable [GOOD] >> Cdc::RenameTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::Run3CyclesForTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:09.525965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:09.526086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:09.526133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:09.526192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:09.526246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:09.526279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:09.526366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:09.526467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:09.527314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:09.527755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:09.614686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:09.614756Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:09.636196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:09.636662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:09.636847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:09.643890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:09.644300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:09.645037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:09.645346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:09.652789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:09.652969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:09.653842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:09.653891Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:09.654015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:09.654052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:09.654088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:09.654152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:09.660348Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:09.798339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-06-25T14:30:09.798598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:09.798795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:09.798838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:09.799081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:09.799152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:09.802919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:09.803125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:09.803294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:09.803362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:09.803415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:09.803454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:09.805585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:09.805654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:09.805702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:09.807308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:09.807355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:09.807402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:09.807437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , 
TxId: 1 ready parts: 1/1 2025-06-25T14:30:09.810327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:09.812024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:09.812168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:09.812879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:09.812995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:09.813046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:09.813354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:09.813427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:09.813619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:09.813698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:09.817257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:09.817309Z node 1 :FLAT_TX_SCHEMESHARD ... 594046678944, LocalPathId: 2] in# 65 ms, next wakeup# 593.935000s, rate# 0, in queue# 0 tenants, running# 0 tenants at schemeshard 72057594046678944 2025-06-25T14:30:16.038716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:327: [RootDataErasureManager] Data erasure in tenants is completed. 
Send request to BS controller 2025-06-25T14:30:16.039959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:615: TTxCompleteDataErasureTenant Complete at schemeshard: 72057594046678944, NeedSendRequestToBSC# true 2025-06-25T14:30:16.039998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-25T14:30:16.040157Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:297:2279]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: false Progress10k: 0 2025-06-25T14:30:16.040191Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:16.040215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:16.040257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:16.040289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 0% 2025-06-25T14:30:16.040354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-25T14:30:16.040396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-25T14:30:16.511283Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.511370Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.511447Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.511472Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.511517Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:838:2719]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.511539Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.511595Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:463:2414], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.511623Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.511692Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:838:2719], Recipient [1:838:2719]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.511715Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.511764Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:297:2279], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.511787Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.543621Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:16.543722Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:16.543759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-25T14:30:16.543988Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:297:2279]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: false Progress10k: 5000 2025-06-25T14:30:16.544023Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:16.544052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:16.544117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:16.544158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-25T14:30:16.544217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-25T14:30:16.544262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-25T14:30:16.966382Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.966454Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.966527Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:838:2719]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.966553Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.966606Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:297:2279]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.966643Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.966699Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:297:2279], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.966728Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.966794Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:463:2414], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.966823Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.966906Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:838:2719], Recipient [1:838:2719]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.966933Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.998578Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:16.998649Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:16.998686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-25T14:30:16.998904Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:297:2279]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: true Progress10k: 10000 2025-06-25T14:30:16.998942Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:16.998974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:16.999058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:16.999089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-25T14:30:16.999136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.934000s, Timestamp# 1970-01-01T00:00:11.111000Z 2025-06-25T14:30:16.999171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s 2025-06-25T14:30:17.001199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, 
NeedScheduleRequestToBSC# false 2025-06-25T14:30:17.001881Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:3578:4933], Recipient [1:297:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:17.001939Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:17.001979Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:30:17.002153Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:297:2279]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-25T14:30:17.002188Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5153: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-25T14:30:17.002229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7834: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::DataErasureWithCopyTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:04.876764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:04.876851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:04.876889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:04.876928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:04.876968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:04.876999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:04.877048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:04.877142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:04.877887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:04.878236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 
2025-06-25T14:30:04.947386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:04.947443Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:04.962942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:04.963363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:04.963534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:04.970105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:04.970520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:04.971290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:04.971603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:04.980403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:04.980656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:04.982035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:04.982108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:04.982308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:04.982370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:04.982436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:04.982567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:04.991584Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:05.160243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:05.160542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:05.160771Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:05.160865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:05.161202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:05.161294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:05.163833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:05.164042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:05.164257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:05.164392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:05.164448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:05.164491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:05.166736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:05.166814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:05.166866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:05.168877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:05.168938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:05.169016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:05.169075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:05.173246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { 
TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:05.175804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:05.176008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:05.177187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:05.177348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:05.177410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:05.177746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:05.177791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:05.177936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:05.178002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:05.180293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:05.180358Z node 1 :FLAT_TX_SCHEMESHARD ... 
d(TabletID)=72075186233409552 maps to shardIdx: 72075186233409546:7 followerId=0, pathId: [OwnerId: 72075186233409546, LocalPathId: 3], pathId map=SimpleCopy, is column=0, is olap=0, RowCount 50, DataSize 5121950 2025-06-25T14:30:16.500134Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409552, followerId 0 2025-06-25T14:30:16.500180Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72075186233409546:7 with partCount# 1, rowCount# 50, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:50.000000Z at schemeshard 72075186233409546 2025-06-25T14:30:16.500212Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409552 2025-06-25T14:30:16.500281Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72075186233409546 2025-06-25T14:30:16.510957Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:30:16.511040Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:30:16.511074Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72075186233409546, queue size# 0 2025-06-25T14:30:16.535334Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.535405Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.535495Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:188:2180], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.535528Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.546164Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.546253Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.546374Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:278:2240], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.546408Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.580778Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.580855Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event 
TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.580962Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:188:2180], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.580991Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.592444Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.592522Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.592609Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:278:2240], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.592644Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.625495Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.625575Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.625689Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:188:2180], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.625720Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.636222Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.636300Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.636423Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:278:2240], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.636455Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.668906Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.668987Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.669082Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:188:2180], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 
2025-06-25T14:30:16.669116Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.679604Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.679682Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.679774Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:278:2240], Recipient [2:278:2240]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.679807Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.712008Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.712086Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:16.712175Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:188:2180], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.712223Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:16.722887Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [2:188:2180]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:16.722964Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:16.723001Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 1 2025-06-25T14:30:16.723217Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [2:187:2179], Recipient [2:188:2180]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 1 Completed: true Progress10k: 10000 2025-06-25T14:30:16.723251Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:16.723283Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:16.723351Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:16.723387Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-25T14:30:16.723463Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 14.999500s, Timestamp# 1970-01-01T00:01:25.000500Z 2025-06-25T14:30:16.723505Z node 2 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 1, duration# 35 s 2025-06-25T14:30:16.724080Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-25T14:30:16.727259Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [2:1736:3441], Recipient [2:188:2180]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:16.727326Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:16.727365Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:30:16.727558Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125519, Sender [2:172:2171], Recipient [2:188:2180]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-25T14:30:16.727595Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5153: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-25T14:30:16.727637Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7834: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> TSchemeShardTest::ConsistentCopyAfterDropIndexes [GOOD] |76.9%| [TA] $(B)/ydb/public/lib/ydb_cli/topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> TSchemeShardTest::DropBlockStoreVolumeWithNonReplicatedPartitions [GOOD] >> TSchemeShardTest::DropBlockStoreVolume2 |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> TControlPlaneProxyTest::ShouldSendListConnections [GOOD] >> TControlPlaneProxyTest::ShouldSendDescribeConnection >> JsonProtoConversion::JsonToProtoSingleValue [GOOD] >> JsonProtoConversion::NlohmannJsonToProtoMap [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::DeferredEffects-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 7423, MsgBus: 32044 2025-06-25T14:29:46.105326Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894113786991863:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:46.105643Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001486/r3tmp/tmp2ja6jq/pdisk_1.dat 2025-06-25T14:29:46.432168Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:46.433978Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894113786991842:2080] 1750861786104241 != 1750861786104244 TServer::EnableGrpc on GrpcPort 7423, node 1 2025-06-25T14:29:46.511600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:46.511700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:46.519827Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:46.545477Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:46.545500Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:46.545506Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:46.545650Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32044 TClient is connected to server localhost:32044 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:47.091860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:47.123283Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:29:47.137470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:29:47.156142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:47.287030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:29:47.453638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:47.523520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:49.282532Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894126671895365:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:49.282640Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:49.599553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:49.667267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:49.696964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:49.735267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:49.805835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:49.885567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:49.964421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:50.031951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894130966863328:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:50.032013Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:50.032227Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894130966863333:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:50.035407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:50.046570Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894130966863335:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:29:50.129149Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894130966863386:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:51.105608Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894113786991863:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:51.105658Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4137, MsgBus: 21533 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001486/r3tmp/tmpjjNyST/pdisk_1.dat 2025-06-25T14:29:52.952594Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migra ... 83Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:08.914804Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861806589, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 20121, MsgBus: 64817 2025-06-25T14:30:09.679106Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894211260277139:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:09.679196Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001486/r3tmp/tmpEeUIes/pdisk_1.dat 2025-06-25T14:30:09.800274Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:09.819207Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:09.819306Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:09.820867Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20121, node 4 2025-06-25T14:30:09.852844Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:09.852867Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:09.852876Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:09.853011Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64817 TClient is connected to server localhost:64817 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:10.417544Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:10.435976Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:10.514800Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:10.659878Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:10.689757Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:10.760079Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:13.191336Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894228440147940:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:13.191427Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:13.251058Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:13.285063Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:13.318740Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:13.393848Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:13.429689Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:13.467239Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:13.510032Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:13.594972Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894228440148601:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:13.595085Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:13.595120Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894228440148606:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:13.598741Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:13.610063Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894228440148608:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:30:13.696075Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894228440148659:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:14.679679Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894211260277139:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:14.679752Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Warning: Type annotation, code: 1030
:3:46: Warning: At lambda, At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::ProtoMapToJson [GOOD] |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoSingleValue [GOOD] >> JsonProtoConversion::JsonToProtoMap [GOOD] |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::NlohmannJsonToProtoMap [GOOD] >> KqpStats::DataQueryMulti [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::ConsistentCopyAfterDropIndexes [GOOD] Test command err: canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "4@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "1@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 7 } canonic: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" 
InheritanceType: 3 Inherited: true } result: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } canonic: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { 
AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "3@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "6@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } result: ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 3 Inherited: true } canonic: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } result: ACE { AccessType: 0 AccessRight: 59391 SID: "2@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 521 SID: "5@staff" InheritanceType: 1 Inherited: true } ACE { AccessType: 0 AccessRight: 59391 SID: "22@staff" InheritanceType: 1 } ACE { AccessType: 0 AccessRight: 521 SID: "44@staff" InheritanceType: 4 } ACE { AccessType: 0 AccessRight: 521 SID: "55@staff" InheritanceType: 5 } ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 
3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 } ACE { AccessType: 1 AccessRight: 32768 SID: "11@staff" InheritanceType: 0 } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 6 } ACE { AccessType: 1 AccessRight: 521 SID: "77@staff" InheritanceType: 7 } canonic: ACE { AccessType: 1 AccessRight: 59391 SID: "0@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "7@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "00@staff" InheritanceType: 3 Inherited: true } ACE { AccessType: 1 AccessRight: 59391 SID: "33@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "66@staff" InheritanceType: 2 Inherited: true } ACE { AccessType: 1 AccessRight: 521 SID: "77@sta ... pe: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:17.442714Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Copy1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:30:17.443105Z node 15 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Copy1" took 388us result status StatusSuccess 2025-06-25T14:30:17.443664Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Copy1" PathDescription { Self { Name: "Copy1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000007 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Copy1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Sync" LocalPathId: 6 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:17.445101Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Copy2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:30:17.445428Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Copy2" took 350us result status StatusSuccess 2025-06-25T14:30:17.445999Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Copy2" PathDescription { Self { Name: "Copy2" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Copy2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:17.447242Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Copy3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:30:17.448144Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Copy3" took 917us result status StatusSuccess 2025-06-25T14:30:17.448764Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Copy3" PathDescription { Self { Name: "Copy3" PathId: 9 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000009 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: 
EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Copy3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Sync" LocalPathId: 10 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 8 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 9 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::ProtoMapToJson [GOOD] >> TSchemeShardTest::DropBlockStoreVolume2 [GOOD] >> TSchemeShardTest::DropBlockStoreVolumeWithFillGeneration |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoMap [GOOD] >> JsonProtoConversion::ProtoMapToJson_ReceiveMessageResult [GOOD] >> JsonProtoConversion::NlohmannJsonToProtoArray >> TControlPlaneProxyTest::ShouldSendDescribeConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendModifyConnection >> AssignTxId::Basic [GOOD] >> JsonProtoConversion::JsonToProtoArray [GOOD] >> JsonProtoConversion::NlohmannJsonToProtoArray [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnCopyTable-UseSink [GOOD] >> DataShardSnapshots::DelayedWriteReadableAfterSplit |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> KqpExplain::UpdateOnSecondary-UseSink [GOOD] |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::ProtoMapToJson_ReceiveMessageResult [GOOD] |76.9%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::NlohmannJsonToProtoArray [GOOD] |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoArray [GOOD] >> TableCreator::CreateTables [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::DataQueryMulti [GOOD] Test command err: Trying to start YDB, gRPC: 12143, MsgBus: 16038 2025-06-25T14:29:55.525542Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894150123035801:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:55.525755Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00147e/r3tmp/tmpoJmGqU/pdisk_1.dat 2025-06-25T14:29:55.835841Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894150123035692:2080] 1750861795513904 != 1750861795513907 2025-06-25T14:29:55.840770Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12143, node 1 2025-06-25T14:29:55.901341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:55.901454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:55.904781Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:55.922955Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:55.922973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:55.922977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:55.923073Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16038 TClient is connected to server localhost:16038 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:29:56.447248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:56.470325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:56.481971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:56.532081Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:56.591457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:56.722676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:56.796374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:58.271862Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894163007939219:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:58.271993Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:58.617482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:58.645137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:58.673436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:58.701002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:58.740628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:58.811761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:58.844223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:58.894112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894163007939881:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:58.894185Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:58.894330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894163007939886:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:58.898330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:58.911684Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894163007939888:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:59.011791Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894167302907235:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:00.520970Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894150123035801:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:00.521040Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Warning: Type annotation, code: 1030
:3:46: Warning: At lambda, At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 ... 644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:11.701720Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519894198134392565:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:11.701783Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 14068, MsgBus: 26886 2025-06-25T14:30:12.802013Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894224594397551:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:12.802070Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00147e/r3tmp/tmpzW09it/pdisk_1.dat 2025-06-25T14:30:12.931096Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:12.932515Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519894224594397530:2080] 1750861812801163 != 1750861812801166 2025-06-25T14:30:12.945513Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:12.945605Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:12.947122Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14068, node 4 2025-06-25T14:30:13.012373Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:13.012405Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:13.012415Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:13.012552Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26886 TClient is connected to server localhost:26886 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:13.560209Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:13.571933Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:13.627766Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:13.783386Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:13.813287Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:13.866142Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:16.330046Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894241774268352:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:16.330126Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:16.392798Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:16.470381Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:16.504085Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:16.540099Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:16.608770Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:16.680705Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:16.753155Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:16.834755Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894241774269022:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:16.834856Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:16.835274Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894241774269027:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:16.839413Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:16.854457Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894241774269029:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:16.946025Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894241774269080:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:17.804446Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894224594397551:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:17.804538Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |76.9%| [TA] $(B)/ydb/core/http_proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} |76.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> KqpStats::OneShardLocalExec-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> AssignTxId::Basic [GOOD] Test command err: 2025-06-25T14:30:15.726190Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894238508598988:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:15.732394Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000733/r3tmp/tmp6rDgE4/pdisk_1.dat 2025-06-25T14:30:16.042042Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:16.043317Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894238508598875:2080] 1750861815716392 != 1750861815716395 2025-06-25T14:30:16.110014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:16.110101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:16.111518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5490 TServer::EnableGrpc on GrpcPort 21073, node 1 2025-06-25T14:30:16.368795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:16.368821Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:16.368828Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:16.368994Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5490 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:16.709520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:16.737214Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:18.674890Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894251393501412:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:18.674991Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:18.993982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateReplication, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_replication.cpp:473) 2025-06-25T14:30:19.013558Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:41: [controller 72075186224037888] OnActivateExecutor 2025-06-25T14:30:19.013638Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init_schema.cpp:17: [controller 72075186224037888][TxInitSchema] Execute 2025-06-25T14:30:19.019771Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init_schema.cpp:26: [controller 72075186224037888][TxInitSchema] Complete 2025-06-25T14:30:19.019886Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init.cpp:240: [controller 72075186224037888][TxInit] Execute 2025-06-25T14:30:19.020168Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init.cpp:245: [controller 72075186224037888][TxInit] Complete 2025-06-25T14:30:19.020178Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:113: [controller 72075186224037888] SwitchToWork 2025-06-25T14:30:19.067253Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:142: [controller 72075186224037888] Handle NKikimrReplication.TEvCreateReplication PathId { OwnerId: 72057594046644480 LocalId: 2 } OperationId { TxId: 281474976710658 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:21073" Database: "/Root" OAuthToken { Token: "***" } EnableSsl: false } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Global { CommitIntervalMilliSeconds: 10000 } } } Database: "/Root" 2025-06-25T14:30:19.067498Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_create_replication.cpp:22: [controller 72075186224037888][TxCreateReplication] Execute: NKikimrReplication.TEvCreateReplication PathId { OwnerId: 72057594046644480 LocalId: 2 } OperationId { TxId: 281474976710658 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:21073" Database: "/Root" OAuthToken { Token: "***" } EnableSsl: false } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Global { CommitIntervalMilliSeconds: 10000 } } } Database: "/Root" 2025-06-25T14:30:19.067581Z node 1 :REPLICATION_CONTROLLER NOTICE: tx_create_replication.cpp:43: [controller 72075186224037888][TxCreateReplication] Add replication: rid# 1, pathId# [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T14:30:19.068331Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_create_replication.cpp:58: [controller 72075186224037888][TxCreateReplication] Complete 2025-06-25T14:30:19.071493Z node 1 :REPLICATION_CONTROLLER TRACE: tenant_resolver.cpp:33: [TenantResolver][rid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root/replication TableId: [72057594046644480:2:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindReplication DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 
2025-06-25T14:30:19.075694Z node 1 :REPLICATION_CONTROLLER TRACE: tenant_resolver.cpp:33: [TenantResolver][rid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:30:19.075829Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:252: [controller 72075186224037888] Handle NKikimr::NReplication::NController::TEvPrivate::TEvResolveTenantResult { ReplicationId: 1 Tenant: /Root Sucess: 1 } 2025-06-25T14:30:19.075842Z node 1 :REPLICATION_CONTROLLER NOTICE: controller.cpp:267: [controller 72075186224037888] Tenant resolved: rid# 1, tenant# /Root 2025-06-25T14:30:19.075849Z node 1 :REPLICATION_CONTROLLER INFO: controller.cpp:271: [controller 72075186224037888] Discover tenant nodes: tenant# /Root 2025-06-25T14:30:19.079803Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:297: [controller 72075186224037888] Handle NKikimr::TEvDiscovery::TEvDiscoveryData 2025-06-25T14:30:19.079853Z node 1 :REPLICATION_CONTROLLER DEBUG: controller.cpp:321: [controller 72075186224037888] Create session: nodeId# 1 TClient::Ls request: /Root/replication TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "replication" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeReplication CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861819119 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ReplicationVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsIns... 
(TRUNCATED) 2025-06-25T14:30:19.119593Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 1 TxId: 0 } 2025-06-25T14:30:19.119691Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 0, allocated# 0 2025-06-25T14:30:19.119744Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 1, assigned# 0, allocated# 0, exhausted# 1 2025-06-25T14:30:19.119841Z node 1 :REPLICATION_CONTROLLER TRACE: tx_assign_tx_id.cpp:174: [controller 72075186224037888] Handle NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-25T14:30:19.119924Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 0, allocated# 5 2025-06-25T14:30:19.120530Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 1, allocated# 4, exhausted# 0 2025-06-25T14:30:19.121067Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 9999 TxId: 0 } 2025-06-25T14:30:19.121097Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 1, allocated# 4 2025-06-25T14:30:19.121140Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 1, allocated# 4, exhausted# 0 2025-06-25T14:30:19.121534Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 9999 TxId: 18446744073709551615 } 2025-06-25T14:30:19.121573Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 1, allocated# 4 2025-06-25T14:30:19.121601Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 1, allocated# 4, exhausted# 0 2025-06-25T14:30:19.122448Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 10000 TxId: 0 } 2025-06-25T14:30:19.122487Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 1, allocated# 4 2025-06-25T14:30:19.123125Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 2, allocated# 3, exhausted# 0 2025-06-25T14:30:19.126058Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 5000 TxId: 0 } 2025-06-25T14:30:19.126132Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 2, allocated# 3 2025-06-25T14:30:19.126175Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 2, allocated# 3, exhausted# 0 2025-06-25T14:30:19.126667Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle 
NKikimrReplication.TEvGetTxId Versions { Step: 20000 TxId: 0 } Versions { Step: 30000 TxId: 0 } Versions { Step: 40000 TxId: 0 } 2025-06-25T14:30:19.126701Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 3, assigned# 2, allocated# 3 2025-06-25T14:30:19.127994Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 5, allocated# 0, exhausted# 0 2025-06-25T14:30:19.128107Z node 1 :REPLICATION_CONTROLLER TRACE: tx_assign_tx_id.cpp:174: [controller 72075186224037888] Handle NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-25T14:30:19.128135Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 0, assigned# 5, allocated# 5 2025-06-25T14:30:19.128165Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 5, allocated# 5, exhausted# 0 2025-06-25T14:30:19.128615Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 50000 TxId: 0 } 2025-06-25T14:30:19.128650Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 5, allocated# 5 2025-06-25T14:30:19.129196Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 5, allocated# 5, exhausted# 0 2025-06-25T14:30:19.168598Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: SCHEME_ERROR, issues: {
: Error: Path not found } } } 2025-06-25T14:30:19.168648Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root/table, status# SCHEME_ERROR, issues# {
: Error: Path not found } 2025-06-25T14:30:19.168847Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:172: [controller 72075186224037888] Handle NKikimr::NReplication::NController::TEvPrivate::TEvDiscoveryTargetsResult { ReplicationId: 1 ToAdd [] ToDelete [] Failed [/Root/table: SCHEME_ERROR ({
: Error: Path not found })] } 2025-06-25T14:30:19.168959Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_discovery_targets_result.cpp:24: [controller 72075186224037888][TxDiscoveryTargetsResult] Execute: NKikimr::NReplication::NController::TEvPrivate::TEvDiscoveryTargetsResult { ReplicationId: 1 ToAdd [] ToDelete [] Failed [/Root/table: SCHEME_ERROR ({
: Error: Path not found })] } 2025-06-25T14:30:19.168999Z node 1 :REPLICATION_CONTROLLER ERROR: tx_discovery_targets_result.cpp:76: [controller 72075186224037888][TxDiscoveryTargetsResult] Discovery error: rid# 1, error# /Root/table: SCHEME_ERROR ({
: Error: Path not found }) 2025-06-25T14:30:19.169863Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_discovery_targets_result.cpp:89: [controller 72075186224037888][TxDiscoveryTargetsResult] Complete >> TSchemeShardTest::DropBlockStoreVolumeWithFillGeneration [GOOD] >> TSchemeShardTest::DocumentApiVersion >> TControlPlaneProxyTest::ShouldSendModifyConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendDeleteConnection |77.0%| [TA] $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/library/table_creator/ut/unittest >> TableCreator::CreateTables [GOOD] Test command err: 2025-06-25T14:30:16.691175Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894243485650437:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:16.691265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001e3f/r3tmp/tmpu35dvt/pdisk_1.dat 2025-06-25T14:30:17.034510Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:17.035524Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894243485650416:2080] 1750861816689858 != 1750861816689861 2025-06-25T14:30:17.142145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:17.142287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:17.145531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25006 TServer::EnableGrpc on GrpcPort 9274, node 1 2025-06-25T14:30:17.253198Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:17.253244Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:17.253257Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:17.253415Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:30:17.406227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:17.427970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:17.429836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:17.712721Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateOnSecondary-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 17115, MsgBus: 19298 2025-06-25T14:29:52.068798Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894137447807053:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:52.078644Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00147f/r3tmp/tmp38BAX4/pdisk_1.dat 2025-06-25T14:29:52.544015Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:52.558943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:52.559040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:52.560906Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17115, node 1 2025-06-25T14:29:52.618457Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:52.618487Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:52.618493Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:52.618631Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19298 2025-06-25T14:29:53.082986Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19298 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:53.232146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:53.253502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:53.278596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:53.487335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:53.641069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:53.718951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:29:55.133850Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894150332710534:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:55.133982Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:55.440347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.467424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.533346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.558815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.585122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.624200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.656808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:55.711024Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894150332711194:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:55.711099Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:55.711373Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894150332711199:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:55.723176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:55.734166Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894150332711201:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:55.824971Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894150332711252:3428] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } {"Plan":{"Plans":[{"Tables":["EightShard"],"PlanNodeId":2,"Operators":[{"Inputs":[],"Path":"\/Root\/EightShard","Name":"Update","SinkType":"KqpTableSink","Table":"EightShard"}],"Plans":[{"PlanNodeId":1,"Operators":[{"Inputs":[],"Iterator":"[{Data: 0,Key: 100}]","Name":"Iterator"}],"Node Type":"ConstantExpr"}],"Node Type":"Sink"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","writes":[{"columns":["Data","Key"],"type":"MultiUpdate"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/EightShard","Name":"Update","SinkType":"KqpTableSink","Table":"EightShard"}],"Node Type":"Update"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} Trying to start YDB, gRPC: 28871, MsgBus: 1212 2025-06-25T14:29:57.371812Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fli ... [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.494591Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:14.541724Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:14.596710Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:14.632381Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:14.671558Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:14.714247Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:14.794698Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:14.863710Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894235059762322:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.863807Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.864115Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894235059762327:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.868074Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:14.878893Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894235059762329:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:14.976602Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894235059762380:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:16.017252Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894222174858177:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:16.017316Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:16.263393Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:16.331082Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:16.375680Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {"Plan":{"Plans":[{"PlanNodeId":29,"Plans":[{"Tables":["SecondaryKeys"],"PlanNodeId":28,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"},{"Inputs":[],"Iterator":"precompute_2_1","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_2_1"}],"Node Type":"Effect"},{"PlanNodeId":27,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":26,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_3_1","Name":"Iterator"}],"Node Type":"Delete-ConstantExpr","CTE Name":"precompute_3_1"}],"Node Type":"Effect"},{"PlanNodeId":25,"Plans":[{"Tables":["SecondaryKeys\/Index\/indexImplTable"],"PlanNodeId":24,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"},{"Inputs":[],"Iterator":"precompute_3_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_3_0"}],"Node Type":"Effect"},{"PlanNodeId":22,"Plans":[{"PlanNodeId":21,"Plans":[{"PlanNodeId":20,"Plans":[{"PlanNodeId":19,"Operators":[{"Inputs":[{"Other":"ConstantExpression"},{"Other":"ConstantExpression"},{"Other":"ConstantExpression"}],"Iterator":"FlatMap","Name":"Iterator"}],"Node 
Type":"ConstantExpr"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_3_0","Node Type":"Precompute_3_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":18,"Plans":[{"PlanNodeId":17,"Plans":[{"PlanNodeId":16,"Plans":[{"PlanNodeId":15,"Operators":[{"Inputs":[{"InternalOperatorId":1},{"InternalOperatorId":1}],"Iterator":"Map","Name":"Iterator"},{"E-Rows":"1","Inputs":[],"Predicate":"","E-Cost":"0","E-Size":"5","Name":"Filter"}],"Node Type":"ConstantExpr-Filter"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_3_1","Node Type":"Precompute_3_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":13,"Plans":[{"PlanNodeId":12,"Operators":[{"Inputs":[{"Other":"ConstantExpression"}],"Iterator":"[ToDict]","Name":"Iterator"}],"Node Type":"ConstantExpr"}],"Subplan Name":"CTE precompute_2_0","Node Type":"Precompute_2_0","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":11,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Iterator":"Filter","Name":"Iterator"},{"E-Rows":"2","Inputs":[],"Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"ConstantExpr-Filter"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_2_1","Node Type":"Precompute_2_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"},{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"E-Size":"0","LookupKeyColumns":["Key"],"Node Type":"TableLookup","PlanNodeId":2,"Path":"\/Root\/SecondaryKeys","Columns":["Fk","Key"],"E-Rows":"2","Plans":[{"PlanNodeId":1,"Operators":[{"Inputs":[],"Iterator":"precompute_0_1","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_1"}],"Table":"SecondaryKeys","PlanNodeType":"Connection","E-Cost":"0"}],"Node Type":"Stage"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/SecondaryKeys","reads":[{"lookup_by":["Key"],"columns":["Fk","Key"],"type":"Lookup"}],"writes":[{"columns":["Fk","Key","Value"],"type":"MultiUpsert"}]},{"name":"\/Root\/SecondaryKeys\/Index\/indexImplTable","writes":[{"columns":["Fk","Key"],"type":"MultiUpsert"},{"type":"MultiErase"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/SecondaryKeys","Name":"Upsert","Table":"SecondaryKeys"}],"Plans":[{"PlanNodeId":8,"Operators":[{"E-Rows":"2","Predicate":"Contains","E-Cost":"0","E-Size":"10","Name":"Filter"}],"Node Type":"Filter"}],"Node Type":"Upsert"}],"Node Type":"Effect"},{"PlanNodeId":9,"Plans":[{"PlanNodeId":10,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Delete","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Plans":[{"PlanNodeId":16,"Operators":[{"E-Rows":"1","Predicate":"","E-Cost":"0","E-Size":"5","Name":"Filter"}],"Node Type":"Filter"}],"Node Type":"Delete"}],"Node 
Type":"Effect"},{"PlanNodeId":17,"Plans":[{"PlanNodeId":18,"Operators":[{"Path":"\/Root\/SecondaryKeys\/Index\/indexImplTable","Name":"Upsert","Table":"SecondaryKeys\/Index\/indexImplTable"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> TestDataErasure::Run3CyclesForAllSupportedObjects [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery >> KqpQuery::QueryStats-UseSink [GOOD] |77.0%| [TA] $(B)/ydb/library/table_creator/ut/test-results/unittest/{meta.json ... results_accumulator.log} |77.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |77.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat >> TControlPlaneProxyTest::ShouldSendDeleteConnection [GOOD] >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] >> Cdc::InitialScan_WithTopicSchemeTx [GOOD] >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] >> TSchemeShardTest::DocumentApiVersion [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::OneShardLocalExec-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 27972, MsgBus: 64637 2025-06-25T14:29:57.386847Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894161757479559:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:57.386927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001475/r3tmp/tmpFeA5e1/pdisk_1.dat 2025-06-25T14:29:57.659539Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:57.659839Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894161757479536:2080] 1750861797385528 != 1750861797385531 TServer::EnableGrpc on GrpcPort 27972, node 1 2025-06-25T14:29:57.702909Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:57.702931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:57.702938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:57.703074Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:57.733131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:57.733207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:57.735934Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64637 TClient is connected to server localhost:64637 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:58.253754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:58.266685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:58.283206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:58.398658Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:58.410283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:58.555427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:58.615596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:30:00.145097Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894174642383072:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:00.145233Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:00.430365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:00.463516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:00.488426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:00.514558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:00.539068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:00.578080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:00.652645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:00.720465Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894174642383734:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:00.720623Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:00.720806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894174642383739:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:00.725198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:00.734705Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894174642383741:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:00.830901Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894174642383792:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:02.088973Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861802095, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 16087, MsgBus: 22382 2025-06-25T14:30:02.770681Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894183553148669:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:02.770798Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/b ... 644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:12.912471Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519894201561502228:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:12.912540Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 25919, MsgBus: 18145 2025-06-25T14:30:14.014014Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894235032993122:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:14.014124Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001475/r3tmp/tmpEaMdor/pdisk_1.dat 2025-06-25T14:30:14.121515Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:14.123274Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519894235032993103:2080] 1750861814013618 != 1750861814013621 TServer::EnableGrpc on GrpcPort 25919, node 4 2025-06-25T14:30:14.151229Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:14.151318Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:14.152711Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:14.174952Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:14.174994Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:14.175007Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:30:14.175167Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18145 TClient is connected to server localhost:18145 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:14.745946Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:14.762662Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:14.837944Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:15.021562Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:15.053342Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:15.142032Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:30:17.371153Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894247917896631:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:17.371271Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:17.443407Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:17.480969Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:17.519960Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:17.560994Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:17.601127Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:17.677169Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:17.740658Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:17.815209Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894247917897289:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:17.815287Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:17.815458Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894247917897294:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:17.819143Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:17.833649Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894247917897296:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:17.939559Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894247917897347:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:19.015358Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894235032993122:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:19.015447Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |77.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |77.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest |77.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> Cdc::InitialScan_TopicAutoPartitioning >> TControlPlaneProxyTest::ShouldSendTestConnection >> TSchemeShardTest::DisablePublicationsOfDropping_Dir |77.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |77.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest |77.0%| [TA] {RESULT} $(B)/ydb/public/lib/ydb_cli/topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} |77.0%| [TA] {RESULT} $(B)/ydb/core/http_proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} |77.0%| [TA] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/test-results/unittest/{meta.json ... results_accumulator.log} |77.0%| [LD] {RESULT} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |77.0%| [TA] {RESULT} $(B)/ydb/library/table_creator/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |77.0%| [LD] {RESULT} $(B)/ydb/core/cms/ut/ydb-core-cms-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::Run3CyclesForAllSupportedObjects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:10.948535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:10.948613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:10.948650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:10.948684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:10.948724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:10.948752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:10.948787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:10.948856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:10.949412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:10.949678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:11.013615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:11.013684Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:11.027331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:11.027743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:11.027899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:11.048668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:11.049029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:11.049627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:11.049839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:11.058206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:11.058435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:11.059618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:11.059680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:11.059835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:11.059888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:11.059934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:11.060045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:11.067416Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:11.205857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:11.206105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:11.206302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:11.206350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:11.206681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:11.206802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:11.209575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:11.209761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:11.209943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:11.210017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:11.210081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:11.210125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:11.212354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:11.212411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:11.212485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:11.214702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:11.214797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:11.214874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:11.214969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:11.227681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:11.230105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:11.230298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:11.231330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:11.231485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:11.231548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:11.231888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:11.231954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:11.232184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:11.232277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:11.234965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:11.235016Z node 1 :FLAT_TX_SCHEMESHARD ... 7: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-25T14:30:20.599652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-25T14:30:20.599697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-25T14:30:20.702218Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:20.702309Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:20.702429Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:463:2414], Recipient [1:463:2414]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:20.702459Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:20.702640Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:802:2688], Recipient [1:463:2414]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409546 2025-06-25T14:30:20.702668Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:30:20.702753Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 
72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:30:20.702928Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 3 took 144us result status StatusSuccess 2025-06-25T14:30:20.703351Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:30:20.828860Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:962:2820]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:20.828921Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:20.828986Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:962:2820], Recipient [1:962:2820]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:20.829006Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:20.829119Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:1386:3171], Recipient [1:962:2820]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409553 2025-06-25T14:30:20.829138Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:30:20.829208Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-25T14:30:20.829344Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409553 describe pathId 3 took 106us result status StatusSuccess 2025-06-25T14:30:20.829763Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409553 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 108 CreateStep: 350 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409559 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409558 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409559 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 SchemeShard: 72075186233409553 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-25T14:30:21.254455Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:21.254534Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:21.254606Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:297:2279], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:21.254633Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:21.286188Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [1:297:2279]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:21.286286Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:21.286323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 3 2025-06-25T14:30:21.286595Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [1:303:2283], Recipient [1:297:2279]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 3 Completed: true Progress10k: 10000 2025-06-25T14:30:21.286631Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:21.286661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:21.286736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:21.286766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-25T14:30:21.286814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:170: [RootDataErasureManager] ScheduleDataErasureWakeup: Interval# 0.925000s, Timestamp# 1970-01-01T00:00:11.120000Z 2025-06-25T14:30:21.286846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 3, duration# 2 s 2025-06-25T14:30:21.292785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-25T14:30:21.293457Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:4096:5372], Recipient [1:297:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:21.293514Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:21.293558Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:30:21.293717Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125519, Sender [1:279:2268], Recipient [1:297:2279]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-25T14:30:21.293753Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5153: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-25T14:30:21.293791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7834: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 >> TTopicApiDescribes::DescribeTopic |77.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData[Zstd] >> KqpQuery::UpdateThenDelete-UseSink [GOOD] >> TBackupTests::ShouldSucceedOnLargeData[Raw] >> TBackupTests::BackupUuidColumn[Zstd] >> TBackupTests::BackupUuidColumn[Raw] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> 
KqpQuery::QueryStats-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 10122, MsgBus: 63752 2025-06-25T14:29:58.746478Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894163159852696:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:58.765041Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00146b/r3tmp/tmpKgYSCM/pdisk_1.dat 2025-06-25T14:29:59.023354Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:59.023507Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894163159852678:2080] 1750861798743923 != 1750861798743926 2025-06-25T14:29:59.031526Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:59.031637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:59.049741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10122, node 1 2025-06-25T14:29:59.090513Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:59.090533Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:59.090542Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:59.090664Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63752 TClient is connected to server localhost:63752 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:59.575025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:29:59.598398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:59.740288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:59.845321Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:59.899628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:59.973906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:01.453366Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894176044756203:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:01.453521Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:01.797202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:01.831311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:01.901450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:01.932853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:01.964176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:02.015953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:02.090816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:02.146810Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894180339724159:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:02.146900Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:02.147112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894180339724164:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:02.150659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:02.161285Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894180339724166:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:02.265717Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894180339724217:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:03.282268Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YmRlOTc3N2YtNGUyN2NhZjctMmQ4OTg4NDgtYmRmNGZkNWI=, ActorId: [1:7519894184634691782:2473], ActorState: ExecuteState, TraceId: 01jykqxvq1dh97vzvdz3yxp1dv, Create QueryResponse for error on request, msg:
: Error: Request timeout 50ms exceeded
: Error: Cancelling after 48ms during compilation Trying to start YDB, gRPC: 10153, MsgBus: 20087 2025-06-25T14:30:04.047834Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894190317952411:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:04.047914Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPat ... 9894235651370382:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:15.513446Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00146b/r3tmp/tmpGbKfSP/pdisk_1.dat 2025-06-25T14:30:15.647946Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:15.649271Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519894235651370363:2080] 1750861815512359 != 1750861815512362 TServer::EnableGrpc on GrpcPort 26792, node 4 2025-06-25T14:30:15.668594Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:15.668681Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:15.687403Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:15.727202Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:15.727227Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:15.727238Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:15.727372Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64945 TClient is connected to server localhost:64945 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:30:16.238352Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:16.248092Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:30:16.266134Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:16.325890Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:30:16.467837Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:16.535844Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:16.558222Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:18.793515Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894248536273884:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:18.793602Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:18.853894Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:18.922858Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:18.971142Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:19.006676Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:19.086852Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:19.133873Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:19.205680Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:19.284854Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894252831241839:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:19.285020Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:19.285401Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894252831241844:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:19.289549Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:19.304450Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894252831241846:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:19.371066Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894252831241897:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:20.515869Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894235651370382:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:20.525183Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; query_phases { duration_us: 5090 table_access { name: "/Root/TwoShard" reads { rows: 3 bytes: 35 } partitions_count: 1 } cpu_time_us: 3948 affected_shards: 1 } query_phases { duration_us: 4763 table_access { name: "/Root/EightShard" updates { rows: 3 bytes: 47 } partitions_count: 1 } cpu_time_us: 2519 affected_shards: 2 } compilation { duration_us: 246378 cpu_time_us: 242659 } process_cpu_time_us: 644 total_duration_us: 261659 total_cpu_time_us: 249770 >> CellsFromTupleTest::CellsFromTupleSuccess [GOOD] >> CellsFromTupleTest::CellsFromTupleSuccessPg >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] >> TTopicApiDescribes::GetPartitionDescribe >> TControlPlaneProxyTest::ShouldSendTestConnection [GOOD] >> TControlPlaneProxyTest::ShouldSendCreateBinding >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] >> ConvertYdbPermissionNameToACLAttrs::TestEqualGranularAndDeprecatedAcl [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalEmpty [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalOptionalEmpty [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalOptionalEmpty2 [GOOD] >> ConvertYdbValueToMiniKQLValueTest::List [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Dict [GOOD] >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] [GOOD] >> CellsFromTupleTest::CellsFromTupleSuccessPg [GOOD] >> CellsFromTupleTest::CellsFromTupleFails [GOOD] >> CellsFromTupleTest::CellsFromTupleFailsPg [GOOD] >> CompressionTests::Zstd [GOOD] >> CompressionTests::Unsupported [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::DecimalType [GOOD] >> TestDataErasure::ManualLaunch3CyclesWithNotConsistentCountersInSchemeShardAndBSC [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Dir [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Table >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] [GOOD] >> TTopicApiDescribes::DescribeConsumer [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Void [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Struct [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Tuple [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Variant [GOOD] >> ConvertTableDescription::StorageSettings [GOOD] >> ConvertTableDescription::ColumnFamilies [GOOD] >> ConvertYdbPermissionNameToACLAttrs::SimpleConvertGood [GOOD] |77.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::Dict [GOOD] |77.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::DecimalType [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::SimpleType [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzDate [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Optional [GOOD] >> 
ConvertMiniKQLTypeToYdbTypeTest::List >> TTopicApiDescribes::GetLocalDescribe [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Void [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleUuidTypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Struct [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Tuple [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Variant [GOOD] >> ConvertYdbValueToMiniKQLValueTest::VariantIndexUnderflow [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::List [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Struct [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Dict [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::PgType [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleBool [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleBoolTypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleDecimal [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleDecimalTypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalString [GOOD] >> ConvertYdbValueToMiniKQLValueTest::PgValue [GOOD] |77.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |77.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |77.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime >> ConvertMiniKQLValueToYdbValueTest::SimpleInt32 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::UpdateThenDelete-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 9115, MsgBus: 65356 2025-06-25T14:29:59.065393Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894170744645190:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:59.065475Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001463/r3tmp/tmpkOYUJe/pdisk_1.dat 2025-06-25T14:29:59.414192Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:59.414518Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894170744645162:2080] 1750861799064204 != 1750861799064207 TServer::EnableGrpc on GrpcPort 9115, node 1 2025-06-25T14:29:59.472078Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:59.472102Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:59.472108Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:59.472228Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:59.484171Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:59.484296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:59.485956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:65356 TClient is connected to server localhost:65356 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:00.019030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:00.040809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:00.080547Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:00.181881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:00.360138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:00.443314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:01.980933Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894179334581413:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:01.981084Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:02.338692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:02.372989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:02.402515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:02.430663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:02.471491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:02.505887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:02.534815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:02.622291Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894183629549372:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:02.622357Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:02.622494Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894183629549377:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:02.626408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:02.638850Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894183629549379:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:30:02.713805Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894183629549430:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:03.825452Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894187924517019:2482], status: GENERIC_ERROR, issues:
:3:26: Error: mismatched input '[' expecting {'*', '(', '@', '$', ABORT, ACTION, ADD, AFTER, ALL, ALTER, ANALYZE, AND, ANSI, ANY, ARRAY, AS, ASC, ASSUME, ASYMMETRIC, ASYNC, AT, ATTACH, ATTRIBUTES, AUTOINCREMENT, BACKUP, BATCH, COLLECTION, BEFORE, BEGIN, BERNOULLI, BETWEEN, BITCAST, BY, CALLABLE, CASCADE, CASE, CAST, CHANGEFEED, CHECK, CLASSIFIER, COLLATE, COLUMN, COLUMNS, COMMIT, COMPACT, CONDITIONAL, CONFLICT, CONNECT, CONSTRAINT, CONSUMER, COVER, CREATE, CROSS, CUBE, CURRENT, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DATA, DATABASE, DECIMAL, DECLARE, DEFAULT, DEFERRABLE, DEFERRED, DEFINE, DELETE, DESC, DESCRIBE, DETACH, DICT, DIRECTORY, DISABLE, DISCARD, DISTINCT, DO, DROP, EACH, ELSE, EMPTY, EMPTY_ACTION, ENCRYPTED, END, ENUM, ERASE, ERROR, ESCAPE, EV ... 46644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:14.872323Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519894212375612745:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:14.872388Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 9938, MsgBus: 7927 2025-06-25T14:30:15.927241Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894238636541720:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:15.927320Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001463/r3tmp/tmp4lEYje/pdisk_1.dat 2025-06-25T14:30:16.041220Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:16.042078Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519894238636541701:2080] 1750861815926858 != 1750861815926861 TServer::EnableGrpc on GrpcPort 9938, node 4 2025-06-25T14:30:16.076997Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:16.077093Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:16.090567Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:16.120953Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:16.120978Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:16.120988Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:16.121136Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7927 TClient is connected to server localhost:7927 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:16.648787Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:16.660734Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:16.740341Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:16.918608Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:16.940649Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:17.006925Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:19.542946Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894255816412513:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:19.543090Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:19.624045Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:19.667792Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:19.714955Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:19.762335Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:19.818197Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:19.912178Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:19.983656Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:20.089332Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894260111380470:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:20.089428Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:20.089648Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894260111380475:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:20.093340Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:20.103282Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894260111380477:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:20.186239Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894260111380528:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:20.930867Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894238636541720:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:20.930952Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; [] >> TBackupTests::BackupUuidColumn[Zstd] [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleInt32 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleInt64 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleTzDate [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleTzDateTime [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleTzTimeStamp [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleDecimal [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleUuid [GOOD] >> TBackupTests::BackupUuidColumn[Raw] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_data_erasure/unittest >> TestDataErasure::ManualLaunch3CyclesWithNotConsistentCountersInSchemeShardAndBSC [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:08.997853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:08.997913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:08.997942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:08.997994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:08.998025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:08.998045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:08.998080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:08.998150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:08.998742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:08.998984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:09.083130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:09.083187Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:09.100188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:09.100646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:09.100849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:09.106865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:09.107221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:09.107876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:09.108133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:09.111362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:09.111543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:09.112611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:09.112671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:09.112838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:09.112882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:09.112922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:09.113014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:09.119582Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:09.266104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } 
StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:09.266338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:09.266546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:09.266598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:09.266823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:09.266888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:09.269304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:09.269518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:09.269698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:09.269780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:09.269822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:09.269856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:09.271920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:09.271981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:09.272019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:09.274869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:09.274917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:09.274973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T14:30:09.275021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:09.285361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:09.287414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:09.287573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:09.288477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:09.288612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:09.288657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:09.288901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:09.288957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:09.289130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:09.289214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:09.291407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:09.291456Z node 1 :FLAT_TX_SCHEMESHARD ... 
ard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:23.193411Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:657: TTxCompleteDataErasureBSC: Progress data shred in BSC 50% 2025-06-25T14:30:23.193496Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# true 2025-06-25T14:30:23.193564Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:348: [RootDataErasureManager] ScheduleRequestToBSC: Interval# 1.000000s 2025-06-25T14:30:23.576112Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:462:2413]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:23.576190Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:23.576300Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:462:2413], Recipient [2:462:2413]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:23.576350Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:23.576497Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [2:808:2691], Recipient [2:462:2413]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409546 2025-06-25T14:30:23.576527Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:30:23.576620Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:30:23.576798Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 3 took 142us result status StatusSuccess 2025-06-25T14:30:23.577250Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 300 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409552 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database1" } Partitions { PartitionId: 0 TabletId: 72075186233409551 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409552 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { 
SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:30:23.659406Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:969:2821]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:23.659500Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:23.659632Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:969:2821], Recipient [2:969:2821]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:23.659671Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:23.659851Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [2:1387:3167], Recipient [2:969:2821]: NKikimrSchemeOp.TDescribePath PathId: 3 SchemeshardId: 72075186233409553 2025-06-25T14:30:23.659889Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:30:23.659997Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-25T14:30:23.660201Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409553 describe pathId 3 took 174us result status StatusSuccess 2025-06-25T14:30:23.660768Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Database2/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72075186233409553 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 108 CreateStep: 400 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409559 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot/Database2" } Partitions { PartitionId: 0 TabletId: 72075186233409558 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409559 NextPartitionId: 1 } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 SchemeShard: 72075186233409553 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72075186233409553, at schemeshard: 72075186233409553 2025-06-25T14:30:23.742250Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:23.742314Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:23.742371Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:294:2276], Recipient [2:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:23.742393Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:23.826297Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125517, Sender [0:0:0], Recipient [2:294:2276]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:23.826393Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5156: StateWork, processing event TEvSchemeShard::TEvWakeupToRunDataErasureBSC 2025-06-25T14:30:23.826428Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:354: [RootDataErasureManager] SendRequestToBSC: Generation# 101 2025-06-25T14:30:23.826739Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 268637738, Sender [2:300:2280], Recipient [2:294:2276]: NKikimrBlobStorage.TEvControllerShredResponse CurrentGeneration: 101 Completed: true Progress10k: 10000 2025-06-25T14:30:23.826780Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5155: StateWork, processing event TEvBlobStorage::TEvControllerShredResponse 2025-06-25T14:30:23.826811Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7883: Handle TEvControllerShredResponse, at schemeshard: 72057594046678944 2025-06-25T14:30:23.826888Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:639: TTxCompleteDataErasureBSC Execute at schemeshard: 72057594046678944 2025-06-25T14:30:23.826930Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:653: TTxCompleteDataErasureBSC: Data shred in BSC is completed 2025-06-25T14:30:23.826972Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:378: [RootDataErasureManager] Complete: Generation# 101, duration# 2 s 2025-06-25T14:30:23.829414Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__root_data_erasure_manager.cpp:665: TTxCompleteDataErasureBSC Complete at schemeshard: 72057594046678944, NeedScheduleRequestToBSC# false 2025-06-25T14:30:23.830206Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [2:4079:5350], Recipient [2:294:2276]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:23.830272Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:23.830318Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:30:23.830492Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125519, Sender [2:3235:4676], Recipient [2:294:2276]: NKikimrScheme.TEvDataErasureInfoRequest 2025-06-25T14:30:23.830535Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5153: StateWork, processing event TEvSchemeShard::TEvDataErasureInfoRequest 2025-06-25T14:30:23.830580Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:7834: Handle TEvDataErasureInfoRequest, at schemeshard: 72057594046678944 |77.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::PgType [GOOD] >> TControlPlaneProxyTest::ShouldSendCreateBinding [GOOD] >> TControlPlaneProxyTest::ShouldSendListBindings |77.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbPermissionNameToACLAttrs::SimpleConvertGood [GOOD] |77.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::PgValue [GOOD] |77.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::VariantIndexUnderflow [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:22.890453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:22.890545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:22.890586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:22.890623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:22.890665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-25T14:30:22.890696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:22.890752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:22.890817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:22.891596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:22.891971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:23.104670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:23.104770Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:23.130344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:23.130650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:23.130843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:23.161694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:23.162070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:23.162915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:23.163171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:23.175373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:23.175623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:23.177107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:23.177182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:23.177364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:23.177423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:23.177495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:23.177607Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.201718Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:30:23.366933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:23.367338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.367636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:23.367701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:23.368016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:23.368171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:23.371501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:23.371777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:23.372031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.372099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:23.372168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:23.372212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:23.374537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.374600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:23.374662Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:23.377203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.377253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.377323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:23.377383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:23.388078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:23.390834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:23.391121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:23.392380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:23.392585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:23.392669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:23.392994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:23.393058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:23.393260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:23.393348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:23.395845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-25T14:30:23.395909Z node 1 :FLAT_TX_SCHEMESHARD ... meshard: 72057594046678944 2025-06-25T14:30:23.776741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-25T14:30:23.776886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 129 2025-06-25T14:30:23.777044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:30:23.787099Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:419:2388], attempt# 0 2025-06-25T14:30:23.817363Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:419:2388], sender# [1:418:2386] REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:7641 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 6618EA6B-DC79-4BD5-A332-CA36F5EE1E18 amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-25T14:30:23.832857Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:419:2388], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } 2025-06-25T14:30:23.837118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:23.837196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:30:23.837524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:23.837574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-25T14:30:23.838253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.838357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:7641 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: ACBF3219-389F-4AF3-86AA-7155DAEE3FFC amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-25T14:30:23.839336Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:30:23.839476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:30:23.839539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:30:23.839603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:30:23.839676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:30:23.839775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:30:23.840073Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:419:2388], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-06-25T14:30:23.840201Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:418:2386] 2025-06-25T14:30:23.840521Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:419:2388], sender# [1:418:2386], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:7641 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7E68DB3E-09DE-4770-A609-E432EFA8E408 amz-sdk-request: attempt=1 content-length: 20 content-md5: 2qFn9G0TW8wfvJ9C+A5Jbw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 20 2025-06-25T14:30:23.848760Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:419:2388], result# PutObjectResult { ETag: daa167f46d135bcc1fbc9f42f80e496f } 2025-06-25T14:30:23.848843Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:419:2388], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-25T14:30:23.849048Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:418:2386], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-25T14:30:23.856361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:30:23.870027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 
State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:23.870106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:30:23.870320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:23.870440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:23.870519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:23.870581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.870618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:30:23.870660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:30:23.870849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:23.873020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.873375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.873429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:30:23.873563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:23.873601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:23.873642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:23.873678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:23.873722Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:30:23.873807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:341:2318] message: TxId: 102 2025-06-25T14:30:23.873864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:23.873914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:30:23.873962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:30:23.874088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:30:23.876189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:30:23.876246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:403:2373] TestWaitNotification: OK eventTxId 102 >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] [GOOD] |77.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLValueToYdbValueTest::SimpleUuid [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzDateTime [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzTimeStamp [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::UuidType [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::VariantTuple [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::VariantStruct [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Void [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Tuple [GOOD] |77.1%| [TA] $(B)/ydb/core/tx/schemeshard/ut_data_erasure/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] [GOOD] >> TIcNodeCache::GetNodesInfoTest [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:23.136845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:23.136947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:23.136990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:23.137025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:23.137069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:23.137096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:23.137146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:23.137210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:23.137941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:23.138280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:23.256147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:23.256220Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:23.270186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:23.270428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:23.270579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:23.278364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:23.281376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:23.285092Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:23.285374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:23.289275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:23.289477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:23.290638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:23.290694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:23.290847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:23.290892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:23.290950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:23.291040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.300035Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:30:23.452965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:23.453229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.453455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:23.453511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:23.453732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:23.453827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:23.456173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:23.456393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:23.456630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.456682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:23.456734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:23.456768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:23.461087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.461156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:23.461201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:23.465284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.465354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.465402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:23.465464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:23.468930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:23.473188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:23.473490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:23.474573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:23.474725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:23.474797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:23.475096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:23.475162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:23.475353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:23.475435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:23.478297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:23.478352Z node 1 :FLAT_TX_SCHEMESHARD ... schemeshard: 72057594046678944 2025-06-25T14:30:23.802387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-25T14:30:23.802496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 129 2025-06-25T14:30:23.802626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:30:23.828934Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:419:2388], attempt# 0 2025-06-25T14:30:23.856154Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:419:2388], sender# [1:418:2386] REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:22516 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 4D349674-5406-41C9-A391-B7DC1E7CEF0D amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-25T14:30:23.860588Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:419:2388], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } 2025-06-25T14:30:23.862179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-25T14:30:23.862236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:30:23.862448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:23.862479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:22516 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 95F19F6D-4C03-47B5-ACE7-64A6E96937DD amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-25T14:30:23.868970Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:419:2388], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-06-25T14:30:23.869095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.869154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:23.869728Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:418:2386] 2025-06-25T14:30:23.869888Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:419:2388], sender# [1:418:2386], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-06-25T14:30:23.870600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:30:23.870753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:30:23.870817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:30:23.870875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:30:23.870924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:30:23.871019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is 
published: true FAKE_COORDINATOR: Erasing txId 102 REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:22516 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C6F5C44E-F033-46AB-8453-B7B9E937B42B amz-sdk-request: attempt=1 content-length: 11 content-md5: bj4KQf2rit2DOGLxvSlUww== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 11 2025-06-25T14:30:23.873289Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:419:2388], result# PutObjectResult { ETag: 6e3e0a41fdab8add833862f1bd2954c3 } 2025-06-25T14:30:23.873345Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:419:2388], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-25T14:30:23.873538Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:418:2386], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-25T14:30:23.883708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:30:23.905693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:23.905792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:30:23.905965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:23.906083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:23.906171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:23.906231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.906274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:30:23.906329Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:30:23.906555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:23.910032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.910431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.910485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:30:23.910614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:23.910671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:23.910732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:23.910786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:23.910835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:30:23.910921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:341:2318] message: TxId: 102 2025-06-25T14:30:23.910980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:23.911022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:30:23.911071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:30:23.911268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:30:23.914850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:30:23.914912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:403:2373] TestWaitNotification: OK eventTxId 102 >> Cdc::AddColumn [GOOD] >> Cdc::AddColumn_TopicAutoPartitioning |77.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::Tuple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::DescribeConsumer [GOOD] Test command err: 2025-06-25T14:30:10.951316Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894216065279627:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:10.951382Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:10.993105Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894215052394496:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:11.000702Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d22/r3tmp/tmpwBhfA8/pdisk_1.dat 2025-06-25T14:30:11.189281Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:30:11.198800Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:30:11.357093Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:11.381994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:11.382085Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:11.386202Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:30:11.387247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:11.411889Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:11.411961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 4613, node 1 2025-06-25T14:30:11.424771Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:11.460593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001d22/r3tmp/yandexfWDNfl.tmp 2025-06-25T14:30:11.460625Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001d22/r3tmp/yandexfWDNfl.tmp 2025-06-25T14:30:11.460764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001d22/r3tmp/yandexfWDNfl.tmp 2025-06-25T14:30:11.460912Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:11.514173Z INFO: TTestServer started on Port 3486 GrpcPort 4613 TClient is connected to server localhost:3486 PQClient connected to localhost:4613 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:11.778545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:30:11.859523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:11.987970Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:12.004072Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-25T14:30:14.060231Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894233245149923:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.060343Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.060539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894233245149936:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.060222Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894232232263871:2270], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.060231Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894232232263879:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.060367Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.063768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:14.066620Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894232232263886:2126] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:30:14.068377Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894233245149977:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.068452Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.077667Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894233245149938:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:30:14.077709Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894232232263885:2274], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:30:14.140394Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894233245150017:2799] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:14.177881Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894232232263913:2132] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:14.393208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:14.410980Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894233245150035:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:14.412251Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZmMzOGIxYWYtN2Q2MjMxLTMyYjFkOTM5LWQxMDgwYTk=, ActorId: [1:7519894233245149906:2302], ActorState: ExecuteState, TraceId: 01jykqy68se083dht896nykx8t, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:14.413491Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894232232263920:2278], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and ... max_write_time_lag { } bytes_written { } partition_node_id: 1 } partition_consumer_stats { last_read_time { seconds: 1750861821 nanos: 724000000 } max_read_time_lag { } max_write_time_lag { } bytes_read { } max_committed_time_lag { } } } partitions { partition_id: 14 active: true partition_stats { partition_offsets { } last_write_time { seconds: 1750861821 nanos: 785000000 } max_write_time_lag { } bytes_written { } partition_node_id: 2 } partition_consumer_stats { last_read_time { seconds: 1750861821 nanos: 791000000 } max_read_time_lag { } max_write_time_lag { } bytes_read { } max_committed_time_lag { } } } } } } 2025-06-25T14:30:22.328654Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:157: new Describe consumer request 2025-06-25T14:30:22.328758Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:473: TDescribeConsumerActor for request path: "/Root/PQ//rt3.dc1--topic-x" consumer: "my-consumer" include_location: true 2025-06-25T14:30:22.329378Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519894267604891054:2582]: Request location 2025-06-25T14:30:22.329701Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894267604891056:2583] connected; active server actors: 1 2025-06-25T14:30:22.329736Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 1, Generation 2 2025-06-25T14:30:22.329754Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 2, Generation 2 2025-06-25T14:30:22.329789Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 2, NodeId 1, Generation 2 2025-06-25T14:30:22.329800Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 1, Generation 2 2025-06-25T14:30:22.329809Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037899, partitionId 4, NodeId 2, Generation 2 2025-06-25T14:30:22.329817Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 5, NodeId 2, Generation 2 2025-06-25T14:30:22.329824Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 6, NodeId 2, Generation 2 2025-06-25T14:30:22.329832Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 7, NodeId 1, Generation 2 2025-06-25T14:30:22.329842Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 8, NodeId 1, Generation 2 2025-06-25T14:30:22.329872Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] 
addPartitionToResponse tabletId 72075186224037895, partitionId 9, NodeId 1, Generation 2 2025-06-25T14:30:22.329903Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 10, NodeId 2, Generation 2 2025-06-25T14:30:22.329915Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 11, NodeId 2, Generation 2 2025-06-25T14:30:22.329923Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 12, NodeId 1, Generation 2 2025-06-25T14:30:22.329932Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 13, NodeId 1, Generation 2 2025-06-25T14:30:22.329940Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 14, NodeId 2, Generation 2 2025-06-25T14:30:22.329983Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7519894267604891054:2582]: Got location 2025-06-25T14:30:22.330187Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894267604891056:2583] disconnected; active server actors: 1 2025-06-25T14:30:22.330203Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894267604891056:2583] disconnected no session Got response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribeConsumerResult] { self { name: "rt3.dc1--topic-x/my-consumer" owner: "root@builtin" type: TOPIC created_at { plan_step: 1750861821275 tx_id: 281474976710679 } } consumer { name: "shared/my-consumer" important: true read_from { } attributes { key: "_service_type" value: "data-streams" } } partitions { active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 1 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 2 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 3 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 4 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 5 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 6 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 7 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 8 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 9 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 10 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 11 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 12 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 13 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 14 active: true partition_location { node_id: 2 generation: 2 } } } } } Got response: 2025-06-25T14:30:22.338596Z node 1 
:PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:157: new Describe consumer request 2025-06-25T14:30:22.338719Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:473: TDescribeConsumerActor for request path: "/Root/PQ//rt3.dc1--topic-x" consumer: "my-consumer" operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribeConsumerResult] { self { name: "rt3.dc1--topic-x/my-consumer" owner: "root@builtin" type: TOPIC created_at { plan_step: 1750861821275 tx_id: 281474976710679 } } consumer { name: "shared/my-consumer" important: true read_from { } attributes { key: "_service_type" value: "data-streams" } } partitions { active: true } partitions { partition_id: 1 active: true } partitions { partition_id: 2 active: true } partitions { partition_id: 3 active: true } partitions { partition_id: 4 active: true } partitions { partition_id: 5 active: true } partitions { partition_id: 6 active: true } partitions { partition_id: 7 active: true } partitions { partition_id: 8 active: true } partitions { partition_id: 9 active: true } partitions { partition_id: 10 active: true } partitions { partition_id: 11 active: true } partitions { partition_id: 12 active: true } partitions { partition_id: 13 active: true } partitions { partition_id: 14 active: true } } } } Got response: operation { ready: true status: SCHEME_ERROR issues { message: "path \'Root/PQ/bad-topic\' does not exist or you do not have access rights" issue_code: 500018 severity: 1 } } 2025-06-25T14:30:22.347066Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:157: new Describe consumer request 2025-06-25T14:30:22.347184Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:473: TDescribeConsumerActor for request path: "/Root/PQ//bad-topic" consumer: "my-consumer" include_stats: true include_location: true 2025-06-25T14:30:23.181487Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710688, task: 1, CA Id [1:7519894271899858392:2589]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 0 2025-06-25T14:30:23.220642Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710688, task: 1, CA Id [1:7519894271899858392:2589]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:30:23.281261Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710688, task: 1, CA Id [1:7519894271899858392:2589]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:30:23.352223Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710688, task: 1, CA Id [1:7519894271899858392:2589]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:30:23.469636Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710688, task: 1, CA Id [1:7519894271899858392:2589]. 
Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 >> TSchemeShardTest::DisablePublicationsOfDropping_Table [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_IndexedTable >> ConvertYdbValueToMiniKQLValueTest::SimpleInt32 [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleTzDate [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleTzDateTime [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleTzTimeStamp [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleInt32TypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleUuid [GOOD] >> KqpScripting::StreamExecuteYqlScriptScanScalar [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::GetLocalDescribe [GOOD] Test command err: 2025-06-25T14:30:11.852806Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894221633615349:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:11.852865Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:11.942725Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894221930703396:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:11.942772Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:12.165712Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d0f/r3tmp/tmpM8q1ZS/pdisk_1.dat 2025-06-25T14:30:12.189417Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:30:12.422657Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:12.435756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:12.435845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:12.437893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:12.437946Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:12.442826Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:30:12.444118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:12.444354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14651, node 1 2025-06-25T14:30:12.553741Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001d0f/r3tmp/yandexjHKTPB.tmp 2025-06-25T14:30:12.553780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
/home/runner/.ya/build/build_root/yft8/001d0f/r3tmp/yandexjHKTPB.tmp 2025-06-25T14:30:12.553986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001d0f/r3tmp/yandexjHKTPB.tmp 2025-06-25T14:30:12.554118Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:12.605441Z INFO: TTestServer started on Port 1834 GrpcPort 14651 TClient is connected to server localhost:1834 PQClient connected to localhost:14651 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:12.857253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:30:12.913298Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:12.918502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:12.968980Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-25T14:30:14.967177Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894234518518349:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.967330Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.967677Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894234518518368:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.972352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:14.995706Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894234518518370:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:30:15.074664Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894238813485751:2802] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:15.313107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:15.315610Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894239110572874:2275], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:15.316259Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZWU4NjZjY2YtZDhlODM1ZTQtYmI1ZjA5MTktMWI2OTk4NWQ=, ActorId: [2:7519894239110572849:2269], ActorState: ExecuteState, TraceId: 01jykqy7969zcxx0s9xbpzn5zr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:15.317325Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894238813485768:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:15.317519Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZmRlYmE3MjItMzgzOTM3ZDYtYTQwOWZiNDEtNTdlN2RkMjI=, ActorId: [1:7519894234518518338:2302], ActorState: ExecuteState, TraceId: 01jykqy75dcz2md4h1m3rhybdz, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:15.334722Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:15.334724Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:15.397105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:15.515783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` ... 
72075186224037894, Partition: 12, State: StateInit] bootstrapping 12 [1:7519894268878258088:2457] 2025-06-25T14:30:22.569310Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037899, Partition: 4, State: StateInit] bootstrapping 4 [2:7519894269175344762:2390] 2025-06-25T14:30:22.579531Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037896, Partition: 10, State: StateInit] bootstrapping 10 [2:7519894269175344761:2389] 2025-06-25T14:30:22.580926Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037899, Partition: 4, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 4 generation 1 [2:7519894269175344762:2390] 2025-06-25T14:30:22.581361Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037896, Partition: 10, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 10 generation 1 [2:7519894269175344761:2389] 2025-06-25T14:30:22.587679Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037896, Partition: 5, State: StateInit] bootstrapping 5 [2:7519894269175344760:2388] 2025-06-25T14:30:22.574668Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037894, Partition: 12, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 12 generation 1 [1:7519894268878258088:2457] 2025-06-25T14:30:22.576733Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037895, Partition: 2, State: StateInit] bootstrapping 2 [1:7519894268878258091:2460] 2025-06-25T14:30:22.578673Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037895, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 2 generation 1 [1:7519894268878258091:2460] 2025-06-25T14:30:22.586113Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037897, Partition: 7, State: StateInit] bootstrapping 7 [1:7519894268878258121:2463] 2025-06-25T14:30:22.588109Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037897, Partition: 7, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 7 generation 1 [1:7519894268878258121:2463] 2025-06-25T14:30:22.590352Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037898, Partition: 14, State: StateInit] bootstrapping 14 [2:7519894269175344765:2393] 2025-06-25T14:30:22.593890Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037896, Partition: 5, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 5 generation 1 [2:7519894269175344760:2388] 2025-06-25T14:30:22.597228Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037898, Partition: 14, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 14 generation 1 [2:7519894269175344765:2393] 2025-06-25T14:30:22.608614Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037898, Partition: 11, State: StateInit] bootstrapping 11 [2:7519894269175344782:2396] 2025-06-25T14:30:22.610418Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037897, Partition: 13, State: StateInit] bootstrapping 13 [1:7519894268878258123:2465] 2025-06-25T14:30:22.612224Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037897, Partition: 13, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 13 generation 1 [1:7519894268878258123:2465] 2025-06-25T14:30:22.612954Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037898, Partition: 11, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 11 generation 1 
[2:7519894269175344782:2396] 2025-06-25T14:30:22.669480Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037898] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:30:22.678279Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037895] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:30:22.678324Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:30:22.679040Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037897] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:30:22.679041Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:30:22.682751Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037899] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:30:22.683188Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037893] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:30:22.683588Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037896] disable metering: reason# billing is not enabled in BillingMeteringConfig Create topic result: 1 2025-06-25T14:30:22.747182Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519894268878258342:3831]: Request location 2025-06-25T14:30:22.747933Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894268878258357:3840] connected; active server actors: 1 2025-06-25T14:30:22.748021Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 1, Generation 1 2025-06-25T14:30:22.748042Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 2, Generation 1 2025-06-25T14:30:22.748057Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 2, NodeId 1, Generation 1 2025-06-25T14:30:22.748081Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 1, Generation 1 2025-06-25T14:30:22.748103Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037899, partitionId 4, NodeId 2, Generation 1 2025-06-25T14:30:22.748118Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 5, NodeId 2, Generation 1 2025-06-25T14:30:22.748132Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 6, NodeId 2, Generation 1 2025-06-25T14:30:22.748156Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 7, NodeId 1, Generation 1 2025-06-25T14:30:22.748172Z node 1 
:PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 8, NodeId 1, Generation 1 2025-06-25T14:30:22.748184Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 9, NodeId 1, Generation 1 2025-06-25T14:30:22.748218Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 10, NodeId 2, Generation 1 2025-06-25T14:30:22.748234Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 11, NodeId 2, Generation 1 2025-06-25T14:30:22.748243Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 12, NodeId 1, Generation 1 2025-06-25T14:30:22.748254Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 13, NodeId 1, Generation 1 2025-06-25T14:30:22.748264Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 14, NodeId 2, Generation 1 2025-06-25T14:30:22.748373Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7519894268878258342:3831]: Got location 2025-06-25T14:30:22.748872Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894268878258357:3840] disconnected; active server actors: 1 2025-06-25T14:30:22.748897Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894268878258357:3840] disconnected no session 2025-06-25T14:30:22.749336Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519894268878258361:3844]: Request location 2025-06-25T14:30:22.749610Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894268878258363:3846] connected; active server actors: 1 2025-06-25T14:30:22.749650Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 2, Generation 1 2025-06-25T14:30:22.749666Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 1, Generation 1 2025-06-25T14:30:22.749677Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 5, NodeId 2, Generation 1 2025-06-25T14:30:22.749731Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7519894268878258361:3844]: Got location 2025-06-25T14:30:22.750050Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894268878258363:3846] disconnected; active server actors: 1 2025-06-25T14:30:22.750066Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe 
[1:7519894268878258363:3846] disconnected no session 2025-06-25T14:30:22.750502Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519894268878258365:3848]: Request location 2025-06-25T14:30:22.750748Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894268878258367:3850] connected; active server actors: 1 2025-06-25T14:30:23.637804Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [1:7519894273173225780:2498] TxId: 281474976710686. Ctx: { TraceId: 01jykqyf4t07crqk0fb8ep7h14, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzQ2MTVjNTItZDAyN2U2YzgtOWRhMjk2ZGQtYzE2NTVkZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 2 2025-06-25T14:30:23.638386Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519894273173225784:2498], TxId: 281474976710686, task: 3. Ctx: { TraceId : 01jykqyf4t07crqk0fb8ep7h14. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzQ2MTVjNTItZDAyN2U2YzgtOWRhMjk2ZGQtYzE2NTVkZmI=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [1:7519894273173225780:2498], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> TBlobStorageWardenTest::TestCreatePDiskAndGroup >> Cdc::RenameTable [GOOD] >> Cdc::ResolvedTimestamps >> TControlPlaneProxyTest::ShouldSendListBindings [GOOD] >> TControlPlaneProxyTest::ShouldSendDescribeBinding ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::BackupUuidColumn[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:23.870129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:23.870228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:23.870270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:23.870313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:23.870370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:23.870399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:23.870445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:23.870551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:23.871249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:23.871562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:23.944030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:23.944101Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:23.972795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:23.973261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:23.973422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:23.987697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:23.988146Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:23.988982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:23.989293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:23.993800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:23.993982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:23.995236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:23.995298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:23.995449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:23.995515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:23.995562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:23.995660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.003433Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:24.154870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:24.155090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.155300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:24.155379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:24.155672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:24.155745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, 
first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:24.161407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.161658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:24.161864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.161921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:24.161981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:24.162017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:24.169210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.169287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:24.169451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:24.173193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.173276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.173327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.173439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:24.177734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:24.180382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:24.180579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:24.181739Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.181898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:24.181958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.182300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:24.182371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.182533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:24.182608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:24.185312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:24.185363Z node 1 :FLAT_TX_SCHEMESHARD ... meshard: 72057594046678944 2025-06-25T14:30:24.584537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-25T14:30:24.584709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 129 2025-06-25T14:30:24.584845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:30:24.594864Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:417:2386], attempt# 0 2025-06-25T14:30:24.609868Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:417:2386], sender# [1:416:2385] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-25T14:30:24.613999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:24.614120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:30:24.614434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:24.614491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 
2025-06-25T14:30:24.614587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.614651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:2173 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7F2DF98F-3B0F-4095-BF66-0B53237B5C45 amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:30:24.615633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:30:24.615777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:30:24.615829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:30:24.615870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:30:24.615908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:30:24.616013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-25T14:30:24.616282Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:417:2386], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } 2025-06-25T14:30:24.621932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:2173 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: E28B4FAC-9DE2-4B92-AC26-EC8B6369CADB amz-sdk-request: attempt=1 content-length: 357 content-md5: IxJB3qM/y2xlsv8qcwTF7g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-25T14:30:24.625241Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:417:2386], result# PutObjectResult { ETag: 231241dea33fcb6c65b2ff2a7304c5ee } 2025-06-25T14:30:24.625370Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle 
TEvExportScan::TEvFeed: self# [1:416:2385] 2025-06-25T14:30:24.625628Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:417:2386], sender# [1:416:2385], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:2173 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: DF22CCE2-13B9-4C1C-BFB5-239FA866A6F9 amz-sdk-request: attempt=1 content-length: 40 content-md5: LXbLDYru8NmFsYXNSXjnpQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 40 2025-06-25T14:30:24.630741Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:417:2386], result# PutObjectResult { ETag: 2d76cb0d8aeef0d985b185cd4978e7a5 } 2025-06-25T14:30:24.630828Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:417:2386], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-25T14:30:24.631007Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:416:2385], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-25T14:30:24.660341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-25T14:30:24.660432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:30:24.660628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-25T14:30:24.660743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-25T14:30:24.660816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.660860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.660922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 
2025-06-25T14:30:24.660979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:30:24.661169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:24.666747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.667013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.667078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:30:24.667198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:24.667235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:24.667283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:24.667329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:24.667369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:30:24.667462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 102 2025-06-25T14:30:24.667519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:24.667556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:30:24.667590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:30:24.667753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:30:24.670371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:30:24.670440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:402:2372] TestWaitNotification: OK eventTxId 102 >> TCmsTest::TestKeepAvailableModeDisconnects >> Cdc::ShouldBreakLocksOnConcurrentMoveIndex [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentDropIndex >> TCmsTest::StateRequest |77.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::SimpleUuid [GOOD] |77.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |77.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut 
|77.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |77.1%| [LD] {RESULT} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |77.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::BackupUuidColumn[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:23.990966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:23.991056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:23.991094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:23.991131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:23.991189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:23.991224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:23.991272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:23.991389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:23.992011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:23.992277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:24.056957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:24.057049Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:24.073142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:24.073504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:24.073651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:24.078600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 
2025-06-25T14:30:24.078927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:24.079574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.079827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:24.082945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:24.083101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:24.084180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:24.084233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:24.084372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:24.084422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:24.084462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:24.084549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.097362Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:24.242113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:24.242305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.242468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:24.242512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:24.242683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:24.242747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:24.245209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.245405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:24.245601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.245637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:24.245675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:24.245699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:24.247750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.247793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:24.247843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:24.249442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.249509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.249554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.249657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:24.257767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:24.259807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:24.259981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 
72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:24.261014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.261167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:24.261226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.261507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:24.261561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.261716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:24.261776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:24.263712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:24.263756Z node 1 :FLAT_TX_SCHEMESHARD ... 
schemeshard: 72057594046678944 2025-06-25T14:30:24.597651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-25T14:30:24.597753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 129 2025-06-25T14:30:24.597870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:30:24.660735Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:417:2386], attempt# 0 2025-06-25T14:30:24.709493Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:417:2386], sender# [1:416:2385] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-25T14:30:24.721874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:24.721964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:30:24.722284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:24.722323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:30:24.722436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.722483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:30:24.723913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:30:24.724030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:30:24.724088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:30:24.724159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:30:24.724208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:30:24.724385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true REQUEST: PUT 
/metadata.json HTTP/1.1 HEADERS: Host: localhost:64456 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 435913FC-1F39-4DE0-87D6-DE4DA1DAC147 amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-25T14:30:24.727666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:30:24.729618Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:417:2386], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:64456 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 90503570-953E-4E91-B243-4F152626D033 amz-sdk-request: attempt=1 content-length: 357 content-md5: IxJB3qM/y2xlsv8qcwTF7g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-25T14:30:24.744844Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:417:2386], result# PutObjectResult { ETag: 231241dea33fcb6c65b2ff2a7304c5ee } 2025-06-25T14:30:24.744954Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:416:2385] 2025-06-25T14:30:24.745084Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:417:2386], sender# [1:416:2385], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:64456 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C9EF98E9-7446-404A-BE95-594F1A509F71 amz-sdk-request: attempt=1 content-length: 39 content-md5: GLX1nc5/cKhlAfxBHlykQA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 39 2025-06-25T14:30:24.748776Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:417:2386], result# PutObjectResult { ETag: 18b5f59dce7f70a86501fc411e5ca440 } 2025-06-25T14:30:24.748856Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:417:2386], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-25T14:30:24.748993Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:416:2385], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-25T14:30:24.769143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" 
BytesProcessed: 20 RowsProcessed: 1 } 2025-06-25T14:30:24.769218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:30:24.769573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-25T14:30:24.769712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-25T14:30:24.769787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.769828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.769895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:30:24.769950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:30:24.770131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:24.775116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.775432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.775488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:30:24.775637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:24.775676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:24.775716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:24.775769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:24.775813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:30:24.775908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 102 2025-06-25T14:30:24.775969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:24.776013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:30:24.776048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:30:24.776349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:30:24.785155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:30:24.785222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:402:2372] TestWaitNotification: OK eventTxId 102 |77.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestBlock42 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:24.200510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:24.200597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:24.200635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:24.200662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:24.200694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:24.200710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:24.200747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:24.200831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-06-25T14:30:24.201531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:24.201826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:24.261295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:24.261346Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:24.272182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:24.272466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:24.272585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:24.277072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:24.277295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:24.277771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.277961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:24.280593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:24.280777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:24.281787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:24.281830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:24.281945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:24.281977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:24.282011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:24.282073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.287595Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:24.405701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" 
Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:24.405919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.406139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:24.406201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:24.406556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:24.406625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:24.408801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.408991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:24.409163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.409234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:24.409290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:24.409331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:24.410807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.410855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:24.410903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:24.412180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.412225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.412263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.412303Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:24.415522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:24.417016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:24.417193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:24.417989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.418110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:24.418179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.418452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:24.418495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.418634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:24.418695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:24.420899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:24.420939Z node 1 :FLAT_TX_SCHEMESHARD ... 
19-90C9-94FA22FC2906 amz-sdk-request: attempt=1 content-length: 11 content-md5: jsMhyzH+cyrvZpBm0dQVGQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_01.csv / / 11 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:12217 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 42AEA847-B4E0-4806-B6B8-482229948D9F amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-25T14:30:25.022059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:30:25.025523Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:485:2441], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } 2025-06-25T14:30:25.027623Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:487:2442], result# PutObjectResult { ETag: 8ec321cb31fe732aef669066d1d41519 } 2025-06-25T14:30:25.027684Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:487:2442], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-25T14:30:25.027812Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:486:2440], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:12217 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 4C87CF66-FFB3-4BDF-BBCA-E765016B9D9C amz-sdk-request: attempt=1 content-length: 638 content-md5: Myp3UygaBNGp6+7AMgyRnQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 638 2025-06-25T14:30:25.044029Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:485:2441], result# PutObjectResult { ETag: 332a7753281a04d1a9ebeec0320c919d } 2025-06-25T14:30:25.044154Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:484:2439] 2025-06-25T14:30:25.044232Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:485:2441], sender# [1:484:2439], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:12217 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A30F1AB8-218A-4158-8141-CBB254C2EF9A amz-sdk-request: attempt=1 content-length: 11 content-md5: bj4KQf2rit2DOGLxvSlUww== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 11 2025-06-25T14:30:25.052427Z 
node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:485:2441], result# PutObjectResult { ETag: 6e3e0a41fdab8add833862f1bd2954c3 } 2025-06-25T14:30:25.052495Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:485:2441], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-25T14:30:25.052682Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:484:2439], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-25T14:30:25.078098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 4294969604 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:25.078196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-25T14:30:25.078399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 4294969604 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:25.078535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 328 RawX2: 4294969604 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:25.078618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:25.078849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:25.079428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 4294969603 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:25.079465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:30:25.079611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 4294969603 } Origin: 72075186233409546 State: 2 
TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:25.079671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 327 RawX2: 4294969603 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:25.079707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:25.079737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:25.079780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:30:25.079822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:30:25.079862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:30:25.079967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:25.096730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:25.097177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:25.097629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:25.097678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:30:25.097787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:25.097824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:25.097884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:25.097916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:25.097968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:30:25.098053Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:377:2341] message: TxId: 102 2025-06-25T14:30:25.098107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:25.098146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:30:25.098192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:30:25.098350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:30:25.105124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:30:25.105191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:461:2418] TestWaitNotification: OK eventTxId 102 |77.2%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/test-results/unittest/{meta.json ... results_accumulator.log} >> TCmsTenatsTest::TestClusterLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:24.448787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:24.448905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:24.448954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:24.448993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:24.449035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:24.449058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:24.449106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:24.449218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:24.450003Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:24.450329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:24.524860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:24.524960Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:24.552531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:24.552938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:24.553096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:24.559007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:24.559344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:24.559992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.560268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:24.564855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:24.565053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:24.566315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:24.566370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:24.566510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:24.566555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:24.566598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:24.566679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.573459Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:24.730387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T14:30:24.730657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.730907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:24.730956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:24.731216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:24.731303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:24.740358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.740618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:24.740881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.740960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:24.741026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:24.741086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:24.744969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.745091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:24.745170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:24.747874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.747954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.748016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.748070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:24.752237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:24.754640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:24.754833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:24.755875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.756021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:24.756097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.756418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:24.756472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.756635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:24.756703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:24.759172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:24.759221Z node 1 :FLAT_TX_SCHEMESHARD ... 
, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:25.308438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:30:25.310297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:30:25.310413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:30:25.310482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:30:25.310538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:30:25.310621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:30:25.310714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:17142 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 9EA034F8-2FE1-4C41-802B-CCC8D7E1302C amz-sdk-request: attempt=1 content-length: 638 content-md5: Myp3UygaBNGp6+7AMgyRnQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 638 2025-06-25T14:30:25.315510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:30:25.317024Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:485:2441], result# PutObjectResult { ETag: 332a7753281a04d1a9ebeec0320c919d } 2025-06-25T14:30:25.317153Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:484:2439] 2025-06-25T14:30:25.317299Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:485:2441], sender# [1:484:2439], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:17142 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 1047CD39-9C1E-428D-B822-87E9EBAD6E90 amz-sdk-request: attempt=1 content-length: 20 content-md5: 2qFn9G0TW8wfvJ9C+A5Jbw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 20 
2025-06-25T14:30:25.325762Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:485:2441], result# PutObjectResult { ETag: daa167f46d135bcc1fbc9f42f80e496f } 2025-06-25T14:30:25.325844Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:485:2441], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-25T14:30:25.326036Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:484:2439], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-25T14:30:25.336854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 4294969603 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:25.336932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:30:25.337102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 4294969603 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:25.337200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 327 RawX2: 4294969603 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:25.337258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:25.337392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:25.338020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 4294969604 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:25.338055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-25T14:30:25.338224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 328 RawX2: 4294969604 } Origin: 
72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:25.338317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 328 RawX2: 4294969604 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:30:25.338364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:25.338396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:25.338464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:30:25.338527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:30:25.338554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:30:25.338679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:25.341856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:25.342848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:25.343273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:30:25.343322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:30:25.343420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:25.343452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:25.343516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:30:25.343550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:25.343588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 
2025-06-25T14:30:25.343668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:377:2341] message: TxId: 102 2025-06-25T14:30:25.343738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:30:25.343779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:30:25.343811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:30:25.343954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:30:25.348055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:30:25.348116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:461:2418] TestWaitNotification: OK eventTxId 102 >> TCmsTest::TestKeepAvailableMode >> TCmsTest::ScheduledEmergencyDuringRollingRestart >> TCmsTest::ManageRequestsWrong |77.2%| [TA] $(B)/ydb/core/ydb_convert/ut/test-results/unittest/{meta.json ... results_accumulator.log} |77.2%| [TA] {RESULT} $(B)/ydb/core/ydb_convert/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TCmsTest::WalleTasks ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TIcNodeCache::GetNodesInfoTest [GOOD] Test command err: 2025-06-25T14:30:11.574662Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894222281265148:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:11.574791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:11.610834Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894221167965031:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:11.610891Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d11/r3tmp/tmpTCcfgl/pdisk_1.dat 2025-06-25T14:30:11.800736Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:30:11.803610Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:30:12.065587Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:12.066854Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:12.066910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:12.068094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:12.068124Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:12.074506Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:12.075845Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:30:12.076868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:12.121289Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 17872, node 1 2025-06-25T14:30:12.190198Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001d11/r3tmp/yandexBmIvOP.tmp 2025-06-25T14:30:12.190226Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001d11/r3tmp/yandexBmIvOP.tmp 2025-06-25T14:30:12.190373Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001d11/r3tmp/yandexBmIvOP.tmp 2025-06-25T14:30:12.190513Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:12.238207Z INFO: TTestServer started on Port 23615 GrpcPort 17872 TClient is connected to server localhost:23615 PQClient connected to localhost:17872 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:12.520326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:30:12.564023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:30:12.584411Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:12.621468Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-25T14:30:14.635245Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894234052867243:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.635335Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894234052867262:2276], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.635390Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.649229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:14.661952Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894235166168067:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.662097Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.662573Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894235166168102:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:14.680069Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894235166168105:2701] txid# 281474976720662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:30:14.690028Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894235166168104:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-25T14:30:14.689236Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894234052867272:2277], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-25T14:30:14.759719Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894234052867301:2176] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:14.796740Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894235166168196:2762] txid# 281474976720663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:14.979843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:14.978717Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894234052867315:2282], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:14.978965Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=YWY4Njk2MDctMjQ0YzQxMzgtNjA5OTRhMGEtZGNlOTdlMWI=, ActorId: [2:7519894234052867241:2272], ActorState: ExecuteState, TraceId: 01jykqy6v87xgdh6kebesbrbtk, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:14.981255Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:14.980878Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894235166168206:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:14.981055Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MTdmZTQ3YzAtYmMxYThmZjgtNTY4YWY0YzUtY2U3YmNlMjA=, ActorId: [1:7519894235166168063:2298], ActorState: ExecuteState, TraceId: 01jykqy6w2219c7mmqs6m24w5p, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:14.981415Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:15.066247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:15.210434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:30:15.446357Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720667. Ctx: { TraceId: 01jykqy7gab3y5s17xeray3n10, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTkwY2Y3OWEtZGM5ZWZhMjktNTczMzg3NmEtNDM3ZWU2NzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. 
Subcribe to ClusterTracker from [1:7519894239461135945:3091] 2025-06-25T14:30:16.565082Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894222281265148:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:16.565181Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:16.610867Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519894221167965031:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:16.610938Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-25T14:30:24.703065Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [1:7519894278115842278:2477] TxId: 281474976720688. Ctx: { TraceId: 01jykqygnweytnp61e5015bjtt, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTI2MTc0ZDgtMTcxNmY0YTAtN2E3ZWJjNDktNTJkM2Q1ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 2 2025-06-25T14:30:24.703679Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519894278115842282:2477], TxId: 281474976720688, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jykqygnweytnp61e5015bjtt. SessionId : ydb://session/3?node_id=1&id=MTI2MTc0ZDgtMTcxNmY0YTAtN2E3ZWJjNDktNTJkM2Q1ZjI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7519894278115842278:2477], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> TControlPlaneProxyTest::ShouldSendDescribeBinding [GOOD] >> TControlPlaneProxyTest::ShouldSendModifyBinding ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptScanScalar [GOOD] Test command err: Trying to start YDB, gRPC: 18882, MsgBus: 18403 2025-06-25T14:28:58.239033Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893905102690074:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:58.239149Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000cfd/r3tmp/tmpbUMNKU/pdisk_1.dat 2025-06-25T14:28:58.504866Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18882, node 1 2025-06-25T14:28:58.550086Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:58.550116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:58.550122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:58.550247Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:58.575091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:58.575204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:58.576795Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18403 TClient is connected to server localhost:18403 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:28:59.063061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:28:59.085953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:59.198529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:59.285598Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:28:59.323994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:28:59.401803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:00.995798Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893913692626257:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:00.995921Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.344701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.375014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.400505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.428097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.454873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.524601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.552879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:01.633588Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893917987594215:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.633663Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519893917987594220:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.633682Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:01.636980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:01.646502Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519893917987594222:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:01.749353Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519893917987594273:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:03.113420Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861743141, txId: 281474976715673] shutting down 2025-06-25T14:29:03.239358Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519893905102690074:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:03.239450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:03.431363Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861743463, txId: 281474976715676] shutting down 2025-06-25T14:29:03.688751Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861743708, txId: 281474976715679] shutting down 2025-06-25T14:29:03.980390Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotM ... pshotManager: discarding snapshot; our snapshot: [step: 1750861815626, txId: 281474976716264] shutting down 2025-06-25T14:30:15.910857Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861815941, txId: 281474976716267] shutting down 2025-06-25T14:30:16.340683Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861816333, txId: 281474976716270] shutting down Trying to start YDB, gRPC: 14953, MsgBus: 4170 2025-06-25T14:30:17.313592Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894244797528315:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:17.313644Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000cfd/r3tmp/tmpPn1fth/pdisk_1.dat 2025-06-25T14:30:17.472093Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:17.474101Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:17.474195Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:17.478241Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14953, node 2 2025-06-25T14:30:17.568479Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:17.568503Z node 2 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:17.568510Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:17.568658Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4170 TClient is connected to server localhost:4170 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:30:18.328414Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:18.331711Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:18.346000Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:18.413284Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:18.571182Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:30:18.655262Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:21.499392Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894261977399099:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:21.499457Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:21.558885Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:21.596275Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:21.637460Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:21.695040Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:21.757774Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:21.849437Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:21.890970Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:21.991328Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894261977399764:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:21.991511Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:21.991903Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894261977399769:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:21.998367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:22.015353Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894261977399771:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:22.093675Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894266272367118:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:22.315232Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519894244797528315:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:22.315791Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:25.373373Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861825335, txId: 281474976715672] shutting down |77.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |77.2%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |77.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut >> KqpScanSpilling::SelfJoinQueryService >> TSchemeShardTest::DisablePublicationsOfDropping_IndexedTable [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Pq >> KqpScanSpilling::SpillingInRuntimeNodes-EnabledSpilling |77.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |77.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |77.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |77.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |77.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |77.2%| [LD] {RESULT} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb >> KqpScanSpilling::HandleErrorsCorrectly >> KqpScanLogs::WideCombine-EnabledLogs >> KqpScanSpilling::SpillingPragmaParseError >> KqpScanLogs::WideCombine+EnabledLogs >> TControlPlaneProxyTest::ShouldSendModifyBinding [GOOD] >> TControlPlaneProxyTest::ShouldSendDeleteBinding >> KqpScanLogs::GraceJoin+EnabledLogs >> DataShardSnapshots::RepeatableReadAfterSplitRace [GOOD] >> KqpScanSpilling::SpillingInRuntimeNodes+EnabledSpilling >> DataShardSnapshots::PostMergeNotCompactedTooEarly >> KqpRe2::IncorrectRegexNoError >> TCmsTest::TestKeepAvailableModeDisconnects [GOOD] >> TCmsTest::TestKeepAvailableModeScheduled >> TCmsTest::StateRequest [GOOD] >> TCmsTest::StateRequestNode >> TCmsTest::WalleRebootDownNode >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestBlock42 [GOOD] >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestMirror3dc |77.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |77.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |77.2%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup >> KqpScanSpilling::SelfJoin >> TCmsTest::ManageRequestsWrong [GOOD] >> TCmsTest::ManageRequestsDry >> TSchemeShardTest::DisablePublicationsOfDropping_Pq [GOOD] >> TSchemeShardTest::DisablePublicationsOfDropping_Solomon >> TCmsTest::ScheduledEmergencyDuringRollingRestart [GOOD] >> TCmsTest::ScheduledWalleRequestDuringRollingRestart >> TCmsTest::TestKeepAvailableMode [GOOD] >> TCmsTest::TestForceRestartModeDisconnects >> TControlPlaneProxyTest::ShouldSendDeleteBinding [GOOD] >> TCmsTenatsTest::TestClusterLimit [GOOD] >> TCmsTenatsTest::RequestShutdownHost >> Cdc::InitialScan_TopicAutoPartitioning [GOOD] >> Cdc::InitialScanDebezium >> TSchemeShardTest::DisablePublicationsOfDropping_Solomon [GOOD] >> TCmsTest::TestKeepAvailableModeScheduled [GOOD] >> TCmsTest::TestKeepAvailableModeScheduledDisconnects >> TCmsTest::StateRequestNode [GOOD] >> TCmsTest::StateRequestUnknownNode >> TCmsTest::ManageRequestsDry [GOOD] >> TCmsTest::Notifications >> TCmsTest::WalleRebootDownNode [GOOD] >> TCmsTest::WalleCleanupTest >> TCmsTest::TestTwoOrMoreDisksFromGroupAtTheSameRequestMirror3dc [GOOD] >> TCmsTest::VDisksEviction >> TCmsTest::TestForceRestartModeDisconnects [GOOD] >> TCmsTest::TestForceRestartModeScheduled >> TBlobStorageWardenTest::TestCreatePDiskAndGroup [GOOD] >> TCmsTest::ScheduledWalleRequestDuringRollingRestart [GOOD] >> TCmsTest::SamePriorityRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::DisablePublicationsOfDropping_Solomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:32.572591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:32.572702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:32.572747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:32.572786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:32.572832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:32.572881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:32.572968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:32.573078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:32.573848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:32.574283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:32.702481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:32.702547Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:32.737220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:32.737714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:32.737888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:32.751008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:32.751367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:32.752018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.752302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:32.766163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:32.766394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:32.767618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:32.767679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:32.767813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:32.767874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:32.767913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:32.768002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.804656Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:29:33.002218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: 
"pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:33.002451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:33.002748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:33.002795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:33.003082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:33.003160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:33.011147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:33.011616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:33.011928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:33.012019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:33.012081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:33.012126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:33.022020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:33.022120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:33.022168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:33.025309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:33.025368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:33.025418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 
1:0, at tablet# 72057594046678944 2025-06-25T14:29:33.025468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:33.035834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:33.047769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:33.048001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:33.049129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:33.049309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:33.049372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:33.049732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:33.049797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:33.049993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:33.050079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:33.057743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:33.057806Z node 1 :FLAT_TX_SCHEMESHARD ... 
25T14:30:31.353498Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:31.353703Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 64424511597 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:31.353829Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_solomon.cpp:47: TDropSolomon TPropose operationId# 104:0 HandleReply TEvOperationPlan, step: 5000005, at schemeshard: 72057594046678944 2025-06-25T14:30:31.353935Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5302: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 3] name: Obj type: EPathTypeSolomonVolume state: EPathStateDrop stepDropped: 0 droppedTxId: 104 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:31.354000Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:30:31.354182Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:30:31.354309Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 128 -> 130 2025-06-25T14:30:31.354526Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:31.354633Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:30:31.360570Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:30:31.361028Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:30:31.363164Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:31.363225Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:31.363387Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:30:31.363587Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:31.363632Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [15:451:2409], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-06-25T14:30:31.363679Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: 
[15:451:2409], at schemeshard: 72057594046678944, txId: 104, path id: 3 FAKE_COORDINATOR: Erasing txId 104 2025-06-25T14:30:31.364125Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:30:31.364185Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:418: [72057594046678944] TDeleteParts opId# 104:0 ProgressState 2025-06-25T14:30:31.364322Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:30:31.364391Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:30:31.364479Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:30:31.364556Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:30:31.364642Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-25T14:30:31.364724Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:30:31.364802Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:30:31.364866Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:30:31.365073Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:30:31.365148Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-25T14:30:31.365213Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-25T14:30:31.365274Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-25T14:30:31.368287Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:30:31.368435Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:30:31.368485Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:30:31.368571Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-25T14:30:31.368669Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:30:31.369222Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:30:31.369310Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:30:31.369342Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:30:31.369378Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-25T14:30:31.369416Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:30:31.369511Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-25T14:30:31.376541Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:30:31.380903Z node 15 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409547 2025-06-25T14:30:31.384512Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:30:31.384929Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:30:31.385512Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:30:31.385607Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:30:31.385727Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:31.386059Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:30:31.386275Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:30:31.388638Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 
2025-06-25T14:30:31.388754Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:30:31.388899Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-25T14:30:31.389445Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-25T14:30:31.389508Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-25T14:30:31.390051Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-25T14:30:31.390175Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:30:31.390239Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [15:575:2514] TestWaitNotification: OK eventTxId 104 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest >> TBlobStorageWardenTest::TestCreatePDiskAndGroup [GOOD] Test command err: 2025-06-25T14:30:29.192862Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:0:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:1:0] targetVDisk# [3e000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:30:29.193071Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:2:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:1:0] targetVDisk# [3e000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:30:29.224412Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:1:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:2:0] targetVDisk# [3e000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:30:29.225233Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:0:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:2:0] targetVDisk# [3e000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:30:29.317753Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:0:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:30:29.318667Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:1:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:30:29.318829Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:2:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 Sending TEvPut Sending TEvGet Sending TEvVGet Sending TEvPut Sending TEvGet >> 
TCmsTest::StateRequestUnknownNode [GOOD] >> TCmsTest::StateRequestUnknownMultipleNodes >> TCmsTest::TestKeepAvailableModeScheduledDisconnects [GOOD] >> TCmsTest::TestLoadLog >> AnalyzeColumnshard::AnalyzeSameOperationId [GOOD] >> TCmsTenatsTest::RequestShutdownHost [GOOD] >> TCmsTenatsTest::RequestShutdownHostWithTenantPolicy >> Cdc::AddColumn_TopicAutoPartitioning [GOOD] >> Cdc::AddIndex >> TCmsTest::Notifications [GOOD] >> TCmsTest::Mirror3dcPermissions >> KqpScanSpilling::SpillingPragmaParseError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/control_plane_proxy/ut/unittest >> TControlPlaneProxyTest::ShouldSendDeleteBinding [GOOD] Test command err: 2025-06-25T14:25:42.023482Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:674: CreateQueryRequest, validation failed: test_user@staff **** (00000000) content { name: "my_query_name" } error:
: Error: No permission yq.queries.create@as in a given scope , code: 1000 2025-06-25T14:25:42.328867Z node 2 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:744: ListQueriesRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:42.655358Z node 3 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:810: DescribeQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:43.031299Z node 4 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:878: GetQueryStatusRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.getStatus@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:43.472907Z node 5 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:945: ModifyQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:43.971507Z node 6 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1021: DeleteQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:48.384267Z node 7 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1087: ControlQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.control@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:50.995942Z node 8 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1156: GetResultDataRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.getData@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:51.357190Z node 9 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1222: ListJobsRequest, validation failed: test_user@staff **** (00000000) query_id: "my_query_id" error:
: Error: No permission yq.jobs.get@as in a given scope , code: 1000 2025-06-25T14:25:51.732202Z node 10 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1288: DescribeJobRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.jobs.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:52.083352Z node 11 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1360: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:52.477940Z node 12 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1360: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) content { setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:53.035158Z node 13 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1509: ListConnectionsRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:53.675508Z node 14 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1575: DescribeConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:54.242741Z node 15 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1646: ModifyConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:54.864884Z node 16 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1646: ModifyConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) content { setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:25:56.839664Z node 17 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1798: DeleteConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:26:01.777030Z node 18 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1920: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:26:03.217629Z node 19 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1920: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:26:07.472954Z node 20 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1979: CreateBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:26:15.009948Z node 21 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2134: ListBindingsRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:26:19.673312Z node 22 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2200: DescribeBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:26:26.931822Z node 23 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2266: ModifyBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:26:34.961616Z node 24 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2413: DeleteBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:01.400997Z node 72 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:674: CreateQueryRequest, validation failed: test_user@staff **** (00000000) content { name: "my_query_name" } error:
: Error: No permission yq.queries.create@as in a given scope , code: 1000 2025-06-25T14:28:02.516393Z node 73 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:744: ListQueriesRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:03.271722Z node 74 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:810: DescribeQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:03.990409Z node 75 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:878: GetQueryStatusRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.getStatus@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:04.839947Z node 76 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:945: ModifyQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:05.719875Z node 77 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1021: DeleteQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:06.497410Z node 78 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1087: ControlQueryRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.control@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:07.197018Z node 79 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1156: GetResultDataRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.queries.getData@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:07.892932Z node 80 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1222: ListJobsRequest, validation failed: test_user@staff **** (00000000) query_id: "my_query_id" error:
: Error: No permission yq.jobs.get@as in a given scope , code: 1000 2025-06-25T14:28:08.701145Z node 81 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1288: DescribeJobRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.jobs.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:09.751799Z node 82 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1360: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:10.554659Z node 83 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1360: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) content { setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:11.290263Z node 84 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1509: ListConnectionsRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:12.277955Z node 85 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1575: DescribeConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:13.091528Z node 86 :YQ_CONTROL_PLANE_STORAG ... L_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1920: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:16.376500Z node 90 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1920: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) setting { ydb_database { auth { service_account { id: "my_sa_id" } } } } error:
: Error: No permission iam.serviceAccounts.use@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:17.169839Z node 91 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1979: CreateBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.create@as in a given scope yandexcloud://my_folder, code: 1000
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:17.996364Z node 92 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2134: ListBindingsRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:18.903410Z node 93 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2200: DescribeBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:19.935881Z node 94 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2266: ModifyBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:28:20.950429Z node 95 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2413: DeleteBindingRequest, validation failed: yandexcloud://my_folder test_user@staff **** (00000000) error:
: Error: No permission yq.bindings.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:16.591699Z node 163 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:674: CreateQueryRequest, validation failed: test_user_3@staff **** (00000000) content { name: "my_query_name" } error:
: Error: No permission yq.queries.create@as in a given scope , code: 1000 2025-06-25T14:29:19.418231Z node 166 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:878: GetQueryStatusRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.getStatus@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:20.383642Z node 167 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:945: ModifyQueryRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:21.820123Z node 168 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1021: DeleteQueryRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:23.088503Z node 169 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1087: ControlQueryRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.control@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:24.196660Z node 170 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1156: GetResultDataRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.queries.getData@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:27.649457Z node 173 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1360: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:31.651841Z node 176 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1646: ModifyConnectionRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.connections.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:33.609327Z node 177 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1798: DeleteConnectionRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.connections.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:35.131969Z node 178 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1920: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:36.325978Z node 179 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1979: CreateBindingRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.bindings.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:40.408464Z node 182 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2266: ModifyBindingRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.bindings.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:41.555852Z node 183 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2413: DeleteBindingRequest, validation failed: yandexcloud://my_folder test_user_3@staff **** (00000000) error:
: Error: No permission yq.bindings.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:43.900650Z node 185 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:744: ListQueriesRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:44.903799Z node 186 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:810: DescribeQueryRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:47.004099Z node 188 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:945: ModifyQueryRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:48.103315Z node 189 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1021: DeleteQueryRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:49.230617Z node 190 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1087: ControlQueryRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.queries.control@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:51.592494Z node 192 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1222: ListJobsRequest, validation failed: test_user_4@staff **** (00000000) query_id: "my_query_id" error:
: Error: No permission yq.jobs.get@as in a given scope , code: 1000 2025-06-25T14:29:52.627905Z node 193 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1288: DescribeJobRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.jobs.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:53.729330Z node 194 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1360: CreateConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:54.776843Z node 195 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1509: ListConnectionsRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:55.812700Z node 196 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1575: DescribeConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:56.768169Z node 197 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1646: ModifyConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:57.703745Z node 198 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1798: DeleteConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.delete@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:58.790427Z node 199 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1920: TestConnectionRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.connections.create@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:29:59.807234Z node 200 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:1979: CreateBindingRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.create@as in a given scope yandexcloud://my_folder, code: 1000
: Error: No permission yq.connections.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:30:00.883088Z node 201 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2134: ListBindingsRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:30:01.919015Z node 202 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2200: DescribeBindingRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.get@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:30:02.921058Z node 203 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2266: ModifyBindingRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.update@as in a given scope yandexcloud://my_folder, code: 1000 2025-06-25T14:30:03.973123Z node 204 :YQ_CONTROL_PLANE_STORAGE ERROR: control_plane_proxy.cpp:2413: DeleteBindingRequest, validation failed: yandexcloud://my_folder test_user_4@staff **** (00000000) error:
: Error: No permission yq.bindings.delete@as in a given scope yandexcloud://my_folder, code: 1000 >> TCmsTest::WalleCleanupTest [GOOD] >> TCmsTest::WalleRequestDuringRollingRestart >> TCmsTest::TestForceRestartModeScheduled [GOOD] >> TCmsTest::TestForceRestartModeScheduledDisconnects >> TCmsTest::VDisksEviction [GOOD] >> TCmsTest::TestOutdatedState |77.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |77.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |77.3%| [TM] {RESULT} ydb/core/fq/libs/control_plane_proxy/ut/unittest >> TCmsTenatsTest::TestClusterRatioLimit |77.3%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk >> TCmsTest::TestLoadLog [GOOD] >> TCmsTest::SamePriorityRequest [GOOD] >> TCmsTest::SamePriorityRequest2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::VDisksEviction [GOOD] Test command err: 2025-06-25T14:30:33.005543Z node 18 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-25T14:30:33.005649Z node 18 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-25T14:30:33.005807Z node 18 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-25T14:30:33.007741Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-18-18" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 18 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-19-19" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 19 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-20-20" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 20 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120027000 } 
Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 21 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 22 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 23 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 24 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 25 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120027000 } } 2025-06-25T14:30:33.008477Z node 18 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-18-18" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 18 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP 
Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-19-19" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 19 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-20-20" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 20 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 21 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 22 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 23 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 24 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120027000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: 
"vdisk-2-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120027000 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120027000 } Timestamp: 120027000 NodeId: 25 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120027000 } 2025-06-25T14:30:33.008719Z node 18 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 120.003000s 2025-06-25T14:30:33.008775Z node 18 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-25T14:30:33.008949Z node 18 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true 2025-06-25T14:30:33.009020Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 2025-06-25T14:30:33.009088Z node 18 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: VDisks eviction from host 18 has not yet been completed) 2025-06-25T14:30:33.009315Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-25T14:30:33.009556Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 18 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-25T14:30:33.009619Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Add host marker: host# 18, marker# MARKER_DISK_FAULTY 2025-06-25T14:30:33.009868Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 18, wbId# [18:8388350642965737326:1634689637] 2025-06-25T14:30:33.009921Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 19, wbId# [19:8388350642965737326:1634689637] 2025-06-25T14:30:33.009952Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 20, wbId# [20:8388350642965737326:1 ... 
"storage" State: UP Version: "-1" Timestamp: 120540048 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120540048 } Devices { Name: "pdisk-18-18" State: UP Timestamp: 120540048 } Timestamp: 120540048 NodeId: 18 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120540048 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120540048 } Devices { Name: "pdisk-19-19" State: UP Timestamp: 120540048 } Timestamp: 120540048 NodeId: 19 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120540048 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120540048 } Devices { Name: "pdisk-20-20" State: UP Timestamp: 120540048 } Timestamp: 120540048 NodeId: 20 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120540048 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120540048 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120540048 } Timestamp: 120540048 NodeId: 21 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120540048 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120540048 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120540048 } Timestamp: 120540048 NodeId: 22 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120540048 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120540048 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120540048 } Timestamp: 120540048 NodeId: 23 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120540048 } Devices { Name: "vdisk-0-1-0-6-0" State: UP 
Timestamp: 120540048 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120540048 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120540048 } Timestamp: 120540048 NodeId: 24 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120540048 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120540048 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120540048 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120540048 } Timestamp: 120540048 NodeId: 25 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120540048 } 2025-06-25T14:30:33.502206Z node 18 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true 2025-06-25T14:30:33.502286Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 2025-06-25T14:30:33.502343Z node 18 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: VDisks eviction from host 18 has not yet been completed) 2025-06-25T14:30:33.502511Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-25T14:30:33.502721Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-3, owner# user, order# 3, priority# 0, body# User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 18 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-25T14:30:33.502771Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Add host marker: host# 18, marker# MARKER_DISK_FAULTY 2025-06-25T14:30:33.503055Z node 18 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 0.100000s 2025-06-25T14:30:33.503114Z node 18 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-25T14:30:33.503234Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 18, wbId# [18:8388350642965737326:1634689637] 2025-06-25T14:30:33.503299Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 19, wbId# [19:8388350642965737326:1634689637] 2025-06-25T14:30:33.503335Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 20, wbId# [20:8388350642965737326:1634689637] 2025-06-25T14:30:33.503366Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 21, wbId# [21:8388350642965737326:1634689637] 2025-06-25T14:30:33.503396Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 22, wbId# [22:8388350642965737326:1634689637] 2025-06-25T14:30:33.503427Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 23, wbId# [23:8388350642965737326:1634689637] 
2025-06-25T14:30:33.503463Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 24, wbId# [24:8388350642965737326:1634689637] 2025-06-25T14:30:33.503495Z node 18 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 25, wbId# [25:8388350642965737326:1634689637] 2025-06-25T14:30:33.503711Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 18, response# PDiskStateInfo { PDiskId: 18 CreateTime: 120441560 ChangeTime: 120441560 Path: "/18/pdisk-18.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120540 2025-06-25T14:30:33.504468Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 20, response# PDiskStateInfo { PDiskId: 20 CreateTime: 120441560 ChangeTime: 120441560 Path: "/20/pdisk-20.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120540 2025-06-25T14:30:33.504581Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 21, response# PDiskStateInfo { PDiskId: 21 CreateTime: 120441560 ChangeTime: 120441560 Path: "/21/pdisk-21.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120540 2025-06-25T14:30:33.504669Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 22, response# PDiskStateInfo { PDiskId: 22 CreateTime: 120441560 ChangeTime: 120441560 Path: "/22/pdisk-22.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120540 2025-06-25T14:30:33.504733Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 23, response# PDiskStateInfo { PDiskId: 23 CreateTime: 120441560 ChangeTime: 120441560 Path: "/23/pdisk-23.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120540 2025-06-25T14:30:33.504800Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 24, response# PDiskStateInfo { PDiskId: 24 CreateTime: 120441560 ChangeTime: 120441560 Path: "/24/pdisk-24.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120540 2025-06-25T14:30:33.504861Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 19, response# PDiskStateInfo { PDiskId: 19 CreateTime: 120441560 ChangeTime: 120441560 Path: "/19/pdisk-19.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120540 2025-06-25T14:30:33.504935Z node 18 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 25, response# PDiskStateInfo { PDiskId: 25 CreateTime: 120441560 ChangeTime: 120441560 Path: "/25/pdisk-25.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120540 2025-06-25T14:30:33.504987Z node 18 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-25T14:30:33.523425Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-25T14:30:33.523723Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { 
User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 600000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "VDisks eviction from host 18 has not yet been completed" } RequestId: "user-r-3" Deadline: 0 } 2025-06-25T14:30:33.524484Z node 18 :CMS INFO: cms.cpp:1410: User user removes request user-r-3 2025-06-25T14:30:33.524547Z node 18 :CMS DEBUG: cms.cpp:1433: Resulting status: OK 2025-06-25T14:30:33.524622Z node 18 :CMS DEBUG: cms_tx_remove_request.cpp:21: TTxRemoveRequest Execute 2025-06-25T14:30:33.524673Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reset host markers: host# 18 2025-06-25T14:30:33.524821Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-3, reason# explicit remove 2025-06-25T14:30:33.545211Z node 18 :CMS DEBUG: cms_tx_remove_request.cpp:45: TTxRemoveRequest Complete 2025-06-25T14:30:33.545468Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManageRequestRequest { User: "user" Command: REJECT RequestId: "user-r-3" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManageRequestResponse { Status { Code: OK } } >> KqpRe2::IncorrectRegexNoError [GOOD] >> KqpRe2::IncorrectRegexWithoutExecutionNoError >> KqpScanSpilling::SelfJoinQueryService [GOOD] >> TCmsTest::StateRequestUnknownMultipleNodes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SpillingPragmaParseError [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/yft8/0015c8/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk9 Trying to start YDB, gRPC: 18379, MsgBus: 62283 2025-06-25T14:30:28.796331Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894292191988838:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:28.796429Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015c8/r3tmp/tmpbvuGSO/pdisk_1.dat 2025-06-25T14:30:29.107793Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:29.108126Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894292191988816:2080] 1750861828792214 != 1750861828792217 TServer::EnableGrpc on GrpcPort 18379, node 1 2025-06-25T14:30:29.206398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:29.206560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:29.208128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:29.211302Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:29.211335Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:29.211342Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:29.211480Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62283 TClient is connected to server localhost:62283 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:30:29.811277Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:29.832011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:29.847930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:30.031031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:30.216634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:30.299938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:30:32.080885Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894309371859640:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.081000Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.372782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.401729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.431570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.465593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.491370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.560146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.595269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.657337Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894309371860300:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.657441Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.657775Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894309371860305:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.661646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:32.672627Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894309371860307:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:30:32.735534Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894309371860358:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:33.798770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894292191988838:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:33.801729Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:33.970388Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894313666827931:2478], status: GENERIC_ERROR, issues:
: Error: Pre type annotation, code: 1020
:3:40: Error: Bad "EnableSpillingNodes" setting for "$all" cluster: (yexception) tools/enum_parser/enum_serialization_runtime/enum_runtime.cpp:70: Key 'GraceJoin1' not found in enum NYql::NDq::EEnabledSpillingNodes. Valid options are: 'None', 'GraceJoin', 'Aggregation', 'All'. 2025-06-25T14:30:33.972050Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OGI3NTI4NmMtZTFlNDQ3MDktZGM2YmU2MC0zMGMyZjU5Nw==, ActorId: [1:7519894313666827924:2474], ActorState: ExecuteState, TraceId: 01jykqysmn70w5c3gafct2c5x7, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: |77.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::TestLoadLog [GOOD] >> TTopicApiDescribes::DescribeTopic [GOOD] |77.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |77.3%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |77.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow >> TDowntimeTest::SetIgnoredDowntimeGap [GOOD] >> TMaintenanceApiTest::CompositeActionGroupSameStorageGroup >> TCmsTest::WalleTasks [GOOD] >> TCmsTest::WalleTasksWithNodeLimit |77.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::StateRequestUnknownMultipleNodes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeSameOperationId [GOOD] Test command err: 2025-06-25T14:27:46.273356Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:27:46.273751Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:46.273853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0020ad/r3tmp/tmpOgtU7T/pdisk_1.dat 2025-06-25T14:27:46.890090Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21326, node 1 2025-06-25T14:27:47.824572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:27:47.824626Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:27:47.824660Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:27:47.825264Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:47.862399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:48.010734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:48.010937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:48.038620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1201 2025-06-25T14:27:49.023539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:27:53.107381Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:27:53.235810Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:53.235998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:53.290727Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:27:53.303729Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:53.680904Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:53.722177Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.722846Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.723466Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.723610Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.723850Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.723943Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.724028Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.724108Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.724179Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:54.029827Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:54.029952Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:54.050110Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:54.357270Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:54.441897Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:27:54.442010Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:27:54.604850Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:27:54.605144Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:27:54.605417Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:27:54.605483Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:27:54.605551Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:27:54.605790Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:27:54.605873Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:27:54.605933Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:27:54.606572Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:27:54.666559Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:27:54.666673Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:27:54.691607Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:27:54.710681Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T14:27:54.710988Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T14:27:54.733542Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:27:54.757151Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:27:54.757217Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:27:54.757299Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:27:54.771342Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:54.780006Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:27:54.780202Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:27:55.173648Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:27:55.539781Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:27:55.609313Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:27:56.241359Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:56.715129Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:56.715270Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:56.743879Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:27:57.002438Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:27:57.002679Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:27:57.003012Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:27:57.003143Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:27:57.003263Z node 2 :TX_COLUMNSHARD WARN: ... 5-06-25T14:30:22.620904Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7175:5292], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T14:30:22.986382Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7259:5337] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:23.319311Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7281:5351]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:30:23.319608Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:30:23.319712Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7283:5353] 2025-06-25T14:30:23.319799Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7283:5353] 2025-06-25T14:30:23.320130Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7284:5354] 2025-06-25T14:30:23.320251Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7284:5354], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T14:30:23.320353Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T14:30:23.320540Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7283:5353], server id = [2:7284:5354], tablet id = 72075186224037894, status = OK 2025-06-25T14:30:23.320617Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:30:23.320716Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7281:5351], StatRequests.size() = 1 2025-06-25T14:30:24.144584Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZThlNmM3NzItMjZmN2ZlYmQtMWE4MDg5NmMtZTBmNzFlNzg=, TxId: 2025-06-25T14:30:24.144680Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZThlNmM3NzItMjZmN2ZlYmQtMWE4MDg5NmMtZTBmNzFlNzg=, TxId: 2025-06-25T14:30:24.156840Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:30:24.177250Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:30:24.177325Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:30:24.246305Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T14:30:24.246614Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T14:30:24.317128Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7283:5353], schemeshard count = 1 2025-06-25T14:30:25.319791Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T14:30:25.319900Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T14:30:25.323149Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T14:30:25.352512Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T14:30:25.353079Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T14:30:25.353154Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-25T14:30:25.387819Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T14:30:25.399135Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. ... blocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR cookie 0 ... waiting for TEvAnalyzeTableResponse (done) ... unblocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR 2025-06-25T14:30:25.400437Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-25T14:30:25.400542Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-25T14:30:25.401052Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:3051:3302] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-25T14:30:25.401116Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3051:3302] 2025-06-25T14:30:25.414896Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-25T14:30:25.415004Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-25T14:30:26.677178Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:30:26.677321Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-25T14:30:26.677379Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:30:26.677876Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T14:30:26.693437Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T14:30:26.693873Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T14:30:26.693975Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T14:30:26.694879Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T14:30:26.737747Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T14:30:26.738039Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T14:30:26.738622Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7398:5423], server id = [2:7399:5424], tablet id = 72075186224037899, status = OK 2025-06-25T14:30:26.738745Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7398:5423], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:30:26.743514Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T14:30:26.743647Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T14:30:26.743923Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T14:30:26.744135Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T14:30:26.744409Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7398:5423], server id = [2:7399:5424], tablet id = 72075186224037899 2025-06-25T14:30:26.744455Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:30:26.744652Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T14:30:26.748286Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T14:30:26.815712Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7419:5443]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:30:26.815939Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:30:26.815996Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7419:5443], StatRequests.size() = 1 2025-06-25T14:30:27.195864Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTIwN2UwN2ItOTM1M2M0NDktY2UxOTRjZGMtMTAyNDdkOGE=, TxId: 2025-06-25T14:30:27.195956Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NTIwN2UwN2ItOTM1M2M0NDktY2UxOTRjZGMtMTAyNDdkOGE=, TxId: 2025-06-25T14:30:27.196533Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:30:27.221328Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:30:27.221418Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3051:3302] 2025-06-25T14:30:27.806874Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-25T14:30:27.806967Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T14:30:29.731640Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:30:30.982705Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T14:30:30.982929Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:30:32.170963Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:30:32.171074Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-25T14:30:33.399612Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-25T14:30:33.399711Z node 2 :STATISTICS DEBUG: service_impl.cpp:1021: Skip TEvStatisticsRequestTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SelfJoinQueryService [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/yft8/001611/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk6 Trying to start YDB, gRPC: 8470, MsgBus: 23067 2025-06-25T14:30:28.439531Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894292192566238:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:28.439601Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001611/r3tmp/tmpo2NHrG/pdisk_1.dat 2025-06-25T14:30:28.734872Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8470, node 1 2025-06-25T14:30:28.795776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:28.795860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:28.797976Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:28.803624Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:28.803636Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:28.803640Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:28.803718Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23067 TClient is connected to server localhost:23067 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:30:29.256932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:29.283817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:29.427858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:29.454915Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:29.638833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:29.759686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:31.796670Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894305077469735:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:31.796775Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.058135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.090631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.119082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.155582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.185920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.248242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.301509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.363055Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894309372437689:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.363152Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.363269Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894309372437694:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.366998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:32.377229Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894309372437696:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:30:32.478710Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894309372437749:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:33.440426Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894292192566238:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:33.440502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (DataType 'String)) (let $5 (OptionalType $4)) (let $6 (StructType '('"Key" $3) '('"Value" $5))) (let $7 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($18) (block '( (let $19 (lambda '($20) (block '( (let $21 (VariantType (TupleType $6 $6))) (let $22 (Variant $20 '0 $21)) (let $23 (Variant $20 '1 $21)) (return $22 $23) )))) (return (FromFlow (MultiMap (ToFlow $18) $19))) ))) '('('"_logical_id" '689) '('"_id" '"730cbf87-573e107c-b7bfd234-21361b76")))) (let $8 (DqCnMap (TDqOutput $7 '0))) (let $9 (DqCnBroadcast (TDqOutput $7 '1))) (let $10 (StructType '('"t1.Key" $3) '('"t1.Value" $5) '('"t2.Key" $3) '('"t2.Value" $5))) (let $11 '('('"_logical_id" '608) '('"_id" '"4efd608-9817a0f4-224962a1-35ccfbbf") '('"_wide_channels" $10))) (let $12 (DqPhyStage '($8 $9) (lambda '($24 $25) (block '( (let $26 '('Many 'Hashed 'Compact)) (let $27 (SqueezeToDict (FlatMap (ToFlow $25) (lambda '($30) (block '( (let $31 (Member $30 '"Value")) (let $32 (Nothing (OptionalType (TupleType $4 $6)))) (let $33 (IfPresent $31 (lambda '($34) (Just '($34 $30))) $32)) (return (If (Exists $31) $33 $32)) )))) (lambda '($35) (Nth $35 '0)) (lambda '($36) (Nth $36 '1)) $26)) (let $28 (Sort (FlatMap $27 (lambda '($37) (block '( (let $38 '('"Value")) (let $39 '('"Key" '"t1.Key" '"Value" '"t1.Value")) (let $40 '('"Key" '"t2.Key" '"Value" '"t2.Value")) (return (MapJoinCore (OrderedFilter (ToFlow $24) (lambda '($41) (Exists (Member $41 '"Value")))) $37 'Inner $38 $38 $39 $40 '('"t1.Value") '('"t2.Value"))) )))) (Bool 'true) (lambda '($42) (Member $42 '"t1.Key")))) (let $29 (lambda '($43) (Member $43 '"t1.Key") (Member $43 '"t1.Value") (Member $43 '"t2.Key") (Member $43 '"t2.Value"))) (return (FromFlow (ExpandMap $28 $29))) ))) $11)) (let $13 (DqCnMerge (TDqOutput $12 '0) '('('0 '"Asc")))) (let $14 (DqPhyStage '($13) (lambda '($44) (FromFlow (NarrowMap (ToFlow $44) (lambda '($45 $46 $47 $48) (AsStruct '('"t1.Key" $45) '('"t1.Value" $46) '('"t2.Key" $47) '('"t2.Value" $48)))))) '('('"_logical_id" '620) '('"_id" '"78fe6b55-8a252c23-fd5fc8d-9a612091")))) (let $15 '($7 $12 $14)) (let $16 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $17 (DqCnResult (TDqOutput $14 '0) $16)) (return (KqpPhysicalQuery '((KqpPhysicalTx $15 '($17) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $10) '0 '0)) '('('"type" '"query")))) ) >> TCmsTest::WalleRequestDuringRollingRestart [GOOD] >> KqpScanSpilling::SelfJoin [GOOD] >> 
TCmsTest::TestForceRestartModeScheduledDisconnects [GOOD] >> TCmsTest::TestOutdatedState [GOOD] >> TCmsTest::TestSetResetMarkers |77.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::WalleRequestDuringRollingRestart [GOOD] >> TCmsTest::SamePriorityRequest2 [GOOD] |77.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::TestForceRestartModeScheduledDisconnects [GOOD] |77.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> TStoragePoolsStatsPersistence::SameAggregatedStatsAfterRestart >> DataShardSnapshots::DelayedWriteReadableAfterSplit [GOOD] >> DataShardSnapshots::DelayedWriteReplyAfterSplit ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::DescribeTopic [GOOD] Test command err: 2025-06-25T14:30:23.526332Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894270316935165:2192];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:23.533476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:23.644749Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894271812181669:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:23.652468Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:23.917188Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001cdf/r3tmp/tmpgLwAyg/pdisk_1.dat 2025-06-25T14:30:23.943076Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:30:24.151193Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:24.159600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:24.159704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:24.166028Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:30:24.167275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:24.177439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:24.177537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:24.180956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10135, node 1 2025-06-25T14:30:24.267363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001cdf/r3tmp/yandexXQnqpu.tmp 2025-06-25T14:30:24.267390Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001cdf/r3tmp/yandexXQnqpu.tmp 2025-06-25T14:30:24.267533Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001cdf/r3tmp/yandexXQnqpu.tmp 2025-06-25T14:30:24.267662Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:24.323420Z INFO: TTestServer started on Port 61712 GrpcPort 10135 2025-06-25T14:30:24.530139Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:61712 PQClient connected to localhost:10135 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:30:24.728606Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:24.949320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:30:25.013284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-25T14:30:27.782629Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894288992051116:2274], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:27.782707Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894288992051105:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:27.782845Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:27.790001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:27.793849Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894287496805330:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:27.793937Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:27.796440Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894287496805346:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:27.806306Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894287496805354:2745] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:30:27.814644Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894288992051119:2275], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:30:27.814515Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894287496805352:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:30:27.893207Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894287496805438:2799] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:27.906829Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894288992051147:2133] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:28.091289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:28.097353Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894287496805448:2315], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:28.097655Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTY2YjFhOGEtZmNhOGE2ODQtNjE4OTBmYTQtYjk1M2U5MDM=, ActorId: [1:7519894287496805327:2303], ActorState: ExecuteState, TraceId: 01jykqykpfetndxx8jb64mfc49, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:28.099493Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:28.102601Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894288992051154:2279], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do n ... } attributes { key: "_message_group_seqno_retention_period_ms" value: "1382400000" } consumers { name: "shared/user" read_from { } attributes { key: "_service_type" value: "data-streams" } consumer_stats { min_partitions_last_read_time { seconds: 1750861834 nanos: 51000000 } max_read_time_lag { } max_write_time_lag { } bytes_read { } max_committed_time_lag { } } } topic_stats { min_last_write_time { seconds: 1750861834 nanos: 86000000 } max_write_time_lag { } bytes_written { } } } } } Describe topic with location Got response: 2025-06-25T14:30:34.904770Z node 1 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-25T14:30:34.904929Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ//rt3.dc1--topic-x 2025-06-25T14:30:34.905825Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519894317561578677:2550]: Request location 2025-06-25T14:30:34.906015Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894317561578679:2551] connected; active server actors: 1 2025-06-25T14:30:34.906067Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 1, Generation 2 2025-06-25T14:30:34.906083Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 2, Generation 2 2025-06-25T14:30:34.906096Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 2, NodeId 1, Generation 2 2025-06-25T14:30:34.906110Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 1, Generation 2 2025-06-25T14:30:34.906124Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037899, partitionId 4, NodeId 2, Generation 2 2025-06-25T14:30:34.906142Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 5, NodeId 2, Generation 2 2025-06-25T14:30:34.906157Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 6, NodeId 2, Generation 2 2025-06-25T14:30:34.906170Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 7, NodeId 1, Generation 2 2025-06-25T14:30:34.906182Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 8, NodeId 1, Generation 2 2025-06-25T14:30:34.906200Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 9, NodeId 1, Generation 2 2025-06-25T14:30:34.906214Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] 
addPartitionToResponse tabletId 72075186224037896, partitionId 10, NodeId 2, Generation 2 2025-06-25T14:30:34.906225Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 11, NodeId 2, Generation 2 2025-06-25T14:30:34.906236Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 12, NodeId 1, Generation 2 2025-06-25T14:30:34.906246Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 13, NodeId 1, Generation 2 2025-06-25T14:30:34.906259Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 14, NodeId 2, Generation 2 2025-06-25T14:30:34.906298Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7519894317561578677:2550]: Got location 2025-06-25T14:30:34.906436Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894317561578679:2551] disconnected; active server actors: 1 2025-06-25T14:30:34.906452Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894317561578679:2551] disconnected no session operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribeTopicResult] { self { name: "rt3.dc1--topic-x" owner: "root@builtin" type: TOPIC created_at { plan_step: 1750861833819 tx_id: 281474976710676 } } partitioning_settings { min_active_partitions: 15 max_active_partitions: 1 auto_partitioning_settings { strategy: AUTO_PARTITIONING_STRATEGY_DISABLED partition_write_speed { stabilization_window { seconds: 300 } up_utilization_percent: 80 down_utilization_percent: 20 } } } partitions { active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 1 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 2 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 3 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 4 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 5 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 6 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 7 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 8 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 9 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 10 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 11 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 12 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 13 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 14 active: true partition_location { node_id: 2 generation: 2 } } retention_period { seconds: 64800 } partition_write_speed_bytes_per_second: 2097152 partition_write_burst_bytes: 2097152 attributes { key: 
"__max_partition_message_groups_seqno_stored" value: "6000000" } attributes { key: "_message_group_seqno_retention_period_ms" value: "1382400000" } consumers { name: "shared/user" read_from { } attributes { key: "_service_type" value: "data-streams" } } } } } Describe topic with no stats or location Got response: 2025-06-25T14:30:34.910396Z node 1 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-25T14:30:34.910505Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ//rt3.dc1--topic-x operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribeTopicResult] { self { name: "rt3.dc1--topic-x" owner: "root@builtin" type: TOPIC created_at { plan_step: 1750861833819 tx_id: 281474976710676 } } partitioning_settings { min_active_partitions: 15 max_active_partitions: 1 auto_partitioning_settings { strategy: AUTO_PARTITIONING_STRATEGY_DISABLED partition_write_speed { stabilization_window { seconds: 300 } up_utilization_percent: 80 down_utilization_percent: 20 } } } partitions { active: true } partitions { partition_id: 1 active: true } partitions { partition_id: 2 active: true } partitions { partition_id: 3 active: true } partitions { partition_id: 4 active: true } partitions { partition_id: 5 active: true } partitions { partition_id: 6 active: true } partitions { partition_id: 7 active: true } partitions { partition_id: 8 active: true } partitions { partition_id: 9 active: true } partitions { partition_id: 10 active: true } partitions { partition_id: 11 active: true } partitions { partition_id: 12 active: true } partitions { partition_id: 13 active: true } partitions { partition_id: 14 active: true } retention_period { seconds: 64800 } partition_write_speed_bytes_per_second: 2097152 partition_write_burst_bytes: 2097152 attributes { key: "__max_partition_message_groups_seqno_stored" value: "6000000" } attributes { key: "_message_group_seqno_retention_period_ms" value: "1382400000" } consumers { name: "shared/user" read_from { } attributes { key: "_service_type" value: "data-streams" } } } } } Describe bad topic 2025-06-25T14:30:34.915073Z node 1 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-25T14:30:34.915225Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ//bad-topic Got response: operation { ready: true status: SCHEME_ERROR issues { message: "path \'Root/PQ/bad-topic\' does not exist or you do not have access rights" issue_code: 500018 severity: 1 } } >> TTopicApiDescribes::GetPartitionDescribe [GOOD] >> TSchemeshardStatsBatchingTest::TopicAccountSizeAndUsedReserveSize >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery [GOOD] >> BasicUsage::TWriteSession_WriteEncoded >> TCmsTenatsTest::TestClusterRatioLimit [GOOD] >> TCmsTenatsTest::TestClusterRatioLimitForceRestartMode >> Cdc::ShouldBreakLocksOnConcurrentDropIndex [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentCancelBuildIndex >> TCmsTenatsTest::RequestShutdownHostWithTenantPolicy [GOOD] >> TCmsTenatsTest::TestClusterLimitForceRestartMode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SelfJoin [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/yft8/0015a3/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk5 Trying to start YDB, gRPC: 16458, MsgBus: 6814 2025-06-25T14:30:30.240252Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894303291038200:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:30.250944Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015a3/r3tmp/tmp3Ytenp/pdisk_1.dat 2025-06-25T14:30:30.782272Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:30.808712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:30.812990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:30.815540Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16458, node 1 2025-06-25T14:30:30.911346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:30.911381Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:30.911394Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:30.911512Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6814 2025-06-25T14:30:31.242846Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6814 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:31.526550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:30:31.549903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:31.659832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:31.797865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:31.860240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:33.429710Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894316175941695:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:33.429910Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:33.735195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.768812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.836253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.888560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.957319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:34.040832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:34.111066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:34.198982Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894320470909661:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:34.199089Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:34.199164Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894320470909666:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:34.202505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:34.213998Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894320470909668:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:30:34.309996Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894320470909721:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:35.226758Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894303291038200:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:35.226838Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:35.458608Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:161;event=channel_info;ch_size=50;ch_count=1;ch_limit=50;inputs=0;input_channels_count=0; 2025-06-25T14:30:35.458770Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:134: SelfId: [1:7519894324765877306:2475], TxId: 281474976710672, task: 1. Ctx: { TraceId : 01jykqyv46663r310j3xxm8tr8. SessionId : ydb://session/3?node_id=1&id=ZjUxZTlmNTItOTc4MmEzODYtYzhiMmFiODQtNzIwNjZkNDE=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Start comput ... atabase : /Root. PoolId : default. }. CA StateFunc 271646927 2025-06-25T14:30:36.593379Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844805:2528], TxId: 281474976710683, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:30:36.593427Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519894329060844805:2528], TxId: 281474976710683, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T14:30:36.593601Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:2021: SelfId: [1:7519894329060844805:2528], TxId: 281474976710683, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Send stats to executor actor [1:7519894329060844800:2520] TaskId: 2 Stats: CpuTimeUs: 11769 DurationUs: 21000 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 3660 FinishTimeMs: 1750861836593 InputRows: 20 InputBytes: 560 OutputRows: 10 OutputBytes: 500 ComputeCpuTimeUs: 2172 BuildCpuTimeUs: 1488 WaitOutputTimeUs: 9889 WaitInputTimeUs: 806 HostName: "ghrun-kqfvx6aroe" NodeId: 1 StartTimeMs: 1750861836572 CreateTimeMs: 1750861836533 CurrentWaitOutputTimeUs: 32 UpdateTimeMs: 1750861836593 } MaxMemoryUsage: 104857600 2025-06-25T14:30:36.593776Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. 
Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646923 2025-06-25T14:30:36.593803Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976710683, task: 3. Finish input channelId: 3, from: [1:7519894329060844805:2528] 2025-06-25T14:30:36.593826Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T14:30:36.593905Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844805:2528], TxId: 281474976710683, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646927 2025-06-25T14:30:36.593916Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844805:2528], TxId: 281474976710683, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:30:36.593960Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710683, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [10] 2025-06-25T14:30:36.593974Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710683, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 2, seqNo: [10] 2025-06-25T14:30:36.593983Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710683, task: 2. Tasks execution finished 2025-06-25T14:30:36.593995Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519894329060844805:2528], TxId: 281474976710683, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-25T14:30:36.594071Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710683, task: 2. pass away 2025-06-25T14:30:36.594160Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710683;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:30:36.594446Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T14:30:36.596101Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T14:30:36.596172Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T14:30:36.596667Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T14:30:36.597033Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T14:30:36.597120Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T14:30:36.597163Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T14:30:36.597478Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. 
CA StateFunc 271646922 2025-06-25T14:30:36.597491Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T14:30:36.597640Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T14:30:36.597695Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T14:30:36.597960Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T14:30:36.597993Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710683, task: 3. Tasks execution finished, don't wait for ack delivery in input channelId: 3, seqNo: [11] 2025-06-25T14:30:36.598003Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710683, task: 3. Tasks execution finished 2025-06-25T14:30:36.598039Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519894329060844806:2529], TxId: 281474976710683, task: 3. Ctx: { TraceId : 01jykqyvspdr8q4qj59kf9jva4. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NzU0ZjkwOWUtOWFlYzFhYTAtOTQ2ZWRjM2YtYjRhNWZjYjQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-06-25T14:30:36.598121Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710683, task: 3. pass away 2025-06-25T14:30:36.598198Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710683;task_id=3;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:30:36.599568Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861836570, txId: 281474976710682] shutting down |77.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::SamePriorityRequest2 [GOOD] >> Cdc::InitialScanDebezium [GOOD] >> Cdc::InitialScanRacyCompleteAndRequest >> TTxDataShardMiniKQL::MemoryUsageImmediateSmallTx |77.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TMaintenanceApiTest::CompositeActionGroupSameStorageGroup [GOOD] >> TMaintenanceApiTest::ActionReason >> Cdc::ResolvedTimestamps [GOOD] >> Cdc::ResolvedTimestampsMultiplePartitions |77.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |77.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |77.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |77.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TCmsTest::TestSetResetMarkers [GOOD] >> TCmsTest::TestProcessingQueue ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::GetPartitionDescribe [GOOD] Test command err: 2025-06-25T14:30:24.169473Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894276786881924:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:24.170727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:24.226387Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894275816291043:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:24.229284Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:24.487763Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:30:24.501926Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001cd3/r3tmp/tmpW7M1J9/pdisk_1.dat 2025-06-25T14:30:24.943825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:24.945224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:24.946257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:24.946320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:24.954293Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:30:24.954436Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
2025-06-25T14:30:24.956332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:24.960022Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27506, node 1 2025-06-25T14:30:25.168446Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:25.272295Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:25.319678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001cd3/r3tmp/yandex4CPhA7.tmp 2025-06-25T14:30:25.319704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001cd3/r3tmp/yandex4CPhA7.tmp 2025-06-25T14:30:25.319849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001cd3/r3tmp/yandex4CPhA7.tmp 2025-06-25T14:30:25.319976Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:25.392390Z INFO: TTestServer started on Port 65444 GrpcPort 27506 TClient is connected to server localhost:65444 PQClient connected to localhost:27506 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:25.722911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:30:25.820333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 
2025-06-25T14:30:28.398439Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894293966752068:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:28.398498Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894293966752060:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:28.398764Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:28.402333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:28.422909Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894293966752075:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:30:28.488600Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894293966752162:2757] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:28.711669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:28.711816Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894293966752172:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:28.711406Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894292996160579:2280], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:28.711645Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=YTc3OWQ3YzUtY2Y4NGJlOTEtZjJkOWJlYjYtMzc5ZGQxMWU=, ActorId: [2:7519894292996160540:2274], ActorState: ExecuteState, TraceId: 01jykqymaw6z7n40xdgxcj3vxb, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:28.712796Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODJlZjljYTUtNDYwMjM2MTYtYjNjNzUzZTctNDdlMjc5Yzk=, ActorId: [1:7519894293966752058:2299], ActorState: ExecuteState, TraceId: 01jykqym9cedbpv2364vmryfe5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:28.713842Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:28.714315Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:28.798988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:28.910003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster ... ampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-25T14:30:36.122422Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037896, Partition: 5, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 5 generation 2 [1:7519894328326492581:2525] 2025-06-25T14:30:36.122425Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037897] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:30:36.122581Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037898, Partition: 14, State: StateInit] bootstrapping 14 [1:7519894328326492642:2529] 2025-06-25T14:30:36.122460Z node 2 :PERSQUEUE INFO: pq_impl.cpp:788: [PQ: 72075186224037897] has a tx writes info 2025-06-25T14:30:36.122697Z node 1 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:10:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:30:36.122549Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:30:36.122720Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037896, Partition: 10, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 10 generation 2 [1:7519894328326492583:2526] 2025-06-25T14:30:36.122566Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:30:36.122580Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037892, Partition: 3, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 3 generation 2 [2:7519894327355900051:2422] 2025-06-25T14:30:36.123662Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037898, Partition: 11, State: StateInit] bootstrapping 11 [1:7519894328326492653:2530] 2025-06-25T14:30:36.122582Z node 2 :PERSQUEUE INFO: pq_impl.cpp:788: [PQ: 72075186224037894] has a tx writes info 2025-06-25T14:30:36.122857Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-25T14:30:36.122876Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037895, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 2 generation 2 [2:7519894327355900044:2419] 2025-06-25T14:30:36.123223Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037894, Partition: 8, State: StateInit] bootstrapping 8 [2:7519894327355900149:2430] 2025-06-25T14:30:36.123621Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037894, Partition: 12, State: StateInit] bootstrapping 12 [2:7519894327355900151:2432] 2025-06-25T14:30:36.126260Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037896, NodeId 1, Generation 2 2025-06-25T14:30:36.126296Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037895, NodeId 2, Generation 2 2025-06-25T14:30:36.126717Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037897, Partition: 13, State: StateInit] bootstrapping 13 [2:7519894327355900150:2431] 2025-06-25T14:30:36.126732Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037897, Partition: 7, State: StateInit] bootstrapping 7 [2:7519894327355900148:2429] 2025-06-25T14:30:36.128012Z node 1 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:14:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:30:36.128049Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037898, Partition: 14, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 14 generation 2 [1:7519894328326492642:2529] 2025-06-25T14:30:36.130149Z node 1 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:11:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:30:36.130187Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037898, Partition: 11, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 11 generation 2 [1:7519894328326492653:2530] 2025-06-25T14:30:36.129513Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:30:36.129580Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037892, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 0 generation 2 [2:7519894327355900074:2423] 2025-06-25T14:30:36.131399Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037898, NodeId 1, Generation 2 2025-06-25T14:30:36.131432Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037892, NodeId 2, Generation 2 2025-06-25T14:30:36.131640Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:8:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-25T14:30:36.131660Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037894, Partition: 8, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 8 generation 2 [2:7519894327355900149:2430] 2025-06-25T14:30:36.131807Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:12:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:30:36.131835Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037894, Partition: 12, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 12 generation 2 [2:7519894327355900151:2432] 2025-06-25T14:30:36.132516Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037894, NodeId 2, Generation 2 2025-06-25T14:30:36.133382Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:7:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:30:36.133403Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037897, Partition: 7, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 7 generation 2 [2:7519894327355900148:2429] 2025-06-25T14:30:36.135361Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic-x:13:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:30:36.135383Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037897, Partition: 13, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 13 generation 2 [2:7519894327355900150:2431] 2025-06-25T14:30:36.135644Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037897, NodeId 2, Generation 2 2025-06-25T14:30:36.783337Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:166: new Describe partition request 2025-06-25T14:30:36.783465Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1209: TDescribePartitionActor for request path: "/Root/PQ//rt3.dc1--topic-x" partition_id: 1 include_location: true 2025-06-25T14:30:36.783512Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1219: TDescribePartitionActor[1:7519894328326492793:2550]: Bootstrap 2025-06-25T14:30:36.785102Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519894328326492793:2550]: Request location 2025-06-25T14:30:36.786776Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7519894328326492793:2550]: Got location Got response: 2025-06-25T14:30:36.786282Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894328326492795:2551] connected; active server actors: 1 2025-06-25T14:30:36.786642Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 1, Generation 2 2025-06-25T14:30:36.787526Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894328326492795:2551] disconnected; active server actors: 1 2025-06-25T14:30:36.787557Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894328326492795:2551] disconnected no session operation { ready: true status: SUCCESS result { 
[type.googleapis.com/Ydb.Topic.DescribePartitionResult] { partition { partition_id: 1 active: true partition_location { node_id: 1 generation: 2 } } } } } 2025-06-25T14:30:36.793239Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:166: new Describe partition request 2025-06-25T14:30:36.793341Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1209: TDescribePartitionActor for request path: "/Root/PQ//rt3.dc1--topic-x" partition_id: 3 include_stats: true include_location: true 2025-06-25T14:30:36.793375Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1219: TDescribePartitionActor[1:7519894328326492796:2552]: Bootstrap 2025-06-25T14:30:36.793898Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7519894328326492796:2552]: Request location 2025-06-25T14:30:36.794460Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894328326492799:2554] connected; active server actors: 1 2025-06-25T14:30:36.794728Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 2, Generation 2 2025-06-25T14:30:36.795684Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894328326492799:2554] disconnected; active server actors: 1 Got response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribePartitionResult] { partition { partition_id: 3 active: true partition_stats { partition_offsets { } last_write_time { seconds: 1750861836 nanos: 105000000 } max_write_time_lag { } bytes_written { } partition_node_id: 2 } partition_location { node_id: 2 generation: 2 } } } } } 2025-06-25T14:30:36.795717Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7519894328326492799:2554] disconnected no session 2025-06-25T14:30:36.795354Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7519894328326492796:2552]: Got location Got response: operation { ready: true status: SCHEME_ERROR issues { message: "path \'Root/PQ/bad-topic\' does not exist or you do not have access rights" issue_code: 500018 severity: 1 } } 2025-06-25T14:30:36.803377Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:166: new Describe partition request 2025-06-25T14:30:36.803537Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1209: TDescribePartitionActor for request path: "/Root/PQ//bad-topic" include_stats: true include_location: true 2025-06-25T14:30:36.803603Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1219: TDescribePartitionActor[1:7519894328326492801:2555]: Bootstrap >> TCmsTest::Mirror3dcPermissions [GOOD] >> TContinuousBackupTests::Basic >> TSchemeshardStatsBatchingTest::TopicAccountSizeAndUsedReserveSize [GOOD] >> UpsertLoad::ShouldWriteKqpUpsertKeyFrom >> TContinuousBackupTests::TakeIncrementalBackup >> TTxDataShardMiniKQL::MemoryUsageImmediateSmallTx [GOOD] >> TTxDataShardMiniKQL::MemoryUsageImmediateMediumTx |77.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::Mirror3dcPermissions [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::TopicAccountSizeAndUsedReserveSize [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is 
[0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:38.767880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:38.768002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:38.768035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:38.768068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:38.768099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:38.768120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:38.768153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:38.768212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:38.768930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:38.769294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:38.838326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:38.838391Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:38.855924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:38.856275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:38.858845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:38.866607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:38.866906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:38.867532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:38.867887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:38.870868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:38.871054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:38.872273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:38.872370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:38.872614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:38.872652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:38.872681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:38.872791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:38.890835Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:30:39.048608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:39.048858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:39.049115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:39.049166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:39.049393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:39.049461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:39.054380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:39.054558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: 
StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:39.054773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:39.054837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:39.054875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:39.054906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:39.058117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:39.058183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:39.058229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:39.060172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:39.060224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:39.060285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:39.060385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:39.065315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:39.067409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:39.067605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:39.068515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:39.068624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:39.068659Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:39.068874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:39.068910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:39.069041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:39.069104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:39.070690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:39.070723Z node 1 :FLAT_TX_SCHEMESHARD ... SQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186233409551][Topic3] Discovered subdomain [OwnerId: 72057594046678944, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186233409551 2025-06-25T14:30:39.818342Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186233409547][Topic1] Discovered subdomain [OwnerId: 72057594046678944, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186233409547 2025-06-25T14:30:39.819856Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:30:39.823608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:30:39.823681Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:30:39.823832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:30:39.824459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:30:39.824514Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:30:39.825995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:30:39.826125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:30:39.826322Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877763, Sender [1:1036:2895], Recipient [1:288:2273]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594037968897 ClientId: [1:1036:2895] ServerId: [1:1038:2897] } 2025-06-25T14:30:39.826361Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5051: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:30:39.826394Z node 1 :FLAT_TX_SCHEMESHARD 
TRACE: schemeshard_impl.cpp:5847: Client pipe, to tablet: 72057594037968897, from:72057594046678944 is reset TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-25T14:30:39.826733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-25T14:30:39.826786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-25T14:30:39.827271Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:1052:2911], Recipient [1:288:2273]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:39.827318Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:39.827354Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:30:39.827521Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124996, Sender [1:553:2485], Recipient [1:288:2273]: NKikimrScheme.TEvNotifyTxCompletion TxId: 104 2025-06-25T14:30:39.827559Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-25T14:30:39.827623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-25T14:30:39.827722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:30:39.827757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:1050:2909] 2025-06-25T14:30:39.827913Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [1:1052:2911], Recipient [1:288:2273]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:30:39.827949Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:30:39.827989Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 2025-06-25T14:30:39.828633Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:1053:2912], Recipient [1:288:2273]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-25T14:30:39.828700Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:30:39.828790Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:30:39.829023Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 216us result status StatusSuccess 2025-06-25T14:30:39.829480Z node 1 :SCHEMESHARD_DESCRIBE 
DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 808 AccountSize: 808 DataSize: 31 UsedReserveSize: 31 } } PQPartitionsInside: 4 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:39.830323Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271188001, Sender [1:1054:2913], Recipient [1:288:2273]: NKikimrPQ.TEvPeriodicTopicStats PathId: 4 Generation: 1 Round: 6 DataSize: 151 UsedReserveSize: 151 2025-06-25T14:30:39.830371Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4993: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-25T14:30:39.830405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 4] DataSize 151 UsedReserveSize 151 2025-06-25T14:30:39.830440Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-06-25T14:30:39.830908Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:1055:2914], Recipient [1:288:2273]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-25T14:30:39.830953Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:30:39.831068Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:30:39.833338Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 223us result status StatusSuccess 2025-06-25T14:30:39.833841Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 808 AccountSize: 808 DataSize: 182 UsedReserveSize: 182 } } PQPartitionsInside: 4 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TopicAutoscaling::Simple_BeforeAutoscaleAwareSDK >> CommitOffset::PartitionSplit_OffsetCommit >> TSchemeShardTest::AlterIndexTableDirectly [GOOD] >> CommitOffset::Commit_WithoutSession_TopPast >> TCmsTenatsTest::TestClusterRatioLimitForceRestartMode [GOOD] >> TCmsTenatsTest::TestClusterLimitForceRestartModeScheduled >> TMaintenanceApiTest::ActionReason [GOOD] >> TCmsTenatsTest::TestClusterLimitForceRestartMode [GOOD] >> TTxDataShardMiniKQL::MemoryUsageImmediateMediumTx [GOOD] >> TTxDataShardMiniKQL::MemoryUsageMultiShard >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldHandleCompactionTimeouts [GOOD] >> KqpScanArrowFormat::AggregateCountStar >> TCmsTest::TestProcessingQueue [GOOD] >> TCmsTest::TestLogOperationsRollback |77.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::ActionReason [GOOD] |77.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> 
TCmsTenatsTest::TestClusterLimitForceRestartMode [GOOD] >> KqpScanArrowInChanels::AllTypesColumns >> TContinuousBackupTests::Basic [GOOD] >> TContinuousBackupTests::TakeIncrementalBackup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::AlterIndexTableDirectly [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:32.302653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:32.302787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:32.302839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:32.302879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:29:32.302926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:32.302969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:32.303043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:32.303158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:32.303928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:32.304385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:32.514668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:32.514731Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:32.534055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:32.534556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:32.534754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:32.542619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:32.543098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners 
number: 0 2025-06-25T14:29:32.543891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.544246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:32.549308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:32.549536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:32.551021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:32.551094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:32.551282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:32.551345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:29:32.551396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:32.551505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.559706Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:29:32.720125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:32.720428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.720654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:32.720708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:32.720951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:32.721027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:32.723319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.723519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:32.723740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.723803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:32.723856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:32.723897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:32.725820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.725914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:32.725962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:32.727753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.727801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.727881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:32.727937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:32.731489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:32.733475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:32.733641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:32.734587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.734724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:32.734768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:32.735079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:32.735127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:32.735302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:32.735377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:29:32.737676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:32.737722Z node 1 :FLAT_TX_SCHEMESHARD ... 0 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:40.353802Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/table/indexByValue" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:30:40.354358Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/table/indexByValue" took 603us result status StatusSuccess 2025-06-25T14:30:40.355688Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/table/indexByValue" PathDescription { Self { Name: "indexByValue" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 3 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 
5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1592 DataSize: 1592 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "indexByValue" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 3 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 
ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 100500 MinPartitionsCount: 1 FastSplitSettings { SizeThreshold: 100500 RowCountThreshold: 100500 } } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:40.366312Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/table/indexByValue/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:30:40.366825Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/table/indexByValue/indexImplTable" took 548us result status StatusSuccess 2025-06-25T14:30:40.368179Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/table/indexByValue/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 4 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 
ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 100500 MinPartitionsCount: 1 FastSplitSettings { SizeThreshold: 100500 RowCountThreshold: 100500 } } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409552 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 3 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1592 DataSize: 1592 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldHandleCompactionTimeouts [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: 
[1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:44.595659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:44.595737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:44.595787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:44.595836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:44.595891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:44.595922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:44.595974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:44.596045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:44.596931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:44.597301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:44.677720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:44.677777Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:44.696135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:44.696576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:44.696730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:44.702888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:44.703156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:44.703612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:44.703833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:44.707028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:44.707195Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:44.708464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:44.708527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:44.708677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:44.708731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:44.708776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:44.708866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:44.715577Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:44.846784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:44.847008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:44.847229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:44.847276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:44.847497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:44.847571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:44.850006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:44.850179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:44.850358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:44.850426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:44.850462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:44.850493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:44.852218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:44.852264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:44.852304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:44.854030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:44.854079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:44.854124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:44.854183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:44.857676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:44.859571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:44.859761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:44.860705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:44.860850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:44.860905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:44.861253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:44.861314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:44.861486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:44.861566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:44.863693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:44.863740Z node 1 :FLAT_TX_SCHEMESHARD ... [0:0:0], Recipient [3:314:2299]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-25T14:30:38.521553Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-25T14:30:38.521649Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409546 outdated step 5000002 last cleanup 0 2025-06-25T14:30:38.521711Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409546 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:30:38.521745Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409546 2025-06-25T14:30:38.521778Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409546 has no attached operations 2025-06-25T14:30:38.521809Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409546 2025-06-25T14:30:38.521937Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:314:2299]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-25T14:30:38.522157Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409546, FollowerId 0, tableId 2 2025-06-25T14:30:38.523608Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:314:2299], Recipient [3:129:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409546 TableLocalId: 2 Generation: 2 Round: 7 TableStats { DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 80 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 141 Memory: 124232 Storage: 14156 GroupWriteThroughput { GroupID: 0 Channel: 0 Throughput: 261 } GroupWriteThroughput { GroupID: 0 Channel: 1 Throughput: 444 } GroupWriteIops { GroupID: 0 Channel: 0 Iops: 1 } } ShardState: 2 
UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 41 TableOwnerId: 72057594046678944 FollowerId: 2025-06-25T14:30:38.523663Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:30:38.523705Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 0.0141 2025-06-25T14:30:38.523806Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 80 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:30:38.523853Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-25T14:30:38.524791Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435080, Sender [3:1063:3004], Recipient [3:314:2299]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvAsyncTableStats 2025-06-25T14:30:38.584648Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:30:38.584714Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:30:38.584743Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 1 2025-06-25T14:30:38.584804Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-25T14:30:38.584838Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-25T14:30:38.584931Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 13940 row count 100 2025-06-25T14:30:38.584991Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940 2025-06-25T14:30:38.585023Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-25T14:30:38.585095Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:1 with partCount# 1, rowCount# 100, searchHeight# 1, lastFullCompaction# 1970-01-01T00:01:20.000000Z at 
schemeshard 72057594046678944 2025-06-25T14:30:38.585165Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 1 out of 1 partitions 2025-06-25T14:30:38.585248Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:30:38.596413Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:30:38.596497Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:30:38.596531Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:30:38.913085Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:38.913169Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:38.913255Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:129:2153], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:38.913288Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:39.308761Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:39.308840Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:39.308937Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:129:2153], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:39.308969Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:39.725333Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:39.725420Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:39.725508Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:129:2153], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:39.725538Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:40.110388Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:129:2153]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:40.110470Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:40.110573Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:129:2153], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:40.110607Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:40.516701Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:40.516800Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:40.516908Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:129:2153], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:40.516946Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:40.556714Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:314:2299]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-25T14:30:40.940683Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:40.940754Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:40.940830Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:129:2153], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:40.940860Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TCmsTest::WalleTasksWithNodeLimit [GOOD] >> TCmsTest::WalleTasksDifferentPriorities >> KqpScanArrowFormat::SingleKey >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeSave [GOOD] >> KqpScanArrowInChanels::AggregateNoColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup/unittest >> TContinuousBackupTests::Basic [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:40.577094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:40.577184Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:40.577231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:40.577265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:40.577308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:40.577344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:40.577406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:40.577477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:40.578238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:40.578598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:40.664663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:40.664763Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:40.684780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:40.685240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:40.685433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:40.691239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:40.691509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:40.692287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:40.692557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:40.695559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:40.695757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:40.696873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:40.696944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:40.697075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:40.697113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:40.697160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:40.697236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:40.703441Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:40.914197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:40.914512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:40.914759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:40.914805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:40.915057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:40.915136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:40.929209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:40.929466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:40.929683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:40.929756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:40.929807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:40.929859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:40.990499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:40.990591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:40.990635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:40.993454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:40.993530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:40.993594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:40.993664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:41.036953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:41.045296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:41.045563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:41.046706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:41.046872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:41.046927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:41.047270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:41.047346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T14:30:41.047566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:41.047662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:41.058425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:41.058497Z node 1 :FLAT_TX_SCHEMESHARD ... 72057594046678944, cookie: 104 2025-06-25T14:30:42.246903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1006 } } CommitVersion { Step: 5000005 TxId: 104 } 2025-06-25T14:30:42.246978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-06-25T14:30:42.247137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1006 } } CommitVersion { Step: 5000005 TxId: 104 } 2025-06-25T14:30:42.247252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1006 } } CommitVersion { Step: 5000005 TxId: 104 } FAKE_COORDINATOR: Erasing txId 104 2025-06-25T14:30:42.248449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-25T14:30:42.248500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-06-25T14:30:42.248625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-25T14:30:42.248676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:30:42.248783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 104:0 
HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-25T14:30:42.248845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 104:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:42.248878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:30:42.248912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 104:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:30:42.248973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 129 -> 240 2025-06-25T14:30:42.254561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:30:42.255024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:30:42.255353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:30:42.255396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-25T14:30:42.255493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 3/3 2025-06-25T14:30:42.255531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-25T14:30:42.255568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 3/3 2025-06-25T14:30:42.255597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-25T14:30:42.255630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: true 2025-06-25T14:30:42.255700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 104 2025-06-25T14:30:42.255760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-25T14:30:42.255805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:30:42.255834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:30:42.255970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:30:42.256019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:1 
2025-06-25T14:30:42.256041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:1 2025-06-25T14:30:42.256070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:30:42.256090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:2 2025-06-25T14:30:42.256117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:2 2025-06-25T14:30:42.256179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-25T14:30:42.256695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:30:42.256749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-25T14:30:42.256821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:30:42.256859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:30:42.256907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:30:42.268734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:30:42.268819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:729:2641] 2025-06-25T14:30:42.269571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 2025-06-25T14:30:42.270253Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:30:42.270548Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl" took 253us result status StatusPathDoesNotExist 2025-06-25T14:30:42.270707Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/continuousBackupImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/Table/continuousBackupImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self 
{ Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:30:42.271216Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:30:42.271413Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl/streamImpl" took 205us result status StatusPathDoesNotExist 2025-06-25T14:30:42.271609Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/continuousBackupImpl/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TraverseColumnShard::TraverseColumnTableRebootSaTabletInAggregate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_continuous_backup/unittest >> TContinuousBackupTests::TakeIncrementalBackup [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:40.695409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:40.695508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:40.695554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:40.695587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:40.695632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:40.695660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: 
OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:40.695716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:40.695778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:40.696607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:40.696951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:40.833826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:40.833912Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:40.872702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:40.873339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:40.873543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:40.917550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:40.917999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:40.918779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:40.919146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:40.941210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:40.941453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:40.942883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:40.942976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:40.943139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:40.943199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:40.943249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:40.943346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at 
schemeshard: 72057594046678944 2025-06-25T14:30:40.957811Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:41.367353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:41.367651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:41.367915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:41.367976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:41.368303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:41.372563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:41.385814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:41.386078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:41.386343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:41.386421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:41.386491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:41.386557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:41.393473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:41.393584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:41.393646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:41.401468Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:41.401555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:41.401639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:41.401725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:41.414340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:41.433339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:41.433620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:41.435058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:41.435249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:41.435323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:41.435697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:41.435777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:41.435994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:41.436099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:41.445719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:41.445791Z node 1 :FLAT_TX_SCHEMESHARD ... 
alPathId: 3] was 3 2025-06-25T14:30:42.658325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:1 2025-06-25T14:30:42.658372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:1 2025-06-25T14:30:42.658478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:30:42.658513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:2 2025-06-25T14:30:42.658534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:2 2025-06-25T14:30:42.658582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-25T14:30:42.658606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:3 2025-06-25T14:30:42.658626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:3 2025-06-25T14:30:42.658681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:30:42.665328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:30:42.665415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:724:2626] TestWaitNotification: OK eventTxId 103 2025-06-25T14:30:42.666099Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:30:42.666396Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl" took 322us result status StatusSuccess 2025-06-25T14:30:42.666952Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl" PathDescription { Self { Name: "IncrBackupImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY 
ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:42.667578Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:30:42.667838Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl/streamImpl" took 226us result status StatusSuccess 2025-06-25T14:30:42.668401Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" PathDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeStreamImpl Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409548 } PersQueueGroup { Name: "streamImpl" PathId: 4 TotalGroupCount: 1 PartitionPerTablet: 2 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 } TopicName: "continuousBackupImpl" TopicPath: "/MyRoot/Table/continuousBackupImpl/streamImpl" YdbDatabasePath: "/MyRoot" PartitionKeySchema { Name: "key" TypeId: 4 } MeteringMode: 
METERING_MODE_REQUEST_UNITS OffloadConfig { IncrementalBackup { DstPath: "/MyRoot/IncrBackupImpl" DstPathId { OwnerId: 72057594046678944 LocalId: 5 } } } } Partitions { PartitionId: 0 TabletId: 72075186233409547 Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409548 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:42.669467Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:30:42.669707Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl" took 259us result status StatusSuccess 2025-06-25T14:30:42.670239Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl" PathDescription { Self { Name: "IncrBackupImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 
LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpRe2::IncorrectRegexWithoutExecutionNoError [GOOD] >> TStoragePoolsStatsPersistence::SameAggregatedStatsAfterRestart [GOOD] >> TCmsTenatsTest::TestClusterLimitForceRestartModeScheduled [GOOD] >> TCmsTenatsTest::TestClusterRatioLimitForceRestartModeScheduled |77.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |77.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |77.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |77.4%| [LD] {RESULT} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |77.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |77.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |77.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpScanArrowFormat::AllTypesColumns |77.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... 
results_accumulator.log} >> Balancing::Balancing_OneTopic_TopicApi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TStoragePoolsStatsPersistence::SameAggregatedStatsAfterRestart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:38.489473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:38.489581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:38.489617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:38.489656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:38.489706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:38.489735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:38.489780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:38.489847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:38.490625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:38.490995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:38.559492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:38.559576Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:38.585314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:38.585804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:38.586027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:38.592223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:38.592644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:38.593111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: 
TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:38.593339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:38.596390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:38.596572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:38.597795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:38.597852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:38.597975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:38.598032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:38.598068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:38.598159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:38.604044Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:38.723756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:38.724056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:38.724288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:38.724375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:38.724627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:38.724703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
2025-06-25T14:30:38.733764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:38.734020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:38.734257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:38.734310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:38.734345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:38.734401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:38.738482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:38.738551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:38.738594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:38.740895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:38.740956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:38.741019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:38.741083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:38.758621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:38.764992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:38.765172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:38.766237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at 
schemeshard: 72057594046678944 2025-06-25T14:30:38.766391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:38.766436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:38.766729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:38.766778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:38.766943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:38.767019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:38.773900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:38.773968Z node 1 :FLAT_TX_SCHEMESHARD D ... shard: 72057594046678944 2025-06-25T14:30:43.689312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:43.689492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-25T14:30:43.689589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:30:43.689816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 2, at schemeshard: 72057594046678944 2025-06-25T14:30:43.689978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.690081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:43.690116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:30:43.690224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:43.690401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:43.690703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 3, at schemeshard: 72057594046678944 2025-06-25T14:30:43.691011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, 
at schemeshard: 72057594046678944 2025-06-25T14:30:43.691133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.691474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.691539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.691748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.691860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.691950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.692130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.692216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.692380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.692614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.692694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.692865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.692922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.692971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:30:43.708924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:43.717795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:43.717883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:43.724683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:43.724778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:43.724863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:43.733419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 
72057594046678944 Leader for TabletID 72057594046678944 is [1:759:2710] sender: [1:813:2058] recipient: [1:15:2062] 2025-06-25T14:30:43.791039Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:30:43.791301Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeTable" took 318us result status StatusSuccess 2025-06-25T14:30:43.791706Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeTable" PathDescription { Self { Name: "SomeTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "SomeTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 Family: 1 FamilyName: "alternative" NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 4140 RowCount: 100 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { PoolsUsage { PoolKind: "pool-kind-1" DataSize: 1020 IndexSize: 0 } PoolsUsage { PoolKind: "pool-kind-2" DataSize: 3120 IndexSize: 0 } } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 82488 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 4140 DataSize: 4140 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 1020 DataSize: 1020 IndexSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-2" TotalSize: 3120 DataSize: 3120 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 
10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:43.794036Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:30:43.794238Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 243us result status StatusSuccess 2025-06-25T14:30:43.794687Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SomeTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 4140 DataSize: 4140 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 1020 DataSize: 1020 IndexSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-2" TotalSize: 3120 DataSize: 3120 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeSave [GOOD] Test command err: 2025-06-25T14:27:46.713682Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:27:46.713996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:46.714035Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0020b1/r3tmp/tmpnM46dz/pdisk_1.dat 2025-06-25T14:27:47.437661Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3293, node 1 2025-06-25T14:27:48.292420Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:27:48.292494Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:27:48.292525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:27:48.292897Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:48.295234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:48.499006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:48.499142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:48.529911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29664 2025-06-25T14:27:49.427890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:27:53.374232Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:27:53.451196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:53.451315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:53.510684Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:27:53.513075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:53.742200Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:53.766029Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.766540Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.767073Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.767440Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.767521Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.767615Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.767702Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.767773Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.767889Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:53.958886Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:53.959005Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:53.978686Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:54.109819Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:54.158192Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:27:54.158329Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:27:54.188835Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:27:54.190782Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:27:54.191006Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:27:54.191084Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:27:54.191157Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:27:54.191208Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:27:54.191260Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:27:54.191311Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:27:54.191898Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:27:54.273154Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:27:54.273273Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1788:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:27:54.301818Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2571] 2025-06-25T14:27:54.306956Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1831:2586] 2025-06-25T14:27:54.307684Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1831:2586], schemeshard id = 72075186224037897 2025-06-25T14:27:54.312799Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:27:54.340868Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:27:54.340947Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:27:54.341037Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:27:54.358372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:54.363756Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:27:54.363875Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:27:54.649359Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:27:54.867679Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:27:54.955417Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:27:55.724953Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:56.049375Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2142:3018], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:56.049511Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:56.069235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:27:56.378282Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2807];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:27:56.378538Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2807];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:27:56.378828Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2807];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:27:56.378952Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2807];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:27:56.379056Z node 2 :TX_COLUMNSHARD WARN: ... 292:6109], server id = [2:8293:6110], tablet id = 72075186224037894 2025-06-25T14:30:41.357524Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8429:6185] 2025-06-25T14:30:41.357575Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8429:6185] 2025-06-25T14:30:41.357842Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:8430:6186], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:30:41.461673Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:30:41.461795Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:30:41.462363Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:30:41.463113Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:30:41.463479Z node 2 :STATISTICS DEBUG: tx_init.cpp:55: [72075186224037894] Loaded database: /Root/Database 2025-06-25T14:30:41.463539Z node 2 :STATISTICS DEBUG: tx_init.cpp:59: [72075186224037894] Loaded traversal start key 2025-06-25T14:30:41.463582Z node 2 :STATISTICS DEBUG: tx_init.cpp:64: [72075186224037894] Loaded traversal table owner id: 72075186224037897 2025-06-25T14:30:41.463625Z node 2 :STATISTICS DEBUG: tx_init.cpp:69: [72075186224037894] Loaded traversal table local path id: 4 2025-06-25T14:30:41.463671Z node 2 :STATISTICS DEBUG: tx_init.cpp:74: [72075186224037894] Loaded traversal start time: 1750861841197341 2025-06-25T14:30:41.463710Z node 2 :STATISTICS DEBUG: tx_init.cpp:79: [72075186224037894] Loaded traversal IsColumnTable: 1 2025-06-25T14:30:41.463751Z node 2 :STATISTICS DEBUG: tx_init.cpp:84: [72075186224037894] Loaded global traversal round: 2 2025-06-25T14:30:41.463833Z node 2 :STATISTICS DEBUG: 
tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 1 2025-06-25T14:30:41.463917Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:30:41.464026Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-25T14:30:41.464095Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:30:41.464163Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:30:41.464237Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:30:41.464452Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:30:41.465874Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:30:41.466895Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T14:30:41.466978Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T14:30:41.467139Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:30:41.469088Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T14:30:41.469159Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T14:30:41.469809Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T14:30:41.553617Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T14:30:41.553900Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T14:30:41.554734Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8477:6217], server id = [2:8481:6221], tablet id = 72075186224037899, status = OK 2025-06-25T14:30:41.554847Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8477:6217], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:30:41.554976Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8478:6218], server id = [2:8482:6222], tablet id = 72075186224037900, status = OK 2025-06-25T14:30:41.555015Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8478:6218], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:30:41.555876Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8479:6219], server id = [2:8483:6223], tablet id = 72075186224037901, status = OK 2025-06-25T14:30:41.555931Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8479:6219], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:30:41.556300Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8480:6220], server id = [2:8484:6224], tablet id = 72075186224037902, status = OK 2025-06-25T14:30:41.560486Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8480:6220], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:30:41.562278Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T14:30:41.562590Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T14:30:41.562927Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8477:6217], server id = [2:8481:6221], tablet id = 72075186224037899 2025-06-25T14:30:41.562960Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:30:41.563195Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8478:6218], server id = [2:8482:6222], tablet id = 72075186224037900 2025-06-25T14:30:41.563215Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:30:41.563273Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T14:30:41.563374Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T14:30:41.563406Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T14:30:41.563556Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8479:6219], server id = [2:8483:6223], tablet id = 72075186224037901 2025-06-25T14:30:41.563577Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:30:41.563680Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-25T14:30:41.563862Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T14:30:41.564078Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T14:30:41.575697Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8480:6220], server id = [2:8484:6224], tablet id = 72075186224037902 2025-06-25T14:30:41.575747Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:30:41.584808Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T14:30:41.684375Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8505:6245]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:30:41.684650Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:30:41.684717Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8505:6245], StatRequests.size() = 1 2025-06-25T14:30:41.848829Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NjkxMGQ3YWMtMjZhNjA1OWMtZWJhMzFiZTAtOWNmOGZlNQ==, TxId: 2025-06-25T14:30:41.848930Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NjkxMGQ3YWMtMjZhNjA1OWMtZWJhMzFiZTAtOWNmOGZlNQ==, TxId: 2025-06-25T14:30:41.849595Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:30:41.866750Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8520:6251] 2025-06-25T14:30:41.866902Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8429:6185], server id = [2:8520:6251], tablet id = 72075186224037894, status = OK 2025-06-25T14:30:41.867050Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8520:6251], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-25T14:30:41.867229Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8521:6252] 2025-06-25T14:30:41.867319Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:8521:6252], schemeshard id = 72075186224037897 2025-06-25T14:30:41.881201Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:30:41.881273Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:30:41.985202Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8524:6255]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T14:30:41.985504Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T14:30:41.985577Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T14:30:41.988046Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T14:30:41.988105Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T14:30:41.988156Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T14:30:41.992344Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> TTxDataShardMiniKQL::MemoryUsageMultiShard [GOOD] |77.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |77.4%| [LD] {RESULT} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |77.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpRe2::IncorrectRegexWithoutExecutionNoError [GOOD] Test command err: Trying to start YDB, gRPC: 14893, MsgBus: 26406 2025-06-25T14:30:29.510550Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894295448565001:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:29.524804Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015a8/r3tmp/tmpMRImUg/pdisk_1.dat 2025-06-25T14:30:29.968043Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:29.968140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:29.975266Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:29.981691Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:29.984465Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894295448564893:2080] 1750861829476409 != 1750861829476412 TServer::EnableGrpc on GrpcPort 14893, node 1 2025-06-25T14:30:30.204881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:30.204907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:30.204913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:30.205000Z node 
1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26406 2025-06-25T14:30:30.529099Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26406 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:31.018309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:31.033306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:30:31.051355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:31.202832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:31.380237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:30:31.446550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:33.244768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894312628435701:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:33.244887Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:33.626438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.677060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.713706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.739633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.765630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.819568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.850534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.936019Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894312628436358:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:33.936107Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:33.936186Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894312628436363:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:33.944806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:33.956290Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894312628436365:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:34.012304Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894316923403712:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:34.510887Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894295448565001:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:34.510953Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ( (let $1 (DataType 'Bool)) (let $2 '('('"_logical_id" '235) '('"_id" '"385aad61-a3f2a9b5-ac7682f0-91d364d2") '('"_partition_mode" '"single"))) (let $3 (DqPhyStage '() (lambda '() (block '( (let $5 (String '"a[x")) (let $6 (OptionalType (StructType '('"CaseSensitive" $1) '('"DotNl" $1) '('"Literal" $1) '('"LogErrors" $1) '('"Lon ... e 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:36.151475Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:36.180905Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:36.180934Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:36.180941Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:36.181053Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26418 TClient is connected to server localhost:26418 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:36.924331Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:30:36.930568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:30:36.943721Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:37.008482Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:37.015509Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:37.156036Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:37.244094Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:39.435117Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894341229497500:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:39.435212Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:39.492232Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:39.538288Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:39.578864Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:39.620405Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:39.660020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:39.738424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:39.788305Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:39.906578Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894341229498163:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:39.906661Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:39.906679Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894341229498168:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:39.911220Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:39.924721Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894341229498170:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:39.991364Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894341229498221:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:40.993825Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519894324049626708:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:40.993912Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (DataType 'Uint64)) (let $4 (DataType 'String)) (let $5 (OptionalType $4)) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($10) (FromFlow (Filter (ToFlow $10) (lambda '($11) (block '( (let $12 (DataType 'Bool)) (let $13 (OptionalType (StructType '('"CaseSensitive" $12) '('"DotNl" $12) '('"Literal" $12) '('"LogErrors" $12) '('"LongestMatch" $12) '('"MaxMem" $3) '('"NeverCapture" $12) '('"NeverNl" $12) '('"OneLine" $12) '('"PerlClasses" $12) '('"PosixSyntax" $12) '('"Utf8" $12) '('"WordBoundary" $12)))) (let $14 (CallableType '() '($12) '($5))) (let $15 (Udf '"Re2.Grep" '((String '"[") (Nothing $13)) (VoidType) '"" $14 (TupleType $4 $13) '"" '())) (return (Or (Coalesce (== (Member $11 '"Key") (Int32 '1)) (Bool 'false)) (Apply $15 (Member $11 '"Value")))) )))))) '('('"_logical_id" '493) '('"_id" '"e38e9325-88d60f05-5a4e6bc4-f2070643")))) (let $7 (DqCnUnionAll (TDqOutput $6 '"0"))) (let $8 (DqPhyStage '($7) (lambda '($16) $16) '('('"_logical_id" '573) '('"_id" '"16167e96-8ec777a3-d4e9a963-684d8792")))) (let $9 (DqCnResult (TDqOutput $8 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($6 $8) '($9) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType (StructType '('"Key" (OptionalType $3)) '('"Value" $5))) '"0" '"0")) '('('"type" '"query")))) ) >> TopicAutoscaling::PartitionSplit_BeforeAutoscaleAwareSDK >> TopicAutoscaling::ReadingAfterSplitTest_BeforeAutoscaleAwareSDK ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootSaTabletInAggregate [GOOD] Test command err: 2025-06-25T14:27:49.102198Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:27:49.102602Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:27:49.102653Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0020a8/r3tmp/tmp1P0ooU/pdisk_1.dat 2025-06-25T14:27:49.699227Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19189, node 1 2025-06-25T14:27:50.219255Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:27:50.219323Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:27:50.219357Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:27:50.219749Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:27:50.236739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:27:50.379038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:50.379175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:50.427392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7388 2025-06-25T14:27:51.204820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:27:55.218251Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:27:55.290784Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:55.290917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:55.340013Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:27:55.345783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:55.600697Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:55.634176Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:55.634764Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:55.635343Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:55.635747Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:55.635843Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:55.635932Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:55.636021Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:55.636100Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:55.636184Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:27:55.897825Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:27:55.897949Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:27:55.915755Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:27:56.259607Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:27:56.341073Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:27:56.341333Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:27:56.397325Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:27:56.399417Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:27:56.399659Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:27:56.399727Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:27:56.399784Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:27:56.399847Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:27:56.399904Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:27:56.399974Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:27:56.400610Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:27:56.465617Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:27:56.465728Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1788:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:27:56.494309Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2571] 2025-06-25T14:27:56.517806Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1831:2586] 2025-06-25T14:27:56.518729Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1831:2586], schemeshard id = 72075186224037897 2025-06-25T14:27:56.525528Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:27:56.577588Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:27:56.577659Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:27:56.577741Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:27:56.595744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:27:56.603862Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:27:56.604047Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:27:56.892484Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:27:57.185678Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:27:57.281010Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:27:58.050826Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:27:58.387359Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2142:3018], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:58.387553Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:27:58.413090Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:27:58.751839Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2807];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:27:58.752124Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2807];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:27:58.752811Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2807];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:27:58.753036Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2807];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:27:58.753222Z node 2 :TX_COLUMNSHARD WARN: ... STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8399:6163], server id = [2:8403:6167], tablet id = 72075186224037901 2025-06-25T14:30:41.492969Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:30:41.625892Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:30:41.626042Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:30:41.626647Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:30:41.627253Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:30:41.627602Z node 2 :STATISTICS DEBUG: tx_init.cpp:55: [72075186224037894] Loaded database: /Root/Database 2025-06-25T14:30:41.627660Z node 2 :STATISTICS DEBUG: tx_init.cpp:59: [72075186224037894] Loaded traversal start key 2025-06-25T14:30:41.627711Z node 2 :STATISTICS DEBUG: tx_init.cpp:64: [72075186224037894] Loaded traversal table owner id: 72075186224037897 2025-06-25T14:30:41.627751Z node 2 :STATISTICS DEBUG: tx_init.cpp:69: [72075186224037894] Loaded traversal table local path id: 4 2025-06-25T14:30:41.627798Z node 2 :STATISTICS DEBUG: tx_init.cpp:74: [72075186224037894] Loaded traversal start time: 1750861841329669 2025-06-25T14:30:41.627844Z node 2 :STATISTICS DEBUG: tx_init.cpp:79: [72075186224037894] Loaded traversal IsColumnTable: 1 2025-06-25T14:30:41.627888Z node 2 :STATISTICS DEBUG: tx_init.cpp:84: [72075186224037894] Loaded global traversal round: 2 2025-06-25T14:30:41.627990Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 1 2025-06-25T14:30:41.628062Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:30:41.628173Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] 
Loaded ScheduleTraversals: table count# 2 2025-06-25T14:30:41.628257Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:30:41.628346Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:30:41.628425Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:30:41.628587Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:30:41.630288Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:30:41.630939Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:30:41.631558Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T14:30:41.631642Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T14:30:41.632764Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T14:30:41.632836Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T14:30:41.634593Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T14:30:41.738023Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T14:30:41.738273Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 2 2025-06-25T14:30:41.738748Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8398:6162], server id = [2:8402:6166], tablet id = 72075186224037900 2025-06-25T14:30:41.738801Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:30:41.739518Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8462:6207], server id = [2:8466:6211], tablet id = 72075186224037899, status = OK 2025-06-25T14:30:41.739642Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8462:6207], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:30:41.740011Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8463:6208], server id = [2:8467:6212], tablet id = 72075186224037900, status = OK 2025-06-25T14:30:41.740090Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8463:6208], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:30:41.743639Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8464:6209], server id = [2:8469:6214], tablet id = 72075186224037901, status = OK 2025-06-25T14:30:41.743715Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8464:6209], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:30:41.743893Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8465:6210], server id = [2:8468:6213], tablet id = 72075186224037902, status = OK 2025-06-25T14:30:41.743935Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest 
send, client id = [2:8465:6210], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:30:41.745593Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T14:30:41.746545Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8462:6207], server id = [2:8466:6211], tablet id = 72075186224037899 2025-06-25T14:30:41.746589Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:30:41.747321Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T14:30:41.747522Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T14:30:41.747995Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8465:6210], server id = [2:8468:6213], tablet id = 72075186224037902 2025-06-25T14:30:41.748025Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:30:41.761264Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8464:6209], server id = [2:8469:6214], tablet id = 72075186224037901 2025-06-25T14:30:41.761328Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:30:41.762819Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T14:30:41.762896Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T14:30:41.763176Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T14:30:41.763412Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T14:30:41.763723Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T14:30:41.778064Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8463:6208], server id = [2:8467:6212], tablet id = 72075186224037900 2025-06-25T14:30:41.778130Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:30:41.778873Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T14:30:41.942148Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8492:6236]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:30:41.942347Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:30:41.942397Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8492:6236], StatRequests.size() = 1 2025-06-25T14:30:42.087983Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODdiZjZlNGUtZTQ4YzIxMDQtNmJmMzBlNzEtMTJiMzc0ZWE=, TxId: 2025-06-25T14:30:42.088062Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODdiZjZlNGUtZTQ4YzIxMDQtNmJmMzBlNzEtMTJiMzc0ZWE=, TxId: 2025-06-25T14:30:42.088866Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:30:42.101658Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8507:6242] 2025-06-25T14:30:42.101879Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:8507:6242], schemeshard id = 72075186224037897 2025-06-25T14:30:42.101968Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8413:6176], server id = [2:8508:6243], tablet id = 72075186224037894, status = OK 2025-06-25T14:30:42.102078Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8508:6243] 2025-06-25T14:30:42.102148Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8508:6243], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-25T14:30:42.116527Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:30:42.116600Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:30:42.195432Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8511:6246]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T14:30:42.195758Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T14:30:42.195811Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T14:30:42.203266Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T14:30:42.203354Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T14:30:42.203405Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T14:30:42.212884Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::MemoryUsageMultiShard [GOOD] Test command err: 2025-06-25T14:30:39.381290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:39.381350Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:39.382560Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:30:39.393724Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:30:39.394115Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T14:30:39.394356Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:30:39.404487Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:30:39.447093Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:30:39.447315Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:30:39.449485Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T14:30:39.449560Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T14:30:39.449629Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T14:30:39.450010Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:30:39.450093Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:30:39.450161Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T14:30:39.510531Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 
2025-06-25T14:30:39.543546Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T14:30:39.543722Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:30:39.543814Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T14:30:39.543849Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T14:30:39.543875Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T14:30:39.543900Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:30:39.544063Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:30:39.544111Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:30:39.544388Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T14:30:39.544505Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T14:30:39.544573Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T14:30:39.544606Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:30:39.544645Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T14:30:39.544676Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T14:30:39.544720Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T14:30:39.544751Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T14:30:39.544791Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T14:30:39.544876Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:39.544924Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:39.544974Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T14:30:39.548205Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T14:30:39.548272Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:30:39.548405Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:30:39.548556Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T14:30:39.548611Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T14:30:39.548667Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T14:30:39.548708Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T14:30:39.548740Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T14:30:39.548771Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T14:30:39.548802Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T14:30:39.549150Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T14:30:39.549201Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T14:30:39.549240Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T14:30:39.549273Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T14:30:39.549333Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T14:30:39.549367Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T14:30:39.549413Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T14:30:39.549443Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T14:30:39.549469Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T14:30:39.564171Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T14:30:39.564268Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T14:30:39.564306Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T14:30:39.564374Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T14:30:39.564444Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T14:30:39.564964Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:39.565034Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:39.565078Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T14:30:39.565226Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-25T14:30:39.565257Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T14:30:39.565410Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-25T14:30:39.565452Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-25T14:30:39.565485Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-25T14:30:39.565515Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-06-25T14:30:39.569112Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-25T14:30:39.569183Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:30:39.569428Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:30:39.569486Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:30:39.569553Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T14:30:39.569594Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:30:39.569630Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T14:30:39.569668Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2:1] in PlanQueue unit at 9437184 2025-06-25T14:30:39.569699Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2:1] at 9437184 on unit PlanQueue 2025-06-25T14:30:39. ... 
ressTransaction} at tablet 9437185 (3 by [3:372:2315]) from queue queue_transaction 2025-06-25T14:30:44.035879Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:372:2315]) to queue queue_transaction 2025-06-25T14:30:44.035922Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_transaction from 16.936776 to 33.873553 (insert task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:372:2315])) 2025-06-25T14:30:44.036002Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T14:30:44.036036Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437184 on unit ExecuteDataTx 2025-06-25T14:30:44.036983Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 5 at 9437184 restored its data 2025-06-25T14:30:44.357888Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [6:5] at tablet 9437184 with status COMPLETE 2025-06-25T14:30:44.358463Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [6:5] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 2, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 22, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T14:30:44.358800Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T14:30:44.358995Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437184 executing on unit ExecuteDataTx 2025-06-25T14:30:44.359144Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437184 to execution unit CompleteOperation 2025-06-25T14:30:44.359439Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437184 on unit CompleteOperation 2025-06-25T14:30:44.360179Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437184 is DelayComplete 2025-06-25T14:30:44.360271Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437184 executing on unit CompleteOperation 2025-06-25T14:30:44.360527Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437184 to execution unit CompletedOperations 2025-06-25T14:30:44.360633Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437184 on unit CompletedOperations 2025-06-25T14:30:44.360702Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437184 is Executed 2025-06-25T14:30:44.360821Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437184 executing on unit CompletedOperations 2025-06-25T14:30:44.360953Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [6:5] at 9437184 has finished 2025-06-25T14:30:44.361136Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:30:44.361250Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T14:30:44.361517Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 
9437184 has no attached operations 2025-06-25T14:30:44.361646Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-25T14:30:44.362058Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437184:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} hope 5 -> done Change{16, redo 636b alter 0b annex 0, ~{ 1001, 1, 3, 4, 12, 7, 8, 5 } -{ }, 0 gb} 2025-06-25T14:30:44.362221Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437184:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} release Res{3 96990534b}, Memory{0 dyn 0} 2025-06-25T14:30:44.362836Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437184 (3 by [3:260:2229]) (release resources {0, 96990534}) 2025-06-25T14:30:44.362980Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_transaction from 33.873553 to 16.936776 (remove task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437184 (3 by [3:260:2229])) 2025-06-25T14:30:44.363288Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} acquired dyn mem Res{3 96990534b}, Memory{0 dyn 96990534} 2025-06-25T14:30:44.363567Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437185 2025-06-25T14:30:44.363648Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 on unit ExecuteDataTx 2025-06-25T14:30:44.366754Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 5 at 9437185 restored its data 2025-06-25T14:30:44.835767Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [6:5] at tablet 9437185 with status COMPLETE 2025-06-25T14:30:44.835888Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [6:5] at 9437185: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 2, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 22, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T14:30:44.835969Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is ExecutedNoMoreRestarts 2025-06-25T14:30:44.836007Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437185 executing on unit ExecuteDataTx 2025-06-25T14:30:44.836043Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437185 to execution unit CompleteOperation 2025-06-25T14:30:44.836076Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 on unit CompleteOperation 2025-06-25T14:30:44.836365Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is DelayComplete 2025-06-25T14:30:44.836398Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437185 executing on unit CompleteOperation 2025-06-25T14:30:44.836427Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437185 to execution unit CompletedOperations 2025-06-25T14:30:44.836456Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 on unit CompletedOperations 2025-06-25T14:30:44.836518Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is 
Executed 2025-06-25T14:30:44.836545Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437185 executing on unit CompletedOperations 2025-06-25T14:30:44.836573Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [6:5] at 9437185 has finished 2025-06-25T14:30:44.836604Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:30:44.836630Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-25T14:30:44.836659Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-25T14:30:44.836687Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-25T14:30:44.836809Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} hope 5 -> done Change{16, redo 636b alter 0b annex 0, ~{ 1001, 1, 3, 4, 12, 7, 8, 5 } -{ }, 0 gb} 2025-06-25T14:30:44.836882Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} release Res{3 96990534b}, Memory{0 dyn 0} 2025-06-25T14:30:44.842263Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:372:2315]) (release resources {0, 96990534}) 2025-06-25T14:30:44.842356Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_transaction from 16.936776 to 0.000000 (remove task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:372:2315])) 2025-06-25T14:30:44.869814Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:10} commited cookie 1 for step 9 2025-06-25T14:30:44.869963Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-25T14:30:44.870028Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:5] at 9437185 on unit CompleteOperation 2025-06-25T14:30:44.870150Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 5] from 9437185 at tablet 9437185 send result to client [3:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T14:30:44.870333Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437185 {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437185 SetTabletConsumer# 9437185 Flags# 0 Seqno# 2} 2025-06-25T14:30:44.870444Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-25T14:30:44.871023Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437184:3:10} commited cookie 1 for step 9 2025-06-25T14:30:44.871071Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T14:30:44.871104Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:5] at 9437184 on unit CompleteOperation 2025-06-25T14:30:44.871178Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 5] from 9437184 at tablet 9437184 send result to client [3:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T14:30:44.871236Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437184 
SetTabletConsumer# 9437184 Flags# 0 Seqno# 1} 2025-06-25T14:30:44.871267Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:30:44.871593Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [3:350:2315], Recipient [3:460:2399]: {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437185 SetTabletConsumer# 9437185 Flags# 0 Seqno# 2} 2025-06-25T14:30:44.871663Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T14:30:44.871753Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437186 source 9437186 dest 9437185 consumer 9437185 txId 5 2025-06-25T14:30:44.871891Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [3:238:2229], Recipient [3:460:2399]: {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 1} 2025-06-25T14:30:44.871926Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T14:30:44.871953Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437186 source 9437186 dest 9437184 consumer 9437184 txId 5 >> TopicAutoscaling::ControlPlane_BackCompatibility >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_BeforeAutoscaleAwareSDK >> TCmsTest::WalleTasksDifferentPriorities [GOOD] >> UpsertLoad::ShouldWriteKqpUpsertKeyFrom [GOOD] >> Cdc::AddIndex [GOOD] >> TCmsTenatsTest::TestClusterRatioLimitForceRestartModeScheduled [GOOD] >> Cdc::AddStream |77.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::WalleTasksDifferentPriorities [GOOD] >> DataShardVolatile::DistributedWriteThenImmediateUpsert >> DataShardVolatile::DistributedWrite >> Cdc::InitialScanRacyCompleteAndRequest [GOOD] >> Cdc::InitialScanUpdatedRows >> TopicAutoscaling::PartitionSplit_PQv1 |77.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestClusterRatioLimitForceRestartModeScheduled [GOOD] >> TopicAutoscaling::PartitionMerge_PreferedPartition_BeforeAutoscaleAwareSDK ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteKqpUpsertKeyFrom [GOOD] Test command err: 2025-06-25T14:30:44.129840Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:30:44.130014Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:30:44.139157Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001aa3/r3tmp/tmpoaxQXy/pdisk_1.dat 2025-06-25T14:30:44.551458Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:30:44.554633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:44.668619Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:44.690136Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861840295304 != 1750861840295308 2025-06-25T14:30:44.746448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:44.746583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:44.759597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:44.861040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:45.368226Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertKqpStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } UpsertKqpStart { RowCount: 20 Inflight: 5 KeyFrom: 12345 } 2025-06-25T14:30:45.368422Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:298: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 2} Bootstrap called: RowCount: 20 Inflight: 5 KeyFrom: 12345 2025-06-25T14:30:45.372441Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:361: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 2} started# 5 actors each with inflight# 4 2025-06-25T14:30:45.372533Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 1} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-25T14:30:45.372610Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 2} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-25T14:30:45.372640Z node 1 :DS_LOAD_TEST NOTICE: 
kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 3} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-25T14:30:45.372677Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 4} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-25T14:30:45.372784Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 5} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-25T14:30:45.376360Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 1} session: ydb://session/3?node_id=1&id=OWUxMTcxMmMtYzRkYThkMDMtZDM2NDdkMDAtOTE2OTA0MGU= 2025-06-25T14:30:45.378998Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 2} session: ydb://session/3?node_id=1&id=NjBlOWQ0MzctN2ZkMGNiYzMtNmNjZGNhMmMtZGFjMmQ3ZWM= 2025-06-25T14:30:45.379062Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 3} session: ydb://session/3?node_id=1&id=MTQ3MDc2ZDMtZDRjNTc0ODctMTBjYmIwZWUtNDFlYjk5NzQ= 2025-06-25T14:30:45.380576Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 4} session: ydb://session/3?node_id=1&id=YTU3MGQzOS04ZTQ5ODQzMS1jODAyYmRjMy0xN2E1ZDMyZQ== 2025-06-25T14:30:45.382101Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 5} session: ydb://session/3?node_id=1&id=ZGVhNTg0OTktNDQ5YmY0YTgtNmJjN2JhM2ItOGFkMGQzYTY= 2025-06-25T14:30:45.386444Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:709:2591], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.386556Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:733:2609], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.386599Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:734:2610], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.386647Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:735:2611], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.386692Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:736:2612], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.386750Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:738:2614], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.386829Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.394826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:45.456903Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:754:2630] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:30:45.457780Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:755:2631] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:30:45.458575Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:756:2632] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:30:45.459810Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:757:2633] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:30:45.505061Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:45.640976Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:30:45.641073Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:746:2622], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:30:45.641122Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:747:2623], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:30:45.641213Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:748:2624], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:30:45.641266Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:749:2625], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:30:45.677895Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:851:2692] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:46.253907Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 2} finished in 1750861846.253828s, errors=0 2025-06-25T14:30:46.254260Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 2 { Tag: 2 DurationMs: 1750861846253 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:30:46.271848Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:924:2730] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:46.343203Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:974:2751] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:46.362280Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 4} finished in 1750861846.362242s, errors=0 2025-06-25T14:30:46.362524Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 4 { Tag: 4 DurationMs: 1750861846362 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:30:46.384004Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1002:2764] txid# 281474976715676, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:46.457885Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 1} finished in 1750861846.457829s, errors=0 2025-06-25T14:30:46.458357Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 1 { Tag: 1 DurationMs: 1750861846457 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:30:46.475843Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 3} finished in 1750861846.475801s, errors=0 2025-06-25T14:30:46.476194Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 3 { Tag: 3 DurationMs: 1750861846475 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:30:46.490460Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1073:2792] txid# 281474976715683, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:46.563122Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: 
[1:696:2578], subTag: 5} finished in 1750861846.563077s, errors=0 2025-06-25T14:30:46.563437Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 5 { Tag: 5 DurationMs: 1750861846563 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:30:46.563514Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:395: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 2} finished in 1.191297s, oks# 20, errors# 0 2025-06-25T14:30:46.563631Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:696:2578] with tag# 2 >> TCmsTest::TestLogOperationsRollback [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql >> Cdc::ResolvedTimestampsMultiplePartitions [GOOD] >> Cdc::ResolvedTimestampsVolatileOutOfOrder ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::TestLogOperationsRollback [GOOD] Test command err: 2025-06-25T14:30:35.983766Z node 1 :CMS ERROR: info_collector.cpp:281: [InfoCollector] Couldn't get base config 2025-06-25T14:30:36.139798Z node 1 :CMS ERROR: info_collector.cpp:281: [InfoCollector] Couldn't get base config 2025-06-25T14:30:36.155291Z node 1 :CMS ERROR: info_collector.cpp:281: [InfoCollector] Couldn't get base config 2025-06-25T14:30:36.305029Z node 1 :CMS ERROR: info_collector.cpp:281: [InfoCollector] Couldn't get base config 2025-06-25T14:30:40.547370Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 17:17 2025-06-25T14:30:40.547454Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 18:18 2025-06-25T14:30:40.547480Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 19:19 2025-06-25T14:30:40.547524Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 20:20 2025-06-25T14:30:40.547550Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 21:21 2025-06-25T14:30:40.547573Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 22:22 2025-06-25T14:30:40.547607Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 23:23 2025-06-25T14:30:40.547631Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 24:24 |77.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |77.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |77.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkqlKeyFrom >> KqpScanArrowFormat::AggregateCountStar [GOOD] >> KqpScanArrowFormat::AggregateByColumn >> UpsertLoad::ShouldWriteDataBulkUpsertBatch >> KqpScanArrowFormat::SingleKey [GOOD] >> KqpScanArrowFormat::JoinWithParams >> KqpScanArrowInChanels::AllTypesColumns [GOOD] >> UpsertLoad::ShouldCreateTable >> KqpScanArrowInChanels::SingleKey >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test [GOOD] >> ReadLoad::ShouldReadKqp >> KqpScanArrowFormat::AllTypesColumns [GOOD] >> KqpScanArrowFormat::AllTypesColumnsCellvec >> Cdc::ShouldBreakLocksOnConcurrentCancelBuildIndex [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentAddStream |77.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |77.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |77.5%| 
[LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |77.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql2 >> DataShardSnapshots::PostMergeNotCompactedTooEarly [GOOD] >> DataShardSnapshots::PipelineAndMediatorRestoreRace >> KqpScanArrowInChanels::AggregateNoColumn [GOOD] >> KqpScanArrowInChanels::AggregateNoColumnNoRemaps >> DataShardVolatile::DistributedWriteThenImmediateUpsert [GOOD] >> DataShardVolatile::DistributedWriteThenSplit >> UpsertLoad::ShouldWriteKqpUpsert >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkqlKeyFrom [GOOD] >> DataShardVolatile::DistributedWrite [GOOD] >> DataShardVolatile::DistributedWriteBrokenLock >> UpsertLoad::ShouldWriteDataBulkUpsertBatch [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertKeyFrom >> UpsertLoad::ShouldCreateTable [GOOD] >> UpsertLoad::ShouldDropCreateTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkqlKeyFrom [GOOD] Test command err: 2025-06-25T14:30:54.150540Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:30:54.150681Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:30:54.150763Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a7a/r3tmp/tmp12TUyi/pdisk_1.dat 2025-06-25T14:30:54.509029Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:30:54.512227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:54.574027Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:54.579067Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861850144891 != 1750861850144895 2025-06-25T14:30:54.630086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:54.630207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:54.643887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:54.731071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:55.101097Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertLocalMkqlStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertLocalMkqlStart { RowCount: 10 Inflight: 3 KeyFrom: 12345 } 2025-06-25T14:30:55.101281Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:695:2577], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 KeyFrom: 12345 with type# 1, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-25T14:30:55.222759Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:695:2577], subTag: 2} TUpsertActor finished in 0.121038s, errors=0 2025-06-25T14:30:55.222853Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:696:2578] with tag# 2 >> KqpScanArrowInChanels::SingleKey [GOOD] >> KqpScanArrowInChanels::JoinWithParams >> DataShardSnapshots::DelayedWriteReplyAfterSplit [GOOD] >> DataShardSnapshots::DelayedWriteReadableAfterSplitAndReboot >> 
KqpScanArrowFormat::AggregateByColumn [GOOD] >> KqpScanArrowFormat::AggregateNoColumn >> KqpScanArrowFormat::AllTypesColumnsCellvec [GOOD] >> KqpScanArrowFormat::AggregateNoColumnNoRemaps >> KqpScanArrowFormat::JoinWithParams [GOOD] >> KqpScanArrowInChanels::AggregateCountStar >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedAfterSplitMerge [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleBorrowCompactionTimeouts >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql2 [GOOD] >> TopicAutoscaling::ControlPlane_BackCompatibility [GOOD] >> TopicAutoscaling::ControlPlane_AutoscalingWithStorageSizeRetention >> TopicAutoscaling::Simple_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::Simple_AutoscaleAwareSDK >> Cdc::AddStream [GOOD] >> Cdc::AwsRegion >> Cdc::InitialScanUpdatedRows [GOOD] >> Cdc::InitialScanAndLimits ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql2 [GOOD] Test command err: 2025-06-25T14:30:52.360863Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:30:52.361004Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:30:52.361081Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a91/r3tmp/tmpGRUbZD/pdisk_1.dat 2025-06-25T14:30:52.786272Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:30:52.789431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:52.959286Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:52.973962Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861848859775 != 1750861848859779 2025-06-25T14:30:53.030261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:53.030489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:53.043785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:53.145736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:53.651782Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertLocalMkqlStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertLocalMkqlStart { RowCount: 10 Inflight: 3 } 2025-06-25T14:30:53.651925Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:695:2577], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 1, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-25T14:30:53.771590Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:695:2577], subTag: 2} TUpsertActor finished in 0.119210s, errors=0 2025-06-25T14:30:53.771708Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:696:2578] with tag# 2 2025-06-25T14:30:57.163927Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:30:57.164102Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:30:57.164237Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a91/r3tmp/tmpmII2JT/pdisk_1.dat 2025-06-25T14:30:57.426787Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T14:30:57.428186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:57.457369Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:57.459371Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750861854261235 != 1750861854261239 2025-06-25T14:30:57.506800Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:57.506921Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:57.519018Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:57.612296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:57.920550Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertLocalMkqlStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" } UpsertLocalMkqlStart { RowCount: 10 Inflight: 3 } 2025-06-25T14:30:57.920689Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:695:2577], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 1, target# TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" 2025-06-25T14:30:58.074391Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:695:2577], subTag: 2} TUpsertActor finished in 0.153316s, errors=0 2025-06-25T14:30:58.074498Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:696:2578] with tag# 2 >> UpsertLoad::ShouldWriteDataBulkUpsertKeyFrom [GOOD] >> UpsertLoad::ShouldWriteKqpUpsert [GOOD] |77.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |77.5%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut 
|77.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteKqpUpsert [GOOD] Test command err: 2025-06-25T14:30:58.550289Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:30:58.550462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:30:58.550539Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a5f/r3tmp/tmpCOqrkm/pdisk_1.dat 2025-06-25T14:30:58.897060Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:30:58.912799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:58.970859Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:58.976089Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861855670825 != 1750861855670829 2025-06-25T14:30:59.024512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:59.024660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:59.036173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:59.133569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:59.548327Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertKqpStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } UpsertKqpStart { RowCount: 20 Inflight: 5 } 2025-06-25T14:30:59.548478Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:298: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 2} Bootstrap called: RowCount: 20 Inflight: 5 2025-06-25T14:30:59.552184Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:361: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 2} started# 5 actors each with inflight# 4 2025-06-25T14:30:59.552266Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 1} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-25T14:30:59.552352Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 2} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-25T14:30:59.552382Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: 
[1:696:2578], subTag: 3} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-25T14:30:59.552409Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 4} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-25T14:30:59.552472Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 5} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-25T14:30:59.555954Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 1} session: ydb://session/3?node_id=1&id=YTg1ZWRiYzktNTcxMGE3ODktMmFkNjZiMjUtNjAxYWU5ZjI= 2025-06-25T14:30:59.559301Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 2} session: ydb://session/3?node_id=1&id=NGU4MWJkMzYtYzU5YTRhMzctODNmZTQ2MWMtNmVlNzU2MjA= 2025-06-25T14:30:59.559375Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 3} session: ydb://session/3?node_id=1&id=NTAzYjViMGEtNjUwYjRhNTQtNDE3NDg3NzMtYThlMmQzOTY= 2025-06-25T14:30:59.560809Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 4} session: ydb://session/3?node_id=1&id=MzE3NWU1ZWQtN2M0ZDY5OTctNWE1MmYzNTMtZjkxNWJhNjI= 2025-06-25T14:30:59.562182Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 5} session: ydb://session/3?node_id=1&id=NjI1YzlmYjItY2Q5NGYxOTktNjk2NzNiZjItMThkY2EzNTY= 2025-06-25T14:30:59.566270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:709:2591], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:59.566374Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:733:2609], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:59.566461Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:734:2610], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:59.566531Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:735:2611], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:59.566576Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:736:2612], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:59.566621Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:738:2614], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:59.566701Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:59.574176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:59.633652Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:754:2630] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:30:59.634577Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:755:2631] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:30:59.635308Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:756:2632] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:30:59.636696Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:757:2633] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:30:59.680668Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:59.804514Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:30:59.804611Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:746:2622], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:30:59.804647Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:747:2623], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:30:59.804705Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:748:2624], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:30:59.804742Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:749:2625], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:30:59.839445Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:851:2692] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:00.253888Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 3} finished in 1750861860.253836s, errors=0 2025-06-25T14:31:00.254279Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 3 { Tag: 3 DurationMs: 1750861860253 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:31:00.272067Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:924:2730] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:00.338710Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 2} finished in 1750861860.338661s, errors=0 2025-06-25T14:31:00.339072Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 2 { Tag: 2 DurationMs: 1750861860338 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:31:00.353042Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:975:2752] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:00.429817Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 4} finished in 1750861860.429768s, errors=0 2025-06-25T14:31:00.430115Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 4 { Tag: 4 DurationMs: 1750861860429 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:31:00.444231Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1026:2774] txid# 281474976715678, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:00.516222Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 1} finished in 1750861860.516174s, errors=0 2025-06-25T14:31:00.516389Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 1 { Tag: 1 DurationMs: 1750861860516 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:31:00.530217Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1077:2796] txid# 281474976715683, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:00.613995Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: 
[1:696:2578], subTag: 5} finished in 1750861860.613952s, errors=0 2025-06-25T14:31:00.614379Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 5 { Tag: 5 DurationMs: 1750861860613 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:31:00.614442Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:395: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 2} finished in 1.062436s, oks# 20, errors# 0 2025-06-25T14:31:00.614606Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:696:2578] with tag# 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertKeyFrom [GOOD] Test command err: 2025-06-25T14:30:54.551807Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:30:54.551977Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:30:54.552059Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a73/r3tmp/tmpoXNlAF/pdisk_1.dat 2025-06-25T14:30:54.941189Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:30:54.945046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:55.036363Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:55.050864Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861850920235 != 1750861850920239 2025-06-25T14:30:55.106865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:55.107062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:55.121713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:55.222435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:55.611567Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 100 Inflight: 3 BatchSize: 7 } 2025-06-25T14:30:55.611734Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:695:2577], subTag: 2} TUpsertActor Bootstrap called: RowCount: 100 Inflight: 3 BatchSize: 7 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-25T14:30:55.698900Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:695:2577], subTag: 2} TUpsertActor finished in 0.086653s, errors=0 2025-06-25T14:30:55.699028Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:696:2578] with tag# 2 2025-06-25T14:30:59.365624Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:30:59.365796Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:30:59.365952Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a73/r3tmp/tmp6q45ui/pdisk_1.dat 2025-06-25T14:30:59.696102Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T14:30:59.697495Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:59.730423Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:59.733005Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750861856140135 != 1750861856140139 2025-06-25T14:30:59.782199Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:59.782332Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:59.797513Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:59.885344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:00.188123Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 10 Inflight: 3 KeyFrom: 12345 } 2025-06-25T14:31:00.188281Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:695:2577], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 KeyFrom: 12345 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-25T14:31:00.268386Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:695:2577], subTag: 2} TUpsertActor finished in 0.075718s, errors=0 2025-06-25T14:31:00.268503Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:696:2578] with tag# 2 >> THDRRQuoterResourceTreeRuntimeTest::TestHierarchicalQuotas [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestHangDefence [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestMoreStrongChildLimit [GOOD] >> 
THDRRQuoterResourceTreeRuntimeTest::TestInactiveSessionDisconnectsAndThenConnectsAgain [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestInactiveMultiresourceSessionDisconnectsAndThenConnectsAgain [GOOD] >> DataShardVolatile::DistributedWriteThenSplit [GOOD] >> DataShardVolatile::DistributedWriteThenReadIterator >> UpsertLoad::ShouldDropCreateTable [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentAddStream [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentAlterStream >> TSubscriberCombinationsTest::CombinationsMigratedPath [GOOD] >> DataShardVolatile::DistributedWriteBrokenLock [GOOD] >> DataShardVolatile::DistributedWriteShardRestartBeforePlan+UseSink |77.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> THDRRQuoterResourceTreeRuntimeTest::TestInactiveMultiresourceSessionDisconnectsAndThenConnectsAgain [GOOD] >> BasicUsage::TWriteSession_WriteEncoded [GOOD] >> CompressExecutor::TestExecutorMemUsage >> Cdc::ResolvedTimestampsVolatileOutOfOrder [GOOD] >> Cdc::SequentialSplitMerge >> CommitOffset::PartitionSplit_OffsetCommit [GOOD] >> CommitOffset::DistributedTxCommit >> KqpScanArrowInChanels::AggregateNoColumnNoRemaps [GOOD] >> KqpScanArrowInChanels::AggregateWithFunction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldDropCreateTable [GOOD] Test command err: 2025-06-25T14:30:54.491605Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:30:54.491780Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:30:54.491845Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a74/r3tmp/tmphtdW2J/pdisk_1.dat 2025-06-25T14:30:54.933357Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:30:54.937027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:55.007043Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:55.014376Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861851160905 != 1750861851160909 2025-06-25T14:30:55.075511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:55.075689Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:55.093039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:55.194168Z node 1 :DS_LOAD_TEST NOTICE: test_load_actor.cpp:194: TLoad# 0 creates table# BrandNewTable in dir# /Root 2025-06-25T14:30:55.516540Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:606:2513], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:55.516759Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:55.541588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:55.898320Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# BrandNewTable in dir# /Root with rows# 10 2025-06-25T14:30:55.900056Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:602:2510], subTag: 1} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "BrandNewTable" 2025-06-25T14:30:55.922886Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:602:2510], subTag: 1} TUpsertActor finished in 0.022479s, errors=0 2025-06-25T14:30:55.923198Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "BrandNewTable" CreateTable: true MinParts: 11 MaxParts: 13 MaxPartSizeMb: 1234 } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "BrandNewTable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-25T14:30:55.923361Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:602:2510], subTag: 3} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "BrandNewTable" 2025-06-25T14:30:55.989116Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:602:2510], subTag: 3} TUpsertActor finished in 0.065448s, errors=0 2025-06-25T14:30:55.989211Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:712:2587] with tag# 3 2025-06-25T14:31:00.036958Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:31:00.037137Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:31:00.037260Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a74/r3tmp/tmpCyv4lB/pdisk_1.dat 2025-06-25T14:31:00.341349Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T14:31:00.343062Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:31:00.375080Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:00.378266Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750861856580228 != 1750861856580232 2025-06-25T14:31:00.434561Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:00.434698Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:00.450103Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:00.542911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:00.894449Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 } UpsertBulkStart { RowCount: 100 Inflight: 3 } 2025-06-25T14:31:00.894576Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:695:2577], subTag: 2} TUpsertActor Bootstrap called: RowCount: 100 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 2025-06-25T14:31:01.333163Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:695:2577], subTag: 2} TUpsertActor finished in 0.438192s, errors=0 2025-06-25T14:31:01.333269Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:696:2578] with tag# 2 2025-06-25T14:31:01.339155Z node 2 :DS_LOAD_TEST NOTICE: test_load_actor.cpp:174: TLoad# 0 drops table# table in dir# /Root 2025-06-25T14:31:01.371621Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:737:2619], DatabaseId: /Root, PoolId: default, Failed to fetch pool 
info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:01.371778Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:01.434403Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:01.637926Z node 2 :DS_LOAD_TEST NOTICE: test_load_actor.cpp:194: TLoad# 0 creates table# table in dir# /Root 2025-06-25T14:31:01.657043Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:801:2662], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:01.657127Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:01.668347Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:01.720811Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-25T14:31:01.927136Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# table in dir# /Root with rows# 10 2025-06-25T14:31:01.927484Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:734:2616], subTag: 1} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037889 TableId: 3 WorkingDir: "/Root" TableName: "table" 2025-06-25T14:31:01.939604Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:734:2616], subTag: 1} TUpsertActor finished in 0.011801s, errors=0 2025-06-25T14:31:01.939942Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "table" DropTable: true } TargetShard { TabletId: 72075186224037889 TableId: 3 WorkingDir: "/Root" TableName: "table" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-25T14:31:01.940104Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:734:2616], subTag: 3} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037889 TableId: 3 WorkingDir: "/Root" TableName: "table" 2025-06-25T14:31:01.997527Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:734:2616], subTag: 3} TUpsertActor finished in 0.057135s, errors=0 2025-06-25T14:31:01.997631Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:891:2734] with tag# 3 >> TKesusTest::TestAcquireUpgrade ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberCombinationsTest::CombinationsMigratedPath [GOOD] Test command err: =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 2025-06-25T14:29:23.700875Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2025-06-25T14:29:23.700951Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:3:2050] Successful handshake: owner# 800, generation# 1 2025-06-25T14:29:23.701152Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:36:2066] 2025-06-25T14:29:23.701190Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:3:2050] Commit generation: owner# 800, generation# 1 2025-06-25T14:29:23.701268Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: 
[1:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:37:2067] 2025-06-25T14:29:23.701314Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:6:2053] Successful handshake: owner# 800, generation# 1 2025-06-25T14:29:23.701575Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:37:2067] 2025-06-25T14:29:23.701614Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:6:2053] Commit generation: owner# 800, generation# 1 2025-06-25T14:29:23.701735Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:39:2069][/root/tenant] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:29:23.702189Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [1:43:2069] 2025-06-25T14:29:23.702249Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:3:2050] Upsert description: path# /root/tenant 2025-06-25T14:29:23.702350Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:3:2050] Subscribe: subscriber# [1:43:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:29:23.702503Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [1:44:2069] 2025-06-25T14:29:23.702523Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:6:2053] Upsert description: path# /root/tenant 2025-06-25T14:29:23.702566Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:6:2053] Subscribe: subscriber# [1:44:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:29:23.702706Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [1:45:2069] 2025-06-25T14:29:23.702727Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:9:2056] Upsert description: path# /root/tenant 2025-06-25T14:29:23.702756Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:9:2056] Subscribe: subscriber# [1:45:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:29:23.702826Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:3:2050] 2025-06-25T14:29:23.702897Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:43:2069] 2025-06-25T14:29:23.702938Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:44:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:6:2053] 2025-06-25T14:29:23.702973Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:44:2069] 2025-06-25T14:29:23.703009Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:45:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:9:2056] 2025-06-25T14:29:23.703037Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:45:2069] 2025-06-25T14:29:23.703118Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:40:2069] 2025-06-25T14:29:23.703203Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:41:2069] 2025-06-25T14:29:23.703250Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:39:2069][/root/tenant] Set up state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:29:23.703297Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [1:42:2069] 2025-06-25T14:29:23.703334Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:39:2069][/root/tenant] Ignore empty state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== !argsLeft.IsDeletion 2025-06-25T14:29:23.703542Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:3:2050] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:36:2066], cookie# 0, event size# 103 2025-06-25T14:29:23.703584Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:3:2050] Update description: path# /root/tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-06-25T14:29:23.703653Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:3:2050] Upsert description: path# /root/tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /root/tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-25T14:29:23.703818Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:43:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant PathId: [OwnerId: 800, LocalPathId: 2] Version: 1 }: sender# [1:3:2050] 2025-06-25T14:29:23.703867Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [1:43:2069] 2025-06-25T14:29:23.703934Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:39:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant PathId: [OwnerId: 800, LocalPathId: 2] Version: 1 }: sender# [1:40:2069] 2025-06-25T14:29:23.703994Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:858: [main][1:39:2069][/root/tenant] Update to strong state: owner# [1:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 800, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 800, LocalPathId: 2] AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() >= argsRight.GetSuperId() =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/root/tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 1 PathOwnerId: 900 2025-06-25T14:29:24.177821Z node 3 :SCHEME_BOARD_REPLICA 
DEBUG: replica.cpp:751: [3:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [3:36:2066] 2025-06-25T14:29:24.177878Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:3:2050] Successful handshake: owner# 800, generation# 1 2025-06-25T14:29:24.178010Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [3:36:2066] 2025-06-25T14:29:24.178040Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:3:2050] Commit generation: owner# 800, generation# 1 2025-06-25T14:29:24.178093Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [3:37:2067] 2025-06-25T14:29:24.178123Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:6:2053] Successful handshake: owner# 900, generation# 1 2025-06-25T14:29:24.178307Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [3:37:2067] 2025-06-25T14:29:24.178341Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:6:2053] Commit generation: owner# 900, generation# 1 2025-06-25T14:29:24.178412Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][3:39:2069][/root/tenant] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:29:24.178812Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [3:43:2069] 2025-06-25T14:29:24.178845Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:3:2050] Upsert description: path# /root/tenant 2025-06-25T14:29:24.178913Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:3:2050] Subscribe: subscriber# [3:43:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:29:24.179030Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [3:44:2069] 2025-06-25T14:29:24.179063Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:6:2053] Upsert description: path# /root/tenant 2025-06-25T14:29:24.179112Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:6:2053] Subscribe: subscriber# [3:44:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:29:24.179225Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/tenant DomainOwnerId: 1 }: sender# [3:45:2069] 2025-06-25T14:29:24.179251Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:9:2056] Upsert description: path# /root/tenant 2025-06-25T14:29:24.179280Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:9:2056] Subscribe: subscriber# [3:45:2069], path# /root/tenant, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:29:24.179327Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:43:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant Version: 0 }: sender# [3:3:2050] 2025-06-25T14:29:24.179373Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:43:2069] 2025-06-25T14:29:24.179431Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:44:2069][/root/tenant] Handle NKikimrSchemeBoard.TEvNotify { Path: /root/tenant 
Version: 0 }: sender# [3:6:2053] 2025-06-25T14:29:24.179470Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:44:2069] 2025-06-25T1 ... 50] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [397:36:2066] 2025-06-25T14:31:02.027613Z node 397 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [397:3:2050] Commit generation: owner# 910, generation# 1 2025-06-25T14:31:02.027645Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [397:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [397:37:2067] 2025-06-25T14:31:02.027668Z node 397 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [397:6:2053] Successful handshake: owner# 910, generation# 1 2025-06-25T14:31:02.027833Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [397:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [397:37:2067] 2025-06-25T14:31:02.027853Z node 397 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [397:6:2053] Commit generation: owner# 910, generation# 1 2025-06-25T14:31:02.027908Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][397:39:2069][/Root/Tenant/table_inside] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:31:02.028224Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [397:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [397:43:2069] 2025-06-25T14:31:02.028249Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [397:3:2050] Upsert description: path# /Root/Tenant/table_inside 2025-06-25T14:31:02.028348Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [397:3:2050] Subscribe: subscriber# [397:43:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:31:02.028552Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [397:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [397:44:2069] 2025-06-25T14:31:02.028589Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [397:6:2053] Upsert description: path# /Root/Tenant/table_inside 2025-06-25T14:31:02.028644Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [397:6:2053] Subscribe: subscriber# [397:44:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:31:02.028811Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [397:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [397:45:2069] 2025-06-25T14:31:02.028840Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [397:9:2056] Upsert description: path# /Root/Tenant/table_inside 2025-06-25T14:31:02.028885Z node 397 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [397:9:2056] Subscribe: subscriber# [397:45:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:31:02.028957Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][397:43:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:3:2050] 2025-06-25T14:31:02.029013Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [397:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [397:43:2069] 2025-06-25T14:31:02.029058Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:365: [replica][397:44:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:6:2053] 2025-06-25T14:31:02.029107Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [397:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [397:44:2069] 2025-06-25T14:31:02.029159Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][397:45:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:9:2056] 2025-06-25T14:31:02.029203Z node 397 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [397:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [397:45:2069] 2025-06-25T14:31:02.029283Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][397:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:40:2069] 2025-06-25T14:31:02.029385Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][397:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:41:2069] 2025-06-25T14:31:02.029441Z node 397 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][397:39:2069][/Root/Tenant/table_inside] Set up state: owner# [397:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:31:02.029494Z node 397 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][397:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [397:42:2069] 2025-06-25T14:31:02.029538Z node 397 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][397:39:2069][/Root/Tenant/table_inside] Ignore empty state: owner# [397:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() >= argsRight.GetSuperId() =========== Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 2025-06-25T14:31:02.523257Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [399:3:2050] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [399:36:2066] 2025-06-25T14:31:02.523324Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [399:3:2050] Successful handshake: owner# 910, generation# 1 2025-06-25T14:31:02.523461Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [399:3:2050] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [399:36:2066] 2025-06-25T14:31:02.523499Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [399:3:2050] Commit generation: owner# 910, generation# 1 2025-06-25T14:31:02.523552Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [399:6:2053] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [399:37:2067] 2025-06-25T14:31:02.523588Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [399:6:2053] 
Successful handshake: owner# 910, generation# 1 2025-06-25T14:31:02.523746Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [399:6:2053] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [399:37:2067] 2025-06-25T14:31:02.523776Z node 399 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [399:6:2053] Commit generation: owner# 910, generation# 1 2025-06-25T14:31:02.524111Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:31:02.524579Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [399:3:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [399:43:2069] 2025-06-25T14:31:02.524621Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [399:3:2050] Upsert description: path# /Root/Tenant/table_inside 2025-06-25T14:31:02.524708Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [399:3:2050] Subscribe: subscriber# [399:43:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:31:02.524871Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [399:6:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [399:44:2069] 2025-06-25T14:31:02.524898Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [399:6:2053] Upsert description: path# /Root/Tenant/table_inside 2025-06-25T14:31:02.524946Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [399:6:2053] Subscribe: subscriber# [399:44:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:31:02.525062Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [399:9:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 1 }: sender# [399:45:2069] 2025-06-25T14:31:02.525082Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [399:9:2056] Upsert description: path# /Root/Tenant/table_inside 2025-06-25T14:31:02.525110Z node 399 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [399:9:2056] Subscribe: subscriber# [399:45:2069], path# /Root/Tenant/table_inside, domainOwnerId# 1, capabilities# AckNotifications: true 2025-06-25T14:31:02.525158Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][399:43:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:3:2050] 2025-06-25T14:31:02.525199Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [399:3:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [399:43:2069] 2025-06-25T14:31:02.525238Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][399:44:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:6:2053] 2025-06-25T14:31:02.525277Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [399:6:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [399:44:2069] 2025-06-25T14:31:02.525310Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][399:45:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:9:2056] 2025-06-25T14:31:02.525344Z node 399 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [399:9:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: 
sender# [399:45:2069] 2025-06-25T14:31:02.525400Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:40:2069] 2025-06-25T14:31:02.525452Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:41:2069] 2025-06-25T14:31:02.525500Z node 399 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][399:39:2069][/Root/Tenant/table_inside] Set up state: owner# [399:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:31:02.525538Z node 399 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][399:39:2069][/Root/Tenant/table_inside] Handle NKikimrSchemeBoard.TEvNotify { Path: /Root/Tenant/table_inside Version: 0 }: sender# [399:42:2069] 2025-06-25T14:31:02.525570Z node 399 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][399:39:2069][/Root/Tenant/table_inside] Ignore empty state: owner# [399:38:2068], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } =========== argsLeft.GetSuperId() >= argsRight.GetSuperId() >> TKesusTest::TestSessionTimeoutAfterDetach >> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch [GOOD] >> TopicAutoscaling::PartitionSplit_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_AutoscaleAwareSDK >> TKesusTest::TestAcquireUpgrade [GOOD] >> TKesusTest::TestAcquireTimeout >> KqpScanArrowInChanels::JoinWithParams [GOOD] >> TKesusTest::TestAcquireWaiterDowngrade >> ReadLoad::ShouldReadKqp [GOOD] >> ReadLoad::ShouldReadKqpMoreThanRows >> DataShardSnapshots::PipelineAndMediatorRestoreRace [GOOD] >> DataShardSnapshots::ShardRestartLockBasic >> TKesusTest::TestAcquireWaiterDowngrade [GOOD] >> TKesusTest::TestAcquireWaiterUpgrade >> CommitOffset::Commit_WithoutSession_TopPast [GOOD] >> CommitOffset::Commit_WithWrongSession_ToParent >> Balancing::Balancing_OneTopic_TopicApi [GOOD] >> Balancing::Balancing_OneTopic_PQv1 >> THDRRQuoterResourceTreeRuntimeTest::TestWeights [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestWeightsChange [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestVerySmallSpeed [GOOD] >> TKesusTest::TestAcquireBeforeTimeoutViaRelease >> KqpScanArrowInChanels::AggregateCountStar [GOOD] >> KqpScanArrowInChanels::AggregateByColumn >> TKesusTest::TestAcquireLocks >> TKesusTest::TestAcquireWaiterUpgrade [GOOD] >> TKesusTest::TestAcquireWaiterChangeTimeoutToZero ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowInChanels::JoinWithParams [GOOD] Test command err: Trying to start YDB, gRPC: 63988, MsgBus: 28098 2025-06-25T14:30:42.578184Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894352493678092:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:42.578240Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dcd/r3tmp/tmpderyyn/pdisk_1.dat 2025-06-25T14:30:43.213018Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:43.213104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:43.241760Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:43.242298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63988, node 1 2025-06-25T14:30:43.596446Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:43.597161Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:43.597170Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:43.597177Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:43.597288Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28098 TClient is connected to server localhost:28098 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:44.632874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:44.696497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:30:44.708252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
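The schemeshard warnings above ("Operation part proposed ok, but propose itself is undo unsafe", reported here for ESchemeOpAlterSubDomain and ESchemeOpCreateTable) recur for nearly every schema operation in this run and continue below. When triaging output like this it is usually faster to tally them by suboperation type than to read each occurrence. A minimal sketch of such a tally, assuming the output has been saved locally as run.log (the file name is an assumption, not part of the run):

    import re
    from collections import Counter

    # Matches the recurring schemeshard warning and captures the suboperation
    # type, e.g. "ESchemeOpCreateTable" or "ESchemeOpAlterSubDomain".
    UNDO_UNSAFE = re.compile(r"propose itself is undo unsafe, suboperation type: (\w+)")

    def tally_undo_unsafe(path: str) -> Counter:
        counts = Counter()
        with open(path, encoding="utf-8", errors="replace") as log:
            for line in log:
                counts.update(UNDO_UNSAFE.findall(line))
        return counts

    if __name__ == "__main__":
        for op_type, n in tally_undo_unsafe("run.log").most_common():
            print(f"{op_type}: {n}")

On this section alone the counter would be dominated by ESchemeOpCreateTable, which matches the table-heavy setup phase of these tests.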
2025-06-25T14:30:44.918823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:45.214811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:45.303945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:47.350772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894373968516008:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:47.350904Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:47.580990Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894352493678092:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:47.581061Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:47.769219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:47.820011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:47.858696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:47.891020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:47.935962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.047016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.115035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.265920Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519894378263483967:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:48.266009Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:48.266418Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894378263483972:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:48.270173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:48.298288Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894378263483974:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:30:48.354853Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894378263484027:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:49.662838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:50.245808Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discard ... e.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:56.626073Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861856660, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 24328, MsgBus: 9621 2025-06-25T14:30:57.502156Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519894419845526815:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:57.502227Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dcd/r3tmp/tmp2nl3Xe/pdisk_1.dat 2025-06-25T14:30:57.671469Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:57.686475Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:57.686583Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 24328, node 3 2025-06-25T14:30:57.690509Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:57.740645Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:57.740674Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:57.740687Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:57.740819Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9621 TClient is connected to server localhost:9621 WaitRootIsUp 'Root'... 
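Both nodes in this test log the same sequence around the default resource pool: the TPoolFetcherActor and the workload service first report NOT_FOUND for /Root/.metadata/workload_manager/pools/default, a pool is then created (ESchemeOpCreateResourcePool followed by the "Transaction ... completed, doublechecking" retry), and the follow-up request is rejected only because the path already exists ("path exist, request accepts it"). Taken together these read as the pool being created lazily on first use rather than as a failure. A small check that pairs the warnings with the later confirmations, under the same run.log assumption as the sketch above:

    import re

    NOT_FOUND = re.compile(r"Failed to fetch pool (?:info|default).*NOT_FOUND")
    POOL_EXISTS = re.compile(r"workload_manager/pools/default.*path exist, request accepts it")

    def summarize_default_pool(path: str) -> None:
        not_found = exists = 0
        with open(path, encoding="utf-8", errors="replace") as log:
            for line in log:
                not_found += len(NOT_FOUND.findall(line))
                exists += len(POOL_EXISTS.findall(line))
        print(f"default pool NOT_FOUND warnings:          {not_found}")
        print(f"'path exist, request accepts it' replies: {exists}")

    if __name__ == "__main__":
        summarize_default_pool("run.log")  # assumed local copy of this output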
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:58.387426Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:58.416754Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:58.552485Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:58.643089Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:58.882784Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:58.979843Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:01.443790Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894437025397609:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:01.443876Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:01.519123Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:01.559539Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:01.607661Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:01.689831Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:01.735314Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:01.815250Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:01.869462Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:01.969563Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894437025398271:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:01.969697Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:01.969944Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894437025398276:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:01.974914Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:01.995101Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519894437025398278:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:31:02.064927Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519894441320365625:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:02.505058Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519894419845526815:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:02.505134Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:31:03.575440Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861863611, txId: 281474976715672] shutting down 2025-06-25T14:31:03.829578Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861863863, txId: 281474976715674] shutting down >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_PQv1 |77.5%| [TA] $(B)/ydb/core/tx/scheme_board/ut_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:16.922356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:16.922451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:16.922492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:16.922529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:16.922570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:16.922595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:16.922641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:16.922761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] 
Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:16.923450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:16.923766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:17.002513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:17.002590Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:17.019403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:17.019678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:17.019794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:17.026132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:17.026490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:17.027137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:17.027404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:17.031996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:17.032196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:17.033399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:17.033459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:17.033608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:17.033656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:17.033696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:17.033783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:17.039266Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:17.170650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" 
OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:17.170930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:17.171166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:17.171215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:17.171466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:17.171541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:17.185267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:17.185476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:17.185674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:17.185732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:17.185796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:17.185821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:17.195469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:17.195551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:17.195615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:17.201377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:17.201469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:17.201539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:17.201647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:17.209152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:17.211680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:17.211937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:17.213030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:17.213200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:17.213274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:17.213692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:17.213750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:17.213913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:17.213999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:17.216643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:17.216693Z node 1 :FLAT_TX_SCHEMESHARD ... 
25T14:31:03.809671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-25T14:31:03.809783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 129 2025-06-25T14:31:03.809886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:31:03.823888Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [1:3460:5421], attempt# 0 2025-06-25T14:31:03.854187Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:3460:5421], sender# [1:3459:5420] REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:19806 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: DD945FEA-BFC5-415A-B3F1-C37E8C822459 amz-sdk-request: attempt=1 content-length: 94 content-md5: ZpDejBbuBPHjGq8ZC8z8QA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 94 2025-06-25T14:31:03.867928Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:401: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:3460:5421], result# PutObjectResult { ETag: 6690de8c16ee04f1e31aaf190bccfc40 } FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-25T14:31:03.870653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:31:03.870734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:31:03.871020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:31:03.871081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:31:03.871936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:31:03.872012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:19806 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 8F0C5639-DF39-4B25-AD32-C218C01C446E amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-25T14:31:03.873574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:31:03.873695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:31:03.873748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:31:03.873805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:31:03.873867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:31:03.873973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:31:03.874640Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:306: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:3460:5421], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-06-25T14:31:03.874953Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3459:5420] 2025-06-25T14:31:03.886297Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3460:5421], sender# [1:3459:5420], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:19806 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: D8540BFF-7AB6-47D5-8D4A-E46C8F4C3894 amz-sdk-request: attempt=1 content-length: 740 content-md5: P/a/uWmNWYxyRT1pAtAE7A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 740 2025-06-25T14:31:03.894510Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:501: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:3460:5421], result# PutObjectResult { ETag: 3ff6bfb9698d598c72453d6902d004ec } 2025-06-25T14:31:03.894591Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:3460:5421], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-25T14:31:03.895065Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3459:5420], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-25T14:31:03.917369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:31:03.962264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 
RowsProcessed: 1000 } 2025-06-25T14:31:03.962331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:31:03.962512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-25T14:31:03.962618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-25T14:31:03.962711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:03.962753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:31:03.962794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:31:03.962836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:31:03.962983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:31:03.970142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:31:03.970775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:31:03.970828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:31:03.970943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:31:03.970992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:31:03.971053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:31:03.971088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:31:03.971126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:31:03.971194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 102 2025-06-25T14:31:03.971249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:31:03.971287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:31:03.971318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:31:03.971432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:31:03.978569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:31:03.978647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3445:5407] TestWaitNotification: OK eventTxId 102 >> TKesusTest::TestAcquireBeforeTimeoutViaRelease [GOOD] >> TKesusTest::TestAcquireBeforeTimeoutViaModeChange >> TKesusTest::TestAcquireWaiterChangeTimeoutToZero [GOOD] >> TKesusTest::TestAcquireWaiterRelease >> TopicAutoscaling::PartitionSplit_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_PreferedPartition_BeforeAutoscaleAwareSDK >> TKesusTest::TestAcquireWaiterRelease [GOOD] >> TKesusTest::TestAllocatesResources >> TKesusTest::TestAcquireBeforeTimeoutViaModeChange [GOOD] >> KqpScanArrowFormat::AggregateNoColumnNoRemaps [GOOD] >> KqpScanArrowFormat::AggregateWithFunction >> KqpScanArrowFormat::AggregateNoColumn [GOOD] >> KqpScanArrowFormat::AggregateEmptySum >> TKesusTest::TestQuoterResourceDescribe ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAcquireBeforeTimeoutViaModeChange [GOOD] Test command err: 2025-06-25T14:31:06.118999Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:06.119123Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:06.147501Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:06.147665Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:06.183959Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:06.184790Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=1258036269853993574, session=0, seqNo=0) 2025-06-25T14:31:06.184955Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:06.204967Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=1258036269853993574, session=1) 2025-06-25T14:31:06.205333Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:137:2161], cookie=14356691285502964023, session=0, seqNo=0) 2025-06-25T14:31:06.205455Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 
2025-06-25T14:31:06.224955Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:137:2161], cookie=14356691285502964023, session=2) 2025-06-25T14:31:06.226117Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:31:06.226274Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:31:06.226393Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:31:06.249019Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=111) 2025-06-25T14:31:06.249436Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=112, session=1, semaphore="Lock2" count=1) 2025-06-25T14:31:06.249580Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 2 "Lock2" 2025-06-25T14:31:06.249705Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 1 2025-06-25T14:31:06.270645Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=112) 2025-06-25T14:31:06.271146Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[1:136:2160], cookie=333, name="Lock1") 2025-06-25T14:31:06.271254Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Lock1" owner link 2025-06-25T14:31:06.271473Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:137:2161], cookie=222, session=2, semaphore="Lock1" count=1) 2025-06-25T14:31:06.271573Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 3 "Lock1" 2025-06-25T14:31:06.271644Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 3 "Lock1" queue: next order #3 session 2 2025-06-25T14:31:06.271762Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:137:2161], cookie=223, session=2, semaphore="Lock2" count=18446744073709551615) 2025-06-25T14:31:06.285215Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[1:136:2160], cookie=333) 2025-06-25T14:31:06.285298Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:137:2161], cookie=222) 2025-06-25T14:31:06.285336Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:137:2161], cookie=223) 2025-06-25T14:31:06.285688Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[1:136:2160], cookie=334, name="Lock2") 2025-06-25T14:31:06.285804Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 2 "Lock2" owner link 2025-06-25T14:31:06.285877Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: 
[72057594037927937] Processing semaphore 2 "Lock2" queue: next order #4 session 2 2025-06-25T14:31:06.298227Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[1:136:2160], cookie=334) 2025-06-25T14:31:06.298840Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:165:2187], cookie=295406428799341387, name="Lock1") 2025-06-25T14:31:06.298933Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:165:2187], cookie=295406428799341387) 2025-06-25T14:31:06.299413Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:168:2190], cookie=1760355251752383, name="Lock2") 2025-06-25T14:31:06.299476Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:168:2190], cookie=1760355251752383) 2025-06-25T14:31:06.323668Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:06.323783Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:06.324230Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:06.324845Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:06.372969Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:06.373135Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #4 session 2 2025-06-25T14:31:06.373191Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 3 "Lock1" queue: next order #3 session 2 2025-06-25T14:31:06.373533Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:208:2220], cookie=12141008266639181636, name="Lock1") 2025-06-25T14:31:06.373644Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:208:2220], cookie=12141008266639181636) 2025-06-25T14:31:06.374260Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:216:2227], cookie=10598337799186116632, name="Lock2") 2025-06-25T14:31:06.374330Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:216:2227], cookie=10598337799186116632) 2025-06-25T14:31:06.907141Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:06.907224Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:06.919943Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:06.920344Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:06.945313Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:06.945932Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:136:2160], cookie=14149843329274168946, session=0, seqNo=0) 2025-06-25T14:31:06.946034Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: 
[72057594037927937] Created new session 1 2025-06-25T14:31:06.958041Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:136:2160], cookie=14149843329274168946, session=1) 2025-06-25T14:31:06.958332Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:137:2161], cookie=7968561265212078825, session=0, seqNo=0) 2025-06-25T14:31:06.958447Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:31:06.970507Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:137:2161], cookie=7968561265212078825, session=2) 2025-06-25T14:31:06.971458Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:31:06.971593Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:31:06.971669Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:31:06.983497Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=111) 2025-06-25T14:31:06.983800Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=112, session=1, semaphore="Lock2" count=1) 2025-06-25T14:31:06.983951Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 2 "Lock2" 2025-06-25T14:31:06.984026Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 1 2025-06-25T14:31:06.995888Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=112) 2025-06-25T14:31:06.996214Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=333, session=1, semaphore="Lock1" count=1) 2025-06-25T14:31:06.996447Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:137:2161], cookie=222, session=2, semaphore="Lock1" count=1) 2025-06-25T14:31:06.996522Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #3 session 2 2025-06-25T14:31:06.996630Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:137:2161], cookie=223, session=2, semaphore="Lock2" count=18446744073709551615) 2025-06-25T14:31:07.008644Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=333) 2025-06-25T14:31:07.008722Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:137:2161], cookie=222) 2025-06-25T14:31:07.008751Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:137:2161], cookie=223) 2025-06-25T14:31:07.009266Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] 
TTxSemaphoreDescribe::Execute (sender=[2:163:2185], cookie=2384847445928403629, name="Lock1") 2025-06-25T14:31:07.009355Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:163:2185], cookie=2384847445928403629) 2025-06-25T14:31:07.009819Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:166:2188], cookie=15477125134915738247, name="Lock2") 2025-06-25T14:31:07.009885Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:166:2188], cookie=15477125134915738247) 2025-06-25T14:31:07.010264Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:169:2191], cookie=1473827863540335691, name="Lock1") 2025-06-25T14:31:07.010323Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:169:2191], cookie=1473827863540335691) 2025-06-25T14:31:07.010735Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:172:2194], cookie=4509695399612621372, name="Lock2") 2025-06-25T14:31:07.010801Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:172:2194], cookie=4509695399612621372) 2025-06-25T14:31:07.011035Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:137:2161], cookie=444, session=2, semaphore="Lock2" count=1) 2025-06-25T14:31:07.011190Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #4 session 2 2025-06-25T14:31:07.023163Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:137:2161], cookie=444) 2025-06-25T14:31:07.023794Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:177:2199], cookie=15056653636740428785, name="Lock2") 2025-06-25T14:31:07.023885Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:177:2199], cookie=15056653636740428785) 2025-06-25T14:31:07.024388Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:180:2202], cookie=6514871117769282130, name="Lock2") 2025-06-25T14:31:07.024459Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:180:2202], cookie=6514871117769282130) 2025-06-25T14:31:07.036171Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:07.036237Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:07.036578Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:07.036788Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:07.072142Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:07.072254Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:31:07.072294Z node 2 :KESUS_TABLET DEBUG: 
tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #3 session 2 2025-06-25T14:31:07.072344Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 1 2025-06-25T14:31:07.072382Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #4 session 2 2025-06-25T14:31:07.072677Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:220:2232], cookie=6903495605798980821, name="Lock1") 2025-06-25T14:31:07.072746Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:220:2232], cookie=6903495605798980821) 2025-06-25T14:31:07.073284Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:228:2239], cookie=14651129711291852247, name="Lock2") 2025-06-25T14:31:07.073340Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:228:2239], cookie=14651129711291852247) >> TKesusTest::TestAllocatesResources [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK >> TSchemeShardSubDomainTest::DeclareAndDelete ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAllocatesResources [GOOD] Test command err: 2025-06-25T14:31:05.052246Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:05.052432Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:05.071823Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:05.071962Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:05.096890Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:05.097448Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=12377922549373080057, session=0, seqNo=0) 2025-06-25T14:31:05.097640Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:05.109746Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=12377922549373080057, session=1) 2025-06-25T14:31:05.110115Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=3361929665440894583, session=0, seqNo=0) 2025-06-25T14:31:05.110223Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:31:05.122414Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=3361929665440894583, session=2) 2025-06-25T14:31:05.122770Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=111, session=1, semaphore="Lock1" count=1) 2025-06-25T14:31:05.122926Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 
2025-06-25T14:31:05.123026Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:31:05.135218Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=111) 2025-06-25T14:31:05.135620Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=222, session=2, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:31:05.135950Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=333, session=2, semaphore="Lock1" count=1) 2025-06-25T14:31:05.136049Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #2 session 2 2025-06-25T14:31:05.148041Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=222) 2025-06-25T14:31:05.148134Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=333) 2025-06-25T14:31:05.148770Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:152:2174], cookie=8089043609845329968, name="Lock1") 2025-06-25T14:31:05.148873Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:152:2174], cookie=8089043609845329968) 2025-06-25T14:31:05.626057Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:05.626158Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:05.657608Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:05.668739Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:05.712820Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:05.713361Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:136:2160], cookie=14669723363628703111, session=0, seqNo=0) 2025-06-25T14:31:05.713518Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:05.732917Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:136:2160], cookie=14669723363628703111, session=1) 2025-06-25T14:31:05.733205Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:136:2160], cookie=9366834361555134886, session=0, seqNo=0) 2025-06-25T14:31:05.733320Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:31:05.750861Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:136:2160], cookie=9366834361555134886, session=2) 2025-06-25T14:31:05.751210Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:31:05.751360Z node 2 :KESUS_TABLET DEBUG: 
tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:31:05.751441Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:31:05.765690Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=111) 2025-06-25T14:31:05.766056Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=222, session=2, semaphore="Lock1" count=1) 2025-06-25T14:31:05.766376Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=333, session=2, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:31:05.783460Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=222) 2025-06-25T14:31:05.783545Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=333) 2025-06-25T14:31:05.784069Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:152:2174], cookie=11872727377065265596, name="Lock1") 2025-06-25T14:31:05.784156Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:152:2174], cookie=11872727377065265596) 2025-06-25T14:31:05.784590Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:155:2177], cookie=7776846238230162063, name="Lock1") 2025-06-25T14:31:05.784669Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:155:2177], cookie=7776846238230162063) 2025-06-25T14:31:06.467281Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:06.467401Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:06.508587Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:06.509169Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:06.538115Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:06.538562Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:136:2160], cookie=4582387044195994710, session=0, seqNo=0) 2025-06-25T14:31:06.538705Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:06.554088Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:136:2160], cookie=4582387044195994710, session=1) 2025-06-25T14:31:06.554407Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:136:2160], cookie=17482178427688718776, session=0, seqNo=0) 2025-06-25T14:31:06.554529Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:31:06.572483Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:136:2160], 
cookie=17482178427688718776, session=2) 2025-06-25T14:31:06.573142Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[3:136:2160], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:31:06.573309Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:31:06.573399Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:31:06.592929Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[3:136:2160], cookie=111) 2025-06-25T14:31:06.593251Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[3:136:2160], cookie=222, session=2, semaphore="Lock1" count=1) 2025-06-25T14:31:06.593592Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[3:136:2160], cookie=333, session=2, semaphore="Lock1" count=1) 2025-06-25T14:31:06.593665Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 1 "Lock1" waiter link 2025-06-25T14:31:06.616999Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[3:136:2160], cookie=222) 2025-06-25T14:31:06.617079Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[3:136:2160], cookie=333) 2025-06-25T14:31:06.617653Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:155:2177], cookie=5197693357593864353, name="Lock1") 2025-06-25T14:31:06.617737Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:155:2177], cookie=5197693357593864353) 2025-06-25T14:31:06.618132Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:158:2180], cookie=21022370304524413, name="Lock1") 2025-06-25T14:31:06.618196Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:158:2180], cookie=21022370304524413) 2025-06-25T14:31:06.651990Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:06.652106Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:06.652539Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:06.653054Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:06.704463Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:06.704630Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:31:06.705012Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:198:2210], cookie=17380254722503133232, name="Lock1") 2025-06-25T14:31:06.705092Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:198:2210], cookie=17380254722503133232) 
2025-06-25T14:31:06.705557Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:206:2217], cookie=16067399049219778577, name="Lock1") 2025-06-25T14:31:06.705627Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:206:2217], cookie=16067399049219778577) 2025-06-25T14:31:07.070907Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:07.070997Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:07.083283Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:07.083388Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:07.097627Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:07.098082Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:134:2158], cookie=8695921075542399472, session=0, seqNo=0) 2025-06-25T14:31:07.098232Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:07.121178Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:134:2158], cookie=8695921075542399472, session=1) 2025-06-25T14:31:07.121476Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:134:2158], cookie=5060523762262526957, session=0, seqNo=0) 2025-06-25T14:31:07.121611Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:31:07.133234Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:134:2158], cookie=5060523762262526957, session=2) 2025-06-25T14:31:07.133484Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:134:2158], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:31:07.133607Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:31:07.133667Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:31:07.145439Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:134:2158], cookie=111) 2025-06-25T14:31:07.145712Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:134:2158], cookie=222, session=2, semaphore="Lock1" count=1) 2025-06-25T14:31:07.145969Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[4:134:2158], cookie=333, name="Lock1") 2025-06-25T14:31:07.146026Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 1 "Lock1" waiter link 2025-06-25T14:31:07.158017Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:134:2158], cookie=222) 2025-06-25T14:31:07.158097Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete 
(sender=[4:134:2158], cookie=333) 2025-06-25T14:31:07.520064Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:07.520208Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:07.541997Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:07.542112Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:07.565546Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:07.570978Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:136:2160], cookie=6664783482498128907, path="/Root", config={ MaxUnitsPerSecond: 100 }) 2025-06-25T14:31:07.571172Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-25T14:31:07.584167Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:136:2160], cookie=6664783482498128907) 2025-06-25T14:31:07.584698Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:145:2167], cookie=17758147139343463395, path="/Root/Res", config={ }) 2025-06-25T14:31:07.584927Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res" 2025-06-25T14:31:07.598521Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:145:2167], cookie=17758147139343463395) 2025-06-25T14:31:07.600033Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:150:2172]. Cookie: 4527514404583964591. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-25T14:31:07.600100Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:150:2172], cookie=4527514404583964591) 2025-06-25T14:31:07.600515Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:193: [72057594037927937] Send TEvUpdateConsumptionStateAck to [5:150:2172]. Cookie: 8494556148073435096. Data: { } 2025-06-25T14:31:07.600548Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:198: [72057594037927937] Update quoter resources consumption state (sender=[5:150:2172], cookie=8494556148073435096) 2025-06-25T14:31:07.642326Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:150:2172]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } 2025-06-25T14:31:07.694818Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:150:2172]. Cookie: 0. 
Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } 2025-06-25T14:31:07.725775Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:150:2172]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } 2025-06-25T14:31:07.770793Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:150:2172]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } 2025-06-25T14:31:07.812011Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:150:2172]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 10 StateNotification { Status: SUCCESS } } } >> TKesusTest::TestQuoterResourceDescribe [GOOD] >> TKesusTest::TestQuoterResourceCreation >> DataShardVolatile::DistributedWriteThenReadIterator [GOOD] >> DataShardVolatile::DistributedWriteThenReadIteratorStream >> TKesusTest::TestQuoterResourceCreation [GOOD] >> TKesusTest::TestQuoterResourceModification >> TSchemeShardSubDomainTest::DeclareAndDelete [GOOD] >> Cdc::InitialScanAndLimits [GOOD] >> Cdc::InitialScanComplete |77.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test [GOOD] >> TSchemeShardSubDomainTest::CreateWithNoEqualName >> DataShardVolatile::DistributedWriteShardRestartBeforePlan+UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartBeforePlan-UseSink >> TopicAutoscaling::ControlPlane_AutoscalingWithStorageSizeRetention [GOOD] >> TopicAutoscaling::CDC_PartitionSplit_AutosplitByLoad >> Cdc::AwsRegion [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeclareAndDelete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:31:08.827871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:31:08.827961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:31:08.828002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:31:08.828045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:31:08.828083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:31:08.828110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:31:08.828161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: 
Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:31:08.828229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:31:08.828974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:31:08.829300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:31:08.905998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:31:08.906062Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:08.923536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:31:08.923949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:31:08.924123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:31:08.930089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:31:08.930429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:31:08.931050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:08.931309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:31:08.935220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:31:08.935436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:31:08.936770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:31:08.936839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:31:08.936990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:31:08.937044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:31:08.937085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:31:08.937179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:31:08.945055Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is 
[1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:31:09.090207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:31:09.090487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:09.090709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:31:09.090759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:31:09.091055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:31:09.091137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:31:09.095835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:09.096045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:31:09.096325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:09.096380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:31:09.096442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:31:09.096479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:31:09.099750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:09.099800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:31:09.099841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:31:09.101901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:09.101948Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:09.101997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:31:09.102033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:31:09.104707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:31:09.107151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:31:09.107335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:31:09.108289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:09.108461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:31:09.108522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:31:09.108844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:31:09.108905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:31:09.109079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:31:09.109152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:31:09.111992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:31:09.112037Z node 1 :FLAT_TX_SCHEMESHARD ... 
LAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5302: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 2] name: USER_0 type: EPathTypeSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 101 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:31:09.172980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:31:09.173096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 128 -> 130 2025-06-25T14:31:09.173210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:31:09.173259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:31:09.174492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:31:09.174932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:31:09.176690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:31:09.176730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:31:09.176918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:31:09.177041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:31:09.177074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-25T14:31:09.177109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:31:09.177486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:31:09.177527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:418: [72057594046678944] TDeleteParts opId# 101:0 ProgressState 2025-06-25T14:31:09.177591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:31:09.177622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:31:09.177689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:31:09.177722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:31:09.177762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:31:09.177809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:31:09.177839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:31:09.177866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:31:09.177928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:31:09.177969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T14:31:09.178007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:31:09.178038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-25T14:31:09.178509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:31:09.178562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:31:09.178586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:31:09.178609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:31:09.178635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:31:09.179109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:31:09.179184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:31:09.179206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:31:09.179239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 
72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:31:09.179265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:31:09.179326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:31:09.179617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:31:09.179664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:31:09.179726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:31:09.181256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:31:09.181294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:31:09.181360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:31:09.183282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:31:09.184233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:31:09.184299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:31:09.184367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:31:09.184612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:31:09.184648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:31:09.184984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:31:09.185056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:31:09.185082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:347:2336] TestWaitNotification: OK eventTxId 101 
2025-06-25T14:31:09.185534Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:31:09.185731Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 230us result status StatusPathDoesNotExist 2025-06-25T14:31:09.185912Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::DeleteAndRestart >> TopicAutoscaling::PartitionMerge_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionMerge_PreferedPartition_AutoscaleAwareSDK >> TKesusTest::TestQuoterResourceModification [GOOD] >> TKesusTest::TestQuoterResourceDeletion >> TKesusTest::TestQuoterResourceDeletion [GOOD] >> TKesusTest::TestQuoterSubscribeOnResource >> KqpScanArrowInChanels::AggregateWithFunction [GOOD] >> KqpScanArrowInChanels::AggregateEmptySum >> TKesusTest::TestQuoterSubscribeOnResource [GOOD] >> TMeteringSink::FlushThroughputV1 [GOOD] >> TMeteringSink::UsedStorageV1 [GOOD] >> TMicrosecondsSlidingWindow::Basic [GOOD] >> TMultiBucketCounter::InsertAndUpdate [GOOD] >> TMultiBucketCounter::ManyCounters [GOOD] >> TPQRBDescribes::PartitionLocations >> TSchemeShardSubDomainTest::CreateWithNoEqualName [GOOD] >> TSchemeShardSubDomainTest::DeleteAndRestart [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestQuoterSubscribeOnResource [GOOD] Test command err: 2025-06-25T14:31:08.156064Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:08.156179Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:08.172619Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:08.172749Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:08.198322Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:08.203992Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:136:2160], cookie=15847043438975831999, path="/Root", config={ MaxUnitsPerSecond: 100500 MaxBurstSizeCoefficient: 1.5 }) 2025-06-25T14:31:08.204276Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-25T14:31:08.216587Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] 
TTxQuoterResourceAdd::Complete (sender=[1:136:2160], cookie=15847043438975831999) 2025-06-25T14:31:08.217250Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:145:2167], cookie=9592043196491755360, path="/Root/Folder", config={ MaxUnitsPerSecond: 100500 MaxBurstSizeCoefficient: 1.5 }) 2025-06-25T14:31:08.217490Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Folder" 2025-06-25T14:31:08.229722Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:145:2167], cookie=9592043196491755360) 2025-06-25T14:31:08.230332Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:150:2172], cookie=8000567817862511170, path="/Root/Q1", config={ MaxUnitsPerSecond: 10 }) 2025-06-25T14:31:08.230550Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 3 "Root/Q1" 2025-06-25T14:31:08.253734Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:150:2172], cookie=8000567817862511170) 2025-06-25T14:31:08.254346Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:155:2177], cookie=15098999160995952669, path="/Root/Folder/Q1", config={ MaxUnitsPerSecond: 10 }) 2025-06-25T14:31:08.254576Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 4 "Root/Folder/Q1" 2025-06-25T14:31:08.269135Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:155:2177], cookie=15098999160995952669) 2025-06-25T14:31:08.269816Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:160:2182], cookie=8252106931749209708, path="/Root/Folder/Q2", config={ MaxUnitsPerSecond: 10 }) 2025-06-25T14:31:08.270053Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 5 "Root/Folder/Q2" 2025-06-25T14:31:08.285038Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:160:2182], cookie=8252106931749209708) 2025-06-25T14:31:08.285666Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:165:2187], cookie=16694212435895932523, path="/Root/Folder/Q3", config={ MaxUnitsPerSecond: 10 }) 2025-06-25T14:31:08.285852Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 6 "Root/Folder/Q3" 2025-06-25T14:31:08.298163Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:165:2187], cookie=16694212435895932523) 2025-06-25T14:31:08.298797Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:170:2192], cookie=460522978244499312, path="/Root2", config={ MaxUnitsPerSecond: 100500 MaxBurstSizeCoefficient: 1.5 }) 2025-06-25T14:31:08.299028Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 7 "Root2" 2025-06-25T14:31:08.311376Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] 
TTxQuoterResourceAdd::Complete (sender=[1:170:2192], cookie=460522978244499312) 2025-06-25T14:31:08.312064Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:175:2197], cookie=17507911717216368796, path="/Root2/Q", config={ MaxUnitsPerSecond: 10 }) 2025-06-25T14:31:08.312330Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 8 "Root2/Q" 2025-06-25T14:31:08.331423Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:175:2197], cookie=17507911717216368796) 2025-06-25T14:31:08.332049Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:180:2202], cookie=2663968907054604696, ids=[100], paths=[], recursive=0) 2025-06-25T14:31:08.332141Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:180:2202], cookie=2663968907054604696) 2025-06-25T14:31:08.332690Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:183:2205], cookie=12389207319199555201, ids=[], paths=[Nonexistent/Path], recursive=0) 2025-06-25T14:31:08.332774Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:183:2205], cookie=12389207319199555201) 2025-06-25T14:31:08.333257Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:186:2208], cookie=15908827943675012302, ids=[], paths=[/Root, ], recursive=0) 2025-06-25T14:31:08.333352Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:186:2208], cookie=15908827943675012302) 2025-06-25T14:31:08.333848Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:189:2211], cookie=8499878681716463653, ids=[1, 1], paths=[], recursive=0) 2025-06-25T14:31:08.333912Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:189:2211], cookie=8499878681716463653) 2025-06-25T14:31:08.334466Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:192:2214], cookie=5357328226828674790, ids=[], paths=[/Root2/Q, /Root2/Q], recursive=0) 2025-06-25T14:31:08.334553Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:192:2214], cookie=5357328226828674790) 2025-06-25T14:31:08.335026Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:195:2217], cookie=18042388864119563108, ids=[], paths=[], recursive=1) 2025-06-25T14:31:08.335110Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:195:2217], cookie=18042388864119563108) 2025-06-25T14:31:08.335688Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:198:2220], cookie=9240587216115041527, ids=[], paths=[], recursive=0) 2025-06-25T14:31:08.335749Z node 1 :KESUS_TABLET DEBUG: 
tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:198:2220], cookie=9240587216115041527) 2025-06-25T14:31:08.336231Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:201:2223], cookie=11426423452305974780, ids=[3, 2], paths=[], recursive=1) 2025-06-25T14:31:08.336553Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:201:2223], cookie=11426423452305974780) 2025-06-25T14:31:08.337270Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:204:2226], cookie=10869044270677233042, ids=[3, 2], paths=[], recursive=0) 2025-06-25T14:31:08.337326Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:204:2226], cookie=10869044270677233042) 2025-06-25T14:31:08.337868Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:207:2229], cookie=1080803780405532967, ids=[], paths=[Root2/], recursive=1) 2025-06-25T14:31:08.337940Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:207:2229], cookie=1080803780405532967) 2025-06-25T14:31:08.338435Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:210:2232], cookie=10933905021335870335, ids=[], paths=[Root2/], recursive=0) 2025-06-25T14:31:08.338523Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:210:2232], cookie=10933905021335870335) 2025-06-25T14:31:08.356857Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:08.356990Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:08.357495Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:08.358049Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:08.405755Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:08.406181Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:250:2262], cookie=13916693720007099316, ids=[100], paths=[], recursive=0) 2025-06-25T14:31:08.406275Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:250:2262], cookie=13916693720007099316) 2025-06-25T14:31:08.407112Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:256:2267], cookie=8992110503791340582, ids=[], paths=[Nonexistent/Path], recursive=0) 2025-06-25T14:31:08.407218Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:256:2267], cookie=8992110503791340582) 2025-06-25T14:31:08.407864Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[1:259:2270], cookie=12785676347536796135, ids=[], paths=[/Root, ], 
recursive=0) 2025-06-25T14:31:08.407962Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[1:259:2270], cookie=12785676347536796135) 2025-06-25T14:31:08.408912Z ... ESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 4 "Root/Folder/Q1" 2025-06-25T14:31:10.666315Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[4:153:2175], cookie=5064971758232804897) 2025-06-25T14:31:10.666902Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:158:2180], cookie=12855952633491601043, ids=[], paths=[], recursive=1) 2025-06-25T14:31:10.666999Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:158:2180], cookie=12855952633491601043) 2025-06-25T14:31:10.667839Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:164:2186], cookie=2510240635728864614, ids=[], paths=[], recursive=1) 2025-06-25T14:31:10.667918Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:164:2186], cookie=2510240635728864614) 2025-06-25T14:31:10.668754Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:170:2192], cookie=3294893456756262146, ids=[], paths=[], recursive=1) 2025-06-25T14:31:10.668832Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:170:2192], cookie=3294893456756262146) 2025-06-25T14:31:10.669291Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:32: [72057594037927937] TTxQuoterResourceDelete::Execute (sender=[4:173:2195], cookie=1105962472755332587, id=0, path="/Root/Folder/NonexistingRes") 2025-06-25T14:31:10.669377Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:70: [72057594037927937] TTxQuoterResourceDelete::Complete (sender=[4:173:2195], cookie=1105962472755332587) 2025-06-25T14:31:10.669837Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:176:2198], cookie=17197411960034509665, ids=[], paths=[], recursive=1) 2025-06-25T14:31:10.669907Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:176:2198], cookie=17197411960034509665) 2025-06-25T14:31:10.670396Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:32: [72057594037927937] TTxQuoterResourceDelete::Execute (sender=[4:179:2201], cookie=12453419180650049965, id=100, path="") 2025-06-25T14:31:10.670465Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:70: [72057594037927937] TTxQuoterResourceDelete::Complete (sender=[4:179:2201], cookie=12453419180650049965) 2025-06-25T14:31:10.670945Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:182:2204], cookie=1765572312081779480, ids=[], paths=[], recursive=1) 2025-06-25T14:31:10.671010Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:182:2204], cookie=1765572312081779480) 2025-06-25T14:31:10.671487Z 
node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:32: [72057594037927937] TTxQuoterResourceDelete::Execute (sender=[4:185:2207], cookie=2640707331880208058, id=3, path="") 2025-06-25T14:31:10.671548Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:70: [72057594037927937] TTxQuoterResourceDelete::Complete (sender=[4:185:2207], cookie=2640707331880208058) 2025-06-25T14:31:10.672009Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:188:2210], cookie=12031506042475969739, ids=[], paths=[], recursive=1) 2025-06-25T14:31:10.672068Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:188:2210], cookie=12031506042475969739) 2025-06-25T14:31:10.678009Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:32: [72057594037927937] TTxQuoterResourceDelete::Execute (sender=[4:191:2213], cookie=18279777624302345323, id=0, path="/Root/Folder/Q1") 2025-06-25T14:31:10.678153Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:61: [72057594037927937] Deleted quoter resource 4 "Root/Folder/Q1" 2025-06-25T14:31:10.690482Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:70: [72057594037927937] TTxQuoterResourceDelete::Complete (sender=[4:191:2213], cookie=18279777624302345323) 2025-06-25T14:31:10.691148Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:196:2218], cookie=8475432562935822588, ids=[], paths=[], recursive=1) 2025-06-25T14:31:10.691243Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:196:2218], cookie=8475432562935822588) 2025-06-25T14:31:10.709323Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:10.709430Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:10.709947Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:10.710588Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:10.751434Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:10.753419Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:236:2248], cookie=978505676957156372, ids=[], paths=[], recursive=1) 2025-06-25T14:31:10.753934Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:236:2248], cookie=978505676957156372) 2025-06-25T14:31:10.754681Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:32: [72057594037927937] TTxQuoterResourceDelete::Execute (sender=[4:242:2253], cookie=4809016880066202231, id=3, path="") 2025-06-25T14:31:10.754844Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:61: [72057594037927937] Deleted quoter resource 3 "Root/Folder" 2025-06-25T14:31:10.777624Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_delete.cpp:70: [72057594037927937] TTxQuoterResourceDelete::Complete (sender=[4:242:2253], cookie=4809016880066202231) 2025-06-25T14:31:10.778380Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:247:2258], cookie=12556544294987924105, 
ids=[], paths=[], recursive=1) 2025-06-25T14:31:10.778474Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:247:2258], cookie=12556544294987924105) 2025-06-25T14:31:10.791469Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:10.791582Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:10.792041Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:10.792695Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:10.829967Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:10.830350Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:78: [72057594037927937] TTxQuoterResourceDescribe::Execute (sender=[4:287:2288], cookie=5520119371711011303, ids=[], paths=[], recursive=1) 2025-06-25T14:31:10.830431Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_describe.cpp:115: [72057594037927937] TTxQuoterResourceDescribe::Complete (sender=[4:287:2288], cookie=5520119371711011303) 2025-06-25T14:31:11.179215Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:11.179337Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:11.196848Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:11.196987Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:11.221265Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:11.221699Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:136:2160], cookie=4198346343773263693, path="/Q1", config={ MaxUnitsPerSecond: 10 }) 2025-06-25T14:31:11.221897Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Q1" 2025-06-25T14:31:11.235747Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:136:2160], cookie=4198346343773263693) 2025-06-25T14:31:11.236408Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:145:2167], cookie=1439032931015977617, path="/Q2", config={ MaxUnitsPerSecond: 10 }) 2025-06-25T14:31:11.236620Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Q2" 2025-06-25T14:31:11.248726Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:145:2167], cookie=1439032931015977617) 2025-06-25T14:31:11.250495Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:150:2172]. Cookie: 2700372655390259023. 
Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Q1" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 10 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-25T14:31:11.250586Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:150:2172], cookie=2700372655390259023) 2025-06-25T14:31:11.251436Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:150:2172]. Cookie: 1447242913105330080. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Q1" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 10 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Q2" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 10 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } Results { Error { Status: NOT_FOUND Issues { message: "Resource \"/Q3\" doesn\'t exist." 
} } } ProtocolVersion: 1 } 2025-06-25T14:31:11.251492Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:150:2172], cookie=1447242913105330080) >> TBackupTests::ShouldSucceedOnLargeData[Zstd] [GOOD] >> TPartitionTests::CorrectRange_Commit >> TBackupTests::ShouldSucceedOnLargeData[Raw] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateWithNoEqualName [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:31:10.109102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:31:10.109192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:31:10.109229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:31:10.109268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:31:10.109312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:31:10.109342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:31:10.109411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:31:10.109494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:31:10.110227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:31:10.110578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:31:10.206921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:31:10.206988Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:10.227500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:31:10.227993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:31:10.228184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:31:10.234711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:31:10.235113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:31:10.235766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:10.236096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:31:10.239755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:31:10.239951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:31:10.241204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:31:10.241266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:31:10.241441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:31:10.241490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:31:10.241547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:31:10.241662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:31:10.248837Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:31:10.379621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:31:10.379904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:10.380134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:31:10.380200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:31:10.380503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:31:10.380594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, 
but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:31:10.384984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:10.385208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:31:10.385431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:10.385491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:31:10.385565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:31:10.385612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:31:10.387926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:10.387985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:31:10.388027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:31:10.390225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:10.390296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:10.390360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:31:10.390411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:31:10.394523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:31:10.396690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:31:10.396887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 
State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:31:10.397783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:10.397918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:31:10.397976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:31:10.398310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:31:10.398379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:31:10.398544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:31:10.398645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:31:10.400968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:31:10.401022Z node 1 :FLAT_TX_SCHEMESHARD ... 
perationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: "USER_3" } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:31:11.496062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/USER_3, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.496215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 108:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/USER_3', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeSubDomain, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-25T14:31:11.498457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 108, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/USER_3\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeSubDomain, state: EPathStateNoChanges)" TxId: 108 SchemeshardId: 72057594046678944 PathId: 5 PathCreateTxId: 106, at schemeshard: 72057594046678944 2025-06-25T14:31:11.498687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/USER_3', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeSubDomain, state: EPathStateNoChanges), operation: CREATE DIRECTORY, path: /MyRoot/USER_3 TestModificationResult got TxId: 108, wait until txId: 108 2025-06-25T14:31:11.499270Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:31:11.499499Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 208us result status StatusSuccess 2025-06-25T14:31:11.499912Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 
} } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:31:11.500594Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:31:11.500798Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1" took 205us result status StatusSuccess 2025-06-25T14:31:11.501275Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1" PathDescription { Self { Name: "USER_1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000005 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "USER_1" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: 
"!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:31:11.502035Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:31:11.502193Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_2" took 166us result status StatusSuccess 2025-06-25T14:31:11.502509Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_2" PathDescription { Self { Name: "USER_2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 104 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:31:11.503166Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:31:11.503364Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_3" took 184us result status StatusSuccess 2025-06-25T14:31:11.503716Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_3" PathDescription { Self { Name: "USER_3" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 106 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 } DomainKey { SchemeShard: 72057594046678944 PathId: 5 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 5 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeleteAndRestart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:31:10.590963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:31:10.591027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:31:10.591052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:31:10.591080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:31:10.591109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:31:10.591130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:31:10.591169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:31:10.591222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:31:10.591837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:31:10.592164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:31:10.672360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:31:10.672411Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:10.696013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:31:10.696493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:31:10.696659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:31:10.704172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:31:10.704558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:31:10.705072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:10.705268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:31:10.709134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:31:10.709352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:31:10.710617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:31:10.710684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:31:10.710828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:31:10.710874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:31:10.710914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:31:10.711148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:31:10.718230Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:31:10.889978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:31:10.890177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:10.890344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:31:10.890376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:31:10.890563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:31:10.890622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:31:10.892569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:10.892813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:31:10.893002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:10.893053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:31:10.893115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:31:10.893156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:31:10.894832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:10.894879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:31:10.894917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:31:10.896184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:10.896237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:10.896288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:31:10.896369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:31:10.905073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:31:10.907093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:31:10.907243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:31:10.907977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:10.908089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:31:10.908127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:31:10.908366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:31:10.908408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:31:10.908533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:31:10.908600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:31:10.910369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:31:10.910406Z node 1 :FLAT_TX_SCHEMESHARD ... 
ySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:31:11.394871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:31:11.394936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:31:11.394972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:31:11.395002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:31:11.395026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:31:11.395083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:31:11.395152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:31:11.395771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:31:11.396073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:31:11.411785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:31:11.413034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:31:11.413213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:31:11.413369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:31:11.413411Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:11.413562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:31:11.414112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:11.414200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.414250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.414598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.414689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:31:11.414913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 
2025-06-25T14:31:11.415007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.415131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.415271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.415382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.415569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.415794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.415877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.416194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.416263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.416477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.416633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.416720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.416890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.416975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.417150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.417377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.417476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.417648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.417699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.417743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.423217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:31:11.424547Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:31:11.424597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:31:11.426039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:31:11.426104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:31:11.426150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:31:11.426900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:635:2546] sender: [1:694:2058] recipient: [1:15:2062] 2025-06-25T14:31:11.480104Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:31:11.480366Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 283us result status StatusPathDoesNotExist 2025-06-25T14:31:11.480542Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:31:11.481188Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:31:11.481350Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 168us result status StatusSuccess 2025-06-25T14:31:11.481775Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> DataShardSnapshots::ShardRestartLockBasic [GOOD] >> DataShardSnapshots::ShardRestartAfterDropTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::AwsRegion [GOOD] Test command err: 2025-06-25T14:28:27.449226Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:28:27.449367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:28:27.449427Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00129a/r3tmp/tmpLz0WRC/pdisk_1.dat 2025-06-25T14:28:27.829092Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:28:27.832387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:27.891515Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:27.896077Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861704573808 != 1750861704573812 2025-06-25T14:28:27.945328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:27.945491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:27.957626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:28.061667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:28.143559Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:28:28.143764Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:28:28.188054Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:28:28.188186Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:28:28.189879Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:28:28.189965Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:28:28.190017Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:28:28.190433Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:28:28.190579Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:28:28.190665Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:28:28.201744Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:28:28.246499Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:28:28.246761Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:28:28.247107Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:28:28.247166Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:28:28.247250Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:28:28.247289Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:28.247874Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:28:28.248001Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:28:28.248098Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:28.248139Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:28.248181Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:28:28.248234Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:28.248755Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:28:28.248908Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:28:28.249183Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:28:28.249278Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:28:28.251189Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:28.262043Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:28:28.262185Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:28:28.417419Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:28:28.423705Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:28:28.423811Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:28.424908Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:28.424974Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:28:28.425048Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:28:28.425350Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:28:28.425520Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:28:28.426083Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:28.426154Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:28:28.431662Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:28:28.432285Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:28.434263Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:28:28.434328Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:28.434921Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:28:28.434999Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:28.435815Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:28.435869Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:28:28.435951Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:28:28.436037Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:28:28.436117Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:28:28.436194Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:28.439040Z node 1 :CHANGE_EXCHANGE 
DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:645:2541][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-06-25T14:28:28.444484Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:28.446656Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:28:28.446735Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:28:28.447624Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:28:32.054740Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:28:32.054919Z node 2 :METADATA ... 037891' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' size 426 >>>>> GetRecords path=/Root/Table/Stream1 partitionId=0 2025-06-25T14:31:08.937616Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-25T14:31:08.937756Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream1/streamImpl' partition 0 2025-06-25T14:31:08.938634Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 0 max time lag 0ms effective offset 0 2025-06-25T14:31:08.938736Z node 23 :PERSQUEUE DEBUG: subscriber.cpp:68: waiting read cookie 2 partition 0 user $without_consumer offset 0 count 10000 size 26214400 timeout 0 2025-06-25T14:31:08.938895Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 0 blobs 2025-06-25T14:31:08.939031Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:678: [PQ: 72075186224037889, Partition: 0, State: StateIdle] waiting read cookie 2 partition 0 read timeout for $without_consumer offset 0 2025-06-25T14:31:08.939174Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:31:08.950628Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037891, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 341 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:08.950844Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037891, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:08.951053Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037891, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream2/streamImpl', Partition: 0, SeqNo: 2, partNo: 0, Offset: 0 is stored on disk 2025-06-25T14:31:08.951659Z node 23 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037891, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=426, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:08.951839Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 342 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:08.951903Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-25T14:31:08.951969Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream1/streamImpl', Partition: 0, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-06-25T14:31:08.952213Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 1 2025-06-25T14:31:08.952381Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 1 2025-06-25T14:31:08.952507Z node 23 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037889, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=427, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:08.952641Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'streamImpl' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-25T14:31:08.952977Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'streamImpl' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-25T14:31:08.953138Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 3 Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-06-25T14:31:08.953807Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 3 added 1 blobs, size 427 count 1 last offset 0, current partition end offset: 1 2025-06-25T14:31:08.953941Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 3. Send blob request. 2025-06-25T14:31:08.954387Z node 23 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][23:1125:2667] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 1 Offset: 0 WriteTimestampMS: 2523 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 1 } } } 2025-06-25T14:31:08.954587Z node 23 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 427 accessed 0 times before, last time 1970-01-01T00:00:02.000000Z 2025-06-25T14:31:08.954699Z node 23 :PERSQUEUE DEBUG: read.h:121: Reading cookie 3. All 1 blobs are from cache. 
2025-06-25T14:31:08.954844Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-25T14:31:08.955226Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 407 from pos 0 cbcount 1 2025-06-25T14:31:08.955516Z node 23 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][23:829:2667] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-25T14:31:08.955746Z node 23 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037891][23:1126:2765] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 2 Offset: 0 WriteTimestampMS: 2523 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 1 } } } 2025-06-25T14:31:08.955899Z node 23 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:31:08.956064Z node 23 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][23:983:2765] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-25T14:31:08.956173Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer readTimeStamp done, result 2523 queuesize 0 startOffset 0 2025-06-25T14:31:08.958494Z node 23 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 2, at tablet# 72075186224037888 2025-06-25T14:31:08.958632Z node 23 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 1, at tablet: 72075186224037888 2025-06-25T14:31:08.958833Z node 23 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 2, at tablet: 72075186224037888 2025-06-25T14:31:08.971756Z node 23 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 2, left# 0, at tablet# 72075186224037888 >>>>> GetRecords path=/Root/Table/Stream1 partitionId=0 2025-06-25T14:31:09.237529Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-25T14:31:09.237621Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream1/streamImpl' partition 0 2025-06-25T14:31:09.237790Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 4 Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 1 max time lag 0ms effective offset 0 2025-06-25T14:31:09.238426Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 4 added 1 blobs, size 427 count 1 last offset 0, current partition end offset: 1 2025-06-25T14:31:09.238557Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 4. Send blob request. 2025-06-25T14:31:09.238757Z node 23 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. 
Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 427 accessed 1 times before, last time 1970-01-01T00:00:02.000000Z 2025-06-25T14:31:09.238881Z node 23 :PERSQUEUE DEBUG: read.h:121: Reading cookie 4. All 1 blobs are from cache. 2025-06-25T14:31:09.239028Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-25T14:31:09.239392Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 407 from pos 0 cbcount 1 2025-06-25T14:31:09.240282Z node 23 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:31:09.240464Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >>>>> GetRecords path=/Root/Table/Stream2 partitionId=0 2025-06-25T14:31:09.242307Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-25T14:31:09.242437Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037891] got client message batch for topic 'Table/Stream2/streamImpl' partition 0 2025-06-25T14:31:09.243434Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037891, Partition: 0, State: StateIdle] read cookie 2 Topic 'Table/Stream2/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 1 max time lag 0ms effective offset 0 2025-06-25T14:31:09.244024Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037891, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 426 count 1 last offset 0, current partition end offset: 1 2025-06-25T14:31:09.244144Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037891, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-25T14:31:09.244328Z node 23 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 426 accessed 0 times before, last time 1970-01-01T00:00:02.000000Z 2025-06-25T14:31:09.244438Z node 23 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-25T14:31:09.244579Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-25T14:31:09.244902Z node 23 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 406 from pos 0 cbcount 1 2025-06-25T14:31:09.245699Z node 23 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037891' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:31:09.245894Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >> TopicAutoscaling::Simple_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::Simple_PQv1 >> TPartitionTests::CorrectRange_Commit [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentAlterStream [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentDropStream >> TPartitionTests::CorrectRange_Multiple_Consumers >> Cdc::SequentialSplitMerge [GOOD] >> Cdc::MustNotLoseSchemaSnapshot >> KqpScanArrowInChanels::AggregateByColumn [GOOD] >> TPartitionTests::CorrectRange_Multiple_Consumers [GOOD] >> TPartitionTests::IncorrectRange ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:23.811291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:23.811366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:23.811415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:23.811457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:23.811492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:23.811511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:23.811548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:23.811635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:23.812213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:23.812675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:23.888869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:23.888947Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:23.906546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 
2025-06-25T14:30:23.906931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:23.907090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:23.912786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:23.913174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:23.913804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:23.914045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:23.917245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:23.917422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:23.918545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:23.918601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:23.918922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:23.918974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:23.919020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:23.919112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.926390Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:24.087629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:24.087887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.088179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:24.088235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 
72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:24.088506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:24.088593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:24.094544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.094743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:24.094955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.095007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:24.095056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:24.095088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:24.101279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.101351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:24.101409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:24.105775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.105851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.105896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.105987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:24.108517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:24.110700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 
72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:24.110845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:24.111662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.111767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:24.111806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.112000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:24.112033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.112153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:24.112223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:24.114131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:24.114175Z node 1 :FLAT_TX_SCHEMESHARD ... 
:3459:5420], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } REQUEST: PUT /data_00.csv?partNumber=100&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:11650 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 0E681214-1506-47EB-8A7B-63CDAFD857CE amz-sdk-request: attempt=1 content-length: 130 content-md5: Wyd1w7MZYbbZucaVvuRDAw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / partNumber=100&uploadId=1 / 130 2025-06-25T14:31:11.492441Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:592: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3460:5421], result# UploadPartResult { ETag: 5b2775c3b31961b6d9b9c695bee44303 } 2025-06-25T14:31:11.492663Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3459:5420] 2025-06-25T14:31:11.492750Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3460:5421], sender# [1:3459:5420], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv?partNumber=101&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:11650 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 3D5A929D-77D2-4B85-9B70-50DBB1B9527B amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / partNumber=101&uploadId=1 / 0 2025-06-25T14:31:11.495682Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:592: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3460:5421], result# UploadPartResult { ETag: d41d8cd98f00b204e9800998ecf8427e } 2025-06-25T14:31:11.495735Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:3460:5421], success# 1, error# , multipart# 1, uploadId# 1 2025-06-25T14:31:11.501682Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:526: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [1:3460:5421], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: 
[a59dd9a97cf3685e69093fb2d96653c6,bdbb215613239cb3a835fee1fe7e7ca3,cb38dbc776d5763f1926dfb22d508c87,3c430d66d07a0a4b1fa889f321fce197,43baf91083f286b60bf15e7786459cd9,90b5581bef612fa3bf9b38b336af405f,fd4869c26a12d22ee79256d778954d04,a9459bc28198b0b6bd67732c492fd740,697a3f8386ea1ff4e327de943224cb1a,614da0b4ec9464e69cd0c59909e80fbb,9b94eb3f67aa4c8a0bcbf546833ed966,fd45c3afacec641ad19e59d2b31aeba4,fd69678aecbc149601f58cf13c64d33e,90c09ab4923bc9f97f825d36e32bf362,c1586416a281a4cca2b2b4e333d9b079,f31908576272623f9f0a19bf774cde8e,6fe3b42388304d2af07c629aeb683581,7bc90eec21ca5bb3648e6a48e83c5730,8e1dda26de1af89bdffe2eefdcebea1d,14dc42d90caa1575bbfffa9dc8f21d66,92efb2368eecb32d4075c09294fde0b7,98efff5f7c7ecb42e7af65142ce05af9,6206c81807b3b9283b0173ee2c682100,616b431b91aedc9de4593321eb42ba96,9ae4762563ffdec596cc9ca4cb8913e1,946ebf2d95b4796ea2faee21f017be79,45834a9948bb4ab8b62d1894156d13ed,6ad3fe7286856927c1e00422bc8da697,ef89464d20eae46829e1bf557e4d04ce,f128e5de32097d205453080b01c94ac3,c13e650ee2cfcecfdf4f578a2e5b1c2d,fc26314711b25d20fc654cf59301b806,56f6f2c574fba86496a87a7dd5fab46c,c7951eace72cfe0f14f808173e07bc64,3d9ad3340e58b973eaf8d4f14ba3b0f9,fc41d6fdfb52389dda8b26d7a0a3a889,9974b6ae96ffd0b756acb67088e890f9,cde8a5604010abe8fccfa9492144036f,0364e048eaac35c26d48b0c5072b5255,aac5a84927124d6ae4931e2650c80d9f,eab068fe4ca35c2f3e35890bd727eb4f,bc3646bdbcbc7f97dcddf2202ea9421f,6d3f63d672eda4a4617c9e7589a68bfc,0401bade6c3031b5be872238520b993a,1c6405688f86423480173e3e316a20bd,52395f68e877cbb8d7115a247331b0a7,4b0673ac18058554d2c53bf9f99b34b2,87bc1b9e650b31e81a9ad2531e3ef9da,b29053c8cd093c8b92ad3954c42cb7be,faf1084f6b33b00e2e822d1d3c3f0083,eedec03ee8d7eda4654db7206ad0889e,be4469dd028d5519a67098055f25513f,a7afa9827ec27c565cff1ed505a06f4b,91fe8109d2ad934c4364d90c29aaba71,73b81ea00e11db12d66497d30eb48446,cce69ef69777afeab34eefa515abc7f4,4e4ac1a421353964356400b8be8e21da,32cd6083b12660bcd4062af08d89eb05,71957b9db37811c7680638b82dc6384b,a8787e692c423a2dfa07dd261e72790a,283838ab16206b27738ea6653110f833,88bf084fb3029f0d5c0705eece930d70,1ed2f9f7221f1718b81fdf2d846347dd,406706cfbc454922dcad50b9c534b8d1,dbb606c993d798974ed4f5c9ebf195ca,1a4a3868dc6fa26c6b019d237f9ea6f4,82660a3c6b576a1b3fea925f3c179a2e,d393db2749ae42e854e85eeec2ea3592,b42c92ad14ee0e5351fec7e5a045a91b,2c7af27f9dc77efbcbe71c2d7997d6e9,278aba62ab1d9e3ff16df2d82ac5f5c7,6b8380404a7e7ec95ad5f3941d5d404c,c9813b9fc1d6b5087e64849076edd0f8,160785e4dac02a91c43a497ee59eea06,db529a9ba22f60f404031cfe85e966e9,9b70af168e2d3769bd8bc4dffa3202ea,9ac39c3843b6621ace44acf430a59e06,4603ff564a46e93951f246ed18926071,66b85f35ee76a7f71f50e9aad56758de,1665c284ad04d6b893b69372bf8fc6b9,8c1c27ec88fb52f06de6e7516a392672,0a5f992db51277a05ec12f0d6459ef21,8debe3a6023155561cb0890fc05bd7fb,938ece258b7596f8eea7e82bc2b8f88c,767ca0dcf0b154fa3c818044bbfc58fd,914cc7165d994bb05824332ac120446f,ab0ece250f5959a510170ee07aa21b5d,8bf4b44d67f062026b0010a8a0b39cc0,e0aa13fa8246e68c18905d3abadfc44d,27b021b75b6a95f63ea27f7ec238c05f,673e661e4cfea1e431678dd9881c2a8c,f101b34943f1831ae8c0b46ffcb1c2d6,562b32a8142b29c1a88e507ab1981a6b,fdea4c6fc2befb44614992ca8bf34b21,b7c8ec6acc45b037978482996e910b75,aec72fbd2e171b798900b22897d00941,710ef5b5e8eba750b6acc9b32dff42a3,821c7e22ef9c22098171e7f837dcfcc8,aecc9f6d0e6f54e938a10d40fda96d7b,5b2775c3b31961b6d9b9c695bee44303,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:11650 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 
B7F03C22-5134-4E5D-BBF3-FF8C042B55A9 amz-sdk-request: attempt=1 content-length: 11529 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv / uploadId=1 2025-06-25T14:31:11.592881Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:623: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [1:3460:5421], result# CompleteMultipartUploadResult { Bucket: Key: data_00.csv ETag: 5d8c28efc812b445ddd02900ff3ee599 } 2025-06-25T14:31:11.593244Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3459:5420], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-25T14:31:11.608209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-25T14:31:11.608300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:31:11.608508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-25T14:31:11.608623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-25T14:31:11.608699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:11.608748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.608789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:31:11.608836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:31:11.609010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:31:11.613881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.614519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.614580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:31:11.614702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:31:11.614748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:31:11.614800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:31:11.614836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:31:11.614879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:31:11.614987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 102 2025-06-25T14:31:11.615059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:31:11.615099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:31:11.615136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:31:11.615267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:31:11.619579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:31:11.619638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3445:5407] TestWaitNotification: OK eventTxId 102 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:30:23.866205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:30:23.866303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:23.866350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, 
StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:30:23.866384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:30:23.866425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:30:23.866449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:30:23.866492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:30:23.866598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:30:23.867270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:30:23.867565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:30:23.943962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:30:23.944041Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:23.963337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:30:23.963714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:30:23.963855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:30:23.969012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:30:23.969346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:30:23.969939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:23.970166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:30:23.973076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:23.973230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:30:23.974237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:23.974286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:30:23.974410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:30:23.974459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: 
TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:30:23.974500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:30:23.974585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:30:23.980616Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:30:24.110652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:30:24.110890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.111104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:30:24.111158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:30:24.111436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:30:24.111520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:24.113962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.114140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:30:24.114358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.114413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:30:24.114460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:30:24.114492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:30:24.120978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.121045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:30:24.121097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:30:24.126876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.126970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:30:24.127039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.127146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:30:24.132381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:30:24.137428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:30:24.137634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:30:24.138649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:30:24.138798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:30:24.138848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.139118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:30:24.139170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:30:24.139324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:30:24.139415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant 
no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:30:24.145577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:30:24.145627Z node 1 :FLAT_TX_SCHEMESHARD ... mr::NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } REQUEST: PUT /data_00.csv.zst?partNumber=100&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:2839 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: E4F05C34-C90D-4B7D-A167-8E0B1DDBFD3F amz-sdk-request: attempt=1 content-length: 55 content-md5: B5SOCmjwb1RI3tHamcoRHA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv.zst / partNumber=100&uploadId=1 / 55 2025-06-25T14:31:11.497718Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:592: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3460:5421], result# UploadPartResult { ETag: 07948e0a68f06f5448ded1da99ca111c } 2025-06-25T14:31:11.497978Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3459:5420] 2025-06-25T14:31:11.498092Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3460:5421], sender# [1:3459:5420], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst?partNumber=101&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:2839 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: FEED65FC-B60C-40DC-B7A6-F1EF60EDE332 amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv.zst / partNumber=101&uploadId=1 / 0 2025-06-25T14:31:11.501008Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:592: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3460:5421], result# UploadPartResult { ETag: d41d8cd98f00b204e9800998ecf8427e } 2025-06-25T14:31:11.501062Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:716: [Export] [s3] Finish: self# [1:3460:5421], success# 1, error# , multipart# 1, uploadId# 1 2025-06-25T14:31:11.506833Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:526: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [1:3460:5421], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: 
[f8f51a1e4a70db44fa91cc2ab9680824,9eba675fd7f187274786dff2f47292df,921325fb6b8811df3d06a44dbe1f8523,4eeb6b90e8e61075275bd8a42f56bd69,2840a487abe8cb9502b3d9c8a8e1c942,607d8f6e3b235a360d63796efd3a51c2,ed22e08df7fb8840f7cabc779cc86885,efeff2c7731061edd9a39059cc078045,4af01cb3455932f28e3bba713dcd57c9,dc94d36ecf3b36d183d75c84b9b2fac6,e2ce425dd2bb582abcc13d0d714c3554,b71e46686939d2cdf046520dd2774281,ab731a82a161e5e044b24e895a1713d6,1df51aaec89711e13a6f95c13113e36c,b6066b2ed343831b1b0ee0076179981e,332d34d77adc2b024a33d87e07d4233f,cf0093cc99590a0e8f9c199ed6deca07,8cc923ec76224e69263ac93b7bfabd30,690d66897e0780f2dfe3614e5a659a22,7502aae0ec253663b1cbfdc8ede92ab9,7d2c6f728ee0c12097dfe5441970b946,5fc7b9b675e0a125eea67cf05f82627f,fc8c5faa99cc7f4ce7ca320f8e7adb58,8e305c5aca758683ff25407a7bbd9220,181bce9c6393e22a0ac359a7b45d8187,639677548f0a8b776a6db92f44d96505,390ff8f57cfa4c04bfbed0d7a63c90e8,3dd76756e6558fd6c8c918210f7dc136,a3f5254fdad3ded54edef910e704c151,e9186373f80dbaa55dd04d07621de277,8898b965060a431b499261ec0cd3cee3,3ed51c736e64defe04980ce328b17aa4,bb0e45971888796588c12ea1c1bec162,e2b3defa84005d3892986ca6894b811f,656c7c809c8c8485f6e91892591cd284,779c6827126f255bde25ae242bf4c8ff,8883fc9b073e683558f1231c5f2142d0,19390a0e3340bcb6ccfe866a790f05cb,305182d3e9745fba3aad1973bb1bfc93,002819d72a6dc7954ecc1bcd2bd20254,325c6bc3cdd6fd83083cf0126c606218,b86932903843b9626e80bd9ccb5d0571,b5054116537a7c467bdb488c9d67dee7,fc3a45bd17a00b147e4f9c55bc2493da,1118e2f41e8839211163250796a65dce,b403ff17c2c269a79201a03ce439dc2a,88f2692ee439cfadef1cd21d58aac8d3,e5bef12f89b101af84d52299a5867d99,ed613335180c53f69d450ef8b176a4d5,150fd7dcdc86eb38c7f821ff4698d8bc,a0c18bf08acc6ebecac04a2520efee9b,e8463d7ce8f502d1575a433c1b30a9af,f123e0fc879e2fdc2c3e2f698fc4176d,d7ab79d73e4648e0a2bf8dec3a19c019,4e74b82f6a8ea7fad8790ee7dfcdb76e,f72bb1d8aa0f5c9265bae10a3784d8e8,924b317371d16363a37962b17a2ae4bb,7214b458c7e25c791e54bd430b835a6e,e79dba1b56122372af3fe7b06ea91bda,6aae345b94d78fc7c1ed0b8697cf5e62,fd3636ed699facb5f0c12f81741cabc5,2c4a198408c3eb9577fcd339ca62c539,59fbf761f9b7574b65fa6877b167bb8c,14f9f5cfdf3a6c33c577a54429b19cb6,c6d078b3be9cd7943e8145fd982baeef,198f55ae25539fbd54a4a6075beac2d1,939123b44e362c76a151a85af0247fb7,0147f8bd741be7780cbc900b6f4b0899,43453200aeaf201420737354cd73cfe4,de26d1339779fe0c538d01d5963fd423,5c903650e719f959dc9f37ea360c6319,23607b3f36e0a2abae7f1ed8e38596f3,0db9af920c6d1cf868e470bf7a349747,aed6ac19c60d08500582eea9dadcdfee,3f4e37ddd3e2e56a725323fad4d85cf6,942b269af420b4277d025cea489dcb25,89eddc25ba615b6cf09b9cd9a11a16bb,1d8e7f0613dc1919ee90133c468380bd,8bf1e4c1266d8437c1bd85e0fca6640a,e9eabcf5b61cf257f530b156dbd77a88,411f1661ae7650d2144e8c6f8a33b28f,6706ec5b8771e555779d5cbeca41aa75,b3a33ef21a8224ddc78a52e8d7ca8357,58749d344f42c192e572eda4ee66fb01,381aeb5ee3014e2c0fd9b85bd59ce005,9aed2297cd10dce10d68de3ff1830b42,be88e095fc3a13708b714db03b1f2744,5628e81ee17fb22fc828ed1b2169578b,a1cfb563fa4af884fe02ced05c26c881,fc602b8ee2e9746fb52823f8fd1f0f28,a1de256e94c7baa9b8ab905c892d1a14,6bff895b0b5f3552ad4bdc61b0d24148,fcba1d258a8651d831767b42e010e439,bef6e3d7088e671809fe584531f96971,f0b489242271d11200dbdbc78e4ce715,372d2d6877fff7c04433e492ad4dbd45,32191cf1972dcccd59c0b5a8b53d4f23,25928b7997b97ac58f18fbbe589573e8,472e53a27497661c6400410909405c4e,07948e0a68f06f5448ded1da99ca111c,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv.zst?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:2839 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 
19F706DA-9557-4392-9D4D-C9B2DC80AAC8 amz-sdk-request: attempt=1 content-length: 11529 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv.zst / uploadId=1 2025-06-25T14:31:11.588093Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:623: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [1:3460:5421], result# CompleteMultipartUploadResult { Bucket: Key: data_00.csv.zst ETag: c902b621cdd1ee89b9f1c4e6c36e6e45 } 2025-06-25T14:31:11.588447Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3459:5420], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-25T14:31:11.610661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-25T14:31:11.610732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:31:11.610914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-25T14:31:11.611037Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-25T14:31:11.611109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:11.611158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.611203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:31:11.611251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:31:11.611410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:31:11.615284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.615570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:31:11.615624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:31:11.615739Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:31:11.615777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:31:11.615819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:31:11.615863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:31:11.615904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:31:11.615972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 102 2025-06-25T14:31:11.616040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:31:11.616085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:31:11.616121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:31:11.616245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:31:11.619581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:31:11.619639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3445:5407] TestWaitNotification: OK eventTxId 102 >> TPartitionTests::ConflictingTxIsAborted >> TPartitionTests::ConflictingTxIsAborted [GOOD] >> TSourceIdTests::SourceIdWriterAddMessage [GOOD] >> TSourceIdTests::SourceIdWriterClean [GOOD] >> TSourceIdTests::SourceIdWriterFormCommand >> TPartitionTests::DifferentWriteTxBatchingOptions >> TSourceIdTests::SourceIdWriterFormCommand [GOOD] >> TTypeCodecsTest::TestBoolCodec [GOOD] >> TTypeCodecsTest::TestDeltaVarIntCodecAndRev [GOOD] >> TPartitionTests::ConflictingTxProceedAfterRollback >> TSourceIdTests::SourceIdStorageAdd [GOOD] >> TSourceIdTests::ProtoSourceIdStorageParseAndAdd [GOOD] >> TSourceIdTests::SourceIdStorageComplexDelete [GOOD] >> TSourceIdTests::HeartbeatEmitter [GOOD] >> TSourceIdTests::SourceIdMinSeqNo [GOOD] >> TPartitionTests::IncorrectRange [GOOD] >> TPartitionTests::GetPartitionWriteInfoSuccess ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowInChanels::AggregateByColumn [GOOD] Test command err: Trying to start YDB, gRPC: 29502, MsgBus: 14632 2025-06-25T14:30:43.615799Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894356501973012:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:43.656808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d9e/r3tmp/tmpd5r19p/pdisk_1.dat 2025-06-25T14:30:44.425062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:44.425162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:44.432647Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:44.449802Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894356501972794:2080] 1750861843487983 != 1750861843487986 2025-06-25T14:30:44.477871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29502, node 1 2025-06-25T14:30:44.707453Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:44.715825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:44.715846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:44.715853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:44.716000Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14632 TClient is connected to server localhost:14632 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:45.363336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:30:45.389506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:30:45.403352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:45.615022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:45.838002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:45.924238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:47.744863Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894373681843607:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:47.744964Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:48.113297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.237137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.272731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.313655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.363480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.444576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.515770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.588411Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894356501973012:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:48.588473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:48.607124Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519894377976811573:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:48.607201Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:48.607674Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894377976811578:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:48.612123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:48.623311Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894377976811580:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:30:48.696886Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894377976811631:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:50.115185Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861850143, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 62593, MsgBus: 19909 2025-06-25T14:30:50.997453Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline= ... _base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:31:05.059829Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861864682, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 63019, MsgBus: 5814 2025-06-25T14:31:06.145126Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894458233761481:2147];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d9e/r3tmp/tmpbdbmEV/pdisk_1.dat 2025-06-25T14:31:06.347520Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:31:06.452199Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:06.455546Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519894458233761351:2080] 1750861866082980 != 1750861866082983 2025-06-25T14:31:06.467856Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:06.468236Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:06.473861Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63019, node 4 2025-06-25T14:31:06.664991Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:06.665019Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:06.665031Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:06.665175Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5814 2025-06-25T14:31:07.099571Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5814 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:07.220244Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:07.230489Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:07.311524Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:07.488784Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:07.560495Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:09.774397Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894471118664871:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:09.774488Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:09.828357Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:09.866007Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:09.904292Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:09.944976Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:09.986782Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:10.030901Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:10.079859Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:10.206792Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894475413632828:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:10.206904Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:10.207236Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894475413632833:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:10.211676Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:10.239698Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894475413632835:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:31:10.319744Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894475413632886:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:11.089612Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894458233761481:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:11.089709Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:31:12.456033Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861871815, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TTypeCodecsTest::TestDeltaVarIntCodecAndRev [GOOD] Test command err: Size: 128 Create chunk: 0.000038s Read by index: 0.000016s Iterate: 0.000014s Size: 252 Create chunk: 0.000042s Read by index: 0.000018s Iterate: 0.000019s Size: 1887 Create chunk: 0.000083s Read by index: 0.000131s Iterate: 0.000086s Size: 1658 Create chunk: 0.000109s Read by index: 0.000118s Iterate: 0.000079s Size: 1889 Create chunk: 0.000090s Read by index: 0.000098s Iterate: 0.000040s Size: 1660 Create chunk: 0.000097s Read by index: 0.000077s Iterate: 0.000051s >> KqpScanArrowFormat::AggregateWithFunction [GOOD] |77.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TSourceIdTests::SourceIdMinSeqNo [GOOD] >> KqpScanArrowFormat::AggregateEmptySum [GOOD] >> TPQTabletTests::DropTablet_And_Tx >> DataShardVolatile::DistributedWriteThenReadIteratorStream [GOOD] >> DataShardVolatile::DistributedWriteThenScanQuery >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionActive_BoundaryTrue_Test >> DataShardSnapshots::DelayedWriteReadableAfterSplitAndReboot [GOOD] >> DataShardSnapshots::BrokenLockChangesDontLeak |77.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_backup/test-results/unittest/{meta.json ... 
results_accumulator.log} >> ReadLoad::ShouldReadKqpMoreThanRows [GOOD] >> TPartitionTests::GetPartitionWriteInfoSuccess [GOOD] >> TPQTabletTests::DropTablet_And_Tx [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowFormat::AggregateWithFunction [GOOD] Test command err: Trying to start YDB, gRPC: 13722, MsgBus: 22860 2025-06-25T14:30:44.865675Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894360083963483:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:44.866728Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d99/r3tmp/tmpPTalwe/pdisk_1.dat 2025-06-25T14:30:45.358549Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894360083963453:2080] 1750861844864755 != 1750861844864758 2025-06-25T14:30:45.370966Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:45.414037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:45.414169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:45.418152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13722, node 1 2025-06-25T14:30:45.616885Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:45.616906Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:45.616914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:45.617029Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22860 2025-06-25T14:30:45.914351Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22860 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:30:46.299592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:46.329679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:46.465136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:46.645376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:46.728733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:48.462090Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894377263834300:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:48.462182Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:48.724933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.794249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.886758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.962293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:49.020459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:49.099897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:49.190462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:49.286870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894381558802259:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.286950Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.287636Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894381558802264:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.292050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:49.304409Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894381558802266:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:30:49.370182Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894381558802317:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:49.866620Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894360083963483:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:49.866678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:50.627330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 864000000000 2025-06-25T14:30:51.141353Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: ... : Actor# [3:7519894440608137560:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:06.888332Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861864633, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 16019, MsgBus: 6369 2025-06-25T14:31:07.664848Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894458696284154:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:07.664939Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d99/r3tmp/tmpARn3Q3/pdisk_1.dat 2025-06-25T14:31:07.806853Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:07.820099Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:07.820197Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 16019, node 4 2025-06-25T14:31:07.826291Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:07.862254Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:07.862276Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:07.862286Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:31:07.862402Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6369 TClient is connected to server localhost:6369 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:08.416359Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:08.431903Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:08.525668Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:08.682075Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:08.728199Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:08.810029Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:31:11.284643Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894475876154940:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:11.284729Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:11.339147Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:11.376092Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:11.431601Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:11.462266Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:11.504089Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:11.575315Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:11.610241Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:11.669617Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894475876155598:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:11.669706Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:11.669772Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894475876155603:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:11.673351Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:11.688973Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894475876155605:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:31:11.767252Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894475876155658:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:12.665899Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894458696284154:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:12.665995Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:31:13.940916Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861873509, txId: 281474976715672] shutting down >> TPartitionTests::GetPartitionWriteInfoError >> TPQTabletTests::DropTablet_Before_Write >> DataShardVolatile::DistributedWriteShardRestartBeforePlan-UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation+UseSink >> TPQTest::TestUserInfoCompatibility >> TPQTabletTests::DropTablet_Before_Write [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowFormat::AggregateEmptySum [GOOD] Test command err: Trying to start YDB, gRPC: 28276, MsgBus: 30518 2025-06-25T14:30:42.114999Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894352004958348:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:42.116122Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dd2/r3tmp/tmpQqku16/pdisk_1.dat 2025-06-25T14:30:42.560027Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:42.560145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:42.562578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:42.592413Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894352004958320:2080] 1750861842101128 != 1750861842101131 2025-06-25T14:30:42.630589Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28276, node 1 2025-06-25T14:30:42.831142Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:42.831165Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:42.831171Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:42.831294Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:43.156843Z node 1 :TX_CONVEYOR ERROR: 
log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30518 TClient is connected to server localhost:30518 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:43.830924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:43.895122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:44.199848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:44.457373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:44.552154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:46.477639Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894369184829129:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:46.477804Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:46.792455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:46.838230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:46.875777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:46.916770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:46.985972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:47.026417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:47.067962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:47.113029Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894352004958348:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:47.113089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:47.199844Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519894373479797091:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:47.199940Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:47.200371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894373479797096:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:47.206461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:47.220518Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894373479797098:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:47.300347Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894373479797149:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:49.777674Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861849247, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 28009, MsgBus: 23266 2025-06-25T14:30:50.718416Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894387253727924:2243];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build ... se.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:31:07.184101Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861864850, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 17667, MsgBus: 25504 2025-06-25T14:31:07.973017Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894462402506200:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:07.973122Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dd2/r3tmp/tmpIOAAmV/pdisk_1.dat 2025-06-25T14:31:08.104325Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:08.104927Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519894462402506181:2080] 1750861867972570 != 1750861867972573 2025-06-25T14:31:08.121360Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:08.121447Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:08.123278Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17667, node 4 2025-06-25T14:31:08.172044Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:08.172077Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:08.172090Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:08.172239Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25504 TClient is connected to server localhost:25504 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:08.761912Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:31:08.776341Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:08.864792Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:09.011341Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:09.071752Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:09.144275Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:11.709690Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894479582376990:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:11.709852Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:11.774695Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:11.832399Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:11.873615Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:11.922364Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:11.961183Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:12.002600Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:12.039357Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:12.124533Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894483877344941:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:12.124621Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:12.124822Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894483877344946:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:12.128461Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:12.137897Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894483877344948:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:31:12.230129Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894483877344999:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:12.973507Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894462402506200:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:12.973605Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:31:14.221139Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861873866, txId: 281474976715672] shutting down >> TPartitionTests::CorrectRange_Multiple_Transactions >> TPQTabletTests::DropTablet_And_UnplannedConfigTransaction >> TFetchRequestTests::HappyWay ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> ReadLoad::ShouldReadKqpMoreThanRows [GOOD] Test command err: 2025-06-25T14:30:55.607395Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:30:55.607558Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:30:55.607635Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a72/r3tmp/tmpX9sQSd/pdisk_1.dat 2025-06-25T14:30:55.945596Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:30:55.948332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:55.989377Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:55.994519Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861851783578 != 1750861851783582 2025-06-25T14:30:56.041838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:56.042018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:56.053758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:56.142754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:56.514158Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# usertable in dir# /Root with rows# 100 2025-06-25T14:30:56.515902Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:695:2577], subTag: 1} TUpsertActor Bootstrap called: RowCount: 100 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" 2025-06-25T14:30:56.541969Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:695:2577], subTag: 1} TUpsertActor finished in 0.025717s, errors=0 2025-06-25T14:30:56.542299Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kReadKqpStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "usertable" } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } ReadKqpStart { RowCount: 100 Inflights: 10 } 2025-06-25T14:30:56.542411Z node 1 :DS_LOAD_TEST NOTICE: kqp_select.cpp:322: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 3} Bootstrap called: 
RowCount: 100 Inflights: 10 2025-06-25T14:30:56.582662Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:366: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 3} will work with tablet# 72075186224037888 with ownerId# 72057594046644480 with tableId# 2 resolved for path# /Root/usertable with columnsCount# 11, keyColumnCount# 1 2025-06-25T14:30:56.582849Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:400: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 3} started fullscan actor# [1:707:2589] 2025-06-25T14:30:56.582962Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 1} Bootstrap called, sample# 100 2025-06-25T14:30:56.583019Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 1} Connect to# 72075186224037888 called 2025-06-25T14:30:56.583343Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 1} Handle TEvClientConnected called, Status# OK 2025-06-25T14:30:56.584434Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:137: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 1} finished in 0.000952s, sampled# 100, iter finished# 1, oks# 100 2025-06-25T14:30:56.584602Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:416: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 3} received keyCount# 100 2025-06-25T14:30:56.584827Z node 1 :DS_LOAD_TEST NOTICE: kqp_select.cpp:445: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 3} started# 10 actors each with inflight# 1 2025-06-25T14:30:56.584907Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 2} Bootstrap called 2025-06-25T14:30:56.584949Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 2} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-25T14:30:56.584989Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 3} Bootstrap called 2025-06-25T14:30:56.585010Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 3} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-25T14:30:56.585044Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 4} Bootstrap called 2025-06-25T14:30:56.585065Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 4} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-25T14:30:56.585088Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 5} Bootstrap called 2025-06-25T14:30:56.585121Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 5} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-25T14:30:56.585150Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 6} Bootstrap called 2025-06-25T14:30:56.585178Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 6} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-25T14:30:56.585201Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 7} Bootstrap called 2025-06-25T14:30:56.585234Z node 1 
:DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 7} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-25T14:30:56.585259Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 8} Bootstrap called 2025-06-25T14:30:56.585279Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 8} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-25T14:30:56.585302Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 9} Bootstrap called 2025-06-25T14:30:56.585326Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 9} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-25T14:30:56.585351Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 10} Bootstrap called 2025-06-25T14:30:56.585372Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 10} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-25T14:30:56.585409Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 11} Bootstrap called 2025-06-25T14:30:56.585431Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 11} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-25T14:30:56.589894Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 2} session: ydb://session/3?node_id=1&id=ZTc2OTA1NjMtNmZjZGMyMjQtNDM4YTc3OWUtYjI1NmU0NzQ= 2025-06-25T14:30:56.590086Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 3} session: ydb://session/3?node_id=1&id=OGJjZDg1NTItNTQwOGRlNzMtMjQ5OTE5ZDYtNWVhN2UzMDg= 2025-06-25T14:30:56.591182Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 4} session: ydb://session/3?node_id=1&id=MTdiY2RmNS02YWFhNTgwNC03MTFkMmQwZC1lOGU2YmQw 2025-06-25T14:30:56.592335Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 5} session: ydb://session/3?node_id=1&id=NDRlOTAxYjYtM2Q0MmE3ODAtOTNiNjdjM2YtZmIzMzYwOGM= 2025-06-25T14:30:56.593271Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 6} session: ydb://session/3?node_id=1&id=ZmVkZmFmOWQtOGUyMWYzMGYtMWRjOTdkNjgtMmM1YTFjNjQ= 2025-06-25T14:30:56.595955Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 7} session: ydb://session/3?node_id=1&id=NTM1MmI4YzgtZTdhMjVlOTMtY2MxMjU0Yi1jMjA1MWEzOA== 2025-06-25T14:30:56.597402Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 8} session: ydb://session/3?node_id=1&id=MTAyNGE4NGQtNGExNTk0OC1kZDNiOGYyZS1kMmFhZTU1MA== 2025-06-25T14:30:56.598910Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 9} session: ydb://session/3?node_id=1&id=YTE4ZTJjZDEtZWJlZTBkYjAtZDRjZDY1MzMtYTEyYWQ4NzE= 2025-06-25T14:30:56.600268Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 10} session: 
ydb://session/3?node_id=1&id=NGM3MDZiZGYtOTU2OGVjZjctNGRlMWU5MTQtOWE1OWQyMjU= 2025-06-25T14:30:56.601674Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:704:2586], subTag: 11} session: ydb://session/3?node_id=1&id=Njg4YmRlNDAtOThjYWZkMjUtZmE2ZGNmNzAtZTEwZWI0NjA= 2025-06-25T14:30:56.606815Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:732:2614], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:56.606925Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:769:2645], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:56.607005Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:770:2646], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: R ... cePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:31:08.834178Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:821:2687] txid# 281474976715667, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:31:08.834835Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:806:2682] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:31:08.835437Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:810:2683] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:31:08.891107Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:09.018587Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:786:2662], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:31:09.018707Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:787:2663], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:31:09.018779Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:788:2664], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:31:09.018841Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:789:2665], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:31:09.018896Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:790:2666], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:31:09.018986Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:791:2667], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:31:09.019046Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:792:2668], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:31:09.019098Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:794:2670], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:31:09.019170Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:799:2675], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:31:09.019248Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:804:2680], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:31:09.058317Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:939:2775] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:09.530366Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:704:2586], subTag: 5} finished in 0.813875s, errors=0 2025-06-25T14:31:09.530553Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:695:2577], subTag: 3} finished: 5 { Tag: 5 DurationMs: 813 OperationsOK: 100 OperationsError: 0 } 2025-06-25T14:31:09.544354Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:1968:3197] txid# 281474976715769, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:09.900884Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:704:2586], subTag: 9} finished in 1.132467s, errors=0 2025-06-25T14:31:09.901195Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:695:2577], subTag: 3} finished: 9 { Tag: 9 DurationMs: 1132 OperationsOK: 100 OperationsError: 0 } 2025-06-25T14:31:09.915682Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:2975:3603] txid# 281474976715870, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:10.434023Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:704:2586], subTag: 2} finished in 1.720608s, errors=0 2025-06-25T14:31:10.434356Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:695:2577], subTag: 3} finished: 2 { Tag: 2 DurationMs: 1720 OperationsOK: 100 OperationsError: 0 } 2025-06-25T14:31:10.449641Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:3982:4009] txid# 281474976715971, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:11.064909Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:704:2586], subTag: 4} finished in 2.349343s, errors=0 2025-06-25T14:31:11.065284Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:695:2577], subTag: 3} finished: 4 { Tag: 4 DurationMs: 2349 OperationsOK: 100 OperationsError: 0 } 2025-06-25T14:31:11.080763Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:4989:4415] txid# 281474976716072, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:11.676657Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: 
TKqpSelectActor# {Tag: 0, parent: [2:704:2586], subTag: 11} finished in 2.906020s, errors=0 2025-06-25T14:31:11.677038Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:695:2577], subTag: 3} finished: 11 { Tag: 11 DurationMs: 2906 OperationsOK: 100 OperationsError: 0 } 2025-06-25T14:31:11.694219Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:5996:4821] txid# 281474976716173, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:12.362379Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:704:2586], subTag: 7} finished in 3.643882s, errors=0 2025-06-25T14:31:12.362582Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:695:2577], subTag: 3} finished: 7 { Tag: 7 DurationMs: 3643 OperationsOK: 100 OperationsError: 0 } 2025-06-25T14:31:12.378523Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7003:5227] txid# 281474976716274, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:12.934003Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:704:2586], subTag: 8} finished in 4.214123s, errors=0 2025-06-25T14:31:12.934290Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:695:2577], subTag: 3} finished: 8 { Tag: 8 DurationMs: 4214 OperationsOK: 100 OperationsError: 0 } 2025-06-25T14:31:12.951343Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:8010:5633] txid# 281474976716375, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:13.614660Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:704:2586], subTag: 6} finished in 4.897170s, errors=0 2025-06-25T14:31:13.615155Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:695:2577], subTag: 3} finished: 6 { Tag: 6 DurationMs: 4897 OperationsOK: 100 OperationsError: 0 } 2025-06-25T14:31:13.633266Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:9017:6039] txid# 281474976716476, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:14.354969Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:704:2586], subTag: 10} finished in 5.586318s, errors=0 2025-06-25T14:31:14.355348Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:695:2577], subTag: 3} finished: 10 { Tag: 10 DurationMs: 5586 OperationsOK: 100 OperationsError: 0 } 2025-06-25T14:31:14.374419Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:10024:6445] txid# 281474976716577, issues: { message: "Check failed: path: 
\'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:15.180098Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:704:2586], subTag: 3} finished in 6.465525s, errors=0 2025-06-25T14:31:15.180574Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:695:2577], subTag: 3} finished: 3 { Tag: 3 DurationMs: 6465 OperationsOK: 100 OperationsError: 0 } 2025-06-25T14:31:15.180654Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:480: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:695:2577], subTag: 3} finished in 6.469123s, oks# 1000, errors# 0 2025-06-25T14:31:15.180986Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:704:2586] with tag# 3 |77.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer >> TPartitionTests::GetPartitionWriteInfoError [GOOD] >> TPQTabletTests::DropTablet_And_UnplannedConfigTransaction [GOOD] |77.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer >> TPartitionTests::NonConflictingCommitsBatch >> TPartitionTests::CorrectRange_Multiple_Transactions [GOOD] >> TPQTabletTests::Huge_ProposeTransacton >> TOlap::StoreStatsQuota [GOOD] >> TPartitionTests::CorrectRange_Rollback >> TPartitionTests::ConflictingTxProceedAfterRollback [GOOD] >> TPartitionTests::ConflictingSrcIdTxAndWritesDifferentBatches |77.6%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpScanArrowInChanels::AggregateEmptySum [GOOD] >> CacheEviction::DeleteKeys [GOOD] >> PQCountersLabeled::Partition >> TPartitionTests::CorrectRange_Rollback [GOOD] >> Cdc::InitialScanComplete [GOOD] >> Cdc::InitialScanEnqueuesZeroRecords >> TPartitionTests::DataTxCalcPredicateOk >> TPQTest::TestAccountReadQuota >> TPQTest::TestUserInfoCompatibility [GOOD] >> TPQTest::TestMessageNo >> TPQTest::TestWaitInOwners |77.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup/test-results/unittest/{meta.json ... 
results_accumulator.log} |77.6%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowInChanels::AggregateEmptySum [GOOD] Test command err: Trying to start YDB, gRPC: 6147, MsgBus: 16940 2025-06-25T14:30:43.816361Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894359353804980:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:43.816514Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000da7/r3tmp/tmpoVmdBk/pdisk_1.dat 2025-06-25T14:30:44.723115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:44.723240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:44.797651Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:44.831539Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894359353804773:2080] 1750861843729803 != 1750861843729806 2025-06-25T14:30:44.877872Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:44.878499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:44.915363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 6147, node 1 2025-06-25T14:30:45.076818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:45.076840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:45.076847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:45.076953Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16940 TClient is connected to server localhost:16940 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:46.007319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:46.091262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:46.239474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:46.411231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:46.494308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:48.213108Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894380828642896:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:48.213209Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:48.511879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.613385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.651404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.694362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.749586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.812697Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894359353804980:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:48.815154Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:48.825060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:48.903629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:49.044769Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519894385123610851:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.044876Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.045161Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894385123610856:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.049816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:49.085080Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894385123610858:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:30:49.153258Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894385123610911:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:54.373526Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861851648, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 29796, MsgBus: 1561 2025-06-25T14:30:55.276612Z node 2 ... base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:31:10.436932Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861870065, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 4967, MsgBus: 19577 2025-06-25T14:31:11.155185Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894477286326089:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:11.155271Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000da7/r3tmp/tmp72Ymw3/pdisk_1.dat 2025-06-25T14:31:11.297050Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:11.298393Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519894477286326068:2080] 1750861871154418 != 1750861871154421 2025-06-25T14:31:11.316131Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:11.316223Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:11.318638Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4967, node 4 2025-06-25T14:31:11.372512Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:11.372536Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:11.372549Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:11.372697Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19577 TClient is connected to server localhost:19577 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:11.879040Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:11.899208Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:11.964935Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:12.115290Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:12.167553Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:12.203424Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:14.633705Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894490171229584:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:14.633795Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:14.702168Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:14.739220Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:14.811038Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:14.843365Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:14.877069Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:14.913357Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:14.947236Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:15.001752Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894494466197537:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:15.001842Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:15.001928Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894494466197542:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:15.005225Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:15.014385Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894494466197544:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:31:15.111165Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894494466197595:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:16.156581Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894477286326089:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:16.157405Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:31:17.114732Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750861876743, txId: 281474976715672] shutting down >> PQCountersLabeled::Partition [GOOD] >> PQCountersLabeled::PartitionFirstClass |77.7%| [TA] $(B)/ydb/core/kqp/ut/arrow/test-results/unittest/{meta.json ... results_accumulator.log} >> TPartitionTests::NonConflictingCommitsBatch [GOOD] >> TPQTest::TestMessageNo [GOOD] >> TPQTest::TestPQPartialRead >> DataShardSnapshots::ShardRestartAfterDropTable [GOOD] >> DataShardSnapshots::ShardRestartAfterDropTableAndAbort >> TPartitionTests::GetUsedStorage >> test_select.py::TestDML::test_select[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] |77.7%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/arrow/test-results/unittest/{meta.json ... results_accumulator.log} >> TPartitionTests::GetUsedStorage [GOOD] >> TSourceIdTests::SourceIdStorageParseAndAdd [GOOD] >> TSourceIdTests::SourceIdStorageMinDS [GOOD] >> TSourceIdTests::SourceIdStorageTestClean >> TSourceIdTests::SourceIdStorageTestClean [GOOD] >> TSourceIdTests::SourceIdStorageDeleteByMaxCount [GOOD] >> TSourceIdTests::SourceIdStorageDeleteAndOwnersMark [GOOD] >> TopicAutoscaling::PartitionSplit_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionMerge_PreferedPartition_PQv1 >> TPartitionTests::ConflictingSrcIdTxAndWritesDifferentBatches [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::StoreStatsQuota [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:07.124899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:07.124995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:07.125033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:07.125070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: 
OperationsProcessing config: using default configuration 2025-06-25T14:28:07.125111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:07.125141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:07.125221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:07.125289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:07.126092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:07.126401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:07.196458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:07.196540Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:07.214739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:07.215174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:07.215340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:07.223525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:07.223940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:07.224672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:07.225003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:07.230847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:07.231089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:07.232382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:07.232456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:07.232632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:07.232683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 
72057594046678944, LocalPathId: 1] 2025-06-25T14:28:07.232733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:07.232847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:07.240140Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:07.379633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:07.379881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:07.380078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:07.380122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:07.380400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:07.380535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:07.385356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:07.385564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:07.385791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:07.385853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:07.385899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:07.385959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:07.387930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:07.387996Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:07.388044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:07.389958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:07.390006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:07.390063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:07.390119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:07.393495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:07.395784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:07.395978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:07.396952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:07.397092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:28:07.397139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:07.397429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:07.397482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:07.397659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:07.397736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing 
txId 1 2025-06-25T14:28:07.399952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:07.400010Z node 1 :FLAT_TX_SCHEMESHARD ... tablet_id=72075186233409546;self_id=[2:463:2429];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186233409546; 2025-06-25T14:31:15.534938Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546; 2025-06-25T14:31:15.535024Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T14:31:15.535079Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T14:31:15.535177Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-25T14:31:15.535250Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T14:31:15.535319Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T14:31:15.535373Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T14:31:15.535470Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T14:31:15.795667Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;parent=[2:463:2429];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:31:16.047931Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;parent=[2:463:2429];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:31:16.059457Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186233409546; 2025-06-25T14:31:16.069919Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546; 2025-06-25T14:31:16.070008Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T14:31:16.070088Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T14:31:16.070199Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-25T14:31:16.070277Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T14:31:16.070336Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T14:31:16.070380Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T14:31:16.070483Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T14:31:16.333248Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;parent=[2:463:2429];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:31:16.591657Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;parent=[2:463:2429];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:31:16.603343Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186233409546; 2025-06-25T14:31:16.614029Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546; 2025-06-25T14:31:16.614102Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T14:31:16.614201Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: There are stats for 1 tables 2025-06-25T14:31:16.614325Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T14:31:16.614414Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-25T14:31:16.614489Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T14:31:16.614554Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T14:31:16.614598Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T14:31:16.614691Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186233409546;self_id=[2:463:2429];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186233409546;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T14:31:16.614902Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 424 rowCount 1 cpuUsage 2.1408 2025-06-25T14:31:16.639630Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:1 data size 424 row count 1 2025-06-25T14:31:16.639832Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=OlapStore, is column=0, is olap=1, RowCount 1, DataSize 424 2025-06-25T14:31:16.639945Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:353: OLAP store contains 1 tables. 
2025-06-25T14:31:16.640003Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:188: PrepareStats: SchemeShard has no info on DataShard 72075186233409546 channel 2 binding 2025-06-25T14:31:16.640084Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:397: Aggregated stats for pathId 4: RowCount 1, DataSize 424 2025-06-25T14:31:16.640446Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:31:16.640691Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 286us result status StatusSuccess 2025-06-25T14:31:16.641221Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } Children { Name: "OlapStore" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnStore CreateFinished: true CreateTxId: 104 CreateStep: 1750861106684 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 424 DataSize: 424 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 424 DataSize: 424 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 1000000 data_size_soft_quota: 900000 } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |77.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TSourceIdTests::SourceIdStorageDeleteAndOwnersMark [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::GetUsedStorage [GOOD] Test command err: 2025-06-25T14:31:13.916615Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 
72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:13.916733Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:13.931910Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [1:181:2194] 2025-06-25T14:31:13.933364Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:31:13.000000Z 2025-06-25T14:31:13.933422Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [1:181:2194] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\350\376\377\273\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\350\376\377\273\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\262\222\004" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\350\376\377\273\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\263\222\004" StorageChannel: INLINE } 2025-06-25T14:31:14.712240Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:14.712333Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:14.727224Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:{2, {0, 10}, 100001}:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:31:14.727433Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:{2, {0, 10}, 100001}:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:14.727695Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] bootstrapping {2, {0, 10}, 100001} [2:182:2195] 2025-06-25T14:31:14.728526Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:{2, {0, 10}, 100001}:Initializer] Initializing completed. 2025-06-25T14:31:14.728585Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} generation 0 [2:182:2195] 2025-06-25T14:31:14.728630Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition {2, {0, 10}, 100001} so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:31:14.728668Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Process pending events. 
Count 0 2025-06-25T14:31:14.728994Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|68396563-e9e6747-dac12fe3-e3cef8d3_0 generated for partition {2, {0, 10}, 100001} topic 'Root/PQ/rt3.dc1--account--topic' owner owner1 2025-06-25T14:31:14.729136Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::ReplyOwnerOk. Partition: {2, {0, 10}, 100001} 2025-06-25T14:31:14.729300Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] no data for compaction 2025-06-25T14:31:14.729610Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob processing sourceId 'SourceId' seqNo 2 partNo 0 2025-06-25T14:31:14.730437Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob complete sourceId 'SourceId' seqNo 2 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 118 count 1 nextOffset 101 batches 1 2025-06-25T14:31:14.730984Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} compactOffset 100,1 HeadOffset 0 endOffset 0 curOffset 101 D0000100001_00000000000000000100_00000_0000000001_00000? size 104 WTime 128 2025-06-25T14:31:14.784618Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:14.784760Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::ReplyWrite. Partition: {2, {0, 10}, 100001} 2025-06-25T14:31:14.784873Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Answering for message sourceid: 'SourceId', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: {2, {0, 10}, 100001}, SeqNo: 2, partNo: 0, Offset: 100 is stored on disk 2025-06-25T14:31:14.785125Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] need more data for compaction. 
cumulativeSize=104, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:15.068561Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob processing sourceId 'SourceId' seqNo 4 partNo 0 2025-06-25T14:31:15.069513Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob complete sourceId 'SourceId' seqNo 4 partNo 0 FormedBlobsCount 0 NewHead: Offset 101 PartNo 0 PackedSize 118 count 1 nextOffset 102 batches 1 2025-06-25T14:31:15.069973Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} compactOffset 101,1 HeadOffset 101 endOffset 101 curOffset 102 D0000100001_00000000000000000101_00000_0000000001_00000? size 104 WTime 1129 2025-06-25T14:31:15.111766Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:15.111895Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::ReplyWrite. Partition: {2, {0, 10}, 100001} 2025-06-25T14:31:15.111973Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Answering for message sourceid: 'SourceId', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: {2, {0, 10}, 100001}, SeqNo: 4, partNo: 0, Offset: 101 is stored on disk 2025-06-25T14:31:15.112183Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] need more data for compaction. cumulativeSize=208, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:15.332999Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob processing sourceId 'SourceId' seqNo 6 partNo 0 2025-06-25T14:31:15.335796Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob complete sourceId 'SourceId' seqNo 6 partNo 0 FormedBlobsCount 0 NewHead: Offset 102 PartNo 0 PackedSize 118 count 1 nextOffset 103 batches 1 2025-06-25T14:31:15.336253Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} compactOffset 102,1 HeadOffset 102 endOffset 102 curOffset 103 D0000100001_00000000000000000102_00000_0000000001_00000? 
size 104 WTime 2130 2025-06-25T14:31:15.371055Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:15.371174Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::ReplyWrite. Partition: {2, {0, 10}, 100001} 2025-06-25T14:31:15.371253Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Answering for message sourceid: 'SourceId', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: {2, {0, 10}, 100001}, SeqNo: 6, partNo: 0, Offset: 102 is stored on disk 2025-06-25T14:31:15.371429Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] need more data for compaction. cumulativeSize=312, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:15.614605Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob processing sourceId 'SourceId' seqNo 7 partNo 0 2025-06-25T14:31:15.615497Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} part blob complete sourceId 'SourceId' seqNo 7 partNo 0 FormedBlobsCount 0 NewHead: Offset 110 PartNo 0 PackedSize 118 count 1 nextOffset 111 batches 1 2025-06-25T14:31:15.616050Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} compactOffset 110,1 HeadOffset 103 endOffset 103 curOffset 111 D0000100001_00000000000000000110_00000_0000000001_00000? size 104 WTime 3231 2025-06-25T14:31:15.647390Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:15.647537Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateIdle] TPartition::ReplyWrite. Partition: {2, {0, 10}, 100001} 2025-06-25T14:31:15.647644Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: {2, {0, 10} ... _00000_0000000001_00000?, size: 0 Body key last D0000100001_00000000000000000110_00000_0000000001_00000?, size: 312 2025-06-25T14:31:16.319872Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:16.319953Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:16.335420Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] bootstrapping {2, {0, 10}, 100001} [3:183:2196] 2025-06-25T14:31:16.337306Z node 3 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:{2, {0, 10}, 100001}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-25T14:31:16.000000Z 2025-06-25T14:31:16.337383Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} generation 0 [3:183:2196] 2025-06-25T14:31:16.689852Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|fb0b936c-66739e42-b34b8efd-1e83e681_0 generated for partition {2, {0, 10}, 100001} topic 'Root/PQ/rt3.dc1--account--topic' owner owner1 Wait write info error(2) 2025-06-25T14:31:17.163499Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:17.163572Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:17.179588Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-25T14:31:17.179858Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:17.180110Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [4:182:2195] 2025-06-25T14:31:17.181001Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-25T14:31:17.181160Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-25T14:31:17.181305Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:31:17.182206Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-25T14:31:17.182469Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:31:17.182633Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:31:17.182750Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:31:17.182793Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:31:17.182836Z node 4 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:31:17.000000Z 2025-06-25T14:31:17.182875Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 
2025-06-25T14:31:17.182918Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [4:182:2195] 2025-06-25T14:31:17.182966Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:31:17.183020Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:17.183094Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-2 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:31:17.183139Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-2 send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-25T14:31:17.183182Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-25T14:31:17.183212Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 readTimeStamp for offset 0 initiated queuesize 1 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-25T14:31:17.183409Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:17.183527Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-2 offset 0 count 1 size 1024000 endOffset 50 max time lag 0ms effective offset 0 2025-06-25T14:31:17.183722Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 684 count 50 last offset 0, current partition end offset: 50 2025-06-25T14:31:17.183766Z node 4 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 
2025-06-25T14:31:17.552255Z node 4 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 session is set to 0 (startOffset 50) session session-client-0 Got KV request Got batch complete: 1 Got KV request Got KV request Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\210\236\200\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient-0" Value: "\010\000\020\001\030\001\"\020session-client-0(\0000\000@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient-0" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session-client-0" StorageChannel: INLINE } 2025-06-25T14:31:17.584470Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:17.584583Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:18.579736Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Created Tx with id 3 as act# 3 Created Tx with id 4 as act# 4 2025-06-25T14:31:18.580079Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-25T14:31:18.580216Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 3 2025-06-25T14:31:18.580251Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 4 2025-06-25T14:31:19.961965Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:19.962101Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 6 Wait batch completion Wait kv request 2025-06-25T14:31:19.962362Z node 4 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 offset is set to 5 (startOffset 50) session session-client-0 2025-06-25T14:31:19.962414Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 3 2025-06-25T14:31:19.962448Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 3 2025-06-25T14:31:19.962477Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 4 2025-06-25T14:31:19.962493Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 4 2025-06-25T14:31:19.962522Z node 4 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 offset is set to 10 (startOffset 50) session session-client-0 2025-06-25T14:31:19.976710Z node 4 :PERSQUEUE DEBUG: 
partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(ABORTED), reason=incorrect offset range (gap) Got KV request Wait tx committed for tx 3 2025-06-25T14:31:19.997659Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:19.997729Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait tx committed for tx 4 Wait immediate tx complete 6 Got propose resutl: Origin: 72057594037927937 Status: ABORTED TxId: 6 Errors { Kind: BAD_REQUEST Reason: "incorrect offset range (gap)" } 2025-06-25T14:31:20.405929Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:20.406021Z node 5 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:20.423716Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] bootstrapping {2, {0, 10}, 100001} [5:183:2196] 2025-06-25T14:31:20.425467Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:{2, {0, 10}, 100001}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:31:20.000000Z 2025-06-25T14:31:20.425543Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {2, {0, 10}, 100001}, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition {2, {0, 10}, 100001} generation 0 [5:183:2196] >> TopicAutoscaling::CDC_PartitionSplit_AutosplitByLoad [GOOD] >> TopicAutoscaling::ControlPlane_CDC >> Cdc::MustNotLoseSchemaSnapshot [GOOD] >> Cdc::MustNotLoseSchemaSnapshotWithVolatileTx >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_BeforeAutoscaleAwareSDK ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::ConflictingSrcIdTxAndWritesDifferentBatches [GOOD] Test command err: 2025-06-25T14:31:12.270216Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:12.270331Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:12.293295Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [1:181:2194] 2025-06-25T14:31:12.295348Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-25T14:31:12.000000Z 2025-06-25T14:31:12.295433Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [1:181:2194] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\200\367\377\273\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\200\367\377\273\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\262\222\004" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\002\020\001\030\001\"\007session(\0000\001@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\002\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-25T14:31:13.090574Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:13.090645Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:13.103213Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [2:183:2196] 2025-06-25T14:31:13.104545Z node 2 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:31:13.000000Z 2025-06-25T14:31:13.104592Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [2:183:2196] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\350\376\377\273\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-1" Value: "\010\000\020\001\030\001\"\tsession-1(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-1" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session-1" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\350\376\377\273\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-2" Value: "\010\000\020\001\030\001\"\tsession-2(\0000\003@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-2" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session-2" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\350\376\377\273\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-1" Value: "\010\003\020\001\030\001\"\tsession-1(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-1" Value: "\003\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session-1" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\350\376\377\273\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\262\222\004" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-2" Value: "\010\001\020\001\030\001\"\tsession-2(\0000\003@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-2" Value: 
"\001\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session-2" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-1" Value: "\010\006\020\001\030\001\"\tsession-1(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-1" Value: "\006\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session-1" StorageChannel: INLINE } 2025-06-25T14:31:13.970065Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:13.970146Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:14.392789Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:14.392851Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:14.404827Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-25T14:31:14.405027Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:14.405267Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [4:182:2195] 2025-06-25T14:31:14.406006Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-25T14:31:14.406134Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-25T14:31:14.406258Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:31:14.406406Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-25T14:31:14.406599Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:31:14.406764Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:31:14.406862Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:31:14.406893Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:31:14.406937Z node 4 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:31:14.000000Z 2025-06-25T14:31:14.406969Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 
2025-06-25T14:31:14.407007Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [4:182:2195] 2025-06-25T14:31:14.407052Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:31:14.407091Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:14.407276Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:15.806956Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create distr tx with id = 2 and act no: 3 Create immediate tx with id = 4 and act no: 5 2025-06-25T14:31:15.807327Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-25T14:31:15.807439Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 2025-06-25T14:31:17.185452Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:17.185676Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:31:17.185793Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:31:17.185842Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 Wait batch completion Got batch complete: 2 Wait batch completion Wait kv request 2025-06-25T14:31:17.445959Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 2 2025-06-25T14:31:17.446038Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 2 2025-06-25T14:31:17.446126Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:31:17.446199Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:31:17.446254Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:31:17.479956Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= Got KV request Wait tx committed for tx 2 
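Editor's note: the transaction handling above follows a fixed ordering — TEvPQ::TEvTxCalcPredicate arrives for each pending (Step, TxId), TEvPQ::TEvGetWriteInfoResponse is collected, then TEvPQ::TEvTxCommit triggers TPartition::CommitWriteOperations, a batched KV write ("Got KV request"), and finally TEvPersQueue::TEvProposeTransactionResult(COMPLETE) is scheduled. A hedged sketch of those phases as a linear walkthrough is below; only the phase names come from the log, the code itself is invented for illustration and is not the event-driven partition.cpp logic.

```cpp
#include <iostream>
#include <stdexcept>

// Illustrative phases of one distributed write transaction inside a
// partition, named after the events in the log above.
enum class ETxPhase {
    CalcPredicate,  // TEvPQ::TEvTxCalcPredicate with (Step, TxId)
    WaitWriteInfo,  // TEvPQ::TEvGetWriteInfoResponse from supportive partitions
    Commit,         // TEvPQ::TEvTxCommit -> TPartition::CommitWriteOperations
    PersistToKV,    // batched CmdWrite request to the KV tablet
    ReplyComplete   // TEvPersQueue::TEvProposeTransactionResult(COMPLETE)
};

void Advance(ETxPhase& phase) {
    switch (phase) {
        case ETxPhase::CalcPredicate: phase = ETxPhase::WaitWriteInfo; break;
        case ETxPhase::WaitWriteInfo: phase = ETxPhase::Commit;        break;
        case ETxPhase::Commit:        phase = ETxPhase::PersistToKV;   break;
        case ETxPhase::PersistToKV:   phase = ETxPhase::ReplyComplete; break;
        case ETxPhase::ReplyComplete: throw std::logic_error("tx already finished");
    }
}

int main() {
    const char* names[] = {"CalcPredicate", "WaitWriteInfo", "Commit",
                           "PersistToKV", "ReplyComplete"};
    ETxPhase phase = ETxPhase::CalcPredicate;
    // Walk a single transaction through all phases in order.
    while (phase != ETxPhase::ReplyComplete) {
        std::cout << "phase: " << names[static_cast<int>(phase)] << "\n";
        Advance(phase);
    }
    std::cout << "phase: " << names[static_cast<int>(phase)] << " (COMPLETE)\n";
    return 0;
}
```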
2025-06-25T14:31:17.511470Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 2 2025-06-25T14:31:17.511608Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait immediate tx complete 4 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 4 2025-06-25T14:31:17.924083Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:17.924149Z node 5 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:17.938541Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-25T14:31:17.938750Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:17.938961Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [5:182:2195] 2025-06-25T14:31:17.939810Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-25T14:31:17.939931Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-25T14:31:17.940069Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:31:17.940276Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-25T14:31:17.940549Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-25T14:31:17.940764Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 1 size 684 so 0 eo 1 d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-25T14:31:17.940895Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:31:17.940941Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:31:17.940989Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:31:17.000000Z 2025-06-25T14:31:17.941028Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 
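Editor's note: the CmdWrite dumps near the top of this test output use compact key names such as "i0000000003", "m0000000003cclient-1" and "m0000000003uclient-2". Reading only off the log, they appear to consist of a one-character type prefix, a zero-padded partition id, and (for the 'm' keys) a one-character kind plus the consumer name. The decoder below is purely hypothetical and exists only to make that inferred layout concrete; it is not taken from the YDB sources.

```cpp
#include <cctype>
#include <iostream>
#include <string>

// Hypothetical decoder for the metadata key names seen in the CmdWrite
// dumps above. The layout is inferred from the log only.
struct TDecodedKey {
    char Type = 0;          // e.g. 'i'/'I' partition meta, 'm' per-consumer meta
    unsigned Partition = 0; // zero-padded partition id after the prefix
    char Kind = 0;          // for 'm' keys: 'c' or 'u' in the dump above
    std::string Consumer;   // for 'm' keys: consumer (client) name
};

bool DecodeKey(const std::string& key, TDecodedKey& out) {
    if (key.size() < 11 || !std::isalpha(static_cast<unsigned char>(key[0]))) {
        return false;
    }
    out.Type = key[0];
    out.Partition = static_cast<unsigned>(std::stoul(key.substr(1, 10)));
    if (out.Type == 'm' && key.size() > 11) {
        out.Kind = key[11];
        out.Consumer = key.substr(12);
    }
    return true;
}

int main() {
    for (const std::string key : {"i0000000003", "m0000000003cclient-1", "m0000000003uclient-2"}) {
        TDecodedKey k;
        if (DecodeKey(key, k)) {
            std::cout << key << " -> type=" << k.Type << " partition=" << k.Partition;
            if (k.Type == 'm') {
                std::cout << " kind=" << k.Kind << " consumer=" << k.Consumer;
            }
            std::cout << "\n";
        }
    }
    return 0;
}
```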
2025-06-25T14:31:17.941072Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [5:182:2195] 2025-06-25T14:31:17.941127Z node 5 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-25T14:31:17.941172Z node 5 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:17.941394Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:18.298165Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src1|29b24d57-7e60a936-8a780158-ae100d76_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src1 2025-06-25T14:31:18.298307Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-25T14:31:19.301418Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create distr tx with id = 2 and act no: 3 Create distr tx with id = 4 and act no: 5 2025-06-25T14:31:19.301791Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-25T14:31:19.301893Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 2025-06-25T14:31:19.301944Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 4 2025-06-25T14:31:20.596246Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:20.596465Z node 5 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:31:20.596593Z node 5 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:31:20.596644Z node 5 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 Wait batch completion 2025-06-25T14:31:20.596810Z node 5 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-06-25T14:31:20.596867Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-06-25T14:31:20.596937Z node 5 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 Got batch complete: 2 Wait batch completion Wait for no tx committed 2025-06-25T14:31:20.836067Z node 5 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle 
TEvPQ::TEvTxCommit Step 1, TxId 4 2025-06-25T14:31:20.836186Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 4 2025-06-25T14:31:20.836252Z node 5 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 Got KV request Wait kv request Wait tx committed for tx 0 2025-06-25T14:31:21.068983Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 2 2025-06-25T14:31:21.069148Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:1257: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Already written message. Topic: 'Root/PQ/rt3.dc1--account--topic' Partition: 0 SourceId: 'src1'. Message seqNo: 1. Committed seqNo: 6. Writing seqNo: (NULL). EndOffset: 1. CurOffset: 1. Offset: 60 2025-06-25T14:31:21.069277Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src1' seqNo 7 partNo 0 2025-06-25T14:31:21.070257Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src1' seqNo 7 partNo 0 FormedBlobsCount 0 NewHead: Offset 70 PartNo 0 PackedSize 84 count 1 nextOffset 71 batches 1 2025-06-25T14:31:21.070356Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:1257: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Already written message. Topic: 'Root/PQ/rt3.dc1--account--topic' Partition: 0 SourceId: 'src1'. Message seqNo: 7. Committed seqNo: 6. Writing seqNo: 7. EndOffset: 1. CurOffset: 71. Offset: 80 2025-06-25T14:31:21.070881Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 70,1 HeadOffset 1 endOffset 1 curOffset 71 d0000000000_00000000000000000070_00000_0000000001_00000? size 70 WTime 12141 2025-06-25T14:31:21.071037Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got KV request Got batch complete: 3 Got KV request Got KV request Wait tx committed for tx 4 Wait batch completion Wait kv request 2025-06-25T14:31:21.092048Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 17 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:21.092146Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:21.092265Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 1 is already written 2025-06-25T14:31:21.092456Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-25T14:31:21.092514Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 70 is stored on disk 2025-06-25T14:31:21.092551Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:21.092605Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 71 is already written 2025-06-25T14:31:21.092860Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=70, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 >> TPQTest::TestAccountReadQuota [GOOD] >> TPQTest::TestCheckACL >> Cdc::ShouldBreakLocksOnConcurrentDropStream [GOOD] |77.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |77.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |77.7%| [LD] {RESULT} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation+UseSink [GOOD] >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation-UseSink >> DataShardVolatile::DistributedWriteThenScanQuery [GOOD] >> DataShardVolatile::DistributedWriteWithAsyncIndex >> DataShardSnapshots::BrokenLockChangesDontLeak [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionInactive_1_Test >> TPartitionChooserSuite::TBoundaryChooserTest [GOOD] >> TPartitionChooserSuite::TBoundaryChooser_GetTabletIdTest [GOOD] >> TPartitionChooserSuite::THashChooserTest [GOOD] >> TPartitionChooserSuite::THashChooser_GetTabletIdTest [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_BadSourceId_Test >> TPQTest::TestSeveralOwners >> Balancing::Balancing_OneTopic_PQv1 [GOOD] >> Balancing::Balancing_ManyTopics_TopicApi >> TPartitionTests::UserActCount >> TPartitionTests::DataTxCalcPredicateOk [GOOD] >> TPartitionTests::DataTxCalcPredicateError >> TPQRBDescribes::PartitionLocations [GOOD] >> CompressExecutor::TestExecutorMemUsage [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::ShouldBreakLocksOnConcurrentDropStream [GOOD] Test command err: 2025-06-25T14:28:24.656294Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893760009425659:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:24.659194Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001271/r3tmp/tmpMj0EY8/pdisk_1.dat 2025-06-25T14:28:25.183422Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893760009425560:2080] 1750861704653027 != 1750861704653030 2025-06-25T14:28:25.197071Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
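Editor's note: just before the test-status lines above, the write path answers "already written" for source 'src1' at seqNo 1 (the committed seqNo is 6) while accepting seqNo 7 and storing it on disk. A minimal sketch of that per-source-id sequence-number deduplication rule follows, under the assumption (taken from the log only) that a message is rejected as a duplicate when its seqNo is not greater than the last committed seqNo for its source id.

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

// Hypothetical per-SourceId deduplication, mirroring the
// "Already written message ... Message seqNo: 1. Committed seqNo: 6." lines:
// a write is accepted only if its seqNo exceeds the last committed seqNo.
class TSourceIdDedup {
public:
    // Returns true if the message should be written, false if it is a duplicate.
    bool Accept(const std::string& sourceId, uint64_t seqNo) {
        auto it = Committed_.find(sourceId);
        if (it != Committed_.end() && seqNo <= it->second) {
            return false;  // reply "already written" without storing anything
        }
        Committed_[sourceId] = seqNo;  // remember the newly committed seqNo
        return true;
    }

private:
    std::unordered_map<std::string, uint64_t> Committed_;
};

int main() {
    TSourceIdDedup dedup;
    dedup.Accept("src1", 6);                       // committed seqNo becomes 6
    std::cout << dedup.Accept("src1", 1) << "\n";  // 0: duplicate, already written
    std::cout << dedup.Accept("src1", 7) << "\n";  // 1: new message, stored
    std::cout << dedup.Accept("src1", 7) << "\n";  // 0: retry of the same seqNo
    return 0;
}
```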
2025-06-25T14:28:25.214710Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:25.214820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:25.219961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3835, node 1 2025-06-25T14:28:25.360919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:25.360936Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:25.360941Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:25.361040Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:25.423114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:25.442879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:25.469068Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:7519893764304393455:2268] 2025-06-25T14:28:25.469362Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:28:25.480973Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:28:25.481057Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:28:25.483323Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:28:25.483391Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:28:25.483426Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:28:25.483835Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:28:25.483910Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:28:25.483946Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:7519893764304393469:2268] in generation 1 2025-06-25T14:28:25.485260Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:28:25.522875Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:28:25.523013Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing 
processing params 2025-06-25T14:28:25.523065Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:7519893764304393473:2269] 2025-06-25T14:28:25.523082Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:28:25.523096Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:28:25.523110Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:25.523265Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:28:25.523364Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:28:25.523418Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7519893764304393452:2299], serverId# [1:7519893764304393472:2308], sessionId# [0:0:0] 2025-06-25T14:28:25.523545Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:25.523576Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:25.523595Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:28:25.523614Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:25.523644Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:28:25.523871Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976710657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:28:25.523956Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976710657 at tablet 72075186224037888 2025-06-25T14:28:25.524955Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:25.527507Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:28:25.527599Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:28:25.529648Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7519893764304393487:2316], serverId# [1:7519893764304393488:2317], sessionId# [0:0:0] 2025-06-25T14:28:25.536029Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976710657 at step 1750861705572 at tablet 72075186224037888 { Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750861705572 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:28:25.536073Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:25.536267Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:25.536365Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:25.536384Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:28:25.536405Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1750861705572:281474976710657] in PlanQueue unit at 72075186224037888 2025-06-25T14:28:25.536675Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1750861705572:281474976710657 keys extracted: 0 2025-06-25T14:28:25.536827Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:28:25.536931Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:25.536961Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:28:25.539226Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:28:25.539633Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:25.541044Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1750861705571 2025-06-25T14:28:25.541071Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:25.541093Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1750861705579 2025-06-25T14:28:25.541148Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1750861705572} 2025-06-25T14:28:25.541183Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:25.541240Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:25.541257Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:28:25.541278Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:28:25.541330Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750861705572 : 281474976710657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7519893760009425890:2146], exec latency: 2 ms, propose latency: 4 ms 2025-06-25T14:28:25.541357Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976710657 state Ready TxInFly 0 2025-06-25T14:28:25.541391Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: 
at tablet# 72075186224037888 2025-06-25T14:28:25.545176Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:7519893764304393473:2269][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-06-25T14:28:25.547425Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710657 datashard 72075186224037888 state Ready 2025-06-25T14:28:25.547471Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075 ... tus from node 25, TabletId: 72075186224037892 not found 2025-06-25T14:31:21.262967Z node 25 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:228: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037891][25:1125:2765] Disconnected 2025-06-25T14:31:21.263265Z node 25 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:648: [CdcChangeSenderMain][72075186224037888:1][25:982:2765] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvGone { PartitionId: 0 HardError: 0 } 2025-06-25T14:31:21.264064Z node 25 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:468: [CdcChangeSenderMain][72075186224037888:1][25:982:2765] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table/Stream2 TableId: [72057594046644480:5:0] RequestType: ByTableId Operation: OpList RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindCdcStream DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [streamImpl] }] } 2025-06-25T14:31:21.264159Z node 25 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:497: [CdcChangeSenderMain][72075186224037888:1][25:982:2765] Stream is planned to drop, waiting for the EvRemoveSender command 2025-06-25T14:31:21.264276Z node 25 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 25, TabletId: 72075186224037891 not found 2025-06-25T14:31:21.418726Z node 25 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715665 at step 3000 at tablet 72075186224037888 { Transactions { TxId: 281474976715665 AckTo { RawX1: 0 RawX2: 0 } } Step: 3000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:31:21.418835Z node 25 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:31:21.419204Z node 25 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:31:21.419296Z node 25 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:31:21.419383Z node 25 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [3000:281474976715665] in PlanQueue unit at 72075186224037888 2025-06-25T14:31:21.419701Z node 25 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 3000:281474976715665 keys extracted: 0 2025-06-25T14:31:21.419926Z node 25 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:31:21.420280Z node 
25 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:31:21.421471Z node 25 :TX_DATASHARD DEBUG: datashard.cpp:1822: Add schema snapshot: pathId# [OwnerId: 72057594046644480, LocalPathId: 2], version# 4, step# 3000, txId# 281474976715665, at tablet# 72075186224037888 2025-06-25T14:31:21.421872Z node 25 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:31:21.426200Z node 25 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,5) wasn't found 2025-06-25T14:31:21.427355Z node 25 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-25T14:31:21.451918Z node 25 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3000} 2025-06-25T14:31:21.452112Z node 25 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:31:21.452187Z node 25 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:31:21.452353Z node 25 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3000 : 281474976715665] from 72075186224037888 at tablet 72075186224037888 send result to client [25:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:31:21.452478Z node 25 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715665 state Ready TxInFly 0 2025-06-25T14:31:21.452682Z node 25 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:31:21.452981Z node 25 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:131: [ChangeSender][72075186224037888:1][25:644:2541] Handle NKikimr::NDataShard::TEvChangeExchange::TEvRemoveSender { PathId: [OwnerId: 72057594046644480, LocalPathId: 5] } 2025-06-25T14:31:21.453085Z node 25 :CHANGE_EXCHANGE NOTICE: change_sender.cpp:143: [ChangeSender][72075186224037888:1][25:644:2541] Remove sender: type# CdcStream, pathId# [OwnerId: 72057594046644480, LocalPathId: 5] 2025-06-25T14:31:21.453764Z node 25 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:653: [CdcChangeSenderMain][72075186224037888:1][25:982:2765] Handle NKikimr::NDataShard::TEvChangeExchange::TEvRemoveSender { PathId: [OwnerId: 72057594046644480, LocalPathId: 5] } 2025-06-25T14:31:21.456183Z node 25 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715665 datashard 72075186224037888 state Ready 2025-06-25T14:31:21.456336Z node 25 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:31:21.565907Z node 25 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jykr083kayj1r3j9qymjk0pf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=25&id=NThmMWFhMTctNTZlZTMzZTAtYzljZWNiMDMtZmM4Y2IzMjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:31:21.566798Z node 25 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:8] at 72075186224037888 2025-06-25T14:31:21.566963Z node 25 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=8; 2025-06-25T14:31:21.567087Z node 25 :TX_DATASHARD INFO: datashard_write_operation.cpp:707: Write transaction 8 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-06-25T14:31:21.567394Z node 25 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 8 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-25T14:31:21.578911Z node 25 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 8 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-25T14:31:21.579086Z node 25 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:31:21.579639Z node 25 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [25:1175:2849], Table: `/Root/Table` ([72057594046644480:2:3]), SessionActorId: [25:1138:2849]Got LOCKS BROKEN for table `/Root/Table`. ShardID=72075186224037888, Sink=[25:1175:2849].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-25T14:31:21.579910Z node 25 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [25:1168:2849], SessionActorId: [25:1138:2849], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Table`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[25:1138:2849]. isRollback=0 2025-06-25T14:31:21.580503Z node 25 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=25&id=NThmMWFhMTctNTZlZTMzZTAtYzljZWNiMDMtZmM4Y2IzMjI=, ActorId: [25:1138:2849], ActorState: ExecuteState, TraceId: 01jykr083kayj1r3j9qymjk0pf, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [25:1248:2849] from: [25:1168:2849] 2025-06-25T14:31:21.580863Z node 25 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [25:1248:2849] TxId: 281474976715666. Ctx: { TraceId: 01jykr083kayj1r3j9qymjk0pf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=25&id=NThmMWFhMTctNTZlZTMzZTAtYzljZWNiMDMtZmM4Y2IzMjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Table`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T14:31:21.581272Z node 25 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:9] at 72075186224037888 2025-06-25T14:31:21.581354Z node 25 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip empty write operation for [0:9] at 72075186224037888 2025-06-25T14:31:21.581629Z node 25 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:31:21.581947Z node 25 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=25&id=NThmMWFhMTctNTZlZTMzZTAtYzljZWNiMDMtZmM4Y2IzMjI=, ActorId: [25:1138:2849], ActorState: ExecuteState, TraceId: 01jykr083kayj1r3j9qymjk0pf, Create QueryResponse for error on request, msg: >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-06-25T14:31:21.586167Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-25T14:31:21.586299Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-25T14:31:21.587135Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 1 max time lag 0ms effective offset 0 2025-06-25T14:31:21.587666Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 139 count 1 last offset 0, current partition end offset: 1 2025-06-25T14:31:21.587778Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-25T14:31:21.587942Z node 25 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 139 accessed 0 times before, last time 1970-01-01T00:00:02.000000Z 2025-06-25T14:31:21.588042Z node 25 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-25T14:31:21.588170Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-25T14:31:21.588481Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 121 from pos 0 cbcount 1 2025-06-25T14:31:21.589299Z node 25 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037889' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:31:21.589463Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >> TPQTest::TestSeveralOwners [GOOD] >> TPQTest::TestSourceIdDropByUserWrites ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQRBDescribes::PartitionLocations [GOOD] Test command err: Bucket: 100 elems count: 97 Bucket: 200 elems count: 104 Bucket: 500 elems count: 288 Bucket: 1000 elems count: 528 Bucket: 2000 elems count: 1008 Bucket: 5000 elems count: 2976 2025-06-25T14:31:11.986013Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894476664247476:2085];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:11.986631Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:31:12.018144Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894484303755225:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:12.018185Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:31:12.175797Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bf7/r3tmp/tmpbFkMls/pdisk_1.dat 2025-06-25T14:31:12.186612Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:31:12.318648Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:12.318761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:12.332945Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:31:12.333932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:12.350623Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:12.355052Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 21338, node 1 2025-06-25T14:31:12.359283Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:31:12.363678Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:31:12.404501Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000bf7/r3tmp/yandexPziw2P.tmp 2025-06-25T14:31:12.404532Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000bf7/r3tmp/yandexPziw2P.tmp 2025-06-25T14:31:12.404913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: 
/home/runner/.ya/build/build_root/yft8/000bf7/r3tmp/yandexPziw2P.tmp 2025-06-25T14:31:12.405059Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:31:12.407469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:12.407589Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:12.413527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:12.467139Z INFO: TTestServer started on Port 14900 GrpcPort 21338 TClient is connected to server localhost:14900 PQClient connected to localhost:21338 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:12.764271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:31:12.838575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:31:12.991782Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:31:13.027111Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:15.039132Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894493844117765:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:15.039230Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894493844117747:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:15.039385Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:15.042887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:15.046540Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894493844117808:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:15.046626Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:15.067172Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894493844117776:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:31:15.297938Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894493844117850:2796] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:15.325203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:15.325273Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894497188657406:2275], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:31:15.325543Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ODc0ZDNmNjctMTA4ZWMzMjMtMzM0YTQ1MjMtOGZiZDJkYTg=, ActorId: [2:7519894497188657381:2269], ActorState: ExecuteState, TraceId: 01jykr01x0fpkskc44e40mvxrh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:31:15.327680Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:31:15.329057Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894493844117868:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:31:15.329286Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Y2Y2MGM0OTItZDQxYjJlMDQtNTJiNDkwY2QtOGMzYWY5Nzk=, ActorId: [1:7519894493844117744:2302], ActorState: ExecuteState, TraceId: 01jykr01txfv24p6fee7bzrw2j, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:31:15.329692Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/ ... athStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186224037893 } PersQueueGroup { Name: "rt3.dc1--topic" PathId: 13 TotalGroupCount: 5 PartitionPerTablet: 5 PQTabletConfig { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } LocalDC: true ReadRules: "user" ReadFromTimestampsMs: 0 ConsumerFormatVersions: 0 ConsumerCodecs { } Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } ReadRuleVersions: 0 YdbDatabasePath: "/Root" } Partitions { PartitionId: 0 TabletId: 72075186224037892 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186224037892 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186224037892 Status: Active } Partitions { PartitionId: 3 TabletId: 72075186224037892 Status: Active } Partitions { PartitionId: 4 TabletId: 72075186224037892 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186224037893 NextPartitionId: 5 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 12 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 5 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } Path: "/Root/PQ/rt3.dc1--topic" name rt3.dc1--topic version1 CallPersQueueGRPC request to localhost:21338 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--topic" } } 2025-06-25T14:31:21.962010Z node 1 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: 
Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC CallPersQueueGRPC request to localhost:21338 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--topic" } } 2025-06-25T14:31:22.469266Z node 1 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: Status: 1 ErrorCode: OK MetaResponse { CmdGetTopicMetadataResult { TopicInfo { Topic: "rt3.dc1--topic" NumPartitions: 5 Config { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } Version: 1 LocalDC: true Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } TopicPath: "/Root/PQ/rt3.dc1--topic" YdbDatabasePath: "/Root" Consumers { Name: "user" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } Version: 0 Important: false } } ErrorCode: OK } } } === Topic created, have version: 1 TClient::Ls request: /Root/PQ/rt3.dc1--topic TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "rt3.dc1--topic" PathId: 13 SchemeshardId: 72057594046644480 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976710679 CreateStep: 1750861881902 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186224037893 } PersQueueGroup { Name: "rt3.dc1--topic" PathId: 13 TotalGroupCount: 5 PartitionPerTablet: 5 PQTabletConfig { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 2000... 
(TRUNCATED) 2025-06-25T14:31:22.484948Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--topic] pipe [1:7519894523908889960:3412] connected; active server actors: 1 2025-06-25T14:31:22.485189Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 1, Generation 1 2025-06-25T14:31:22.485205Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 1, NodeId 1, Generation 1 2025-06-25T14:31:22.485212Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 2, NodeId 1, Generation 1 2025-06-25T14:31:22.485222Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 1, Generation 1 2025-06-25T14:31:22.485229Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 4, NodeId 1, Generation 1 2025-06-25T14:31:22.485667Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--topic] pipe [1:7519894523908889961:3413] connected; active server actors: 1 2025-06-25T14:31:22.485856Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 1, Generation 1 2025-06-25T14:31:22.485878Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 1, NodeId 1, Generation 1 2025-06-25T14:31:22.485891Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 2, NodeId 1, Generation 1 2025-06-25T14:31:22.485903Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 1, Generation 1 2025-06-25T14:31:22.485914Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 4, NodeId 1, Generation 1 response: Status: true Locations { PartitionId: 0 NodeId: 1 Generation: 1 } Locations { PartitionId: 1 NodeId: 1 Generation: 1 } Locations { PartitionId: 2 NodeId: 1 Generation: 1 } Locations { PartitionId: 3 NodeId: 1 Generation: 1 } Locations { PartitionId: 4 NodeId: 1 Generation: 1 } 2025-06-25T14:31:22.486545Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--topic] pipe [1:7519894523908889962:3414] connected; active server actors: 1 2025-06-25T14:31:22.486754Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][rt3.dc1--topic] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 1, Generation 1 response: Status: true Locations { PartitionId: 3 NodeId: 1 Generation: 1 } 2025-06-25T14:31:22.487300Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--topic] pipe [1:7519894523908889963:3415] connected; active server 
actors: 1 response: Status: false 2025-06-25T14:31:22.978837Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710684, task: 1, CA Id [1:7519894523908889992:2473]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 0 2025-06-25T14:31:23.011997Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710684, task: 1, CA Id [1:7519894523908889992:2473]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:31:23.047860Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710684, task: 1, CA Id [1:7519894523908889992:2473]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:31:23.110771Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710684, task: 1, CA Id [1:7519894523908889992:2473]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:31:23.175179Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710684, task: 1, CA Id [1:7519894523908889992:2473]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:31:23.175256Z node 1 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976710685. Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-25T14:31:23.175380Z node 1 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [1:7519894528203857309:2466] TxId: 281474976710685. Ctx: { TraceId: 01jykr09b81bd0ejrw14z3ns0r, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTMwNjViMzktNDhmNTU5MjEtOTRmNjQ1ZjMtODYzYmNlYTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-25T14:31:23.175631Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZTMwNjViMzktNDhmNTU5MjEtOTRmNjQ1ZjMtODYzYmNlYTU=, ActorId: [1:7519894523908889965:2466], ActorState: ExecuteState, TraceId: 01jykr09b81bd0ejrw14z3ns0r, Create QueryResponse for error on request, msg: 2025-06-25T14:31:23.176572Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jykr09m710f85zpzsf1hfhzg" } } YdbStatus: UNAVAILABLE ConsumedRu: 188 } 2025-06-25T14:31:23.317199Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710684, task: 1, CA Id [1:7519894523908889992:2473]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::BrokenLockChangesDontLeak [GOOD] Test command err: 2025-06-25T14:29:19.721480Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:29:19.721589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:29:19.721649Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001bca/r3tmp/tmpxji4It/pdisk_1.dat 2025-06-25T14:29:20.054753Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:29:20.058835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:20.097493Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:20.102240Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861757008110 != 1750861757008114 2025-06-25T14:29:20.153250Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:29:20.154359Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:29:20.154892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:20.155130Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:20.166851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:20.250369Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T14:29:20.250443Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:29:20.250613Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T14:29:20.365265Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T14:29:20.365388Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:29:20.365987Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:29:20.366067Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:29:20.366386Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:29:20.366556Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:29:20.366707Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:29:20.366975Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:29:20.368426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:20.369513Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:29:20.369582Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T14:29:20.403236Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:29:20.404492Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:29:20.405032Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:29:20.405313Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:29:20.452615Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:29:20.453319Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:29:20.453487Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:29:20.455147Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:29:20.455237Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:29:20.455303Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:29:20.455665Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T14:29:20.455812Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:29:20.455932Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:29:20.456511Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:29:20.537015Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:29:20.537218Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:29:20.537352Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:29:20.537399Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:29:20.537438Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:29:20.537492Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:29:20.537738Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:29:20.537793Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:29:20.538165Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:29:20.538296Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:29:20.538362Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:29:20.538406Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:29:20.538449Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:29:20.538483Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:29:20.538520Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:29:20.538552Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:29:20.538590Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:29:20.539053Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:20.539099Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:20.539158Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], 
sessionId# [0:0:0] 2025-06-25T14:29:20.539286Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:29:20.539333Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:29:20.539461Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:29:20.539729Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:29:20.539808Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:29:20.539911Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:29:20.539962Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14: ... X_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-25T14:31:22.162128Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T14:31:22.162311Z node 16 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 5 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-25T14:31:22.162376Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is DelayComplete 2025-06-25T14:31:22.162422Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-25T14:31:22.162496Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T14:31:22.162586Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-06-25T14:31:22.162654Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-25T14:31:22.162680Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T14:31:22.162729Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-06-25T14:31:22.162848Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-25T14:31:22.162928Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T14:31:22.163012Z node 16 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 5 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_LOCKS_BROKEN 2025-06-25T14:31:22.163220Z node 16 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037888 Status: 
STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-25T14:31:22.163334Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:31:22.163658Z node 16 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [16:867:2642], Table: `/Root/table` ([72057594046644480:2:1]), SessionActorId: [16:802:2642]Got LOCKS BROKEN for table `/Root/table`. ShardID=72075186224037888, Sink=[16:867:2642].{
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } 2025-06-25T14:31:22.163892Z node 16 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [16:860:2642], SessionActorId: [16:802:2642], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table`., code: 2001
: Error: Operation is aborting because it cannot acquire locks, code: 2001 . sessionActorId=[16:802:2642]. isRollback=0 2025-06-25T14:31:22.164483Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=16&id=NTNiNTUxZWMtNTdmNDliNy0xMjkwMTA4YS02MWVlYTgwNA==, ActorId: [16:802:2642], ActorState: ExecuteState, TraceId: 01jykr08gv4fs0z1rxf969t3tr, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [16:861:2642] from: [16:860:2642] 2025-06-25T14:31:22.164737Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [16:860:2642], Recipient [16:652:2542]: NKikimrDataEvents.TEvWrite TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-25T14:31:22.164785Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-25T14:31:22.164983Z node 16 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [16:861:2642] TxId: 281474976715663. Ctx: { TraceId: 01jykr08gv4fs0z1rxf969t3tr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=16&id=NTNiNTUxZWMtNTdmNDliNy0xMjkwMTA4YS02MWVlYTgwNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table`., code: 2001 subissue: {
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } } 2025-06-25T14:31:22.165268Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [16:652:2542], Recipient [16:652:2542]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T14:31:22.165308Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T14:31:22.165385Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-25T14:31:22.165544Z node 16 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-25T14:31:22.165705Z node 16 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-25T14:31:22.165832Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite 2025-06-25T14:31:22.165889Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T14:31:22.165952Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite 2025-06-25T14:31:22.165988Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T14:31:22.166036Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T14:31:22.166076Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v400/0 IncompleteEdge# v{min} UnprotectedReadEdge# v400/18446744073709551615 ImmediateWriteEdge# v401/0 ImmediateWriteEdgeReplied# v401/0 2025-06-25T14:31:22.166124Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-25T14:31:22.166178Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T14:31:22.166219Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T14:31:22.166247Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteWrite 2025-06-25T14:31:22.166274Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteWrite 2025-06-25T14:31:22.166307Z node 16 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:6] at 72075186224037888 2025-06-25T14:31:22.166439Z node 16 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 2025-06-25T14:31:22.166522Z node 16 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip empty write operation for [0:6] at 72075186224037888 2025-06-25T14:31:22.166650Z node 16 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 
0 2025-06-25T14:31:22.166754Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14:31:22.166813Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite 2025-06-25T14:31:22.166902Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-25T14:31:22.166979Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T14:31:22.167018Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is DelayComplete 2025-06-25T14:31:22.167047Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-25T14:31:22.167077Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T14:31:22.167107Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-25T14:31:22.167148Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T14:31:22.167188Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T14:31:22.167219Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-25T14:31:22.167275Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-25T14:31:22.167306Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T14:31:22.167345Z node 16 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-25T14:31:22.167427Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:31:22.167680Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=16&id=NTNiNTUxZWMtNTdmNDliNy0xMjkwMTA4YS02MWVlYTgwNA==, ActorId: [16:802:2642], ActorState: ExecuteState, TraceId: 01jykr08gv4fs0z1rxf969t3tr, Create QueryResponse for error on request, msg: 2025-06-25T14:31:22.169129Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [16:63:2110], Recipient [16:652:2542]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715661 LockNode: 16 Status: STATUS_NOT_FOUND 2025-06-25T14:31:22.224431Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [16:874:2692], Recipient [16:652:2542]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:31:22.224575Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:31:22.224674Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [16:873:2691], serverId# [16:874:2692], sessionId# [0:0:0] 
2025-06-25T14:31:22.224926Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553224, Sender [16:554:2480], Recipient [16:652:2542]: NKikimr::TEvDataShard::TEvGetOpenTxs >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK_AutoCommit >> YdbSdkSessionsPool::StressTestAsync/1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> CompressExecutor::TestExecutorMemUsage [GOOD] Test command err: 2025-06-25T14:29:20.296406Z :WriteAndReadSomeMessagesWithAsyncCompression INFO: Random seed for debugging is 1750861760296371 2025-06-25T14:29:20.841469Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893999824360657:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:20.841535Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:29:20.996568Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894001256701177:2249];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:20.996664Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017e5/r3tmp/tmpXPUV0q/pdisk_1.dat 2025-06-25T14:29:21.408367Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:29:21.484679Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:29:21.876846Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:21.900501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:29:21.988796Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:21.977113Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:22.028546Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:29:22.030547Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:22.030679Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:22.033179Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:22.033281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:22.051346Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:22.055025Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:29:22.058658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64093, node 1 2025-06-25T14:29:22.221313Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0017e5/r3tmp/yandexxdIlJE.tmp 2025-06-25T14:29:22.221332Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0017e5/r3tmp/yandexxdIlJE.tmp 2025-06-25T14:29:22.221430Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0017e5/r3tmp/yandexxdIlJE.tmp 2025-06-25T14:29:22.221513Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:22.320382Z INFO: TTestServer started on Port 14304 GrpcPort 64093 TClient is connected to server localhost:14304 PQClient connected to localhost:64093 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:22.753216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:22.791728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-25T14:29:24.942470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894017004230819:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:24.942470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894017004230807:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:24.942566Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:24.946226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:24.974495Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894017004230822:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-25T14:29:25.282555Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894021299198208:2676] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:25.312030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:25.319532Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894022731537758:2275], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:29:25.322434Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894021299198221:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:29:25.324163Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YzdkYjZiZDAtODBiNzM2MWQtNWYwMGJkNWEtMzM1NDI2NA==, ActorId: [1:7519894017004230805:2298], ActorState: ExecuteState, TraceId: 01jykqwpa21tzswpe767k803d4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:29:25.323135Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZDIyZTU5OTQtNmYwYTE3YTAtZmI4ZGUzMGEtZjA1NjkwN2Q=, ActorId: [2:7519894022731537727:2269], ActorState: ExecuteState, TraceId: 01jykqwpcqccq21zr9fnwz41zw, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:29:25.325527Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:29:25.328944Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:29:25.460721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:25.605829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part p ... 
erver" ip=ipv6:[::1]:41064 proto=v1 topic=test-topic durationSec=0 2025-06-25T14:31:21.974655Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:31:21.976422Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 3 sessionId: describe result for acl check 2025-06-25T14:31:21.976576Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-25T14:31:21.976599Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T14:31:21.976615Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-25T14:31:21.976637Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [13:7519894520543955220:2526] (SourceId=test-message-group-id, PreferedPartition=(NULL)) StartKqpSession 2025-06-25T14:31:21.980403Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [13:7519894520543955220:2526] (SourceId=test-message-group-id, PreferedPartition=(NULL)) Select from the table 2025-06-25T14:31:22.148741Z node 13 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715698. Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-25T14:31:22.148841Z node 13 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [13:7519894520543955228:2528] TxId: 281474976715698. Ctx: { TraceId: 01jykr08kwd0hcad0h28y66rts, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ODU3NTY1MDAtNGZlNjQwMjAtMjFmNjA2Y2ItNjQ0NmFjNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-25T14:31:22.148999Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=13&id=ODU3NTY1MDAtNGZlNjQwMjAtMjFmNjA2Y2ItNjQ0NmFjNw==, ActorId: [13:7519894520543955221:2528], ActorState: ExecuteState, TraceId: 01jykr08kwd0hcad0h28y66rts, Create QueryResponse for error on request, msg: 2025-06-25T14:31:22.150056Z node 13 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [13:7519894520543955220:2526] (SourceId=test-message-group-id, PreferedPartition=(NULL)) ReplyError: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=13&id=ODU3NTY1MDAtNGZlNjQwMjAtMjFmNjA2Y2ItNjQ0NmFjNw==" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jykr08kxakfwkp8v2y77b9j6" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 2025-06-25T14:31:22.150159Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 3 reason: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=13&id=ODU3NTY1MDAtNGZlNjQwMjAtMjFmNjA2Y2ItNjQ0NmFjNw==" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jykr08kxakfwkp8v2y77b9j6" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 sessionId: 2025-06-25T14:31:22.150424Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: is DEAD Test retry state: get retry delay 2025-06-25T14:31:22.152928Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|2dac8438-2f0b453e-1e08ca7c-11574c58_0] Got error. Status: UNAVAILABLE, Description:
: Error: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=13&id=ODU3NTY1MDAtNGZlNjQwMjAtMjFmNjA2Y2ItNjQ0NmFjNw==" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jykr08kxakfwkp8v2y77b9j6" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 , code: 500001 2025-06-25T14:31:22.152968Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|2dac8438-2f0b453e-1e08ca7c-11574c58_0] Write session will restart in 2.000000s 2025-06-25T14:31:22.153106Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|2dac8438-2f0b453e-1e08ca7c-11574c58_0] Write session: Do CDS request 2025-06-25T14:31:22.153158Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|2dac8438-2f0b453e-1e08ca7c-11574c58_0] Do schedule cds request after 2000 ms 2025-06-25T14:31:22.604881Z node 13 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715700. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T14:31:22.605003Z node 13 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [13:7519894524838922587:2536] TxId: 281474976715700. Ctx: { TraceId: 01jykr091zdhbcnprkb36v0vvd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjhmMzFlYmMtZDdkYmFmMjYtYjJjZDNhZGEtNjYxZGEwMWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T14:31:22.605218Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=13&id=YjhmMzFlYmMtZDdkYmFmMjYtYjJjZDNhZGEtNjYxZGEwMWI=, ActorId: [13:7519894524838922584:2536], ActorState: ExecuteState, TraceId: 01jykr091zdhbcnprkb36v0vvd, Create QueryResponse for error on request, msg: 2025-06-25T14:31:22.606202Z node 13 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jykr091zdhbcnprkb6bt2zz5" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-25T14:31:22.650984Z node 14 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720680. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T14:31:22.651068Z node 14 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [14:7519894525634657354:2434] TxId: 281474976720680. Ctx: { TraceId: 01jykr093fe1pbnfthxdwmpcja, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=ZDc0MzhhNTYtZjBmYWEyOS1lMDk5MGYxYy1kNGVmODY5ZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T14:31:22.651205Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=ZDc0MzhhNTYtZjBmYWEyOS1lMDk5MGYxYy1kNGVmODY5ZA==, ActorId: [14:7519894525634657351:2434], ActorState: ExecuteState, TraceId: 01jykr093fe1pbnfthxdwmpcja, Create QueryResponse for error on request, msg: 2025-06-25T14:31:22.651875Z node 14 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jykr093fe1pbnfthxfxr8k2e" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-25T14:31:22.708752Z node 13 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715701. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T14:31:22.708890Z node 13 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [13:7519894524838922616:2530] TxId: 281474976715701. Ctx: { TraceId: 01jykr08q684enmx4hs3thewbh, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDVlZThmYzEtNTQwODAzYmItZjY2NzVkMTAtNzdkYjcyMTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T14:31:22.709119Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=13&id=ZDVlZThmYzEtNTQwODAzYmItZjY2NzVkMTAtNzdkYjcyMTU=, ActorId: [13:7519894524838922554:2530], ActorState: ExecuteState, TraceId: 01jykr08q684enmx4hs3thewbh, Create QueryResponse for error on request, msg: 2025-06-25T14:31:22.710160Z node 13 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jykr095k4bc08cyt23stnjgv" } } YdbStatus: UNAVAILABLE ConsumedRu: 303 } 2025-06-25T14:31:22.718166Z node 14 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720681. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T14:31:22.718324Z node 14 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [14:7519894525634657377:2427] TxId: 281474976720681. Ctx: { TraceId: 01jykr08qy6smdmykdqvhbmw42, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=ZDBiZDU2OGMtOTM4NmMwY2QtNGRhYWE0YTYtZWIyNjQzMjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T14:31:22.718615Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=ZDBiZDU2OGMtOTM4NmMwY2QtNGRhYWE0YTYtZWIyNjQzMjc=, ActorId: [14:7519894525634657336:2427], ActorState: ExecuteState, TraceId: 01jykr08qy6smdmykdqvhbmw42, Create QueryResponse for error on request, msg: 2025-06-25T14:31:22.719672Z node 14 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jykr0963asgcx94952dny7hp" } } YdbStatus: UNAVAILABLE ConsumedRu: 297 } 2025-06-25T14:31:22.966974Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|2dac8438-2f0b453e-1e08ca7c-11574c58_0] Write session: close. Timeout = 0 ms 2025-06-25T14:31:22.967053Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|2dac8438-2f0b453e-1e08ca7c-11574c58_0] Write session will now close 2025-06-25T14:31:22.967156Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|2dac8438-2f0b453e-1e08ca7c-11574c58_0] Write session: aborting 2025-06-25T14:31:22.968024Z :WARNING: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|2dac8438-2f0b453e-1e08ca7c-11574c58_0] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2025-06-25T14:31:22.968098Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|2dac8438-2f0b453e-1e08ca7c-11574c58_0] Write session: destroy >> TPQTest::TestWritePQCompact >> PQCountersLabeled::PartitionFirstClass [GOOD] >> PQCountersLabeled::ImportantFlagSwitching |77.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |77.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |77.7%| [LD] {RESULT} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut >> TopicAutoscaling::Simple_PQv1 [GOOD] >> CommitOffset::Commit_WithWrongSession_ToParent [GOOD] >> CommitOffset::Commit_WithoutSession_ParentNotFinished >> TPQTest::TestWaitInOwners [GOOD] >> TPQTest::TestWritePQ >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_BeforeAutoscaleAwareSDK >> TPQTabletTests::Parallel_Transactions_1 >> TPartitionTests::DataTxCalcPredicateError [GOOD] >> TListAllTopicsTests::PlainList >> Cdc::InitialScanEnqueuesZeroRecords [GOOD] >> Cdc::InitialScanRacyProgressAndDrop >> TPartitionTests::DataTxCalcPredicateOrder >> TPQTabletTests::Parallel_Transactions_1 [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration >> TPQTest::TestCheckACL [GOOD] >> TPQTest::TestAlreadyWritten >> TPQTabletTests::Parallel_Transactions_2 >> DataShardSnapshots::ShardRestartAfterDropTableAndAbort [GOOD] >> TPQTabletTests::Parallel_Transactions_2 [GOOD] >> TPartitionTests::DifferentWriteTxBatchingOptions [GOOD] >> TPQTest::TestReadRuleVersions >> TPQTabletTests::PQTablet_Send_RS_With_Abort >> TPartitionTests::FailedTxsDontBlock >> TKesusTest::TestSessionTimeoutAfterDetach [GOOD] >> TKesusTest::TestSessionTimeoutAfterReboot >> TPQTabletTests::PQTablet_Send_RS_With_Abort [GOOD] >> TPQTabletTests::Partition_Send_Predicate_With_False >> TopicAutoscaling::PartitionSplit_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_PreferedPartition_AutoscaleAwareSDK >> 
TopicAutoscaling::PartitionMerge_PreferedPartition_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ControlPlane_CreateAlterDescribe >> TKesusTest::TestAcquireTimeout [GOOD] >> TKesusTest::TestAcquireSharedBlocked >> DataShardVolatile::DistributedWriteShardRestartAfterExpectation-UseSink [GOOD] >> DataShardVolatile::DistributedWriteEarlierSnapshotNotBlocked >> TPQTabletTests::Partition_Send_Predicate_With_False [GOOD] >> TPQTabletTests::ProposeTx_Missing_Operations >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration [GOOD] >> DataShardVolatile::DistributedWriteWithAsyncIndex [GOOD] >> DataShardVolatile::DistributedWriteThenLateWriteReadCommit >> TKesusTest::TestAcquireSharedBlocked [GOOD] >> TKesusTest::TestAcquireTimeoutAfterReboot >> TPQTabletTests::ProposeTx_Missing_Operations [GOOD] >> TPQTabletTests::ProposeTx_Command_After_Propose >> Cdc::MustNotLoseSchemaSnapshotWithVolatileTx [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentAddIndex >> CommitOffset::DistributedTxCommit [GOOD] >> CommitOffset::DistributedTxCommit_ChildFirst >> TPQTabletTests::ProposeTx_Command_After_Propose [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::ShardRestartAfterDropTableAndAbort [GOOD] Test command err: 2025-06-25T14:29:19.814800Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:29:19.814958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:29:19.815027Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001be7/r3tmp/tmpxyTzWa/pdisk_1.dat 2025-06-25T14:29:20.147659Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:29:20.151080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:20.192655Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:20.198449Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861756835214 != 1750861756835218 2025-06-25T14:29:20.245928Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:29:20.247209Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:29:20.247849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:20.247994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:20.261403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:20.355055Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T14:29:20.355144Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:29:20.355319Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T14:29:20.497802Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T14:29:20.497931Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:29:20.498598Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:29:20.498705Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:29:20.499083Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:29:20.499312Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:29:20.499485Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:29:20.499852Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:29:20.501413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:20.502602Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:29:20.502674Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T14:29:20.535220Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:29:20.536596Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:29:20.537162Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:29:20.537412Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:29:20.580169Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:29:20.580979Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:29:20.581159Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:29:20.582944Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:29:20.583033Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:29:20.583087Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:29:20.583473Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T14:29:20.583626Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:29:20.583708Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:29:20.584189Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:29:20.609332Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:29:20.609570Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:29:20.609704Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:29:20.609743Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:29:20.609779Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:29:20.609831Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:29:20.610086Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:29:20.610137Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:29:20.610533Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:29:20.610650Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:29:20.610717Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:29:20.610761Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:29:20.610803Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:29:20.610840Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:29:20.610892Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:29:20.610938Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:29:20.610982Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:29:20.611432Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:20.611490Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:29:20.611538Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], 
sessionId# [0:0:0] 2025-06-25T14:29:20.611638Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:29:20.611675Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:29:20.611779Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:29:20.612056Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:29:20.612146Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:29:20.612261Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:29:20.612338Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14: ... ionActorId=[14:805:2646]. isRollback=0 2025-06-25T14:31:26.705756Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=14&id=ZGQ1Nzg1Mi02ZWU0YmIxYS00YjU4YTJlNi1jZDM0OGQzMQ==, ActorId: [14:805:2646], ActorState: ExecuteState, TraceId: 01jykr0d485qy2dw14fhxpv0pm, got TEvKqpBuffer::TEvError in ExecuteState, status: UNAVAILABLE send to: [14:959:2646] from: [14:826:2646] 2025-06-25T14:31:26.705958Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [14:826:2646], Recipient [14:653:2543]: NKikimrDataEvents.TEvWrite TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true } Op: Rollback } 2025-06-25T14:31:26.705996Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-25T14:31:26.706061Z node 14 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_WRONG_SHARD_STATE;details=Rejecting data TxId 0 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state);tx_id=0; 2025-06-25T14:31:26.706099Z node 14 :TX_DATASHARD NOTICE: datashard.cpp:3137: Rejecting data TxId 0 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state) 2025-06-25T14:31:26.706250Z node 14 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [14:959:2646] TxId: 281474976715665. Ctx: { TraceId: 01jykr0d485qy2dw14fhxpv0pm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=ZGQ1Nzg1Mi02ZWU0YmIxYS00YjU4YTJlNi1jZDM0OGQzMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: {
: Error: Wrong shard state. Table `/Root/table`., code: 2005 subissue: {
: Error: Rejecting data TxId 0 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state), code: 2029 } } 2025-06-25T14:31:26.706579Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=ZGQ1Nzg1Mi02ZWU0YmIxYS00YjU4YTJlNi1jZDM0OGQzMQ==, ActorId: [14:805:2646], ActorState: ExecuteState, TraceId: 01jykr0d485qy2dw14fhxpv0pm, Create QueryResponse for error on request, msg: ... blocking NKikimr::NLongTxService::TEvLongTxService::TEvLockStatus from LONG_TX_SERVICE to TX_DATASHARD_ACTOR cookie 0 2025-06-25T14:31:26.708485Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 65543, Sender [14:555:2481], Recipient [14:653:2543]: NActors::TEvents::TEvPoison 2025-06-25T14:31:26.708985Z node 14 :TX_DATASHARD INFO: datashard.cpp:190: OnDetach: 72075186224037888 2025-06-25T14:31:26.709069Z node 14 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-25T14:31:26.729026Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [14:963:2775], Recipient [14:965:2776]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:31:26.736088Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [14:963:2775], Recipient [14:965:2776]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:31:26.736255Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828684, Sender [14:963:2775], Recipient [14:965:2776]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:31:26.740281Z node 14 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [14:965:2776] 2025-06-25T14:31:26.740692Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:31:26.747414Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:31:26.749094Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:31:26.752217Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:31:26.752350Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:31:26.752448Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:31:26.753066Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:31:26.753406Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:31:26.753492Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:31:26.753589Z node 14 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state PreOffline tabletId 72075186224037888 2025-06-25T14:31:26.753755Z node 14 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037888 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 1 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:31:26.753836Z node 14 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast PreOffline tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:31:26.754004Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change 
sender created: at tablet: 72075186224037888, actorId: [14:979:2783] 2025-06-25T14:31:26.754065Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:31:26.754146Z node 14 :TX_DATASHARD INFO: datashard.cpp:1283: Cannot activate change sender: at tablet: 72075186224037888, state: PreOffline, queue size: 0 2025-06-25T14:31:26.754215Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:31:26.754550Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [14:63:2110], Recipient [14:965:2776]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715661 LockNode: 14 Status: STATUS_NOT_FOUND 2025-06-25T14:31:26.754899Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [14:965:2776], Recipient [14:965:2776]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:31:26.754945Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:31:26.755281Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435075, Sender [14:965:2776], Recipient [14:965:2776]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressResendReadSet 2025-06-25T14:31:26.755328Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3160: StateWork, processing event TEvPrivate::TEvProgressResendReadSet 2025-06-25T14:31:26.756656Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [14:25:2072], Recipient [14:965:2776]: {TEvRegisterTabletResult TabletId# 72075186224037888 Entry# 600} 2025-06-25T14:31:26.756724Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-25T14:31:26.756790Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 600 2025-06-25T14:31:26.756859Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:31:26.757240Z node 14 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:31:26.757319Z node 14 :TX_DATASHARD INFO: datashard__progress_tx.cpp:21: Progress tx at non-ready tablet 72075186224037888 state 5 2025-06-25T14:31:26.757567Z node 14 :TX_DATASHARD DEBUG: datashard__progress_resend_rs.cpp:14: Start TTxProgressResendRS at tablet 72075186224037888 2025-06-25T14:31:26.757653Z node 14 :TX_DATASHARD INFO: datashard.cpp:4101: Resend RS at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715663 2025-06-25T14:31:26.757729Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 1 at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715663 2025-06-25T14:31:26.758258Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [14:965:2776], Recipient [14:867:2690]: {TEvReadSet step# 500 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 138 Seqno# 1 Flags# 0} 2025-06-25T14:31:26.758310Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T14:31:26.758382Z node 14 :TX_DATASHARD 
DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715663 2025-06-25T14:31:26.758502Z node 14 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 500 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 138 Seqno# 1 Flags# 0} 2025-06-25T14:31:26.758588Z node 14 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 500:281474976715663 at 72075186224037889 2025-06-25T14:31:26.758669Z node 14 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-25T14:31:26.758785Z node 14 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Ack at 72075186224037889 {TEvReadSet step# 500 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 138 Seqno# 1 Flags# 0} 2025-06-25T14:31:26.758940Z node 14 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:31:26.759092Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [14:867:2690], Recipient [14:965:2776]: {TEvReadSet step# 500 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 1} 2025-06-25T14:31:26.759163Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T14:31:26.759248Z node 14 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715663 2025-06-25T14:31:26.759344Z node 14 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037888 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:31:26.759485Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [14:25:2072], Recipient [14:965:2776]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 400 NextReadStep# 600 ReadStep# 600 } 2025-06-25T14:31:26.759531Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-25T14:31:26.759601Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 400 next step 600 2025-06-25T14:31:26.940296Z node 14 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TPQTest::TestReadRuleVersions [GOOD] >> TPQTest::TestReserveBytes >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionActive_BoundaryTrue_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionActive_BoundaryFalse_Test |77.7%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestAsync/1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> 
TPQTabletTests::ProposeTx_Command_After_Propose [GOOD] Test command err: 2025-06-25T14:31:27.243062Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:31:27.248371Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037927937] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:31:27.248589Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037927937] doesn't have tx info 2025-06-25T14:31:27.248626Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:31:27.248655Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-25T14:31:27.248683Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:31:27.248723Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:27.248762Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:27.270710Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:208:2212], now have 1 active actors on pipe 2025-06-25T14:31:27.270833Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:31:27.286497Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-06-25T14:31:27.288616Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-06-25T14:31:27.288755Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:27.289795Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: 
[PQ: 72057594037927937] Config applied version 1 actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-06-25T14:31:27.289934Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:31:27.290332Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:27.290747Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:216:2218] 2025-06-25T14:31:27.291627Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-25T14:31:27.291695Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:216:2218] 2025-06-25T14:31:27.291762Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:31:27.292579Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:27.292693Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-25T14:31:27.292773Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-25T14:31:27.292835Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer reinit request with generation 1 2025-06-25T14:31:27.292866Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer reinit with generation 1 done 2025-06-25T14:31:27.293052Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:31:27.293091Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:31:27.293214Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-25T14:31:27.293450Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:27.295788Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:27.295862Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:27.296254Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:223:2223], now have 1 active actors on pipe 2025-06-25T14:31:27.296810Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:226:2225], now have 1 active actors on pipe 2025-06-25T14:31:27.297589Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 179 RawX2: 4294969488 } TxId: 67890 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-06-25T14:31:27.297656Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3431: [PQ: 72057594037927937] distributed transaction 2025-06-25T14:31:27.297731Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3745: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-25T14:31:27.297770Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-25T14:31:27.297813Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-06-25T14:31:27.297858Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3979: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-25T14:31:27.297909Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-25T14:31:27.297964Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-25T14:31:27.298153Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 133 MaxStep: 30133 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 4294969488 } Partitions { } 2025-06-25T14:31:27.298244Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:31:27.301735Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:31:27.301794Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-25T14:31:27.301835Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PREPARING 2025-06-25T14:31:27.301869Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARED 2025-06-25T14:31:27.302203Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 179 RawX2: 4294969488 } TxId: 67891 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } 
SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-06-25T14:31:27.302249Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3431: [PQ: 72057594037927937] distributed transaction 2025-06-25T14:31:27.302311Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3745: [PQ: 72057594037927937] Propose TxId 67891, WriteId (empty maybe) 2025-06-25T14:31:27.302348Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-25T14:31:27.302394Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67891, State UNKNOWN 2025-06-25T14:31:27.302434Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3979: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-25T14:31:27.302473Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67891, NewState PREPARING 2025-06-25T14:31:27.302517Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67891 2025-06-25T14:31:27.302659Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67891] save tx TxId: 67891 State: PREPARED MinStep: 135 MaxStep: 30135 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 4294969488 } Partitions { } 2025-06-25T14:31:27.302739Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE ... s: 0 Generation: 6 Important: false } 2025-06-25T14:31:29.975134Z node 6 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:29.975793Z node 6 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 6 actor [6:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 6 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 6 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 6 Important: false } 2025-06-25T14:31:29.975885Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:31:29.976231Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:29.976513Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [6:188:2199] 2025-06-25T14:31:29.977510Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 
2025-06-25T14:31:29.977566Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [6:188:2199] 2025-06-25T14:31:29.977624Z node 6 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:31:29.977976Z node 6 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:29.978067Z node 6 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 6 2025-06-25T14:31:29.978122Z node 6 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 6 done 2025-06-25T14:31:29.978277Z node 6 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:31:29.978411Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:31:29.978586Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:29.980775Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:29.980858Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:29.981188Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [6:195:2204], now have 1 active actors on pipe 2025-06-25T14:31:29.981808Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [6:198:2206], now have 1 active actors on pipe 2025-06-25T14:31:29.981908Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:31:29.981954Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-25T14:31:29.982004Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2729: [PQ: 72057594037927937] partition {0, {0, 3}, 100000} for WriteId {0, 3} 2025-06-25T14:31:29.982190Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3630: [PQ: 72057594037927937] send TEvSubscribeLock for WriteId {0, 3} 2025-06-25T14:31:29.982280Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:31:29.984148Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:31:29.984645Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:31:29.984939Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:29.985174Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateInit] 
bootstrapping {0, {0, 3}, 100000} [6:204:2211] 2025-06-25T14:31:29.986044Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitDiskStatusStep 2025-06-25T14:31:29.987100Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitMetaStep 2025-06-25T14:31:29.987338Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitInfoRangeStep 2025-06-25T14:31:29.987625Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitDataRangeStep 2025-06-25T14:31:29.987817Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitDataStep 2025-06-25T14:31:29.987857Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:31:29.987902Z node 6 :PERSQUEUE INFO: partition_init.cpp:895: [topic:{0, {0, 3}, 100000}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:31:29.987951Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:{0, {0, 3}, 100000}:Initializer] Initializing completed. 2025-06-25T14:31:29.988003Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateInit] init complete for topic 'topic' partition {0, {0, 3}, 100000} generation 2 [6:204:2211] 2025-06-25T14:31:29.988065Z node 6 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateInit] SYNC INIT topic topic partitition {0, {0, 3}, 100000} so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:31:29.988112Z node 6 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:29.988383Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] no data for compaction 2025-06-25T14:31:29.988588Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie -=[ 0wn3r ]=-|396c62b3-2b86935f-f0a88762-3ea58f83_0 generated for partition {0, {0, 3}, 100000} topic 'topic' owner -=[ 0wn3r ]=- 2025-06-25T14:31:29.988717Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] TPartition::ReplyOwnerOk. Partition: {0, {0, 3}, 100000} 2025-06-25T14:31:29.988802Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 4 2025-06-25T14:31:29.989160Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037927937] server disconnected, pipe [6:198:2206] destroyed 2025-06-25T14:31:29.989219Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] TPartition::DropOwner. 
2025-06-25T14:31:29.989457Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [6:216:2218], now have 1 active actors on pipe 2025-06-25T14:31:29.989613Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 179 RawX2: 25769805968 } TxId: 2 Data { Operations { PartitionId: 0 Path: "/topic" SupportivePartition: 100000 } Immediate: false WriteId { NodeId: 0 KeyId: 3 KafkaTransaction: false } } 2025-06-25T14:31:29.989659Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3295: [PQ: 72057594037927937] PartitionId {0, {0, 3}, 100000} for WriteId {0, 3} 2025-06-25T14:31:29.989709Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3385: [PQ: 72057594037927937] TxId 2 has WriteId {0, 3} 2025-06-25T14:31:29.989750Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3431: [PQ: 72057594037927937] distributed transaction 2025-06-25T14:31:29.989814Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3745: [PQ: 72057594037927937] Propose TxId 2, WriteId {0, 3} 2025-06-25T14:31:29.989857Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3757: [PQ: 72057594037927937] Link TxId 2 with WriteId {0, 3} 2025-06-25T14:31:29.989895Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-25T14:31:29.989932Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 2, State UNKNOWN 2025-06-25T14:31:29.989976Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3979: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-25T14:31:29.990020Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 2, NewState PREPARING 2025-06-25T14:31:29.990068Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 2 2025-06-25T14:31:29.990188Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 2] save tx TxId: 2 State: PREPARED MinStep: 230 MaxStep: 30230 Operations { PartitionId: 0 Path: "/topic" SupportivePartition: 100000 } Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 25769805968 } WriteId { NodeId: 0 KeyId: 3 KafkaTransaction: false } Partitions { } 2025-06-25T14:31:29.990296Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:31:29.994200Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:31:29.994263Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-25T14:31:29.994304Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 2, State PREPARING 2025-06-25T14:31:29.994344Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 2, NewState PREPARED 2025-06-25T14:31:29.994703Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [6:223:2224], now have 1 active actors on pipe 2025-06-25T14:31:29.994808Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:31:29.994849Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-25T14:31:29.994897Z node 6 :PERSQUEUE WARN: event_helpers.cpp:42: tablet 72057594037927937 topic 'topic error: it is forbidden to write after a commit 2025-06-25T14:31:29.994971Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1434: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 2, Error it is forbidden to write after a commit 
2025-06-25T14:31:29.995009Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:402: Answer error topic: 'topic' partition: 0 messageNo: 0 requestId: error: it is forbidden to write after a commit >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleBorrowCompactionTimeouts [GOOD] >> TFetchRequestTests::HappyWay [GOOD] >> TFetchRequestTests::BadTopicName >> TPartitionTests::DataTxCalcPredicateOrder [GOOD] >> TopicAutoscaling::ControlPlane_CDC [GOOD] >> TopicAutoscaling::ControlPlane_CDC_Disable |77.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/ut/ydb-core-client-ut |77.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/ut/ydb-core-client-ut |77.8%| [LD] {RESULT} $(B)/ydb/core/client/ut/ydb-core-client-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::DataTxCalcPredicateOrder [GOOD] Test command err: 2025-06-25T14:31:16.849180Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:16.849286Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:16.868223Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [1:181:2194] 2025-06-25T14:31:16.869882Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:31:16.000000Z 2025-06-25T14:31:16.869939Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [1:181:2194] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\240\226\200\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\240\226\200\274\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\264\222\004" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\001\020\001\030\001\"\007session(\0000\001@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\001\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-25T14:31:17.695409Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:17.695487Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:17.713961Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [2:183:2196] 2025-06-25T14:31:17.715905Z node 2 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-25T14:31:17.000000Z 2025-06-25T14:31:17.715974Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [2:183:2196] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\210\236\200\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-25T14:31:18.511215Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:18.511294Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:18.530506Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-25T14:31:18.530722Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:18.530971Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:182:2195] 2025-06-25T14:31:18.531909Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-25T14:31:18.532083Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-25T14:31:18.532249Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:31:18.532455Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-25T14:31:18.532721Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:31:18.532912Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:31:18.533042Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:31:18.533086Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:31:18.533136Z node 3 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:31:18.000000Z 2025-06-25T14:31:18.533177Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 
2025-06-25T14:31:18.533227Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [3:182:2195] 2025-06-25T14:31:18.533283Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:31:18.533334Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:18.533597Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:19.887084Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:19.887319Z node 3 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client session is set to 0 (startOffset 50) session session Got KV request Got batch complete: 1 Got KV request Got KV request Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\360\245\200\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-25T14:31:19.898201Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:19.898277Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 2025-06-25T14:31:19.898438Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-25T14:31:21.158332Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait first predicate result 2025-06-25T14:31:21.158528Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 Create distr tx with id = 2 and act no: 3 2025-06-25T14:31:21.158795Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 2025-06-25T14:31:22.407366Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait second predicate result 2025-06-25T14:31:22.407581Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:31:22.407652Z node 3 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-06-25T14:31:22.407709Z node 3 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] 
TPartition::CommitWriteOperations TxId: 0 2025-06-25T14:31:22.407805Z node 3 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 Got batch complete: 1 2025-06-25T14:31:22.408068Z node 3 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 2 2025-06-25T14:31:22.408130Z node 3 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 2 2025-06-25T14:31:22.408191Z node 3 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 Got KV request Send disk status response with cookie: 0 2025-06-25T14:31:22.408602Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|de0ee06b-6f5d2d09-2fe26932-ab52e696_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner owner1 2025-06-25T14:31:22.432869Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 2 2025-06-25T14:31:22.433028Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-25T14:31:22.433096Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got batch complete: 1 2025-06-25T14:31:22.433999Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'SourceId' seqNo 5 partNo 0 2025-06-25T14:31:22.434798Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'SourceId' seqNo 5 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 118 count 1 nextOffset 52 batches 1 2025-06-25T14:31:22.435366Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 51,1 HeadOffset 50 endOffset 50 curOffset 52 d0000000000_00000000000000000051_00000_0000000001_00000? size 104 WTime 15244 Got KV request Got batch complete ... 
art initializing step TInitMetaStep Got KV request 2025-06-25T14:31:24.204708Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:31:24.204887Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-25T14:31:24.205111Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-25T14:31:24.205294Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 1 size 684 so 0 eo 1 d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-25T14:31:24.205435Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:31:24.205476Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:31:24.205522Z node 4 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:31:24.000000Z 2025-06-25T14:31:24.205558Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-25T14:31:24.205608Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [4:182:2195] 2025-06-25T14:31:24.205656Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-25T14:31:24.205697Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:24.205919Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:25.579169Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:25.579509Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie SourceId|daa5ed67-ffac3dbb-92773247-694dd39c_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner SourceId 2025-06-25T14:31:25.579646Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 Got batch complete: 1 Wait write response Wait kv request 2025-06-25T14:31:25.579970Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'SourceId' seqNo 4 partNo 0 2025-06-25T14:31:25.580797Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'SourceId' seqNo 4 partNo 0 FormedBlobsCount 0 NewHead: Offset 11 PartNo 0 PackedSize 118 count 1 nextOffset 12 batches 1 2025-06-25T14:31:25.581366Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 11,1 HeadOffset 1 endOffset 1 curOffset 12 d0000000000_00000000000000000011_00000_0000000001_00000? size 104 WTime 5132 Got KV request Got batch complete: 1 Got KV request Got KV request 2025-06-25T14:31:25.602324Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 22 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:25.602445Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:25.602540Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'SourceId', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 11 is stored on disk 2025-06-25T14:31:25.602745Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=104, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Wait second predicate result Create distr tx with id = 0 and act no: 1 2025-06-25T14:31:25.602977Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-25T14:31:26.944552Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=104, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:26.944741Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 2025-06-25T14:31:27.414510Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:27.414574Z node 5 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:27.427839Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-25T14:31:27.428027Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:27.428256Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [5:182:2195] 2025-06-25T14:31:27.429244Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-25T14:31:27.429414Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-25T14:31:27.429552Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:31:27.429717Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-25T14:31:27.429935Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:31:27.430121Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:31:27.430237Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:31:27.430275Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:31:27.430317Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:31:27.000000Z 2025-06-25T14:31:27.430355Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-25T14:31:27.430399Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [5:182:2195] 2025-06-25T14:31:27.430454Z node 5 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:31:27.430515Z node 5 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-25T14:31:27.430701Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:28.779819Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 2025-06-25T14:31:28.780068Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-25T14:31:30.114776Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:30.115126Z node 5 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 Create distr tx with id = 2 and act no: 3 2025-06-25T14:31:30.115393Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 2025-06-25T14:31:30.115484Z node 5 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-06-25T14:31:30.115521Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-06-25T14:31:30.115595Z node 5 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:31:31.412671Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:31.412878Z node 5 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 2025-06-25T14:31:31.413103Z node 5 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 2 2025-06-25T14:31:31.413173Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 2 2025-06-25T14:31:31.413253Z node 5 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 Got KV request Got KV request Send disk status response with cookie: 0 Wait tx committed for tx 0 2025-06-25T14:31:31.427330Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 2 2025-06-25T14:31:31.427455Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait tx committed for tx 2 >> TPartitionTests::FailedTxsDontBlock [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleBorrowCompactionTimeouts [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] 
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:28:16.602200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:28:16.602304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:16.602344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:28:16.602382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:28:16.602428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:28:16.602458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:28:16.602514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:28:16.602588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:28:16.603353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:28:16.603740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:28:16.725253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:16.725340Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:16.759194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:28:16.759685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:28:16.759847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:28:16.780494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:28:16.780917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:28:16.781576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:16.781869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:28:16.791803Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:16.791999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:28:16.793309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:16.793383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:28:16.793519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:28:16.793571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:28:16.793608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:28:16.793702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.800675Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:28:16.971039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:28:16.971262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.971476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:28:16.971520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:28:16.971739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:28:16.971823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:16.974302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:16.974504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: 
txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:28:16.974730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.974793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:28:16.974830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:28:16.974873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:28:16.976865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.976930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:28:16.976979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:28:16.978745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.978789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:28:16.978830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:16.978896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:28:16.982770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:28:16.985197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:28:16.985393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:28:16.986319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:28:16.986471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 
2025-06-25T14:28:16.986526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:16.986783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:28:16.986827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:28:16.986982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:28:16.987103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:28:16.989543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:28:16.989591Z node 1 :FLAT_TX_SCHEMESHARD ... d: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: true Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 455 Memory: 124232 Storage: 14156 } ShardState: 2 UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 41 TableOwnerId: 72057594046678944 FollowerId: 0 2025-06-25T14:31:29.863424Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:31:29.863480Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 0.0455 2025-06-25T14:31:29.863599Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: true Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:31:29.863633Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-25T14:31:29.906552Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:31:29.906641Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: 
StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:31:29.906681Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 1 2025-06-25T14:31:29.906763Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-25T14:31:29.906802Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-25T14:31:29.906924Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 13940 row count 100 2025-06-25T14:31:29.907019Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940 2025-06-25T14:31:29.907057Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-25T14:31:29.907154Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 1 out of 1 partitions 2025-06-25T14:31:29.907248Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:31:29.917769Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:31:29.917845Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:31:29.917879Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:31:29.949271Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:721:2684]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-25T14:31:29.949523Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186233409547, FollowerId 0, tableId 3 2025-06-25T14:31:29.949887Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:721:2684], Recipient [3:129:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409547 TableLocalId: 3 Generation: 2 Round: 12 TableStats { DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 74 Memory: 124232 } ShardState: 2 UserTablePartOwners: 72075186233409547 UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 213 TableOwnerId: 72057594046678944 FollowerId: 0 
2025-06-25T14:31:29.949934Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:31:29.949984Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 3] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 0.0074 2025-06-25T14:31:29.950111Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 3] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:31:29.950171Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-25T14:31:29.981616Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:92: Operation queue wakeup 2025-06-25T14:31:29.981717Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__borrowed_compaction.cpp:65: Borrowed compaction timeout for pathId# [OwnerId: 72057594046678944, LocalPathId: 3], datashard# 72075186233409547, next wakeup# 0.000000s, in queue# 0 shards, running# 0 shards at schemeshard 72057594046678944 2025-06-25T14:31:29.981779Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__borrowed_compaction.cpp:28: RunBorrowedCompaction for pathId# [OwnerId: 72057594046678944, LocalPathId: 3], datashard# 72075186233409547, next wakeup# 0.000000s, rate# 0, in queue# 1 shards, running# 0 shards at schemeshard 72057594046678944 2025-06-25T14:31:29.981865Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:84: Operation queue set wakeup after delta# 3 seconds 2025-06-25T14:31:29.981895Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__borrowed_compaction.cpp:100: Borrowed compaction enqueued shard# 72057594046678944:2 at schemeshard 72057594046678944 2025-06-25T14:31:29.982024Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:31:29.982069Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:31:29.982104Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 1 2025-06-25T14:31:29.982192Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-25T14:31:29.982230Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-25T14:31:29.982351Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046678944:2 data size 13940 row count 100 2025-06-25T14:31:29.982435Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], pathId map=CopyTable, is column=0, is olap=0, RowCount 100, DataSize 13940, with borrowed parts 2025-06-25T14:31:29.982471Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409547, followerId 0 2025-06-25T14:31:29.982568Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:472: Want to split tablet 72075186233409547 by size split by size (shardCount: 1, maxShardCount: 2, shardSize: 13940, maxShardSize: 1) 2025-06-25T14:31:29.982630Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:505: Postpone split tablet 72075186233409547 because it has borrow parts, enqueue compact them first 2025-06-25T14:31:29.982668Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__borrowed_compaction.cpp:100: Borrowed compaction enqueued shard# 72057594046678944:2 at schemeshard 72057594046678944 2025-06-25T14:31:29.982751Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:31:29.993289Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:31:29.993385Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:31:29.993424Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:31:30.245740Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:31:30.245846Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:31:30.245969Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:129:2153], Recipient [3:129:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:31:30.246062Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TPartitionTests::EndWriteTimestamp_DataKeysBody |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |77.8%| [LD] {RESULT} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |77.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |77.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration [GOOD] |77.8%| [TA] $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/test-results/gtest/{meta.json ... 
results_accumulator.log} >> TPartitionTests::EndWriteTimestamp_DataKeysBody [GOOD] >> TPartitionTests::EndWriteTimestamp_FromMeta >> TKesusTest::TestAcquireLocks [GOOD] >> TKesusTest::TestAcquireRepeat >> TPartitionTests::EndWriteTimestamp_FromMeta [GOOD] >> TPartitionTests::EndWriteTimestamp_HeadKeys >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_BadSourceId_Test [GOOD] >> TTxDataShardMiniKQL::WriteAndReadMany [GOOD] >> TKesusTest::TestAcquireRepeat [GOOD] >> TKesusTest::TestAcquireDowngrade >> Cdc::InitialScanRacyProgressAndDrop [GOOD] >> Cdc::EnqueueRequestProcessSend >> KqpLimits::CancelAfterRoTx [GOOD] >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookup >> TPartitionTests::EndWriteTimestamp_HeadKeys [GOOD] >> TKesusTest::TestAcquireDowngrade [GOOD] >> TKesusTest::TestAcquireBeforeTimeoutViaSessionTimeout |77.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_BadSourceId_Test [GOOD] Test command err: 2025-06-25T14:31:23.662135Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894531419662635:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:23.662189Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:31:23.729140Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894530416551196:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:23.729192Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:31:23.893376Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b55/r3tmp/tmpJgDCXQ/pdisk_1.dat 2025-06-25T14:31:23.905347Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:31:24.134010Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:24.147627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:24.147713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:24.161342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26707, node 1 2025-06-25T14:31:24.248504Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:24.248567Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:24.251091Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:31:24.252147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 
2025-06-25T14:31:24.285480Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b55/r3tmp/yandexKVeLUN.tmp 2025-06-25T14:31:24.285524Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b55/r3tmp/yandexKVeLUN.tmp 2025-06-25T14:31:24.285680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b55/r3tmp/yandexKVeLUN.tmp 2025-06-25T14:31:24.285787Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:31:24.337363Z INFO: TTestServer started on Port 28924 GrpcPort 26707 TClient is connected to server localhost:28924 PQClient connected to localhost:26707 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:24.578387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:31:24.623503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:24.630298Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:24.744346Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-25T14:31:26.629073Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894543301453440:2276], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:26.629189Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894543301453429:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:26.629454Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:26.634083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:26.650196Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894543301453443:2277], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:31:26.746966Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894543301453472:2177] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:27.000889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:27.003578Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894544304565463:2305], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:31:27.004011Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjFlMDlhZTMtNGIyNDM5OWUtMTMxNjFmZC0yMmY4OGNmYw==, ActorId: [1:7519894544304565437:2298], ActorState: ExecuteState, TraceId: 01jykr0d8h7kp17r1xnm3e2yd0, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:31:27.005341Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894543301453486:2282], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:31:27.005542Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZGYxMjk3ODctZjAzNzkwYmYtYzRlMWMyYjctOGNjMjRiNTE=, ActorId: [2:7519894543301453426:2272], ActorState: ExecuteState, TraceId: 01jykr0d535t0xmr1y4snz8mjc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:31:27.006063Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:31:27.006454Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:31:27.072373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:27.190796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:31:27.402776Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jykr0dsd4qn6x8axnwxnykvv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTZhOGY0MGQtMjA4MDMzYjctNGZmOGI3OTYtYWRiY2M0MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. 
Subcribe to ClusterTracker from [1:7519894548599533205:3065] 2025-06-25T14:31:28.640765Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894531419662635:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:28.640866Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:31:28.729354Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519894530416551196:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:28.729436Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok Received TEvChooseError: Bad SourceId 2025-06-25T14:31:33.561563Z node 1 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [1:7519894574369337394:3258] (SourceId=base64:a***, PreferedPartition=(NULL)) Start idle 2025-06-25T14:31:33.561590Z node 1 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [1:7519894574369337394:3258] (SourceId=base64:a***, PreferedPartition=(NULL)) ReplyError: Bad SourceId ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::EndWriteTimestamp_HeadKeys [GOOD] Test command err: 2025-06-25T14:31:14.389358Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:14.389458Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:14.405198Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-25T14:31:14.405450Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:14.405844Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:180:2193] 2025-06-25T14:31:14.406844Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-25T14:31:14.407032Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-25T14:31:14.407186Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:31:14.407359Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-25T14:31:14.409449Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-25T14:31:14.409633Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 1 size 684 so 0 eo 1 d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-25T14:31:14.409798Z node 1 :PERSQUEUE DEBUG: 
partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:31:14.409830Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:31:14.409865Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:31:14.000000Z 2025-06-25T14:31:14.409889Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-25T14:31:14.409932Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [1:180:2193] 2025-06-25T14:31:14.409968Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-25T14:31:14.410020Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:14.410237Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:14.754138Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src1|d8818e8d-aa778881-5284c2ee-f61ffdf3_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src1 2025-06-25T14:31:14.754317Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 Got batch complete: 1 2025-06-25T14:31:15.767509Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create immediate tx with id = 3 and act no: 4 Create immediate tx with id = 6 and act no: 7 2025-06-25T14:31:15.767905Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-25T14:31:17.067339Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:17.067537Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Wait batch completion 2025-06-25T14:31:17.067757Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:31:17.067847Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:31:17.067997Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src1' seqNo 1 partNo 0 2025-06-25T14:31:17.069099Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src1' seqNo 1 partNo 0 FormedBlobsCount 0 NewHead: Offset 20 PartNo 0 PackedSize 84 count 1 nextOffset 21 batches 1 2025-06-25T14:31:17.069198Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:31:17.069254Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 20 PartNo 0 PackedSize 84 count 1 nextOffset 21 batches 1 2025-06-25T14:31:17.090180Z node 1 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-25T14:31:17.090327Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src1' seqNo 3 partNo 0 2025-06-25T14:31:17.091493Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1425: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob sourceId 'src1' seqNo 3 partNo 0 result is x0000000000_00000000000000000020_00000_0000000001_00000? size 70 2025-06-25T14:31:17.091585Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1117: [PQ: 72057594037927937, Partition: 0, State: StateIdle] writing blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 old key x0000000000_00000000000000000020_00000_0000000001_00000? new key d0000000000_00000000000000000020_00000_0000000001_00000? 
size 70 WTime 10138 2025-06-25T14:31:17.092343Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src1' seqNo 3 partNo 0 FormedBlobsCount 1 NewHead: Offset 50 PartNo 0 PackedSize 84 count 1 nextOffset 51 batches 1 2025-06-25T14:31:17.092427Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:31:17.092495Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 84 count 1 nextOffset 51 batches 1 2025-06-25T14:31:17.092536Z node 1 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-25T14:31:17.092984Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 50,1 HeadOffset 1 endOffset 1 curOffset 51 d0000000000_00000000000000000050_00000_0000000001_00000? size 70 WTime 10138 Got KV request Got batch complete: 5 Got KV request Got KV request Send disk status response with cookie: 0 Wait immediate tx complete 3 2025-06-25T14:31:17.114059Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 34 WriteNewSizeFromSupportivePartitions# 2 2025-06-25T14:31:17.114148Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:17.114213Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 1 is already written 2025-06-25T14:31:17.114248Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:17.114284Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 1 is already written 2025-06-25T14:31:17.114543Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=140, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 3 Wait immediate tx complete 6 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 6 2025-06-25T14:31:18.379364Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=140, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Create distr tx with id = 8 and act no: 9 Create immediate tx with id = 10 and act no: 11 Create distr tx with id = 12 and act no: 13 2025-06-25T14:31:18.379757Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 8 2025-06-25T14:31:18.379908Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 12 2025-06-25T14:31:19.684949Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=140, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:19.685186Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Wait batch completion 2025-06-25T14:31:19.685364Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:31:19.685455Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:31:19.685528Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:31:19.685601Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 51 PartNo 0 PackedSize 0 count 0 nextOffset 51 b ... /rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src2' seqNo 10 partNo 0 2025-06-25T14:31:31.522815Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src2' seqNo 10 partNo 0 FormedBlobsCount 0 NewHead: Offset 70 PartNo 0 PackedSize 552 count 10 nextOffset 80 batches 1 2025-06-25T14:31:31.523369Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 70,10 HeadOffset 25 endOffset 25 curOffset 80 d0000000000_00000000000000000070_00000_0000000010_00000? size 299 WTime 11240 Got KV request Got batch complete: 10 Got KV request Got KV request Send disk status response with cookie: 0 2025-06-25T14:31:31.545955Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 170 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:31.546069Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:31.546155Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 70 is stored on disk 2025-06-25T14:31:31.546208Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-25T14:31:31.546242Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 71 is stored on disk 2025-06-25T14:31:31.546267Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:31.546301Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 72 is stored on disk 2025-06-25T14:31:31.546332Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:31.546367Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 73 is stored on disk 2025-06-25T14:31:31.546393Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:31.546427Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 5, partNo: 0, Offset: 74 is stored on disk 2025-06-25T14:31:31.546455Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:31.546488Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 6, partNo: 0, Offset: 75 is stored on disk 2025-06-25T14:31:31.546516Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:31.546552Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 76 is stored on disk 2025-06-25T14:31:31.546578Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:31.546612Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 77 is stored on disk 2025-06-25T14:31:31.546637Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-25T14:31:31.546669Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 78 is stored on disk 2025-06-25T14:31:31.546696Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:31.546731Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src2', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 79 is stored on disk 2025-06-25T14:31:31.547047Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=488, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Create distr tx with id = 8 and act no: 9 Create immediate tx with id = 10 and act no: 11 Create distr tx with id = 12 and act no: 13 2025-06-25T14:31:31.547347Z node 2 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 8 2025-06-25T14:31:31.547475Z node 2 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 12 2025-06-25T14:31:32.588988Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=488, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:32.589179Z node 2 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Wait batch completion 2025-06-25T14:31:32.589375Z node 2 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:31:32.589432Z node 2 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:31:32.589501Z node 2 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(ABORTED), reason=MinSeqNo violation failure on src2 Got batch complete: 3 2025-06-25T14:31:32.836986Z node 2 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 12 2025-06-25T14:31:32.837069Z node 2 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 12 2025-06-25T14:31:32.837162Z node 2 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 80 PartNo 0 PackedSize 0 count 0 nextOffset 80 batches 0, NewHead=Offset 80 PartNo 0 PackedSize 0 count 0 nextOffset 80 batches 0 Got KV request Send disk status response with cookie: 0 Wait immediate tx complete 10 2025-06-25T14:31:32.868368Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 1 2025-06-25T14:31:32.868502Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, 
State: StateIdle] need more data for compaction. cumulativeSize=488, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Got propose resutl: Origin: 72057594037927937 Status: ABORTED TxId: 10 Errors { Kind: BAD_REQUEST Reason: "MinSeqNo violation failure on src2" } Wait tx committed for tx 12 2025-06-25T14:31:33.450025Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:33.450107Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:33.462157Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [3:183:2196] 2025-06-25T14:31:33.463626Z node 3 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:31:33.000000Z 2025-06-25T14:31:33.463690Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 2 generation 0 [3:183:2196] 2025-06-25T14:31:34.244652Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:34.244732Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:34.261144Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [4:183:2196] 2025-06-25T14:31:34.262967Z node 4 :PERSQUEUE INFO: partition_init.cpp:895: [Root/PQ/rt3.dc1--account--topic:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:31:34.263033Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 2 generation 0 [4:183:2196] 2025-06-25T14:31:35.190778Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:35.190857Z node 5 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:35.207313Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [5:183:2196] >>>> ADD BLOB 0 writeTimestamp=2025-06-25T14:31:35.199306Z >>>> ADD BLOB 1 writeTimestamp=2025-06-25T14:31:35.199334Z >>>> ADD BLOB 2 writeTimestamp=2025-06-25T14:31:35.199351Z >>>> ADD BLOB 3 writeTimestamp=2025-06-25T14:31:35.199366Z >>>> ADD BLOB 4 writeTimestamp=2025-06-25T14:31:35.199379Z >>>> ADD BLOB 5 writeTimestamp=2025-06-25T14:31:35.199394Z >>>> ADD BLOB 6 writeTimestamp=2025-06-25T14:31:35.199408Z >>>> ADD BLOB 7 writeTimestamp=2025-06-25T14:31:35.199422Z >>>> ADD BLOB 8 writeTimestamp=2025-06-25T14:31:35.199434Z >>>> ADD BLOB 9 writeTimestamp=2025-06-25T14:31:35.199449Z 2025-06-25T14:31:35.210434Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-25T14:31:35.000000Z 2025-06-25T14:31:35.210501Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 2 generation 0 [5:183:2196] >> DataShardVolatile::DistributedWriteEarlierSnapshotNotBlocked [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit+UseSink >> DataShardVolatile::DistributedWriteThenLateWriteReadCommit [GOOD] >> DataShardVolatile::TwoAppendsMustBeVolatile+UseSink >> TListAllTopicsTests::PlainList [GOOD] >> TListAllTopicsTests::RecursiveList >> TPQTest::TestPQPartialRead [GOOD] >> TPQTest::TestOwnership |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |77.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |77.8%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/test-results/gtest/{meta.json ... results_accumulator.log} |77.8%| [LD] {RESULT} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |77.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestSignificantRequestWhenRunReserveTx >> TTxLocatorTest::TestWithReboot >> TTxLocatorTest::TestSignificantRequestWhenRunReserveTx [GOOD] |77.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest |77.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::ApplyInCorrectOrder >> PQCountersLabeled::ImportantFlagSwitching [GOOD] >> PQCountersLabeled::NewConsumersCountersAppear >> LocalTableWriter::WaitTxIds >> LocalTableWriter::WriteTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::WriteAndReadMany [GOOD] Test command err: 2025-06-25T14:28:23.769253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:28:23.769312Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:23.776471Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:28:23.916792Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:28:23.917315Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T14:28:23.937007Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:28:24.043711Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:28:24.270197Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:28:24.270433Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:28:24.272184Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T14:28:24.272260Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T14:28:24.272326Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: 
LoadChangeRecordCommits at tablet: 9437184 2025-06-25T14:28:24.290234Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:28:24.290407Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:28:24.290506Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T14:28:24.507210Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:28:24.545729Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T14:28:24.559223Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:28:24.559412Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T14:28:24.559473Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T14:28:24.559508Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T14:28:24.559542Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:28:24.559766Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.559818Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.590549Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T14:28:24.590715Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T14:28:24.590806Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T14:28:24.623865Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:24.623981Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T14:28:24.624026Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T14:28:24.624088Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T14:28:24.624132Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T14:28:24.638486Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T14:28:24.638776Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.638837Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.638896Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T14:28:24.659076Z 
node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T14:28:24.659165Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:28:24.659271Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:28:24.679632Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T14:28:24.679750Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T14:28:24.689646Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T14:28:24.689758Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T14:28:24.689808Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T14:28:24.689847Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T14:28:24.689898Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T14:28:24.690288Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T14:28:24.690337Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T14:28:24.690397Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T14:28:24.690441Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T14:28:24.690512Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T14:28:24.690554Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T14:28:24.690612Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T14:28:24.690652Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T14:28:24.690676Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T14:28:24.722581Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T14:28:24.722658Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T14:28:24.722695Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 
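The trace above walks a proposed scheme transaction through the datashard's execution units (CheckSchemeTx, StoreSchemeTx, FinishPropose, WaitForPlan), each reporting a status such as Executed, DelayComplete, or "not ready". The following is a minimal, hypothetical C++ sketch of such a unit pipeline; the unit names are taken from the log, but the types, statuses, and logic are illustrative assumptions, not the real YDB datashard interfaces.

```cpp
// Hypothetical sketch of an execution-unit pipeline in the spirit of the trace above.
// Not the YDB implementation: types and statuses are assumptions for illustration.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

enum class EStatus { Executed, DelayComplete, NotReady };

struct TUnit {
    std::string Name;
    std::function<EStatus()> Run;
};

int main() {
    // Each unit either finishes immediately, defers its side effects to the commit
    // phase (DelayComplete), or parks the operation until an external event arrives.
    std::vector<TUnit> plan = {
        {"CheckSchemeTx", [] { return EStatus::Executed; }},
        {"StoreSchemeTx", [] { return EStatus::DelayComplete; }},
        {"FinishPropose", [] { return EStatus::DelayComplete; }},
        {"WaitForPlan",   [] { return EStatus::NotReady; }},
    };
    for (const auto& unit : plan) {
        EStatus st = unit.Run();
        if (st == EStatus::NotReady) {
            // mirrors the trace: the operation is parked until a plan step arrives
            std::cout << "operation is not ready to execute on unit " << unit.Name << "\n";
            break;
        }
        std::cout << "advance execution plan, executed on unit " << unit.Name << "\n";
    }
    return 0;
}
```

In the log this parked state ends when the TEvPlanStep message assigns the transaction a step, after which it re-enters the plan queue; the sketch only models the forward pass through the units.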
2025-06-25T14:28:24.722743Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T14:28:24.730164Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T14:28:24.740724Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.740795Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:28:24.740939Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T14:28:24.741104Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T14:28:24.741144Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T14:28:24.741327Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T14:28:24.741369Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T14:28:24.741402Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T14:28:24.741432Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T14:28:24.755855Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T14:28:24.764910Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T14:28:24.765212Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.765273Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:28:24.765367Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T14:28:24.765416Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:28:24.765458Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T14:28:24.765503Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T14:28:24.765546Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
02?\022\002\205\000\034MyReads MyWrites\205\004\205\002?\022\002\206\202\024Reply\024Write?\030\205\002\206\203\010\002 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\024)\211\026?\022\203\005\004\200\205\006\203\004\203\004\203\004\006\n\016\213\004\203\004\207\203\001H\213\002\203\004\203\004\203\010\203\010\203\004\206\203\014\203\014,SelectRange\000\003?* h\020\000\000\000\000\000\000\016\000\000\000\000\000\000\000?\014\005?2\003?,D\003?.F\003?0p\007\013?:\003?4\000\'?8\003\013?>\003?<\003j\030\001\003?@\000\003?B\000\003?D\007\240%&\003?F\000\006\004?J\003\203\014\000\003\203\014\000\003\003?L\000\377\007\002\000\005?\032\005?\026?x\000\005?\030\003\005? \005?\034?x\000\006 2025-06-25T14:31:27.786245Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:31:27.786334Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:31:27.787103Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit CheckDataTx 2025-06-25T14:31:27.801352Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-25T14:31:27.801443Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit CheckDataTx 2025-06-25T14:31:27.801483Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-25T14:31:27.801522Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit BuildAndWaitDependencies 2025-06-25T14:31:27.801579Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-25T14:31:27.801642Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:11] at 9437184 2025-06-25T14:31:27.801677Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-25T14:31:27.801703Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-25T14:31:27.801729Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit ExecuteDataTx 2025-06-25T14:31:27.801755Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-25T14:31:27.806626Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-25T14:31:27.806815Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-25T14:31:27.806855Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-25T14:31:27.872928Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:31:27.873009Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-25T14:31:27.873808Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-25T14:31:27.876868Z node 3 
:TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-25T14:31:27.877039Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-25T14:31:27.877085Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-25T14:31:28.002198Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:31:28.002276Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-25T14:31:28.003012Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-25T14:31:28.023174Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:11] at 9437184 exceeded memory limit 4194304 and requests 33554432 more for the next try 2025-06-25T14:31:28.023490Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-25T14:31:28.023544Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-25T14:31:28.023911Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:31:28.023944Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-25T14:31:28.024568Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-25T14:31:28.229052Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-25T14:31:28.231186Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-25T14:31:28.231284Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-25T14:31:28.451294Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:31:28.451389Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-25T14:31:28.452110Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-25T14:31:28.637293Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:11] at 9437184 exceeded memory limit 37748736 and requests 301989888 more for the next try 2025-06-25T14:31:28.638783Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-25T14:31:28.638839Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-25T14:31:28.778243Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:31:28.778320Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-25T14:31:28.779042Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-25T14:31:28.783024Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-25T14:31:28.783201Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-25T14:31:28.783249Z node 3 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-25T14:31:28.811207Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:31:28.811266Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-25T14:31:28.811800Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-25T14:31:28.813322Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-25T14:31:28.813465Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-25T14:31:28.813506Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-25T14:31:28.833878Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:31:28.833944Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-25T14:31:28.834628Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-25T14:31:28.840736Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-25T14:31:28.840931Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-25T14:31:28.840978Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-25T14:31:29.232820Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T14:31:29.232903Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-25T14:31:29.233714Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-25T14:31:30.287887Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:11] at tablet 9437184 with status COMPLETE 2025-06-25T14:31:30.287998Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:11] at 9437184: {NSelectRow: 0, NSelectRange: 1, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 129871, SelectRangeBytes: 40000268, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T14:31:30.288069Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-25T14:31:30.288110Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit ExecuteDataTx 2025-06-25T14:31:30.288142Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit FinishPropose 2025-06-25T14:31:30.288177Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit FinishPropose 2025-06-25T14:31:30.288235Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 11 at tablet 9437184 send to client, exec latency: 62 ms, propose latency: 62 ms, status: COMPLETE 2025-06-25T14:31:30.288349Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 
9437184 is DelayComplete 2025-06-25T14:31:30.288380Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit FinishPropose 2025-06-25T14:31:30.288407Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit CompletedOperations 2025-06-25T14:31:30.288436Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit CompletedOperations 2025-06-25T14:31:30.288486Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-25T14:31:30.288510Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit CompletedOperations 2025-06-25T14:31:30.288536Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:11] at 9437184 has finished 2025-06-25T14:31:30.315265Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T14:31:30.315335Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:11] at 9437184 on unit FinishPropose 2025-06-25T14:31:30.315389Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> TTxLocatorTest::TestWithReboot [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestSignificantRequestWhenRunReserveTx [GOOD] Test command err: 2025-06-25T14:31:38.102075Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-25T14:31:38.102653Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-25T14:31:38.103345Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-25T14:31:38.108989Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.109660Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-25T14:31:38.119635Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.119744Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.119874Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-25T14:31:38.119993Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.120153Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.120256Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-25T14:31:38.120391Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-25T14:31:38.121682Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:81:2115] requested range size#100000 2025-06-25T14:31:38.122182Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:83:2117] requested range size#100000 2025-06-25T14:31:38.122585Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:87:2121] requested range size#100000 2025-06-25T14:31:38.122789Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:85:2119] requested range size#100000 2025-06-25T14:31:38.123364Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:91:2125] requested range size#100000 2025-06-25T14:31:38.123546Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.123640Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:89:2123] requested range size#100000 2025-06-25T14:31:38.123821Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.124014Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.124132Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:71:0] Status# OK StatusFlags# { 
Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.124210Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#100000 2025-06-25T14:31:38.124431Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:75:2109] requested range size#100000 2025-06-25T14:31:38.124588Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.124709Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.124839Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:77:2111] requested range size#100000 2025-06-25T14:31:38.125018Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.125160Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.125262Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.125307Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:79:2113] requested range size#100000 2025-06-25T14:31:38.125493Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.125610Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 100000 2025-06-25T14:31:38.125657Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:81:2115] TEvAllocateResult from# 0 to# 100000 2025-06-25T14:31:38.125819Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.125896Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 100000 Reserved to# 200000 2025-06-25T14:31:38.125919Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:83:2117] TEvAllocateResult from# 100000 to# 200000 2025-06-25T14:31:38.126003Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.126069Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.126149Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 200000 Reserved to# 300000 
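The allocator trace shows each requester asking for range size#100000 and receiving a disjoint half-open interval (from# 0 to# 100000, from# 100000 to# 200000, and so on), with a TTxReserve completing before each reply. Below is a minimal sketch of that allocation pattern, assuming nothing more than a monotonic counter; it is hypothetical illustration code, not the YDB tx allocator.

```cpp
// Hypothetical sketch: handing out disjoint, monotonically increasing tx-id ranges,
// matching the "requested range size#100000" / "TEvAllocateResult from# X to# Y" pattern.
#include <cstdint>
#include <iostream>
#include <utility>

class TRangeAllocator {
public:
    // Reserve the next `size` ids and return the half-open interval [from, to).
    // In the trace the reservation is durably committed (TTxReserve Complete)
    // before the reply is sent, so a granted range is never re-issued.
    std::pair<uint64_t, uint64_t> Reserve(uint64_t size) {
        uint64_t from = Next_;
        Next_ += size;
        return {from, Next_};
    }

private:
    uint64_t Next_ = 0;
};

int main() {
    TRangeAllocator allocator;
    for (int i = 0; i < 3; ++i) {
        auto [from, to] = allocator.Reserve(100000);
        std::cout << "TEvAllocateResult from# " << from << " to# " << to << "\n";
    }
    // prints 0..100000, 100000..200000, 200000..300000, as in the trace above
    return 0;
}
```

The disjointness of the returned intervals is what the surrounding test asserts indirectly through the repeated "expected SUCCESS" checks.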
2025-06-25T14:31:38.126173Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:87:2121] TEvAllocateResult from# 200000 to# 300000 2025-06-25T14:31:38.126261Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.126331Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 300000 Reserved to# 400000 2025-06-25T14:31:38.126354Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:85:2119] TEvAllocateResult from# 300000 to# 400000 2025-06-25T14:31:38.126420Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 400000 Reserved to# 500000 2025-06-25T14:31:38.126460Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:91:2125] TEvAllocateResult from# 400000 to# 500000 2025-06-25T14:31:38.126556Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.126597Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.126638Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 500000 Reserved to# 600000 2025-06-25T14:31:38.126659Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:89:2123] TEvAllocateResult from# 500000 to# 600000 2025-06-25T14:31:38.126796Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.126839Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 600000 Reserved to# 700000 2025-06-25T14:31:38.126860Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 600000 to# 700000 2025-06-25T14:31:38.126957Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.127006Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 700000 Reserved to# 800000 2025-06-25T14:31:38.127025Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:75:2109] TEvAllocateResult from# 700000 to# 800000 2025-06-25T14:31:38.127165Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 800000 Reserved to# 900000 2025-06-25T14:31:38.127195Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:77:2111] TEvAllocateResult from# 800000 to# 900000 2025-06-25T14:31:38.127320Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# 
[72057594046447617:2:12:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.127420Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.127525Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 900000 Reserved to# 1000000 2025-06-25T14:31:38.127563Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:79:2113] TEvAllocateResult from# 900000 to# 1000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-06-25T14:31:38.132103Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 720575 ... erved from# 8600000 Reserved to# 8700000 2025-06-25T14:31:38.188964Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:406:2439] TEvAllocateResult from# 8600000 to# 8700000 2025-06-25T14:31:38.189068Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:91:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.189121Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:92:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.189289Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8700000 Reserved to# 8800000 2025-06-25T14:31:38.189317Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:408:2441] TEvAllocateResult from# 8700000 to# 8800000 2025-06-25T14:31:38.189404Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:92:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.189467Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8800000 Reserved to# 8900000 2025-06-25T14:31:38.189490Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:410:2443] TEvAllocateResult from# 8800000 to# 8900000 2025-06-25T14:31:38.189536Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8900000 Reserved to# 9000000 2025-06-25T14:31:38.189583Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:412:2445] TEvAllocateResult from# 8900000 to# 9000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-06-25T14:31:38.192707Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:434:2467] requested range size#100000 2025-06-25T14:31:38.193043Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:436:2469] requested range size#100000 2025-06-25T14:31:38.193163Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# 
[72057594046447617:2:93:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.193308Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:93:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.193370Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:438:2471] requested range size#100000 2025-06-25T14:31:38.193593Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:440:2473] requested range size#100000 2025-06-25T14:31:38.193792Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:94:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.193919Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:442:2475] requested range size#100000 2025-06-25T14:31:38.194016Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:94:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.194164Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:444:2477] requested range size#100000 2025-06-25T14:31:38.194249Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:95:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.194344Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:95:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.194430Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:446:2479] requested range size#100000 2025-06-25T14:31:38.194631Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:96:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.194739Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:96:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.194806Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9000000 Reserved to# 9100000 2025-06-25T14:31:38.194826Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:434:2467] TEvAllocateResult from# 9000000 to# 9100000 2025-06-25T14:31:38.194951Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:448:2481] requested range size#100000 2025-06-25T14:31:38.195036Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:97:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.195087Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:97:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.195230Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE 
TEvAllocate Sender# [1:450:2483] requested range size#100000 2025-06-25T14:31:38.195347Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:98:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.195415Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:98:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.195458Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:99:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.195518Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:452:2485] requested range size#100000 2025-06-25T14:31:38.195592Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:99:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.195687Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9100000 Reserved to# 9200000 2025-06-25T14:31:38.195706Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:436:2469] TEvAllocateResult from# 9100000 to# 9200000 2025-06-25T14:31:38.195747Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:100:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.195837Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9200000 Reserved to# 9300000 2025-06-25T14:31:38.195861Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:438:2471] TEvAllocateResult from# 9200000 to# 9300000 2025-06-25T14:31:38.195917Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9300000 Reserved to# 9400000 2025-06-25T14:31:38.195929Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:440:2473] TEvAllocateResult from# 9300000 to# 9400000 2025-06-25T14:31:38.195956Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:100:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.196052Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9400000 Reserved to# 9500000 2025-06-25T14:31:38.196069Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:442:2475] TEvAllocateResult from# 9400000 to# 9500000 2025-06-25T14:31:38.196098Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:101:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.196171Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9500000 Reserved to# 9600000 2025-06-25T14:31:38.196208Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:444:2477] TEvAllocateResult from# 
9500000 to# 9600000 2025-06-25T14:31:38.196244Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:101:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.196324Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9600000 Reserved to# 9700000 2025-06-25T14:31:38.196341Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:446:2479] TEvAllocateResult from# 9600000 to# 9700000 2025-06-25T14:31:38.196375Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:102:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.196417Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9700000 Reserved to# 9800000 2025-06-25T14:31:38.196433Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:448:2481] TEvAllocateResult from# 9700000 to# 9800000 2025-06-25T14:31:38.196514Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9800000 Reserved to# 9900000 2025-06-25T14:31:38.196538Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:450:2483] TEvAllocateResult from# 9800000 to# 9900000 2025-06-25T14:31:38.196574Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:102:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.196627Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9900000 Reserved to# 10000000 2025-06-25T14:31:38.196642Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:452:2485] TEvAllocateResult from# 9900000 to# 10000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionInactive_1_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionNotExists_Test >> TPQTest::TestReserveBytes [GOOD] >> TPQTest::TestPartitionedBlobFails [GOOD] >> TPQTest::TestReadSessions >> TopicAutoscaling::ControlPlane_CreateAlterDescribe [GOOD] >> TopicAutoscaling::ControlPlane_DisableAutoPartitioning |77.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestWithReboot [GOOD] Test command err: 2025-06-25T14:31:38.191230Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-25T14:31:38.191784Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-25T14:31:38.192510Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-25T14:31:38.193821Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.194160Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-25T14:31:38.201075Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.201238Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.201391Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-25T14:31:38.201499Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.201641Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.201743Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-25T14:31:38.201869Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-25T14:31:38.203459Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:81:2115] requested range size#100000 2025-06-25T14:31:38.204032Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:83:2117] requested range size#100000 2025-06-25T14:31:38.204562Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:87:2121] requested range size#100000 2025-06-25T14:31:38.204881Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:85:2119] requested range size#100000 2025-06-25T14:31:38.205512Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:91:2125] requested range size#100000 2025-06-25T14:31:38.205722Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.205829Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:89:2123] requested range size#100000 2025-06-25T14:31:38.206016Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.206229Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.206315Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:71:0] Status# OK StatusFlags# { 
Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.206566Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#100000 2025-06-25T14:31:38.206768Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:75:2109] requested range size#100000 2025-06-25T14:31:38.206944Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.207069Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.207186Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:77:2111] requested range size#100000 2025-06-25T14:31:38.207356Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.207507Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.207606Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.207667Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:79:2113] requested range size#100000 2025-06-25T14:31:38.207854Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.207979Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 100000 2025-06-25T14:31:38.208032Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:81:2115] TEvAllocateResult from# 0 to# 100000 2025-06-25T14:31:38.208207Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.208287Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 100000 Reserved to# 200000 2025-06-25T14:31:38.208323Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:83:2117] TEvAllocateResult from# 100000 to# 200000 2025-06-25T14:31:38.208407Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.208478Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.208561Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 200000 Reserved to# 300000 
2025-06-25T14:31:38.208587Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:87:2121] TEvAllocateResult from# 200000 to# 300000 2025-06-25T14:31:38.208680Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.208763Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 300000 Reserved to# 400000 2025-06-25T14:31:38.208787Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:85:2119] TEvAllocateResult from# 300000 to# 400000 2025-06-25T14:31:38.208859Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 400000 Reserved to# 500000 2025-06-25T14:31:38.208898Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:91:2125] TEvAllocateResult from# 400000 to# 500000 2025-06-25T14:31:38.209038Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.209081Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.209136Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 500000 Reserved to# 600000 2025-06-25T14:31:38.209157Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:89:2123] TEvAllocateResult from# 500000 to# 600000 2025-06-25T14:31:38.209285Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.209340Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 600000 Reserved to# 700000 2025-06-25T14:31:38.209364Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 600000 to# 700000 2025-06-25T14:31:38.209639Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.209692Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 700000 Reserved to# 800000 2025-06-25T14:31:38.209714Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:75:2109] TEvAllocateResult from# 700000 to# 800000 2025-06-25T14:31:38.209790Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 800000 Reserved to# 900000 2025-06-25T14:31:38.209813Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:77:2111] TEvAllocateResult from# 800000 to# 900000 2025-06-25T14:31:38.209928Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# 
[72057594046447617:2:12:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.210042Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.210145Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 900000 Reserved to# 1000000 2025-06-25T14:31:38.210191Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:79:2113] TEvAllocateResult from# 900000 to# 1000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-06-25T14:31:38.214625Z node 1 :TABLET_MAIN NOTICE: tablet_sys.cpp:1849: Tablet: 7205759404 ... 46447617 Send to Sender# [1:624:2552] TEvAllocateResult from# 9300000 to# 9400000 2025-06-25T14:31:38.554949Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:8:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.554980Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:9:1:24576:78:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.555029Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9400000 Reserved to# 9500000 2025-06-25T14:31:38.555051Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:626:2554] TEvAllocateResult from# 9400000 to# 9500000 2025-06-25T14:31:38.555134Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:9:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.555196Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9500000 Reserved to# 9600000 2025-06-25T14:31:38.555212Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:628:2556] TEvAllocateResult from# 9500000 to# 9600000 2025-06-25T14:31:38.555266Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9600000 Reserved to# 9700000 2025-06-25T14:31:38.555282Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:630:2558] TEvAllocateResult from# 9600000 to# 9700000 2025-06-25T14:31:38.555347Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:10:1:24576:78:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.555381Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9700000 Reserved to# 9800000 2025-06-25T14:31:38.555418Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:632:2560] TEvAllocateResult from# 9700000 to# 9800000 2025-06-25T14:31:38.555505Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:10:0:0:71:0] Status# OK StatusFlags# { Valid } 
ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.555573Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9800000 Reserved to# 9900000 2025-06-25T14:31:38.555602Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:634:2562] TEvAllocateResult from# 9800000 to# 9900000 2025-06-25T14:31:38.555649Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:11:1:24576:72:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.555709Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:11:11:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.555747Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 9900000 Reserved to# 10000000 2025-06-25T14:31:38.555762Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:636:2564] TEvAllocateResult from# 9900000 to# 10000000 expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS expected SUCCESS 2025-06-25T14:31:38.559839Z node 1 :TABLET_MAIN NOTICE: tablet_sys.cpp:1849: Tablet: 72057594046447617 Type: TxAllocator, EReason: ReasonPill, SuggestedGeneration: 0, KnownGeneration: 11 Marker# TSYS31 2025-06-25T14:31:38.560891Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:811: Tablet: 72057594046447617 HandleStateStorageInfoResolve, KnownGeneration: 11 Promote Marker# TSYS16 2025-06-25T14:31:38.561363Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:421: TabletId# 72057594046447617 TTabletReqRebuildHistoryGraph::ProcessKeyEntry, LastBlobID: [72057594046447617:11:11:0:0:71:0] Snap: 11:1 for 72057594046447617 Marker# TRRH04 2025-06-25T14:31:38.561418Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:11:0:0:71:0], refs: [[72057594046447617:11:11:1:24576:72:0],] for 72057594046447617 2025-06-25T14:31:38.561533Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:1:0:0:42:0], refs: [[72057594046447617:11:1:1:28672:1483:0],] for 72057594046447617 2025-06-25T14:31:38.561561Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:2:0:0:69:0], refs: [[72057594046447617:11:2:1:24576:76:0],] for 72057594046447617 2025-06-25T14:31:38.561584Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:3:0:0:71:0], refs: [[72057594046447617:11:3:1:24576:78:0],] for 72057594046447617 2025-06-25T14:31:38.561607Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:4:0:0:71:0], refs: [[72057594046447617:11:4:1:24576:75:0],] for 72057594046447617 2025-06-25T14:31:38.561627Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id 
[72057594046447617:11:5:0:0:71:0], refs: [[72057594046447617:11:5:1:24576:78:0],] for 72057594046447617 2025-06-25T14:31:38.561659Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:6:0:0:71:0], refs: [[72057594046447617:11:6:1:24576:78:0],] for 72057594046447617 2025-06-25T14:31:38.561685Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:7:0:0:71:0], refs: [[72057594046447617:11:7:1:24576:78:0],] for 72057594046447617 2025-06-25T14:31:38.561717Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:8:0:0:71:0], refs: [[72057594046447617:11:8:1:24576:75:0],] for 72057594046447617 2025-06-25T14:31:38.561762Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:9:0:0:71:0], refs: [[72057594046447617:11:9:1:24576:78:0],] for 72057594046447617 2025-06-25T14:31:38.561802Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:356: TTabletReqRebuildHistoryGraph::ProcessLogEntry - TabletID: 72057594046447617, id [72057594046447617:11:10:0:0:71:0], refs: [[72057594046447617:11:10:1:24576:78:0],] for 72057594046447617 2025-06-25T14:31:38.561907Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:625: TabletId# 72057594046447617 TTabletReqRebuildHistoryGraph::BuildHistory - Process generation 11 from 1 with 11 steps Marker# TRRH09 2025-06-25T14:31:38.561932Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:1:1:28672:1483:0],] for 72057594046447617 2025-06-25T14:31:38.561953Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:2:1:24576:76:0],] for 72057594046447617 2025-06-25T14:31:38.561967Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:3:1:24576:78:0],] for 72057594046447617 2025-06-25T14:31:38.561982Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:4:1:24576:75:0],] for 72057594046447617 2025-06-25T14:31:38.562001Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:729: TTabletReqRebuildHistoryGraph::BuildHistory - NOT A TAIL - References: [[72057594046447617:11:5:1:24576:78:0],] for 72057594046447617 2025-06-25T14:31:38.562027Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:6:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:6:1:24576:78:0],] 2025-06-25T14:31:38.562055Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:7:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:7:1:24576:78:0],] 2025-06-25T14:31:38.562071Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: 
[[72057594046447617:11:8:1:24576:75:0],] for 72057594046447617, Gc+: [[72057594046447617:11:8:1:24576:75:0],] 2025-06-25T14:31:38.562089Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:9:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:9:1:24576:78:0],] 2025-06-25T14:31:38.562105Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:10:1:24576:78:0],] for 72057594046447617, Gc+: [[72057594046447617:11:10:1:24576:78:0],] 2025-06-25T14:31:38.562136Z node 1 :TABLET_MAIN DEBUG: tablet_req_rebuildhistory.cpp:691: TTabletReqRebuildHistoryGraph::BuildHistory - THE TAIL - References: [[72057594046447617:11:11:1:24576:72:0],] for 72057594046447617, Gc+: [[72057594046447617:11:11:1:24576:72:0],] 2025-06-25T14:31:38.562308Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:12:0:0:0:0:0] Marker# TSYS01 2025-06-25T14:31:38.563107Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:12:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.565697Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-25T14:31:38.565905Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-25T14:31:38.566556Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 12, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-25T14:31:38.566615Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:12:1:1:28672:1639:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.566713Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:12:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:38.566786Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 12:0 Marker# TSYS28 >> KikimrIcGateway::TestListPath >> ReadAttributesUtils::ReplaceAttributesEmpty [GOOD] >> ReadAttributesUtils::ReplaceAttributesFilter [GOOD] >> KikimrIcGateway::TestLoadBasicSecretValueFromExternalDataSourceMetadata >> Cdc::ShouldBreakLocksOnConcurrentAddIndex [GOOD] >> Cdc::ResolvedTimestampsContinueAfterMerge |77.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |77.9%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |77.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |77.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> ReadAttributesUtils::ReplaceAttributesFilter [GOOD] >> TLocksTest::NoLocksSet |77.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest |77.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> Balancing::Balancing_ManyTopics_TopicApi [GOOD] >> Balancing::Balancing_ManyTopics_PQv1 >> TPQTabletTests::Huge_ProposeTransacton [GOOD] >> TFlatTest::WriteSplitByPartialKeyAndRead >> TopicAutoscaling::PartitionMerge_PreferedPartition_PQv1 [GOOD] >> 
TopicAutoscaling::PartitionSplit_ManySession_BeforeAutoscaleAwareSDK |77.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |77.9%| [LD] {RESULT} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |77.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/cms/ut/ydb-services-cms-ut >> TPQTabletTests::Kafka_Transaction_Supportive_Partitions_Should_Be_Deleted_After_Timeout >> LocalTableWriter::WriteTable [GOOD] >> LocalTableWriter::ApplyInCorrectOrder [GOOD] >> TTxAllocatorClientTest::Boot >> TopicAutoscaling::ControlPlane_CDC_Disable [GOOD] >> TopicAutoscaling::BalancingAfterSplit_sessionsWithPartition >> LocalTableWriter::WaitTxIds [GOOD] >> TTxAllocatorClientTest::Boot [GOOD] >> TPQTabletTests::Kafka_Transaction_Supportive_Partitions_Should_Be_Deleted_After_Timeout [GOOD] >> TTxAllocatorClientTest::ZeroRange |77.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> TPQTabletTests::In_Kafka_Txn_Only_Supportive_Partitions_That_Exceeded_Timeout_Should_Be_Deleted ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::WriteTable [GOOD] Test command err: 2025-06-25T14:31:38.748649Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894594703804057:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:38.748700Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016cd/r3tmp/tmpso6m4V/pdisk_1.dat 2025-06-25T14:31:39.255666Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:39.312333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:39.312434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:39.313675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20212 TServer::EnableGrpc on GrpcPort 30002, node 1 2025-06-25T14:31:39.509086Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:39.509119Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:39.509132Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:39.509296Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20212 2025-06-25T14:31:39.768021Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:39.965573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:39.981590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:31:39.985467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861900109 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-25T14:31:40.210016Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894603293739299:2352] Handshake: worker# [1:7519894598998771910:2291] 2025-06-25T14:31:40.210261Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894603293739299:2352] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:31:40.210491Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894603293739299:2352] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:31:40.210527Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894603293739299:2352] Send handshake: worker# [1:7519894598998771910:2291] 2025-06-25T14:31:40.211135Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894603293739299:2352] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 36b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 36b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-25T14:31:40.211347Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894603293739299:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 36 },{ Order: 2 BodySize: 36 },{ Order: 3 BodySize: 36 }] } 2025-06-25T14:31:40.211487Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894603293739302:2352] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-25T14:31:40.211521Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894603293739299:2352] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:31:40.211613Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894603293739302:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b },{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b },{ Order: 3 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-25T14:31:40.225019Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894603293739302:2352] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:31:40.225103Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894603293739299:2352] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:31:40.225150Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894603293739299:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3] } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::ApplyInCorrectOrder [GOOD] Test command err: 2025-06-25T14:31:38.754975Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894591948071498:2187];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:38.755130Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016f7/r3tmp/tmpwTTXez/pdisk_1.dat 2025-06-25T14:31:39.253146Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:39.271191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:39.271289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:39.272927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8102 TServer::EnableGrpc on GrpcPort 23643, node 1 2025-06-25T14:31:39.644991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:39.645016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:39.645024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:39.645152Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:31:39.762864Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8102 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:40.134124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:40.164932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861900312 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-25T14:31:40.360332Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handshake: worker# [1:7519894600538006514:2292] 2025-06-25T14:31:40.360680Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:31:40.360987Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:31:40.361029Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Send handshake: worker# [1:7519894600538006514:2292] 2025-06-25T14:31:40.364014Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-25T14:31:40.369002Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-25T14:31:40.369152Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 }] } 2025-06-25T14:31:40.369322Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894600538006610:2353] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-25T14:31:40.369370Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:31:40.369454Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894600538006610:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-25T14:31:40.380630Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894600538006610:2353] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:31:40.380703Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:31:40.380757Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } 2025-06-25T14:31:40.381381Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 49b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 19b Offset: 4 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-25T14:31:40.381860Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 20 TxId: 0 } TxId: 2 } 2025-06-25T14:31:40.381955Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 2 BodySize: 49 },{ Order: 3 BodySize: 48 }] } 2025-06-25T14:31:40.382065Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894600538006610:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 2 Group: 0 Step: 11 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 3 Group: 0 Step: 2 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-25T14:31:40.383683Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894600538006610:2353] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:31:40.383737Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:31:40.383814Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 
72057594046644480, LocalPathId: 2][1:7519894600538006607:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [2,3] } |77.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |77.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |77.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::Boot [GOOD] Test command err: 2025-06-25T14:31:43.053758Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-25T14:31:43.054382Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-25T14:31:43.055136Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-25T14:31:43.057069Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:43.057627Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-25T14:31:43.067807Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:43.067931Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:43.068065Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-25T14:31:43.068182Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:43.076430Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:43.076675Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-25T14:31:43.076838Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! 
Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 |77.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |77.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::WaitTxIds [GOOD] Test command err: 2025-06-25T14:31:38.722653Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894595881874754:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:38.722718Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016df/r3tmp/tmplcp4ws/pdisk_1.dat 2025-06-25T14:31:39.236398Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:39.254765Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894595881874733:2080] 1750861898718585 != 1750861898718588 2025-06-25T14:31:39.272704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:39.272785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:39.277613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62722 TServer::EnableGrpc on GrpcPort 25722, node 1 2025-06-25T14:31:39.619630Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:39.619664Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:39.619674Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:39.619813Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:31:39.737304Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62722 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:40.028381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:40.076565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:31:40.084375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861900242 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-25T14:31:40.333008Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handshake: worker# [1:7519894604471810015:2354] 2025-06-25T14:31:40.333329Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:31:40.333565Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:31:40.333603Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Send handshake: worker# [1:7519894604471810015:2354] 2025-06-25T14:31:40.334156Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-25T14:31:40.338433Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-25T14:31:40.338579Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 }] } 2025-06-25T14:31:40.338742Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894604471810018:2353] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-25T14:31:40.338785Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: 
[LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:31:40.338872Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894604471810018:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-25T14:31:40.345159Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894604471810018:2353] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:31:40.345245Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:31:40.345298Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } 2025-06-25T14:31:41.336371Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 20 TxId: 0 } TxId: 2 } 2025-06-25T14:31:41.336522Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 2 BodySize: 49 }] } 2025-06-25T14:31:41.336694Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894604471810018:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 2 Group: 0 Step: 11 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b }] } 2025-06-25T14:31:41.344956Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519894604471810018:2353] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:31:41.345023Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:31:41.345064Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519894604471810014:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [2] } |77.9%| [LD] {RESULT} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_PQv1 >> TPQTabletTests::In_Kafka_Txn_Only_Supportive_Partitions_That_Exceeded_Timeout_Should_Be_Deleted [GOOD] |77.9%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |77.9%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |77.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit+UseSink [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit-UseSink >> KikimrIcGateway::TestListPath [GOOD] >> KikimrIcGateway::TestDropTable |77.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |77.9%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |78.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |78.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |78.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> TFlatTest::WriteSplitByPartialKeyAndRead [GOOD] >> TFlatTest::WriteSplitAndReadFromFollower >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK_AutoCommit [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PQv1 |78.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |78.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |78.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> TPQTest::TestWritePQCompact [GOOD] >> TPQTest::TestWritePQBigMessage [GOOD] >> TPQTest::TestWriteSplit |78.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |78.0%| [LD] {RESULT} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |78.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |78.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |78.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |78.0%| [LD] {RESULT} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTabletTests::In_Kafka_Txn_Only_Supportive_Partitions_That_Exceeded_Timeout_Should_Be_Deleted [GOOD] Test command err: 2025-06-25T14:31:15.700935Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:31:15.705082Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:31:15.705426Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037927937] doesn't have tx info 2025-06-25T14:31:15.705501Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:31:15.705541Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-25T14:31:15.705579Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:31:15.705623Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:15.705688Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:15.724982Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:180:2193], now have 1 active actors on pipe 2025-06-25T14:31:15.725120Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:31:15.739990Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:31:15.747082Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:31:15.747239Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:15.748828Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: 
"/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:31:15.748948Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:31:15.749022Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:31:15.749540Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:15.749918Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-25T14:31:15.750843Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-25T14:31:15.750900Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:188:2199] 2025-06-25T14:31:15.750964Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:31:15.751437Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:15.751560Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-25T14:31:15.751623Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-25T14:31:15.751815Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:31:15.751973Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:15.752197Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-25T14:31:15.757095Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:1:Initializer] Initializing completed. 2025-06-25T14:31:15.757163Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 2 [1:190:2201] 2025-06-25T14:31:15.757247Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 1, State: StateInit] SYNC INIT topic topic partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:31:15.757612Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Process pending events. 
Count 0 2025-06-25T14:31:15.757684Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit request with generation 1 2025-06-25T14:31:15.757721Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit with generation 1 done 2025-06-25T14:31:15.757860Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:31:15.757971Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:31:15.758276Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:15.758379Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 1, State: StateIdle] no data for compaction 2025-06-25T14:31:15.758406Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:31:15.766235Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:15.766345Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:15.767055Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:15.767117Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 1, State: StateIdle] no data for compaction 2025-06-25T14:31:15.767583Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:203:2210], now have 1 active actors on pipe 2025-06-25T14:31:15.768400Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:206:2212], now have 1 active actors on pipe 2025-06-25T14:31:15.769362Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 179 RawX2: 4294969488 } TxId: 67890 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Operations { PartitionId: 1 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Immediate: false } 2025-06-25T14:31:15.769424Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3431: [PQ: 72057594037927937] distributed transaction 2025-06-25T14:31:15.769509Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3745: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-25T14:31:15.769558Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-25T14:31:15.769601Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-06-25T14:31:15.769754Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3979: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-25T14:31:15.769814Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-25T14:31:15.769894Z node 1 :PERSQUEUE DEBUG: 
pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-25T14:31:15.770049Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 230 MaxStep: 30230 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Operations { PartitionId: 1 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 4294969488 } Partitions { } 2025-06-25T14:31:15.770139Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:31:15.770458Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1721: [PQ: 72057594037927937] Handle TEvPersQueue::TEvDropTablet 2025-06-25T14:31:15.778293Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:31:15.778391Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-25T14:31:15.778446Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PREPARING 2 ... ivePartitions# 0 2025-06-25T14:31:44.220866Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100000}, State: StateIdle] TPartition::ReplyWrite. Partition: {0, {0, 0}, 100000} 2025-06-25T14:31:44.220953Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100000}, State: StateIdle] Answering for message sourceid: '1', Topic: 'topic', Partition: {0, {0, 0}, 100000}, SeqNo: 0, partNo: 0, Offset: 0 is stored on disk 2025-06-25T14:31:44.221191Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=64, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:44.221290Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 123 2025-06-25T14:31:44.222091Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037927937] server disconnected, pipe [6:198:2206] destroyed 2025-06-25T14:31:44.222184Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100000}, State: StateIdle] TPartition::DropOwner. 
2025-06-25T14:31:44.222391Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [6:221:2221], now have 1 active actors on pipe 2025-06-25T14:31:44.222494Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:31:44.222564Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-25T14:31:44.222622Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2729: [PQ: 72057594037927937] partition {0, {0, 0}, 100001} for WriteId {0, 0} 2025-06-25T14:31:44.222911Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:31:44.225410Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:31:44.226007Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:31:44.226548Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:44.226838Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateInit] bootstrapping {0, {0, 0}, 100001} [6:227:2226] 2025-06-25T14:31:44.227909Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitDiskStatusStep 2025-06-25T14:31:44.229313Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitMetaStep 2025-06-25T14:31:44.229629Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitInfoRangeStep 2025-06-25T14:31:44.230007Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitDataRangeStep 2025-06-25T14:31:44.230275Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitDataStep 2025-06-25T14:31:44.230334Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:31:44.230396Z node 6 :PERSQUEUE INFO: partition_init.cpp:895: [topic:{0, {0, 0}, 100001}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:31:44.230435Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:{0, {0, 0}, 100001}:Initializer] Initializing completed. 2025-06-25T14:31:44.230505Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateInit] init complete for topic 'topic' partition {0, {0, 0}, 100001} generation 2 [6:227:2226] 2025-06-25T14:31:44.230570Z node 6 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateInit] SYNC INIT topic topic partitition {0, {0, 0}, 100001} so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:31:44.230625Z node 6 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] Process pending events. 
Count 0 2025-06-25T14:31:44.230907Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] no data for compaction 2025-06-25T14:31:44.231136Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie -=[ 0wn3r ]=-|33483348-aec777fe-78b64539-dfa93a43_0 generated for partition {0, {0, 0}, 100001} topic 'topic' owner -=[ 0wn3r ]=- 2025-06-25T14:31:44.231246Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] TPartition::ReplyOwnerOk. Partition: {0, {0, 0}, 100001} 2025-06-25T14:31:44.231334Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 4 2025-06-25T14:31:44.231635Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:31:44.231692Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-25T14:31:44.231819Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2058: [PQ: 72057594037927937] Write in transaction. Partition: 0, WriteId: { KafkaTransaction: true KafkaProducerInstanceId { Id: 2 Epoch: 0 } }, NeedSupportivePartition: 0 2025-06-25T14:31:44.231872Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: topic partition: 0 SourceId: '2' SeqNo: 0 partNo : 0 messageNo: 0 size 10 offset: -1 2025-06-25T14:31:44.232025Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:1843: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] Send write quota request. Topic: "topic". Partition: {0, {0, 0}, 100001}. Amount: 11. Cookie: 1 2025-06-25T14:31:44.232114Z node 6 :PERSQUEUE DEBUG: partition.cpp:3720: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] Got quota. Topic: "topic". Partition: {0, {0, 0}, 100001}: Cookie: 1 2025-06-25T14:31:44.232286Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] Topic 'topic' partition {0, {0, 0}, 100001} part blob processing sourceId '2' seqNo 0 partNo 0 2025-06-25T14:31:44.233206Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] Topic 'topic' partition {0, {0, 0}, 100001} part blob complete sourceId '2' seqNo 0 partNo 0 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 78 count 1 nextOffset 1 batches 1 2025-06-25T14:31:44.233783Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] Add new write blob: topic 'topic' partition {0, {0, 0}, 100001} compactOffset 0,1 HeadOffset 0 endOffset 0 curOffset 1 D0000100001_00000000000000000000_00000_0000000001_00000? size 64 WTime 1800332 2025-06-25T14:31:44.234033Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:31:44.234151Z node 6 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 100001 offset 0 partNo 0 count 1 size 64 2025-06-25T14:31:44.237158Z node 6 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. 
Partition 100001 offset 0 count 1 size 64 actorID [6:138:2162] 2025-06-25T14:31:44.237289Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 11 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:44.237387Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] TPartition::ReplyWrite. Partition: {0, {0, 0}, 100001} 2025-06-25T14:31:44.237476Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] Answering for message sourceid: '2', Topic: 'topic', Partition: {0, {0, 0}, 100001}, SeqNo: 0, partNo: 0, Offset: 0 is stored on disk 2025-06-25T14:31:44.237728Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] need more data for compaction. cumulativeSize=64, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:44.237847Z node 6 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72057594037927937' partition 100001 offset 0 partno 0 count 1 parts 0 suffix '63' size 64 2025-06-25T14:31:44.237912Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1382: [PQ: 72057594037927937] Topic 'topic' counters. CacheSize 128 CachedBlobs 2 2025-06-25T14:31:44.237977Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 123 2025-06-25T14:31:44.238735Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:5262: [PQ: 72057594037927937] send TEvPQ::TEvDeletePartition to partition {0, {0, 0}, 100000} 2025-06-25T14:31:44.239388Z node 6 :PERSQUEUE DEBUG: partition.cpp:3863: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100000}, State: StateIdle] Handle TEvPQ::TEvDeletePartition 2025-06-25T14:31:44.239641Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:31:44.239689Z node 6 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from D0000100000(+) to D0000100001(-) 2025-06-25T14:31:44.242653Z node 6 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 100000 offset 0 count 1 actorID [6:138:2162] 2025-06-25T14:31:44.242915Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=64, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:44.243005Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=64, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:44.243510Z node 6 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72057594037927937' partition 100000 offset 0 partno 0 count 1 parts 0 suffix '63' size 64 2025-06-25T14:31:44.243623Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1382: [PQ: 72057594037927937] Topic 'topic' counters. 
CacheSize 64 CachedBlobs 1 2025-06-25T14:31:44.243859Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:5196: [PQ: 72057594037927937] Handle TEvPQ::TEvDeletePartitionDone {0, {0, 0}, 100000} 2025-06-25T14:31:44.243964Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4647: [PQ: 72057594037927937] delete WriteId {0, 0} 2025-06-25T14:31:44.244058Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:31:44.246113Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:31:44.428122Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:44.768019Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] need more data for compaction. cumulativeSize=64, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 >> TListAllTopicsTests::RecursiveList [GOOD] >> TListAllTopicsTests::ListLimitAndPaging >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_PQv1 >> TFetchRequestTests::BadTopicName [GOOD] >> TFetchRequestTests::CheckAccess |78.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |78.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest |78.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest |78.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> TGRpcCmsTest::SimpleTenantsTestSyncOperation >> TKesusTest::TestAcquireBeforeTimeoutViaSessionTimeout [GOOD] >> TKesusTest::TestAcquireSemaphore >> TGRpcCmsTest::AlterRemoveTest >> CommitOffset::Commit_WithoutSession_ParentNotFinished [GOOD] >> CommitOffset::Commit_WithoutSession_ToPastParentPartition >> TPQTest::TestWriteSplit [GOOD] >> TPQTest::TestWriteTimeStampEstimate |78.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |78.0%| [LD] {RESULT} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |78.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut >> TKesusTest::TestAcquireSemaphore [GOOD] >> Cdc::EnqueueRequestProcessSend [GOOD] >> Cdc::InitialScanAndResolvedTimestamps >> TPQTest::TestWriteTimeStampEstimate [GOOD] >> TPQTest::TestWriteTimeLag >> TopicAutoscaling::ControlPlane_DisableAutoPartitioning [GOOD] >> TopicAutoscaling::ControlPlane_PauseAutoPartitioning >> KikimrIcGateway::TestDropTable [GOOD] >> KikimrIcGateway::TestDropResourcePool >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionActive_BoundaryFalse_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionInactive_0_Test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAcquireSemaphore [GOOD] Test command err: 2025-06-25T14:31:06.427034Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:06.427159Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:06.446821Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:06.446988Z 
node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:06.474309Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:06.474841Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=1896602481982108955, session=0, seqNo=0) 2025-06-25T14:31:06.475023Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:06.493081Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=1896602481982108955, session=1) 2025-06-25T14:31:06.493454Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=6930090848297180942, session=0, seqNo=0) 2025-06-25T14:31:06.493603Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:31:06.505488Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=6930090848297180942, session=2) 2025-06-25T14:31:06.506282Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:31:06.506431Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:31:06.506530Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:31:06.506703Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=222, session=2, semaphore="Lock2" count=1) 2025-06-25T14:31:06.506799Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 2 "Lock2" 2025-06-25T14:31:06.506858Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 2 2025-06-25T14:31:06.506942Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=333, session=1, semaphore="Lock2" count=1) 2025-06-25T14:31:06.507011Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #3 session 1 2025-06-25T14:31:06.528940Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=111) 2025-06-25T14:31:06.529028Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=222) 2025-06-25T14:31:06.529056Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=333) 2025-06-25T14:31:06.529664Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:152:2174], cookie=3758269974125640942, name="Lock1") 2025-06-25T14:31:06.529757Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:152:2174], cookie=3758269974125640942) 2025-06-25T14:31:06.530172Z node 
1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:155:2177], cookie=14083099325959634505, name="Lock2") 2025-06-25T14:31:06.530244Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:155:2177], cookie=14083099325959634505) 2025-06-25T14:31:06.555313Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:06.555431Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:06.555930Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:06.556697Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:06.620919Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:06.621181Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:31:06.621247Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 2 2025-06-25T14:31:06.621277Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #3 session 1 2025-06-25T14:31:06.621641Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:195:2207], cookie=17241165473374678438, name="Lock1") 2025-06-25T14:31:06.621722Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:195:2207], cookie=17241165473374678438) 2025-06-25T14:31:06.622251Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:203:2214], cookie=14732693521659900928, name="Lock2") 2025-06-25T14:31:06.622317Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:203:2214], cookie=14732693521659900928) 2025-06-25T14:31:07.098104Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:07.110471Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:07.484589Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:07.497368Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:07.853565Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:07.871396Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:08.263962Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:08.276364Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:08.635086Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:08.652163Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:09.003239Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:09.019199Z node 1 
:KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:09.370153Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:09.382787Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:09.755666Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:09.767859Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:10.122440Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:10.137625Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:10.554338Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:10.573102Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:10.945014Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:10.958720Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:11.318993Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:11.332836Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:11.710954Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:11.723654Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:12.123804Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:12.136482Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:12.558888Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:12.571498Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:12.946759Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:12.960117Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:13.327337Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:13.339371Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:13.711650Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:13.729966Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:14.103063Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:14.119526Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:14.501102Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:14.512870Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:14.880148Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: 
[72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:14.892056Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:15.247707Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:15.259998Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:15.640299Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:15.653316Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:16.033331Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:16.045831Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:16.428670Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [7205759403 ... pp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:44.504190Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:44.908668Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:44.928965Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:45.336682Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:45.349934Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:45.759012Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:45.775853Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:46.190169Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:46.209113Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:46.636731Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:46.661036Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:47.075390Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:47.093245Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:47.472780Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:47.493859Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:47.912918Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:47.929500Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:48.355526Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:48.373234Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:48.792736Z node 4 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:27: [72057594037927937] TTxSessionTimeout::Execute (session=1) 2025-06-25T14:31:48.792854Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] 
Deleting session 1 2025-06-25T14:31:48.792924Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Lock1" owner link 2025-06-25T14:31:48.793046Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #3 session 2 2025-06-25T14:31:48.793113Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 2 "Lock2" owner link 2025-06-25T14:31:48.793155Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #4 session 2 2025-06-25T14:31:48.809167Z node 4 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:56: [72057594037927937] TTxSessionTimeout::Complete (session=1) 2025-06-25T14:31:48.809989Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:363:2344], cookie=17416807988379529517, name="Lock1") 2025-06-25T14:31:48.810106Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:363:2344], cookie=17416807988379529517) 2025-06-25T14:31:48.810731Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:366:2347], cookie=8660776397027119177, name="Lock2") 2025-06-25T14:31:48.810817Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:366:2347], cookie=8660776397027119177) 2025-06-25T14:31:48.811293Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[4:369:2350], cookie=625051486395743468) 2025-06-25T14:31:48.811366Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[4:369:2350], cookie=625051486395743468) 2025-06-25T14:31:48.836733Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:48.836858Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:48.837459Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:48.838234Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:48.897235Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:48.897400Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #3 session 2 2025-06-25T14:31:48.897454Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #4 session 2 2025-06-25T14:31:48.897825Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[4:409:2380], cookie=1420096315490699749) 2025-06-25T14:31:48.897907Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[4:409:2380], cookie=1420096315490699749) 2025-06-25T14:31:48.898517Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:416:2386], cookie=11507088672567977635, name="Lock1") 2025-06-25T14:31:48.898601Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:416:2386], 
cookie=11507088672567977635) 2025-06-25T14:31:48.899141Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:419:2389], cookie=16146713076067386797, name="Lock2") 2025-06-25T14:31:48.899212Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:419:2389], cookie=16146713076067386797) 2025-06-25T14:31:49.491883Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:49.492004Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:49.514008Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:49.514167Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:49.554005Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:49.554620Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2160], cookie=13158600800888119618, session=0, seqNo=0) 2025-06-25T14:31:49.554806Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:49.577514Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2160], cookie=13158600800888119618, session=1) 2025-06-25T14:31:49.577934Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2160], cookie=14866099105820755788, session=0, seqNo=0) 2025-06-25T14:31:49.578087Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:31:49.591850Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2160], cookie=14866099105820755788, session=2) 2025-06-25T14:31:49.592242Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=111, session=1, semaphore="Sem1" count=1) 2025-06-25T14:31:49.607847Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=111) 2025-06-25T14:31:49.608555Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:149:2171], cookie=4979454924623476849, name="Sem1", limit=1) 2025-06-25T14:31:49.608753Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-25T14:31:49.621106Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:149:2171], cookie=4979454924623476849) 2025-06-25T14:31:49.621593Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=333, session=1, semaphore="Sem1" count=100500) 2025-06-25T14:31:49.635024Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=333) 2025-06-25T14:31:49.635420Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=222, session=1, semaphore="Sem1" count=1) 2025-06-25T14:31:49.635595Z node 5 
:KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2025-06-25T14:31:49.635806Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=333, session=2, semaphore="Sem1" count=1) 2025-06-25T14:31:49.648258Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=222) 2025-06-25T14:31:49.648376Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=333) 2025-06-25T14:31:49.649028Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:159:2181], cookie=13738711626771930671, name="Sem1") 2025-06-25T14:31:49.649138Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:159:2181], cookie=13738711626771930671) 2025-06-25T14:31:49.649666Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:162:2184], cookie=14584388315500363498, name="Sem1") 2025-06-25T14:31:49.649759Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:162:2184], cookie=14584388315500363498) 2025-06-25T14:31:49.650258Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:165:2187], cookie=11623010619323687682, name="Sem1", force=0) 2025-06-25T14:31:49.670144Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:165:2187], cookie=11623010619323687682) 2025-06-25T14:31:49.670833Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:170:2192], cookie=3133440326707790021, name="Sem1", force=1) 2025-06-25T14:31:49.670942Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:58: [72057594037927937] Deleting semaphore 1 "Sem1" 2025-06-25T14:31:49.684262Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:170:2192], cookie=3133440326707790021) >> RetryPolicy::TWriteSession_TestPolicy [GOOD] >> RetryPolicy::TWriteSession_TestBrokenPolicy >> TSequence::CreateSequence >> TGRpcCmsTest::DisabledTxTest >> TPQCDTest::TestCloudClientsAreConsistentlyDistributed >> KqpUniqueIndex::ReplaceFkPartialColumnSet >> TFlatTest::WriteSplitAndReadFromFollower [GOOD] >> TPQTest::TestOwnership [GOOD] >> TPQTest::TestPQCacheSizeManagement [GOOD] >> TPQTest::TestOffsetEstimation [GOOD] >> TPQTest::TestMaxTimeLagRewind >> TSequence::CreateSequence [GOOD] >> TSequence::CreateDropRecreate >> TPQTest::TestWriteTimeLag [GOOD] >> TPQTest::The_Value_Of_CreationUnixTime_Must_Not_Decrease >> GroupWriteTest::WriteHardRateDispatcher [GOOD] |78.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/control/ut/ydb-core-control-ut |78.0%| [LD] {RESULT} $(B)/ydb/core/control/ut/ydb-core-control-ut |78.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/control/ut/ydb-core-control-ut >> PQCountersLabeled::NewConsumersCountersAppear [GOOD] >> PQCountersSimple::Partition >> KikimrIcGateway::TestLoadBasicSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestLoadAwsSecretValueFromExternalDataSourceMetadata >> KqpIndexes::SecondaryIndexOrderBy2 >> 
TopicAutoscaling::PartitionSplit_PreferedPartition_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_PreferedPartition_PQv1 >> TPartitionTests::UserActCount [GOOD] |78.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |78.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |78.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build >> TPartitionTests::TooManyImmediateTxs >> TSequence::CreateDropRecreate [GOOD] >> TSequence::CreateSequenceInsideSequenceNotAllowed >> PQCountersSimple::Partition [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::WriteSplitAndReadFromFollower [GOOD] Test command err: 2025-06-25T14:31:41.757257Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894606567253305:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:41.760379Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002093/r3tmp/tmpfpGn50/pdisk_1.dat 2025-06-25T14:31:42.283170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:42.283288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:42.288469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:42.338099Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:42.338789Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894606567253286:2080] 1750861901755244 != 1750861901755247 TClient is connected to server localhost:29026 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:31:42.646758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:42.669165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:31:42.706547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:42.822138Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750861902888 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Key2" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Va... 
(TRUNCATED) 2025-06-25T14:31:43.016523Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:31:43.018324Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:31:43.018378Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:31:43.167927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { OperationType: ESchemeOpSplitMergeTablePartitions SplitMergeTablePartitions { TablePath: "/dc-1/Dir/TableOld" SourceTabletId: 72075186224037888 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 100 } } } } } } TxId: 281474976710668 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:31:43.168132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:804: TSplitMerge Propose, tableStr: /dc-1/Dir/TableOld, tableId: , opId: 281474976710668:0, at schemeshard: 72057594046644480, request: TablePath: "/dc-1/Dir/TableOld" SourceTabletId: 72075186224037888 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 100 } } } } 2025-06-25T14:31:43.168370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-25T14:31:43.168406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T14:31:43.168666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-25T14:31:43.168684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710668:0 type: TxSplitTablePartition target path: [OwnerId: 72057594046644480, LocalPathId: 3] source path: 2025-06-25T14:31:43.168903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1083: TSplitMerge Propose accepted, tableStr: /dc-1/Dir/TableOld, tableId: , opId: 281474976710668:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "" TabletID: 72075186224037888 ShardIdx: 1 } DestinationRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "\002\000\004\000\000\000d\000\000\000\000\000\000\200" ShardIdx: 2 } DestinationRanges { KeyRangeBegin: "\002\000\004\000\000\000d\000\000\000\000\000\000\200" KeyRangeEnd: "" ShardIdx: 3 }, request: TablePath: "/dc-1/Dir/TableOld" SourceTabletId: 72075186224037888 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 100 } } } } 2025-06-25T14:31:43.168939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710668:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:31:43.169934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710668, response: Status: StatusAccepted TxId: 281474976710668 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:31:43.170045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710668, subject: , status: 
StatusAccepted, operation: ALTER TABLE PARTITIONS, path: /dc-1/Dir/TableOld 2025-06-25T14:31:43.170205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-25T14:31:43.170254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710668:0 ProgressState, operation type: TxSplitTablePartition, at tablet# 72057594046644480 2025-06-25T14:31:43.170516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976710668:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 2 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-25T14:31:43.170720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976710668:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 3 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-25T14:31:43.171365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710668:0 from tablet: 72057594046644480 to tablet: 72057594037968897 cookie: 72057594046644480:2 msg type: 268697601 2025-06-25T14:31:43.171458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710668:0 from tablet: 72057594046644480 to tablet: 72057594037968897 cookie: 72057594046644480:3 msg type: 268697601 2025-06-25T14:31:43.171521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710668, partId: 0, tablet: 72057594037968897 2025-06-25T14:31:43.171574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1806: TOperation RegisterRelationByShardIdx, TxId: 281474976710668, shardIdx: 72057594046644480:2, partId: 0 2025-06-25T14:31:43.171587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1806: TOperation RegisterRelationByShardIdx, TxId: 281474976710668, shardIdx: 72057594046644480:3, partId: 0 2025-06-25T14:31:43.173932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5951: Handle TEvCreateTabletReply at schemeshard: 72057594046644480 message: Status: OK Owner: 72057594046644480 OwnerIdx: 2 TabletID: 72075186224037889 Origin: 72057594037968897 2025-06-25T14:31:43.173964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1820: TOperation FindRelatedPartByShardIdx, TxId: 281474976710668, shardIdx: 72057594046644480:2, partId: 0 2025-06-25T14:31:43.174057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976710668:0, at schemeshard: 72057594046644480, message: Status: OK Owner: 72057594046644480 OwnerIdx: 2 TabletID: 72075186224037889 Origin: 72057594037968897 2025-06-25T14:31:43.174093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts 
opId# 281474976710668:0 Handle ... 2025-06-25T14:31:48.348187Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:31:48.348277Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" 2025-06-25T14:31:48.348602Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 5 2025-06-25T14:31:48.348837Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-25T14:31:48.349584Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-25T14:31:48.349609Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-25T14:31:48.349702Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-25T14:31:48.349728Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037892 state Offline 2025-06-25T14:31:48.349756Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-25T14:31:48.349784Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037888 reason = ReasonStop 2025-06-25T14:31:48.349853Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-25T14:31:48.351345Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-25T14:31:48.351432Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-25T14:31:48.351478Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-25T14:31:48.351501Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-25T14:31:48.351524Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-25T14:31:48.353622Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-25T14:31:48.353648Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-25T14:31:48.353692Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-25T14:31:48.354634Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T14:31:48.354888Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-25T14:31:48.355052Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-25T14:31:48.355175Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T14:31:48.355293Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-25T14:31:48.355407Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-25T14:31:48.355497Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-25T14:31:48.355601Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-25T14:31:48.355687Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-25T14:31:48.356528Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-06-25T14:31:48.357219Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-25T14:31:48.357283Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-25T14:31:48.358021Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-06-25T14:31:48.358068Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-25T14:31:48.355703Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:31:48.355730Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-25T14:31:48.355770Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:31:48.358537Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-06-25T14:31:48.358558Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-25T14:31:48.358573Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-25T14:31:48.358588Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-25T14:31:48.358600Z node 2 :HIVE WARN: hive_impl.cpp:491: 
HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037892 not found 2025-06-25T14:31:48.358649Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-25T14:31:48.358662Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-06-25T14:31:48.358675Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-25T14:31:48.360372Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-25T14:31:48.360387Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-06-25T14:31:48.360412Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-25T14:31:48.360491Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [2:7519894633642390183:3248], serverId# [2:7519894633642390186:3251], sessionId# [0:0:0] 2025-06-25T14:31:48.360668Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-25T14:31:48.360734Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-25T14:31:48.360900Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-25T14:31:48.361745Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-06-25T14:31:48.362483Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-25T14:31:48.363239Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-25T14:31:48.363920Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-25T14:31:48.364886Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037892 reason = ReasonStop 2025-06-25T14:31:48.364936Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [2:7519894633642390185:3250], serverId# [2:7519894633642390187:3252], sessionId# [0:0:0] 2025-06-25T14:31:48.364578Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-25T14:31:48.364654Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 2025-06-25T14:31:48.365398Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037892 not found 2025-06-25T14:31:48.365869Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-25T14:31:48.365915Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-25T14:31:48.367066Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-06-25T14:31:48.367114Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-25T14:31:48.367964Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 
2025-06-25T14:31:48.367977Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T14:31:48.368017Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-25T14:31:48.368024Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-25T14:31:48.368040Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-25T14:31:48.368046Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-25T14:31:48.368062Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-25T14:31:48.368079Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-25T14:31:48.368106Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 >> Cdc::ResolvedTimestampsContinueAfterMerge [GOOD] >> Cdc::ResolvedTimestampForDisplacedUpsert >> DataShardVolatile::TwoAppendsMustBeVolatile+UseSink [GOOD] >> DataShardVolatile::TwoAppendsMustBeVolatile-UseSink >> KikimrIcGateway::TestDropResourcePool [GOOD] >> TSequence::CreateSequenceInsideSequenceNotAllowed [GOOD] >> TSequence::CreateSequenceInsideIndexTableNotAllowed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> PQCountersSimple::Partition [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:130:2057] recipient: [1:128:2160] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:130:2057] recipient: [1:128:2160] Leader for TabletID 72057594037927937 is [1:134:2164] sender: [1:135:2057] recipient: [1:128:2160] 2025-06-25T14:31:18.237646Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:18.237723Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:176:2057] recipient: [1:174:2195] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:176:2057] recipient: [1:174:2195] Leader for TabletID 72057594037927938 is [1:180:2199] sender: [1:181:2057] recipient: [1:174:2195] Leader for TabletID 72057594037927937 is [1:134:2164] sender: [1:206:2057] recipient: [1:14:2061] 2025-06-25T14:31:18.254910Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:18.270330Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:204:2217] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: 
"/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:31:18.271530Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:212:2223] 2025-06-25T14:31:18.274306Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:212:2223] 2025-06-25T14:31:18.276641Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:213:2224] 2025-06-25T14:31:18.278399Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:213:2224] 2025-06-25T14:31:18.291348Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|dae186dd-65b2fe2-1a903a64-1f11ed07_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:18.297781Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3ae7ce92-69ab3b1a-4e20c77b-178aa5c3_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:18.302398Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|21f41126-d8586cc2-9337e0a5-4fe03cd1_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Expected: { "sensors": [ { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/MessageLagByCommitted" }, "value": 30 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/MessageLagByLastRead" }, "value": 29 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/PartitionMaxReadQuotaUsage" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesAvailAvgMin" }, "value": 1000000000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesAvailAvgSec" }, "value": 1000000000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerDay" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerHour" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerMin" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerSec" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesPerDay" }, "value": 0 
}, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesPerHour" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesPerMin" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesPerSec" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesQuota" }, "value": 1000000000 }, { "kind": "RATE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadOffsetRewindSum" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadTimeLagMs" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/SizeLagByCommitted" }, "value": 747 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/SizeLagByLastRead" }, "value": 747 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/TimeSinceLastReadMs" }, "value": 5000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/TotalMessageLagByLastRead" }, "value": 29 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/TotalSizeLagByLastRead" }, "value": 747 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/TotalTimeLagMsByLastRead" }, "value": 5000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/UserPartitionsAnswered" }, "value": 2 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/WriteTimeLagMsByLastRead" }, "value": 30 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "0", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/WriteTimeLagMsByLastReadOld" }, "value": 5000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/MessageLagByCommitted" }, "value": 30 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/MessageLagByLastRead" }, "value": 29 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/PartitionMaxReadQuotaUsage" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": 
"PQ/ReadBytesAvailAvgMin" }, "value": 1000000000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesAvailAvgSec" }, "value": 1000000000 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerDay" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerHour" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_counters": "PersQueue", "client": "total", "important": "total", "topic": "rt3.dc1--asdfgs--topic", "sensor": "PQ/ReadBytesMaxPerMin" }, "value": 0 }, { "kind": "GAUGE", "labels": { "user_count ... n BillingMeteringConfig 2025-06-25T14:31:28.964261Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 5 actor [3:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 5 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } NEW ANS: ANS GROUP total/total/rt3.dc1--asdfgs--topic ANS GROUP user/0/total ANS GROUP user/total/total ANS GROUP user/0/rt3.dc1--asdfgs--topic ANS GROUP total/total/total ANS GROUP rt3.dc1--asdfgs--topic ANS GROUP total/0/rt3.dc1--asdfgs--topic ANS GROUP total CHECKING GROUP user/0/rt3.dc1--asdfgs--topic 2025-06-25T14:31:31.061440Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:31.077679Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 6 actor [3:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user" ImportantClientId: "user2" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 6 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 ReadRuleGenerations: 6 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: true } Consumers { Name: "user2" Generation: 6 Important: true } NEW ANS: ANS GROUP total/total/rt3.dc1--asdfgs--topic ANS GROUP user2/1/total ANS GROUP user/1/rt3.dc1--asdfgs--topic ANS GROUP user/1/total ANS GROUP user2/total/total ANS GROUP user/total/total ANS GROUP user2/1/rt3.dc1--asdfgs--topic ANS GROUP total/total/total ANS GROUP rt3.dc1--asdfgs--topic ANS GROUP total ANS GROUP total/1/rt3.dc1--asdfgs--topic CHECKING GROUP user/1/rt3.dc1--asdfgs--topic CHECKING GROUP user2/1/rt3.dc1--asdfgs--topic 2025-06-25T14:31:33.453360Z node 
3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:33.459619Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 7 actor [3:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 7 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 ReadRuleGenerations: 6 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: true } Consumers { Name: "user2" Generation: 6 Important: false } NEW ANS: ANS GROUP total/total/rt3.dc1--asdfgs--topic ANS GROUP user/1/rt3.dc1--asdfgs--topic ANS GROUP user2/0/total ANS GROUP user/1/total ANS GROUP user2/total/total ANS GROUP user/total/total ANS GROUP user2/0/rt3.dc1--asdfgs--topic ANS GROUP total/total/total ANS GROUP total/0/rt3.dc1--asdfgs--topic ANS GROUP rt3.dc1--asdfgs--topic ANS GROUP total ANS GROUP total/1/rt3.dc1--asdfgs--topic CHECKING GROUP user/1/rt3.dc1--asdfgs--topic CHECKING GROUP user2/0/rt3.dc1--asdfgs--topic 2025-06-25T14:31:35.859489Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:35.866410Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 8 actor [3:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 8 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: true } NEW ANS: ANS GROUP total/total/rt3.dc1--asdfgs--topic ANS GROUP user/1/rt3.dc1--asdfgs--topic ANS GROUP user/1/total ANS GROUP user/total/total ANS GROUP total/total/total ANS GROUP rt3.dc1--asdfgs--topic ANS GROUP total ANS GROUP total/1/rt3.dc1--asdfgs--topic CHECKING GROUP user/1/rt3.dc1--asdfgs--topic 2025-06-25T14:31:38.703499Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:38.703572Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:38.720538Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:38.721563Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 9 actor [4:204:2217] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 3600 
ImportantClientId: "client" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 9 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 9 ReadRuleGenerations: 9 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 9 Important: false } Consumers { Name: "client" Generation: 9 Important: true } 2025-06-25T14:31:38.722236Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [4:212:2223] 2025-06-25T14:31:38.723275Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [4:212:2223] 2025-06-25T14:31:38.724849Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [4:213:2224] 2025-06-25T14:31:38.725510Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 2 [4:213:2224] 2025-06-25T14:31:38.740935Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][] pipe [4:257:2254] connected; active server actors: 1 2025-06-25T14:31:38.745060Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|db60658a-db116396-235bbfd7-51846307_0 generated for partition 0 topic 'topic' owner default 2025-06-25T14:31:38.751182Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|2f9b2f3f-2a914bee-ce1ce8d0-ad1b5897_1 generated for partition 0 topic 'topic' owner default 2025-06-25T14:31:38.758168Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a0956f90-26661739-ebf258bb-5b4bfc94_2 generated for partition 0 topic 'topic' owner default 2025-06-25T14:31:38.763723Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][topic] pipe [4:304:2295] connected; active server actors: 1 2025-06-25T14:31:45.444792Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][topic] pipe [4:430:2381] connected; active server actors: 1 2025-06-25T14:31:54.099233Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:54.099333Z node 5 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:54.126919Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:54.127887Z node 5 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 10 actor [5:200:2213] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 10 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { 
PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 10 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 10 Important: false } 2025-06-25T14:31:54.128591Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [5:208:2219] 2025-06-25T14:31:54.131588Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [5:208:2219] 2025-06-25T14:31:54.133765Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [5:209:2220] 2025-06-25T14:31:54.135738Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [5:209:2220] 2025-06-25T14:31:54.143055Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|11d76556-3fa8a3e5-a2b3860b-8415422_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:54.149371Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6a056341-f45b499f-9490fb6e-996c4279_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:54.158144Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ead0ed6f-468b8a1d-5a548134-48ceaf6b_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:54.164402Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|df9eae85-a2173e49-927646e6-c42f0ee_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:54.166030Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|5d64bae3-a6376cc7-9329681f-69ad3014_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenCommit-UseSink [GOOD] >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenAbort >> TGRpcCmsTest::AlterRemoveTest [GOOD] >> TKesusTest::TestAcquireTimeoutAfterReboot [GOOD] >> TKesusTest::TestAcquireSemaphoreViaRelease >> TPartitionTests::TooManyImmediateTxs [GOOD] |78.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |78.1%| [LD] {RESULT} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |78.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut >> TPartitionTests::WriteSubDomainOutOfSpace ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestDropResourcePool [GOOD] Test command err: Trying to start YDB, gRPC: 8844, MsgBus: 31868 2025-06-25T14:31:40.011340Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894603047613261:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:40.023627Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013d0/r3tmp/tmpdSdLYR/pdisk_1.dat 2025-06-25T14:31:40.534418Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: 
Notification cookie mismatch for subscription [1:7519894598752645935:2080] 1750861900005911 != 1750861900005914 2025-06-25T14:31:40.564744Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:40.568327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:40.568409Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:40.574888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8844, node 1 2025-06-25T14:31:40.732787Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:40.732807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:40.732813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:40.732915Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31868 2025-06-25T14:31:41.027438Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31868 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:41.466623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:31:41.482656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:31:41.531112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:31:41.555488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-25T14:31:43.943161Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894615932515808:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:43.943305Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:44.250384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:44.372857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:44.451727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:44.496697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:44.551406Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894620227483421:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:44.551486Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:44.552033Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894620227483426:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:44.556107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:44.571073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710664, at schemeshard: 72057594046644480 2025-06-25T14:31:44.571322Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894620227483428:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2025-06-25T14:31:44.640222Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894620227483479:2570] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:45.012424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894603047613261:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:45.012492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 15649, MsgBus: 3536 2025-06-25T14:31:45.855170Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894626039263086:2172];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013d0/r3tmp/tmpPMk5y3/pdisk_1.dat 2025-06-25T14:31:46.027186Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:31:46.099387Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:46.099473Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:46.103312Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:46.108521Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519894626039262926:2080] 1750861905754311 != 1750861905754314 2025-06-25T14:31:46.126370Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15649, node 2 2025-06-25T14:31:46.309262Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:46.309289Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:46.309296Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:46.309426Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3536 TClient is connected to server localhost:3536 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:46.835127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:46.854910Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:46.887180Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-25T14:31:49.309742Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894643219132787:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:49.309842Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:49.353672Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:49.427846Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:49.472005Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:49.511408Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:49.601602Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894643219133103:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:49.601729Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:49.602112Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894643219133108:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:49.606083Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:49.642577Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894643219133110:2330], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2025-06-25T14:31:49.702908Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894643219133161:2561] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:49.990739Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found
: Info: Success, code: 4 Trying to start YDB, gRPC: 8201, MsgBus: 5984 2025-06-25T14:31:50.922593Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519894644541820881:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:50.932374Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013d0/r3tmp/tmpGqZc1K/pdisk_1.dat 2025-06-25T14:31:51.142912Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:51.142990Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:51.147842Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:51.154264Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519894644541820667:2080] 1750861910866118 != 1750861910866121 2025-06-25T14:31:51.157407Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8201, node 3 2025-06-25T14:31:51.274866Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:51.274899Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:51.274908Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:51.275012Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5984 TClient is connected to server localhost:5984 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:31:51.949617Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:31:52.033935Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:52.040630Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:31:52.059042Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) >> TKesusTest::TestAcquireSemaphoreViaRelease [GOOD] >> TPartitionTests::WriteSubDomainOutOfSpace [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::AlterRemoveTest [GOOD] Test command err: 2025-06-25T14:31:50.038455Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894643378320120:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:50.038634Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013be/r3tmp/tmp4WRckZ/pdisk_1.dat 2025-06-25T14:31:50.713107Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:50.713219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:50.751965Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:50.830883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21749, node 1 2025-06-25T14:31:51.069250Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:51.126276Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:51.126302Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:51.126311Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:51.126423Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63929 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:51.523490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:51.622403Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7519894647673288094:2274], Recipient [1:7519894643378320529:2197]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:60568" } 2025-06-25T14:31:51.622444Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-25T14:31:51.622470Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:51.622481Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:51.622604Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:60568" 2025-06-25T14:31:51.622715Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1750861911622590) 2025-06-25T14:31:51.623219Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1750861911622590 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-25T14:31:51.623404Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-25T14:31:51.632874Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-25T14:31:51.636267Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: 
Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861911622590&action=1" } } } 2025-06-25T14:31:51.636415Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:51.636502Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-25T14:31:51.636621Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-25T14:31:51.637055Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-25T14:31:51.637171Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-25T14:31:51.640770Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-25T14:31:51.640817Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-25T14:31:51.640885Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7519894647673288099:2197], Recipient [1:7519894643378320529:2197]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-25T14:31:51.640902Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-25T14:31:51.640923Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:51.640943Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:51.640980Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-25T14:31:51.641002Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-25T14:31:51.641071Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-25T14:31:51.649579Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-25T14:31:51.649610Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:51.649618Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:51.649634Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:51.649694Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-25T14:31:51.649713Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for 
/Root/users/user-1 state=CREATING_SUBDOMAIN txid=1750861911622590 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:31:51.652623Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519894647673288113:2276], Recipient [1:7519894643378320529:2197]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861911622590&action=1" } UserToken: "" } 2025-06-25T14:31:51.652648Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-25T14:31:51.652842Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861911622590&action=1" } } 2025-06-25T14:31:51.654610Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-25T14:31:51.654734Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:51.654770Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-25T14:31:51.654780Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-25T14:31:51.664758Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-25T14:31:51.665911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:31:51.681039Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 2025-06-25T14:31:51.681096Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request ... 
called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_extsubdomain.cpp:309) 2025-06-25T14:31:51.805280Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-25T14:31:51.805306Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:51.806178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5496: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 3] by tx: 281474976710660 2025-06-25T14:31:51.806529Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976710659 2025-06-25T14:31:51.806537Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-25T14:31:51.806575Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-25T14:31:51.806661Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 2025-06-25T14:31:51.806689Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976710660 2025-06-25T14:31:51.806740Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435076, Sender [1:7519894647673288193:2197], Recipient [1:7519894643378320529:2197]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-25T14:31:51.806755Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:979: StateWork, processing event TEvPrivate::TEvSubdomainReady 2025-06-25T14:31:51.806765Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:3661: Ignoring ready subdomain for tenant /Root/users/user-1 in REMOVING_SUBDOMAIN state 2025-06-25T14:31:51.807139Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519894647673288312:2283], Recipient [1:7519894643378320529:2197]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861911769127&action=2" } UserToken: "" } 2025-06-25T14:31:51.807151Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-25T14:31:51.807297Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861911769127&action=2" } } 2025-06-25T14:31:51.811220Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976710660 2025-06-25T14:31:51.825229Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976710660 2025-06-25T14:31:51.825249Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-25T14:31:51.825284Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply 
with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-25T14:31:51.825380Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7519894647673288300:2197], Recipient [1:7519894643378320529:2197]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-25T14:31:51.825404Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-25T14:31:51.825418Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:51.825426Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:51.825449Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-25T14:31:51.825470Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1750861911769127 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:31:51.825504Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750861911769127 issue= 2025-06-25T14:31:51.833197Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-25T14:31:51.833271Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-25T14:31:51.833288Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:51.833461Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519894643378320379:2201], Recipient [1:7519894643378320529:2197]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-25T14:31:51.833478Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-25T14:31:51.833491Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:51.833498Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:51.833520Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-25T14:31:51.833551Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1750861911769127 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:31:51.836216Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-25T14:31:51.836268Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:51.836300Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-25T14:31:51.836420Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-25T14:31:51.836969Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 
StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-06-25T14:31:51.837036Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-06-25T14:31:51.845265Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-06-25T14:31:51.845344Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7519894647673288363:2197], Recipient [1:7519894643378320529:2197]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-25T14:31:51.845375Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-25T14:31:51.845388Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:51.845395Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:51.845434Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-25T14:31:51.845453Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-25T14:31:51.886125Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519894647673288377:2285], Recipient [1:7519894643378320529:2197]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861911769127&action=2" } UserToken: "" } 2025-06-25T14:31:51.886154Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-25T14:31:51.886325Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861911769127&action=2" } } 2025-06-25T14:31:51.896892Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-25T14:31:51.896947Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:51.896955Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:51.896961Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:51.897012Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1750861911769127 2025-06-25T14:31:51.897025Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750861911769127 issue= 2025-06-25T14:31:51.897049Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1750861911769127 issue= 2025-06-25T14:31:51.897060Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-25T14:31:51.897110Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1750861911769127 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:31:51.918816Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-25T14:31:51.918883Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:51.948847Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519894647673288388:2287], Recipient [1:7519894643378320529:2197]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861911769127&action=2" } UserToken: "" } 2025-06-25T14:31:51.948870Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-25T14:31:51.949045Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861911769127&action=2" ready: true status: SUCCESS } } >> TPartitionTests::TestTxBatchInFederation >> TSequence::CreateSequenceInsideIndexTableNotAllowed [GOOD] >> TSequence::CopyTableWithSequence ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::WriteHardRateDispatcher [GOOD] Test command err: RandomSeed# 2917188036212835799 2025-06-25T14:27:22.094699Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 5 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-25T14:27:22.161173Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-25T14:27:22.161228Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 1 going to send TEvBlock {TabletId# 5 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-25T14:27:22.199446Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-25T14:27:23.262183Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 2 going to send TEvCollectGarbage {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T14:27:23.306020Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-25T14:28:29.887003Z 6 00h01m04.602828s :BS_LOGCUTTER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) KEEPER: RetryCutLogEvent: limit exceeded; FreeUpToLsn# 4690 2025-06-25T14:31:49.888263Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-25T14:31:49.888391Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 12 Channel# 0 Deadline# 18446744073709551 
Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T14:31:49.888452Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-25T14:31:49.888493Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: TabletId# 5 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 13 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T14:31:50.153457Z 1 00h01m10.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 12 Channel# 0 Status# OK} 2025-06-25T14:31:50.153573Z 1 00h01m10.010512s :BS_LOAD_TEST INFO: TabletId# 5 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 5 RecordGeneration# 2 PerGenerationCounter# 13 Channel# 0 Status# OK} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAcquireSemaphoreViaRelease [GOOD] Test command err: 2025-06-25T14:31:04.455509Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:04.455664Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:04.476707Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:04.476869Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:04.501880Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:04.502437Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=2967933013329538995, session=0, seqNo=0) 2025-06-25T14:31:04.502634Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:04.524944Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=2967933013329538995, session=1) 2025-06-25T14:31:04.525877Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=111, session=1, semaphore="Lock1" count=1) 2025-06-25T14:31:04.526052Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:31:04.526161Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:31:04.544266Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=111) 2025-06-25T14:31:04.544655Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=222, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:31:04.556785Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=222) 2025-06-25T14:31:04.557259Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:152:2174], cookie=7995991831293274284, name="Lock1") 2025-06-25T14:31:04.557354Z node 1 
:KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:152:2174], cookie=7995991831293274284) 2025-06-25T14:31:04.929584Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:04.929691Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:04.944147Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:04.944722Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:04.971142Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:04.971916Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:136:2160], cookie=14807733583773160508, session=0, seqNo=0) 2025-06-25T14:31:04.972049Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:04.989044Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:136:2160], cookie=14807733583773160508, session=1) 2025-06-25T14:31:04.989379Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:137:2161], cookie=14820551250868404153, session=0, seqNo=0) 2025-06-25T14:31:04.989504Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:31:05.003160Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:137:2161], cookie=14820551250868404153, session=2) 2025-06-25T14:31:05.004250Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:31:05.004427Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:31:05.004520Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:31:05.019894Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=111) 2025-06-25T14:31:05.020242Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=112, session=1, semaphore="Lock2" count=1) 2025-06-25T14:31:05.020403Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 2 "Lock2" 2025-06-25T14:31:05.020489Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 2 "Lock2" queue: next order #2 session 1 2025-06-25T14:31:05.034796Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=112) 2025-06-25T14:31:05.035179Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:137:2161], cookie=222, session=2, semaphore="Lock1" count=1) 2025-06-25T14:31:05.035468Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:137:2161], cookie=223, session=2, semaphore="Lock2" 
count=18446744073709551615) 2025-06-25T14:31:05.060940Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:137:2161], cookie=222) 2025-06-25T14:31:05.061022Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:137:2161], cookie=223) 2025-06-25T14:31:05.061403Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:137:2161], cookie=333, session=2, semaphore="Lock1" count=1) 2025-06-25T14:31:05.061785Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:137:2161], cookie=334, session=2, semaphore="Lock2" count=18446744073709551615) 2025-06-25T14:31:05.079575Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:137:2161], cookie=333) 2025-06-25T14:31:05.079656Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:137:2161], cookie=334) 2025-06-25T14:31:05.522751Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:05.537206Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:05.928707Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:05.944989Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:06.340399Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:06.357234Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:06.755140Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:06.767704Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:07.129274Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:07.140984Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:07.489106Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:07.501662Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:07.843293Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:07.855791Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:08.232867Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:08.244574Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:08.632670Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:08.649046Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:09.106017Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:09.120377Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 
2025-06-25T14:31:09.496913Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:09.508900Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:09.892926Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:09.906515Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:10.297045Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:10.309100Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:10.692147Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:10.704068Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:11.097855Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:11.110020Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:11.482625Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:11.495146Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:11.877905Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:11.901548Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:12.261595Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:12.274900Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:12.635581Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:12.647781Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:13.054440Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:13.066853Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:13.439830Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [720575940379 ... 
06-25T14:31:48.413293Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:48.886814Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:48.905223Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:49.342151Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:49.359464Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:49.776631Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:49.793712Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:50.228720Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:50.251522Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:50.656995Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:50.673333Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:51.168684Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:51.180784Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:51.592575Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:51.606036Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:51.992745Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:52.014344Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:52.464683Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:52.485195Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:52.920715Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:52.941337Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:53.410438Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:53.425192Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:53.836655Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:53.851320Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:54.256666Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:54.270280Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:54.689967Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:54.709162Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:55.100166Z node 4 :KESUS_TABLET DEBUG: 
tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:55.117127Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:55.643298Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_timeout.cpp:30: [72057594037927937] TTxSemaphoreTimeout::Execute (session=2, semaphore=1) 2025-06-25T14:31:55.643398Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 1 "Lock1" waiter link 2025-06-25T14:31:55.658189Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_timeout.cpp:71: [72057594037927937] TTxSemaphoreTimeout::Complete (session=2, semaphore=1) 2025-06-25T14:31:55.685125Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[4:597:2533], cookie=11291845980227402547) 2025-06-25T14:31:55.685256Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[4:597:2533], cookie=11291845980227402547) 2025-06-25T14:31:55.685721Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[4:600:2536], cookie=4593297328461359286) 2025-06-25T14:31:55.685784Z node 4 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[4:600:2536], cookie=4593297328461359286) 2025-06-25T14:31:55.686214Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:603:2539], cookie=4371924713979734273, name="Lock1") 2025-06-25T14:31:55.686277Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:603:2539], cookie=4371924713979734273) 2025-06-25T14:31:55.686705Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:606:2542], cookie=3733744035307264290, name="Lock1") 2025-06-25T14:31:55.686758Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:606:2542], cookie=3733744035307264290) 2025-06-25T14:31:56.309629Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:56.309757Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:56.328421Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:56.328551Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:56.363768Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:56.364268Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2160], cookie=14112158635039759489, session=0, seqNo=0) 2025-06-25T14:31:56.364459Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:56.383681Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2160], cookie=14112158635039759489, session=1) 2025-06-25T14:31:56.392636Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2160], cookie=12222215313584351328, session=0, seqNo=0) 2025-06-25T14:31:56.392811Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: 
[72057594037927937] Created new session 2 2025-06-25T14:31:56.406312Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2160], cookie=12222215313584351328, session=2) 2025-06-25T14:31:56.406632Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2160], cookie=8676218781283029963, session=0, seqNo=0) 2025-06-25T14:31:56.406767Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 3 2025-06-25T14:31:56.422826Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2160], cookie=8676218781283029963, session=3) 2025-06-25T14:31:56.423472Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:149:2171], cookie=6172835733272524001, name="Sem1", limit=3) 2025-06-25T14:31:56.423621Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-25T14:31:56.435788Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:149:2171], cookie=6172835733272524001) 2025-06-25T14:31:56.437690Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=111, session=1, semaphore="Sem1" count=2) 2025-06-25T14:31:56.437878Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2025-06-25T14:31:56.438147Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=222, session=2, semaphore="Sem1" count=2) 2025-06-25T14:31:56.438354Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=333, session=3, semaphore="Sem1" count=1) 2025-06-25T14:31:56.450481Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=111) 2025-06-25T14:31:56.450560Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=222) 2025-06-25T14:31:56.450588Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=333) 2025-06-25T14:31:56.451128Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:158:2180], cookie=843292615355905664, name="Sem1") 2025-06-25T14:31:56.451216Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:158:2180], cookie=843292615355905664) 2025-06-25T14:31:56.451616Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:161:2183], cookie=8578822616627074585, name="Sem1") 2025-06-25T14:31:56.451683Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:161:2183], cookie=8578822616627074585) 2025-06-25T14:31:56.451915Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[5:136:2160], cookie=444, name="Sem1") 2025-06-25T14:31:56.452015Z node 5 :KESUS_TABLET 
DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Sem1" owner link 2025-06-25T14:31:56.452099Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #2 session 2 2025-06-25T14:31:56.452146Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #3 session 3 2025-06-25T14:31:56.465874Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[5:136:2160], cookie=444) 2025-06-25T14:31:56.466529Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:166:2188], cookie=13905699184949646961, name="Sem1") 2025-06-25T14:31:56.466632Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:166:2188], cookie=13905699184949646961) 2025-06-25T14:31:56.467067Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:169:2191], cookie=15553411954214444091, name="Sem1") 2025-06-25T14:31:56.467137Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:169:2191], cookie=15553411954214444091) >> KqpIndexes::CheckUpsertNonEquatableType+NotNull |78.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |78.1%| [LD] {RESULT} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |78.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut >> TGRpcCmsTest::SimpleTenantsTestSyncOperation [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1_UNIQUE_SYNC-pk_types17-all_types17-index17-DyNumber-UNIQUE-SYNC] [FAIL] >> KqpUniqueIndex::InsertFkPkOverlap >> TGRpcCmsTest::DisabledTxTest [GOOD] >> TKesusTest::TestSessionTimeoutAfterReboot [GOOD] >> TKesusTest::TestSessionStealingSameKey >> TPQCDTest::TestCloudClientsAreConsistentlyDistributed [GOOD] >> TPQTest::TestAlreadyWritten [GOOD] >> TPQTest::TestAlreadyWrittenWithoutDeduplication [GOOD] >> TPQTest::TestChangeConfig >> TKesusTest::TestSessionStealingSameKey [GOOD] >> TKesusTest::TestSessionStealingDifferentKey >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable-UseSimilarity >> TSequence::CopyTableWithSequence [GOOD] >> TSequence::AlterSequence >> TListAllTopicsTests::ListLimitAndPaging [GOOD] >> TMeteringSink::FlushPutEventsV1 [GOOD] >> TMeteringSink::FlushResourcesReservedV1 [GOOD] >> TMeteringSink::FlushStorageV1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::SimpleTenantsTestSyncOperation [GOOD] Test command err: 2025-06-25T14:31:49.732985Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894642436287879:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:49.740197Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013c3/r3tmp/tmpu8i4uY/pdisk_1.dat 2025-06-25T14:31:50.416139Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:50.428177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-25T14:31:50.428274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:50.442312Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4448, node 1 2025-06-25T14:31:50.834844Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:50.909040Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:50.909063Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:50.909074Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:50.909190Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22976 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:51.499337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:31:51.631321Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7519894651026223194:2274], Recipient [1:7519894646731255585:2197]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { operation_params { operation_mode: SYNC } path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:51980" } 2025-06-25T14:31:51.631360Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-25T14:31:51.631386Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:51.631396Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:51.631494Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { operation_params { operation_mode: SYNC } path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:51980" 2025-06-25T14:31:51.631627Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1750861911631504) 2025-06-25T14:31:51.632151Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1750861911631504 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-25T14:31:51.632304Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-25T14:31:51.650873Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-25T14:31:51.651380Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861911631504&action=1" } } } 2025-06-25T14:31:51.651547Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:51.651612Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-25T14:31:51.651743Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-25T14:31:51.652126Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-25T14:31:51.652237Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-25T14:31:51.653224Z node 1 
:CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285139, Sender [1:7519894651026223194:2274], Recipient [1:7519894646731255585:2197]: NKikimr::NConsole::TEvConsole::TEvNotifyOperationCompletionRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861911631504&action=1" } UserToken: "" PeerName: "ipv6:[::1]:51980" } 2025-06-25T14:31:51.653278Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:968: StateWork, processing event TEvConsole::TEvNotifyOperationCompletionRequest 2025-06-25T14:31:51.653486Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:3443: Add subscription to /Root/users/user-1 for [1:7519894651026223194:2274] 2025-06-25T14:31:51.653618Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3451: Send TEvConsole::TEvNotifyOperationCompletionResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861911631504&action=1" } } 2025-06-25T14:31:51.662450Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-25T14:31:51.662523Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-25T14:31:51.662596Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7519894651026223199:2197], Recipient [1:7519894646731255585:2197]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-25T14:31:51.662611Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-25T14:31:51.662621Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:51.662641Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:51.662677Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-25T14:31:51.662692Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-25T14:31:51.662756Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-25T14:31:51.670470Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-25T14:31:51.670502Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:51.670530Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:51.670547Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:51.670603Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-25T14:31:51.670620Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1750861911631504 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:31:51.677856Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState 
complete for /Root/users/user-1 2025-06-25T14:31:51.678031Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:51.678064Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-25T14:31:51.678073Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-25T14:31:51.689474Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-25T14:31:51.690900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:31:51.702617Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:8 ... 3.276933Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:53.276941Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:53.276972Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-25T14:31:53.276988Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1750861913082057 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:31:53.277046Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750861913082057 issue= 2025-06-25T14:31:53.281224Z node 3 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72075186224037888 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found - using supplied 72075186224037888 2025-06-25T14:31:53.285052Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-25T14:31:53.285125Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-25T14:31:53.285142Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:53.285304Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519894646731255464:2196], Recipient [1:7519894646731255585:2197]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-25T14:31:53.285318Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-25T14:31:53.285329Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:53.285335Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:53.285358Z node 1 :CMS_TENANTS DEBUG: 
console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-25T14:31:53.285376Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1750861913082057 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:31:53.291703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:39: Got DeleteTabletReply with Forward response from Hive 72075186224037888 to Hive 72057594037968897 shardIdx 72057594046644480:1 2025-06-25T14:31:53.302019Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-25T14:31:53.302070Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:53.302102Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-25T14:31:53.302246Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-25T14:31:53.302733Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-06-25T14:31:53.300093Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037897 not found 2025-06-25T14:31:53.300141Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-25T14:31:53.300156Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037893 not found 2025-06-25T14:31:53.300166Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037894 not found 2025-06-25T14:31:53.300190Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-25T14:31:53.300203Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-25T14:31:53.300214Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037895 not found 2025-06-25T14:31:53.300226Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-06-25T14:31:53.300246Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037896 not found 2025-06-25T14:31:53.305782Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-06-25T14:31:53.323102Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle 
TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-25T14:31:53.341122Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-06-25T14:31:53.341210Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7519894659616158532:2197], Recipient [1:7519894646731255585:2197]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-25T14:31:53.341237Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-25T14:31:53.341249Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:53.341256Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:53.341293Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-25T14:31:53.341308Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-25T14:31:53.417980Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2989} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 185 PDiskId# 1 2025-06-25T14:31:53.418039Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2989} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 186 PDiskId# 1 2025-06-25T14:31:53.418069Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2989} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 187 PDiskId# 1 2025-06-25T14:31:53.418082Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2989} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 188 PDiskId# 1 2025-06-25T14:31:53.418103Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2989} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 189 PDiskId# 1 2025-06-25T14:31:53.418119Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2989} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 190 PDiskId# 1 2025-06-25T14:31:53.418132Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2989} PDiskId# 1 request from unregistered ownerId# 5 error in TLogWrite for ownerId# 5 ownerRound# 5 lsn# 191 PDiskId# 1 2025-06-25T14:31:53.441759Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-25T14:31:53.441797Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:31:53.441803Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:53.441809Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:31:53.441848Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1750861913082057 2025-06-25T14:31:53.441867Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750861913082057 issue= 
2025-06-25T14:31:53.441875Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1750861913082057 issue= 2025-06-25T14:31:53.442495Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-25T14:31:53.448451Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1750861913082057 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:31:53.476976Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-25T14:31:53.477179Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2431: Send /Root/users/user-1 notification to [1:7519894659616158433:2360]: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750861913082057&action=2" ready: true status: SUCCESS } } 2025-06-25T14:31:53.477242Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:31:53.593399Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519894659616158582:2364], Recipient [1:7519894646731255585:2197]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "" PeerName: "ipv6:[::1]:51980" } 2025-06-25T14:31:53.593431Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-25T14:31:53.593543Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3377: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: NOT_FOUND issues { message: "Unknown tenant /Root/users/user-1" severity: 1 } } } 2025-06-25T14:31:53.596836Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285123, Sender [1:7519894659616158585:2365], Recipient [1:7519894646731255585:2197]: NKikimr::NConsole::TEvConsole::TEvListTenantsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:51980" } 2025-06-25T14:31:53.596876Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:967: StateWork, processing event TEvConsole::TEvListTenantsRequest 2025-06-25T14:31:53.597051Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3421: Send TEvConsole::TEvListTenantsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.ListDatabasesResult] { } } } } 2025-06-25T14:31:53.611928Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-25T14:31:53.612101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected >> TKesusTest::TestSessionStealingDifferentKey [GOOD] >> TestProtocols::TestResolveProtocol ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::DisabledTxTest [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013b9/r3tmp/tmp9T9xAY/pdisk_1.dat 2025-06-25T14:31:52.039041Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894652718483642:2177];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:52.050822Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:31:52.880946Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:52.937513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:52.937603Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:52.972140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:53.032855Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 18733, node 1 2025-06-25T14:31:53.229613Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:53.229636Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:53.229641Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:53.229740Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62766 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:53.741524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:31:53.912867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) 2025-06-25T14:31:54.099743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) >> TestProtocols::TestResolveProtocol [GOOD] >> TestProtocols::TestHTTPCollectedVerySlow ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestSessionStealingDifferentKey [GOOD] Test command err: 2025-06-25T14:31:04.588419Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:04.588549Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:04.603198Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:04.603297Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:04.628038Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:04.628528Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=16308933891191733753, session=0, seqNo=0) 2025-06-25T14:31:04.628702Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:04.640908Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=16308933891191733753, session=1) 2025-06-25T14:31:04.641760Z node 1 :KESUS_TABLET DEBUG: tx_session_detach.cpp:100: [72057594037927937] Fast-path detach session=1 from sender=[1:136:2160], cookie=13703117365615661314 2025-06-25T14:31:04.642180Z node 1 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[1:149:2171], cookie=13893415516637662759) 2025-06-25T14:31:04.642274Z node 1 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[1:149:2171], cookie=13893415516637662759) 2025-06-25T14:31:05.108668Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:05.121164Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:05.497950Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:05.510306Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:05.907286Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:05.924973Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:06.313180Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: 
[72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:06.330384Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:06.748028Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:06.761052Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:07.116764Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:07.128334Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:07.470102Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:07.482099Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:07.836921Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:07.850954Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:08.232768Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:08.252928Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:08.656792Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:08.668948Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:09.040616Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:09.060882Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:09.430343Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:09.442403Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:09.803113Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:09.817457Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:10.203711Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:10.217956Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:10.659529Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:10.677651Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:11.051603Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:11.063314Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:11.425571Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:11.437505Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:11.799951Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:11.812249Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 
2025-06-25T14:31:12.177351Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:12.189458Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:12.571342Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:12.583262Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:12.950059Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:12.965715Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:13.342339Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:13.354516Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:13.756017Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:13.769192Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:14.133693Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:14.148917Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:14.519030Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:14.531766Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:14.912606Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:14.924849Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:15.292747Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:15.309108Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:15.680281Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:15.692826Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:16.069295Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:16.081726Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:16.487587Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:16.505257Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:16.874777Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:16.887459Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:17.274434Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:17.286747Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:17.643925Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:17.656567Z node 1 :KESUS_TABLET DEBUG: 
tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:18.037905Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:18.050298Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:18.448061Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:18.461011Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:18.848015Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:18.862505Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:19.240604Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:19.253065Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:19.606473Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:19.618674Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:19.995422Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:20.007652Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:20.403946Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:20.418421Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:20.780813Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:20.792975Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:21.166454Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:21.178667Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:21.545637Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:21.563854Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfChec ... 
UG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:42.913446Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:43.336542Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:43.360970Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:43.764644Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:43.781711Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:44.200658Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:44.217224Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:44.652635Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:44.672935Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:45.112915Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:45.133142Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:45.527277Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:45.539540Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:45.956003Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:45.973472Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:46.378452Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:46.402485Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:46.826885Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:46.848921Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:47.264607Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:47.284955Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:47.708709Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:47.728230Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:48.171204Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:48.186625Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:48.588615Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:48.608951Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:49.005103Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:49.019620Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] 
TTxSelfCheck::Complete 2025-06-25T14:31:49.436047Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:49.449818Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:49.862982Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:49.885071Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:50.304728Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:50.320126Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:50.709553Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:50.726617Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:51.131180Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:51.146228Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:51.556563Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:51.572667Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:52.006792Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:52.029026Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:52.428195Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:52.443266Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:52.836564Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:52.855185Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:53.252774Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:53.273055Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:53.808716Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:53.829262Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:54.244710Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:54.264988Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:54.676524Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:54.690418Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:55.124912Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:55.141963Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:55.559110Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:55.577787Z node 2 
:KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:56.004239Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:56.025492Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:56.425600Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:56.445176Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:56.869345Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:56.885113Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:57.301641Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:57.316232Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:57.744200Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:31:57.756624Z node 2 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:31:58.113664Z node 2 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:27: [72057594037927937] TTxSessionTimeout::Execute (session=1) 2025-06-25T14:31:58.113766Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-25T14:31:58.136574Z node 2 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:56: [72057594037927937] TTxSessionTimeout::Complete (session=1) 2025-06-25T14:31:58.149420Z node 2 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[2:643:2568], cookie=2952601249433820771) 2025-06-25T14:31:58.149532Z node 2 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[2:643:2568], cookie=2952601249433820771) 2025-06-25T14:31:58.701058Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:58.701173Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:58.720615Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:58.721178Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:58.749491Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:58.750497Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:136:2160], cookie=12345, session=0, seqNo=0) 2025-06-25T14:31:58.750669Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:58.777044Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:136:2160], cookie=12345, session=1) 2025-06-25T14:31:58.777910Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:143:2165], cookie=23456, session=1, seqNo=0) 2025-06-25T14:31:58.797107Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:143:2165], cookie=23456, session=1) 2025-06-25T14:31:59.457554Z node 4 :KESUS_TABLET INFO: 
tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:31:59.457668Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:31:59.475946Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:31:59.476072Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:31:59.491348Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:31:59.492336Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:134:2158], cookie=12345, session=0, seqNo=0) 2025-06-25T14:31:59.492504Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:31:59.518676Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:134:2158], cookie=12345, session=1) 2025-06-25T14:31:59.519516Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:140:2163], cookie=23456, session=1, seqNo=0) 2025-06-25T14:31:59.538065Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:140:2163], cookie=23456, session=1) ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestCloudClientsAreConsistentlyDistributed [GOOD] Test command err: 2025-06-25T14:31:51.543494Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894649389827829:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:51.543776Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c2e/r3tmp/tmpZGUHCo/pdisk_1.dat 2025-06-25T14:31:52.250837Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894649389827638:2080] 1750861911510575 != 1750861911510578 2025-06-25T14:31:52.314957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:52.315042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:52.318010Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:52.328560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5633, node 1 2025-06-25T14:31:52.531163Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:52.552829Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000c2e/r3tmp/yandexHDZeBu.tmp 2025-06-25T14:31:52.552855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000c2e/r3tmp/yandexHDZeBu.tmp 2025-06-25T14:31:52.553005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: 
successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000c2e/r3tmp/yandexHDZeBu.tmp 2025-06-25T14:31:52.553116Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11124 PQClient connected to localhost:5633 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:53.227471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:53.256743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:31:53.277499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:31:53.295524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:31:55.618786Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894666569697532:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:55.619226Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:55.628482Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894666569697545:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:55.632817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:55.662051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-25T14:31:55.662758Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894666569697547:2300], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:31:55.952679Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894666569697612:2391] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:55.985786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:56.342322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:56.343509Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894666569697621:2306], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:31:56.344902Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTBmNTdiMjktMWM5YmZkMTItZmYwZTFmY2MtMzNlYjhkMjk=, ActorId: [1:7519894666569697530:2294], ActorState: ExecuteState, TraceId: 01jykr19f02cgyw05p25wfyfeh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:31:56.346716Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:31:56.510868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:56.536980Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894649389827829:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:56.537114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:31:56.836893Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jykr1aee6586y5hcrj0na432, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjAwZjQxZDItNzhlNjhkOWEtMTdmY2UwYjEtNDk1MzE0MWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TMeteringSink::FlushStorageV1 [GOOD] Test command err: 2025-06-25T14:31:27.151491Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894547803150704:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:27.151576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b1c/r3tmp/tmpahUIeS/pdisk_1.dat 2025-06-25T14:31:27.337788Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:31:27.487655Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894547803150670:2080] 1750861887146269 != 1750861887146272 2025-06-25T14:31:27.487937Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25219, node 1 2025-06-25T14:31:27.541311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:27.541453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:27.543660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:27.547338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b1c/r3tmp/yandexEtNpQS.tmp 2025-06-25T14:31:27.547366Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b1c/r3tmp/yandexEtNpQS.tmp 2025-06-25T14:31:27.547644Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b1c/r3tmp/yandexEtNpQS.tmp 2025-06-25T14:31:27.547801Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:31:27.598289Z INFO: TTestServer started on Port 18685 GrpcPort 25219 TClient is connected to server localhost:18685 PQClient connected to localhost:25219 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:31:27.905822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:31:27.950869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-25T14:31:28.158606Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:29.751790Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894556393086052:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:29.751844Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894556393086044:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:29.752213Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:29.756072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:29.766486Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894556393086066:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:31:29.976475Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894556393086130:2440] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:30.007630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:30.040046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:30.096356Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894556393086139:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:31:30.096636Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NDRkN2VhNmYtM2E4MmViZDYtMzVmYTRmYy1hYzdhY2Q5Yw==, ActorId: [1:7519894556393086042:2296], ActorState: ExecuteState, TraceId: 01jykr0g6p97esx0m2t9wpn83q, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:31:30.098803Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:31:30.144222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:31:30.364414Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykr0gn98yf3phybrxwqhme4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzQ5NzBhM2EtNzg4ZTdlMmUtYWYwMGEzNDctYTczNTE5ZmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7519894560688053727:2619] 2025-06-25T14:31:32.156519Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894547803150704:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:32.156635Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-25T14:31:36.570265Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:186: new Create topic request 2025-06-25T14:31:36.571245Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:186: new Create topic request 2025-06-25T14:31:36.598787Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037894] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:31:36.598788Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:31:36.599051Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037894] Registered with mediator time cast 2025-06-25T14:31:36.599076Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-25T14:31:36.599596Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037895][] pipe [1:7519894586457857749:2731] connected; active server actors: 1 2025-06-25T14:31:36.599609Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037894] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:31:36.599739Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037894] doesn't have tx info 2025-06-25T14:31:36.599765Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037894] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:31:36.599778Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72075186224037894] no config, start with empty partitions and default config 2025-06-25T14:31:36.599792Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: ... 37896] TxId 281474976715675, NewState DELETING 2025-06-25T14:31:58.638970Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037896] delete key for TxId 281474976715675 2025-06-25T14:31:58.639017Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037896] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:31:58.644755Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037896] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:31:58.644783Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037896] Try execute txs with state DELETING 2025-06-25T14:31:58.644799Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037896] TxId 281474976715675, State DELETING 2025-06-25T14:31:58.644818Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037896] delete TxId 281474976715675 2025-06-25T14:31:58.645965Z node 3 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715676] save tx TxId: 281474976715676 State: CALCULATED MinStep: 1750861918603 MaxStep: 18446744073709551615 Step: 1750861918673 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 MaxSizeInPartition: 9223372036854775807 LifetimeSeconds: 64800 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 2097152 BurstSize: 2097152 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "topic3" Version: 0 
RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/dir2/topic3" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519894633990883668 RawX2: 12884904038 } Partitions { Partition { PartitionId: 0 } } 2025-06-25T14:31:58.646074Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037894] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:31:58.647245Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037894] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:31:58.647267Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037894] Try execute txs with state CALCULATED 2025-06-25T14:31:58.647280Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037894] TxId 281474976715676, State CALCULATED 2025-06-25T14:31:58.647297Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037894] TxId 281474976715676 State CALCULATED FrontTxId 281474976715676 2025-06-25T14:31:58.647314Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037894] TxId 281474976715676, NewState WAIT_RS 2025-06-25T14:31:58.647333Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037894] TxId 281474976715676 moved from CALCULATED to WAIT_RS 2025-06-25T14:31:58.647365Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4027: [PQ: 72075186224037894] Send TEvTxProcessing::TEvReadSet to 0 receivers. Wait TEvTxProcessing::TEvReadSet from 0 senders. 2025-06-25T14:31:58.647385Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4521: [PQ: 72075186224037894] HaveParticipantsDecision 1 2025-06-25T14:31:58.647428Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037894] TxId 281474976715676, NewState EXECUTING 2025-06-25T14:31:58.647446Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037894] TxId 281474976715676 moved from WAIT_RS to EXECUTING 2025-06-25T14:31:58.647457Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72075186224037894] Received 0, Expected 1 2025-06-25T14:31:58.647510Z node 3 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1750861918673, TxId 281474976715676 2025-06-25T14:31:58.647738Z node 3 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-25T14:31:58.648854Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:58.648895Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:31:58.648972Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3583: [PQ: 72075186224037894] Handle TEvPQ::TEvTxCommitDone Step 1750861918673, TxId 281474976715676, Partition 0 2025-06-25T14:31:58.648988Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037894] Try execute txs with state EXECUTING 2025-06-25T14:31:58.649002Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037894] TxId 281474976715676, State EXECUTING 2025-06-25T14:31:58.649016Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037894] TxId 281474976715676 State EXECUTING FrontTxId 281474976715676 2025-06-25T14:31:58.649030Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72075186224037894] Received 1, Expected 1 2025-06-25T14:31:58.649052Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4224: [PQ: 72075186224037894] TxId: 281474976715676 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-25T14:31:58.649067Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4555: [PQ: 72075186224037894] complete TxId 281474976715676 2025-06-25T14:31:58.649321Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72075186224037894] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 MaxSizeInPartition: 9223372036854775807 LifetimeSeconds: 64800 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 2097152 BurstSize: 2097152 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "topic3" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/dir2/topic3" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } 2025-06-25T14:31:58.649376Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:58.649436Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4573: [PQ: 72075186224037894] delete partitions for TxId 281474976715676 2025-06-25T14:31:58.649450Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037894] TxId 281474976715676, NewState EXECUTED 2025-06-25T14:31:58.649468Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037894] TxId 281474976715676 moved from EXECUTING to EXECUTED 2025-06-25T14:31:58.649483Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72075186224037894] write key for TxId 281474976715676 2025-06-25T14:31:58.649741Z node 3 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715676] save tx TxId: 
281474976715676 State: EXECUTED MinStep: 1750861918603 MaxStep: 18446744073709551615 Step: 1750861918673 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 MaxSizeInPartition: 9223372036854775807 LifetimeSeconds: 64800 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 2097152 BurstSize: 2097152 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "topic3" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/dir2/topic3" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519894633990883668 RawX2: 12884904038 } Partitions { Partition { PartitionId: 0 } } 2025-06-25T14:31:58.649880Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037894] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:31:58.653559Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037894] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:31:58.653582Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037894] Try execute txs with state EXECUTED 2025-06-25T14:31:58.653595Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037894] TxId 281474976715676, State EXECUTED 2025-06-25T14:31:58.653610Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037894] TxId 281474976715676 State EXECUTED FrontTxId 281474976715676 2025-06-25T14:31:58.653625Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037894] TPersQueue::SendEvReadSetAckToSenders 2025-06-25T14:31:58.653639Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037894] TxId 281474976715676, NewState WAIT_RS_ACKS 2025-06-25T14:31:58.653652Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037894] TxId 281474976715676 moved from EXECUTED to WAIT_RS_ACKS 2025-06-25T14:31:58.653671Z node 3 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715676] PredicateAcks: 0/0 2025-06-25T14:31:58.653677Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037894] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-25T14:31:58.653689Z node 3 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715676] PredicateAcks: 0/0 2025-06-25T14:31:58.653703Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037894] add an TxId 281474976715676 to the list for deletion 2025-06-25T14:31:58.653721Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037894] TxId 281474976715676, NewState DELETING 2025-06-25T14:31:58.653738Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037894] delete key for TxId 281474976715676 2025-06-25T14:31:58.653780Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037894] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 
2025-06-25T14:31:58.660605Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037894] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:31:58.660638Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037894] Try execute txs with state DELETING 2025-06-25T14:31:58.660654Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037894] TxId 281474976715676, State DELETING 2025-06-25T14:31:58.660672Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037894] delete TxId 281474976715676 >> TSequence::AlterSequence [GOOD] >> TSequence::AlterTableSetDefaultFromSequence >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionNotExists_Test [GOOD] >> TPartitionGraphTest::BuildGraph [GOOD] >> TPartitionTests::AfterRestart_1 >> TInterconnectTest::TestBlobEvent >> TInterconnectTest::TestBlobEvent220BytesPreSerialized >> TInterconnectTest::TestSimplePingPong >> TInterconnectTest::TestBlobEvent220BytesPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizes >> KqpUniqueIndex::ReplaceFkPartialColumnSet [GOOD] >> KqpUniqueIndex::ReplaceFkDuplicate >> TInterconnectTest::TestBlobEvent [GOOD] >> TInterconnectTest::TestBlobEvent220Bytes |78.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TPQTest::The_Value_Of_CreationUnixTime_Must_Not_Decrease [GOOD] |78.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut |78.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut |78.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut >> TInterconnectTest::TestSimplePingPong [GOOD] >> TInterconnectTest::TestSubscribeByFlag >> TestProtocols::TestHTTPCollectedVerySlow [GOOD] >> TestProtocols::TestHTTPRequest >> TInterconnectTest::TestBlobEventDifferentSizes [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerialized >> TPQTest::TestChangeConfig [GOOD] >> TPQTest::PQ_Tablet_Removes_Blobs_Asynchronously >> TInterconnectTest::TestBlobEvent220Bytes [GOOD] >> TInterconnectTest::TestAddressResolve >> TPartitionTests::TestTxBatchInFederation [GOOD] >> TestProtocols::TestHTTPRequest [GOOD] >> TPartitionTests::AfterRestart_1 [GOOD] >> TInterconnectTest::TestSubscribeByFlag [GOOD] >> TInterconnectTest::TestReconnect >> TLocksTest::NoLocksSet [GOOD] >> TLocksTest::MultipleLocks >> TPartitionTests::AfterRestart_2 >> ColumnBuildTest::CancelBuild ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::The_Value_Of_CreationUnixTime_Must_Not_Decrease [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] 2025-06-25T14:31:26.763137Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:26.763227Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927938 is [1:157:2176] sender: 
[1:158:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-25T14:31:26.779776Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:26.796264Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 7864320 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:31:26.797157Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-25T14:31:26.798746Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-25T14:31:26.800344Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-25T14:31:26.801546Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:190:2201] 2025-06-25T14:31:26.820858Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1dd239e1-152f3353-c8c2f38-2fc55947_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:27.011343Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|37234333-a27ea292-2f69397b-5d7f8022_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:27.192909Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3923a2d-64223c20-e59f6812-feeb6843_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:27.247689Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bd47519e-1f6b1d2-4f988a4-f6482e76_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:27.261680Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|781d6565-b8eb052f-e6e4a0d5-75612c54_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:27.276637Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|55cb56b0-49696d51-d75ffb04-12d2b05b_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:112:2057] recipient: [2:105:2137] 2025-06-25T14:31:27.770172Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:27.770253Z node 2 :PERSQUEUE INFO: 
pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:158:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:183:2057] recipient: [2:14:2061] 2025-06-25T14:31:27.792645Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:27.793743Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 2 actor [2:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 7864320 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-25T14:31:27.794429Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:189:2200] 2025-06-25T14:31:27.797344Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:189:2200] 2025-06-25T14:31:27.798884Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:190:2201] 2025-06-25T14:31:27.800597Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:190:2201] 2025-06-25T14:31:27.837004Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|2f47a541-10334686-c0b034b8-89493ea9_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:28.063434Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c0a21cd4-2eba23ea-84104a42-21a4cc18_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:28.224811Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1ca25a41-36b6f12f-ccd75c5b-fb6edf5f_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:28.278639Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|5693165a-3c030fe6-bc9fe9b1-233bdf02_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:28.297432Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e718fcff-7fd6ca0c-8d915855-73ba418_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default !Reboot 72057594037927937 (actor [2:111:2141]) on event NKikimr::TEvPersQueue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:339:2057] recipient: [2:103:2136] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:342:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:343:2057] recipient: [2:341:2328] Leader for TabletID 72057594037927937 is [2:344:2329] sender: [2:345:2057] recipient: [2:341:2328] 2025-06-25T14:31:28.355725Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:28.355792Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:28.356631Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:393:2370] 2025-06-25T14:31:28.359046Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:394:2371] 2025-06-25T14:31:28.368499Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:31:28.368576Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:394:2371] 2025-06-25T14:31:28.369549Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:31:28.369613Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:393:2370] !Reboot 72057594037927937 (actor [2:111:2141]) rebooted! !Reboot 72057594037927937 (actor [2:111:2141]) tablet resolver refreshed! 
new actor is[2:344:2329] Leader for TabletID 72057594037927937 is [2:344:2329] sender: [2:443:2057] recipient: [2:14:2061] 2025-06-25T14:31:29.599213Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|9737fffb-cd5b81cc-3cee20a5-370b1b57_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:112:2057] recipient: [3:105:2137] 2025-06-25T14:31:30.000125Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:30.000202Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927938 is [3:157:2176] sender: [3:158:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:183:2057] recipient: [3:14:2061] 2025-06-25T14:31:30.019569Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:30.020391Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 3 actor [3:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 7864320 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-06-25T14:31:30.021013Z node 3 :PERSQUEUE INFO: ... 
PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|55e83081-93b3cd94-c2fb43b9-84eb4594_16 generated for partition 0 topic 'topic' owner default 2025-06-25T14:31:55.760161Z node 16 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|32b5cfd3-f9d2ff38-623f30cb-276d634f_17 generated for partition 0 topic 'topic' owner default 2025-06-25T14:31:55.875526Z node 16 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c67b341-f00d154a-83487e67-a43b11a4_18 generated for partition 0 topic 'topic' owner default 2025-06-25T14:31:56.002166Z node 16 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a447c4b2-6b16ff5d-ae57c26d-e0203988_19 generated for partition 0 topic 'topic' owner default 2025-06-25T14:31:56.108372Z node 16 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|55162ce2-ac7277c-2d52881b-1276c8d_20 generated for partition 0 topic 'topic' owner default 2025-06-25T14:31:56.229315Z node 16 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|932ff2f1-cbe62a4-4adf0ec0-b9968d48_21 generated for partition 0 topic 'topic' owner default 2025-06-25T14:31:56.312625Z node 16 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|871adadf-24344a01-e882d67-b2ff5920_22 generated for partition 0 topic 'topic' owner default 2025-06-25T14:31:56.404255Z node 16 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|deca744f-73e5b24f-c85c41c8-d2d67180_23 generated for partition 0 topic 'topic' owner default 2025-06-25T14:31:56.614225Z node 16 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:56.614306Z node 16 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:56.614946Z node 16 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [16:653:2589] 2025-06-25T14:31:56.621362Z node 16 :PERSQUEUE INFO: partition_init.cpp:895: [topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:31:56.621450Z node 16 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 3 [16:653:2589] 2025-06-25T14:31:56.847283Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 1 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847370Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 2 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847411Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 3 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847447Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 4 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847486Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 5 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847521Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 6 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847559Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 7 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847596Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 8 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847635Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 9 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847673Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 10 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847707Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 11 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847743Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 12 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847783Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 13 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847827Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 14 partno 0 count 1 parts 2 suffix '63' size 1048783 2025-06-25T14:31:56.847864Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 15 partno 0 count 1 parts 2 suffix '63' size 1048783 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 1 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 2 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 3 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 4 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 5 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 6 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 7 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 8 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 9 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 10 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 11 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 12 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 13 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 14 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 15 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 16 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] 2025-06-25T14:32:02.286877Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 16 partno 0 count 1 parts 2 suffix '63' size 1048783 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 17 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] 2025-06-25T14:32:02.304052Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 17 partno 0 count 1 parts 2 suffix '63' size 1048783 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 18 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] 2025-06-25T14:32:02.327066Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 18 partno 0 count 1 parts 2 suffix '63' size 1048783 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 19 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] 2025-06-25T14:32:02.343799Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 19 partno 0 count 1 parts 2 suffix '63' size 1048783 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 20 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] 2025-06-25T14:32:02.349667Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 20 partno 0 count 1 parts 0 suffix '63' size 41021 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 21 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] 2025-06-25T14:32:02.352141Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 21 partno 0 count 1 parts 0 suffix '63' size 41021 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 22 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] 2025-06-25T14:32:02.354902Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 22 partno 0 count 1 parts 0 suffix '63' size 41021 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 23 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] 2025-06-25T14:32:02.361221Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 23 partno 0 count 1 parts 0 suffix '63' size 41021 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 24 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [16:181:2194] 2025-06-25T14:32:02.363732Z node 16 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 24 partno 0 count 1 parts 0 suffix '63' size 41021 >> TPartitionTests::The_DeletePartition_Message_Arrives_Before_The_ApproveWriteQuota_Message >> Balancing::Balancing_ManyTopics_PQv1 [GOOD] >> CommitOffset::Commit_Flat_WithWrongSession >> TInterconnectTest::TestBlobEventDifferentSizesPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw >> TInterconnectTest::TestAddressResolve [GOOD] >> TInterconnectTest::OldNbs >> Cdc::InitialScanAndResolvedTimestamps [GOOD] |78.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TestProtocols::TestHTTPRequest [GOOD] >> TPQTest::PQ_Tablet_Removes_Blobs_Asynchronously [GOOD] >> TInterconnectTest::TestReconnect [GOOD] >> TInterconnectTest::TestSubscribeAndUnsubsribeByEvent >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw [GOOD] >> TopicAutoscaling::ControlPlane_PauseAutoPartitioning [GOOD] >> TopicAutoscaling::ControlPlane_CDC_Enable >> TInterconnectTest::TestSubscribeAndUnsubsribeByEvent [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::PQ_Tablet_Removes_Blobs_Asynchronously [GOOD] Test command err: 2025-06-25T14:31:18.632842Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:18.632931Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:18.652227Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:18.670770Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "important_user" Generation: 1 Important: true } 2025-06-25T14:31:18.671728Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-25T14:31:18.672697Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:188:2199] Run 1 CmdWrite 2025-06-25T14:31:18.678068Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|7730a7cd-98b6a8f7-d0e2eae2-78116600_0 generated for partition 0 topic 'topic' owner default Captured kesus quota request event from [1:209:2216] Captured kesus quota request event from [1:210:2217] CmdRead Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [1:179:2192] Captured kesus quota request 
event from [1:209:2216] Currently have 3 quoter requests Run 2 CmdWrite 2025-06-25T14:31:19.701633Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|7e414b55-246b92d7-da83bd58-736549cf_1 generated for partition 0 topic 'topic' owner default CmdRead Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [1:179:2192] Captured kesus quota request event from [1:209:2216] Currently have 4 quoter requests Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:112:2057] recipient: [2:105:2137] 2025-06-25T14:31:22.463482Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:22.463564Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:158:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:213:2057] recipient: [2:14:2061] 2025-06-25T14:31:22.491911Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][] pipe [2:211:2215] connected; active server actors: 1 2025-06-25T14:31:22.491966Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][] pipe [2:212:2216] connected; active server actors: 1 Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:223:2057] recipient: [2:149:2171] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:226:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:227:2057] recipient: [2:225:2224] Leader for TabletID 72057594037927938 is [2:228:2225] sender: [2:229:2057] recipient: [2:225:2224] Leader for TabletID 72057594037927938 is [2:228:2225] sender: [2:284:2057] recipient: [2:14:2061] 2025-06-25T14:31:23.412710Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [2:283:2263] connected; active server actors: 1 2025-06-25T14:31:23.790198Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [2:291:2268] connected; active server actors: 1 2025-06-25T14:31:23.790552Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [2:293:2270] connected; active server actors: 1 2025-06-25T14:31:23.790915Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [2:295:2272] connected; active server actors: 1 2025-06-25T14:31:23.791198Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [2:297:2274] connected; active server actors: 1 2025-06-25T14:31:23.791523Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [2:299:2276] connected; active server actors: 1 Leader for TabletID 72057594037927938 is [2:228:2225] sender: 
[2:304:2057] recipient: [2:149:2171] Leader for TabletID 72057594037927938 is [2:228:2225] sender: [2:307:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927938 is [2:228:2225] sender: [2:308:2057] recipient: [2:306:2281] Leader for TabletID 72057594037927938 is [2:309:2282] sender: [2:310:2057] recipient: [2:306:2281] Leader for TabletID 72057594037927938 is [2:309:2282] sender: [2:351:2057] recipient: [2:14:2061] 2025-06-25T14:31:23.826642Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [2:350:2316] connected; active server actors: 1 2025-06-25T14:31:23.827054Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [2:353:2318] connected; active server actors: 1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:112:2057] recipient: [3:105:2137] 2025-06-25T14:31:24.317907Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:24.317992Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927938 is [3:157:2176] sender: [3:158:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927938 is [3:157:2176] sender: [3:213:2057] recipient: [3:14:2061] 2025-06-25T14:31:24.345982Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][] pipe [3:211:2215] connected; active server actors: 1 2025-06-25T14:31:24.346114Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][] pipe [3:212:2216] connected; active server actors: 1 Leader for TabletID 72057594037927938 is [3:157:2176] sender: [3:224:2057] recipient: [3:149:2171] Leader for TabletID 72057594037927938 is [3:157:2176] sender: [3:227:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927938 is [3:157:2176] sender: [3:228:2057] recipient: [3:226:2225] Leader for TabletID 72057594037927938 is [3:229:2226] sender: [3:230:2057] recipient: [3:226:2225] Leader for TabletID 72057594037927938 is [3:229:2226] sender: [3:285:2057] recipient: [3:14:2061] 2025-06-25T14:31:25.258736Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [3:284:2264] connected; active server actors: 1 2025-06-25T14:31:25.644909Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [3:292:2269] connected; active server actors: 1 2025-06-25T14:31:25.645366Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [3:294:2271] connected; active server actors: 1 2025-06-25T14:31:25.645736Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [3:296:2273] connected; active server actors: 1 2025-06-25T14:31:25.646073Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [3:298:2275] 
connected; active server actors: 1 2025-06-25T14:31:25.646452Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [3:300:2277] connected; active server actors: 1 Leader for TabletID 72057594037927938 is [3:229:2226] sender: [3:305:2057] recipient: [3:149:2171] Leader for TabletID 72057594037927938 is [3:229:2226] sender: [3:308:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927938 is [3:229:2226] sender: [3:309:2057] recipient: [3:307:2282] Leader for TabletID 72057594037927938 is [3:310:2283] sender: [3:311:2057] recipient: [3:307:2282] Leader for TabletID 72057594037927938 is [3:310:2283] sender: [3:351:2057] recipient: [3:14:2061] 2025-06-25T14:31:25.666776Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [3:350:2316] connected; active server actors: 1 2025-06-25T14:31:25.667179Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [3:353:2318] connected; active server actors: 1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:107:2057] recipient: [4:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:107:2057] recipient: [4:105:2137] Leader for TabletID 72057594037927937 is [4:111:2141] sender: [4:112:2057] recipient: [4:105:2137] 2025-06-25T14:31:26.146591Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:26.146660Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:153:2057] recipient: [4:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:153:2057] recipient: [4:151:2172] Leader for TabletID 72057594037927938 is [4:157:2176] sender: [4:158:2057] recipient: [4:151:2172] Leader for TabletID 72057594037927938 is [4:157:2176] sender: [4:213:2057] recipient: [4:14:2061] 2025-06-25T14:31:26.172609Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][] pipe [4:211:2215] connected; active server actors: 1 2025-06-25T14:31:26.172760Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][] pipe [4:212:2216] connected; active server actors: 1 Leader for TabletID 72057594037927938 is [4:157:2176] sender: [4: ... 2:03.722226Z node 25 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:32:03.722291Z node 25 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000001_00000_0000000001_00002?(+) to d0000000000_00000000000000000001_00000_0000000001_00002?(+) 2025-06-25T14:32:03.722330Z node 25 :PERSQUEUE DEBUG: read.h:348: CacheProxy. 
Delete blobs from d0000000000_00000000000000000000_00000_0000000001_00000|(+) to d0000000000_00000000000000000000_00000_0000000001_00000|(+) 2025-06-25T14:32:03.722524Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:32:03.722846Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 Topic 'topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 2 max time lag 0ms effective offset 0 2025-06-25T14:32:03.722917Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 2 2025-06-25T14:32:03.723978Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:1062: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Reading cookie 2. All data is from uncompacted head. 2025-06-25T14:32:03.724044Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 0 blobs 2025-06-25T14:32:03.724188Z node 25 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'topic' partition 0 user user readTimeStamp done, result 130 queuesize 0 startOffset 0 2025-06-25T14:32:03.727453Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:32:03.727548Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction >>> write #3 2025-06-25T14:32:03.746631Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [25:330:2312], now have 1 active actors on pipe 2025-06-25T14:32:03.746794Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:32:03.746861Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-25T14:32:03.746943Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: topic partition: 0 SourceId: 'sourceid1' SeqNo: 3 partNo : 0 messageNo: 1 size 1024 offset: 2 2025-06-25T14:32:03.747049Z node 25 :PERSQUEUE DEBUG: event_helpers.cpp:40: tablet 72057594037927937 topic 'topic' partition 0 error: new GetOwnership request needed for owner 2025-06-25T14:32:03.747162Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:1434: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 1, Error new GetOwnership request needed for owner 2025-06-25T14:32:03.747214Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:402: Answer error topic: 'topic' partition: 0 messageNo: 1 requestId: error: new GetOwnership request needed for owner 2025-06-25T14:32:03.747586Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [25:333:2314], now have 1 active actors on pipe 2025-06-25T14:32:03.747696Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:32:03.747752Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-25T14:32:03.747879Z node 25 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8bd01c78-e10d7006-e2248636-e5bcf78e_0 generated for partition 0 topic 'topic' owner default 2025-06-25T14:32:03.748011Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, 
Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-25T14:32:03.748108Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:32:03.748481Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [25:335:2316], now have 1 active actors on pipe 2025-06-25T14:32:03.748569Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:32:03.748604Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-25T14:32:03.748653Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: topic partition: 0 SourceId: 'sourceid1' SeqNo: 3 partNo : 0 messageNo: 0 size 1024 offset: 2 2025-06-25T14:32:03.748780Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:1843: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Send write quota request. Topic: "topic". Partition: 0. Amount: 1033. Cookie: 1 2025-06-25T14:32:03.748890Z node 25 :PERSQUEUE DEBUG: partition.cpp:3720: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Got quota. Topic: "topic". Partition: 0: Cookie: 1 2025-06-25T14:32:03.749078Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 part blob processing sourceId 'sourceid1' seqNo 3 partNo 0 2025-06-25T14:32:03.749906Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 part blob complete sourceId 'sourceid1' seqNo 3 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 1096 count 1 nextOffset 3 batches 1 2025-06-25T14:32:03.750583Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'topic' partition 0 compactOffset 2,1 HeadOffset 2 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000? size 1084 WTime 264 2025-06-25T14:32:03.750829Z node 25 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:32:03.750954Z node 25 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 1084 2025-06-25T14:32:03.754734Z node 25 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 2 count 1 size 1084 actorID [25:295:2286] 2025-06-25T14:32:03.754912Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 1033 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:32:03.754999Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-25T14:32:03.755091Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'sourceid1', Topic: 'topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-25T14:32:03.755348Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:179: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need run compaction for 1084 bytes in 1 blobs 2025-06-25T14:32:03.755442Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:32:03.755559Z node 25 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72057594037927937' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' size 1084 2025-06-25T14:32:03.755639Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] begin compaction for 1084 bytes in 1 blobs 2025-06-25T14:32:03.755756Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:230: [PQ: 72057594037927937, Partition: 0, State: StateIdle] request key d0000000000_00000000000000000002_00000_0000000001_00000?, size 1084 2025-06-25T14:32:03.755822Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:238: [PQ: 72057594037927937, Partition: 0, State: StateIdle] request 1 blobs for compaction 2025-06-25T14:32:03.755891Z node 25 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 2 partno 0 count 1 parts_count 0 source 1 size 1084 accessed 0 times before, last time 1970-01-01T00:00:00.000000Z 2025-06-25T14:32:03.755945Z node 25 :PERSQUEUE DEBUG: read.h:121: Reading cookie 0. All 1 blobs are from cache. 2025-06-25T14:32:03.756030Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:245: [PQ: 72057594037927937, Partition: 0, State: StateIdle] continue compaction 2025-06-25T14:32:03.756131Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 part blob processing sourceId 'sourceid1' seqNo 3 partNo 0 2025-06-25T14:32:03.756980Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 part blob complete sourceId 'sourceid1' seqNo 3 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 1096 count 1 nextOffset 3 batches 1 2025-06-25T14:32:03.757613Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'topic' partition 0 compactOffset 2,1 HeadOffset 0 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000| size 1084 WTime 266 2025-06-25T14:32:03.757830Z node 25 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72057594037927937' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:32:03.757891Z node 25 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:32:03.757968Z node 25 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 1084 2025-06-25T14:32:03.761052Z node 25 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. 
Partition 0 offset 2 count 1 size 1084 actorID [25:295:2286] 2025-06-25T14:32:03.761161Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:323: [PQ: 72057594037927937, Partition: 0, State: StateIdle] compaction completed 2025-06-25T14:32:03.761435Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:32:03.761523Z node 25 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72057594037927937' partition 0 offset 2 partno 0 count 1 parts 0 suffix '124' size 1084 2025-06-25T14:32:03.761580Z node 25 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:32:03.761624Z node 25 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000002_00000_0000000001_00000?(+) to d0000000000_00000000000000000002_00000_0000000001_00000?(+) 2025-06-25T14:32:03.763450Z node 25 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 2 count 1 actorID [25:295:2286] 2025-06-25T14:32:03.763562Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:32:03.763637Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:32:03.763805Z node 25 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. Tablet '72057594037927937' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' size 1084 >>> write #1 2025-06-25T14:32:03.767870Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [25:358:2336], now have 1 active actors on pipe >> TInterconnectTest::OldNbs [GOOD] >> TPartitionTests::AfterRestart_2 [GOOD] >> TPartitionTests::The_DeletePartition_Message_Arrives_Before_The_ApproveWriteQuota_Message [GOOD] >> TSequence::AlterTableSetDefaultFromSequence [GOOD] |78.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw [GOOD] |78.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestSubscribeAndUnsubsribeByEvent [GOOD] Test command err: 2025-06-25T14:32:03.547544Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [6:10:2048] [node 5] ICP77 @206 (null) -> PendingActivation 2025-06-25T14:32:03.547664Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [6:10:2048] [node 5] ICP01 ready to work 2025-06-25T14:32:03.547889Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @206 (null) -> PendingActivation 2025-06-25T14:32:03.547921Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:43: Proxy [5:1:2048] [node 6] ICP01 ready to work 2025-06-25T14:32:03.548936Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @99 PendingActivation -> PendingNodeInfo 2025-06-25T14:32:03.550523Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:119: Proxy [5:1:2048] [node 6] ICP02 configured for host ::1:29348 2025-06-25T14:32:03.550700Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @488 PendingNodeInfo -> PendingConnection 2025-06-25T14:32:03.551226Z 
node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:677: Handshake [5:21:2058] [node 6] ICH01 starting outgoing handshake 2025-06-25T14:32:03.551394Z node 5 :INTERCONNECT DEBUG: interconnect_resolve.cpp:128: ICR04 Host: ::1, RESOLVED address 2025-06-25T14:32:03.556720Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:682: Handshake [5:21:2058] [node 6] ICH05 connected to peer 2025-06-25T14:32:03.557352Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_server.cpp:104: ICListener: [0:0:0] ICL04 Accepted from: ::1:39026 2025-06-25T14:32:03.557872Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:914: Handshake [6:23:2058] [node 0] ICH02 starting incoming handshake 2025-06-25T14:32:03.565096Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [5:21:2058] [node 6] ICH07 SendExBlock ExRequest Protocol: 2 ProgramPID: 156436 ProgramStartTime: 1848379745448 Serial: 4016281040 ReceiverNodeId: 6 SenderActorId: "[5:4016281040:0]" SenderHostName: "::1" ReceiverHostName: "::1" UUID: "Cluster for process with id: 156436" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 156436" AcceptUUID: "Cluster for process with id: 156436" } RequestModernFrame: true RequestAuthOnly: false RequestExtendedTraceFmt: true RequestExternalDataChannel: true HandshakeId: "\244\t6\332\212\373W\014)?\371g\364\271\211\202$\317\036I\371\370Oow\t\275V-\323\364p" RequestXxhash: true RequestXdcShuffle: true 2025-06-25T14:32:03.565869Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [6:23:2058] [node 5] ICH07 ReceiveExBlock ExRequest Protocol: 2 ProgramPID: 156436 ProgramStartTime: 1848379745448 Serial: 4016281040 ReceiverNodeId: 6 SenderActorId: "[5:4016281040:0]" SenderHostName: "::1" ReceiverHostName: "::1" UUID: "Cluster for process with id: 156436" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 156436" AcceptUUID: "Cluster for process with id: 156436" } RequestModernFrame: true RequestAuthOnly: false RequestExtendedTraceFmt: true RequestExternalDataChannel: true HandshakeId: "\244\t6\332\212\373W\014)?\371g\364\271\211\202$\317\036I\371\370Oow\t\275V-\323\364p" RequestXxhash: true RequestXdcShuffle: true 2025-06-25T14:32:03.565953Z node 6 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [6:23:2058] [node 5] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-25T14:32:03.566429Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [6:10:2048] [node 5] ICP77 @99 PendingActivation -> PendingNodeInfo 2025-06-25T14:32:03.567905Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:119: Proxy [6:10:2048] [node 5] ICP02 configured for host ::1:21181 2025-06-25T14:32:03.567974Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:262: Proxy [6:10:2048] [node 5] ICP17 incoming handshake (actor [6:23:2058]) 2025-06-25T14:32:03.568030Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [6:10:2048] [node 5] ICP77 @488 PendingNodeInfo -> PendingConnection 2025-06-25T14:32:03.568094Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:210: Proxy [6:10:2048] [node 5] ICP07 issued incoming handshake reply 2025-06-25T14:32:03.568174Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:219: Proxy [6:10:2048] [node 5] ICP08 No active sessions, becoming PendingConnection 2025-06-25T14:32:03.568227Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [6:10:2048] [node 5] ICP77 @220 PendingConnection -> PendingConnection 2025-06-25T14:32:03.568738Z node 6 :INTERCONNECT 
DEBUG: interconnect_handshake.cpp:612: Handshake [6:23:2058] [node 5] ICH07 SendExBlock ExReply Success { Protocol: 2 ProgramPID: 156436 ProgramStartTime: 1848393074744 Serial: 2656716856 SenderActorId: "[6:2656716856:0]" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 156436" AcceptUUID: "Cluster for process with id: 156436" } StartEncryption: false UseModernFrame: true AuthOnly: false UseExtendedTraceFmt: true UseExternalDataChannel: true UseXxhash: true UseXdcShuffle: true } 2025-06-25T14:32:03.570375Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [5:21:2058] [node 6] ICH07 ReceiveExBlock ExReply Success { Protocol: 2 ProgramPID: 156436 ProgramStartTime: 1848393074744 Serial: 2656716856 SenderActorId: "[6:2656716856:0]" ClusterUUIDs { ClusterUUID: "Cluster for process with id: 156436" AcceptUUID: "Cluster for process with id: 156436" } StartEncryption: false UseModernFrame: true AuthOnly: false UseExtendedTraceFmt: true UseExternalDataChannel: true UseXxhash: true UseXdcShuffle: true } 2025-06-25T14:32:03.570461Z node 5 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [5:21:2058] [node 6] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-25T14:32:03.570649Z node 5 :INTERCONNECT DEBUG: interconnect_resolve.cpp:128: ICR04 Host: ::1, RESOLVED address 2025-06-25T14:32:03.571713Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [5:21:2058] [node 6] ICH07 SendExBlock ExternalDataChannelParams HandshakeId: "\244\t6\332\212\373W\014)?\371g\364\271\211\202$\317\036I\371\370Oow\t\275V-\323\364p" 2025-06-25T14:32:03.571832Z node 5 :INTERCONNECT INFO: interconnect_handshake.cpp:375: Handshake [5:21:2058] [node 6] ICH04 handshake succeeded 2025-06-25T14:32:03.572112Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:338: Proxy [5:1:2048] [node 6] ICP20 outgoing handshake succeeded 2025-06-25T14:32:03.572169Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:460: Proxy [5:1:2048] [node 6] ICP052 dropped outgoing handshake: [5:21:2058] poison: false 2025-06-25T14:32:03.572215Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @350 PendingConnection -> StateWork 2025-06-25T14:32:03.572375Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:377: Proxy [5:1:2048] [node 6] ICP22 created new session: [5:25:2048] 2025-06-25T14:32:03.572457Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:259: Session [5:25:2048] [node 6] ICS09 handshake done sender: [5:21:2058] self: [5:4016281040:0] peer: [6:2656716856:0] socket: 24 2025-06-25T14:32:03.572522Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:281: Session [5:25:2048] [node 6] ICS10 traffic start 2025-06-25T14:32:03.572608Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:297: Session [5:25:2048] [node 6] ICS11 registering socket in PollerActor 2025-06-25T14:32:03.572683Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:25:2048] [node 6] ICS23 confirm count: 0 2025-06-25T14:32:03.572734Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:336: Session [5:25:2048] [node 6] ICS06 rewind SendQueue size# 0 LastConfirmed# 0 NextSerial# 1 2025-06-25T14:32:03.572808Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:25:2048] [node 6] ICS23 confirm count: 0 2025-06-25T14:32:03.572872Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:222: Session [5:25:2048] 
[node 6] ICS04 subscribe for session state for [5:19:2057] 2025-06-25T14:32:03.572988Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_server.cpp:104: ICListener: [0:0:0] ICL04 Accepted from: ::1:39038 2025-06-25T14:32:03.573406Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:914: Handshake [6:27:2059] [node 0] ICH02 starting incoming handshake 2025-06-25T14:32:03.578880Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:143: InputSession [5:26:2048] [node 6] ICIS01 InputSession created 2025-06-25T14:32:03.578977Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [5:26:2048] [node 6] ICIS02 ReceiveData called 2025-06-25T14:32:03.579080Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:26:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2025-06-25T14:32:03.579479Z node 6 :INTERCONNECT INFO: interconnect_handshake.cpp:375: Handshake [6:23:2058] [node 5] ICH04 handshake succeeded 2025-06-25T14:32:03.579716Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:334: Proxy [6:10:2048] [node 5] ICP19 incoming handshake succeeded 2025-06-25T14:32:03.579774Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:445: Proxy [6:10:2048] [node 5] ICP111 dropped incoming handshake: [6:23:2058] poison: false 2025-06-25T14:32:03.579822Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [6:10:2048] [node 5] ICP77 @350 PendingConnection -> StateWork 2025-06-25T14:32:03.579934Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:377: Proxy [6:10:2048] [node 5] ICP22 created new session: [6:28:2048] 2025-06-25T14:32:03.579999Z node 6 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:259: Session [6:28:2048] [node 5] ICS09 handshake done sender: [6:23:2058] self: [6:2656716856:0] peer: [5:4016281040:0] socket: 25 2025-06-25T14:32:03.580047Z node 6 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:281: Session [6:28:2048] [node 5] ICS10 traffic start 2025-06-25T14:32:03.580129Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:297: Session [6:28:2048] [node 5] ICS11 registering socket in PollerActor 2025-06-25T14:32:03.580189Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:28:2048] [node 5] ICS23 confirm count: 0 2025-06-25T14:32:03.580229Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:336: Session [6:28:2048] [node 5] ICS06 rewind SendQueue size# 0 LastConfirmed# 0 NextSerial# 1 2025-06-25T14:32:03.580284Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:28:2048] [node 5] ICS23 confirm count: 0 2025-06-25T14:32:03.580431Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:143: InputSession [6:29:2048] [node 5] ICIS01 InputSession created 2025-06-25T14:32:03.580501Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [6:29:2048] [node 5] ICIS02 ReceiveData called 2025-06-25T14:32:03.580566Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [6:29:2048] [node 5] ICIS12 Read recvres# -11 num# 1 err# 2025-06-25T14:32:03.580608Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [5:26:2048] [node 6] ICIS02 ReceiveData called 2025-06-25T14:32:03.580638Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:26:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2025-06-25T14:32:03.580693Z node 5 
:INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [5:26:2048] [node 6] ICIS02 ReceiveData called 2025-06-25T14:32:03.580741Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:26:2048] [node 6] ICIS12 Read re ... proxy.cpp:234: Proxy [5:1:2048] [node 6] ICP09 (actor [5:35:2060]) from: [6:2656716856:0] for: [5:4016281040:0] 2025-06-25T14:32:03.596246Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:245: Session [5:25:2048] [node 6] ICS08 incoming handshake Self# [6:2656716856:0] Peer# [5:4016281040:0] Counter# 1 LastInputSerial# 1 2025-06-25T14:32:03.596288Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:203: Proxy [5:1:2048] [node 6] ICP06 reply for incoming handshake (actor [5:35:2060]) is held 2025-06-25T14:32:03.597628Z node 5 :INTERCONNECT DEBUG: interconnect_resolve.cpp:128: ICR04 Host: ::1, RESOLVED address 2025-06-25T14:32:03.599233Z node 5 :INTERCONNECT DEBUG: interconnect_handshake.cpp:612: Handshake [5:31:2059] [node 6] ICH07 SendExBlock ExternalDataChannelParams HandshakeId: "\265#\317\321\3419\300\301\316+\260-\312v{N\212J\034\265\213K\n\020\303\311\362\317n\316R<" 2025-06-25T14:32:03.599334Z node 5 :INTERCONNECT INFO: interconnect_handshake.cpp:375: Handshake [5:31:2059] [node 6] ICH04 handshake succeeded 2025-06-25T14:32:03.600645Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:338: Proxy [5:1:2048] [node 6] ICP20 outgoing handshake succeeded 2025-06-25T14:32:03.600708Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:445: Proxy [5:1:2048] [node 6] ICP111 dropped incoming handshake: [5:35:2060] poison: true 2025-06-25T14:32:03.600787Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:460: Proxy [5:1:2048] [node 6] ICP052 dropped outgoing handshake: [5:31:2059] poison: false 2025-06-25T14:32:03.600835Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @350 StateWork -> StateWork 2025-06-25T14:32:03.600913Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:259: Session [5:25:2048] [node 6] ICS09 handshake done sender: [5:31:2059] self: [5:4016281040:0] peer: [6:2656716856:0] socket: 28 2025-06-25T14:32:03.600981Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:281: Session [5:25:2048] [node 6] ICS10 traffic start 2025-06-25T14:32:03.601080Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:297: Session [5:25:2048] [node 6] ICS11 registering socket in PollerActor 2025-06-25T14:32:03.601161Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:25:2048] [node 6] ICS23 confirm count: 1 2025-06-25T14:32:03.601224Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:336: Session [5:25:2048] [node 6] ICS06 rewind SendQueue size# 1 LastConfirmed# 1 NextSerial# 2 2025-06-25T14:32:03.601310Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:25:2048] [node 6] ICS23 confirm count: 1 2025-06-25T14:32:03.601388Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_server.cpp:104: ICListener: [0:0:0] ICL04 Accepted from: ::1:39056 2025-06-25T14:32:03.601870Z node 6 :INTERCONNECT DEBUG: interconnect_handshake.cpp:914: Handshake [6:38:2062] [node 0] ICH02 starting incoming handshake 2025-06-25T14:32:03.608686Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:143: InputSession [5:37:2048] [node 6] ICIS01 InputSession created 2025-06-25T14:32:03.612748Z node 5 :INTERCONNECT_SESSION DEBUG: 
interconnect_tcp_input_session.cpp:219: InputSession [5:37:2048] [node 6] ICIS02 ReceiveData called 2025-06-25T14:32:03.612885Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:37:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2025-06-25T14:32:03.613757Z node 6 :INTERCONNECT NOTICE: interconnect_tcp_proxy.cpp:408: Proxy [6:10:2048] [node 5] ICP25 outgoing handshake failed, temporary: 0 explanation: outgoing handshake Peer# ::1(::1:21181) Socket error# connection unexpectedly closed state# ReceiveResponse processed# 0 remain# 52 incoming: [6:34:2061] held: no 2025-06-25T14:32:03.613832Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:460: Proxy [6:10:2048] [node 5] ICP052 dropped outgoing handshake: [6:30:2060] poison: false 2025-06-25T14:32:03.613900Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.cpp:431: Proxy [6:10:2048] [node 5] ICP28 other handshake is still going on 2025-06-25T14:32:03.614323Z node 6 :INTERCONNECT INFO: interconnect_handshake.cpp:375: Handshake [6:34:2061] [node 5] ICH04 handshake succeeded 2025-06-25T14:32:03.614545Z node 6 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:334: Proxy [6:10:2048] [node 5] ICP19 incoming handshake succeeded 2025-06-25T14:32:03.614601Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:445: Proxy [6:10:2048] [node 5] ICP111 dropped incoming handshake: [6:34:2061] poison: false 2025-06-25T14:32:03.614648Z node 6 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [6:10:2048] [node 5] ICP77 @350 StateWork -> StateWork 2025-06-25T14:32:03.614701Z node 6 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:259: Session [6:28:2048] [node 5] ICS09 handshake done sender: [6:34:2061] self: [6:2656716856:0] peer: [5:4016281040:0] socket: 30 2025-06-25T14:32:03.614753Z node 6 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:281: Session [6:28:2048] [node 5] ICS10 traffic start 2025-06-25T14:32:03.614833Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:297: Session [6:28:2048] [node 5] ICS11 registering socket in PollerActor 2025-06-25T14:32:03.614906Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:28:2048] [node 5] ICS23 confirm count: 1 2025-06-25T14:32:03.614944Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_channel.cpp:59: OutputChannel 0 [node 5] ICOCH98 Dropping confirmed messages 2025-06-25T14:32:03.615023Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:991: Session [6:28:2048] [node 5] ICS24 exit InflightDataAmount: 0 bytes droppedDataAmount: 84 bytes dropped 1 packets 2025-06-25T14:32:03.615081Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:336: Session [6:28:2048] [node 5] ICS06 rewind SendQueue size# 0 LastConfirmed# 1 NextSerial# 2 2025-06-25T14:32:03.615132Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:28:2048] [node 5] ICS23 confirm count: 1 2025-06-25T14:32:03.615218Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:143: InputSession [6:39:2048] [node 5] ICIS01 InputSession created 2025-06-25T14:32:03.615271Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [5:37:2048] [node 6] ICIS02 ReceiveData called 2025-06-25T14:32:03.615326Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:37:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2025-06-25T14:32:03.615426Z node 6 :INTERCONNECT_SESSION DEBUG: 
interconnect_tcp_input_session.cpp:219: InputSession [6:39:2048] [node 5] ICIS02 ReceiveData called 2025-06-25T14:32:03.615495Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [6:39:2048] [node 5] ICIS12 Read recvres# 106 num# 1 err# 2025-06-25T14:32:03.615572Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [6:39:2048] [node 5] ICIS12 Read recvres# -11 num# 1 err# 2025-06-25T14:32:03.615612Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [5:37:2048] [node 6] ICIS02 ReceiveData called 2025-06-25T14:32:03.615648Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:37:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2025-06-25T14:32:03.615691Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [6:39:2048] [node 5] ICIS02 ReceiveData called 2025-06-25T14:32:03.615718Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [6:39:2048] [node 5] ICIS12 Read recvres# -11 num# 1 err# 2025-06-25T14:32:03.615888Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:25:2048] [node 6] ICS23 confirm count: 1 2025-06-25T14:32:03.615948Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:25:2048] [node 6] ICS23 confirm count: 1 2025-06-25T14:32:03.616006Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [6:39:2048] [node 5] ICIS02 ReceiveData called 2025-06-25T14:32:03.616050Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [6:39:2048] [node 5] ICIS12 Read recvres# -11 num# 1 err# 2025-06-25T14:32:03.616133Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:25:2048] [node 6] ICS23 confirm count: 1 2025-06-25T14:32:03.616161Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:25:2048] [node 6] ICS23 confirm count: 1 2025-06-25T14:32:03.616228Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:28:2048] [node 5] ICS23 confirm count: 1 2025-06-25T14:32:03.616257Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:28:2048] [node 5] ICS23 confirm count: 1 2025-06-25T14:32:03.616302Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:147: Session [6:28:2048] [node 5] ICS02 send event from: [6:20:2057] to: [5:19:2057] 2025-06-25T14:32:03.616403Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:929: Session [6:28:2048] [node 5] ICS22 outgoing packet Serial# 2 Confirm# 2 DataSize# 84 InflightDataAmount# 84 2025-06-25T14:32:03.616483Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:28:2048] [node 5] ICS23 confirm count: 1 2025-06-25T14:32:03.616526Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:28:2048] [node 5] ICS23 confirm count: 1 2025-06-25T14:32:03.616563Z node 6 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [6:28:2048] [node 5] ICS23 confirm count: 1 2025-06-25T14:32:03.616641Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:219: InputSession [5:37:2048] [node 6] ICIS02 ReceiveData called 2025-06-25T14:32:03.616684Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:37:2048] [node 6] ICIS12 Read recvres# 106 num# 1 err# 
2025-06-25T14:32:03.616767Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_input_session.cpp:704: InputSession [5:37:2048] [node 6] ICIS12 Read recvres# -11 num# 1 err# 2025-06-25T14:32:03.616803Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:25:2048] [node 6] ICS23 confirm count: 2 2025-06-25T14:32:03.616843Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_channel.cpp:59: OutputChannel 0 [node 6] ICOCH98 Dropping confirmed messages 2025-06-25T14:32:03.616916Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:991: Session [5:25:2048] [node 6] ICS24 exit InflightDataAmount: 0 bytes droppedDataAmount: 84 bytes dropped 1 packets 2025-06-25T14:32:03.616985Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_tcp_session.cpp:940: Session [5:25:2048] [node 6] ICS23 confirm count: 2 2025-06-25T14:32:03.617085Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:96: Session [5:25:2048] [node 6] ICS01 socket: 28 reason# 2025-06-25T14:32:03.617154Z node 5 :INTERCONNECT INFO: interconnect_tcp_proxy.cpp:542: Proxy [5:1:2048] [node 6] ICP30 unregister session Session# [5:25:2048] VirtualId# [5:4016281040:0] 2025-06-25T14:32:03.617211Z node 5 :INTERCONNECT DEBUG: interconnect_tcp_proxy.h:180: Proxy [5:1:2048] [node 6] ICP77 @206 StateWork -> PendingActivation 2025-06-25T14:32:03.617259Z node 5 :INTERCONNECT_SESSION INFO: interconnect_tcp_session.cpp:544: Session [5:25:2048] [node 6] ICS25 shutdown socket, reason# 2025-06-25T14:32:03.617359Z node 5 :INTERCONNECT_SESSION DEBUG: interconnect_channel.cpp:337: OutputChannel 0 [node 6] ICOCH89 Notyfying about Undelivered messages! NotYetConfirmed size: 0, Queue size: 0 >> KqpIndexes::CheckUpsertNonEquatableType+NotNull [GOOD] >> KqpIndexes::CheckUpsertNonEquatableType-NotNull |78.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::OldNbs [GOOD] >> CommitOffset::DistributedTxCommit_ChildFirst [GOOD] >> CommitOffset::DistributedTxCommit_CheckSessionResetAfterCommit >> TopicAutoscaling::PartitionSplit_ManySession_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_AutoscaleAwareSDK >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::The_DeletePartition_Message_Arrives_Before_The_ApproveWriteQuota_Message [GOOD] Test command err: 2025-06-25T14:31:24.002565Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:24.002654Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:31:24.024088Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:181:2194] 2025-06-25T14:31:24.024787Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 1 generation 0 [1:181:2194] Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" 
StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" 
StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: 
"m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient" Value: "\010\000\020\002\030\003\"\nsession-id(\0000\001@\000" StorageChan ... 
ition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:32:01.551582Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:32:02.950958Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:32:02.951151Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:32:02.951365Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 17 Wait batch completion 2025-06-25T14:32:02.951680Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 10 Wait kv request 2025-06-25T14:32:03.224243Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-06-25T14:32:03.224331Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-06-25T14:32:03.224404Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:32:03.224514Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:32:03.224553Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:32:03.224651Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:32:03.224706Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:32:03.224989Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-25T14:32:03.225069Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1257: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Already written message. Topic: 'rt3.dc1--account--topic' Partition: 0 SourceId: 'src4'. Message seqNo: 7. Committed seqNo: (NULL). Writing seqNo: 7. EndOffset: 50. CurOffset: 50. 
Offset: 50 2025-06-25T14:32:03.225185Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 8 partNo 0 2025-06-25T14:32:03.226032Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 8 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 84 count 1 nextOffset 52 batches 1 2025-06-25T14:32:03.226129Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 9 partNo 0 2025-06-25T14:32:03.226181Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 9 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 136 count 2 nextOffset 53 batches 1 2025-06-25T14:32:03.226214Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 10 partNo 0 2025-06-25T14:32:03.226249Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 10 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 188 count 3 nextOffset 54 batches 1 2025-06-25T14:32:03.226284Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 11 partNo 0 2025-06-25T14:32:03.226315Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 11 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 240 count 4 nextOffset 55 batches 1 2025-06-25T14:32:03.227413Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 12 partNo 0 2025-06-25T14:32:03.227475Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 12 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize 292 count 5 nextOffset 56 batches 1 2025-06-25T14:32:03.227524Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:32:03.227569Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 51 PartNo 0 PackedSize 292 count 5 nextOffset 56 batches 1 2025-06-25T14:32:03.227605Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-25T14:32:03.227654Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 
72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 10 2025-06-25T14:32:03.227932Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 51 PartNo 0 PackedSize 292 count 5 nextOffset 56 batches 1 2025-06-25T14:32:03.228481Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'rt3.dc1--account--topic' partition 0 compactOffset 51,5 HeadOffset 50 endOffset 50 curOffset 56 d0000000000_00000000000000000051_00000_0000000005_00000? size 189 WTime 21151 Got KV request Got KV request Got KV request Got KV request Got KV request Got KV request Got KV request Got KV request Wait tx committed for tx 0 2025-06-25T14:32:03.249803Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 85 WriteNewSizeFromSupportivePartitions# 4 2025-06-25T14:32:03.249910Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:32:03.249986Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'rt3.dc1--account--topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 50 is already written 2025-06-25T14:32:03.250029Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:32:03.250062Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'rt3.dc1--account--topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 50 is already written 2025-06-25T14:32:03.250086Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:32:03.250115Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'rt3.dc1--account--topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 50 is already written 2025-06-25T14:32:03.250140Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:32:03.250172Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'rt3.dc1--account--topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 50 is already written 2025-06-25T14:32:03.250196Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:32:03.250228Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'rt3.dc1--account--topic', Partition: 0, SeqNo: 11, partNo: 0, Offset: 50 is already written 2025-06-25T14:32:03.250254Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-25T14:32:03.250287Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'rt3.dc1--account--topic', Partition: 0, SeqNo: 12, partNo: 0, Offset: 50 is already written 2025-06-25T14:32:03.250697Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=189, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Wait immediate tx complete 3 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 3 Wait immediate tx complete 6 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 6 Wait tx committed for tx 10 2025-06-25T14:32:04.038344Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:04.038422Z node 5 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:32:04.066301Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {1, {2, 3}, 4}, State: StateInit] bootstrapping {1, {2, 3}, 4} [5:183:2196] 2025-06-25T14:32:04.067251Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {1, {2, 3}, 4}, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition {1, {2, 3}, 4} generation 0 [5:183:2196] Got cmd write: CmdDeleteRange { Range { From: "M0000000004" IncludeFrom: true To: "M0000000005" IncludeTo: false } } CmdDeleteRange { Range { From: "D0000000004" IncludeFrom: true To: "D0000000005" IncludeTo: false } } CmdDeleteRange { Range { From: "X0000000004" IncludeFrom: true To: "X0000000005" IncludeTo: false } } CmdDeleteRange { Range { From: "J0000000004" IncludeFrom: true To: "J0000000005" IncludeTo: false } } CmdDeleteRange { Range { From: "K0000000004" IncludeFrom: true To: "K0000000005" IncludeTo: false } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sequence/unittest >> TSequence::AlterTableSetDefaultFromSequence [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:31:51.620639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:31:51.620731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:31:51.620774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:31:51.620817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:31:51.620865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:31:51.620888Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:31:51.620957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:31:51.621021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:31:51.621708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:31:51.621986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:31:51.752922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:31:51.752996Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:51.778390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:31:51.778604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:31:51.778780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:31:51.797804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:31:51.798096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:31:51.798793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:51.799024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:31:51.802341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:31:51.802554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:31:51.803699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:31:51.803770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:31:51.803942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:31:51.804006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:31:51.804064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:31:51.804167Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:31:51.825456Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:31:52.233880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:31:52.234109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:52.234328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:31:52.234405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:31:52.239694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:31:52.239816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:31:52.247281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:52.247544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:31:52.247810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:52.247883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:31:52.248034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:31:52.248108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:31:52.262465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:52.262552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:31:52.262603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for 
txid 1:0 3 -> 128 2025-06-25T14:31:52.270429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:52.270504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:31:52.270571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:31:52.270656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:31:52.285197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:31:52.287728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:31:52.287927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:31:52.296774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:31:52.297002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:31:52.297061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:31:52.297410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:31:52.297475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:31:52.297663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:31:52.297765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:31:52.305553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:31:52.305618Z node 1 :FLAT_TX_SCHEMESHARD ... 
409549 Status: COMPLETE TxId: 114 Step: 5000014 OrderId: 114 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1273 } } CommitVersion { Step: 5000014 TxId: 114 } 2025-06-25T14:32:04.734397Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:32:04.735193Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [7:1051:2986], Recipient [7:134:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:32:04.735232Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:32:04.735271Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:32:04.735492Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269551620, Sender [7:989:2932], Recipient [7:134:2156]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 989 RawX2: 30064774004 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-06-25T14:32:04.735530Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4987: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-25T14:32:04.735636Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 989 RawX2: 30064774004 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-06-25T14:32:04.735691Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 114, tablet: 72075186233409549, partId: 0 2025-06-25T14:32:04.735832Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 114:0, at schemeshard: 72057594046678944, message: Source { RawX1: 989 RawX2: 30064774004 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-06-25T14:32:04.735900Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 114:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:32:04.736037Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 114:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 989 RawX2: 30064774004 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-06-25T14:32:04.736116Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 114:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:32:04.736172Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 114:0, at schemeshard: 72057594046678944 2025-06-25T14:32:04.736235Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 114:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-25T14:32:04.736283Z node 7 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 114:0 129 -> 240 2025-06-25T14:32:04.736524Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:32:04.738999Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:32:04.739599Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 114 2025-06-25T14:32:04.739651Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:32:04.745650Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 114 2025-06-25T14:32:04.745711Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:32:04.745838Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 114:0, at schemeshard: 72057594046678944 2025-06-25T14:32:04.745893Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:32:04.746047Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 114:0, at schemeshard: 72057594046678944 2025-06-25T14:32:04.746106Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:32:04.746149Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 114:0 2025-06-25T14:32:04.746284Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [7:989:2932] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 114 at schemeshard: 72057594046678944 2025-06-25T14:32:04.746723Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [7:134:2156], Recipient [7:134:2156]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:32:04.746770Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:32:04.746821Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 114:0, at schemeshard: 72057594046678944 2025-06-25T14:32:04.746863Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 114:0 ProgressState 2025-06-25T14:32:04.746993Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:32:04.747038Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#114:0 progress is 1/1 2025-06-25T14:32:04.747086Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2025-06-25T14:32:04.747138Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#114:0 progress is 1/1 
2025-06-25T14:32:04.747185Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2025-06-25T14:32:04.747240Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 114, ready parts: 1/1, is published: true 2025-06-25T14:32:04.747312Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:389:2355] message: TxId: 114 2025-06-25T14:32:04.747370Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2025-06-25T14:32:04.747436Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 114:0 2025-06-25T14:32:04.747478Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 114:0 2025-06-25T14:32:04.747614Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-25T14:32:04.753039Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:32:04.753199Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [7:389:2355] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 114 at schemeshard: 72057594046678944 2025-06-25T14:32:04.753423Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 114: got EvNotifyTxCompletionResult 2025-06-25T14:32:04.753467Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 114: satisfy waiter [7:1019:2954] 2025-06-25T14:32:04.753693Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [7:1021:2956], Recipient [7:134:2156]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:32:04.753730Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:32:04.753755Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 114 TestModificationResults wait txId: 115 2025-06-25T14:32:04.754820Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [7:1060:2995], Recipient [7:134:2156]: {TEvModifySchemeTransaction txid# 115 TabletId# 72057594046678944} 2025-06-25T14:32:04.754890Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:32:04.761740Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table3" Columns { Name: "value" DefaultFromSequence: "/MyRoot/seq1" } } } TxId: 115 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:32:04.762028Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:506: TAlterTable Propose, path: /MyRoot/Table3, pathId: , opId: 115:0, at schemeshard: 72057594046678944 2025-06-25T14:32:04.762460Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:135: IgniteOperation, opId: 115:1, propose status:StatusInvalidParameter, reason: Column 'value' is of type Bool but default expression is of type Int64, at schemeshard: 72057594046678944 2025-06-25T14:32:04.765770Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:32:04.768496Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 115, response: Status: StatusInvalidParameter Reason: "Column \'value\' is of type Bool but default expression is of type Int64" TxId: 115 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:32:04.768825Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 115, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Column 'value' is of type Bool but default expression is of type Int64, operation: ALTER TABLE, path: /MyRoot/Table3 2025-06-25T14:32:04.768885Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 115, wait until txId: 115 >> YdbTableSplit::SplitByLoadWithUpdates >> KqpIndexes::SecondaryIndexOrderBy2 [GOOD] >> KqpIndexes::SecondaryIndexReplace+UseSink >> YdbTableSplit::MergeByNoLoadAfterSplit |78.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |78.2%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |78.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::AfterRestart_2 [GOOD] Test command err: 2025-06-25T14:31:23.354281Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894531217979051:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:23.354317Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:31:23.391944Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894531205550930:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:23.392034Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:31:23.607335Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b61/r3tmp/tmphMWZPi/pdisk_1.dat 2025-06-25T14:31:23.643038Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:31:23.879162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:23.879240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:23.886937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting 
-> Connected 2025-06-25T14:31:23.888425Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11483, node 1 2025-06-25T14:31:23.983276Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b61/r3tmp/yandexXR4U1a.tmp 2025-06-25T14:31:23.983317Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b61/r3tmp/yandexXR4U1a.tmp 2025-06-25T14:31:23.983546Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b61/r3tmp/yandexXR4U1a.tmp 2025-06-25T14:31:23.983711Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:31:23.989970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:23.990070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:23.992886Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:31:23.993748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:24.041550Z INFO: TTestServer started on Port 19515 GrpcPort 11483 TClient is connected to server localhost:19515 PQClient connected to localhost:11483 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:24.320194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:31:24.364556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:31:24.371665Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:24.413601Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-25T14:31:26.372951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894544102881989:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:26.372951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894544102881981:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:26.373148Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:26.376845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:26.396657Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894544102881996:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:31:26.703757Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894544102882083:2747] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:26.726701Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894544102882096:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:31:26.726931Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NmZkYzIyZTgtYWJmODhmZmQtY2MzZDkwZDYtNzc1YTMwNjY=, ActorId: [1:7519894544102881979:2298], ActorState: ExecuteState, TraceId: 01jykr0cx3c50hexdqk2db04p3, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:31:26.727488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:26.728956Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:31:26.729529Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894544090453189:2279], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:31:26.729745Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=OWY5MzViZjMtZjBlZjQxMzItZjhkYWE1MmUtYWE2OTVkOWM=, ActorId: [2:7519894544090453150:2273], ActorState: ExecuteState, TraceId: 01jykr0cywbfgm052mx20e586k, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:31:26.730087Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:31:26.803094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:26.882656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster ... 
ce_5" 2025-06-25T14:31:56.940940Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:139: StateOwnershipFast, received event# 271188558, Sender [3:7519894645068182507:3317], Recipient [3:7519894670837987365:3931]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-25T14:31:56.940970Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:88: TPartitionChooser [3:7519894670837987365:3931] (SourceId=A_Source_5, PreferedPartition=(NULL)) InitTable: SourceId=A_Source_5 TopicsAreFirstClassCitizen=1 UseSrcIdMetaMappingInFirstClass=1 2025-06-25T14:31:56.941038Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [3:7519894670837987365:3931], Recipient [3:7519894645068182507:3317]: NActors::TEvents::TEvPoison 2025-06-25T14:31:56.941621Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:101: StateInitTable, received event# 277020685, Sender [3:7519894597823540145:2069], Recipient [3:7519894670837987365:3931]: NKikimr::NMetadata::NProvider::TEvManagerPrepared 2025-06-25T14:31:56.941649Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7519894670837987365:3931] (SourceId=A_Source_5, PreferedPartition=(NULL)) StartKqpSession 2025-06-25T14:31:56.944915Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:132: StateCreateKqpSession, received event# 271646728, Sender [3:7519894597823540167:2085], Recipient [3:7519894670837987365:3931]: NKikimrKqp.TEvCreateSessionResponse Error: "" Response { SessionId: "ydb://session/3?node_id=3&id=NjU3YTdlOGUtMTVhMjMyZDQtOGJlZWUzMjktODE2MmUyN2U=" NodeId: 3 } YdbStatus: SUCCESS ResourceExhausted: false 2025-06-25T14:31:56.944947Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7519894670837987365:3931] (SourceId=A_Source_5, PreferedPartition=(NULL)) Select from the table 2025-06-25T14:31:57.429229Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:163: StateSelect, received event# 271646721, Sender [3:7519894597823540167:2085], Recipient [3:7519894670837987365:3931]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=3&id=NjU3YTdlOGUtMTVhMjMyZDQtOGJlZWUzMjktODE2MmUyN2U=" PreparedQuery: "f87a249c-83a2d799-26dac4f9-52fae783" QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { id: "01jykr1b551934khatygesm42v" } YdbResults { columns { name: "Partition" type { optional_type { item { type_id: UINT32 } } } } columns { name: "CreateTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "AccessTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "SeqNo" type { optional_type { item { type_id: UINT64 } } } } rows { items { uint32_value: 0 } items { uint64_value: 1750861916736 } items { uint64_value: 1750861916736 } items { uint64_value: 13 } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 262 2025-06-25T14:31:57.429401Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:151: TPartitionChooser [3:7519894670837987365:3931] (SourceId=A_Source_5, PreferedPartition=(NULL)) Selected from table PartitionId=0 SeqNo=13 
2025-06-25T14:31:57.429422Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:209: TPartitionChooser [3:7519894670837987365:3931] (SourceId=A_Source_5, PreferedPartition=(NULL)) OnPartitionChosen 2025-06-25T14:31:57.429550Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [3:7519894675132954734:3931], Recipient [3:7519894645068182507:3317]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [3:7519894670837987365:3931] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-25T14:31:57.429604Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [3:7519894670837987365:3931], Recipient [3:7519894645068182507:3317]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 2025-06-25T14:31:57.429675Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:240: StateCheckPartition, received event# 271188558, Sender [3:7519894645068182507:3317], Recipient [3:7519894670837987365:3931]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-25T14:31:57.429703Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7519894670837987365:3931] (SourceId=A_Source_5, PreferedPartition=(NULL)) Update the table 2025-06-25T14:31:57.430103Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [3:7519894670837987365:3931], Recipient [3:7519894645068182507:3317]: NActors::TEvents::TEvPoison Received TEvChooseResult: 1 Run query: --!syntax_v1 SELECT Partition, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash = 11131928866524144434 AND Topic = "Root" AND ProducerId = "00415F536F757263655F35" 2025-06-25T14:31:57.543192Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:212: StateUpdate, received event# 271646721, Sender [3:7519894597823540167:2085], Recipient [3:7519894670837987365:3931]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=3&id=NjU3YTdlOGUtMTVhMjMyZDQtOGJlZWUzMjktODE2MmUyN2U=" PreparedQuery: "e62f64cd-25ac2692-6a8b8e51-3dd353dd" QueryParameters { Name: "$AccessTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$CreateTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Partition" Type { Kind: Data Data { Scheme: 2 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SeqNo" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 60 2025-06-25T14:31:57.543256Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7519894670837987365:3931] (SourceId=A_Source_5, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-25T14:31:57.543297Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519894670837987365:3931] (SourceId=A_Source_5, PreferedPartition=(NULL)) ReplyResult: Partition=1, SeqNo=13 2025-06-25T14:31:57.543331Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7519894670837987365:3931] 
(SourceId=A_Source_5, PreferedPartition=(NULL)) Start idle 2025-06-25T14:31:57.801592Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710717. Ctx: { TraceId: 01jykr1bd6eb1g8qk5qfx0xqwc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZGFlOTFhZC1hZGY2OGI4Yy05OWI1MWIxMi1hMGMyNWMwNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:31:58.806644Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [3:7519894679427922166:2699] TxId: 281474976710719. Ctx: { TraceId: 01jykr1caaeqw61fc3rp5fp60d, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Y2JkZTFmZDctY2JmY2U4NjMtOWIyOTdjMTAtOWVkY2FlZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 4 2025-06-25T14:31:58.806801Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519894679427922172:2699], TxId: 281474976710719, task: 2. Ctx: { SessionId : ydb://session/3?node_id=3&id=Y2JkZTFmZDctY2JmY2U4NjMtOWIyOTdjMTAtOWVkY2FlZWM=. TraceId : 01jykr1caaeqw61fc3rp5fp60d. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:7519894679427922166:2699], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2025-06-25T14:31:59.810243Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=Y2JkZTFmZDctY2JmY2U4NjMtOWIyOTdjMTAtOWVkY2FlZWM=, ActorId: [3:7519894679427922141:2699], ActorState: ExecuteState, TraceId: 01jykr1caaeqw61fc3rp5fp60d, Create QueryResponse for error on request, msg: 2025-06-25T14:31:59.848654Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Kikimr cluster or one of its subsystems was unavailable." issue_code: 2005 severity: 1 issues { message: "Failed to send EvStartKqpTasksRequest because node is unavailable: 4" severity: 1 } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { id: "01jykr1chz8ybvaj397d8ayexz" } } } } ; 2025-06-25T14:31:59.849164Z node 3 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Kikimr cluster or one of its subsystems was unavailable., code: 2005
: Error: Failed to send EvStartKqpTasksRequest because node is unavailable: 4 2025-06-25T14:32:02.888937Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:02.889060Z node 5 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:32:02.935237Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [5:183:2196] 2025-06-25T14:32:02.938408Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:32:02.000000Z 2025-06-25T14:32:02.938528Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [5:183:2196] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\320\375\202\274\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\316\255\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\004\020\000\030\000\"\007session(\0000\000@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\004\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000session" StorageChannel: INLINE } 2025-06-25T14:32:04.386067Z node 6 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:04.386180Z node 6 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:32:04.467643Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [6:181:2194] 2025-06-25T14:32:04.471864Z node 6 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-25T14:32:04.000000Z 2025-06-25T14:32:04.471980Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [6:181:2194] >> TKqpScanData::ArrowToUnboxedValueConverter >> TKqpScanData::EmptyColumnsAndNonEmptyArrowBatch >> YdbTableSplit::SplitByLoadWithDeletes >> TopicAutoscaling::BalancingAfterSplit_sessionsWithPartition [GOOD] >> TPersQueueMirrorer::ValidStartStream >> TKqpScanData::EmptyColumns >> DataShardVolatile::DistributedWriteLaterSnapshotBlockedThenAbort [GOOD] >> DataShardVolatile::DistributedWriteAsymmetricExecute >> TKqpScanData::ArrowToUnboxedValueConverter [GOOD] >> TKqpScanData::EmptyColumnsAndNonEmptyArrowBatch [GOOD] >> TKqpScanData::EmptyColumns [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::InitialScanAndResolvedTimestamps [GOOD] Test command err: 2025-06-25T14:28:18.717605Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893733706331276:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:18.717650Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012f7/r3tmp/tmpyOzmDQ/pdisk_1.dat 2025-06-25T14:28:19.235245Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:19.256693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:19.256791Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:19.258499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4326, node 1 2025-06-25T14:28:19.296996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:19.297023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:19.297030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:19.297144Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:19.360674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:19.389776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:19.425679Z node 1 :TX_DATASHARD INFO: 
datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:7519893738001299135:2268] 2025-06-25T14:28:19.425916Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:28:19.450119Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:28:19.450196Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:28:19.452421Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:28:19.452510Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:28:19.452546Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:28:19.452949Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:28:19.453047Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:28:19.453094Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:7519893738001299149:2268] in generation 1 2025-06-25T14:28:19.454456Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:28:19.508676Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:28:19.508821Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:28:19.508881Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:7519893738001299153:2269] 2025-06-25T14:28:19.508904Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:28:19.508914Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:28:19.508939Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:19.509084Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:28:19.509143Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:28:19.509158Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:19.509169Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:19.509182Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:28:19.509194Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:19.510340Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7519893738001299132:2299], serverId# [1:7519893738001299152:2308], sessionId# [0:0:0] 2025-06-25T14:28:19.510436Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 
2025-06-25T14:28:19.510665Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:28:19.510744Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:28:19.512710Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:19.512925Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:28:19.512981Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:28:19.514842Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7519893738001299167:2316], serverId# [1:7519893738001299168:2317], sessionId# [0:0:0] 2025-06-25T14:28:19.519383Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1750861699559 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750861699559 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:28:19.519408Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:19.519530Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:19.519584Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:19.519596Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:28:19.519616Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1750861699559:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:28:19.519867Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1750861699559:281474976715657 keys extracted: 0 2025-06-25T14:28:19.520054Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:28:19.520156Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:19.520194Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:28:19.522846Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:28:19.523251Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:19.525677Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:28:19.525702Z 
node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:19.526142Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1750861699566 2025-06-25T14:28:19.526194Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1750861699559} 2025-06-25T14:28:19.526228Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:19.526260Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:19.526279Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:28:19.526292Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:28:19.526356Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750861699559 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7519893738001298879:2151], exec latency: 3 ms, propose latency: 6 ms 2025-06-25T14:28:19.526383Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:28:19.526410Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:19.527838Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:7519893738001299153:2269][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-06-25T14:28:19.533910Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:28:19.533963Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:28:19.569271Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, Loc ... 
pp:42: [CdcStreamHeartbeat] Emit change records: edge# v7500/18446744073709551615, at tablet# 72075186224037888 2025-06-25T14:32:02.634569Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 4 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-25T14:32:02.635447Z node 29 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:42: [CdcStreamHeartbeat] Emit change records: edge# v7500/18446744073709551615, at tablet# 72075186224037888 2025-06-25T14:32:02.638719Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-06-25T14:32:02.638869Z node 29 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:32:02.650323Z node 29 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:78: [CdcStreamHeartbeat] Enqueue 1 change record(s): at tablet# 72075186224037888 2025-06-25T14:32:02.650542Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 } 2025-06-25T14:32:02.650695Z node 29 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:32:02.650841Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:3812: Waiting for PlanStep# 9000 from mediator time cast 2025-06-25T14:32:02.650969Z node 29 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:78: [CdcStreamHeartbeat] Enqueue 0 change record(s): at tablet# 72075186224037888 2025-06-25T14:32:02.651070Z node 29 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:32:02.651343Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:71: [ChangeSender][72075186224037888:1][29:645:2541] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] BodySize: 0 }] } 2025-06-25T14:32:02.651495Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:628: [CdcChangeSenderMain][72075186224037888:1][29:934:2732] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] BodySize: 0 }] } 2025-06-25T14:32:02.651794Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037888 2025-06-25T14:32:02.651992Z node 29 :TX_DATASHARD DEBUG: datashard_change_sending.cpp:235: Send 1 change records: to# [29:934:2732], at tablet# 72075186224037888 2025-06-25T14:32:02.652052Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 1, forgotten# 0, left# 0, at tablet# 72075186224037888 2025-06-25T14:32:02.652205Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:633: [CdcChangeSenderMain][72075186224037888:1][29:934:2732] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 4 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, 
LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:32:02.652768Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:111: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][29:1016:2732] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 4 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:32:02.653236Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-25T14:32:02.653306Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-25T14:32:02.653538Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'streamImpl' partition: 0 messageNo: 2 requestId: cookie: 2 2025-06-25T14:32:02.653679Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-25T14:32:02.653711Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-25T14:32:02.653762Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72075186224037889] got client message topic: Table/Stream/streamImpl partition: 0 SourceId: '\00072075186224037888' SeqNo: 4 partNo : 0 messageNo: 3 size 26 offset: -1 2025-06-25T14:32:02.653971Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1293: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 process heartbeat sourceId '\00072075186224037888' version v6000/0 2025-06-25T14:32:02.654156Z node 29 :PERSQUEUE INFO: partition_write.cpp:1797: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 emit heartbeat v6000/0 2025-06-25T14:32:02.660620Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob processing sourceId '\00072075186224037889' seqNo 0 partNo 0 2025-06-25T14:32:02.661799Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob complete sourceId '\00072075186224037889' seqNo 0 partNo 0 FormedBlobsCount 0 NewHead: Offset 3 PartNo 0 PackedSize 107 count 1 nextOffset 4 batches 1 2025-06-25T14:32:02.662387Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Add new write blob: topic 'Table/Stream/streamImpl' partition 0 compactOffset 3,1 HeadOffset 3 endOffset 3 curOffset 4 d0000000000_00000000000000000003_00000_0000000001_00000? size 93 WTime 7451 2025-06-25T14:32:02.662610Z node 29 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:32:02.662709Z node 29 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 3 partNo 0 count 1 size 93 2025-06-25T14:32:02.663615Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 3 count 1 size 93 actorID [29:873:2686] 2025-06-25T14:32:02.663757Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. 
Tablet '72075186224037889' partition 0 offset 3 partno 0 count 1 parts 0 suffix '63' size 93 2025-06-25T14:32:02.676682Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 44 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:32:02.676881Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:32:02.676960Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream/streamImpl', Partition: 0, SeqNo: 4, partNo: 0, Offset: 3 is stored on disk 2025-06-25T14:32:02.677185Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037889, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=452, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:32:02.677270Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'streamImpl' partition: 0 messageNo: 3 requestId: cookie: 2 2025-06-25T14:32:02.677533Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][29:1016:2732] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 4 Offset: 3 WriteTimestampMS: 7451 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 2 } } } 2025-06-25T14:32:02.677627Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][29:934:2732] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-25T14:32:02.677824Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037888 2025-06-25T14:32:02.677858Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 4, at tablet: 72075186224037888 2025-06-25T14:32:02.693020Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 0, at tablet# 72075186224037888 >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-06-25T14:32:02.853186Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-25T14:32:02.853257Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-25T14:32:02.853390Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 4 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 4 max time lag 0ms effective offset 0 2025-06-25T14:32:02.855888Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 4 added 2 blobs, size 452 count 4 last offset 3, current partition end offset: 4 2025-06-25T14:32:02.856063Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 4. Send blob request. 
2025-06-25T14:32:02.856335Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 3 parts_count 0 source 1 size 359 accessed 1 times before, last time 1970-01-01T00:00:06.000000Z 2025-06-25T14:32:02.856432Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 3 partno 0 count 1 parts_count 0 source 1 size 93 accessed 0 times before, last time 1970-01-01T00:00:07.000000Z 2025-06-25T14:32:02.856527Z node 29 :PERSQUEUE DEBUG: read.h:121: Reading cookie 4. All 2 blobs are from cache. 2025-06-25T14:32:02.856682Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 2 blobs 2025-06-25T14:32:02.857407Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 3 count 3 size 339 from pos 0 cbcount 3 2025-06-25T14:32:02.857623Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 1 size 75 from pos 0 cbcount 1 2025-06-25T14:32:02.858500Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 0 partno 0 count 3 parts 0 suffix '63' 2025-06-25T14:32:02.858591Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 3 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:32:02.858738Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >> TComputeScheduler::ResourceWeight [GOOD] |78.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::ArrowToUnboxedValueConverter [GOOD] |78.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::EmptyColumnsAndNonEmptyArrowBatch [GOOD] >> KqpUniqueIndex::InsertFkPkOverlap [GOOD] >> KqpUniqueIndex::InsertNullInComplexFk |78.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::EmptyColumns [GOOD] |78.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |78.2%| [LD] {RESULT} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |78.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut >> TKqpScanData::DifferentNumberOfInputAndResultColumns >> TKqpScanData::DifferentNumberOfInputAndResultColumns [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::ResourceWeight [GOOD] Test command err: 510 500 1510 1500 990 1000 1000 1000 >> ColumnBuildTest::CancelBuild [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PQv1 [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_AutoscaleAwareSDK >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_AutoscaleAwareSDK |78.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |78.2%| [LD] {RESULT} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace >> TKqpScanData::UnboxedValueSize |78.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |78.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::DifferentNumberOfInputAndResultColumns [GOOD] >> TKqpScanData::UnboxedValueSize [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> 
ColumnBuildTest::CancelBuild [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:32:04.411812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:32:04.411917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:32:04.411954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:32:04.411987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:32:04.412034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:32:04.412065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:32:04.412119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:32:04.412181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:32:04.412964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:32:04.413306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:32:04.492618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:32:04.492715Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:04.517034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:32:04.517484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:32:04.517641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:32:04.525872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:32:04.526265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:32:04.526892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:32:04.527202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot 
DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:32:04.542838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:32:04.543085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:32:04.544247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:32:04.544391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:32:04.544542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:32:04.544591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:32:04.544632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:32:04.544714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:32:04.558433Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:32:04.710884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:32:04.711102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:32:04.711317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:32:04.711361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:32:04.711577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:32:04.711656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:32:04.717462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 
PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:32:04.717690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:32:04.717913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:32:04.717975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:32:04.718012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:32:04.718082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:32:04.726455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:32:04.726533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:32:04.726575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:32:04.739129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:32:04.739213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:32:04.739292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:32:04.739366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:32:04.743206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:32:04.753404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:32:04.753764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:32:04.754856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:32:04.755012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 
AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:32:04.755068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:32:04.755517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:32:04.755596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:32:04.755803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:32:04.755885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:32:04.762457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:32:04.762539Z node 1 :FLAT_TX_SCHEMESHARD ... 5T14:32:09.302110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710761:0 HandleReply TEvOperationPlan: step# 5000007 2025-06-25T14:32:09.302153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710761:0 128 -> 240 2025-06-25T14:32:09.304025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-25T14:32:09.304089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710761:0 ProgressState 2025-06-25T14:32:09.304165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-25T14:32:09.304195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-25T14:32:09.304226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-25T14:32:09.304263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-25T14:32:09.304292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: true 2025-06-25T14:32:09.304368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:126:2150] message: TxId: 281474976710761 2025-06-25T14:32:09.304409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-25T14:32:09.304438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-25T14:32:09.304503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710761:0 2025-06-25T14:32:09.304592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 12 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-25T14:32:09.307857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-25T14:32:09.307924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710761 2025-06-25T14:32:09.308000Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710761 2025-06-25T14:32:09.308084Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancellation_Unlocking, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1175:3026], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710761 2025-06-25T14:32:09.310319Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking 2025-06-25T14:32:09.310408Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancellation_Unlocking, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1175:3026], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:32:09.310461Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Cancellation_Unlocking to Cancelled 2025-06-25T14:32:09.312167Z node 1 :BUILD_INDEX 
NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled 2025-06-25T14:32:09.312257Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancelled, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1175:3026], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:32:09.312304Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:333: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-25T14:32:09.312485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:32:09.312538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:1199:3050] TestWaitNotification: OK eventTxId 102 2025-06-25T14:32:09.314852Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-25T14:32:09.315140Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } 2025-06-25T14:32:09.317615Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:32:09.317856Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 258us result status StatusSuccess 2025-06-25T14:32:09.318303Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: 
Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "DefaultValue" Type: "Uint64" TypeId: 4 Id: 4 NotNull: false DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 10 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> RetryPolicy::TWriteSession_TestBrokenPolicy [GOOD] >> RetryPolicy::TWriteSession_RetryOnTargetCluster |78.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::UnboxedValueSize [GOOD] >> TBlobStorageWardenTest::TestFilterBadSerials [GOOD] >> TBlobStorageWardenTest::TestGivenPDiskFormatedWithGuid1AndCreatedWithGuid2WhenYardInitThenError >> TBlobStorageWardenTest::TestUnmonitoredEventsThenNoMonitorings >> TBlobStorageWardenTest::TestSendUsefulMonitoring >> BindQueue::Basic >> DataShardVolatile::TwoAppendsMustBeVolatile-UseSink [GOOD] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure+UseSink >> KikimrIcGateway::TestLoadAwsSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestLoadDataSourceProperties >> 
TBlobStorageWardenTest::TestDeleteStoragePool >> KqpIndexes::CheckUpsertNonEquatableType-NotNull [GOOD] >> KqpIndexes::CreateTableWithExplicitAsyncIndexSQL >> TBlobStorageWardenTest::TestLimitedKeylessGroupThenNoMonitoring >> TBlobStorageWardenTest::TestGivenPDiskFormatedWithGuid1AndCreatedWithGuid2WhenYardInitThenError [GOOD] >> TBlobStorageWardenTest::TestSendToInvalidGroupId >> TBlobStorageWardenTest::TestUnmonitoredEventsThenNoMonitorings [GOOD] >> Cdc::ResolvedTimestampForDisplacedUpsert [GOOD] |78.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1_UNIQUE_SYNC-pk_types17-all_types17-index17-DyNumber-UNIQUE-SYNC] [FAIL] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_PQv1 [GOOD] >> TopicAutoscaling::WithDir_PartitionSplit_AutosplitByLoad ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestGivenPDiskFormatedWithGuid1AndCreatedWithGuid2WhenYardInitThenError [GOOD] Test command err: 2025-06-25T14:32:12.361892Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:12.362104Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:12.369543Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:12.378000Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:12.378143Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:12.382102Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e89/r3tmp/tmpZoEjmz/pdisk_1.dat Formatting PDisk with guid1 14625684334740716571 Creating PDisk with guid2 16884904156087623658 Creating pdisk 2025-06-25T14:32:13.260083Z node 1 :BS_PDISK ERROR: {BSP01@blobstorage_pdisk_actor.cpp:590} PDiskId# 1001 Can't start due to a guid error expected# 16884904156087623658 on-disk# 14625684334740716571 PDiskId# 1001 2025-06-25T14:32:13.283873Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [abc2fc901918ac71] bootstrap ActorId# [1:484:2461] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:347:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:13.284004Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [abc2fc901918ac71] Id# 
[72057594037932033:2:8:0:0:347:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.284035Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [abc2fc901918ac71] Id# [72057594037932033:2:8:0:0:347:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.284053Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [abc2fc901918ac71] Id# [72057594037932033:2:8:0:0:347:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.284070Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [abc2fc901918ac71] Id# [72057594037932033:2:8:0:0:347:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.284087Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [abc2fc901918ac71] Id# [72057594037932033:2:8:0:0:347:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.284105Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [abc2fc901918ac71] Id# [72057594037932033:2:8:0:0:347:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.284203Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [abc2fc901918ac71] restore Id# [72057594037932033:2:8:0:0:347:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:13.284269Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [abc2fc901918ac71] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:347:1] Marker# BPG33 2025-06-25T14:32:13.284331Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [abc2fc901918ac71] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:347:1] Marker# BPG32 2025-06-25T14:32:13.284369Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [abc2fc901918ac71] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:347:2] Marker# BPG33 2025-06-25T14:32:13.284394Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [abc2fc901918ac71] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:347:2] Marker# BPG32 2025-06-25T14:32:13.284422Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [abc2fc901918ac71] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:347:3] Marker# BPG33 2025-06-25T14:32:13.284448Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [abc2fc901918ac71] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:347:3] Marker# BPG32 2025-06-25T14:32:13.284573Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:47:2091] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:347:3] FDS# 347 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:13.284631Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:40:2084] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:347:2] FDS# 347 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:13.284663Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:61:2105] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:347:1] FDS# 347 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:13.287746Z node 1 :BS_PROXY_PUT DEBUG: 
dsproxy_put.cpp:260: [abc2fc901918ac71] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:347:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 82732 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-25T14:32:13.287948Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [abc2fc901918ac71] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:347:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 82732 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-25T14:32:13.288028Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [abc2fc901918ac71] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:347:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 82732 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-25T14:32:13.288105Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [abc2fc901918ac71] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:347:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-25T14:32:13.288165Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [abc2fc901918ac71] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:347:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T14:32:13.288373Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.932 sample PartId# [72057594037932033:2:8:0:0:347:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.933 sample PartId# [72057594037932033:2:8:0:0:347:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.933 sample PartId# [72057594037932033:2:8:0:0:347:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 4.065 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 4.226 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 4.304 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } Verify that PDisk returns ERROR YardInitResult: {EvYardInitResult Status# CORRUPTED ErrorReason# "PDisk is in StateError, reason# PDiskId# 1001 Can't start due to a guid error expected# 16884904156087623658 on-disk# 14625684334740716571" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 0 ownerRound# 0 OwnerWeight# 0 SlotSizeInUnits# 0 ChunkSize# 0 AppendBlockSize# 0 RecommendedReadSize# 0 SeekTimeUs# 0 ReadSpeedBps# 0 WriteSpeedBps# 0 ReadBlockSize# 0 WriteBlockSize# 0 BulkWriteBlockSize# 0 PrefetchSizeBytes# 0 GlueRequestDistanceBytes# 0}} OwnedChunks# {}} >> TBlobStorageWardenTest::TestSendUsefulMonitoring [GOOD] >> TBlobStorageWardenTest::TestHttpMonPage ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestUnmonitoredEventsThenNoMonitorings [GOOD] Test command err: 2025-06-25T14:32:12.656543Z node 1 :BS_SYNCLOG WARN: 
blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e96/r3tmp/tmp4is54N/pdisk_1.dat 2025-06-25T14:32:12.742920Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:12.743025Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:12.757636Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:12.757752Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:12.769221Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:13.840965Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [b6b2c6548553d7a5] bootstrap ActorId# [1:487:2461] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1333:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:13.841118Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:1333:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.841158Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:1333:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.841191Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:1333:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.841213Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:1333:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.841236Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:1333:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.841256Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:1333:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.841286Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [b6b2c6548553d7a5] restore Id# [72057594037932033:2:8:0:0:1333:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:13.841348Z node 1 :BS_PROXY_PUT DEBUG: 
dsproxy_strategy_base.cpp:324: [b6b2c6548553d7a5] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1333:1] Marker# BPG33 2025-06-25T14:32:13.841416Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [b6b2c6548553d7a5] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1333:1] Marker# BPG32 2025-06-25T14:32:13.841451Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [b6b2c6548553d7a5] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1333:2] Marker# BPG33 2025-06-25T14:32:13.841474Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [b6b2c6548553d7a5] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1333:2] Marker# BPG32 2025-06-25T14:32:13.841502Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [b6b2c6548553d7a5] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1333:3] Marker# BPG33 2025-06-25T14:32:13.841526Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [b6b2c6548553d7a5] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1333:3] Marker# BPG32 2025-06-25T14:32:13.841671Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:47:2091] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1333:3] FDS# 1333 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:13.841736Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:40:2084] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1333:2] FDS# 1333 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:13.841775Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:61:2105] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1333:1] FDS# 1333 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:13.844342Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [b6b2c6548553d7a5] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1333:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 90496 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-25T14:32:13.844529Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [b6b2c6548553d7a5] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1333:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90496 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-25T14:32:13.844630Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [b6b2c6548553d7a5] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1333:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 90496 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-25T14:32:13.844703Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [b6b2c6548553d7a5] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1333:0] Status# OK StatusFlags# { Valid } 
ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-25T14:32:13.844760Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [b6b2c6548553d7a5] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1333:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T14:32:13.844935Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.993 sample PartId# [72057594037932033:2:8:0:0:1333:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.993 sample PartId# [72057594037932033:2:8:0:0:1333:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.994 sample PartId# [72057594037932033:2:8:0:0:1333:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 3.585 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 3.732 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 3.832 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } 2025-06-25T14:32:13.964555Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [f913878b3da83702] bootstrap ActorId# [1:533:2498] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:9:0:0:224:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:13.964726Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [f913878b3da83702] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.964763Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [f913878b3da83702] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.964785Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [f913878b3da83702] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.964811Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [f913878b3da83702] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.964832Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [f913878b3da83702] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.964854Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [f913878b3da83702] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:13.964905Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [f913878b3da83702] restore Id# [72057594037932033:2:9:0:0:224:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:13.964971Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [f913878b3da83702] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:9:0:0:224:1] Marker# BPG33 2025-06-25T14:32:13.965008Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [f913878b3da83702] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:9:0:0:224:1] Marker# BPG32 2025-06-25T14:32:13.965048Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [f913878b3da83702] partPlacement record partSituation# 
ESituation::Unknown to# 1 blob Id# [72057594037932033:2:9:0:0:224:2] Marker# BPG33 2025-06-25T14:32:13.965079Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [f913878b3da83702] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:9:0:0:224:2] Marker# BPG32 2025-06-25T14:32:13.965105Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [f913878b3da83702] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:9:0:0:224:3] Marker# BPG33 2025-06-25T14:32:13.965125Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [f913878b3da83702] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:9:0:0:224:3] Marker# BPG32 2025-06-25T14:32:13.965301Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:40:2084] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:224:3] FDS# 224 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:13.965372Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:61:2105] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:224:2] FDS# 224 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:13.965410Z node 1 :BS_PROXY DEBUG: group_sessions.h:1 ... TEvVPut# {ID# [72057594037932033:2:10:0:0:238:3] FDS# 238 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:14.001756Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:54:2098] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:10:0:0:238:2] FDS# 238 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:14.001800Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:47:2091] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:10:0:0:238:1] FDS# 238 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:14.003436Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [d70ef3c23a1a2346] received {EvVPutResult Status# OK ID# [72057594037932033:2:10:0:0:238:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 11 } Cost# 81874 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 12 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-25T14:32:14.003603Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [d70ef3c23a1a2346] received {EvVPutResult Status# OK ID# [72057594037932033:2:10:0:0:238:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 81874 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:2:0] Marker# BPP01 2025-06-25T14:32:14.003682Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [d70ef3c23a1a2346] received {EvVPutResult Status# OK ID# [72057594037932033:2:10:0:0:238:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 81874 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-25T14:32:14.003749Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [d70ef3c23a1a2346] Result# TEvPutResult {Id# [72057594037932033:2:10:0:0:238:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} 
GroupId# 33554432 Marker# BPP12 2025-06-25T14:32:14.003817Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [d70ef3c23a1a2346] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:10:0:0:238:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T14:32:14.004047Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.998 sample PartId# [72057594037932033:2:10:0:0:238:3] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.999 sample PartId# [72057594037932033:2:10:0:0:238:2] QueryCount# 1 VDiskId# [2000000:1:0:2:0] NodeId# 1 } TEvVPut{ TimestampMs# 1 sample PartId# [72057594037932033:2:10:0:0:238:1] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 2.667 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 2.8 VDiskId# [2000000:1:0:2:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 2.88 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } 2025-06-25T14:32:14.005444Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T14:32:14.005487Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T14:32:14.007264Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:534:2499] Create Queue# [1:538:2502] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.007384Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:534:2499] Create Queue# [1:539:2503] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.007472Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:534:2499] Create Queue# [1:540:2504] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.007564Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:534:2499] Create Queue# [1:541:2505] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.007659Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:534:2499] Create Queue# [1:542:2506] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.007759Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:534:2499] Create Queue# [1:543:2507] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.007853Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:534:2499] Create Queue# [1:544:2508] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.007878Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-06-25T14:32:14.012623Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.012741Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 
WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.012817Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.012876Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.012937Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.013005Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.013048Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.013087Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:194: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-25T14:32:14.013122Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:80: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-25T14:32:14.013248Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [91379e686f748e92] bootstrap ActorId# [1:545:2509] Group# 2181038082 TabletId# 1234 Generation# 1 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-25T14:32:14.013294Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [91379e686f748e92] Sending TEvVBlock Tablet# 1234 Generation# 1 vdiskId# [82000002:1:0:0:0] node# 1 Marker# DSPB03 2025-06-25T14:32:14.013468Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:538:2502] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 1 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 17232277661776484861 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-25T14:32:14.020735Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [91379e686f748e92] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 1 Marker# DSPB01 2025-06-25T14:32:14.020809Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [91379e686f748e92] Result# TEvBlockResult {Status# OK} Marker# DSPB04 2025-06-25T14:32:14.021112Z node 1 :BS_PROXY 
DEBUG: group_sessions.h:165: Send to queueActorId# [1:538:2502] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 2025-06-25T14:32:14.022197Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:301: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 0 Marker# DSP57 initialize full monitoring 2025-06-25T14:32:14.022833Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [efc53170c63234c6] bootstrap ActorId# [1:547:2511] Group# 2181038082 TabletId# 1234 Generation# 3 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-25T14:32:14.022880Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [efc53170c63234c6] Sending TEvVBlock Tablet# 1234 Generation# 3 vdiskId# [82000002:1:0:0:0] node# 1 Marker# DSPB03 2025-06-25T14:32:14.023020Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:538:2502] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 3 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 10680916696279155463 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-25T14:32:14.023621Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [efc53170c63234c6] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 1 Marker# DSPB01 2025-06-25T14:32:14.023670Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [efc53170c63234c6] Result# TEvBlockResult {Status# OK} Marker# DSPB04 2025-06-25T14:32:14.023970Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [c85e1a21dcb31b54] bootstrap ActorId# [1:548:2512] Group# 2181038082 TabletId# 1234 Generation# 4 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-25T14:32:14.024080Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [c85e1a21dcb31b54] Sending TEvVBlock Tablet# 1234 Generation# 4 vdiskId# [82000002:1:0:0:0] node# 1 Marker# DSPB03 2025-06-25T14:32:14.024192Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:538:2502] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 4 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 17343775395054571879 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-25T14:32:14.024855Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [c85e1a21dcb31b54] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 1 Marker# DSPB01 2025-06-25T14:32:14.024924Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [c85e1a21dcb31b54] Result# TEvBlockResult {Status# OK} Marker# DSPB04 |78.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |78.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source >> TFetchRequestTests::CheckAccess [GOOD] >> PQCountersSimple::PartitionWriteQuota |78.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestSendUsefulMonitoring [GOOD] Test command err: 2025-06-25T14:32:13.192907Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# 
[2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:13.194776Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:13.196494Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:13.199625Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:13.200143Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e7d/r3tmp/tmpYjpgDX/pdisk_1.dat 2025-06-25T14:32:14.409209Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [3ca1a99c83a6f037] bootstrap ActorId# [1:549:2461] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1340:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:14.409364Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1340:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.409405Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1340:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.409432Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1340:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.409457Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1340:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.409480Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1340:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.409501Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1340:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.409537Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [3ca1a99c83a6f037] restore Id# [72057594037932033:2:8:0:0:1340:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:14.409598Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [3ca1a99c83a6f037] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1340:1] Marker# BPG33 2025-06-25T14:32:14.409638Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [3ca1a99c83a6f037] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1340:1] Marker# BPG32 2025-06-25T14:32:14.409676Z node 1 :BS_PROXY_PUT DEBUG: 
dsproxy_strategy_base.cpp:324: [3ca1a99c83a6f037] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1340:2] Marker# BPG33 2025-06-25T14:32:14.409699Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [3ca1a99c83a6f037] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1340:2] Marker# BPG32 2025-06-25T14:32:14.409728Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [3ca1a99c83a6f037] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1340:3] Marker# BPG33 2025-06-25T14:32:14.409751Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [3ca1a99c83a6f037] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1340:3] Marker# BPG32 2025-06-25T14:32:14.409903Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:67:2092] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1340:3] FDS# 1340 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:14.409965Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:60:2085] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1340:2] FDS# 1340 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:14.410006Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:81:2106] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1340:1] FDS# 1340 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:14.435070Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [3ca1a99c83a6f037] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1340:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 90551 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-25T14:32:14.435291Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [3ca1a99c83a6f037] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1340:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90551 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-25T14:32:14.435378Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [3ca1a99c83a6f037] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1340:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 90551 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-25T14:32:14.435450Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [3ca1a99c83a6f037] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1340:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-25T14:32:14.435515Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [3ca1a99c83a6f037] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1340:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T14:32:14.435678Z node 1 :BS_PROXY_PUT DEBUG: 
{BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.143 sample PartId# [72057594037932033:2:8:0:0:1340:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.143 sample PartId# [72057594037932033:2:8:0:0:1340:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.144 sample PartId# [72057594037932033:2:8:0:0:1340:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 26.255 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 26.433 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 26.518 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } 2025-06-25T14:32:14.539808Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [bba3bffd2e286f4b] bootstrap ActorId# [1:595:2498] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:9:0:0:229:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:14.539950Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.539995Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.540021Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.540045Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.540071Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.540096Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.540131Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [bba3bffd2e286f4b] restore Id# [72057594037932033:2:9:0:0:229:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:14.540197Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [bba3bffd2e286f4b] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:9:0:0:229:1] Marker# BPG33 2025-06-25T14:32:14.540301Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [bba3bffd2e286f4b] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:9:0:0:229:1] Marker# BPG32 2025-06-25T14:32:14.540368Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [bba3bffd2e286f4b] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:9:0:0:229:2] Marker# BPG33 2025-06-25T14:32:14.540393Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [bba3bffd2e286f4b] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:9:0:0:229:2] Marker# BPG32 2025-06-25T14:32:14.540426Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [bba3bffd2e286f4b] partPlacement record partSituation# 
ESituation::Unknown to# 2 blob Id# [72057594037932033:2:9:0:0:229:3] Marker# BPG33 2025-06-25T14:32:14.540450Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [bba3bffd2e286f4b] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:9:0:0:229:3] Marker# BPG32 2025-06-25T14:32:14.540585Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:60:2085] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:229:3] FDS# 229 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:14.540667Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:81:2106] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:229:2] FDS# 229 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:14.540713Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:74:2099] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:229:1] FDS# 229 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:14.554220Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_pu ... p# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.595049Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.595119Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.595199Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.595265Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.595352Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 
2025-06-25T14:32:14.595398Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.595423Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:194: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-25T14:32:14.595453Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:80: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-25T14:32:14.595493Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:301: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 0 Marker# DSP57 initialize full monitoring 2025-06-25T14:32:14.596202Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [d70ef3c23a1a2346] bootstrap ActorId# [1:607:2509] Group# 2181038082 BlobCount# 1 BlobIDs# [[1234:2:0:0:0:5:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:14.596331Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [d70ef3c23a1a2346] Id# [1234:2:0:0:0:5:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:14.596372Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [d70ef3c23a1a2346] restore Id# [1234:2:0:0:0:5:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:14.596418Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [d70ef3c23a1a2346] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [1234:2:0:0:0:5:1] Marker# BPG33 2025-06-25T14:32:14.596448Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [d70ef3c23a1a2346] Sending missing VPut part# 0 to# 0 blob Id# [1234:2:0:0:0:5:1] Marker# BPG32 2025-06-25T14:32:14.596539Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:600:2502] NKikimr::TEvBlobStorage::TEvVPut# {ID# [1234:2:0:0:0:5:1] FDS# 5 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:14.602745Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [d70ef3c23a1a2346] received {EvVPutResult Status# OK ID# [1234:2:0:0:0:5:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 80039 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-06-25T14:32:14.602852Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [d70ef3c23a1a2346] Result# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 2181038082 Marker# BPP12 2025-06-25T14:32:14.602899Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [d70ef3c23a1a2346] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T14:32:14.603005Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.465 sample PartId# [1234:2:0:0:0:5:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 1 } TEvVPutResult{ 
TimestampMs# 6.701 VDiskId# [82000002:1:0:0:0] NodeId# 1 Status# OK } ] } 2025-06-25T14:32:14.603463Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# IsLimitedKeyless# false Marker# DSP02 2025-06-25T14:32:14.603504Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:57: Group# 2181038082 SetStateUnconfigured Marker# DSP07 2025-06-25T14:32:14.603594Z node 2 :BS_PROXY DEBUG: dsproxy_impl.h:205: Group# 2181038082 HandleEnqueue# TEvCollectGarbage {TabletId# 1234 RecordGeneration# 4294967295 PerGenerationCounter# 4294967295 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 4294967295 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 1 IsMonitored# 1} Marker# DSP17 2025-06-25T14:32:14.608943Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# true Marker# DSP02 2025-06-25T14:32:14.609004Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 1 fullIfPossible# 0 Marker# DSP58 2025-06-25T14:32:14.610846Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:609:2106] Create Queue# [2:611:2107] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.610981Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:609:2106] Create Queue# [2:612:2108] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.611085Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:609:2106] Create Queue# [2:613:2109] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.611189Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:609:2106] Create Queue# [2:614:2110] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.611293Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:609:2106] Create Queue# [2:615:2111] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.611403Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:609:2106] Create Queue# [2:616:2112] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.611502Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:609:2106] Create Queue# [2:617:2113] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:14.611527Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-06-25T14:32:14.617956Z node 2 :BS_NODE ERROR: {NW19@node_warden_group.cpp:212} error while parsing group GroupId# 2181038082 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "/home/runner/.ya/build/build_root/yft8/000e7d/r3tmp/tmpYjpgDX//key.txt" MainKey.Version# 1 GroupKeyNonce# 2181038082 2025-06-25T14:32:14.618426Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.618501Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 
2025-06-25T14:32:14.618675Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.618732Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.618868Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.618914Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.618995Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:14.619020Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:194: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-25T14:32:14.619051Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:80: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-25T14:32:14.619220Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:611:2107] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 |78.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |78.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |78.3%| [LD] {RESULT} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |78.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |78.3%| [LD] {RESULT} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |78.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut >> TBlobStorageWardenTest::TestLimitedKeylessGroupThenNoMonitoring [GOOD] >> TPQTest::TestReadSessions [GOOD] >> TPQTest::TestReadSubscription >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionInactive_0_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_Test >> 
TBlobStorageWardenTest::TestSendToInvalidGroupId [GOOD] >> TLocksTest::MultipleLocks [GOOD] >> TBlobStorageWardenTest::TestDeleteStoragePool [GOOD] >> TBlobStorageWardenTest::TestBlockEncriptedGroup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestLimitedKeylessGroupThenNoMonitoring [GOOD] Test command err: 2025-06-25T14:32:14.622100Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.629970Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.647929Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.651575Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.652125Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a02/r3tmp/tmpeVzMlP/pdisk_1.dat 2025-06-25T14:32:15.453159Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [3ca1a99c83a6f037] bootstrap ActorId# [1:549:2461] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1340:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:15.453347Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1340:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.453391Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1340:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.453426Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1340:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.453455Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1340:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.453484Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1340:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.453514Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [3ca1a99c83a6f037] Id# [72057594037932033:2:8:0:0:1340:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.453556Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [3ca1a99c83a6f037] restore Id# 
[72057594037932033:2:8:0:0:1340:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:15.453635Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [3ca1a99c83a6f037] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1340:1] Marker# BPG33 2025-06-25T14:32:15.453683Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [3ca1a99c83a6f037] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1340:1] Marker# BPG32 2025-06-25T14:32:15.453730Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [3ca1a99c83a6f037] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1340:2] Marker# BPG33 2025-06-25T14:32:15.453759Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [3ca1a99c83a6f037] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1340:2] Marker# BPG32 2025-06-25T14:32:15.453793Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [3ca1a99c83a6f037] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1340:3] Marker# BPG33 2025-06-25T14:32:15.453821Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [3ca1a99c83a6f037] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1340:3] Marker# BPG32 2025-06-25T14:32:15.454010Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:67:2092] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1340:3] FDS# 1340 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.454088Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:60:2085] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1340:2] FDS# 1340 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.454137Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:81:2106] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1340:1] FDS# 1340 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.473127Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [3ca1a99c83a6f037] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1340:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 90551 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-25T14:32:15.473370Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [3ca1a99c83a6f037] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1340:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90551 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-25T14:32:15.473467Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [3ca1a99c83a6f037] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1340:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 90551 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-25T14:32:15.473548Z node 1 :BS_PROXY_PUT DEBUG: 
dsproxy_put_impl.cpp:72: [3ca1a99c83a6f037] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1340:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-25T14:32:15.473615Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [3ca1a99c83a6f037] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1340:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T14:32:15.473821Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.19 sample PartId# [72057594037932033:2:8:0:0:1340:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.19 sample PartId# [72057594037932033:2:8:0:0:1340:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.191 sample PartId# [72057594037932033:2:8:0:0:1340:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 20.25 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 20.425 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 20.522 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } 2025-06-25T14:32:15.569104Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [bba3bffd2e286f4b] bootstrap ActorId# [1:595:2498] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:9:0:0:229:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:15.569272Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.569318Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.569349Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.569379Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.569407Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.569434Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [bba3bffd2e286f4b] Id# [72057594037932033:2:9:0:0:229:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.569476Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [bba3bffd2e286f4b] restore Id# [72057594037932033:2:9:0:0:229:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:15.569549Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [bba3bffd2e286f4b] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:9:0:0:229:1] Marker# BPG33 2025-06-25T14:32:15.569594Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [bba3bffd2e286f4b] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:9:0:0:229:1] Marker# BPG32 
2025-06-25T14:32:15.569643Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [bba3bffd2e286f4b] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:9:0:0:229:2] Marker# BPG33 2025-06-25T14:32:15.569672Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [bba3bffd2e286f4b] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:9:0:0:229:2] Marker# BPG32 2025-06-25T14:32:15.569705Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [bba3bffd2e286f4b] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:9:0:0:229:3] Marker# BPG33 2025-06-25T14:32:15.569733Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [bba3bffd2e286f4b] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:9:0:0:229:3] Marker# BPG32 2025-06-25T14:32:15.569893Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:60:2085] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:229:3] FDS# 229 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.569956Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:81:2106] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:229:2] FDS# 229 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.570007Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:74:2099] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:229:1] FDS# 229 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.578148Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.c ... PutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-06-25T14:32:15.667097Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [cd65997ea3b51537] Result# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 2181038082 Marker# BPP12 2025-06-25T14:32:15.667159Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [cd65997ea3b51537] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T14:32:15.667323Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.578 sample PartId# [1234:2:0:0:0:5:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 3.8 VDiskId# [82000002:1:0:0:0] NodeId# 1 Status# OK } ] } 2025-06-25T14:32:15.667948Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# IsLimitedKeyless# false Marker# DSP02 2025-06-25T14:32:15.667999Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:57: Group# 2181038082 SetStateUnconfigured Marker# DSP07 2025-06-25T14:32:15.668097Z node 2 :BS_PROXY DEBUG: dsproxy_impl.h:205: Group# 2181038082 HandleEnqueue# TEvBlock {TabletId# 1234 Generation# 3 Deadline# 18446744073709551 IsMonitored# 1} Marker# DSP17 2025-06-25T14:32:15.669236Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# true Marker# DSP02 2025-06-25T14:32:15.669284Z node 
2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 1 fullIfPossible# 0 Marker# DSP58 2025-06-25T14:32:15.671366Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:613:2107] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:15.671527Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:614:2108] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:15.671647Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:615:2109] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:15.671771Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:616:2110] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:15.671900Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:617:2111] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:15.672021Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:618:2112] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:15.672146Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:611:2106] Create Queue# [2:619:2113] targetNodeId# 1 Marker# DSP01 2025-06-25T14:32:15.672175Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-06-25T14:32:15.673798Z node 2 :BS_NODE ERROR: {NW19@node_warden_group.cpp:212} error while parsing group GroupId# 2181038082 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "/home/runner/.ya/build/build_root/yft8/000a02/r3tmp/tmpeVzMlP//key.txt" MainKey.Version# 1 GroupKeyNonce# 2181038082 2025-06-25T14:32:15.674179Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:15.674549Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:15.674639Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:15.674860Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:15.674951Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] 
QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:15.675047Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:15.675122Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:15.675152Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:194: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-25T14:32:15.675192Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:80: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-25T14:32:15.675340Z node 2 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [efc53170c63234c6] bootstrap ActorId# [2:620:2114] Group# 2181038082 TabletId# 1234 Generation# 3 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-25T14:32:15.675415Z node 2 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [efc53170c63234c6] Sending TEvVBlock Tablet# 1234 Generation# 3 vdiskId# [82000002:1:0:0:0] node# 1 Marker# DSPB03 2025-06-25T14:32:15.675609Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:613:2107] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 3 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 3597774034870281229 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-25T14:32:15.680658Z node 2 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [efc53170c63234c6] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 1 Marker# DSPB01 2025-06-25T14:32:15.680731Z node 2 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [efc53170c63234c6] Result# TEvBlockResult {Status# OK} Marker# DSPB04 Sending TEvPut 2025-06-25T14:32:15.681013Z node 2 :BS_PROXY INFO: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:3:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:3:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." ApproximateFreeSpaceShare# 0} Marker# DSP31 Sending TEvPut 2025-06-25T14:32:15.681159Z node 2 :BS_PROXY DEBUG: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:4:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:4:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." 
ApproximateFreeSpaceShare# 0} Marker# DSP31 Sending TEvPut 2025-06-25T14:32:15.681436Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [c85e1a21dcb31b54] bootstrap ActorId# [1:621:2512] Group# 2181038082 BlobCount# 1 BlobIDs# [[1234:2:0:0:0:11:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:15.681546Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [c85e1a21dcb31b54] Id# [1234:2:0:0:0:11:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.681582Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [c85e1a21dcb31b54] restore Id# [1234:2:0:0:0:11:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:15.681626Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [c85e1a21dcb31b54] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG33 2025-06-25T14:32:15.681661Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [c85e1a21dcb31b54] Sending missing VPut part# 0 to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG32 2025-06-25T14:32:15.681760Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:600:2502] NKikimr::TEvBlobStorage::TEvVPut# {ID# [1234:2:0:0:0:11:1] FDS# 11 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.681939Z node 1 :BS_VDISK_PUT ERROR: blobstorage_skeleton.cpp:569: PDiskId# 1000 VDISK[82000002:_:0:0:0]: (2181038082) TEvVPut: failed to pass the Hull check; id# [1234:2:0:0:0:11:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:32:15.682174Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [c85e1a21dcb31b54] received {EvVPutResult Status# BLOCKED ErrorReason# "blocked" ID# [1234:2:0:0:0:11:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 80086 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 4 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-06-25T14:32:15.682264Z node 1 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [c85e1a21dcb31b54] Result# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038082 Marker# BPP12 2025-06-25T14:32:15.682323Z node 1 :BS_PROXY_PUT NOTICE: dsproxy_put.cpp:486: [c85e1a21dcb31b54] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T14:32:15.682438Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.479 sample PartId# [1234:2:0:0:0:11:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 1 } ] } 2025-06-25T14:32:15.682771Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:613:2107] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestSendToInvalidGroupId [GOOD] Test command err: 2025-06-25T14:32:14.947136Z node 1 :BS_SYNCLOG WARN: 
blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.947383Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.949108Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.950172Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.950244Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.951331Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e6/r3tmp/tmployxYQ/pdisk_1.dat 2025-06-25T14:32:15.925637Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [e2e5f1b9c917f854] bootstrap ActorId# [1:483:2461] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1330:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:15.925776Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1330:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.925814Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1330:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.925854Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1330:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.925880Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1330:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.925903Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1330:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.925926Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1330:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.925965Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [e2e5f1b9c917f854] restore Id# [72057594037932033:2:8:0:0:1330:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:15.926021Z node 1 :BS_PROXY_PUT DEBUG: 
dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1330:1] Marker# BPG33 2025-06-25T14:32:15.926067Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1330:1] Marker# BPG32 2025-06-25T14:32:15.926104Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1330:2] Marker# BPG33 2025-06-25T14:32:15.926127Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1330:2] Marker# BPG32 2025-06-25T14:32:15.926152Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1330:3] Marker# BPG33 2025-06-25T14:32:15.926174Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1330:3] Marker# BPG32 2025-06-25T14:32:15.926325Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:47:2091] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1330:3] FDS# 1330 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.926380Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:40:2084] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1330:2] FDS# 1330 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.926423Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:61:2105] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1330:1] FDS# 1330 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.949100Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1330:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 90472 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-25T14:32:15.949342Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1330:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90472 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-25T14:32:15.949428Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1330:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 90472 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-25T14:32:15.949495Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [e2e5f1b9c917f854] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1330:0] Status# OK StatusFlags# { Valid } 
ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-25T14:32:15.949553Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [e2e5f1b9c917f854] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1330:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T14:32:15.949723Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.937 sample PartId# [72057594037932033:2:8:0:0:1330:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.937 sample PartId# [72057594037932033:2:8:0:0:1330:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.938 sample PartId# [72057594037932033:2:8:0:0:1330:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 23.665 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 23.862 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 23.945 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } 2025-06-25T14:32:15.989041Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 4294967295 IsLimitedKeyless# 0 fullIfPossible# 1 Marker# DSP58 2025-06-25T14:32:15.991047Z node 1 :BS_PROXY CRIT: dsproxy_impl.h:309: The request was sent for an invalid groupID Group# 4294967295 HandleError ev# TEvBlock {TabletId# 1234 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} Response# TEvBlockResult {Status# ERROR ErrorReason# "Created as unconfigured in error state (DSPE11). It happens when the request was sent for an invalid groupID"} Marker# DSP31 Sending TEvPut 2025-06-25T14:32:15.991333Z node 1 :BS_PROXY DEBUG: dsproxy_impl.h:309: The request was sent for an invalid groupID Group# 4294967295 HandleError ev# TEvPut {Id# [1234:1:0:0:0:5:0] Size# 5 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:1:0:0:0:5:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as unconfigured in error state (DSPE11). It happens when the request was sent for an invalid groupID" ApproximateFreeSpaceShare# 0} Marker# DSP31 2025-06-25T14:32:15.991467Z node 1 :BS_PROXY DEBUG: dsproxy_impl.h:309: The request was sent for an invalid groupID Group# 4294967295 HandleError ev# TEvCollectGarbage {TabletId# 1234 RecordGeneration# 4294967295 PerGenerationCounter# 4294967295 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 4294967295 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 1 IsMonitored# 1} Response# TEvCollectGarbageResult {TabletId# 1234 RecordGeneration# 4294967295 PerGenerationCounter# 4294967295 Channel# 0 Status# ERROR ErrorReason# "Created as unconfigured in error state (DSPE11). 
It happens when the request was sent for an invalid groupID"} Marker# DSP31 >> TBlobStorageWardenTest::ObtainTenantKeySamePin [GOOD] >> TBlobStorageWardenTest::ObtainTenantKeyDifferentPin [GOOD] >> BindQueue::Basic [GOOD] >> TBlobStorageWardenTest::ObtainPDiskKeySamePin [GOOD] >> TReplicaTest::Merge >> TReplicaCombinationTest::UpdatesCombinationsDomainRoot >> PQCountersSimple::PartitionWriteQuota [GOOD] >> PQCountersSimple::PartitionFirstClass >> TReplicaTest::Merge [GOOD] >> TReplicaTest::IdempotencyUpdatesWithoutSubscribers >> TBlobStorageWardenTest::TestHttpMonPage [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::ResolvedTimestampForDisplacedUpsert [GOOD] Test command err: 2025-06-25T14:28:23.842691Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893755234637894:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:28:23.842850Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012bd/r3tmp/tmpDaIIi9/pdisk_1.dat 2025-06-25T14:28:24.232951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:28:24.233062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:28:24.235208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:28:24.243390Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:28:24.245076Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519893755234637712:2080] 1750861703832933 != 1750861703832936 TServer::EnableGrpc on GrpcPort 17252, node 1 2025-06-25T14:28:24.411081Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:28:24.411105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:28:24.411132Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:28:24.411794Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:28:24.471307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:28:24.502876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:28:24.543584Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: 
TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:7519893759529605604:2268] 2025-06-25T14:28:24.543873Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:28:24.556672Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:28:24.556811Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:28:24.559845Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:28:24.559901Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:28:24.560038Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:28:24.560532Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:28:24.560681Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:28:24.560722Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:7519893759529605618:2268] in generation 1 2025-06-25T14:28:24.564214Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:28:24.604990Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:28:24.605175Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:28:24.605239Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:7519893759529605622:2269] 2025-06-25T14:28:24.605278Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:28:24.605291Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:28:24.605302Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:24.605479Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:28:24.605554Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:28:24.605575Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:24.605594Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:24.605612Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:28:24.605640Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:24.605968Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7519893759529605597:2299], serverId# [1:7519893759529605621:2310], sessionId# [0:0:0] 2025-06-25T14:28:24.606497Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 
2025-06-25T14:28:24.606796Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976710657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:28:24.606877Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976710657 at tablet 72075186224037888 2025-06-25T14:28:24.608565Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:24.612787Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:28:24.612849Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:28:24.616415Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7519893759529605636:2318], serverId# [1:7519893759529605638:2320], sessionId# [0:0:0] 2025-06-25T14:28:24.619747Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976710657 at step 1750861704662 at tablet 72075186224037888 { Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750861704662 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:28:24.619772Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:24.619863Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:28:24.619917Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:24.619930Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:28:24.619949Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1750861704662:281474976710657] in PlanQueue unit at 72075186224037888 2025-06-25T14:28:24.620134Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1750861704662:281474976710657 keys extracted: 0 2025-06-25T14:28:24.620228Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:28:24.620290Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:28:24.620361Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:28:24.621949Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:28:24.622271Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:28:24.623277Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1750861704661 
2025-06-25T14:28:24.623288Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:24.623304Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1750861704669 2025-06-25T14:28:24.623329Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1750861704662} 2025-06-25T14:28:24.623353Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:24.623677Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:28:24.623703Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:28:24.623724Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:28:24.623791Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750861704662 : 281474976710657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7519893759529605332:2145], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T14:28:24.623811Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976710657 state Ready TxInFly 0 2025-06-25T14:28:24.623835Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:28:24.627316Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:7519893759529605622:2269][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-06-25T14:28:24.631814Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710657 datashard 72075186224037888 state Ready 2025-06-25T14:28:24.631879Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 7207 ... 
GE DEBUG: change_sender_cdc_stream.cpp:628: [CdcChangeSenderMain][72075186224037888:1][29:806:2649] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 0 }] } 2025-06-25T14:32:12.727162Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037888 2025-06-25T14:32:12.727517Z node 29 :TX_DATASHARD DEBUG: datashard_change_sending.cpp:235: Send 1 change records: to# [29:806:2649], at tablet# 72075186224037888 2025-06-25T14:32:12.727838Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 1, forgotten# 0, left# 0, at tablet# 72075186224037888 2025-06-25T14:32:12.729395Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:633: [CdcChangeSenderMain][72075186224037888:1][29:806:2649] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 6 Group: 0 Step: 9000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:32:12.730259Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:111: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][29:891:2649] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 6 Group: 0 Step: 9000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:32:12.730734Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-25T14:32:12.730892Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-25T14:32:12.731112Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'streamImpl' partition: 0 messageNo: 10 requestId: cookie: 6 2025-06-25T14:32:12.731367Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-25T14:32:12.731411Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-25T14:32:12.731527Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72075186224037889] got client message topic: Table/Stream/streamImpl partition: 0 SourceId: '\00072075186224037888' SeqNo: 6 partNo : 0 messageNo: 11 size 26 offset: -1 2025-06-25T14:32:12.732064Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1293: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 process heartbeat sourceId '\00072075186224037888' version v9000/0 2025-06-25T14:32:12.732258Z node 29 :PERSQUEUE INFO: partition_write.cpp:1797: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 emit heartbeat v9000/0 2025-06-25T14:32:12.732559Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob processing sourceId '\00072075186224037889' seqNo 0 partNo 0 2025-06-25T14:32:12.733807Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 
'Table/Stream/streamImpl' partition 0 part blob complete sourceId '\00072075186224037889' seqNo 0 partNo 0 FormedBlobsCount 0 NewHead: Offset 5 PartNo 0 PackedSize 107 count 1 nextOffset 6 batches 1 2025-06-25T14:32:12.735808Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Add new write blob: topic 'Table/Stream/streamImpl' partition 0 compactOffset 5,1 HeadOffset 5 endOffset 5 curOffset 6 d0000000000_00000000000000000005_00000_0000000001_00000? size 93 WTime 8979 2025-06-25T14:32:12.736522Z node 29 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:32:12.736821Z node 29 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 5 partNo 0 count 1 size 93 2025-06-25T14:32:12.739854Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 5 count 1 size 93 actorID [29:756:2620] 2025-06-25T14:32:12.740210Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037889' partition 0 offset 5 partno 0 count 1 parts 0 suffix '63' size 93 2025-06-25T14:32:12.750885Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 44 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:32:12.751552Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:32:12.751757Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream/streamImpl', Partition: 0, SeqNo: 6, partNo: 0, Offset: 5 is stored on disk 2025-06-25T14:32:12.753378Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037889, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=763, count=6, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:32:12.753592Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'streamImpl' partition: 0 messageNo: 11 requestId: cookie: 6 2025-06-25T14:32:12.754327Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][29:891:2649] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 6 Offset: 5 WriteTimestampMS: 8979 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 6 } } } 2025-06-25T14:32:12.754775Z node 29 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][29:806:2649] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-25T14:32:12.755128Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037888 2025-06-25T14:32:12.755234Z node 29 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 6, at tablet: 72075186224037888 2025-06-25T14:32:12.756396Z node 29 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 0, at tablet# 72075186224037888 ... 
checking the update is logged before the new resolved timestamp >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-06-25T14:32:12.881104Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-25T14:32:12.881312Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-25T14:32:12.881685Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 10 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 6 max time lag 0ms effective offset 0 2025-06-25T14:32:12.884028Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 10 added 6 blobs, size 763 count 6 last offset 5, current partition end offset: 6 2025-06-25T14:32:12.884201Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 10. Send blob request. 2025-06-25T14:32:12.884673Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 93 accessed 6 times before, last time 1970-01-01T00:00:06.000000Z 2025-06-25T14:32:12.884781Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 1 partno 0 count 1 parts_count 0 source 1 size 174 accessed 3 times before, last time 1970-01-01T00:00:06.000000Z 2025-06-25T14:32:12.884825Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 2 partno 0 count 1 parts_count 0 source 1 size 93 accessed 1 times before, last time 1970-01-01T00:00:06.000000Z 2025-06-25T14:32:12.884881Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 3 partno 0 count 1 parts_count 0 source 1 size 155 accessed 0 times before, last time 1970-01-01T00:00:08.000000Z 2025-06-25T14:32:12.884923Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 4 partno 0 count 1 parts_count 0 source 1 size 155 accessed 0 times before, last time 1970-01-01T00:00:08.000000Z 2025-06-25T14:32:12.884963Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 5 partno 0 count 1 parts_count 0 source 1 size 93 accessed 0 times before, last time 1970-01-01T00:00:08.000000Z 2025-06-25T14:32:12.885068Z node 29 :PERSQUEUE DEBUG: read.h:121: Reading cookie 10. All 6 blobs are from cache. 
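In this read, all six blobs for offsets 0-5 (763 bytes total, matching the cumulativeSize reported earlier) are served from the tablet's cache, and each hit reports how often and when the blob was last accessed; the "Touched" L2-cache records that follow refresh each blob's recency so it survives eviction longer. A minimal touch-on-read LRU cache with per-entry hit counters -- a hypothetical Python sketch, not the real PQ cache, with made-up names and sizes -- could look like this:

    # Hypothetical touch-on-read LRU cache with hit counters, illustrating the
    # "Got data from cache ... accessed N times before" / "Touched" pattern above.
    from collections import OrderedDict

    class BlobCache:
        def __init__(self, max_bytes):
            self.max_bytes = max_bytes
            self.used_bytes = 0
            self.entries = OrderedDict()  # key -> (blob, hits); insertion order ~ recency

        def put(self, key, blob):
            if key in self.entries:
                self.used_bytes -= len(self.entries[key][0])
                del self.entries[key]
            self.entries[key] = (blob, 0)
            self.used_bytes += len(blob)
            while self.used_bytes > self.max_bytes:
                _, (old_blob, _) = self.entries.popitem(last=False)  # evict least recent
                self.used_bytes -= len(old_blob)

        def get(self, key):
            if key not in self.entries:
                return None
            blob, hits = self.entries.pop(key)
            self.entries[key] = (blob, hits + 1)  # "touch": move to most-recent end
            return blob

    if __name__ == "__main__":
        cache = BlobCache(max_bytes=1024)
        for offset, size in enumerate([93, 174, 93, 155, 155, 93]):  # sizes from the log
            cache.put(("partition-0", offset), b"x" * size)
        assert cache.get(("partition-0", 5)) is not None  # all 6 blobs served from cache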
2025-06-25T14:32:12.885244Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 6 blobs 2025-06-25T14:32:12.885820Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 75 from pos 0 cbcount 1 2025-06-25T14:32:12.886044Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 1 totakecount 1 count 1 size 154 from pos 0 cbcount 1 2025-06-25T14:32:12.886138Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 2 totakecount 1 count 1 size 75 from pos 0 cbcount 1 2025-06-25T14:32:12.886224Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 1 size 135 from pos 0 cbcount 1 2025-06-25T14:32:12.886313Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 4 totakecount 1 count 1 size 135 from pos 0 cbcount 1 2025-06-25T14:32:12.886397Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 5 totakecount 1 count 1 size 75 from pos 0 cbcount 1 2025-06-25T14:32:12.886742Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:32:12.887103Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:32:12.887208Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 1 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:32:12.887261Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:32:12.887306Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 3 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:32:12.887350Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037889' partition 0 offset 4 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:32:12.887396Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037889' partition 0 offset 5 partno 0 count 1 parts 0 suffix '63' |78.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::ObtainTenantKeyDifferentPin [GOOD] >> TReplicaTest::IdempotencyUpdatesWithoutSubscribers [GOOD] >> TReplicaTest::StrongNotificationAfterCommit >> TReplicaTest::StrongNotificationAfterCommit [GOOD] >> TReplicaCombinationTest::UpdatesCombinationsDomainRoot [GOOD] >> TReplicaCombinationTest::UpdatesCombinationsMigratedPath >> KqpIndexes::SecondaryIndexReplace+UseSink [GOOD] >> KqpIndexes::SecondaryIndexReplace-UseSink |78.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |78.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |78.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service >> KqpUniqueIndex::ReplaceFkDuplicate [GOOD] >> TReplicaCombinationTest::UpdatesCombinationsMigratedPath [GOOD] >> TReplicaCombinationTest::MigratedPathRecreation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestHttpMonPage [GOOD] Test command err: 2025-06-25T14:32:15.792006Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:15.792228Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:15.794076Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:15.796069Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:15.796146Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:15.797279Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009f7/r3tmp/tmp4DJDom/pdisk_1.dat 2025-06-25T14:32:17.130710Z node 2 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:17.130849Z node 2 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 
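Interleaved with the per-test error dumps, the run prints progress percentages, build/link steps, and verdict markers such as ">> TReplicaTest::StrongNotificationAfterCommit [GOOD]". A quick ad-hoc way to pull just the verdicts out of a saved log is a small filter like the following; this is a hypothetical helper written for this example, not part of the ya toolchain:

    # Ad-hoc helper (not part of the ya toolchain): extract test verdict lines such as
    # ">> TReplicaTest::StrongNotificationAfterCommit [GOOD]" from a saved run log.
    import re
    import sys

    VERDICT = re.compile(r">>\s+(\S+)\s+\[(GOOD|FAIL|TIMEOUT|SKIPPED)\]")

    def verdicts(text):
        for name, status in VERDICT.findall(text):
            yield name, status

    if __name__ == "__main__":
        log_text = sys.stdin.read()
        for name, status in verdicts(log_text):
            print(f"{status:8} {name}")

Lines that announce a test without a verdict (e.g. ">> TReplicaTest::StrongNotificationAfterCommit" before it finishes) are intentionally not matched.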
2025-06-25T14:32:17.132687Z node 2 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:17.138683Z node 2 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:17.140622Z node 2 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:17.141735Z node 2 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:17.141817Z node 2 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009f7/r3tmp/tmpB15UmO/pdisk_1.dat ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::MultipleLocks [GOOD] Test command err: 2025-06-25T14:31:41.394648Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894607856219026:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:41.394832Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002094/r3tmp/tmpdI1pjC/pdisk_1.dat 2025-06-25T14:31:41.853024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:41.853211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:41.862089Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894607856218853:2080] 1750861901341668 != 1750861901341671 2025-06-25T14:31:41.871857Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:41.873608Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15517 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:31:42.268621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:42.283309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:31:42.302127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:42.407392Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:42.472504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:31:42.546839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:45.215721Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894623639574952:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:45.215787Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002094/r3tmp/tmpAOk6Q2/pdisk_1.dat 2025-06-25T14:31:45.555285Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:45.558187Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:45.558289Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:45.566152Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13617 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:31:45.996797Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:46.012666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:31:46.029288Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:31:46.035174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:46.171109Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:46.279448Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:46.297118Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:49.351035Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519894639619509008:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:49.355108Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002094/r3tmp/tmpWPK0mX/pdisk_1.dat 2025-06-25T14:31:49.612036Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:49.612117Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:49.619087Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:49.624502Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519894639619508951:2080] 1750861909322211 != 1750861909322214 2025-06-25T14:31:49.631257Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22603 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:31:50.054780Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 202 ... ) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:58.617568Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64238 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:31:58.907236Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:31:58.928716Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:31:58.936995Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:59.041125Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:59.136273Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:03.706699Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519894700786508297:2236];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002094/r3tmp/tmpRh5Skd/pdisk_1.dat 2025-06-25T14:32:03.729093Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:03.873239Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:03.873632Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:03.885043Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:03.892279Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519894700786508085:2080] 1750861923551143 != 1750861923551146 2025-06-25T14:32:03.928848Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9633 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:04.426440Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:32:04.450303Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:04.523061Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:04.552236Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:04.613674Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:09.907267Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519894727247588305:2189];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002094/r3tmp/tmp1nSUwV/pdisk_1.dat 2025-06-25T14:32:09.988231Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:10.060165Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:10.065524Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519894727247588141:2080] 1750861929675762 != 1750861929675765 2025-06-25T14:32:10.088203Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:10.088329Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:10.093749Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:10.784511Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29855 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:11.033753Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:32:11.098325Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:11.428478Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:11.558399Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> TReplicaTest::HandshakeWithStaleGeneration >> TReplicaCombinationTest::MigratedPathRecreation [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::ObtainPDiskKeySamePin [GOOD] Test command err: Disable nodeId# 8 Enable nodeId# 8 Delete nodeId# 43 Pick Disable nodeId# 64 Delete nodeId# 80 Delete nodeId# 48 Enable nodeId# 64 Pick Disable nodeId# 42 Add nodeId# 101 Disable nodeId# 95 Delete nodeId# 57 Disable nodeId# 30 Enable nodeId# 30 Disable nodeId# 101 Pick Enable nodeId# 42 Disable nodeId# 5 Enable nodeId# 95 Delete nodeId# 44 Enable nodeId# 5 Disable nodeId# 31 Enable nodeId# 101 Delete nodeId# 61 Add nodeId# 102 Disable nodeId# 88 Delete nodeId# 23 Disable nodeId# 42 Disable nodeId# 41 Add nodeId# 103 Enable nodeId# 88 Pick Enable nodeId# 41 Disable nodeId# 35 Pick Disable nodeId# 55 Enable nodeId# 31 Enable nodeId# 35 Add nodeId# 104 Pick Pick Pick Pick Pick Delete nodeId# 10 Disable nodeId# 73 Disable nodeId# 91 Pick Add nodeId# 105 Enable nodeId# 73 Delete nodeId# 37 Disable nodeId# 7 Disable nodeId# 92 Add nodeId# 106 Add nodeId# 107 Add nodeId# 108 Delete nodeId# 85 Add nodeId# 109 Disable nodeId# 3 Add nodeId# 110 Delete nodeId# 73 Pick Enable nodeId# 91 Disable nodeId# 33 Disable nodeId# 59 Delete nodeId# 79 Pick Delete nodeId# 102 Add nodeId# 111 Enable nodeId# 42 Add nodeId# 112 Enable nodeId# 3 Pick Delete nodeId# 93 Add nodeId# 113 Delete nodeId# 65 Add nodeId# 114 Pick Pick Delete nodeId# 38 Add nodeId# 115 Delete nodeId# 58 Disable nodeId# 75 Disable nodeId# 12 Delete nodeId# 50 Delete nodeId# 87 Delete nodeId# 76 Add nodeId# 116 Enable nodeId# 33 Pick Pick Add nodeId# 117 Disable nodeId# 78 Add nodeId# 118 Delete nodeId# 34 Add nodeId# 119 Disable nodeId# 98 Enable nodeId# 59 Add nodeId# 120 Add nodeId# 121 Enable nodeId# 55 Delete nodeId# 68 Disable nodeId# 114 Enable nodeId# 92 Disable nodeId# 67 Add nodeId# 122 Enable nodeId# 114 Delete nodeId# 120 Pick Disable nodeId# 3 Enable nodeId# 12 Enable nodeId# 75 Disable nodeId# 14 Pick Disable nodeId# 41 Add nodeId# 123 Enable nodeId# 98 Pick Enable nodeId# 7 Enable nodeId# 41 Delete nodeId# 89 Disable nodeId# 46 Delete nodeId# 29 Disable nodeId# 100 Pick Pick Delete nodeId# 119 Add nodeId# 124 Pick Enable nodeId# 14 Disable nodeId# 55 Add nodeId# 125 Enable nodeId# 78 Disable nodeId# 104 Disable nodeId# 21 Delete nodeId# 21 Delete nodeId# 40 Delete nodeId# 42 Enable nodeId# 3 Enable nodeId# 100 Delete nodeId# 110 Delete nodeId# 86 Enable nodeId# 55 Add nodeId# 126 Enable nodeId# 46 Disable nodeId# 15 Disable nodeId# 39 Delete nodeId# 116 Add nodeId# 127 Pick Add nodeId# 128 Pick Add nodeId# 129 Pick Enable 
nodeId# 39 Delete nodeId# 17 Pick Add nodeId# 130 Enable nodeId# 67 Disable nodeId# 47 Enable nodeId# 15 Pick Delete nodeId# 82 Pick Enable nodeId# 47 Add nodeId# 131 Add nodeId# 132 Add nodeId# 133 Pick Pick Delete nodeId# 59 Enable nodeId# 104 Delete nodeId# 115 Pick Disable nodeId# 53 Enable nodeId# 53 Add nodeId# 134 Disable nodeId# 16 Delete nodeId# 96 Disable nodeId# 46 Pick Disable nodeId# 117 Add nodeId# 135 Pick Pick Enable nodeId# 46 Disable nodeId# 18 Enable nodeId# 18 Pick Enable nodeId# 117 Pick Delete nodeId# 117 Add nodeId# 136 Pick Enable nodeId# 16 Add nodeId# 137 Add nodeId# 138 Disable nodeId# 113 Enable nodeId# 113 Delete nodeId# 2 Add nodeId# 139 Disable nodeId# 135 Enable nodeId# 135 Disable nodeId# 53 Pick Enable nodeId# 53 Pick Pick Pick Disable nodeId# 126 Enable nodeId# 126 Pick Pick Delete nodeId# 81 Delete nodeId# 25 Disable nodeId# 27 Enable nodeId# 27 Pick Delete nodeId# 26 Pick Add nodeId# 140 Pick Add nodeId# 141 Pick Delete nodeId# 41 Add nodeId# 142 Pick Add nodeId# 143 Delete nodeId# 7 Delete nodeId# 131 Add nodeId# 144 Disable nodeId# 3 Delete nodeId# 83 Delete nodeId# 11 Pick Enable nodeId# 3 Add nodeId# 145 Delete nodeId# 137 Disable nodeId# 138 Enable nodeId# 138 Pick Pick Pick Delete nodeId# 91 Add nodeId# 146 Add nodeId# 147 Delete nodeId# 70 Pick Add nodeId# 148 Delete nodeId# 125 Add nodeId# 149 Disable nodeId# 147 Delete nodeId# 12 Add nodeId# 150 Pick Add nodeId# 151 Disable nodeId# 112 Pick Pick Delete nodeId# 45 Delete nodeId# 94 Add nodeId# 152 Enable nodeId# 147 Disable nodeId# 67 Enable nodeId# 67 Pick Enable nodeId# 112 Delete nodeId# 20 Delete nodeId# 84 Add nodeId# 153 Delete nodeId# 97 Delete nodeId# 72 Delete nodeId# 138 Pick Pick Add nodeId# 154 Delete nodeId# 105 Pick Add nodeId# 155 Pick Pick Delete nodeId# 1 Pick Pick Delete nodeId# 147 Add nodeId# 156 Delete nodeId# 30 Delete nodeId# 121 Pick Disable nodeId# 151 Delete nodeId# 6 Delete nodeId# 13 Pick Pick Enable nodeId# 151 Pick Pick Pick Disable nodeId# 99 Pick Add nodeId# 157 Delete nodeId# 148 Delete nodeId# 19 Add nodeId# 158 Add nodeId# 159 Enable nodeId# 99 Add nodeId# 160 Add nodeId# 161 Add nodeId# 162 Add nodeId# 163 Disable nodeId# 4 Pick Enable nodeId# 4 Delete nodeId# 71 Disable nodeId# 55 Delete nodeId# 67 Pick Add nodeId# 164 Pick Pick Enable nodeId# 55 Add nodeId# 165 Disable nodeId# 123 Pick Disable nodeId# 49 Pick Enable nodeId# 123 Add nodeId# 166 Pick Delete nodeId# 66 Pick Add nodeId# 167 Pick Delete nodeId# 164 Disable nodeId# 163 Delete nodeId# 106 Delete nodeId# 75 Disable nodeId# 74 Enable nodeId# 74 Disable nodeId# 155 Pick Enable nodeId# 49 Delete nodeId# 152 Add nodeId# 168 Delete nodeId# 104 Enable nodeId# 155 Disable nodeId# 32 Enable nodeId# 32 Add nodeId# 169 Delete nodeId# 130 Disable nodeId# 28 Enable nodeId# 163 Pick Pick Add nodeId# 170 Disable nodeId# 122 Enable nodeId# 122 Pick Disable nodeId# 165 Add nodeId# 171 Add nodeId# 172 Add nodeId# 173 Add nodeId# 174 Add nodeId# 175 Add nodeId# 176 Delete nodeId# 144 Enable nodeId# 28 Disable nodeId# 39 Delete nodeId# 146 Disable nodeId# 53 Add nodeId# 177 Delete nodeId# 64 Pick Pick Add nodeId# 178 Delete nodeId# 177 Pick Disable nodeId# 14 Pick Pick Delete nodeId# 32 Delete nodeId# 133 Add nodeId# 179 Delete nodeId# 4 Enable nodeId# 14 Delete nodeId# 168 Pick Disable nodeId# 100 Add nodeId# 180 Pick Disable nodeId# 35 Pick Pick Delete nodeId# 173 Add nodeId# 181 Add nodeId# 182 Add nodeId# 183 Pick Add nodeId# 184 Pick Pick Delete nodeId# 114 Enable nodeId# 100 Disable nodeId# 167 Disable nodeId# 
62 Add nodeId# 185 Add nodeId# 186 Pick Add nodeId# 187 Disable nodeId# 134 Pick Delete nodeId# 129 Delete nodeId# 118 Delete nodeId# 18 Pick Pick Enable nodeId# 134 Disable nodeId# 55 Enable nodeId# 165 Disable nodeId# 16 Disable nodeId# 15 Add nodeId# 188 Enable nodeId# 167 Delete nodeId# 122 Enable nodeId# 53 Disable nodeId# 22 Pick Disable nodeId# 163 Delete nodeId# 163 Disable nodeId# 24 Enable nodeId# 62 Disable nodeId# 99 Disable nodeId# 101 Enable nodeId# 16 Enable nodeId# 99 Pick Add nodeId# 189 Disable nodeId# 141 Pick Delete nodeId# 145 Enable nodeId# 22 Pick Pick Delete nodeId# 188 Enable nodeId# 39 Add nodeId# 190 Add nodeId# 191 Delete nodeId# 175 Disable nodeId# 54 Disable nodeId# 99 Add nodeId# 192 Add nodeId# 193 Add nodeId# 194 Enable nodeId# 101 Add nodeId# 195 Disable nodeId# 192 Delete nodeId# 78 Delete nodeId# 186 Delete nodeId# 36 Enable nodeId# 192 Disable nodeId# 49 Pick Disable nodeId# 9 Pick Pick Delete nodeId# 132 Disable nodeId# 153 Pick Add nodeId# 196 Enable nodeId# 141 Add nodeId# 197 Add nodeId# 198 Delete nodeId# 142 Delete nodeId# 181 Delete nodeId# 198 Delete nodeId# 174 Pick Enable nodeId# 24 Delete nodeId# 149 Delete nodeId# 157 Pick Delete nodeId# 193 Enable nodeId# 9 Pick Delete nodeId# 69 Enable nodeId# 49 Pick Add nodeId# 199 Pick Disable nodeId# 151 Add nodeId# 200 Delete nodeId# 51 Disable nodeId# 154 Disable nodeId# 100 Enable nodeId# 15 Add nodeId# 201 Delete nodeId# 167 Pick Enable nodeId# 54 Enable nodeId# 99 Enable nodeId# 100 Delete nodeId# 8 Add nodeId# 202 Disable nodeId# 182 Delete nodeId# 101 Disable nodeId# 180 Pick Pick Enable nodeId# 182 Delete nodeId# 200 Disable nodeId# 185 Add nodeId# 203 Pick Delete nodeId# 15 Delete nodeId# 63 Delete nodeId# 90 Add nodeId# 204 Enable nodeId# 35 Enable nodeId# 180 Add nodeId# 205 Delete nodeId# 54 Disable nodeId# 205 Pick Add nodeId# 206 Enable nodeId# 154 Disable nodeId# 24 Pick Delete nodeId# 47 Add nodeId# 207 Enable nodeId# 24 Enable nodeId# 55 Disable nodeId# 189 Add nodeId# 208 Delete nodeId# 156 Enable nodeId# 189 Disable nodeId# 170 Delete nodeId# 202 Add nodeId# 209 Disable nodeId# 24 Disable nodeId# 160 Delete nodeId# 165 Disable nodeId# 100 Delete nodeId# 151 Enable nodeId# 170 Enable nodeId# 205 Pick Pick Pick Disable nodeId# 5 Add nodeId# 210 Delete nodeId# 166 Delete nodeId# 190 Delete nodeId# 185 Add nodeId# 211 Delete nodeId# 171 Disable nodeId# 3 Add nodeId# 212 Pick Disable nodeId# 126 Add nodeId# 213 Enable nodeId# 3 Enable nodeId# 24 Add nodeId# 214 Add nodeId# 215 Pick Add nodeId# 216 Add nodeId# 217 Add nodeId# 218 Delete nodeId# 192 Disable nodeId# 109 Pick Disable nodeId# 77 Enable nodeId# 5 Disable nodeId# 24 Disable nodeId# 49 Delete nodeId# 141 Enable nodeId# 109 Pick Enable nodeId# 24 Delete nodeId# 53 Add nodeId# 219 Enable nodeId# 160 Disable nodeId# 217 Disable nodeId# 56 Add nodeId# 220 Disable nodeId# 55 Pick Delete nodeId# 172 Delete nodeId# 150 Disable nodeId# 170 Enable nodeId# 153 Add nodeId# 221 Pick Disable nodeId# 103 Disable nodeId# 128 Enable nodeId# 49 Add nodeId# 222 Enable nodeId# 170 Enable nodeId# 56 Add nodeId# 223 Delete nodeId# 123 Add nodeId# 224 Add nodeId# 225 Add nodeId# 226 Pick Disable nodeId# 207 Disable nodeId# 159 Disable nodeId# 107 Pick Pick Disable nodeId# 88 Pick Disable nodeId# 183 Disable nodeId# 179 Add nodeId# 227 Enable nodeId# 88 Delete nodeId# 16 Pick Pick Disable nodeId# 224 Enable nodeId# 128 Pick Pick Disable nodeId# 195 Enable nodeId# 159 Add nodeId# 228 Delete nodeId# 107 Pick Enable nodeId# 100 Pick Delete nodeId# 162 
Enable nodeId# 224 Disable nodeId# 223 Enable nodeId# 55 Delete nodeId# 56 Disable nodeId# 158 Enable nodeId# 103 Disable nodeId# 136 Enable nodeId# 126 Delete nodeId# 223 Enable nodeId# 77 Pick Delete nodeId# 160 Delete nodeId# 108 Enable nodeId# 136 Disable nodeId# 24 Pick Pick Add nodeId# 229 Disable nodeId# 219 Pick Disable nodeId# 52 Disable nodeId# 210 Delete nodeId# 127 Disable nodeId# 49 Add nodeId# 230 Delete nodeId# 35 Enable nodeId# 24 Add nodeId# 231 Enable nodeId# 210 Add nodeId# 232 Delete nodeId# 100 Disable nodeId# 215 Pick Disable nodeId# 211 Disable nodeId# 153 Pick Delete nodeId# 207 Enable nodeId# 153 Pick Disable nodeId# 221 Enable nodeId# 49 Enable nodeId# 221 Enable nodeId# 183 Disable nodeId# 134 Add nodeId# 233 Add nodeId# 234 Delete nodeId# 140 Delete nodeId# 183 Add nodeId# 235 Add nodeId# 236 Delete nodeId# 230 Pick Enable nodeId# 179 Delete nodeId# 213 Disable nodeId# 92 Delete nodeId# 14 Pick Disable nodeId# 169 Add nodeId# 237 Pick Add nodeId# 238 Add nodeId# 239 Enable nodeId# 217 Delete nodeId# 134 Delete nodeId# 103 Add nodeId# 240 Disable nodeId# 240 Delete nodeId# 210 Disable nodeId# 62 Add nodeId# 241 Delete nodeId# 199 Pick Pick Enable nodeId# 195 Disable nodeId# 139 Enable nodeId# 158 Pick Enable nodeId# 139 Delete nodeId# 74 Disable nodeId# 237 Add nodeId# 242 Disable nodeId# 236 Pick Delete nodeId# 194 Enable nodeId# 92 Disable nodeId# 92 Delete nodeId# 231 Delete nodeId# 128 Enable nodeId# 92 Disable nodeId# 212 Add nodeId# 243 Delete nodeId# 239 Disable nodeId# 39 Add nodeId# 244 Pick Delete nodeId# 220 Pick Delete nodeId# 77 Delete nodeId# 180 Delete nodeId# 212 Delete nodeId# 232 Add nodeId# 245 Enable nodeId# 237 Delete nodeId# 245 Add nodeId# 246 Enable nodeId# 52 Pick Add nodeId# 247 Disable nodeId# 60 Disable nodeId# 92 Disable nodeId# 221 Delete nodeId# 124 Enable nodeId# 92 Disable nodeId# 98 Disable nodeId# 176 Add nodeId# 248 Enable nodeId# 240 Delete nodeId# 209 Enable nodeId# 211 Add nodeId# 249 Pick Delete nodeId# 27 Add nodeId# 250 Disable nodeId# 197 Pick Disable nodeId# 187 Enable nodeId# 219 Add nodeId# 251 Enable nodeId# 236 Pick Delete nodeId# 9 Delete nodeId# 250 Delete nodeId# 135 Add nodeId# 252 Add nodeId# 253 Add nodeId# 254 Pick Add nodeId# 255 Pick Pick Disable nodeId# 31 Pick Add nodeId# 256 Delete nodeId# 222 Pick Pick Pick Disable nodeId# 55 Disable nodeId# 237 Delete nodeId# 225 Pick Pick Delete nodeId# 221 Disable nodeId# 206 Enable nodeId# 206 Pick Add ... 
20152 Delete nodeId# 20225 Add nodeId# 20284 Enable nodeId# 20246 Enable nodeId# 20189 Enable nodeId# 20283 Enable nodeId# 20081 Delete nodeId# 20283 Add nodeId# 20285 Delete nodeId# 20284 Delete nodeId# 20272 Delete nodeId# 20203 Delete nodeId# 20121 Add nodeId# 20286 Disable nodeId# 20182 Enable nodeId# 20218 Delete nodeId# 20260 Pick Disable nodeId# 20176 Delete nodeId# 20263 Enable nodeId# 20176 Disable nodeId# 20253 Enable nodeId# 20253 Pick Enable nodeId# 20182 Pick Delete nodeId# 20152 Pick Disable nodeId# 20270 Add nodeId# 20287 Add nodeId# 20288 Pick Disable nodeId# 20278 Enable nodeId# 20278 Delete nodeId# 20196 Delete nodeId# 20287 Delete nodeId# 20189 Add nodeId# 20289 Add nodeId# 20290 Add nodeId# 20291 Delete nodeId# 20285 Disable nodeId# 20253 Pick Delete nodeId# 20233 Disable nodeId# 20256 Pick Add nodeId# 20292 Disable nodeId# 20292 Disable nodeId# 20252 Add nodeId# 20293 Pick Delete nodeId# 20169 Pick Pick Disable nodeId# 20216 Delete nodeId# 20293 Delete nodeId# 20247 Pick Add nodeId# 20294 Disable nodeId# 20277 Add nodeId# 20295 Delete nodeId# 20281 Disable nodeId# 20288 Disable nodeId# 20182 Disable nodeId# 20081 Pick Disable nodeId# 20246 Delete nodeId# 20255 Pick Disable nodeId# 20176 Pick Add nodeId# 20296 Pick Enable nodeId# 20182 Add nodeId# 20297 Enable nodeId# 20256 Enable nodeId# 20292 Disable nodeId# 20210 Pick Enable nodeId# 20210 Pick Disable nodeId# 20104 Enable nodeId# 20253 Enable nodeId# 20252 Add nodeId# 20298 Add nodeId# 20299 Add nodeId# 20300 Disable nodeId# 20105 Add nodeId# 20301 Delete nodeId# 20290 Delete nodeId# 20301 Disable nodeId# 20182 Enable nodeId# 20176 Disable nodeId# 20298 Delete nodeId# 20291 Enable nodeId# 20105 Add nodeId# 20302 Pick Disable nodeId# 20258 Delete nodeId# 20182 Delete nodeId# 20241 Pick Enable nodeId# 20277 Add nodeId# 20303 Disable nodeId# 20238 Delete nodeId# 20295 Disable nodeId# 20274 Disable nodeId# 20252 Disable nodeId# 20302 Add nodeId# 20304 Disable nodeId# 20155 Delete nodeId# 20292 Enable nodeId# 20298 Delete nodeId# 20269 Enable nodeId# 20246 Pick Pick Pick Pick Pick Disable nodeId# 20160 Add nodeId# 20305 Enable nodeId# 20302 Add nodeId# 20306 Add nodeId# 20307 Disable nodeId# 20249 Pick Enable nodeId# 20270 Add nodeId# 20308 Pick Disable nodeId# 20277 Disable nodeId# 20176 Add nodeId# 20309 Enable nodeId# 20081 Pick Add nodeId# 20310 Pick Add nodeId# 20311 Disable nodeId# 20273 Enable nodeId# 20104 Delete nodeId# 20297 Disable nodeId# 20305 Delete nodeId# 20303 Delete nodeId# 20278 Add nodeId# 20312 Pick Delete nodeId# 20253 Enable nodeId# 20274 Delete nodeId# 20277 Pick Add nodeId# 20313 Enable nodeId# 20238 Delete nodeId# 20218 Enable nodeId# 20252 Disable nodeId# 20221 Enable nodeId# 20221 Disable nodeId# 20310 Enable nodeId# 20160 Disable nodeId# 20299 Enable nodeId# 20216 Enable nodeId# 20310 Enable nodeId# 20299 Delete nodeId# 20289 Enable nodeId# 20273 Add nodeId# 20314 Pick Enable nodeId# 20249 Add nodeId# 20315 Pick Add nodeId# 20316 Delete nodeId# 20300 Delete nodeId# 20176 Delete nodeId# 20310 Pick Pick Disable nodeId# 20296 Pick Pick Delete nodeId# 20246 Disable nodeId# 20308 Delete nodeId# 20314 Enable nodeId# 20296 Enable nodeId# 20308 Disable nodeId# 20307 Enable nodeId# 20258 Disable nodeId# 20220 Delete nodeId# 20081 Pick Disable nodeId# 20104 Enable nodeId# 20155 Delete nodeId# 20286 Add nodeId# 20317 Disable nodeId# 20276 Enable nodeId# 20288 Delete nodeId# 20267 Pick Enable nodeId# 20220 Add nodeId# 20318 Enable nodeId# 20104 Add nodeId# 20319 Pick Pick Delete nodeId# 20273 Disable 
nodeId# 20261 Enable nodeId# 20307 Disable nodeId# 20224 Add nodeId# 20320 Pick Add nodeId# 20321 Pick Add nodeId# 20322 Disable nodeId# 20282 Enable nodeId# 20282 Enable nodeId# 20276 Enable nodeId# 20261 Enable nodeId# 20224 Add nodeId# 20323 Delete nodeId# 20210 Delete nodeId# 20249 Disable nodeId# 20104 Add nodeId# 20324 Delete nodeId# 20309 Add nodeId# 20325 Disable nodeId# 20251 Enable nodeId# 20251 Delete nodeId# 20274 Disable nodeId# 20318 Enable nodeId# 20305 Disable nodeId# 20299 Enable nodeId# 20104 Add nodeId# 20326 Pick Enable nodeId# 20299 Disable nodeId# 20322 Disable nodeId# 20304 Delete nodeId# 20321 Add nodeId# 20327 Enable nodeId# 20322 Enable nodeId# 20304 Enable nodeId# 20318 Pick Disable nodeId# 20316 Delete nodeId# 20282 Delete nodeId# 20193 Add nodeId# 20328 Add nodeId# 20329 Disable nodeId# 20305 Pick Disable nodeId# 20104 Disable nodeId# 20324 Add nodeId# 20330 Disable nodeId# 20296 Pick Add nodeId# 20331 Disable nodeId# 20304 Disable nodeId# 20315 Add nodeId# 20332 Delete nodeId# 20062 Add nodeId# 20333 Disable nodeId# 20330 Add nodeId# 20334 Enable nodeId# 20316 Enable nodeId# 20104 Disable nodeId# 20216 Delete nodeId# 20327 Delete nodeId# 20326 Disable nodeId# 20256 Delete nodeId# 20328 Delete nodeId# 20034 Delete nodeId# 20315 Add nodeId# 20335 Enable nodeId# 20330 Add nodeId# 20336 Disable nodeId# 20276 Disable nodeId# 20155 Add nodeId# 20337 Enable nodeId# 20324 Add nodeId# 20338 Add nodeId# 20339 Add nodeId# 20340 Delete nodeId# 20333 Disable nodeId# 20275 Delete nodeId# 20298 Pick Add nodeId# 20341 Add nodeId# 20342 Disable nodeId# 20316 Disable nodeId# 20337 Delete nodeId# 20318 Add nodeId# 20343 Delete nodeId# 20319 Enable nodeId# 20304 Add nodeId# 20344 Add nodeId# 20345 Pick Add nodeId# 20346 Delete nodeId# 20155 Enable nodeId# 20256 Delete nodeId# 20185 Delete nodeId# 20105 Disable nodeId# 20339 Enable nodeId# 20316 Disable nodeId# 20308 Pick Pick Add nodeId# 20347 Delete nodeId# 20342 Pick Delete nodeId# 20104 Pick Delete nodeId# 20251 Delete nodeId# 20306 Add nodeId# 20348 Delete nodeId# 20336 Add nodeId# 20349 Pick Delete nodeId# 20331 Enable nodeId# 20308 Delete nodeId# 20220 Disable nodeId# 20256 Enable nodeId# 20305 Pick Add nodeId# 20350 Pick Delete nodeId# 20323 Add nodeId# 20351 Disable nodeId# 20288 Enable nodeId# 20276 Add nodeId# 20352 Add nodeId# 20353 Add nodeId# 20354 Disable nodeId# 20230 Add nodeId# 20355 Enable nodeId# 20296 Pick Pick Enable nodeId# 20216 Enable nodeId# 20288 Pick Add nodeId# 20356 Enable nodeId# 20337 Add nodeId# 20357 Add nodeId# 20358 Disable nodeId# 20304 Add nodeId# 20359 Disable nodeId# 20357 Delete nodeId# 20261 Pick Add nodeId# 20360 Add nodeId# 20361 Enable nodeId# 20357 Pick Add nodeId# 20362 Enable nodeId# 20256 Pick Add nodeId# 20363 Delete nodeId# 20276 Add nodeId# 20364 Add nodeId# 20365 Enable nodeId# 20275 Delete nodeId# 20353 Enable nodeId# 20230 Enable nodeId# 20304 Delete nodeId# 20294 Disable nodeId# 20364 Add nodeId# 20366 Disable nodeId# 20296 Disable nodeId# 20160 Disable nodeId# 20329 Add nodeId# 20367 Delete nodeId# 20275 Add nodeId# 20368 Delete nodeId# 20312 Pick Add nodeId# 20369 Disable nodeId# 20230 Enable nodeId# 20230 Disable nodeId# 20299 Add nodeId# 20370 Disable nodeId# 20316 Add nodeId# 20371 Pick Pick Pick Add nodeId# 20372 Add nodeId# 20373 Delete nodeId# 20351 Delete nodeId# 20365 Pick Disable nodeId# 20361 Add nodeId# 20374 Enable nodeId# 20339 Disable nodeId# 20224 Delete nodeId# 20216 Delete nodeId# 20337 Delete nodeId# 20329 Enable nodeId# 20299 Add nodeId# 20375 Disable 
nodeId# 20338 Enable nodeId# 20364 Enable nodeId# 20296 Disable nodeId# 20221 Delete nodeId# 20362 Enable nodeId# 20221 Delete nodeId# 20355 Delete nodeId# 20357 Enable nodeId# 20224 Pick Enable nodeId# 20316 Enable nodeId# 20361 Add nodeId# 20376 Pick Add nodeId# 20377 Delete nodeId# 20370 Add nodeId# 20378 Add nodeId# 20379 Delete nodeId# 20316 Disable nodeId# 20348 Add nodeId# 20380 Enable nodeId# 20338 Add nodeId# 20381 Add nodeId# 20382 Delete nodeId# 20221 Disable nodeId# 20366 Disable nodeId# 20335 Pick Enable nodeId# 20366 Disable nodeId# 20288 Enable nodeId# 20348 Delete nodeId# 20360 Pick Add nodeId# 20383 Add nodeId# 20384 Disable nodeId# 20350 Disable nodeId# 20382 Delete nodeId# 20373 Delete nodeId# 20334 Add nodeId# 20385 Pick Disable nodeId# 20296 Delete nodeId# 20307 Disable nodeId# 20317 Add nodeId# 20386 Enable nodeId# 20288 Delete nodeId# 20340 Pick Add nodeId# 20387 Pick Add nodeId# 20388 Delete nodeId# 20313 Disable nodeId# 20363 Disable nodeId# 20324 Pick Delete nodeId# 20308 Add nodeId# 20389 Disable nodeId# 20348 Add nodeId# 20390 Pick Disable nodeId# 20332 Disable nodeId# 20374 Disable nodeId# 20320 Disable nodeId# 20325 Delete nodeId# 20324 Delete nodeId# 20377 Enable nodeId# 20317 Add nodeId# 20391 Disable nodeId# 20358 Delete nodeId# 20384 Disable nodeId# 20341 Pick Disable nodeId# 20339 Delete nodeId# 20354 Add nodeId# 20392 Disable nodeId# 20352 Add nodeId# 20393 Disable nodeId# 20359 Pick Disable nodeId# 20371 Delete nodeId# 20367 Pick Add nodeId# 20394 Delete nodeId# 20350 Pick Add nodeId# 20395 Add nodeId# 20396 Delete nodeId# 20386 Disable nodeId# 20345 Add nodeId# 20397 Disable nodeId# 20302 Delete nodeId# 20387 Add nodeId# 20398 Enable nodeId# 20160 Add nodeId# 20399 Disable nodeId# 20394 Delete nodeId# 20349 Delete nodeId# 20392 Delete nodeId# 20343 Add nodeId# 20400 Add nodeId# 20401 Disable nodeId# 20252 Disable nodeId# 20356 Delete nodeId# 20383 Delete nodeId# 20160 Delete nodeId# 20395 Disable nodeId# 20401 Disable nodeId# 20381 Enable nodeId# 20394 Delete nodeId# 20270 Disable nodeId# 20256 Pick Delete nodeId# 20288 Pick Add nodeId# 20402 Pick Delete nodeId# 20258 Disable nodeId# 20376 Disable nodeId# 20311 Disable nodeId# 20379 Delete nodeId# 20332 Disable nodeId# 20366 Disable nodeId# 20369 Enable nodeId# 20374 Enable nodeId# 20325 Enable nodeId# 20358 Delete nodeId# 20330 Delete nodeId# 20322 Disable nodeId# 20144 Add nodeId# 20403 Add nodeId# 20404 Enable nodeId# 20352 Pick Pick Pick Add nodeId# 20405 Enable nodeId# 20359 Disable nodeId# 20361 Add nodeId# 20406 Pick Add nodeId# 20407 Enable nodeId# 20345 Add nodeId# 20408 Enable nodeId# 20379 Enable nodeId# 20376 Add nodeId# 20409 Disable nodeId# 20407 Enable nodeId# 20256 Enable nodeId# 20381 Disable nodeId# 20352 Add nodeId# 20410 Add nodeId# 20411 Pick Pick Disable nodeId# 20325 Pick Delete nodeId# 20279 Add nodeId# 20412 Enable nodeId# 20302 Add nodeId# 20413 Add nodeId# 20414 Enable nodeId# 20341 Disable nodeId# 20411 Delete nodeId# 20361 Enable nodeId# 20382 Pick Enable nodeId# 20407 Pick Add nodeId# 20415 Disable nodeId# 20317 Add nodeId# 20416 Disable nodeId# 20396 Enable nodeId# 20320 Enable nodeId# 20325 Add nodeId# 20417 Add nodeId# 20418 Enable nodeId# 20144 Add nodeId# 20419 Pick Delete nodeId# 20380 Pick Enable nodeId# 20296 Enable nodeId# 20401 Disable nodeId# 20415 Add nodeId# 20420 Delete nodeId# 20299 Disable nodeId# 20402 Disable nodeId# 20345 Enable nodeId# 20356 Delete nodeId# 20376 Disable nodeId# 20302 Disable nodeId# 20256 Enable nodeId# 20256 Delete nodeId# 20397 Pick 
Add nodeId# 20421 Disable nodeId# 20224 Delete nodeId# 20394 Pick Enable nodeId# 20363 Add nodeId# 20422 Add nodeId# 20423 Delete nodeId# 20402 Enable nodeId# 20339 Pick Delete nodeId# 20317 Pick Enable nodeId# 20348 Enable nodeId# 20335 Pick Enable nodeId# 20411 Pick Disable nodeId# 20399 Enable nodeId# 20302 Delete nodeId# 20405 Delete nodeId# 20339 Pick Enable nodeId# 20345 Disable nodeId# 20296 Delete nodeId# 20320 Enable nodeId# 20396 Add nodeId# 20424 Disable nodeId# 20421 Pick Enable nodeId# 20421 Disable nodeId# 20411 Enable nodeId# 20224 Delete nodeId# 20347 Pick Add nodeId# 20425 Pick Add nodeId# 20426 Add nodeId# 20427 Enable nodeId# 20399 Enable nodeId# 20415 Pick Enable nodeId# 20411 Delete nodeId# 20304 Delete nodeId# 20418 Enable nodeId# 20296 Pick Enable nodeId# 20352 Pick Delete nodeId# 20382 Pick Pick Pick Add nodeId# 20428 Delete nodeId# 20338 Pick Add nodeId# 20429 Delete nodeId# 20230 Disable nodeId# 20302 Disable nodeId# 20428 Enable nodeId# 20371 Pick Disable nodeId# 20413 Add nodeId# 20430 Disable nodeId# 20425 Delete nodeId# 20403 Add nodeId# 20431 Add nodeId# 20432 Disable nodeId# 20396 Disable nodeId# 20423 Enable nodeId# 20302 Delete nodeId# 20426 Disable nodeId# 20416 Disable nodeId# 20372 Pick Enable nodeId# 20413 Delete nodeId# 20409 Add nodeId# 20433 Delete nodeId# 20364 Disable nodeId# 20391 >> PQCountersSimple::PartitionFirstClass [GOOD] >> PQCountersSimple::SupportivePartitionCountersPersist ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::StrongNotificationAfterCommit [GOOD] Test command err: 2025-06-25T14:32:17.674659Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:9:2056] 2025-06-25T14:32:17.674712Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7:2054] Upsert description: path# path 2025-06-25T14:32:17.675086Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:32:17.675188Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:10:2057] 2025-06-25T14:32:17.675209Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:17.675245Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:10:2057], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-25T14:32:17.675297Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-25T14:32:17.675323Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:32:17.675409Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:17.675453Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:32:17.681591Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path 
path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:32:17.681847Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 40 2025-06-25T14:32:17.681905Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-25T14:32:17.681935Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [1:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:18.069982Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-25T14:32:18.070044Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:32:18.070144Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:9:2056] 2025-06-25T14:32:18.070184Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:18.070238Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-25T14:32:18.070355Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:18.070385Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:32:18.070434Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:32:18.070575Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 40 2025-06-25T14:32:18.070609Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-25T14:32:18.070644Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [2:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:18.070724Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [2:9:2056] 2025-06-25T14:32:18.070783Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:7:2054] Unsubscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:18.070857Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:18.070896Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, 
LocalPathId: 1], deletion# false 2025-06-25T14:32:18.070926Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [2:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:18.070989Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:18.071016Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-06-25T14:32:18.071062Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:32:18.071140Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 2] DomainOwnerId: 0 }: sender# [2:10:2057] 2025-06-25T14:32:18.071192Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:10:2057], path# [OwnerId: 1, LocalPathId: 2], domainOwnerId# 0, capabilities# 2025-06-25T14:32:18.364098Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 1 }: sender# [3:9:2056] 2025-06-25T14:32:18.364148Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7:2054] Upsert description: path# path 2025-06-25T14:32:18.364209Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 1, capabilities# 2025-06-25T14:32:18.364399Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-25T14:32:18.364437Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:32:18.364501Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-25T14:32:18.364541Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:7:2054] Commit generation: owner# 1, generation# 1 2025-06-25T14:32:18.364653Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:997: [3:7:2054] Handle NKikimr::NSchemeBoard::TReplica::TEvPrivate::TEvSendStrongNotifications { Owner: 1 } >> TReplicaTest::HandshakeWithStaleGeneration [GOOD] >> TReplicaTest::IdempotencyUpdatesAliveSubscriber >> TBlobStorageWardenTest::TestBlockEncriptedGroup [GOOD] |78.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |78.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |78.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut >> TReplicaTest::IdempotencyUpdatesAliveSubscriber [GOOD] >> TReplicaTest::IdempotencyUpdatesVariant2 |78.4%| [TA] $(B)/ydb/core/tx/datashard/ut_change_exchange/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TReplicaTest::Commit >> DataShardVolatile::DistributedWriteAsymmetricExecute [GOOD] >> DataShardVolatile::DistributedWriteThenDropTable >> TReplicaTest::Commit [GOOD] >> TReplicaTest::AckNotifications >> TReplicaTest::IdempotencyUpdatesVariant2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaCombinationTest::MigratedPathRecreation [GOOD] Test command err: 2025-06-25T14:32:17.948249Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:8:2055] 2025-06-25T14:32:17.948345Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 800, generation# 1 2025-06-25T14:32:17.948447Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:8:2055] 2025-06-25T14:32:17.948482Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:7:2054] Commit generation: owner# 800, generation# 1 2025-06-25T14:32:17.948582Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:9:2056] 2025-06-25T14:32:17.948621Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 800, generation# 1 2025-06-25T14:32:17.948703Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:9:2056] 2025-06-25T14:32:17.948739Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:7:2054] Commit generation: owner# 800, generation# 1 2025-06-25T14:32:17.948905Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 103 2025-06-25T14:32:17.948961Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-06-25T14:32:17.960800Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-25T14:32:17.961050Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:9:2056], cookie# 0, event size# 103 2025-06-25T14:32:17.961096Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-06-25T14:32:17.961155Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-25T14:32:17.961280Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant DomainOwnerId: 0 }: sender# [1:10:2057] 
2025-06-25T14:32:17.961369Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:10:2057], path# /Root/Tenant, domainOwnerId# 0, capabilities# =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 2025-06-25T14:32:18.010826Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:11:2058] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:12:2059] 2025-06-25T14:32:18.010884Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:11:2058] Successful handshake: owner# 800, generation# 1 2025-06-25T14:32:18.010962Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:11:2058] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:12:2059] 2025-06-25T14:32:18.010993Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:11:2058] Commit generation: owner# 800, generation# 1 2025-06-25T14:32:18.011056Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:11:2058] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [1:13:2060] 2025-06-25T14:32:18.011087Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:11:2058] Successful handshake: owner# 900, generation# 1 2025-06-25T14:32:18.011146Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:11:2058] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [1:13:2060] 2025-06-25T14:32:18.011174Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:11:2058] Commit generation: owner# 900, generation# 1 2025-06-25T14:32:18.011281Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:11:2058] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:12:2059], cookie# 0, event size# 103 2025-06-25T14:32:18.011316Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:11:2058] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-06-25T14:32:18.011371Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:11:2058] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-25T14:32:18.011447Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:11:2058] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 900 Generation: 1 }: sender# [1:13:2060], cookie# 0, event size# 103 2025-06-25T14:32:18.011486Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:11:2058] Update description: path# /Root/Tenant, pathId# [OwnerId: 900, LocalPathId: 1], deletion# false 2025-06-25T14:32:18.011544Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:884: [1:11:2058] Replace GSS by TSS description: path# /Root/Tenant, pathId# [OwnerId: 900, LocalPathId: 1], domainId# [OwnerId: 800, LocalPathId: 2], curPathId# [OwnerId: 800, LocalPathId: 2], curDomainId# [OwnerId: 800, LocalPathId: 2] 2025-06-25T14:32:18.011598Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:11:2058] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 900, LocalPathId: 1], pathDescription# {Status StatusSuccess, 
Path /Root/Tenant, PathId [OwnerId: 900, LocalPathId: 1], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-25T14:32:18.011680Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:11:2058] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant DomainOwnerId: 0 }: sender# [1:14:2061] 2025-06-25T14:32:18.011725Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:11:2058] Subscribe: subscriber# [1:14:2061], path# /Root/Tenant, domainOwnerId# 0, capabilities# =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 1 PathOwnerId: 900 2025-06-25T14:32:18.012019Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:15:2062] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:16:2063] 2025-06-25T14:32:18.012056Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:15:2062] Successful handshake: owner# 800, generation# 1 2025-06-25T14:32:18.012111Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:15:2062] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:16:2063] 2025-06-25T14:32:18.012136Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:15:2062] Commit generation: owner# 800, generation# 1 2025-06-25T14:32:18.012181Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:15:2062] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:17:2064] 2025-06-25T14:32:18.012203Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:15:2062] Successful handshake: owner# 800, generation# 1 2025-06-25T14:32:18.012255Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:15:2062] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:17:2064] 2025-06-25T14:32:18.012276Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:15:2062] Commit generation: owner# 800, generation# 1 2025-06-25T14:32:18.012932Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:15:2062] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:16:2063], cookie# 0, event size# 103 2025-06-25T14:32:18.012987Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:15:2062] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-06-25T14:32:18.013109Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:15:2062] Upsert description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-25T14:32:18.013185Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:15:2062] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [1:17:2064], cookie# 0, event size# 103 2025-06-25T14:32:18.013213Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:15:2062] Update description: path# /Root/Tenant, pathId# [OwnerId: 800, LocalPathId: 2], deletion# false 2025-06-25T14:32:18.013252Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:15:2062] Upsert description: path# /Root/Tenant, 
pathId# [OwnerId: 800, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path /Root/Tenant, PathId [OwnerId: 800, LocalPathId: 2], PathVersion 2, SubdomainPathId [OwnerId: 800, LocalPathId: 2], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 60} 2025-06-25T14:32:18.013339Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:15:2062] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant DomainOwnerId: 0 }: sender# [1:18:2065] 2025-06-25T14:32:18.013383Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:15:2062] Subscribe: subscriber# [1:18:2065], path# /Root/Tenant, domainOwnerId# 0, capabilities# =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 =========== Path: "/Root/Tenant" PathDescription { Self { PathVersion: 2 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 2 } } } PathId: 2 PathOwnerId: 800 2025-06-25T14:32:18.013729Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:19:2066] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [1:20:2067] 2025-06-25T14:32:18.013758Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:19:2066] Successful handshake: owner# 800, generation# 1 2025-06-25T14:32:18.013802Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:19:2066] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [1:20:2067] 2025-06-25T14:32:18.013839Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:19:2066] Commit generation: owner# 800, generation# 1 2025-06-25T14:32:18.013888Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:19:2066] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Gener ... 
DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== super id == DomainId: [OwnerId: 800, LocalPathId: 333] IsDeletion: 1 PathId: [OwnerId: 910, LocalPathId: 9] Verions: 18446744073709551615 =========== WIN ==/Root/Tenant/table_inside PathID: [OwnerId: 0, LocalPathId: 0] deleted: 1 version: 0 domainId: [OwnerId: 0, LocalPathId: 0] 2025-06-25T14:32:18.605792Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:399:2446] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:400:2447] 2025-06-25T14:32:18.605829Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:399:2446] Successful handshake: owner# 910, generation# 1 2025-06-25T14:32:18.605938Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:399:2446] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:400:2447] 2025-06-25T14:32:18.605966Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:399:2446] Commit generation: owner# 910, generation# 1 2025-06-25T14:32:18.606009Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:399:2446] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:401:2448] 2025-06-25T14:32:18.606044Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:399:2446] Successful handshake: owner# 910, generation# 1 2025-06-25T14:32:18.606087Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:399:2446] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:401:2448] 2025-06-25T14:32:18.606110Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:399:2446] Commit generation: owner# 910, generation# 1 2025-06-25T14:32:18.606183Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:399:2446] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:400:2447], cookie# 0, event size# 64 2025-06-25T14:32:18.606211Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:399:2446] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# true 2025-06-25T14:32:18.606239Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:399:2446] Upsert description: path# [OwnerId: 910, LocalPathId: 9] 2025-06-25T14:32:18.606301Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:399:2446] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:401:2448], cookie# 0, event size# 130 2025-06-25T14:32:18.606325Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:399:2446] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# false 2025-06-25T14:32:18.606355Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [2:399:2446] Path was explicitly deleted, ignoring: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9] 2025-06-25T14:32:18.606409Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:399:2446] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 0 }: sender# [2:402:2449] 2025-06-25T14:32:18.606431Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:399:2446] Upsert description: path# /Root/Tenant/table_inside 2025-06-25T14:32:18.606487Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:399:2446] Subscribe: subscriber# [2:402:2449], path# /Root/Tenant/table_inside, domainOwnerId# 0, capabilities# =========== Left ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey 
{ SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== Right ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 2 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== super id == DomainId: [OwnerId: 800, LocalPathId: 333] IsDeletion: 1 PathId: [OwnerId: 910, LocalPathId: 9] Verions: 18446744073709551615 =========== WIN ==/Root/Tenant/table_inside PathID: [OwnerId: 0, LocalPathId: 0] deleted: 1 version: 0 domainId: [OwnerId: 0, LocalPathId: 0] 2025-06-25T14:32:18.608684Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:403:2450] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:404:2451] 2025-06-25T14:32:18.608721Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:403:2450] Successful handshake: owner# 910, generation# 1 2025-06-25T14:32:18.608784Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:403:2450] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:404:2451] 2025-06-25T14:32:18.608808Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:403:2450] Commit generation: owner# 910, generation# 1 2025-06-25T14:32:18.608851Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:403:2450] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 910 Generation: 1 }: sender# [2:405:2452] 2025-06-25T14:32:18.608885Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:403:2450] Successful handshake: owner# 910, generation# 1 2025-06-25T14:32:18.608930Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:403:2450] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 910 Generation: 1 }: sender# [2:405:2452] 2025-06-25T14:32:18.608958Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:403:2450] Commit generation: owner# 910, generation# 1 2025-06-25T14:32:18.609044Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:403:2450] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:404:2451], cookie# 0, event size# 64 2025-06-25T14:32:18.609075Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:403:2450] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# true 2025-06-25T14:32:18.609098Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:403:2450] Upsert description: path# [OwnerId: 910, LocalPathId: 9] 2025-06-25T14:32:18.609153Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:403:2450] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 910 Generation: 1 }: sender# [2:405:2452], cookie# 0, event size# 64 2025-06-25T14:32:18.609193Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:403:2450] Update description: path# /Root/Tenant/table_inside, pathId# [OwnerId: 910, LocalPathId: 9], deletion# true 2025-06-25T14:32:18.609254Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:403:2450] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /Root/Tenant/table_inside DomainOwnerId: 0 }: sender# [2:406:2453] 2025-06-25T14:32:18.609280Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:403:2450] Upsert description: path# /Root/Tenant/table_inside 2025-06-25T14:32:18.609322Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:403:2450] Subscribe: subscriber# [2:406:2453], path# /Root/Tenant/table_inside, domainOwnerId# 0, capabilities# =========== Left ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 
333 } } } PathId: 9 PathOwnerId: 910 =========== Right ==Path: "/Root/Tenant/table_inside" PathDescription { Self { PathVersion: 18446744073709551615 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 333 } } } PathId: 9 PathOwnerId: 910 =========== super id == DomainId: [OwnerId: 800, LocalPathId: 333] IsDeletion: 1 PathId: [OwnerId: 910, LocalPathId: 9] Verions: 18446744073709551615 =========== WIN ==/Root/Tenant/table_inside PathID: [OwnerId: 0, LocalPathId: 0] deleted: 1 version: 0 domainId: [OwnerId: 0, LocalPathId: 0] 2025-06-25T14:32:18.728175Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 800 Generation: 1 }: sender# [3:8:2055] 2025-06-25T14:32:18.728248Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 800, generation# 1 2025-06-25T14:32:18.728376Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 800 Generation: 1 }: sender# [3:8:2055] 2025-06-25T14:32:18.728427Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:7:2054] Commit generation: owner# 800, generation# 1 2025-06-25T14:32:18.728534Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 900 Generation: 1 }: sender# [3:9:2056] 2025-06-25T14:32:18.728606Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 900, generation# 1 2025-06-25T14:32:18.728744Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [3:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 900 Generation: 1 }: sender# [3:9:2056] 2025-06-25T14:32:18.728781Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [3:7:2054] Commit generation: owner# 900, generation# 1 2025-06-25T14:32:18.728923Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 800 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 118 2025-06-25T14:32:18.729012Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], deletion# false 2025-06-25T14:32:18.729079Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 800, LocalPathId: 1111], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-06-25T14:32:18.729187Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 900 Generation: 1 }: sender# [3:9:2056], cookie# 0, event size# 117 2025-06-25T14:32:18.729226Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], deletion# false 2025-06-25T14:32:18.729283Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:884: [3:7:2054] Update description by newest path form tenant schemeshard: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], domainId# [OwnerId: 800, LocalPathId: 1], curPathId# [OwnerId: 800, LocalPathId: 1111], curDomainId# [OwnerId: 800, LocalPathId: 1] 2025-06-25T14:32:18.729346Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# 
/root/db/dir_inside, pathId# [OwnerId: 800, LocalPathId: 1111] 2025-06-25T14:32:18.729417Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# /root/db/dir_inside, pathId# [OwnerId: 900, LocalPathId: 11], pathDescription# {Status StatusSuccess, Path /root/db/dir_inside, PathId [OwnerId: 900, LocalPathId: 11], PathVersion 1, SubdomainPathId [OwnerId: 800, LocalPathId: 1], PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 67} 2025-06-25T14:32:18.729524Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /root/db/dir_inside DomainOwnerId: 0 }: sender# [3:10:2057] 2025-06-25T14:32:18.729593Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:10:2057], path# /root/db/dir_inside, domainOwnerId# 0, capabilities# =========== Path: "/root/db/dir_inside" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 1 } } } PathId: 1111 PathOwnerId: 800 =========== Path: "/root/db/dir_inside" PathDescription { Self { PathVersion: 1 } DomainDescription { DomainKey { SchemeShard: 800 PathId: 1 } } } PathId: 11 PathOwnerId: 900 =========== DomainId: [OwnerId: 800, LocalPathId: 1] IsDeletion: 0 PathId: [OwnerId: 900, LocalPathId: 11] Versions: 1 >> TReplicaTest::CommitWithoutHandshake ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::ReplaceFkDuplicate [GOOD] Test command err: Trying to start YDB, gRPC: 17230, MsgBus: 28964 2025-06-25T14:31:51.708905Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894650816605743:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:51.725176Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001cab/r3tmp/tmp7flKGQ/pdisk_1.dat 2025-06-25T14:31:52.541296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:52.541385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:52.547038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:52.561556Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17230, node 1 2025-06-25T14:31:52.756537Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:52.860798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:52.860819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:52.860825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:52.860963Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28964 TClient is connected to server localhost:28964 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:53.991221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:54.034037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:31:54.042815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:54.255514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:54.440209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:31:54.556677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:56.712446Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894650816605743:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:56.738075Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:31:56.809304Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894672291443808:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:56.809450Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:57.145045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:57.191878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:57.231124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:57.283970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:57.328856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:57.371363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:57.426266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:57.498687Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894676586411762:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:57.498773Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:57.499033Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894676586411767:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:57.503190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:57.528134Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894676586411769:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:31:57.598565Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894676586411821:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:58.950186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 14571, MsgBus: 19969 2025-06-25T14:32:02.706740Z node 2 :METADATA_PROVIDER WARN: l ... ty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:03.173579Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:03.173587Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:03.173726Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19969 2025-06-25T14:32:03.648467Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19969 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:03.861689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:03.872584Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:32:03.887740Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.038652Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:04.269130Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:04.354700Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:07.297906Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894717476952187:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:07.298030Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:07.422546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:07.513883Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:07.584659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:07.644198Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:07.702479Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519894696002114284:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:07.703542Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:07.719117Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:07.824272Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:07.865639Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:08.001727Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519894721771920148:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:08.001840Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:08.002127Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894721771920153:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:08.006237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:08.023563Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894721771920155:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:32:08.111075Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894721771920206:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:09.611366Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:13.793476Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jykr1skh9b4xrkkt7b27qhd4, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjQ3ZmU0Y2EtYTMwMmJjZjMtZWMwOTJlZDktNGE1MDhjM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-25T14:32:13.809562Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YjQ3ZmU0Y2EtYTMwMmJjZjMtZWMwOTJlZDktNGE1MDhjM2M=, ActorId: [2:7519894730361855881:2536], ActorState: ExecuteState, TraceId: 01jykr1skh9b4xrkkt7b27qhd4, Create QueryResponse for error on request, msg: 2025-06-25T14:32:15.256294Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jykr1v7y2fgbs0wn9ttwn25z, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjQ3ZmU0Y2EtYTMwMmJjZjMtZWMwOTJlZDktNGE1MDhjM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-06-25T14:32:15.256553Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YjQ3ZmU0Y2EtYTMwMmJjZjMtZWMwOTJlZDktNGE1MDhjM2M=, ActorId: [2:7519894730361855881:2536], ActorState: ExecuteState, TraceId: 01jykr1v7y2fgbs0wn9ttwn25z, Create QueryResponse for error on request, msg: >> TReplicaTest::CommitWithoutHandshake [GOOD] >> TReplicaTest::CommitWithStaleGeneration >> TReplicaTest::AckNotifications [GOOD] >> TReplicaTest::AckNotificationsUponPathRecreation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestBlockEncriptedGroup [GOOD] Test command err: 2025-06-25T14:32:14.201086Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 tablet_helpers.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a11/r3tmp/tmpPDfVba/pdisk_1.dat 2025-06-25T14:32:14.285424Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.285519Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.296802Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.296896Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:14.321090Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:1:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:32:15.413096Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [b6b2c6548553d7a5] bootstrap ActorId# [1:487:2461] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1333:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:15.413258Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:1333:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.413307Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:1333:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.413334Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:1333:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.413363Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:1333:0] restore disk# 3 part# 0 situation# ESituation::Unknown 
Marker# BPG51 2025-06-25T14:32:15.413408Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:1333:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.413434Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:1333:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.413473Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [b6b2c6548553d7a5] restore Id# [72057594037932033:2:8:0:0:1333:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:15.413545Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [b6b2c6548553d7a5] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1333:1] Marker# BPG33 2025-06-25T14:32:15.413594Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [b6b2c6548553d7a5] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1333:1] Marker# BPG32 2025-06-25T14:32:15.413658Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [b6b2c6548553d7a5] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1333:2] Marker# BPG33 2025-06-25T14:32:15.413697Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [b6b2c6548553d7a5] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1333:2] Marker# BPG32 2025-06-25T14:32:15.413730Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [b6b2c6548553d7a5] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1333:3] Marker# BPG33 2025-06-25T14:32:15.413757Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [b6b2c6548553d7a5] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1333:3] Marker# BPG32 2025-06-25T14:32:15.413913Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:47:2091] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1333:3] FDS# 1333 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.413978Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:40:2084] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1333:2] FDS# 1333 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.414023Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:61:2105] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1333:1] FDS# 1333 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.416677Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [b6b2c6548553d7a5] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1333:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 90496 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-25T14:32:15.416868Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [b6b2c6548553d7a5] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1333:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90496 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# 
{ SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-25T14:32:15.416957Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [b6b2c6548553d7a5] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1333:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 90496 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-25T14:32:15.417032Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [b6b2c6548553d7a5] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1333:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-25T14:32:15.417104Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [b6b2c6548553d7a5] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1333:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T14:32:15.417279Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.113 sample PartId# [72057594037932033:2:8:0:0:1333:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.113 sample PartId# [72057594037932033:2:8:0:0:1333:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 1.113 sample PartId# [72057594037932033:2:8:0:0:1333:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 3.821 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 3.96 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 4.049 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } 2025-06-25T14:32:15.509270Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [f913878b3da83702] bootstrap ActorId# [1:533:2498] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:9:0:0:224:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:15.509431Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [f913878b3da83702] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.509478Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [f913878b3da83702] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.509512Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [f913878b3da83702] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.509541Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [f913878b3da83702] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.509569Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [f913878b3da83702] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.509597Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [f913878b3da83702] Id# [72057594037932033:2:9:0:0:224:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:15.509637Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: 
[f913878b3da83702] restore Id# [72057594037932033:2:9:0:0:224:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:15.509709Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [f913878b3da83702] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:9:0:0:224:1] Marker# BPG33 2025-06-25T14:32:15.509759Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [f913878b3da83702] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:9:0:0:224:1] Marker# BPG32 2025-06-25T14:32:15.509804Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [f913878b3da83702] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:9:0:0:224:2] Marker# BPG33 2025-06-25T14:32:15.509853Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [f913878b3da83702] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:9:0:0:224:2] Marker# BPG32 2025-06-25T14:32:15.509889Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [f913878b3da83702] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:9:0:0:224:3] Marker# BPG33 2025-06-25T14:32:15.509914Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [f913878b3da83702] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:9:0:0:224:3] Marker# BPG32 2025-06-25T14:32:15.510064Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:40:2084] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:224:3] FDS# 224 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.510128Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:61:2105] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:9:0:0:224:2] FDS# 224 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:15.510172Z node 1 :BS_PROXY DEBUG: group_sessions.h:16 ... 
PutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 3 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-06-25T14:32:18.595191Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [cd65997ea3b51537] Result# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 2181038082 Marker# BPP12 2025-06-25T14:32:18.595263Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [cd65997ea3b51537] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T14:32:18.595407Z node 2 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.543 sample PartId# [1234:2:0:0:0:5:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 2 } TEvVPutResult{ TimestampMs# 9.875 VDiskId# [82000002:1:0:0:0] NodeId# 2 Status# OK } ] } 2025-06-25T14:32:18.595937Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# IsLimitedKeyless# false Marker# DSP02 2025-06-25T14:32:18.595982Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:57: Group# 2181038082 SetStateUnconfigured Marker# DSP07 2025-06-25T14:32:18.596109Z node 3 :BS_PROXY DEBUG: dsproxy_impl.h:205: Group# 2181038082 HandleEnqueue# TEvBlock {TabletId# 1234 Generation# 3 Deadline# 18446744073709551 IsMonitored# 1} Marker# DSP17 2025-06-25T14:32:18.597414Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# true Marker# DSP02 2025-06-25T14:32:18.597475Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 1 fullIfPossible# 0 Marker# DSP58 2025-06-25T14:32:18.599917Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:615:2107] targetNodeId# 2 Marker# DSP01 2025-06-25T14:32:18.600092Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:616:2108] targetNodeId# 2 Marker# DSP01 2025-06-25T14:32:18.600240Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:617:2109] targetNodeId# 2 Marker# DSP01 2025-06-25T14:32:18.604589Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:618:2110] targetNodeId# 2 Marker# DSP01 2025-06-25T14:32:18.604784Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:619:2111] targetNodeId# 2 Marker# DSP01 2025-06-25T14:32:18.604906Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:620:2112] targetNodeId# 2 Marker# DSP01 2025-06-25T14:32:18.605032Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:613:2106] Create Queue# [3:621:2113] targetNodeId# 2 Marker# DSP01 2025-06-25T14:32:18.605068Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-06-25T14:32:18.606429Z node 3 :BS_NODE ERROR: {NW19@node_warden_group.cpp:212} error while parsing group GroupId# 2181038082 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "/home/runner/.ya/build/build_root/yft8/000a11/r3tmp/tmpEDs64u//key.txt" MainKey.Version# 1 
GroupKeyNonce# 2181038082 2025-06-25T14:32:18.606793Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:18.607111Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:18.607189Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:18.607398Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:18.607482Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:18.607541Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:18.607594Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:220: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-25T14:32:18.607626Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:194: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-25T14:32:18.607670Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:80: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-25T14:32:18.607817Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [efc53170c63234c6] bootstrap ActorId# [3:622:2114] Group# 2181038082 TabletId# 1234 Generation# 3 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-25T14:32:18.607876Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [efc53170c63234c6] Sending TEvVBlock Tablet# 1234 Generation# 3 vdiskId# [82000002:1:0:0:0] node# 2 Marker# DSPB03 
2025-06-25T14:32:18.608046Z node 3 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [3:615:2107] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 3 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 82889675892339311 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-25T14:32:18.613489Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [efc53170c63234c6] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 2 Marker# DSPB01 2025-06-25T14:32:18.613568Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [efc53170c63234c6] Result# TEvBlockResult {Status# OK} Marker# DSPB04 Sending TEvPut 2025-06-25T14:32:18.613899Z node 3 :BS_PROXY INFO: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:3:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:3:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." ApproximateFreeSpaceShare# 0} Marker# DSP31 Sending TEvPut 2025-06-25T14:32:18.614129Z node 3 :BS_PROXY DEBUG: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:4:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:4:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." ApproximateFreeSpaceShare# 0} Marker# DSP31 Sending TEvPut 2025-06-25T14:32:18.614456Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [c85e1a21dcb31b54] bootstrap ActorId# [2:623:2514] Group# 2181038082 BlobCount# 1 BlobIDs# [[1234:2:0:0:0:11:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-25T14:32:18.614590Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [c85e1a21dcb31b54] Id# [1234:2:0:0:0:11:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T14:32:18.614647Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [c85e1a21dcb31b54] restore Id# [1234:2:0:0:0:11:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T14:32:18.614705Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [c85e1a21dcb31b54] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG33 2025-06-25T14:32:18.614748Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [c85e1a21dcb31b54] Sending missing VPut part# 0 to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG32 2025-06-25T14:32:18.614877Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:602:2504] NKikimr::TEvBlobStorage::TEvVPut# {ID# [1234:2:0:0:0:11:1] FDS# 11 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T14:32:18.615087Z node 2 :BS_VDISK_PUT ERROR: blobstorage_skeleton.cpp:569: PDiskId# 1000 VDISK[82000002:_:0:0:0]: (2181038082) TEvVPut: failed to pass the Hull check; id# [1234:2:0:0:0:11:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-25T14:32:18.615381Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [c85e1a21dcb31b54] received {EvVPutResult Status# BLOCKED ErrorReason# "blocked" ID# [1234:2:0:0:0:11:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 80086 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { 
SequenceId: 1 MsgId: 4 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-06-25T14:32:18.615480Z node 2 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [c85e1a21dcb31b54] Result# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038082 Marker# BPP12 2025-06-25T14:32:18.615544Z node 2 :BS_PROXY_PUT NOTICE: dsproxy_put.cpp:486: [c85e1a21dcb31b54] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T14:32:18.615653Z node 2 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.587 sample PartId# [1234:2:0:0:0:11:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 2 } ] } 2025-06-25T14:32:18.616064Z node 3 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [3:615:2107] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 >> TReplicaTest::CommitWithStaleGeneration [GOOD] >> TReplicaTest::Delete >> TopicAutoscaling::ControlPlane_CDC_Enable [GOOD] >> TopicAutoscaling::MidOfRange [GOOD] >> TReplicaTest::AckNotificationsUponPathRecreation [GOOD] >> CommitOffset::Commit_WithoutSession_ToPastParentPartition [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_SameSession |78.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::IdempotencyUpdatesVariant2 [GOOD] Test command err: 2025-06-25T14:32:18.961387Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [1:8:2055] 2025-06-25T14:32:18.961468Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 2 2025-06-25T14:32:18.961538Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-25T14:32:18.961569Z node 1 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:763: [1:7:2054] Reject handshake from stale populator: sender# [1:8:2055], owner# 1, generation# 1, pending generation# 2 2025-06-25T14:32:19.344272Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-25T14:32:19.348510Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:32:19.348716Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:9:2056] 2025-06-25T14:32:19.348761Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:19.348867Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:9:2056], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-25T14:32:19.349018Z node 2 
:SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:19.349055Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:32:19.367966Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:32:19.368154Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 40 2025-06-25T14:32:19.368191Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-25T14:32:19.368221Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [2:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:19.368303Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:19.368372Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:32:19.368418Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [2:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:19.368516Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:19.368563Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-06-25T14:32:19.368620Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:32:19.368719Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 2] DomainOwnerId: 0 }: sender# [2:10:2057] 2025-06-25T14:32:19.368773Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:10:2057], path# [OwnerId: 1, LocalPathId: 2], domainOwnerId# 0, capabilities# 2025-06-25T14:32:19.707920Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-25T14:32:19.707977Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:32:19.708086Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:19.708137Z node 3 :SCHEME_BOARD_REPLICA NOTICE: 
replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:32:19.708207Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:32:19.708289Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:19.708449Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-06-25T14:32:19.716545Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:19.716631Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:32:19.716762Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2025-06-25T14:32:19.716803Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# true 2025-06-25T14:32:19.716835Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 2] 2025-06-25T14:32:19.716900Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:19.716928Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:32:19.716961Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [3:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:19.717020Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:19.717051Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-06-25T14:32:19.717078Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:834: [3:7:2054] Path was explicitly deleted, ignoring: path# path, pathId# [OwnerId: 1, LocalPathId: 2] >> TReplicaTest::Delete [GOOD] >> TKeyValueTracingTest::WriteHuge >> TKeyValueTracingTest::ReadHuge >> TKeyValueTracingTest::WriteSmall |78.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::AckNotificationsUponPathRecreation [GOOD] Test command err: 2025-06-25T14:32:19.813621Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle 
NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-25T14:32:19.813691Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:32:19.813780Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-25T14:32:19.813814Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [1:7:2054] Commit generation: owner# 1, generation# 1 2025-06-25T14:32:19.813858Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [1:8:2055] 2025-06-25T14:32:19.813892Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 2 2025-06-25T14:32:19.929152Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:9:2056] 2025-06-25T14:32:19.929204Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# path 2025-06-25T14:32:19.929334Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:9:2056], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2025-06-25T14:32:19.929431Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-25T14:32:19.929471Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:32:19.929573Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:19.929618Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:32:19.936613Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:32:19.936785Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:9:2056] 2025-06-25T14:32:19.936879Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 40 2025-06-25T14:32:19.936918Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-25T14:32:19.936953Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [2:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:19.937014Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 1 }: sender# [2:9:2056] 2025-06-25T14:32:20.222780Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-25T14:32:20.222844Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: 
[3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:32:20.222949Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:20.222997Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:32:20.223070Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 2, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:32:20.223162Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2025-06-25T14:32:20.223236Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2025-06-25T14:32:20.223334Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:20.223375Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:32:20.223436Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 3, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:32:20.223633Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:20.223691Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], deletion# false 2025-06-25T14:32:20.224136Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:32:20.224228Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7:2054] Upsert description: path# path 2025-06-25T14:32:20.224295Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# AckNotifications: true 2025-06-25T14:32:20.224377Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 2], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 2], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:32:20.224476Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 3 }: sender# [3:9:2056] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::Delete [GOOD] Test command err: 2025-06-25T14:32:20.116382Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [1:7:2054] Handle 
NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-25T14:32:20.116453Z node 1 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:969: [1:7:2054] Reject commit from unknown populator: sender# [1:8:2055], owner# 1, generation# 1 2025-06-25T14:32:20.116547Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-25T14:32:20.116585Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:32:20.276141Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 0 }: sender# [2:8:2055] 2025-06-25T14:32:20.276207Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 0 2025-06-25T14:32:20.276325Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:9:2056] 2025-06-25T14:32:20.276357Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:32:20.276422Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 1 }: sender# [2:9:2056] 2025-06-25T14:32:20.276464Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:985: [2:7:2054] Commit generation: owner# 1, generation# 1 2025-06-25T14:32:20.276512Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:958: [2:7:2054] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 1 Generation: 0 }: sender# [2:8:2055] 2025-06-25T14:32:20.276556Z node 2 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:979: [2:7:2054] Reject commit from stale populator: sender# [2:8:2055], owner# 1, generation# 0, pending generation# 1 2025-06-25T14:32:20.276605Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 2 }: sender# [2:8:2055] 2025-06-25T14:32:20.276637Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 2 2025-06-25T14:32:20.397499Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-25T14:32:20.397553Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:32:20.397669Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-25T14:32:20.397715Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], deletion# false 2025-06-25T14:32:20.406996Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 42, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:32:20.407170Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2025-06-25T14:32:20.407245Z node 3 :SCHEME_BOARD_REPLICA INFO: 
replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:32:20.407378Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 42, LocalPathId: 1] DomainOwnerId: 0 }: sender# [3:10:2057] 2025-06-25T14:32:20.407424Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:10:2057], path# [OwnerId: 42, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-25T14:32:20.407538Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2025-06-25T14:32:20.407569Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 42, LocalPathId: 1], deletion# true 2025-06-25T14:32:20.407600Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 42, LocalPathId: 1] 2025-06-25T14:32:20.407718Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:11:2058] 2025-06-25T14:32:20.407754Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:11:2058], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:32:20.407850Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 42, LocalPathId: 1] DomainOwnerId: 0 }: sender# [3:12:2059] 2025-06-25T14:32:20.407892Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:12:2059], path# [OwnerId: 42, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-25T14:32:20.407974Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:13:2060] 2025-06-25T14:32:20.408005Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:13:2060], path# path, domainOwnerId# 0, capabilities# >> KqpIndexes::CreateTableWithExplicitAsyncIndexSQL [GOOD] >> PQCountersSimple::SupportivePartitionCountersPersist [GOOD] |78.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TPQTest::TestReadSubscription [GOOD] >> TPQTest::TestReadAndDeleteConsumer >> KqpUniqueIndex::InsertNullInComplexFk [GOOD] >> KqpUniqueIndex::InsertNullInComplexFkDuplicate >> TBoardSubscriberTest::ManySubscribersManyPublisher >> DataShardVolatile::VolatileCommitOnBlobStorageFailure+UseSink [GOOD] >> DataShardVolatile::VolatileCommitOnBlobStorageFailure-UseSink |78.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TKeyValueTracingTest::ReadHuge [FAIL] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::CreateTableWithExplicitAsyncIndexSQL [GOOD] Test command err: Trying to start YDB, gRPC: 22833, MsgBus: 18055 2025-06-25T14:31:57.872849Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894674683757787:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:57.872898Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c7c/r3tmp/tmpXstkEF/pdisk_1.dat 2025-06-25T14:31:58.326945Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:58.327035Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:58.346232Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:58.346844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22833, node 1 2025-06-25T14:31:58.498298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:58.498323Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:58.498330Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:58.498492Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18055 2025-06-25T14:31:58.884937Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18055 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:59.342458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:59.365380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:31:59.386847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:31:59.535504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:59.736149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:59.804361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:01.818754Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894691863628418:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:01.818882Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:02.191079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:02.264133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:02.312773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:02.362302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:02.419608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:02.467954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:02.530200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:02.660836Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894696158596371:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:02.660931Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:02.661309Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894696158596376:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:02.667079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:02.688753Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894696158596378:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:32:02.757933Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894696158596431:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:02.898814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894674683757787:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:02.898869Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:04.095315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.697055Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPi ... ccessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c7c/r3tmp/tmpLBicgl/pdisk_1.dat 2025-06-25T14:32:14.286500Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:14.286588Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:14.291829Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:14.297331Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3254, node 3 2025-06-25T14:32:14.444785Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:14.444822Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:14.444832Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:14.444970Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14251 TClient is connected to server localhost:14251 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:15.035058Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:15.049521Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:32:15.073223Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:15.119667Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:15.157439Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:15.328575Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:15.423244Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:17.794407Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894761160477022:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:17.794499Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:17.852059Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:17.884407Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:17.935075Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:18.024435Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:18.098665Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:18.176268Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:18.248721Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:18.312281Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894765455444988:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:18.312379Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:18.312457Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894765455444993:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:18.315758Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:18.324700Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519894765455444995:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:32:18.398962Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519894765455445046:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:19.024511Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519894748275573548:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:19.024601Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:19.538958Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:19.686275Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519894769750412882:3747] txid# 281474976710673, issues: { message: "Check failed: path: \'/Root/TestTable\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:20.952668Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::MidOfRange [GOOD] Test command err: 2025-06-25T14:30:48.369992Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894381108623739:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:48.370063Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018d5/r3tmp/tmpzIM5YH/pdisk_1.dat 2025-06-25T14:30:48.672992Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:30:48.945926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:48.946031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:48.956841Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:48.958412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:48.972479Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894381108623716:2080] 1750861848367098 != 1750861848367101 TServer::EnableGrpc on GrpcPort 61872, node 1 2025-06-25T14:30:49.198933Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: 
/home/runner/.ya/build/build_root/yft8/0018d5/r3tmp/yandex4B9yXV.tmp 2025-06-25T14:30:49.198971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0018d5/r3tmp/yandex4B9yXV.tmp 2025-06-25T14:30:49.199186Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0018d5/r3tmp/yandex4B9yXV.tmp 2025-06-25T14:30:49.199327Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:49.266134Z INFO: TTestServer started on Port 16653 GrpcPort 61872 2025-06-25T14:30:49.414126Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16653 PQClient connected to localhost:61872 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:49.614512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:49.650176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:49.671827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:30:49.825065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:51.873250Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894393993526390:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:51.873349Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:51.876383Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894393993526411:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:51.882899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:51.899049Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894393993526413:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:30:52.137670Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894393993526477:2442] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:52.170745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:52.245073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:52.309588Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894398288493782:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:52.311694Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OTQzYjA1YjEtNjIzZTJlZi01NTE0Zjg1NC01MDQ4OGY3, ActorId: [1:7519894393993526388:2298], ActorState: ExecuteState, TraceId: 01jykqzb697ppdfkpakn6pdteq, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:52.315495Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:52.415361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519894398288494077:2620] 2025-06-25T14:30:53.370473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894381108623739:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:53.370538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-25T14:30:58.932532Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-25T14:30:58.944087Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519894381108624038:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:58.944136Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:30:58.944193Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519894381108624038:2142], Recipient [1:7519894381108624038:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:58.944213Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:30:58.954733Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:30:58.955773Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519894424058298052:2699], Recipient [1:7519894381108624038:2142]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:58.955791Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:58.955811Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet ... -06-25T14:32:19.325023Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037893] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:32:19.325085Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270794756, Sender [6:7519894770378838972:2432], Recipient [6:7519894770378838972:2432]: NKikimr::TEvKeyValue::TEvCollect 2025-06-25T14:32:19.325233Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270794752, Sender [6:7519894770378838972:2432], Recipient [6:7519894770378838972:2432]: NKikimrClient.TKeyValueRequest Cookie: 5 CmdDeleteRange { Range { From: "tx_00000281474976710673" IncludeFrom: true To: "tx_00000281474976710673" IncludeTo: true } } CmdWrite { Key: "_txinfo" Value: "\020\373\204\204\274\3722\030\221\200\200\200\200\200@(\240\215\0060\373\204\204\274\37228\221\200\200\200\200\200@" StorageChannel: INLINE } 2025-06-25T14:32:19.325364Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270794753, Sender [6:7519894770378839092:2432], Recipient [6:7519894770378838972:2432]: NKikimr::TEvKeyValue::TEvIntermediate 2025-06-25T14:32:19.325812Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 14 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976710673 2025-06-25T14:32:19.325829Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046644480, txId: 281474976710673 2025-06-25T14:32:19.325846Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710673, pathId: [OwnerId: 72057594046644480, LocalPathId: 14], version: 4 2025-06-25T14:32:19.325863Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason 
remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 14] was 3 2025-06-25T14:32:19.325934Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:32:19.326085Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274137603, Sender [6:7519894710249295853:2243], Recipient [6:7519894710249295702:2154]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046644480 Generation: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 15] Version: 2 } 2025-06-25T14:32:19.326105Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-25T14:32:19.326147Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 15 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710673 2025-06-25T14:32:19.326200Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 15 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710673 2025-06-25T14:32:19.326210Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976710673 2025-06-25T14:32:19.326222Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710673, pathId: [OwnerId: 72057594046644480, LocalPathId: 15], version: 2 2025-06-25T14:32:19.326235Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 15] was 4 2025-06-25T14:32:19.326276Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710673, subscribers: 1 2025-06-25T14:32:19.326293Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [6:7519894770378838945:2430] 2025-06-25T14:32:19.326319Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:32:19.327290Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:32:19.327463Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710673 2025-06-25T14:32:19.327477Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:32:19.327504Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710673 2025-06-25T14:32:19.327510Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:32:19.327566Z 
node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [6:7519894770378838945:2430] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710673 at schemeshard: 72057594046644480 2025-06-25T14:32:19.327796Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270794760, Sender [6:7519894770378839091:2442], Recipient [6:7519894770378838972:2432]: NKikimr::TEvKeyValue::TEvCompleteGC 2025-06-25T14:32:19.328068Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270795264, Sender [6:7519894770378838972:2432], Recipient [6:7519894770378838972:2432]: NKikimrClient.TResponse Status: 1 Cookie: 5 DeleteRangeResult { Status: 0 } WriteResult { Status: 0 StatusFlags: 1 } 2025-06-25T14:32:19.328086Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5317: HandleHook, processing event TEvKeyValue::TEvResponse 2025-06-25T14:32:19.328104Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037893] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:32:19.328128Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037893] Try execute txs with state DELETING 2025-06-25T14:32:19.328144Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037893] TxId 281474976710673, State DELETING 2025-06-25T14:32:19.328169Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037893] delete TxId 281474976710673 2025-06-25T14:32:19.332634Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [6:7519894770378838959:2756], Recipient [6:7519894710249295702:2154]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:32:19.332660Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:32:19.332674Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:32:19.332940Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270794756, Sender [6:7519894770378838972:2432], Recipient [6:7519894770378838972:2432]: NKikimr::TEvKeyValue::TEvCollect 2025-06-25T14:32:19.333112Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 270794760, Sender [6:7519894770378839098:2443], Recipient [6:7519894770378838972:2432]: NKikimr::TEvKeyValue::TEvCompleteGC 2025-06-25T14:32:19.354177Z node 6 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:140: new alter topic request 2025-06-25T14:32:19.400545Z node 6 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519894770378838972:2432], Partition 0, Sender [0:0:0], Recipient [6:7519894770378839057:2438], Cookie: 0 2025-06-25T14:32:19.400635Z node 6 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519894770378839057:2438]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:32:19.400674Z node 6 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:32:19.400743Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:32:19.400829Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:32:19.400861Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:32:19.400896Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:32:19.501659Z node 6 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519894770378838972:2432], Partition 0, Sender [0:0:0], Recipient [6:7519894770378839057:2438], Cookie: 0 2025-06-25T14:32:19.501736Z node 6 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519894770378839057:2438]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:32:19.501763Z node 6 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:32:19.501812Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:32:19.501887Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:32:19.501911Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:32:19.501941Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:32:19.604690Z node 6 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519894770378838972:2432], Partition 0, Sender [0:0:0], Recipient [6:7519894770378839057:2438], Cookie: 0 2025-06-25T14:32:19.604790Z node 6 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519894770378839057:2438]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:32:19.604826Z node 6 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:32:19.604889Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:32:19.604986Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:32:19.605016Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:32:19.605048Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 >> TKeyValueTracingTest::ReadSmall >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists >> TestYmqHttpProxy::TestSendMessageEmptyQueueUrl >> TKeyValueTracingTest::WriteHuge [FAIL] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> PQCountersSimple::SupportivePartitionCountersPersist [GOOD] Test command err: 2025-06-25T14:31:16.510426Z :HappyWay INFO: Random seed for debugging is 1750861876510385 2025-06-25T14:31:16.824679Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894499930036529:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:16.824756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:31:16.855250Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894498829216353:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:16.855357Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ba6/r3tmp/tmpwQGIDb/pdisk_1.dat 2025-06-25T14:31:17.050184Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:31:17.059058Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:31:17.237907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:17.238015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:17.238945Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:17.246362Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:31:17.248360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:17.273243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:17.273306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:17.277961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1083, node 1 2025-06-25T14:31:17.338089Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000ba6/r3tmp/yandexoEkme6.tmp 2025-06-25T14:31:17.338118Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000ba6/r3tmp/yandexoEkme6.tmp 2025-06-25T14:31:17.338317Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000ba6/r3tmp/yandexoEkme6.tmp 2025-06-25T14:31:17.338465Z node 1 :NET_CLASSIFIER 
ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:31:17.393214Z INFO: TTestServer started on Port 5182 GrpcPort 1083 TClient is connected to server localhost:5182 PQClient connected to localhost:1083 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:17.700304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:31:17.835324Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:17.868615Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:19.683961Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894511714118525:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:19.684006Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894511714118507:2269], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:19.684094Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:19.683616Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894512814939314:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:19.683721Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:19.683870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894512814939334:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:19.688127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:19.708642Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894512814939337:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:31:19.717150Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894511714118530:2125] txid# 281474976715657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:31:19.718436Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894511714118529:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:31:19.718115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-25T14:31:19.781665Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894512814939427:2684] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:19.808045Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894511714118557:2131] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:19.947667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:19.950193Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894512814939437:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:31:19.950466Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZjRjOTVkZWEtZmJkMjA3ZDQtZWEyZTgyZTctY2NjM2MxNA==, ActorId: [1:7519894512814939312:2296], ActorState: ExecuteState, TraceId: 01jykr06c26pqmfejabpe5d4mq, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:31:19.952197Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894511714118571:2277], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:31:19.952747Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:31:19.952397Z node 2 :KQP_SESSION WARN: ... action.cpp:162: [PQ: 72075186224037894, Partition: 1, State: StateIdle] no data for compaction 2025-06-25T14:32:12.380803Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 3, State: StateIdle] no data for compaction 2025-06-25T14:32:12.380972Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 4, State: StateIdle] no data for compaction 2025-06-25T14:32:12.448743Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T14:32:12.448948Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:32:13.608682Z node 5 :PERSQUEUE DEBUG: fetch_request_actor.cpp:298: got HasDatainfoResponse 2025-06-25T14:32:13.609050Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: request-id-0-1 2025-06-25T14:32:13.609093Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037894] got client message batch for topic 'rt3.dc1--topic1' partition 1 2025-06-25T14:32:13.609171Z node 5 :PERSQUEUE ERROR: partition_read.cpp:780: [PQ: 72075186224037894, Partition: 1, State: StateIdle] reading from too big offset - topic rt3.dc1--topic1 partition 1 client $without_consumer EndOffset 0 offset 1 2025-06-25T14:32:13.609203Z node 5 :PERSQUEUE DEBUG: event_helpers.cpp:40: tablet 72075186224037894 topic 'rt3.dc1--topic1' partition 1 error: trying to read from future. ReadOffset 1, 0 EndOffset 0 2025-06-25T14:32:13.609238Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:1434: [PQ: 72075186224037894] Handle TEvPQ::TEvError Cookie 1, Error trying to read from future. ReadOffset 1, 0 EndOffset 0 2025-06-25T14:32:13.609261Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:402: Answer error topic: 'rt3.dc1--topic1' partition: 1 messageNo: 0 requestId: request-id-0-1 error: trying to read from future. 
ReadOffset 1, 0 EndOffset 0 2025-06-25T14:32:13.616507Z node 5 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [5:7519894700490401396:3740] destroyed 2025-06-25T14:32:16.055705Z node 7 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:16.055841Z node 7 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:32:16.081332Z node 7 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:16.082409Z node 7 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [7:200:2213] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 30720 BurstSize: 30720 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:32:16.083282Z node 7 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [7:209:2220] 2025-06-25T14:32:16.087746Z node 7 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [7:209:2220] 2025-06-25T14:32:16.095514Z node 7 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6412d733-224e9ba0-c514479e-f015e7dd_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [7:225:2233] 2025-06-25T14:32:16.106103Z node 7 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|184107b4-1f872da9-5be7a41c-280d6f9c_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [7:225:2233] 2025-06-25T14:32:16.469270Z node 7 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|132d947e-449e8b52-5cd4f7fb-a66ac866_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [7:225:2233] 2025-06-25T14:32:16.778730Z node 7 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|700dbeaf-79cc7b64-c23967e2-4f9bac2a_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [7:225:2233] 2025-06-25T14:32:17.042666Z node 7 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c782dc12-cc718aad-844944b1-1590691a_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [7:225:2233] 2025-06-25T14:32:17.301152Z node 7 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|adcf8c85-529b72bc-753e4068-e69f8f57_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [7:225:2233] **** Total histogram: ****
Interval=0ms: 1
Interval=10000ms: 0
Interval=1000ms: 3
Interval=100ms: 0
Interval=10ms: 0
Interval=1ms: 0
Interval=20ms: 0
Interval=2500ms: 2
Interval=5000ms: 0
Interval=500ms: 0
Interval=50ms: 0
Interval=5ms: 0
Interval=999999ms: 0
**** **** **** **** 2025-06-25T14:32:18.448875Z node 8 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:18.449008Z node 8 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:32:18.542346Z node 8 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:18.545838Z node 8 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 2 actor [8:202:2215] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-25T14:32:18.547695Z node 8 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [8:210:2221] 2025-06-25T14:32:18.554013Z node 8 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [8:210:2221] 2025-06-25T14:32:18.555734Z node 8 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [8:211:2222] 2025-06-25T14:32:18.561346Z node 8 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 2 [8:211:2222] 2025-06-25T14:32:18.582723Z node 8 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|90db5da2-c6d0720d-cd88cc81-c5f121f2_0 generated for partition 0 topic 'topic' owner default 2025-06-25T14:32:18.603241Z node 8 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ebc1b157-1439dbc4-2556b391-16b3139b_1 generated for partition 0 topic 'topic' owner default 2025-06-25T14:32:18.634968Z node 8 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ede0fc74-41f1f8b7-5b961112-8283d02f_2 generated for partition 0 topic 'topic' owner default 2025-06-25T14:32:18.675913Z node 8 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|38c01f83-8d9836e1-fcb8c2a2-8731819c_3 generated for partition 0 topic 'topic' owner default 2025-06-25T14:32:19.635487Z node 9 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:19.635616Z node 9 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:32:19.666381Z node 9 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:19.667462Z node 9 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 3 actor [9:202:2215] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 
6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 30720 BurstSize: 30720 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-06-25T14:32:19.668261Z node 9 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [9:211:2222] 2025-06-25T14:32:19.672689Z node 9 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [9:211:2222] 2025-06-25T14:32:19.680839Z node 9 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|5a5b5349-b41503d6-7c1cf338-fbfb8c30_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [9:227:2235] Captured TEvRequest, cmd write size: 3 Captured TEvRequest, cmd write size: 3 2025-06-25T14:32:19.707209Z node 9 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e037a3bd-7e6d84d2-46621553-9523e1bd_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [9:227:2235] Captured TEvRequest, cmd write size: 3 Captured TEvRequest, cmd write size: 3 2025-06-25T14:32:20.127013Z node 9 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|805a8a9e-5b3d3060-62852875-133891c2_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured kesus quota request event from [9:227:2235] Captured TEvRequest, cmd write size: 3 Captured TEvRequest, cmd write size: 3 2025-06-25T14:32:20.495716Z node 9 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3ff4016b-5a4281f8-df1ca82a-a7bbf3e6_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured TEvRequest, cmd write size: 3 Captured TEvRequest, cmd write size: 3 Captured kesus quota request event from [9:227:2235] 2025-06-25T14:32:20.811553Z node 9 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|675d876-b709070c-44ee2931-ebabbc64_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured TEvRequest, cmd write size: 3 Captured TEvRequest, cmd write size: 3 Captured kesus quota request event from [9:227:2235] 2025-06-25T14:32:21.147765Z node 9 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|f9719fa5-a7f25eae-6e05c396-dd6d69a2_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Captured TEvRequest, cmd write size: 3 Captured TEvRequest, cmd write size: 3 Captured kesus quota request event from [9:227:2235] >> TestKinesisHttpProxy::TestPing >> TBoardSubscriberTest::ManySubscribersManyPublisher [GOOD] >> TestKinesisHttpProxy::MissingAction >> TopicAutoscaling::PartitionSplit_PreferedPartition_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_AutoscaleAwareSDK >> TKeyValueTracingTest::WriteSmall [FAIL] >> TestKinesisHttpProxy::UnauthorizedGetShardIteratorRequest >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] |78.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ManySubscribersManyPublisher [GOOD] >> KqpDocumentApi::RestrictWrite >> KqpQueryService::ShowCreateTableNotSuccess >> 
TKeyValueTracingTest::ReadSmall [FAIL] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:32:23.912238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:32:23.920582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:32:23.920677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:32:23.920723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:32:23.920770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:32:23.920798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:32:23.920880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:32:23.920960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:32:23.921871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:32:23.922269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:32:24.092503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:32:24.092580Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:24.128823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:32:24.129337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:32:24.129527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:32:24.165591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:32:24.165974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:32:24.166658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:32:24.166955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:32:24.175705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:32:24.175906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:32:24.177071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:32:24.177134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:32:24.177271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:32:24.177315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:32:24.177354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:32:24.177432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:32:24.197370Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:32:24.344856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:32:24.345119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:32:24.345328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:32:24.345366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:32:24.345634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:32:24.345715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:32:24.357503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:32:24.357735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:32:24.357966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:32:24.358041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:32:24.358086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:32:24.358117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:32:24.365556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:32:24.365633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:32:24.365684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:32:24.372444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:32:24.372529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:32:24.372570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:32:24.372618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:32:24.376142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:32:24.382350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:32:24.382587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:32:24.383586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:32:24.383744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:32:24.383788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:32:24.384109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:32:24.384156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:32:24.384341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:32:24.384428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:32:24.392618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:32:24.392690Z node 1 :FLAT_TX_SCHEMESHARD ... xecute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:32:24.503048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:32:24.503141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:32:24.503172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:32:24.503225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:32:24.503288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:32:24.503331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:32:24.503371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:32:24.503401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:32:24.503433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:32:24.503507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:32:24.503549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 
2025-06-25T14:32:24.503577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-25T14:32:24.503600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-25T14:32:24.504490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:32:24.504589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:32:24.504624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:32:24.504661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-25T14:32:24.504704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:32:24.505424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:32:24.505504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:32:24.505531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:32:24.505558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-25T14:32:24.505581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:32:24.505646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:32:24.522094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:32:24.529399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:32:24.529708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send 
EvNotifyTxCompletion 2025-06-25T14:32:24.529756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:32:24.530121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:32:24.530231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:32:24.530274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:304:2293] TestWaitNotification: OK eventTxId 101 2025-06-25T14:32:24.530758Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/UniqueName" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:32:24.530965Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/UniqueName" took 230us result status StatusSuccess 2025-06-25T14:32:24.531301Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/UniqueName" PathDescription { Self { Name: "UniqueName" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ViewDescription { Name: "UniqueName" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 102 2025-06-25T14:32:24.538906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "UniqueName" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } } TxId: 
102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:32:24.539214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "UniqueName" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } 2025-06-25T14:32:24.539311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_external_data_source.cpp:212: [72057594046678944] TAlterExternalDataSource Propose: opId# 102:0, path# /MyRoot/UniqueName 2025-06-25T14:32:24.539469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource, at schemeshard: 72057594046678944 2025-06-25T14:32:24.557792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/UniqueName\', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2 PathCreateTxId: 101, at schemeshard: 72057594046678944 2025-06-25T14:32:24.558113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/UniqueName TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:32:24.558677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:32:24.558719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:32:24.559265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:32:24.559372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:32:24.559420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:312:2301] TestWaitNotification: OK eventTxId 102 |78.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> KqpQueryService::ExecStats |78.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> KqpQueryService::ExecuteCollectMeta ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::ReadHuge [FAIL] Test command err: equal assertion failed at 
ydb/core/keyvalue/keyvalue_ut_trace.cpp:124, void TestOneRead(TString, TString): env.WilsonUploader->Traces.size() == 1 TBackTrace::Capture()+28 (0x1080818C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x10CC5FA0) TestOneRead(TBasicString>, TBasicString>)+4828 (0x1045701C) NTestSuiteTKeyValueTracingTest::TTestCaseReadHuge::Execute_(NUnitTest::TTestContext&)+318 (0x1045DBFE) std::__y1::__function::__func, void ()>::operator()()+280 (0x10470E88) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x10CF40C6) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x10CCCB29) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+1204 (0x1046FD34) NUnitTest::TTestFactory::Execute()+2438 (0x10CCE3F6) NUnitTest::RunMain(int, char**)+5213 (0x10CEE63D) ??+0 (0x7F73390CAD90) __libc_start_main+128 (0x7F73390CAE40) _start+41 (0xDD25029) |78.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |78.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |78.5%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |78.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |78.5%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_exchange/test-results/unittest/{meta.json ... results_accumulator.log} |78.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |78.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |78.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |78.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::WriteSmall [FAIL] Test command err: assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:103, void TestOneWrite(TString, TVector &&): (env.WilsonUploader->Traces.size() == 1) failed: (2 != 1) TBackTrace::Capture()+28 (0x1080818C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x10CC5FA0) TestOneWrite(TBasicString>, TVector>, std::__y1::allocator>>>&&)+4253 (0x1045164D) NTestSuiteTKeyValueTracingTest::TTestCaseWriteSmall::Execute_(NUnitTest::TTestContext&)+216 (0x1045D188) std::__y1::__function::__func, void ()>::operator()()+280 (0x10470E88) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x10CF40C6) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x10CCCB29) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+1204 (0x1046FD34) NUnitTest::TTestFactory::Execute()+2438 (0x10CCE3F6) NUnitTest::RunMain(int, char**)+5213 (0x10CEE63D) ??+0 (0x7F179162CD90) __libc_start_main+128 (0x7F179162CE40) _start+41 (0xDD25029) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::WriteHuge [FAIL] Test command err: assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:103, void TestOneWrite(TString, TVector &&): (env.WilsonUploader->Traces.size() == 1) failed: (2 != 1) TBackTrace::Capture()+28 (0x1080818C) NUnitTest::NPrivate::RaiseError(char 
const*, TBasicString> const&, bool)+592 (0x10CC5FA0) TestOneWrite(TBasicString>, TVector>, std::__y1::allocator>>>&&)+4253 (0x1045164D) NTestSuiteTKeyValueTracingTest::TTestCaseWriteHuge::Execute_(NUnitTest::TTestContext&)+216 (0x1045D498) std::__y1::__function::__func, void ()>::operator()()+280 (0x10470E88) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x10CF40C6) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x10CCCB29) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+1204 (0x1046FD34) NUnitTest::TTestFactory::Execute()+2438 (0x10CCE3F6) NUnitTest::RunMain(int, char**)+5213 (0x10CEE63D) ??+0 (0x7F69D37FED90) __libc_start_main+128 (0x7F69D37FEE40) _start+41 (0xDD25029) |78.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |78.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |78.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |78.5%| [LD] {RESULT} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |78.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::ReadSmall [FAIL] Test command err: equal assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:124, void TestOneRead(TString, TString): env.WilsonUploader->Traces.size() == 1 TBackTrace::Capture()+28 (0x1080818C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x10CC5FA0) TestOneRead(TBasicString>, TBasicString>)+4828 (0x1045701C) NTestSuiteTKeyValueTracingTest::TTestCaseReadSmall::Execute_(NUnitTest::TTestContext&)+318 (0x1045D80E) std::__y1::__function::__func, void ()>::operator()()+280 (0x10470E88) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x10CF40C6) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x10CCCB29) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+1204 (0x1046FD34) NUnitTest::TTestFactory::Execute()+2438 (0x10CCE3F6) NUnitTest::RunMain(int, char**)+5213 (0x10CEE63D) ??+0 (0x7F3495B41D90) __libc_start_main+128 (0x7F3495B41E40) _start+41 (0xDD25029) |78.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TBlobStorageStoragePoolMonTest::SizeClassCalcTest [GOOD] |78.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |78.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |78.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TBlobStorageStoragePoolMonTest::SizeClassCalcTest [GOOD] |78.6%| [TA] $(B)/ydb/core/keyvalue/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log} >> CommitOffset::Commit_Flat_WithWrongSession [GOOD] >> CommitOffset::Commit_Flat_WithWrongSession_ToPast |78.6%| [TA] {RESULT} $(B)/ydb/core/keyvalue/ut_trace/test-results/unittest/{meta.json ... 
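Editor's note on the TKeyValueTracingTest failures above: each of them stops on the same check, namely that the test environment's Wilson trace uploader holds exactly one captured trace, while two were observed ("(2 != 1)"). Purely as an illustration of that kind of check, and not the actual keyvalue_ut_trace.cpp source, a minimal NUnitTest-style sketch follows; TFakeWilsonUploader and its TString payload are assumed stand-ins for the real test fixture.

#include <library/cpp/testing/unittest/registar.h>
#include <util/generic/string.h>
#include <vector>

// Stand-in for the real environment's trace collector; the actual test
// inspects env.WilsonUploader->Traces (see the backtraces above).
struct TFakeWilsonUploader {
    std::vector<TString> Traces;
};

Y_UNIT_TEST_SUITE(TraceCountSketch) {
    Y_UNIT_TEST(ExactlyOneTracePerOperation) {
        TFakeWilsonUploader uploader;
        uploader.Traces.push_back("write-trace"); // the single expected trace
        // Same shape as the failing assertion: on mismatch the framework
        // reports the compared values and prints a backtrace, as in the log.
        UNIT_ASSERT_VALUES_EQUAL(uploader.Traces.size(), 1u);
    }
}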
results_accumulator.log} |78.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TPersQueueMirrorer::ValidStartStream [GOOD] |78.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |78.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |78.6%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |78.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |78.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> KqpIndexes::SecondaryIndexReplace-UseSink [GOOD] >> TopicAutoscaling::WithDir_PartitionSplit_AutosplitByLoad [GOOD] >> WithSDK::DescribeConsumer >> KqpUniqueIndex::InsertNullInComplexFkDuplicate [GOOD] >> TestKinesisHttpProxy::UnauthorizedGetShardIteratorRequest [GOOD] >> KqpQueryService::ShowCreateTableNotSuccess [GOOD] >> KqpQueryService::ShowCreateTableOnView |78.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TargetDiscoverer::SystemObjects >> TestKinesisHttpProxy::MissingAction [GOOD] >> TargetDiscoverer::InvalidCredentials >> TestKinesisHttpProxy::TestRequestWithWrongRegion >> TestYmqHttpProxy::TestSendMessageEmptyQueueUrl [GOOD] >> TargetDiscoverer::IndexedTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexReplace-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 18771, MsgBus: 25521 2025-06-25T14:31:54.008931Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894664359757104:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:54.008977Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c8a/r3tmp/tmpmdTqMg/pdisk_1.dat 2025-06-25T14:31:54.711941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:54.712010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:54.713760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:54.751694Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18771, node 1 2025-06-25T14:31:54.995806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:54.995847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:54.995857Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:54.996002Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:31:55.034114Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25521 TClient is 
connected to server localhost:25521 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:56.171962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:56.189312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:31:56.205622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:56.399661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:56.653560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:31:56.773679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:59.016441Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894664359757104:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:59.016525Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:31:59.331013Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894685834595185:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:59.331113Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:59.826664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:59.889413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:59.963623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:00.055572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:00.103068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:00.192034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:00.253450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:00.336003Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894690129563148:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:00.336106Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:00.336386Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894690129563153:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:00.341097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:00.362279Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894690129563155:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:32:00.419947Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894690129563206:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:01.966909Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519894694424530801:3601], Recipient [1:7519894664359757392:2149]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:32:01.966954Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:32:01.966972Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 7205759404664 ... ain stats from datashardId(TabletID)=72075186224037906 maps to shardIdx: 72057594046644480:19 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:32:30.989336Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037906, followerId 0 2025-06-25T14:32:30.989361Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:19 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:32:30.989371Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037906 2025-06-25T14:32:30.989387Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:18 data size 0 row count 0 2025-06-25T14:32:30.989414Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037905 maps to shardIdx: 72057594046644480:18 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:32:30.989421Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037905, followerId 0 2025-06-25T14:32:30.989444Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:18 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:32:30.989453Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037905 2025-06-25T14:32:30.989470Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:23 data size 0 row count 0 2025-06-25T14:32:30.989496Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037910 maps to shardIdx: 72057594046644480:23 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 
0 2025-06-25T14:32:30.989506Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037910, followerId 0 2025-06-25T14:32:30.989530Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:23 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:32:30.989538Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037910 2025-06-25T14:32:30.989553Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:16 data size 0 row count 0 2025-06-25T14:32:30.989576Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037903 maps to shardIdx: 72057594046644480:16 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:32:30.989584Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037903, followerId 0 2025-06-25T14:32:30.989607Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:16 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:32:30.989618Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037903 2025-06-25T14:32:30.989634Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:15 data size 0 row count 0 2025-06-25T14:32:30.989657Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037902 maps to shardIdx: 72057594046644480:15 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:32:30.989664Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037902, followerId 0 2025-06-25T14:32:30.989685Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:15 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:32:30.989694Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037902 2025-06-25T14:32:30.989710Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:17 data size 0 row count 0 2025-06-25T14:32:30.989760Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037904 maps to shardIdx: 72057594046644480:17 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:32:30.989772Z node 3 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037904, followerId 0 2025-06-25T14:32:30.989799Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:17 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:32:30.989813Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037904 2025-06-25T14:32:30.989830Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:21 data size 0 row count 0 2025-06-25T14:32:30.989857Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037908 maps to shardIdx: 72057594046644480:21 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:32:30.989863Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037908, followerId 0 2025-06-25T14:32:30.989888Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:21 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:32:30.989906Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037908 2025-06-25T14:32:30.989920Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:14 data size 0 row count 0 2025-06-25T14:32:30.989944Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037901 maps to shardIdx: 72057594046644480:14 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:32:30.989951Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037901, followerId 0 2025-06-25T14:32:30.989972Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:14 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:32:30.989980Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037901 2025-06-25T14:32:30.989994Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:22 data size 0 row count 0 2025-06-25T14:32:30.990017Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037909 maps to shardIdx: 72057594046644480:22 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:32:30.990023Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037909, followerId 0 2025-06-25T14:32:30.990044Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:22 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:32:30.990052Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037909 2025-06-25T14:32:30.990125Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:32:30.990306Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519894766004782041:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:32:30.990321Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:32:30.990335Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:32:31.013329Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519894766004782041:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:32:31.013381Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:32:31.013458Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519894766004782041:2142], Recipient [3:7519894766004782041:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:32:31.013478Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:32:31.192675Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:32:31.231940Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill |78.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TestKinesisHttpProxy::PutRecordsWithLongExplicitHashKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TPersQueueMirrorer::ValidStartStream [GOOD] Test command err: 2025-06-25T14:30:46.536923Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894370411206311:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:46.545459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:46.844783Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018eb/r3tmp/tmpcgqp4d/pdisk_1.dat 2025-06-25T14:30:47.064913Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:47.072491Z node 1 :CONFIGS_DISPATCHER ERROR: 
configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894370411206294:2080] 1750861846529843 != 1750861846529846 2025-06-25T14:30:47.107955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:47.108049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:47.136265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29413, node 1 2025-06-25T14:30:47.321001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0018eb/r3tmp/yandexK1DFAc.tmp 2025-06-25T14:30:47.321026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0018eb/r3tmp/yandexK1DFAc.tmp 2025-06-25T14:30:47.321215Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0018eb/r3tmp/yandexK1DFAc.tmp 2025-06-25T14:30:47.321371Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:47.390632Z INFO: TTestServer started on Port 3718 GrpcPort 29413 2025-06-25T14:30:47.578533Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3718 PQClient connected to localhost:29413 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:47.844231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:47.858148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:30:47.879103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:30:48.024986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:50.213690Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894387591076270:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:50.213848Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:50.217223Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894387591076283:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:50.222489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:50.251372Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894387591076285:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:30:50.316263Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894387591076349:2445] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:50.727935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:50.770616Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894387591076357:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:50.772504Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTRlMmMxM2YtNDlkYzIwNjUtNWVmMDQ3MWItNWUyNGM3NDg=, ActorId: [1:7519894387591076245:2298], ActorState: ExecuteState, TraceId: 01jykqz9k303ghnzmcqjcw8h8h, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:50.774706Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:50.799429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:50.950342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519894391886043945:2623] 2025-06-25T14:30:51.532435Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894370411206311:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:51.532526Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-25T14:30:57.253743Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-25T14:30:57.279959Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:30:57.281027Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519894417655847919:2702], Recipient [1:7519894370411206620:2147]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:57.281064Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:57.281082Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:30:57.281119Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519894417655847915:2699], Recipient [1:7519894370411206620:2147]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-25T14:30:57.281133Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:30:57.392018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "back-compatibility-test" TotalGroupCount: 3 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86 ... Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:958: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 5 endOffset 10 2025-06-25T14:32:29.059100Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 5, endOffset# 10, WTime# 1750861948676, sizeLag# 1050 2025-06-25T14:32:29.059121Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1TEvPartitionReady. 
Aval parts: 1 2025-06-25T14:32:29.059159Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 performing read request: guid# 90366595-a252930c-5b3fbea2-4ee02f2d, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 6, size# 1260, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-25T14:32:29.059258Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 6 maxSize 1260 maxTimeLagMs 0 readTimestampMs 0 readOffset 5 EndOffset 10 ClientCommitOffset 0 committedOffset 0 Guid 90366595-a252930c-5b3fbea2-4ee02f2d 2025-06-25T14:32:29.059633Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-25T14:32:29.059673Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 0 2025-06-25T14:32:29.059799Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 4 Topic 'rt3.dc1--topic1' partition 0 user user offset 5 count 6 size 1260 endOffset 10 max time lag 0ms effective offset 5 2025-06-25T14:32:29.060189Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 4 added 2 blobs, size 670 count 5 last offset 6, current partition end offset: 10 2025-06-25T14:32:29.060211Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 4. Send blob request. 2025-06-25T14:32:29.060259Z node 8 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 5 partno 0 count 1 parts_count 0 source 1 size 161 accessed 0 times before, last time 2025-06-25T14:32:28.000000Z 2025-06-25T14:32:29.060290Z node 8 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 6 partno 0 count 4 parts_count 0 source 1 size 509 accessed 0 times before, last time 2025-06-25T14:32:28.000000Z 2025-06-25T14:32:29.060330Z node 8 :PERSQUEUE DEBUG: read.h:121: Reading cookie 4. All 2 blobs are from cache. 2025-06-25T14:32:29.060372Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 2 blobs 2025-06-25T14:32:29.060521Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 5 totakecount 1 count 1 size 141 from pos 0 cbcount 1 2025-06-25T14:32:29.060673Z node 8 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 6 totakecount 4 count 4 size 489 from pos 0 cbcount 4 2025-06-25T14:32:29.060790Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--topic1' partition: 0 messageNo: 0 requestId: cookie: 5 2025-06-25T14:32:29.060815Z node 8 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 5 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:32:29.060844Z node 8 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 0 offset 6 partno 0 count 4 parts 0 suffix '63' 2025-06-25T14:32:29.062726Z :DEBUG: [] [] [d0573ddd-48b962ad-ffe66349-4db0ef3b] [] Got ReadResponse, serverBytesSize = 681, now ReadSizeBudget = 0, ReadSizeServerDelta = 52428119 2025-06-25T14:32:29.062849Z :DEBUG: [] [] [d0573ddd-48b962ad-ffe66349-4db0ef3b] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52428119 2025-06-25T14:32:29.061536Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 10 Result { Offset: 5 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 6 WriteTimestampMS: 1750861948939 CreateTimestampMS: 1750861948930 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 6 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 7 WriteTimestampMS: 1750861948943 CreateTimestampMS: 1750861948930 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 7 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 8 WriteTimestampMS: 1750861948988 CreateTimestampMS: 1750861948930 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 8 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 9 WriteTimestampMS: 1750861948988 CreateTimestampMS: 1750861948930 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 9 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 10 WriteTimestampMS: 1750861948988 CreateTimestampMS: 1750861948930 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 2 SizeLag: 18446744073709551233 RealReadOffset: 9 WaitQuotaTimeMs: 0 EndOffset: 10 StartOffset: 0 } Cookie: 5 } 2025-06-25T14:32:29.061738Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 10 2025-06-25T14:32:29.061776Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 10 ReadOffset 10 ReadGuid 90366595-a252930c-5b3fbea2-4ee02f2d has messages 1 2025-06-25T14:32:29.061885Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 read done: guid# 90366595-a252930c-5b3fbea2-4ee02f2d, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 681 2025-06-25T14:32:29.061911Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 response to read: guid# 90366595-a252930c-5b3fbea2-4ee02f2d 2025-06-25T14:32:29.062091Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 Process answer. Aval parts: 0 2025-06-25T14:32:29.064434Z :DEBUG: [] Decompression task done. 
Partition/PartitionSessionId: 1 (5-9) 2025-06-25T14:32:29.064491Z :DEBUG: [] [] [d0573ddd-48b962ad-ffe66349-4db0ef3b] [] Returning serverBytesSize = 681 to budget 2025-06-25T14:32:29.064522Z :DEBUG: [] [] [d0573ddd-48b962ad-ffe66349-4db0ef3b] [] In ContinueReadingDataImpl, ReadSizeBudget = 681, ReadSizeServerDelta = 52428119 2025-06-25T14:32:29.064754Z :DEBUG: [] [] [d0573ddd-48b962ad-ffe66349-4db0ef3b] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-25T14:32:29.065075Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (5-5) 2025-06-25T14:32:29.065143Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (6-6) 2025-06-25T14:32:29.065189Z :DEBUG: [] Take Data. Partition 0. Read: {2, 0} (7-7) 2025-06-25T14:32:29.065217Z :DEBUG: [] Take Data. Partition 0. Read: {2, 1} (8-8) 2025-06-25T14:32:29.065250Z :DEBUG: [] Take Data. Partition 0. Read: {2, 2} (9-9) 2025-06-25T14:32:29.065092Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 grpc read done: success# 1, data# { read_request { bytes_size: 681 } } 2025-06-25T14:32:29.065196Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 got read request: guid# b70af970-fb8a695f-860f13bc-478ed9b4 2025-06-25T14:32:29.065301Z :DEBUG: [] [] [d0573ddd-48b962ad-ffe66349-4db0ef3b] [] The application data is transferred to the client. Number of messages 5, size 115 bytes 2025-06-25T14:32:29.065363Z :DEBUG: [] [] [d0573ddd-48b962ad-ffe66349-4db0ef3b] [] Returning serverBytesSize = 0 to budget 2025-06-25T14:32:29.065526Z :INFO: [] [] [d0573ddd-48b962ad-ffe66349-4db0ef3b] Closing read session. Close timeout: 0.000000s 2025-06-25T14:32:29.065575Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:9:0 2025-06-25T14:32:29.065640Z :INFO: [] [] [d0573ddd-48b962ad-ffe66349-4db0ef3b] Counters: { Errors: 0 CurrentSessionLifetimeMs: 33 BytesRead: 115 MessagesRead: 5 BytesReadCompressed: 115 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:32:29.065777Z :NOTICE: [] [] [d0573ddd-48b962ad-ffe66349-4db0ef3b] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:32:29.065829Z :DEBUG: [] [] [d0573ddd-48b962ad-ffe66349-4db0ef3b] [] Abort session to cluster 2025-06-25T14:32:29.066327Z :NOTICE: [] [] [d0573ddd-48b962ad-ffe66349-4db0ef3b] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:32:29.070502Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 grpc read done: success# 0, data# { } 2025-06-25T14:32:29.070530Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 grpc read failed 2025-06-25T14:32:29.070558Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 grpc closed 2025-06-25T14:32:29.070590Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/user session shared/user_7_2_7312408051733431793_v1 is DEAD 2025-06-25T14:32:29.071898Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_7_2_7312408051733431793_v1 2025-06-25T14:32:29.071944Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [7:7519894811839658602:2511] destroyed 2025-06-25T14:32:29.071985Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_7_2_7312408051733431793_v1 2025-06-25T14:32:29.072051Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--topic1] pipe [7:7519894811839658599:2508] disconnected; active server actors: 1 2025-06-25T14:32:29.072074Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--topic1] pipe [7:7519894811839658599:2508] client user disconnected session shared/user_7_2_7312408051733431793_v1 2025-06-25T14:32:29.072602Z :DEBUG: [] MessageGroupId [src-id-test] SessionId [src-id-test|f7712371-462b5829-dc4760c1-35328cac_0] Write session: destroy >> TargetDiscoverer::Basic >> DataShardVolatile::VolatileCommitOnBlobStorageFailure-UseSink [GOOD] >> DataShardVolatile::VolatileTxAbortedOnSplit >> TargetDiscoverer::Negative >> TPQTest::TestReadAndDeleteConsumer [GOOD] >> DataShardVolatile::DistributedWriteThenDropTable [GOOD] >> DataShardVolatile::DistributedWriteThenCopyTable >> TestKinesisHttpProxy::TestPing [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertNullInComplexFkDuplicate [GOOD] Test command err: Trying to start YDB, gRPC: 16892, MsgBus: 63534 2025-06-25T14:31:58.572968Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894680444965533:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:58.573183Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c7b/r3tmp/tmpkGr3uV/pdisk_1.dat 2025-06-25T14:31:59.410129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:59.410211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:59.437464Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:59.446697Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-25T14:31:59.452707Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894680444965348:2080] 1750861918502966 != 1750861918502969 TServer::EnableGrpc on GrpcPort 16892, node 1 2025-06-25T14:31:59.568731Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:59.737065Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:59.737086Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:59.737093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:59.737194Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63534 TClient is connected to server localhost:63534 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:00.597762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:00.626393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:00.764996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:01.056697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:01.155210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:03.402419Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894701919803457:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:03.402572Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:03.564898Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894680444965533:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:03.564972Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:03.912805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:03.949926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:03.988502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.042109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.087518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.178417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.259310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.388872Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519894706214771410:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:04.388984Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:04.389373Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894706214771415:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:04.394140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:04.409009Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894706214771417:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:32:04.519931Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894706214771469:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:05.875407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 8828, MsgBus: 20911 2025-06-25T14:32:09.261187Z node 2 :METADATA_PRO ... /_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 19245, MsgBus: 62081 2025-06-25T14:32:22.358432Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519894784080961475:2081];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c7b/r3tmp/tmpssHGTf/pdisk_1.dat 2025-06-25T14:32:22.418108Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:22.485576Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:22.490437Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519894784080961434:2080] 1750861942335715 != 1750861942335718 TServer::EnableGrpc on GrpcPort 19245, node 3 2025-06-25T14:32:22.503170Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:22.503260Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:22.504888Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:22.548297Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:22.548333Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:22.548341Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:22.548478Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62081 TClient is connected to server localhost:62081 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:23.113723Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:23.131938Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:23.221590Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:23.371589Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:23.445975Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:23.543317Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:27.342486Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519894784080961475:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:27.342587Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:28.177575Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894809850766861:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:28.177685Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:28.302524Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:28.418547Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:28.511416Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:28.603258Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:28.695217Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:28.804673Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:28.904811Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:29.004992Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894814145734842:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:29.005108Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:29.005361Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894814145734847:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:29.010250Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:29.025213Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519894814145734849:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:32:29.079494Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519894814145734900:3428] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:30.535610Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> TestYmqHttpProxy::TestSendMessageFifoQueue >> TestKinesisHttpProxy::TestRequestBadJson |78.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Dirs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestReadAndDeleteConsumer [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] 2025-06-25T14:31:28.500866Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:28.500938Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927938 is [1:157:2176] sender: [1:158:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-25T14:31:28.518934Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:28.541287Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "test" Generation: 1 Important: false } Consumers { Name: "another-user" Generation: 1 Important: false } 2025-06-25T14:31:28.542354Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, 
State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-25T14:31:28.544949Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-25T14:31:28.548883Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-25T14:31:28.550839Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:190:2201] 2025-06-25T14:31:28.554193Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 2, State: StateInit] bootstrapping 2 [1:191:2202] 2025-06-25T14:31:28.555990Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 2 generation 2 [1:191:2202] 2025-06-25T14:31:28.565482Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:211:2215], now have 1 active actors on pipe 2025-06-25T14:31:28.565614Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-06-25T14:31:28.565673Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-06-25T14:31:28.565945Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid' SeqNo: 1 partNo : 0 messageNo: 0 size 1 offset: -1 2025-06-25T14:31:28.565991Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid' SeqNo: 2 partNo : 0 messageNo: 0 size 1 offset: -1 2025-06-25T14:31:28.566077Z node 1 :PERSQUEUE DEBUG: event_helpers.cpp:40: tablet 72057594037927937 topic 'rt3.dc1--asdfgs--topic' partition 0 error: new GetOwnership request needed for owner 2025-06-25T14:31:28.566147Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1434: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 1, Error new GetOwnership request needed for owner 2025-06-25T14:31:28.566183Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:402: Answer error topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: error: new GetOwnership request needed for owner 2025-06-25T14:31:28.566468Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:213:2217], now have 1 active actors on pipe 2025-06-25T14:31:28.566569Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-06-25T14:31:28.566620Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-06-25T14:31:28.566716Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|2ba7b45b-77d9dd3d-d5f47044-740f6e80_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:31:28.566841Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-06-25T14:31:28.566938Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:31:28.567250Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:215:2219], now have 1 active actors on pipe 2025-06-25T14:31:28.567347Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-06-25T14:31:28.567380Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-06-25T14:31:28.567419Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid' SeqNo: 1 partNo : 0 messageNo: 0 size 1 offset: -1 2025-06-25T14:31:28.567451Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid' SeqNo: 2 partNo : 0 messageNo: 0 size 1 offset: -1 2025-06-25T14:31:28.567624Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 part blob processing sourceId 'sourceid' seqNo 1 partNo 0 2025-06-25T14:31:28.568540Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 part blob complete sourceId 'sourceid' seqNo 1 partNo 0 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 72 count 1 nextOffset 1 batches 1 2025-06-25T14:31:28.568641Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 part blob processing sourceId 'sourceid' seqNo 2 partNo 0 2025-06-25T14:31:28.568705Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 part blob complete sourceId 'sourceid' seqNo 2 partNo 0 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 112 count 2 nextOffset 2 batches 1 2025-06-25T14:31:28.569184Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'rt3.dc1--asdfgs--topic' partition 0 compactOffset 0,2 HeadOffset 0 endOffset 0 curOffset 2 d0000000000_00000000000000000000_00000_0000000002_00000? size 94 WTime 331 2025-06-25T14:31:28.569359Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:31:28.569445Z node 1 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 0 partNo 0 count 2 size 94 2025-06-25T14:31:28.571839Z node 1 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 0 count 2 size 94 actorID [1:138:2162] 2025-06-25T14:31:28.571957Z node 1 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72057594037927937' partition 0 offset 0 partno 0 count 2 parts 0 suffix '63' size 94 2025-06-25T14:31:28.572043Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 18 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:31:28.572118Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-25T14:31:28.572225Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'sourceid', Topic: 'rt3.dc1--asdfgs--topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-06-25T14:31:28.572277Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:31:28.572338Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'sourceid', Topic: 'rt3.dc1--asdfgs--topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2025-06-25T14:31:28.572558Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 1 2025-06-25T14:31:28.572613Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 1 2025-06-25T14:31:28.572661Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user another-user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 1 2025-06-25T14:31:28.572693Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user test readTimeStamp for offset 0 initiated queuesize 1 startOffset 0 ReadingTimestamp 1 rrg 1 2025-06-25T14:31:28.572747Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=94, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:31:28.572915Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--asdfgs--topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:31:28.573027Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 Topic 'rt3.dc1--asdfgs--topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 2 max time lag 0ms effective offset 0 2025-06-25T14:31:28.573276Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 94 count 2 last offset 0, current partition end offset: 2 202 ... 
opicName: "rt3.dc1--asdfgs--topic" Version: 1002 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1002 ReadRuleGenerations: 1002 ReadRuleGenerations: 1002 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1002 Important: false } Consumers { Name: "user1" Generation: 1002 Important: true } Consumers { Name: "user2" Generation: 1002 Important: true } 2025-06-25T14:32:26.894658Z node 38 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [38:189:2200] 2025-06-25T14:32:26.897364Z node 38 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [38:189:2200] 2025-06-25T14:32:26.909998Z node 38 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|28819089-5b47bdce-40ed32f1-d9b43e4c_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:32:28.351467Z node 38 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:28.666270Z node 38 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:28.725606Z node 38 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:29.046579Z node 38 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:29.272657Z node 38 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:29.353639Z node 38 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:29.621721Z node 38 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:29.707445Z node 38 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:29.932907Z node 38 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:30.197249Z node 38 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:30.249645Z node 38 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. 
Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 Leader for TabletID 72057594037927937 is [38:111:2141] sender: [38:395:2057] recipient: [38:103:2136] Leader for TabletID 72057594037927937 is [38:111:2141] sender: [38:398:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [38:111:2141] sender: [38:399:2057] recipient: [38:397:2375] Leader for TabletID 72057594037927937 is [38:400:2376] sender: [38:401:2057] recipient: [38:397:2375] 2025-06-25T14:32:30.340777Z node 38 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:30.340872Z node 38 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:32:30.341633Z node 38 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [38:449:2417] 2025-06-25T14:32:30.363621Z node 38 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:32:30.363727Z node 38 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [38:449:2417] Leader for TabletID 72057594037927937 is [38:400:2376] sender: [38:468:2057] recipient: [38:14:2061] 2025-06-25T14:32:30.438108Z node 38 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:30.446344Z node 38 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1003 actor [38:465:2427] txId 42 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 100 MaxSizeInPartition: 104857600 LifetimeSeconds: 172800 ImportantClientId: "user1" ImportantClientId: "user2" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 Version: 1003 LocalDC: true Topic: "topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1002 AllPartitions { PartitionId: 0 } Consumers { Name: "user2" Generation: 1002 Important: true } Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:107:2057] recipient: [39:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:107:2057] recipient: [39:105:2137] Leader for TabletID 72057594037927937 is [39:111:2141] sender: [39:112:2057] recipient: [39:105:2137] 2025-06-25T14:32:31.130688Z node 39 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:31.130760Z node 39 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [39:153:2057] recipient: [39:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [39:153:2057] recipient: [39:151:2172] Leader for TabletID 72057594037927938 is [39:157:2176] sender: [39:158:2057] recipient: [39:151:2172] Leader for TabletID 72057594037927937 is [39:111:2141] sender: [39:183:2057] recipient: [39:14:2061] 2025-06-25T14:32:31.173246Z node 39 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:31.173899Z node 39 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1004 actor [39:181:2194] 
txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 100 MaxSizeInPartition: 104857600 LifetimeSeconds: 172800 ImportantClientId: "user1" ImportantClientId: "user2" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1004 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1004 ReadRuleGenerations: 1004 ReadRuleGenerations: 1004 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1004 Important: false } Consumers { Name: "user1" Generation: 1004 Important: true } Consumers { Name: "user2" Generation: 1004 Important: true } 2025-06-25T14:32:31.174471Z node 39 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [39:189:2200] 2025-06-25T14:32:31.177172Z node 39 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [39:189:2200] 2025-06-25T14:32:31.189525Z node 39 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ac3a11da-bc7cc8ac-8278419b-d35123e1_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:32:32.349397Z node 39 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:32.625112Z node 39 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:32.707485Z node 39 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:32.764279Z node 39 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:32.955344Z node 39 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:33.223203Z node 39 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:33.414703Z node 39 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:33.471179Z node 39 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:33.761690Z node 39 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:33.814898Z node 39 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-25T14:32:34.019536Z node 39 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. 
Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 Leader for TabletID 72057594037927937 is [39:111:2141] sender: [39:393:2057] recipient: [39:103:2136] Leader for TabletID 72057594037927937 is [39:111:2141] sender: [39:396:2057] recipient: [39:395:2373] Leader for TabletID 72057594037927937 is [39:111:2141] sender: [39:397:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [39:398:2374] sender: [39:399:2057] recipient: [39:395:2373] 2025-06-25T14:32:34.098644Z node 39 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:34.098704Z node 39 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:32:34.099178Z node 39 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [39:447:2415] 2025-06-25T14:32:34.107571Z node 39 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:32:34.107639Z node 39 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [39:447:2415] Leader for TabletID 72057594037927937 is [39:398:2374] sender: [39:466:2057] recipient: [39:14:2061] 2025-06-25T14:32:34.146574Z node 39 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:34.149940Z node 39 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1005 actor [39:463:2425] txId 42 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 100 MaxSizeInPartition: 104857600 LifetimeSeconds: 172800 ImportantClientId: "user1" ImportantClientId: "user2" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 Version: 1005 LocalDC: true Topic: "topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1004 AllPartitions { PartitionId: 0 } Consumers { Name: "user2" Generation: 1004 Important: true } >> TopicAutoscaling::PartitionSplit_ManySession_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_AutosplitByLoad >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadFromTimestamp_BeforeAutoscaleAwareSDK >> KqpDocumentApi::RestrictWrite [GOOD] >> KqpDocumentApi::AllowRead >> YdbTableSplit::SplitByLoadWithUpdates [GOOD] >> TableCreation::ConcurrentTableCreation >> KqpQueryService::ExecStats [GOOD] >> KqpQueryService::ExecStatsPlan >> TableCreation::ConcurrentTableCreationWithDifferentVersions >> KqpQueryService::ExecuteCollectMeta [GOOD] >> KqpQueryService::ExecuteQuery >> TableCreation::MultipleTablesCreation >> KqpProxy::CalcPeerStats [GOOD] >> KqpProxy::CreatesScriptExecutionsTable >> KqpProxy::NoLocalSessionExecution ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithUpdates [GOOD] Test command err: 2025-06-25T14:32:07.084154Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894719875601987:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:07.084209Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013ab/r3tmp/tmpWlHeIK/pdisk_1.dat 2025-06-25T14:32:07.674907Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:07.681447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:07.681577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:07.709184Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:07.710797Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 21125, node 1 2025-06-25T14:32:07.792920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:07.792941Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:07.792948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:07.793052Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:32:08.115793Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8478 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:08.402520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:8478 2025-06-25T14:32:11.946003Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894737055472184:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:11.946100Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:12.084052Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894719875601987:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:12.084134Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:12.416377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:12.733300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894741350439673:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:12.733386Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:12.751795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861932631 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861932631 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-25T14:32:13.273953Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894745645407098:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.274163Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.284437Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894745645407111:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.289835Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894745645407112:2356], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.289935Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894745645407110:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.309217Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894745645407147:2362], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.309266Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894745645407149:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.309304Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.318966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976710661:0, at schemeshard: 72057594046644480 2025-06-25T14:32:13.319137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710661:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:32:13.319156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976710661:1, at schemeshard: 72057594046644480 2025-06-25T14:32:13.319227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710661:2, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:32:13.319242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager/pools, operationId: 281474976710661:2, at schemeshard: 72057594046644480 2025-06-25T14:32:13.319307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: ... 9. Ctx: { TraceId: 01jykr2e703vtqe9t0sbmbzgpn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTgwNTg1ZS1iNzA3NTg0Mi1jMGMwNWNmZi1hYWI5ZmIyMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.283356Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714860. Ctx: { TraceId: 01jykr2e797n0h0wejkjhtws1n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzBiNjRiOGEtZDdiZjU4ODAtZGNiZGY3ODEtZWQwZTE0NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.289972Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714861. Ctx: { TraceId: 01jykr2e7w96tcgw9nfq0znmwh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTUxMjJmYjEtYmRhYmJjOTctZDNjNThkYjgtMjI1OTQ0NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.291774Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714864. Ctx: { TraceId: 01jykr2e86c9fkf5a0qrj9rxbk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTJhZWQxMDItZTZiNTZjYzMtZmUwOTNiMGUtNmRiMTAzZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.300901Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714862. Ctx: { TraceId: 01jykr2e81bh5ezd0n0vzk57nk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTA4YjdhNjItNDE3MGRmODctOTBhODA5MjYtZjdjNjMwMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.302744Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714863. Ctx: { TraceId: 01jykr2e81frc79gqs9qz3k4e8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjVlYmMyY2QtYjFkMGFjNDMtYWU3YTg4ZjQtOWZmNjhmNzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.316766Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714867. 
Ctx: { TraceId: 01jykr2e8bamr1tyvmhdmv76d8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmI4NGU5OGUtNjhkZDk2MzEtMjIyNjEwZDQtYWNkMWVlY2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.323905Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714865. Ctx: { TraceId: 01jykr2e8ab59vmmprmk216be1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzVlNjQxNmYtYzY1ODU0ZGYtMzE2OTEzZi05NGZkZTViNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.325359Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714866. Ctx: { TraceId: 01jykr2e8a9x2zrchakx2xa6zg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODNjYTlkNzgtZmRkODQ5Y2QtOGEzNDNjZi02YTU2ZmZkMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.331256Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714868. Ctx: { TraceId: 01jykr2e8ge4jx0pn8mq8kxxkg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDE1OThhOGMtZTUzOGY0NjEtYjk4NGFjNmItNDEwN2FjODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.343048Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714869. Ctx: { TraceId: 01jykr2e9b5ekn36et5mqqsqdn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTgwNTg1ZS1iNzA3NTg0Mi1jMGMwNWNmZi1hYWI5ZmIyMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.344504Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714870. Ctx: { TraceId: 01jykr2e9b7qj4d4pgrzpbpst8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzBiNjRiOGEtZDdiZjU4ODAtZGNiZGY3ODEtZWQwZTE0NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.345744Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714871. Ctx: { TraceId: 01jykr2e9bar7w6rc2d9e6gfvw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTJhZWQxMDItZTZiNTZjYzMtZmUwOTNiMGUtNmRiMTAzZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.346882Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714872. Ctx: { TraceId: 01jykr2e9bdt6b37j3wmr11twa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTUxMjJmYjEtYmRhYmJjOTctZDNjNThkYjgtMjI1OTQ0NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.354409Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714873. Ctx: { TraceId: 01jykr2ea35yk6h3xxypb1rrxj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmI4NGU5OGUtNjhkZDk2MzEtMjIyNjEwZDQtYWNkMWVlY2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.355706Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714874. Ctx: { TraceId: 01jykr2ea336k6t9q4ck69bxax, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjVlYmMyY2QtYjFkMGFjNDMtYWU3YTg4ZjQtOWZmNjhmNzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.357407Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714875. 
Ctx: { TraceId: 01jykr2ea6f2qkd3y937jxnt6g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTA4YjdhNjItNDE3MGRmODctOTBhODA5MjYtZjdjNjMwMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-25T14:32:33.372852Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714876. Ctx: { TraceId: 01jykr2eac2bz778y2e7rmg4nx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODNjYTlkNzgtZmRkODQ5Y2QtOGEzNDNjZi02YTU2ZmZkMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861932631 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-25T14:32:33.393185Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714883. Ctx: { TraceId: 01jykr2eb068wt2vsn2ptdqpna, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDE1OThhOGMtZTUzOGY0NjEtYjk4NGFjNmItNDEwN2FjODg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.393185Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714880. Ctx: { TraceId: 01jykr2eawav01y5ak88a2pky9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTUxMjJmYjEtYmRhYmJjOTctZDNjNThkYjgtMjI1OTQ0NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.394606Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714882. Ctx: { TraceId: 01jykr2eb0dw478syresh9qebx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmI4NGU5OGUtNjhkZDk2MzEtMjIyNjEwZDQtYWNkMWVlY2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.395390Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714881. Ctx: { TraceId: 01jykr2eb0csekk380am4j22ea, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTJhZWQxMDItZTZiNTZjYzMtZmUwOTNiMGUtNmRiMTAzZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.395807Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714877. Ctx: { TraceId: 01jykr2eam1s1kvc98y9jzychk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzVlNjQxNmYtYzY1ODU0ZGYtMzE2OTEzZi05NGZkZTViNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.396709Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714879. 
Ctx: { TraceId: 01jykr2eav3cahj7dgp30as4b0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTgwNTg1ZS1iNzA3NTg0Mi1jMGMwNWNmZi1hYWI5ZmIyMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.396982Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714878. Ctx: { TraceId: 01jykr2eav6v4rs034dtyme9zc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzBiNjRiOGEtZDdiZjU4ODAtZGNiZGY3ODEtZWQwZTE0NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.399725Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714884. Ctx: { TraceId: 01jykr2ebc8dwjjns70b2j18r7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjVlYmMyY2QtYjFkMGFjNDMtYWU3YTg4ZjQtOWZmNjhmNzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.399825Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714885. Ctx: { TraceId: 01jykr2ebccfw63wns54wg5zpj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTA4YjdhNjItNDE3MGRmODctOTBhODA5MjYtZjdjNjMwMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:33.401102Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714886. Ctx: { TraceId: 01jykr2ebm07050a2370dw3q0b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODNjYTlkNzgtZmRkODQ5Y2QtOGEzNDNjZi02YTU2ZmZkMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861932631 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... 
(TRUNCATED) Table has 2 shards >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReBalancingAfterSplit_sessionsWithPartition >> KqpProxy::PassErrroViaSessionActor >> TargetDiscoverer::InvalidCredentials [GOOD] >> KikimrIcGateway::TestLoadDataSourceProperties [GOOD] |78.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |78.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |78.6%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut >> TargetDiscoverer::Basic [GOOD] >> YdbTableSplit::SplitByLoadWithDeletes [GOOD] >> CommitOffset::DistributedTxCommit_CheckSessionResetAfterCommit [GOOD] >> CommitOffset::DistributedTxCommit_CheckOffsetCommitForDifferentCases >> KqpProxy::InvalidSessionID >> KqpQueryService::ShowCreateTableOnView [GOOD] >> KqpQueryService::ShowCreateView >> TargetDiscoverer::Negative [GOOD] >> TestKinesisHttpProxy::TestRequestWithWrongRegion [GOOD] >> TargetDiscoverer::SystemObjects [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Basic [GOOD] Test command err: 2025-06-25T14:32:34.341028Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894833606036885:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:34.379402Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ca1/r3tmp/tmpmPgtZB/pdisk_1.dat 2025-06-25T14:32:34.818402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:34.818493Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:34.823469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:34.831407Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:34.832936Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894833606036856:2080] 1750861954330477 != 1750861954330480 TClient is connected to server localhost:3923 TServer::EnableGrpc on GrpcPort 26996, node 1 2025-06-25T14:32:35.223645Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:35.223669Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:35.223677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:35.223818Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:32:35.378593Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3923 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:36.065850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:36.089186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:32:36.097649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:36.373345Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1750861956130, tx_id: 1 } } } 2025-06-25T14:32:36.373371Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-25T14:32:36.423139Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750861956214, tx_id: 281474976710658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-25T14:32:36.423164Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-25T14:32:38.416733Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750861956214, tx_id: 281474976710658 } } } 2025-06-25T14:32:38.416760Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Table 2025-06-25T14:32:38.416801Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table, dstPath# /Root/Replicated/Table, kind# Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::InvalidCredentials [GOOD] Test command err: 2025-06-25T14:32:33.762006Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894830322036607:2240];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:33.762220Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000caa/r3tmp/tmpny95M0/pdisk_1.dat 2025-06-25T14:32:34.475945Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:34.480441Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894830322036387:2080] 1750861953657488 != 1750861953657491 2025-06-25T14:32:34.501797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:34.501890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:34.527819Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:34.660190Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19999 TServer::EnableGrpc on GrpcPort 4751, node 1 2025-06-25T14:32:34.893338Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:34.893370Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:34.893381Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:34.893516Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19999 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:35.599732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:35.643168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:36.023114Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: CLIENT_UNAUTHENTICATED, issues: {
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/types/credentials/login/login.cpp:217: Cannot find user: user } } } 2025-06-25T14:32:36.023174Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root, status# CLIENT_UNAUTHENTICATED, issues# {
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/types/credentials/login/login.cpp:217: Cannot find user: user } |78.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |78.7%| [LD] {RESULT} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestLoadDataSourceProperties [GOOD] Test command err: Trying to start YDB, gRPC: 6187, MsgBus: 3530 2025-06-25T14:31:40.488579Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894604546657383:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:40.492164Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013a2/r3tmp/tmpFNR9Uh/pdisk_1.dat 2025-06-25T14:31:41.031489Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894604546657356:2080] 1750861900485150 != 1750861900485153 2025-06-25T14:31:41.040219Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:41.051647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:41.051775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:41.057173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6187, node 1 2025-06-25T14:31:41.252850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:31:41.252875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:31:41.252880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:31:41.252992Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3530 2025-06-25T14:31:41.512446Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3530 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:41.964669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:31:41.992130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:31:42.010894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:42.175850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:42.337033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:42.409784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:44.186682Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894621726528166:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:44.186820Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:44.739201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:44.805090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:44.877947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:44.983688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:45.028927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:45.085671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:45.129173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:45.205764Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894626021496124:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:45.205842Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:45.206021Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894626021496129:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:45.209309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:45.219476Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894626021496131:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:31:45.285092Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894626021496182:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:45.488428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894604546657383:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:45.488508Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:31:46.528642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work ... undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:15.214171Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:15.397970Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:15.495286Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:18.272443Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519894744920188962:2175];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:18.272544Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:19.268477Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894770689994258:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:19.268601Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:19.556077Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:19.612587Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:19.691255Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:19.785518Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:19.886826Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:19.941661Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:20.060304Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:20.193346Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894774984962223:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:20.193447Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:20.194034Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894774984962228:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:20.200333Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:20.241659Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519894774984962230:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:32:20.328338Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519894774984962285:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:22.448015Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:23.251929Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:32:24.420010Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:25.811599Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710689:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:27.350089Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710698:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:32:28.656911Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:32:28.656939Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:29.223888Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710703:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:32:30.846683Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:30.988215Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:32:34.374823Z node 3 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykr23m35hbwran30cvswb38", SessionId: ydb://session/3?node_id=3&id=NTU0OWE3YzYtMWFkZDY1NmItNmNmMjY3OTUtMzE2M2ZkMzI=, Slow query, duration: 11.967887s, status: SUCCESS, user: UNAUTHENTICATED, results: 0b, text: "CREATE OBJECT myPasswordSecretId (TYPE SECRET) WITH value = `pswd`;", parameters: 0b 2025-06-25T14:32:37.540343Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710741:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) >> TestKinesisHttpProxy::TestRequestWithIAM ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithDeletes [GOOD] Test command err: 2025-06-25T14:32:08.969364Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894724677176864:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:08.969558Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00138e/r3tmp/tmp45EngP/pdisk_1.dat 2025-06-25T14:32:09.755457Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:09.810530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:09.810637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:09.816869Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:09.948519Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 30034, node 1 2025-06-25T14:32:10.402380Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:10.402402Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:10.402414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:10.402541Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4376 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:11.490277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:4376 2025-06-25T14:32:13.966899Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894724677176864:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:13.966960Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:15.010462Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894754741948819:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:15.010591Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:15.271350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:15.596233Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894754741949004:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:15.596293Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:15.623314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861935557 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo 2025-06-25T14:32:15.832857Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894754741949094:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:15.832961Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861935557 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-25T14:32:15.840045Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894754741949106:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:15.840104Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894754741949107:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:15.844887Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894754741949115:2353], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:15.844968Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894754741949119:2357], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:15.848674Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:15.857512Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894754741949151:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:15.858801Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894754741949153:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:15.861944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976710660:0, at schemeshard: 72057594046644480 2025-06-25T14:32:15.862136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710660:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:32:15.862158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976710660:1, at schemeshard: 72057594046644480 2025-06-25T14:32:15.862852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710660:2, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:32:15.862905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:11 ... 4. Ctx: { TraceId: 01jykr2gqw8129tbptnrjarvfa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTgzYmNjYjItYjE1ZmJjYzItNzBkZjllMDktM2M3NDkxMmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.859675Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715395. Ctx: { TraceId: 01jykr2gqw579aaqdkkfttg50d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNlODE2NzAtYWQ3NTVkMDYtZmZmMmNjYzAtYTMwMWU0ZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.879163Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715396. Ctx: { TraceId: 01jykr2gr045em5xfrr6eq74x9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzNlOTk4NTgtNTJkZmM1ZTktOTBlMmZiNGMtN2Y4Y2M1ZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.890079Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715397. Ctx: { TraceId: 01jykr2gqgazd2z8v35dc1jw62, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmEzMWMzMWUtNjEzNGFiZjAtOTVjNWY0YjQtY2ZmYjhhNzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.918671Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715398. Ctx: { TraceId: 01jykr2grm395f632hbwhewpnt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmZiODc2MDItNzg2NjA3NjMtOGQ0ZTAxZjktZDZlZjBmZjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.921063Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715399. Ctx: { TraceId: 01jykr2gsm2eepcp5ensa1ayaw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTUwNjk0ZGQtYmMwYjI4OGItZmU0YWU0YTUtNmIyOGQ2MTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.925201Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715400. Ctx: { TraceId: 01jykr2gt597s94ytyrqyjn03w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmM5YTY1YTYtYzg2YWNhZjktOTg0ZDhkZjItOGY2YWFjZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.931072Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715401. 
Ctx: { TraceId: 01jykr2gtn2qc8wg0q1sbqekjb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWIzYzEzYmItNjllMDY4MzAtY2JiOWI3MWItODkzOGE2ZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.932676Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715402. Ctx: { TraceId: 01jykr2gtnd6sgmnxmng0ec0fj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGE2MDUzZDEtMTk2OTI0M2EtNzdjZTRmMWMtNjA3NDQ4NzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.945041Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715404. Ctx: { TraceId: 01jykr2gv1c58p4s6r6xgx4eyz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNlODE2NzAtYWQ3NTVkMDYtZmZmMmNjYzAtYTMwMWU0ZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.946818Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715405. Ctx: { TraceId: 01jykr2gtxcy715hrs3gysje91, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTgzYmNjYjItYjE1ZmJjYzItNzBkZjllMDktM2M3NDkxMmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.949288Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715403. Ctx: { TraceId: 01jykr2gtxbrw4tp8pe0a8tft9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTdlMjk5NjUtZGM0MWI2MzMtOGRmNjQ2ZjctMjkzMmI2NDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.955656Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715406. Ctx: { TraceId: 01jykr2gvc1msfr7834m3kgdrf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzNlOTk4NTgtNTJkZmM1ZTktOTBlMmZiNGMtN2Y4Y2M1ZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-25T14:32:35.960552Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715407. Ctx: { TraceId: 01jykr2gvg40sc8wtksz7zf4cd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmM5YTY1YTYtYzg2YWNhZjktOTg0ZDhkZjItOGY2YWFjZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.965880Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715408. Ctx: { TraceId: 01jykr2gvg635a6kevkrphkj2p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTUwNjk0ZGQtYmMwYjI4OGItZmU0YWU0YTUtNmIyOGQ2MTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.967566Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715409. Ctx: { TraceId: 01jykr2gvf19dxft8c6ey974sg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmEzMWMzMWUtNjEzNGFiZjAtOTVjNWY0YjQtY2ZmYjhhNzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:35.968932Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715410. Ctx: { TraceId: 01jykr2gvf7becqbev8tbpwsfp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmZiODc2MDItNzg2NjA3NjMtOGQ0ZTAxZjktZDZlZjBmZjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:32:35.972923Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715411. Ctx: { TraceId: 01jykr2gvg298dneavrgyy806c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWIzYzEzYmItNjllMDY4MzAtY2JiOWI3MWItODkzOGE2ZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861935557 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-25T14:32:36.013949Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715412. Ctx: { TraceId: 01jykr2gw85746fk7gykk3995n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTgzYmNjYjItYjE1ZmJjYzItNzBkZjllMDktM2M3NDkxMmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:36.013958Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715413. Ctx: { TraceId: 01jykr2gvw3304s2z523gmxhc7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGE2MDUzZDEtMTk2OTI0M2EtNzdjZTRmMWMtNjA3NDQ4NzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:36.015781Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715415. Ctx: { TraceId: 01jykr2gwcfh4zywjq53q3g82s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTdlMjk5NjUtZGM0MWI2MzMtOGRmNjQ2ZjctMjkzMmI2NDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:36.015991Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715414. Ctx: { TraceId: 01jykr2gwcfxfp9b2gj2avygan, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNlODE2NzAtYWQ3NTVkMDYtZmZmMmNjYzAtYTMwMWU0ZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:36.022345Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715417. Ctx: { TraceId: 01jykr2gx04zkb3g6q318haexa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzNlOTk4NTgtNTJkZmM1ZTktOTBlMmZiNGMtN2Y4Y2M1ZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:36.023703Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715416. Ctx: { TraceId: 01jykr2gx2c0aafryxxg43w631, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmM5YTY1YTYtYzg2YWNhZjktOTg0ZDhkZjItOGY2YWFjZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:36.027365Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715418. 
Ctx: { TraceId: 01jykr2gxde5r5324zp08q5pmk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmZiODc2MDItNzg2NjA3NjMtOGQ0ZTAxZjktZDZlZjBmZjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:36.027972Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715419. Ctx: { TraceId: 01jykr2gxda8w4q3rjd2zx60fp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTUwNjk0ZGQtYmMwYjI4OGItZmU0YWU0YTUtNmIyOGQ2MTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:36.028933Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715420. Ctx: { TraceId: 01jykr2gxd6x1race1fc9a3mx9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmEzMWMzMWUtNjEzNGFiZjAtOTVjNWY0YjQtY2ZmYjhhNzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:36.029437Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715421. Ctx: { TraceId: 01jykr2gxd99b9y8t8kfpe995h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWIzYzEzYmItNjllMDY4MzAtY2JiOWI3MWItODkzOGE2ZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861935557 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... 
(TRUNCATED) Table has 2 shards >> TargetDiscoverer::IndexedTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Negative [GOOD] Test command err: 2025-06-25T14:32:34.542028Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894835189810038:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:34.547467Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ca6/r3tmp/tmp0q9O7U/pdisk_1.dat 2025-06-25T14:32:35.384163Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894835189809998:2080] 1750861954516832 != 1750861954516835 2025-06-25T14:32:35.403469Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:35.410111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:35.410191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:35.421880Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:35.549888Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3628 TServer::EnableGrpc on GrpcPort 63080, node 1 2025-06-25T14:32:36.120076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:36.120100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:36.120110Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:36.120240Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3628 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:32:37.009702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:37.107105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:32:37.237736Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: SCHEME_ERROR, issues: {
: Error: Path not found } } } 2025-06-25T14:32:37.237788Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root/Table, status# SCHEME_ERROR, issues# {
: Error: Path not found } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::SystemObjects [GOOD] Test command err: 2025-06-25T14:32:33.700739Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894830238137704:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:33.700789Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000caf/r3tmp/tmpZNH3P4/pdisk_1.dat 2025-06-25T14:32:34.368680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:34.368769Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:34.385936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:34.396257Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:34.579949Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2571 TServer::EnableGrpc on GrpcPort 1104, node 1 2025-06-25T14:32:35.006522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:35.006542Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:35.006549Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:35.006651Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2571 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:35.658943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:32:35.710783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:32:35.714151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:35.874427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-25T14:32:35.886797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:36.086799Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1750861955731, tx_id: 1 } } } 2025-06-25T14:32:36.086824Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-25T14:32:36.135522Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750861955829, tx_id: 281474976715658 } }, { name: export-100500, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1750861955920, tx_id: 281474976715659 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-25T14:32:36.135550Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-25T14:32:38.701716Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894830238137704:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:38.701813Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:39.325292Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750861955829, tx_id: 281474976715658 } } } 2025-06-25T14:32:39.325325Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Table 2025-06-25T14:32:39.325384Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table, dstPath# /Root/Replicated/Table, 
kind# Table >> TableCreation::ConcurrentTableCreation [GOOD] >> TableCreation::ConcurrentMultipleTablesCreation >> ScriptExecutionsTest::RunCheckLeaseStatus >> TestKinesisHttpProxy::PutRecordsWithLongExplicitHashKey [GOOD] >> TestKinesisHttpProxy::TestRequestBadJson [GOOD] >> TPQTest::TestMaxTimeLagRewind [GOOD] >> TestKinesisHttpProxy::PutRecordsWithIncorrectHashKey >> TargetDiscoverer::Dirs [GOOD] >> TestYmqHttpProxy::TestSendMessageFifoQueue [GOOD] >> TSubDomainTest::Boot >> TSubDomainTest::CreateTableInsideAndForceDeleteSubDomain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::IndexedTable [GOOD] Test command err: 2025-06-25T14:32:34.248928Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894833512653779:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:34.248979Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000cb7/r3tmp/tmpqNOOJw/pdisk_1.dat 2025-06-25T14:32:35.040275Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:35.042603Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:35.042690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:35.059359Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:35.260659Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5013 TServer::EnableGrpc on GrpcPort 26523, node 1 2025-06-25T14:32:35.594763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:35.594797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:35.594808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:35.594943Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5013 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:36.460221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:36.497197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:32:36.505258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:37.490651Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1750861956529, tx_id: 1 } } } 2025-06-25T14:32:37.490680Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-25T14:32:37.539568Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750861957152, tx_id: 281474976710658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-25T14:32:37.539591Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-25T14:32:39.252428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894833512653779:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:39.252560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:40.112063Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750861957152, tx_id: 281474976710658 } } } 2025-06-25T14:32:40.112099Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Table 2025-06-25T14:32:40.112142Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table, dstPath# /Root/Replicated/Table, kind# Table 2025-06-25T14:32:40.112251Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:140: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table/Index, dstPath# /Root/Replicated/Table/Index/indexImplTable, kind# IndexTable >> TSubDomainTest::FailIfAffectedSetNotInterior >> TestYmqHttpProxy::TestSendMessageWithAttributes >> KqpProxy::PassErrroViaSessionActor [GOOD] >> KqpProxy::NodeDisconnectedTest >> TSubDomainTest::CreateDummyTabletsInDifferentDomains >> DataShardVolatile::VolatileTxAbortedOnSplit [GOOD] >> DataShardVolatile::VolatileTxAbortedOnDrop >> TestKinesisHttpProxy::TestConsumersEmptyNames >> TSubDomainTest::LsLs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestMaxTimeLagRewind [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] 2025-06-25T14:31:18.846969Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:18.847049Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for 
TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927938 is [1:157:2176] sender: [1:158:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-25T14:31:18.875707Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:18.891742Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:31:18.892691Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-25T14:31:18.894832Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-25T14:31:18.896782Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-25T14:31:18.897914Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:190:2201] 2025-06-25T14:31:18.905121Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|fbe68153-29de0929-b08b1c33-4b79c61a_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:112:2057] recipient: [2:105:2137] 2025-06-25T14:31:19.434123Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:19.434209Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:158:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:183:2057] recipient: [2:14:2061] 2025-06-25T14:31:19.452997Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:19.453990Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 
2 actor [2:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-25T14:31:19.454530Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:189:2200] 2025-06-25T14:31:19.456166Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:189:2200] 2025-06-25T14:31:19.457507Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:190:2201] 2025-06-25T14:31:19.458863Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:190:2201] 2025-06-25T14:31:19.464103Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|cc2d42c3-7a9bd18c-e6e527cb-afd0b50b_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:112:2057] recipient: [3:105:2137] 2025-06-25T14:31:19.837204Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:19.837284Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927938 is [3:157:2176] sender: [3:158:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:183:2057] recipient: [3:14:2061] 2025-06-25T14:31:19.854305Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:19.855157Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 3 actor [3:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: 
false } 2025-06-25T14:31:19.855753Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:189:2200] 2025-06-25T14:31:19.857950Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [3:189:2200] 2025-06-25T14:31:19.859718Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [3:190:2201] 2025-06-25T14:31:19.861534Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [3:190:2201] 2025-06-25T14:31:19.867135Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3518a239-58e5ec41-caf978db-1e95f489_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:107:2057] recipient: [4:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:107:2057] recipient: [4:105:2137] Leader for TabletID 72057594037927937 is [4:111:2141] sender: [4:112:2057] recipient: [4:105:2137] 2025-06-25T14:31:20.260388Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:20.260486Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:153:2057] recipient: [4:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:153:2057] recipient: [4:151:2172] Leader for TabletID 72057594037927938 is [4:157:2176] sender: [4:158:2057] recipient: [4:151:2172] Leader for TabletID 72057594037927937 is [4:111:2141] sender: [4:183:2057] recipient: [4:14:2061] 2025-06-25T14:31:20.280419Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:20.281365Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 4 actor [4:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 4 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 4 ReadRuleGenerations: 4 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 4 Important: false } Consumers { Name: "aaa" Generation: 4 Important: true } 2025-06-25T14:31:20.282040Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [4:189:2200] 2025-06-25T14:31:20.284623Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [4:189:2200] 2025-06-25T14:31:20.287405Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [4:190:2201] 
2025-06-25T14:31:20.289228Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [4:190:2201] 2025-06-25T14:31:20.303181Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|930a7f4b-6a1a4156-7c5d431c-a49b5539_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 1 } Cookie: 123 } via pipe: [4:181:2194] Leader for TabletID 72057594037927937 is [0: ... LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 74 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 74 ReadRuleGenerations: 74 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 74 Important: false } Consumers { Name: "aaa" Generation: 74 Important: true } 2025-06-25T14:32:38.974908Z node 57 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [57:187:2198] 2025-06-25T14:32:38.984851Z node 57 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [57:187:2198] 2025-06-25T14:32:38.988074Z node 57 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [57:188:2199] 2025-06-25T14:32:38.990455Z node 57 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [57:188:2199] 2025-06-25T14:32:39.024011Z node 57 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a1d8069f-d83dccd2-da80151-2fb46d59_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:32:39.284653Z node 57 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|abbea448-826ad99d-aac1093a-63ff7842_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:32:39.483961Z node 57 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|29f6b2de-c9818560-6131f308-2d4c28d7_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:32:39.658752Z node 57 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|7d371836-efccd3a3-b91b27be-4913a35e_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:32:39.855760Z node 57 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|5a062bfa-d2068332-2f53523f-dacc5f9_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [57:179:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 2147483647 MaxTimeLagMs: 180000 } Cookie: 123 } via pipe: [57:179:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 22 Count: 1 Bytes: 2147483647 MaxTimeLagMs: 180000 } Cookie: 123 } via pipe: 
[57:179:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 4 Count: 1 Bytes: 2147483647 MaxTimeLagMs: 1000 } Cookie: 123 } via pipe: [57:179:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 2147483647 ReadTimestampMs: 120291 } Cookie: 123 } via pipe: [57:179:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 22 Count: 1 Bytes: 2147483647 ReadTimestampMs: 120291 } Cookie: 123 } via pipe: [57:179:2192] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 4 Count: 1 Bytes: 2147483647 ReadTimestampMs: 299291 } Cookie: 123 } via pipe: [57:179:2192] 2025-06-25T14:32:40.169488Z node 57 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:40.179890Z node 57 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 75 actor [57:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 75 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 74 ReadRuleGenerations: 74 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 299291 Generation: 74 Important: false } Consumers { Name: "aaa" Generation: 74 Important: true } Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [57:179:2192] Leader for TabletID 72057594037927937 is [0:0:0] sender: [58:107:2057] recipient: [58:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [58:107:2057] recipient: [58:105:2137] Leader for TabletID 72057594037927937 is [58:111:2141] sender: [58:112:2057] recipient: [58:105:2137] 2025-06-25T14:32:40.999419Z node 58 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:40.999492Z node 58 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [58:153:2057] recipient: [58:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [58:153:2057] recipient: [58:151:2172] Leader for TabletID 72057594037927938 is [58:157:2176] sender: [58:158:2057] recipient: [58:151:2172] Leader for TabletID 72057594037927937 is [58:111:2141] sender: [58:183:2057] recipient: [58:14:2061] 2025-06-25T14:32:41.045805Z node 58 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:41.046851Z node 58 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 76 actor [58:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 
MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 76 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 76 ReadRuleGenerations: 76 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 76 Important: false } Consumers { Name: "aaa" Generation: 76 Important: true } 2025-06-25T14:32:41.047759Z node 58 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [58:189:2200] 2025-06-25T14:32:41.055208Z node 58 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [58:189:2200] 2025-06-25T14:32:41.067429Z node 58 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [58:190:2201] 2025-06-25T14:32:41.101986Z node 58 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [58:190:2201] 2025-06-25T14:32:41.135457Z node 58 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d64a25be-4d4e8930-4cb6bc70-d80a656f_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:32:41.381555Z node 58 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|96b939aa-a88775dd-c96b2b3f-ad528aef_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:32:41.579365Z node 58 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bd22781c-ead931a5-30439d13-f1f5785a_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:32:41.799834Z node 58 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c780deec-720a896e-96fa707b-549bb042_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:32:42.043491Z node 58 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b01e521f-d66baf89-2add7d29-5b85210f_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [58:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 2147483647 MaxTimeLagMs: 180000 } Cookie: 123 } via pipe: [58:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 22 Count: 1 Bytes: 2147483647 MaxTimeLagMs: 180000 } Cookie: 123 } via pipe: [58:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 4 Count: 1 Bytes: 2147483647 MaxTimeLagMs: 1000 } Cookie: 123 } via pipe: [58:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 2147483647 ReadTimestampMs: 120292 } Cookie: 123 } via pipe: [58:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 22 Count: 1 Bytes: 2147483647 ReadTimestampMs: 120292 } Cookie: 123 } via pipe: [58:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 4 Count: 1 Bytes: 
2147483647 ReadTimestampMs: 299292 } Cookie: 123 } via pipe: [58:181:2194] 2025-06-25T14:32:42.602989Z node 58 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:32:42.622275Z node 58 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 77 actor [58:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 77 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 76 ReadRuleGenerations: 76 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 299292 Generation: 76 Important: false } Consumers { Name: "aaa" Generation: 76 Important: true } Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 2147483647 } Cookie: 123 } via pipe: [58:181:2194] >> KqpProxy::InvalidSessionID [GOOD] >> KqpProxy::LoadedMetadataAfterCompilationTimeout >> TableCreation::ConcurrentTableCreationWithDifferentVersions [GOOD] >> TableCreation::ConcurrentUpdateTable >> TableCreation::MultipleTablesCreation [GOOD] >> TableCreation::CreateOldTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Dirs [GOOD] Test command err: 2025-06-25T14:32:35.613106Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894839628133639:2215];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:35.613168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c93/r3tmp/tmpWtKcly/pdisk_1.dat 2025-06-25T14:32:36.340521Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894839628133463:2080] 1750861955582834 != 1750861955582837 2025-06-25T14:32:36.350808Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:36.374386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:36.374481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:36.381207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:36.580653Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28031 TServer::EnableGrpc on GrpcPort 7335, node 1 2025-06-25T14:32:36.992876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:36.992907Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:36.992920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:36.993027Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28031 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:37.823446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:37.852517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:32:37.915790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:38.253063Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1750861957880, tx_id: 1 } } } 2025-06-25T14:32:38.253090Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-25T14:32:38.320654Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Dir, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1750861957922, tx_id: 281474976710658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-25T14:32:38.320683Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-25T14:32:38.358857Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750861958097, tx_id: 281474976710659 } }] } } 2025-06-25T14:32:38.358887Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root/Dir 2025-06-25T14:32:40.616489Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894839628133639:2215];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:40.616583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:41.887161Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750861958097, tx_id: 281474976710659 } } } 2025-06-25T14:32:41.887193Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Dir/Table 2025-06-25T14:32:41.887225Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Dir/Table, dstPath# /Root/Replicated/Dir/Table, kind# Table >> KqpProxy::NoLocalSessionExecution [GOOD] >> KqpProxy::NoUserAccessToScriptExecutionsTable |78.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |78.7%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator >> KqpDocumentApi::AllowRead [GOOD] >> KqpDocumentApi::RestrictAlter >> TSubDomainTest::DeleteTableAndThenForceDeleteSubDomain >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable-UseSimilarity [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel2+Nullable-UseSimilarity >> KqpQueryService::ExecStatsPlan [GOOD] >> KqpQueryService::ExecStatsAst >> TSubDomainTest::CreateTablet >> TSubDomainTest::Boot [GOOD] >> 
TSubDomainTest::CheckAccessCopyTable >> TSubDomainTest::UserAttributes >> KqpQueryService::ExecuteQuery [GOOD] >> KqpQueryService::ExecuteQueryExplicitBeginCommitRollback >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotWritten_Test >> TableCreation::ConcurrentMultipleTablesCreation [GOOD] >> DataShardVolatile::DistributedWriteThenCopyTable [GOOD] >> DataShardVolatile::DistributedWriteThenBulkUpsert |78.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |78.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing >> TestKinesisHttpProxy::TestRequestWithIAM [GOOD] >> TSubDomainTest::StartAndStopTenanNode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::ConcurrentMultipleTablesCreation [GOOD] Test command err: 2025-06-25T14:32:36.731428Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894842198913946:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:36.735623Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000cdb/r3tmp/tmpxOFicx/pdisk_1.dat 2025-06-25T14:32:37.197380Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:37.219303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:37.219388Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:37.222224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2734 TServer::EnableGrpc on GrpcPort 6191, node 1 2025-06-25T14:32:37.531577Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:37.531601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:37.531611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:37.531714Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: 2025-06-25T14:32:37.736568Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:37.781389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:39.770941Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:39.772431Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:39.797168Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:32:39.797273Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:39.797311Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:39.797341Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T14:32:39.797366Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:39.799061Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-25T14:32:39.799079Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-25T14:32:39.799086Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-25T14:32:39.799111Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-25T14:32:39.799137Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-25T14:32:39.799137Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-25T14:32:39.799201Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. 
Describe result: PathErrorUnknown 2025-06-25T14:32:39.799208Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-25T14:32:39.799223Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-25T14:32:39.801399Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:39.803494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:39.806082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:39.808047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:39.814060Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-25T14:32:39.814106Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976715658 2025-06-25T14:32:39.814271Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-25T14:32:39.814294Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976715659 2025-06-25T14:32:39.814378Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-25T14:32:39.814430Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976715660 2025-06-25T14:32:39.929657Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976715658. Doublechecking... 2025-06-25T14:32:39.966309Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976715660. Doublechecking... 2025-06-25T14:32:39.998562Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-25T14:32:40.000779Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. 
Request: create. Transaction completed: 281474976715659. Doublechecking... 2025-06-25T14:32:40.037163Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-25T14:32:40.064088Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-06-25T14:32:40.065890Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: d39c4200-797fbe2b-23fc9e01-ec05de62, Bootstrap. Database: /dc-1 2025-06-25T14:32:40.098448Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211749.453225s seconds to be completed 2025-06-25T14:32:40.101259Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=YzljNmZhZmEtZWM2YWE4NjAtMjU5MmE4OGYtZjVjY2RhOGI=, workerId: [1:7519894859378783867:2294], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-25T14:32:40.101398Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:40.105122Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: d39c4200-797fbe2b-23fc9e01-ec05de62, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-25T14:32:40.105724Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=YzljNmZhZmEtZWM2YWE4NjAtMjU5MmE4OGYtZjVjY2RhOGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [1:7519894859378783867:2294] 2025-06-25T14:32:40.105754Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 3 timeout: 300.000000s actor id: ... _PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-25T14:32:46.702901Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-25T14:32:46.704886Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-25T14:32:46.709174Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-25T14:32:46.711598Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. 
Column diff is empty, finishing 2025-06-25T14:32:46.716469Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-25T14:32:46.716515Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-25T14:32:46.716561Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-25T14:32:46.716633Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-25T14:32:46.717652Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-25T14:32:46.789334Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 10, sender: [2:7519894887145519372:2335], selfId: [2:7519894869965649103:2079], source: [2:7519894887145519351:2334] 2025-06-25T14:32:46.790024Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1f86e3e5-84052b7d-2122ac58-82841be4, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzQ0Yjc5NjYtNDFkOGZmYzYtNjIxYzU1ZjItNzEzYTU2OTA=, TxId: 2025-06-25T14:32:46.790046Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1f86e3e5-84052b7d-2122ac58-82841be4, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzQ0Yjc5NjYtNDFkOGZmYzYtNjIxYzU1ZjItNzEzYTU2OTA=, TxId: 2025-06-25T14:32:46.790179Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1911: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 1f86e3e5-84052b7d-2122ac58-82841be4, start saving rows range [0; 1) 2025-06-25T14:32:46.790234Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 1f86e3e5-84052b7d-2122ac58-82841be4, Bootstrap. 
Database: /dc-1 2025-06-25T14:32:46.790565Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211742.761062s seconds to be completed 2025-06-25T14:32:46.792298Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=ODY3NDg1Y2MtNjFiZDZkMzEtNmQ4YTdkLWNkOGIyYzU2, workerId: [2:7519894887145519919:2352], database: /dc-1, longSession: 1, local sessions count: 4 2025-06-25T14:32:46.792444Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:46.792842Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=YzQ0Yjc5NjYtNDFkOGZmYzYtNjIxYzU1ZjItNzEzYTU2OTA=, workerId: [2:7519894887145519351:2334], local sessions count: 3 2025-06-25T14:32:46.793023Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 1f86e3e5-84052b7d-2122ac58-82841be4, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional; DECLARE $items AS List>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-06-25T14:32:46.793586Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ODY3NDg1Y2MtNjFiZDZkMzEtNmQ4YTdkLWNkOGIyYzU2, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 12, targetId: [2:7519894887145519919:2352] 2025-06-25T14:32:46.793642Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 12 timeout: 300.000000s actor id: [2:7519894887145519923:3084] 2025-06-25T14:32:46.795188Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=OWEzZjE4ODctOWUxY2NkMTktOGRjYTZhMDktZTU0NTAzODU=, workerId: [2:7519894887145519253:2332], local sessions count: 2 2025-06-25T14:32:46.963618Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 12, sender: [2:7519894887145519921:2353], selfId: [2:7519894869965649103:2079], source: [2:7519894887145519919:2352] 2025-06-25T14:32:46.964347Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 1f86e3e5-84052b7d-2122ac58-82841be4, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODY3NDg1Y2MtNjFiZDZkMzEtNmQ4YTdkLWNkOGIyYzU2, TxId: 2025-06-25T14:32:46.964377Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 1f86e3e5-84052b7d-2122ac58-82841be4, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODY3NDg1Y2MtNjFiZDZkMzEtNmQ4YTdkLWNkOGIyYzU2, TxId: 2025-06-25T14:32:46.964506Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1943: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 1f86e3e5-84052b7d-2122ac58-82841be4, result part successfully saved 2025-06-25T14:32:46.964522Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1950: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 1f86e3e5-84052b7d-2122ac58-82841be4, reply SUCCESS, issues: 2025-06-25T14:32:46.964808Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1f86e3e5-84052b7d-2122ac58-82841be4, Bootstrap. Database: /dc-1 2025-06-25T14:32:46.965346Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ODY3NDg1Y2MtNjFiZDZkMzEtNmQ4YTdkLWNkOGIyYzU2, workerId: [2:7519894887145519919:2352], local sessions count: 1 2025-06-25T14:32:46.965414Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211742.586212s seconds to be completed 2025-06-25T14:32:46.967319Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=ZDI0ZTU4NTUtMmVjZGMzZDgtMTM3MTE2ZmYtYjc5N2FkMGY=, workerId: [2:7519894887145519951:2361], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-25T14:32:46.967450Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:46.967757Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1f86e3e5-84052b7d-2122ac58-82841be4, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-25T14:32:46.968621Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZDI0ZTU4NTUtMmVjZGMzZDgtMTM3MTE2ZmYtYjc5N2FkMGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 14, targetId: [2:7519894887145519951:2361] 2025-06-25T14:32:46.968676Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 14 timeout: 300.000000s actor id: [2:7519894887145519953:3097] 2025-06-25T14:32:46.989484Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 14, sender: [2:7519894887145519952:2362], selfId: [2:7519894869965649103:2079], source: [2:7519894887145519951:2361] 2025-06-25T14:32:46.990254Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1f86e3e5-84052b7d-2122ac58-82841be4, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZDI0ZTU4NTUtMmVjZGMzZDgtMTM3MTE2ZmYtYjc5N2FkMGY=, TxId: 2025-06-25T14:32:46.990274Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 1f86e3e5-84052b7d-2122ac58-82841be4, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZDI0ZTU4NTUtMmVjZGMzZDgtMTM3MTE2ZmYtYjc5N2FkMGY=, TxId: 2025-06-25T14:32:46.990615Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 1f86e3e5-84052b7d-2122ac58-82841be4, Bootstrap. Database: /dc-1 2025-06-25T14:32:46.990711Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211742.560917s seconds to be completed 2025-06-25T14:32:46.992443Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=Njk2Zjk2YWMtMWY3Njg1YTYtMWFhMDc0MzUtMjE4MDFlNQ==, workerId: [2:7519894887145519975:2370], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-25T14:32:46.992582Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:46.992653Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ZDI0ZTU4NTUtMmVjZGMzZDgtMTM3MTE2ZmYtYjc5N2FkMGY=, workerId: [2:7519894887145519951:2361], local sessions count: 2 2025-06-25T14:32:46.992819Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 1f86e3e5-84052b7d-2122ac58-82841be4, RunDataQuery: -- TSaveScriptFinalStatusActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, finalization_status, meta, customer_supplied_id, user_token, script_sinks, script_secret_names FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-25T14:32:46.993069Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=Njk2Zjk2YWMtMWY3Njg1YTYtMWFhMDc0MzUtMjE4MDFlNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 16, targetId: [2:7519894887145519975:2370] 2025-06-25T14:32:46.993092Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 16 timeout: 300.000000s actor id: [2:7519894887145519977:3102] >> TestKinesisHttpProxy::TestRequestNoAuthorization >> TSubDomainTest::LsLs [GOOD] >> TSubDomainTest::LsAltered >> KqpQueryService::ShowCreateView [GOOD] >> TableCreation::CreateOldTable [GOOD] >> TestKinesisHttpProxy::PutRecordsWithIncorrectHashKey [GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_PQv1 >> BasicUsage::WriteSessionCloseWaitsForWrites >> TSubDomainTest::UserAttributes [GOOD] >> TSubDomainTest::UserAttributesApplyIf >> TestKinesisHttpProxy::ListShards >> TestYmqHttpProxy::TestSendMessageWithAttributes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ShowCreateView [GOOD] Test command err: Trying to start YDB, gRPC: 27305, MsgBus: 2604 2025-06-25T14:32:25.868237Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894796798605153:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:25.868279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186b/r3tmp/tmpdZevUQ/pdisk_1.dat 2025-06-25T14:32:26.372197Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:26.376497Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894796798605068:2080] 1750861945850379 != 1750861945850382 TServer::EnableGrpc on GrpcPort 27305, node 1 2025-06-25T14:32:26.410872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:26.410988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:26.412731Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:26.433809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:26.433835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:26.433856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:26.433978Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2604 TClient is connected to server localhost:2604 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:32:26.899455Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:27.165680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:27.211326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:32:27.222780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.521357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.840521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.988548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:29.762713Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894813978475898:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:29.762834Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:30.128258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:30.181671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:30.265873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:30.311156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:30.381820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:30.463823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:30.534821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:30.652005Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894818273443861:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:30.652077Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:30.652288Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894818273443866:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:30.657941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:30.682305Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894818273443868:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:32:30.786290Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894818273443921:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:30.868288Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894796798605153:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:30.868409Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:31.949828Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894822568411498:2483], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:35: Error: At function: KiReadTable!
:2:35: Error: Cannot find table 'db.[/Root/test_show_cre ... urrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519894851142222365:2474], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-25T14:32:38.655687Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MTU1N2M1ODctZGRjMWY3ZWEtNDkwOGJlMGEtOGM4YmU3MA==, ActorId: [2:7519894851142222318:2474], ActorState: ExecuteState, TraceId: 01jykr2k9dabr1n294bpzesr87, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 19255, MsgBus: 3397 2025-06-25T14:32:39.628668Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519894857689471067:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:39.633863Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186b/r3tmp/tmpFSR5jf/pdisk_1.dat 2025-06-25T14:32:39.859635Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:39.867739Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:39.867821Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:39.878692Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19255, node 3 2025-06-25T14:32:40.016945Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:40.016968Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:40.016975Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:40.017097Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3397 2025-06-25T14:32:40.588812Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3397 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:32:40.807263Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:40.816772Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:32:40.829670Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:40.987768Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:41.248342Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:41.407152Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:44.624448Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519894857689471067:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:44.624532Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:45.949745Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894883459276293:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:45.950231Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:46.155079Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:46.235812Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:46.316327Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:46.398736Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:46.498192Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:46.590688Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:46.712685Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:46.885939Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894887754244276:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:46.886040Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:46.886430Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894887754244281:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:46.890890Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:46.928119Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519894887754244283:2441], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:32:46.983936Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519894887754244336:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |78.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |78.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |78.7%| [LD] {RESULT} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut >> TableCreation::ConcurrentUpdateTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::CreateOldTable [GOOD] Test command err: 2025-06-25T14:32:37.700077Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894845621563692:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:37.724403Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c99/r3tmp/tmpGPgqG5/pdisk_1.dat 2025-06-25T14:32:38.428208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:38.428674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:38.435546Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:38.436429Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:38.440682Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894845621563563:2080] 1750861957677744 != 1750861957677747 2025-06-25T14:32:38.692539Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31673 TServer::EnableGrpc on GrpcPort 10992, node 1 2025-06-25T14:32:39.063910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:39.063929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:39.063935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:39.064027Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:39.382691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:39.405127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:32:42.027385Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:42.028532Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:42.055332Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:32:42.055367Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T14:32:42.055393Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:42.055435Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:42.055478Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:42.055516Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:42.153222Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-25T14:32:42.153236Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-25T14:32:42.153270Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-25T14:32:42.153431Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-25T14:32:42.153435Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-25T14:32:42.153442Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. 
Full table path:/dc-1/.metadata/script_execution_leases 2025-06-25T14:32:42.156044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:42.158612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:42.162595Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-25T14:32:42.162604Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-25T14:32:42.162631Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-25T14:32:42.165341Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:42.165591Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:42.178541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:42.186124Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-25T14:32:42.186181Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710659 2025-06-25T14:32:42.189274Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-25T14:32:42.189304Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710660 2025-06-25T14:32:42.193006Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-25T14:32:42.193038Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710658 2025-06-25T14:32:42.359058Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 
2025-06-25T14:32:42.419476Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 2025-06-25T14:32:42.435357Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-25T14:32:42.437353Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-06-25T14:32:42.476073Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-25T14:32:42.530748Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-06-25T14:32:42.531206Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: ac801333-d3e12da4-acf0aa57-4a0515cd, Bootstrap. Database: /dc-1 2025-06-25T14:32:42.632936Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211746.918719s seconds to be completed 2025-06-25T14:32:42.635613Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=ZmMyNWVhMWEtODA3Y2E5YWItMjZkZTk3YWYtZDA5ZWVhOTY=, workerId: [1:7519894867096400921:2295], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-25T14:32:42.635765Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:42.640846Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: ac801333-d3e12da4-acf0aa57-4a0515cd, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, ... ctor] TraceId: 51aeddbe-50b42f7-a2d66aa9-54d5fedd, Bootstrap. Database: /dc-1 2025-06-25T14:32:50.387598Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZDg3NGI4NGYtYjIwZDZjOGItMTM1MTY1NTgtNmVmYzYxOWU=, CurrentExecutionId: 51aeddbe-50b42f7-a2d66aa9-54d5fedd, CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 604800.000000s timeout: 604800.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 5, targetId: [2:7519894903141480326:2313] 2025-06-25T14:32:50.387624Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 5 timeout: 604800.000000s actor id: [2:7519894903141480332:2525] 2025-06-25T14:32:50.387656Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211739.163969s seconds to be completed 2025-06-25T14:32:50.389420Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=YzBmNTc4YzQtZTI2Y2ZmN2ItN2U3NDFkMTgtYzk2NGQ2OTY=, workerId: [2:7519894903141480333:2315], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-25T14:32:50.389563Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:50.390216Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptProgressActor] TraceId: 51aeddbe-50b42f7-a2d66aa9-54d5fedd, RunDataQuery: -- TScriptProgressActor::OnRunQuery DECLARE $execution_id AS Text; DECLARE $database AS Text; DECLARE $plan AS JsonDocument; DECLARE $execution_status AS Int32; UPSERT INTO `.metadata/script_executions` (execution_id, database, plan, execution_status) VALUES ($execution_id, $database, $plan, $execution_status); 2025-06-25T14:32:50.390806Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=YzBmNTc4YzQtZTI2Y2ZmN2ItN2U3NDFkMTgtYzk2NGQ2OTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 7, targetId: [2:7519894903141480333:2315] 2025-06-25T14:32:50.390838Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 7 timeout: 300.000000s actor id: [2:7519894903141480340:2528] 2025-06-25T14:32:50.500993Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: TraceId: "01jykr2z121x4j07qty5cc960n", Request has 18444993211739.050651s seconds to be completed 2025-06-25T14:32:50.502733Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jykr2z121x4j07qty5cc960n", Created new session, sessionId: ydb://session/3?node_id=2&id=ZDUxM2MzOTgtZDMzMGU4OTUtZjc1OGE0ZjMtNzM2ZjNjN2Q=, workerId: [2:7519894903141480361:2332], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-25T14:32:50.502863Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 01jykr2z121x4j07qty5cc960n 2025-06-25T14:32:50.507952Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 51aeddbe-50b42f7-a2d66aa9-54d5fedd, Bootstrap. Database: /dc-1 2025-06-25T14:32:50.509148Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 5, sender: [2:7519894903141480210:2461], selfId: [2:7519894881666642893:2065], source: [2:7519894903141480326:2313] 2025-06-25T14:32:50.509246Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211739.042380s seconds to be completed 2025-06-25T14:32:50.510383Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:147: Table test_table updater. Describe result: PathErrorUnknown 2025-06-25T14:32:50.510393Z node 2 :KQP_PROXY NOTICE: table_creator.cpp:167: Table test_table updater. Creating table 2025-06-25T14:32:50.510425Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table test_table updater. 
Full table path:/dc-1/.test/test_table 2025-06-25T14:32:50.511233Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=MzE0MWRiYjQtNDIzOTZkMDYtOGEzOGI5MmUtODY1N2JlZTU=, workerId: [2:7519894903141480389:2334], database: /dc-1, longSession: 1, local sessions count: 4 2025-06-25T14:32:50.511367Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:50.511658Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 51aeddbe-50b42f7-a2d66aa9-54d5fedd, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-25T14:32:50.513523Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:50.515916Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710665 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 10 } 2025-06-25T14:32:50.515952Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:261: Table test_table updater. Subscribe on create table tx: 281474976710665 2025-06-25T14:32:50.518372Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MzE0MWRiYjQtNDIzOTZkMDYtOGEzOGI5MmUtODY1N2JlZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 10, targetId: [2:7519894903141480389:2334] 2025-06-25T14:32:50.518423Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 10 timeout: 300.000000s actor id: [2:7519894903141480403:2558] 2025-06-25T14:32:50.597454Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 7, sender: [2:7519894903141480337:2319], selfId: [2:7519894881666642893:2065], source: [2:7519894903141480333:2315] 2025-06-25T14:32:50.605508Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptProgressActor] TraceId: 51aeddbe-50b42f7-a2d66aa9-54d5fedd, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzBmNTc4YzQtZTI2Y2ZmN2ItN2U3NDFkMTgtYzk2NGQ2OTY=, TxId: 2025-06-25T14:32:50.605535Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TScriptProgressActor] TraceId: 51aeddbe-50b42f7-a2d66aa9-54d5fedd, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzBmNTc4YzQtZTI2Y2ZmN2ItN2U3NDFkMTgtYzk2NGQ2OTY=, TxId: 2025-06-25T14:32:50.607236Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=YzBmNTc4YzQtZTI2Y2ZmN2ItN2U3NDFkMTgtYzk2NGQ2OTY=, workerId: [2:7519894903141480333:2315], local sessions count: 3 2025-06-25T14:32:50.608485Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:290: Table test_table updater. Request: create. 
Transaction completed: 281474976710665. Doublechecking... 2025-06-25T14:32:50.694683Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-25T14:32:50.700566Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-25T14:32:50.793113Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ZDUxM2MzOTgtZDMzMGU4OTUtZjc1OGE0ZjMtNzM2ZjNjN2Q=, workerId: [2:7519894903141480361:2332], local sessions count: 2 2025-06-25T14:32:50.815700Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 10, sender: [2:7519894903141480391:2335], selfId: [2:7519894881666642893:2065], source: [2:7519894903141480389:2334] 2025-06-25T14:32:50.816542Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 51aeddbe-50b42f7-a2d66aa9-54d5fedd, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MzE0MWRiYjQtNDIzOTZkMDYtOGEzOGI5MmUtODY1N2JlZTU=, TxId: 2025-06-25T14:32:50.816566Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 51aeddbe-50b42f7-a2d66aa9-54d5fedd, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MzE0MWRiYjQtNDIzOTZkMDYtOGEzOGI5MmUtODY1N2JlZTU=, TxId: 2025-06-25T14:32:50.816700Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1911: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 51aeddbe-50b42f7-a2d66aa9-54d5fedd, start saving rows range [0; 1) 2025-06-25T14:32:50.816758Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 51aeddbe-50b42f7-a2d66aa9-54d5fedd, Bootstrap. Database: /dc-1 2025-06-25T14:32:50.820621Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211738.731020s seconds to be completed 2025-06-25T14:32:50.824271Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=NWFiY2MyZDYtMTU2MjRiZjItNGU1ZmVlZDAtZTBmZmYzNTA=, workerId: [2:7519894903141480502:2349], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-25T14:32:50.824426Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:50.824770Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=MzE0MWRiYjQtNDIzOTZkMDYtOGEzOGI5MmUtODY1N2JlZTU=, workerId: [2:7519894903141480389:2334], local sessions count: 2 2025-06-25T14:32:50.825603Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 51aeddbe-50b42f7-a2d66aa9-54d5fedd, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional; DECLARE $items AS List>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-06-25T14:32:50.828813Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NWFiY2MyZDYtMTU2MjRiZjItNGU1ZmVlZDAtZTBmZmYzNTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 12, targetId: [2:7519894903141480502:2349] 2025-06-25T14:32:50.828854Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 12 timeout: 300.000000s actor id: [2:7519894903141480506:2616] 2025-06-25T14:32:50.841371Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 >> KqpProxy::NoUserAccessToScriptExecutionsTable [GOOD] >> ScriptExecutionsTest::RunCheckLeaseStatus [GOOD] >> ScriptExecutionsTest::UpdatesLeaseAfterExpiring >> TestYmqHttpProxy::TestSetQueueAttributes |78.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |78.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume >> TestKinesisHttpProxy::TestConsumersEmptyNames [GOOD] >> DataShardVolatile::VolatileTxAbortedOnDrop [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter+UseSink >> TSubDomainTest::CreateTableInsideAndForceDeleteSubDomain [GOOD] >> TSubDomainTest::CreateTableInsidetThenStopTenantAndForceDeleteSubDomain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::ConcurrentUpdateTable [GOOD] Test command err: 2025-06-25T14:32:37.313955Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894847786544471:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:37.325448Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c9e/r3tmp/tmpI1Zz8x/pdisk_1.dat 2025-06-25T14:32:38.102579Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:38.102688Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:38.162985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:38.198386Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:38.204591Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894847786544269:2080] 1750861957258883 != 1750861957258886 2025-06-25T14:32:38.308965Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4201 TServer::EnableGrpc on GrpcPort 7917, node 1 2025-06-25T14:32:38.843007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:38.843031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:38.843040Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:38.843144Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:39.303752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:39.364716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:32:42.314504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894847786544471:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:42.314585Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:42.390576Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:42.391849Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:42.415332Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:32:42.415366Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-25T14:32:42.415386Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:42.415437Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:42.415482Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:42.415528Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:42.416654Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:42.416688Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:42.419687Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-25T14:32:42.419698Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-25T14:32:42.419747Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-25T14:32:42.419825Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-25T14:32:42.419829Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-25T14:32:42.419854Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-25T14:32:42.419917Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-25T14:32:42.419927Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-25T14:32:42.419949Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. 
Full table path:/dc-1/.metadata/script_execution_leases 2025-06-25T14:32:42.425534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:42.427741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:42.429577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:42.437096Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-25T14:32:42.437154Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710659 2025-06-25T14:32:42.437230Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-25T14:32:42.437241Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710658 2025-06-25T14:32:42.440426Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-25T14:32:42.440478Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710660 2025-06-25T14:32:42.637823Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 2025-06-25T14:32:42.701174Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-25T14:32:42.711158Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-06-25T14:32:42.719840Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 2025-06-25T14:32:42.768866Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-25T14:32:42.800663Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. 
Column diff is empty, finishing 2025-06-25T14:32:42.804702Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: f5e4b8a5-fdbf32f0-c3933a85-3bd8fd7a, Bootstrap. Database: /dc-1 2025-06-25T14:32:42.816626Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211746.735037s seconds to be completed 2025-06-25T14:32:42.819605Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=YjgzYjI1N2UtYjcyODU3ZTYtNjNlYWU4YjQtNzJjNjdiZDA=, workerId: [1:7519894869261381630:2296], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-25T14:32:42.819794Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:42.820728Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: f5e4b8a5-fdbf32f0-c3933a85-3bd8fd7a, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, sta ... UG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-25T14:32:52.408787Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-25T14:32:52.424676Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-25T14:32:52.424769Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-25T14:32:52.424795Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-25T14:32:52.428913Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-25T14:32:52.428974Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-25T14:32:52.439688Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-25T14:32:52.444630Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-25T14:32:52.464902Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. 
Column diff is empty, finishing 2025-06-25T14:32:52.530495Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NjNmYTZhZTMtMWYxNTM3MDctOTU2MGUxZTItNzg4ZGIyYTY=, workerId: [2:7519894909780050832:2336], local sessions count: 2 2025-06-25T14:32:52.558306Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 10, sender: [2:7519894909780050873:2339], selfId: [2:7519894879715278924:2229], source: [2:7519894909780050872:2338] 2025-06-25T14:32:52.559318Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: f3455fda-ea13a8ac-3d14ea38-ffd30433, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZGM2NjViYjktYzcyNDcyY2UtNWE3M2VjZC1kODAzOGZjZA==, TxId: 2025-06-25T14:32:52.559360Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: f3455fda-ea13a8ac-3d14ea38-ffd30433, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZGM2NjViYjktYzcyNDcyY2UtNWE3M2VjZC1kODAzOGZjZA==, TxId: 2025-06-25T14:32:52.559535Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1911: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: f3455fda-ea13a8ac-3d14ea38-ffd30433, start saving rows range [0; 1) 2025-06-25T14:32:52.559594Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: f3455fda-ea13a8ac-3d14ea38-ffd30433, Bootstrap. Database: /dc-1 2025-06-25T14:32:52.560216Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ZGM2NjViYjktYzcyNDcyY2UtNWE3M2VjZC1kODAzOGZjZA==, workerId: [2:7519894909780050872:2338], local sessions count: 1 2025-06-25T14:32:52.560298Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211736.991328s seconds to be completed 2025-06-25T14:32:52.562216Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=Y2M4YmZlNjQtNThlOGM1NWMtZWVmNjAyNjEtZGE2ZGNiNzI=, workerId: [2:7519894909780051080:2354], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-25T14:32:52.562367Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:52.562757Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: f3455fda-ea13a8ac-3d14ea38-ffd30433, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional; DECLARE $items AS List>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-06-25T14:32:52.563518Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=Y2M4YmZlNjQtNThlOGM1NWMtZWVmNjAyNjEtZGE2ZGNiNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 12, targetId: [2:7519894909780051080:2354] 2025-06-25T14:32:52.563549Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 12 timeout: 300.000000s actor id: [2:7519894909780051082:2719] 2025-06-25T14:32:52.791943Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 12, sender: [2:7519894909780051081:2355], selfId: [2:7519894879715278924:2229], source: [2:7519894909780051080:2354] 2025-06-25T14:32:52.792437Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: f3455fda-ea13a8ac-3d14ea38-ffd30433, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=Y2M4YmZlNjQtNThlOGM1NWMtZWVmNjAyNjEtZGE2ZGNiNzI=, TxId: 2025-06-25T14:32:52.792463Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: f3455fda-ea13a8ac-3d14ea38-ffd30433, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=Y2M4YmZlNjQtNThlOGM1NWMtZWVmNjAyNjEtZGE2ZGNiNzI=, TxId: 2025-06-25T14:32:52.792568Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1943: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: f3455fda-ea13a8ac-3d14ea38-ffd30433, result part successfully saved 2025-06-25T14:32:52.792579Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1950: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: f3455fda-ea13a8ac-3d14ea38-ffd30433, reply SUCCESS, issues: 2025-06-25T14:32:52.792845Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: f3455fda-ea13a8ac-3d14ea38-ffd30433, Bootstrap. Database: /dc-1 2025-06-25T14:32:52.792943Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=Y2M4YmZlNjQtNThlOGM1NWMtZWVmNjAyNjEtZGE2ZGNiNzI=, workerId: [2:7519894909780051080:2354], local sessions count: 1 2025-06-25T14:32:52.793027Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211736.758603s seconds to be completed 2025-06-25T14:32:52.794958Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=NTMwZGRjYjctZjI5MmQyNjMtOWEzNDI5MDAtNDBmZmQwYzA=, workerId: [2:7519894909780051110:2363], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-25T14:32:52.795112Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:52.795790Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: f3455fda-ea13a8ac-3d14ea38-ffd30433, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-25T14:32:52.796149Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NTMwZGRjYjctZjI5MmQyNjMtOWEzNDI5MDAtNDBmZmQwYzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 14, targetId: [2:7519894909780051110:2363] 2025-06-25T14:32:52.796180Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 14 timeout: 300.000000s actor id: [2:7519894909780051112:2732] 2025-06-25T14:32:52.810500Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 14, sender: [2:7519894909780051111:2364], selfId: [2:7519894879715278924:2229], source: [2:7519894909780051110:2363] 2025-06-25T14:32:52.811300Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: f3455fda-ea13a8ac-3d14ea38-ffd30433, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTMwZGRjYjctZjI5MmQyNjMtOWEzNDI5MDAtNDBmZmQwYzA=, TxId: 2025-06-25T14:32:52.811328Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: f3455fda-ea13a8ac-3d14ea38-ffd30433, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NTMwZGRjYjctZjI5MmQyNjMtOWEzNDI5MDAtNDBmZmQwYzA=, TxId: 2025-06-25T14:32:52.812212Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: f3455fda-ea13a8ac-3d14ea38-ffd30433, Bootstrap. Database: /dc-1 2025-06-25T14:32:52.812363Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NTMwZGRjYjctZjI5MmQyNjMtOWEzNDI5MDAtNDBmZmQwYzA=, workerId: [2:7519894909780051110:2363], local sessions count: 1 2025-06-25T14:32:52.812389Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211736.739237s seconds to be completed 2025-06-25T14:32:52.818196Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=M2Q5OTg2MmQtNmNkOTczMWQtNTEwYjhkNmEtMjZkMGFjNzc=, workerId: [2:7519894909780051134:2372], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-25T14:32:52.818365Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:52.818624Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: f3455fda-ea13a8ac-3d14ea38-ffd30433, RunDataQuery: -- TSaveScriptFinalStatusActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, finalization_status, meta, customer_supplied_id, user_token, script_sinks, script_secret_names FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-25T14:32:52.818905Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=M2Q5OTg2MmQtNmNkOTczMWQtNTEwYjhkNmEtMjZkMGFjNzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 16, targetId: [2:7519894909780051134:2372] 2025-06-25T14:32:52.818934Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 16 timeout: 300.000000s actor id: [2:7519894909780051136:2737] >> TestKinesisHttpProxy::TestListStreamConsumers >> TSubDomainTest::LsAltered [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_SameSession [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession_ParentCommittedToEnd ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::NoUserAccessToScriptExecutionsTable [GOOD] Test command err: 2025-06-25T14:32:38.178078Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894853231643374:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:38.183109Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:38.280811Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894849562006264:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:38.332160Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c90/r3tmp/tmp1I6daP/pdisk_1.dat 2025-06-25T14:32:39.105168Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:39.141277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:39.143632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:39.170141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:39.170200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:39.172648Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:39.174229Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:32:39.181917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:39.183686Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:39.335112Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21250 2025-06-25T14:32:43.180515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894853231643374:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:43.180588Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:43.203702Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:43.204966Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:43.213435Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:43.300680Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:43.301266Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519894849562006264:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:43.301550Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:43.335565Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:32:43.335606Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T14:32:43.335633Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:43.335711Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:43.337734Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.337781Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.337802Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.341732Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.340910Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=NDc5NjIyYTYtODA2ZmRkOS05MGU4OTY4OC00YmU0MzAyOA==, workerId: [2:7519894871036843014:2270], database: , longSession: 1, local sessions count: 1 2025-06-25T14:32:43.340948Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:43.341172Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:43.341238Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:32:43.341332Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-25T14:32:43.341352Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:43.341389Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:43.341428Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.341455Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.348649Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.348702Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.348718Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.355204Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NDc5NjIyYTYtODA2ZmRkOS05MGU4OTY4OC00YmU0MzAyOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 2, targetId: [2:8678280833929343339:121] 2025-06-25T14:32:43.355260Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 2 timeout: 600.000000s actor id: [1:7519894874706480543:2473] 2025-06-25T14:32:43.362606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894874706480544:2276], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:43.362724Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:43.368518Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NDc5NjIyYTYtODA2ZmRkOS05MGU4OTY4OC00YmU0MzAyOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [2:7519894871036843014:2270] 2025-06-25T14:32:43.368561Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 3 timeout: 600.000000s actor id: [2:7519894871036843029:2122] 2025-06-25T14:32:43.472448Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894871036843031:2272], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:43.472533Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:43.712332Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jykr2r380hv25j3ykhehtqsd", Created new session, sessionId: ydb://session/3?node_id=2&id=MzU0OTQyYzgtYThlMGQ2MDctZTVmYTJjYWUtZDY4MzdlNmQ=, workerId: [2:7519894871036843043:2274], database: , longSession: 0, local sessions count: 2 2025-06-25T14:32:43.712572Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jykr2r380hv25j3ykhehtqsd, Database: , DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MzU0OTQyYzgtYThlMGQ2MDctZTVmYTJjYWUtZDY4MzdlNmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 4, targetId: [2:7519894871036843043:2274] 2025-06-25T14:32:43.712608Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 4 timeout: 300.000000s actor id: [2:7519894871036843044:2126] 2025-06-25T14:32:43.716821Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894871036843045:2275], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:43.716919Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:43.717268Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894871036843050:2278], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:43.729842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:43.862025Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894871036843052:2279], DatabaseId: /dc-1, PoolId: default, S ... proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:51.230028Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519894908546958084:2326], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:32:51.321673Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519894908546958143:2973] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:51.439780Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519894887072120139:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:51.439849Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:51.710441Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:51.716811Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 5, sender: [3:7519894908546958075:2321], selfId: [3:7519894887072120251:2175], source: [3:7519894908546958074:2320] 2025-06-25T14:32:51.717371Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TCreateScriptOperationQuery] TraceId: c6de2eba-f5d1f002-6e3077a-18bfbba8, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=MmIyNzQ3N2ItMzdiOTlkNjctOGRiZWI0YTgtYzY3NTI0ZjA=, TxId: 2025-06-25T14:32:51.717415Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TCreateScriptOperationQuery] TraceId: c6de2eba-f5d1f002-6e3077a-18bfbba8, Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=MmIyNzQ3N2ItMzdiOTlkNjctOGRiZWI0YTgtYzY3NTI0ZjA=, TxId: 2025-06-25T14:32:51.717446Z node 3 :KQP_PROXY DEBUG: kqp_script_executions.cpp:304: [ScriptExecutions] Create script execution operation. ExecutionId: c6de2eba-f5d1f002-6e3077a-18bfbba8. Result: SUCCESS. Issues: 2025-06-25T14:32:51.721738Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=OGQwM2UwOWUtNjdjNzkwZjgtMzk5MWMzMzAtYWQzMzAyMDg=, workerId: [3:7519894908546958224:2340], database: /Root, longSession: 1, local sessions count: 2 2025-06-25T14:32:51.721922Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:51.722405Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=3&id=MmIyNzQ3N2ItMzdiOTlkNjctOGRiZWI0YTgtYzY3NTI0ZjA=, workerId: [3:7519894908546958074:2320], local sessions count: 1 2025-06-25T14:32:51.723204Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TScriptProgressActor] TraceId: c6de2eba-f5d1f002-6e3077a-18bfbba8, Bootstrap. Database: /Root 2025-06-25T14:32:51.725518Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jykr2z8x1s57hw7rehktx7f1, Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=OGQwM2UwOWUtNjdjNzkwZjgtMzk5MWMzMzAtYWQzMzAyMDg=, CurrentExecutionId: c6de2eba-f5d1f002-6e3077a-18bfbba8, CustomerSuppliedId: 01jykr2z8x1s57hw7rehktx7f1, PoolId: }. TEvQueryRequest, set timer for: 604800.000000s timeout: 604800.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 7, targetId: [3:7519894908546958224:2340] 2025-06-25T14:32:51.725552Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 7 timeout: 604800.000000s actor id: [3:7519894908546958228:3024] 2025-06-25T14:32:51.725584Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211737.826043s seconds to be completed 2025-06-25T14:32:51.727784Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=ZGFhODllYmItNjdmYmJkMWUtZTk1NzgxZWItZDY2YjU0ZQ==, workerId: [3:7519894908546958229:2342], database: /Root, longSession: 1, local sessions count: 2 2025-06-25T14:32:51.727951Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:51.729071Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptProgressActor] TraceId: c6de2eba-f5d1f002-6e3077a-18bfbba8, RunDataQuery: -- TScriptProgressActor::OnRunQuery DECLARE $execution_id AS Text; DECLARE $database AS Text; DECLARE $plan AS JsonDocument; DECLARE $execution_status AS Int32; UPSERT INTO `.metadata/script_executions` (execution_id, database, plan, execution_status) VALUES ($execution_id, $database, $plan, $execution_status); 2025-06-25T14:32:51.729550Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=ZGFhODllYmItNjdmYmJkMWUtZTk1NzgxZWItZDY2YjU0ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 9, targetId: [3:7519894908546958229:2342] 2025-06-25T14:32:51.729579Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 9 timeout: 300.000000s actor id: [3:7519894908546958236:3027] 2025-06-25T14:32:51.752268Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: TraceId: "01jykr3095f2ysabkj620a70ht", Request has 18444993211737.799381s seconds to be completed 2025-06-25T14:32:51.755378Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jykr3095f2ysabkj620a70ht", Created new session, sessionId: ydb://session/3?node_id=3&id=NGZlYzk2ZjYtOTBjMmI4YWMtMmMwZDgyNjktMTEyNWFiMmI=, workerId: [3:7519894908546958245:2351], database: /Root, longSession: 1, local sessions count: 3 2025-06-25T14:32:51.755561Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 01jykr3095f2ysabkj620a70ht 2025-06-25T14:32:51.827889Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jykr309qbwnx43acg8hs3nv0, Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=NGZlYzk2ZjYtOTBjMmI4YWMtMmMwZDgyNjktMTEyNWFiMmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 11, targetId: [3:7519894908546958245:2351] 2025-06-25T14:32:51.827942Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 11 timeout: 300.000000s actor id: [3:7519894908546958252:3034] 2025-06-25T14:32:51.851538Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: c6de2eba-f5d1f002-6e3077a-18bfbba8, Bootstrap. 
Database: /Root 2025-06-25T14:32:51.853581Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jykr2z8x1s57hw7rehktx7f1", Forwarded response to sender actor, requestId: 7, sender: [3:7519894908546958071:2923], selfId: [3:7519894887072120251:2175], source: [3:7519894908546958224:2340] 2025-06-25T14:32:51.853679Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211737.697948s seconds to be completed 2025-06-25T14:32:51.855797Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=ZTZlZGI1ZWEtNmQzNDM0MmUtNjliODk0OTMtYTE1MzI3NDI=, workerId: [3:7519894908546958275:2356], database: /Root, longSession: 1, local sessions count: 4 2025-06-25T14:32:51.855977Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:51.856514Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: c6de2eba-f5d1f002-6e3077a-18bfbba8, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-25T14:32:51.857413Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519894908546958278:3044], for# user@builtin, access# DescribeSchema 2025-06-25T14:32:51.857434Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519894908546958278:3044], for# user@builtin, access# DescribeSchema 2025-06-25T14:32:51.858168Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /Root, DatabaseId: , SessionId: ydb://session/3?node_id=3&id=ZTZlZGI1ZWEtNmQzNDM0MmUtNjliODk0OTMtYTE1MzI3NDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 13, targetId: [3:7519894908546958275:2356] 2025-06-25T14:32:51.858195Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 13 timeout: 300.000000s actor id: [3:7519894908546958279:3045] 2025-06-25T14:32:51.858906Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 9, sender: [3:7519894908546958234:2346], selfId: [3:7519894887072120251:2175], source: [3:7519894908546958229:2342] 2025-06-25T14:32:51.860021Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptProgressActor] TraceId: c6de2eba-f5d1f002-6e3077a-18bfbba8, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=ZGFhODllYmItNjdmYmJkMWUtZTk1NzgxZWItZDY2YjU0ZQ==, TxId: 2025-06-25T14:32:51.860057Z node 3 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TScriptProgressActor] TraceId: c6de2eba-f5d1f002-6e3077a-18bfbba8, Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=ZGFhODllYmItNjdmYmJkMWUtZTk1NzgxZWItZDY2YjU0ZQ==, TxId: 2025-06-25T14:32:51.871838Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=3&id=ZGFhODllYmItNjdmYmJkMWUtZTk1NzgxZWItZDY2YjU0ZQ==, workerId: [3:7519894908546958229:2342], local sessions count: 3 2025-06-25T14:32:51.875667Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519894908546958255:2354], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/script_executions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:32:51.877525Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NGZlYzk2ZjYtOTBjMmI4YWMtMmMwZDgyNjktMTEyNWFiMmI=, ActorId: [3:7519894908546958245:2351], ActorState: ExecuteState, TraceId: 01jykr309qbwnx43acg8hs3nv0, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:32:51.877750Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jykr309qbwnx43acg8hs3nv0", Forwarded response to sender actor, requestId: 11, sender: [3:7519894908546958251:2353], selfId: [3:7519894887072120251:2175], source: [3:7519894908546958245:2351] >> BasicUsage::PropagateSessionClosed >> TSubDomainTest::DeleteTableAndThenForceDeleteSubDomain [GOOD] >> TSubDomainTest::DatashardRunAtOtherNodeWhenOneNodeIsStopped >> TSubDomainTest::FailIfAffectedSetNotInterior [GOOD] >> TSubDomainTest::GenericCases >> THealthCheckTest::Issues100GroupsListing >> KqpQueryService::ExecStatsAst [GOOD] >> KqpQueryService::DmlNoTx ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::LsAltered [GOOD] Test command err: 2025-06-25T14:32:44.984941Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894876704037919:2084];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:44.991443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016e2/r3tmp/tmp2SVcmx/pdisk_1.dat 2025-06-25T14:32:45.726368Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:45.726447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:45.757387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:45.818596Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894876704037858:2080] 1750861964882249 != 1750861964882252 2025-06-25T14:32:45.830859Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:46.004889Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3157 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:32:46.082846Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519894876704037893:2089] Handle TEvNavigate describe path dc-1 2025-06-25T14:32:46.134368Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519894885293972975:2265] HANDLE EvNavigateScheme dc-1 2025-06-25T14:32:46.134495Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519894880999005395:2116], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:46.134570Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519894880999005662:2260][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519894880999005395:2116], cookie# 1 2025-06-25T14:32:46.137629Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894880999005666:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894880999005663:2260], cookie# 1 2025-06-25T14:32:46.137687Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894880999005667:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894880999005664:2260], cookie# 1 2025-06-25T14:32:46.137701Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894880999005668:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894880999005665:2260], cookie# 1 2025-06-25T14:32:46.137739Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894876704037827:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894880999005666:2260], cookie# 1 2025-06-25T14:32:46.137762Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894876704037830:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894880999005667:2260], cookie# 1 2025-06-25T14:32:46.137781Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894876704037833:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894880999005668:2260], cookie# 1 2025-06-25T14:32:46.137811Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894880999005666:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894876704037827:2049], cookie# 1 2025-06-25T14:32:46.137825Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894880999005667:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894876704037830:2052], cookie# 1 2025-06-25T14:32:46.137850Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894880999005668:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894876704037833:2055], cookie# 1 2025-06-25T14:32:46.137898Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894880999005662:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894880999005663:2260], cookie# 1 2025-06-25T14:32:46.137920Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: 
[main][1:7519894880999005662:2260][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:32:46.137933Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894880999005662:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894880999005664:2260], cookie# 1 2025-06-25T14:32:46.137943Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894880999005662:2260][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:32:46.137958Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894880999005662:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894880999005665:2260], cookie# 1 2025-06-25T14:32:46.138007Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519894880999005662:2260][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:32:46.138063Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519894880999005395:2116], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:32:46.161726Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519894880999005395:2116], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519894880999005662:2260] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:32:46.161872Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519894880999005395:2116], cacheItem# { Subscriber: { Subscriber: [1:7519894880999005662:2260] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-25T14:32:46.164261Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519894885293972976:2266], recipient# [1:7519894885293972975:2265], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:32:46.164574Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7519894885293972975:2265] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 TClient::Ls response: 2025-06-25T14:32:46.300755Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519894885293972975:2265] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:32:46.310811Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519894885293972975:2265] Handle TEvDescribeSchemeResult Forward to# [1:7519894885293972974:2264] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:46.362344Z ... 
axPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861971558 ParentPathId: 1 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 ... (TRUNCATED) TClient::Ls request: /dc-1 2025-06-25T14:32:52.120637Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [2:7519894904274453639:2097] Handle TEvNavigate describe path /dc-1 2025-06-25T14:32:52.147141Z node 2 :TX_PROXY DEBUG: describe.cpp:272: Actor# [2:7519894912864388816:2332] HANDLE EvNavigateScheme /dc-1 2025-06-25T14:32:52.147235Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519894904274453834:2116], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:52.147306Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][2:7519894904274454055:2220][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [2:7519894904274453834:2116], cookie# 4 2025-06-25T14:32:52.147361Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7519894908569421366:2220][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7519894908569421363:2220], cookie# 4 2025-06-25T14:32:52.147376Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7519894908569421367:2220][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7519894908569421364:2220], cookie# 4 2025-06-25T14:32:52.147393Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7519894908569421368:2220][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7519894908569421365:2220], cookie# 4 2025-06-25T14:32:52.147420Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7519894904274453563:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7519894908569421366:2220], cookie# 4 2025-06-25T14:32:52.147441Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7519894904274453566:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7519894908569421367:2220], cookie# 4 
2025-06-25T14:32:52.147462Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7519894904274453569:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7519894908569421368:2220], cookie# 4 2025-06-25T14:32:52.147491Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7519894908569421366:2220][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7519894904274453563:2049], cookie# 4 2025-06-25T14:32:52.147505Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7519894908569421367:2220][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7519894904274453566:2052], cookie# 4 2025-06-25T14:32:52.147518Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7519894908569421368:2220][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7519894904274453569:2055], cookie# 4 2025-06-25T14:32:52.147548Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][2:7519894904274454055:2220][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7519894908569421363:2220], cookie# 4 2025-06-25T14:32:52.147566Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][2:7519894904274454055:2220][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:32:52.147579Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][2:7519894904274454055:2220][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7519894908569421364:2220], cookie# 4 2025-06-25T14:32:52.147590Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][2:7519894904274454055:2220][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:32:52.147604Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][2:7519894904274454055:2220][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7519894908569421365:2220], cookie# 4 2025-06-25T14:32:52.147624Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][2:7519894904274454055:2220][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:32:52.147662Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [2:7519894904274453834:2116], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:32:52.147720Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [2:7519894904274453834:2116], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [2:7519894904274454055:2220] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750861971523 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:32:52.147784Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519894904274453834:2116], cacheItem# { Subscriber: { Subscriber: [2:7519894904274454055:2220] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 
} Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750861971523 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-25T14:32:52.147968Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519894912864388817:2333], recipient# [2:7519894912864388816:2332], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:32:52.148001Z node 2 :TX_PROXY DEBUG: describe.cpp:356: Actor# [2:7519894912864388816:2332] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:32:52.148071Z node 2 :TX_PROXY DEBUG: describe.cpp:435: Actor# [2:7519894912864388816:2332] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:32:52.148645Z node 2 :TX_PROXY DEBUG: describe.cpp:448: Actor# [2:7519894912864388816:2332] Handle TEvDescribeSchemeResult Forward to# [2:7519894912864388815:2331] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 63 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750861971523 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 
MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750861971523 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861971558 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } Children { Name: ".sys" PathId: 18446744073709551615 ... (TRUNCATED) >> TSubDomainTest::CheckAccessCopyTable [GOOD] >> TSubDomainTest::ConsistentCopyTable >> CommitOffset::Commit_Flat_WithWrongSession_ToPast [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession >> KqpDocumentApi::RestrictAlter [GOOD] >> KqpDocumentApi::RestrictDrop >> KqpQueryService::ExecuteQueryExplicitBeginCommitRollback [GOOD] >> KqpQueryService::ExecuteDDLStatusCodeSchemeError >> TSubDomainTest::UserAttributesApplyIf [GOOD] >> RetryPolicy::TWriteSession_RetryOnTargetCluster [GOOD] >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster >> TSubDomainTest::StartAndStopTenanNode [GOOD] >> TSubDomainTest::StartTenanNodeAndStopAtDestructor >> THealthCheckTest::ShardsLimit999 >> THealthCheckTest::StaticGroupIssue |78.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |78.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut >> TSubDomainTest::CreateTablet [GOOD] >> TSubDomainTest::CreateTabletForUnknownDomain >> TestKinesisHttpProxy::TestRequestNoAuthorization [GOOD] >> KqpProxy::LoadedMetadataAfterCompilationTimeout [GOOD] >> KqpProxy::ExecuteScriptFailsWithoutFeatureFlag |78.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |78.8%| [LD] {RESULT} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::UserAttributesApplyIf [GOOD] Test command err: 2025-06-25T14:32:47.809498Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894889061777802:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:47.809530Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016bc/r3tmp/tmpQq0kWw/pdisk_1.dat 2025-06-25T14:32:48.588477Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894889061777784:2080] 1750861967804447 != 1750861967804450 
2025-06-25T14:32:48.597968Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:48.598058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:48.654765Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:48.666011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:48.844531Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25796 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:32:49.050654Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519894889061777996:2099] Handle TEvNavigate describe path dc-1 2025-06-25T14:32:49.095612Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519894897651712905:2265] HANDLE EvNavigateScheme dc-1 2025-06-25T14:32:49.095713Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519894893356745328:2119], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:49.095771Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519894893356745591:2259][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519894893356745328:2119], cookie# 1 2025-06-25T14:32:49.101354Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894893356745595:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894893356745592:2259], cookie# 1 2025-06-25T14:32:49.101407Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894893356745596:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894893356745593:2259], cookie# 1 2025-06-25T14:32:49.101421Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894893356745597:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894893356745594:2259], cookie# 1 2025-06-25T14:32:49.101449Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894889061777753:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894893356745595:2259], cookie# 1 2025-06-25T14:32:49.101481Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894889061777756:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894893356745596:2259], cookie# 1 2025-06-25T14:32:49.101497Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894889061777759:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894893356745597:2259], cookie# 1 2025-06-25T14:32:49.101538Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894893356745595:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894889061777753:2049], cookie# 1 2025-06-25T14:32:49.101562Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: 
[replica][1:7519894893356745596:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894889061777756:2052], cookie# 1 2025-06-25T14:32:49.101577Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894893356745597:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894889061777759:2055], cookie# 1 2025-06-25T14:32:49.101606Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894893356745591:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894893356745592:2259], cookie# 1 2025-06-25T14:32:49.101626Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894893356745591:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:32:49.101638Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894893356745591:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894893356745593:2259], cookie# 1 2025-06-25T14:32:49.101648Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894893356745591:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:32:49.101659Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894893356745591:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894893356745594:2259], cookie# 1 2025-06-25T14:32:49.101677Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519894893356745591:2259][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:32:49.101740Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519894893356745328:2119], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:32:49.111073Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519894893356745328:2119], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519894893356745591:2259] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:32:49.111188Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519894893356745328:2119], cacheItem# { Subscriber: { Subscriber: [1:7519894893356745591:2259] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-25T14:32:49.117783Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: 
cache.cpp:265: Send result: self# [1:7519894897651712906:2266], recipient# [1:7519894897651712905:2265], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:32:49.117858Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519894897651712905:2265] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:32:49.192220Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519894897651712905:2265] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:32:49.207154Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519894897651712905:2265] Handle TEvDescribeSchemeResult Forward to# [1:7519894897651712904:2264] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } 
Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:49.314468Z ... 46644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710662 2025-06-25T14:32:54.238765Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976710662 2025-06-25T14:32:54.238778Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710662, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 6 2025-06-25T14:32:54.238791Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:32:54.238853Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710662, subscribers: 0 2025-06-25T14:32:54.239090Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710662 waiting... 2025-06-25T14:32:54.243210Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710662, at schemeshard: 72057594046644480 TClient::Ls request: /dc-1/USER_0 2025-06-25T14:32:54.245916Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [2:7519894911704719746:2088] Handle TEvNavigate describe path /dc-1/USER_0 2025-06-25T14:32:54.277511Z node 2 :TX_PROXY DEBUG: describe.cpp:272: Actor# [2:7519894920294654949:2356] HANDLE EvNavigateScheme /dc-1/USER_0 2025-06-25T14:32:54.277616Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519894911704719958:2114], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:54.277683Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][2:7519894915999687575:2301][/dc-1/USER_0] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [2:7519894911704719958:2114], cookie# 10 2025-06-25T14:32:54.277740Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7519894915999687579:2301][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1/USER_0 }: sender# [2:7519894915999687576:2301], cookie# 10 2025-06-25T14:32:54.277761Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7519894915999687580:2301][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1/USER_0 }: sender# [2:7519894915999687577:2301], cookie# 10 2025-06-25T14:32:54.277775Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7519894915999687581:2301][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1/USER_0 }: sender# 
[2:7519894915999687578:2301], cookie# 10 2025-06-25T14:32:54.277799Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7519894911704719687:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1/USER_0 }: sender# [2:7519894915999687579:2301], cookie# 10 2025-06-25T14:32:54.277821Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7519894911704719690:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1/USER_0 }: sender# [2:7519894915999687580:2301], cookie# 10 2025-06-25T14:32:54.277836Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7519894911704719693:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1/USER_0 }: sender# [2:7519894915999687581:2301], cookie# 10 2025-06-25T14:32:54.277859Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7519894915999687579:2301][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 6 Partial: 0 }: sender# [2:7519894911704719687:2049], cookie# 10 2025-06-25T14:32:54.277871Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7519894915999687580:2301][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 6 Partial: 0 }: sender# [2:7519894911704719690:2052], cookie# 10 2025-06-25T14:32:54.277884Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7519894915999687581:2301][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 6 Partial: 0 }: sender# [2:7519894911704719693:2055], cookie# 10 2025-06-25T14:32:54.277919Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][2:7519894915999687575:2301][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 6 Partial: 0 }: sender# [2:7519894915999687576:2301], cookie# 10 2025-06-25T14:32:54.277937Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][2:7519894915999687575:2301][/dc-1/USER_0] Sync is in progress: cookie# 10, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:32:54.277951Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][2:7519894915999687575:2301][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 6 Partial: 0 }: sender# [2:7519894915999687577:2301], cookie# 10 2025-06-25T14:32:54.277961Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][2:7519894915999687575:2301][/dc-1/USER_0] Sync is in progress: cookie# 10, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:32:54.277976Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][2:7519894915999687575:2301][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 6 Partial: 0 }: sender# [2:7519894915999687578:2301], cookie# 10 2025-06-25T14:32:54.277994Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][2:7519894915999687575:2301][/dc-1/USER_0] Sync is done in the ring group: cookie# 10, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:32:54.278031Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [2:7519894911704719958:2114], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_0 PathId: Partial: 0 } 2025-06-25T14:32:54.278100Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [2:7519894911704719958:2114], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_0 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: 
[2:7519894915999687575:2301] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 10 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 1750861973882 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:32:54.278181Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519894911704719958:2114], cacheItem# { Subscriber: { Subscriber: [2:7519894915999687575:2301] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 10 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 1750861973882 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 10 IsSync: true Partial: 0 } 2025-06-25T14:32:54.278340Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519894920294654950:2357], recipient# [2:7519894920294654949:2356], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [72057594046644480:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:32:54.278371Z node 2 :TX_PROXY DEBUG: describe.cpp:356: Actor# [2:7519894920294654949:2356] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:32:54.278432Z node 2 :TX_PROXY DEBUG: describe.cpp:435: Actor# [2:7519894920294654949:2356] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1/USER_0" Options { ShowPrivateTable: true } 2025-06-25T14:32:54.279007Z node 2 :TX_PROXY DEBUG: describe.cpp:448: Actor# [2:7519894920294654949:2356] Handle TEvDescribeSchemeResult Forward to# [2:7519894920294654948:2355] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "/dc-1/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861973882 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 4 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046644480 PathId: 2 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { 
ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrA3" Value: "ValA3" } } PathId: 2 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861973882 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 4 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1... (TRUNCATED) >> TSubDomainTest::CreateDummyTabletsInDifferentDomains [GOOD] >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable >> THealthCheckTest::Issues100Groups100VCardListing >> WithSDK::DescribeConsumer [GOOD] >> TestKinesisHttpProxy::TestUnauthorizedPutRecords >> KqpProxy::NodeDisconnectedTest [GOOD] >> TestKinesisHttpProxy::ListShards [GOOD] >> TestKinesisHttpProxy::ListShardsEmptyFields >> KqpProxy::CreatesScriptExecutionsTable [GOOD] >> KqpProxy::DatabasesCacheForServerless >> DataShardVolatile::DistributedWriteThenBulkUpsert [GOOD] >> DataShardVolatile::DistributedWriteThenBulkUpsertWithCdc >> THealthCheckTest::OrangeGroupIssueWhenDegradedGroupStatus ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::NodeDisconnectedTest [GOOD] Test command err: 2025-06-25T14:32:38.591732Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894850452964375:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:38.597204Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c8a/r3tmp/tmpjccElo/pdisk_1.dat 2025-06-25T14:32:39.307183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:39.316501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:39.334430Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894850452964163:2080] 1750861958493312 != 1750861958493315 2025-06-25T14:32:39.351498Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-25T14:32:39.358142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:39.500463Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5163 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:39.918092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:43.293819Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:43.294967Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:43.336254Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=YzZkMWNlZWUtODA1NDBjYjQtYTdhYmU3YWQtNmRlODhkMjQ=, workerId: [1:7519894871927801242:2272], database: , longSession: 0, local sessions count: 1 2025-06-25T14:32:43.336291Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:43.336557Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=1&id=YzZkMWNlZWUtODA1NDBjYjQtYTdhYmU3YWQtNmRlODhkMjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.010000s timeout: 0.010000s cancelAfter: 0.000000s. Send request to target, requestId: 2, targetId: [1:7519894871927801242:2272] 2025-06-25T14:32:43.336578Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 2 timeout: 0.010000s actor id: [0:0:0] 2025-06-25T14:32:43.336616Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.336656Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.336757Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 
2025-06-25T14:32:43.336773Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T14:32:43.336786Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:43.336844Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.337232Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=1&id=YzZkMWNlZWUtODA1NDBjYjQtYTdhYmU3YWQtNmRlODhkMjQ=, ActorId: [1:7519894871927801242:2272], ActorState: ReadyState, Reply query error, msg:
: Error: SomeUniqTextForUt proxyRequestId: 2 2025-06-25T14:32:43.337552Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 2, sender: [1:7519894854747932011:2286], selfId: [1:7519894850452964212:2077], source: [1:7519894871927801242:2272] 2025-06-25T14:32:43.340490Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:43.341201Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894871927801243:2273], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:43.341283Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:43.348574Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(2) 2025-06-25T14:32:43.348598Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1337: Invalid request info while on request timeout handle. RequestId: 2 2025-06-25T14:32:53.825949Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:32:53.826085Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:53.826278Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:32:53.826496Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:32:53.826864Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:53.826929Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c8a/r3tmp/tmpW7txaw/pdisk_1.dat 2025-06-25T14:32:54.397916Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:2653 KQP PROXY1 [2:8678280833929343339:121] KQP PROXY2 [3:8678280833929343339:121] SENDER [2:1062:2647] 2025-06-25T14:32:54.864057Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=ODRjNzZlZDEtZDg4YWRhYzQtODIwMmZjNjItMWI0OTQwMGY=, workerId: [3:1063:2338], database: , longSession: 1, local sessions count: 1 2025-06-25T14:32:54.864284Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: Created session ydb://session/3?node_id=3&id=ODRjNzZlZDEtZDg4YWRhYzQtODIwMmZjNjItMWI0OTQwMGY= 2025-06-25T14:32:54.864962Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=ODRjNzZlZDEtZDg4YWRhYzQtODIwMmZjNjItMWI0OTQwMGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 2, targetId: [3:8678280833929343339:121] 2025-06-25T14:32:54.865027Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 2 timeout: 0.001000s actor id: [0:0:0] 2025-06-25T14:32:54.865692Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=ODRjNzZlZDEtZDg4YWRhYzQtODIwMmZjNjItMWI0OTQwMGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [3:1063:2338] 2025-06-25T14:32:54.865736Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 3 timeout: 0.001000s actor id: [0:0:0] 2025-06-25T14:32:55.240964Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:1064:2648], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:55.241105Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:55.241639Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1070:2339], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:55.241720Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:55.308604Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(2) 2025-06-25T14:32:55.308710Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 2 sessionId: ydb://session/3?node_id=3&id=ODRjNzZlZDEtZDg4YWRhYzQtODIwMmZjNjItMWI0OTQwMGY= status: TIMEOUT round: 0 2025-06-25T14:32:55.308828Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle ... :58.080565Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(57) 2025-06-25T14:32:58.080648Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 57 sessionId: ydb://session/3?node_id=3&id=ZmJhODI4ZGEtYWQwZGMwNDktZGNmM2M4MDYtYTcxNDgyMGI= status: TIMEOUT round: 0 2025-06-25T14:32:58.080799Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 57, sender: [2:1062:2647], selfId: [2:213:2173], source: [2:213:2173] 2025-06-25T14:32:58.082648Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=NGY5MTc5NjQtZWJjMjEwMDktZDA3NzcxYzMtYjZmNjE4NmE=, workerId: [3:1335:2482], database: , longSession: 1, local sessions count: 57 2025-06-25T14:32:58.082809Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: Created session ydb://session/3?node_id=3&id=NGY5MTc5NjQtZWJjMjEwMDktZDA3NzcxYzMtYjZmNjE4NmE= 2025-06-25T14:32:58.083246Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=NGY5MTc5NjQtZWJjMjEwMDktZDA3NzcxYzMtYjZmNjE4NmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 58, targetId: [3:8678280833929343339:121] 2025-06-25T14:32:58.083341Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 58 timeout: 0.001000s actor id: [0:0:0] 2025-06-25T14:32:58.083773Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:1336:2711], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:58.083894Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=NGY5MTc5NjQtZWJjMjEwMDktZDA3NzcxYzMtYjZmNjE4NmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 87, targetId: [3:1335:2482] 2025-06-25T14:32:58.083928Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 87 timeout: 0.001000s actor id: [0:0:0] 2025-06-25T14:32:58.084025Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:58.140257Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1338:2483], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:58.143795Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:58.159413Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(87) 2025-06-25T14:32:58.159487Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 87 sessionId: ydb://session/3?node_id=3&id=NGY5MTc5NjQtZWJjMjEwMDktZDA3NzcxYzMtYjZmNjE4NmE= status: TIMEOUT round: 0 2025-06-25T14:32:58.159566Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(58) 2025-06-25T14:32:58.159593Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 58 sessionId: ydb://session/3?node_id=3&id=NGY5MTc5NjQtZWJjMjEwMDktZDA3NzcxYzMtYjZmNjE4NmE= status: TIMEOUT round: 0 2025-06-25T14:32:58.159681Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 58, sender: [2:1062:2647], selfId: [2:213:2173], source: [2:213:2173] 2025-06-25T14:32:58.159770Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NGY5MTc5NjQtZWJjMjEwMDktZDA3NzcxYzMtYjZmNjE4NmE=, ActorId: [3:1335:2482], ActorState: ExecuteState, TraceId: 01jykr36f44d0t19es3wrfr82h, Create QueryResponse for error on request, msg: 2025-06-25T14:32:58.162202Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 87, sender: [2:213:2173], selfId: [3:243:2129], source: [3:1335:2482] 2025-06-25T14:32:58.162409Z node 2 :KQP_PROXY ERROR: kqp_proxy_service.cpp:957: Unknown sender for proxy response, requestId: 58 2025-06-25T14:32:58.175029Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=MTU4NmRlMTgtY2E3MmY0YTgtOTE5MjYyOWMtODk4ZmNlNzY=, workerId: [3:1342:2486], database: , longSession: 1, local sessions count: 58 2025-06-25T14:32:58.175224Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:58.175604Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:902: Received ping session request, request_id: 59, sender: [2:1062:2647], trace_id: 2025-06-25T14:32:58.175711Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 59 timeout: 0.001000s actor id: [0:0:0] 2025-06-25T14:32:58.175838Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:856: Received ping session request, has local session: ydb://session/3?node_id=3&id=MTU4NmRlMTgtY2E3MmY0YTgtOTE5MjYyOWMtODk4ZmNlNzY=, rpc ctrl: [0:0:0], sameNode: 0, trace_id: 2025-06-25T14:32:58.175959Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 59, sender: [2:1062:2647], selfId: [2:213:2173], source: [3:243:2129] 2025-06-25T14:32:58.177625Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=ZDE5MTVmNDQtNDhjZjY2MmQtOTEzY2I5ZTUtNzI0ZDE4YWI=, workerId: [3:1343:2487], database: , longSession: 1, local sessions count: 59 2025-06-25T14:32:58.177778Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: Created session ydb://session/3?node_id=3&id=ZDE5MTVmNDQtNDhjZjY2MmQtOTEzY2I5ZTUtNzI0ZDE4YWI= 2025-06-25T14:32:58.178181Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=ZDE5MTVmNDQtNDhjZjY2MmQtOTEzY2I5ZTUtNzI0ZDE4YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 60, targetId: [3:8678280833929343339:121] 2025-06-25T14:32:58.178223Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 60 timeout: 0.001000s actor id: [0:0:0] 2025-06-25T14:32:58.178595Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=3&id=ZDE5MTVmNDQtNDhjZjY2MmQtOTEzY2I5ZTUtNzI0ZDE4YWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 0.001000s timeout: 0.001000s cancelAfter: 0.000000s. Send request to target, requestId: 90, targetId: [3:1343:2487] 2025-06-25T14:32:58.178632Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 90 timeout: 0.001000s actor id: [0:0:0] 2025-06-25T14:32:58.179837Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:1344:2713], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:58.180016Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:58.295635Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1346:2488], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:58.295841Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:58.307904Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(90) 2025-06-25T14:32:58.308004Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 90 sessionId: ydb://session/3?node_id=3&id=ZDE5MTVmNDQtNDhjZjY2MmQtOTEzY2I5ZTUtNzI0ZDE4YWI= status: TIMEOUT round: 0 2025-06-25T14:32:58.308097Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(59) 2025-06-25T14:32:58.308124Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1337: Invalid request info while on request timeout handle. RequestId: 59 2025-06-25T14:32:58.308515Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(60) 2025-06-25T14:32:58.308558Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 60 sessionId: ydb://session/3?node_id=3&id=ZDE5MTVmNDQtNDhjZjY2MmQtOTEzY2I5ZTUtNzI0ZDE4YWI= status: TIMEOUT round: 0 2025-06-25T14:32:58.308714Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZDE5MTVmNDQtNDhjZjY2MmQtOTEzY2I5ZTUtNzI0ZDE4YWI=, ActorId: [3:1343:2487], ActorState: ExecuteState, TraceId: 01jykr36j2b9ntccyzayz318ns, Create QueryResponse for error on request, msg: 2025-06-25T14:32:58.308832Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 60, sender: [2:1062:2647], selfId: [2:213:2173], source: [2:213:2173] 2025-06-25T14:32:58.310626Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 90, sender: [2:213:2173], selfId: [3:243:2129], source: [3:1343:2487] 2025-06-25T14:32:58.310771Z node 2 :KQP_PROXY ERROR: kqp_proxy_service.cpp:957: Unknown sender for proxy response, requestId: 60 2025-06-25T14:32:58.312043Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=3&id=ZjZmYThkMTUtYzBmNDRmZjgtNjg0OWYyYTQtYzRlNmQ5MTU=, workerId: [3:1350:2491], database: , longSession: 1, local sessions count: 60 2025-06-25T14:32:58.312143Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:58.316580Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:902: Received ping session request, request_id: 61, sender: [2:1062:2647], trace_id: 2025-06-25T14:32:58.316734Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 61 timeout: 0.001000s actor id: [0:0:0] 2025-06-25T14:32:58.340548Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(61) 2025-06-25T14:32:58.340628Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 61 sessionId: ydb://session/3?node_id=3&id=ZjZmYThkMTUtYzBmNDRmZjgtNjg0OWYyYTQtYzRlNmQ5MTU= status: TIMEOUT round: 0 2025-06-25T14:32:58.340729Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 61, sender: [2:1062:2647], selfId: [2:213:2173], source: [2:213:2173] >> TSubDomainTest::CreateTableInsidetThenStopTenantAndForceDeleteSubDomain [GOOD] >> TSubDomainTest::CreateTableInsideSubDomain >> ScriptExecutionsTest::UpdatesLeaseAfterExpiring [GOOD] >> TestKinesisHttpProxy::TestListStreamConsumers [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> WithSDK::DescribeConsumer [GOOD] Test command err: 2025-06-25T14:30:41.269574Z 
node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894347380785600:2190];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:41.270268Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001916/r3tmp/tmpYvkD2p/pdisk_1.dat 2025-06-25T14:30:41.852477Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:30:42.055240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:42.055340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:42.057790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:42.116487Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894347380785448:2080] 1750861841179597 != 1750861841179600 2025-06-25T14:30:42.132118Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29846, node 1 2025-06-25T14:30:42.253054Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:42.253751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001916/r3tmp/yandexvgnw2N.tmp 2025-06-25T14:30:42.253771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001916/r3tmp/yandexvgnw2N.tmp 2025-06-25T14:30:42.253954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001916/r3tmp/yandexvgnw2N.tmp 2025-06-25T14:30:42.254141Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:42.341902Z INFO: TTestServer started on Port 5267 GrpcPort 29846 TClient is connected to server localhost:5267 PQClient connected to localhost:29846 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:30:42.781217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:42.812799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:42.838312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:30:42.847183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:30:43.070903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:43.086460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-25T14:30:45.140961Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894364560655428:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.141065Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894364560655440:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.141131Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.149469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:45.163728Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894364560655442:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:30:45.464095Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894364560655506:2446] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:45.504957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:45.568691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:45.617003Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894364560655516:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:45.619263Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZTFiNzE3OTItNjA2NmIxZjMtNTBmMGY5NmQtNTliMmY5YWE=, ActorId: [1:7519894364560655402:2298], ActorState: ExecuteState, TraceId: 01jykqz4kv02ajjj9tb7qn268y, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:45.623362Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:45.675045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519894364560655802:2623] 2025-06-25T14:30:46.232613Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894347380785600:2190];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:46.232682Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-25T14:30:52.332466Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-25T14:30:52.385260Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:30:52.386630Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519894394625427077:2704], Recipient [1:7519894347380785778:2148]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:52.386657Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:52.386679Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:30:52.386722Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519894394625427073:2701], Recipient [1:7519894347380785778:2148]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-06-25T14:30:52.386737Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14: ... 037893][test-topic] pipe [7:7519894937742568137:2799] connected; active server actors: 1 2025-06-25T14:32:58.750654Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271187975, Sender [7:7519894937742568134:2797], Recipient [7:7519894899087861255:2429]: NKikimrPQ.TStatus ClientId: "test-consumer" 2025-06-25T14:32:58.750681Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5315: HandleHook, processing event TEvPersQueue::TEvStatus 2025-06-25T14:32:58.750705Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1813: [PQ: 72075186224037892] Handle TEvPersQueue::TEvStatus 2025-06-25T14:32:58.750760Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][test-topic] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 7, Generation 1 2025-06-25T14:32:58.750850Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7519894899087861255:2429], Partition 0, Sender [7:7519894899087861255:2429], Recipient [7:7519894899087861315:2433], Cookie: 0 2025-06-25T14:32:58.750901Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188491, Sender [7:7519894899087861255:2429], Recipient [7:7519894899087861315:2433]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-25T14:32:58.750925Z node 7 :PERSQUEUE TRACE: partition.h:602: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-25T14:32:58.751182Z node 7 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 
2025-06-25T14:32:58.751284Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [7:7519894937742568134:2797]: Got location 2025-06-25T14:32:58.751311Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:729: DescribeTopicImpl [7:7519894937742568134:2797]: Got sessions 2025-06-25T14:32:58.752423Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][test-topic] pipe [7:7519894937742568137:2799] disconnected; active server actors: 1 2025-06-25T14:32:58.752449Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][test-topic] pipe [7:7519894937742568137:2799] disconnected no session 2025-06-25T14:32:58.752877Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 269877764, Sender [7:7519894937742568138:3234], Recipient [7:7519894899087861255:2429]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:32:58.752906Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5326: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:32:58.752926Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2906: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:32:58.752955Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [7:7519894937742568136:2798] destroyed 2025-06-25T14:32:58.844481Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519894899087861255:2429], Partition 0, Sender [0:0:0], Recipient [7:7519894899087861315:2433], Cookie: 0 2025-06-25T14:32:58.844560Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519894899087861315:2433]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:32:58.844584Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:32:58.844630Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:32:58.844699Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:32:58.844727Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:32:58.844760Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:32:58.981040Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519894899087861255:2429], Partition 0, Sender [0:0:0], Recipient [7:7519894899087861315:2433], Cookie: 0 2025-06-25T14:32:58.981118Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519894899087861315:2433]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:32:58.981143Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:32:58.981181Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:32:58.981250Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:32:58.981280Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:32:58.981312Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:32:58.984501Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186224037893][test-topic] TPersQueueReadBalancer::HandleWakeup 2025-06-25T14:32:58.984564Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186224037893][test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037892 Cookie: 9 2025-06-25T14:32:58.984727Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [7:7519894899087861255:2429], Partition 0, Sender [7:7519894899087861318:2435], Recipient [7:7519894899087861315:2433], Cookie: 0 2025-06-25T14:32:58.984791Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188544, Sender [7:7519894899087861318:2435], Recipient [7:7519894899087861315:2433]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-25T14:32:58.984816Z node 7 :PERSQUEUE TRACE: partition.h:630: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-25T14:32:58.985245Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188536, Sender [7:7519894899087861256:2430], Recipient [7:7519894899087861255:2429]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-25T14:32:58.985278Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5340: HandleHook, processing event TEvPQ::TEvSubDomainStatus 2025-06-25T14:32:58.985349Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271187975, Sender [7:7519894899087861256:2430], Recipient [7:7519894899087861255:2429]: NKikimrPQ.TStatus GetStatForAllConsumers: true 2025-06-25T14:32:58.985367Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5315: HandleHook, processing event TEvPersQueue::TEvStatus 2025-06-25T14:32:58.985394Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1813: [PQ: 72075186224037892] Handle TEvPersQueue::TEvStatus 2025-06-25T14:32:58.985477Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [7:7519894899087861255:2429], Partition 0, Sender [7:7519894899087861255:2429], Recipient [7:7519894899087861315:2433], Cookie: 0 2025-06-25T14:32:58.985531Z node 7 :PERSQUEUE TRACE: partition.h:584: 
StateIdle, received event# 271188536, Sender [7:7519894899087861255:2429], Recipient [7:7519894899087861315:2433]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-25T14:32:58.985553Z node 7 :PERSQUEUE TRACE: partition.h:626: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-06-25T14:32:58.985590Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7519894899087861255:2429], Partition 0, Sender [7:7519894899087861255:2429], Recipient [7:7519894899087861315:2433], Cookie: 0 2025-06-25T14:32:58.985626Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188491, Sender [7:7519894899087861255:2429], Recipient [7:7519894899087861315:2433]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-25T14:32:58.985644Z node 7 :PERSQUEUE TRACE: partition.h:602: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-25T14:32:58.985898Z node 7 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-25T14:32:58.986062Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [7:7519894899087861315:2433], Recipient [7:7519894899087861255:2429]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:32:58.986083Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:32:58.986458Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 9 DataSize: 0 UsedReserveSize: 0 2025-06-25T14:32:58.986603Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][test-topic] ProcessPendingStats. 
PendingUpdates size 1 2025-06-25T14:32:58.986827Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271188001, Sender [7:7519894899087861256:2430], Recipient [7:7519894830368383477:2161]: NKikimrPQ.TEvPeriodicTopicStats PathId: 13 Generation: 1 Round: 9 DataSize: 0 UsedReserveSize: 0 SubDomainOutOfSpace: false 2025-06-25T14:32:58.986854Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4993: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-25T14:32:58.986885Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046644480, LocalPathId: 13] DataSize 0 UsedReserveSize 0 2025-06-25T14:32:58.986908Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.099994s, queue# 1 >> TestYmqHttpProxy::TestSetQueueAttributes [GOOD] |78.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |78.8%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |78.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut >> TSubDomainTest::CreateTabletForUnknownDomain [GOOD] >> TSubDomainTest::DatashardNotRunAtAllWhenSubDomainNodesIsStopped >> DataShardVolatile::UpsertNoLocksArbiter+UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter-UseSink >> TestYmqHttpProxy::TestTagQueue >> TestKinesisHttpProxy::TestListStreamConsumersWithMaxResults >> TopicAutoscaling::ReadFromTimestamp_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadFromTimestamp_PQv1 >> TopicAutoscaling::ReBalancingAfterSplit_sessionsWithPartition [GOOD] >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK >> KqpQueryService::ExecuteDDLStatusCodeSchemeError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> ScriptExecutionsTest::UpdatesLeaseAfterExpiring [GOOD] Test command err: 2025-06-25T14:32:42.290651Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894869496377441:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:42.290684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c80/r3tmp/tmpG1AdWt/pdisk_1.dat 2025-06-25T14:32:43.280791Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:43.320481Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894869496377421:2080] 1750861962279323 != 1750861962279326 2025-06-25T14:32:43.324454Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:43.344734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:43.344819Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:43.347743Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:43.348577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; TClient is connected to server localhost:65127 TServer::EnableGrpc on GrpcPort 3931, node 1 2025-06-25T14:32:44.043169Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:44.043191Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:44.043199Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:44.043290Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:44.373310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:44.401053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:32:47.299987Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894869496377441:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:47.300060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:48.306215Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:48.313359Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:48.369150Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:32:48.369196Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-25T14:32:48.369228Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:48.369271Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:48.369403Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:48.369440Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:48.376378Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:48.376425Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:48.381868Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-25T14:32:48.381876Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-25T14:32:48.381922Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-25T14:32:48.382781Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-25T14:32:48.382787Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-25T14:32:48.382812Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-25T14:32:48.382868Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-25T14:32:48.382874Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-25T14:32:48.382884Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. 
Full table path:/dc-1/.metadata/script_execution_leases 2025-06-25T14:32:48.398009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:48.399818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:48.409530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:48.423798Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-25T14:32:48.423861Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710658 2025-06-25T14:32:48.425783Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-25T14:32:48.425807Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710660 2025-06-25T14:32:48.436511Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-25T14:32:48.436576Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710659 2025-06-25T14:32:48.581780Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 2025-06-25T14:32:48.631406Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-06-25T14:32:48.634260Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 2025-06-25T14:32:48.676346Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-25T14:32:48.692324Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-25T14:32:48.728762Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. 
Column diff is empty, finishing 2025-06-25T14:32:48.729750Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 680cea87-8ab55122-c1fc9d0a-994f7844, Bootstrap. Database: /dc-1 2025-06-25T14:32:48.742048Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211740.809611s seconds to be completed 2025-06-25T14:32:48.744736Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=MTM0ODhmNzItYTMwMTg4MjYtNWYxMzM0NTMtNWUyNjYwMGU=, workerId: [1:7519894895266182081:2298], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-25T14:32:48.744885Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:32:48.745798Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 680cea87-8ab55122-c1fc9d0a-994f7844, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLAR ... t; DECLARE $user_token AS Text; DECLARE $script_sinks AS Optional; DECLARE $script_secret_names AS Optional; DECLARE $applicate_script_external_effect_required AS Bool; UPDATE `.metadata/script_executions` SET operation_status = $operation_status, execution_status = $execution_status, finalization_status = IF($applicate_script_external_effect_required, $finalization_status, NULL), issues = $issues, plan = $plan, end_ts = CurrentUtcTimestamp(), stats = $stats, ast = $ast, ast_compressed = $ast_compressed, ast_compression_method = $ast_compression_method, expire_at = IF($operation_ttl > CAST(0 AS Interval), CurrentUtcTimestamp() + $operation_ttl, NULL), customer_supplied_id = IF($applicate_script_external_effect_required, $customer_supplied_id, NULL), user_token = IF($applicate_script_external_effect_required, $user_token, NULL), script_sinks = IF($applicate_script_external_effect_required, $script_sinks, NULL), script_secret_names = IF($applicate_script_external_effect_required, $script_secret_names, NULL) WHERE database = $database AND execution_id = $execution_id; DELETE FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id; 2025-06-25T14:33:00.670196Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NWZhMjIxYWQtZmVhMDE0Ni1iYTRiMTE5Zi0xMDQzZjA4Ng==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 22, targetId: [2:7519894948068838126:2371] 2025-06-25T14:33:00.670226Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 22 timeout: 300.000000s actor id: [2:7519894948068838201:2618] 2025-06-25T14:33:00.932433Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:33:00.962265Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 21, sender: [2:7519894948068838175:2387], selfId: [2:7519894922299033241:2079], source: [2:7519894948068838174:2386] 2025-06-25T14:33:00.962907Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptLeaseUpdater] TraceId: a9b1bea2-37b12f3-ac00eef8-566eaa01, State: Get lease info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NWUxNzc3ZTctYzEzOWM3OTMtMTI5NGY0NjEtNjE2ZTE1NDI=, TxId: 01jykr397zfy8q0cdca10dh2nh 2025-06-25T14:33:00.963043Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptLeaseUpdater] TraceId: a9b1bea2-37b12f3-ac00eef8-566eaa01, State: Get lease info, RunDataQuery: -- TScriptLeaseUpdater::OnGetLeaseInfo DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $lease_duration AS Interval; UPDATE `.metadata/script_execution_leases` SET lease_deadline=(CurrentUtcTimestamp() + $lease_duration) WHERE database = $database AND execution_id = $execution_id; 2025-06-25T14:33:00.963648Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NWUxNzc3ZTctYzEzOWM3OTMtMTI5NGY0NjEtNjE2ZTE1NDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 23, targetId: [2:7519894948068838174:2386] 2025-06-25T14:33:00.963682Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 23 timeout: 300.000000s actor id: [2:7519894948068838236:2634] 2025-06-25T14:33:01.290976Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 23, sender: [2:7519894948068838235:2406], selfId: [2:7519894922299033241:2079], source: [2:7519894948068838174:2386] 2025-06-25T14:33:01.292500Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptLeaseUpdater] TraceId: a9b1bea2-37b12f3-ac00eef8-566eaa01, State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NWUxNzc3ZTctYzEzOWM3OTMtMTI5NGY0NjEtNjE2ZTE1NDI=, TxId: 2025-06-25T14:33:01.292578Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TScriptLeaseUpdater] TraceId: a9b1bea2-37b12f3-ac00eef8-566eaa01, State: Update lease, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NWUxNzc3ZTctYzEzOWM3OTMtMTI5NGY0NjEtNjE2ZTE1NDI=, TxId: 2025-06-25T14:33:01.293893Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NWUxNzc3ZTctYzEzOWM3OTMtMTI5NGY0NjEtNjE2ZTE1NDI=, workerId: [2:7519894948068838174:2386], local sessions count: 3 2025-06-25T14:33:01.304943Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jykr39kr50880d52x48mg6kv, Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MjBkNmQyNjUtNjBiZDJlODEtYTM0MDg3MTUtZDhlZTQ2YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 24, targetId: [2:7519894943773870687:2330] 2025-06-25T14:33:01.304986Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 24 timeout: 300.000000s actor id: [2:7519894952363805558:2643] 2025-06-25T14:33:01.352233Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 22, sender: [2:7519894948068838200:2394], selfId: [2:7519894922299033241:2079], source: [2:7519894948068838126:2371] 2025-06-25T14:33:01.353024Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: f3559cc4-16512e63-8f7b81f6-1acf9803, State: Update final status, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NWZhMjIxYWQtZmVhMDE0Ni1iYTRiMTE5Zi0xMDQzZjA4Ng==, TxId: 2025-06-25T14:33:01.353096Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: f3559cc4-16512e63-8f7b81f6-1acf9803, State: Update final status, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NWZhMjIxYWQtZmVhMDE0Ni1iYTRiMTE5Zi0xMDQzZjA4Ng==, TxId: 2025-06-25T14:33:01.353107Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:2658: [ScriptExecutions] Finish script execution operation. ExecutionId: f3559cc4-16512e63-8f7b81f6-1acf9803. SUCCESS. Issues: 2025-06-25T14:33:01.353618Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NWZhMjIxYWQtZmVhMDE0Ni1iYTRiMTE5Zi0xMDQzZjA4Ng==, workerId: [2:7519894948068838126:2371], local sessions count: 2 2025-06-25T14:33:01.354166Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=MTM3OTE4Y2UtYWNiN2JiZC02NThhMDg4My0zM2EyOTViOQ==, workerId: [2:7519894943773870656:2314], local sessions count: 1 2025-06-25T14:33:02.168516Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jykr39kr50880d52x48mg6kv", Forwarded response to sender actor, requestId: 24, sender: [2:7519894952363805557:2413], selfId: [2:7519894922299033241:2079], source: [2:7519894943773870687:2330] 2025-06-25T14:33:02.173534Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:791: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: a9b1bea2-37b12f3-ac00eef8-566eaa01, Bootstrap. Start TCheckLeaseStatusQueryActor 2025-06-25T14:33:02.173591Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: a9b1bea2-37b12f3-ac00eef8-566eaa01, Bootstrap. 
Database: /dc-1 2025-06-25T14:33:02.173892Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993211727.377739s seconds to be completed 2025-06-25T14:33:02.176004Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=ZmUyN2Y3NjItN2NmMTc5MTEtNzE2YjU0M2ItNzk3ZjM3NjI=, workerId: [2:7519894956658772925:2431], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-25T14:33:02.176175Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:33:02.176681Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: a9b1bea2-37b12f3-ac00eef8-566eaa01, RunDataQuery: -- TCheckLeaseStatusQueryActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, execution_status, finalization_status, issues, run_script_actor_id FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_deadline FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-25T14:33:02.177033Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZmUyN2Y3NjItN2NmMTc5MTEtNzE2YjU0M2ItNzk3ZjM3NjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 26, targetId: [2:7519894956658772925:2431] 2025-06-25T14:33:02.177064Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 26 timeout: 300.000000s actor id: [2:7519894956658772927:2670] 2025-06-25T14:33:02.292780Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:33:02.876644Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 26, sender: [2:7519894956658772926:2432], selfId: [2:7519894922299033241:2079], source: [2:7519894956658772925:2431] 2025-06-25T14:33:02.877164Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: a9b1bea2-37b12f3-ac00eef8-566eaa01, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZmUyN2Y3NjItN2NmMTc5MTEtNzE2YjU0M2ItNzk3ZjM3NjI=, TxId: 2025-06-25T14:33:02.877282Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: a9b1bea2-37b12f3-ac00eef8-566eaa01, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZmUyN2Y3NjItN2NmMTc5MTEtNzE2YjU0M2ItNzk3ZjM3NjI=, TxId: 2025-06-25T14:33:02.877381Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:838: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: a9b1bea2-37b12f3-ac00eef8-566eaa01, reply success 2025-06-25T14:33:02.877986Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=ZmUyN2Y3NjItN2NmMTc5MTEtNzE2YjU0M2ItNzk3ZjM3NjI=, workerId: [2:7519894956658772925:2431], local sessions count: 1 2025-06-25T14:33:02.905669Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=MjBkNmQyNjUtNjBiZDJlODEtYTM0MDg3MTUtZDhlZTQ2YWQ=, workerId: 
[2:7519894943773870687:2330], local sessions count: 0 >> THealthCheckTest::YellowGroupIssueWhenPartialGroupStatus >> TSubDomainTest::GenericCases [GOOD] >> KqpQueryService::DmlNoTx [GOOD] |78.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ExecuteDDLStatusCodeSchemeError [GOOD] Test command err: Trying to start YDB, gRPC: 14094, MsgBus: 29393 2025-06-25T14:32:26.687105Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894798293712197:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:26.687386Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001863/r3tmp/tmplhO3Q7/pdisk_1.dat 2025-06-25T14:32:27.394369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:27.394486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:27.402206Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894798293711977:2080] 1750861946590288 != 1750861946590291 2025-06-25T14:32:27.412886Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:27.434211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14094, node 1 2025-06-25T14:32:27.672358Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:27.727807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:27.727835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:27.727844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:27.727966Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29393 TClient is connected to server localhost:29393 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:29.148789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:29.179188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:32:29.192977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:29.393882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:29.570455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:29.696164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:31.678257Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894798293712197:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:31.678362Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:32.217335Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894824063517399:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:32.217463Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:32.720441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:32.778130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:32.846537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:32.919647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:33.003044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:33.098127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:33.210240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:33.439771Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894828358485368:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:33.439888Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:33.440258Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894828358485373:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:33.444694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:33.460194Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894828358485375:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:32:33.553651Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894828358485426:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 20299, MsgBus: 63737 2025-06-25T14:32:37.506691Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894846560486139:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:37.507027Z node 2 :METADATA_PROVIDER ERROR: log.cp ... (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:54.868659Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:54.944665Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:54.996794Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:55.041264Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:55.113213Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:55.183498Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:55.316048Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[3:7519894923546496760:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:55.316143Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:55.316728Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519894923546496765:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:55.321574Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:55.333868Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519894923546496767:2441], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:32:55.399859Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519894923546496821:3434] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:56.997778Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZGY1MWFiNDEtMTNkMmFkODctMzJjY2Q5MTItYjczY2EzOWI=, ActorId: [3:7519894927841464398:2482], ActorState: ReadyState, TraceId: 01jykr35ct2jd56p45tgnnfn3x, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 2579, MsgBus: 30250 2025-06-25T14:32:58.069868Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894938528732669:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:58.069929Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001863/r3tmp/tmp80TK1j/pdisk_1.dat 2025-06-25T14:32:58.515354Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:58.521693Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:58.521787Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:58.523479Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519894938528732635:2080] 1750861978063108 != 1750861978063111 2025-06-25T14:32:58.545960Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2579, node 4 2025-06-25T14:32:58.815570Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:58.815598Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:58.815606Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:58.815742Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:32:59.100524Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30250 TClient is connected to server localhost:30250 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:59.941750Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:59.949200Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:33:03.076432Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894938528732669:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:03.076512Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:03.884116Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894960003569760:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:03.884218Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:03.885329Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894960003569772:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:03.893995Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:03.919430Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894960003569774:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:33:04.002707Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894960003569825:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:04.053902Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894964298537152:2350] txid# 281474976710660, issues: { message: "Type \'TzTimestamp\' specified for column \'payload\' is not supported by storage" severity: 1 } 2025-06-25T14:33:04.062128Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZTQ0YjZiMTgtYzJjMjAxMWItZTM5ZDlmYTYtZTgxMDY4YjM=, ActorId: [4:7519894960003569756:2292], ActorState: ExecuteState, TraceId: 01jykr38aff42mm74mmgd073v0, Create QueryResponse for error on request, msg: >> TSubDomainTest::StartTenanNodeAndStopAtDestructor [GOOD] >> KqpProxy::ExecuteScriptFailsWithoutFeatureFlag [GOOD] |78.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::DmlNoTx [GOOD] Test command err: Trying to start YDB, gRPC: 9433, MsgBus: 65104 2025-06-25T14:32:26.319799Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894798663373418:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:26.320252Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001873/r3tmp/tmpUmG5Me/pdisk_1.dat 2025-06-25T14:32:27.132836Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894798663373211:2080] 1750861946229778 != 1750861946229781 2025-06-25T14:32:27.204933Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:27.208090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:27.228967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:27.233351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:27.308500Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 9433, node 1 2025-06-25T14:32:27.550344Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:27.550374Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:27.550388Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:27.550674Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65104 TClient is connected to 
server localhost:65104 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:28.917291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:28.988103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:29.434903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:29.952995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:30.135249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:31.317168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894798663373418:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:31.318808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:32.922252Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894824433178641:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:32.922369Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:33.289512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:33.345491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:33.410230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:33.455225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:33.526185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:33.638588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:33.731658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:33.832344Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894828728146604:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:33.832412Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:33.832667Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894828728146609:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:33.844787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:33.872552Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894828728146611:2441], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:32:33.957604Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894828728146664:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 18029, MsgBus: 21761 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001873/r3tmp/tmpZSrKf8/pdisk_1.dat 2025-06-25T14:32:37.014118Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894843613537319:2213];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:37.014876Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root ... orId: [3:7519894924387254323:2493], ActorState: ExecuteState, TraceId: 01jykr3486cr12nx8dcj9etj3g, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 23381, MsgBus: 8557 2025-06-25T14:32:57.101229Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894933104252708:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:57.102107Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001873/r3tmp/tmp5mouIN/pdisk_1.dat 2025-06-25T14:32:57.253352Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:57.254817Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519894933104252668:2080] 1750861977095341 != 1750861977095344 2025-06-25T14:32:57.265663Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:57.265759Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:57.269661Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23381, node 4 2025-06-25T14:32:57.524923Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:57.524948Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:57.524957Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:57.525091Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8557 2025-06-25T14:32:58.113254Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8557 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:58.322086Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:58.328073Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:32:58.355093Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:58.526491Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:58.780971Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:58.895694Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:33:02.108415Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894933104252708:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:02.108510Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:02.200124Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894954579090765:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:02.200224Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:02.228410Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:02.285451Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:02.374508Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:02.434882Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:02.501121Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:02.595706Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:02.687678Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:02.846509Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894954579091426:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:02.846613Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:02.846893Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894954579091431:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:02.851667Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:02.877091Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:33:02.877800Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894954579091433:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:33:02.982787Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894954579091484:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::GenericCases [GOOD] Test command err: 2025-06-25T14:32:44.817082Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894875619287132:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:44.819501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016e3/r3tmp/tmpxGW3ol/pdisk_1.dat 2025-06-25T14:32:45.570006Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:45.689105Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:32:45.711289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:45.711377Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:45.719709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:45.848451Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3398 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:32:46.031292Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519894875619287328:2120] Handle TEvNavigate describe path dc-1 2025-06-25T14:32:46.096928Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519894884209222412:2449] HANDLE EvNavigateScheme dc-1 2025-06-25T14:32:46.097079Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519894879914254664:2143], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:46.097169Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519894879914255095:2440][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519894879914254664:2143], cookie# 1 2025-06-25T14:32:46.098720Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894879914255099:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894879914255096:2440], cookie# 1 2025-06-25T14:32:46.098774Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894879914255100:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894879914255097:2440], cookie# 1 2025-06-25T14:32:46.098789Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894879914255101:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894879914255098:2440], cookie# 1 2025-06-25T14:32:46.098829Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894875619287035:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894879914255099:2440], cookie# 1 2025-06-25T14:32:46.098858Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894875619287038:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894879914255100:2440], cookie# 1 2025-06-25T14:32:46.098875Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894875619287041:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894879914255101:2440], cookie# 1 2025-06-25T14:32:46.098908Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894879914255099:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894875619287035:2051], cookie# 1 2025-06-25T14:32:46.098927Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894879914255100:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894875619287038:2054], cookie# 1 2025-06-25T14:32:46.098952Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894879914255101:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894875619287041:2057], cookie# 1 2025-06-25T14:32:46.098987Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894879914255095:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894879914255096:2440], cookie# 1 2025-06-25T14:32:46.099010Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: 
[main][1:7519894879914255095:2440][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:32:46.099024Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894879914255095:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894879914255097:2440], cookie# 1 2025-06-25T14:32:46.099036Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894879914255095:2440][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:32:46.099061Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894879914255095:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894879914255098:2440], cookie# 1 2025-06-25T14:32:46.099088Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519894879914255095:2440][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:32:46.099148Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519894879914254664:2143], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:32:46.104906Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519894879914254664:2143], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519894879914255095:2440] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:32:46.105028Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519894879914254664:2143], cacheItem# { Subscriber: { Subscriber: [1:7519894879914255095:2440] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-25T14:32:46.107403Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519894884209222413:2450], recipient# [1:7519894884209222412:2449], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:32:46.107468Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7519894884209222412:2449] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:32:46.150662Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519894884209222412:2449] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:32:46.154094Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519894884209222412:2449] Handle TEvDescribeSchemeResult Forward to# [1:7519894884209222411:2448] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:46.185256Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519894875 ... 
06-25T14:33:02.993595Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][4:7519894953970351507:3018][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [4:7519894932495513496:2053] 2025-06-25T14:33:02.993610Z node 4 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [4:7519894932495513496:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [4:7519894953970351507:3018] 2025-06-25T14:33:02.993634Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][4:7519894953970351502:3018][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [4:7519894953970351503:3018] 2025-06-25T14:33:02.993701Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][4:7519894953970351502:3018][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [4:7519894953970351505:3018] 2025-06-25T14:33:02.993749Z node 4 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][4:7519894953970351502:3018][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Set up state: owner# [4:7519894932495513789:2129], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:33:02.993784Z node 4 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][4:7519894953970351502:3018][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [4:7519894953970351504:3018] 2025-06-25T14:33:02.993814Z node 4 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][4:7519894953970351502:3018][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Ignore empty state: owner# [4:7519894932495513789:2129], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:33:02.993863Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [4:7519894932495513789:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 } 2025-06-25T14:33:02.993934Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [4:7519894932495513789:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [4:7519894953970351502:3018] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:33:02.994017Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519894932495513789:2129], cacheItem# { Subscriber: { Subscriber: [4:7519894953970351502:3018] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } 
Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:02.994094Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519894953970351509:3019], recipient# [4:7519894953970351501:2295], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:03.508463Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519894932495513789:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:03.508605Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519894932495513789:2129], cacheItem# { Subscriber: { Subscriber: [4:7519894936790481643:2515] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:03.508692Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519894958265318812:3022], recipient# [4:7519894958265318811:2296], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:03.808189Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519894932495513789:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:03.808335Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519894932495513789:2129], cacheItem# { Subscriber: { Subscriber: [4:7519894936790481643:2515] DomainOwnerId: 72057594046644480 Type: 2 
SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:03.808429Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519894958265318817:3023], recipient# [4:7519894958265318816:2297], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:04.001064Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519894932495513789:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:04.001229Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519894932495513789:2129], cacheItem# { Subscriber: { Subscriber: [4:7519894953970351502:3018] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:04.001321Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519894962560286118:3027], recipient# [4:7519894962560286117:2298], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:04.506891Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519894932495513789:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:04.507048Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519894932495513789:2129], cacheItem# { Subscriber: { Subscriber: 
[4:7519894936790481643:2515] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:04.507149Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519894962560286123:3028], recipient# [4:7519894962560286122:2299], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TSubDomainTest::ConsistentCopyTable [GOOD] >> TSubDomainTest::DatashardRunAtOtherNodeWhenOneNodeIsStopped [GOOD] >> THealthCheckTest::Issues100GroupsListing [GOOD] >> THealthCheckTest::Issues100VCardListing ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::ExecuteScriptFailsWithoutFeatureFlag [GOOD] Test command err: 2025-06-25T14:32:39.832289Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894857853450437:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:39.832492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c84/r3tmp/tmp7MSclQ/pdisk_1.dat 2025-06-25T14:32:40.638145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:40.644811Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:40.646716Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:40.693676Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:40.696508Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894857853450343:2080] 1750861959767718 != 1750861959767721 2025-06-25T14:32:40.860422Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18100 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:41.352023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:41.369278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:32:44.452618Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:44.454051Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:44.492891Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894879328287427:2272], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:44.492999Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:44.493503Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1578: Failed to parse session id: ydb://session/1?id=ZjY5NWRlM2EtYWMyYjA5YWEtNzQ0MTVlYTMtM2Q4ZDgzOWQ=&node_id=1234&node_id=12345 2025-06-25T14:32:44.495157Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:32:44.495184Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T14:32:44.495206Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:44.495279Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:44.495347Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:44.495387Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:44.495547Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 2, sender: [1:7519894866443385492:2287], selfId: [1:7519894857853450579:2238], source: [1:7519894857853450579:2238] 2025-06-25T14:32:44.496541Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:44.496576Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:32:44.496759Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1578: Failed to parse session id: unknown://session/1?id=ZjY5NWRlM2EtYWMyYjA5YWEtNzQ0MTVlYTMtM2Q4ZDgzOWQ=&node_id=1234&node_id=12345 2025-06-25T14:32:44.496849Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 3, sender: [1:7519894866443385492:2287], selfId: [1:7519894857853450579:2238], source: [1:7519894857853450579:2238] 2025-06-25T14:32:44.497429Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894879328287436:2273], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:44.497570Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1578: Failed to parse session id: ydb://session/1?id=ZjY5NWRlM2EtYWMyYjA5YWEtNzQ0MTVlYTMtM2Q4ZDgzOWQ=&node_id=eqweq 2025-06-25T14:32:44.497629Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 4, sender: [1:7519894866443385492:2287], selfId: [1:7519894857853450579:2238], source: [1:7519894857853450579:2238] 2025-06-25T14:32:44.497673Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:44.504476Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894879328287438:2274], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:44.504582Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:48.888825Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:32:48.889023Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:48.889154Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c84/r3tmp/tmpNwBl9L/pdisk_1.dat 2025-06-25T14:32:49.211424Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T14:32:49.213277Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:32:49.271645Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T14:32:49.271746Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:49.272008Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:49.276588Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750861965235444 != 1750861965235448 2025-06-25T14:32:49.309559Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:256:2301], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: Root/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:49.311323Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [2:256:2301], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /Root/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-25T14:32:49.311457Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [2:256:2301], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /Root/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [2:571:2494] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:32:49.311597Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:256:2301], cacheItem# { Subscriber: { Subscriber: [2:571:2494] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: 
StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: tr ... m::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::NSchedulerOld::TSchedulerActor Captured TEvents::TSystem::Wakeup to (anonymous namespace)::TComputeSchedulerService Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BSC_STAT_PROCESSOR Captured TEvents::TSystem::Wakeup to NKikimr::NBsController::TBlobStorageController::TSelfHealActor Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to TABLET_COUNTERS_AGGREGATOR Captured TEvents::TSystem::Wakeup to TICKET_PARSER_ACTOR Captured TEvents::TSystem::Wakeup to NKikimr::NIcNodeCache::TIcNodeCacheServiceActor Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to KQP_COMPILE_COMPUTATION_PATTERN_SERVICE Captured TEvents::TSystem::Wakeup to KQP_NODE_SERVICE Captured TEvents::TSystem::Wakeup to (anonymous namespace)::TComputeSchedulerService Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to DS_PROXY_NODE_MON_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::NSchedulerOld::TSchedulerActor Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::NSchedulerOld::TSchedulerActor Captured TEvents::TSystem::Wakeup to (anonymous namespace)::TComputeSchedulerService Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured 
TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to KQP_COMPILE_COMPUTATION_PATTERN_SERVICE Captured TEvents::TSystem::Wakeup to KQP_NODE_SERVICE Captured TEvents::TSystem::Wakeup to (anonymous namespace)::TComputeSchedulerService Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to PROXY_SCHEME_CACHE Captured TEvents::TSystem::Wakeup to DS_PROXY_NODE_MON_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to EXT_COUNTERS_UPDATER_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to TICKET_PARSER_ACTOR Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to TABLET_RESPONSIVENESS_PINGER Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::NSchedulerOld::TSchedulerActor Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to NKikimr::NKqp::NSchedulerOld::TSchedulerActor Captured TEvents::TSystem::Wakeup to (anonymous namespace)::TComputeSchedulerService Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER Captured TEvents::TSystem::Wakeup to BLOB_CACHE_ACTOR 2025-06-25T14:32:58.900685Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1334: Handle TEvPrivate::TEvOnRequestTimeout(20) 2025-06-25T14:32:58.900765Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1342: Reply timeout: requestId 20 sessionId: ydb://session/3?node_id=2&id=MzA5YzU2ZTMtNWYxMjJlMTgtMzFjNDBjMGEtOTM5ODllYmQ= status: TIMEOUT round: 0 Captured TEvents::TSystem::Wakeup to BS_SYNC_BROKER 2025-06-25T14:32:58.900974Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MzA5YzU2ZTMtNWYxMjJlMTgtMzFjNDBjMGEtOTM5ODllYmQ=, ActorId: [2:1074:2877], ActorState: ExecuteState, TraceId: 01jykr36c30nhrk91pkfve2j7r, Create QueryResponse for error on request, msg: 2025-06-25T14:32:58.901185Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 20, sender: [2:554:2480], selfId: [2:59:2106], source: [2:1074:2877] Send scheduled evet back 2025-06-25T14:32:58.901325Z node 2 :KQP_COMPILE_ACTOR NOTICE: kqp_compile_actor.cpp:577: Compilation timeout, self: [2:1077:2880], cluster: db, database: , text: "SELECT * FROM `/Root/Table`;", startTime: 2025-06-25T14:32:57.988728Z 2025-06-25T14:32:58.901403Z node 2 :KQP_COMPILE_ACTOR DEBUG: kqp_compile_actor.cpp:402: Send response, self: [2:1077:2880], owner: [2:249:2294], status: TIMEOUT, issues:
: Error: Query compilation timed out. , uid: e27cd5c0-effc507e-4ce54669-8ddfd7b9 Send captured event back Send captured event back Send captured event back Send captured event back Send captured event back 2025-06-25T14:33:00.597241Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519894944260723165:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:00.597290Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c84/r3tmp/tmp6vYEHT/pdisk_1.dat 2025-06-25T14:33:01.224710Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:01.234508Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:01.234602Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:01.254491Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17905, node 3 2025-06-25T14:33:01.729442Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:01.729470Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:01.729480Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:01.729620Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:01.733054Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20309 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:02.113568Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:33:05.475227Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:33:05.476916Z node 3 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-25T14:33:05.512836Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:33:05.516437Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T14:33:05.516494Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:33:05.516580Z node 3 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-25T14:33:05.516670Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:33:05.516716Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:33:05.522055Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:33:05.523713Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:33:05.607384Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519894944260723165:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:05.607505Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::StartTenanNodeAndStopAtDestructor [GOOD] Test command err: 2025-06-25T14:32:49.902080Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894900533793908:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:49.902121Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016b6/r3tmp/tmp4ATxKi/pdisk_1.dat 2025-06-25T14:32:50.704981Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:50.705064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:50.720127Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:50.732164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:50.996556Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11009 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:32:51.132806Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519894904828761336:2118] Handle TEvNavigate describe path dc-1 2025-06-25T14:32:51.166304Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519894909123729120:2441] HANDLE EvNavigateScheme dc-1 2025-06-25T14:32:51.166440Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519894904828761361:2132], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:51.166503Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519894904828761792:2423][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519894904828761361:2132], cookie# 1 2025-06-25T14:32:51.167948Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894904828761796:2423][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894904828761793:2423], cookie# 1 2025-06-25T14:32:51.167976Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894904828761797:2423][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894904828761794:2423], cookie# 1 2025-06-25T14:32:51.167990Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894904828761798:2423][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894904828761795:2423], cookie# 1 2025-06-25T14:32:51.168015Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894900533793753:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894904828761796:2423], cookie# 1 2025-06-25T14:32:51.168050Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894900533793756:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894904828761797:2423], cookie# 1 2025-06-25T14:32:51.168084Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894900533793759:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894904828761798:2423], cookie# 1 2025-06-25T14:32:51.168115Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894904828761796:2423][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894900533793753:2050], cookie# 1 2025-06-25T14:32:51.168128Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894904828761797:2423][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894900533793756:2053], cookie# 1 2025-06-25T14:32:51.168139Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894904828761798:2423][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894900533793759:2056], cookie# 1 2025-06-25T14:32:51.168166Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894904828761792:2423][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894904828761793:2423], cookie# 1 2025-06-25T14:32:51.168184Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: 
[main][1:7519894904828761792:2423][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:32:51.168198Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894904828761792:2423][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894904828761794:2423], cookie# 1 2025-06-25T14:32:51.168209Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894904828761792:2423][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:32:51.168233Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894904828761792:2423][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894904828761795:2423], cookie# 1 2025-06-25T14:32:51.168270Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519894904828761792:2423][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:32:51.169740Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519894904828761361:2132], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:32:51.185578Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519894904828761361:2132], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519894904828761792:2423] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:32:51.185692Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519894904828761361:2132], cacheItem# { Subscriber: { Subscriber: [1:7519894904828761792:2423] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-25T14:32:51.195718Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519894909123729121:2442], recipient# [1:7519894909123729120:2441], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:32:51.195794Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7519894909123729120:2441] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:32:51.257410Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519894909123729120:2441] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:32:51.260465Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519894909123729120:2441] Handle TEvDescribeSchemeResult Forward to# [1:7519894909123729118:2439] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:51.301928Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519894904828761336:2118] Handle TEvProposeTransaction 2025-06-25T14:32:51.301954Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:75198949048 ... 
ta/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519894941168409595:2050] 2025-06-25T14:33:04.924242Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519894962643247029:2551][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519894941168409598:2053] 2025-06-25T14:33:04.924259Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519894962643247030:2551][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519894941168409601:2056] 2025-06-25T14:33:04.924281Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519894962643247012:2551][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519894962643247025:2551] 2025-06-25T14:33:04.924358Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519894941168409595:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519894962643247022:2550] 2025-06-25T14:33:04.924374Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519894941168409595:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519894962643247028:2551] 2025-06-25T14:33:04.924387Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519894941168409598:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519894962643247023:2550] 2025-06-25T14:33:04.924400Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519894941168409598:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519894962643247029:2551] 2025-06-25T14:33:04.924412Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519894941168409601:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519894962643247024:2550] 2025-06-25T14:33:04.924423Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519894941168409601:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519894962643247030:2551] 2025-06-25T14:33:04.924451Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519894941168409896:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-25T14:33:04.924496Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519894941168409896:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519894962643247011:2550] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:33:04.924553Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519894941168409896:2130], cacheItem# { Subscriber: { Subscriber: [3:7519894962643247011:2550] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: 
IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:04.924601Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519894962643247031:2552], recipient# [3:7519894962643247009:2277], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:04.924824Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519894962643247012:2551][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519894962643247026:2551] 2025-06-25T14:33:04.924857Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:7519894962643247012:2551][/dc-1/.metadata/workload_manager/running_requests] Set up state: owner# [3:7519894941168409896:2130], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:33:04.924888Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519894962643247012:2551][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519894962643247027:2551] 2025-06-25T14:33:04.924909Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:7519894962643247012:2551][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [3:7519894941168409896:2130], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:33:04.924939Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519894941168409896:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-25T14:33:04.924980Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519894941168409896:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519894962643247012:2551] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:33:04.925031Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519894941168409896:2130], cacheItem# { Subscriber: { Subscriber: [3:7519894962643247012:2551] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: 
dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:04.925099Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519894962643247032:2553], recipient# [3:7519894962643247007:2275], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:05.374397Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519894941168409896:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:05.374524Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519894941168409896:2130], cacheItem# { Subscriber: { Subscriber: [3:7519894945463377707:2484] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:05.374600Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519894966938214336:2557], recipient# [3:7519894966938214335:2278], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:05.492701Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519894941168409896:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:05.492815Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519894941168409896:2130], cacheItem# { Subscriber: { Subscriber: 
[3:7519894945463377707:2484] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:05.492906Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519894966938214338:2558], recipient# [3:7519894966938214337:2279], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TestKinesisHttpProxy::TestUnauthorizedPutRecords [GOOD] >> KqpDocumentApi::RestrictDrop [GOOD] >> ResultFormatter::Utf8WithQuotes [GOOD] >> ResultFormatter::VariantStruct [GOOD] >> THealthCheckTest::StaticGroupIssue [GOOD] >> THealthCheckTest::StorageLimit50 >> THealthCheckTest::ShardsLimit999 [GOOD] >> THealthCheckTest::ShardsLimit995 >> TestKinesisHttpProxy::TestWrongStream |78.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |78.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::VariantStruct [GOOD] |78.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TestKinesisHttpProxy::ListShardsEmptyFields [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::DatashardRunAtOtherNodeWhenOneNodeIsStopped [GOOD] Test command err: 2025-06-25T14:32:46.964013Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894886682824240:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:46.964062Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016d0/r3tmp/tmpPfkRYG/pdisk_1.dat 2025-06-25T14:32:48.062002Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:48.139856Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:48.141911Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:48.210085Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:48.210175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:48.227531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server 
localhost:11422 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:32:48.636507Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519894890977791746:2116] Handle TEvNavigate describe path dc-1 2025-06-25T14:32:48.688076Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519894895272759529:2441] HANDLE EvNavigateScheme dc-1 2025-06-25T14:32:48.688216Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519894890977791818:2142], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:48.688285Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519894895272759376:2312][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519894890977791818:2142], cookie# 1 2025-06-25T14:32:48.689489Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894895272759390:2312][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894895272759387:2312], cookie# 1 2025-06-25T14:32:48.689518Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894895272759391:2312][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894895272759388:2312], cookie# 1 2025-06-25T14:32:48.689530Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894895272759392:2312][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894895272759389:2312], cookie# 1 2025-06-25T14:32:48.689562Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894886682824170:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894895272759390:2312], cookie# 1 2025-06-25T14:32:48.689589Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894886682824173:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894895272759391:2312], cookie# 1 2025-06-25T14:32:48.689604Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894886682824176:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894895272759392:2312], cookie# 1 2025-06-25T14:32:48.689626Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894895272759390:2312][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894886682824170:2050], cookie# 1 2025-06-25T14:32:48.689639Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894895272759391:2312][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894886682824173:2053], cookie# 1 2025-06-25T14:32:48.689649Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894895272759392:2312][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894886682824176:2056], cookie# 1 2025-06-25T14:32:48.689684Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894895272759376:2312][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894895272759387:2312], cookie# 1 2025-06-25T14:32:48.689709Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:971: [main][1:7519894895272759376:2312][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:32:48.689725Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894895272759376:2312][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894895272759388:2312], cookie# 1 2025-06-25T14:32:48.689738Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894895272759376:2312][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:32:48.689750Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894895272759376:2312][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894895272759389:2312], cookie# 1 2025-06-25T14:32:48.689769Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519894895272759376:2312][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:32:48.689816Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519894890977791818:2142], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:32:48.714001Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519894890977791818:2142], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519894895272759376:2312] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:32:48.714123Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519894890977791818:2142], cacheItem# { Subscriber: { Subscriber: [1:7519894895272759376:2312] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-25T14:32:48.723289Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519894895272759533:2442], recipient# [1:7519894895272759529:2441], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:32:48.723365Z node 1 :TX_PROXY DEBUG: 
describe.cpp:356: Actor# [1:7519894895272759529:2441] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:32:48.761141Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519894895272759529:2441] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:32:48.769618Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519894895272759529:2441] Handle TEvDescribeSchemeResult Forward to# [1:7519894895272759528:2440] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' suc ... 
ops# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:06.545430Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519894973221308218:2334], recipient# [4:7519894973221308190:2296], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:06.545618Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7519894973221308190:2296], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:06.668508Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519894938861569456:2110], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:06.668646Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519894938861569456:2110], cacheItem# { Subscriber: { Subscriber: [4:7519894973221308201:2330] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:06.668692Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519894938861569456:2110], cacheItem# { Subscriber: { Subscriber: [4:7519894973221308202:2331] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:06.668807Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519894973221308219:2335], recipient# [4:7519894973221308190:2296], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:06.669173Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7519894973221308190:2296], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:07.067817Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519894938861569456:2110], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:07.067973Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519894938861569456:2110], cacheItem# { Subscriber: { Subscriber: [4:7519894973221308201:2330] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:07.068019Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519894938861569456:2110], cacheItem# { Subscriber: { Subscriber: [4:7519894973221308202:2331] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:07.068129Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519894977516275518:2336], recipient# [4:7519894973221308190:2296], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:07.068513Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7519894973221308190:2296], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:07.228813Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519894938861569456:2110], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:07.228933Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519894938861569456:2110], cacheItem# { Subscriber: { Subscriber: [4:7519894943156536871:2178] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:07.229002Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519894977516275520:2337], recipient# [4:7519894977516275519:2298], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:07.477800Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519894938861569456:2110], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:07.477935Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [4:7519894938861569456:2110], cacheItem# { Subscriber: { Subscriber: [4:7519894973221308194:2329] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:07.478016Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519894977516275523:2338], recipient# [4:7519894977516275522:2299], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:07.478317Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::ConsistentCopyTable [GOOD]
Test command err: 2025-06-25T14:32:43.489470Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894872469609641:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:43.489521Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016d7/r3tmp/tmpIj5zx3/pdisk_1.dat 2025-06-25T14:32:44.072600Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:44.301052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:44.301170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:44.302151Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23085 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:32:44.340595Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519894872469609846:2105] Handle TEvNavigate describe path dc-1 2025-06-25T14:32:44.367318Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519894876764577419:2259] HANDLE EvNavigateScheme dc-1 2025-06-25T14:32:44.367424Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519894872469609870:2119], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:44.367459Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519894872469609870:2119], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:32:44.367648Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519894876764577420:2260][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:32:44.369303Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519894872469609575:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519894876764577424:2260] 2025-06-25T14:32:44.369303Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519894872469609578:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519894876764577425:2260] 2025-06-25T14:32:44.369359Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519894872469609578:2052] Subscribe: subscriber# [1:7519894876764577425:2260], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:32:44.369372Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519894872469609575:2049] Subscribe: subscriber# [1:7519894876764577424:2260], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:32:44.369422Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519894872469609581:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519894876764577426:2260] 2025-06-25T14:32:44.369466Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519894876764577424:2260][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519894872469609575:2049] 2025-06-25T14:32:44.369467Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519894872469609581:2055] Subscribe: subscriber# [1:7519894876764577426:2260], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:32:44.369491Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519894876764577425:2260][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519894872469609578:2052] 2025-06-25T14:32:44.369499Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519894872469609575:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519894876764577424:2260] 2025-06-25T14:32:44.369533Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519894872469609578:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# 
[1:7519894876764577425:2260] 2025-06-25T14:32:44.369541Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519894876764577426:2260][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519894872469609581:2055] 2025-06-25T14:32:44.369556Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519894872469609581:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519894876764577426:2260] 2025-06-25T14:32:44.369573Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519894876764577420:2260][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519894876764577421:2260] 2025-06-25T14:32:44.369606Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519894876764577420:2260][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519894876764577422:2260] 2025-06-25T14:32:44.369668Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519894876764577420:2260][/dc-1] Set up state: owner# [1:7519894872469609870:2119], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:32:44.369778Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519894876764577420:2260][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519894876764577423:2260] 2025-06-25T14:32:44.369843Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519894876764577420:2260][/dc-1] Path was already updated: owner# [1:7519894872469609870:2119], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:32:44.369880Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894876764577424:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894876764577421:2260], cookie# 1 2025-06-25T14:32:44.369893Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894876764577425:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894876764577422:2260], cookie# 1 2025-06-25T14:32:44.369912Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894876764577426:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894876764577423:2260], cookie# 1 2025-06-25T14:32:44.370017Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894872469609578:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894876764577425:2260], cookie# 1 2025-06-25T14:32:44.370039Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894872469609581:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894876764577426:2260], cookie# 1 
2025-06-25T14:32:44.370060Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894876764577425:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894872469609578:2052], cookie# 1 2025-06-25T14:32:44.370074Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894876764577426:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894872469609581:2055], cookie# 1 2025-06-25T14:32:44.370136Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894876764577420:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894876764577422:2260], cookie# 1 2025-06-25T14:32:44.370163Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894876764577420:2260][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:32:44.370199Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894876764577420:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894876764577423:2260], cookie# 1 2025-06-25T14:32:44.370212Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894876764577420:2260][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:32:44.370868Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894872469609575:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894876764577424:2260], cookie# 1 2025-06-25T14:32:44.370909Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894876764577424:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894872469609575:2049], cookie# 1 2025-06-25T14:32:44.370948Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894876764577420:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894876764577421:2260], cookie# 1 2025-06-25T14:32:44.370977Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519894876764577420:2260][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 TClient::Ls response: 2025-06-25T14:32:44.432928Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519894872469609870:2119], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 
ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: ... lid> IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:06.674840Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [6:7519894971758939525:2256], recipient# [6:7519894971758939497:2285], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:06.675263Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:7519894971758939497:2285], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:06.806105Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [6:7519894945989135464:2107], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:06.806240Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519894945989135464:2107], cacheItem# { Subscriber: { Subscriber: [6:7519894971758939502:2251] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:06.806286Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519894945989135464:2107], cacheItem# { Subscriber: { Subscriber: [6:7519894971758939503:2252] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:06.806408Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [6:7519894971758939527:2257], recipient# [6:7519894971758939497:2285], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:06.806825Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:7519894971758939497:2285], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:07.132515Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [6:7519894945989135464:2107], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:07.132670Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519894945989135464:2107], cacheItem# { Subscriber: { Subscriber: [6:7519894971758939502:2251] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:07.132718Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519894945989135464:2107], cacheItem# { Subscriber: { Subscriber: [6:7519894971758939503:2252] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:07.132830Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [6:7519894976053906825:2258], recipient# [6:7519894971758939497:2285], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:07.136532Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:7519894971758939497:2285], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:07.500708Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [6:7519894945989135464:2107], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:07.500855Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519894945989135464:2107], cacheItem# { Subscriber: { Subscriber: [6:7519894950284102961:2227] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:07.500949Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [6:7519894976053906828:2259], recipient# [6:7519894976053906827:2288], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:07.541829Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [6:7519894945989135464:2107], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:07.541983Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7519894945989135464:2107], cacheItem# { Subscriber: { Subscriber: [6:7519894950284102961:2227] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:07.542092Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [6:7519894976053906830:2260], recipient# [6:7519894976053906829:2289], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath 
RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] }
>> TDatabaseResolverTests::PostgreSQL
>> TDatabaseResolverTests::DataStreams_Serverless
>> TDatabaseResolverTests::PostgreSQL [GOOD]
>> TDatabaseResolverTests::PostgreSQL_PermissionDenied
>> TestKinesisHttpProxy::ListShardsExclusiveStartShardId
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpDocumentApi::RestrictDrop [GOOD]
Test command err: Trying to start YDB, gRPC: 7053, MsgBus: 11360 2025-06-25T14:32:25.527007Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894795263692365:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:25.527068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00187b/r3tmp/tmpyF8GK6/pdisk_1.dat 2025-06-25T14:32:26.222939Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894795263692169:2080] 1750861945498069 != 1750861945498072 2025-06-25T14:32:26.278751Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:26.278865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:26.280142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7053, node 1 2025-06-25T14:32:26.375524Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:26.404586Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:32:26.405108Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:32:26.520469Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:26.597033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:26.597061Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:26.597067Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:26.597158Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11360 TClient is connected to server localhost:11360 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:27.861161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:27.932872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:32:27.954276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:28.374202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:28.893972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:29.066976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:30.528026Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894795263692365:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:30.528141Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:31.693316Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894821033497580:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:31.693448Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:32.120482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:32.189480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:32.250535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:32.333502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:32.394890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:32.470308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:32.583454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:32.707966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894825328465547:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:32.708046Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:32.708298Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894825328465552:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:32.713492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:32.734920Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894825328465554:2441], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:32:32.804099Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894825328465607:3432] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:34.754533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation pa ... WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894934710924620:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:57.901466Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00187b/r3tmp/tmp8Mgx3B/pdisk_1.dat 2025-06-25T14:32:58.479733Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:58.482867Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519894934710924604:2080] 1750861977882272 != 1750861977882275 2025-06-25T14:32:58.497219Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:58.497324Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:58.506107Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12400, node 4 2025-06-25T14:32:58.736101Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:58.736133Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:58.736141Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:58.736287Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:32:58.851793Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5309 TClient is connected to server localhost:5309 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:00.306157Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:00.329299Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:00.447527Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:33:00.718651Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:00.841884Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:02.905750Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894934710924620:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:02.912047Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:04.820453Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894964775697326:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:04.820547Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:04.911817Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:04.991101Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:05.105632Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:05.202408Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:05.323383Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:05.417348Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:05.519131Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:05.812679Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894969070665304:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:05.812785Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:05.813467Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519894969070665309:2441], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:05.822753Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:05.841880Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519894969070665311:2442], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:33:05.904529Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519894969070665362:3434] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:07.535421Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Type annotation, code: 1030
:2:24: Error: At function: KiDropTable!
:2:24: Error: Document API table cannot be modified from YQL query: /Root/DocumentApiTest, code: 2008 >> TDatabaseResolverTests::PostgreSQL_PermissionDenied [GOOD] >> TDatabaseResolverTests::DataStreams_Serverless [GOOD] >> TDatabaseResolverTests::DataStreams_PermissionDenied >> TDatabaseResolverTests::DataStreams_PermissionDenied [GOOD] >> TDatabaseResolverTests::Ydb_Serverless >> TSubDomainTest::CreateTableInsideSubDomain [GOOD] >> TDatabaseResolverTests::Ydb_Serverless [GOOD] >> TDatabaseResolverTests::ClickHouseNative >> THealthCheckTest::OrangeGroupIssueWhenDegradedGroupStatus [GOOD] >> THealthCheckTest::OnlyDiskIssueOnSpaceIssues ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::PostgreSQL_PermissionDenied [GOOD] Test command err: 2025-06-25T14:33:10.559872Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed PostgreSQL database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-postgresql/v1/clusters/etn021us5r9rhld1vgbh/hosts': you have no permission to resolve database id into database endpoint. Please check that your service account has role `managed-postgresql.viewer`. ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::DataStreams_PermissionDenied [GOOD] Test command err: 2025-06-25T14:33:10.734980Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed DataStreams database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'ydbc.ydb.cloud.yandex.net:8789', url '/ydbc/cloud-prod/database?databaseId=etn021us5r9rhld1vgbh': you have no permission to resolve database id into database endpoint. 
>> THealthCheckTest::Issues100Groups100VCardListing [GOOD] >> THealthCheckTest::Issues100Groups100VCardMerging >> TDatabaseResolverTests::ClickHouseNative [GOOD] >> TDatabaseResolverTests::ClickHouseHttp >> TDatabaseResolverTests::ClickHouseHttp [GOOD] |78.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Ydb_Serverless [GOOD] |78.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::ClickHouseHttp [GOOD] >> BasicUsage::WriteSessionCloseWaitsForWrites [GOOD] >> BasicUsage::WriteSessionCloseIgnoresWrites ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::CreateTableInsideSubDomain [GOOD] Test command err: 2025-06-25T14:32:44.569019Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894876539735140:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:44.569090Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016e4/r3tmp/tmpwqUjsw/pdisk_1.dat 2025-06-25T14:32:45.569339Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:45.583851Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:45.596444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:45.596525Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:45.617118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:45.635435Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:31620 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:32:45.994262Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519894876539735180:2118] Handle TEvNavigate describe path dc-1 2025-06-25T14:32:46.076510Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519894885129670259:2448] HANDLE EvNavigateScheme dc-1 2025-06-25T14:32:46.076633Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519894880834702499:2131], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:46.076701Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519894880834702842:2350][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519894880834702499:2131], cookie# 1 2025-06-25T14:32:46.078065Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894880834702846:2350][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894880834702843:2350], cookie# 1 2025-06-25T14:32:46.078094Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894880834702847:2350][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894880834702844:2350], cookie# 1 2025-06-25T14:32:46.078108Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894880834702848:2350][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894880834702845:2350], cookie# 1 2025-06-25T14:32:46.078138Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894876539734892:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894880834702846:2350], cookie# 1 2025-06-25T14:32:46.078161Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894876539734895:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894880834702847:2350], cookie# 1 2025-06-25T14:32:46.078176Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894876539734898:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894880834702848:2350], cookie# 1 2025-06-25T14:32:46.078205Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894880834702846:2350][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894876539734892:2050], cookie# 1 2025-06-25T14:32:46.078219Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894880834702847:2350][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894876539734895:2053], cookie# 1 2025-06-25T14:32:46.078232Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894880834702848:2350][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894876539734898:2056], cookie# 1 2025-06-25T14:32:46.078276Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894880834702842:2350][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894880834702843:2350], cookie# 1 2025-06-25T14:32:46.078298Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: 
[main][1:7519894880834702842:2350][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:32:46.078314Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894880834702842:2350][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894880834702844:2350], cookie# 1 2025-06-25T14:32:46.078326Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894880834702842:2350][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:32:46.078339Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894880834702842:2350][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894880834702845:2350], cookie# 1 2025-06-25T14:32:46.078360Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519894880834702842:2350][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:32:46.078418Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519894880834702499:2131], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:32:46.103286Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519894880834702499:2131], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519894880834702842:2350] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:32:46.103418Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519894880834702499:2131], cacheItem# { Subscriber: { Subscriber: [1:7519894880834702842:2350] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-25T14:32:46.112191Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519894885129670260:2449], recipient# [1:7519894885129670259:2448], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:32:46.125903Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7519894885129670259:2448] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:32:46.226182Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519894885129670259:2448] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:32:46.235020Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519894885129670259:2448] Handle TEvDescribeSchemeResult Forward to# [1:7519894880834702962:2447] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' suc ... 
d> DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:08.802344Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][5:7519894981447880766:2753][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:33:08.802748Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [5:7519894959973043104:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers DomainOwnerId: 72057594046644480 }: sender# [5:7519894981447880771:2753] 2025-06-25T14:33:08.802759Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [5:7519894959973043104:2050] Upsert description: path# /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers 2025-06-25T14:33:08.802809Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [5:7519894959973043104:2050] Subscribe: subscriber# [5:7519894981447880771:2753], path# /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:33:08.802842Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [5:7519894959973043107:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers DomainOwnerId: 72057594046644480 }: sender# [5:7519894981447880772:2753] 2025-06-25T14:33:08.802850Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [5:7519894959973043107:2053] Upsert description: path# /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers 2025-06-25T14:33:08.802877Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [5:7519894959973043107:2053] Subscribe: subscriber# [5:7519894981447880772:2753], path# /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:33:08.802903Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [5:7519894959973043110:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers DomainOwnerId: 72057594046644480 }: sender# [5:7519894981447880773:2753] 2025-06-25T14:33:08.802911Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [5:7519894959973043110:2056] Upsert description: path# /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers 2025-06-25T14:33:08.802957Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [5:7519894959973043110:2056] Subscribe: subscriber# [5:7519894981447880773:2753], path# /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:33:08.802998Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][5:7519894981447880771:2753][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [5:7519894959973043104:2050] 2025-06-25T14:33:08.803024Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: 
[replica][5:7519894981447880772:2753][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [5:7519894959973043107:2053] 2025-06-25T14:33:08.803047Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][5:7519894981447880773:2753][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [5:7519894959973043110:2056] 2025-06-25T14:33:08.803085Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][5:7519894981447880766:2753][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [5:7519894981447880768:2753] 2025-06-25T14:33:08.803123Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][5:7519894981447880766:2753][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [5:7519894981447880769:2753] 2025-06-25T14:33:08.803184Z node 5 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][5:7519894981447880766:2753][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Set up state: owner# [5:7519894959973043447:2154], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:33:08.803212Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][5:7519894981447880766:2753][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [5:7519894981447880770:2753] 2025-06-25T14:33:08.803236Z node 5 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][5:7519894981447880766:2753][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Ignore empty state: owner# [5:7519894959973043447:2154], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:33:08.803258Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7519894959973043104:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7519894981447880771:2753] 2025-06-25T14:33:08.803274Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7519894959973043107:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7519894981447880772:2753] 2025-06-25T14:33:08.803289Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7519894959973043110:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7519894981447880773:2753] 2025-06-25T14:33:08.803320Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [5:7519894959973043447:2154], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 } 2025-06-25T14:33:08.803370Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [5:7519894959973043447:2154], notify# 
NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [5:7519894981447880766:2753] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:33:08.803436Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [5:7519894959973043447:2154], cacheItem# { Subscriber: { Subscriber: [5:7519894981447880766:2753] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:08.803521Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7519894981447880767:2754], recipient# [5:7519894981447880750:2277], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:08.803598Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7519894981447880774:2755], recipient# [5:7519894981447880751:2278], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:08.818357Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519894959973043309:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:08.818409Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:08.836716Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [5:7519894959973043447:2154], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:08.836846Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: 
FillEntry for TNavigate: self# [5:7519894959973043447:2154], cacheItem# { Subscriber: { Subscriber: [5:7519894964268011214:2481] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:08.836918Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7519894981447880781:2758], recipient# [5:7519894981447880780:2279], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TestKinesisHttpProxy::TestListStreamConsumersWithMaxResults [GOOD] >> TestYmqHttpProxy::TestTagQueue [GOOD] >> TDatabaseResolverTests::DataStreams_Dedicated >> TDatabaseResolverTests::DataStreams_Dedicated [GOOD] >> TDatabaseResolverTests::ClickHouse_PermissionDenied >> TestKinesisHttpProxy::TestListStreamConsumersWithToken >> TDatabaseResolverTests::ClickHouse_PermissionDenied [GOOD] >> TestYmqHttpProxy::TestUntagQueue ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::ClickHouse_PermissionDenied [GOOD] Test command err: 2025-06-25T14:33:14.557530Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed ClickHouse database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-clickhouse/v1/clusters/etn021us5r9rhld1vgbh/hosts': you have no permission to resolve database id into database endpoint. Please check that your service account has role `managed-clickhouse.viewer`. 
>> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotWritten_Test [GOOD] >> DataShardVolatile::UpsertNoLocksArbiter-UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiter+UseSink >> THealthCheckTest::YellowGroupIssueWhenPartialGroupStatus [GOOD] >> THealthCheckTest::TestTabletIsDead >> TPQTest::TestSourceIdDropByUserWrites [GOOD] >> TPQTest::TestSourceIdDropBySourceIdCount >> CommitOffset::DistributedTxCommit_CheckOffsetCommitForDifferentCases [GOOD] >> CommitOffset::DistributedTxCommit_Flat_CheckOffsetCommitForDifferentCases |78.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> DataShardVolatile::DistributedWriteThenBulkUpsertWithCdc [GOOD] >> TestKinesisHttpProxy::ListShardsExclusiveStartShardId [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenDrop >> TestKinesisHttpProxy::TestWrongStream [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotWritten_Test [GOOD] Test command err: 2025-06-25T14:31:15.865204Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894496064044901:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:15.865271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:31:15.906627Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894494264536283:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:16.062484Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:31:16.062880Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:31:16.072527Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bb6/r3tmp/tmpphjQTf/pdisk_1.dat 2025-06-25T14:31:16.274883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:16.274988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:16.285648Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:31:16.293654Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:31:16.294129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:31:16.308506Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:31:16.308602Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:31:16.312218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28303, node 1 
2025-06-25T14:31:16.414798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000bb6/r3tmp/yandexOmbjoK.tmp 2025-06-25T14:31:16.414825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000bb6/r3tmp/yandexOmbjoK.tmp 2025-06-25T14:31:16.414995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000bb6/r3tmp/yandexOmbjoK.tmp 2025-06-25T14:31:16.415173Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:31:16.477656Z INFO: TTestServer started on Port 13140 GrpcPort 28303 TClient is connected to server localhost:13140 PQClient connected to localhost:28303 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:31:16.751345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:31:16.824503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:31:16.883008Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:31:16.912739Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-25T14:31:18.887108Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894508948947875:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:18.887253Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894508948947896:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:18.887460Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:31:18.894469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:31:18.929485Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894508948947900:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:31:19.118613Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894508948948001:2812] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:31:19.143328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:19.160705Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894513243915314:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:31:19.160933Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZWEwMjk4NzUtMTMxMjkzZTMtMmU0ODg1YmItZjMxYTFjN2Q=, ActorId: [1:7519894508948947857:2299], ActorState: ExecuteState, TraceId: 01jykr05ja9jspa2y8v33tx9k7, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:31:19.163225Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:31:19.163330Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894507149438394:2274], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:31:19.165430Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MzExOTRhNzMtOTBiN2Y0YTEtODM0YTY2NGMtZTFkOGM3ZmY=, ActorId: [2:7519894507149438346:2268], ActorState: ExecuteState, TraceId: 01jykr05pfccx535hnbm2pvpd4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:31:19.165840Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:31:19.222983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:31:19.302329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster ... 
node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-25T14:33:11.906604Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T14:33:11.906614Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-25T14:33:11.906643Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__sm_chooser_actor.h:116: TPartitionChooser [9:7519894993390830102:3992] (SourceId=A_Source_7, PreferedPartition=(NULL)) GetOwnershipFast Partition=1 TabletId=1001 2025-06-25T14:33:11.906775Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [9:7519894993390830103:3992], Recipient [9:7519894963326057817:3312]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [9:7519894993390830102:3992] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-25T14:33:11.906890Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [9:7519894993390830102:3992], Recipient [9:7519894963326057817:3312]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 SourceId: "A_Source_7" 2025-06-25T14:33:11.906958Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:139: StateOwnershipFast, received event# 271188558, Sender [9:7519894963326057817:3312], Recipient [9:7519894993390830102:3992]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-25T14:33:11.906992Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:88: TPartitionChooser [9:7519894993390830102:3992] (SourceId=A_Source_7, PreferedPartition=(NULL)) InitTable: SourceId=A_Source_7 TopicsAreFirstClassCitizen=1 UseSrcIdMetaMappingInFirstClass=1 2025-06-25T14:33:11.907064Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [9:7519894993390830102:3992], Recipient [9:7519894963326057817:3312]: NActors::TEvents::TEvPoison 2025-06-25T14:33:11.907322Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:101: StateInitTable, received event# 277020685, Sender [9:7519894894606579020:2069], Recipient [9:7519894993390830102:3992]: NKikimr::NMetadata::NProvider::TEvManagerPrepared 2025-06-25T14:33:11.907368Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: 
TPartitionChooser [9:7519894993390830102:3992] (SourceId=A_Source_7, PreferedPartition=(NULL)) StartKqpSession 2025-06-25T14:33:11.911443Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:132: StateCreateKqpSession, received event# 271646728, Sender [9:7519894894606579202:2237], Recipient [9:7519894993390830102:3992]: NKikimrKqp.TEvCreateSessionResponse Error: "" Response { SessionId: "ydb://session/3?node_id=9&id=MWJjNTA0YTktOGMyNDIxZjctYjZhY2EzMDgtMmJmNjkxZDk=" NodeId: 9 } YdbStatus: SUCCESS ResourceExhausted: false 2025-06-25T14:33:11.911495Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [9:7519894993390830102:3992] (SourceId=A_Source_7, PreferedPartition=(NULL)) Select from the table 2025-06-25T14:33:12.159751Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:163: StateSelect, received event# 271646721, Sender [9:7519894894606579202:2237], Recipient [9:7519894993390830102:3992]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=9&id=MWJjNTA0YTktOGMyNDIxZjctYjZhY2EzMDgtMmJmNjkxZDk=" PreparedQuery: "55d8ede-ee0efa2d-4e5ad20d-227cf2be" QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { id: "01jykr3m6gdkggee7dvwn96t82" } YdbResults { columns { name: "Partition" type { optional_type { item { type_id: UINT32 } } } } columns { name: "CreateTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "AccessTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "SeqNo" type { optional_type { item { type_id: UINT64 } } } } rows { items { uint32_value: 0 } items { uint64_value: 1750861991641 } items { uint64_value: 1750861991641 } items { uint64_value: 13 } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 140 2025-06-25T14:33:12.159973Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:151: TPartitionChooser [9:7519894993390830102:3992] (SourceId=A_Source_7, PreferedPartition=(NULL)) Selected from table PartitionId=0 SeqNo=13 2025-06-25T14:33:12.160010Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__sm_chooser_actor.h:151: TPartitionChooser [9:7519894993390830102:3992] (SourceId=A_Source_7, PreferedPartition=(NULL)) GetOldSeqNo 2025-06-25T14:33:12.160177Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [9:7519894997685797444:3992], Recipient [9:7519894963326057816:3311]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1000 Status: OK ServerId: [9:7519894993390830102:3992] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-25T14:33:12.160293Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271187968, Sender [9:7519894993390830102:3992], Recipient [9:7519894963326057816:3311]: NKikimrClient.TPersQueueRequest PartitionRequest { Partition: 0 CmdGetMaxSeqNo { SourceId: "\000A_Source_7" } PipeClient { RawX1: 7519894997685797444 RawX2: 38654709656 } } 2025-06-25T14:33:12.160390Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:209: TPartitionChooser [9:7519894993390830102:3992] (SourceId=A_Source_7, PreferedPartition=(NULL)) OnPartitionChosen 2025-06-25T14:33:12.160535Z node 9 :PQ_PARTITION_CHOOSER TRACE: 
partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [9:7519894993390830102:3992], Recipient [9:7519894963326057816:3311]: NActors::TEvents::TEvPoison 2025-06-25T14:33:12.160591Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [9:7519894997685797445:3992], Recipient [9:7519894963326057817:3312]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [9:7519894993390830102:3992] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-25T14:33:12.160652Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [9:7519894993390830102:3992], Recipient [9:7519894963326057817:3312]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 2025-06-25T14:33:12.160725Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:240: StateCheckPartition, received event# 271188558, Sender [9:7519894963326057817:3312], Recipient [9:7519894993390830102:3992]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-25T14:33:12.160756Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [9:7519894993390830102:3992] (SourceId=A_Source_7, PreferedPartition=(NULL)) Update the table 2025-06-25T14:33:12.161010Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [9:7519894993390830102:3992], Recipient [9:7519894963326057817:3312]: NActors::TEvents::TEvPoison Received TEvChooseResult: 1 2025-06-25T14:33:12.339134Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:212: StateUpdate, received event# 271646721, Sender [9:7519894894606579202:2237], Recipient [9:7519894993390830102:3992]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=9&id=MWJjNTA0YTktOGMyNDIxZjctYjZhY2EzMDgtMmJmNjkxZDk=" PreparedQuery: "8d464ee-524df7df-394abeb3-4246d263" QueryParameters { Name: "$AccessTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$CreateTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Partition" Type { Kind: Data Data { Scheme: 2 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SeqNo" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 103 2025-06-25T14:33:12.339188Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [9:7519894993390830102:3992] (SourceId=A_Source_7, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-25T14:33:12.339235Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [9:7519894993390830102:3992] (SourceId=A_Source_7, PreferedPartition=(NULL)) ReplyResult: Partition=1, SeqNo=13 2025-06-25T14:33:12.339258Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [9:7519894993390830102:3992] (SourceId=A_Source_7, PreferedPartition=(NULL)) Start idle Run query: --!syntax_v1 SELECT Partition, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash = 6541068412312944787 AND Topic = "Root" AND ProducerId = "00415F536F757263655F37" 
2025-06-25T14:33:12.733994Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715716. Ctx: { TraceId: 01jykr3mg0b3vwfxyjexvbjcx9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=ODFjZmRiMTAtMTc2ZTc1NzMtOTYxNWFlMjAtYzY2YWMyNTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:13.901215Z node 9 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [9:7519895001980764914:2717] TxId: 281474976715717. Ctx: { TraceId: 01jykr3nps8rmgsp8r0x3yh6t9, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=OTZkY2M4NzUtY2ExNDg2NzItYmRiOWE2ZTItMzE1ZGM5MTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 10 2025-06-25T14:33:13.901413Z node 9 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [9:7519895001980764920:2717], TxId: 281474976715717, task: 2. Ctx: { TraceId : 01jykr3nps8rmgsp8r0x3yh6t9. SessionId : ydb://session/3?node_id=9&id=OTZkY2M4NzUtY2ExNDg2NzItYmRiOWE2ZTItMzE1ZGM5MTM=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [9:7519895001980764914:2717], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> THealthCheckTest::StorageLimit50 [GOOD] >> TestKinesisHttpProxy::ListShardsTimestamp >> THealthCheckTest::SpecificServerless >> TestKinesisHttpProxy::TestWrongStream2 >> TClockProCache::Touch [GOOD] >> TClockProCache::UpdateLimit [GOOD] >> TCompaction::OneMemtable [GOOD] >> TCompaction::ManyParts >> THealthCheckTest::ShardsLimit995 [GOOD] >> THealthCheckTest::ShardsLimit905 >> TDatabaseResolverTests::Ydb_Dedicated >> TDatabaseResolverTests::Ydb_Dedicated [GOOD] >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable [GOOD] >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable2 >> TCompaction::ManyParts [GOOD] >> TCompaction::BootAbort >> TFlatTableExecutor_ResourceProfile::TestExecutorStaticMemoryLimits >> TPartSlice::TrivialMerge [GOOD] >> TPartSlice::SupersetByRowId [GOOD] >> TPartSlice::Subtract [GOOD] >> TPartSlice::UnsplitBorrow [GOOD] >> TPartSliceLoader::RestoreMissingSlice >> THealthCheckTest::OnlyDiskIssueOnSpaceIssues [GOOD] >> THealthCheckTest::OnlyDiskIssueOnInitialPDisks >> TFlatTableExecutor_ResourceProfile::TestExecutorStaticMemoryLimits [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxDataLimitExceeded [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxDataGC [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxPartialDataHold >> THealthCheckTest::Issues100VCardListing [GOOD] >> THealthCheckTest::Issues100GroupsMerging >> TFlatTableExecutor_ResourceProfile::TestExecutorTxPartialDataHold [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxHoldAndUse [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorTxHoldOnRelease >> BuildStatsHistogram::Three_Serial_Small_2_Levels >> TPart::State [GOOD] >> TPart::Trivials [GOOD] >> TPart::Basics [GOOD] >> TPart::CellDefaults [GOOD] >> TPart::Matter [GOOD] >> TPart::External [GOOD] >> TPart::Outer [GOOD] >> TPart::MassCheck >> TFlatTableExecutor_ResourceProfile::TestExecutorTxHoldOnRelease [GOOD] >> TFlatTableExecutor_ResourceProfile::TestUpdateConfig [GOOD] >> TFlatTableExecutor_SliceOverlapScan::TestSliceOverlapScan |78.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Ydb_Dedicated [GOOD] >> TDatabaseResolverTests::MySQL >> TopicAutoscaling::PartitionSplit_ManySession_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_existed_AutoscaleAwareSDK >> TSharedPageCache::ThreeLeveledLRU >> BuildStatsHistogram::Three_Serial_Small_2_Levels [GOOD] >> BuildStatsHistogram::Three_Serial_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Three_Serial_Small_1_Level >> DataCleanup::CleanupDataNoTables [GOOD] >> DataCleanup::CleanupDataNoTablesWithRestart [GOOD] >> DataCleanup::CleanupDataLog >> TCompaction::BootAbort [GOOD] >> TCompaction::Defaults [GOOD] >> TCompaction::Merges [GOOD] >> TCompactionMulti::ManyParts >> TPartSliceLoader::RestoreMissingSlice [GOOD] >> TPartSliceLoader::RestoreOneSlice [GOOD] >> TPartSliceLoader::RestoreMissingSliceFullScreen [GOOD] >> TPartSliceLoader::RestoreFromScreenIndexKeys [GOOD] >> TPartSliceLoader::RestoreFromScreenDataKeys >> DataCleanup::CleanupDataLog [GOOD] >> DataCleanup::CleanupData [GOOD] >> DataCleanup::CleanupDataMultipleFamilies >> YdbTableSplit::MergeByNoLoadAfterSplit [GOOD] >> TDatabaseResolverTests::MySQL [GOOD] >> TDatabaseResolverTests::MySQL_PermissionDenied >> TPartSliceLoader::RestoreFromScreenDataKeys [GOOD] >> TRowVersionRangesTest::SimpleInserts [GOOD] >> TRowVersionRangesTest::MergeFailLeft [GOOD] >> 
TRowVersionRangesTest::MergeFailRight [GOOD] >> TRowVersionRangesTest::MergeFailOuter [GOOD] >> TRowVersionRangesTest::MergeFailInner [GOOD] >> TRowVersionRangesTest::MergeExtendLeft [GOOD] >> TRowVersionRangesTest::MergeExtendLeftInner [GOOD] >> TRowVersionRangesTest::MergeExtendLeftComplete [GOOD] >> TRowVersionRangesTest::MergeExtendRight [GOOD] >> TRowVersionRangesTest::MergeExtendRightInner [GOOD] >> TRowVersionRangesTest::MergeExtendRightComplete [GOOD] >> TRowVersionRangesTest::MergeExtendBoth [GOOD] >> TRowVersionRangesTest::MergeHoleExact [GOOD] >> TRowVersionRangesTest::MergeHoleInner [GOOD] >> TRowVersionRangesTest::MergeHoleOuter [GOOD] >> TRowVersionRangesTest::MergeAllOuter [GOOD] >> BuildStatsHistogram::Three_Serial_Small_1_Level [GOOD] >> TRowVersionRangesTest::MergeAllInner [GOOD] >> BuildStatsHistogram::Three_Serial_Small_0_Levels >> DataCleanup::CleanupDataMultipleFamilies [GOOD] >> TRowVersionRangesTest::MergeAllEdges [GOOD] >> DataCleanup::CleanupDataMultipleTables >> TRowVersionRangesTest::ContainsEmpty [GOOD] >> TRowVersionRangesTest::ContainsNonEmpty [GOOD] >> TRowVersionRangesTest::ContainsInvalid [GOOD] >> TRowVersionRangesTest::AdjustDown [GOOD] >> TRowVersionRangesTest::AdjustDownSnapshot [GOOD] >> TRowVersionRangesTest::SteppedCookieAllocatorOrder [GOOD] >> TRowVersionRangesTest::SteppedCookieAllocatorLowerBound [GOOD] >> TS3FIFOCache::Touch [GOOD] >> TS3FIFOCache::Touch_MainQueue [GOOD] >> TS3FIFOCache::EvictNext [GOOD] >> TS3FIFOCache::UpdateLimit [GOOD] >> TS3FIFOCache::Erase [GOOD] >> TS3FIFOCache::Random >> TPart::MassCheck [GOOD] >> TPart::WreckPart >> TDatabaseResolverTests::MySQL_PermissionDenied [GOOD] >> DataCleanup::CleanupDataMultipleTables [GOOD] >> DataCleanup::CleanupDataWithFollowers [GOOD] >> DataCleanup::CleanupDataMultipleTimes >> BuildStatsHistogram::Three_Serial_Small_0_Levels [GOOD] >> BuildStatsMixedIndex::Single >> TS3FIFOCache::Random [GOOD] >> TS3FIFOGhostQueue::Basics [GOOD] >> TScheme::Shapshot [GOOD] >> TScheme::Delta [GOOD] >> TScheme::Policy [GOOD] >> TScreen::Cuts [GOOD] >> TScreen::Join [GOOD] >> TScreen::Sequential >> DataCleanup::CleanupDataMultipleTimes [GOOD] >> DataCleanup::CleanupDataEmptyTable [GOOD] >> DataCleanup::CleanupDataWithRestarts >> BuildStatsMixedIndex::Single [GOOD] >> DataCleanup::CleanupDataWithRestarts [GOOD] >> DataCleanup::CleanupDataRetryWithNotGreaterGenerations [GOOD] >> DataCleanup::CleanupDataWithTabletGCErrors >> BuildStatsMixedIndex::Single_Slices >> DataCleanup::CleanupDataWithTabletGCErrors [GOOD] >> DataCleanup::CleanupDataWithSysTabletGCErrors >> KqpProxy::DatabasesCacheForServerless [GOOD] >> BuildStatsMixedIndex::Single_Slices [GOOD] >> BuildStatsMixedIndex::Single_History >> DataCleanup::CleanupDataWithSysTabletGCErrors [GOOD] >> DBase::WideKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::MySQL_PermissionDenied [GOOD] Test command err: 2025-06-25T14:33:20.286664Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed MySQL database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-mysql/v1/clusters/etn021us5r9rhld1vgbh/hosts': you have no permission to resolve database id into database endpoint. 
>> TCompactionMulti::ManyParts [GOOD] >> TCompactionMulti::MainPageCollectionEdge >> TScreen::Sequential [GOOD] >> TScreen::Random >> BuildStatsMixedIndex::Single_History [GOOD] >> BuildStatsMixedIndex::Single_History_Slices >> THealthCheckTest::Issues100Groups100VCardMerging [GOOD] >> THealthCheckTest::GreenStatusWhenInitPending >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionEnqueue >> BuildStatsMixedIndex::Single_History_Slices [GOOD] >> BuildStatsMixedIndex::Single_Groups >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionEnqueue [GOOD] >> TFlatTableExecutor_ExecutorTxLimit::TestExecutorTxLimit [GOOD] >> TFlatTableExecutor_Follower::BasicFollowerRead [GOOD] >> TFlatTableExecutor_Follower::FollowerEarlyRebootHoles >> TFlatTableExecutor_Follower::FollowerEarlyRebootHoles [GOOD] >> TFlatTableExecutor_Follower::FollowerAttachOnTxQueueScanSnapshot >> TSharedPageCache::ThreeLeveledLRU [GOOD] >> TSharedPageCache::S3FIFO >> BuildStatsMixedIndex::Single_Groups [GOOD] >> BuildStatsMixedIndex::Single_Groups_Slices >> Bloom::Conf [GOOD] >> Bloom::Hashes >> DBase::WideKey [GOOD] >> DBase::VersionPureMem >> Bloom::Hashes [GOOD] >> Bloom::Rater >> TFlatTableExecutor_Follower::FollowerAttachOnTxQueueScanSnapshot [GOOD] >> BuildStatsMixedIndex::Single_Groups_Slices [GOOD] >> BuildStatsMixedIndex::Single_Groups_History >> TFlatTableExecutor_Follower::FollowerAttachAfterLoan >> TBtreeIndexBuilder::NoNodes [GOOD] >> TFlatTableExecutor_Follower::FollowerAttachAfterLoan [GOOD] >> TFlatTableExecutor_Gc::TestFailedGcAfterReboot [GOOD] >> TBtreeIndexBuilder::OneNode [GOOD] >> TFlatTableExecutor_IndexLoading::CalculateReadSize_FlatIndex >> TBtreeIndexBuilder::FewNodes [GOOD] >> TBtreeIndexBuilder::SplitBySize [GOOD] >> TBtreeIndexNode::TIsNullBitmap [GOOD] >> TBtreeIndexNode::CompareTo [GOOD] >> TBtreeIndexNode::Basics [GOOD] >> TBtreeIndexNode::Group [GOOD] >> TBtreeIndexNode::History [GOOD] >> TBtreeIndexNode::OneKey [GOOD] >> TBtreeIndexNode::Reusable [GOOD] >> TBtreeIndexNode::CutKeys [GOOD] >> TBtreeIndexTPart::Conf [GOOD] >> TBtreeIndexTPart::NoNodes [GOOD] >> TBtreeIndexTPart::OneNode [GOOD] >> TBtreeIndexTPart::FewNodes [GOOD] >> TBtreeIndexTPart::Erases ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::MergeByNoLoadAfterSplit [GOOD] Test command err: 2025-06-25T14:32:07.752434Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894718813896505:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:07.799683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00139c/r3tmp/tmpgQvFOR/pdisk_1.dat 2025-06-25T14:32:08.457887Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:08.472357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:08.472454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:08.485141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3260, node 1 2025-06-25T14:32:08.728468Z node 1 :TX_CONVEYOR 
ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:08.760894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:08.760925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:08.760939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:08.761065Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29741 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:09.154399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:09.184656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 Triggering split by load TClient is connected to server localhost:29741 2025-06-25T14:32:12.728535Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894718813896505:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:12.728619Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:13.242605Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894744583701183:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.242707Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.640269Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 2146435072 Duration# 0.120341s 2025-06-25T14:32:13.640330Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.120407s 2025-06-25T14:32:13.662648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:13.892707Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894744583701373:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.892775Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.909719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750861933821 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750861933821 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-25T14:32:14.194929Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894748878668765:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:14.195142Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894748878668790:2361], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:14.195187Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894748878668791:2362], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:14.195282Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:14.196517Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894748878668792:2363], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:14.196564Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894748878668806:2373], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:14.199711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-25T14:32:14.199896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715661:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:32:14.199915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976715661:1, at schemeshard: 72057594046644480 2025-06-25T14:32:14.199986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715661:2, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:32:14.200004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager/pools, operationId: 281474976715661:2, at schemeshard: 72057594046644480 2025-06-25T14:32:14.200083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715661:3, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:32:14.200135Z node 1 :FLAT_TX_SCHEMESHARD NOT ... athId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... 
(TRUNCATED) 2025-06-25T14:33:14.069699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__table_stats.cpp:450: Propose merge request : Transaction { WorkingDir: "/Root" OperationType: ESchemeOpSplitMergeTablePartitions SplitMergeTablePartitions { TablePath: "/Root/Foo" SourceTabletId: 72075186224037889 SourceTabletId: 72075186224037890 SchemeshardId: 72057594046644480 } Internal: true FailOnExist: false } TxId: 281474976710658 TabletId: 72057594046644480, reason: shard with tabletId: 72075186224037889 merge by load (shardLoad: 0.02), shardToMergeCount: 2, totalSize: 0, sizeToMerge: 0, totalLoad: 0.04, loadThreshold: 0.07 2025-06-25T14:33:14.069904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:804: TSplitMerge Propose, tableStr: /Root/Foo, tableId: , opId: 281474976710658:0, at schemeshard: 72057594046644480, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037889 SourceTabletId: 72075186224037890 SchemeshardId: 72057594046644480 2025-06-25T14:33:14.070675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1083: TSplitMerge Propose accepted, tableStr: /Root/Foo, tableId: , opId: 281474976710658:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "\002\000\004\000\000\000\025\247\030\\\000\000\000\200" TabletID: 72075186224037889 ShardIdx: 2 } SourceRanges { KeyRangeBegin: "\002\000\004\000\000\000\025\247\030\\\000\000\000\200" KeyRangeEnd: "" TabletID: 72075186224037890 ShardIdx: 3 } DestinationRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "" ShardIdx: 4 }, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037889 SourceTabletId: 72075186224037890 SchemeshardId: 72057594046644480 2025-06-25T14:33:14.070716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:33:14.079984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710658:0 ProgressState, operation type: TxSplitTablePartition, at tablet# 72057594046644480 2025-06-25T14:33:14.096216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 281474976710658:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046644480 2025-06-25T14:33:14.096332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710658:0 2 -> 3 2025-06-25T14:33:14.104051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:84: TSplitMerge TConfigureDestination ProgressState, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:33:14.112777Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [1:7519895006576800067:5302] 2025-06-25T14:33:14.153971Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037891 2025-06-25T14:33:14.154103Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037891, state: WaitScheme 2025-06-25T14:33:14.154300Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-25T14:33:14.165322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:38: TSplitMerge TConfigureDestination operationId# 281474976710658:0 HandleReply 
TEvInitSplitMergeDestinationAck, operationId: 281474976710658:0, at schemeshard: 72057594046644480 message# OperationCookie: 281474976710658 TabletId: 72075186224037891 2025-06-25T14:33:14.165370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710658:0 3 -> 131 2025-06-25T14:33:14.169308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:334: TSplitMerge TTransferData operationId# 281474976710658:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:33:14.201344Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037891 2025-06-25T14:33:14.201463Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037891 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:33:14.201511Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-25T14:33:14.201538Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037891 2025-06-25T14:33:14.202924Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-25T14:33:14.209333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:207: TSplitMerge TTransferData operationId# 281474976710658:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976710658 TabletId: 72075186224037889 2025-06-25T14:33:14.209666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:207: TSplitMerge TTransferData operationId# 281474976710658:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976710658 TabletId: 72075186224037890 2025-06-25T14:33:14.209920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710658:0 131 -> 132 2025-06-25T14:33:14.212826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:33:14.213114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:33:14.213188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:437: TSplitMerge TNotifySrc, operationId: 281474976710658:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:33:14.215129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-25T14:33:14.215190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710658 2025-06-25T14:33:14.215209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710658, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 6 2025-06-25T14:33:14.224426Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-06-25T14:33:14.225724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:392: TSplitMerge TNotifySrc, operationId: 281474976710658:0 
HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-06-25T14:33:14.226245Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037890 Initiating switch from PreOffline to Offline state 2025-06-25T14:33:14.226527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:392: TSplitMerge TNotifySrc, operationId: 281474976710658:0 HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037890, at schemeshard: 72057594046644480 2025-06-25T14:33:14.226831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710658:0 progress is 1/1 2025-06-25T14:33:14.226854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710658:0 progress is 1/1 2025-06-25T14:33:14.226901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710658:0 2025-06-25T14:33:14.230889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:261: Unable to activate 281474976710658:0 2025-06-25T14:33:14.230994Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:33:14.231061Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:33:14.231526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:33:14.231726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:33:14.237596Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-25T14:33:14.237636Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-25T14:33:14.238071Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-25T14:33:14.238155Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-25T14:33:14.238179Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-25T14:33:14.238205Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-25T14:33:14.239022Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-25T14:33:14.239088Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750861933821 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 3 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) >> TBtreeIndexTPart::Erases [GOOD] >> TBtreeIndexTPart::Groups [GOOD] >> TBtreeIndexTPart::History >> TDatabaseResolverTests::ResolveTwoDataStreamsFirstError >> TBtreeIndexTPart::History [GOOD] >> TBtreeIndexTPart::External >> TDatabaseResolverTests::ResolveTwoDataStreamsFirstError [GOOD] >> Bloom::Rater [GOOD] >> Bloom::Dipping |78.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |78.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |78.9%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut >> BuildStatsMixedIndex::Single_Groups_History [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_Slices >> TPart::WreckPart [GOOD] >> TPart::PageFailEnv >> TFlatTableExecutor_SliceOverlapScan::TestSliceOverlapScan [GOOD] >> TFlatTableExecutor_SnapshotWithCommits::SnapshotWithCommits [GOOD] >> TFlatTableExecutor_StickyPages::TestNonSticky_FlatIndex >> TBtreeIndexTPart::External [GOOD] >> TChargeBTreeIndex::NoNodes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::DatabasesCacheForServerless [GOOD] Test command err: 2025-06-25T14:32:38.302884Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894852235788377:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:38.302929Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:38.400760Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894851071259456:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:38.400843Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:38.704152Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519894852711985425:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:39.184105Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519894855459949689:2250];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:39.266750Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ca4/r3tmp/tmpDS3SOm/pdisk_1.dat 2025-06-25T14:32:39.499567Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
2025-06-25T14:32:39.499750Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:39.600843Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:39.615726Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:39.680957Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:39.720852Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:39.738994Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:39.755245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:39.753144Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:40.249948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:40.250050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:40.286965Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:40.287100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:40.287360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:40.287420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:40.287550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:40.287577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:40.287652Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:40.287872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:40.519698Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:40.574024Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-25T14:32:40.574063Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-25T14:32:40.577519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Connecting -> Connected 2025-06-25T14:32:40.578748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:40.592735Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:32:40.592858Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:32:40.593004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:40.593216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:40.593311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27634 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: 2025-06-25T14:32:43.305447Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894852235788377:2075];send_to=[0:7307199536658146131:7762515]; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:43.378776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:32:43.411867Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:43.412406Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519894851071259456:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:43.412501Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; waiting... 
2025-06-25T14:32:43.604512Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519894852711985425:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:43.604586Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:44.184085Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519894855459949689:2250];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:44.184158Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:53.137528Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:53.164970Z node 3 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:53.221951Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:53.222790Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:53.267466Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:32:53.267507Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T14:32:53.267528Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:53.271987Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-25T14:32:53.276109Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-25T14:32:53.284179Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-25T14:32:53.307393Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:32:53.307448Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T14:32:53.307472Z node 3 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:32:53.307523Z node 3 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:32:53.308583Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-25T14:32:53.308621Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-25T14:32:53.308667Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-25T14:32:53.308761Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe re ... it::Complete. 
EnableColumnStatistics=false 2025-06-25T14:33:07.328784Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:07.460428Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519894956004829639:2159];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:07.460523Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:07.605664Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:33:07.632515Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519894975689655364:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:07.632595Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-shared/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:33:07.691665Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:07.691723Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:07.694054Z node 6 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 7 Cookie 7 2025-06-25T14:33:07.695442Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:07.840209Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:07.840318Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:07.841352Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:33:07.841580Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:33:07.841639Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:33:07.841691Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:33:07.841788Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:33:07.841851Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:33:07.841905Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:33:07.841979Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet 
Postponed 2025-06-25T14:33:07.842054Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:33:07.847712Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:08.008055Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:08.011614Z node 7 :STATISTICS WARN: tx_init.cpp:287: [72075186224038895] TTxInit::Complete. EnableColumnStatistics=false 2025-06-25T14:33:08.048838Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:33:08.175382Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:33:08.394759Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:08.626173Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7519894979984623554:2522], Database: /Root/test-serverless, Start database fetching 2025-06-25T14:33:08.626373Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7519894979984623554:2522], Database: /Root/test-serverless, Database info successfully fetched, serverless: 1 2025-06-25T14:33:08.640530Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:33:11.837410Z node 8 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:33:11.837567Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:33:11.837687Z node 8 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:33:11.932071Z node 8 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 3 2025-06-25T14:33:11.932544Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7519894992011808442:2306], Start check tables existence, number paths: 2 2025-06-25T14:33:11.964459Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7519894992011808442:2306], Describe table /Root/test-dedicated/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:33:11.964552Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7519894992011808442:2306], Describe table /Root/test-dedicated/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:33:11.964616Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7519894992011808442:2306], Successfully finished 2025-06-25T14:33:11.964714Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: 
[WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:33:12.031674Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7519894974831938661:2180];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:12.031768Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-dedicated/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:12.494040Z node 7 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:33:12.494224Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:33:12.494247Z node 7 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:33:12.518869Z node 7 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 3 2025-06-25T14:33:12.519012Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7519894997164492841:2332], Start check tables existence, number paths: 2 2025-06-25T14:33:12.522246Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7519894997164492841:2332], Describe table /Root/test-shared/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:33:12.522349Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7519894997164492841:2332], Describe table /Root/test-shared/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:33:12.522401Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7519894997164492841:2332], Successfully finished 2025-06-25T14:33:12.522502Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:33:12.635265Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519894975689655364:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:12.635333Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-shared/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:17.975766Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:33:17.975794Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:18.639701Z node 6 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 8 2025-06-25T14:33:18.640183Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:33:18.640694Z node 6 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 7 2025-06-25T14:33:18.640928Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:33:18.681558Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: 
SessionId: ydb://session/3?node_id=6&id=MjAxOTdkMjktMTdiYWZmMGEtOWVlOGI3YTMtMjM5MzMxZTY=, ActorId: [6:7519894973184699540:2295], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:33:18.681616Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=6&id=MjAxOTdkMjktMTdiYWZmMGEtOWVlOGI3YTMtMjM5MzMxZTY=, ActorId: [6:7519894973184699540:2295], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:33:18.681643Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=6&id=MjAxOTdkMjktMTdiYWZmMGEtOWVlOGI3YTMtMjM5MzMxZTY=, ActorId: [6:7519894973184699540:2295], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:33:18.681674Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=6&id=MjAxOTdkMjktMTdiYWZmMGEtOWVlOGI3YTMtMjM5MzMxZTY=, ActorId: [6:7519894973184699540:2295], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:33:18.681763Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=6&id=MjAxOTdkMjktMTdiYWZmMGEtOWVlOGI3YTMtMjM5MzMxZTY=, ActorId: [6:7519894973184699540:2295], ActorState: unknown state, Session actor destroyed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::ResolveTwoDataStreamsFirstError [GOOD] Test command err: 2025-06-25T14:33:22.337363Z node 1 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed DataStreams database with id etn021us5r9rhld1vgb1 via HTTP request to: endpoint 'ydbc.ydb.cloud.yandex.net:8789', url '/ydbc/cloud-prod/database?databaseId=etn021us5r9rhld1vgb1': Status: 404 Response body: {"message":"Database not found"} >> TestKinesisHttpProxy::TestListStreamConsumersWithToken [GOOD] >> TFlatTableExecutor_StickyPages::TestNonSticky_FlatIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestNonSticky_BTreeIndex >> TChargeBTreeIndex::NoNodes [GOOD] >> TChargeBTreeIndex::NoNodes_Groups >> TFlatTableExecutor_StickyPages::TestNonSticky_BTreeIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestSticky >> BuildStatsMixedIndex::Single_Groups_History_Slices [GOOD] >> BuildStatsMixedIndex::Mixed >> TFlatTableExecutor_IndexLoading::CalculateReadSize_FlatIndex [GOOD] >> DBase::VersionPureMem [GOOD] >> TFlatTableExecutor_IndexLoading::CalculateReadSize_BTreeIndex >> DBase::VersionPureParts >> TFlatTableExecutor_StickyPages::TestSticky [GOOD] >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_FlatIndex >> TSharedPageCache::S3FIFO [GOOD] >> TSharedPageCache::ReplacementPolicySwitch >> TScreen::Random [GOOD] >> TScreen::Shrink [GOOD] >> TScreen::Cook [GOOD] >> TSharedPageCache::Limits >> TDatabaseResolverTests::Ydb_Serverless_Timeout >> BuildStatsMixedIndex::Mixed [GOOD] >> BuildStatsMixedIndex::Mixed_Groups >> TestYmqHttpProxy::TestUntagQueue [GOOD] >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_FlatIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_BTreeIndex >> TDatabaseResolverTests::Ydb_Serverless_Timeout [GOOD] >> TSharedPageCache::ReplacementPolicySwitch [GOOD] >> TSharedPageCache::MiddleCache_FlatIndex >> TFlatTableExecutor_StickyPages::TestNonStickyGroup_BTreeIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyMain >> BuildStatsMixedIndex::Mixed_Groups [GOOD] >> 
BuildStatsMixedIndex::Mixed_Groups_History >> TFlatTableExecutor_StickyPages::TestStickyMain [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyAlt_FlatIndex >> TopicAutoscaling::PartitionSplit_AutosplitByLoad [GOOD] >> TestKinesisHttpProxy::TestCounters >> TFlatTableExecutor_StickyPages::TestStickyAlt_FlatIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyAlt_BTreeIndex >> TopicAutoscaling::PartitionSplit_AutosplitByLoad_AfterAlter >> TFlatTableExecutor_IndexLoading::CalculateReadSize_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_FlatIndex >> TSharedPageCache::MiddleCache_FlatIndex [GOOD] >> TSharedPageCache::ZeroCache_BTreeIndex >> TFlatTableExecutor_StickyPages::TestStickyAlt_BTreeIndex [GOOD] >> TFlatTableExecutor_StickyPages::TestStickyAll >> BuildStatsMixedIndex::Mixed_Groups_History [GOOD] >> BuildStatsMixedIndex::Serial >> TFlatTableExecutor_StickyPages::TestStickyAll [GOOD] >> TFlatTableExecutor_StickyPages::TestAlterAddFamilySticky ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Ydb_Serverless_Timeout [GOOD] Test command err: 2025-06-25T14:33:23.191891Z node 1 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed Ydb database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'ydbc.ydb.cloud.yandex.net:8789', url '/ydbc/cloud-prod/database?databaseId=etn021us5r9rhld1vgbh': Connection timeout >> TFlatTableExecutor_StickyPages::TestAlterAddFamilySticky [GOOD] >> TFlatTableExecutor_StickyPages::TestAlterAddFamilyPartiallySticky >> TEvLocalSyncDataTests::SqueezeBlocks1 [GOOD] >> TEvLocalSyncDataTests::SqueezeBlocks2 [GOOD] >> BuildStatsMixedIndex::Serial [GOOD] >> BuildStatsMixedIndex::Serial_Groups >> TSharedPageCache::Limits [GOOD] >> TSharedPageCache::Limits_Config >> TPart::PageFailEnv [GOOD] >> TFlatTableExecutor_StickyPages::TestAlterAddFamilyPartiallySticky [GOOD] >> TFlatTableExecutor_VersionedLargeBlobs::TestMultiVersionCompactionLargeBlobs >> TPart::ForwardEnv >> TFlatTableExecutor_VersionedLargeBlobs::TestMultiVersionCompactionLargeBlobs [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRows >> Bloom::Dipping [GOOD] >> Bloom::Basics [GOOD] >> Bloom::Stairs >> BuildStatsMixedIndex::Serial_Groups [GOOD] >> BuildStatsMixedIndex::Serial_Groups_History >> TChargeBTreeIndex::NoNodes_Groups [GOOD] >> TChargeBTreeIndex::NoNodes_History >> TestYmqHttpProxy::TestTagQueueMultipleQueriesInflight >> TSharedPageCache::ZeroCache_BTreeIndex [GOOD] >> TSharedPageCache::ZeroCache_FlatIndex |79.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |79.0%| [LD] {RESULT} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut >> DBase::VersionPureParts [GOOD] >> DBase::VersionCompactedMem >> THealthCheckTest::SpecificServerless [GOOD] >> THealthCheckTest::SpecificServerlessWithExclusiveNodes >> TPart::ForwardEnv [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRows [GOOD] >> TPart::WreckPartColumnGroups >> TFlatTableExecutor_VersionedRows::TestVersionedRowsSmallBlobs >> Bloom::Stairs [GOOD] >> BuildStatsBTreeIndex::Single |79.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TEvLocalSyncDataTests::SqueezeBlocks2 [GOOD] >> BuildStatsMixedIndex::Serial_Groups_History [GOOD] >> 
BuildStatsMixedIndex::Single_LowResolution >> TCompactionMulti::MainPageCollectionEdge [GOOD] >> TCompactionMulti::MainPageCollectionEdgeMany >> TChargeBTreeIndex::NoNodes_History [GOOD] >> TChargeBTreeIndex::NoNodes_Groups_History >> TSyncNeighborsTests::SerDes2 [GOOD] >> BuildStatsBTreeIndex::Single [GOOD] >> BuildStatsBTreeIndex::Single_Slices >> TSharedPageCache::ZeroCache_FlatIndex [GOOD] >> TSharedPageCache_Actor::Request_Basics >> TSharedPageCache_Actor::Request_Basics [GOOD] >> TSharedPageCache_Actor::Request_Failed >> TSharedPageCache::Limits_Config [GOOD] >> TSharedPageCache::ClockPro >> BuildStatsBTreeIndex::Single_Slices [GOOD] >> BuildStatsBTreeIndex::Single_History >> TSharedPageCache_Actor::Request_Failed [GOOD] >> TSharedPageCache_Actor::Request_Queue >> BuildStatsMixedIndex::Single_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Slices_LowResolution >> TSharedPageCache_Actor::Request_Queue [GOOD] >> TSharedPageCache_Actor::Request_Queue_Failed >> TSharedPageCache_Actor::Request_Queue_Failed [GOOD] >> TSharedPageCache_Actor::Request_Queue_Fast >> BuildStatsBTreeIndex::Single_History [GOOD] >> BuildStatsBTreeIndex::Single_History_Slices >> TSharedPageCache_Actor::Request_Queue_Fast [GOOD] >> TSharedPageCache_Actor::Request_Sequential >> BuildStatsMixedIndex::Single_Slices_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_LowResolution >> TDatabaseResolverTests::Greenplum_MasterNode >> TSharedPageCache_Actor::Request_Sequential [GOOD] >> TSharedPageCache_Actor::Request_Cached >> DBase::VersionCompactedMem [GOOD] >> DBase::VersionCompactedParts >> BuildStatsMixedIndex::Single_Groups_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_Slices_LowResolution >> TDatabaseResolverTests::Greenplum_MasterNode [GOOD] >> TDatabaseResolverTests::Greenplum_PermissionDenied |79.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncNeighborsTests::SerDes2 [GOOD] >> BuildStatsBTreeIndex::Single_History_Slices [GOOD] >> BuildStatsBTreeIndex::Single_Groups >> TSharedPageCache_Actor::Request_Cached [GOOD] >> TSharedPageCache_Actor::Request_Different_Collections [GOOD] >> TSharedPageCache_Actor::Request_Different_Pages >> TDatabaseResolverTests::Greenplum_PermissionDenied [GOOD] >> TSharedPageCache_Actor::Request_Different_Pages [GOOD] >> TSharedPageCache_Actor::Request_Different_Pages_Reversed >> BuildStatsMixedIndex::Single_Groups_Slices_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_LowResolution >> BuildStatsBTreeIndex::Single_Groups [GOOD] >> BuildStatsBTreeIndex::Single_Groups_Slices >> DataShardVolatile::UpsertBrokenLockArbiter+UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiter-UseSink >> TSyncBrokerTests::ShouldEnqueue >> TSharedPageCache_Actor::Request_Different_Pages_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Subset >> BuildStatsBTreeIndex::Single_Groups_Slices [GOOD] >> BuildStatsBTreeIndex::Single_Groups_History >> TSharedPageCache_Actor::Request_Subset [GOOD] >> TSharedPageCache_Actor::Request_Subset_Shuffled >> TCompactionMulti::MainPageCollectionEdgeMany [GOOD] >> TCompactionMulti::MainPageCollectionOverflow >> TSharedPageCache_Actor::Request_Subset_Shuffled [GOOD] >> TSharedPageCache_Actor::Request_Superset >> BuildStatsMixedIndex::Single_Groups_History_LowResolution [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_Slices_LowResolution >> TCompactionMulti::MainPageCollectionOverflow [GOOD] |79.0%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt >> TCompactionMulti::MainPageCollectionOverflowSmallRefs >> TSharedPageCache_Actor::Request_Superset [GOOD] >> TSyncBrokerTests::ShouldEnqueue [GOOD] >> TSharedPageCache_Actor::Request_Superset_Reversed >> TSyncBrokerTests::ShouldEnqueueWithSameVDiskId |79.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |79.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |79.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |79.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |79.0%| [LD] {RESULT} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut >> TCompactionMulti::MainPageCollectionOverflowSmallRefs [GOOD] >> TCompactionMulti::MainPageCollectionOverflowLargeRefs >> TestKinesisHttpProxy::ListShardsTimestamp [GOOD] >> TestKinesisHttpProxy::TestWrongStream2 [GOOD] >> TSharedPageCache_Actor::Request_Superset_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Crossing >> BuildStatsBTreeIndex::Single_Groups_History [GOOD] >> BuildStatsBTreeIndex::Single_Groups_History_Slices >> TSyncBrokerTests::ShouldEnqueueWithSameVDiskId [GOOD] >> TCompactionMulti::MainPageCollectionOverflowLargeRefs [GOOD] >> TExecutorDb::RandomOps ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Greenplum_PermissionDenied [GOOD] Test command err: 2025-06-25T14:33:25.660983Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed Greenplum database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-greenplum/v1/clusters/etn021us5r9rhld1vgbh/master-hosts': you have no permission to resolve database id into database endpoint. 
>> TSharedPageCache_Actor::Request_Crossing [GOOD] >> TSharedPageCache_Actor::Request_Crossing_Reversed >> DBase::VersionCompactedParts [GOOD] >> Memtable::Basics [GOOD] >> Memtable::BasicsReverse [GOOD] >> Memtable::Markers [GOOD] >> Memtable::Overlap [GOOD] >> Memtable::Wreck >> TSchemeShardTest::ManyDirs [GOOD] >> TSchemeShardTest::ListNotCreatedDirCase >> BuildStatsMixedIndex::Single_Groups_History_Slices_LowResolution [GOOD] >> Charge::Lookups [GOOD] >> Charge::ByKeysBasics >> TSharedPageCache_Actor::Request_Crossing_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Crossing_Shuffled >> Memtable::Wreck [GOOD] >> Memtable::Erased >> TSharedPageCache::ClockPro [GOOD] >> TSharedPageCache::BigCache_BTreeIndex >> BuildStatsBTreeIndex::Single_Groups_History_Slices [GOOD] >> BuildStatsBTreeIndex::Mixed >> Charge::ByKeysBasics [GOOD] >> Charge::ByKeysGroups [GOOD] >> Charge::ByKeysGroupsLimits [GOOD] >> Charge::ByKeysLimits [GOOD] >> Charge::ByKeysReverse [GOOD] >> Charge::ByKeysHistory >> TSharedPageCache_Actor::Request_Crossing_Shuffled [GOOD] >> TSharedPageCache_Actor::Attach_Basics >> Charge::ByKeysHistory [GOOD] >> Charge::ByKeysIndex [GOOD] >> Charge::ByRows [GOOD] >> Charge::ByRowsReverse [GOOD] >> Charge::ByRowsLimits [GOOD] >> Charge::ByRowsLimitsReverse [GOOD] >> DBase::Basics [GOOD] >> DBase::Select [GOOD] >> DBase::Defaults [GOOD] >> DBase::Subsets [GOOD] >> DBase::Garbage [GOOD] >> DBase::Affects [GOOD] >> DBase::Annex [GOOD] >> DBase::AnnexRollbackChanges [GOOD] >> DBase::Outer [GOOD] >> DBase::VersionBasics [GOOD] >> DBase::KIKIMR_15506_MissingSnapshotKeys [GOOD] >> DBase::EraseCacheWithUncommittedChanges [GOOD] >> DBase::EraseCacheWithUncommittedChangesCompacted [GOOD] >> DBase::AlterAndUpsertChangesVisibility [GOOD] >> DBase::UncommittedChangesVisibility [GOOD] >> DBase::UncommittedChangesCommitWithUpdates [GOOD] >> DBase::ReplayNewTable [GOOD] >> DBase::SnapshotNewTable [GOOD] >> DBase::DropModifiedTable [GOOD] >> DBase::KIKIMR_15598_Many_MemTables >> BuildStatsBTreeIndex::Mixed [GOOD] >> BuildStatsBTreeIndex::Mixed_Groups >> TSharedPageCache_Actor::Attach_Basics [GOOD] >> TSharedPageCache_Actor::Attach_Request >> TSyncNeighborsTests::SerDes3 [GOOD] |79.0%| [TA] $(B)/ydb/core/fq/libs/actors/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> Memtable::Erased [GOOD] >> NFwd_TBlobs::MemTableTest [GOOD] >> NFwd_TBlobs::Lower [GOOD] >> NFwd_TBlobs::Sieve [GOOD] >> NFwd_TBlobs::SieveFiltered [GOOD] >> NFwd_TBlobs::Basics [GOOD] >> NFwd_TBlobs::Simple [GOOD] >> NFwd_TBlobs::Shuffle [GOOD] >> NFwd_TBlobs::Grow [GOOD] >> NFwd_TBlobs::Trace [GOOD] >> NFwd_TBlobs::Filtered [GOOD] >> NFwd_TBTreeIndexCache::Basics [GOOD] >> NFwd_TBTreeIndexCache::IndexPagesLocator [GOOD] >> NFwd_TBTreeIndexCache::GetTwice [GOOD] >> NFwd_TBTreeIndexCache::ForwardTwice [GOOD] >> NFwd_TBTreeIndexCache::Forward_OnlyUsed [GOOD] >> NFwd_TBTreeIndexCache::Skip_Done [GOOD] >> NFwd_TBTreeIndexCache::Skip_Done_None [GOOD] >> NFwd_TBTreeIndexCache::Skip_Keep [GOOD] >> NFwd_TBTreeIndexCache::Skip_Wait [GOOD] >> NFwd_TBTreeIndexCache::Trace_BTree [GOOD] >> NFwd_TBTreeIndexCache::Trace_Data [GOOD] >> NFwd_TBTreeIndexCache::End [GOOD] >> NFwd_TBTreeIndexCache::Slices [GOOD] >> NFwd_TBTreeIndexCache::ManyApplies [GOOD] >> NFwd_TFlatIndexCache::Basics [GOOD] >> NFwd_TFlatIndexCache::IndexPagesLocator [GOOD] >> NFwd_TFlatIndexCache::GetTwice [GOOD] >> NFwd_TFlatIndexCache::ForwardTwice [GOOD] >> NFwd_TFlatIndexCache::Skip_Done [GOOD] >> NFwd_TFlatIndexCache::Skip_Done_None [GOOD] >> NFwd_TFlatIndexCache::Skip_Keep [GOOD] >> NFwd_TFlatIndexCache::End [GOOD] >> TQuorumTrackerTests::ErasureNoneNeverHasQuorum_4_1 [GOOD] >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_5_2 [GOOD] >> TSharedPageCache_Actor::Attach_Request [GOOD] >> TSharedPageCache_Actor::Detach_Basics >> TestKinesisHttpProxy::ListShardsToken ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncBrokerTests::ShouldEnqueueWithSameVDiskId [GOOD] Test command err: 2025-06-25T14:33:26.300542Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-06-25T14:33:26.300649Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [1:6:2053], enqueued, active: 1, waiting: 1 2025-06-25T14:33:26.548703Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token sent, active: 1, waiting: 0 2025-06-25T14:33:26.548805Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [2:6:2053], enqueued, active: 1, waiting: 1 2025-06-25T14:33:26.548863Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:79: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [2:7:2054], enqueued, active: 1, waiting: 1 >> TPart::WreckPartColumnGroups [GOOD] >> TPart::PageFailEnvColumnGroups >> BuildStatsBTreeIndex::Mixed_Groups [GOOD] >> BuildStatsBTreeIndex::Mixed_Groups_History >> TSharedPageCache::BigCache_BTreeIndex [GOOD] >> TSharedPageCache::BigCache_FlatIndex >> TestKinesisHttpProxy::TestWrongRequest >> TSharedPageCache_Actor::Detach_Basics [GOOD] >> TSharedPageCache_Actor::Detach_Cached >> DataShardVolatile::DistributedWriteLostPlanThenDrop [GOOD] >> DataShardVolatile::DistributedWriteLostPlanThenSplit >> TSharedPageCache_Actor::Detach_Cached [GOOD] >> TSharedPageCache_Actor::Detach_Expired >> TChargeBTreeIndex::NoNodes_Groups_History [GOOD] >> TChargeBTreeIndex::OneNode |79.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncNeighborsTests::SerDes3 [GOOD] |79.0%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_5_2 [GOOD] >> BuildStatsBTreeIndex::Mixed_Groups_History [GOOD] >> BuildStatsFlatIndex::Single >> THealthCheckTest::OnlyDiskIssueOnInitialPDisks [GOOD] >> THealthCheckTest::ProtobufBelowLimitFor10VdisksIssues >> TSharedPageCache_Actor::Detach_Expired [GOOD] >> TSharedPageCache_Actor::Detach_InFly >> TSharedPageCache::BigCache_FlatIndex [GOOD] >> TSharedPageCache::MiddleCache_BTreeIndex >> BuildStatsFlatIndex::Single [GOOD] >> BuildStatsFlatIndex::Single_Slices >> THealthCheckTest::ShardsLimit905 [GOOD] >> THealthCheckTest::ShardsLimit800 >> TSharedPageCache_Actor::Detach_InFly [GOOD] >> TSharedPageCache_Actor::Detach_Queued |79.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |79.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |79.0%| [TA] {RESULT} $(B)/ydb/core/fq/libs/actors/ut/test-results/unittest/{meta.json ... results_accumulator.log} |79.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test >> TChargeBTreeIndex::OneNode [GOOD] >> TChargeBTreeIndex::OneNode_Groups >> BuildStatsFlatIndex::Single_Slices [GOOD] >> BuildStatsFlatIndex::Single_History >> TSharedPageCache_Actor::Detach_Queued [GOOD] >> TSharedPageCache_Actor::Unregister_Basics >> TSharedPageCache_Actor::Unregister_Basics [GOOD] >> TSharedPageCache_Actor::Unregister_Cached >> TSharedPageCache::MiddleCache_BTreeIndex [GOOD] >> BuildStatsFlatIndex::Single_History [GOOD] >> BuildStatsFlatIndex::Single_History_Slices >> TSchemeShardTest::ListNotCreatedDirCase [GOOD] >> TSchemeShardTest::ListNotCreatedIndexCase >> TSharedPageCache_Actor::Unregister_Cached [GOOD] >> TSharedPageCache_Actor::Unregister_Expired >> TSharedPageCache_Actor::Unregister_Expired [GOOD] >> TSharedPageCache_Actor::Unregister_InFly >> TSharedPageCache_Actor::Unregister_InFly [GOOD] >> TSharedPageCache_Actor::Unregister_Queued >> TSyncBrokerTests::ShouldReturnToken >> BuildStatsFlatIndex::Single_History_Slices [GOOD] >> BuildStatsFlatIndex::Single_Groups ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> NFwd_TFlatIndexCache::End [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:20.054092Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.013 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.014 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.014 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.014 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {146b, 4} 00000.014 II| FAKE_ENV: DS.1 gone, left {105b, 3}, put {105b, 3} 00000.014 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.014 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.014 II| FAKE_ENV: All BS storage groups are stopped 00000.014 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.014 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:20.074553Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 
AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.016 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.017 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.017 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.017 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {292b, 8} 00000.017 II| FAKE_ENV: DS.1 gone, left {210b, 6}, put {210b, 6} 00000.017 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.017 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.017 II| FAKE_ENV: All BS storage groups are stopped 00000.017 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.017 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:20.096639Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.007 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.041 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.042 NN| TABLET_SAUSAGECACHE: Poison cache serviced 1 reqs hit {1 76b} miss {0 0b} 00000.042 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.042 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1181b, 13} 00000.042 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {116b, 2} 00000.042 II| FAKE_ENV: DS.1 gone, left {909b, 3}, put {1913b, 12} 00000.042 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {132b, 2} 00000.042 II| FAKE_ENV: All BS storage groups are stopped 00000.042 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.042 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:20.144226Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.009 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.022 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.022 NN| TABLET_SAUSAGECACHE: Poison cache serviced 1 reqs hit {1 102443b} miss {0 0b} 00000.022 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.023 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {751b, 11} 00000.023 II| FAKE_ENV: DS.1 gone, left {541b, 3}, put {103970b, 10} 00000.023 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.023 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.023 II| FAKE_ENV: All BS storage groups are stopped 00000.023 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.023 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:20.184429Z 00000.006 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.006 II| FAKE_ENV: Starting storage for BS group 0 00000.007 II| FAKE_ENV: Starting storage for BS group 1 00000.007 II| FAKE_ENV: Starting storage for BS group 2 00000.007 II| FAKE_ENV: 
Starting storage for BS group 3 ... blocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A cookie 0 00000.073 II| TABLET_SAUSAGECACHE: Wakeup 1 ... unblocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A 00000.075 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.075 NN| TABLET_SAUSAGECACHE: Poison cache serviced 11 reqs hit {18 513007b} miss {0 0b} 00000.076 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.076 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {2095b, 23} 00000.076 II| FAKE_ENV: DS.1 gone, left {774b, 4}, put {210604b, 21} 00000.076 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {205178b, 4} 00000.076 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {102690b, 4} 00000.076 II| FAKE_ENV: All BS storage groups are stopped 00000.076 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 15.00s 00000.076 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 16}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:20.266964Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.085 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.086 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {3 307329b} miss {0 0b} 00000.086 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.086 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1830b, 23} 00000.086 II| FAKE_ENV: DS.1 gone, left {1247b, 3}, put {311467b, 22} 00000.087 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.087 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.087 II| FAKE_ENV: All BS storage groups are stopped 00000.087 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.087 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:20.359264Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.035 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 5 actors 00000.036 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4 reqs hit {8 307836b} miss {0 0b} 00000.036 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.037 II| FAKE_ENV: DS.0 gone, left {57b, 2}, put {1436b, 31} 00000.037 II| FAKE_ENV: DS.1 gone, left {629b, 3}, put {310476b, 16} 00000.037 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.037 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.037 II| FAKE_ENV: All BS storage groups are stopped 00000.037 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 0.000s 00000.037 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:20.401772Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage 
for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.051 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.051 NN| TABLET_SAUSAGECACHE: Poison cache serviced 2 reqs hit {2 194646b} miss {0 0b} 00000.052 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.052 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1571b, 23} 00000.052 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.052 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.052 II| FAKE_ENV: DS.1 gone, left {529b, 3}, put {197610b, 21} 00000.052 II| FAKE_ENV: All BS storage groups are stopped 00000.052 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.052 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:20.459239Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.014 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.014 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.014 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.014 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {326b, 7} 00000.015 II| FAKE_ENV: DS.1 gone, left {418b, 4}, put {453b, 5} 00000.015 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.015 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.015 II| FAKE_ENV: All BS storage groups are stopped 00000.015 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.015 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:20.480003Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 ... blocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A cookie 0 00000.100 II| TABLET_SAUSAGECACHE: Wakeup 1 ... 
unblocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A 00000.101 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.102 NN| TABLET_SAUSAGECACHE: Poison cache serviced 6 reqs hit {8 410030b} miss {0 0b} 00000.102 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.102 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1494b, 23} 00000.102 II| FAKE_ENV: DS.1 gone, left {504b, 4}, put {310786b, 20} 00000.102 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.102 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.102 II| FAKE_ENV: All BS storage groups are stopped 00000.102 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 15.00s 00000.102 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 16}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:20.587845Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for ... 3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b 
{20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 
1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} 
| ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} >> TSyncBrokerTests::ShouldReturnToken [GOOD] >> TSyncBrokerTests::ShouldReleaseToken >> BuildStatsFlatIndex::Single_Groups [GOOD] >> BuildStatsFlatIndex::Single_Groups_Slices >> TSharedPageCache_Actor::Unregister_Queued [GOOD] >> TSharedPageCache_Actor::Unregister_Queued_Pending >> TSyncBrokerTests::ShouldReleaseToken [GOOD] >> BuildStatsFlatIndex::Single_Groups_Slices [GOOD] >> BuildStatsFlatIndex::Single_Groups_History >> TSharedPageCache_Actor::Unregister_Queued_Pending [GOOD] >> TSwitchableCache::Touch [GOOD] >> TSwitchableCache::Erase [GOOD] >> TSwitchableCache::EvictNext [GOOD] >> TSwitchableCache::UpdateLimit [GOOD] >> TSwitchableCache::Switch_Touch_RotatePages_All [GOOD] >> TSwitchableCache::Switch_Touch_RotatePages_Parts [GOOD] >> TSwitchableCache::Switch_RotatePages_Force [GOOD] >> TSwitchableCache::Switch_RotatePages_Evicts [GOOD] >> TSwitchableCache::Switch_Touch [GOOD] >> TSwitchableCache::Switch_Erase [GOOD] >> TSwitchableCache::Switch_EvictNext [GOOD] >> TSwitchableCache::Switch_UpdateLimit [GOOD] >> TVersions::WreckHead >> TSyncBrokerTests::ShouldProcessAfterRelease >> THealthCheckTest::Issues100GroupsMerging [GOOD] >> THealthCheckTest::Issues100VCardMerging >> TSyncBrokerTests::ShouldProcessAfterRelease [GOOD] >> TSyncBrokerTests::ShouldReleaseInQueue >> BuildStatsFlatIndex::Single_Groups_History [GOOD] >> BuildStatsFlatIndex::Single_Groups_History_Slices |79.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TSyncBrokerTests::ShouldReleaseInQueue [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncBrokerTests::ShouldReleaseToken [GOOD] Test command err: 2025-06-25T14:33:29.192229Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-06-25T14:33:29.313568Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token sent, active: 1, waiting: 0 2025-06-25T14:33:29.313680Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:123: TEvReleaseSyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token released, active: 1, waiting: 0 >> 
BuildStatsFlatIndex::Single_Groups_History_Slices [GOOD] >> BuildStatsFlatIndex::Mixed >> THealthCheckTest::TestTabletIsDead [GOOD] >> THealthCheckTest::TestReBootingTabletIsDead |79.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TPart::PageFailEnvColumnGroups [GOOD] >> TPart::ForwardEnvColumnGroups >> BuildStatsFlatIndex::Mixed [GOOD] >> BuildStatsFlatIndex::Mixed_Groups >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookup [GOOD] >> TChargeBTreeIndex::OneNode_Groups [GOOD] >> TChargeBTreeIndex::OneNode_History ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TSharedPageCache::MiddleCache_BTreeIndex [GOOD] Test command err: SmallQueue: MainQueue: {11 0f 1b}, {14 1f 1b}, {15 2f 1b}, {18 0f 1b}, {19 0f 1b}, {23 0f 1b}, {27 0f 1b} GhostQueue: 9, 12, 13, 16, 17, 20, 21, 24, 25, 28 0.29293 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:23.040142Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.009 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.016 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.016 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.017 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.017 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema 00000.017 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} release 4194304b of static, Memory{0 dyn 0} 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 00000.019 NN| TABLET_SAUSAGECACHE: Update config MemoryLimit: 8388608 ReplacementPolicy: ThreeLeveledLRU 00000.019 NN| TABLET_SAUSAGECACHE: Switch replacement policy from S3FIFO to ThreeLeveledLRU 00000.019 NN| TABLET_SAUSAGECACHE: Switch replacement policy done from S3FIFO to ThreeLeveledLRU 00000.019 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.019 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.020 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{2, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.020 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.020 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.021 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.021 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.021 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{3, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.021 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 1 for step 4 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{4, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 1 for step 5 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{5, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 1 for step 6 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{6, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 1 for step 7 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{7, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 
00000.027 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:9} commited cookie 1 for step 8 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{8, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:10} commited cookie 1 for step 9 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{9, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:11} commited cookie 1 for step 10 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{10, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:12} commited cookie 1 for step 11 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{11, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:13} commited cookie 1 for step 12 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{12, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:14} commited cookie 1 for step 13 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{13, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:15} commited cookie 1 for step 14 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{14, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxW ... 
TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.478 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [4 4] 00000.478 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.478 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 4 ] 00000.478 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 96 ] owner [6:580:2605] 00000.478 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 4 ] 00000.478 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 4 ] cookie 1 00000.478 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.478 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.478 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.478 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 4 117 111 ] 00000.478 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.478 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.479 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.479 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.479 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.479 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [3 4] 00000.479 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.479 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 3 ] 00000.479 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 95 ] owner [6:580:2605] 00000.479 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 3 ] 00000.479 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 3 ] cookie 1 00000.479 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.479 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 
gb} 00000.479 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.479 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 3 117 111 ] 00000.480 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.480 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.480 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.480 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.480 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.480 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [2 4] 00000.480 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.480 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 2 ] 00000.480 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 93 ] owner [6:580:2605] 00000.480 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 2 ] 00000.480 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 2 ] cookie 1 00000.480 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.480 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.480 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.480 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 2 117 111 ] 00000.481 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.481 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.481 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.481 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.481 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 
00000.481 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [1 4] 00000.481 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.481 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 1 ] 00000.481 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 92 ] owner [6:580:2605] 00000.481 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 1 ] 00000.481 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 1 ] cookie 1 00000.481 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.481 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.481 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.481 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 1 117 111 ] 00000.482 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.482 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.482 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.482 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.482 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.482 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [0 4] 00000.482 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.482 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 0 ] 00000.482 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 91 ] owner [6:580:2605] 00000.482 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 0 ] 00000.482 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 0 ] cookie 1 00000.482 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.482 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.482 DD| TABLET_EXECUTOR: Leader{1:3:2} 
Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.482 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 0 117 111 ] Counters: Active:8313958/8388608, Passive:0, MemLimit:-1 00000.483 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.483 II| TABLET_EXECUTOR: Leader{1:3:2} suiciding, Waste{2:0, 10255801b +(0, 0b), 1 trc, -48685b acc} 00000.486 DD| TABLET_SAUSAGECACHE: Unregister owner [6:580:2605] 00000.486 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] 00000.486 DD| TABLET_SAUSAGECACHE: Remove owner [6:580:2605] 00000.486 NN| TABLET_SAUSAGECACHE: Poison cache serviced 138 reqs hit {0 0b} miss {139 12197190b} 00000.486 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.486 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {10191b, 107} 00000.487 II| FAKE_ENV: DS.1 gone, left {10257096b, 5}, put {10305919b, 107} 00000.510 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.510 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.511 II| FAKE_ENV: All BS storage groups are stopped 00000.511 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.512 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 2741}, stopped >> TPart::ForwardEnvColumnGroups [GOOD] >> TPart::Versions [GOOD] >> TPart::ManyVersions [GOOD] >> TPart::ManyDeltas [GOOD] >> TPart::CutKeys_Lz4 [GOOD] >> TPart::CutKeys_Seek [GOOD] >> TPart::CutKeys_SeekPages [GOOD] >> TPart::CutKeys_SeekSlices [GOOD] >> TPart::CutKeys_CutString [GOOD] >> TPart::CutKeys_CutUtf8String [GOOD] >> TPartBtreeIndexIteration::NoNodes >> BuildStatsFlatIndex::Mixed_Groups [GOOD] >> BuildStatsFlatIndex::Mixed_Groups_History ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncBrokerTests::ShouldReleaseInQueue [GOOD] Test command err: 2025-06-25T14:33:29.725873Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-06-25T14:33:29.726017Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [1:6:2053], enqueued, active: 1, waiting: 1 2025-06-25T14:33:29.726098Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:123: TEvReleaseSyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token released, active: 1, waiting: 1 2025-06-25T14:33:29.726168Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:105: ProcessQueue(), VDisk actor id: [0:1:2], actor id: [1:6:2053], token sent, active: 0, waiting: 1 2025-06-25T14:33:29.824188Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [2:5:2052], token sent, active: 1, waiting: 0 2025-06-25T14:33:29.824342Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:90: TEvQuerySyncToken, VDisk actor id: [0:1:2], actor id: [2:6:2053], enqueued, active: 1, waiting: 1 2025-06-25T14:33:29.824427Z node 2 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:146: TEvReleaseSyncToken, VDisk actor id: [0:1:2], actor id: [2:6:2053], removed from queue, active: 1, waiting: 0 >> TSchemeShardTest::ListNotCreatedIndexCase [GOOD] >> TSchemeShardTest::FindSubDomainPathId >> BuildStatsFlatIndex::Mixed_Groups_History [GOOD] >> BuildStatsFlatIndex::Serial >> BasicUsage::WriteSessionCloseIgnoresWrites [GOOD] >> BuildStatsFlatIndex::Serial [GOOD] >> 
BuildStatsFlatIndex::Serial_Groups >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession [GOOD] >> CommitOffset::Commit_FromSession_ToNewChild_WithoutCommitToParent >> TestProgram::JsonValueBinary >> TChargeBTreeIndex::OneNode_History [GOOD] >> TChargeBTreeIndex::OneNode_Groups_History >> BuildStatsFlatIndex::Serial_Groups [GOOD] >> BuildStatsFlatIndex::Serial_Groups_History >> TPartBtreeIndexIteration::NoNodes [GOOD] >> TPartBtreeIndexIteration::NoNodes_Groups >> THealthCheckTest::SpecificServerlessWithExclusiveNodes [GOOD] >> THealthCheckTest::SharedWhenTroublesWithExclusiveNodes >> DBase::KIKIMR_15598_Many_MemTables [GOOD] >> BuildStatsFlatIndex::Serial_Groups_History [GOOD] >> BuildStatsHistogram::Single |79.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestKinesisHttpProxy::TestCounters [GOOD] |79.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonValueBinary [GOOD] |79.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> THealthCheckTest::GreenStatusWhenInitPending [GOOD] >> THealthCheckTest::IgnoreOtherGenerations |79.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TSchemeShardTest::FindSubDomainPathId [GOOD] >> TSchemeShardTest::FindSubDomainPathIdActor ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionCloseIgnoresWrites [GOOD] Test command err: 2025-06-25T14:32:52.327286Z :WriteSessionCloseWaitsForWrites INFO: Random seed for debugging is 1750861972327248 2025-06-25T14:32:53.364835Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894917010634110:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:53.364887Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:53.482187Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894915515648452:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:53.482232Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:53.995923Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012ff/r3tmp/tmpvMT01w/pdisk_1.dat 2025-06-25T14:32:54.084909Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:32:54.384429Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:54.396813Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:54.596617Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:54.624937Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:54.737272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:54.737369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:54.738713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:54.738763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:54.763639Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:32:54.763749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:54.789026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:54.825636Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27248, node 1 2025-06-25T14:32:55.365077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0012ff/r3tmp/yandex0iEwzI.tmp 2025-06-25T14:32:55.365106Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0012ff/r3tmp/yandex0iEwzI.tmp 2025-06-25T14:32:55.365242Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0012ff/r3tmp/yandex0iEwzI.tmp 2025-06-25T14:32:55.365344Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:32:55.536072Z INFO: TTestServer started on Port 7321 GrpcPort 27248 TClient is connected to server localhost:7321 PQClient connected to localhost:27248 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:32:56.406550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:32:58.368481Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894917010634110:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:58.368577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:58.484619Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519894915515648452:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:58.484695Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:00.588339Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894945580419794:2275], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:00.588467Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:00.596524Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894945580419817:2278], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:00.609441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:00.658439Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894945580419830:2279], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-25T14:33:00.943996Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894945580419860:2138] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:01.039335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:01.056225Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894945580419867:2284], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:33:01.061432Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894947075406188:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:33:01.063476Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTMzMmEwNGYtYzhlMThkNTEtNzAwYzE4YTQtZTc5YTAxYzI=, ActorId: [1:7519894947075406113:2302], ActorState: ExecuteState, TraceId: 01jykr38y87674ymzg2912g1kp, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:33:01.070621Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NGUyNWEwMGItYzU0NDYyMWQtMjkxMzJjYjQtYzg2ODRjYmM=, ActorId: [2:7519894945580419791:2273], ActorState: ExecuteState, TraceId: 01jykr38wk7kt8k4m53rrg82jd, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:33:01.084684Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:33:01.093249Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_ ... 
/rt3.dc1--test-topic" name rt3.dc1--test-topic version1 CallPersQueueGRPC request to localhost:11955 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--test-topic" } } 2025-06-25T14:33:25.016419Z node 3 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--test-topic, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC CallPersQueueGRPC request to localhost:11955 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--test-topic" } } 2025-06-25T14:33:25.530045Z node 3 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: Status: 1 ErrorCode: OK MetaResponse { CmdGetTopicMetadataResult { TopicInfo { Topic: "rt3.dc1--test-topic" NumPartitions: 1 Config { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } Version: 1 LocalDC: true Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } TopicPath: "/Root/PQ/rt3.dc1--test-topic" YdbDatabasePath: "/Root" Consumers { Name: "user" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } Version: 0 Important: false } } ErrorCode: OK } } } === Topic created, have version: 1 2025-06-25T14:33:25.677221Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-25T14:33:25.677657Z :INFO: [] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-25T14:33:25.677716Z :INFO: [] MessageGroupId [src] SessionId [] Start write session. Will connect to endpoint: localhost:11955 2025-06-25T14:33:25.745426Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-25T14:33:25.752633Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-25T14:33:25.752671Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-25T14:33:25.780605Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-25T14:33:25.780766Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:42440 2025-06-25T14:33:25.780783Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:42440 proto=v1 topic=test-topic durationSec=0 2025-06-25T14:33:25.780796Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:33:25.782545Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-25T14:33:25.782660Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-25T14:33:25.782669Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS 
Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T14:33:25.782678Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-25T14:33:25.782695Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7519895052267753454:2458] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-25T14:33:25.785362Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7519895052267753454:2458] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-25T14:33:26.042147Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [3:7519895052267753454:2458] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-25T14:33:26.042958Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519895056562720790:2458] connected; active server actors: 1 2025-06-25T14:33:26.043088Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [3:7519895052267753454:2458] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-25T14:33:26.043104Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7519895052267753454:2458] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-25T14:33:26.066509Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519895056562720790:2458] disconnected; active server actors: 1 2025-06-25T14:33:26.066541Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519895056562720790:2458] disconnected no session 2025-06-25T14:33:26.279417Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7519895052267753454:2458] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-25T14:33:26.279466Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519895052267753454:2458] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-25T14:33:26.279485Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7519895052267753454:2458] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-25T14:33:26.279515Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T14:33:26.285242Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2025-06-25T14:33:26.284987Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [3:7519895056562720828:2458], now have 1 active actors on pipe 2025-06-25T14:33:26.285641Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:33:26.285679Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:33:26.285784Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|d93f25e1-f08bc3d7-cb5db3b7-59648394_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-25T14:33:26.285898Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-25T14:33:26.285950Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:33:26.287007Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|d93f25e1-f08bc3d7-cb5db3b7-59648394_0 2025-06-25T14:33:26.286590Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:33:26.286617Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:33:26.286689Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:33:26.293411Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750862006293 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:33:26.293555Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|d93f25e1-f08bc3d7-cb5db3b7-59648394_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-25T14:33:26.295128Z :INFO: [] MessageGroupId [src] SessionId [src|d93f25e1-f08bc3d7-cb5db3b7-59648394_0] Write session: close. 
Timeout = 0 ms 2025-06-25T14:33:26.295179Z :INFO: [] MessageGroupId [src] SessionId [src|d93f25e1-f08bc3d7-cb5db3b7-59648394_0] Write session will now close 2025-06-25T14:33:26.295236Z :DEBUG: [] MessageGroupId [src] SessionId [src|d93f25e1-f08bc3d7-cb5db3b7-59648394_0] Write session: aborting 2025-06-25T14:33:26.295759Z :INFO: [] MessageGroupId [src] SessionId [src|d93f25e1-f08bc3d7-cb5db3b7-59648394_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:33:26.295805Z :DEBUG: [] MessageGroupId [src] SessionId [src|d93f25e1-f08bc3d7-cb5db3b7-59648394_0] Write session: destroy 2025-06-25T14:33:26.310404Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|d93f25e1-f08bc3d7-cb5db3b7-59648394_0 grpc read done: success: 0 data: 2025-06-25T14:33:26.310431Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|d93f25e1-f08bc3d7-cb5db3b7-59648394_0 grpc read failed 2025-06-25T14:33:26.310467Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|d93f25e1-f08bc3d7-cb5db3b7-59648394_0 grpc closed 2025-06-25T14:33:26.310485Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|d93f25e1-f08bc3d7-cb5db3b7-59648394_0 is DEAD 2025-06-25T14:33:26.311350Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:33:26.311930Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7519895056562720828:2458] destroyed 2025-06-25T14:33:26.312006Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. Session was created >>> Ready to answer: ok 2025-06-25T14:33:26.413678Z :ERROR: [/Root] OnFederationDiscovery: Got error. Status: UNAVAILABLE. 
Description: 2025-06-25T14:33:28.932047Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:33:28.932085Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded >> TopicAutoscaling::ReadFromTimestamp_PQv1 [GOOD] >> TestKinesisHttpProxy::TestEmptyHttpBody ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonValueBinary [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\032\000\t\211\004?\020\235?\002\001\235?\004\000\032\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\032\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D VisitAll\000\t\211\020?H\211\006?H\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?nNJson2.JsonDocumentSqlValueConvertToUtf8\202\003?p\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?b?:\t\211\014?d\211\002?d\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\206\203\005@\200\203\005@\202\022\000\003?\222\"Json2.CompilePath\202\003?\224\000\002\017\003?\210\000\003?\212\000\003?\214\000\003?\216\000?2\036\010\000?j\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\264\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\032\000\t\211\004?\020\235?\002\001\235?\004\000\032\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\032\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D 
VisitAll\000\t\211\020?H\211\006?H\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?nNJson2.JsonDocumentSqlValueConvertToUtf8\202\003?p\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?b?:\t\211\014?d\211\002?d\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\206\203\005@\200\203\005@\202\022\000\003?\222\"Json2.CompilePath\202\003?\224\000\002\017\003?\210\000\003?\212\000\003?\214\000\003?\216\000?2\036\010\000?j\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\264\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 7B226B6579223A31307D, 7B226B6579223A302E317D, 7B226B6579223A66616C73657D, 7B22616E6F74686572223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 
0102000021000000140000008403000001000000800300006B6579000000000000002440, 0102000021000000140000008403000001000000800300006B6579009A9999999999B93F, 0102000021000000140000000000000001000000800300006B657900, 01020000210000001400000003030000020000008004000040050000616E6F746865720076616C756500, 010100000000000000000000 ] Check output for Utf8 FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203\014?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?p N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t" ... 
{ Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203B\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?6 VisitAll\000\t\211\020?J\211\006?J\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?p@Json2.JsonDocumentSqlValueNumber\202\003?r\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?d?<\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\210\203\005@\200\203\005@\202\022\000\003?\224\"Json2.CompilePath\202\003?\226\000\002\017\003?\212\000\003?\214\000\003?\216\000\003?\220\000?4\036\010\000?l\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000\t\211\004?6\203\005@?F\030Invoke\000\003?\270\016Convert?\266\001\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 7B226B6579223A31307D, 7B226B6579223A302E317D, 7B226B6579223A66616C73657D, 7B22616E6F74686572223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 0102000021000000140000008403000001000000800300006B6579000000000000002440, 0102000021000000140000008403000001000000800300006B6579009A9999999999B93F, 0102000021000000140000000000000001000000800300006B657900, 01020000210000001400000003030000020000008004000040050000616E6F746865720076616C756500, 010100000000000000000000 ] Check output for Float FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?p@Json2.JsonDocumentSqlValueNumber\202\003?r\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?d?<\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\210\203\005@\200\203\005@\202\022\000\003?\224\"Json2.CompilePath\202\003?\226\000\002\017\003?\212\000\003?\214\000\003?\216\000\003?\220\000?4\036\010\000?l\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\266\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\203\021H\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?p@Json2.JsonDocumentSqlValueNumber\202\003?r\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?d?<\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\210\203\005@\200\203\005@\202\022\000\003?\224\"Json2.CompilePath\202\003?\226\000\002\017\003?\212\000\003?\214\000\003?\216\000\003?\220\000?4\036\010\000?l\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\266\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 7B226B6579223A31307D, 7B226B6579223A302E317D, 7B226B6579223A66616C73657D, 7B22616E6F74686572223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 0102000021000000140000008403000001000000800300006B6579000000000000002440, 0102000021000000140000008403000001000000800300006B6579009A9999999999B93F, 0102000021000000140000000000000001000000800300006B657900, 01020000210000001400000003030000020000008004000040050000616E6F746865720076616C756500, 010100000000000000000000 ] Check output for Double FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; >> TS3WrapperTests::HeadUnknownObject >> TPartBtreeIndexIteration::NoNodes_Groups [GOOD] >> TPartBtreeIndexIteration::NoNodes_History |79.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |79.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |79.1%| [LD] {RESULT} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut >> TS3WrapperTests::HeadUnknownObject [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> DBase::KIKIMR_15598_Many_MemTables [GOOD] Test command err: 3 parts: [0:0:1:0:0:0:0] 167 rows, 7 pages, 1 levels: (91, 38) (166, 63) (325, 116) (394, 139) (481, 168) [0:0:2:0:0:0:0] 166 rows, 8 pages, 2 levels: (631, 218) (709, 244) (853, 292) (934, 319) (1087, 370) [0:0:3:0:0:0:0] 167 rows, 8 pages, 2 levels: (1156, 393) (1246, 423) (1396, 473) (1471, 498) (1633, 552) Checking BTree: Touched 100% bytes, 7 pages RowCountHistogram: 5% (actual 5%) key = (91, 38) value = 25 (actual 25 - 0% error) 5% (actual 5%) key = (166, 63) value = 50 (actual 50 - 0% error) 4% (actual 4%) key = (253, 92) value = 74 (actual 74 - 0% error) 4% 
(actual 4%) key = (325, 116) value = 96 (actual 96 - 0% error) 4% (actual 4%) key = (394, 139) value = 119 (actual 119 - 0% error) 5% (actual 5%) key = (481, 168) value = 144 (actual 144 - 0% error) 4% (actual 4%) key = (553, 192) value = 167 (actual 166 - 0% error) 4% (actual 5%) key = (631, 218) value = 191 (actual 191 - 0% error) 4% (actual 4%) key = (709, 244) value = 215 (actual 215 - 0% error) 3% (actual 3%) key = (766, 263) value = 234 (actual 234 - 0% error) 5% (actual 5%) key = (853, 292) value = 261 (actual 261 - 0% error) 4% (actual 4%) key = (934, 319) value = 285 (actual 285 - 0% error) 4% (actual 4%) key = (1006, 343) value = 309 (actual 309 - 0% error) 4% (actual 4%) key = (1087, 370) value = 333 (actual 332 - 0% error) 4% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 4% (actual 4%) key = (91, 38) value = 1974 (actual 1974 - 0% error) 4% (actual 4%) key = (166, 63) value = 3992 (actual 3992 - 0% error) 4% (actual 4%) key = (253, 92) value = 5889 (actual 5889 - 0% error) 4% (actual 4%) key = (325, 116) value = 7868 (actual 7868 - 0% error) 4% (actual 4%) key = (394, 139) value = 9910 (actual 9910 - 0% error) 4% (actual 4%) key = (481, 168) value = 11938 (actual 11938 - 0% error) 4% (actual 4%) key = (553, 192) value = 13685 (actual 13685 - 0% error) 4% (actual 4%) key = (631, 218) value = 15674 (actual 15674 - 0% error) 4% (actual 4%) key = (709, 244) value = 17709 (actual 17709 - 0% error) 4% (actual 4%) key = (766, 263) value = 19664 (actual 19664 - 0% error) 4% (actual 4%) key = (853, 292) value = 21673 (actual 21673 - 0% error) 4% (actual 4%) key = (934, 319) value = 23712 (actual 23712 - 0% error) 4% (actual 4%) key = (1006, 343) value = 25687 (actual 25687 - 0% error) 4% (actual 4%) key = (1087, 370) value = 27765 (actual 27678 - 0% error) 4% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) Checking Flat: Touched 100% bytes, 3 pages RowCountHistogram: 5% (actual 5%) key = (91, 38) value = 25 (actual 25 - 0% error) 5% (actual 5%) key = (166, 63) value = 50 (actual 50 - 0% error) 4% (actual 4%) key = (253, 92) value = 74 (actual 74 - 0% error) 4% (actual 4%) key = (325, 116) value = 96 (actual 96 - 0% error) 4% (actual 4%) key = (394, 139) value = 119 (actual 119 - 0% error) 5% (actual 5%) key = (481, 168) value = 144 (actual 144 - 0% error) 4% (actual 4%) key = (556, 193) value = 167 (actual 167 - 0% error) 4% (actual 4%) key = (631, 218) value = 191 (actual 191 - 0% error) 4% (actual 4%) key = (709, 244) value = 215 (actual 215 - 0% error) 3% (actual 3%) key = (766, 263) value = 234 (actual 234 - 0% error) 5% (actual 5%) key = (853, 292) value = 
261 (actual 261 - 0% error) 4% (actual 4%) key = (934, 319) value = 285 (actual 285 - 0% error) 4% (actual 4%) key = (1006, 343) value = 309 (actual 309 - 0% error) 4% (actual 4%) key = (1087, 370) value = 332 (actual 332 - 0% error) 0% (actual 0%) key = (1090, 371) value = 333 (actual 333 - 0% error) 4% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 4% (actual 4%) key = (91, 38) value = 1974 (actual 1974 - 0% error) 4% (actual 4%) key = (166, 63) value = 3992 (actual 3992 - 0% error) 4% (actual 4%) key = (253, 92) value = 5889 (actual 5889 - 0% error) 4% (actual 4%) key = (325, 116) value = 7868 (actual 7868 - 0% error) 4% (actual 4%) key = (394, 139) value = 9910 (actual 9910 - 0% error) 4% (actual 4%) key = (481, 168) value = 11938 (actual 11938 - 0% error) 4% (actual 4%) key = (556, 193) value = 13685 (actual 13685 - 0% error) 4% (actual 4%) key = (631, 218) value = 15674 (actual 15674 - 0% error) 4% (actual 4%) key = (709, 244) value = 17709 (actual 17709 - 0% error) 4% (actual 4%) key = (766, 263) value = 19664 (actual 19664 - 0% error) 4% (actual 4%) key = (853, 292) value = 21673 (actual 21673 - 0% error) 4% (actual 4%) key = (934, 319) value = 23712 (actual 23712 - 0% error) 4% (actual 4%) key = (1006, 343) value = 25687 (actual 25687 - 0% error) 4% (actual 4%) key = (1087, 370) value = 27678 (actual 27678 - 0% error) 0% (actual 0%) key = (1090, 371) value = 27765 (actual 27765 - 0% error) 4% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) Checking Mixed: Touched 100% bytes, 7 pages RowCountHistogram: 14% (actual 5%) key = (91, 38) value = 70 (actual 25 - 9% error) 5% (actual 5%) key = (166, 63) value = 95 (actual 50 - 9% error) 4% (actual 4%) key = (253, 92) value = 119 (actual 74 - 9% error) 4% (actual 4%) key = (325, 116) value = 141 (actual 96 - 9% error) 4% (actual 4%) key = (394, 139) value = 164 (actual 119 - 9% error) 5% (actual 5%) key = (481, 168) value = 189 (actual 144 - 9% error) 4% (actual 9%) key = (631, 218) value = 212 (actual 191 - 4% error) 4% (actual 4%) key = (709, 244) value = 236 (actual 215 - 4% error) 3% (actual 3%) key = (766, 263) value = 255 (actual 234 - 4% error) 5% (actual 5%) key = (853, 292) value = 282 (actual 261 - 4% error) 4% (actual 4%) key = (934, 319) value = 306 (actual 285 - 4% error) 4% (actual 4%) key = (1006, 343) value = 330 (actual 309 - 4% error) 4% (actual 4%) key = (1087, 370) value = 353 (actual 332 - 4% error) 0% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% 
error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 14% (actual 4%) key = (91, 38) value = 5939 (actual 1974 - 9% error) 4% (actual 4%) key = (166, 63) value = 7957 (actual 3992 - 9% error) 4% (actual 4%) key = (253, 92) value = 9854 (actual 5889 - 9% error) 4% (actual 4%) key = (325, 116) value = 11833 (actual 7868 - 9% error) 4% (actual 4%) key = (394, 139) value = 13875 (actual 9910 - 9% error) 4% (actual 4%) key = (481, 168) value = 15903 (actual 11938 - 9% error) 4% (actual 8%) key = (631, 218) value = 17650 (actual 15674 - 4% error) 4% (actual 4%) key = (709, 244) value = 19685 (actual 17709 - 4% error) 4% (actual 4%) key = (766, 263) value = 21640 (actual 19664 - 4% error) 4% (actual 4%) key = (853, 292) value = 23649 (actual 21673 - 4% error) 4% (actual 4%) key = (934, 319) value = 25688 (actual 23712 - 4% error) 4% (actual 4%) key = (1006, 343) value = 27663 (actual 25687 - 4% error) 4% (actual 4%) key = (1087, 370) value = 29654 (actual 27678 - 4% error) 0% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) 3 parts: [0:0:1:0:0:0:0] 167 rows, 7 pages, 1 levels: (91, 38) (166, 63) (325, 116) (394, 139) (481, 168) [0:0:2:0:0:0:0] 166 rows, 8 pages, 2 levels: (631, 218) (709, 244) (853, 292) (934, 319) (1087, 370) [0:0:3:0:0:0:0] 167 rows, 8 pages, 2 levels: (1156, 393) (1246, 423) (1396, 473) (1471, 498) (1633, 552) Checking BTree: Touched 33% bytes, 2 pages RowCountHistogram: 14% (actual 14%) key = (253, 92) value = 74 (actual 74 - 0% error) 18% (actual 18%) key = (553, 192) value = 167 (actual 166 - 0% error) 33% (actual 33%) key = (1087, 370) value = 333 (actual 332 - 0% error) 18% (actual 18%) key = (1396, 473) value = 426 (actual 426 - 0% error) 14% (actual 14%) DataSizeHistogram: 14% (actual 14%) key = (253, 92) value = 5889 (actual 5889 - 0% error) 18% (actual 18%) key = (553, 192) value = 13685 (actual 13685 - 0% error) 33% (actual 33%) key = (1087, 370) value = 27765 (actual 27678 - 0% error) 18% (actual 19%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 15% (actual 15%) Checking Flat: Touched 100% bytes, 3 pages RowCountHistogram: 23% (actual 23%) key = (394, 139) value = 119 (actual 119 - 0% error) 23% (actual 23%) key = (766, 263) value = 234 (actual 234 - 0% error) 24% (actual 24%) key = (1156, 393) value = 354 (actual 354 - 0% error) 23% (actual 23%) key = (1543, 522) value = 470 (actual 470 - 0% error) 6% (actual 6%) DataSi ... 
85 - 0% error) 4% (actual 4%) key = (631, 218) value = 15674 (actual 15674 - 0% error) 4% (actual 4%) key = (709, 244) value = 17709 (actual 17709 - 0% error) 4% (actual 4%) key = (766, 263) value = 19664 (actual 19664 - 0% error) 4% (actual 4%) key = (853, 292) value = 21673 (actual 21673 - 0% error) 4% (actual 4%) key = (934, 319) value = 23712 (actual 23712 - 0% error) 4% (actual 4%) key = (1006, 343) value = 25687 (actual 25687 - 0% error) 4% (actual 4%) key = (1087, 370) value = 27765 (actual 27678 - 0% error) 4% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) Checking Flat: Touched 100% bytes, 3 pages RowCountHistogram: 5% (actual 5%) key = (91, 38) value = 25 (actual 25 - 0% error) 5% (actual 5%) key = (166, 63) value = 50 (actual 50 - 0% error) 4% (actual 4%) key = (253, 92) value = 74 (actual 74 - 0% error) 4% (actual 4%) key = (325, 116) value = 96 (actual 96 - 0% error) 4% (actual 4%) key = (394, 139) value = 119 (actual 119 - 0% error) 5% (actual 5%) key = (481, 168) value = 144 (actual 144 - 0% error) 4% (actual 4%) key = (556, 193) value = 167 (actual 167 - 0% error) 4% (actual 4%) key = (631, 218) value = 191 (actual 191 - 0% error) 4% (actual 4%) key = (709, 244) value = 215 (actual 215 - 0% error) 3% (actual 3%) key = (766, 263) value = 234 (actual 234 - 0% error) 5% (actual 5%) key = (853, 292) value = 261 (actual 261 - 0% error) 4% (actual 4%) key = (934, 319) value = 285 (actual 285 - 0% error) 4% (actual 4%) key = (1006, 343) value = 309 (actual 309 - 0% error) 4% (actual 4%) key = (1087, 370) value = 332 (actual 332 - 0% error) 0% (actual 0%) key = (1090, 371) value = 333 (actual 333 - 0% error) 4% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 4% (actual 4%) key = (91, 38) value = 1974 (actual 1974 - 0% error) 4% (actual 4%) key = (166, 63) value = 3992 (actual 3992 - 0% error) 4% (actual 4%) key = (253, 92) value = 5889 (actual 5889 - 0% error) 4% (actual 4%) key = (325, 116) value = 7868 (actual 7868 - 0% error) 4% (actual 4%) key = (394, 139) value = 9910 (actual 9910 - 0% error) 4% (actual 4%) key = (481, 168) value = 11938 (actual 11938 - 0% error) 4% (actual 4%) key = (556, 193) value = 13685 (actual 13685 - 0% error) 4% (actual 4%) key = (631, 218) value = 15674 (actual 15674 - 0% error) 4% (actual 4%) key = (709, 244) value = 17709 (actual 17709 - 0% error) 4% (actual 4%) key = (766, 263) value = 19664 (actual 19664 - 0% error) 4% (actual 4%) key = (853, 292) value = 21673 (actual 21673 - 0% error) 4% (actual 4%) key = (934, 319) value = 23712 (actual 23712 - 0% error) 4% (actual 4%) key = (1006, 343) value = 25687 (actual 25687 - 0% 
error) 4% (actual 4%) key = (1087, 370) value = 27678 (actual 27678 - 0% error) 0% (actual 0%) key = (1090, 371) value = 27765 (actual 27765 - 0% error) 4% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 (actual 41669 - 0% error) 0% (actual 0%) Checking Mixed: Touched 100% bytes, 3 pages RowCountHistogram: 14% (actual 5%) key = (91, 38) value = 70 (actual 25 - 9% error) 5% (actual 5%) key = (166, 63) value = 95 (actual 50 - 9% error) 4% (actual 4%) key = (253, 92) value = 119 (actual 74 - 9% error) 4% (actual 4%) key = (325, 116) value = 141 (actual 96 - 9% error) 4% (actual 4%) key = (394, 139) value = 164 (actual 119 - 9% error) 5% (actual 5%) key = (481, 168) value = 189 (actual 144 - 9% error) 4% (actual 9%) key = (631, 218) value = 212 (actual 191 - 4% error) 4% (actual 4%) key = (709, 244) value = 236 (actual 215 - 4% error) 3% (actual 3%) key = (766, 263) value = 255 (actual 234 - 4% error) 5% (actual 5%) key = (853, 292) value = 282 (actual 261 - 4% error) 4% (actual 4%) key = (934, 319) value = 306 (actual 285 - 4% error) 4% (actual 4%) key = (1006, 343) value = 330 (actual 309 - 4% error) 4% (actual 4%) key = (1087, 370) value = 353 (actual 332 - 4% error) 0% (actual 4%) key = (1156, 393) value = 354 (actual 354 - 0% error) 5% (actual 5%) key = (1246, 423) value = 380 (actual 380 - 0% error) 4% (actual 4%) key = (1324, 449) value = 404 (actual 404 - 0% error) 4% (actual 4%) key = (1396, 473) value = 426 (actual 426 - 0% error) 4% (actual 4%) key = (1471, 498) value = 448 (actual 448 - 0% error) 4% (actual 4%) key = (1543, 522) value = 470 (actual 470 - 0% error) 5% (actual 5%) key = (1633, 552) value = 496 (actual 496 - 0% error) 0% (actual 0%) DataSizeHistogram: 14% (actual 4%) key = (91, 38) value = 5939 (actual 1974 - 9% error) 4% (actual 4%) key = (166, 63) value = 7957 (actual 3992 - 9% error) 4% (actual 4%) key = (253, 92) value = 9854 (actual 5889 - 9% error) 4% (actual 4%) key = (325, 116) value = 11833 (actual 7868 - 9% error) 4% (actual 4%) key = (394, 139) value = 13875 (actual 9910 - 9% error) 4% (actual 4%) key = (481, 168) value = 15903 (actual 11938 - 9% error) 4% (actual 8%) key = (631, 218) value = 17650 (actual 15674 - 4% error) 4% (actual 4%) key = (709, 244) value = 19685 (actual 17709 - 4% error) 4% (actual 4%) key = (766, 263) value = 21640 (actual 19664 - 4% error) 4% (actual 4%) key = (853, 292) value = 23649 (actual 21673 - 4% error) 4% (actual 4%) key = (934, 319) value = 25688 (actual 23712 - 4% error) 4% (actual 4%) key = (1006, 343) value = 27663 (actual 25687 - 4% error) 4% (actual 4%) key = (1087, 370) value = 29654 (actual 27678 - 4% error) 0% (actual 4%) key = (1156, 393) value = 29741 (actual 29741 - 0% error) 4% (actual 4%) key = (1246, 423) value = 31726 (actual 31726 - 0% error) 4% (actual 4%) key = (1324, 449) value = 33698 (actual 33698 - 0% error) 4% (actual 4%) key = (1396, 473) value = 35700 (actual 35700 - 0% error) 4% (actual 4%) key = (1471, 498) value = 37620 (actual 37620 - 0% error) 4% (actual 4%) key = (1543, 522) value = 39641 (actual 39641 - 0% error) 4% (actual 4%) key = (1633, 552) value = 41669 
(actual 41669 - 0% error) 0% (actual 0%) 3 parts: [0:0:1:0:0:0:0] 167 rows, 1 pages, 0 levels: () () () () () [0:0:2:0:0:0:0] 166 rows, 1 pages, 0 levels: () () () () () [0:0:3:0:0:0:0] 167 rows, 1 pages, 0 levels: () () () () () Checking BTree: Touched 0% bytes, 0 pages RowCountHistogram: 33% (actual 33%) key = (553, 192) value = 167 (actual 166 - 0% error) 33% (actual 33%) key = (1087, 370) value = 333 (actual 332 - 0% error) 33% (actual 33%) DataSizeHistogram: 32% (actual 32%) key = (553, 192) value = 13565 (actual 13565 - 0% error) 33% (actual 33%) key = (1087, 370) value = 27505 (actual 27505 - 0% error) 33% (actual 33%) Checking Flat: Touched 100% bytes, 3 pages RowCountHistogram: 33% (actual 33%) key = (556, 193) value = 167 (actual 167 - 0% error) 33% (actual 33%) key = (1090, 371) value = 333 (actual 333 - 0% error) 33% (actual 33%) DataSizeHistogram: 32% (actual 32%) key = (556, 193) value = 13565 (actual 13565 - 0% error) 33% (actual 33%) key = (1090, 371) value = 27505 (actual 27505 - 0% error) 33% (actual 33%) Checking Mixed: Touched 0% bytes, 0 pages RowCountHistogram: 100% (actual 100%) DataSizeHistogram: 100% (actual 100%) Got : 24000 2106439 49449 38 44 Expected: 24000 2106439 49449 38 44 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 49449 20 23 Expected: 12816 1121048 49449 20 23 Got : 24000 3547100 81694 64 44 Expected: 24000 3547100 81694 64 44 { [1012, 1475), [1682, 1985), [2727, 3553), [3599, 3992), [5397, 7244), [9181, 9807), [9993, 10178), [12209, 14029), [15089, 15342), [16198, 16984), [17238, 18436), [21087, 21876), [23701, 23794) } Got : 9582 1425198 81694 26 17 Expected: 9582 1425198 81694 26 17 Got : 24000 2460139 23760 42 41 Expected: 24000 2460139 23760 42 41 { [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) } Got : 10440 1060798 23760 18 18 Expected: 10440 1060798 23760 18 18 Got : 24000 4054050 46562 68 43 Expected: 24000 4054050 46562 68 43 { [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) } Got : 13570 2277890 46562 38 24 Expected: 13570 2277890 46562 38 24 Got : 24000 2106459 49449 38 44 Expected: 24000 2106459 49449 38 44 Got : 24000 2460219 23555 41 41 Expected: 24000 2460219 23555 41 41 Got : 24000 4054270 46543 66 43 Expected: 24000 4054270 46543 66 43 Got : 24000 2106479 49555 38 44 Expected: 24000 2106479 49555 38 44 Got : 24000 2460259 23628 41 41 Expected: 24000 2460259 23628 41 41 Got : 24000 4054290 46640 65 43 Expected: 24000 4054290 46640 65 43 Got : 24000 2106439 66674 3 4 Expected: 24000 2106439 66674 3 4 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 66674 2 2 Expected: 12816 1121048 66674 2 2 Got : 24000 2460139 33541 4 4 Expected: 24000 2460139 33541 4 4 { [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) } Got : 10440 1060798 33541 1 1 Expected: 10440 1060798 
33541 1 1 Got : 24000 4054050 64742 7 4 Expected: 24000 4054050 64742 7 4 { [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) } Got : 13570 2234982 64742 4 2 Expected: 13570 2234982 64742 4 2 >> TSchemeShardTest::FindSubDomainPathIdActor [GOOD] >> TSchemeShardTest::FindSubDomainPathIdActorAsync |79.1%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest |79.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest |79.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest |79.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession_ParentCommittedToEnd [GOOD] >> CommitOffset::Commit_WithSession_ToPastParentPartition |79.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> TPartBtreeIndexIteration::NoNodes_History [GOOD] >> TPartBtreeIndexIteration::OneNode ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::HeadUnknownObject [GOOD] Test command err: 2025-06-25T14:33:33.643836Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 4A55FECD-0394-411B-ABF4-4C8DDA7CF0B6, request# HeadObject { Bucket: TEST Key: key } REQUEST: HEAD /TEST/key HTTP/1.1 HEADERS: Host: localhost:9715 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: FA8F1F01-BB83-44BC-83D7-9A51FC408FC9 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 2025-06-25T14:33:33.661807Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 4A55FECD-0394-411B-ABF4-4C8DDA7CF0B6, response# No response body. 
>> TestKinesisHttpProxy::TestWrongRequest [GOOD] >> YdbIndexTable::OnlineBuild >> TPartBtreeIndexIteration::OneNode [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups >> TestKinesisHttpProxy::ListShardsToken [GOOD] >> TSchemeShardTest::FindSubDomainPathIdActorAsync [GOOD] >> TChargeBTreeIndex::OneNode_Groups_History [GOOD] >> TChargeBTreeIndex::FewNodes >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 61953, MsgBus: 17954 2025-06-25T14:29:37.229572Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894076270900785:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:37.229606Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001491/r3tmp/tmpQtwUIH/pdisk_1.dat 2025-06-25T14:29:37.939312Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:37.948633Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894076270900765:2080] 1750861777211426 != 1750861777211429 2025-06-25T14:29:37.965725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:37.965826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:37.968931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61953, node 1 2025-06-25T14:29:38.255819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:29:38.255843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:29:38.255849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:29:38.255977Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:38.260586Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17954 TClient is connected to server localhost:17954 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:39.048378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:29:39.065816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:29:39.080358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:39.372866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:39.598519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:39.676356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:29:41.542396Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894093450771591:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:41.542496Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:41.858526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:41.895961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:41.938922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:41.974303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:42.001103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:42.030802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:42.100955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:29:42.153217Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894097745739547:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:42.153286Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:42.153423Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894097745739552:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:42.156485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:42.194048Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894097745739554:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:29:42.229601Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894076270900785:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:42.229663Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:29:42.272083Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894097745739605:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:43.137713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... MGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2bb26xc398qsbkz85hcb, Create QueryResponse for error on request, msg: 2025-06-25T14:32:31.018223Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2bqccq9fwx5bz3axc3wa, Create QueryResponse for error on request, msg: 2025-06-25T14:32:32.303911Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2czf2rmnp7zgd5r2ezgj, Create QueryResponse for error on request, msg: 2025-06-25T14:32:35.028701Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2fmb1ha7rw08wknahzjw, Create QueryResponse for error on request, msg: 2025-06-25T14:32:35.597587Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2g5s55w9shq717wvt0t5, Create QueryResponse for error on request, msg: 2025-06-25T14:32:35.956264Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2gh33vqr6t0rkem484rw, Create QueryResponse for error on request, msg: 2025-06-25T14:32:37.228611Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2hrpcc0709qy9h6ned5h, Create QueryResponse for error on request, msg: 2025-06-25T14:32:37.696353Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, 
TraceId: 01jykr2j78epctet2t88vg9g7b, Create QueryResponse for error on request, msg: 2025-06-25T14:32:40.476094Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2mxv71yqd85qex061zsv, Create QueryResponse for error on request, msg: 2025-06-25T14:32:40.893816Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2natd70exdgmgfn9yt9a, Create QueryResponse for error on request, msg: 2025-06-25T14:32:41.396784Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2nte33b8tvjg404rhqay, Create QueryResponse for error on request, msg: 2025-06-25T14:32:41.905076Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2pac9djfszvw0ghy42e6, Create QueryResponse for error on request, msg: 2025-06-25T14:32:42.355716Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2pqx2w9rqts7tx4v6nb7, Create QueryResponse for error on request, msg: 2025-06-25T14:32:42.884811Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2q820wa4c9rkwfje1vm5, Create QueryResponse for error on request, msg: 2025-06-25T14:32:43.442107Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2qt73j44yayfzj5zkc2y, Create QueryResponse for error on request, msg: 2025-06-25T14:32:45.324816Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2smr08ne7jat44yhawxx, Create QueryResponse for error on request, msg: 2025-06-25T14:32:45.989076Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2t9e9pgd2q4dcr13zfzw, Create QueryResponse for error on request, msg: 2025-06-25T14:32:47.500741Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2vrkaspfw0gdrwepr947, Create QueryResponse for error on request, msg: 2025-06-25T14:32:47.896623Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 
01jykr2w4vfkb1e3sv9gew5d7v, Create QueryResponse for error on request, msg: 2025-06-25T14:32:49.487210Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2xp84yfjdsstve67mh4y, Create QueryResponse for error on request, msg: 2025-06-25T14:32:50.036569Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2y7gampqcdnjcjvkvpht, Create QueryResponse for error on request, msg: 2025-06-25T14:32:50.473218Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2yn73v15f0ex942cddxy, Create QueryResponse for error on request, msg: 2025-06-25T14:32:51.605490Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519894908283630102:2479] TxId: 281474976715852. Ctx: { TraceId: 01jykr2zrbcb908kc4x445a81q, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 390ms } {
: Error: Cancelling after 393ms during execution } ] 2025-06-25T14:32:51.605768Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr2zrbcb908kc4x445a81q, Create QueryResponse for error on request, msg: 2025-06-25T14:32:52.506170Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr30mp0p2wc2khpegsjq0n, Create QueryResponse for error on request, msg: 2025-06-25T14:32:53.307941Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr31dj96a4g09be5kzh39n, Create QueryResponse for error on request, msg: 2025-06-25T14:32:56.911041Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr34xs3rartzk25pxtab4c, Create QueryResponse for error on request, msg: 2025-06-25T14:32:57.371617Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr35bz9efya312f1cxrgn5, Create QueryResponse for error on request, msg: 2025-06-25T14:32:59.203902Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr37510fpxb89e6d75fhg1, Create QueryResponse for error on request, msg: 2025-06-25T14:33:00.028684Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr37yt2mteq685p9ayqqat, Create QueryResponse for error on request, msg: 2025-06-25T14:33:00.665570Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519894946938336284:2479] TxId: 281474976715875. Ctx: { TraceId: 01jykr38jn7cs1dr8mjfmmpaqw, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 418ms } {
: Error: Cancelling after 419ms during execution } ] 2025-06-25T14:33:00.665778Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr38jn7cs1dr8mjfmmpaqw, Create QueryResponse for error on request, msg: 2025-06-25T14:33:01.433272Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519894951233303615:2479] TxId: 281474976715878. Ctx: { TraceId: 01jykr39akdbq7sthxksq00874, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 420ms } {
: Error: Cancelling after 420ms during execution } ] 2025-06-25T14:33:01.433486Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr39akdbq7sthxksq00874, Create QueryResponse for error on request, msg: 2025-06-25T14:33:05.668155Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr3decbekc9yxrfww8zfpe, Create QueryResponse for error on request, msg: 2025-06-25T14:33:12.152766Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTk4ZjBlYWMtNDZjYzM1N2YtMThmMGJjMGUtYzVkZDczNTk=, ActorId: [4:7519894611930882649:2479], ActorState: ExecuteState, TraceId: 01jykr3krf51zrcj9aq09cf2nq, Create QueryResponse for error on request, msg: >> TestYmqHttpProxy::TestTagQueueMultipleQueriesInflight [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::ReadFromTimestamp_PQv1 [GOOD] Test command err: 2025-06-25T14:30:45.994958Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894367351466178:2150];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:45.995973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018f5/r3tmp/tmpuA3jYT/pdisk_1.dat 2025-06-25T14:30:46.293699Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:30:46.394947Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894367351466067:2080] 1750861845965825 != 1750861845965828 2025-06-25T14:30:46.430298Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:46.434125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:46.434205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:46.437384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30096, node 1 2025-06-25T14:30:46.516949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0018f5/r3tmp/yandexh4G6SG.tmp 2025-06-25T14:30:46.516971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0018f5/r3tmp/yandexh4G6SG.tmp 2025-06-25T14:30:46.517126Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0018f5/r3tmp/yandexh4G6SG.tmp 2025-06-25T14:30:46.517222Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:46.566300Z INFO: TTestServer started on Port 18211 GrpcPort 30096 TClient is connected to server localhost:18211 PQClient 
connected to localhost:30096 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:46.956173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:46.973949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:46.988576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:30:46.991798Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:46.994497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:30:47.129248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:47.145238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-25T14:30:49.866679Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894384531336044:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.866769Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894384531336057:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.866822Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.872530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:49.908592Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894384531336059:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:30:50.008054Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894384531336123:2444] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:50.522878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:50.546233Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894388826303429:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:50.548154Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTE0ZDZmMWMtYzc3MDA1NDctM2U0MGQ3Y2QtMTgyMTBiNTY=, ActorId: [1:7519894384531336042:2298], ActorState: ExecuteState, TraceId: 01jykqz976fhxc00sxc4sj8qr7, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:50.552597Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:50.566200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:50.652272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519894388826303720:2621] 2025-06-25T14:30:50.975968Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894367351466178:2150];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:50.976059Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-25T14:30:57.081416Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-25T14:30:57.110221Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:30:57.111408Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519894418891074997:2704], Recipient [1:7519894371646433750:2187]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:57.111433Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:57.111450Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:30:57.111493Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519894418891074993:2701], Recipient [1:7519894371646433750:2187]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-25T14:30:57.111508Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14: ... node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [7:7519895013876281007:2419], Recipient [7:7519895013876280959:2416]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:31.643109Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:31.643256Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [7:7519895069710857262:2806], Recipient [7:7519895069710857184:2798]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:31.643271Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:31.643318Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [7:7519895069710857258:2804], Recipient [7:7519895069710857182:2797]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:31.643335Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:31.643862Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 14 DataSize: 0 UsedReserveSize: 0 2025-06-25T14:33:31.644025Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][test-topic] ProcessPendingStats. 
PendingUpdates size 3 2025-06-25T14:33:31.644379Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271188001, Sender [7:7519895013876280949:2415], Recipient [7:7519894966631639672:2157]: NKikimrPQ.TEvPeriodicTopicStats PathId: 13 Generation: 1 Round: 14 DataSize: 0 UsedReserveSize: 0 SubDomainOutOfSpace: false 2025-06-25T14:33:31.644408Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4993: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-25T14:33:31.644427Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046644480, LocalPathId: 13] DataSize 0 UsedReserveSize 0 2025-06-25T14:33:31.644451Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.099995s, queue# 1 2025-06-25T14:33:31.652492Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895013876280959:2416], Partition 0, Sender [0:0:0], Recipient [7:7519895013876281007:2419], Cookie: 0 2025-06-25T14:33:31.652566Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895013876281007:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:31.652590Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:31.652635Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:31.652700Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:31.652723Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:31.652752Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:33:31.652809Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895069710857182:2797], Partition 2, Sender [0:0:0], Recipient [7:7519895069710857258:2804], Cookie: 0 2025-06-25T14:33:31.652839Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895069710857258:2804]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:31.652852Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:31.652877Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:31.652910Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:31.652925Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:31.652943Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:33:31.652983Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895069710857184:2798], Partition 1, Sender [0:0:0], Recipient [7:7519895069710857262:2806], Cookie: 0 2025-06-25T14:33:31.653014Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895069710857262:2806]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:31.653027Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:31.653049Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:31.653080Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:31.653095Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:31.653112Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:33:31.662407Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [7:7519895013876280949:2415], Recipient [7:7519894966631639672:2157]: NKikimrSchemeOp.TDescribePath PathId: 13 SchemeshardId: 72057594046644480 2025-06-25T14:33:31.662459Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:33:31.746061Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [7:7519894966631639672:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-06-25T14:33:31.746123Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5132: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-06-25T14:33:31.746142Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-25T14:33:31.746162Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-06-25T14:33:31.746232Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.000000s, queue# 1 2025-06-25T14:33:31.748162Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [7:7519894966631639672:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-06-25T14:33:31.748210Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5132: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-06-25T14:33:31.748228Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:33:31.756611Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895069710857182:2797], Partition 2, Sender [0:0:0], Recipient [7:7519895069710857258:2804], Cookie: 0 2025-06-25T14:33:31.756615Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet 
[7:7519895013876280959:2416], Partition 0, Sender [0:0:0], Recipient [7:7519895013876281007:2419], Cookie: 0 2025-06-25T14:33:31.756700Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895069710857258:2804]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:31.756702Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895013876281007:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:31.756733Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:31.756734Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:31.756784Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:31.756785Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:31.756865Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:31.756876Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:31.756890Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:31.756912Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:31.756923Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:33:31.756945Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:33:31.756984Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895069710857184:2798], Partition 1, Sender [0:0:0], Recipient [7:7519895069710857262:2806], Cookie: 0 2025-06-25T14:33:31.757019Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895069710857262:2806]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:31.757034Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:31.757061Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:31.757095Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:31.757114Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:31.757136Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0
>> TChargeBTreeIndex::FewNodes [GOOD]
>> TChargeBTreeIndex::FewNodes_Groups
>> YdbIndexTable::MultiShardTableOneUniqIndex
>> BuildStatsHistogram::Single [GOOD]
|79.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest
>> BuildStatsHistogram::Single_Slices
>> YdbIndexTable::MultiShardTableOneIndexIndexOverlapDataColumn
>> YdbIndexTable::MultiShardTableOneIndex
|79.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest
>> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_FlatIndex [GOOD]
>> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_BTreeIndex
>> THealthCheckTest::ShardsLimit800 [GOOD]
>> THealthCheckTest::ShardsNoLimit
>> QueryStats::Ranges [GOOD]
|79.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows
|79.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows
|79.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows
>> YdbIndexTable::MultiShardTableUniqAndNonUniqIndex
|79.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest
>> QueryStats::Ranges [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::TestWrongRequest [GOOD]
Test command err:
2025-06-25T14:32:24.921668Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894792044700973:2061];send_to=[0:7307199536658146131:7762515];
2025-06-25T14:32:24.921709Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c9a/r3tmp/tmptKnIHP/pdisk_1.dat
2025-06-25T14:32:25.837191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-25T14:32:25.837276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-25T14:32:25.847667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-25T14:32:25.893319Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
2025-06-25T14:32:25.900654Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894792044700952:2080] 1750861944895004 != 1750861944895007
2025-06-25T14:32:25.980635Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
TServer::EnableGrpc on GrpcPort 17558, node 1
2025-06-25T14:32:26.184994Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-25T14:32:26.185022Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-25T14:32:26.185030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-25T14:32:26.185128Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:10471
WaitRootIsUp
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:26.724169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:26.746123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:10471 2025-06-25T14:32:26.982726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T14:32:26.996870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:32:27.001788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-25T14:32:27.018895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-25T14:32:27.025723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:27.171979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:32:27.225593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:32:27.276272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.333875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.414893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.472678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.543706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.610555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:27.707139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:29.378907Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894813519538818:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:29.379046Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:29.389373Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894813519538830:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:29.393715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:29.403868Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894813519538832:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-25T14:32:29.507051Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894813519538883:2870] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:29.924578Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894792044700973:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:29.924648Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:30.134127Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 28147 ... l { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T14:33:34.108900Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-06-25T14:33:34.108986Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 53ms 2025-06-25T14:33:34.109818Z node 8 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T14:33:34.112253Z node 8 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional 
{ Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T14:33:34.112286Z node 8 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 50ms 2025-06-25T14:33:34.118260Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T14:33:34.118318Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-06-25T14:33:34.118464Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 59ms 2025-06-25T14:33:34.119011Z node 8 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 
ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T14:33:34.485219Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7519895091868595083:2406]: Pool not found 2025-06-25T14:33:34.486036Z node 8 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-06-25T14:33:34.614839Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7519895091868595080:2405]: Pool not found 2025-06-25T14:33:34.615686Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-06-25T14:33:34.619376Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519895091868595195:2422], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:34.619481Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [8:7519895091868595196:2423], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-06-25T14:33:34.619536Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:35.028663Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:38368) incoming connection opened 2025-06-25T14:33:35.028747Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:38368) -> (POST /, 87 bytes) 2025-06-25T14:33:35.028867Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [d8ec:8d00:6050:0:c0ec:8d00:6050:0] request [CreateStream] url [/] database [] requestId: a7262f75-785c4eb2-9585592d-4a9b5307 2025-06-25T14:33:35.029445Z node 8 :HTTP_PROXY WARN: http_req.cpp:948: http request [CreateStream] requestId [a7262f75-785c4eb2-9585592d-4a9b5307] got new request with incorrect json from [d8ec:8d00:6050:0:c0ec:8d00:6050:0] database '' 2025-06-25T14:33:35.029617Z node 8 :HTTP_PROXY INFO: http_req.cpp:1211: http request [CreateStream] requestId [a7262f75-785c4eb2-9585592d-4a9b5307] reply with status: BAD_REQUEST message: ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName 2025-06-25T14:33:35.029907Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:38368) <- (400 InvalidArgumentException, 135 bytes) 2025-06-25T14:33:35.029957Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:289: (#37,[::1]:38368) Request: POST / HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: kinesisApi.CreateStream X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked { "ShardCount":5, "StreamName":"testtopic", "WrongStreamName":"WrongStreamName" } 2025-06-25T14:33:35.029990Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:38368) Response: HTTP/1.1 400 InvalidArgumentException Connection: close x-amzn-requestid: a7262f75-785c4eb2-9585592d-4a9b5307 x-amz-crc32: 3053902336 Content-Type: application/x-amz-json-1.1 Content-Length: 135 2025-06-25T14:33:35.030097Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:38368) connection closed Http output full {"__type":"InvalidArgumentException","message":"ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName"} 400 {"__type":"InvalidArgumentException","message":"ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName"} 2025-06-25T14:33:35.065855Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7519895091868595193:2421]: Pool not found 2025-06-25T14:33:35.068552Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:138: [cleanup removed queues] there are no queues to delete >> TPartBtreeIndexIteration::OneNode_Groups [GOOD] >> TPartBtreeIndexIteration::OneNode_History >> DataShardVolatile::DistributedWriteLostPlanThenSplit [GOOD] >> DataShardVolatile::DistributedOutOfOrderFollowerConsistency >> DataShardVolatile::UpsertBrokenLockArbiter-UseSink [GOOD] >> DataShardVolatile::UpsertNoLocksArbiterRestart+UseSink >> THealthCheckTest::ProtobufBelowLimitFor10VdisksIssues [GOOD] >> THealthCheckTest::ProtobufUnderLimitFor100LargeVdisksIssues |79.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK [GOOD] Test command err: 2025-06-25T14:30:46.704918Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894371305855992:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:46.736945Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:46.997111Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018e7/r3tmp/tmpNLNMkf/pdisk_1.dat 2025-06-25T14:30:47.275262Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:47.276768Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894371305855964:2080] 1750861846675671 != 1750861846675674 2025-06-25T14:30:47.282099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:47.282198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:47.285191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64137, node 1 2025-06-25T14:30:47.485026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0018e7/r3tmp/yandexNT9ST8.tmp 2025-06-25T14:30:47.485048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0018e7/r3tmp/yandexNT9ST8.tmp 2025-06-25T14:30:47.485216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0018e7/r3tmp/yandexNT9ST8.tmp 2025-06-25T14:30:47.485353Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:47.568027Z INFO: TTestServer started on Port 21137 GrpcPort 64137 2025-06-25T14:30:47.753823Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21137 PQClient connected to localhost:64137 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:30:48.336954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:48.360716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:48.376979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:30:48.387600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:30:48.682002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:48.712721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-25T14:30:51.571696Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894392780693228:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:51.571790Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:51.571851Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894392780693255:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:51.575580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:51.588359Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894392780693257:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:30:51.669046Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894392780693321:2449] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:52.006660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894371305855992:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:52.006714Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:52.026189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:52.027298Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894392780693329:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:52.045879Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NGM4OWU1YjUtMTk2MWY3N2YtODA3MGI1MDMtNzE0ZjVhYWE=, ActorId: [1:7519894392780693225:2299], ActorState: ExecuteState, TraceId: 01jykqzaxf9gnzpwdp6ppjp9e6, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:52.055901Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:52.130706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:52.264826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519894397075660918:2628] === CheckClustersList. Ok 2025-06-25T14:30:58.666640Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-25T14:30:58.697328Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:30:58.698540Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519894422845464892:2706], Recipient [1:7519894375600823581:2146]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:58.698573Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:58.698590Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:30:58.698627Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519894422845464888:2703], Recipient [1:7519894375600823581:2146]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-25T14:30:58.698641Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14: ... 
"test" } SourceIdMaxCounts: 6000000 } 2025-06-25T14:33:34.705355Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188536, Sender [7:7519895027732670408:2426], Recipient [7:7519895027732670397:2425]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-25T14:33:34.705370Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5340: HandleHook, processing event TEvPQ::TEvSubDomainStatus 2025-06-25T14:33:34.705420Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271187975, Sender [7:7519895027732670408:2426], Recipient [7:7519895027732670397:2425]: NKikimrPQ.TStatus GetStatForAllConsumers: true 2025-06-25T14:33:34.705433Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5315: HandleHook, processing event TEvPersQueue::TEvStatus 2025-06-25T14:33:34.705447Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1813: [PQ: 72075186224037892] Handle TEvPersQueue::TEvStatus 2025-06-25T14:33:34.705505Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [7:7519895083567246682:2821], Partition 2, Sender [7:7519895083567246682:2821], Recipient [7:7519895083567246756:2827], Cookie: 0 2025-06-25T14:33:34.705547Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188536, Sender [7:7519895083567246682:2821], Recipient [7:7519895083567246756:2827]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-25T14:33:34.705562Z node 7 :PERSQUEUE TRACE: partition.h:626: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-06-25T14:33:34.705596Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7519895083567246682:2821], Partition 2, Sender [7:7519895083567246682:2821], Recipient [7:7519895083567246756:2827], Cookie: 0 2025-06-25T14:33:34.705626Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188491, Sender [7:7519895083567246682:2821], Recipient [7:7519895083567246756:2827]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-25T14:33:34.705639Z node 7 :PERSQUEUE TRACE: partition.h:602: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-25T14:33:34.705807Z node 7 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 3 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-25T14:33:34.705998Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [7:7519895083567246759:2829], Recipient [7:7519895083567246683:2822]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:34.706016Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:34.706080Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188536 
(NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [7:7519895027732670397:2425], Partition 0, Sender [7:7519895027732670397:2425], Recipient [7:7519895027732670460:2429], Cookie: 0 2025-06-25T14:33:34.706122Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188536, Sender [7:7519895027732670397:2425], Recipient [7:7519895027732670460:2429]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-25T14:33:34.706137Z node 7 :PERSQUEUE TRACE: partition.h:626: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-06-25T14:33:34.706171Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7519895027732670397:2425], Partition 0, Sender [7:7519895027732670397:2425], Recipient [7:7519895027732670460:2429], Cookie: 0 2025-06-25T14:33:34.706198Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188491, Sender [7:7519895027732670397:2425], Recipient [7:7519895027732670460:2429]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-25T14:33:34.706210Z node 7 :PERSQUEUE TRACE: partition.h:602: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-25T14:33:34.706358Z node 7 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 3 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-25T14:33:34.706452Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [7:7519895083567246756:2827], Recipient [7:7519895083567246682:2821]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:34.706466Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:34.706508Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [7:7519895027732670460:2429], Recipient [7:7519895027732670397:2425]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:34.706519Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:34.706872Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 15 DataSize: 0 UsedReserveSize: 0 2025-06-25T14:33:34.707014Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][test-topic] ProcessPendingStats. 
PendingUpdates size 3 2025-06-25T14:33:34.716687Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271188001, Sender [7:7519895027732670408:2426], Recipient [7:7519894967603127191:2140]: NKikimrPQ.TEvPeriodicTopicStats PathId: 13 Generation: 1 Round: 15 DataSize: 0 UsedReserveSize: 0 SubDomainOutOfSpace: false 2025-06-25T14:33:34.716737Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4993: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-25T14:33:34.716775Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046644480, LocalPathId: 13] DataSize 0 UsedReserveSize 0 2025-06-25T14:33:34.716802Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.099996s, queue# 1 2025-06-25T14:33:34.716880Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [7:7519895027732670408:2426], Recipient [7:7519894967603127191:2140]: NKikimrSchemeOp.TDescribePath PathId: 13 SchemeshardId: 72057594046644480 2025-06-25T14:33:34.716903Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:33:34.744120Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895083567246682:2821], Partition 2, Sender [0:0:0], Recipient [7:7519895083567246756:2827], Cookie: 0 2025-06-25T14:33:34.744203Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895083567246756:2827]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:34.744232Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:34.744282Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:34.744375Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:34.744411Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:34.744442Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:33:34.745876Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895083567246683:2822], Partition 1, Sender [0:0:0], Recipient [7:7519895083567246759:2829], Cookie: 0 2025-06-25T14:33:34.745937Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895083567246759:2829]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:34.745961Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:34.746000Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:34.746071Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:34.746094Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:34.746119Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:33:34.746169Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895027732670397:2425], Partition 0, Sender [0:0:0], Recipient [7:7519895027732670460:2429], Cookie: 0 2025-06-25T14:33:34.746200Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895027732670460:2429]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:34.746222Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:34.746248Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:34.746277Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:34.746291Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:34.746305Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 |79.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest >> THealthCheckTest::SharedWhenTroublesWithExclusiveNodes [GOOD] |79.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/query_stats/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::ListShardsToken [GOOD] Test command err: 2025-06-25T14:32:24.650467Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894791214541347:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:24.653572Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d18/r3tmp/tmpsxjQHy/pdisk_1.dat 2025-06-25T14:32:25.710205Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:25.749366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:25.749477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:25.757098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:25.805829Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:25.811572Z 
node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16785, node 1 2025-06-25T14:32:25.817821Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894791214541182:2080] 1750861944599853 != 1750861944599856 2025-06-25T14:32:25.897850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:25.897876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:25.897885Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:25.898016Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7395 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:26.465863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:7395 2025-06-25T14:32:26.784803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T14:32:26.793825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:32:26.795731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 
2025-06-25T14:32:26.816175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-25T14:32:26.825396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:26.967430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:32:27.020280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-06-25T14:32:27.024980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:32:27.084329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-06-25T14:32:27.088729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.138495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.195380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:27.250979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.311200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.427631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.494554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:29.634596Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894791214541347:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:29.634686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:29.973173Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894812689379044:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:29.973274Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894812689379056:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:29.973341Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:29.977496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:29.989186Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894812689379058:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-25T14:32:30.069897Z node 1 :TX_PROXY ERRO ... _impl.cpp:3683: [PQ: 72075186224037908] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:33:35.882175Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037911] Registered with mediator time cast 2025-06-25T14:33:35.883036Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037907] Registered with mediator time cast 2025-06-25T14:33:35.883136Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037908] Registered with mediator time cast 2025-06-25T14:33:35.883211Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037908] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:33:35.883222Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037908] Try execute txs with state DELETING 2025-06-25T14:33:35.883233Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037908] TxId 281474976715688, State DELETING 2025-06-25T14:33:35.883250Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037908] delete TxId 281474976715688 2025-06-25T14:33:35.883298Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037910] Registered with mediator time cast 2025-06-25T14:33:35.883624Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037909] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:33:35.883639Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037909] Try execute txs with state DELETING 2025-06-25T14:33:35.883651Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037909] TxId 281474976715688, State DELETING 2025-06-25T14:33:35.883672Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037909] delete TxId 281474976715688 Http output full {} 200 {} 2025-06-25T14:33:35.887988Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [CreateStream] requestId [ee7f96ce-29f74097-c775c5ff-6e626c50] reply ok 2025-06-25T14:33:35.888341Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:36718) <- (200 , 2 bytes) 2025-06-25T14:33:35.888464Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:36718) connection closed 2025-06-25T14:33:35.889453Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:40598) incoming connection opened 2025-06-25T14:33:35.889517Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:40598) -> (POST /Root, 157 bytes) 2025-06-25T14:33:35.889628Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [1845:9800:6050:0:45:9800:6050:0] request [ListShards] url [/Root] database [/Root] requestId: fe7addf4-496458a6-f1adf23a-d4372f1d 2025-06-25T14:33:35.890041Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [ListShards] requestId [fe7addf4-496458a6-f1adf23a-d4372f1d] got new request from [1845:9800:6050:0:45:9800:6050:0] database '/Root' stream 'teststream' 2025-06-25T14:33:35.890473Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [ListShards] requestId [fe7addf4-496458a6-f1adf23a-d4372f1d] [auth] Authorized successfully E0000 00:00:1750862015.890638 176950 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-25T14:33:35.890567Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [ListShards] requestId [fe7addf4-496458a6-f1adf23a-d4372f1d] sending grpc request to '' database: '/Root' iam token size: 0 2025-06-25T14:33:35.892823Z node 8 
:PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037911] server connected, pipe [8:7519895098040376666:2487], now have 1 active actors on pipe 2025-06-25T14:33:35.892884Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037907] server connected, pipe [8:7519895098040376665:2486], now have 1 active actors on pipe 2025-06-25T14:33:35.893805Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListShards] requestId [fe7addf4-496458a6-f1adf23a-d4372f1d] reply ok 2025-06-25T14:33:35.894153Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:40598) <- (200 , 449 bytes) 2025-06-25T14:33:35.894252Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:40598) connection closed 2025-06-25T14:33:35.894450Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037907] server disconnected, pipe [8:7519895098040376665:2486] destroyed 2025-06-25T14:33:35.894477Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037911] server disconnected, pipe [8:7519895098040376666:2487] destroyed Http output full {"NextToken":"CJXbiLz6MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 200 {"NextToken":"CJXbiLz6MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 2025-06-25T14:33:35.898010Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:40602) incoming connection opened 2025-06-25T14:33:35.898081Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:40602) -> (POST /Root, 157 bytes) 2025-06-25T14:33:35.898242Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [f866:8700:6050:0:e066:8700:6050:0] request [ListShards] url [/Root] database [/Root] requestId: 323fe504-9ec055f4-21c6a587-f59bc9a5 2025-06-25T14:33:35.898683Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [ListShards] requestId [323fe504-9ec055f4-21c6a587-f59bc9a5] got new request from [f866:8700:6050:0:e066:8700:6050:0] database '/Root' stream 'teststream' E0000 00:00:1750862015.899795 176949 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-25T14:33:35.899629Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [ListShards] requestId [323fe504-9ec055f4-21c6a587-f59bc9a5] [auth] Authorized successfully 2025-06-25T14:33:35.899729Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [ListShards] requestId [323fe504-9ec055f4-21c6a587-f59bc9a5] sending grpc request to '' database: '/Root' iam token size: 0 2025-06-25T14:33:35.900972Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037907] server connected, pipe [8:7519895098040376677:2491], now have 1 active actors on pipe 2025-06-25T14:33:35.901013Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037911] server 
connected, pipe [8:7519895098040376678:2492], now have 1 active actors on pipe 2025-06-25T14:33:35.901851Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListShards] requestId [323fe504-9ec055f4-21c6a587-f59bc9a5] reply ok Http output full {"NextToken":"CJ3biLz6MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 2025-06-25T14:33:35.902190Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:40602) <- (200 , 449 bytes) 2025-06-25T14:33:35.902284Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:40602) connection closed 200 {"NextToken":"CJ3biLz6MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 2025-06-25T14:33:35.902608Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037907] server disconnected, pipe [8:7519895098040376677:2491] destroyed 2025-06-25T14:33:35.902634Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037911] server disconnected, pipe [8:7519895098040376678:2492] destroyed 2025-06-25T14:33:35.903954Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:40610) incoming connection opened 2025-06-25T14:33:35.904023Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:40610) -> (POST /Root, 157 bytes) 2025-06-25T14:33:35.904162Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [b8f5:8600:6050:0:a0f5:8600:6050:0] request [ListShards] url [/Root] database [/Root] requestId: bccf626a-abb5f598-12f601cd-cb73b80d 2025-06-25T14:33:35.904596Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [ListShards] requestId [bccf626a-abb5f598-12f601cd-cb73b80d] got new request from [b8f5:8600:6050:0:a0f5:8600:6050:0] database '/Root' stream 'teststream' E0000 00:00:1750862015.908598 176949 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-25T14:33:35.908392Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [ListShards] requestId [bccf626a-abb5f598-12f601cd-cb73b80d] [auth] Authorized successfully 2025-06-25T14:33:35.908517Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [ListShards] requestId [bccf626a-abb5f598-12f601cd-cb73b80d] sending grpc request to '' database: '/Root' iam token size: 0 2025-06-25T14:33:35.910071Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037907] server connected, pipe [8:7519895098040376689:2496], now have 1 active actors on pipe 2025-06-25T14:33:35.910116Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037911] server connected, pipe [8:7519895098040376690:2497], now have 1 active actors on pipe Http output full 
{"NextToken":"CKbbiLz6MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 200 {"NextToken":"CKbbiLz6MhACGAIiCnRlc3RzdHJlYW0=","Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}}]} 2025-06-25T14:33:35.911158Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListShards] requestId [bccf626a-abb5f598-12f601cd-cb73b80d] reply ok 2025-06-25T14:33:35.911554Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:40610) <- (200 , 449 bytes) 2025-06-25T14:33:35.911655Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:40610) connection closed 2025-06-25T14:33:35.913681Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037907] server disconnected, pipe [8:7519895098040376689:2496] destroyed 2025-06-25T14:33:35.913715Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037911] server disconnected, pipe [8:7519895098040376690:2497] destroyed >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestTagQueueMultipleQueriesInflight [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000cab/r3tmp/tmp0P6qks/pdisk_1.dat 2025-06-25T14:32:24.149684Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894791755802782:2183];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:24.150033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:24.990231Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:24.991015Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894787460835323:2080] 1750861943962682 != 1750861943962685 2025-06-25T14:32:25.041070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:25.041181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:25.049407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31245, node 1 2025-06-25T14:32:25.169032Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:25.336696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:25.336713Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:25.336720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:25.336814Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8323 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:26.148148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:8323 2025-06-25T14:32:26.691377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T14:32:26.702021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:32:26.713759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-25T14:32:26.749973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-25T14:32:26.756248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:32:27.116343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.221509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:32:27.301364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-06-25T14:32:27.305916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.374463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:32:27.462751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:27.532825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.596688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:27.637343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:32:27.718236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:29.028924Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894791755802782:2183];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:29.029002Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:30.349632Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894817525607786:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:30.349803Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:30.350259Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894817525607798:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:30.356174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:30.379981Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894817525607800:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-25T14:32:30.479450Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894817525607851:2874] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:30.989904Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 2814749 ... : '/Root' iam token size: 0 2025-06-25T14:33:36.219148Z node 7 :SQS DEBUG: ymq_proxy.cpp:148: Got new request in YMQ proxy. FolderId: folder4, CloudId: cloud4, UserSid: fake_user_sid@as, RequestId: 1cb1f08b-60c93a2d-52104445-9201c260 2025-06-25T14:33:36.219291Z node 7 :SQS DEBUG: proxy_actor.cpp:263: Request [1cb1f08b-60c93a2d-52104445-9201c260] Proxy actor: used user_name='cloud4', queue_name='000000000000000301v0', folder_id='folder4' 2025-06-25T14:33:36.219301Z node 7 :SQS DEBUG: proxy_actor.cpp:78: Request [1cb1f08b-60c93a2d-52104445-9201c260] Request proxy started 2025-06-25T14:33:36.219573Z node 7 :SQS DEBUG: service.cpp:761: Request [1cb1f08b-60c93a2d-52104445-9201c260] Answer configuration for queue [cloud4/000000000000000301v0] without leader 2025-06-25T14:33:36.220133Z node 7 :SQS DEBUG: proxy_actor.cpp:97: Request [1cb1f08b-60c93a2d-52104445-9201c260] Get configuration duration: 0ms 2025-06-25T14:33:36.220567Z node 7 :SQS DEBUG: proxy_service.cpp:246: Request [1cb1f08b-60c93a2d-52104445-9201c260] Send get leader node request to sqs service for cloud4/000000000000000301v0 2025-06-25T14:33:36.220708Z node 7 :SQS DEBUG: service.cpp:581: Request [1cb1f08b-60c93a2d-52104445-9201c260] Leader node for queue [cloud4/000000000000000301v0] is 7 2025-06-25T14:33:36.220783Z node 7 :SQS DEBUG: proxy_service.cpp:170: Request [1cb1f08b-60c93a2d-52104445-9201c260] Got leader node for queue response. Node id: 7. Status: 0 2025-06-25T14:33:36.220893Z node 7 :SQS TRACE: proxy_service.cpp:303: Request [1cb1f08b-60c93a2d-52104445-9201c260] Sending request from proxy to leader node 7: ListQueueTags { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000301v0" } RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" 2025-06-25T14:33:36.220968Z node 7 :SQS DEBUG: proxy_service.cpp:70: Request [1cb1f08b-60c93a2d-52104445-9201c260] Received Sqs Request: ListQueueTags { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000301v0" } RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" 2025-06-25T14:33:36.221024Z node 7 :SQS DEBUG: action.h:133: Request [1cb1f08b-60c93a2d-52104445-9201c260] Request started. 
Actor: [7:7519895101306713812:5432] 2025-06-25T14:33:36.221049Z node 7 :SQS TRACE: service.cpp:1472: Inc local leader ref for actor [7:7519895101306713812:5432] 2025-06-25T14:33:36.221060Z node 7 :SQS DEBUG: service.cpp:754: Request [1cb1f08b-60c93a2d-52104445-9201c260] Forward configuration request to queue [cloud4/000000000000000301v0] leader 2025-06-25T14:33:36.226913Z node 7 :SQS TRACE: executor.cpp:286: Request [8025e8c3-df80475c-f3184c45-7180c639] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] HandleResponse { Status: 48 TxId: 281474976710927 Step: 1750862016253 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "attrs" Type { Kind: Optional Optional { Item { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "ContentBasedDeduplication" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "DelaySeconds" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "DlqArn" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "MaxReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MaximumMessageSize" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MessageRetentionPeriod" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveMessageWaitTime" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ShowDetailedCountersDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "VisibilityTimeout" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "queueExists" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "tags" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } Value { Struct { Optional { Optional { Struct { Optional { Bool: false } } Struct { Optional { Uint64: 0 } } Struct { Optional { Text: "" } } Struct { Optional { Text: "" } } Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Uint64: 262144 } } Struct { Optional { Uint64: 345600000 } } Struct { Optional { Uint64: 0 } } Struct { } Struct { Optional { Uint64: 30000 } } } } } Struct { Optional { Bool: true } } Struct { Optional { Text: "{}" } } } } } 2025-06-25T14:33:36.226955Z node 7 :SQS DEBUG: executor.cpp:287: Request [8025e8c3-df80475c-f3184c45-7180c639] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Attempt 1 execution duration: 39ms 2025-06-25T14:33:36.227517Z node 7 :SQS TRACE: executor.cpp:325: Request [8025e8c3-df80475c-f3184c45-7180c639] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Sending mkql execution result: { Status: 48 TxId: 281474976710927 Step: 1750862016253 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "attrs" Type { Kind: Optional Optional { Item { Kind: Optional 
Optional { Item { Kind: Struct Struct { Member { Name: "ContentBasedDeduplication" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "DelaySeconds" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "DlqArn" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "MaxReceiveCount" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MaximumMessageSize" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "MessageRetentionPeriod" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ReceiveMessageWaitTime" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "ShowDetailedCountersDeadline" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "VisibilityTimeout" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "queueExists" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "tags" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } Value { Struct { Optional { Optional { Struct { Optional { Bool: false } } Struct { Optional { Uint64: 0 } } Struct { Optional { Text: "" } } Struct { Optional { Text: "" } } Struct { Optional { Bool: true } } Struct { Optional { Uint64: 0 } } Struct { Optional { Uint64: 262144 } } Struct { Optional { Uint64: 345600000 } } Struct { Optional { Uint64: 0 } } Struct { } Struct { Optional { Uint64: 30000 } } } } } Struct { Optional { Bool: true } } Struct { Optional { Text: "{}" } } } } } 2025-06-25T14:33:36.227610Z node 7 :SQS TRACE: executor.cpp:327: Request [8025e8c3-df80475c-f3184c45-7180c639] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] Minikql data response: {"attrs": {"ContentBasedDeduplication": false, "DelaySeconds": 0, "DlqArn": "", "DlqName": "", "FifoQueue": true, "MaxReceiveCount": 0, "MaximumMessageSize": 262144, "MessageRetentionPeriod": 345600000, "ReceiveMessageWaitTime": 0, "ShowDetailedCountersDeadline": null, "VisibilityTimeout": 30000}, "queueExists": true, "tags": "{}"} 2025-06-25T14:33:36.227763Z node 7 :SQS DEBUG: executor.cpp:401: Request [8025e8c3-df80475c-f3184c45-7180c639] Query(idx=INTERNAL_GET_QUEUE_ATTRIBUTES_ID) Queue [cloud4/000000000000000301v0] execution duration: 39ms 2025-06-25T14:33:36.228173Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [8025e8c3-df80475c-f3184c45-7180c639] Sending executed reply 2025-06-25T14:33:36.229058Z node 7 :SQS DEBUG: action.h:627: Request [1cb1f08b-60c93a2d-52104445-9201c260] Get configuration duration: 8ms 2025-06-25T14:33:36.229079Z node 7 :SQS TRACE: action.h:647: Request [1cb1f08b-60c93a2d-52104445-9201c260] Got configuration. 
Root url: http://ghrun-kqfvx6aroe.auto.internal:8771, Shards: 1, Fail: 0 2025-06-25T14:33:36.229101Z node 7 :SQS TRACE: action.h:427: Request [1cb1f08b-60c93a2d-52104445-9201c260] DoRoutine 2025-06-25T14:33:36.229168Z node 7 :SQS TRACE: action.h:264: Request [1cb1f08b-60c93a2d-52104445-9201c260] SendReplyAndDie from action actor { ListQueueTags { RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" } } 2025-06-25T14:33:36.229244Z node 7 :SQS TRACE: proxy_service.h:35: Request [1cb1f08b-60c93a2d-52104445-9201c260] Sending sqs response: { ListQueueTags { RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" } RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true } 2025-06-25T14:33:36.229353Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse ListQueueTags { RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" } RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true 2025-06-25T14:33:36.229409Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7519895101306713810:2750]: ListQueueTags { RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" } RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true 2025-06-25T14:33:36.229454Z node 7 :SQS TRACE: service.cpp:1483: Dec local leader ref for actor [7:7519895101306713812:5432]. Found: 1 2025-06-25T14:33:36.229790Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [1cb1f08b-60c93a2d-52104445-9201c260] HandleResponse: { ListQueueTags { RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" } RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true }, status: OK 2025-06-25T14:33:36.229860Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [1cb1f08b-60c93a2d-52104445-9201c260] Sending reply from proxy actor: { ListQueueTags { RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" } RequestId: "1cb1f08b-60c93a2d-52104445-9201c260" FolderId: "folder4" ResourceId: "000000000000000301v0" IsFifo: true } 2025-06-25T14:33:36.230067Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [ListQueueTags] requestId [1cb1f08b-60c93a2d-52104445-9201c260] Got succesfult GRPC response. 2025-06-25T14:33:36.230117Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ListQueueTags] requestId [1cb1f08b-60c93a2d-52104445-9201c260] reply ok 2025-06-25T14:33:36.230216Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [ListQueueTags] requestId [1cb1f08b-60c93a2d-52104445-9201c260] Send metering event. HttpStatusCode: 200 IsFifo: 1 FolderId: folder4 RequestSizeInBytes: 530 ResponseSizeInBytes: 179 SourceAddress: 388f:400:6050:0:208f:400:6050:0 ResourceId: 000000000000000301v0 Action: ListQueueTags 2025-06-25T14:33:36.230315Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:38058) <- (200 , 2 bytes) 2025-06-25T14:33:36.230435Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:38058) connection closed Http output full {} >> TChargeBTreeIndex::FewNodes_Groups [GOOD] >> TChargeBTreeIndex::FewNodes_History >> KqpSinkLocks::EmptyRange |79.2%| [TA] $(B)/ydb/core/sys_view/query_stats/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TTxAllocatorClientTest::ZeroRange [GOOD] >> IssuesTextFiltering::ShouldRemoveDatabasePath [GOOD] >> SplitterBasic::EqualSplitByMaxBytesLimitPerChunk [GOOD] >> TPartBtreeIndexIteration::OneNode_History [GOOD] >> TPartBtreeIndexIteration::OneNode_Slices |79.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |79.2%| [TA] {RESULT} $(B)/ydb/core/sys_view/query_stats/ut/test-results/unittest/{meta.json ... results_accumulator.log} |79.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |79.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection >> Cache::Test1 [GOOD] >> Cache::Test2 [GOOD] >> Cache::Test3 [GOOD] >> THealthCheckTest::Issues100VCardMerging [GOOD] >> THealthCheckTest::LayoutCorrect ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads [GOOD] Test command err: 2025-06-25T14:32:06.638039Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894715217316398:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:06.638095Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013c1/r3tmp/tmpbYduIL/pdisk_1.dat 2025-06-25T14:32:07.362542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:07.362704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:07.382709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:07.464678Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31872, node 1 2025-06-25T14:32:07.636552Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:07.861024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:07.861049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:07.861056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:07.861224Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64750 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:08.250418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:64750 2025-06-25T14:32:11.640752Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894715217316398:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:11.640839Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:12.537074Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894740987121069:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:12.537239Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:12.851649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:13.136714Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894745282088554:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.136816Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.137006Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894745282088559:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:13.141221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:13.180550Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894745282088561:2319], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:32:13.269720Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894745282088637:2817] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:13.418297Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykr1tjbfya72c92zsmb3bg5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.464532Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jykr1twba318d4b70t82n5pk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.490623Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jykr1txg6ycg9zz22epf57rz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.537082Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jykr1tyve275h7gbxfmyvq5b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.590641Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jykr1v0k8dvpd7th7xp73yq4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.610081Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jykr1v17cdd89x6jj31vs1cf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.642204Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jykr1v277pwwqngb40j3kry5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.669780Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710668. Ctx: { TraceId: 01jykr1v2w9bg1je23kqz3j8a2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.702574Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710669. 
Ctx: { TraceId: 01jykr1v41dk4mf42s6yrgdp0a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.725663Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710670. Ctx: { TraceId: 01jykr1v4v8r4x4t73znkhemf3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.758528Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710671. Ctx: { TraceId: 01jykr1v5re103exje1zzww5ty, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.786021Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710672. Ctx: { TraceId: 01jykr1v6q7qcjgmnd6c4t86rj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.814003Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { TraceId: 01jykr1v7kd3cxnxp71p3crn4k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13.846306Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710674. Ctx: { TraceId: 01jykr1v8m55m4m6sw5w3gqpxb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFjZDAyODAtNzEwYmRiYzUtNTllMDYzZTEtYmQyOTYzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:32:13 ... 1474976722766. Ctx: { TraceId: 01jykr4b8mapvq8rvc7q3ebf1p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODk3MDBmMjgtNzEyZDMyODgtNDczMjlmOTEtODY5YWZlNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.779187Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722767. Ctx: { TraceId: 01jykr4b8w3axjvkd7jdrx8dv0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjY2Mjg3MDQtNTZiYzQ2OTctYzkxOWU3MTgtYTg0NzZhY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.780300Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722768. Ctx: { TraceId: 01jykr4b8w5yftqxmmfmx1nykd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDFmMmM4YzgtNWU0MDIwZDUtODAxMWVlZTEtMTQ4OWFjMjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.793245Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722774. Ctx: { TraceId: 01jykr4b9c3vvbbfsz6pv7drr5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjRhMzFmLTVjMDdmZGE3LTY1ZGMzOGZmLTlmMTdmNDRj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.793266Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722773. 
Ctx: { TraceId: 01jykr4b9cddpyac6rsttxrxm3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDYxNjBlMDMtZmNmNzM5YWUtMjY4ZDExOGUtY2QzYWZkOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.794185Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722769. Ctx: { TraceId: 01jykr4b9c131b3c20dpy12byg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTA3ZjMxMDgtOWNmYWYyMTctYmI3MzRkYzUtMTE5ODE1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.794279Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722770. Ctx: { TraceId: 01jykr4b9c75pw4sqgtwdk0mns, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWU4Y2U2MmQtMTdkNDI2Y2QtYzM2ZjZlOGQtOGU4OWNjZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.794947Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722771. Ctx: { TraceId: 01jykr4b9c93kknnf8pjz54ezd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmUwYzVkYzAtY2I3NThiYjEtYWJiYzZkNzItMjM4YjMzZTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.795347Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722772. Ctx: { TraceId: 01jykr4b9c6c2rtgsrv4qgs782, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTE1NmZmZmQtNmQxMWQ2OS03OTkxOTk4Ni0yNDJiNWM1Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.805207Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722775. Ctx: { TraceId: 01jykr4b9j2p2y1y2b24zthys0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDFmMmM4YzgtNWU0MDIwZDUtODAxMWVlZTEtMTQ4OWFjMjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.806272Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722776. Ctx: { TraceId: 01jykr4b9jb6g7ka64p0sk597p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjY2Mjg3MDQtNTZiYzQ2OTctYzkxOWU3MTgtYTg0NzZhY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.807060Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722777. Ctx: { TraceId: 01jykr4b9j7pm4g6jr785tpqqy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODk3MDBmMjgtNzEyZDMyODgtNDczMjlmOTEtODY5YWZlNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.807856Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722778. Ctx: { TraceId: 01jykr4b9j97aq70kc2vvrq9k8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTY4MjUwMzEtYjUyZTllMC1mZGQ5NzE3YS1hYThiNDMyNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-25T14:33:35.822927Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722779. Ctx: { TraceId: 01jykr4ba04pa2g8qyfnbyvztc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTE1NmZmZmQtNmQxMWQ2OS03OTkxOTk4Ni0yNDJiNWM1Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:33:35.825205Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722780. Ctx: { TraceId: 01jykr4ba51fj65s2tsj7tvbfb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTA3ZjMxMDgtOWNmYWYyMTctYmI3MzRkYzUtMTE5ODE1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.825666Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722781. Ctx: { TraceId: 01jykr4ba99c53k9s08khg0s4e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWU4Y2U2MmQtMTdkNDI2Y2QtYzM2ZjZlOGQtOGU4OWNjZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.826082Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722782. Ctx: { TraceId: 01jykr4ba56mfg7zc4pzfdnn47, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDYxNjBlMDMtZmNmNzM5YWUtMjY4ZDExOGUtY2QzYWZkOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.826900Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722783. Ctx: { TraceId: 01jykr4ba52jh5hznhh0r6z8tv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjRhMzFmLTVjMDdmZGE3LTY1ZGMzOGZmLTlmMTdmNDRj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.831011Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722784. Ctx: { TraceId: 01jykr4badegv0mjdhsxdz02g6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjY2Mjg3MDQtNTZiYzQ2OTctYzkxOWU3MTgtYTg0NzZhY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861933016 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-25T14:33:35.879051Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722786. Ctx: { TraceId: 01jykr4bajdkwd8kqhbyhqzy8r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTY4MjUwMzEtYjUyZTllMC1mZGQ5NzE3YS1hYThiNDMyNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.880080Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722787. Ctx: { TraceId: 01jykr4bam4ecyt6zywxmc96rx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODk3MDBmMjgtNzEyZDMyODgtNDczMjlmOTEtODY5YWZlNzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.882153Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722785. 
Ctx: { TraceId: 01jykr4baj469cmx5zp24negcm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDFmMmM4YzgtNWU0MDIwZDUtODAxMWVlZTEtMTQ4OWFjMjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.883248Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722788. Ctx: { TraceId: 01jykr4bb2brnt6xp1vnx7sqnq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTA3ZjMxMDgtOWNmYWYyMTctYmI3MzRkYzUtMTE5ODE1ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.884381Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722789. Ctx: { TraceId: 01jykr4bbgbtccs96pwsjwa5s9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjY2Mjg3MDQtNTZiYzQ2OTctYzkxOWU3MTgtYTg0NzZhY2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.887692Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722793. Ctx: { TraceId: 01jykr4bbg18qbapyfgkatz148, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTE1NmZmZmQtNmQxMWQ2OS03OTkxOTk4Ni0yNDJiNWM1Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.888154Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722790. Ctx: { TraceId: 01jykr4bb8a034r9rckd9tgpft, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDYxNjBlMDMtZmNmNzM5YWUtMjY4ZDExOGUtY2QzYWZkOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.888570Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722791. Ctx: { TraceId: 01jykr4bbdcjhjhy0qnpfnark2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjRhMzFmLTVjMDdmZGE3LTY1ZGMzOGZmLTlmMTdmNDRj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:33:35.888942Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976722792. Ctx: { TraceId: 01jykr4bbdfy5g5wkqv0w3wjgd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWU4Y2U2MmQtMTdkNDI2Y2QtYzM2ZjZlOGQtOGU4OWNjZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750861933016 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... 
(TRUNCATED) Table has 2 shards >> EscapingBasics::HideSecretsOverEncloseSecretShouldWork [GOOD] >> EscapingBasics::EscapeStringShouldWork [GOOD] >> TChargeBTreeIndex::FewNodes_History [GOOD] >> TChargeBTreeIndex::FewNodes_Sticky >> KqpPrefixedVectorIndexes::OrderByCosineLevel2+Nullable-UseSimilarity [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity >> IcebergClusterProcessor::ValidateDdlCreationForHiveWithS3 [GOOD] >> IcebergClusterProcessor::ValidateRiseErrors [GOOD] >> EscapingBasics::HideSecretsShouldWork [GOOD] >> IcebergClusterProcessor::ValidateConfigurationWithoutCatalog [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::SharedWhenTroublesWithExclusiveNodes [GOOD] Test command err: 2025-06-25T14:33:06.752953Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:06.753628Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:06.753754Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:06.753927Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:06.753976Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:06.754067Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d9d/r3tmp/tmpfR5bD8/pdisk_1.dat 2025-06-25T14:33:07.200236Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17966, node 1 TClient is connected to server localhost:64603 2025-06-25T14:33:07.726291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:07.726356Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:07.726388Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:07.726953Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: DEGRADED issue_log { id: "YELLOW-70fb-1231c6b1" status: YELLOW message: "Database has multiple issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" reason: "YELLOW-5321-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-1" reason: "YELLOW-e9e2-1231c6b1-2" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-1" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 1 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-2" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 2 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-5321-1231c6b1" status: YELLOW message: "Storage degraded" location { database { name: "/Root" } } reason: "YELLOW-595f-1231c6b1-80c02825" type: "STORAGE" level: 2 } issue_log { id: "YELLOW-595f-1231c6b1-80c02825" status: YELLOW message: "Pool degraded" location { storage { pool { name: "static" } } database { name: "/Root" } } reason: "YELLOW-ef3e-1231c6b1-0" type: "STORAGE_POOL" level: 3 } issue_log { id: "RED-4847-1231c6b1-1-0-3-55-0-55" status: RED message: "VDisk is not available" location { storage { node { id: 1 host: "::1" port: 12001 } pool { name: "static" group { vdisk { id: "0-3-55-0-55" } } } } database { name: "/Root" } } type: "VDISK" level: 5 } issue_log { id: "YELLOW-ef3e-1231c6b1-0" 
status: YELLOW message: "Group degraded" location { storage { pool { name: "static" group { id: "0" } } } database { name: "/Root" } } reason: "RED-4847-1231c6b1-1-0-3-55-0-55" type: "STORAGE_GROUP" level: 4 } location { id: 1 host: "::1" port: 12001 } 2025-06-25T14:33:16.001170Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:16.001325Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:16.001520Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:16.001760Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:16.002108Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:16.002178Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d9d/r3tmp/tmpkeDkir/pdisk_1.dat 2025-06-25T14:33:16.458396Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23336, node 3 TClient is connected to server localhost:23047 2025-06-25T14:33:17.114637Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:17.114706Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:17.114746Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:17.115380Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:22.515142Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:420:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:22.515585Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:22.515723Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d9d/r3tmp/tmpCfC86W/pdisk_1.dat 2025-06-25T14:33:22.893909Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28230, node 5 TClient is connected to server localhost:62232 2025-06-25T14:33:23.386321Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:23.386391Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:23.386429Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:23.386925Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:29.219812Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:497:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:29.220185Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:29.220521Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d9d/r3tmp/tmp3gPAlH/pdisk_1.dat 2025-06-25T14:33:29.722649Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29802, node 7 TClient is connected to server localhost:22264 2025-06-25T14:33:30.377951Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:30.378025Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:30.378072Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:30.378697Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:36.362812Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:36.363314Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:36.363396Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d9d/r3tmp/tmpS5D6Ft/pdisk_1.dat 2025-06-25T14:33:37.046620Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7654, node 10 TClient is connected to server localhost:27436 2025-06-25T14:33:37.877526Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:37.877598Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:37.877658Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:37.878436Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::ZeroRange [GOOD] Test command err: 2025-06-25T14:31:43.540230Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-25T14:31:43.540808Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-25T14:31:43.541519Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-25T14:31:43.543221Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:43.543657Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-25T14:31:43.554306Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:43.554420Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:43.554533Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-25T14:31:43.554632Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:43.554749Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:43.554828Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-25T14:31:43.554931Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-25T14:31:43.555560Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#5000 2025-06-25T14:31:43.555966Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:43.556043Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:31:43.556123Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 5000 2025-06-25T14:31:43.556180Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 0 to# 5000 |79.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> SplitterBasic::EqualSplitByMaxBytesLimitPerChunk [GOOD] |79.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> Cache::Test3 [GOOD] >> Cache::Test4 [GOOD] >> Cache::Test5 |79.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |79.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |79.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |79.3%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq >> 
TestKinesisHttpProxy::TestEmptyHttpBody [GOOD] |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |79.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> EscapingBasics::EscapeStringShouldWork [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRowsSmallBlobs [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRowsLargeBlobs >> EntityId::Distinct [GOOD] >> EntityId::MinId [GOOD] >> EntityId::MaxId [GOOD] >> IcebergClusterProcessor::ValidateDdlCreationForHadoopWithS3 [GOOD] >> IcebergClusterProcessor::ValidateConfigurationWithoutWarehouse [GOOD] |79.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> IcebergClusterProcessor::ValidateConfigurationWithoutCatalog [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> IcebergClusterProcessor::ValidateRiseErrors [GOOD] Test command err: test case: 1 test case: 2 test case: 3 test case: 4 test case: 5 test case: 6 test case: 7 test case: 8 test case: 9 >> BuildStatsHistogram::Single_Slices [GOOD] >> BuildStatsHistogram::Single_History >> TChargeBTreeIndex::FewNodes_Sticky [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History |79.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> EntityId::MaxId [GOOD] >> Cache::Test5 [GOOD] >> EntityId::CheckId [GOOD] |79.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> IcebergClusterProcessor::ValidateConfigurationWithoutWarehouse [GOOD] |79.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |79.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |79.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |79.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> EntityId::CheckId [GOOD] >> EntityId::Order >> CommitOffset::DistributedTxCommit_Flat_CheckOffsetCommitForDifferentCases [GOOD] >> TPersQueueMirrorer::TestBasicRemote [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::FindSubDomainPathIdActorAsync [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:29:31.974764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:29:31.974889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:31.974924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:29:31.974963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using 
default configuration 2025-06-25T14:29:31.974998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:29:31.975026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:29:31.975084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:29:31.975151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:29:31.975706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:29:31.975976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:29:32.217424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:29:32.217480Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:32.233742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:29:32.233914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:29:32.234077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:29:32.255081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:29:32.255363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:29:32.255938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.256104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:29:32.260838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:32.260998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:29:32.261971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:32.262039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:29:32.262222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:29:32.262294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-06-25T14:29:32.262338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:29:32.262444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.290080Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:29:32.602560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:29:32.602772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.602996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:29:32.603041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:29:32.603240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:29:32.603327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:29:32.612094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.612325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:29:32.612552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.612607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:29:32.612649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:29:32.612684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:29:32.618494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.618558Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:29:32.618601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:29:32.620905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.620969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:29:32.621011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:32.621061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:29:32.630599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:29:32.632691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:29:32.632897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:29:32.633779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:29:32.633913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:29:32.633950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:32.634217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:29:32.634280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:29:32.634430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:29:32.634513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing 
txId 1 2025-06-25T14:29:32.640929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:29:32.641012Z node 1 :FLAT_TX_SCHEMESHARD ... : 3], 2 2025-06-25T14:33:34.979508Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:33:34.979609Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:33:34.979652Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:33:34.979729Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T14:33:34.979797Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:33:34.981262Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:33:34.981358Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:33:34.981397Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:33:34.981432Z node 15 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T14:33:34.981469Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:33:34.981562Z node 15 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:33:34.986205Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:33:34.994893Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:33:35.002307Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:33:35.002411Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- 
TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:33:35.003065Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:33:35.003211Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:33:35.003280Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [15:520:2470] TestWaitNotification: OK eventTxId 102 2025-06-25T14:33:35.003990Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomenA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:33:35.004369Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomenA" took 413us result status StatusSuccess 2025-06-25T14:33:35.005065Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomenA" PathDescription { Self { Name: "SubDomenA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ChildrenExist: false BalancerTabletID: 72075186233409547 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 247 AccountSize: 247 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:33:35.005804Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomenA/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true 
ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:33:35.006055Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomenA/Topic1" took 297us result status StatusSuccess 2025-06-25T14:33:35.006642Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomenA/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 247 AccountSize: 247 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:33:35.540057Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:33:35.540457Z node 15 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 3 took 422us result status StatusSuccess 2025-06-25T14:33:35.541233Z node 15 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomenA/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 3 
TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 247 AccountSize: 247 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:33:35.616984Z node 15 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__find_subdomain_path_id.cpp:20: FindTabletSubDomainPathId for tablet 72075186233409546 >> EntityId::Order [GOOD] >> EscapingBasics::EncloseSecretShouldWork [GOOD] >> EscapingBasics::EncloseAndEscapeStringShouldWork [GOOD] >> SplitterBasic::EqualSplitByMaxRowsLimitPerChunk >> THealthCheckTest::IgnoreOtherGenerations [GOOD] >> THealthCheckTest::IgnoreServerlessWhenNotSpecific >> SplitterBasic::EqualSplitByMaxRowsLimitPerChunk [GOOD] >> SplitterBasic::LimitExceed [GOOD] >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable2 [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRows >> KqpTx::TooManyTx |79.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> EscapingBasics::EncloseAndEscapeStringShouldWork [GOOD] |79.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/common/ut/unittest >> SplitterBasic::LimitExceed [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataPublishThenSplit >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataSplitThenPublish ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::TestEmptyHttpBody [GOOD] Test command err: 2025-06-25T14:32:24.230440Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894792380558286:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:24.244794Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ca3/r3tmp/tmpblgopt/pdisk_1.dat 2025-06-25T14:32:25.130057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:25.130165Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-25T14:32:25.134356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:25.277644Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:25.282058Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894792380558261:2080] 1750861944209591 != 1750861944209594 2025-06-25T14:32:25.311297Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2050, node 1 2025-06-25T14:32:25.584996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:25.585016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:25.585025Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:25.585117Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7486 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:32:26.370433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:26.393551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:7486 2025-06-25T14:32:26.720005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T14:32:26.729232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:32:26.730759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-25T14:32:26.768745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-25T14:32:26.775332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.066448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:32:27.147968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-06-25T14:32:27.152401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:32:27.245941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.359976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:32:27.439748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.539698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.625561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.702312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:27.742024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:29.236441Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894792380558286:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:29.236529Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:31.113093Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894822445330723:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:31.113231Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:31.113552Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894822445330735:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:31.117511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:31.133324Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894822445330737:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-25T14:32:31.199455Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894822445330788:2875] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046 ... ype { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T14:33:41.041913Z node 8 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 107ms 2025-06-25T14:33:41.042442Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976710686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 
2025-06-25T14:33:41.042482Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-06-25T14:33:41.042634Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 107ms 2025-06-25T14:33:41.043242Z node 8 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976710686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T14:33:41.049510Z node 8 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976710685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T14:33:41.049546Z node 8 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Attempt 1 execution duration: 131ms 2025-06-25T14:33:41.049807Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976710685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 
ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T14:33:41.049847Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-06-25T14:33:41.049939Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 131ms 2025-06-25T14:33:41.050344Z node 8 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976710685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T14:33:41.558230Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7519895119217303762:2407]: Pool not found 2025-06-25T14:33:41.559090Z node 8 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-06-25T14:33:41.654188Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7519895119217303760:2406]: Pool not found 2025-06-25T14:33:41.654449Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-06-25T14:33:41.672526Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519895123512271178:2425], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:41.672685Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [8:7519895123512271179:2426], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-06-25T14:33:41.672766Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:41.836597Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:37216) incoming connection opened 2025-06-25T14:33:41.836699Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:37216) -> (POST /Root, 4 bytes) 2025-06-25T14:33:41.836835Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [5834:0:6050:0:4034:0:6050:0] request [CreateStream] url [/Root] database [/Root] requestId: 36c87939-5b33df20-7eaec179-3d60f67d 2025-06-25T14:33:41.837461Z node 8 :HTTP_PROXY INFO: http_req.cpp:1211: http request [CreateStream] requestId [36c87939-5b33df20-7eaec179-3d60f67d] reply with status: BAD_REQUEST message: ydb/core/http_proxy/json_proto_conversion.h:395: Top level of json value is not a map Http output full {"__type":"MissingParameter","message":"ydb/core/http_proxy/json_proto_conversion.h:395: Top level of json value is not a map"} 2025-06-25T14:33:41.837763Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:37216) <- (400 MissingParameter, 127 bytes) 2025-06-25T14:33:41.837815Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:289: (#37,[::1]:37216) Request: POST /Root HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: kinesisApi.CreateStream X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked null 2025-06-25T14:33:41.837856Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:37216) Response: HTTP/1.1 400 MissingParameter Connection: close x-amzn-requestid: 36c87939-5b33df20-7eaec179-3d60f67d x-amz-crc32: 851558042 Content-Type: application/x-amz-json-1.1 Content-Length: 127 2025-06-25T14:33:41.837969Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:37216) connection closed >> TTxDataShardUploadRows::TestUploadRows >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflow >> TPartBtreeIndexIteration::OneNode_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups_Slices |79.3%| [TA] $(B)/ydb/core/fq/libs/common/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite2-withSink >> TopicAutoscaling::PartitionSplit_ManySession_existed_AutoscaleAwareSDK [GOOD] >> KqpSnapshotIsolation::TSimpleOltp [GOOD] >> KqpSnapshotIsolation::TSimpleOlap [GOOD] >> KqpTx::RollbackTx ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable2 [GOOD] Test command err: 2025-06-25T14:32:45.375228Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894880165806891:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:45.384705Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016ce/r3tmp/tmpDr8nTo/pdisk_1.dat 2025-06-25T14:32:46.225710Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:46.225794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:46.237271Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:46.256549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:46.313996Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24974 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:32:46.759656Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519894880165806904:2143] Handle TEvNavigate describe path dc-1 2025-06-25T14:32:46.795479Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519894884460774649:2440] HANDLE EvNavigateScheme dc-1 2025-06-25T14:32:46.795612Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519894880165806929:2157], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:46.795691Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519894884460774631:2434][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519894880165806929:2157], cookie# 1 2025-06-25T14:32:46.797222Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894884460774635:2434][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894884460774632:2434], cookie# 1 2025-06-25T14:32:46.797261Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894884460774636:2434][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894884460774633:2434], cookie# 1 2025-06-25T14:32:46.797278Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894884460774637:2434][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894884460774634:2434], cookie# 1 2025-06-25T14:32:46.797318Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894875870839287:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894884460774637:2434], cookie# 1 2025-06-25T14:32:46.797360Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894884460774637:2434][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894875870839287:2057], cookie# 1 2025-06-25T14:32:46.797387Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894884460774631:2434][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894884460774634:2434], cookie# 1 2025-06-25T14:32:46.797425Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894884460774631:2434][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:32:46.797461Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894875870839281:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894884460774635:2434], cookie# 1 2025-06-25T14:32:46.797483Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894875870839284:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894884460774636:2434], cookie# 1 2025-06-25T14:32:46.797501Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894884460774635:2434][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894875870839281:2051], cookie# 1 2025-06-25T14:32:46.797515Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894884460774636:2434][/dc-1] Handle 
NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894875870839284:2054], cookie# 1 2025-06-25T14:32:46.797534Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894884460774631:2434][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894884460774632:2434], cookie# 1 2025-06-25T14:32:46.797546Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894884460774631:2434][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:32:46.797559Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894884460774631:2434][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894884460774633:2434], cookie# 1 2025-06-25T14:32:46.797579Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519894884460774631:2434][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:32:46.797636Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519894880165806929:2157], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:32:46.810869Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519894880165806929:2157], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519894884460774631:2434] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:32:46.811620Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519894880165806929:2157], cacheItem# { Subscriber: { Subscriber: [1:7519894884460774631:2434] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-25T14:32:46.814439Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519894884460774652:2442], recipient# [1:7519894884460774649:2440], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:32:46.814615Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7519894884460774649:2440] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:32:46.877634Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519894884460774649:2440] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:32:46.881151Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519894884460774649:2440] Handle TEvDescribeSchemeResult Forward to# [1:7519894884460774648:2439] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:32:46.951430Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519894880165806904:2143] Handle TEvProposeTransaction 2025-06-25T14:32:46.951459Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:75198948801 ... 
ger/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:41.966469Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7519895111608293406:2290], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:42.189412Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [13:7519895053691209852:2112], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:42.189595Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [13:7519895053691209852:2112], cacheItem# { Subscriber: { Subscriber: [13:7519895113820752157:2199] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:42.189737Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [13:7519895126705654089:2213], recipient# [13:7519895126705654088:2288], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:42.190273Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:42.434910Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [11:7519895055773718382:2112], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:42.435091Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [11:7519895055773718382:2112], cacheItem# { Subscriber: { Subscriber: [11:7519895111608293412:2207] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 
2025-06-25T14:33:42.435199Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [11:7519895124493195350:2224], recipient# [11:7519895124493195349:2300], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:42.435518Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:42.472800Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [11:7519895055773718382:2112], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:42.472965Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [11:7519895055773718382:2112], cacheItem# { Subscriber: { Subscriber: [11:7519895055773718398:2117] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:42.473077Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [11:7519895124493195352:2225], recipient# [11:7519895124493195351:2301], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:42.861484Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [11:7519895055773718382:2112], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:42.861652Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# 
[11:7519895055773718382:2112], cacheItem# { Subscriber: { Subscriber: [11:7519895111608293410:2205] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:42.861715Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [11:7519895055773718382:2112], cacheItem# { Subscriber: { Subscriber: [11:7519895111608293411:2206] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:42.861839Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [11:7519895124493195353:2226], recipient# [11:7519895111608293406:2290], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:42.862293Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7519895111608293406:2290], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:42.962914Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [11:7519895055773718382:2112], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:33:42.963084Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [11:7519895055773718382:2112], cacheItem# { Subscriber: { Subscriber: [11:7519895055773718398:2117] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:33:42.963204Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [11:7519895124493195355:2227], recipient# [11:7519895124493195354:2302], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TBackupCollectionTests::HiddenByFeatureFlag |79.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotIsolation::TSimpleOlap [GOOD] >> TBackupCollectionTests::CreateAbsolutePath |79.3%| [TA] $(B)/ydb/core/tx/schemeshard/ut_base/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TPersQueueMirrorer::TestBasicRemote [GOOD] Test command err: 2025-06-25T14:30:41.277969Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894350070055167:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:41.278158Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:41.909983Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001922/r3tmp/tmpfKgIHC/pdisk_1.dat 2025-06-25T14:30:42.110249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:42.110343Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:42.113004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:42.138560Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:42.152412Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894350070055070:2080] 1750861841193456 != 1750861841193459 TServer::EnableGrpc on GrpcPort 8129, node 1 2025-06-25T14:30:42.252894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001922/r3tmp/yandexy7srV6.tmp 2025-06-25T14:30:42.252925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001922/r3tmp/yandexy7srV6.tmp 2025-06-25T14:30:42.253160Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001922/r3tmp/yandexy7srV6.tmp 2025-06-25T14:30:42.253279Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:42.305638Z INFO: TTestServer started on Port 6153 GrpcPort 8129 2025-06-25T14:30:42.384499Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6153 PQClient connected to localhost:8129 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:42.785666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:42.798932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:42.815717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:30:42.957259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:42.973064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-25T14:30:45.306508Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894367249925048:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.306633Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.307082Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894367249925060:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.311803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:45.329769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710662, at schemeshard: 72057594046644480 2025-06-25T14:30:45.330103Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894367249925062:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:30:45.698128Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894367249925127:2444] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:45.734577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:45.835257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:45.867603Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894367249925135:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:45.870299Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NzM3N2I5ODAtZWYwMmEwMWUtOWU0NzI3OTEtMTQ5MTgxZmQ=, ActorId: [1:7519894367249925046:2299], ActorState: ExecuteState, TraceId: 01jykqz4sp8wh21f1y2cg2he2p, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:45.872690Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:45.918507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519894371544892716:2620] 2025-06-25T14:30:46.273158Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894350070055167:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:46.273238Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-25T14:30:51.452665Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-25T14:30:51.499452Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:30:51.500669Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519894393019729361:2685], Recipient [1:7519894350070055384:2139]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:51.500709Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:51.500721Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:30:51.500760Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519894393019729357:2682], Recipient [1:7519894350070055384:2139]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-25T14:30:51.500774Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:30: ... -06-25T14:33:41.728685Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:41.728773Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:41.728806Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:41.728841Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:33:41.729226Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [6:7519895020967066642:2139]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-06-25T14:33:41.729264Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5132: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-06-25T14:33:41.729290Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-25T14:33:41.729310Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-06-25T14:33:41.729377Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.001522s, queue# 1 2025-06-25T14:33:41.736533Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [6:7519895020967066642:2139]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-06-25T14:33:41.736595Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5132: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-06-25T14:33:41.736620Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:33:41.792570Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [6:7519895020967066642:2139]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:33:41.792625Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:33:41.792685Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [6:7519895020967066642:2139], Recipient [6:7519895020967066642:2139]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:33:41.792707Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:33:41.832553Z node 6 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519895072506675246:2417], Partition 0, Sender [0:0:0], Recipient [6:7519895072506675303:2420], Cookie: 0 2025-06-25T14:33:41.832641Z node 6 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519895072506675303:2420]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:41.832679Z node 6 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:41.832739Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:41.832831Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:41.832865Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:41.832903Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:33:41.872542Z :INFO: [/Root] [/Root] [94bf9e33-1a7232d6-fe4e845e-3894a18b] Closing read session. Close timeout: 0.000000s 2025-06-25T14:33:41.872630Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:2:3 2025-06-25T14:33:41.872703Z :INFO: [/Root] [/Root] [94bf9e33-1a7232d6-fe4e845e-3894a18b] Counters: { Errors: 0 CurrentSessionLifetimeMs: 5039 BytesRead: 27 MessagesRead: 3 BytesReadCompressed: 87 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:33:41.872850Z :NOTICE: [/Root] [/Root] [94bf9e33-1a7232d6-fe4e845e-3894a18b] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:33:41.872914Z :DEBUG: [/Root] [/Root] [94bf9e33-1a7232d6-fe4e845e-3894a18b] [] Abort session to cluster 2025-06-25T14:33:41.873547Z :NOTICE: [/Root] [/Root] [94bf9e33-1a7232d6-fe4e845e-3894a18b] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:33:41.877284Z node 6 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_6_1_11237933362359559129_v1 grpc read done: success# 0, data# { } 2025-06-25T14:33:41.877319Z node 6 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_6_1_11237933362359559129_v1 grpc read failed 2025-06-25T14:33:41.877350Z node 6 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_6_1_11237933362359559129_v1 grpc closed 2025-06-25T14:33:41.877394Z node 6 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_6_1_11237933362359559129_v1 is DEAD 2025-06-25T14:33:41.881833Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][test-topic] pipe [6:7519895102571447551:2779] disconnected; active server actors: 1 2025-06-25T14:33:41.881877Z node 6 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][test-topic] pipe [6:7519895102571447551:2779] client test-consumer disconnected session test-consumer_6_1_11237933362359559129_v1 2025-06-25T14:33:41.882043Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 269877764, Sender [6:7519895102571447555:3218], Recipient [6:7519895072506675246:2417]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:33:41.882086Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5326: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:33:41.882115Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:2906: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:33:41.882137Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session test-consumer_6_1_11237933362359559129_v1 2025-06-25T14:33:41.882179Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [6:7519895102571447554:2782] destroyed 2025-06-25T14:33:41.882224Z node 6 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_6_1_11237933362359559129_v1 2025-06-25T14:33:41.913548Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [6:7519895072506675243:2416], Recipient [6:7519895020967066642:2139]: NKikimrSchemeOp.TDescribePath PathId: 13 SchemeshardId: 72057594046644480 2025-06-25T14:33:41.913597Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:33:41.936540Z node 6 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519895072506675246:2417], Partition 0, Sender [0:0:0], Recipient [6:7519895072506675303:2420], Cookie: 0 2025-06-25T14:33:41.936625Z node 6 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519895072506675303:2420]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:41.936657Z node 6 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:41.936719Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:41.936798Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: 
StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:41.936831Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:41.936873Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:33:42.038005Z node 6 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519895072506675246:2417], Partition 0, Sender [0:0:0], Recipient [6:7519895072506675303:2420], Cookie: 0 2025-06-25T14:33:42.038085Z node 6 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519895072506675303:2420]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:42.038115Z node 6 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:42.038163Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:42.038237Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:42.038279Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:42.038309Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:33:42.138494Z node 6 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7519895072506675246:2417], Partition 0, Sender [0:0:0], Recipient [6:7519895072506675303:2420], Cookie: 0 2025-06-25T14:33:42.138588Z node 6 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7519895072506675303:2420]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:42.138626Z node 6 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:42.138714Z node 6 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:42.138819Z node 6 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:42.138853Z node 6 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:42.138901Z node 6 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 >> TPartBtreeIndexIteration::OneNode_Groups_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_History_Slices >> KqpSinkLocks::EmptyRangeOlap >> KqpTx::RollbackByIdle >> TBackupCollectionTests::HiddenByFeatureFlag [GOOD] >> TBackupCollectionTests::DisallowedPath >> THealthCheckTest::ShardsNoLimit [GOOD] >> KqpSinkMvcc::OltpMultiSinksNoSinks >> THealthCheckTest::LayoutCorrect [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::PartitionSplit_ManySession_existed_AutoscaleAwareSDK [GOOD] Test command err: 2025-06-25T14:30:48.296857Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894378467213827:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:48.313037Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:48.735344Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018de/r3tmp/tmppbiDHu/pdisk_1.dat 2025-06-25T14:30:49.052092Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894378467213628:2080] 1750861848236108 != 1750861848236111 2025-06-25T14:30:49.057019Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:49.113499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:49.113604Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:49.117780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3781, node 1 2025-06-25T14:30:49.309150Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:49.501185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0018de/r3tmp/yandexiRmjyC.tmp 2025-06-25T14:30:49.501207Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0018de/r3tmp/yandexiRmjyC.tmp 2025-06-25T14:30:49.501364Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0018de/r3tmp/yandexiRmjyC.tmp 2025-06-25T14:30:49.501504Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:49.581698Z INFO: TTestServer started on Port 26291 GrpcPort 3781 TClient is connected to server localhost:26291 PQClient connected to localhost:3781 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:50.145603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:50.172892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:50.188629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:30:50.199444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:30:50.375941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:52.601480Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894395647083589:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:52.601614Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:52.601916Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894395647083626:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:52.609464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:52.635678Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894395647083628:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:30:53.165966Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894395647083692:2448] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:53.242143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:53.283426Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894378467213827:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:53.283481Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:53.299179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:53.350813Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894399942050997:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:53.353458Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MzUxOTBjYzgtZTBiY2NmNzEtNWM5NDFlYTEtN2MzMGQzYQ==, ActorId: [1:7519894395647083587:2298], ActorState: ExecuteState, TraceId: 01jykqzbx11m3vndrfymfjnwjj, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:53.355548Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:53.412362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519894399942051293:2628] === CheckClustersList. Ok 2025-06-25T14:30:58.800755Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-25T14:30:58.831204Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:30:58.832369Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519894421416887938:2694], Recipient [1:7519894378467213964:2150]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:58.832397Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:58.832408Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:30:58.832470Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519894421416887934:2691], Recipient [1:7519894378467213964:2150]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-25T14:30:58.832484Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:30:58.911472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQue ... node 7 :PERSQUEUE TRACE: partition_write.cpp:910: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessChangeOwnerRequests. 
2025-06-25T14:33:43.798327Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:43.798411Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:43.798433Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:43.798461Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:33:43.798521Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188501, Sender [7:7519895088831574725:2427], Recipient [7:7519895088831574665:2423]: NKikimr::TEvPQ::TEvPartitionCounters 2025-06-25T14:33:43.798545Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5319: HandleHook, processing event TEvPQ::TEvPartitionCounters 2025-06-25T14:33:43.798568Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1274: [PQ: 72075186224037892] Handle TEvPQ::TEvPartitionCounters PartitionId 0 2025-06-25T14:33:43.798766Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [7:7519895088831574725:2427], Recipient [7:7519895088831574665:2423]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:43.798784Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:43.798825Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [7:7519895088831574725:2427], Recipient [7:7519895088831574665:2423]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:43.798838Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:33:43.807672Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 65538, Sender [0:0:0], Recipient [7:7519895088831574665:2423]: NActors::TEvents::TEvWakeup 2025-06-25T14:33:43.866485Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895088831574665:2423], Partition 0, Sender [0:0:0], Recipient [7:7519895088831574725:2427], Cookie: 0 2025-06-25T14:33:43.866554Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895088831574725:2427]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:43.866580Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:43.866622Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:43.866692Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:43.866718Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:43.866748Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:33:43.866796Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895127486281701:2819], Partition 2, Sender [0:0:0], Recipient [7:7519895127486281805:2838], Cookie: 0 2025-06-25T14:33:43.866825Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895127486281805:2838]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:43.866837Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:43.866859Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:43.866886Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:43.866900Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:43.866915Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:33:43.866975Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895127486281703:2820], Partition 1, Sender [0:0:0], Recipient [7:7519895127486281810:2840], Cookie: 0 2025-06-25T14:33:43.867004Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895127486281810:2840]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:43.867016Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:43.867036Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:43.867061Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:43.867074Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:43.867090Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:33:44.084500Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186224037893][test-topic] TPersQueueReadBalancer::HandleWakeup 2025-06-25T14:33:44.084554Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186224037893][test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037892 Cookie: 12 2025-06-25T14:33:44.084579Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186224037893][test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037897 Cookie: 13 2025-06-25T14:33:44.084601Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186224037893][test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037896 Cookie: 14 2025-06-25T14:33:44.084753Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895088831574665:2423], Partition 0, Sender [0:0:0], Recipient [7:7519895088831574725:2427], Cookie: 0 2025-06-25T14:33:44.084806Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895088831574725:2427]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:44.084830Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:44.084869Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:44.084929Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:44.084953Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:44.084980Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:33:44.085026Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [7:7519895088831574665:2423], Partition 0, Sender [7:7519895088831574728:2429], Recipient [7:7519895088831574725:2427], Cookie: 0 2025-06-25T14:33:44.085058Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188544, Sender [7:7519895088831574728:2429], Recipient [7:7519895088831574725:2427]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-25T14:33:44.085075Z node 7 :PERSQUEUE TRACE: partition.h:630: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-25T14:33:44.085121Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895127486281701:2819], Partition 2, Sender [0:0:0], Recipient [7:7519895127486281805:2838], Cookie: 0 2025-06-25T14:33:44.085154Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895127486281805:2838]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:44.085170Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:44.085194Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:44.085224Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:44.085239Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:44.085255Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:33:44.085294Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895127486281703:2820], Partition 1, Sender [0:0:0], Recipient [7:7519895127486281810:2840], Cookie: 0 2025-06-25T14:33:44.085323Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895127486281810:2840]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:44.085335Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:33:44.085354Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:33:44.085379Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:33:44.085391Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:33:44.085405Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:33:44.089022Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [7:7519895088831574667:2424], Recipient [7:7519895032996998757:2144]: NKikimrSchemeOp.TDescribePath PathId: 13 SchemeshardId: 72057594046644480 2025-06-25T14:33:44.089056Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme >> TBackupCollectionTests::DisallowedPath [GOOD] >> TBackupCollectionTests::ParallelCreate >> TVersions::WreckHead [GOOD] >> TVersions::WreckHeadReverse >> TBackupCollectionTests::CreateAbsolutePath [GOOD] >> TBackupCollectionTests::Create >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-1 >> TPartBtreeIndexIteration::OneNode_History_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups_History_Slices >> TTxDataShardUploadRows::TestUploadShadowRows [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowData >> TChargeBTreeIndex::FewNodes_Groups_History [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History_Sticky >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-43 >> TBackupCollectionTests::ParallelCreate [GOOD] >> TBackupCollectionTests::Drop >> TBackupCollectionTests::Create [GOOD] >> TBackupCollectionTests::CreateTwice ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::ShardsNoLimit [GOOD] Test command err: 2025-06-25T14:33:06.479209Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:06.479645Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:06.479849Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:06.479991Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:06.480248Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:06.480284Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dac/r3tmp/tmpxrCap3/pdisk_1.dat 2025-06-25T14:33:07.051775Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25560, node 1 TClient is connected to server localhost:27706 2025-06-25T14:33:07.645094Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:07.645159Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:07.645196Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:07.645608Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:16.707073Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:16.707526Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:16.707741Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:16.709901Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:16.710167Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:16.710455Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dac/r3tmp/tmpM8OWdf/pdisk_1.dat 2025-06-25T14:33:17.096220Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8313, node 3 TClient is connected to server localhost:7275 2025-06-25T14:33:17.411556Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:17.411600Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:17.411622Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:17.412015Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:25.518340Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:25.518687Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:25.518848Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:25.520777Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:634:2322], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:25.521122Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:25.521277Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dac/r3tmp/tmpidMcQQ/pdisk_1.dat 2025-06-25T14:33:26.070906Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5837, node 5 TClient is connected to server localhost:5288 2025-06-25T14:33:26.629258Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:26.629309Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:26.629332Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:26.629793Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:35.140347Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:35.141033Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:35.141299Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:35.141743Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:35.142132Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:35.142291Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dac/r3tmp/tmp45dnki/pdisk_1.dat 2025-06-25T14:33:35.530611Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61529, node 7 TClient is connected to server localhost:10477 2025-06-25T14:33:35.921579Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:35.921638Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:35.921669Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:35.922146Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:45.458547Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:45.459041Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:45.459236Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:45.461739Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:45.462033Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:45.462352Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dac/r3tmp/tmpPYh3DO/pdisk_1.dat 2025-06-25T14:33:46.047156Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5844, node 9 TClient is connected to server localhost:11975 2025-06-25T14:33:46.661287Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:46.661341Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:46.661369Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:46.661907Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflow [GOOD] >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetry >> TTxDataShardUploadRows::TestUploadRows [GOOD] >> TTxDataShardUploadRows::TestUploadRowsDropColumnRace >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster [GOOD] >> RetryPolicy::TWriteSession_SeqNoShift ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::LayoutCorrect [GOOD] Test command err: 2025-06-25T14:33:05.446113Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:05.446609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:05.446846Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:05.446976Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:05.447285Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:05.447323Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dad/r3tmp/tmpuTi4gd/pdisk_1.dat 2025-06-25T14:33:05.925916Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25286, node 1 TClient is connected to server localhost:3197 2025-06-25T14:33:06.765722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:06.765788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:06.765823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:06.766306Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:17.094847Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:17.094986Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:17.095174Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:17.095407Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:17.095772Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:17.095842Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dad/r3tmp/tmpTlx4Io/pdisk_1.dat 2025-06-25T14:33:17.579949Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8364, node 3 TClient is connected to server localhost:14292 2025-06-25T14:33:18.183717Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:18.183782Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:18.183814Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:18.186562Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:27.143134Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:27.143577Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:27.143784Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:27.145901Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:634:2322], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:27.146271Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:27.146430Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dad/r3tmp/tmpZXNiAU/pdisk_1.dat 2025-06-25T14:33:27.570489Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25039, node 5 TClient is connected to server localhost:5647 2025-06-25T14:33:28.146564Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:28.146617Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:28.146648Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:28.147068Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:37.367908Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:37.368565Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:37.368802Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:37.369154Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:37.369460Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:37.369585Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dad/r3tmp/tmp9hO4ep/pdisk_1.dat 2025-06-25T14:33:38.032744Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8158, node 7 TClient is connected to server localhost:64861 2025-06-25T14:33:38.881241Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:38.881305Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:38.881343Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:38.881826Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:46.465302Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:46.465447Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:46.465509Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dad/r3tmp/tmpTuNXCW/pdisk_1.dat 2025-06-25T14:33:47.264561Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2619, node 9 TClient is connected to server localhost:5809 2025-06-25T14:33:48.095461Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:48.095534Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:48.095575Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:48.096060Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> BuildStatsHistogram::Single_History [GOOD] >> BuildStatsHistogram::Single_History_Slices >> THealthCheckTest::IgnoreServerlessWhenNotSpecific [GOOD] >> THealthCheckTest::HealthCheckConfigUpdate >> TBackupCollectionTests::Drop [GOOD] >> TBackupCollectionTests::DropTwice >> TBackupCollectionTests::CreateTwice [GOOD] >> TBackupCollectionTests::BackupAbsentCollection >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-4 >> KqpSinkLocks::EmptyRange [GOOD] >> KqpSinkLocks::EmptyRangeAlreadyBroken |79.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |79.4%| [TA] {RESULT} $(B)/ydb/core/fq/libs/common/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> THealthCheckTest::TestReBootingTabletIsDead [GOOD] >> THealthCheckTest::UnknowPDiskState >> KqpTx::TooManyTx [GOOD] >> KqpTx::SnapshotROInteractive2 |79.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataPublishThenSplit [GOOD] |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant >> TBackupCollectionTests::BackupAbsentCollection [GOOD] |79.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |79.4%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish >> TBackupCollectionTests::BackupDroppedCollection |79.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |79.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-25 >> TBackupCollectionTests::DropTwice [GOOD] >> DataShardVolatile::UpsertNoLocksArbiterRestart+UseSink [GOOD] >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataSplitThenPublish [GOOD] >> THealthCheckTest::ProtobufUnderLimitFor100LargeVdisksIssues [GOOD] >> DataShardVolatile::UpsertNoLocksArbiterRestart-UseSink >> TTxDataShardUploadRows::UploadRowsToReplicatedTable >> TBackupCollectionTests::TableWithSystemColumns |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |79.4%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |79.4%| [LD] {RESULT} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-25 >> TExecutorDb::RandomOps [GOOD] >> TBackupCollectionTests::BackupDroppedCollection [GOOD] >> TExecutorDb::FullScan >> TBackupCollectionTests::BackupAbsentDirs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::ProtobufUnderLimitFor100LargeVdisksIssues [GOOD] Test command err: 2025-06-25T14:33:09.175737Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:09.176217Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:09.176488Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:09.176636Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:09.176941Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:09.176986Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d8b/r3tmp/tmpv05ngm/pdisk_1.dat 2025-06-25T14:33:09.557444Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20444, node 1 TClient is connected to server localhost:29012 2025-06-25T14:33:10.057749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:10.057807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:10.057837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:10.058230Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:17.231300Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:17.231422Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:17.231597Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:17.231839Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:17.232206Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:17.232274Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d8b/r3tmp/tmpLLafPD/pdisk_1.dat 2025-06-25T14:33:17.621134Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19124, node 3 TClient is connected to server localhost:4716 2025-06-25T14:33:18.113267Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:18.113324Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:18.113357Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:18.113887Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e463-3-3-42" status: YELLOW message: "Available size is less than 12%" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-42" path: "/home/runner/.ya/build/build_root/yft8/001d8b/r3tmp/tmpLLafPD/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "YELLOW-e463-3-3-43" status: YELLOW message: "Available size is less than 12%" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-43" path: "/home/runner/.ya/build/build_root/yft8/001d8b/r3tmp/tmpLLafPD/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "YELLOW-e463-3-3-44" status: YELLOW message: "Available size is less than 12%" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-44" path: "/home/runner/.ya/build/build_root/yft8/001d8b/r3tmp/tmpLLafPD/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } location { id: 3 host: "::1" port: 12001 } 2025-06-25T14:33:25.896871Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:25.897242Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:25.897400Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:25.899072Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:634:2322], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:25.899368Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:25.899534Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d8b/r3tmp/tmpEh73Bl/pdisk_1.dat 2025-06-25T14:33:26.273010Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14992, node 5 TClient is connected to server localhost:2010 2025-06-25T14:33:26.737760Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:26.737827Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:26.737864Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:26.738389Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-b783-5-5-42" status: RED message: "PDisk state is DeviceIoError" location { storage { node { id: 5 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "5-42" path: "/home/runner/.ya/build/build_root/yft8/001d8b/r3tmp/tmpEh73Bl/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-b783-5-5-43" status: RED message: "PDisk state is DeviceIoError" location { storage { node { id: 5 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "5-43" path: "/home/runner/.ya/build/build_root/yft8/001d8b/r3tmp/tmpEh73Bl/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-b783-5-5-44" status: RED message: "PDisk state is DeviceIoError" location { storage { node { id: 5 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "5-44" path: "/home/runner/.ya/build/build_root/yft8/001d8b/r3tmp/tmpEh73Bl/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } location { id: 5 host: "::1" port: 12001 } 2025-06-25T14:33:35.109159Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:35.109720Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:35.109961Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:35.110329Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:35.110653Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:35.110789Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d8b/r3tmp/tmp5HfXor/pdisk_1.dat 2025-06-25T14:33:35.680479Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30106, node 7 TClient is connected to server localhost:26512 2025-06-25T14:33:36.484446Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:36.484526Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:36.484579Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:36.485219Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:50.709015Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:50.709437Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:50.709624Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:50.711796Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:50.712074Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:50.712596Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d8b/r3tmp/tmpc06UOP/pdisk_1.dat 2025-06-25T14:33:51.230254Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27668, node 9 TClient is connected to server localhost:29823 2025-06-25T14:33:51.788347Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:51.788414Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:51.788455Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:51.789067Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration |79.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base/test-results/unittest/{meta.json ... results_accumulator.log} >> TTxDataShardUploadRows::TestUploadShadowRowsShadowData [GOOD] >> TBackupCollectionTests::TableWithSystemColumns [GOOD] >> TBackupCollectionTests::BackupAbsentDirs [GOOD] >> TBackupCollectionTests::BackupNonIncrementalCollection >> TPartBtreeIndexIteration::OneNode_Groups_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-2 >> KqpTx::RollbackTx [GOOD] >> KqpTx::RollbackTx2 >> TPQTest::TestWritePQ [GOOD] >> TPQTest::TestWriteOffsetWithBigMessage >> DataShardVolatile::DistributedOutOfOrderFollowerConsistency [GOOD] >> DataShardVolatile::DistributedWriteRSNotAckedBeforeCommit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup_collection/unittest >> TBackupCollectionTests::TableWithSystemColumns [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:33:47.963426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:33:47.963511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:33:47.963555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:33:47.963587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:33:47.963634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:33:47.963660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:33:47.963726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:33:47.963791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:33:47.965039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:33:47.965525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:33:48.047168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:33:48.047231Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:48.064110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:33:48.064570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:33:48.064744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:33:48.071355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:33:48.071703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:33:48.072415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:33:48.072692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:33:48.076075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:33:48.076239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:33:48.077399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:33:48.077457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:33:48.077592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:33:48.077637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, 
domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:33:48.077675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:33:48.077756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:33:48.084183Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:33:48.318553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:33:48.318751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:33:48.318960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:33:48.319003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:33:48.319195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:33:48.319275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:33:48.329440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:33:48.329637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:33:48.329848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:33:48.329904Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:33:48.329956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:33:48.329991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:33:48.333297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:33:48.333357Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:33:48.333391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:33:48.335497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:33:48.335558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:33:48.335601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:33:48.335666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:33:48.339271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:33:48.347813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:33:48.348164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:33:48.349429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:33:48.349563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:33:48.349611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:33:48.349869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:33:48.349911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:33:48.350082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:33:48.350193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:33:48.352650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:33:48.352693Z node 1 :FLAT_TX_SCHEMESHARD ... Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 8] Version: 3 } 2025-06-25T14:33:56.025078Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-25T14:33:56.025349Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 3 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T14:33:56.025983Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 3 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T14:33:56.026052Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-25T14:33:56.026131Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 8], version: 3 2025-06-25T14:33:56.026341Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 4 2025-06-25T14:33:56.026455Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/2, is published: true 2025-06-25T14:33:56.026504Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:33:56.027818Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [6:660:2606], Recipient [6:136:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:33:56.027859Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:33:56.027885Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:33:56.028268Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269551620, Sender [6:598:2553], Recipient [6:136:2157]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 598 RawX2: 25769806329 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-06-25T14:33:56.028324Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4987: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-25T14:33:56.028477Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 598 RawX2: 25769806329 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-06-25T14:33:56.028527Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409548, partId: 1 2025-06-25T14:33:56.028674Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 106:1, at schemeshard: 72057594046678944, message: Source { RawX1: 598 RawX2: 25769806329 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-06-25T14:33:56.028749Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 106:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:33:56.028882Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 106:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 598 RawX2: 25769806329 } Origin: 72075186233409548 State: 2 TxId: 106 Step: 0 Generation: 2 2025-06-25T14:33:56.028963Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 106:1, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:33:56.029005Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 106:1, at schemeshard: 72057594046678944 2025-06-25T14:33:56.029039Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 106:1, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-25T14:33:56.029077Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 106:1 129 -> 240 2025-06-25T14:33:56.029241Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:33:56.029592Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:33:56.038855Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:33:56.039012Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T14:33:56.039073Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:33:56.039256Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 106:1, at schemeshard: 72057594046678944 2025-06-25T14:33:56.039296Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:33:56.040838Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T14:33:56.040891Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:33:56.041322Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T14:33:56.041352Z node 6 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:33:56.041442Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 106:1, at schemeshard: 72057594046678944 2025-06-25T14:33:56.041510Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:33:56.041550Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 106:1 2025-06-25T14:33:56.041658Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [6:598:2553] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 106 at schemeshard: 72057594046678944 2025-06-25T14:33:56.042003Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [6:136:2157], Recipient [6:136:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:33:56.042041Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:33:56.042097Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 106:1, at schemeshard: 72057594046678944 2025-06-25T14:33:56.042144Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 106:1 ProgressState 2025-06-25T14:33:56.042286Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:33:56.042319Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:1 progress is 2/2 2025-06-25T14:33:56.042382Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 2/2 2025-06-25T14:33:56.042429Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:1 progress is 2/2 2025-06-25T14:33:56.048425Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 2/2 2025-06-25T14:33:56.048530Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 2/2, is published: true 2025-06-25T14:33:56.048617Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [6:307:2296] message: TxId: 106 2025-06-25T14:33:56.048687Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 2/2 2025-06-25T14:33:56.048751Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:0 2025-06-25T14:33:56.048788Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 106:0 2025-06-25T14:33:56.048868Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-25T14:33:56.048902Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:1 2025-06-25T14:33:56.048931Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5258: RemoveTx for txid 106:1 2025-06-25T14:33:56.049006Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 3 2025-06-25T14:33:56.051316Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:33:56.051435Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [6:307:2296] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 106 at schemeshard: 72057594046678944 2025-06-25T14:33:56.051632Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T14:33:56.051678Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [6:626:2573] 2025-06-25T14:33:56.051903Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [6:628:2575], Recipient [6:136:2157]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:33:56.051945Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:33:56.051971Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 |79.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index >> KqpTx::RollbackByIdle [GOOD] >> KqpTx::RollbackInvalidated |79.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |79.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadShadowRowsShadowData [GOOD] Test command err: 2025-06-25T14:33:47.305947Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:47.306100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:47.306157Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c92/r3tmp/tmpAKmrVN/pdisk_1.dat 2025-06-25T14:33:47.854670Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:33:47.864131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:33:47.969904Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:47.975599Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862024223753 != 1750862024223757 2025-06-25T14:33:48.034766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:48.034955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:48.049667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:48.146211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:48.240586Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:33:48.240889Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:33:48.289445Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:33:48.289577Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:33:48.291500Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:33:48.291607Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:33:48.291676Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:33:48.292117Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:33:48.292295Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:33:48.292473Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:33:48.303829Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:33:48.345433Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:33:48.345753Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:33:48.345854Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:33:48.345883Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:33:48.345926Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:33:48.345956Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:33:48.346466Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:33:48.346549Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:33:48.346586Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:33:48.346624Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:33:48.346659Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:33:48.346721Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:33:48.347103Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:33:48.347488Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:33:48.347816Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:33:48.347895Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:33:48.349385Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:33:48.360150Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:33:48.360261Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:33:48.530487Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:33:48.534891Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:33:48.534984Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:33:48.535869Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:33:48.535930Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:33:48.535972Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:33:48.536231Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:33:48.540621Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:33:48.541313Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:33:48.541381Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:33:48.541867Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:33:48.542229Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:33:48.543835Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:33:48.543879Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:33:48.548422Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:33:48.548579Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:33:48.549556Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:33:48.549595Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:33:48.549636Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:33:48.549707Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:33:48.549758Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:33:48.549841Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:33:48.574041Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:33:48.581133Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:33:48.581245Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:33:48.582450Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:33:48.679851Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:48.679994Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: ... ndencies 2025-06-25T14:33:55.574939Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2000/281474976715664 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-25T14:33:55.574994Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:281474976715665] at 72075186224037888 2025-06-25T14:33:55.575032Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-25T14:33:55.575055Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T14:33:55.575076Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit MakeScanSnapshot 2025-06-25T14:33:55.575098Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit MakeScanSnapshot 2025-06-25T14:33:55.575127Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-25T14:33:55.575162Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit MakeScanSnapshot 2025-06-25T14:33:55.575185Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit WaitForStreamClearance 2025-06-25T14:33:55.575207Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit WaitForStreamClearance 2025-06-25T14:33:55.575257Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:99: Requested stream clearance from [2:904:2715] for [0:281474976715665] at 72075186224037888 2025-06-25T14:33:55.575290Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Continue 2025-06-25T14:33:55.575485Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287942, Sender [2:904:2715], Recipient [2:627:2531]: NKikimrTx.TEvStreamClearancePending TxId: 281474976715665 2025-06-25T14:33:55.575522Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvTxProcessing::TEvStreamClearancePending 2025-06-25T14:33:55.575611Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:904:2715], Recipient [2:627:2531]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715665 Cleared: true 2025-06-25T14:33:55.575642Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-25T14:33:55.575712Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:627:2531], Recipient [2:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:33:55.575742Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event 
TEvPrivate::TEvProgressTransaction 2025-06-25T14:33:55.575820Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:33:55.575862Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:33:55.575909Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715665] at 72075186224037888 for WaitForStreamClearance 2025-06-25T14:33:55.575947Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit WaitForStreamClearance 2025-06-25T14:33:55.575995Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [0:281474976715665] at 72075186224037888 2025-06-25T14:33:55.576040Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-25T14:33:55.576077Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit WaitForStreamClearance 2025-06-25T14:33:55.576118Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit ReadTableScan 2025-06-25T14:33:55.576151Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit ReadTableScan 2025-06-25T14:33:55.588546Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Continue 2025-06-25T14:33:55.588611Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:33:55.588657Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T14:33:55.588700Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:33:55.588734Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:33:55.589392Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:910:2720], Recipient [2:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-25T14:33:55.589435Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-25T14:33:55.589641Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715665, MessageQuota: 1 2025-06-25T14:33:55.590050Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:33:55.590096Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:33:55.590481Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715665, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:33:55.590677Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:897:2708], Recipient [2:627:2531]: NKikimr::TEvTabletPipe::TEvClientDestroyed { 
TabletId: 72057594046644480 ClientId: [2:897:2708] ServerId: [2:899:2710] } 2025-06-25T14:33:55.590710Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:33:55.590803Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715665, PendingAcks: 0 2025-06-25T14:33:55.590847Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715665, MessageQuota: 0 2025-06-25T14:33:55.597040Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:33:55.597112Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715665, at: 72075186224037888 2025-06-25T14:33:55.597287Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:627:2531], Recipient [2:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:33:55.597324Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:33:55.597386Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:33:55.597421Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:33:55.597461Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715665] at 72075186224037888 for ReadTableScan 2025-06-25T14:33:55.597492Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit ReadTableScan 2025-06-25T14:33:55.597526Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715665] at 72075186224037888 error: , IsFatalError: 0 2025-06-25T14:33:55.597578Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-25T14:33:55.597610Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit ReadTableScan 2025-06-25T14:33:55.597637Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit FinishPropose 2025-06-25T14:33:55.597663Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-06-25T14:33:55.597707Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715665 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-25T14:33:55.597765Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is DelayComplete 2025-06-25T14:33:55.597797Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit FinishPropose 2025-06-25T14:33:55.597833Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T14:33:55.597869Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 
72075186224037888 on unit CompletedOperations 2025-06-25T14:33:55.597918Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-25T14:33:55.597938Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T14:33:55.597961Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715665] at 72075186224037888 has finished 2025-06-25T14:33:55.597998Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:33:55.598022Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T14:33:55.598047Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:33:55.598072Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:33:55.598136Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:33:55.598175Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-06-25T14:33:55.598219Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetry [GOOD] >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite2-withSink [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3+withSink >> TBackupCollectionTests::BackupNonIncrementalCollection [GOOD] >> TPartBtreeIndexIteration::FewNodes [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups >> TChargeBTreeIndex::FewNodes_Groups_History_Sticky [GOOD] >> TClockProCache::Lifecycle [GOOD] >> TClockProCache::EvictNext [GOOD] >> TClockProCache::Erase [GOOD] >> TClockProCache::Random >> TClockProCache::Random [GOOD] >> NFwd_TFlatIndexCache::Skip_Wait [GOOD] >> NFwd_TFlatIndexCache::Trace [GOOD] >> NFwd_TFlatIndexCache::Slices [GOOD] >> NFwd_TLoadedPagesCircularBuffer::Basics [GOOD] >> NOther::Blocks [GOOD] >> NPage::Encoded [GOOD] >> NPage::ABI_002 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-4 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-5 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-43 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-44 >> TSchemeShardTopicSplitMergeTest::MargeInactivePartitions >> BuildStatsHistogram::Single_History_Slices [GOOD] >> BuildStatsHistogram::Ten_Mixed >> NPage::ABI_002 [GOOD] >> NPage::GroupIdEncoding [GOOD] >> NPageCollection::Align [GOOD] >> NPageCollection::Meta >> TTxDataShardUploadRows::TestUploadRowsDropColumnRace [GOOD] >> TTxDataShardUploadRows::TestUploadRowsLocks ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_backup_collection/unittest >> TBackupCollectionTests::BackupNonIncrementalCollection [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: 
[1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:33:48.826198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:33:48.826281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:33:48.826323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:33:48.826352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:33:48.826393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:33:48.826416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:33:48.826492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:33:48.826590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:33:48.831590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:33:48.831993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:33:49.064479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:33:49.064539Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:49.104664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:33:49.105173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:33:49.105352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:33:49.141436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:33:49.141829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:33:49.142595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:33:49.142935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:33:49.158921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot 
DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:33:49.159127Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:33:49.160332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:33:49.160394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:33:49.160552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:33:49.160601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:33:49.160639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:33:49.160737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:33:49.186407Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:33:49.525636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:33:49.525843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:33:49.526033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:33:49.526080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:33:49.526324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:33:49.526421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:33:49.529173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:33:49.529365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, 
path: //MyRoot 2025-06-25T14:33:49.529573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:33:49.529620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:33:49.529689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:33:49.529728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:33:49.531754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:33:49.531800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:33:49.531839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:33:49.533614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:33:49.533675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:33:49.533715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:33:49.533763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:33:49.537100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:33:49.538894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:33:49.539060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:33:49.539924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:33:49.540062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:33:49.540106Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:33:49.540382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:33:49.540439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:33:49.540636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:33:49.540714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:33:49.542728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:33:49.542771Z node 1 :FLAT_TX_SCHEMESHARD ... hDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-25T14:33:58.219238Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:1 2025-06-25T14:33:58.219259Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 105:1 2025-06-25T14:33:58.219356Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-25T14:33:58.219395Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-25T14:33:58.225212Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:33:58.225364Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [7:303:2292] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 105 at schemeshard: 72057594046678944 2025-06-25T14:33:58.225613Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:33:58.225670Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [7:527:2486] 2025-06-25T14:33:58.225920Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [7:529:2488], Recipient [7:134:2156]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:33:58.225975Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:33:58.226007Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 TestModificationResults wait txId: 106 2025-06-25T14:33:58.226631Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [7:595:2552], Recipient [7:134:2156]: {TEvModifySchemeTransaction txid# 106 TabletId# 
72057594046678944} 2025-06-25T14:33:58.226711Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:33:58.229681Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpBackupIncrementalBackupCollection BackupIncrementalBackupCollection { Name: ".backups/collections/MyCollection1" } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:33:58.230261Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-25T14:33:58.230503Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 4], parent name: MyCollection1, child name: 19700101000000Z_incremental, child id: [OwnerId: 72057594046678944, LocalPathId: 8], at schemeshard: 72057594046678944 2025-06-25T14:33:58.230605Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 0 2025-06-25T14:33:58.230664Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 106:0 type: TxMkDir target path: [OwnerId: 72057594046678944, LocalPathId: 8] source path: 2025-06-25T14:33:58.230760Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 106:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:33:58.230900Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 106:1, explain: Incremental backup is disabled on this collection, at schemeshard: 72057594046678944 2025-06-25T14:33:58.230960Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 106:2, propose status:StatusInvalidParameter, reason: Incremental backup is disabled on this collection, at schemeshard: 72057594046678944 2025-06-25T14:33:58.234877Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:156: Abort operation: IgniteOperation fail to propose a part, opId: 106:1, at schemeshard: 72057594046678944, already accepted parts: 1, propose result status: StatusInvalidParameter, with reason: Incremental backup is disabled on this collection, tx message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpBackupIncrementalBackupCollection BackupIncrementalBackupCollection { Name: ".backups/collections/MyCollection1" } } TxId: 106 TabletId: 72057594046678944 2025-06-25T14:33:58.235075Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:275: MkDir AbortPropose, opId: 106:0, at schemeshard: 72057594046678944 2025-06-25T14:33:58.235370Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:33:58.240402Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Incremental backup is disabled on this collection" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:33:58.240741Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Incremental backup is disabled on this collection, operation: BACKUP INCREMENTAL, path: /MyRoot/.backups/collections/MyCollection1 2025-06-25T14:33:58.240821Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-25T14:33:58.241238Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-25T14:33:58.241293Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-25T14:33:58.241758Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [7:601:2558], Recipient [7:134:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:33:58.241821Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:33:58.241868Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:33:58.242026Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124996, Sender [7:303:2292], Recipient [7:134:2156]: NKikimrScheme.TEvNotifyTxCompletion TxId: 106 2025-06-25T14:33:58.242064Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-25T14:33:58.242149Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-25T14:33:58.242270Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T14:33:58.242316Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [7:599:2556] 2025-06-25T14:33:58.242510Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [7:601:2558], Recipient [7:134:2156]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:33:58.242543Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:33:58.242597Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 2025-06-25T14:33:58.243140Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [7:602:2559], Recipient [7:134:2156]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-25T14:33:58.243204Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:33:58.243330Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false 
BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:33:58.243603Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1" took 276us result status StatusSuccess 2025-06-25T14:33:58.244127Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1" PathDescription { Self { Name: "MyCollection1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeBackupCollection CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 BackupCollectionVersion: 0 } ChildrenExist: true } Children { Name: "19700101000000Z_full" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 105 CreateStep: 5000006 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } BackupCollectionDescription { Name: "MyCollection1" ExplicitEntryList { Entries { Type: ETypeTable Path: "/MyRoot/Table1" } } Cluster { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> NPageCollection::Meta [GOOD] >> NPageCollection::PagesToBlobsConverter [GOOD] >> NPageCollection::Grow [GOOD] >> NPageCollection::Groups [GOOD] >> NPageCollection::Chop [GOOD] >> NPageCollection::CookieAllocator [GOOD] >> NProto::LargeGlobId [GOOD] >> Redo::ABI_008 [GOOD] >> Self::Literals [GOOD] >> TTxDataShardUploadRows::UploadRowsToReplicatedTable [GOOD] |79.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_backup_collection/test-results/unittest/{meta.json ... results_accumulator.log} |79.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |79.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/test-results/unittest/{meta.json ... 
results_accumulator.log} |79.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace >> THealthCheckTest::UnknowPDiskState [GOOD] >> THealthCheckTest::TestSystemStateRetriesAfterReceivingResponse >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_FlatIndex >> TSchemeShardTopicSplitMergeTest::SplitWithManyPartition >> TPartBtreeIndexIteration::FewNodes_Groups [GOOD] >> TPartBtreeIndexIteration::FewNodes_History ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::UploadRowsToReplicatedTable [GOOD] Test command err: 2025-06-25T14:33:49.729870Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:49.730001Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:49.730050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d1e/r3tmp/tmp2TL4nM/pdisk_1.dat 2025-06-25T14:33:50.164729Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:33:50.167990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:33:50.245357Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:50.251039Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862025085656 != 1750862025085660 2025-06-25T14:33:50.303137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:50.303329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:50.315856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:50.418974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:50.465548Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:33:50.466645Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:33:50.467117Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:33:50.467366Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:33:50.550365Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:33:50.551120Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:33:50.551266Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:33:50.553216Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T14:33:50.553358Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:33:50.553410Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:33:50.553793Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:33:50.553938Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:33:50.554023Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:33:50.564839Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:33:50.602060Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:33:50.602263Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:33:50.602378Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:33:50.602434Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:33:50.602471Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:33:50.602508Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:33:50.602708Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:33:50.602753Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:33:50.603117Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:33:50.603248Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:33:50.603314Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:33:50.603373Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:33:50.603415Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:33:50.603487Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:33:50.603520Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:33:50.603549Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:33:50.603588Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:33:50.603980Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:33:50.604018Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:33:50.604055Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:33:50.604195Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:33:50.604232Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:33:50.604366Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:33:50.604595Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:33:50.604642Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:33:50.604728Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:33:50.604783Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14:33:50.604827Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T14:33:50.604861Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T14:33:50.604892Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:33:50.605132Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T14:33:50.605167Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T14:33:50.605211Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T14:33:50.605255Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:33:50.605308Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T14:33:50.605342Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T14:33:50.605374Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T14:33:50.605404Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T14:33:50.605430Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T14:33:50.606795Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T14:33:50.606855Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:33:50.618776Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:33:50.618864Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:33:50.618903Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:33:50.618967Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... eartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:33:58.374468Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:58.374643Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:58.374772Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d1e/r3tmp/tmpxUz2NH/pdisk_1.dat 2025-06-25T14:33:58.748946Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T14:33:58.750444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:33:58.787436Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:58.795775Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750862034245480 != 1750862034245484 2025-06-25T14:33:58.857163Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:58.857276Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:58.868579Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:58.973123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:59.010515Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:627:2531] 2025-06-25T14:33:59.010772Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:33:59.124144Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:33:59.124278Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:33:59.125783Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:33:59.125868Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:33:59.125934Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:33:59.126236Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:33:59.126361Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:33:59.126443Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [2:643:2531] in generation 1 2025-06-25T14:33:59.138332Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:33:59.138409Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:33:59.138508Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:33:59.138617Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:645:2541] 2025-06-25T14:33:59.138660Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:33:59.138694Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:33:59.138741Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:33:59.139057Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:33:59.139195Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:33:59.139309Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:33:59.139360Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:33:59.139402Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:33:59.139464Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:33:59.139868Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:624:2529], serverId# [2:634:2535], sessionId# [0:0:0] 2025-06-25T14:33:59.140042Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:33:59.140233Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:33:59.140305Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:33:59.151839Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:33:59.168936Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:33:59.169053Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:33:59.386399Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:661:2551], serverId# [2:662:2552], sessionId# [0:0:0] 2025-06-25T14:33:59.387797Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:33:59.387856Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:33:59.388050Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:33:59.388112Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:33:59.388154Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:33:59.388454Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:33:59.388585Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:33:59.388933Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:33:59.388994Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:33:59.389374Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:33:59.389723Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:33:59.391187Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:33:59.391233Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:33:59.391684Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:33:59.391754Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:33:59.397320Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:33:59.397809Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:33:59.397853Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:33:59.397900Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:33:59.398247Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:33:59.398317Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 
0 2025-06-25T14:33:59.398411Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:33:59.400528Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:33:59.400600Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:33:59.401418Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:33:59.463569Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:696:2578], serverId# [2:697:2579], sessionId# [0:0:0] 2025-06-25T14:33:59.463710Z node 2 :TX_DATASHARD NOTICE: datashard__op_rows.cpp:168: Rejecting bulk upsert request on datashard: tablet# 72075186224037888, error# Can't execute bulk upsert at replicated table >> TSchemeShardTopicSplitMergeTest::SplitTwoPartitions >> TSchemeShardTopicSplitMergeTest::MargeInactivePartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::EnableSplitMerge >> KqpSinkMvcc::OltpMultiSinksNoSinks [GOOD] >> KqpSinkMvcc::OltpMultiSinks >> KqpTx::SnapshotROInteractive2 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-25 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-26 >> THealthCheckTest::HealthCheckConfigUpdate [GOOD] |79.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |79.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> Self::Literals [GOOD] Test command err: + BTreeIndex{PageId: 0 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385, 13 rev 1, 683b} | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | > {0, a, false, 0} | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | > {1, b, true, 10} | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | > {2, c, false, 20} | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | > {3, d, true, 30} | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | > {4, e, false, 40} | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | > {5, f, true, 50} | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | > {6, g, false, 60} | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | > {7, h, true, 70} | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | > {8, i, false, 80} | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | > {9, j, true, 90} | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 + BTreeIndex{PageId: 9 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 116b} | + BTreeIndex{PageId: 5 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 
306, 13 rev 1, 179b} | | + BTreeIndex{PageId: 0 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93, 13 rev 1, 179b} | | | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | | | > {0, a, false, 0} | | | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | | | > {1, b, true, 10} | | | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | | > {2, c, false, 20} | | + BTreeIndex{PageId: 1 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195, 13 rev 1, 179b} | | | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | | | > {3, d, true, 30} | | | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | | | > {4, e, false, 40} | | | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | | > {5, f, true, 50} | | + BTreeIndex{PageId: 2 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306, 13 rev 1, 179b} | | | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | | | > {6, g, false, 60} | | | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | | | > {7, h, true, 70} | | | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | > {8, i, false, 80} | + BTreeIndex{PageId: 8 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 242b} | | + BTreeIndex{PageId: 3 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426, 13 rev 1, 179b} | | | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | | | > {9, j, true, 90} | | | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 | | | > {10, k, false, 100} | | | PageId: 10011 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426 | | > {11, l, true, 110} | | + BTreeIndex{PageId: 4 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555, 13 rev 1, 179b} | | | PageId: 10012 RowCount: 1378 DataSize: 13078 GroupDataSize: 26078 ErasedRowCount: 468 | | | > {12, m, false, 120} | | | PageId: 10013 RowCount: 1491 DataSize: 14091 GroupDataSize: 28091 ErasedRowCount: 511 | | | > {13, n, true, 130} | | | PageId: 10014 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555 | | > {14, o, false, 140} | | + BTreeIndex{PageId: 6 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693, 13 rev 1, 179b} | | | PageId: 10015 RowCount: 1720 DataSize: 16120 GroupDataSize: 32120 ErasedRowCount: 600 | | | > {15, p, true, 150} | | | PageId: 10016 RowCount: 1836 DataSize: 17136 GroupDataSize: 34136 ErasedRowCount: 646 | | | > {16, q, false, 160} | | | PageId: 10017 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693 | | > {17, r, true, 170} | | + BTreeIndex{PageId: 7 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 179b} | | | PageId: 10018 RowCount: 2071 DataSize: 19171 GroupDataSize: 38171 ErasedRowCount: 741 | | | > {18, s, false, 180} | | | PageId: 10019 RowCount: 2190 DataSize: 20190 GroupDataSize: 40190 ErasedRowCount: 790 | | | > {19, t, true, 190} | | | PageId: 10020 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840 + BTreeIndex{PageId: 15 RowCount: 15150 DataSize: 106050 GroupDataSize: 207050 ErasedRowCount: 8080, 13 rev 1, 174b} | + BTreeIndex{PageId: 12 RowCount: 9078 DataSize: 70278 GroupDataSize: 
138278 ErasedRowCount: 4318, 13 rev 1, 690b} | | + BTreeIndex{PageId: 0 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426, 13 rev 1, 702b} | | | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | | | > {0, x, NULL, NULL} | | | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | | | > {1, xx, NULL, NULL} | | | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | | | > {2, xxx, NULL, NULL} | | | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | | | > {3, xxxx, NULL, NULL} | | | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | | | > {4, xxxxx, NULL, NULL} | | | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | | | > {5, xxxxxx, NULL, NULL} | | | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | | | > {6, xxxxxxx, NULL, NULL} | | | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | | | > {7, xxxxxxxx, NULL, NULL} | | | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | | | > {8, xxxxxxxxx, NULL, NULL} | | | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | | | > {9, xxxxxxxxxx, NULL, NULL} | | | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 | | | > {10, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10011 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426 | | > {11, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 1 RowCount: 2431 DataSize: 22231 GroupDataSize: 44231 ErasedRowCount: 891, 13 rev 1, 683b} | | | PageId: 10012 RowCount: 1378 DataSize: 13078 GroupDataSize: 26078 ErasedRowCount: 468 | | | > {12, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10013 RowCount: 1491 DataSize: 14091 GroupDataSize: 28091 ErasedRowCount: 511 | | | > {13, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10014 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555 | | | > {14, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10015 RowCount: 1720 DataSize: 16120 GroupDataSize: 32120 ErasedRowCount: 600 | | | > {15, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10016 RowCount: 1836 DataSize: 17136 GroupDataSize: 34136 ErasedRowCount: 646 | | | > {16, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10017 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693 | | | > {17, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10018 RowCount: 2071 DataSize: 19171 GroupDataSize: 38171 ErasedRowCount: 741 | | | > {18, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10019 RowCount: 2190 DataSize: 20190 GroupDataSize: 40190 ErasedRowCount: 790 | | | > {19, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10020 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840 | | | > {20, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10021 RowCount: 2431 DataSize: 22231 GroupDataSize: 44231 ErasedRowCount: 891 | | > {21, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 2 RowCount: 3565 DataSize: 31465 GroupDataSize: 62465 ErasedRowCount: 1395, 13 rev 1, 689b} | | | PageId: 10022 RowCount: 2553 DataSize: 23253 GroupDataSize: 46253 ErasedRowCount: 943 | | | > {22, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10023 RowCount: 2676 DataSize: 24276 GroupDataSize: 48276 ErasedRowCount: 996 | | | > {23, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10024 RowCount: 2800 DataSize: 25300 GroupDataSize: 50300 ErasedRowCount: 1050 | | | > {24, xxxxxxxxxx.., 
NULL, NULL} | | | PageId: 10025 RowCount: 2925 DataSize: 26325 GroupDataSize: 52325 ErasedRowCount: 1105 | | | > {25, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10026 RowCount: 3051 DataSize: 27351 GroupDataSize: 54351 ErasedRowCount: 1161 | | | > {26, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10027 RowCount: 3178 DataSize: 28378 GroupDataSize: 56378 ErasedRowCount: 1218 | | | > {27, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10028 RowCount: 3306 DataSize: 29406 GroupDataSize: 58406 ErasedRowCount: 1276 | | | > {28, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10029 RowCount: 3435 DataSize: 30435 GroupDataSize: 60435 ErasedRowCount: 1335 | | | > {29, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10030 RowCount: 3565 DataSize: 31465 GroupDataSize: 62465 ErasedRowCount: 1395 | | > {30, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 3 RowCount: 4641 DataSize: 39741 GroupDataSize: 78741 ErasedRowCount: 1911, 13 rev 1, 669b} | | | PageId: 10031 RowCount: 3696 DataSize: 32496 GroupDataSize: 64496 ErasedRowCount: 1456 | | | > {31, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10032 RowCount: 3828 DataSize: 33528 GroupDataSize: 66528 ErasedRowCount: 1518 | | | > {32, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10033 RowCount: 3961 DataSize: 34561 GroupDataSize: 68561 ErasedRowCount: 1581 | | | > {33, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10034 RowCount: 4095 DataSize: 35595 GroupDataSize: 70595 ErasedRowCount: 1645 | | | > {34, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10035 RowCount: 4230 DataSize: 36630 GroupDataSize: 72630 ErasedRowCount: 1710 | | | > {35, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10036 RowCount: 4366 DataSize: 37666 GroupDataSize: 74666 ErasedRowCount: 1776 | | | > {36, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10037 RowCount: 4503 DataSize: 38703 GroupDataSize: 76703 ErasedRowCount: 1843 | | | > {37, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10038 RowCount: 4641 DataSize: 39741 GroupDataSize: 78741 ErasedRowCount: 1911 | | > {38, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 4 RowCount: 5781 DataSize: 48081 GroupDataSize: 95081 ErasedRowCount: 2491, 13 rev 1, 725b} | | | PageId: 10039 RowCount: 4780 DataSize: 40780 GroupDataSize: 80780 ErasedRowCount: 1980 | | | > {39, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10040 RowCount: 4920 DataSize: 41820 GroupDataSize: 82820 ErasedRowCount: 2050 | | | > {40, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10041 RowCount: 5061 DataSize: 42861 GroupDataSize: 84861 ErasedRowCount: 2121 | | | > {41, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10042 RowCount: 5203 DataSize: 43903 GroupDataSize: 86903 ErasedRowCount: 2193 | | | > {42, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10043 RowCount: 5346 DataSize: 44946 GroupDataSize: 88946 ErasedRowCount: 2266 | | | > {43, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10044 RowCount: 5490 DataSize: 45990 GroupDataSize: 90990 ErasedRowCount: 2340 | | | > {44, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10045 RowCount: 5635 DataSize: 47035 GroupDataSize: 93035 ErasedRowCount: 2415 | | | > {45, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10046 RowCount: 5781 DataSize: 48081 GroupDataSize: 95081 ErasedRowCount: 2491 | | > {46, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 5 RowCount: 6831 DataSize: 55431 GroupDataSize: 109431 ErasedRowCount: 3051, 13 ... 
7 RowCount: 34 DataSize: 1202 GroupDataSize: 7632 ErasedRowCount: 0 | | | > {4, 10} | | | PageId: 70 RowCount: 36 DataSize: 1284 GroupDataSize: 8163 ErasedRowCount: 0 | | | > {5, 3} | | | PageId: 82 RowCount: 38 DataSize: 1350 GroupDataSize: 8602 ErasedRowCount: 0 | | | > {5, 6} | | | PageId: 87 RowCount: 40 DataSize: 1416 GroupDataSize: 9358 ErasedRowCount: 0 + Rows{0} Label{04 rev 1, 66b}, [0, +2)row | ERowOp 1: {0, 1} | ERowOp 1: {0, 3} + Rows{2} Label{24 rev 1, 66b}, [2, +2)row | ERowOp 1: {0, 4} | ERowOp 1: {0, 6} + Rows{4} Label{44 rev 1, 82b}, [4, +2)row | ERowOp 1: {0, 7} | ERowOp 1: {0, 8} + Rows{8} Label{84 rev 1, 66b}, [6, +2)row | ERowOp 1: {0, 10} | ERowOp 1: {1, 1} + Rows{11} Label{114 rev 1, 66b}, [8, +2)row | ERowOp 1: {1, 3} | ERowOp 1: {1, 4} + Rows{14} Label{144 rev 1, 82b}, [10, +2)row | ERowOp 1: {1, 6} | ERowOp 1: {1, 7} + Rows{20} Label{204 rev 1, 66b}, [12, +2)row | ERowOp 1: {1, 8} | ERowOp 1: {1, 10} + Rows{23} Label{234 rev 1, 66b}, [14, +2)row | ERowOp 1: {2, 1} | ERowOp 1: {2, 3} + Rows{26} Label{264 rev 1, 82b}, [16, +2)row | ERowOp 1: {2, 4} | ERowOp 1: {2, 6} + Rows{36} Label{364 rev 1, 66b}, [18, +2)row | ERowOp 1: {2, 7} | ERowOp 1: {2, 8} + Rows{39} Label{394 rev 1, 66b}, [20, +2)row | ERowOp 1: {2, 10} | ERowOp 1: {3, 1} + Rows{42} Label{424 rev 1, 82b}, [22, +2)row | ERowOp 1: {3, 3} | ERowOp 1: {3, 4} + Rows{48} Label{484 rev 1, 66b}, [24, +2)row | ERowOp 1: {3, 6} | ERowOp 1: {3, 7} + Rows{53} Label{534 rev 1, 66b}, [26, +2)row | ERowOp 1: {3, 8} | ERowOp 1: {3, 10} + Rows{58} Label{584 rev 1, 82b}, [28, +2)row | ERowOp 1: {4, 1} | ERowOp 1: {4, 3} + Rows{64} Label{644 rev 1, 66b}, [30, +2)row | ERowOp 1: {4, 4} | ERowOp 1: {4, 6} + Rows{67} Label{674 rev 1, 66b}, [32, +2)row | ERowOp 1: {4, 7} | ERowOp 1: {4, 8} + Rows{70} Label{704 rev 1, 82b}, [34, +2)row | ERowOp 1: {4, 10} | ERowOp 1: {5, 1} + Rows{82} Label{824 rev 1, 66b}, [36, +2)row | ERowOp 1: {5, 3} | ERowOp 1: {5, 4} + Rows{87} Label{874 rev 1, 66b}, [38, +2)row | ERowOp 1: {5, 6} | ERowOp 1: {5, 7} Slices{ [0, 39] } 0.29277 Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | 
ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + 
Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row 
| ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRowsLargeBlobs [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2NoRestart >> TFlatTableExecutor_IndexLoading::Scan_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_BTreeIndex >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2NoRestart [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2 [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1 >> TSchemeShardTopicSplitMergeTest::SplitWithOnePartition >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1 [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1ToSchema2 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-25 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-26 >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1ToSchema2 [GOOD] >> TGenCompaction::OverloadFactorDuringForceCompaction >> TPartBtreeIndexIteration::FewNodes_History [GOOD] >> TPartBtreeIndexIteration::FewNodes_Sticky >> TSchemeShardTopicSplitMergeTest::CreateTopicWithOnePartition ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::SnapshotROInteractive2 [GOOD] Test command err: Trying to start YDB, gRPC: 4312, MsgBus: 25444 2025-06-25T14:33:44.332507Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895133295783675:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:44.359005Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001313/r3tmp/tmpDy4Sly/pdisk_1.dat 2025-06-25T14:33:44.811523Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4312, node 1 2025-06-25T14:33:44.899703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:44.900073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:44.904572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:44.925902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:44.925920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:44.925926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:44.926024Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25444 2025-06-25T14:33:45.363108Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25444 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:45.643750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:45.673039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:33:45.698120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:45.878892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:46.079856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:33:46.156446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:49.200279Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895154770621688:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:49.200407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:49.332743Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895133295783675:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:49.332818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:50.030044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:50.082284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:50.136786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:50.213513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:50.273861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:50.357527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:50.432179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:50.536509Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519895159065589657:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:50.536601Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:50.537341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895159065589662:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:50.541577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:50.552065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:33:50.552364Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895159065589664:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:33:50.608395Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895159065589715:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:52.040348Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MWEzMmZkOWMtNWM0NTJhNDctODFhOWQ0NWMtNDczOWE1NmQ=, ActorId: [1:7519895163360557304:2479], ActorState: ReadyState, TraceId: 01jykr4v4k80es8jfh5hxq946d, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:861: Too many transactions, current active: 2 MaxTxPerSession: 2 Trying to start YDB, gRPC: 30923, MsgBus: 24375 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001313/r3tmp/tmpcuyhzI/pdisk_1.dat 2025-06-25T14:33:53.567092Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:53.650105Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:53.650257Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895172369237251:2080] 1750862033332583 != 1750862033332586 2025-06-25T14:33:53.666110Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:53.666199Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:53.669047Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30923, node 2 2025-06-25T14:33:53.760662Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:53.760684Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:53.760691Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:53.760814Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24375 TClient is connected to server localhost:24375 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:33:54.411540Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:54.450636Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:54.474475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:54.587741Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:54.767716Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:54.873158Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:57.340474Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895189549108055:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:57.340596Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:57.415787Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:57.466334Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:57.523764Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:57.599760Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:57.652501Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:57.716810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:57.827518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:57.940908Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895189549108714:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:57.941009Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:57.941502Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895189549108719:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:57.946286Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:57.978966Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519895189549108721:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:33:58.077362Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519895193844076068:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TSchemeShardTopicSplitMergeTest::SplitWithManyPartition [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadShadowRowsShadowDataAlterSplitThenPublish [GOOD] Test command err: 2025-06-25T14:33:49.143080Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:49.143203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:49.143268Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c82/r3tmp/tmpOYxpdK/pdisk_1.dat 2025-06-25T14:33:49.668543Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:33:49.671633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:33:49.734820Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:49.744564Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862024965244 != 1750862024965248 2025-06-25T14:33:49.798477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:49.798646Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:49.813431Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:49.911855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:49.994877Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:33:49.996005Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:33:50.004642Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:33:50.004917Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:33:50.095255Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:33:50.096023Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:33:50.096155Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:33:50.097851Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T14:33:50.097926Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:33:50.097983Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:33:50.098307Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:33:50.098433Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:33:50.098511Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:33:50.112386Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:33:50.136566Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:33:50.136771Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:33:50.136875Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:33:50.136924Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:33:50.137001Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:33:50.137035Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:33:50.137226Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:33:50.137275Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:33:50.137592Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:33:50.137687Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:33:50.137739Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:33:50.137803Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:33:50.137845Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:33:50.137879Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:33:50.137909Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:33:50.137942Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:33:50.137985Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:33:50.138352Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:33:50.138396Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:33:50.138436Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:33:50.138513Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:33:50.138547Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:33:50.138660Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:33:50.138898Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:33:50.138961Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:33:50.139049Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:33:50.139091Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14:33:50.139130Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T14:33:50.139168Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T14:33:50.139206Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:33:50.139466Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T14:33:50.139527Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T14:33:50.139556Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T14:33:50.139609Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:33:50.139651Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T14:33:50.139676Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T14:33:50.139707Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T14:33:50.139734Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T14:33:50.139753Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T14:33:50.141106Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T14:33:50.141156Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:33:50.151798Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:33:50.151871Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:33:50.151906Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:33:50.151967Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... line.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037889 is DelayComplete 2025-06-25T14:34:01.675534Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037889 executing on unit CompleteOperation 2025-06-25T14:34:01.675567Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037889 to execution unit CompletedOperations 2025-06-25T14:34:01.675597Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037889 on unit CompletedOperations 2025-06-25T14:34:01.675637Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037889 is Executed 2025-06-25T14:34:01.675669Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037889 executing on unit CompletedOperations 2025-06-25T14:34:01.675693Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715668] at 72075186224037889 has finished 2025-06-25T14:34:01.675733Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:01.675755Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-25T14:34:01.675782Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-25T14:34:01.675807Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-25T14:34:01.686654Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:34:01.686734Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:34:01.686787Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715668] at 72075186224037889 on unit CompleteOperation 2025-06-25T14:34:01.686856Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715668] from 72075186224037889 at tablet 72075186224037889 send result 
to client [2:1101:2879], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:01.686910Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:34:01.689208Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:1101:2879], Recipient [2:926:2730]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715668 Cleared: true 2025-06-25T14:34:01.689264Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-25T14:34:01.689353Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 3500} 2025-06-25T14:34:01.689406Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:34:01.689435Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:34:01.689610Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:926:2730], Recipient [2:926:2730]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:34:01.689640Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:34:01.689689Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-25T14:34:01.689725Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:01.689762Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [3500:281474976715668] at 72075186224037890 for WaitForStreamClearance 2025-06-25T14:34:01.689789Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit WaitForStreamClearance 2025-06-25T14:34:01.689820Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [3500:281474976715668] at 72075186224037890 2025-06-25T14:34:01.689851Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Executed 2025-06-25T14:34:01.689879Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit WaitForStreamClearance 2025-06-25T14:34:01.689905Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037890 to execution unit ReadTableScan 2025-06-25T14:34:01.689929Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit ReadTableScan 2025-06-25T14:34:01.690143Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Continue 2025-06-25T14:34:01.690177Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:01.690201Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037890 2025-06-25T14:34:01.690225Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:52: TPlanQueueUnit at 72075186224037890 out-of-order limits 
exceeded 2025-06-25T14:34:01.690253Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-25T14:34:01.690973Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:1120:2896], Recipient [2:926:2730]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-25T14:34:01.691009Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-25T14:34:01.691217Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715668, MessageQuota: 1 2025-06-25T14:34:01.691860Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715668, Size: 54, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:34:01.760274Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715668, PendingAcks: 0 2025-06-25T14:34:01.760374Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715668, MessageQuota: 0 2025-06-25T14:34:01.762180Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-25T14:34:01.762226Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715668, at: 72075186224037890 2025-06-25T14:34:01.762671Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:926:2730], Recipient [2:926:2730]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:34:01.762710Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:34:01.762769Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-25T14:34:01.762807Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:01.762843Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [3500:281474976715668] at 72075186224037890 for ReadTableScan 2025-06-25T14:34:01.762871Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit ReadTableScan 2025-06-25T14:34:01.762904Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [3500:281474976715668] at 72075186224037890 error: , IsFatalError: 0 2025-06-25T14:34:01.762946Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Executed 2025-06-25T14:34:01.762976Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit ReadTableScan 2025-06-25T14:34:01.763003Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037890 to execution unit CompleteOperation 2025-06-25T14:34:01.763027Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit CompleteOperation 2025-06-25T14:34:01.763211Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is DelayComplete 
2025-06-25T14:34:01.763237Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit CompleteOperation 2025-06-25T14:34:01.763260Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715668] at 72075186224037890 to execution unit CompletedOperations 2025-06-25T14:34:01.763283Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715668] at 72075186224037890 on unit CompletedOperations 2025-06-25T14:34:01.763310Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715668] at 72075186224037890 is Executed 2025-06-25T14:34:01.763330Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715668] at 72075186224037890 executing on unit CompletedOperations 2025-06-25T14:34:01.763351Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715668] at 72075186224037890 has finished 2025-06-25T14:34:01.763376Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:01.763398Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-25T14:34:01.763424Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-25T14:34:01.763449Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-25T14:34:01.775438Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:34:01.775500Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:34:01.775533Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715668] at 72075186224037890 on unit CompleteOperation 2025-06-25T14:34:01.775585Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715668] from 72075186224037890 at tablet 72075186224037890 send result to client [2:1101:2879], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T14:34:01.775629Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::HealthCheckConfigUpdate [GOOD] Test command err: 2025-06-25T14:33:09.034468Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:09.034932Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:09.035182Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:09.035334Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:09.035624Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:09.035660Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d9c/r3tmp/tmpf9SFlS/pdisk_1.dat 2025-06-25T14:33:09.558731Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31498, node 1 TClient is connected to server localhost:16947 2025-06-25T14:33:10.136097Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:10.136149Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:10.136170Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:10.136569Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:18.884230Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:18.885201Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:18.885465Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:18.885706Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:18.886036Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:18.886100Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d9c/r3tmp/tmpeNYsHq/pdisk_1.dat 2025-06-25T14:33:19.333279Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30400, node 3 TClient is connected to server localhost:6040 2025-06-25T14:33:19.851948Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:19.852020Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:19.852055Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:19.852407Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:29.474215Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:29.474533Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:29.474704Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:29.476424Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:634:2322], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:29.476749Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:29.476888Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d9c/r3tmp/tmp3EJSfT/pdisk_1.dat 2025-06-25T14:33:29.919283Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25100, node 5 TClient is connected to server localhost:22300 2025-06-25T14:33:30.415222Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:30.415270Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:30.415301Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:30.415958Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 5 host: "::1" port: 12001 } 2025-06-25T14:33:40.756635Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:40.757338Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:40.757660Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:40.758129Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:40.758544Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:40.758683Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d9c/r3tmp/tmpu2Py1H/pdisk_1.dat 2025-06-25T14:33:41.332786Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62267, node 7 TClient is connected to server localhost:22916 2025-06-25T14:33:42.085716Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:42.085796Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:42.085847Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:42.086438Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-7" reason: "YELLOW-e9e2-1231c6b1-8" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-7" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 7 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-8" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 8 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 7 host: "::1" port: 12001 } 2025-06-25T14:33:49.025910Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:49.026074Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:49.026151Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d9c/r3tmp/tmpxPbwNP/pdisk_1.dat 2025-06-25T14:33:49.985011Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2800, node 9 TClient is connected to server localhost:4713 2025-06-25T14:33:50.664858Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:50.664937Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:50.664991Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:50.665610Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:59.350466Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:59.351066Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:59.351198Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d9c/r3tmp/tmpLZeE8j/pdisk_1.dat 2025-06-25T14:34:00.067678Z node 11 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23371, node 11 TClient is connected to server localhost:3730 2025-06-25T14:34:01.012255Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:01.020677Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:01.024625Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:01.037149Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> KqpSinkLocks::EmptyRangeAlreadyBroken [GOOD] >> KqpSinkLocks::EmptyRangeAlreadyBrokenOlap >> TGenCompaction::OverloadFactorDuringForceCompaction [GOOD] >> TGenCompaction::ForcedCompactionNoGenerations [GOOD] >> TGenCompaction::ForcedCompactionWithGenerations [GOOD] >> TGenCompaction::ForcedCompactionWithFinalParts [GOOD] >> TGenCompaction::ForcedCompactionByDeletedRows [GOOD] >> TGenCompaction::ForcedCompactionByUnreachableMvccData [GOOD] >> TGenCompaction::ForcedCompactionByUnreachableMvccDataRestart [GOOD] >> TGenCompaction::ForcedCompactionByUnreachableMvccDataBorrowed [GOOD] >> TIterator::Basics [GOOD] >> TIterator::External >> TFlatTableExecutor_IndexLoading::Scan_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_History_FlatIndex >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-2 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-3 >> TIterator::External [GOOD] >> TIterator::Single >> CommitOffset::Commit_FromSession_ToNewChild_WithoutCommitToParent [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitTwoPartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitInactivePartition >> TPartBtreeIndexIteration::FewNodes_Sticky [GOOD] >> TPartBtreeIndexIteration::FewNodes_Slices >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-5 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-6 >> TopicAutoscaling::PartitionSplit_AutosplitByLoad_AfterAlter [GOOD] >> TSchemeShardTopicSplitMergeTest::Boot >> TSchemeShardTopicSplitMergeTest::EnableSplitMerge [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithManyPartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:02.275996Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:02.276077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:02.276134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:02.276173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:02.276215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:02.276241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:02.276291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:02.283259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:02.284067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:02.284456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:02.532484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:02.532552Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:02.588218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:02.588787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:02.588963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:02.621543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:02.621853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:02.622447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:02.622702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:02.653065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:02.653281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 
2025-06-25T14:34:02.654505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:02.654566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:02.654700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:02.654755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:02.654798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:02.654886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:02.685261Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:03.231597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:03.231832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:03.232044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:03.232084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:03.236398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:03.236531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:03.238962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:03.239166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:03.239395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:34:03.239463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:03.239524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:03.239563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:03.245062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:03.245132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:03.245174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:03.247087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:03.247172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:03.247218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:03.247273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:03.250521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:03.261375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:03.261571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:03.262448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:03.262593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:03.262637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:03.262929Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:03.262984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:03.263159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:03.263241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:03.269290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:03.269351Z node 1 :FLAT_TX_SCHEMESHARD ... \252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 5 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:04.304044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:763:2058] recipient: [1:106:2139] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:766:2058] recipient: [1:15:2062] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:767:2058] recipient: [1:765:2671] Leader for TabletID 72057594046678944 is [1:768:2672] sender: [1:769:2058] recipient: [1:765:2671] 2025-06-25T14:34:04.344950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:04.345050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:04.345099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 
2025-06-25T14:34:04.345136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:04.345166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:04.345205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:04.345259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:04.345340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:04.346019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:04.346334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:04.360148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:04.361492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:04.361641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:04.361746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:04.361776Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:04.361876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:04.362569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-25T14:34:04.362649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:34:04.362685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: USER_1, child name: Topic1, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:34:04.362752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.362833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.363055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:34:04.363331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 
72057594046678944 2025-06-25T14:34:04.363403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:34:04.363604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.363699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.363810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 4, at schemeshard: 72057594046678944 2025-06-25T14:34:04.363856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:34:04.363883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:34:04.363903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-25T14:34:04.363924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:34:04.364025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.364112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.364288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 14, at schemeshard: 72057594046678944 2025-06-25T14:34:04.364609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:34:04.364980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.365089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.365458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.365551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.365772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.365857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.365960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.366136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 
2025-06-25T14:34:04.366219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.366407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.366624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.366690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.366855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.366898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.366961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.373028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:04.375349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:04.375430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:04.375647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:04.375699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:04.375743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:04.376838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 >> TSchemeShardTopicSplitMergeTest::SplitWithOnePartition [GOOD] >> TExecutorDb::FullScan [GOOD] >> TExecutorDb::CoordinatorSimulation >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-44 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-45 >> BasicUsage::ReadMirrored ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::EnableSplitMerge [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:00.252447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:00.252555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:00.252597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:00.252645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:00.252689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:00.252738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:00.252804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:00.252862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:00.253592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:00.253918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:00.473865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:00.473933Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:00.512631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:00.513105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:00.513307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:00.550599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:00.550970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:00.551673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:00.551975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:00.569137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:00.569354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:00.570623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:00.570685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2025-06-25T14:34:00.570846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:00.570917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:00.570988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:00.571088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:00.588793Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:00.844900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:00.845113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:00.845348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:00.845414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:00.845712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:00.845784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:00.853389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:00.853611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:00.853835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:00.853907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:00.853959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts 
opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:00.853997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:00.856203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:00.856268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:00.856321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:00.858205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:00.858250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:00.858292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:00.858351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:00.861830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:00.864098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:00.864338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:00.865269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:00.865488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:00.865553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:00.865891Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:00.865946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:00.866176Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:00.866267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:00.868459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:00.868506Z node 1 :FLAT_TX_SCHEMESHARD ... on is done id#105:0 progress is 1/1 2025-06-25T14:34:04.757129Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-25T14:34:04.757174Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: false 2025-06-25T14:34:04.757220Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-25T14:34:04.757267Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:0 2025-06-25T14:34:04.757300Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 105:0 2025-06-25T14:34:04.757440Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T14:34:04.757480Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 105, publications: 1, subscribers: 0 2025-06-25T14:34:04.757511Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-25T14:34:04.758293Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:34:04.758373Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:34:04.758406Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:34:04.758441Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-25T14:34:04.758476Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:34:04.758549Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-25T14:34:04.763913Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard 
Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-25T14:34:04.781338Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-25T14:34:04.781414Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-25T14:34:04.781859Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-25T14:34:04.781963Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:34:04.782000Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:766:2674] TestWaitNotification: OK eventTxId 105 2025-06-25T14:34:05.352987Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:05.353236Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 3 took 267us result status StatusSuccess 2025-06-25T14:34:05.353871Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } 
Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:05.423929Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:34:05.424164Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 363us result status StatusSuccess 2025-06-25T14:34:05.424837Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } 
YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >>>>> Verify partition 0 >>>>> Verify partition 1 >>>>> Verify partition 2 >> KqpTx::RollbackInvalidated [GOOD] >> TIterator::Single [GOOD] >> TIterator::SingleReverse >> TSchemeShardTopicSplitMergeTest::SplitInactivePartition [GOOD] |79.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |79.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithOnePartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:04.499416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:04.499494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, 
Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:04.499540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:04.499571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:04.499607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:04.499647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:04.499691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:04.499752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:04.500459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:04.500775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:04.585524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:04.585583Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:04.620721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:04.621198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:04.621367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:04.644726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:04.645073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:04.645667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:04.645918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:04.654151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:04.654331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:04.655524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:04.655582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:04.655709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:04.655755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:04.655814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:04.655894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.679217Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:04.806321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:04.806553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.806735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:04.806785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:04.806989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:04.807069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:04.809221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:04.809404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:04.809586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.809651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:04.809722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:04.809756Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:04.811440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.811488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:04.811528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:04.812999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.813061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.813101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:04.813154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:04.822215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:04.824214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:04.824440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:04.825292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:04.825422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:04.825466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:04.825731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:04.825777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:04.825921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path 
for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:04.826009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:04.827963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:04.828004Z node 1 :FLAT_TX_SCHEMESHARD ... plete 2025-06-25T14:34:05.615423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:05.615614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:05.615745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:05.615783Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:05.616022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:05.616707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-25T14:34:05.616778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:34:05.616811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: USER_1, child name: Topic1, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:34:05.616870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.616941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.617143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:34:05.617479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.617567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:34:05.617756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.617846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.617957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 4, at schemeshard: 72057594046678944 2025-06-25T14:34:05.618004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:34:05.618046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:34:05.618068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-25T14:34:05.618085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:34:05.618182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.618287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.618494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 14, at schemeshard: 72057594046678944 2025-06-25T14:34:05.618667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:34:05.619026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.619142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.619482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.619558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.619779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.619858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.619942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.620112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.620182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.620699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.620953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.621034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.621150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 
2025-06-25T14:34:05.621202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.621246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.626002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:05.628116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:05.628178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:05.628341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:05.628386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:05.628424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:05.629424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:742:2652] sender: [1:801:2058] recipient: [1:15:2062] 2025-06-25T14:34:05.709293Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:34:05.709607Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 303us result status StatusSuccess 2025-06-25T14:34:05.710297Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "\177" } Status: Active ParentPartitionIds: 0 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\177" } Status: Active ParentPartitionIds: 0 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" 
AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { ToBound: "\177" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { FromBound: "\177" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTopicSplitMergeTest::CreateTopicWithOnePartition [GOOD] >> TSchemeShardTopicSplitMergeTest::DisableSplitMerge >> BuildStatsHistogram::Ten_Mixed [GOOD] >> BuildStatsHistogram::Ten_Serial >> TSchemeShardTopicSplitMergeTest::Boot [GOOD] >> TSchemeShardTopicSplitMergeTest::CreateTopicWithManyPartition >> KqpTx::RollbackTx2 [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitWithWrongPartition >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3+withSink [GOOD] >> TSchemeShardTopicSplitMergeTest::MargePartitions >> TSchemeShardTopicSplitMergeTest::MargeUnorderedPartitions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitInactivePartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:03.396450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:03.396555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:03.396594Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:03.396629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:03.396670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:03.396702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:03.396759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:03.396844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:03.397595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:03.397933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:03.682610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:03.682680Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:03.751184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:03.751622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:03.751785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:03.774997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:03.775349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:03.775958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:03.776199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:03.799137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:03.799315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:03.800350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:03.800405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:03.800531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 
2025-06-25T14:34:03.800581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:03.800622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:03.800710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:03.817657Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:04.037446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:04.037732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.037993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:04.038052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:04.038346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:04.038438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:04.044047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:04.044261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:04.044499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.044576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:04.044620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:04.044656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 
2025-06-25T14:34:04.051158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.051251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:04.051305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:04.058716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.058801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:04.058855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:04.058924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:04.062943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:04.065316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:04.065543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:04.066707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:04.066867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:04.066921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:04.067270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:04.067332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:04.067527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:04.067633Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:04.073265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:04.073352Z node 1 :FLAT_TX_SCHEMESHARD ... AT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:647: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionAttachResult CollectPQConfigChanged: false 2025-06-25T14:34:06.328258Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:753: NPQState::TPropose operationId# 105:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-25T14:34:06.330207Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-25T14:34:06.330555Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-25T14:34:06.330606Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-25T14:34:06.331104Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 105, at schemeshard: 72057594046678944 2025-06-25T14:34:06.331177Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 105, ready parts: 0/1, is published: true 2025-06-25T14:34:06.331224Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 105, at schemeshard: 72057594046678944 2025-06-25T14:34:06.385768Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 200, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:06.385960Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 105 AckTo { RawX1: 0 RawX2: 0 } } Step: 200 MediatorID: 72075186233409547 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:06.386036Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:661: NPQState::TPropose operationId# 105:0 HandleReply TEvOperationPlan, step: 200, at tablet: 72057594046678944 2025-06-25T14:34:06.386097Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:753: NPQState::TPropose operationId# 105:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-25T14:34:06.457657Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409548, partId: 0 2025-06-25T14:34:06.457858Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-06-25T14:34:06.457948Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:623: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 
message# Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-06-25T14:34:06.458017Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:264: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 105:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:06.458065Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:628: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-25T14:34:06.458285Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 105:0 128 -> 240 2025-06-25T14:34:06.458499Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:34:06.462151Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-25T14:34:06.463064Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:06.463129Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:34:06.463479Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:06.463535Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:210:2210], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-06-25T14:34:06.463648Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-25T14:34:06.463705Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 105:0 ProgressState 2025-06-25T14:34:06.463835Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 1/1 2025-06-25T14:34:06.463887Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-25T14:34:06.463937Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 1/1 2025-06-25T14:34:06.463975Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-25T14:34:06.464023Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: false 2025-06-25T14:34:06.464071Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-25T14:34:06.464117Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:0 2025-06-25T14:34:06.464157Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 105:0 2025-06-25T14:34:06.464429Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T14:34:06.464483Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 105, publications: 1, subscribers: 1 2025-06-25T14:34:06.464530Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-25T14:34:06.465755Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:34:06.465881Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:34:06.465930Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:34:06.465977Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-25T14:34:06.466030Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:34:06.466133Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 1 2025-06-25T14:34:06.466184Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [2:415:2379] 2025-06-25T14:34:06.472984Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:34:06.473128Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:34:06.473177Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:674:2595] TestWaitNotification: OK eventTxId 105 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "W" } TestModificationResults wait txId: 106 2025-06-25T14:34:06.483956Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "W" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:06.484214Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-06-25T14:34:06.484458Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:135: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Invalid partition status: 2, at schemeshard: 72057594046678944 2025-06-25T14:34:06.489424Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Invalid partition status: 2" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:06.489786Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Invalid partition status: 2, operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-25T14:34:06.490201Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-25T14:34:06.490255Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-25T14:34:06.490729Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-25T14:34:06.490843Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T14:34:06.490888Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:766:2674] TestWaitNotification: OK eventTxId 106 >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::RollbackInvalidated [GOOD] Test command err: Trying to start YDB, gRPC: 14864, MsgBus: 1701 2025-06-25T14:33:48.803458Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895153889553452:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:48.811300Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001302/r3tmp/tmpv4P7LB/pdisk_1.dat 2025-06-25T14:33:49.710311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:49.710389Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:49.777697Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:49.780477Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895153889553216:2080] 1750862028637455 != 1750862028637458 2025-06-25T14:33:49.802353Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:33:49.804008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14864, node 1 2025-06-25T14:33:50.016892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, 
broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:50.016912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:50.016917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:50.017036Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1701 TClient is connected to server localhost:1701 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:50.992514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:51.008822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:33:51.020544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:51.206495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:51.447917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:33:51.571953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:53.801622Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895153889553452:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:53.801684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:54.096575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895179659358628:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:54.096671Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:54.687092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:54.769862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:54.868142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:54.915778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:54.976535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:55.071439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:55.175216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:55.304692Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895183954326593:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:55.304782Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:55.305039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895183954326598:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:55.309396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:55.333997Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895183954326600:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:33:55.389345Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895183954326651:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:56.999103Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NzFlYjJlOTYtMWRlMTI4ZTgtNjU5M2ZkMTMtYzQwMGM1Nzc=, ActorId: [1:7519895188249294242:2481], ActorState: ReadyState, TraceId: 01jykr4zzm16ad9rxq16gmz56s, Create QueryResponse for error on request, msg: Trying to start ... 5T14:33:58.488220Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895195236504627:2080] 1750862038124128 != 1750862038124131 2025-06-25T14:33:58.511343Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28077, node 2 2025-06-25T14:33:58.728603Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:58.728625Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:58.728632Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:58.728739Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:59.132468Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5022 TClient is connected to server localhost:5022 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:59.955409Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:33:59.964867Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:33:59.975997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:00.089758Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:00.371733Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:00.487362Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:03.190779Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519895195236504862:2243];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:03.190843Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:34:03.444620Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895216711342760:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:03.444708Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:03.513437Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.592830Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.653745Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.730521Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.807398Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.902396Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.989970Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:04.093907Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895221006310726:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:04.093977Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:04.094344Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895221006310731:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:04.104977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:04.122162Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:34:04.124744Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519895221006310733:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:34:04.231355Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519895221006310786:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:05.783839Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519895225301278401:2489], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[/Root/BadTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:34:05.784621Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MjFhYTc4YzctMzliOWM3ZTYtODc3M2M2NGUtMThhYmM0YjQ=, ActorId: [2:7519895225301278369:2479], ActorState: ExecuteState, TraceId: 01jykr58gjeq59rhscggzad0cn, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 01jykr58fa5a24sp5kddk2tp7p 2025-06-25T14:34:05.810311Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MjFhYTc4YzctMzliOWM3ZTYtODc3M2M2NGUtMThhYmM0YjQ=, ActorId: [2:7519895225301278369:2479], ActorState: ReadyState, TraceId: 01jykr58khd9rh0wzeph9efv4p, Create QueryResponse for error on request, msg: >> TTxDataShardUploadRows::TestUploadRowsLocks [GOOD] |79.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |79.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |79.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export >> TSchemeShardTopicSplitMergeTest::CreateTopicWithManyPartition [GOOD] >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption [GOOD] >> TIterator::SingleReverse [GOOD] >> TIterator::Mixed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 27114, MsgBus: 12275 2025-06-25T14:33:46.021173Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895145409625670:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:46.053156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001312/r3tmp/tmp4mwUl6/pdisk_1.dat 2025-06-25T14:33:46.664701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:46.664776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:46.674755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:46.728464Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895141114658346:2080] 1750862026006313 != 1750862026006316 2025-06-25T14:33:46.755001Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27114, node 1 2025-06-25T14:33:46.956087Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:46.956105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:46.956114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:46.956207Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration 2025-06-25T14:33:47.048498Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12275 TClient is connected to server localhost:12275 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:48.590452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:48.611263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:33:48.619939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:48.880758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:49.199348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:49.359001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:33:51.028419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895145409625670:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:51.028502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:51.786220Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895166884463770:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:51.786324Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:52.246474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:52.297147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:52.370491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:52.404057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:52.448224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:52.513457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:52.607631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:52.723829Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895171179431738:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:52.723930Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:52.724186Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895171179431743:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:52.756003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:52.777314Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895171179431745:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:33:52.865793Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895171179431798:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:57.307291Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MThlYWJhY2UtMjZmOTE2ZDItMTdhZDhjNC1mZjUxNjQ5OA==, ActorId: [1:7519895179769366638:2473], ActorState: ExecuteState, TraceId: 01jykr4zxb92rjabdxadcgpzkk, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken Trying to start YDB, gRPC: 5762, MsgBus: 28041 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001312/r3tmp/tmpVzRi6B/pdisk_1.dat 2025-06-25T14:33:58.779930Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:58.852431Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895193174865490:2080] 1750862038596358 != 1750862038596361 2025-06-25T14:33:58.878190Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:58.886051Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:58.886140Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:58.897973Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5762, node 2 2025-06-25T14:33:59.087692Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:59.087714Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:59.087721Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:59.087825Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28041 2025-06-25T14:33:59.652760Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28041 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:00.286138Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:00.297962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:34:00.307720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:00.451667Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:00.676602Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:00.814156Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:03.288489Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895214649703592:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:03.288632Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:03.450369Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.596248Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.658351Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.710153Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.752219Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.851895Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.934531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:04.014391Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895218944671556:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:04.014459Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:04.014544Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895218944671561:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:04.019047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:04.036430Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519895218944671563:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:34:04.092845Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519895218944671614:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:06.332682Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=N2FmYjI5NWUtNzgxMmZhYy1kOWMzYWI5Ni0xMGYwZDFiMA==, ActorId: [2:7519895223239639182:2476], ActorState: ExecuteState, TraceId: 01jykr590hfc9106ptp6txg7pa, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-26 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-27 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::RollbackTx2 [GOOD] Test command err: Trying to start YDB, gRPC: 23595, MsgBus: 19160 2025-06-25T14:33:46.454338Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895142090326640:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:46.454536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00130c/r3tmp/tmpLJkSNT/pdisk_1.dat 2025-06-25T14:33:47.177239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:47.177341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:47.201012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:47.272104Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895142090326419:2080] 1750862026343503 != 1750862026343506 2025-06-25T14:33:47.287895Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23595, node 1 2025-06-25T14:33:47.443473Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:33:47.560767Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:47.560796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:47.560805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:47.560903Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19160 TClient is connected to server localhost:19160 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:49.318656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:49.385271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:33:49.415263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:33:49.776736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:49.964782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:50.074927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:33:51.442913Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895142090326640:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:51.442995Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:52.441276Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895167860131853:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:52.441387Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:52.982910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:53.060024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:53.121723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:53.190221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:53.246717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:53.319429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:53.455173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:53.605343Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895172155099815:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:53.605482Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:53.607047Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895172155099820:2441], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:53.617287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:53.644579Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895172155099822:2442], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:33:53.704280Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895172155099873:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:56.387077Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NzM0YjViOC1lMWNiZTVjYS1iODY0YjYyYy0zYTE0NDlkYg==, ActorId: [1:7519895180745034762:2483], ActorState: ReadyState, TraceId: 01jykr4zcrfmkz7v7ba97dehy2, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 6025, MsgBus: 17500 2025-06-25T14:33:57.594857Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895189813960221:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:57.610636Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00130c/r3tmp/tmp11fxfD/pdisk_1.dat 2025-06-25T14:33:57.885651Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:57.885733Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:57.920264Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:57.929044Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6025, node 2 2025-06-25T14:33:58.096791Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:58.096813Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:58.096821Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:58.096937Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17500 2025-06-25T14:33:58.576988Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17500 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:59.180525Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:59.188575Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:33:59.200538Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:59.322523Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:59.538149Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:59.675448Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:34:02.596455Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519895189813960221:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:02.596522Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:34:03.429441Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895215583765571:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:03.429518Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:03.554568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.617651Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.703736Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.761073Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.838300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.921518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:03.992144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:04.095364Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895219878733529:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:04.095433Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:04.095608Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895219878733534:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:04.098749Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:04.116703Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519895219878733536:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:34:04.207685Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519895219878733587:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:06.166254Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MmRlMDMwZmUtMmM0NTY3YTctMjhmYzhjZTktN2I3MGU4NjE=, ActorId: [2:7519895224173701178:2479], ActorState: ReadyState, TraceId: 01jykr58yn37br9afd21zydak8, Create QueryResponse for error on request, msg: >> KqpRm::Reduce ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::PartitionSplit_AutosplitByLoad_AfterAlter [GOOD] Test command err: 2025-06-25T14:30:45.872246Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894365195827806:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:45.872565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:46.087597Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001900/r3tmp/tmpFo8Zsz/pdisk_1.dat 2025-06-25T14:30:46.342187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:46.342277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:46.353230Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:46.393491Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22932, node 1 2025-06-25T14:30:46.618484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001900/r3tmp/yandex4o4M1R.tmp 2025-06-25T14:30:46.618521Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001900/r3tmp/yandex4o4M1R.tmp 2025-06-25T14:30:46.618751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001900/r3tmp/yandex4o4M1R.tmp 2025-06-25T14:30:46.618860Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:46.678380Z INFO: TTestServer started on Port 8806 GrpcPort 22932 2025-06-25T14:30:46.881254Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8806 PQClient connected to localhost:22932 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:47.090004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:47.106173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:47.133416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:30:47.143415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:30:47.315356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:47.325757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-25T14:30:49.748664Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894382375697622:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.748839Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.751892Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894382375697649:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.756816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:49.771055Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894382375697651:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:30:50.014781Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894382375697715:2444] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:50.060881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:50.161561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:50.304625Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894386670665040:2313], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:50.306630Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NzhmNTRjNjAtODFkMDlmMzMtZTdlYzczY2QtM2U2YmZlNWI=, ActorId: [1:7519894382375697619:2298], ActorState: ExecuteState, TraceId: 01jykqz936e7zwgydxw0a8h13m, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:50.312330Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:50.353542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519894386670665318:2624] 2025-06-25T14:30:50.872464Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894365195827806:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:50.872547Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-25T14:30:55.913800Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-25T14:30:55.939561Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:30:55.940987Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519894408145501953:2684], Recipient [1:7519894369490795305:2161]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:55.941023Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:55.941041Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:30:55.941080Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519894408145501949:2681], Recipient [1:7519894369490795305:2161]: {TEvModifySchemeTransaction txid# 281474976715672 TabletId# 72057594046644480} 2025-06-25T14:30:55.941097Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:30:56.007332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup Creat ... Sender [0:0:0], Recipient [7:7519895194203095520:2974], Cookie: 0 2025-06-25T14:34:03.832748Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895194203095520:2974]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.832784Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.832809Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:03.832853Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:03.832880Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:03.832901Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:03.836716Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895172728258417:2828], Partition 1, Sender [0:0:0], Recipient [7:7519895172728258493:2836], Cookie: 0 2025-06-25T14:34:03.836805Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895172728258493:2836]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.836838Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.836891Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:03.836964Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:03.836995Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:03.837038Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:03.837099Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895108303747498:2424], Partition 0, Sender [0:0:0], Recipient [7:7519895108303747554:2428], Cookie: 0 2025-06-25T14:34:03.837137Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895108303747554:2428]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.837153Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.837179Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:03.837211Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:03.837228Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:03.837246Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:03.837290Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895172728258413:2827], Partition 2, Sender [0:0:0], Recipient [7:7519895172728258491:2834], Cookie: 0 2025-06-25T14:34:03.837324Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895172728258491:2834]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.837338Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.837363Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:03.837395Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:03.837412Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:03.837429Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:34:03.900541Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 65538, Sender [0:0:0], Recipient [7:7519895194203095408:2957]: NActors::TEvents::TEvWakeup 2025-06-25T14:34:03.900715Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 65538, Sender [0:0:0], Recipient [7:7519895194203095409:2958]: NActors::TEvents::TEvWakeup 2025-06-25T14:34:03.934714Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895194203095409:2958], Partition 3, Sender [0:0:0], Recipient [7:7519895194203095522:2976], Cookie: 0 2025-06-25T14:34:03.934809Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895194203095522:2976]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.934848Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.934909Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:03.934992Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:03.935039Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:03.935073Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:03.935124Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895194203095408:2957], Partition 4, Sender [0:0:0], Recipient [7:7519895194203095520:2974], Cookie: 0 2025-06-25T14:34:03.935157Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895194203095520:2974]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.935172Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.935198Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:03.935228Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:03.935245Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:03.935262Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:34:03.940518Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895172728258417:2828], Partition 1, Sender [0:0:0], Recipient [7:7519895172728258493:2836], Cookie: 0 2025-06-25T14:34:03.940603Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895172728258493:2836]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.940638Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.940705Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:03.940790Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:03.940823Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:03.940856Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:03.940917Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895108303747498:2424], Partition 0, Sender [0:0:0], Recipient [7:7519895108303747554:2428], Cookie: 0 2025-06-25T14:34:03.940954Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895108303747554:2428]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.940971Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.940999Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:03.941036Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:03.941052Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:03.941073Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:03.941114Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895172728258413:2827], Partition 2, Sender [0:0:0], Recipient [7:7519895172728258491:2834], Cookie: 0 2025-06-25T14:34:03.941146Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895172728258491:2834]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.941161Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.941184Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:03.941220Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:03.941237Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:03.941255Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 >> TSchemeShardTopicSplitMergeTest::MargePartitions [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_History_FlatIndex [GOOD] >> TSchemeShardTopicSplitMergeTest::MargeNotAdjacentRangePartitions >> TFlatTableExecutor_IndexLoading::Scan_History_BTreeIndex >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::CreateTopicWithManyPartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:06.293306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:06.293397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:06.293441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:06.293474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:06.293518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:06.293541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:06.293599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:06.293689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:06.294458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:06.294777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:06.388670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:06.388752Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:06.415136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: 
TxInitSchema.Complete 2025-06-25T14:34:06.415599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:06.415764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:06.421878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:06.422221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:06.422884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:06.423173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:06.426548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:06.426719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:06.427873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:06.427926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:06.428058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:06.428108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:06.428146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:06.428222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:06.435479Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:06.644997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:06.645225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:06.645410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:06.645454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: 
[OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:06.645679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:06.645745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:06.648023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:06.648206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:06.648460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:06.648538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:06.648593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:06.648631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:06.650507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:06.650584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:06.650690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:06.652240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:06.652287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:06.652347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:06.652409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:06.655865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:06.658051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from 
tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:06.658287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:06.659369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:06.659497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:06.659539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:06.659793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:06.659839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:06.659983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:06.660062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:06.662167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:06.662213Z node 1 :FLAT_TX_SCHEMESHARD ... 
RN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:08.207069Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:08.207307Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:08.208204Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-25T14:34:08.208348Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:34:08.208397Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: USER_1, child name: Topic1, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:34:08.208476Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.208550Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.208793Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:34:08.209124Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.209208Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:34:08.209431Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.209540Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.209659Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 4, at schemeshard: 72057594046678944 2025-06-25T14:34:08.209720Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:34:08.209753Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:34:08.209782Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-25T14:34:08.209803Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:34:08.209907Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.210006Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.210252Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 14, at schemeshard: 72057594046678944 2025-06-25T14:34:08.210456Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:34:08.210818Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.210949Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.211362Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.211442Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.211683Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.211772Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.211878Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.212064Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.212143Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.212410Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.212661Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.212735Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.212858Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.212925Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.212987Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.220852Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:08.224949Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:08.225055Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:08.225576Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: 
TTxServerlessStorageBilling.Execute 2025-06-25T14:34:08.225644Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:08.225709Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:08.227178Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [2:644:2562] sender: [2:705:2058] recipient: [2:15:2062] 2025-06-25T14:34:08.301368Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:34:08.301694Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 375us result status StatusSuccess 2025-06-25T14:34:08.302440Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 1024 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 1 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 1024 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "UUUUUUUUUUUUUUUT" } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "UUUUUUUUUUUUUUUT" ToBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 
Status: Active KeyRange { FromBound: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTopicSplitMergeTest::SplitWithWrongPartition [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-26 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-27 >> DataShardVolatile::UpsertNoLocksArbiterRestart-UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiterRestart+UseSink >> TSchemeShardTopicSplitMergeTest::MargeUnorderedPartitions [GOOD] >> TSchemeShardTopicSplitMergeTest::MargePartitions2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::TestUploadRowsLocks [GOOD] Test command err: 2025-06-25T14:33:50.053492Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:50.053638Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:50.053759Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c76/r3tmp/tmptT3KM9/pdisk_1.dat 2025-06-25T14:33:50.490097Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:33:50.493174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:33:50.573186Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:50.578252Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862025277677 != 1750862025277681 2025-06-25T14:33:50.626303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:50.626459Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:50.640697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:50.732131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:50.804451Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:641:2540] 2025-06-25T14:33:50.804689Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:33:50.869208Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:33:50.869441Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:33:50.871098Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:33:50.871168Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:33:50.871244Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:33:50.871570Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:33:50.871946Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:33:50.872159Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:677:2540] in generation 1 2025-06-25T14:33:50.873684Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:647:2542] 2025-06-25T14:33:50.873877Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:33:50.882762Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:33:50.883030Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:33:50.884383Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T14:33:50.884457Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T14:33:50.884507Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T14:33:50.884765Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:33:50.884970Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:33:50.885023Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:685:2542] in generation 1 2025-06-25T14:33:50.887654Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [1:651:2544] 2025-06-25T14:33:50.887883Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:33:50.896880Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:653:2546] 2025-06-25T14:33:50.897104Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:33:50.905654Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:33:50.905854Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:33:50.907260Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037891 2025-06-25T14:33:50.907331Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037891 2025-06-25T14:33:50.907393Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037891 2025-06-25T14:33:50.907706Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:33:50.907876Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:33:50.907932Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037891 persisting started state actor id [1:712:2544] in generation 1 2025-06-25T14:33:50.908246Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:33:50.908366Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:33:50.909565Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-25T14:33:50.909633Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-25T14:33:50.909689Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-25T14:33:50.909949Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T14:33:50.910022Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:33:50.910071Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:713:2546] in generation 1 2025-06-25T14:33:50.921166Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:33:50.954775Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:33:50.954994Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:33:50.955112Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:718:2581] 2025-06-25T14:33:50.955164Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:33:50.955208Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:33:50.955264Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:33:50.955368Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:33:50.955402Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T14:33:50.955472Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:33:50.955518Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:719:2582] 2025-06-25T14:33:50.955545Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T14:33:50.955566Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T14:33:50.955581Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:33:50.956030Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:33:50.956071Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037891 2025-06-25T14:33:50.956124Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037891 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:33:50.956177Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [1:720:2583] 2025-06-25T14:33:50.956200Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-06-25T14:33:50.956219Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037891, state: WaitScheme 2025-06-25T14:33:50.956236Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-25T14:33:50.956299Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 
2025-06-25T14:33:50.956346Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-25T14:33:50.956420Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:33:50.956474Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:721:2584] 2025-06-25T14:33:50.956493Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-25T14:33:50.956511Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-25T14:33:50.956529Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:33:50.956760Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:33:50.956875Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets t ... ess_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:06.143148Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:06.143585Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:06.143969Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:06.152777Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:06.152851Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:06.153266Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:06.153346Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:06.154600Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:06.154650Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:06.154696Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:06.154760Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:06.154812Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:06.155178Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:06.156979Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain 
[OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:06.158890Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:06.158967Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:06.159400Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:06.196117Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:06.196227Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:704:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:06.196320Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:06.201559Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:06.208561Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:06.260943Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:06.394315Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:06.406560Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:34:06.617632Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:07.094789Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykr58zj21k3vfswjw335h29, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWVhYWNiMDAtNzUwZDZhMGEtYzY1NWNkYjUtNzdhOTAyMGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:07.100295Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:809:2640], serverId# [3:810:2641], sessionId# [0:0:0] 2025-06-25T14:34:07.100768Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-25T14:34:07.100930Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=3 2025-06-25T14:34:07.111988Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:07.478247Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykr59xf8rvvqnayk8fbw5nh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjIzYjg1ZmEtNWE0Mzg3ZjQtYWYzYmIzZmUtYTE5ZjFhYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:07.484439Z node 3 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715661, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] { items { uint32_value: 300 } } 2025-06-25T14:34:07.494211Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(36) Execute: at tablet# 72075186224037888 2025-06-25T14:34:07.509035Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(36) Complete: at tablet# 72075186224037888 2025-06-25T14:34:07.509130Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:07.509212Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-25T14:34:07.509888Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-25T14:34:07.509960Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:07.629336Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykr5a8pedr9hfyanhsy1r97, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjIzYjg1ZmEtNWE0Mzg3ZjQtYWYzYmIzZmUtYTE5ZjFhYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:34:07.632134Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:5] at 72075186224037888 2025-06-25T14:34:07.632295Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=5; 2025-06-25T14:34:07.654196Z node 3 :TX_DATASHARD INFO: datashard_write_operation.cpp:707: Write transaction 5 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-06-25T14:34:07.654486Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 5 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-25T14:34:07.654696Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-25T14:34:07.654796Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:07.655066Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [3:867:2646], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:816:2646]Got LOCKS BROKEN for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:867:2646].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-25T14:34:07.655602Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:860:2646], SessionActorId: [3:816:2646], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:816:2646]. isRollback=0 2025-06-25T14:34:07.656002Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=NjIzYjg1ZmEtNWE0Mzg3ZjQtYWYzYmIzZmUtYTE5ZjFhYjI=, ActorId: [3:816:2646], ActorState: ExecuteState, TraceId: 01jykr5a8pedr9hfyanhsy1r97, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:861:2646] from: [3:860:2646] 2025-06-25T14:34:07.656288Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:6] at 72075186224037888 2025-06-25T14:34:07.660474Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip empty write operation for [0:6] at 72075186224037888 2025-06-25T14:34:07.660751Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:07.660947Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:861:2646] TxId: 281474976715662. Ctx: { TraceId: 01jykr5a8pedr9hfyanhsy1r97, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjIzYjg1ZmEtNWE0Mzg3ZjQtYWYzYmIzZmUtYTE5ZjFhYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T14:34:07.661305Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NjIzYjg1ZmEtNWE0Mzg3ZjQtYWYzYmIzZmUtYTE5ZjFhYjI=, ActorId: [3:816:2646], ActorState: ExecuteState, TraceId: 01jykr5a8pedr9hfyanhsy1r97, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> CommitOffset::Commit_FromSession_ToNewChild_WithoutCommitToParent [GOOD] Test command err: 2025-06-25T14:30:45.215527Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894364891566033:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:45.215842Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:45.538582Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001901/r3tmp/tmpnnm5CS/pdisk_1.dat 2025-06-25T14:30:45.889609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:45.889688Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:45.913765Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:45.920703Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894364891565828:2080] 1750861845124839 != 1750861845124842 2025-06-25T14:30:45.948071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21645, node 1 2025-06-25T14:30:46.148756Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:46.220868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001901/r3tmp/yandexv2qwbA.tmp 2025-06-25T14:30:46.220904Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001901/r3tmp/yandexv2qwbA.tmp 2025-06-25T14:30:46.221072Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001901/r3tmp/yandexv2qwbA.tmp 2025-06-25T14:30:46.221200Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:46.305274Z INFO: TTestServer started on Port 18325 GrpcPort 21645 TClient is connected to server localhost:18325 PQClient connected to localhost:21645 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:46.748243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:46.774190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:46.782161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:30:46.788287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:30:46.964933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:46.988383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-25T14:30:49.455270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894382071435796:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.455391Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.457991Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894382071435820:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:49.462394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:49.475161Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894382071435825:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:30:49.536586Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894382071435889:2446] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:49.857747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:49.867729Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894382071435897:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:49.874247Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OWNjNWNjNmUtOTEwZjMzODItYzNkZjI4NDAtZmM1ZjFkYzM=, ActorId: [1:7519894382071435793:2299], ActorState: ExecuteState, TraceId: 01jykqz8tvaj81be5g293jm0a2, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:49.880456Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:49.921144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:50.092032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:50.196522Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894364891566033:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:50.196643Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519894386366403490:2624] === CheckClustersList. 
Ok 2025-06-25T14:30:56.824887Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-25T14:30:56.846512Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:30:56.847694Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519894412136207464:2702], Recipient [1:7519894364891566204:2177]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:56.847719Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:56.847752Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:30:56.847792Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519894412136207460:2699], Recipient [1:7519894364891566204:2177]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-06-25T14:30:56.847811Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14: ... PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:03.957389Z node 8 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519895188737873632:2823], Partition 1, Sender [0:0:0], Recipient [8:7519895188737873708:2831], Cookie: 0 2025-06-25T14:34:03.957422Z node 8 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519895188737873708:2831]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.957436Z node 8 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.957459Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:03.957492Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:03.957508Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:03.957525Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:34:03.957566Z node 8 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519895188737873631:2822], Partition 2, Sender [0:0:0], Recipient [8:7519895188737873704:2829], Cookie: 0 2025-06-25T14:34:03.957597Z node 8 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519895188737873704:2829]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.957611Z node 8 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:03.957633Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:03.957661Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:03.957675Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:03.957691Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:04.000776Z node 8 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [8:7519895188737873631:2822], Partition 2, Sender [8:7519895188737873713:2835], Recipient [8:7519895188737873704:2829], Cookie: 0 2025-06-25T14:34:04.000851Z node 8 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188544, Sender [8:7519895188737873713:2835], Recipient [8:7519895188737873704:2829]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-25T14:34:04.000880Z node 8 :PERSQUEUE TRACE: partition.h:630: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-25T14:34:04.000927Z node 8 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [8:7519895188737873632:2823], Partition 1, Sender [8:7519895188737873721:2836], Recipient [8:7519895188737873708:2831], Cookie: 0 2025-06-25T14:34:04.000971Z node 8 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188544, Sender [8:7519895188737873721:2836], Recipient [8:7519895188737873708:2831]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-25T14:34:04.000983Z node 8 :PERSQUEUE TRACE: partition.h:630: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-25T14:34:04.057162Z node 8 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519895137198264743:2428], Partition 0, Sender [0:0:0], Recipient [8:7519895137198264784:2431], Cookie: 0 2025-06-25T14:34:04.057261Z node 8 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519895137198264784:2431]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:04.057301Z node 8 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:04.057366Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:04.057463Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: 
StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:04.057502Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:04.057546Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:04.057614Z node 8 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519895188737873632:2823], Partition 1, Sender [0:0:0], Recipient [8:7519895188737873708:2831], Cookie: 0 2025-06-25T14:34:04.057652Z node 8 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519895188737873708:2831]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:04.057667Z node 8 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:04.057702Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:04.057739Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:04.057758Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:04.057780Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:04.057829Z node 8 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519895188737873631:2822], Partition 2, Sender [0:0:0], Recipient [8:7519895188737873704:2829], Cookie: 0 2025-06-25T14:34:04.057866Z node 8 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519895188737873704:2829]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:04.057882Z node 8 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:04.057910Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:04.057943Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:04.057960Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:04.057979Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:34:04.158035Z node 8 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519895137198264743:2428], Partition 0, Sender [0:0:0], Recipient [8:7519895137198264784:2431], Cookie: 0 2025-06-25T14:34:04.158126Z node 8 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519895137198264784:2431]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:04.158159Z node 8 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:04.158211Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:04.158296Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:04.158330Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:04.158368Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:04.158422Z node 8 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519895188737873632:2823], Partition 1, Sender [0:0:0], Recipient [8:7519895188737873708:2831], Cookie: 0 2025-06-25T14:34:04.158460Z node 8 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519895188737873708:2831]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:04.158477Z node 8 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:04.158504Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:04.158542Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:04.158560Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:04.158581Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:04.158653Z node 8 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7519895188737873631:2822], Partition 2, Sender [0:0:0], Recipient [8:7519895188737873704:2829], Cookie: 0 2025-06-25T14:34:04.158694Z node 8 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7519895188737873704:2829]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:04.158709Z node 8 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:04.158739Z node 8 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:04.158778Z node 8 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:04.158794Z node 8 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:04.158813Z node 8 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 >> KqpRm::SingleSnapshotByExchanger >> TSchemeShardTopicSplitMergeTest::DisableSplitMerge [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption [GOOD] Test command err: 2025-06-25T14:33:50.183365Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:50.183535Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:50.183613Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c6b/r3tmp/tmpvDWaEr/pdisk_1.dat 2025-06-25T14:33:50.564152Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:33:50.567543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:33:50.606362Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:50.611712Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862025371790 != 1750862025371794 2025-06-25T14:33:50.659759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:50.659932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:50.671664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:50.759205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:50.810044Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:638:2539] 2025-06-25T14:33:50.810339Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:33:50.857682Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:33:50.857849Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:33:50.859702Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:33:50.859795Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:33:50.859885Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:33:50.860337Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:33:50.861536Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:33:50.861619Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:665:2539] in generation 1 2025-06-25T14:33:50.862349Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:642:2541] 2025-06-25T14:33:50.862583Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:33:50.871746Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:33:50.871934Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:33:50.873490Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T14:33:50.873582Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T14:33:50.873653Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T14:33:50.873960Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:33:50.874085Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:33:50.874145Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:672:2541] in generation 1 2025-06-25T14:33:50.885078Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:33:50.915630Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:33:50.915897Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:33:50.916085Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:675:2560] 2025-06-25T14:33:50.916128Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:33:50.916166Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:33:50.916228Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:33:50.916641Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:33:50.916693Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T14:33:50.916757Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:33:50.916814Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:676:2561] 2025-06-25T14:33:50.916858Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T14:33:50.916884Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T14:33:50.916910Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:33:50.917362Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 
2025-06-25T14:33:50.917510Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:33:50.917690Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:33:50.917745Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:33:50.917797Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:33:50.917839Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:33:50.917889Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-25T14:33:50.917948Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-25T14:33:50.918405Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:631:2535], serverId# [1:649:2545], sessionId# [0:0:0] 2025-06-25T14:33:50.918466Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:33:50.918502Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:33:50.918557Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-25T14:33:50.918601Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:33:50.918764Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:33:50.919036Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:33:50.919161Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:33:50.919678Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:632:2536], serverId# [1:657:2551], sessionId# [0:0:0] 2025-06-25T14:33:50.919896Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:33:50.920081Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-25T14:33:50.920154Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-25T14:33:50.922205Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:33:50.922328Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T14:33:50.934325Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:33:50.934446Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:33:50.935064Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-25T14:33:50.935117Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-25T14:33:51.107401Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:694:2573], serverId# [1:697:2576], sessionId# [0:0:0] 2025-06-25T14:33:51.107578Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:695:2574], serverId# [1:698:2577], sessionId# [0:0:0] 2025-06-25T14:33:51.125188Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... t finished with status SCHEME_ERROR 2025-06-25T14:34:07.780184Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [3:61:2108] Handle TEvExecuteKqpTransaction 2025-06-25T14:34:07.780262Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [3:61:2108] TxId# 281474976715662 ProcessProposeKqpTransaction 2025-06-25T14:34:07.781221Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykr5aa9csfcbfx0qbzk3wc6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjZhMzgwZjctN2I4Zjc4NTAtNGVjZjQ3OWEtN2ExMzYzYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:34:07.784272Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [3:1063:2856], Recipient [3:628:2532]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 3 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-25T14:34:07.784471Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T14:34:07.784531Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v8000/0 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v8000/18446744073709551615 ImmediateWriteEdgeReplied# v8000/18446744073709551615 2025-06-25T14:34:07.784578Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v8000/18446744073709551615 2025-06-25T14:34:07.784669Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckRead 2025-06-25T14:34:07.784773Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T14:34:07.784812Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckRead 2025-06-25T14:34:07.784857Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T14:34:07.784906Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T14:34:07.784957Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-25T14:34:07.785027Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T14:34:07.785063Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T14:34:07.785087Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T14:34:07.785111Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteRead 2025-06-25T14:34:07.785240Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 3 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-25T14:34:07.785534Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[3:1063:2856], 0} after executionsCount# 1 2025-06-25T14:34:07.785588Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[3:1063:2856], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T14:34:07.785680Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[3:1063:2856], 0} finished in read 
2025-06-25T14:34:07.785812Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T14:34:07.785837Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T14:34:07.785861Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T14:34:07.785884Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-25T14:34:07.785931Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T14:34:07.785952Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T14:34:07.785986Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-25T14:34:07.786037Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T14:34:07.786153Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T14:34:07.787318Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [3:1063:2856], Recipient [3:628:2532]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T14:34:07.787381Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 4 } } 2025-06-25T14:34:07.949153Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [3:61:2108] Handle TEvExecuteKqpTransaction 2025-06-25T14:34:07.949240Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [3:61:2108] TxId# 281474976715663 ProcessProposeKqpTransaction 2025-06-25T14:34:07.950104Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykr5ahh7cg4ws08219hx1j3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjY2MTIyNTktYTk1YTkzMmEtOTkwYzYxNDktMjA3NDJkMzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:34:07.953976Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [3:1093:2880], Recipient [3:864:2691]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 8 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false RangesSize: 1 2025-06-25T14:34:07.954162Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-06-25T14:34:07.954220Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037889 CompleteEdge# v6000/281474976710759 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v5000/18446744073709551615 ImmediateWriteEdgeReplied# v5000/18446744073709551615 2025-06-25T14:34:07.954264Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037889 changed HEAD read to non-repeatable v8000/18446744073709551615 2025-06-25T14:34:07.954319Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CheckRead 2025-06-25T14:34:07.954460Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-25T14:34:07.954518Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CheckRead 2025-06-25T14:34:07.954570Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-25T14:34:07.954611Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-25T14:34:07.954660Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037889 2025-06-25T14:34:07.954704Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-25T14:34:07.954727Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-25T14:34:07.954747Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit ExecuteRead 2025-06-25T14:34:07.954771Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit ExecuteRead 2025-06-25T14:34:07.954871Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 8 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false } 2025-06-25T14:34:07.955193Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[3:1093:2880], 0} after executionsCount# 1 2025-06-25T14:34:07.955247Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[3:1093:2880], 0} sends rowCount# 2, bytes# 64, quota rows left# 32765, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T14:34:07.955349Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[3:1093:2880], 0} finished in read 2025-06-25T14:34:07.955418Z node 3 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-25T14:34:07.955442Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit ExecuteRead 2025-06-25T14:34:07.955464Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit CompletedOperations 2025-06-25T14:34:07.955488Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CompletedOperations 2025-06-25T14:34:07.955528Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-25T14:34:07.955548Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CompletedOperations 2025-06-25T14:34:07.955575Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037889 has finished 2025-06-25T14:34:07.955611Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-25T14:34:07.955704Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-25T14:34:07.957013Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [3:1093:2880], Recipient [3:864:2691]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T14:34:07.957083Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 4 } } >> CommitOffset::Commit_WithSession_ToPastParentPartition [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithWrongPartition [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:08.008189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:08.008280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:08.012447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:08.012504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:08.012548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:08.012577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 
10000 2025-06-25T14:34:08.012649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:08.012736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:08.013477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:08.013838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:08.118440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:08.118501Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:08.198936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:08.199445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:08.199638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:08.206234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:08.206577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:08.207208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:08.207471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:08.211382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:08.211542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:08.212661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:08.212721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:08.212867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:08.212935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:08.212975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:08.213062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.220271Z node 1 
:HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:08.474575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:08.474791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.479734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:08.479802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:08.480045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:08.480126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:08.489122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:08.489316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:08.489544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.489606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:08.489670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:08.489712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:08.497007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.497073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:08.497113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:08.505090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress 
Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.505157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.505211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:08.505273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:08.508980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:08.514196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:08.514377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:08.515042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:08.515161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:08.515197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:08.515435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:08.515474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:08.515595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:08.515663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:08.517982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:08.518062Z node 1 :FLAT_TX_SCHEMESHARD ... 
46678944 2025-06-25T14:34:09.026182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:753: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-25T14:34:09.056384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409548, partId: 0 2025-06-25T14:34:09.056553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-06-25T14:34:09.056630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:623: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-06-25T14:34:09.056682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:264: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:09.056723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:628: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-25T14:34:09.056883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 128 -> 240 2025-06-25T14:34:09.057054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:34:09.057111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:34:09.060050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:34:09.060251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:09.060346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:34:09.060538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:34:09.060714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:09.060751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-25T14:34:09.060792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-25T14:34:09.060852Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:34:09.060890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-25T14:34:09.060983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:34:09.061018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:34:09.061066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:34:09.061110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:34:09.061163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-25T14:34:09.061205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:34:09.061241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:34:09.061268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:34:09.061393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T14:34:09.061428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 2, subscribers: 1 2025-06-25T14:34:09.061455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-25T14:34:09.061489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-25T14:34:09.068075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:34:09.068201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:34:09.068253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:34:09.068293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T14:34:09.068357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:34:09.069217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at 
schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:34:09.069306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:34:09.069360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:34:09.069392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T14:34:09.069416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:34:09.069471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 1 2025-06-25T14:34:09.069528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:410:2374] 2025-06-25T14:34:09.081512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:34:09.083002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:34:09.083253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:34:09.083343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:547:2480] TestWaitNotification: OK eventTxId 104 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 7 SplitBoundary: "W" } TestModificationResults wait txId: 105 2025-06-25T14:34:09.091933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 7 SplitBoundary: "W" } } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:09.092176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 105:0, at schemeshard: 72057594046678944 2025-06-25T14:34:09.092414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 105:1, propose status:StatusInvalidParameter, reason: Splitting partition does not exists: 7, at schemeshard: 72057594046678944 2025-06-25T14:34:09.096023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 105, response: Status: StatusInvalidParameter Reason: "Splitting partition does not exists: 7" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 
72057594046678944 2025-06-25T14:34:09.096324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Splitting partition does not exists: 7, operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-25T14:34:09.096616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-25T14:34:09.096647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-25T14:34:09.097009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-25T14:34:09.097101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:34:09.097136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:643:2565] TestWaitNotification: OK eventTxId 105 |79.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> KqpRm::Reduce [GOOD] >> TSchemeShardTopicSplitMergeTest::MargeNotAdjacentRangePartitions [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:08.636679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:08.636770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:08.636816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:08.636850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:08.636889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:08.636915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:08.636965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:08.637039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:08.637764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:08.638054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:08.720976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:08.721027Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:08.738330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:08.738744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:08.738928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:08.744828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:08.745132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:08.745780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:08.746006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:08.749116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:08.749264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:08.750357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:08.750414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:08.750559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:08.750602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:08.750652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:08.750728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.762797Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:08.895283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:08.895496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.895691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:08.895735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:08.895943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:08.896009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:08.898023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:08.898191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:08.898376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.898436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:08.898502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:08.898553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:08.900219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.900277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:08.900335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:08.905116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.905177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.905223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T14:34:08.905276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:08.917978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:08.919890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:08.920098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:08.920948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:08.921078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:08.921135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:08.921396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:08.921445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:08.921634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:08.921726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:08.923983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:08.924039Z node 1 :FLAT_TX_SCHEMESHARD ... 
satisfy waiter [1:643:2565] TestWaitNotification: OK eventTxId 105 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\001" } TestModificationResults wait txId: 106 2025-06-25T14:34:09.331799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\001" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:09.331974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-06-25T14:34:09.332189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '01' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', at schemeshard: 72057594046678944 2025-06-25T14:34:09.334405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Split boundary less or equals FromBound of partition: \'01\' <= \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\'" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:09.334682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '01' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-25T14:34:09.334981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-25T14:34:09.335052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-25T14:34:09.335453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-25T14:34:09.335533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T14:34:09.335567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:650:2572] TestWaitNotification: OK eventTxId 106 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "UUUUUUUUUUUUUUUT" } TestModificationResults wait txId: 107 2025-06-25T14:34:09.338532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "UUUUUUUUUUUUUUUT" } } } TxId: 107 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:09.338721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: 
/MyRoot/USER_1/Topic1, pathId: , opId: 107:0, at schemeshard: 72057594046678944 2025-06-25T14:34:09.338922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 107:1, propose status:StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', at schemeshard: 72057594046678944 2025-06-25T14:34:09.341160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 107, response: Status: StatusInvalidParameter Reason: "Split boundary less or equals FromBound of partition: \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\' <= \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\'" TxId: 107 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:09.341408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 107, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 2025-06-25T14:34:09.341700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-06-25T14:34:09.341750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-06-25T14:34:09.342121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-06-25T14:34:09.342221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-25T14:34:09.342255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:657:2579] TestWaitNotification: OK eventTxId 107 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\255" } TestModificationResults wait txId: 108 2025-06-25T14:34:09.345335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\255" } } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:09.345579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:34:09.345790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 108:1, propose status:StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AD' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), at schemeshard: 72057594046678944 2025-06-25T14:34:09.347933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 108, response: Status: StatusInvalidParameter Reason: 
"Split boundary greate or equals ToBound of partition: \'AD\' >= \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' (FromBound is \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\')" TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:09.348135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AD' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-25T14:34:09.348470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-25T14:34:09.348505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-25T14:34:09.348885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-25T14:34:09.349000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-25T14:34:09.349032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:664:2586] TestWaitNotification: OK eventTxId 108 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } TestModificationResults wait txId: 109 2025-06-25T14:34:09.352226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } } TxId: 109 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:09.352483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 109:0, at schemeshard: 72057594046678944 2025-06-25T14:34:09.352692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 109:1, propose status:StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), at schemeshard: 72057594046678944 2025-06-25T14:34:09.355018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 109, response: Status: StatusInvalidParameter Reason: "Split boundary greate or equals ToBound of partition: \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' >= \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' (FromBound is \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\')" TxId: 109 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:09.355259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 109, 
database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 109, wait until txId: 109 TestWaitNotification wait txId: 109 2025-06-25T14:34:09.355585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 109: send EvNotifyTxCompletion 2025-06-25T14:34:09.355623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 109 2025-06-25T14:34:09.356080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 109, at schemeshard: 72057594046678944 2025-06-25T14:34:09.356227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 109: got EvNotifyTxCompletionResult 2025-06-25T14:34:09.356261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 109: satisfy waiter [1:671:2593] TestWaitNotification: OK eventTxId 109 >> THealthCheckTest::TestSystemStateRetriesAfterReceivingResponse [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-6 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-7 >> TPartBtreeIndexIteration::FewNodes_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_Slices >> TVectorIndexTests::CreateTableWithError |79.6%| [TA] $(B)/ydb/core/tx/datashard/ut_upload_rows/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::DisableSplitMerge [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:05.231751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:05.231848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:05.231899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:05.231968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:05.232017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:05.232059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:05.232131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: 
BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:05.232203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:05.232948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:05.233304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:05.345443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:05.345508Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:05.357876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:05.358079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:05.358247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:05.401175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:05.401462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:05.402182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:05.402387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:05.417078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:05.417285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:05.418468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:05.418546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:05.418755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:05.418805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:05.418857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:05.418951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.437252Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for 
TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:34:05.727950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:05.728184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.728430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:05.728494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:05.728745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:05.728858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:05.736658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:05.736873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:05.737154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.737222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:05.737279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:05.737332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:05.744065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.744158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:05.744206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:05.749392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.749457Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:05.749505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:05.749595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:05.753451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:05.755675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:05.755876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:05.756860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:05.757050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:05.757102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:05.757392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:05.757451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:05.757646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:05.757728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:05.760078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:05.760128Z node 1 :FLAT_TX_SCHEMESHARD ... 
rd__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:34:09.056578Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-25T14:34:09.056635Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:34:09.056732Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-25T14:34:09.067036Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-25T14:34:09.073197Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-25T14:34:09.073250Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-25T14:34:09.073700Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-25T14:34:09.073799Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:34:09.073839Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:752:2662] TestWaitNotification: OK eventTxId 105 2025-06-25T14:34:09.792433Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 3 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:09.792920Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 3 took 498us result status StatusSuccess 2025-06-25T14:34:09.793599Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "\010" } Status: Active ParentPartitionIds: 0 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\010" } Status: Active 
ParentPartitionIds: 0 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { ToBound: "\010" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { FromBound: "\010" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:09.893813Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:34:09.894147Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 359us result status StatusSuccess 2025-06-25T14:34:09.894935Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 
72075186233409548 Status: Inactive ChildPartitionIds: 1 ChildPartitionIds: 2 } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { ToBound: "\010" } Status: Active ParentPartitionIds: 0 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\010" } Status: Active ParentPartitionIds: 0 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 3 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 3 NextPartitionId: 3 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { ToBound: "\010" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 0 KeyRange { FromBound: "\010" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } PartitionStrategy { PartitionStrategyType: DISABLED } } TestModificationResults wait txId: 106 2025-06-25T14:34:09.898814Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } PartitionStrategy { PartitionStrategyType: DISABLED } } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:09.899096Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-06-25T14:34:09.899264Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Can`t disable auto partitioning., at schemeshard: 72057594046678944 2025-06-25T14:34:09.918653Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 106, response: Status: 
StatusInvalidParameter Reason: "Can`t disable auto partitioning." TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:09.918986Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Can`t disable auto partitioning., operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-25T14:34:09.919376Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-25T14:34:09.919426Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-25T14:34:09.919918Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-25T14:34:09.920044Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T14:34:09.920091Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:766:2676] TestWaitNotification: OK eventTxId 106 >> TSchemeShardTopicSplitMergeTest::MargePartitions2 [GOOD] >> TVPatchTests::FullPatchTest [GOOD] >> TVPatchTests::FullPatchTestSpecialCase1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::Reduce [GOOD] Test command err: 2025-06-25T14:34:09.794206Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:34:09.795056Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/001ad9/r3tmp/tmpMeF8cS/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:34:09.795765Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/001ad9/r3tmp/tmpMeF8cS/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001ad9/r3tmp/tmpMeF8cS/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 5806671407864166878 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:34:09.846459Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:34:09.846763Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:34:09.887263Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:460:2102] with ResourceBroker at [2:431:2101] 2025-06-25T14:34:09.887443Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:461:2103] 2025-06-25T14:34:09.887647Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:459:2338] with ResourceBroker at [1:430:2319] 2025-06-25T14:34:09.887742Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:462:2339] 2025-06-25T14:34:09.887917Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:34:09.887959Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:34:09.887994Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:34:09.888016Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-25T14:34:09.888183Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:34:09.922399Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750862049 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:34:09.922654Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:34:09.922751Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750862049 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:34:09.923239Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:34:09.923405Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:34:09.923571Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:34:09.923613Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:34:09.923730Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750862049 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:34:09.923969Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:34:09.923999Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:34:09.924094Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750862049 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:34:09.925064Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:34:09.925356Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:34:09.925873Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:34:09.926173Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:34:09.926294Z node 2 :KQP_RESOURCE_MANAGER 
DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:34:09.926558Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:34:09.926728Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:34:09.926846Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:34:09.927021Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:34:09.927121Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:34:09.930514Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-1-1 (1 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:34:09.930605Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-1-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:34:09.930690Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-1-1 (1 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:34:09.930742Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:34:09.930784Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-1-1 (1 by [1:459:2338])) 2025-06-25T14:34:09.931019Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 1. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:34:09.931242Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task kqp-1-1-1 (1 by [1:459:2338]) (priority=0 type=kqp_query resources={0, 30} resubmit=0) 2025-06-25T14:34:09.931315Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:34:09.931360Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.075000 (insert task kqp-1-1-1 (1 by [1:459:2338])) 2025-06-25T14:34:09.931412Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 1. Released resources, Memory: 70, Free Tier: 0, ExecutionUnits: 0. 
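Aside: the KqpRm::Reduce stderr above records the acquire-then-reduce accounting the test exercises: the node publishes TotalMemory: 1000, task kqp-1-1-1 first allocates {0, 100} from queue_kqp_resource_manager, then resubmits the same task at {0, 30}, and the resource manager logs "Released resources, Memory: 70". The sketch below is a minimal, hypothetical illustration of that bookkeeping only; ResourcePool, Acquire and Reduce are invented names for this note and are not the actual YDB resource-broker API.

#include <cstdint>
#include <iostream>
#include <stdexcept>

// Hypothetical sketch of the acquire-then-reduce accounting seen in the
// KqpRm::Reduce log above; names and structure are assumptions, not YDB code.
class ResourcePool {
public:
    explicit ResourcePool(uint64_t total) : Total_(total), Used_(0) {}

    // Reserve `amount` units for a task; fails if the pool would be exceeded.
    void Acquire(uint64_t amount) {
        if (Used_ + amount > Total_) {
            throw std::runtime_error("pool exhausted");
        }
        Used_ += amount;
    }

    // Shrink an existing reservation from `from` down to `to`,
    // returning the difference to the pool.
    void Reduce(uint64_t from, uint64_t to) {
        if (to > from || from > Used_) {
            throw std::runtime_error("invalid reduce");
        }
        Used_ -= (from - to);
    }

    uint64_t Free() const { return Total_ - Used_; }

private:
    uint64_t Total_;
    uint64_t Used_;
};

int main() {
    ResourcePool pool(1000);  // TotalMemory: 1000, as published in the log
    pool.Acquire(100);        // task kqp-1-1-1 allocates {0, 100}
    pool.Reduce(100, 30);     // task resubmitted with {0, 30}; 70 units released
    std::cout << "free: " << pool.Free() << "\n";  // prints 970
    return 0;
}

This mirrors only the arithmetic visible in the log (100 acquired, reduced to 30, 70 released against a 1000-unit pool); the real service additionally tracks per-pool tiers, execution units and cross-node publication, which are outside the scope of this sketch.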
>> TDataShardTrace::TestTraceWriteImmediateOnShard ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::MargeNotAdjacentRangePartitions [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:07.856226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:07.856344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:07.856386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:07.856442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:07.856493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:07.856526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:07.856593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:07.856659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:07.857427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:07.857778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:07.972697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:07.972774Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:07.982552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:07.982804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:07.983032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:07.993286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:07.993599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:07.994259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: 
TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:07.994483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:07.997321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:07.997514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:07.998645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:07.998712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:07.998915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:07.999006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:07.999057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:07.999193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.012644Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:34:08.160170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:08.160458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.160726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:08.160809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:08.161065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:08.161158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
2025-06-25T14:34:08.165234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:08.165451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:08.165714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.165784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:08.165815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:08.165839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:08.168827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.168894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:08.168927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:08.170473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.170521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.170554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:08.170610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:08.173304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:08.175346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:08.175506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:08.176215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at 
schemeshard: 72057594046678944 2025-06-25T14:34:08.176347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:08.176384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:08.176705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:08.176749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:08.176915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:08.176979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:08.178800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:08.178841Z node 1 :FLAT_TX_SCHEMESHARD ... -25T14:34:10.128538Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:753: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-25T14:34:10.183235Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409548, partId: 0 2025-06-25T14:34:10.183428Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-06-25T14:34:10.183500Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:623: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 104 Step: 150 2025-06-25T14:34:10.183558Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:264: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:10.183597Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:628: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-25T14:34:10.183783Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 128 -> 240 2025-06-25T14:34:10.183963Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:34:10.184028Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount 
reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:34:10.186887Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:34:10.187261Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:10.187306Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:34:10.187496Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:34:10.187708Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:10.187764Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:208:2208], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-25T14:34:10.187806Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:208:2208], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-25T14:34:10.187880Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:34:10.187925Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-25T14:34:10.188029Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:34:10.188064Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:34:10.188115Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:34:10.188146Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:34:10.188183Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-25T14:34:10.188232Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:34:10.188271Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:34:10.188301Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:34:10.188473Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T14:34:10.188518Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 2, subscribers: 1 2025-06-25T14:34:10.188551Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-25T14:34:10.188576Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-25T14:34:10.190278Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:34:10.190390Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:34:10.190434Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:34:10.190472Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T14:34:10.190517Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:34:10.191177Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:34:10.191246Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:34:10.191274Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:34:10.191300Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T14:34:10.191327Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:34:10.191390Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 1 2025-06-25T14:34:10.191432Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [2:407:2371] 2025-06-25T14:34:10.196436Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:34:10.197777Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:34:10.197899Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:34:10.197951Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [2:545:2478] TestWaitNotification: OK eventTxId 104 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Merge { Partition: 0 AdjacentPartition: 2 } TestModificationResults wait txId: 105 2025-06-25T14:34:10.207296Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Merge { Partition: 0 AdjacentPartition: 2 } } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:10.207543Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 105:0, at schemeshard: 72057594046678944 2025-06-25T14:34:10.207760Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 105:1, propose status:StatusInvalidParameter, reason: You cannot merge non-contiguous partitions, at schemeshard: 72057594046678944 2025-06-25T14:34:10.214058Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 105, response: Status: StatusInvalidParameter Reason: "You cannot merge non-contiguous partitions" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:10.214348Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: You cannot merge non-contiguous partitions, operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-25T14:34:10.214698Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-25T14:34:10.214741Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-25T14:34:10.215190Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-25T14:34:10.215296Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:34:10.215331Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:641:2563] TestWaitNotification: OK eventTxId 105 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-3 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-4 >> TVPatchTests::PatchPartFastXorDiffDisorder >> TVPatchTests::PatchPartFastXorDiffDisorder [GOOD] >> TVPatchTests::PatchPartFastXorDiffBeyoundBlob |79.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FullPatchTestSpecialCase1 [GOOD] >> KqpSinkLocks::EmptyRangeOlap [GOOD] >> KqpSinkLocks::InsertWithBulkUpsert+UseBulkUpsert >> TVectorIndexTests::CreateTableWithError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> 
TSchemeShardTopicSplitMergeTest::MargePartitions2 [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:08.305406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:08.305501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:08.305541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:08.305575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:08.305612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:08.305645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:08.305724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:08.305810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:08.306515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:08.306850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:08.460561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:08.460657Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:08.488554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:08.489003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:08.489192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:08.495613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:08.495935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:08.496563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:08.496844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: 
TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:08.499971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:08.500141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:08.501362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:08.501447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:08.501663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:08.501730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:08.501813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:08.501931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.509393Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:08.649424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:08.649622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.649825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:08.649869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:08.650133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:08.650202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:08.652417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 
72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:08.652603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:08.652792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.652879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:08.652926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:08.652961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:08.654813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.654868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:08.654904Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:08.661889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.661951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:08.661996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:08.662054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:08.665801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:08.669499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:08.669713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:08.670819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:08.670988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 
72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:08.671043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:08.671330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:08.671398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:08.671581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:08.671687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:08.674164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:08.674212Z node 1 :FLAT_TX_SCHEMESHARD ... -25T14:34:10.904263Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:623: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409548 Status: COMPLETE TxId: 105 Step: 200 2025-06-25T14:34:10.904353Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:264: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 105:0, shardIdx: 72057594046678944:3, shard: 72075186233409548, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-25T14:34:10.904403Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:628: NPQState::TPropose operationId# 105:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-25T14:34:10.904580Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 105:0 128 -> 240 2025-06-25T14:34:10.904765Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:34:10.907402Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-25T14:34:10.908008Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:10.908061Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:34:10.908374Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:10.908419Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at 
schemeshard: 72057594046678944, txId: 105, path id: 3 2025-06-25T14:34:10.908505Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-25T14:34:10.908547Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 105:0 ProgressState 2025-06-25T14:34:10.908648Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 1/1 2025-06-25T14:34:10.908684Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-25T14:34:10.908723Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 1/1 2025-06-25T14:34:10.908754Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-25T14:34:10.908793Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: false 2025-06-25T14:34:10.908832Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-25T14:34:10.908874Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:0 2025-06-25T14:34:10.908908Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 105:0 2025-06-25T14:34:10.909031Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T14:34:10.909074Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 105, publications: 1, subscribers: 1 2025-06-25T14:34:10.909107Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-25T14:34:10.909887Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:34:10.909989Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:34:10.910027Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:34:10.910073Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-25T14:34:10.910110Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:34:10.910184Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 1 
2025-06-25T14:34:10.910229Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [2:414:2378] 2025-06-25T14:34:10.914806Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:34:10.914907Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:34:10.914961Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:684:2603] TestWaitNotification: OK eventTxId 105 2025-06-25T14:34:10.922071Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1/Topic1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:34:10.922340Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1/Topic1" took 306us result status StatusSuccess 2025-06-25T14:34:10.923104Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1/Topic1" PathDescription { Self { Name: "Topic1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 104 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409549 } PersQueueGroup { Name: "Topic1" PathId: 3 TotalGroupCount: 5 PartitionPerTablet: 7 PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 0 TabletId: 72075186233409548 KeyRange { ToBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409548 KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" } Status: Inactive ChildPartitionIds: 4 } Partitions { PartitionId: 2 TabletId: 72075186233409548 KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } Status: Inactive ChildPartitionIds: 4 } Partitions { PartitionId: 3 TabletId: 72075186233409548 KeyRange { FromBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } Status: Active } Partitions { PartitionId: 4 TabletId: 72075186233409548 KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } Status: Active ParentPartitionIds: 1 ParentPartitionIds: 2 } AlterVersion: 2 BalancerTabletID: 72075186233409549 NextPartitionId: 5 Allocate { Name: "Topic1" AlterVersion: 2 TotalGroupCount: 5 NextPartitionId: 5 PartitionPerTablet: 7 
PQTabletConfig { PartitionConfig { LifetimeSeconds: 3600 } YdbDatabasePath: "/MyRoot" PartitionStrategy { PartitionStrategyType: CAN_SPLIT_AND_MERGE } } Partitions { PartitionId: 1 GroupId: 2 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" } } Partitions { PartitionId: 2 GroupId: 3 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Inactive KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\375" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } } Partitions { PartitionId: 3 GroupId: 4 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { FromBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } } Partitions { PartitionId: 0 GroupId: 1 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active KeyRange { ToBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } } Partitions { PartitionId: 4 GroupId: 5 TabletId: 72075186233409548 OwnerId: 72057594046678944 ShardId: 3 Status: Active ParentPartitionIds: 1 ParentPartitionIds: 2 KeyRange { FromBound: "?\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" ToBound: "\277\377\377\377\377\377\377\377\377\377\377\377\377\377\377\374" } } BalancerTabletID: 72075186233409549 BalancerOwnerId: 72057594046678944 BalancerShardId: 4 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 5 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TVPatchTests::PatchPartFastXorDiffBeyoundBlob [GOOD] >> TVPatchTests::FullPatchTestXorDiffFasterVGetResult [GOOD] >> TVPatchTests::FindingPartsWhenPartsAreDontExist ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::TestSystemStateRetriesAfterReceivingResponse [GOOD] Test command err: 2025-06-25T14:33:13.488454Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:13.488990Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:13.489232Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:13.489374Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:13.489691Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:13.489731Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d7a/r3tmp/tmp2bZmd2/pdisk_1.dat 2025-06-25T14:33:14.024722Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13146, node 1 TClient is connected to server localhost:3525 2025-06-25T14:33:14.646201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:14.646282Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:14.646318Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:14.646719Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:22.874853Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:459:2310], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:22.875204Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:22.875336Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:22.877002Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:708:2320], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:22.877436Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:22.877504Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d7a/r3tmp/tmpheYqQE/pdisk_1.dat 2025-06-25T14:33:23.307732Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28973, node 3 TClient is connected to server localhost:17626 2025-06-25T14:33:27.930710Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:27.930776Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:27.930813Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:27.931506Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:27.950021Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:27.960606Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:28.041951Z node 3 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-25T14:33:28.042667Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:28.323896Z node 3 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 5 2025-06-25T14:33:28.325562Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connected -> Disconnected self_check_result: EMERGENCY issue_log { id: "RED-f489-1231c6b1" status: RED message: "Database has compute issues" location { database { name: "/Root" } } reason: "RED-6fa7-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "RED-6fa7-1231c6b1" status: RED message: "Compute has issues with tablets" location { database { name: "/Root" } } reason: "RED-e5e3-1231c6b1-PersQueue" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" reason: "YELLOW-e9e2-1231c6b1-5" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: 
"::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-e5e3-1231c6b1-PersQueue" status: RED message: "Tablets are dead" location { compute { tablet { type: "PersQueue" id: "72075186224037888" count: 1 } } database { name: "/Root" } node { } } type: "TABLET" level: 4 } location { id: 3 host: "::1" port: 12001 } 2025-06-25T14:33:37.708815Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:788:2378], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:37.709230Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:37.709443Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:33:37.711198Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:785:2321], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:37.711499Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:37.711838Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d7a/r3tmp/tmp9BTANr/pdisk_1.dat 2025-06-25T14:33:38.254452Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65018, node 6 TClient is connected to server localhost:18529 2025-06-25T14:33:43.557439Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:43.557518Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:43.557565Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:43.558410Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:43.558623Z node 6 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([6:1268:2664]) [6:1491:2668] 2025-06-25T14:33:43.559268Z node 6 :HIVE DEBUG: hive_impl.cpp:34: HIVE#72057594037968897 Handle TEvHive::TEvCreateTablet(PersQueue(72057594046578946,0)) 2025-06-25T14:33:43.568394Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:200: HIVE#72057594037968897 THive::TTxCreateTablet::Execute Owner: 72057594046578946 OwnerIdx: 0 TabletType: PersQueue BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } 2025-06-25T14:33:43.568529Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:348: HIVE#72057594037968897 Hive 72057594037968897 allocated TabletId 72075186224037888 from TabletIdIndex 65536 2025-06-25T14:33:43.568881Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:440: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for type PersQueue: {} 2025-06-25T14:33:43.568983Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:447: HIVE#72057594037968897 THive::TTxCreateTablet::Execute; Default resources after merge for profile 'default': {Memory: 1048576} 2025-06-25T14:33:43.569222Z node 6 :HIVE DEBUG: hive_impl.cpp:2817: HIVE#72057594037968897 CreateTabletFollowers Tablet PersQueue.72075186224037888.Leader.0 2025-06-25T14:33:43.569308Z node 6 :HIVE DEBUG: tx__create_tablet.cpp:173: HIVE#72057594037968897 THive::TTxCreateTablet::Execute TabletId: 72075186224037888 Status: OK 2025-06-25T14:33:43.569488Z node 6 :HIVE DEBUG: hive_impl.cpp:1075: HIVE#72057594037968897 THive::AssignTabletGroups TEvControllerSelectGroups tablet 72075186224037888 GroupParameters { StoragePoolSpecifier { Name: "/Root:test" } } ReturnAllMatchingGroups: true 2025-06-25T14:33:43.571344Z node 6 :HIVE DEBUG: hive_impl.cpp:72: HIVE#72057594037968897 Connected to tablet 72057594037932033 from tablet 72057594037968897 2025-06-25T14:33:43.585726Z node 6 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([8:1472:2327]) [6:1536:2677] 
2025-06-25T14:33:43.608764Z node 6 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [8:1460:2327] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priori ... 5-06-25T14:33:43.836060Z node 6 :HIVE TRACE: hive_impl.cpp:2567: HIVE#72057594037968897 UpdateTotalResources: ObjectId (72057594046578946,0): {} -> {Memory: 1048576} 2025-06-25T14:33:43.836157Z node 6 :HIVE TRACE: hive_impl.cpp:2573: HIVE#72057594037968897 UpdateTotalResources: Type PersQueue: {} -> {Memory: 1048576} 2025-06-25T14:33:43.836278Z node 6 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:33:43.840922Z node 6 :HIVE TRACE: hive_impl.cpp:344: HIVE#72057594037968897 ProcessBootQueue - sending 2025-06-25T14:33:43.841422Z node 6 :HIVE TRACE: hive_impl.cpp:328: HIVE#72057594037968897 ProcessBootQueue - executing 2025-06-25T14:33:43.841526Z node 6 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-25T14:33:43.841588Z node 6 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-25T14:33:43.841646Z node 6 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-25T14:33:43.858421Z node 6 :HIVE DEBUG: tx__update_tablet_status.cpp:216: HIVE#72057594037968897 THive::TTxUpdateTabletStatus::Complete TabletId: 72075186224037888 SideEffects: {Notifications: 0x10040207 [6:1267:2663] {EvTabletCreationResult Status: OK TabletID: 72075186224037888}} 2025-06-25T14:33:43.858505Z node 6 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-25T14:33:50.603052Z node 6 :HIVE DEBUG: hive_impl.cpp:731: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 8: Status: 2 2025-06-25T14:33:50.603181Z node 6 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(8)::Execute 2025-06-25T14:33:50.603252Z node 6 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 8 2025-06-25T14:33:50.603379Z node 6 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(8)::Complete 2025-06-25T14:33:50.603678Z node 6 :HIVE DEBUG: tx__restart_tablet.cpp:32: HIVE#72057594037968897 THive::TTxRestartTablet(PersQueue.72075186224037888.Leader.1)::Execute 2025-06-25T14:33:50.603799Z node 6 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(PersQueue.72075186224037888.Leader.1) VolatileState: Running -> Stopped (Node 8) 2025-06-25T14:33:50.603863Z node 6 :HIVE TRACE: node_info.cpp:118: HIVE#72057594037968897 Node(8, (0,1048576,0,0)->(0,0,0,0)) 2025-06-25T14:33:50.603979Z node 6 :HIVE TRACE: hive_impl.cpp:2567: HIVE#72057594037968897 UpdateTotalResources: ObjectId (72057594046578946,0): {Memory: 1048576} -> {} 2025-06-25T14:33:50.604097Z node 6 :HIVE TRACE: hive_impl.cpp:2573: HIVE#72057594037968897 UpdateTotalResources: Type PersQueue: {Memory: 1048576} -> {} 2025-06-25T14:33:50.604178Z node 6 :HIVE DEBUG: tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(PersQueue.72075186224037888.Leader.1 gen 1) to node 8 2025-06-25T14:33:50.604260Z node 6 :HIVE DEBUG: tablet_info.cpp:125: HIVE#72057594037968897 Tablet(PersQueue.72075186224037888.Leader.1) VolatileState: Stopped -> Booting 2025-06-25T14:33:50.604322Z node 6 :HIVE DEBUG: hive_impl.cpp:342: 
HIVE#72057594037968897 ProcessBootQueue (1) 2025-06-25T14:33:50.604370Z node 6 :HIVE TRACE: hive_impl.cpp:344: HIVE#72057594037968897 ProcessBootQueue - sending 2025-06-25T14:33:50.604647Z node 6 :HIVE DEBUG: tx__kill_node.cpp:22: HIVE#72057594037968897 THive::TTxKillNode(8)::Execute 2025-06-25T14:33:50.604746Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:33:50.604795Z node 6 :HIVE TRACE: hive_domains.cpp:16: Node(8) DeregisterInDomains (72057594046644480:1) : 1 -> 0 2025-06-25T14:33:50.604848Z node 6 :HIVE DEBUG: hive_impl.cpp:2804: HIVE#72057594037968897 RemoveRegisteredDataCentersNode(3, 8) 2025-06-25T14:33:50.604919Z node 6 :HIVE TRACE: tx__kill_node.cpp:50: HIVE#72057594037968897 THive::TTxKillNode - killing pipe server [6:1536:2677] 2025-06-25T14:33:50.604972Z node 6 :HIVE DEBUG: hive_impl.cpp:105: HIVE#72057594037968897 TryToDeleteNode(8): waiting 3600.000000s 2025-06-25T14:33:50.615857Z node 6 :HIVE TRACE: hive_impl.cpp:122: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerDisconnected([8:1472:2327]) [6:1536:2677] 2025-06-25T14:33:50.620034Z node 6 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([6:1858:2692]) [6:1859:2697] 2025-06-25T14:33:50.646900Z node 6 :HIVE TRACE: hive_impl.cpp:122: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerDisconnected([6:1858:2692]) [6:1859:2697] 2025-06-25T14:33:50.649454Z node 6 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037968897 Handle TEvTabletPipe::TEvServerConnected([9:1839:2328]) [6:1870:2699] 2025-06-25T14:33:50.663543Z node 6 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [9:1834:2328] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-06-25T14:33:50.663687Z node 6 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(9)::Execute 2025-06-25T14:33:50.663817Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:50.663877Z node 6 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-25T14:33:50.663921Z node 6 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (1) 2025-06-25T14:33:50.663968Z node 6 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-25T14:33:50.664012Z node 6 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (1) 2025-06-25T14:33:50.664112Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:50.664974Z node 6 
:HIVE DEBUG: hive_impl.cpp:808: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 9 Location DataCenter: "4" Module: "4" Rack: "4" Unit: "4" self_check_result: EMERGENCY issue_log { id: "RED-f489-1231c6b1" status: RED message: "Database has compute issues" location { database { name: "/Root" } } reason: "RED-6fa7-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "RED-6fa7-1231c6b1" status: RED message: "Compute has issues with tablets" location { database { name: "/Root" } } reason: "RED-e5e3-1231c6b1-PersQueue" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-6" reason: "YELLOW-e9e2-1231c6b1-7" reason: "YELLOW-e9e2-1231c6b1-8" reason: "YELLOW-e9e2-1231c6b1-9" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-7" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 7 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-8" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 8 host: "::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-9" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 9 host: "::1" port: 12004 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-e5e3-1231c6b1-PersQueue" status: RED message: "Tablets are dead" location { compute { tablet { type: "PersQueue" id: "72075186224037888" count: 1 } } database { name: "/Root" } node { } } type: "TABLET" level: 4 } location { id: 6 host: "::1" port: 12001 } 2025-06-25T14:33:58.691688Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:33:58.692165Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:58.692251Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d7a/r3tmp/tmpeuDx71/pdisk_1.dat 2025-06-25T14:33:59.180480Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28973, node 10 TClient is connected to server localhost:21769 2025-06-25T14:33:59.759933Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:59.760020Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:59.760068Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:59.760907Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:34:07.729473Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:07.729853Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:07.730050Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d7a/r3tmp/tmpmBrpLS/pdisk_1.dat 2025-06-25T14:34:08.209755Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12994, node 12 TClient is connected to server localhost:25111 2025-06-25T14:34:09.339042Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:09.339112Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:09.339180Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:09.341199Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-45 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-46 >> TVPatchTests::PatchPartFastXorDiffWithEmptyDiffBuffer >> TVPatchTests::FindingPartsWhenPartsAreDontExist [GOOD] >> TVPatchTests::FindingPartsWhenOnlyOnePartExists >> TVPatchTests::FindingPartsWhenSeveralPartsExist >> TVPatchTests::FindingPartsWhenOnlyOnePartExists [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartFastXorDiffDisorder [GOOD] Test command err: Recv 65537 2025-06-25T14:34:11.609528Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:100:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:11.610483Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:100:0] Status# OK ResultSize# 1 2025-06-25T14:34:11.610557Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:100:0] FoundParts# [5] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchXorDiff 2025-06-25T14:34:11.610769Z node 1 :BS_VDISK_PATCH INFO: {BSVSP13@skeleton_vpatch_actor.cpp:674} [0:1:0:0:0] TEvVPatch: received xor diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] FromPart# 4 ToPart# 0 HasBuffer# no ReceivedXorDiffCount# 1/0 Send NKikimr::TEvBlobStorage::TEvVPatchXorDiffResult 2025-06-25T14:34:11.610879Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-25T14:34:11.611068Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:100:0] 
PatchedBlobId# [1:3:3:4:6:100:0] OriginalPartId# 0 PatchedPartId# 0 Status# ERROR ErrorReason# [XorDiff from datapart] the start of the diff at index 0 righter than the start of the diff at index 1; PrevDiffStart# 2 DiffStart# 0 Send NKikimr::TEvBlobStorage::TEvVPatchResult Recv NKikimr::TEvVPatchDyingConfirm ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableWithError [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:11.333664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:11.333767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:11.333816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:11.333852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:11.333908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:11.333942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:11.334005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:11.334069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:11.334797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:11.335200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:11.428829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:11.428899Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:11.447108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:11.447581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:11.447755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:11.454585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 
2025-06-25T14:34:11.455042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:11.455693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:11.455983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:11.459983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:11.460160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:11.461310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:11.461367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:11.461500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:11.461555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:11.461594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:11.461668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:11.468450Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:11.629669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:11.629916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:11.630149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:11.630209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:11.630500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:11.630585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:11.633201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:11.633402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:11.633645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:11.633735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:11.633799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:11.633839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:11.636658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:11.636733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:11.636772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:11.639500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:11.639555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:11.639616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:11.639662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:11.643339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:11.646758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:11.646990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 
72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:11.647968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:11.648124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:11.648178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:11.648521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:11.648586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:11.648783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:11.648878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:11.651286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:11.651335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:11.651543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:11.651611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:34:11.651967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:11.652033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:34:11.652139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:34:11.652177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:34:11.652219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:34:11.652251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:34:11.652287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:34:11.652343Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:34:11.652383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:34:11.652417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:34:11.652496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:34:11.652537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:34:11.652568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:34:11.655907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:34:11.656059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:34:11.656117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:34:11.656174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:34:11.656241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:11.656375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:34:11.661449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:34:11.662077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T14:34:11.664085Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:274:2263] Bootstrap 2025-06-25T14:34:11.687353Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:34:11.690243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "vectors" Columns { Name: "id" Type: "Uint64" } Columns { Name: "__ydb_parent" Type: "String" } KeyColumnNames: "id" } IndexDescription { Name: "idx_vector" KeyColumnNames: "__ydb_parent" Type: EIndexTypeGlobalVectorKmeansTree VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: 
VECTOR_TYPE_FLOAT vector_dimension: 1024 } } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:11.690708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:100: TCreateTableIndex construct operation table path: /MyRoot/vectors domain path id: [OwnerId: 72057594046678944, LocalPathId: 1] domain path: /MyRoot shardsToCreate: 2 GetShardsInside: 0 MaxShards: 200000 2025-06-25T14:34:11.690860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: index key column shouldn't have a reserved name, at schemeshard: 72057594046678944 2025-06-25T14:34:11.690987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusInvalidParameter, reason: index key column shouldn't have a reserved name, at schemeshard: 72057594046678944 2025-06-25T14:34:11.692296Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:34:11.701360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusInvalidParameter Reason: "index key column shouldn\'t have a reserved name" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:11.701680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: index key column shouldn't have a reserved name, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/vectors 2025-06-25T14:34:11.702365Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-25T14:34:11.706126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "vectors" Columns { Name: "id" Type: "Uint64" } Columns { Name: "embedding" Type: "String" } KeyColumnNames: "id" } IndexDescription { Name: "idx_vector" KeyColumnNames: "embedding" Type: EIndexTypeGlobalVectorKmeansTree DataColumnNames: "id" VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:11.706600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:100: TCreateTableIndex construct operation table path: /MyRoot/vectors domain path id: [OwnerId: 72057594046678944, LocalPathId: 1] domain path: /MyRoot shardsToCreate: 2 GetShardsInside: 0 MaxShards: 200000 2025-06-25T14:34:11.706773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 102:0, explain: the same column can't be used as key and data column for one index, for example id, at schemeshard: 72057594046678944 2025-06-25T14:34:11.706825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: the same column can't be used as key and data column for one index, for example id, at 
schemeshard: 72057594046678944 2025-06-25T14:34:11.713962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "the same column can\'t be used as key and data column for one index, for example id" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:11.714280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: the same column can't be used as key and data column for one index, for example id, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/vectors TestModificationResult got TxId: 102, wait until txId: 102 >> TVPatchTests::PatchPartFastXorDiffWithEmptyDiffBuffer [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_History_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_FlatIndex >> KqpSinkMvcc::OltpMultiSinks [GOOD] |79.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FullPatchTestXorDiffFasterVGetResult [GOOD] Test command err: Recv 65537 2025-06-25T14:34:11.962411Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:100:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:11.963255Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:100:0] Status# OK ResultSize# 1 2025-06-25T14:34:11.963313Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:100:0] FoundParts# [5] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchXorDiff 2025-06-25T14:34:11.963502Z node 1 :BS_VDISK_PATCH INFO: {BSVSP13@skeleton_vpatch_actor.cpp:674} [0:1:0:0:0] TEvVPatch: received xor diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] FromPart# 4 ToPart# 0 HasBuffer# no ReceivedXorDiffCount# 1/0 Send NKikimr::TEvBlobStorage::TEvVPatchXorDiffResult 2025-06-25T14:34:11.963603Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-25T14:34:11.963749Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] OriginalPartId# 0 PatchedPartId# 0 Status# ERROR ErrorReason# The diff at index 0 went beyound the blob part; DiffStart# 100 DiffEnd# 96 BlobPartSize# 32 Send NKikimr::TEvBlobStorage::TEvVPatchResult Recv NKikimr::TEvVPatchDyingConfirm >> TVPatchTests::FindingPartsWhenSeveralPartsExist [GOOD] >> TVPatchTests::FindingPartsWithTimeout >> BuildStatsHistogram::Ten_Serial [GOOD] >> BuildStatsHistogram::Ten_Crossed >> KqpRm::SingleSnapshotByExchanger [GOOD] |79.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FindingPartsWhenOnlyOnePartExists [GOOD] Test command err: Recv 65537 2025-06-25T14:34:12.379583Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:12.381027Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-25T14:34:12.381103Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts 2025-06-25T14:34:12.381198Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm Recv 65537 2025-06-25T14:34:12.475997Z node 2 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:12.476447Z node 2 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-25T14:34:12.476562Z node 2 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-25T14:34:12.476748Z node 2 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# yes 2025-06-25T14:34:12.476815Z node 2 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: received force end; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK ErrorReason# Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-25T14:34:12.476882Z node 2 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> TVPatchTests::FindingPartsWithTimeout [GOOD] >> TVersions::WreckHeadReverse [GOOD] >> TVersions::Wreck2 >> IncrementalRestoreScan::ChangeSenderEmpty ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartFastXorDiffWithEmptyDiffBuffer [GOOD] Test command err: Recv 65537 2025-06-25T14:34:12.509904Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:100:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:12.510821Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:100:0] 
Status# OK ResultSize# 1 2025-06-25T14:34:12.510885Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:100:0] FoundParts# [5] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchXorDiff 2025-06-25T14:34:12.511101Z node 1 :BS_VDISK_PATCH INFO: {BSVSP13@skeleton_vpatch_actor.cpp:674} [0:1:0:0:0] TEvVPatch: received xor diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] FromPart# 4 ToPart# 0 HasBuffer# no ReceivedXorDiffCount# 1/0 Send NKikimr::TEvBlobStorage::TEvVPatchXorDiffResult Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-25T14:34:12.511252Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] OriginalPartId# 5 PatchedPartId# 5 XorReceiver# yes ParityPart# yes ForceEnd# no 2025-06-25T14:34:12.511316Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:100:0] PullingPart# 5 Send NKikimr::TEvBlobStorage::TEvVGet >> IncrementalRestoreScan::ChangeSenderSimple ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FindingPartsWithTimeout [GOOD] Test command err: Recv 65537 2025-06-25T14:34:12.666435Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:12.667769Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-25T14:34:12.667836Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1 2] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-25T14:34:12.668063Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# yes 2025-06-25T14:34:12.668119Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: received force end; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK ErrorReason# Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-25T14:34:12.668197Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm Recv 65537 2025-06-25T14:34:13.053245Z node 2 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NActors::TEvents::TEvWakeup 2025-06-25T14:34:13.063862Z node 2 :BS_VDISK_PATCH ERROR: {BSVSP11@skeleton_vpatch_actor.cpp:734} [0:1:0:0:0] TEvVPatch: the vpatch actor died due to a deadline, before receiving diff; 2025-06-25T14:34:13.063980Z node 2 :BS_VDISK_PATCH INFO: 
{BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [] Status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts 2025-06-25T14:34:13.064075Z node 2 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm |79.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |79.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows >> TFlatTableExecutor_IndexLoading::Scan_Groups_FlatIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SingleSnapshotByExchanger [GOOD] Test command err: 2025-06-25T14:34:11.017086Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:34:11.017562Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/001ac5/r3tmp/tmpu6vMtK/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:34:11.018066Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/001ac5/r3tmp/tmpu6vMtK/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001ac5/r3tmp/tmpu6vMtK/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 11642355495201195217 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:34:11.053811Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:34:11.054070Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:34:11.086530Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:460:2102] with ResourceBroker at 
[2:431:2101] 2025-06-25T14:34:11.086652Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:461:2103] 2025-06-25T14:34:11.086788Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:459:2338] with ResourceBroker at [1:430:2319] 2025-06-25T14:34:11.086858Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:462:2339] 2025-06-25T14:34:11.087003Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:34:11.087039Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:34:11.087070Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:34:11.087089Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:34:11.087234Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:34:11.132855Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750862051 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:34:11.133073Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:34:11.133144Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750862051 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:34:11.133500Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:34:11.133620Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:34:11.133731Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:34:11.133757Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:34:11.133846Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750862051 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:34:11.134039Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:34:11.134062Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:34:11.134126Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource 
usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750862051 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:34:11.134781Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:34:11.135016Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:34:11.135488Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:34:11.135696Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:34:11.135774Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:34:11.135986Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:34:11.136120Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:34:11.136203Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:34:11.139721Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:34:11.139895Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:34:11.142868Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-1 (1 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:34:11.142945Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:34:11.143017Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-2-1 (1 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:34:11.143056Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-2-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:34:11.143093Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-2-1 (1 by [1:459:2338])) 2025-06-25T14:34:11.143396Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 2. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:34:11.143473Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-2-1-2 (2 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:34:11.143518Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-2-1-2 (2 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:34:11.143559Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-2-1-2 (2 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:34:11.143600Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-2-1-2 (2 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:34:11.143639Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.500000 (insert task kqp-2-1-2 (2 by [1:459:2338])) 2025-06-25T14:34:11.143713Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 2, taskId: 1. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:34:11.143902Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:34:11.144038Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: alloc, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750862051 AvailableComputeActors: 80 UsedMemory: 200 TotalMemory: 1000 Memory { Pool: 1 Available: 800 } ExecutionUnits: 80 2025-06-25T14:34:11.144292Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:34:12.317688Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-25T14:34:12.317816Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-2-1 (1 by [1:459:2338]) (release resources {0, 100}) 2025-06-25T14:34:12.317872Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.500000 to 0.300150 (remove task kqp-1-2-1 (1 by [1:459:2338])) 2025-06-25T14:34:12.317910Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.100300 2025-06-25T14:34:12.317960Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 2. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2025-06-25T14:34:12.318012Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-2-1-2 (2 by [1:459:2338]) (release resources {0, 100}) 2025-06-25T14:34:12.318049Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.300150 to 0.100300 (remove task kqp-2-1-2 (2 by [1:459:2338])) 2025-06-25T14:34:12.318130Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 2, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 
2025-06-25T14:34:12.318318Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:34:12.318462Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: alloc, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750862052 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:34:12.318778Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:34:12.624340Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request |79.7%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_upload_rows/test-results/unittest/{meta.json ... results_accumulator.log} >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex |79.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/test-results/unittest/{meta.json ... results_accumulator.log} |79.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::OltpMultiSinks [GOOD] Test command err: Trying to start YDB, gRPC: 27504, MsgBus: 3277 2025-06-25T14:33:50.222856Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895158951991167:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:50.229247Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012fd/r3tmp/tmpvcbua4/pdisk_1.dat 2025-06-25T14:33:50.956453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:50.956527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:50.972369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:51.017237Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:51.020469Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895158951990987:2080] 1750862030202123 != 1750862030202126 TServer::EnableGrpc on GrpcPort 27504, node 1 2025-06-25T14:33:51.217868Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:33:51.242007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:51.242034Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:51.242049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:51.242209Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3277 TClient is connected to server localhost:3277 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:52.527128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:55.220459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895158951991167:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:55.220561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:55.904664Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895180426828096:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:55.904927Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:55.907712Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895180426828131:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:55.912241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:55.935955Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895180426828133:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:33:56.026388Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895184721795481:2342] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:56.296018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:56.490168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:58.411248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 12191, MsgBus: 9853 2025-06-25T14:34:02.837303Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895212995542112:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:02.837338Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012fd/r3tmp/tmppT6OyX/pdisk_1.dat 2025-06-25T14:34:03.327138Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895212995542092:2080] 1750862042831641 != 1750862042831644 2025-06-25T14:34:03.327878Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:03.342234Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:03.342308Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:03.356658Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12191, node 2 2025-06-25T14:34:03.526662Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:03.526688Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:03.526694Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:03.526793Z node 2 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:34:03.912557Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9853 TClient is connected to server localhost:9853 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:04.405683Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:07.837644Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519895212995542112:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:07.837715Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:34:08.012108Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895238765346486:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:08.012367Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:08.012878Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895238765346522:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:08.017104Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:08.032472Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519895238765346524:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:34:08.099411Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519895238765346575:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:08.164777Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:08.241477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:09.848950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TPartBtreeIndexIteration::FewNodes_Groups_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_History_Slices >> DataShardVolatile::DistributedWriteRSNotAckedBeforeCommit [GOOD] >> DataShardVolatile::DistributedUpsertRestartBeforePrepare+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/ut_with_sdk/unittest >> CommitOffset::Commit_WithSession_ToPastParentPartition [GOOD] Test command err: 2025-06-25T14:30:41.705665Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894347226445220:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:41.705904Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:30:42.064425Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00190e/r3tmp/tmpsgL32r/pdisk_1.dat 2025-06-25T14:30:42.250317Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894347226445079:2080] 1750861841675689 != 1750861841675692 2025-06-25T14:30:42.275068Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:42.300747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:42.300854Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:42.306471Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc 
on GrpcPort 13182, node 1 2025-06-25T14:30:42.473830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/00190e/r3tmp/yandexqGGIbk.tmp 2025-06-25T14:30:42.473853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/00190e/r3tmp/yandexqGGIbk.tmp 2025-06-25T14:30:42.474059Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/00190e/r3tmp/yandexqGGIbk.tmp 2025-06-25T14:30:42.474206Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:42.529143Z INFO: TTestServer started on Port 11884 GrpcPort 13182 TClient is connected to server localhost:11884 2025-06-25T14:30:42.736894Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; PQClient connected to localhost:13182 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:42.931174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:42.945196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:42.967819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-25T14:30:45.343268Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894364406315069:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.343358Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894364406315045:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.343871Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:45.348321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:45.361488Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894364406315073:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:30:45.452447Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894364406315137:2445] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:45.839216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:45.873638Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894364406315145:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:30:45.875653Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjBkMWEyYmEtN2RlM2I2MC1hODFhMzJlZC04M2EyZmU5ZQ==, ActorId: [1:7519894364406315041:2298], ActorState: ExecuteState, TraceId: 01jykqz4tw95373axwvcw8dgrf, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:30:45.877525Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:30:45.923650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:46.042246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519894368701282729:2621] 2025-06-25T14:30:46.701483Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894347226445220:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:46.701553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-25T14:30:52.825548Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-25T14:30:52.856730Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:30:52.858228Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519894394471086706:2701], Recipient [1:7519894351521412710:2151]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:52.858260Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:52.858276Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:30:52.858315Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519894394471086702:2698], Recipient [1:7519894351521412710:2151]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-25T14:30:52.858328Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:30:53.054455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 3600 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProf ... ition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:08.167612Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895200306593142:2776], Partition 2, Sender [0:0:0], Recipient [7:7519895200306593223:2783], Cookie: 0 2025-06-25T14:34:08.167614Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:08.167641Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:08.167651Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895200306593223:2783]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.167669Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.167676Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:08.167697Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:08.167739Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:08.167748Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895153061951620:2415], Partition 0, Sender [0:0:0], Recipient [7:7519895153061951680:2419], Cookie: 0 2025-06-25T14:34:08.167757Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:08.167777Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:08.167794Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895153061951680:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.167811Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.167841Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:08.167881Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:08.167899Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:08.167926Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:08.174373Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895217486462772:2906], Partition 3, Sender [0:0:0], Recipient [7:7519895217486462861:2911], Cookie: 0 2025-06-25T14:34:08.174452Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895217486462861:2911]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.174488Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.174536Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:08.174617Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:08.174646Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:08.174683Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:34:08.175407Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:7519895092932408428:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:34:08.175450Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:34:08.175502Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [7:7519895092932408428:2146], Recipient [7:7519895092932408428:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:34:08.175527Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:34:08.266788Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895153061951620:2415], Partition 0, Sender [0:0:0], Recipient [7:7519895153061951680:2419], Cookie: 0 2025-06-25T14:34:08.266882Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895153061951680:2419]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.266918Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.266982Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:08.267071Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:08.267102Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:08.267144Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:08.267221Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895217486462770:2905], Partition 4, Sender [0:0:0], Recipient [7:7519895217486462865:2915], Cookie: 0 2025-06-25T14:34:08.267258Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895217486462865:2915]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.267276Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.267305Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:08.267341Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:08.267361Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:08.267383Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:34:08.267436Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895200306593142:2776], Partition 2, Sender [0:0:0], Recipient [7:7519895200306593223:2783], Cookie: 0 2025-06-25T14:34:08.267473Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895200306593223:2783]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.267490Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.267519Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:08.267553Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:08.267570Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:08.267592Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:08.267640Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895200306593141:2775], Partition 1, Sender [0:0:0], Recipient [7:7519895200306593225:2785], Cookie: 0 2025-06-25T14:34:08.267674Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895200306593225:2785]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.267689Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.267715Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:08.267750Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:08.267769Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:08.267789Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:34:08.272788Z node 7 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7519895217486462772:2906], Partition 3, Sender [0:0:0], Recipient [7:7519895217486462861:2911], Cookie: 0 2025-06-25T14:34:08.272873Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7519895217486462861:2911]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.272908Z node 7 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:34:08.272961Z node 7 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:34:08.273045Z node 7 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:34:08.273075Z node 7 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:34:08.273112Z node 7 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-27 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-28 >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex_Empty [GOOD] >> TFlatTableExecutor_KeepEraseMarkers::TestKeepEraseMarkers >> TFlatTableExecutor_KeepEraseMarkers::TestKeepEraseMarkers [GOOD] >> TFlatTableExecutor_LongTx::MemTableLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactUncommittedLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactCommittedLongTx >> TFlatTableExecutor_LongTx::CompactCommittedLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactedLongTxRestart [GOOD] >> TFlatTableExecutor_LongTx::CompactMultipleChanges >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-27 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-28 >> TFlatTableExecutor_LongTx::CompactMultipleChanges [GOOD] >> TFlatTableExecutor_LongTx::LongTxBorrow [GOOD] >> TFlatTableExecutor_LongTx::MemTableLongTxRead >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-7 [GOOD] >> TFlatTableExecutor_LongTx::MemTableLongTxRead [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-8 >> TFlatTableExecutor_LongTx::CompactedTxIdReuse [GOOD] >> TFlatTableExecutor_LongTx::MergeSkewedCommitted >> TFlatTableExecutor_LongTx::MergeSkewedCommitted [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::SmallValues >> TFlatTableExecutor_LongTxAndBlobs::SmallValues [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::OuterBlobValues >> IncrementalRestoreScan::Empty >> TFlatTableExecutor_LongTxAndBlobs::OuterBlobValues [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::ExternalBlobValues |79.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |79.7%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-2 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-3 >> TVPatchTests::PatchPartGetError >> TFlatTableExecutor_LongTxAndBlobs::ExternalBlobValues [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestEnqueueCancel [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriority [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityCancel |79.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityCancel [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityAllocatingCancel [GOOD] >> TFlatTableExecutor_MoveTableData::TestMoveSnapshot [GOOD] >> TFlatTableExecutor_MoveTableData::TestMoveSnapshotFollower >> TIterator::Mixed [GOOD] >> TIterator::MixedReverse >> TFlatTableExecutor_MoveTableData::TestMoveSnapshotFollower [GOOD] >> TFlatTableExecutor_PostponedScan::TestPostponedScan >> TVPatchTests::PatchPartGetError [GOOD] >> 
TFlatTableExecutor_PostponedScan::TestPostponedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestCancelFinishedScan >> TFlatTableExecutor_PostponedScan::TestCancelFinishedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestCancelRunningPostponedScan >> TExportToS3Tests::DropSourceTableBeforeTransferring >> TFlatTableExecutor_PostponedScan::TestCancelRunningPostponedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestPostponedScanSnapshotMVCC >> TVPatchTests::PatchPartOk >> TExportToS3Tests::ShouldSucceedOnConcurrentTxs >> TExportToS3Tests::CheckItemProgress >> TVPatchTests::PatchPartOk [GOOD] >> TExportToS3Tests::ShouldOmitNonStrictStorageSettings >> TFlatTableExecutor_PostponedScan::TestPostponedScanSnapshotMVCC [GOOD] >> TFlatTableExecutor_Reboot::TestSchemeGcAfterReassign >> TPartBtreeIndexIteration::FewNodes_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices >> TFlatTableExecutor_Reboot::TestSchemeGcAfterReassign [GOOD] >> TFlatTableExecutor_RejectProbability::MaxedOutRejectProbability ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartGetError [GOOD] Test command err: Recv 65537 2025-06-25T14:34:16.630112Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:16.631000Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-25T14:34:16.631068Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-25T14:34:16.631257Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# no 2025-06-25T14:34:16.631326Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:10:0] PullingPart# 1 Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:16.631513Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# ERROR ErrorReason# Recieve not OK status from VGetResult, received status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-25T14:34:16.631607Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> TExportToS3Tests::CancelUponCreatingExportDirShouldSucceed >> TVPatchTests::PatchPartPutError >> TExportToS3Tests::ExportPartitioningSettings >> TExportToS3Tests::UidAsIdempotencyKey >> TExportToS3Tests::CancelUponTransferringSingleShardTableShouldSucceed >> TExportToS3Tests::ShouldCheckQuotasExportsLimited >> TFlatTableExecutor_RejectProbability::MaxedOutRejectProbability [GOOD] >> 
TFlatTableExecutor_RejectProbability::SomeRejectProbability >> TVPatchTests::PatchPartPutError [GOOD] >> BuildStatsHistogram::Ten_Crossed [GOOD] >> BuildStatsHistogram::Ten_Mixed_Log >> TExportToS3Tests::ShouldSucceedOnSingleShardTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartOk [GOOD] Test command err: Recv 65537 2025-06-25T14:34:17.352477Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:17.353329Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-25T14:34:17.353390Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-25T14:34:17.353571Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# no 2025-06-25T14:34:17.353629Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:10:0] PullingPart# 1 Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:17.353809Z node 1 :BS_VDISK_PATCH INFO: {BSVSP08@skeleton_vpatch_actor.cpp:383} [0:1:0:0:0] TEvVPatch: received part data; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 DataParts# 4 ReceivedBlobId# [1:2:3:4:6:10:1] Status# OK ResultSize# 1 ParityPart# no 2025-06-25T14:34:17.353892Z node 1 :BS_VDISK_PATCH INFO: {BSVSP14@skeleton_vpatch_actor.cpp:462} [0:1:0:0:0] TEvVPatch: send xor diffs; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorDiffCount# 0 2025-06-25T14:34:17.353981Z node 1 :BS_VDISK_PATCH INFO: {BSVSP15@skeleton_vpatch_actor.cpp:502} [0:1:0:0:0] TEvVPatch: send vPut; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 ReceivedXorDiffs# 0 ExpectedXorDiffs# 0 Send NKikimr::TEvBlobStorage::TEvVPut Recv NKikimr::TEvBlobStorage::TEvVPutResult 2025-06-25T14:34:17.354144Z node 1 :BS_VDISK_PATCH INFO: {BSVSP10@skeleton_vpatch_actor.cpp:627} [0:1:0:0:0] TEvVPatch: received put result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK 2025-06-25T14:34:17.354189Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK ErrorReason# Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-25T14:34:17.354286Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-46 [GOOD] >> 
SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-47 >> TExportToS3Tests::RebootDuringCompletion >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-4 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-5 >> TFlatTableExecutor_RejectProbability::SomeRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::ZeroRejectProbability >> TDataShardTrace::TestTraceWriteImmediateOnShard [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentTxs [GOOD] >> TFlatTableExecutor_RejectProbability::ZeroRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::ZeroRejectProbabilityMultipleTables >> TExportToS3Tests::DropSourceTableBeforeTransferring [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartPutError [GOOD] Test command err: Recv 65537 2025-06-25T14:34:17.981283Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:17.982322Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-25T14:34:17.982388Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-25T14:34:17.982606Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# no 2025-06-25T14:34:17.982676Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:10:0] PullingPart# 1 Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:17.982899Z node 1 :BS_VDISK_PATCH INFO: {BSVSP08@skeleton_vpatch_actor.cpp:383} [0:1:0:0:0] TEvVPatch: received part data; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 DataParts# 4 ReceivedBlobId# [1:2:3:4:6:10:1] Status# OK ResultSize# 1 ParityPart# no 2025-06-25T14:34:17.983010Z node 1 :BS_VDISK_PATCH INFO: {BSVSP14@skeleton_vpatch_actor.cpp:462} [0:1:0:0:0] TEvVPatch: send xor diffs; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorDiffCount# 0 2025-06-25T14:34:17.983110Z node 1 :BS_VDISK_PATCH INFO: {BSVSP15@skeleton_vpatch_actor.cpp:502} [0:1:0:0:0] TEvVPatch: send vPut; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 ReceivedXorDiffs# 0 ExpectedXorDiffs# 0 Send NKikimr::TEvBlobStorage::TEvVPut Recv NKikimr::TEvBlobStorage::TEvVPutResult 2025-06-25T14:34:17.983325Z node 1 :BS_VDISK_PATCH INFO: {BSVSP10@skeleton_vpatch_actor.cpp:627} [0:1:0:0:0] TEvVPatch: received put result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# ERROR 2025-06-25T14:34:17.983389Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: 
send patch result; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# ERROR ErrorReason# Recieve not OK status from VPutResult, received status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-25T14:34:17.983488Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> TExportToS3Tests::ShouldSucceedOnConcurrentExport >> TExportToS3Tests::DropCopiesBeforeTransferring1 >> TFlatTableExecutor_RejectProbability::ZeroRejectProbabilityMultipleTables [GOOD] >> TFlatTableExecutor_Reschedule::TestExecuteReschedule >> TExportToS3Tests::CancelUponCreatingExportDirShouldSucceed [GOOD] >> TFlatTableExecutor_Reschedule::TestExecuteReschedule [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorSetResourceProfile [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestTxData [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorReuseStaticMemory >> TExportToS3Tests::ShouldOmitNonStrictStorageSettings [GOOD] >> IncrementalRestoreScan::ChangeSenderSimple [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorReuseStaticMemory [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestPages >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestPages [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorPageLimitExceeded [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemory >> TExportToS3Tests::CancelUponCopyingTablesShouldSucceed >> TExportToS3Tests::ShouldPreserveIncrBackupFlag >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemory [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemoryFollower >> TOosLogicTests::RenderHtml [GOOD] >> TVPatchTests::FindingPartsWhenError >> TExportToS3Tests::ExportPartitioningSettings [GOOD] >> TVPatchTests::FindingPartsWhenError [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemoryFollower [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorMemoryLimitExceeded [GOOD] >> IncrementalRestoreScan::ChangeSenderEmpty [GOOD] >> TExportToS3Tests::ExportIndexTablePartitioningSettings >> TFlatTableExecutor_ResourceProfile::TestExecutorPreserveTxData [GOOD] >> TExportToS3Tests::UidAsIdempotencyKey [GOOD] >> TExportToS3Tests::ShouldCheckQuotasExportsLimited [GOOD] >> TExecutorDb::CoordinatorSimulation [GOOD] >> TExecutorDb::RandomCoordinatorSimulation >> TExportToS3Tests::DropCopiesBeforeTransferring1 [GOOD] >> TExportToS3Tests::UserSID ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceWriteImmediateOnShard [GOOD] Test command err: 2025-06-25T14:34:15.241896Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:15.242055Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:15.242110Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b80/r3tmp/tmpNGSHlW/pdisk_1.dat 2025-06-25T14:34:15.849069Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:15.852540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:15.911518Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:15.926912Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862051526670 != 1750862051526674 2025-06-25T14:34:15.983041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:15.983177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:15.997501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:16.095432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:16.645187Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::ChangeSenderSimple [GOOD] Test command err: 2025-06-25T14:34:17.433212Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:17.433364Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:17.433415Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018e5/r3tmp/tmprop931/pdisk_1.dat 2025-06-25T14:34:17.744995Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:17.861041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-25T14:34:17.861299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:34:17.861523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:34:17.861567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:34:17.861752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:34:17.861822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:17.862564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:34:17.862785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:34:17.863026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:34:17.863105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:34:17.863147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:17.863178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:17.863731Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:34:17.863777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:34:17.863817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:17.864349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:34:17.864409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:34:17.864463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-25T14:34:17.864541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:17.867793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:17.868339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:17.868533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:34:17.869578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-25T14:34:17.869621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-25T14:34:17.869669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-25T14:34:17.902958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-06-25T14:34:17.903040Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:17.903769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage 2025-06-25T14:34:17.903829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:17.911165Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862053906255 != 1750862053906259 2025-06-25T14:34:17.961160Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:34:17.962133Z node 1 :TX_PROXY DEBUG: 
client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:34:17.962689Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:17.962810Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:17.977480Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:18.065826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:34:18.066007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:34:18.066053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-25T14:34:18.066298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:18.066347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-25T14:34:18.066513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:34:18.066573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-25T14:34:18.067355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:34:18.067417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 1, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-25T14:34:18.067590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:34:18.067642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:534:2462], at schemeshard: 72057594046644480, txId: 1, path id: 1 2025-06-25T14:34:18.067755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:34:18.067793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 1:0 ProgressState 2025-06-25T14:34:18.067877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:34:18.067911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:34:18.067946Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:34:18.067974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:34:18.068004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:34:18.068038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:34:18.068088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:34:18.068127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:34:18.068193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-25T14:34:18.068226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publicatio ... :0 2025-06-25T14:34:18.913553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T14:34:18.913808Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037889 state Ready 2025-06-25T14:34:18.913868Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-25T14:34:18.914459Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:61:2108] Handle TEvNavigate describe path /Root/IncrBackupTable 2025-06-25T14:34:18.951340Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:770:2628] HANDLE EvNavigateScheme /Root/IncrBackupTable 2025-06-25T14:34:18.955187Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:770:2628] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:34:18.955321Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:770:2628] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/IncrBackupTable" Options { ShowPrivateTable: true } 2025-06-25T14:34:18.956253Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:770:2628] Handle TEvDescribeSchemeResult Forward to# [1:554:2480] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/IncrBackupTable" PathDescription { Self { Name: "IncrBackupTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1500 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 
MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-25T14:34:18.958450Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:780:2632], serverId# [1:781:2633], sessionId# [0:0:0] 2025-06-25T14:34:18.959168Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:65: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:782:2634] HandleUserTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/IncrBackupTable TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:34:18.959375Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:131: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:782:2634] HandleTargetTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:34:18.959665Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:782:2634] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:34:18.959850Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:139: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:782:2634] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] BodySize: 18 }] } 
2025-06-25T14:34:18.959944Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:144: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:782:2634] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Kind: IncrementalRestore Source: InitialScan Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 3] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:34:18.960117Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [1:61:2108] Handle TEvGetProxyServicesRequest 2025-06-25T14:34:18.960173Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:40: [TableChangeSenderShard][0:0][72075186224037888][1:786:2634] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-25T14:34:18.967867Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:787:2638], serverId# [1:788:2639], sessionId# [0:0:0] 2025-06-25T14:34:19.021877Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][0:0][72075186224037888][1:786:2634] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:34:19.022339Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:154: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:782:2634] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:34:19.022505Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][0:0][72075186224037888][1:786:2634] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Kind: IncrementalRestore Source: InitialScan Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 3] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:34:19.022597Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:154: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:782:2634] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:34:19.022749Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:176: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:782:2634] Handle NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvNoMoreData ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FindingPartsWhenError [GOOD] Test command err: Recv 65537 2025-06-25T14:34:20.030296Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-25T14:34:20.031179Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# ERROR ResultSize# 1 2025-06-25T14:34:20.031260Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [] Status# ERROR Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts 2025-06-25T14:34:20.031350Z node 1 :BS_VDISK_PATCH DEBUG: 
{BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> TExportToS3Tests::ShouldCheckQuotasChildrenLimited >> TExportToS3Tests::ShouldSucceedOnConcurrentExport [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-8 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-9 >> TExportToS3Tests::DropCopiesBeforeTransferring2 >> TExportToS3Tests::RebootDuringCompletion [GOOD] >> TExportToS3Tests::CheckItemProgress [GOOD] >> TExportToS3Tests::ShouldSucceedOnSingleShardTable [GOOD] >> IncrementalRestoreScan::Empty [GOOD] >> TExportToS3Tests::ShouldPreserveIncrBackupFlag [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentImport >> TDataShardTrace::TestTraceDistributedSelectViaReadActors >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-3 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-4 >> TExportToS3Tests::CompletedExportEndTime >> TExportToS3Tests::RebootDuringAbortion |79.8%| [TA] $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TExportToS3Tests::ShouldExcludeBackupTableFromStats >> TExportToS3Tests::ShouldSucceedOnMultiShardTable |79.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |79.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |79.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/test-results/unittest/{meta.json ... results_accumulator.log} |79.8%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker >> TExportToS3Tests::UserSID [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::ChangeSenderEmpty [GOOD] Test command err: 2025-06-25T14:34:17.676201Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:17.676637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:17.676692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001904/r3tmp/tmpNBOXBM/pdisk_1.dat 2025-06-25T14:34:18.124274Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:18.322155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-25T14:34:18.322384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:34:18.322604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:34:18.322645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:34:18.322859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:34:18.322932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:18.323667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:34:18.323860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:34:18.324078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:34:18.324146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:34:18.324202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:18.324234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:18.324847Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:34:18.324899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:34:18.324936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:18.325448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:34:18.325496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:34:18.325542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-25T14:34:18.325604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:18.329348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:18.329884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:18.330061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:34:18.331109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-25T14:34:18.331157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-25T14:34:18.331194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-25T14:34:18.357215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-06-25T14:34:18.357311Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:18.357987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage 2025-06-25T14:34:18.358044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:18.378352Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862053635793 != 1750862053635797 2025-06-25T14:34:18.428518Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:34:18.429502Z node 1 :TX_PROXY DEBUG: 
client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:34:18.430011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:18.430132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:18.441818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:18.534107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:34:18.534298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:34:18.534345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-25T14:34:18.534627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:18.534683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-25T14:34:18.534874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:34:18.534954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-25T14:34:18.535723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:34:18.535787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 1, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-25T14:34:18.535954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:34:18.536005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:534:2462], at schemeshard: 72057594046644480, txId: 1, path id: 1 2025-06-25T14:34:18.536111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:34:18.536150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 1:0 ProgressState 2025-06-25T14:34:18.536246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:34:18.536276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:34:18.536648Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:34:18.536692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:34:18.536729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:34:18.536769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:34:18.536821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:34:18.536883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:34:18.536958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-25T14:34:18.536993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publicatio ... r: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { 
SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046644480 2025-06-25T14:34:19.471933Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:61:2108] Handle TEvNavigate describe path /Root/IncrBackupTable 2025-06-25T14:34:19.503034Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:782:2634] HANDLE EvNavigateScheme /Root/IncrBackupTable 2025-06-25T14:34:19.503655Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:782:2634] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:34:19.503776Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:782:2634] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/IncrBackupTable" 2025-06-25T14:34:19.504985Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:782:2634] Handle TEvDescribeSchemeResult Forward to# [1:554:2480] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/IncrBackupTable" PathDescription { Self { Name: "IncrBackupTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1500 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 
ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-25T14:34:19.509470Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:65: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:784:2636] HandleUserTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: 
result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/IncrBackupTable TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:34:19.509750Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:131: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:784:2636] HandleTargetTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:34:19.510061Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:784:2636] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:34:19.510215Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:176: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:784:2636] Handle NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvNoMoreData >> TExportToS3Tests::ExportIndexTablePartitioningSettings [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-28 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-29 >> TExportToS3Tests::EnableChecksumsPersistance >> TExportToS3Tests::Topics ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TFlatTableExecutor_ResourceProfile::TestExecutorPreserveTxData [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:21.491927Z 00000.010 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.011 II| FAKE_ENV: Starting storage for BS group 0 00000.011 II| FAKE_ENV: Starting storage for BS group 1 00000.011 II| FAKE_ENV: Starting storage for BS group 2 00000.011 II| FAKE_ENV: Starting storage for BS group 3 00000.075 C1| TABLET_EXECUTOR: Tablet 1 unhandled exception std::runtime_error: test ??+0 (0x11A98F91) 
__cxa_throw+221 (0x11A98DBD) NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Exceptions::TTxExecuteThrowException::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext const&)+62 (0x10D839FE) NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*)+3349 (0x17DB6105) NKikimr::NTabletFlatExecutor::TExecutor::StateWork(TAutoPtr&)+504 (0x17D7A4D8) NActors::IActor::Receive(TAutoPtr&)+237 (0x13299C9D) 00000.076 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.076 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.076 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.076 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {62b, 2} 00000.076 II| FAKE_ENV: DS.1 gone, left {35b, 1}, put {35b, 1} 00000.076 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.076 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.076 II| FAKE_ENV: All BS storage groups are stopped 00000.076 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.076 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 1 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:21.574364Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.013 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.013 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.013 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.013 II| FAKE_ENV: DS.0 gone, left {111b, 2}, put {131b, 3} 00000.013 II| FAKE_ENV: DS.1 gone, left {42b, 2}, put {42b, 2} 00000.013 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.013 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.013 II| FAKE_ENV: All BS storage groups are stopped 00000.013 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.013 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:21.592580Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.017 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 4 actors 00000.018 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.018 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.018 II| FAKE_ENV: DS.0 gone, left {561b, 14}, put {623b, 16} 00000.018 II| FAKE_ENV: DS.1 gone, left {693b, 8}, put {693b, 8} 00000.019 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.019 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.019 II| FAKE_ENV: All BS storage groups are stopped 00000.019 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.019 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:21.616760Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 
AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.019 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 4 actors 00000.020 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.020 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.020 II| FAKE_ENV: DS.0 gone, left {141b, 4}, put {669b, 13} 00000.020 II| FAKE_ENV: DS.1 gone, left {868b, 8}, put {987b, 10} 00000.020 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.020 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.021 II| FAKE_ENV: All BS storage groups are stopped 00000.021 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 0.000s 00000.021 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:21.642266Z 00000.006 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.007 II| FAKE_ENV: Starting storage for BS group 0 00000.007 II| FAKE_ENV: Starting storage for BS group 1 00000.007 II| FAKE_ENV: Starting storage for BS group 2 00000.007 II| FAKE_ENV: Starting storage for BS group 3 00000.008 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.009 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 ... initializing schema 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 ... inserting rows 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} hope 1 -> done Change{2, redo 512b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} release 4194304b of static, Memory{0 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 ... starting follower ... waiting for follower attach ... blocking NKikimr::TEvTablet::TEvNewFollowerAttached from TABLET_ACTOR to NKikimr::NTabletFlatExecutor::TTestFlatTablet cookie 0 ... waiting for follower attach (done) ... 
spamming QueueScan transactions 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.012 II| TABLET_EXECUTOR: Leader{1:2:5} starting Scan{2 on 101, TEmptyScan{}} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 8 for step 4 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.013 II| TABLET_EXECUTOR: Leader{1:2:6} starting Scan{4 on 101, TEmptyScan{}} 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.013 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 8 for step 5 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.014 II| TABLET_EXECUTOR: Leader{1:2:7} starting Scan{6 on 101, TEmptyScan{}} 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 8 for step 6 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.014 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.014 II| TABLET_EXECUTOR: Leader{1:2:8} starting Scan{8 on 101, TEmptyScan{}} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 
0b annex 0, ~{ } -{ }, 0 gb} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 8 for step 7 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.015 II| TABLET_EXECUTOR: Leader{1:2:9} starting Scan{10 on 101, TEmptyScan{}} 00000.015 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.015 DD| TABLET_ ... ange{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.061 DD| TABLET_EXECUTOR: Leader{1:2:4} found attached Res{10 20480b} 00000.061 DD| TABLET_EXECUTOR: release 10240b of static tx data due to attached res 10, Memory{0 dyn 20480} 00000.061 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 524267520b requested for data (524288000b in total) 00000.062 EE| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} mem 524288000b terminated, limit 314572800b is exceeded 00000.062 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{10 20480b}, Memory{0 dyn 0} 00000.062 DD| RESOURCE_BROKER: Update cookie for task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062]) 00000.062 DD| RESOURCE_BROKER: Finish task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062]) (release resources {0, 20480}) 00000.062 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001311 to 0.000000 (remove task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062])) 00000.062 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.062 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.062 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.062 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 19456b requested for data (20480b in total) 00000.062 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.062 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.062 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{11 20480b} type small_transaction 00000.063 DD| RESOURCE_BROKER: Submitted new unknown task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) priority=5 resources={0, 20480} 00000.063 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.063 DD| RESOURCE_BROKER: Allocate resources {0, 20480} for task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) from queue queue_default 00000.063 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.063 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 0.001192 (insert task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00000.063 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{11 20480b}, Memory{0 dyn 20480} 00000.063 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.063 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} update resource task 11 releasing 0b, Memory{0 dyn 20480} 00000.063 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} captured Res{11 20480b} 00000.063 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.063 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 20480} 00000.063 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.064 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 19456b requested for data (20480b in total) 00000.064 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 
20480} 00000.064 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.064 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{12 20480b} type small_transaction 00000.064 DD| RESOURCE_BROKER: Update task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) (priority=5 type=small_transaction resources={0, 20480} resubmit=0) 00000.064 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.064 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 0.001192 (insert task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00000.064 DD| RESOURCE_BROKER: Submitted new unknown task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) priority=5 resources={0, 20480} 00000.064 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.064 DD| RESOURCE_BROKER: Allocate resources {0, 20480} for task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) from queue queue_default 00000.064 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.064 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001192 to 0.002384 (insert task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00000.064 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{12 20480b}, Memory{0 dyn 40960} 00000.064 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.064 DD| TABLET_EXECUTOR: Leader{1:2:4} found attached Res{11 20480b} 00000.064 DD| TABLET_EXECUTOR: Leader{1:2:4} moving tx data from attached Res{11 20480b} to Res{12 ...} 00000.064 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 524267520b requested for data (524288000b in total) 00000.064 EE| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} mem 524288000b terminated, limit 314572800b is exceeded 00000.064 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{12 40960b}, Memory{0 dyn 0} 00000.064 DD| 
RESOURCE_BROKER: Update task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) (priority=5 type=small_transaction resources={0, 40960} resubmit=0) 00000.064 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.064 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001192 to 0.003576 (insert task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00000.064 DD| RESOURCE_BROKER: Finish task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) (release resources {0, 20480}) 00000.064 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.003576 to 0.002384 (remove task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00000.065 DD| RESOURCE_BROKER: Finish task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) (release resources {0, 40960}) 00000.065 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.002384 to 0.000000 (remove task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00000.065 II| TABLET_EXECUTOR: Leader{1:2:4} suiciding, Waste{2:0, 317b +(0, 0b), 3 trc, -0b acc} 00000.066 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.066 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.066 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.066 II| FAKE_ENV: DS.0 gone, left {180b, 3}, put {200b, 4} 00000.066 II| FAKE_ENV: DS.1 gone, left {352b, 3}, put {352b, 3} 00000.066 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.066 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.067 II| FAKE_ENV: All BS storage groups are stopped 00000.067 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.067 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 45 Left 401}, stopped >> TDataShardTrace::TestTraceDistributedUpsert-UseSink >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-28 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-29 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::Empty [GOOD] Test command err: 2025-06-25T14:34:19.902297Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:19.902460Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:19.902517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018e2/r3tmp/tmpFh2FNe/pdisk_1.dat 2025-06-25T14:34:20.292006Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:20.292980Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:178: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:558:2483] Exhausted 2025-06-25T14:34:20.293104Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:127: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:558:2483] Handle TEvIncrementalRestoreScan::TEvFinished NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvFinished 2025-06-25T14:34:20.293163Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:191: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:558:2483] Finish Done >> KqpSinkLocks::InsertWithBulkUpsert+UseBulkUpsert [GOOD] >> KqpSinkLocks::InsertWithBulkUpsert-UseBulkUpsert >> TExportToS3Tests::DropCopiesBeforeTransferring2 [GOOD] >> TExportToS3Tests::CorruptedDyNumber >> TExportToS3Tests::ShouldCheckQuotasChildrenLimited [GOOD] >> TExportToS3Tests::SchemaMappingEncryption >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices_Sticky |79.8%| [TA] $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TExportToS3Tests::ShouldSucceedOnConcurrentImport [GOOD] >> BuildStatsHistogram::Ten_Mixed_Log [GOOD] >> BuildStatsHistogram::Ten_Serial_Log >> TExportToS3Tests::CompletedExportEndTime [GOOD] >> TExportToS3Tests::RebootDuringAbortion [GOOD] |79.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TExportToS3Tests::ShouldRetryAtFinalStage >> TExportToS3Tests::Topics [GOOD] >> TExportToS3Tests::ShouldSucceedOnMultiShardTable [GOOD] >> TExportToS3Tests::Checksums >> TExportToS3Tests::ExportStartTime >> TDataShardTrace::TestTraceDistributedSelect >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-47 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-48 >> DataShardVolatile::UpsertBrokenLockArbiterRestart+UseSink [GOOD] >> DataShardVolatile::UpsertBrokenLockArbiterRestart-UseSink >> TDataShardTrace::TestTraceDistributedUpsert+UseSink >> TExportToS3Tests::ShouldSucceedOnManyTables >> TExportToS3Tests::CorruptedDyNumber [GOOD] >> TExportToS3Tests::TopicsWithPermissions >> TExportToS3Tests::EnableChecksumsPersistance [GOOD] >> TExportToS3Tests::DisableAutoDropping |79.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TExportToS3Tests::EncryptedExport >> TVectorIndexTests::VectorKmeansTreeImplTable [GOOD] >> TExportToS3Tests::CancelUponTransferringSingleShardTableShouldSucceed [GOOD] >> TExportToS3Tests::TopicsWithPermissions [GOOD] |79.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |79.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |79.8%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/test-results/unittest/{meta.json ... 
results_accumulator.log} |79.8%| [LD] {RESULT} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer >> TExportToS3Tests::ExportStartTime [GOOD] >> TExportToS3Tests::SchemaMappingEncryption [GOOD] >> TExportToS3Tests::CancelUponTransferringMultiShardTableShouldSucceed >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-5 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-6 >> TExportToS3Tests::CancelUponCopyingTablesShouldSucceed [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMicroSeconds >> TExportToS3Tests::Checksums [GOOD] >> TIterator::MixedReverse [GOOD] >> TIterator::Serial |79.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::VectorKmeansTreeImplTable [GOOD] >> TExportToS3Tests::SchemaMapping >> TExportToS3Tests::AuditCompletedExport >> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey >> TExportToS3Tests::ChecksumsWithCompression >> TExportToS3Tests::DisableAutoDropping [GOOD] >> TExportToS3Tests::ShouldSucceedOnManyTables [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-4 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-5 >> TExportToS3Tests::TablePermissions >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MilliSeconds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt4Seconds >> BasicUsage::ReadMirrored [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::TopicsWithPermissions [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:18.753041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:18.753143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:18.753183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:18.753216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:18.753255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:18.753280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:18.753332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:18.753408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:18.761919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:18.762330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:18.985082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:18.985133Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:19.009980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:19.010173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:19.010334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:19.047248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:19.047529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:19.048195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.053070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:19.057977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:19.058186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:19.059409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:19.059468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:19.059626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:19.059676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:19.059726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:19.059803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.085240Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:34:19.362613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: 
"pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:19.362893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.363110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:19.363158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:19.363398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:19.363481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:19.369154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.369366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:19.369579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.369646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:19.369747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:19.369780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:19.375219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.375294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:19.375342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:19.381241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.381304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.381362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 
1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.381421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:19.393668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:19.397978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:19.398167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:19.399239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.399393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:19.399457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.399752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:19.399816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.399991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:19.400089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:19.409781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:19.409833Z node 1 :FLAT_TX_SCHEMESHARD ... 
blish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710757, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:34:24.911223Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:24.911262Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 281474976710757, path id: 1 2025-06-25T14:34:24.911300Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 281474976710757, path id: 3 2025-06-25T14:34:24.911663Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710757:0, at schemeshard: 72057594046678944 2025-06-25T14:34:24.911704Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710757:0 ProgressState 2025-06-25T14:34:24.911774Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710757:0 progress is 1/1 2025-06-25T14:34:24.911802Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710757 ready parts: 1/1 2025-06-25T14:34:24.911834Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710757:0 progress is 1/1 2025-06-25T14:34:24.911859Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710757 ready parts: 1/1 2025-06-25T14:34:24.911888Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710757, ready parts: 1/1, is published: false 2025-06-25T14:34:24.911919Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710757 ready parts: 1/1 2025-06-25T14:34:24.911948Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710757:0 2025-06-25T14:34:24.911973Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710757:0 2025-06-25T14:34:24.912029Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:34:24.912059Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710757, publications: 2, subscribers: 1 2025-06-25T14:34:24.912085Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710757, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:34:24.912109Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710757, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-25T14:34:24.912813Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 281474976710757 
2025-06-25T14:34:24.912909Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 281474976710757 2025-06-25T14:34:24.912939Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710757 2025-06-25T14:34:24.912970Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710757, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:34:24.913001Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:34:24.913937Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710757 2025-06-25T14:34:24.914003Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710757 2025-06-25T14:34:24.914029Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710757 2025-06-25T14:34:24.914056Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710757, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-25T14:34:24.914083Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:34:24.914141Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710757, subscribers: 1 2025-06-25T14:34:24.914170Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:126:2150] 2025-06-25T14:34:24.929191Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710757 2025-06-25T14:34:24.929306Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710757 2025-06-25T14:34:24.929379Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710757 2025-06-25T14:34:24.929458Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710757 2025-06-25T14:34:24.936792Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:24.938587Z node 4 
:SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 179us result status StatusSuccess 2025-06-25T14:34:24.939038Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic" PathDescription { Self { Name: "Topic" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409548 } PersQueueGroup { Name: "Topic" PathId: 2 TotalGroupCount: 2 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot" } Partitions { PartitionId: 0 TabletId: 72075186233409547 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409548 NextPartitionId: 2 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 102 2025-06-25T14:34:24.985101Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:34:24.985172Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:34:24.985662Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:62: NotifyTxCompletion export in-flight, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:34:24.985705Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 102, at schemeshard: 72057594046678944 REQUEST: PUT /create_topic.pb HTTP/1.1 HEADERS: Host: localhost:16042 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 6734E502-BA7A-40C4-9B7A-4C6680387910 amz-sdk-request: attempt=1 content-length: 468 content-md5: eolrX6cGdcMGCBM8sb+6PQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /create_topic.pb / / 468 REQUEST: PUT /permissions.pb 
HTTP/1.1 HEADERS: Host: localhost:16042 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B2FE9166-ED8F-4384-9BB6-C5543F97A16A amz-sdk-request: attempt=1 content-length: 43 content-md5: JIqMFsQjXF0c+sG0y+coog== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /permissions.pb / / 43 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:16042 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: F2D9437F-E97D-46E2-969C-BCE2FCCF0A89 amz-sdk-request: attempt=1 content-length: 64 content-md5: axcCOQtFAWkgKK80Zy2JrQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 64 2025-06-25T14:34:25.053414Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:34:25.053465Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:621:2548] TestWaitNotification: OK eventTxId 102 >> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-29 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-30 >> TExportToS3Tests::EncryptedExport [GOOD] >> TExportToS3Tests::ChecksumsWithCompression [GOOD] >> TExportToS3Tests::Changefeeds >> TExportToS3Tests::AuditCompletedExport [GOOD] >> TExportToS3Tests::SchemaMapping [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::DisableAutoDropping [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:17.713239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:17.713331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:17.713376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:17.713410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:17.713448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:17.713476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:17.713536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: 
Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:17.713600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:17.714468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:17.714818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:17.809188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:17.809248Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:17.818121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:17.818304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:17.818476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:17.835975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:17.836253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:17.836934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:17.837132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:17.842104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:17.842319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:17.843410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:17.843485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:17.843646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:17.843694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:17.843738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:17.843820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:17.851040Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is 
[1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:34:18.040706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:18.040919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.041119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:18.041162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:18.041371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:18.041437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:18.051559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.051820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:18.052079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.052146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:18.052194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:18.052225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:18.057253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.057315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:18.057357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:18.059248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.059300Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.059337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.059384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:18.062842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:18.064723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:18.064894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:18.065878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.066024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:18.066091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.066378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:18.066440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.066620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:18.066697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:18.068938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:18.068988Z node 1 :FLAT_TX_SCHEMESHARD ... 
T14:34:26.038218Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-25T14:34:26.038245Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-25T14:34:26.038273Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:34:26.038330Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-25T14:34:26.044655Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-25T14:34:26.044841Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-25T14:34:26.044903Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-25T14:34:26.044954Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-25T14:34:26.045063Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710761:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710761 msg type: 269090816 2025-06-25T14:34:26.045168Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710761, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 281474976710761 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000007 2025-06-25T14:34:26.045548Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:26.045649Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 21474838638 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:26.045690Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710761:0, step: 5000007, at schemeshard: 72057594046678944 2025-06-25T14:34:26.045809Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-25T14:34:26.045935Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-25T14:34:26.045978Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-25T14:34:26.046019Z node 5 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-25T14:34:26.046065Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-25T14:34:26.046123Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:34:26.046193Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:34:26.046232Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 2025-06-25T14:34:26.046279Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-25T14:34:26.046318Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-25T14:34:26.046354Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710761:0 2025-06-25T14:34:26.046412Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:34:26.046449Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2025-06-25T14:34:26.046487Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-25T14:34:26.046523Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-25T14:34:26.047178Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:26.047273Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:26.048949Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:26.048992Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:26.049132Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:34:26.049233Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:26.049265Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: 
[5:211:2211], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2025-06-25T14:34:26.049302Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:211:2211], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 3 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-25T14:34:26.050027Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:26.050102Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:26.050136Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-25T14:34:26.050188Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-25T14:34:26.050231Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:34:26.050894Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:26.050974Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:26.051004Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-25T14:34:26.051037Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-25T14:34:26.051063Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:34:26.051121Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2025-06-25T14:34:26.051161Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [5:126:2150] 2025-06-25T14:34:26.057582Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:26.069369Z 
node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:26.069513Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-25T14:34:26.069575Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710761 2025-06-25T14:34:26.069629Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-25T14:34:26.069659Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-25T14:34:26.069690Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-06-25T14:34:26.071454Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-25T14:34:26.071556Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:34:26.071601Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:616:2570] TestWaitNotification: OK eventTxId 102 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-29 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-30 >> DstCreator::SameOwner >> KqpSinkLocks::EmptyRangeAlreadyBrokenOlap [GOOD] >> TExportToS3Tests::AuditCancelledExport >> TExportToS3Tests::TablePermissions [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-9 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-49 >> DataShardVolatile::DistributedUpsertRestartBeforePrepare+UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartBeforePrepare-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::ReadMirrored [GOOD] Test command err: 2025-06-25T14:32:56.120411Z :PropagateSessionClosed INFO: Random seed for debugging is 1750861976120371 2025-06-25T14:32:56.721366Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894930300369879:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:56.721451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:56.928769Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894927765803282:2243];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:56.928867Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:57.123558Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012ec/r3tmp/tmp60gHhk/pdisk_1.dat 2025-06-25T14:32:57.174528Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:32:57.633504Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:57.633597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:57.661005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:57.661091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:57.673520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:57.749203Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:57.754872Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:32:57.755015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:57.768566Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:57.778076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 21154, node 1 2025-06-25T14:32:57.885085Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:58.125581Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0012ec/r3tmp/yandexBRddoo.tmp 2025-06-25T14:32:58.125605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0012ec/r3tmp/yandexBRddoo.tmp 2025-06-25T14:32:58.125724Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0012ec/r3tmp/yandexBRddoo.tmp 2025-06-25T14:32:58.125837Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:32:58.268970Z INFO: TTestServer started on Port 1623 GrpcPort 21154 TClient is connected to server localhost:1623 PQClient connected to localhost:21154 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:32:59.068829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:32:59.196370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-25T14:33:01.727241Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894930300369879:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:01.727343Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:01.863036Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519894927765803282:2243];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:01.863118Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:02.498604Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894953535607154:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:02.498752Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894953535607173:2275], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:02.498831Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:02.513420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:02.598818Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894953535607177:2276], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:33:02.690081Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894953535607205:2134] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:03.301189Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894953535607212:2280], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:33:03.300912Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894956070174601:2307], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:33:03.302710Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=YTc3MjAyZGEtMWY3NWVmYTUtZWY0ZjAzNjgtZTljNzIwOWM=, ActorId: [2:7519894953535607151:2269], ActorState: ExecuteState, TraceId: 01jykr3aradsfvxjv1m8jnsp37, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:33:03.304836Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:33:03.302957Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OTVlOGI0NDQtMjI5MWEzMzItMTlhOTViNWEtMTNiOTk0NjA=, ActorId: [1:7519894956070174560:2300], ActorState: ExecuteState, TraceId: 01jykr3b2zbdk2tzyf6f4c8dk9, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:33:03.305267Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:33:03.307293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/co ... 
om-dc3 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 4 from offset 5 2025-06-25T14:34:25.136858Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 1 consumer shared/user session shared/user_1_1_12542537927434849856_v1 after read state TopicId: Topic rt3.dc1--test-topic-mirrored-from-dc3 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 5 ReadOffset 5 ReadGuid 6ea179f0-f0ab707e-9e478f35-c576bfa has messages 1 2025-06-25T14:34:25.136979Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/user session shared/user_1_1_12542537927434849856_v1 read done: guid# 6ea179f0-f0ab707e-9e478f35-c576bfa, partition# TopicId: Topic rt3.dc1--test-topic-mirrored-from-dc3 in dc dc1 in database: Root, partition 0(assignId:1), size# 1398 2025-06-25T14:34:25.137000Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/user session shared/user_1_1_12542537927434849856_v1 response to read: guid# 6ea179f0-f0ab707e-9e478f35-c576bfa 2025-06-25T14:34:25.137259Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/user session shared/user_1_1_12542537927434849856_v1 Process answer. Aval parts: 0 2025-06-25T14:34:25.140589Z :DEBUG: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] [] Got ReadResponse, serverBytesSize = 1398, now ReadSizeBudget = 0, ReadSizeServerDelta = 8387210 2025-06-25T14:34:25.140716Z :DEBUG: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 8387210 2025-06-25T14:34:25.143514Z :DEBUG: [/Root] Decompression task done. Partition/PartitionSessionId: 1 (2-4) 2025-06-25T14:34:25.143577Z :DEBUG: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] [] Returning serverBytesSize = 1398 to budget 2025-06-25T14:34:25.143620Z :DEBUG: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] [] In ContinueReadingDataImpl, ReadSizeBudget = 1398, ReadSizeServerDelta = 8387210 2025-06-25T14:34:25.143911Z :DEBUG: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 8388608 2025-06-25T14:34:25.143997Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 0} (2-2) 2025-06-25T14:34:25.144038Z :DEBUG: [/Root] Take Data. Partition 0. Read: {1, 0} (3-3) 2025-06-25T14:34:25.144078Z :DEBUG: [/Root] Take Data. Partition 0. Read: {1, 1} (4-4) >>> event from dataHandler: 2025-06-25T14:34:25.144394Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|3863d84a-ee0eead8-954eaecc-d312b22f_0] Write session will now close DataReceived { Partition session id: 1 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 Message { Data: ..240 bytes.. Information: { Offset: 2 ProducerId: "src_id" SeqNo: 3 CreateTime: 2025-06-25T14:34:25.041000Z WriteTime: 2025-06-25T14:34:25.100000Z MessageGroupId: "src_id" Meta: { "ident": "unknown", "server": "ipv6:[::1]:50480", "logtype": "unknown", "_ip": "ipv6:[::1]:50480" } MessageMeta: { } } Partition session id: 1 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 } Message { Data: ..350 bytes.. 
Information: { Offset: 3 ProducerId: "src_id" SeqNo: 4 CreateTime: 2025-06-25T14:34:25.041000Z WriteTime: 2025-06-25T14:34:25.104000Z MessageGroupId: "src_id" Meta: { "ident": "unknown", "server": "ipv6:[::1]:50480", "logtype": "unknown", "_ip": "ipv6:[::1]:50480" } MessageMeta: { } } Partition session id: 1 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 } Message { Data: ..460 bytes.. Information: { Offset: 4 ProducerId: "src_id" SeqNo: 5 CreateTime: 2025-06-25T14:34:25.041000Z WriteTime: 2025-06-25T14:34:25.104000Z MessageGroupId: "src_id" Meta: { "ident": "unknown", "server": "ipv6:[::1]:50480", "logtype": "unknown", "_ip": "ipv6:[::1]:50480" } MessageMeta: { } } Partition session id: 1 Topic: "test-topic" Partition: 0 Database name: dc3 Database path: /Root Database id: account-dc3 } } >>> get 3 messages in this event 2025-06-25T14:34:25.144452Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|3863d84a-ee0eead8-954eaecc-d312b22f_0] Write session: aborting 2025-06-25T14:34:25.144490Z :DEBUG: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] [] The application data is transferred to the client. Number of messages 3, size 1050 bytes 2025-06-25T14:34:25.144542Z :DEBUG: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] [] Returning serverBytesSize = 0 to budget 2025-06-25T14:34:25.144881Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|3863d84a-ee0eead8-954eaecc-d312b22f_0] Write session: gracefully shut down, all writes complete >>> Writes to test-topic-mirrored-from-dc3 successful 2025-06-25T14:34:25.144928Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|3863d84a-ee0eead8-954eaecc-d312b22f_0] Write session: destroy 2025-06-25T14:34:25.145102Z :INFO: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] Closing read session. Close timeout: 18446744073709.551615s 2025-06-25T14:34:25.145184Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:3:4:0 -:test-topic-mirrored-from-dc2:0:2:4:0 -:test-topic-mirrored-from-dc3:0:1:4:0 2025-06-25T14:34:25.145231Z :INFO: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] Counters: { Errors: 0 CurrentSessionLifetimeMs: 615 BytesRead: 3600 MessagesRead: 15 BytesReadCompressed: 3600 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:34:25.145683Z :INFO: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] Closing read session. Close timeout: 0.000000s 2025-06-25T14:34:25.145728Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:3:4:0 -:test-topic-mirrored-from-dc2:0:2:4:0 -:test-topic-mirrored-from-dc3:0:1:4:0 2025-06-25T14:34:25.145763Z :INFO: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] Counters: { Errors: 0 CurrentSessionLifetimeMs: 615 BytesRead: 3600 MessagesRead: 15 BytesReadCompressed: 3600 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:34:25.145807Z :INFO: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] Closing read session. 
Close timeout: 0.000000s 2025-06-25T14:34:25.145849Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:3:4:0 -:test-topic-mirrored-from-dc2:0:2:4:0 -:test-topic-mirrored-from-dc3:0:1:4:0 2025-06-25T14:34:25.145888Z :INFO: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] Counters: { Errors: 0 CurrentSessionLifetimeMs: 615 BytesRead: 3600 MessagesRead: 15 BytesReadCompressed: 3600 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:34:25.145978Z :NOTICE: [/Root] [/Root] [bb1cf3b1-7b5509fa-e0b94202-9037c763] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:34:25.148399Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|3863d84a-ee0eead8-954eaecc-d312b22f_0 grpc read done: success: 0 data: 2025-06-25T14:34:25.148429Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: src_id|3863d84a-ee0eead8-954eaecc-d312b22f_0 grpc read failed 2025-06-25T14:34:25.148463Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: src_id|3863d84a-ee0eead8-954eaecc-d312b22f_0 grpc closed 2025-06-25T14:34:25.148465Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_12542537927434849856_v1 grpc read done: success# 1, data# { read_request { bytes_size: 1398 } } 2025-06-25T14:34:25.148477Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: src_id|3863d84a-ee0eead8-954eaecc-d312b22f_0 is DEAD 2025-06-25T14:34:25.148521Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_1_1_12542537927434849856_v1 grpc closed 2025-06-25T14:34:25.148573Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_1_1_12542537927434849856_v1 is DEAD 2025-06-25T14:34:25.155190Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:34:25.155621Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session shared/user_1_1_12542537927434849856_v1 2025-06-25T14:34:25.155676Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [1:7519895305347435559:2559] destroyed 2025-06-25T14:34:25.156084Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037896] Destroy direct read session shared/user_1_1_12542537927434849856_v1 2025-06-25T14:34:25.156112Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [1:7519895305347435555:2557] destroyed 2025-06-25T14:34:25.156139Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [1:7519895309642402949:2564] destroyed 2025-06-25T14:34:25.156203Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 
2025-06-25T14:34:25.156260Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_1_1_12542537927434849856_v1 2025-06-25T14:34:25.156277Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [1:7519895305347435557:2558] destroyed 2025-06-25T14:34:25.156665Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_12542537927434849856_v1 2025-06-25T14:34:25.156690Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_12542537927434849856_v1 2025-06-25T14:34:25.156705Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_12542537927434849856_v1 2025-06-25T14:34:25.161180Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037897][rt3.dc1--test-topic-mirrored-from-dc3] pipe [1:7519895305347435547:2548] disconnected; active server actors: 1 2025-06-25T14:34:25.161238Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037897][rt3.dc1--test-topic-mirrored-from-dc3] pipe [1:7519895305347435547:2548] client user disconnected session shared/user_1_1_12542537927434849856_v1 2025-06-25T14:34:25.161536Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [1:7519895305347435548:2548] disconnected; active server actors: 1 2025-06-25T14:34:25.161565Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [1:7519895305347435548:2548] client user disconnected session shared/user_1_1_12542537927434849856_v1 2025-06-25T14:34:25.161636Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][rt3.dc1--test-topic-mirrored-from-dc2] pipe [1:7519895305347435549:2548] disconnected; active server actors: 1 2025-06-25T14:34:25.161659Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][rt3.dc1--test-topic-mirrored-from-dc2] pipe [1:7519895305347435549:2548] client user disconnected session shared/user_1_1_12542537927434849856_v1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:18.711470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:18.711562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:18.711599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:18.711630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:18.711678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:18.711715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:18.711763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:18.711846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:18.712916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:18.713274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:18.813364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:18.813431Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:18.844437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:18.844842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:18.845008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:18.854394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:18.854767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:18.855389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.855672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:18.859280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:18.859458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:18.860665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:18.860725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:18.860858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:18.860915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, 
domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:18.860973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:18.861056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.870290Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:19.088542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:19.088760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.097585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:19.097681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:19.097906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:19.097983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:19.104951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.105197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:19.105444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.105511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:19.105549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:19.105595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:19.120139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.120234Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:19.120282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:19.129859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.129940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.129994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.130053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:19.133690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:19.135743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:19.135934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:19.136854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.137022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:19.137081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.137336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:19.137383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.137568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:19.137647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:19.139925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:19.139983Z node 1 :FLAT_TX_SCHEMESHARD ... , partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:34:27.138964Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710758, at schemeshard: 72057594046678944 2025-06-25T14:34:27.139037Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710758, ready parts: 0/1, is published: true 2025-06-25T14:34:27.139088Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710758, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710758 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710758 at step: 5000005 2025-06-25T14:34:27.140697Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:27.140810Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710758 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 17179871342 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:27.140857Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710758:0, step: 5000005, at schemeshard: 72057594046678944 2025-06-25T14:34:27.140958Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710758:0, at schemeshard: 72057594046678944 2025-06-25T14:34:27.141019Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710758:0 progress is 1/1 2025-06-25T14:34:27.141058Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-06-25T14:34:27.141108Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710758:0 progress is 1/1 2025-06-25T14:34:27.141141Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-06-25T14:34:27.141197Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:34:27.141261Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-25T14:34:27.141295Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710758, ready parts: 1/1, is published: false 2025-06-25T14:34:27.141339Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-06-25T14:34:27.141374Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710758:0 2025-06-25T14:34:27.141405Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710758:0 2025-06-25T14:34:27.141474Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:34:27.141513Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710758, publications: 2, subscribers: 1 2025-06-25T14:34:27.141550Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710758, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-25T14:34:27.141589Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710758, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-25T14:34:27.142355Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-06-25T14:34:27.142452Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 FAKE_COORDINATOR: Erasing txId 281474976710758 2025-06-25T14:34:27.143675Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:27.143708Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710758, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:27.143847Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710758, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-25T14:34:27.143969Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:27.143997Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 281474976710758, path id: 1 2025-06-25T14:34:27.144030Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 281474976710758, path id: 4 2025-06-25T14:34:27.144680Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-25T14:34:27.144749Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-25T14:34:27.144777Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication 
in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710758 2025-06-25T14:34:27.144824Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-25T14:34:27.144867Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-25T14:34:27.145124Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-25T14:34:27.145181Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-25T14:34:27.145206Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710758 2025-06-25T14:34:27.145232Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-25T14:34:27.145256Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-25T14:34:27.145315Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710758, subscribers: 1 2025-06-25T14:34:27.145357Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:126:2150] 2025-06-25T14:34:27.145712Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:34:27.145755Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-25T14:34:27.145819Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:34:27.147549Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-06-25T14:34:27.148418Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-06-25T14:34:27.148504Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710758 2025-06-25T14:34:27.148557Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710758 
2025-06-25T14:34:27.148615Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-25T14:34:27.148654Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710758 2025-06-25T14:34:27.148693Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710758, id# 103, itemIdx# 4294967295 2025-06-25T14:34:27.148975Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:34:27.149952Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 103 2025-06-25T14:34:27.150141Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:34:27.150182Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:34:27.150528Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:34:27.150602Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:34:27.150636Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:551:2509] TestWaitNotification: OK eventTxId 103 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::EncryptedExport [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:18.693554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:18.693636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:18.693675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:18.693707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:18.693750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:18.693783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:18.693836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:18.693904Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:18.694712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:18.695060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:18.796522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:18.796587Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:18.813012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:18.813393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:18.813570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:18.819220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:18.819552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:18.820160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.820424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:18.823566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:18.823739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:18.824972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:18.825028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:18.825142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:18.825184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:18.825246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:18.825338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.832180Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:18.955292Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:18.955486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.955716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:18.955769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:18.955989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:18.956066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:18.958602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.958821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:18.959020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.959079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:18.959113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:18.959143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:18.960772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.960820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:18.960854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:18.962327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.962380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-25T14:34:18.962425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.962464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:18.965728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:18.967289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:18.967483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:18.968547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.968684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:18.968896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.969194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:18.969252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.969418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:18.969485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:18.971339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:18.971406Z node 1 :FLAT_TX_SCHEMESHARD ... 
4:34:27.199314Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-25T14:34:27.199351Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-06-25T14:34:27.199395Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:34:27.199464Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-25T14:34:27.201628Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-25T14:34:27.201740Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-25T14:34:27.201777Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-25T14:34:27.201813Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-25T14:34:27.202348Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710763:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710763 msg type: 269090816 2025-06-25T14:34:27.202460Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710763, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:34:27.202601Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 FAKE_COORDINATOR: Add transaction: 281474976710763 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710763 at step: 5000010 2025-06-25T14:34:27.203096Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:27.203204Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710763 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 17179871342 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:27.203265Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710763:0, step: 5000010, at schemeshard: 72057594046678944 2025-06-25T14:34:27.203378Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710763:0, at schemeshard: 72057594046678944 2025-06-25T14:34:27.203462Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-25T14:34:27.203501Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-25T14:34:27.203546Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-25T14:34:27.203586Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-25T14:34:27.203650Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:34:27.203768Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:34:27.203815Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 1/1, is published: false 2025-06-25T14:34:27.203880Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-25T14:34:27.203925Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710763:0 2025-06-25T14:34:27.203959Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710763:0 2025-06-25T14:34:27.204018Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:34:27.204061Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710763, publications: 2, subscribers: 1 2025-06-25T14:34:27.204102Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-25T14:34:27.204137Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-25T14:34:27.205532Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.207074Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:27.207116Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:27.207284Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-25T14:34:27.207450Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:27.207501Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at 
schemeshard: 72057594046678944, txId: 281474976710763, path id: 1 2025-06-25T14:34:27.207545Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710763 2025-06-25T14:34:27.208331Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.208436Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.208478Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-25T14:34:27.208574Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-25T14:34:27.208621Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-25T14:34:27.209725Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.209787Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.209820Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-25T14:34:27.209855Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-25T14:34:27.209897Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:34:27.210001Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710763, subscribers: 1 2025-06-25T14:34:27.210061Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:126:2150] 2025-06-25T14:34:27.212519Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.212882Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.212954Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710763 2025-06-25T14:34:27.212993Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710763 2025-06-25T14:34:27.213052Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-25T14:34:27.213077Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763 2025-06-25T14:34:27.213103Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763, id# 103, itemIdx# 4294967295 2025-06-25T14:34:27.214555Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-25T14:34:27.214659Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:34:27.214715Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:1137:3011] TestWaitNotification: OK eventTxId 103 >> BuildStatsHistogram::Ten_Serial_Log [GOOD] >> BuildStatsHistogram::Ten_Crossed_Log >> TIterator::Serial [GOOD] >> TIterator::SerialReverse >> DstCreator::WithIntermediateDir >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-48 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-1 >> TDataShardTrace::TestTraceDistributedSelectViaReadActors [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::TablePermissions [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:19.217873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:19.217962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:19.218057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:19.218097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:19.218139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:19.218223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:19.218278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, 
Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:19.218342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:19.219124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:19.219813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:19.353058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:19.353134Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:19.392969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:19.393351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:19.393524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:19.406663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:19.407094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:19.407754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.408007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:19.424227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:19.424505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:19.425860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:19.425937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:19.426160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:19.426242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:19.426304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:19.426427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.434314Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: 
[1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:19.632049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:19.632281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.636653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:19.636733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:19.637042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:19.637135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:19.645933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.646160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:19.646381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.646434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:19.646474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:19.646505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:19.648507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.648564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:19.648620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:19.657108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.657157Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.657204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.657290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:19.660932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:19.662682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:19.662844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:19.663730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.663872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:19.663925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.664173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:19.664225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.664394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:19.664471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:19.673692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:19.673747Z node 1 :FLAT_TX_SCHEMESHARD ... 
000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710759 at step: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72075186233409547 for txId: 281474976710759 at step: 5000005 2025-06-25T14:34:27.977683Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:27.977797Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710759 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 17179871342 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:27.977862Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 281474976710759:0 HandleReply TEvOperationPlan, stepId: 5000005, at schemeshard: 72057594046678944 2025-06-25T14:34:27.978013Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710759:0 128 -> 129 2025-06-25T14:34:27.978155Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:24900 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: CB54D360-8658-437E-A85E-7DEB85108378 amz-sdk-request: attempt=1 content-length: 106 content-md5: MiY7vpEE4i/Xg+IZdddDVg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /metadata.json / / 106 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000005 2025-06-25T14:34:28.075805Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:28.075863Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710759, path id: [OwnerId: 72057594046678944, LocalPathId: 4] REQUEST: PUT /permissions.pb HTTP/1.1 HEADERS: Host: localhost:24900 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 4332849C-55DA-4805-9AA0-A9A60AEBC9AA amz-sdk-request: attempt=1 content-length: 137 content-md5: WeIr3D5bqIjvqMGEjx2JrA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /permissions.pb / / 137 2025-06-25T14:34:28.076111Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:28.076152Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 281474976710759, path id: 4 2025-06-25T14:34:28.076691Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:34:28.076753Z node 4 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 281474976710759:0 ProgressState, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 281474976710759 2025-06-25T14:34:28.077405Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-25T14:34:28.077491Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-25T14:34:28.077524Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710759 2025-06-25T14:34:28.077560Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710759, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-25T14:34:28.077597Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:34:28.077678Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 0/1, is published: true 2025-06-25T14:34:28.096282Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710759 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:24900 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: F736C29D-DAC8-4094-B6C4-C1A15FEBD633 amz-sdk-request: attempt=1 content-length: 355 content-md5: 4DhJNWgTpoG3PVvZ0uCHUA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /scheme.pb / / 355 REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:24900 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 99101748-157A-4B57-ACE4-71711F4C556B amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / / 0 2025-06-25T14:34:28.113354Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 462 RawX2: 17179871613 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-25T14:34:28.113414Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0 2025-06-25T14:34:28.113543Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 462 RawX2: 17179871613 } Origin: 
72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-25T14:34:28.113653Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 462 RawX2: 17179871613 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-25T14:34:28.113726Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:28.113779Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:34:28.113815Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:34:28.113871Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710759:0 129 -> 240 2025-06-25T14:34:28.114033Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:28.118791Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:34:28.118961Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:34:28.118999Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-06-25T14:34:28.119122Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-25T14:34:28.119153Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-25T14:34:28.119185Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-25T14:34:28.119211Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-25T14:34:28.119240Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-06-25T14:34:28.119304Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:126:2150] message: TxId: 281474976710759 
2025-06-25T14:34:28.119340Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-25T14:34:28.119369Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710759:0 2025-06-25T14:34:28.119396Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710759:0 2025-06-25T14:34:28.119500Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:34:28.125051Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-06-25T14:34:28.125135Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710759 2025-06-25T14:34:28.129622Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:34:28.129691Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:493:2452] TestWaitNotification: OK eventTxId 103 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::EmptyRangeAlreadyBrokenOlap [GOOD] Test command err: Trying to start YDB, gRPC: 63913, MsgBus: 12265 2025-06-25T14:33:40.060950Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895119238270782:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:40.068856Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001319/r3tmp/tmpnteV94/pdisk_1.dat 2025-06-25T14:33:40.748602Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:40.765265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:40.765357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:40.767786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:40.769522Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895114943303262:2080] 1750862019951278 != 1750862019951281 TServer::EnableGrpc on GrpcPort 63913, node 1 2025-06-25T14:33:41.032457Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:33:41.075735Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:41.075761Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:41.075768Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:41.075899Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
TClient is connected to server localhost:12265 TClient is connected to server localhost:12265 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:42.330625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:42.374098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:33:45.017135Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895119238270782:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:45.017233Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:45.125408Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895140713107678:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:45.125559Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:45.126069Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895140713107699:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:45.130352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:45.174877Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895140713107701:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:33:45.252682Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895140713107752:2342] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:45.676824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:45.842549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:47.589693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:51.128422Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=6; 2025-06-25T14:33:51.143767Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 6 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-25T14:33:51.144011Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 6 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-25T14:33:51.144203Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [1:7519895166482919591:2933], Table: `/Root/Test` ([72057594046644480:6:1]), SessionActorId: [1:7519895162187952218:2933]Got LOCKS BROKEN for table `/Root/Test`. ShardID=72075186224037888, Sink=[1:7519895166482919591:2933].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-25T14:33:51.144877Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519895166482919584:2933], SessionActorId: [1:7519895162187952218:2933], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[1:7519895162187952218:2933]. isRollback=0 2025-06-25T14:33:51.145195Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=YTEyMGNkMzgtYThkMDc0YjktZDM0MGQyYzUtNDQ3MzkwOA==, ActorId: [1:7519895162187952218:2933], ActorState: ExecuteState, TraceId: 01jykr4t485yme2mps0k56xv48, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519895166482919585:2933] from: [1:7519895166482919584:2933] 2025-06-25T14:33:51.145312Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519895166482919585:2933] TxId: 281474976710667. Ctx: { TraceId: 01jykr4t485yme2mps0k56xv48, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTEyMGNkMzgtYThkMDc0YjktZDM0MGQyYzUtNDQ3MzkwOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T14:33:51.145547Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YTEyMGNkMzgtYThkMDc0YjktZDM0MGQyYzUtNDQ3MzkwOA==, ActorId: [1:7519895162187952218:2933], ActorState: ExecuteState, TraceId: 01jykr4t485yme2mps0k56xv48, Create QueryResponse for error on request, msg:
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 Trying to start YDB, gRPC: 14354, MsgBus: 24446 2025-06-25T14:33:53.150065Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895168957411372:2169];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001319/r3tmp/tmpc5ly6s/pdisk_1.dat 2025-06-25T14:33:53.217751Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:33:53.455482Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:53.455564Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:53.461046Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895168957411228:2080] 1750862032976715 != 1750862032976718 2025-06-25T14:33:53.469277Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:53.470834Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, ... 5T14:34:23.150720Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037969;local_tx_no=14;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710664; 2025-06-25T14:34:23.150738Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037969;local_tx_no=15;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710664; 2025-06-25T14:34:23.150770Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037969;local_tx_no=16;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710664; 2025-06-25T14:34:23.150788Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037969;local_tx_no=17;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710664; 2025-06-25T14:34:24.750172Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-25T14:34:24.754759Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-25T14:34:24.755073Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-25T14:34:24.755288Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-25T14:34:24.755499Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-25T14:34:24.755511Z 
node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-25T14:34:24.755701Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-25T14:34:24.755777Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-25T14:34:24.756765Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-25T14:34:24.757027Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976710668;tx_id=281474976710668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710668; 2025-06-25T14:34:25.298080Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[3:7519895247593889797:2314];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=tablet lock have another internal generation counter: 18446744073709551615 != 0;tx_id=281474976710671; 2025-06-25T14:34:25.298980Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2751: SelfId: [3:7519895312018409066:3896], SessionActorId: [3:7519895303428473967:3896], Got LOCKS BROKEN for table. ShardID=72075186224037891, Sink=[3:7519895312018409066:3896].{
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 } 2025-06-25T14:34:25.299130Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519895312018409066:3896], SessionActorId: [3:7519895303428473967:3896], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 . sessionActorId=[3:7519895303428473967:3896]. isRollback=0 2025-06-25T14:34:25.299463Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=NTg0NmVjZDQtNDdlMzk5NDYtN2M0MDk4ZjUtYTNmZjhhNGU=, ActorId: [3:7519895303428473967:3896], ActorState: ExecuteState, TraceId: 01jykr5v3z9mcayk0cz936fcbr, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:7519895312018409251:3896] from: [3:7519895312018409066:3896] 2025-06-25T14:34:25.299574Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519895312018409251:3896] TxId: 281474976710671. Ctx: { TraceId: 01jykr5v3z9mcayk0cz936fcbr, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTg0NmVjZDQtNDdlMzk5NDYtN2M0MDk4ZjUtYTNmZjhhNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 } } 2025-06-25T14:34:25.299748Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NTg0NmVjZDQtNDdlMzk5NDYtN2M0MDk4ZjUtYTNmZjhhNGU=, ActorId: [3:7519895303428473967:3896], ActorState: ExecuteState, TraceId: 01jykr5v3z9mcayk0cz936fcbr, Create QueryResponse for error on request, msg: 2025-06-25T14:34:25.300683Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037894 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-25T14:34:25.300785Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7519895247593889786:2311];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:34:25.300873Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037895 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-25T14:34:25.300919Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[3:7519895247593889779:2308];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:34:25.300974Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037896 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-25T14:34:25.301015Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[3:7519895247593889780:2309];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:34:25.301087Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[3:7519895247593889797:2314];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:34:25.301267Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037888 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-25T14:34:25.301338Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[3:7519895247593889788:2313];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:34:25.301399Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037897 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-25T14:34:25.301427Z node 3 :TX_COLUMNSHARD 
WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037889 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-25T14:34:25.301462Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519895247593889778:2307];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:34:25.301514Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519895247593889787:2312];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:34:25.301551Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037890 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-25T14:34:25.301597Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037892 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-25T14:34:25.301601Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519895247593889895:2316];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:34:25.301643Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[3:7519895247593889830:2315];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:34:25.301828Z node 3 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037893 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710671 2025-06-25T14:34:25.301884Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[3:7519895247593889781:2310];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0;
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 >> DstCreator::ColumnsSizeMismatch ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::SchemaMapping [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:19.022345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:19.022434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:19.022469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:19.022503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:19.022548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:19.022573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:19.022637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:19.022711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:19.023545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:19.023950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:19.123625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:19.123686Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:19.144951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:19.145372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:19.145523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:19.157976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:19.158431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:19.159046Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.159296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:19.163853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:19.164033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:19.165311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:19.165369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:19.165516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:19.165568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:19.165625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:19.165715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.177336Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:19.366029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:19.366353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.366595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:19.366642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:19.366882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:19.366976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:19.375481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.375776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:19.376008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.376077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:19.376112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:19.376146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:19.384858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.384925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:19.384969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:19.394899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.394971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.395021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.395083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:19.398739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:19.408992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:19.409219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:19.410203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.410346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:19.410402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.410690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:19.410745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.410926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:19.411012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:19.413221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:19.413286Z node 1 :FLAT_TX_SCHEMESHARD ... 14:34:27.848264Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-25T14:34:27.848304Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-06-25T14:34:27.848374Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:34:27.848450Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-25T14:34:27.850124Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-25T14:34:27.850432Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-25T14:34:27.850472Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-25T14:34:27.850520Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-25T14:34:27.851684Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710763:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710763 msg type: 269090816 2025-06-25T14:34:27.851805Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710763, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:34:27.852119Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 FAKE_COORDINATOR: Add transaction: 281474976710763 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710763 at step: 5000010 2025-06-25T14:34:27.852741Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:27.852844Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710763 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 17179871342 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:27.852909Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710763:0, step: 5000010, at schemeshard: 72057594046678944 2025-06-25T14:34:27.853023Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710763:0, at schemeshard: 72057594046678944 2025-06-25T14:34:27.853080Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-25T14:34:27.853121Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-25T14:34:27.853183Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-25T14:34:27.853219Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-25T14:34:27.853273Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:34:27.853335Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:34:27.853372Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 1/1, is published: false 2025-06-25T14:34:27.853415Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-25T14:34:27.853471Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710763:0 2025-06-25T14:34:27.853506Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710763:0 2025-06-25T14:34:27.853563Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:34:27.853615Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, 
tx: 281474976710763, publications: 2, subscribers: 1 2025-06-25T14:34:27.853659Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-25T14:34:27.853697Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-25T14:34:27.854196Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.855626Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:27.855662Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:27.855808Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-25T14:34:27.855909Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:27.855936Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 1 2025-06-25T14:34:27.855970Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710763 2025-06-25T14:34:27.856812Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.856912Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.856952Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-25T14:34:27.857013Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-25T14:34:27.857063Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-25T14:34:27.857516Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 
281474976710763 2025-06-25T14:34:27.857609Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.857641Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-25T14:34:27.857681Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-25T14:34:27.857727Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:34:27.857805Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710763, subscribers: 1 2025-06-25T14:34:27.857857Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:126:2150] 2025-06-25T14:34:27.869983Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.870325Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-25T14:34:27.870408Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710763 2025-06-25T14:34:27.870472Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710763 2025-06-25T14:34:27.870569Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-25T14:34:27.870600Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763 2025-06-25T14:34:27.870628Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763, id# 103, itemIdx# 4294967295 2025-06-25T14:34:27.872423Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-25T14:34:27.872533Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:34:27.872579Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:858:2785] TestWaitNotification: OK eventTxId 103 >> TExportToS3Tests::AuditCancelledExport [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices_Sticky [GOOD] >> TPartGroupBtreeIndexIter::NoNodes >> TDataShardTrace::TestTraceDistributedUpsert-UseSink [GOOD] >> TExportToS3Tests::Changefeeds [GOOD] >> TPartGroupBtreeIndexIter::NoNodes [GOOD] >> TPartGroupBtreeIndexIter::OneNode >> TPartGroupBtreeIndexIter::OneNode [GOOD] >> TPartGroupBtreeIndexIter::FewNodes >> TExportToS3Tests::AutoDropping >> TPartGroupBtreeIndexIter::FewNodes 
[GOOD] >> TPartMulti::Basics [GOOD] >> TPartMulti::BasicsReverse [GOOD] >> TPartSlice::SimpleMerge [GOOD] >> TPartSlice::ComplexMerge [GOOD] >> TPartSlice::LongTailMerge [GOOD] >> TPartSlice::CutSingle [GOOD] >> TPartSlice::CutMulti [GOOD] >> TPartSlice::LookupBasics [GOOD] >> TPartSlice::LookupFull [GOOD] >> TPartSlice::EqualByRowId [GOOD] >> TPartSlice::ParallelCompactions [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedSelectViaReadActors [GOOD] Test command err: 2025-06-25T14:34:25.148291Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:25.148502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:25.148563Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b6b/r3tmp/tmp6QXi93/pdisk_1.dat 2025-06-25T14:34:25.493735Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:25.498001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:25.553730Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:25.565107Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862061543289 != 1750862061543293 2025-06-25T14:34:25.618775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:25.618945Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:25.631661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:25.733146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:26.328758Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:28.267655Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:888:2730], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:28.267761Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:899:2735], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:28.267835Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:28.276826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:28.312689Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:34:28.505877Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:902:2738], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:34:28.617121Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:964:2780] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:28.975699Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykr5yh9fchq5nj8n6cm4trj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTBmZThmZTMtNTQxYmMyNmQtYzExMzczODMtYTcyZjhhNzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:29.109845Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykr5z8f382ehab10bwf11hz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTllNjJlNjItNGVmNDczMDgtM2I5NTEyZjctZTA1ZGFmNWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:29.277185Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykr5zce4c8ne79586tgxy8y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWMyY2EyZmMtNGRiZWRiMTktNGUxOTkyNzYtZWIzZDMyZTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> DstCreator::GlobalConsistency >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-5 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-6 >> TExportToS3Tests::CancelUponTransferringMultiShardTableShouldSucceed [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-6 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-7 >> TExportToS3Tests::CancelUponTransferringSingleTableShouldSucceed [GOOD] >> TExportToS3Tests::CancelUponTransferringManyTablesShouldSucceed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedUpsert-UseSink [GOOD] Test command err: 2025-06-25T14:34:26.257077Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:26.257322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:26.257388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b6a/r3tmp/tmpCUbqlU/pdisk_1.dat 2025-06-25T14:34:26.605077Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:26.609413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:26.650357Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:26.655037Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862062590254 != 1750862062590258 2025-06-25T14:34:26.709478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:26.709666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:26.721721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:26.811247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:27.216896Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:29.027989Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:888:2730], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:29.028105Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:899:2735], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:29.028203Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:29.045676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:29.091549Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:34:29.267759Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:902:2738], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:34:29.354968Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:964:2780] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:29.785061Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykr5z913tc0cxycp504ft3k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTAxYjJjNDgtMTExMzJkMDktNmMyMGU1ZGQtN2U1YmFmY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Trace: (Session.query.QUERY_ACTION_EXECUTE -> [(CompileService -> [(CompileActor)]) , (LiteralExecuter) , (DataExecuter -> [(WaitForTableResolve) , (RunTasks) , (Datashard.Transaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendResult)]) , (Datashard.Transaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendResult)])])]) >> TVersions::Wreck2 [GOOD] >> TVersions::Wreck2Reverse >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMicroSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDate32 >> DstCreator::ExistingDst >> DstCreator::ReplicationModeMismatch >> TDataShardTrace::TestTraceDistributedUpsert+UseSink [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MilliSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds >> TExportToS3Tests::AutoDropping [GOOD] |79.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest |79.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt4Seconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Microseconds >> TDataShardTrace::TestTraceDistributedSelect [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::Changefeeds [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:18.464159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 
172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:18.464272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:18.464329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:18.464363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:18.464406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:18.464448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:18.464503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:18.464579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:18.465380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:18.465724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:18.581046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:18.581114Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:18.624536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:18.634944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:18.635087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:18.654640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:18.655065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:18.655668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.658103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:18.667181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:18.667358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:18.668589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-25T14:34:18.668642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:18.668685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:18.668733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:18.668769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:18.668958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.725246Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:18.917116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:18.917325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.917549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:18.917600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:18.917817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:18.917904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:18.926248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.926519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:18.926726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.926778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at 
tablet# 72057594046678944 2025-06-25T14:34:18.926844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:18.926887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:18.933530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.933597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:18.933637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:18.941071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.941140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.941192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.941299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:18.944672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:18.953006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:18.953218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:18.954224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.954362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:18.954420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.954701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:18.954754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: 
NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.954941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:18.955013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:18.959077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:18.959136Z node 1 :FLAT_TX_SCHEMESHARD ... 14:34:30.149852Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-25T14:34:30.149898Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 7 2025-06-25T14:34:30.149929Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-06-25T14:34:30.150001Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-25T14:34:30.158318Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-25T14:34:30.159201Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710761:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710761 msg type: 269090816 2025-06-25T14:34:30.159375Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710761, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 281474976710761 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000010 2025-06-25T14:34:30.159721Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-25T14:34:30.159775Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-25T14:34:30.159831Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-25T14:34:30.160069Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:30.160225Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 21474838638 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 
2025-06-25T14:34:30.160281Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:128: TRmDir HandleReply TEvOperationPlan, opId: 281474976710761:0, step: 5000010, at schemeshard: 72057594046678944 2025-06-25T14:34:30.160446Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:179: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-25T14:34:30.160526Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-25T14:34:30.160606Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-25T14:34:30.160675Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-25T14:34:30.160728Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-25T14:34:30.160803Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:34:30.160895Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-06-25T14:34:30.160942Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 2025-06-25T14:34:30.161003Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-25T14:34:30.161074Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-25T14:34:30.161126Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710761:0 2025-06-25T14:34:30.161224Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-06-25T14:34:30.161283Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2025-06-25T14:34:30.161327Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 12 2025-06-25T14:34:30.161385Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 9], 18446744073709551615 2025-06-25T14:34:30.162333Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:30.162440Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:30.164562Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:30.164612Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:30.164834Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 9] 2025-06-25T14:34:30.164978Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:30.165021Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:211:2211], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2025-06-25T14:34:30.165063Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:211:2211], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 9 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-25T14:34:30.165912Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:30.166035Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:30.166088Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-25T14:34:30.166170Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 12 2025-06-25T14:34:30.166238Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:34:30.167331Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:30.167457Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:30.167492Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-25T14:34:30.167550Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 18446744073709551615 2025-06-25T14:34:30.167584Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount 
reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-06-25T14:34:30.167669Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2025-06-25T14:34:30.167743Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [5:126:2150] 2025-06-25T14:34:30.174768Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:30.175800Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:30.175936Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-25T14:34:30.176017Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710761 2025-06-25T14:34:30.176074Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-25T14:34:30.176106Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-25T14:34:30.176137Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 105, itemIdx# 4294967295 2025-06-25T14:34:30.178052Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-25T14:34:30.178156Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:34:30.178214Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [5:1386:3173] TestWaitNotification: OK eventTxId 105 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TPartSlice::ParallelCompactions [GOOD] Test command err: ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 346b 12r} data 755b + FlatIndex{4} Label{3 rev 3, 172b} 5 rec | Page Row Bytes (Uint32, String) | 0 0 86b {1, aaa} | 1 3 88b {1, b} | 2 6 86b {2, NULL} | 3 9 86b {2, ccx} | 3 11 86b {2, cxz} + BTreeIndex{PageId: 5 RowCount: 12 DataSize: 346 ErasedRowCount: 0} Label{13 rev 1, 208b} | PageId: 0 RowCount: 3 DataSize: 86 ErasedRowCount: 0 | > {1, b} | PageId: 1 RowCount: 6 DataSize: 174 ErasedRowCount: 0 | > {2, NULL} | PageId: 2 RowCount: 9 DataSize: 260 ErasedRowCount: 0 | > {2, ccx} | PageId: 3 RowCount: 12 DataSize: 346 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 346b 12r} data 777b + FlatIndex{4} Label{3 rev 3, 179b} 5 rec | Page Row Bytes (Uint32, String) | 0 0 86b {1, aaa} | 1 3 88b {1, baaaa} | 2 6 86b {2, aaa} | 3 9 86b {2, ccx} | 3 11 86b {2, cxz} + BTreeIndex{PageId: 5 RowCount: 12 DataSize: 346 ErasedRowCount: 0} Label{13 rev 1, 223b} | PageId: 0 RowCount: 3 DataSize: 86 ErasedRowCount: 0 | > {1, baaaa} | PageId: 1 RowCount: 6 DataSize: 174 ErasedRowCount: 0 | > {2, aaa} | PageId: 2 RowCount: 9 DataSize: 260 ErasedRowCount: 0 | > {2, ccx} | PageId: 3 RowCount: 12 DataSize: 346 ErasedRowCount: 0 ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1347b + 
FlatIndex{10} Label{3 rev 3, 362b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, ab} | 2 2 42b {1, ac} | 3 3 42b {1, b} | 4 4 42b {1, bb} | 5 5 42b {2, NULL} | 6 6 42b {2, ab} | 7 7 42b {2, ac} | 8 8 42b {2, b} | 9 9 42b {2, bb} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 536b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, ab} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, ac} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, b} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bb} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, NULL} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, ab} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, ac} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, b} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bb} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1381b + FlatIndex{10} Label{3 rev 3, 375b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, aba} | 2 2 42b {1, aca} | 3 3 42b {1, baa} | 4 4 42b {1, bba} | 5 5 42b {2, aaa} | 6 6 42b {2, aba} | 7 7 42b {2, aca} | 8 8 42b {2, baa} | 9 9 42b {2, bba} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 557b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, aba} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, aca} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, baa} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bba} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, aaa} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, aba} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, aca} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, baa} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bba} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= SLICES ======= { [0, 1), [1, 2), [2, 3), [3, 4), [4, 5), [5, 7), [7, 9), [9, 9] } ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1347b + FlatIndex{10} Label{3 rev 3, 362b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, ab} | 2 2 42b {1, ac} | 3 3 42b {1, b} | 4 4 42b {1, bb} | 5 5 42b {2, NULL} | 6 6 42b {2, ab} | 7 7 42b {2, ac} | 8 8 42b {2, b} | 9 9 42b {2, bb} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 536b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, ab} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, ac} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, b} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bb} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, NULL} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, ab} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, ac} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, b} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bb} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1381b + FlatIndex{10} Label{3 rev 3, 375b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, aba} | 2 2 42b 
{1, aca} | 3 3 42b {1, baa} | 4 4 42b {1, bba} | 5 5 42b {2, aaa} | 6 6 42b {2, aba} | 7 7 42b {2, aca} | 8 8 42b {2, baa} | 9 9 42b {2, bba} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 557b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, aba} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, aca} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, baa} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bba} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, aaa} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, aba} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, aca} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, baa} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bba} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 81b 2r} data 316b + FlatIndex{2} Label{3 rev 3, 107b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 41b {ccccccd} | 1 1 41b {ccccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 81 ErasedRowCount: 0} Label{13 rev 1, 109b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccccd} | PageId: 1 RowCount: 2 DataSize: 81 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 83b 2r} data 320b + FlatIndex{2} Label{3 rev 3, 109b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 43b {ccccccd} | 1 1 43b {ccccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 83 ErasedRowCount: 0} Label{13 rev 1, 109b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccccd} | PageId: 1 RowCount: 2 DataSize: 83 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 80b 2r} data 312b + FlatIndex{2} Label{3 rev 3, 105b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 40b {cccccd} | 1 1 40b {cccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 80 ErasedRowCount: 0} Label{13 rev 1, 108b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccccd} | PageId: 1 RowCount: 2 DataSize: 80 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 82b 2r} data 316b + FlatIndex{2} Label{3 rev 3, 107b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 42b {cccccd} | 1 1 42b {cccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 82 ErasedRowCount: 0} Label{13 rev 1, 108b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccccd} | PageId: 1 RowCount: 2 DataSize: 82 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 79b 2r} data 308b + FlatIndex{2} Label{3 rev 3, 103b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 39b {ccccd} | 1 1 39b {ccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 79 ErasedRowCount: 0} Label{13 rev 1, 107b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccd} | PageId: 1 RowCount: 2 DataSize: 79 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 81b 2r} data 312b + FlatIndex{2} Label{3 rev 3, 105b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 41b {ccccd} | 1 1 41b {ccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 81 ErasedRowCount: 0} Label{13 rev 1, 107b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccd} | PageId: 1 RowCount: 2 DataSize: 81 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 78b 2r} data 304b + FlatIndex{2} Label{3 rev 3, 101b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 38b {cccd} | 1 1 38b {cccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 78 ErasedRowCount: 0} Label{13 rev 1, 106b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccd} 
| PageId: 1 RowCount: 2 DataSize: 78 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 80b 2r} data 308b + FlatIndex{2} Label{3 rev 3, 103b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 40b {cccd} | 1 1 40b {cccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 80 ErasedRowCount: 0} Label{13 rev 1, 106b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccd} | PageId: 1 RowCount: 2 DataSize: 80 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 75b 2r} data 292b + FlatIndex{2} Label{3 rev 3, 95b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 35b {d} | 1 1 35b {d} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 75 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 75 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 77b 2r} data 296b + FlatIndex{2} Label{3 rev 3, 97b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 37b {d} | 1 1 37b {ddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 77 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 77 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 69b 2r} data 280b + FlatIndex{2} Label{3 rev 3, 89b} 3 rec | Page Row Bytes (String) | 0 0 34b {} | 1 1 35b {d} | 1 1 35b {d} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 69 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 34 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 69 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 71b 2r} data 284b + FlatIndex{2} Label{3 rev 3, 91b} 3 rec | Page Row Bytes (String) | 0 0 34b {} | 1 1 37b {d} | 1 1 37b {ddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 71 ErasedRowCount: 0} Label{13 r ... owOp 1: {0, 8} {Set 2 Uint32 : 5}, {Set 3 Uint64 : 5}, {Set 4 String : xxxxxxxxxx_5} + Rows{3} Label{34 rev 1, 120b}, [6, +2)row | ERowOp 1: {0, 10} {Set 2 Uint32 : 6}, {Set 3 Uint64 : 6}, {Set 4 String : xxxxxxxxxx_6} | ERowOp 1: {1, 1} {Set 2 Uint32 : 7}, {Set 3 Uint64 : 7}, {Set 4 String : xxxxxxxxxx_7} + Rows{4} Label{44 rev 1, 120b}, [8, +2)row | ERowOp 1: {1, 3} {Set 2 Uint32 : 8}, {Set 3 Uint64 : 8}, {Set 4 String : xxxxxxxxxx_8} | ERowOp 1: {1, 4} {Set 2 Uint32 : 9}, {Set 3 Uint64 : 9}, {Set 4 String : xxxxxxxxxx_9} + Rows{5} Label{54 rev 1, 122b}, [10, +2)row | ERowOp 1: {1, 6} {Set 2 Uint32 : 10}, {Set 3 Uint64 : 10}, {Set 4 String : xxxxxxxxxx_10} | ERowOp 1: {1, 7} {Set 2 Uint32 : 11}, {Set 3 Uint64 : 11}, {Set 4 String : xxxxxxxxxx_11} + Rows{6} Label{64 rev 1, 122b}, [12, +2)row | ERowOp 1: {1, 8} {Set 2 Uint32 : 12}, {Set 3 Uint64 : 12}, {Set 4 String : xxxxxxxxxx_12} | ERowOp 1: {1, 10} {Set 2 Uint32 : 13}, {Set 3 Uint64 : 13}, {Set 4 String : xxxxxxxxxx_13} + Rows{7} Label{74 rev 1, 122b}, [14, +2)row | ERowOp 1: {2, 1} {Set 2 Uint32 : 14}, {Set 3 Uint64 : 14}, {Set 4 String : xxxxxxxxxx_14} | ERowOp 1: {2, 3} {Set 2 Uint32 : 15}, {Set 3 Uint64 : 15}, {Set 4 String : xxxxxxxxxx_15} + Rows{8} Label{84 rev 1, 122b}, [16, +2)row | ERowOp 1: {2, 4} {Set 2 Uint32 : 16}, {Set 3 Uint64 : 16}, {Set 4 String : xxxxxxxxxx_16} | ERowOp 1: {2, 6} {Set 2 Uint32 : 17}, {Set 3 Uint64 : 17}, {Set 4 String : xxxxxxxxxx_17} + Rows{9} Label{94 rev 1, 122b}, [18, +2)row | ERowOp 1: {2, 7} {Set 2 Uint32 : 18}, {Set 3 Uint64 : 18}, {Set 4 String : xxxxxxxxxx_18} | ERowOp 1: {2, 8} {Set 2 Uint32 : 19}, {Set 3 Uint64 : 19}, {Set 4 String : xxxxxxxxxx_19} + Rows{10} Label{104 rev 1, 122b}, [20, +2)row | ERowOp 1: {2, 10} {Set 2 Uint32 : 20}, {Set 3 Uint64 : 20}, 
{Set 4 String : xxxxxxxxxx_20} | ERowOp 1: {3, 1} {Set 2 Uint32 : 21}, {Set 3 Uint64 : 21}, {Set 4 String : xxxxxxxxxx_21} + Rows{11} Label{114 rev 1, 122b}, [22, +2)row | ERowOp 1: {3, 3} {Set 2 Uint32 : 22}, {Set 3 Uint64 : 22}, {Set 4 String : xxxxxxxxxx_22} | ERowOp 1: {3, 4} {Set 2 Uint32 : 23}, {Set 3 Uint64 : 23}, {Set 4 String : xxxxxxxxxx_23} + Rows{12} Label{124 rev 1, 122b}, [24, +2)row | ERowOp 1: {3, 6} {Set 2 Uint32 : 24}, {Set 3 Uint64 : 24}, {Set 4 String : xxxxxxxxxx_24} | ERowOp 1: {3, 7} {Set 2 Uint32 : 25}, {Set 3 Uint64 : 25}, {Set 4 String : xxxxxxxxxx_25} + Rows{13} Label{134 rev 1, 122b}, [26, +2)row | ERowOp 1: {3, 8} {Set 2 Uint32 : 26}, {Set 3 Uint64 : 26}, {Set 4 String : xxxxxxxxxx_26} | ERowOp 1: {3, 10} {Set 2 Uint32 : 27}, {Set 3 Uint64 : 27}, {Set 4 String : xxxxxxxxxx_27} + Rows{14} Label{144 rev 1, 122b}, [28, +2)row | ERowOp 1: {4, 1} {Set 2 Uint32 : 28}, {Set 3 Uint64 : 28}, {Set 4 String : xxxxxxxxxx_28} | ERowOp 1: {4, 3} {Set 2 Uint32 : 29}, {Set 3 Uint64 : 29}, {Set 4 String : xxxxxxxxxx_29} + Rows{15} Label{154 rev 1, 122b}, [30, +2)row | ERowOp 1: {4, 4} {Set 2 Uint32 : 30}, {Set 3 Uint64 : 30}, {Set 4 String : xxxxxxxxxx_30} | ERowOp 1: {4, 6} {Set 2 Uint32 : 31}, {Set 3 Uint64 : 31}, {Set 4 String : xxxxxxxxxx_31} + Rows{16} Label{164 rev 1, 122b}, [32, +2)row | ERowOp 1: {4, 7} {Set 2 Uint32 : 32}, {Set 3 Uint64 : 32}, {Set 4 String : xxxxxxxxxx_32} | ERowOp 1: {4, 8} {Set 2 Uint32 : 33}, {Set 3 Uint64 : 33}, {Set 4 String : xxxxxxxxxx_33} + Rows{17} Label{174 rev 1, 122b}, [34, +2)row | ERowOp 1: {4, 10} {Set 2 Uint32 : 34}, {Set 3 Uint64 : 34}, {Set 4 String : xxxxxxxxxx_34} | ERowOp 1: {5, 1} {Set 2 Uint32 : 35}, {Set 3 Uint64 : 35}, {Set 4 String : xxxxxxxxxx_35} + Rows{18} Label{184 rev 1, 122b}, [36, +2)row | ERowOp 1: {5, 3} {Set 2 Uint32 : 36}, {Set 3 Uint64 : 36}, {Set 4 String : xxxxxxxxxx_36} | ERowOp 1: {5, 4} {Set 2 Uint32 : 37}, {Set 3 Uint64 : 37}, {Set 4 String : xxxxxxxxxx_37} + Rows{19} Label{194 rev 1, 122b}, [38, +2)row | ERowOp 1: {5, 6} {Set 2 Uint32 : 38}, {Set 3 Uint64 : 38}, {Set 4 String : xxxxxxxxxx_38} | ERowOp 1: {5, 7} {Set 2 Uint32 : 39}, {Set 3 Uint64 : 39}, {Set 4 String : xxxxxxxxxx_39} Slices{ [0, 39] } Part{[1:2:3:0:0:0:0] eph 0, 2430b 40r} data 4441b + FlatIndex{26} Label{3 rev 3, 558b} 21 rec | Page Row Bytes (Uint32, Uint32) | 0 0 120b {0, 1} | 1 2 120b {0, 4} | 2 4 120b {0, 7} | 3 6 120b {0, 10} | 4 8 120b {1, 3} | 5 10 122b {1, 6} | 7 12 122b {1, 8} | 8 14 122b {2, NULL} | 9 16 122b {2, 4} | 11 18 122b {2, 7} | 12 20 122b {2, 10} | 13 22 122b {3, 3} | 15 24 122b {3, 6} | 16 26 122b {3, 8} | 17 28 122b {4, NULL} | 19 30 122b {4, 4} | 20 32 122b {4, 7} | 21 34 122b {4, 10} | 24 36 122b {5, 3} | 25 38 122b {5, 6} | 25 39 122b {5, 7} + BTreeIndex{PageId: 29 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 102b} | + BTreeIndex{PageId: 23 RowCount: 18 DataSize: 1088 ErasedRowCount: 0} Label{13 rev 1, 151b} | | + BTreeIndex{PageId: 6 RowCount: 6 DataSize: 360 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 0 RowCount: 2 DataSize: 120 ErasedRowCount: 0 | | | > {0, 4} | | | PageId: 1 RowCount: 4 DataSize: 240 ErasedRowCount: 0 | | | > {0, 7} | | | PageId: 2 RowCount: 6 DataSize: 360 ErasedRowCount: 0 | | > {0, 10} | | + BTreeIndex{PageId: 10 RowCount: 12 DataSize: 722 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 3 RowCount: 8 DataSize: 480 ErasedRowCount: 0 | | | > {1, 3} | | | PageId: 4 RowCount: 10 DataSize: 600 ErasedRowCount: 0 | | | > {1, 6} | | | PageId: 5 RowCount: 12 
DataSize: 722 ErasedRowCount: 0 | | > {1, 8} | | + BTreeIndex{PageId: 14 RowCount: 18 DataSize: 1088 ErasedRowCount: 0} Label{13 rev 1, 147b} | | | PageId: 7 RowCount: 14 DataSize: 844 ErasedRowCount: 0 | | | > {2, NULL} | | | PageId: 8 RowCount: 16 DataSize: 966 ErasedRowCount: 0 | | | > {2, 4} | | | PageId: 9 RowCount: 18 DataSize: 1088 ErasedRowCount: 0 | > {2, 7} | + BTreeIndex{PageId: 28 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 151b} | | + BTreeIndex{PageId: 18 RowCount: 24 DataSize: 1454 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 11 RowCount: 20 DataSize: 1210 ErasedRowCount: 0 | | | > {2, 10} | | | PageId: 12 RowCount: 22 DataSize: 1332 ErasedRowCount: 0 | | | > {3, 3} | | | PageId: 13 RowCount: 24 DataSize: 1454 ErasedRowCount: 0 | | > {3, 6} | | + BTreeIndex{PageId: 22 RowCount: 30 DataSize: 1820 ErasedRowCount: 0} Label{13 rev 1, 147b} | | | PageId: 15 RowCount: 26 DataSize: 1576 ErasedRowCount: 0 | | | > {3, 8} | | | PageId: 16 RowCount: 28 DataSize: 1698 ErasedRowCount: 0 | | | > {4, NULL} | | | PageId: 17 RowCount: 30 DataSize: 1820 ErasedRowCount: 0 | | > {4, 4} | | + BTreeIndex{PageId: 27 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 249b} | | | PageId: 19 RowCount: 32 DataSize: 1942 ErasedRowCount: 0 | | | > {4, 7} | | | PageId: 20 RowCount: 34 DataSize: 2064 ErasedRowCount: 0 | | | > {4, 10} | | | PageId: 21 RowCount: 36 DataSize: 2186 ErasedRowCount: 0 | | | > {5, 3} | | | PageId: 24 RowCount: 38 DataSize: 2308 ErasedRowCount: 0 | | | > {5, 6} | | | PageId: 25 RowCount: 40 DataSize: 2430 ErasedRowCount: 0 + Rows{0} Label{04 rev 1, 120b}, [0, +2)row | ERowOp 1: {0, 1} {Set 2 Uint32 : 0}, {Set 3 Uint64 : 0}, {Set 4 String : xxxxxxxxxx_0} | ERowOp 1: {0, 3} {Set 2 Uint32 : 1}, {Set 3 Uint64 : 1}, {Set 4 String : xxxxxxxxxx_1} + Rows{1} Label{14 rev 1, 120b}, [2, +2)row | ERowOp 1: {0, 4} {Set 2 Uint32 : 2}, {Set 3 Uint64 : 2}, {Set 4 String : xxxxxxxxxx_2} | ERowOp 1: {0, 6} {Set 2 Uint32 : 3}, {Set 3 Uint64 : 3}, {Set 4 String : xxxxxxxxxx_3} + Rows{2} Label{24 rev 1, 120b}, [4, +2)row | ERowOp 1: {0, 7} {Set 2 Uint32 : 4}, {Set 3 Uint64 : 4}, {Set 4 String : xxxxxxxxxx_4} | ERowOp 1: {0, 8} {Set 2 Uint32 : 5}, {Set 3 Uint64 : 5}, {Set 4 String : xxxxxxxxxx_5} + Rows{3} Label{34 rev 1, 120b}, [6, +2)row | ERowOp 1: {0, 10} {Set 2 Uint32 : 6}, {Set 3 Uint64 : 6}, {Set 4 String : xxxxxxxxxx_6} | ERowOp 1: {1, 1} {Set 2 Uint32 : 7}, {Set 3 Uint64 : 7}, {Set 4 String : xxxxxxxxxx_7} + Rows{4} Label{44 rev 1, 120b}, [8, +2)row | ERowOp 1: {1, 3} {Set 2 Uint32 : 8}, {Set 3 Uint64 : 8}, {Set 4 String : xxxxxxxxxx_8} | ERowOp 1: {1, 4} {Set 2 Uint32 : 9}, {Set 3 Uint64 : 9}, {Set 4 String : xxxxxxxxxx_9} + Rows{5} Label{54 rev 1, 122b}, [10, +2)row | ERowOp 1: {1, 6} {Set 2 Uint32 : 10}, {Set 3 Uint64 : 10}, {Set 4 String : xxxxxxxxxx_10} | ERowOp 1: {1, 7} {Set 2 Uint32 : 11}, {Set 3 Uint64 : 11}, {Set 4 String : xxxxxxxxxx_11} + Rows{7} Label{74 rev 1, 122b}, [12, +2)row | ERowOp 1: {1, 8} {Set 2 Uint32 : 12}, {Set 3 Uint64 : 12}, {Set 4 String : xxxxxxxxxx_12} | ERowOp 1: {1, 10} {Set 2 Uint32 : 13}, {Set 3 Uint64 : 13}, {Set 4 String : xxxxxxxxxx_13} + Rows{8} Label{84 rev 1, 122b}, [14, +2)row | ERowOp 1: {2, 1} {Set 2 Uint32 : 14}, {Set 3 Uint64 : 14}, {Set 4 String : xxxxxxxxxx_14} | ERowOp 1: {2, 3} {Set 2 Uint32 : 15}, {Set 3 Uint64 : 15}, {Set 4 String : xxxxxxxxxx_15} + Rows{9} Label{94 rev 1, 122b}, [16, +2)row | ERowOp 1: {2, 4} {Set 2 Uint32 : 16}, {Set 3 Uint64 : 16}, {Set 4 String : xxxxxxxxxx_16} | ERowOp 1: {2, 6} 
{Set 2 Uint32 : 17}, {Set 3 Uint64 : 17}, {Set 4 String : xxxxxxxxxx_17} + Rows{11} Label{114 rev 1, 122b}, [18, +2)row | ERowOp 1: {2, 7} {Set 2 Uint32 : 18}, {Set 3 Uint64 : 18}, {Set 4 String : xxxxxxxxxx_18} | ERowOp 1: {2, 8} {Set 2 Uint32 : 19}, {Set 3 Uint64 : 19}, {Set 4 String : xxxxxxxxxx_19} + Rows{12} Label{124 rev 1, 122b}, [20, +2)row | ERowOp 1: {2, 10} {Set 2 Uint32 : 20}, {Set 3 Uint64 : 20}, {Set 4 String : xxxxxxxxxx_20} | ERowOp 1: {3, 1} {Set 2 Uint32 : 21}, {Set 3 Uint64 : 21}, {Set 4 String : xxxxxxxxxx_21} + Rows{13} Label{134 rev 1, 122b}, [22, +2)row | ERowOp 1: {3, 3} {Set 2 Uint32 : 22}, {Set 3 Uint64 : 22}, {Set 4 String : xxxxxxxxxx_22} | ERowOp 1: {3, 4} {Set 2 Uint32 : 23}, {Set 3 Uint64 : 23}, {Set 4 String : xxxxxxxxxx_23} + Rows{15} Label{154 rev 1, 122b}, [24, +2)row | ERowOp 1: {3, 6} {Set 2 Uint32 : 24}, {Set 3 Uint64 : 24}, {Set 4 String : xxxxxxxxxx_24} | ERowOp 1: {3, 7} {Set 2 Uint32 : 25}, {Set 3 Uint64 : 25}, {Set 4 String : xxxxxxxxxx_25} + Rows{16} Label{164 rev 1, 122b}, [26, +2)row | ERowOp 1: {3, 8} {Set 2 Uint32 : 26}, {Set 3 Uint64 : 26}, {Set 4 String : xxxxxxxxxx_26} | ERowOp 1: {3, 10} {Set 2 Uint32 : 27}, {Set 3 Uint64 : 27}, {Set 4 String : xxxxxxxxxx_27} + Rows{17} Label{174 rev 1, 122b}, [28, +2)row | ERowOp 1: {4, 1} {Set 2 Uint32 : 28}, {Set 3 Uint64 : 28}, {Set 4 String : xxxxxxxxxx_28} | ERowOp 1: {4, 3} {Set 2 Uint32 : 29}, {Set 3 Uint64 : 29}, {Set 4 String : xxxxxxxxxx_29} + Rows{19} Label{194 rev 1, 122b}, [30, +2)row | ERowOp 1: {4, 4} {Set 2 Uint32 : 30}, {Set 3 Uint64 : 30}, {Set 4 String : xxxxxxxxxx_30} | ERowOp 1: {4, 6} {Set 2 Uint32 : 31}, {Set 3 Uint64 : 31}, {Set 4 String : xxxxxxxxxx_31} + Rows{20} Label{204 rev 1, 122b}, [32, +2)row | ERowOp 1: {4, 7} {Set 2 Uint32 : 32}, {Set 3 Uint64 : 32}, {Set 4 String : xxxxxxxxxx_32} | ERowOp 1: {4, 8} {Set 2 Uint32 : 33}, {Set 3 Uint64 : 33}, {Set 4 String : xxxxxxxxxx_33} + Rows{21} Label{214 rev 1, 122b}, [34, +2)row | ERowOp 1: {4, 10} {Set 2 Uint32 : 34}, {Set 3 Uint64 : 34}, {Set 4 String : xxxxxxxxxx_34} | ERowOp 1: {5, 1} {Set 2 Uint32 : 35}, {Set 3 Uint64 : 35}, {Set 4 String : xxxxxxxxxx_35} + Rows{24} Label{244 rev 1, 122b}, [36, +2)row | ERowOp 1: {5, 3} {Set 2 Uint32 : 36}, {Set 3 Uint64 : 36}, {Set 4 String : xxxxxxxxxx_36} | ERowOp 1: {5, 4} {Set 2 Uint32 : 37}, {Set 3 Uint64 : 37}, {Set 4 String : xxxxxxxxxx_37} + Rows{25} Label{254 rev 1, 122b}, [38, +2)row | ERowOp 1: {5, 6} {Set 2 Uint32 : 38}, {Set 3 Uint64 : 38}, {Set 4 String : xxxxxxxxxx_38} | ERowOp 1: {5, 7} {Set 2 Uint32 : 39}, {Set 3 Uint64 : 39}, {Set 4 String : xxxxxxxxxx_39} Slices{ [0, 39] } >> TIterator::SerialReverse [GOOD] >> TIterator::GetKey >> TIterator::GetKey [GOOD] >> TIterator::GetKeyWithEraseCache [GOOD] >> TIterator::GetKeyWithVersionSkips |79.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest >> TIterator::GetKeyWithVersionSkips [GOOD] >> DstCreator::SameOwner [GOOD] >> TLegacy::IndexIter >> DstCreator::SamePartitionCount >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-30 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-49 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-49 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-50 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedUpsert+UseSink [GOOD] Test command err: 2025-06-25T14:34:27.788932Z node 1 
:KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:27.789116Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:27.789171Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b57/r3tmp/tmpHr4n3h/pdisk_1.dat 2025-06-25T14:34:28.181279Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:28.184249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:28.271173Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:28.286386Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862064152795 != 1750862064152799 2025-06-25T14:34:28.333791Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:28.333930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:28.345769Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:28.450694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:28.870378Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:30.720737Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:888:2730], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:30.720870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:899:2735], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:30.720990Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:30.726813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:30.753229Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:34:30.933346Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:902:2738], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:34:31.015924Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:964:2780] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:31.402698Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykr60xy33rtafh3zpzfqe8n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTdkNGZhNzQtNDhhZWM2ZDctZGI4ZjIyOGYtODNiMDMxODU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Trace: (Session.query.QUERY_ACTION_EXECUTE -> [(CompileService -> [(CompileActor)]) , (DataExecuter -> [(WaitForTableResolve) , (ComputeActor -> [(ForwardWriteActor)]) , (RunTasks) , (Commit -> [(Datashard.WriteTransaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWriteResult)]) , (Datashard.WriteTransaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWriteResult)])])])]) >> KqpSinkLocks::InsertWithBulkUpsert-UseBulkUpsert [GOOD] >> TLegacy::IndexIter [GOOD] >> TLegacy::ScreenedIndexIter >> TLegacy::ScreenedIndexIter [GOOD] >> TLegacy::StatsIter >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-30 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-31 >> Worker::Basic >> TLegacy::StatsIter [GOOD] >> TPageHandleTest::Uninitialized [GOOD] >> TPageHandleTest::NormalUse [GOOD] >> TPageHandleTest::HandleRef [GOOD] >> TPageHandleTest::PinnedRef [GOOD] >> TPageHandleTest::PinnedRefPure [GOOD] >> TPart::BasicColumnGroups [GOOD] >> DstCreator::WithIntermediateDir [GOOD] >> DstCreator::WithAsyncIndex |79.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedSelect [GOOD] Test command err: 2025-06-25T14:34:27.633289Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:27.633495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:27.633562Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b5c/r3tmp/tmpSdICoc/pdisk_1.dat 2025-06-25T14:34:28.049018Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:28.052528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:28.098427Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:28.108251Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862064030484 != 1750862064030488 2025-06-25T14:34:28.159529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:28.159663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:28.172455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:28.265026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:28.665631Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:30.508470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:888:2730], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:30.508623Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:899:2735], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:30.508730Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:30.513995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:30.539057Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:34:30.719453Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:902:2738], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:34:30.821674Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:964:2780] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:31.290913Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykr60q9ewzae7s63hk7214y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTg5ZTI1MTUtNzc5NmQ2YjktYWVjZTA3YjQtZDBjYjE0ZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:31.409229Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykr61gr4b3a29qpnwc7pm8s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgwOTdmNS05MGQzNTI3NC01ZGU3NzllOC1hNTFiMDI0YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:32.101228Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykr61vbebqyw9gr5r8hj6cb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTA2NTk4NDMtZDE2OTMwNGEtN2MwYjhmOTUtNmZiMjBiNmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::AutoDropping [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:18.590046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:18.590144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:18.590182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:18.590216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:18.590270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:18.590317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:18.590415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:18.590494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:18.591268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:18.591652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:18.684535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:18.684589Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:18.699985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:18.700366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:18.700532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:18.705471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:18.705784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:18.706394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.706620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:18.709603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:18.709768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:18.710848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:18.710900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:18.711011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:18.711061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:18.711120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:18.711199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.717504Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:18.860535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:18.860779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.861026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:18.861090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:18.861368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:18.861451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:18.869312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.869550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:18.869787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.869863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:18.869906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:18.869941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:18.876088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.876250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:18.876293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:18.878029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.878075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.878121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState 
leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.878170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:18.904552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:18.906339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:18.906530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:18.907429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.907588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:18.907639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.907870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:18.907918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.908122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:18.908211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:18.915380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:18.915457Z node 1 :FLAT_TX_SCHEMESHARD ... 
d__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-25T14:34:32.275034Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-25T14:34:32.275102Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710761 2025-06-25T14:34:32.275166Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-25T14:34:32.275228Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-25T14:34:32.275257Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-06-25T14:34:32.277464Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-25T14:34:32.277549Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:34:32.277646Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:473:2432] TestWaitNotification: OK eventTxId 102 2025-06-25T14:34:32.278870Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:34:32.279086Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 254us result status StatusSuccess 2025-06-25T14:34:32.279622Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 9 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 
MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 desc: 1 2025-06-25T14:34:32.280214Z node 5 :EXPORT DEBUG: schemeshard_export__forget.cpp:79: TExport::TTxForget, dropping export tables, info: { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Done WaitTxId: 281474976710761 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 } 2025-06-25T14:34:32.287264Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-25T14:34:32.287332Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:739: TExport::TTxProgress: Resume: id# 102 2025-06-25T14:34:32.287420Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:537: TExport::TTxProgress: Allocate txId: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 0 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 } 2025-06-25T14:34:32.287525Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-25T14:34:32.287659Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 102, at schemeshard: 72057594046678944 2025-06-25T14:34:32.287726Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-25T14:34:32.287761Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:859: TExport::TTxProgress: OnAllocateResult: txId# 281474976710762, id# 102 2025-06-25T14:34:32.287834Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:529: TExport::TTxProgress: Drop propose: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 0 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 }, txId# 281474976710762 2025-06-25T14:34:32.287927Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-25T14:34:32.290322Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "export-102" } Internal: true } TxId: 281474976710762 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:32.290487Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:28: TRmDir Propose, path: /MyRoot/export-102, pathId: 0, opId: 281474976710762:0, at schemeshard: 72057594046678944 2025-06-25T14:34:32.290652Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710762:1, propose status:StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/export-102', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, at 
schemeshard: 72057594046678944 2025-06-25T14:34:32.300523Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710762, response: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761, at schemeshard: 72057594046678944 2025-06-25T14:34:32.300879Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710762, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/export-102', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, operation: DROP DIRECTORY, path: /MyRoot/export-102 2025-06-25T14:34:32.301065Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6781: Handle: TEvModifySchemeTransactionResult: txId# 281474976710762, status# StatusPathDoesNotExist 2025-06-25T14:34:32.301163Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6783: Message: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761 2025-06-25T14:34:32.301244Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-25T14:34:32.301289Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:920: TExport::TTxProgress: OnModifyResult: txId# 281474976710762, status# StatusPathDoesNotExist 2025-06-25T14:34:32.301384Z node 5 :EXPORT TRACE: schemeshard_export__create.cpp:921: Message: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761 2025-06-25T14:34:32.301517Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:1102: TExport::TTxProgress: Wait for completion: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 281474976710761 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 }, itemIdx# 4294967295, txId# 281474976710761 2025-06-25T14:34:32.305139Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-25T14:34:32.305309Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-25T14:34:32.305449Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-25T14:34:32.305530Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710761 2025-06-25T14:34:32.305592Z node 5 :EXPORT DEBUG: 
schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-25T14:34:32.305635Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1239: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-25T14:34:32.305680Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1270: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-06-25T14:34:32.313193Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 102 2025-06-25T14:34:32.313467Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:34:32.313515Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:34:32.314036Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:34:32.314157Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:34:32.314199Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:687:2641] TestWaitNotification: OK eventTxId 102 >> TableWriter::Backup [GOOD] |79.9%| [TA] $(B)/ydb/core/tx/datashard/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log} |79.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |79.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |79.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |79.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |79.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |79.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |79.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_trace/test-results/unittest/{meta.json ... 
results_accumulator.log} |79.9%| [LD] {RESULT} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |79.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> TableWriter::Backup [GOOD] |79.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::InsertWithBulkUpsert-UseBulkUpsert [GOOD] Test command err: Trying to start YDB, gRPC: 19276, MsgBus: 19329 2025-06-25T14:33:48.442141Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895150885109076:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:48.442360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001306/r3tmp/tmpyEtM4D/pdisk_1.dat 2025-06-25T14:33:49.566416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:49.566494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:49.584904Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:49.598308Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:33:49.598418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:49.623048Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:49.624180Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895150885108979:2080] 1750862028377824 != 1750862028377827 TServer::EnableGrpc on GrpcPort 19276, node 1 2025-06-25T14:33:49.860761Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:49.860782Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:49.860788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:49.860909Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19329 TClient is connected to server localhost:19329 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:51.056843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:51.080876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:33:53.408764Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895150885109076:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:53.408825Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:54.077264Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895176654913410:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:54.077379Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:54.077669Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895176654913422:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:54.081341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:54.102569Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895176654913424:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:33:54.192371Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895176654913476:2343] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:54.919071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:33:55.114114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895176654913640:2309];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:33:55.118428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519895180949880943:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:33:55.118802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895176654913640:2309];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:33:55.119069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895176654913640:2309];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:33:55.119210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895176654913640:2309];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:33:55.119298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895176654913640:2309];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:33:55.119384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895176654913640:2309];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:33:55.119465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895176654913640:2309];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:33:55.119568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895176654913640:2309];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:33:55.119656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037892;self_id=[1:7519895176654913640:2309];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:33:55.119762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895176654913640:2309];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:33:55.119855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895176654913640:2309];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:33:55.123116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519895180949880943:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:33:55.123280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519895180949880943:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:33:55.123375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519895180949880943:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:33:55.123488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519895180949880943:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:33:55.123588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519895180949880943:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:33:55.123680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519895180949880943:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execu ... session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=YWU2MDc4NTEtNmUzMDJhMTYtOGJhZmIwNjAtMWFkODYyOTM=, ActorId: [2:7519895290990621370:2933], ActorState: ExecuteState, TraceId: 01jykr5qeye78dv5t4qvncs75x, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519895295285588713:2933] from: [2:7519895295285588712:2933] 2025-06-25T14:34:21.134563Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519895295285588713:2933] TxId: 281474976710665. Ctx: { TraceId: 01jykr5qeye78dv5t4qvncs75x, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWU2MDc4NTEtNmUzMDJhMTYtOGJhZmIwNjAtMWFkODYyOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T14:34:21.134759Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YWU2MDc4NTEtNmUzMDJhMTYtOGJhZmIwNjAtMWFkODYyOTM=, ActorId: [2:7519895290990621370:2933], ActorState: ExecuteState, TraceId: 01jykr5qeye78dv5t4qvncs75x, Create QueryResponse for error on request, msg: 2025-06-25T14:34:21.136469Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_ABORTED;details=Distributed transaction aborted due to commit failure;tx_id=281474976710665; 2025-06-25T14:34:21.136655Z node 2 :TX_DATASHARD ERROR: datashard.cpp:751: Complete volatile write [1750862061179 : 281474976710665] from 72075186224037889 at tablet 72075186224037889, error: Status: STATUS_ABORTED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } Trying to start YDB, gRPC: 12199, MsgBus: 23036 2025-06-25T14:34:22.922362Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519895296355961355:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:22.922426Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001306/r3tmp/tmpVBU7Zh/pdisk_1.dat 2025-06-25T14:34:23.267992Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:23.280789Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:23.280874Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:23.289487Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12199, node 3 2025-06-25T14:34:23.464688Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:23.464708Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:23.464715Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:23.464835Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23036 2025-06-25T14:34:23.932475Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23036 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:24.393749Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:24.412222Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:34:27.776739Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519895317830798422:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:27.776878Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:27.778509Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519895317830798449:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:27.782674Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:27.796192Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519895317830798451:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:34:27.873928Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519895317830798502:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:27.924712Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519895296355961355:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:27.924799Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:34:27.956270Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:28.030650Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:29.314229Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:31.800569Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976715666; 2025-06-25T14:34:31.802639Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2751: SelfId: [3:7519895335010675841:2933], SessionActorId: [3:7519895335010675782:2933], Got LOCKS BROKEN for table. ShardID=72075186224037888, Sink=[3:7519895335010675841:2933].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-25T14:34:31.802754Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519895335010675841:2933], SessionActorId: [3:7519895335010675782:2933], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:7519895335010675782:2933]. isRollback=0 2025-06-25T14:34:31.802983Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=NTlmZTNkZTgtY2E1YWE2MTgtNjI1NzZkMzgtY2ViMjJhY2U=, ActorId: [3:7519895335010675782:2933], ActorState: ExecuteState, TraceId: 01jykr61wt9g8c4qmnrerpwy36, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:7519895335010675842:2933] from: [3:7519895335010675841:2933] 2025-06-25T14:34:31.803067Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519895335010675842:2933] TxId: 281474976715666. Ctx: { TraceId: 01jykr61wt9g8c4qmnrerpwy36, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTlmZTNkZTgtY2E1YWE2MTgtNjI1NzZkMzgtY2ViMjJhY2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T14:34:31.803244Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NTlmZTNkZTgtY2E1YWE2MTgtNjI1NzZkMzgtY2ViMjJhY2U=, ActorId: [3:7519895335010675782:2933], ActorState: ExecuteState, TraceId: 01jykr61wt9g8c4qmnrerpwy36, Create QueryResponse for error on request, msg: 2025-06-25T14:34:31.805806Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_ABORTED;details=Distributed transaction aborted due to commit failure;tx_id=281474976715666; 2025-06-25T14:34:31.805960Z node 3 :TX_DATASHARD ERROR: datashard.cpp:751: Complete volatile write [1750862071847 : 281474976715666] from 72075186224037889 at tablet 72075186224037889, error: Status: STATUS_ABORTED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } >> BuildStatsHistogram::Ten_Crossed_Log [GOOD] >> BuildStatsHistogram::Five_Five_Mixed >> YdbIndexTable::MultiShardTableOneIndex [GOOD] >> YdbIndexTable::MultiShardTableOneIndexDataColumn >> DstCreator::ColumnsSizeMismatch [GOOD] >> DstCreator::ColumnTypeMismatch >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-2 >> DstCreator::GlobalConsistency [GOOD] >> DstCreator::KeyColumnNameMismatch >> DataShardVolatile::UpsertBrokenLockArbiterRestart-UseSink [GOOD] >> DataShardVolatile::UpsertDependenciesShardsRestart+UseSink >> TableWriter::Restore [GOOD] |80.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-6 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-7 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TPart::BasicColumnGroups [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:19.629347Z 00000.007 DD| RESOURCE_BROKER: TResourceBrokerActor bootstrap 00000.010 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.010 II| FAKE_ENV: Starting storage for BS group 0 00000.011 II| FAKE_ENV: Starting storage for BS group 1 00000.011 II| FAKE_ENV: Starting storage for BS group 2 00000.011 II| FAKE_ENV: Starting storage for BS group 3 00000.028 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.028 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} hope 1 -> done Change{2, redo 0b alter 302b annex 0, ~{ } -{ }, 0 gb} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} release 4194304b of static, Memory{0 dyn 0} 00000.030 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 1, state Free, final id 0, final level 0 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 
00000.033 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} hope 1 -> done Change{2, redo 0b alter 15b annex 0, ~{ } -{ }, 0 gb} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} release 4194304b of static, Memory{0 dyn 0} 00000.034 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 1, state Free, final id 0, final level 0 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 104856577b requested for data (104857601b in total) 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{1 104857601b} type large_transaction 00000.035 DD| RESOURCE_BROKER: Submitted new unknown task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) priority=5 resources={0, 104857601} 00000.035 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.035 DD| RESOURCE_BROKER: Allocate resources {0, 104857601} for task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) from queue queue_default 00000.035 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.035 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 
12.207031 (insert task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])) 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{1 104857601b}, Memory{0 dyn 104857601} 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{1 104857601b}, Memory{0 dyn 0} 00000.035 DD| RESOURCE_BROKER: Finish task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) (release resources {0, 104857601}) 00000.035 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 12.207031 to 0.000000 (remove task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])) 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.035 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 104856577b requested for data (104857601b in total) 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 104857601b of static mem, Memory{104857601 dyn 0} 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 104857601b of static, Memory{0 dyn 0} 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b 
in total), 209714177b requested for data (209715201b in total) 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.036 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{2 209715201b} type large_transaction 00000.036 DD| RESOURCE_BROKER: Submitted new unknown task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) priority=5 resources={0, 209715201} 00000.036 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.037 DD| RESOURCE_BROKER: Allocate resources {0, 209715201} for task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) from queue queue_default 00000.037 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.037 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 23.193359 (insert task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])) 00000.037 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{2 209715201b}, Memory{0 dyn 209715201} 00000.037 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.037 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{2 209715201b}, Memory{0 dyn 0} 00000.037 DD| RESOURCE_BROKER: Finish task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) (release resources {0, 209715201}) 00000.037 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 23.193359 to 0.000000 (remove task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])) 00000.037 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.037 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.037 DD| TABLET_EXE ... 
76:97:0], [1:2:54:1:24576:97:0], [1:2:55:1:24576:97:0], [1:2:56:1:24576:97:0], [1:2:57:1:24576:97:0], [1:2:58:1:24576:97:0], [1:2:59:1:24576:97:0], [1:2:60:1:24576:97:0], [1:2:61:1:24576:97:0], [1:2:62:1:24576:97:0], [1:2:63:1:24576:97:0], [1:2:64:1:24576:97:0], [1:2:65:1:24576:97:0], [1:2:66:1:24576:97:0], [1:2:67:1:24576:97:0], [1:2:68:1:24576:97:0], [1:2:69:1:24576:97:0], [1:2:70:1:24576:97:0], [1:2:71:1:24576:97:0], [1:2:72:1:24576:97:0], [1:2:73:1:24576:101:0], [1:2:74:1:24576:102:0], [1:2:75:1:24576:101:0], [1:2:76:1:24576:102:0], [1:2:77:1:24576:104:0], [1:2:78:1:24576:104:0], [1:2:79:1:24576:104:0], [1:2:80:1:24576:104:0], [1:2:81:1:24576:103:0], [1:2:82:1:24576:101:0], [1:2:83:1:24576:104:0], [1:2:84:1:24576:104:0], [1:2:85:1:24576:104:0], [1:2:86:1:24576:104:0], [1:2:87:1:24576:104:0], [1:2:88:1:24576:104:0], [1:2:89:1:24576:104:0], [1:2:90:1:24576:101:0], [1:2:91:1:24576:104:0], [1:2:92:1:24576:104:0], [1:2:93:1:24576:98:0], [1:2:94:1:24576:104:0], [1:2:95:1:24576:104:0], [1:2:96:1:24576:104:0], [1:2:97:1:24576:104:0], [1:2:98:1:24576:104:0], [1:2:99:1:24576:104:0], [1:2:100:1:24576:104:0], [1:2:101:1:24576:97:0], [1:2:102:1:24576:100:0], [1:2:103:1:24576:104:0], [1:2:104:1:24576:104:0], [1:2:105:1:24576:104:0], [1:2:106:1:24576:104:0], [1:2:107:1:24576:104:0], [1:2:108:1:24576:104:0], [1:2:109:1:24576:104:0], [1:2:110:1:24576:104:0], [1:2:111:1:24576:104:0], [1:2:112:1:24576:104:0], [1:2:113:1:24576:104:0], [1:2:114:1:24576:104:0], [1:2:115:1:24576:104:0], [1:2:116:1:24576:104:0], [1:2:117:1:24576:104:0], [1:2:118:1:24576:104:0], [1:2:119:1:24576:104:0], [1:2:120:1:24576:104:0], [1:2:121:1:24576:104:0], [1:2:122:1:24576:104:0], [1:2:123:1:24576:104:0], [1:2:124:1:24576:104:0], [1:2:125:1:24576:104:0], [1:2:126:1:24576:104:0], [1:2:127:1:24576:104:0], [1:2:128:1:24576:104:0], [1:2:129:1:24576:104:0], [1:2:130:1:24576:104:0], [1:2:131:1:24576:104:0], [1:2:132:1:24576:104:0], [1:2:133:1:24576:104:0], [1:2:134:1:24576:104:0], [1:2:135:1:24576:104:0], [1:2:136:1:24576:104:0], [1:2:137:1:24576:104:0], [1:2:138:1:24576:104:0], [1:2:139:1:24576:104:0], [1:2:140:1:24576:104:0], [1:2:141:1:24576:104:0], [1:2:142:1:24576:104:0], [1:2:145:1:24576:60:0], [1:2:146:1:24576:60:0] } 00000.107 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:143:1:12288:758:0] 00000.107 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.107 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] owner [20:212:2237] cookie 4 class Online from cache [ ] already requested [ ] to request [ 22 23 24 25 ] 00000.107 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:143:1:12288:758:0] status OK pages [ 22 23 24 25 ] 00000.107 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:143:1:12288:758:0] owner [20:212:2237] class Online pages [ 22 23 24 25 ] cookie 4 00000.108 II| TABLET_EXECUTOR: Leader{1:3:0} activating executor 00000.108 II| TABLET_EXECUTOR: LSnap{1:3, on 3:1, 1880b, wait} done, Waste{2:0, 141856b +(140, 14018b), 146 trc} 00000.109 DD| TABLET_SAUSAGECACHE: Attach page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.109 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] owner [20:212:2237] cookie 2 class AsyncLoad from cache [ 22 23 24 25 ] already requested [ ] to request [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.109 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] async queue pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.110 DD| 
TABLET_EXECUTOR: Leader{1:3:2} commited cookie 2 for step 1 00000.110 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:143:1:12288:758:0] owner [20:212:2237] pages [ 22 23 24 25 ] 00000.111 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:143:1:12288:758:0] status OK pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.111 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:143:1:12288:758:0] owner [20:212:2237] class AsyncLoad pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 ] cookie 2 00000.111 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{26 pages [1:2:143:1:12288:758:0] ok OK}, category 2 00000.111 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:143:1:12288:758:0] owner [20:212:2237] pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 ] 00000.112 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan 00000.112 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.113 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} hope 1 -> done Change{145, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.113 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} release 4194304b of static, Memory{0 dyn 0} 00000.113 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.113 II| TABLET_EXECUTOR: Leader{1:3:2} suiciding, Waste{2:0, 141856b +(0, 0b), 1 trc, -14018b acc} 00000.113 DD| TABLET_SAUSAGECACHE: Unregister owner [20:212:2237] 00000.113 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.113 DD| TABLET_SAUSAGECACHE: Remove owner [20:212:2237] 00000.114 DD| TABLET_SAUSAGECACHE: Drop expired page collection [1:2:143:1:12288:758:0] 00000.114 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {6 1077b} miss {50 281387b} 00000.114 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.114 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {14354b, 149} 00000.114 II| FAKE_ENV: DS.1 gone, left {143736b, 8}, put {157893b, 150} 00000.114 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.114 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.114 II| FAKE_ENV: All BS storage groups are stopped 00000.114 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.114 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 795}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:24.113231Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.037 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.038 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {3 512b} miss {0 0b} 00000.038 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.038 II| FAKE_ENV: DS.0 gone, left {1356b, 12}, put {1376b, 13} 00000.038 II| FAKE_ENV: DS.1 gone, left {6814b, 23}, 
put {6814b, 23} 00000.038 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.038 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.039 II| FAKE_ENV: All BS storage groups are stopped 00000.039 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.039 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:24.158844Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.008 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00000.299 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.299 NN| TABLET_SAUSAGECACHE: Poison cache serviced 10 reqs hit {860 5551893b} miss {0 0b} 00000.307 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.307 II| FAKE_ENV: DS.0 gone, left {1201b, 13}, put {1221b, 14} 00000.307 II| FAKE_ENV: DS.1 gone, left {6751256b, 17}, put {6751256b, 17} 00000.309 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.309 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.309 II| FAKE_ENV: All BS storage groups are stopped 00000.309 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.309 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:24.482483Z 00000.014 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.015 II| FAKE_ENV: Starting storage for BS group 0 00000.015 II| FAKE_ENV: Starting storage for BS group 1 00000.015 II| FAKE_ENV: Starting storage for BS group 2 00000.015 II| FAKE_ENV: Starting storage for BS group 3 00017.452 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00017.452 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4109 reqs hit {2091 2366986b} miss {6144 6340608b} 00017.460 II| FAKE_ENV: Shut order, stopping 4 BS groups 00017.460 II| FAKE_ENV: DS.0 gone, left {1761b, 14}, put {1781b, 15} 00017.461 II| FAKE_ENV: DS.1 gone, left {6927727b, 27}, put {6927727b, 27} 00017.473 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00017.474 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00017.474 II| FAKE_ENV: All BS storage groups are stopped 00017.474 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00017.474 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:41.969473Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.007 II| FAKE_ENV: Starting storage for BS group 0 00000.007 II| FAKE_ENV: Starting storage for BS group 1 00000.008 II| FAKE_ENV: Starting storage for BS group 2 00000.008 II| FAKE_ENV: Starting storage for BS group 3 00021.089 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00021.089 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4106 reqs hit {43 253450b} miss {4096 4227072b} 00021.101 II| FAKE_ENV: Shut order, stopping 4 BS groups 00021.107 II| FAKE_ENV: DS.0 gone, left {44744b, 2}, put {164747b, 16} 00021.107 II| FAKE_ENV: DS.1 gone, left {2764621b, 2068}, put {2764621b, 2068} 00021.117 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00021.117 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 
00021.117 II| FAKE_ENV: All BS storage groups are stopped 00021.117 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00021.118 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:34:03.102542Z 00000.012 II| FAKE_ENV: Starting storage for BS group 0 00000.012 II| FAKE_ENV: Starting storage for BS group 1 00000.012 II| FAKE_ENV: Starting storage for BS group 2 00000.013 II| FAKE_ENV: Starting storage for BS group 3 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:34:03.142078Z 00000.018 II| FAKE_ENV: Starting storage for BS group 0 00000.018 II| FAKE_ENV: Starting storage for BS group 1 00000.018 II| FAKE_ENV: Starting storage for BS group 2 00000.018 II| FAKE_ENV: Starting storage for BS group 3 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:34:03.204081Z 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:34:03.283089Z 00000.036 II| FAKE_ENV: Starting storage for BS group 0 00000.037 II| FAKE_ENV: Starting storage for BS group 1 00000.037 II| FAKE_ENV: Starting storage for BS group 2 00000.037 II| FAKE_ENV: Starting storage for BS group 3 |80.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest |80.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> TableWriter::Restore [GOOD] |80.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_table_writer/unittest >> TExecutorDb::RandomCoordinatorSimulation [GOOD] |80.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest |80.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest >> TExecutorDb::MultiPage >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDate32 [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 >> DstCreator::ExistingDst [GOOD] >> DstCreator::EmptyReplicationConfig |80.0%| [TA] $(B)/ydb/core/backup/impl/ut_table_writer/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TExportToS3Tests::CancelUponTransferringManyTablesShouldSucceed [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Microseconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgDate |80.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest |80.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest |80.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest >> DstCreator::ReplicationModeMismatch [GOOD] >> DstCreator::ReplicationConsistencyLevelMismatch >> DstCreator::NonExistentSrc >> TExportToS3Tests::CancelledExportEndTime >> TExecutorDb::MultiPage [GOOD] >> TExecutorDb::EncodedPage >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-7 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-8 >> DstCreator::SamePartitionCount [GOOD] >> DstCreator::WithSyncIndexAndIntermediateDir >> TExecutorDb::EncodedPage [GOOD] >> TFlatCxxDatabaseTest::BasicSchemaTest >> DstCreator::WithAsyncIndex [GOOD] >> TFlatCxxDatabaseTest::BasicSchemaTest [GOOD] >> TFlatCxxDatabaseTest::RenameColumnSchemaTest [GOOD] >> TFlatCxxDatabaseTest::SchemaFillerTest [GOOD] >> TFlatDatabaseDecimal::UpdateRead [GOOD] >> TFlatEraseCacheTest::BasicUsage >> DstCreator::WithSyncIndex >> TFlatEraseCacheTest::BasicUsage [GOOD] >> TFlatEraseCacheTest::BasicUsageReverse [GOOD] >> TFlatEraseCacheTest::CacheEviction [GOOD] >> TFlatEraseCacheTest::StressGarbageCollection [GOOD] >> TFlatEraseCacheTest::StressGarbageCollectionWithStrings >> DstCreator::Basic >> TFlatEraseCacheTest::StressGarbageCollectionWithStrings [GOOD] >> TFlatExecutorLeases::Basics >> EraseRowsTests::ConditionalEraseRowsShouldNotErase >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64Seconds >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Seconds ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64MicroSeconds [GOOD] Test command err: 2025-06-25T14:34:29.803585Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:29.803735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:29.803808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015ef/r3tmp/tmpudCYfu/pdisk_1.dat 2025-06-25T14:34:30.174629Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:30.178229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:30.220694Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:30.226668Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862066838334 != 1750862066838338 2025-06-25T14:34:30.273279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:30.273433Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:30.285044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:30.372805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:30.432065Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:34:30.432416Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:30.490252Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:30.490413Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:30.492363Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:30.492476Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:30.492536Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:30.492987Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:30.493167Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:30.493257Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:34:30.508603Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:30.539420Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:30.539664Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:30.539807Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:34:30.539851Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:30.539905Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:30.539968Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:30.540560Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:30.540663Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:30.540724Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:30.540771Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:30.540819Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:30.540870Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:30.541294Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:34:30.541513Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:30.541795Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:34:30.541913Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:34:30.543743Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:30.555507Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:30.555646Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:30.717986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:34:30.741820Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:34:30.741934Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:30.743168Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:30.743235Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:30.743288Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:30.743617Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:30.743791Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:30.744464Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:30.744556Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:30.746840Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:30.747351Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:30.749450Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:30.749511Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:30.750140Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:30.750232Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:30.751230Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:30.751273Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:30.751343Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:30.751407Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:30.751465Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:30.751566Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:30.756022Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:30.758221Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:30.758335Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:30.759417Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:30.803320Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:30.803465Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: ... .543349Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:36.543397Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:36.543638Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:36.548636Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:36.549254Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:36.549334Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:36.549931Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:36.550350Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:36.552456Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:36.552514Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:36.553007Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:36.553089Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:36.554205Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:36.554630Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:36.554686Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:36.554731Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:36.554791Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:36.554845Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:36.554952Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:36.557640Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:36.557722Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:36.558615Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:36.657252Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:36.657349Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:703:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:36.657434Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:36.662595Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:36.679588Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:36.741145Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:36.877458Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:36.883342Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:34:36.923753Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:37.018637Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykr66qa18bjdppm7d5hxcy8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2Y1ZTI5NGQtMWMxNmY4NmEtYjliNjRjYTYtODlmYWY4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:37.021243Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:809:2640], serverId# [2:810:2641], sessionId# [0:0:0] 2025-06-25T14:34:37.021692Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-25T14:34:37.021867Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=4 2025-06-25T14:34:37.036970Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:37.113674Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:817:2647], serverId# [2:818:2648], sessionId# [0:0:0] 2025-06-25T14:34:37.114713Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:37.129169Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:37.129250Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:37.129512Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:37.129559Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-25T14:34:37.129853Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:37.129905Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:37.129956Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:37.130028Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:37.130120Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:817:2647], serverId# [2:818:2648], sessionId# [0:0:0] 2025-06-25T14:34:37.131117Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:37.131459Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:37.131650Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-25T14:34:37.131695Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:37.131747Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-25T14:34:37.131974Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:37.132036Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:37.132810Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-25T14:34:37.133081Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:34:37.133361Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-25T14:34:37.133414Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-25T14:34:37.135190Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:37.135249Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-25T14:34:37.135621Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:37.135662Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:37.135700Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-25T14:34:37.135832Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:37.135889Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:37.135935Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> DistributedEraseTests::DistributedEraseTxShouldFailOnVariousErrors ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::SamePartitionCount [GOOD] Test command err: 2025-06-25T14:34:28.181806Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895322916181819:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:28.189144Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d2/r3tmp/tmpPEsWvi/pdisk_1.dat 2025-06-25T14:34:28.674143Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:28.707241Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:28.707352Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:28.711664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17230 TServer::EnableGrpc on GrpcPort 7668, node 1 2025-06-25T14:34:29.001568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:29.001596Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:29.001632Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:29.001750Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:34:29.187866Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17230 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:29.464854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:34:29.485015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:34:29.488286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:34:29.495651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862069614 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862069523 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862069614 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-25T14:34:29.702210Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:29.702326Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:29.702337Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:29.703200Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:32.188936Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862069614, tx_id: 281474976715659 } } } 2025-06-25T14:34:32.189291Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:32.191059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:32.192389Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715660} 2025-06-25T14:34:32.192415Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715660 TClient::Ls request: /Root/Replicated 2025-06-25T14:34:32.282198Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715660 2025-06-25T14:34:32.282246Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: 
EPathTypeTable CreateFinished: true CreateTxId: 281474976715660 CreateStep: 1750862072323 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-06-25T14:34:33.229638Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895347494607138:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:33.229823Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d2/r3tmp/tmp35gowP/pdisk_1.dat 2025-06-25T14:34:33.411074Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:33.416174Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895347494607117:2080] 1750862073219896 != 1750862073219899 2025-06-25T14:34:33.430161Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:33.430249Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:33.431385Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8436 TServer::EnableGrpc on GrpcPort 1569, node 2 2025-06-25T14:34:33.652424Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:33.652440Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:33.652446Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:33.652538Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8436 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:34.045482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:34.057062Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:34:34.060590Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750862074178 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862074094 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750862074178 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-25T14:34:34.208909Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:34.209049Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:34.209061Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:34.215104Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:34.266966Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:37.095710Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862074178, tx_id: 281474976715658 } } } 2025-06-25T14:34:37.096110Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:37.097966Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:37.099504Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-25T14:34:37.099519Z node 2 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-25T14:34:37.171950Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-06-25T14:34:37.171977Z node 2 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Table 
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750862074178 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862077202 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) >> TExportToS3Tests::CancelledExportEndTime [GOOD] >> DataShardVolatile::DistributedUpsertRestartBeforePrepare-UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPrepare+UseSink >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint32 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-31 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-32 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-49 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-50 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-50 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-51 >> DstCreator::KeyColumnNameMismatch [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithAsyncIndex [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017cd/r3tmp/tmpj6BAnz/pdisk_1.dat 2025-06-25T14:34:29.945407Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895329380919154:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:29.945922Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:34:30.241703Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:30.248525Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895329380918950:2080] 1750862069652480 != 1750862069652483 2025-06-25T14:34:30.264837Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:30.264962Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:30.270529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22771 TServer::EnableGrpc on GrpcPort 11909, node 1 2025-06-25T14:34:30.610250Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:30.610273Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:30.610281Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:30.610424Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:34:30.671167Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22771 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:31.079441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:31.100732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:34:31.104373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750862071217 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862071140 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750862071217 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... (TRUNCATED) 2025-06-25T14:34:31.374188Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:31.374347Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:31.374363Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:31.380475Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:33.260378Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: 
[DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862071217, tx_id: 281474976710658 } } } 2025-06-25T14:34:33.260761Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:33.262683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:33.263721Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-25T14:34:33.263741Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 TClient::Ls request: /Root/Dir/Replicated 2025-06-25T14:34:33.302692Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-25T14:34:33.302741Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 4] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862073338 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... 
(TRUNCATED) 2025-06-25T14:34:34.122218Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895348790720961:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:34.122258Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017cd/r3tmp/tmpZZjzcn/pdisk_1.dat 2025-06-25T14:34:34.431597Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:34.432644Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895348790720940:2080] 1750862074121302 != 1750862074121305 2025-06-25T14:34:34.448579Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:34.448667Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:34.454650Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19798 TServer::EnableGrpc on GrpcPort 26652, node 2 2025-06-25T14:34:34.766221Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:34.766244Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:34.766251Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:34.766366Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19798 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:35.133674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:34:35.142309Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:34:35.153473Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:35.155632Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750862075494 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862075186 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750862075494 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... 
(TRUNCATED) 2025-06-25T14:34:35.584229Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:35.584371Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:35.584384Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:35.588406Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:37.947766Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862075494, tx_id: 281474976715658 } } } 2025-06-25T14:34:37.948095Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:37.949559Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:37.951028Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-25T14:34:37.951044Z node 2 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-25T14:34:37.991756Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 TClient::Ls request: /Root/Replicated 2025-06-25T14:34:37.991786Z node 2 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 5] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 5 SchemeshardId: 72057594046644480 PathType: 
EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862078028 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key... (TRUNCATED) >> DstCreator::ColumnTypeMismatch [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-7 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-8 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::KeyColumnNameMismatch [GOOD] Test command err: 2025-06-25T14:34:31.373207Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895338537768743:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:31.384923Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b5/r3tmp/tmpr0U5Ht/pdisk_1.dat 2025-06-25T14:34:31.904655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:31.904787Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:31.908903Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:31.917328Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:31.920689Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895338537768712:2080] 1750862071358499 != 1750862071358502 TClient is connected to server localhost:10236 TServer::EnableGrpc on GrpcPort 30148, node 1 2025-06-25T14:34:32.222291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:32.222324Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:32.222333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:32.222451Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:34:32.372739Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10236 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:32.655046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:32.670223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:34:32.673907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750862072806 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862072708 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750862072806 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... (TRUNCATED) 2025-06-25T14:34:32.919612Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:32.919801Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:32.919818Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:32.920364Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:35.012483Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862072806, tx_id: 281474976710658 } } } 2025-06-25T14:34:35.012766Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:35.014512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:35.015865Z node 
1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-25T14:34:35.015887Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-25T14:34:35.072093Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-25T14:34:35.072125Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862075109 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-06-25T14:34:35.972814Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895353450452301:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:35.972852Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b5/r3tmp/tmpjps9xI/pdisk_1.dat 2025-06-25T14:34:36.232834Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:36.232931Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:36.255861Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:36.257522Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895353450452283:2080] 1750862075972 ... AttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:34:36.938902Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:36.949574Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:36.958756Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:37.126152Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862076992 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862077216 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862076992 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862077216 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-25T14:34:37.221199Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:37.221310Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:37.221322Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:37.221971Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:39.654598Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862077146, tx_id: 281474976710658 } } } 2025-06-25T14:34:39.654864Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:39.656460Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-25T14:34:39.657446Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862077216 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "value" KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 
SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-25T14:34:39.657679Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Key column name mismatch: position: 0, expected: key, got: value ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::ColumnTypeMismatch [GOOD] Test command err: 2025-06-25T14:34:30.306631Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895333078970214:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:30.314536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017bf/r3tmp/tmpXDfooM/pdisk_1.dat 2025-06-25T14:34:30.932924Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895333078970022:2080] 1750862070245584 != 1750862070245587 2025-06-25T14:34:30.948253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:30.948373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:30.952977Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:30.990608Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:20113 TServer::EnableGrpc on GrpcPort 25674, node 1 2025-06-25T14:34:31.312474Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:31.448940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:31.448960Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:31.448966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:31.449075Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20113 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:32.459590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:32.501436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:34:32.511181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:32.745905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862072526 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862072862 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862072526 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862072862 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-25T14:34:32.933582Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:32.933738Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:32.933751Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:32.939998Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:34.947860Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862072638, tx_id: 281474976710658 } } } 2025-06-25T14:34:34.948255Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:34.949926Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-25T14:34:34.952069Z node 1 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862072862 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "extra" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Thre ... ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:37.133607Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:34:37.144089Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:34:37.146948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:37.222840Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862077181 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862077321 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862077181 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862077321 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-25T14:34:37.362635Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:37.362736Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:37.362747Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:37.363415Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:39.789915Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862077237, tx_id: 281474976710658 } } } 2025-06-25T14:34:39.790181Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:39.791600Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-25T14:34:39.793174Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862077321 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 
SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-25T14:34:39.793378Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Column type mismatch: name: value, expected: Utf8, got: Uint32 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::CancelledExportEndTime [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:18.855910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:18.856004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:18.856042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:18.856074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:18.856116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:18.856144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:18.856199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:18.856271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:18.857021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:18.857330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:18.999404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:18.999462Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:19.029947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:19.030125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:19.030277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:19.050570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:19.050906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:19.051542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.051721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:19.063315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:19.063537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:19.064671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:19.064732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:19.064907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:19.064954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:19.064993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:19.065086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.089229Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:34:19.275362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:19.275700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.276113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:19.276200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: 
TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:19.276586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:19.276762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:19.285337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.285564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:19.285812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.285866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:19.285903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:19.285940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:19.293344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.293423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:19.293473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:19.295296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.295350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:19.295391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.295446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:19.299072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:19.301209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg 
operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:19.301397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:19.302384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:19.302512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:19.302564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.302859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:19.302915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:19.303067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:19.303143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:19.306759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:19.306828Z node 1 :FLAT_TX_SCHEMESHARD ... 
4046678944 TestWaitNotification wait txId: 102 2025-06-25T14:34:39.692148Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:34:39.692217Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:34:39.695400Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/export-102" OperationType: ESchemeOpBackup Backup { TableName: "0" NumberOfRetries: 0 S3Settings { Endpoint: "localhost:22747" Scheme: HTTP Bucket: "" ObjectKeyPattern: "" AccessKey: "" SecretKey: "" StorageClass: STORAGE_CLASS_UNSPECIFIED UseVirtualAddressing: true } Table { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" 
LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } NeedToBill: true SnapshotStep: 0 SnapshotTxId: 0 EnableChecksums: false EnablePermissions: false } Internal: true } TxId: 281474976710759 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:39.695987Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_backup_restore_common.h:586: TBackup Propose, path: /MyRoot/export-102/0, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:34:39.696126Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:34:39.696170Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710759:0 type: TxBackup target path: [OwnerId: 72057594046678944, LocalPathId: 4] source path: 2025-06-25T14:34:39.703010Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710759:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:39.703119Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpBackup, opId: 281474976710759:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h:563) 2025-06-25T14:34:39.704594Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:62: NotifyTxCompletion export in-flight, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:34:39.704660Z node 4 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:34:39.712215Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710759, response: Status: StatusAccepted TxId: 281474976710759 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:39.712547Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710759, database: /MyRoot, subject: , status: StatusAccepted, operation: BACKUP TABLE, path: /MyRoot/export-102/0 2025-06-25T14:34:39.712849Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6781: Handle: TEvModifySchemeTransactionResult: txId# 281474976710759, status# StatusAccepted 2025-06-25T14:34:39.712922Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6783: Message: Status: StatusAccepted TxId: 281474976710759 SchemeshardId: 72057594046678944 2025-06-25T14:34:39.713342Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:34:39.713402Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710759:0 ProgressState, operation type: TxBackup, at tablet# 72057594046678944 2025-06-25T14:34:39.713460Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976710759:0 ProgressState no shards to create, do next state 2025-06-25T14:34:39.713496Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710759:0 2 -> 3 2025-06-25T14:34:39.727673Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:71: TTxOperationProposeCancelTx Execute, at schemeshard: 72057594046678944, message: TargetTxId: 281474976710759 TxId: 102 2025-06-25T14:34:39.727772Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_cancel_tx.cpp:37: Execute cancel tx: opId# 102:0, target opId# 281474976710759:0 2025-06-25T14:34:39.728591Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:34:39.728647Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_backup_restore_common.h:58: TBackup TConfigurePart ProgressState, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:34:39.728818Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_backup.cpp:41: Propose backup to datashard 72075186233409547 txid 281474976710759:0 at schemeshard 72057594046678944 2025-06-25T14:34:39.731911Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:88: TTxOperationProposeCancelTx Complete, at schemeshard: 72057594046678944 2025-06-25T14:34:39.732201Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:34:39.732241Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_backup_restore_common.h:58: TBackup TConfigurePart ProgressState, opId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:34:39.732381Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_backup.cpp:41: Propose backup to datashard 72075186233409547 txid 281474976710759:0 at schemeshard 72057594046678944 2025-06-25T14:34:39.732925Z node 4 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6868: Handle: TEvCancelTxResult: Cookie: 102, at schemeshard: 72057594046678944 2025-06-25T14:34:39.733044Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6870: Message: Status: StatusAccepted Result: "Cancelled at SchemeShard" TargetTxId: 281474976710759 TxId: 102 2025-06-25T14:34:39.733516Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710759:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2025-06-25T14:34:39.733660Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710759, partId: 0, tablet: 72075186233409547 2025-06-25T14:34:39.746247Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710759:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 269549568 2025-06-25T14:34:39.747399Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:34:39.747456Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:566:2522] TestWaitNotification: OK eventTxId 102 >> BuildStatsHistogram::Five_Five_Mixed [GOOD] >> BuildStatsHistogram::Five_Five_Serial >> DstCreator::ReplicationConsistencyLevelMismatch [GOOD] >> DstCreator::EmptyReplicationConfig [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-2 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-3 >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgDate [GOOD] >> TFlatExecutorLeases::Basics [GOOD] >> TFlatExecutorLeases::BasicsLeaseTimeout >> DstCreator::NonExistentSrc [GOOD] >> DstCreator::KeyColumnsSizeMismatch >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 [GOOD] >> YdbIndexTable::MultiShardTableOneIndexIndexOverlapDataColumn [GOOD] >> YdbIndexTable::MultiShardTableOneIndexPkOverlap >> DstCreator::WithSyncIndexAndIntermediateDir [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::ReplicationConsistencyLevelMismatch [GOOD] Test command err: 2025-06-25T14:34:32.147075Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895341472583606:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:32.165352Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ae/r3tmp/tmpYbQM3l/pdisk_1.dat 2025-06-25T14:34:32.837407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:32.837544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:32.859620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:32.884443Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7519895341472583568:2080] 1750862072110538 != 1750862072110541 2025-06-25T14:34:32.909964Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:30502 TServer::EnableGrpc on GrpcPort 26956, node 1 2025-06-25T14:34:33.172930Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:33.388985Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:33.389007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:33.389013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:33.389154Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30502 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:34.231273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:34.248520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:34.387044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862074290 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862074472 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862074290 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862074472 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-25T14:34:34.499524Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:34.499675Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:34.499688Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:34.500831Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:36.721218Z node 1 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862074374, tx_id: 281474976715658 } } } 2025-06-25T14:34:36.721522Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:36.723002Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-25T14:34:36.725751Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862074472 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBro ... 
1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:38.873758Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:38.878199Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:34:38.880782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:38.961816Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862078924 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862079078 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862078924 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862079078 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-25T14:34:39.136853Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:39.136972Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:39.136982Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:39.141093Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:41.360655Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862078994, tx_id: 281474976710658 } } } 2025-06-25T14:34:41.360945Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:41.362365Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-25T14:34:41.363307Z node 2 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862079078 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_GLOBAL } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 
RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-25T14:34:41.363480Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Replication consistency level mismatch: expected: CONSISTENCY_LEVEL_ROW, got: 1 >> Worker::Basic [GOOD] >> DstCreator::WithSyncIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgDate [GOOD] Test command err: 2025-06-25T14:34:29.853933Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:29.854089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:29.854149Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015e3/r3tmp/tmp0f4CAR/pdisk_1.dat 2025-06-25T14:34:30.195211Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:30.201843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:30.248365Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:30.253794Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862066986752 != 1750862066986756 2025-06-25T14:34:30.301641Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:30.301821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:30.313654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:30.431620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:30.489849Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:34:30.490142Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:30.536562Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:30.536723Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:30.538777Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:30.538891Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:30.538960Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:30.539399Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:30.539577Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:30.539682Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:34:30.552573Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:30.597543Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:30.597781Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:30.597905Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:34:30.597948Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:30.598001Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:30.598045Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:30.598579Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:30.598710Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:30.598780Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:30.598826Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:30.598874Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:30.598918Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:30.599317Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:34:30.599781Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:30.600001Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:34:30.600102Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:34:30.601960Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:30.614314Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:30.614424Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:30.784539Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:34:30.789785Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:34:30.789880Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:30.790905Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:30.790967Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:30.791015Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:30.791283Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:30.791453Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:30.792038Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:30.792135Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:30.794371Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:30.794856Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:30.806376Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:30.806455Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:30.807099Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:30.807189Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:30.807975Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:30.808024Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:30.808102Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:30.808166Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:30.808223Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:30.808328Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:30.813137Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:30.819464Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:30.819598Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:30.821724Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:30.856235Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:30.856391Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: ... .512853Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:41.512901Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:41.513157Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:41.513290Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:41.514040Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:41.514103Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:41.521038Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:41.521598Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:41.523968Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:41.524030Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:41.524440Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:41.524538Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:41.525806Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:41.525856Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:41.525907Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:41.525974Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:41.526046Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:41.526159Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:41.528101Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:41.530434Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:41.530502Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:41.530833Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:41.575821Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:41.575934Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:704:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:41.576016Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:41.588553Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:41.596150Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:41.643463Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:41.777471Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:41.783240Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:34:41.826022Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:41.952418Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykr6bh63k9atv4nkgp2bgsf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODk5ZGU2YWYtN2ZhMmY5MTctNTdkZTlhMTEtYTNhNGRmMWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:41.955462Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:809:2640], serverId# [3:810:2641], sessionId# [0:0:0] 2025-06-25T14:34:41.955956Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-25T14:34:41.956153Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-25T14:34:41.967377Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:42.007545Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:817:2647], serverId# [3:818:2648], sessionId# [0:0:0] 2025-06-25T14:34:42.008953Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:42.020495Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:42.020582Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:42.020853Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:42.020905Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-25T14:34:42.021225Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:42.021279Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:42.021327Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:42.021397Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:42.021485Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:817:2647], serverId# [3:818:2648], sessionId# [0:0:0] 2025-06-25T14:34:42.022504Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:42.022883Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:42.023117Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-25T14:34:42.023180Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:42.023239Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-25T14:34:42.023485Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:42.023563Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:42.024218Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-25T14:34:42.026629Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 43, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:34:42.026825Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-25T14:34:42.026885Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-25T14:34:42.063533Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:42.063623Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-25T14:34:42.064140Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:42.064186Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:42.064224Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-25T14:34:42.064422Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:42.064500Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:42.064553Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::EmptyReplicationConfig [GOOD] Test command err: 2025-06-25T14:34:32.018024Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895343148984074:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:32.018348Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017a9/r3tmp/tmpambgV6/pdisk_1.dat 2025-06-25T14:34:32.624044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:32.628483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:32.634137Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:32.644170Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895338854016586:2080] 1750862071925261 != 1750862071925264 2025-06-25T14:34:32.676469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16323 TServer::EnableGrpc on GrpcPort 2949, node 1 2025-06-25T14:34:33.017769Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:33.246596Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:33.246644Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:33.246652Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:33.246811Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16323 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:33.849369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:33.869174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:34:33.890861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:34:34.139952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862073905 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862074220 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862073905 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862074220 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-25T14:34:34.262982Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:34.263096Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:34.263112Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:34.263823Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:36.339153Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862074045, tx_id: 281474976715658 } } } 2025-06-25T14:34:36.339453Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:36.340927Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-25T14:34:36.342756Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862074220 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBr ... EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:38.681758Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:38.688536Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:34:38.695171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:34:38.790338Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862078728 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862078903 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862078728 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862078903 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-25T14:34:38.915616Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:38.915712Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:38.915723Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:38.916721Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:41.441998Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862078812, tx_id: 281474976715658 } } } 2025-06-25T14:34:41.442323Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:41.443815Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-25T14:34:41.444724Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862078903 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } 
KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 
MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-25T14:34:41.444877Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Empty replication config >> DstCreator::Basic [GOOD] >> DstCreator::CannotFindColumn |80.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |80.0%| [TA] {RESULT} $(B)/ydb/core/backup/impl/ut_table_writer/test-results/unittest/{meta.json ... results_accumulator.log} |80.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |80.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDatetime64 [GOOD] Test command err: 2025-06-25T14:34:29.042631Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:29.042807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:29.042887Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001602/r3tmp/tmpP1nCLF/pdisk_1.dat 2025-06-25T14:34:29.413215Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:29.416872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:29.468503Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:29.481481Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862065598077 != 1750862065598081 2025-06-25T14:34:29.531579Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:29.531746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:29.543666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:29.641943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:29.702182Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:34:29.702441Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:29.746917Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:29.747080Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:29.748774Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:29.748871Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:29.748925Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:29.749317Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:29.749467Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:29.749554Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:34:29.760335Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:29.791990Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:29.792206Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:29.792526Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:34:29.792572Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:29.792644Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:29.792692Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:29.793216Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:29.793334Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:29.793401Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:29.793439Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:29.793478Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:29.793513Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:29.793879Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:34:29.794060Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:29.794303Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:34:29.794401Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:34:29.796087Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:29.808819Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:29.808908Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:29.978966Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:34:29.983876Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:34:29.983953Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:29.989559Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:29.989627Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:29.989684Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:29.989972Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:29.990121Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:29.990692Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:29.990764Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:29.993293Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:29.993745Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:29.995743Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:29.995808Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:29.996828Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:29.996949Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:29.997951Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:29.997999Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:29.998072Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:29.998169Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:29.998230Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:29.998349Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:30.004278Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:30.006505Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:30.006587Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:30.007535Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:30.049937Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:30.050097Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: ... .865383Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:41.865453Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:41.865682Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:41.865807Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:41.866459Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:41.866545Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:41.866955Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:41.867320Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:41.872422Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:41.872513Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:41.872890Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:41.872974Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:41.873989Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:41.874035Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:41.874092Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:41.874151Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:41.874202Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:41.874284Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:41.889061Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:41.899013Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:41.899101Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:41.899474Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:41.938106Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:41.938194Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:704:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:41.938264Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:41.943590Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:41.949252Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:41.996969Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:42.126987Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:42.129977Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:34:42.169271Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:42.278912Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykr6bwg7yzb7waasq78y6d3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MTg1ZjFjZjYtOGM1Y2MyNGQtYTUwMDM0YWEtYmYzNDAwMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:42.282177Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:809:2640], serverId# [3:810:2641], sessionId# [0:0:0] 2025-06-25T14:34:42.282664Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-25T14:34:42.282849Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-25T14:34:42.293982Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:42.329395Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:817:2647], serverId# [3:818:2648], sessionId# [0:0:0] 2025-06-25T14:34:42.330688Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:42.342234Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:42.342332Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:42.342618Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:42.342679Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-25T14:34:42.343035Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:42.343097Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:42.343160Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:42.343237Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:42.343347Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:817:2647], serverId# [3:818:2648], sessionId# [0:0:0] 2025-06-25T14:34:42.344453Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:42.344858Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:42.345050Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-25T14:34:42.345103Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:42.345160Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-25T14:34:42.345433Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:42.345503Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:42.346170Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-25T14:34:42.346439Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:34:42.346599Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-25T14:34:42.346657Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-25T14:34:42.406075Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:42.406172Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-25T14:34:42.406754Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:42.406816Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:42.406865Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-25T14:34:42.407030Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:42.407114Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:42.407179Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> TFlatExecutorLeases::BasicsLeaseTimeout [GOOD] >> TFlatExecutorLeases::BasicsInitialLease ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithSyncIndexAndIntermediateDir [GOOD] Test command err: 2025-06-25T14:34:38.588857Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895369095728568:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:38.589611Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001794/r3tmp/tmpHeR40d/pdisk_1.dat 2025-06-25T14:34:39.081679Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:39.114230Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:39.114370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:39.115793Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7760 TServer::EnableGrpc on GrpcPort 25430, node 1 2025-06-25T14:34:39.444807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:39.444830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:39.444836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:39.444940Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:34:39.592471Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7760 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:39.909107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:39.924378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:34:39.933819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750862080373 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862079967 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750862080373 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... (TRUNCATED) 2025-06-25T14:34:40.491594Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:40.491864Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:40.491891Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:40.495838Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:42.217261Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: 
[DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862080373, tx_id: 281474976710658 } } } 2025-06-25T14:34:42.217630Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:42.219752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:42.222541Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-25T14:34:42.222560Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-25T14:34:42.260350Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-25T14:34:42.261608Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dir/Replicated" PathDescription { Self { Name: "Replicated" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862082298 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 
ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraC ... pshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 8 PathOwnerId: 72057594046644480 } 2025-06-25T14:34:42.329799Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 2] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 8] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "index_by_value" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862082298 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" 
PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862082298 ParentPathId: 7 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { ... (TRUNCATED) TClient::Ls request: /Root/Dir/Replicated/index_by_value/indexImplTable TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862082298 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } ... (TRUNCATED) Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862082298 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 
SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } Path: "/Root/Dir/Replicated/index_by_value/indexImplTable" >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-8 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-9 |80.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |80.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview >> EraseRowsTests::ConditionalEraseRowsShouldNotErase [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors >> DistributedEraseTests::DistributedEraseTxShouldFailOnVariousErrors [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldErase >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64Seconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_worker/unittest >> Worker::Basic [GOOD] Test command err: 2025-06-25T14:34:34.018700Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895345873106409:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:34.019155Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017a5/r3tmp/tmpqM4TrD/pdisk_1.dat 2025-06-25T14:34:34.606819Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:34.606888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:34.609126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:34.623841Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:34.625495Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895345873106312:2080] 1750862073993766 != 1750862073993769 TClient is connected to server localhost:13630 TServer::EnableGrpc on GrpcPort 31604, node 1 2025-06-25T14:34:34.989023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:34.989053Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:34.989064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:34.989191Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:34:35.040497Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13630 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:35.463600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:35.480644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:34:35.684110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862075802 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-25T14:34:35.916506Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7519895354463041711:2420] Handshake: worker# [1:7519895354463041710:2420] 2025-06-25T14:34:35.916571Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handshake: worker# [1:7519895354463041710:2420] 2025-06-25T14:34:35.944589Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:34:35.945088Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 3] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:34:35.945122Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Send handshake: worker# [1:7519895354463041710:2420] 2025-06-25T14:34:35.945183Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:150: [Worker][1:7519895354463041710:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvHandshake 2025-06-25T14:34:35.945197Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:162: [Worker][1:7519895354463041710:2420] Handshake with writer: sender# [1:7519895354463041712:2420] 2025-06-25T14:34:35.957251Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7519895354463041711:2420] Create read session: session# [1:7519895354463041715:2291] 2025-06-25T14:34:35.981187Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:150: [Worker][1:7519895354463041710:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvHandshake 2025-06-25T14:34:35.981273Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:154: [Worker][1:7519895354463041710:2420] Handshake with reader: sender# [1:7519895354463041711:2420] 2025-06-25T14:34:35.981418Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519895354463041711:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-25T14:34:36.090377Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:36.096232Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7519895354463041711:2420] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_1_1064706047886589322_v1 } } 2025-06-25T14:34:38.163652Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895367347943775:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:38.163767Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895367347943790:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:38.163813Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895367347943791:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:38.163904Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:38.168295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:38.206234Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895367347943795:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:34:38.218586Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895367347943797:2500] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:34:38.219968Z node 1 :FLAT_TX_SCHEME ... schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:34:41.072455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710682:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:34:41.648706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:34:42.564691Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7519895354463041711:2420] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 36b Offset: 0 SeqNo: 1 CreateTime: 2025-06-25T14:34:42.496000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-25T14:34:42.564788Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:216: [Worker][1:7519895354463041710:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 0 SeqNo: 1 CreateTime: 2025-06-25T14:34:42.496000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-25T14:34:42.564859Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 0 SeqNo: 1 CreateTime: 2025-06-25T14:34:42.496000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-25T14:34:42.564992Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 0 BodySize: 36 }] } 2025-06-25T14:34:42.565123Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519895384527813741:2420] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-25T14:34:42.565191Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-25T14:34:42.565274Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519895384527813741:2420] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ 
Order: 0 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-25T14:34:42.570001Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519895384527813741:2420] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:34:42.570067Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-25T14:34:42.570123Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [0] } 2025-06-25T14:34:42.570195Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:176: [Worker][1:7519895354463041710:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-25T14:34:42.570239Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519895354463041711:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-25T14:34:42.788875Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7519895354463041711:2420] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 2 CreateTime: 2025-06-25T14:34:42.776000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-25T14:34:42.788961Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:216: [Worker][1:7519895354463041710:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 2 CreateTime: 2025-06-25T14:34:42.776000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-25T14:34:42.789013Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 2 CreateTime: 2025-06-25T14:34:42.776000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-25T14:34:42.789106Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 36 }] } 2025-06-25T14:34:42.789212Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519895384527813741:2420] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-25T14:34:42.791207Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519895384527813741:2420] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:34:42.791270Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-25T14:34:42.791307Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } 2025-06-25T14:34:42.791351Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:176: [Worker][1:7519895354463041710:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-25T14:34:42.791398Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519895354463041711:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-25T14:34:43.093518Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7519895354463041711:2420] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 36b Offset: 2 SeqNo: 3 CreateTime: 2025-06-25T14:34:43.036000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-25T14:34:43.093598Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:216: [Worker][1:7519895354463041710:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 2 SeqNo: 3 CreateTime: 2025-06-25T14:34:43.036000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-25T14:34:43.093651Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 2 SeqNo: 3 CreateTime: 2025-06-25T14:34:43.036000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-25T14:34:43.093742Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 2 BodySize: 36 }] } 2025-06-25T14:34:43.093826Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519895384527813741:2420] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-25T14:34:43.096046Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7519895384527813741:2420] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:34:43.096097Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-25T14:34:43.096131Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7519895354463041712:2420] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [2] } 2025-06-25T14:34:43.096181Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:176: [Worker][1:7519895354463041710:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 
2025-06-25T14:34:43.096223Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519895354463041711:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-25T14:34:43.179305Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:119: [RemoteTopicReader][/Root/topic][0][1:7519895354463041711:2420] Handle NKikimr::NReplication::TEvYdbProxy::TEvTopicReaderGone { Result: { status: UNAVAILABLE, issues: {
: Error: PartitionSessionClosed { Partition session id: 1 Topic: "topic" Partition: 0 Reason: ConnectionLost } } } } 2025-06-25T14:34:43.179336Z node 1 :REPLICATION_SERVICE INFO: topic_reader.cpp:131: [RemoteTopicReader][/Root/topic][0][1:7519895354463041711:2420] Leave 2025-06-25T14:34:43.179385Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:235: [Worker][1:7519895354463041710:2420] Reader has gone: sender# [1:7519895354463041711:2420] 2025-06-25T14:34:43.179443Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7519895388822781228:2420] Handshake: worker# [1:7519895354463041710:2420] 2025-06-25T14:34:43.182508Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7519895388822781228:2420] Create read session: session# [1:7519895388822781229:2291] 2025-06-25T14:34:43.182559Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:150: [Worker][1:7519895354463041710:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvHandshake 2025-06-25T14:34:43.182571Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:154: [Worker][1:7519895354463041710:2420] Handshake with reader: sender# [1:7519895388822781228:2420] 2025-06-25T14:34:43.182599Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519895388822781228:2420] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint32 [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64 >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Seconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Milliseconds ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithSyncIndex [GOOD] Test command err: 2025-06-25T14:34:39.116603Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895372845901294:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:39.117151Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001776/r3tmp/tmpxMjv8d/pdisk_1.dat 2025-06-25T14:34:39.546654Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:39.624425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:39.624547Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:39.630562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16628 TServer::EnableGrpc on GrpcPort 21989, node 1 2025-06-25T14:34:39.849031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:39.849063Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:39.849071Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:39.849220Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16628 2025-06-25T14:34:40.126412Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:40.397321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:40.413581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:34:40.424987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750862080849 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862080457 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750862080849 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... (TRUNCATED) 2025-06-25T14:34:41.026007Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:41.026124Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:41.026134Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:41.026853Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:42.785783Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862080849, tx_id: 281474976715658 } } } 2025-06-25T14:34:42.786194Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:42.788445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:42.791295Z node 
1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-25T14:34:42.791317Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-25T14:34:42.842900Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-06-25T14:34:42.844376Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Replicated" PathDescription { Self { Name: "Replicated" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862082879 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCom ... 
0 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037906 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 7 PathOwnerId: 72057594046644480 } 2025-06-25T14:34:42.934789Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 2] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 7] TClient::Ls request: /Root/Replicated/index_by_value TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "index_by_value" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862082879 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862082879 ParentPathId: 6 PathState: 
EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { ... (TRUNCATED) TClient::Ls request: /Root/Replicated/index_by_value/indexImplTable TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862082879 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } ... (TRUNCATED) Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862082879 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 
ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037906 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } Path: "/Root/Replicated/index_by_value/indexImplTable" |80.1%| [TA] $(B)/ydb/core/tx/replication/service/ut_worker/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity [GOOD] Test command err: Trying to start YDB, gRPC: 10653, MsgBus: 18687 2025-06-25T14:31:59.728836Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894681951096934:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:31:59.729399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c6d/r3tmp/tmpKtTQXn/pdisk_1.dat 2025-06-25T14:32:00.424729Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894681951096752:2080] 1750861919662421 != 1750861919662424 2025-06-25T14:32:00.427659Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:00.433233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:00.433320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:00.436712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10653, node 1 2025-06-25T14:32:00.603370Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:32:00.603392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:32:00.603399Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:32:00.603545Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:32:00.693123Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18687 TClient is connected to server localhost:18687 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:32:01.371886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:32:01.387127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:32:01.405139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:01.577240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:01.772852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:01.878175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:32:04.099573Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894703425934884:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:04.099664Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:04.471078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.526168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.578926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.626786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.690870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.692681Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894681951096934:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:04.692805Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:32:04.781791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:04.940233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:32:05.057950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519894707720902845:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:05.058029Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:05.058438Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894707720902850:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:32:05.062349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:32:05.079750Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894707720902852:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:32:05.144920Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894707720902903:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:32:06.594676Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519894712015870480:3600], Recipient [1:7519894686246064431:2179]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:32:06.594716Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:50 ... DataShard::TEvPeriodicTableStats 2025-06-25T14:34:38.810582Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037900 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 4] state 'Ready' dataSize 1472 rowCount 8 cpuUsage 0.0059 2025-06-25T14:34:38.810673Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037900 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 4] raw table stats: DataSize: 1472 RowCount: 8 IndexSize: 0 InMemSize: 1472 LastAccessTime: 1750862030575 LastUpdateTime: 1750862030575 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 8 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:34:38.910790Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519895120661311765:2148]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:34:38.910854Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:34:38.910871Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 3 2025-06-25T14:34:38.910936Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 3 2025-06-25T14:34:38.910957Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 3 2025-06-25T14:34:38.911034Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046644480:12 data size 648 row count 1 2025-06-25T14:34:38.911117Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037899 maps to shardIdx: 72057594046644480:12 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 4], pathId map=Logs, is column=0, is olap=0, RowCount 1, DataSize 648 2025-06-25T14:34:38.911133Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037899, followerId 0 2025-06-25T14:34:38.911197Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] 
[Update] Skipped shard# 72057594046644480:12 with partCount# 0, rowCount# 1, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:34:38.911247Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037899 2025-06-25T14:34:38.911284Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046644480:11 data size 0 row count 0 2025-06-25T14:34:38.911321Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037898 maps to shardIdx: 72057594046644480:11 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 4], pathId map=Logs, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:34:38.911331Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037898, followerId 0 2025-06-25T14:34:38.911363Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:11 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:34:38.911377Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037898 2025-06-25T14:34:38.911398Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046644480:13 data size 1472 row count 8 2025-06-25T14:34:38.911433Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037900 maps to shardIdx: 72057594046644480:13 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 4], pathId map=Logs, is column=0, is olap=0, RowCount 8, DataSize 1472 2025-06-25T14:34:38.911442Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037900, followerId 0 2025-06-25T14:34:38.911474Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:13 with partCount# 0, rowCount# 8, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:34:38.911498Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037900 2025-06-25T14:34:38.911598Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:34:38.912117Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519895120661311765:2148]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:34:38.912140Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:34:38.912151Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:34:39.072576Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519895120661311765:2148]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:34:39.072634Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:34:39.072703Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519895120661311765:2148], Recipient [3:7519895120661311765:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:34:39.072725Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:34:39.232749Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519895129251247288:2322], Recipient [3:7519895120661311765:2148]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037902 TableLocalId: 5 Generation: 1 Round: 4 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 37 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186224037902 NodeId: 3 StartTime: 1750862023921 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:34:39.232831Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:34:39.232872Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037902 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0037 2025-06-25T14:34:39.232985Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037902 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:34:39.233016Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099994s, queue# 1 2025-06-25T14:34:39.336504Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519895120661311765:2148]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:34:39.336563Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:34:39.336589Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-25T14:34:39.336653Z node 3 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-25T14:34:39.336673Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-25T14:34:39.336743Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:15 data size 0 row count 0 2025-06-25T14:34:39.336811Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037902 maps to shardIdx: 72057594046644480:15 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:34:39.336825Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037902, followerId 0 2025-06-25T14:34:39.336890Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:15 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:34:39.336934Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037902 2025-06-25T14:34:39.337008Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:34:39.337135Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519895120661311765:2148]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:34:39.337153Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:34:39.337164Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-51 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-52 >> TFlatExecutorLeases::BasicsInitialLease [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseTimeout >> EraseRowsTests::EraseRowsShouldSuccess >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-32 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-33 >> DistributedEraseTests::ConditionalEraseRowsShouldSuccessOnShardedIndex >> DistributedEraseTests::ConditionalEraseRowsShouldEraseOnUint32 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-50 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-51 >> TVersions::Wreck2Reverse [GOOD] >> TVersions::Wreck1 >> BuildStatsHistogram::Five_Five_Serial [GOOD] >> BuildStatsHistogram::Five_Five_Crossed >> DstCreator::KeyColumnsSizeMismatch [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldErase >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-8 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-9 >> TFlatExecutorLeases::BasicsInitialLeaseTimeout [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldNotErase >> TFlatExecutorLeases::BasicsInitialLeaseSleep >> DataShardVolatile::UpsertDependenciesShardsRestart+UseSink [GOOD] >> 
DataShardVolatile::UpsertDependenciesShardsRestart-UseSink >> EraseRowsTests::ConditionalEraseRowsShouldNotEraseModifiedRows >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-3 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-4 >> DstCreator::CannotFindColumn [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::KeyColumnsSizeMismatch [GOOD] Test command err: 2025-06-25T14:34:38.010894Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895363955230943:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:38.011219Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001795/r3tmp/tmpRNgW6t/pdisk_1.dat 2025-06-25T14:34:38.677117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:38.677227Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:38.682941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:38.748728Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:38.751628Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895363955230755:2080] 1750862077946664 != 1750862077946667 TClient is connected to server localhost:2695 TServer::EnableGrpc on GrpcPort 18555, node 1 2025-06-25T14:34:38.984618Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:39.148733Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:39.148757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:39.148768Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:39.149884Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2695 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:39.835352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:39.872575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862079904 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862079904 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version... 
(TRUNCATED) 2025-06-25T14:34:39.940714Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:39.940832Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:39.940845Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:39.941493Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:42.240652Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { status: SCHEME_ERROR, issues: } } 2025-06-25T14:34:42.240731Z node 1 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Cannot describe table: status: SCHEME_ERROR, issue: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001795/r3tmp/tmp3zLR2b/pdisk_1.dat 2025-06-25T14:34:43.251997Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:43.392403Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895388451596393:2080] 1750862083063201 != 1750862083063204 2025-06-25T14:34:43.412268Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:43.414257Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:43.414330Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:43.425362Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8928 TServer::EnableGrpc on GrpcPort 4584, node 2 2025-06-25T14:34:43.800827Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:43.800842Z node 2 :NET_CLASSIFIER 
WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:43.800846Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:43.800921Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8928 2025-06-25T14:34:44.149504Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:44.221805Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:44.242926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:44.384977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862084272 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862084461 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862084272 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862084461 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-25T14:34:44.465131Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:44.465288Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:44.465302Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:44.466045Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:46.706866Z node 2 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862084356, tx_id: 281474976715658 } } } 2025-06-25T14:34:46.707208Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:46.708742Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-25T14:34:46.711605Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862084461 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnNames: "value" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 
UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-25T14:34:46.711828Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Key columns size mismatch: expected: 1, got: 2 >> Viewer::SelectStringWithNoBase64Encoding >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgInt8Milliseconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::CannotFindColumn [GOOD] Test command err: 2025-06-25T14:34:39.232273Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895370709473349:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:39.249135Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001784/r3tmp/tmp9hvZjb/pdisk_1.dat 2025-06-25T14:34:39.901536Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table 
profiles were not loaded 2025-06-25T14:34:39.908936Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895370709473167:2080] 1750862079170832 != 1750862079170835 2025-06-25T14:34:39.919820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:39.919915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:39.922283Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10555 TServer::EnableGrpc on GrpcPort 25741, node 1 2025-06-25T14:34:40.207167Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:40.297627Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:40.297660Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:40.297678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:40.297811Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10555 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:41.004021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:41.040535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:34:41.048704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750862081185 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862081073 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750862081185 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... (TRUNCATED) 2025-06-25T14:34:41.324701Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:41.324854Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:41.324879Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:41.326239Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:43.378221Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: 
[DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862081185, tx_id: 281474976710658 } } } 2025-06-25T14:34:43.378550Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:43.380217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:43.381027Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-25T14:34:43.381051Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-25T14:34:43.420716Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-25T14:34:43.420745Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750862083460 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-06-25T14:34:44.377324Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895393442293196:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:44.381109Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001784/r3tmp/tmpWS4E8b/pdisk_1.dat 2025-06-25T14:34:44.548274Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:44.668993Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:44.669087Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:44.670667Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server lo ... 
".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:45.296496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:45.305379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:34:45.310546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:34:45.356267Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:45.391625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862085343 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862085476 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862085343 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862085476 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-25T14:34:45.497831Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:45.497943Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:34:45.497956Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-25T14:34:45.498508Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-25T14:34:48.094629Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750862085420, tx_id: 281474976715658 } } } 2025-06-25T14:34:48.094966Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-25T14:34:48.096476Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges)} 2025-06-25T14:34:48.097512Z node 2 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750862085476 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 
RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-25T14:34:48.097779Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Cannot find column: name: value >> Viewer::FuzzySearcherLimit3OutOf4 [GOOD] >> Viewer::FuzzySearcherLimit4OutOf4 [GOOD] >> Viewer::FuzzySearcherLongWord [GOOD] >> Viewer::FuzzySearcherPriority [GOOD] >> Viewer::JsonAutocompleteColumns >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64 [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldErase [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks |80.1%| [TA] $(B)/ydb/core/tx/replication/controller/ut_dst_creator/test-results/unittest/{meta.json ... results_accumulator.log} |80.1%| [TA] $(B)/ydb/core/persqueue/ut/ut_with_sdk/test-results/unittest/{meta.json ... results_accumulator.log} >> DataShardVolatile::DistributedUpsertRestartAfterPrepare+UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPrepare-UseSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-9 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-10 |80.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |80.1%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_worker/test-results/unittest/{meta.json ... results_accumulator.log} |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write >> TFlatExecutorLeases::BasicsInitialLeaseSleep [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseSleepTimeout |80.1%| [TA] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/test-results/unittest/{meta.json ... 
results_accumulator.log} >> Viewer::LevenshteinDistance [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberSeconds >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-52 [GOOD] >> Viewer::JsonStorageListingV2 >> EraseRowsTests::EraseRowsShouldSuccess [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53 >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors >> Viewer::JsonAutocompleteStartOfDatabaseName >> BuildStatsHistogram::Five_Five_Crossed [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-33 [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] >> BuildStatsHistogram::Single_Small_2_Levels >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-34 >> BuildStatsHistogram::Single_Small_2_Levels [GOOD] >> Viewer::TabletMerging |80.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> BuildStatsHistogram::Single_Small_2_Levels_3_Buckets [GOOD] |80.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-51 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-52 >> BuildStatsHistogram::Single_Small_1_Level |80.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |80.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write >> BuildStatsHistogram::Single_Small_1_Level [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldEraseOnUint32 [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldNotEraseModifiedRows [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-9 [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSchemeTx |80.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-15 >> DistributedEraseTests::ConditionalEraseRowsShouldNotErase [GOOD] >> Viewer::PDiskMerging >> DistributedEraseTests::ConditionalEraseRowsShouldSuccessOnShardedIndex [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseName >> BuildStatsHistogram::Single_Small_0_Levels >> DistributedEraseTests::ConditionalEraseRowsShouldErase [GOOD] >> EraseRowsTests::EraseRowsFromReplicatedTable >> DistributedEraseTests::ConditionalEraseRowsShouldNotEraseModifiedRows >> Viewer::TabletMergingPacked >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-4 [GOOD] >> DistributedEraseTests::ConditionalEraseRowsCheckLimits >> Viewer::PDiskMerging [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnVariousErrors >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-5 >> BuildStatsHistogram::Single_Small_0_Levels [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_2_Levels >> Viewer::SelectStringWithBase64Encoding >> TFlatExecutorLeases::BasicsInitialLeaseSleepTimeout [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_2_Levels [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_2_Levels_3_Buckets >> TFlatTableDatetime::TestDate >> BuildStatsHistogram::Three_Mixed_Small_2_Levels_3_Buckets [GOOD] >> TFlatTableDatetime::TestDate [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundSnapshot >> BuildStatsHistogram::Three_Mixed_Small_1_Level >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundSnapshot [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_1_Level [GOOD] >> 
TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotToRegular [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen1 >> BuildStatsHistogram::Three_Mixed_Small_0_Levels >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen1 [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionToRegular >> BuildStatsHistogram::Three_Mixed_Small_0_Levels [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnTimestamp64 [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionToRegular [GOOD] Test command err: 2025-06-25T14:34:43.008528Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:43.008681Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:43.008746Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015ae/r3tmp/tmpSj38k9/pdisk_1.dat 2025-06-25T14:34:43.330143Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:43.333326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:43.391548Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:43.396956Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862079999783 != 1750862079999787 2025-06-25T14:34:43.446148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:43.446285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:43.458103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:43.556349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:43.610683Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:34:43.610953Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:43.663824Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:43.663990Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:43.665765Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:43.665873Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:43.665941Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:43.666346Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:43.666532Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:43.666611Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:34:43.677702Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:43.721648Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:43.721892Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:43.722012Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:34:43.722049Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:43.722100Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:43.722141Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.722680Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:43.722774Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:43.722826Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.722873Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:43.722913Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:43.722949Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.723323Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:34:43.723503Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:43.723749Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:34:43.723845Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:34:43.725517Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:43.736934Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:43.737059Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:43.893406Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:34:43.902638Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:34:43.902745Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.903788Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.903845Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:43.903893Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:43.904176Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:43.906895Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:43.907687Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.907776Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:43.910017Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:43.910540Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:43.912798Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:43.912859Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.913438Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:43.913525Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.914357Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.914413Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:43.914510Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:43.914588Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:43.914662Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:43.914775Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.919387Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:43.921591Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:43.921680Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:43.922655Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:43.961347Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:43.961461Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: ... 4:49.734200Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:49.734247Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:49.734546Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:49.734698Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:49.748466Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:49.748580Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:49.749168Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:49.749727Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:49.760922Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:49.761009Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:49.761527Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:49.761609Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:49.762830Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:49.771324Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:49.771401Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:49.771463Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:49.771542Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:49.771605Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:49.771717Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:49.774136Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:49.774240Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:49.784929Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:49.864254Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:49.864382Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:703:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:49.864461Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:49.882414Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:49.897117Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:49.951691Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:50.073459Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:50.082506Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:34:50.121656Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:50.396664Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykr6kky3ww1e0c79jaq9s0m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjI1MTU3OC1hNmY0YjExZi1kMGViMDY1LTMwOWJjMWNi, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:50.406237Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:809:2640], serverId# [2:810:2641], sessionId# [0:0:0] 2025-06-25T14:34:50.406773Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-25T14:34:50.406976Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-25T14:34:50.418088Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:50.446233Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:817:2647], serverId# [2:818:2648], sessionId# [0:0:0] 2025-06-25T14:34:50.447414Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:50.459653Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:50.459759Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:50.460059Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:50.460112Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-25T14:34:50.460465Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:50.460525Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:50.460585Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:50.460655Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:50.460755Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:817:2647], serverId# [2:818:2648], sessionId# [0:0:0] 2025-06-25T14:34:50.461760Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:50.462157Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:50.462380Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:50.462434Z 
node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:50.462484Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-25T14:34:50.462741Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:50.462814Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:50.463863Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-25T14:34:50.464104Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:34:50.464228Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-25T14:34:50.464283Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-25T14:34:50.465911Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:50.465964Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-25T14:34:50.466345Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:50.466405Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:50.466444Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-25T14:34:50.466572Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:50.466626Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:50.466667Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> Viewer::JsonAutocompleteEmpty >> TAsyncIndexTests::CdcAndSplitWithReboots[TabletReboots] >> BuildStatsHistogram::Mixed_Groups_History >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen2 >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen2 [GOOD] >> BuildStatsHistogram::Mixed_Groups_History [GOOD] >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors [GOOD] >> Viewer::TabletMergingPacked [GOOD] >> BuildStatsHistogram::Serial_Groups_History >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotPriorityByTime >> Viewer::VDiskMerging >> BuildStatsHistogram::Serial_Groups_History [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotPriorityByTime [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionPriorityByTime >> BuildStatsHistogram::Benchmark >> Viewer::Cluster10000Tablets >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks [GOOD] >> 
TExportToS3Tests::ShouldExcludeBackupTableFromStats [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionPriorityByTime [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberSeconds [GOOD] >> BuildStatsHistogram::Benchmark [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_Default >> BuildStatsHistogram::Many_Mixed >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMilliSeconds ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] Test command err: 2025-06-25T14:34:42.416384Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:42.416512Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:42.416574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015c0/r3tmp/tmp0CycpJ/pdisk_1.dat 2025-06-25T14:34:42.743606Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:42.746799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:42.789021Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:42.794176Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862079395063 != 1750862079395067 2025-06-25T14:34:42.840758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:42.840911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:42.857051Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:42.943164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:42.985723Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:34:42.985979Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:43.035393Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:43.035568Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:43.037275Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:43.037396Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:43.037469Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:43.037885Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:43.038035Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:43.038109Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:34:43.048846Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:43.082874Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:43.083067Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:43.083167Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:34:43.083213Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:43.083265Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:43.083313Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.083790Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:43.083889Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:43.083946Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.083987Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:43.084024Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:43.084061Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.084456Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:34:43.084668Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:43.084911Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:34:43.085000Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:34:43.086661Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:43.097465Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:43.097597Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:43.254568Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:34:43.262301Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:34:43.262395Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.263404Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.263462Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:43.263527Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:43.263807Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:43.263964Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:43.264637Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.264710Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:43.266651Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:43.267114Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:43.268457Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:43.268511Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.269029Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:43.269118Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.269857Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.269899Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:43.269966Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:43.270028Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:43.270079Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:43.270178Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.274513Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:43.276557Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:43.276637Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:43.277570Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:43.305373Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:43.305521Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: ... main_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037892 2025-06-25T14:34:51.837378Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037893 2025-06-25T14:34:51.837433Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:51.880304Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037894 actor [2:1213:2991] 2025-06-25T14:34:51.880542Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:51.891169Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:51.891358Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:51.893128Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037894 2025-06-25T14:34:51.893210Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037894 2025-06-25T14:34:51.893275Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037894 2025-06-25T14:34:51.893594Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:51.893733Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:51.893809Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037894 persisting started state actor id [2:1229:2991] in generation 1 2025-06-25T14:34:51.928930Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:51.929010Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037894 2025-06-25T14:34:51.929119Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037894 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:51.929227Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037894, actorId: [2:1231:3001] 2025-06-25T14:34:51.929275Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037894 2025-06-25T14:34:51.929307Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037894, state: WaitScheme 2025-06-25T14:34:51.929338Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-25T14:34:51.929695Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037894 2025-06-25T14:34:51.929790Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037894 2025-06-25T14:34:51.929892Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 
72075186224037894 2025-06-25T14:34:51.929929Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:51.929966Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037894 TxInFly 0 2025-06-25T14:34:51.930004Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-06-25T14:34:51.930088Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1212:2990], serverId# [2:1220:2995], sessionId# [0:0:0] 2025-06-25T14:34:51.930218Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-25T14:34:51.930440Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037894 txId 281474976715663 ssId 72057594046644480 seqNo 2:7 2025-06-25T14:34:51.930527Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715663 at tablet 72075186224037894 2025-06-25T14:34:51.931331Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037894 2025-06-25T14:34:51.942256Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-25T14:34:51.942411Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037894 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:52.080436Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1237:3007], serverId# [2:1239:3009], sessionId# [0:0:0] 2025-06-25T14:34:52.081093Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715663 at step 4000 at tablet 72075186224037894 { Transactions { TxId: 281474976715663 AckTo { RawX1: 0 RawX2: 0 } } Step: 4000 MediatorID: 72057594046382081 TabletID: 72075186224037894 } 2025-06-25T14:34:52.081143Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-25T14:34:52.098536Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2025-06-25T14:34:52.098635Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:52.098683Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [4000:281474976715663] in PlanQueue unit at 72075186224037894 2025-06-25T14:34:52.098971Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037894 loaded tx from db 4000:281474976715663 keys extracted: 0 2025-06-25T14:34:52.099125Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:52.099906Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2025-06-25T14:34:52.099980Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037894 tableId# [OwnerId: 72057594046644480, LocalPathId: 8] schema version# 1 2025-06-25T14:34:52.100399Z node 2 
:TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037894 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:52.108041Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:52.113340Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037894 time 3500 2025-06-25T14:34:52.113406Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-25T14:34:52.128956Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037894 step# 4000} 2025-06-25T14:34:52.129050Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-06-25T14:34:52.133421Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037894 2025-06-25T14:34:52.133531Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T14:34:52.133770Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-25T14:34:52.133946Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037891 2025-06-25T14:34:52.134031Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037892 2025-06-25T14:34:52.134088Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037893 2025-06-25T14:34:52.134144Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:52.134256Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-06-25T14:34:52.134297Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037894 2025-06-25T14:34:52.134378Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037894 2025-06-25T14:34:52.134441Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [4000 : 281474976715663] from 72075186224037894 at tablet 72075186224037894 send result to client [2:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:52.134492Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037894 Sending notify to schemeshard 72057594046644480 txId 281474976715663 state Ready TxInFly 0 2025-06-25T14:34:52.134590Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-25T14:34:52.135747Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got 
TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037894 coordinator 72057594046316545 last step 0 next step 4000 2025-06-25T14:34:52.141669Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715663 datashard 72075186224037894 state Ready 2025-06-25T14:34:52.141746Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037894 Got TEvSchemaChangedResult from SS at 72075186224037894 2025-06-25T14:34:52.223732Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1266:3030], serverId# [2:1267:3031], sessionId# [0:0:0] 2025-06-25T14:34:52.223997Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1266:3030], serverId# [2:1267:3031], sessionId# [0:0:0] 2025-06-25T14:34:52.248866Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1271:3035], serverId# [2:1272:3036], sessionId# [0:0:0] 2025-06-25T14:34:52.249099Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1271:3035], serverId# [2:1272:3036], sessionId# [0:0:0] 2025-06-25T14:34:52.277443Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1276:3040], serverId# [2:1277:3041], sessionId# [0:0:0] 2025-06-25T14:34:52.277739Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1276:3040], serverId# [2:1277:3041], sessionId# [0:0:0] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54 >> TExportToS3Tests::ShouldRestartOnScanErrors >> Viewer::JsonAutocompleteColumns [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-10 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-34 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-11 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-35 >> RetryPolicy::TWriteSession_SeqNoShift [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks [GOOD] Test command err: 2025-06-25T14:34:42.910666Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:42.910800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:42.910853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015bc/r3tmp/tmp8AH588/pdisk_1.dat 2025-06-25T14:34:43.272059Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:43.275273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:43.340854Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:43.354019Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862079931345 != 1750862079931349 2025-06-25T14:34:43.404386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:43.404556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:43.418694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:43.515808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:43.599407Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:649:2547] 2025-06-25T14:34:43.599654Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:43.647883Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:43.648045Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:43.649648Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:43.649809Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:43.649858Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:43.650207Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:43.650541Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:43.650603Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:679:2547] in generation 1 2025-06-25T14:34:43.651938Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:651:2549] 2025-06-25T14:34:43.652136Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:43.661391Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:43.661519Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:43.662886Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T14:34:43.662959Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T14:34:43.663004Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T14:34:43.663308Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:43.663568Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:43.663630Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:692:2549] in generation 1 2025-06-25T14:34:43.664890Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:657:2551] 2025-06-25T14:34:43.665071Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:43.673885Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:43.673996Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:43.675265Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-25T14:34:43.675346Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-25T14:34:43.675390Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-25T14:34:43.675720Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:43.675829Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:43.675883Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:700:2551] in generation 1 2025-06-25T14:34:43.687008Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:43.722582Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:43.722788Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:43.722897Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:704:2578] 2025-06-25T14:34:43.722940Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:43.722975Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:43.723063Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.723400Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:43.723437Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T14:34:43.723501Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:43.723554Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:705:2579] 2025-06-25T14:34:43.723574Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T14:34:43.723595Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T14:34:43.723617Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:34:43.723959Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:43.723992Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-25T14:34:43.724035Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:43.724077Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:706:2580] 2025-06-25T14:34:43.724095Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-25T14:34:43.724113Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-25T14:34:43.724133Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:34:43.724301Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:43.724528Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:43.724721Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.724766Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:43.724808Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:43.724851Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.724892Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-25T14:34:43.724944Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-25T14:34:43.725312Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:638:2542], serverId# [1:664:2555], sessionId# 
[0:0:0] 2025-06-25T14:34:43.725363Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:34:43.725391Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:43.725439Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-25T14:34:43.725478Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:34:43.725519Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-06-25T14:34:43.725580Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-06-25T14:34:43.725712Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:43.725928Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 720751862240 ... mplete at 72075186224037888 2025-06-25T14:34:56.014745Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:56.014790Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:56.014870Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:56.014932Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:56.014976Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:56.015066Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.020068Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:56.022157Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:56.022230Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:56.022630Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:56.058669Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:56.058770Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:704:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:56.058868Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:56.063722Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:56.071905Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:56.121288Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:56.249215Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:56.255105Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:34:56.291843Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:56.400641Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykr6sns32dxhrcc8007mp6b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Y2RjZmI5OTUtNmU4MWE5NS1kZDU3OGMyYi05NDEwNDg0Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:56.403554Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:809:2640], serverId# [3:810:2641], sessionId# [0:0:0] 2025-06-25T14:34:56.404052Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-25T14:34:56.404240Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=3 2025-06-25T14:34:56.417303Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.647736Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykr6t1ney9g6bc1esw0d3ac, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Nzg1YzMyYTItY2U4MTI0YjEtODY0NTRkOGEtZmJhN2ZhYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:56.650444Z node 3 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715661, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] { items { uint64_value: 0 } } 2025-06-25T14:34:56.701344Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:849:2672], serverId# [3:850:2673], sessionId# [0:0:0] 2025-06-25T14:34:56.702593Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:56.714100Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:56.714215Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.714308Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-25T14:34:56.715143Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-25T14:34:56.715221Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.715396Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:56.715449Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037888 2025-06-25T14:34:56.715755Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:56.715808Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: 
GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:56.715865Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:56.715931Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:56.716027Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:849:2672], serverId# [3:850:2673], sessionId# [0:0:0] 2025-06-25T14:34:56.790607Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykr6tacdn6c6b4pk0psxp1g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Nzg1YzMyYTItY2U4MTI0YjEtODY0NTRkOGEtZmJhN2ZhYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:56.793738Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:6] at 72075186224037888 2025-06-25T14:34:56.793926Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=6; 2025-06-25T14:34:56.804113Z node 3 :TX_DATASHARD INFO: datashard_write_operation.cpp:707: Write transaction 6 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-06-25T14:34:56.804450Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 6 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-25T14:34:56.804709Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 6 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-25T14:34:56.804812Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.805110Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [3:871:2646], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:816:2646]Got LOCKS BROKEN for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:871:2646].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-25T14:34:56.805706Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:864:2646], SessionActorId: [3:816:2646], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:816:2646]. isRollback=0 2025-06-25T14:34:56.806117Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=Nzg1YzMyYTItY2U4MTI0YjEtODY0NTRkOGEtZmJhN2ZhYTQ=, ActorId: [3:816:2646], ActorState: ExecuteState, TraceId: 01jykr6tacdn6c6b4pk0psxp1g, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:865:2646] from: [3:864:2646] 2025-06-25T14:34:56.806366Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:865:2646] TxId: 281474976715662. Ctx: { TraceId: 01jykr6tacdn6c6b4pk0psxp1g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Nzg1YzMyYTItY2U4MTI0YjEtODY0NTRkOGEtZmJhN2ZhYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T14:34:56.806742Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=Nzg1YzMyYTItY2U4MTI0YjEtODY0NTRkOGEtZmJhN2ZhYTQ=, ActorId: [3:816:2646], ActorState: ExecuteState, TraceId: 01jykr6tacdn6c6b4pk0psxp1g, Create QueryResponse for error on request, msg: 2025-06-25T14:34:56.807690Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:7] at 72075186224037888 2025-06-25T14:34:56.807755Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip empty write operation for [0:7] at 72075186224037888 2025-06-25T14:34:56.807949Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 |80.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> RetryPolicy::RetryWithBatching ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnPgTimestamp [GOOD] Test command err: 2025-06-25T14:34:42.853565Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:42.853683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:42.853744Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015ce/r3tmp/tmp98zW8S/pdisk_1.dat 2025-06-25T14:34:43.242739Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:43.245699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:43.293009Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:43.298476Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862079469853 != 1750862079469857 2025-06-25T14:34:43.344233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:43.344399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:43.355719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:43.451949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:43.508452Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:34:43.508724Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:43.555200Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:43.555339Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:43.557048Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:43.557146Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:43.557202Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:43.557576Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:43.557726Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:43.557813Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:34:43.572826Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:43.604996Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:43.605188Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:43.605297Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:34:43.605340Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:43.605398Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:43.605444Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.605938Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:43.606043Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:43.606117Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.606164Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:43.606206Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:43.606248Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.606649Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:34:43.606806Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:43.607018Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:34:43.607109Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:34:43.608839Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:43.620489Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:43.620580Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:43.780800Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:34:43.786004Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:34:43.786084Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.787041Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.787093Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:43.787139Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:43.787383Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:43.787549Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:43.788105Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.788186Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:43.790161Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:43.790589Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:43.792148Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:43.792197Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.792734Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:43.792803Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.793407Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.793450Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:43.793536Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:43.793592Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:43.793640Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:43.793724Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.798641Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:43.800394Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:43.800476Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:43.801294Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:43.847912Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:43.848075Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: ... .943124Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:54.943182Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:54.948802Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:54.949102Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:54.950148Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:54.950242Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:54.950887Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:54.951443Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:54.956995Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:54.957075Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:54.957897Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:54.957985Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:54.959369Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:54.959419Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:54.959474Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:54.959556Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:54.959640Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:54.959755Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:54.961955Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:54.964108Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:54.964195Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:54.964960Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:55.012528Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:55.012640Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:704:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:55.012716Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:55.018625Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:55.026274Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:55.085294Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:55.206392Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:55.209947Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:34:55.247624Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:55.394273Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykr6rn291p0regzhc53n39z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWJlZDMxOTctMjRlMWRhZGItNGI3MDU2N2QtMmI1NGYxODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:55.399383Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:809:2640], serverId# [3:810:2641], sessionId# [0:0:0] 2025-06-25T14:34:55.399777Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-25T14:34:55.399940Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-25T14:34:55.411807Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:55.450347Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:817:2647], serverId# [3:818:2648], sessionId# [0:0:0] 2025-06-25T14:34:55.451752Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:55.463575Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:55.463673Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:55.463970Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:55.464031Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-25T14:34:55.464426Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:55.464517Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:55.464583Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:55.464662Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:55.464788Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:817:2647], serverId# [3:818:2648], sessionId# [0:0:0] 2025-06-25T14:34:55.465892Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:55.466272Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:55.466503Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
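The STATUS_LOCKS_BROKEN / "Transaction locks invalidated" sequence for txid 281474976715662 earlier in this output is the optimistic-lock conflict that a client observes as an ABORTED status; the expected client behavior is to restart the whole transaction from a fresh read. The sketch below is a hypothetical, SDK-agnostic retry loop in plain Python: run_tx, TxAborted and the backoff constants are illustrative stand-ins and not YDB SDK calls.

import random
import time

class TxAborted(Exception):
    """Stand-in for the ABORTED / 'Transaction locks invalidated' status seen above."""

def run_with_retries(run_tx, max_attempts=5):
    # run_tx is a hypothetical callable that executes one optimistic
    # (serializable) transaction and raises TxAborted on a lock conflict.
    for attempt in range(1, max_attempts + 1):
        try:
            return run_tx()
        except TxAborted:
            if attempt == max_attempts:
                raise
            # Back off with jitter, then re-read and re-apply the transaction
            # from scratch: the previous read snapshot is no longer valid.
            time.sleep(min(1.0, 0.05 * 2 ** attempt) * random.random())

if __name__ == "__main__":
    state = {"tries": 0}

    def flaky_tx():
        # Simulates a transaction that conflicts twice, then commits.
        state["tries"] += 1
        if state["tries"] < 3:
            raise TxAborted()
        return "committed"

    print(run_with_retries(flaky_tx))  # -> "committed" after two retries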
2025-06-25T14:34:55.470859Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:55.470982Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-25T14:34:55.471345Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:55.471439Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:55.472227Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-25T14:34:55.472624Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 48, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:34:55.472788Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-25T14:34:55.472850Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-25T14:34:55.539042Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:55.539140Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-25T14:34:55.539675Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:55.539721Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:55.539766Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-25T14:34:55.539912Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:55.539986Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:55.540038Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_Default [GOOD] >> Viewer::VDiskMerging [GOOD] >> EraseRowsTests::EraseRowsFromReplicatedTable [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-52 [GOOD] >> Viewer::TenantInfo5kkTablets >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-53 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonAutocompleteColumns [GOOD] Test command err: 2025-06-25T14:34:56.126573Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:121:2167], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:56.126841Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:34:56.126985Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:34:56.444858Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 16135, node 1 TClient is connected to server localhost:9926 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::EraseRowsShouldFailOnVariousErrors [GOOD] Test command err: 2025-06-25T14:34:49.727706Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:49.727862Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:49.727913Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015a4/r3tmp/tmpgHimdL/pdisk_1.dat 2025-06-25T14:34:50.136486Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:50.148681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:50.231182Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:50.236216Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862086360292 != 1750862086360296 2025-06-25T14:34:50.288076Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:50.288231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:50.301907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:50.402192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:50.478918Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:34:50.479246Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:50.525226Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:50.525368Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:50.527388Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:50.527489Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:50.527542Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:50.527888Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:50.528027Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:50.528099Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:34:50.540875Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:50.571652Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:50.571904Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:50.572068Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:34:50.572113Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:50.572166Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:50.572214Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:50.572820Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:50.572933Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:50.572996Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:50.573038Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:50.573100Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:50.573164Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:50.573593Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:34:50.573757Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:50.574023Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:34:50.574136Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:34:50.576018Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:50.589508Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:50.589668Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:50.755230Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:34:50.760733Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:34:50.760846Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:50.761979Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:50.762047Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:50.762105Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:50.762509Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:50.762719Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:50.763394Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:50.763510Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:50.765984Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:50.766545Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:50.769577Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:50.769641Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:50.770308Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:50.770412Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:50.771110Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:50.771146Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:50.771199Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:50.771259Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:50.771307Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:50.771411Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:50.775253Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:50.776910Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:50.776991Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:50.777774Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:50.823557Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:50.823703Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: ... shard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:56.288207Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:56.288259Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.288708Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:56.288809Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:56.288928Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:56.288970Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:56.289009Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:56.289050Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:56.289427Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:624:2529], serverId# [2:634:2535], sessionId# [0:0:0] 2025-06-25T14:34:56.289583Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:56.289809Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:34:56.289889Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:34:56.291366Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:56.302653Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:56.302783Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:56.461786Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:661:2551], serverId# [2:662:2552], sessionId# [0:0:0] 2025-06-25T14:34:56.463317Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:34:56.463385Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.463586Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-25T14:34:56.463629Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:56.463673Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:56.463931Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:56.464054Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:56.466568Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:56.466665Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:56.467129Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:56.467569Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:56.469280Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:56.469338Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.469808Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:56.469884Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:56.474751Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:56.475195Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:56.475252Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:56.475303Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:56.475365Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:56.475523Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:56.475614Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.477682Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:56.477747Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 
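The repeated TTxDirectBase(48) Execute/Complete pairs below are the datashard applying erase-rows requests, and the ConditionalEraseRows tests in this run exercise the TTL-style variant: rows whose timestamp column has fallen behind a cutoff are deleted. The following is a minimal pure-Python illustration of that condition only; the column name and interval are assumptions for the example, and this is not the datashard implementation.

from datetime import datetime, timedelta, timezone

def conditional_erase(rows, ttl=timedelta(hours=1), now=None):
    """Keep only rows whose 'created_at' is still within the TTL window.

    rows: iterable of dicts with a timezone-aware 'created_at' datetime
    (hypothetical column name). Returns (kept_rows, erased_count).
    """
    now = now or datetime.now(timezone.utc)
    cutoff = now - ttl
    kept = [r for r in rows if r["created_at"] > cutoff]
    return kept, len(rows) - len(kept)

if __name__ == "__main__":
    now = datetime.now(timezone.utc)
    rows = [
        {"id": 1, "created_at": now - timedelta(minutes=5)},  # fresh, kept
        {"id": 2, "created_at": now - timedelta(hours=2)},    # expired, erased
    ]
    kept, erased = conditional_erase(rows, ttl=timedelta(hours=1), now=now)
    print(erased, [r["id"] for r in kept])  # -> 1 [1]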
2025-06-25T14:34:56.478634Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:56.551451Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:696:2578], serverId# [2:697:2579], sessionId# [0:0:0] 2025-06-25T14:34:56.551641Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:56.581106Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:56.581220Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.581594Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:696:2578], serverId# [2:697:2579], sessionId# [0:0:0] 2025-06-25T14:34:56.627300Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:702:2584], serverId# [2:703:2585], sessionId# [0:0:0] 2025-06-25T14:34:56.627527Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:56.627764Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:56.627820Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.628065Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:702:2584], serverId# [2:703:2585], sessionId# [0:0:0] 2025-06-25T14:34:56.681827Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:707:2589], serverId# [2:708:2590], sessionId# [0:0:0] 2025-06-25T14:34:56.682046Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:56.682289Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:56.682379Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.682624Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:707:2589], serverId# [2:708:2590], sessionId# [0:0:0] 2025-06-25T14:34:56.710717Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:712:2594], serverId# [2:713:2595], sessionId# [0:0:0] 2025-06-25T14:34:56.710935Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:56.711167Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:56.711221Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.711463Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:712:2594], serverId# [2:713:2595], sessionId# [0:0:0] 2025-06-25T14:34:56.739431Z node 2 
:TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:717:2599], serverId# [2:718:2600], sessionId# [0:0:0] 2025-06-25T14:34:56.739648Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:56.740024Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:56.740088Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.740372Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:717:2599], serverId# [2:718:2600], sessionId# [0:0:0] 2025-06-25T14:34:56.768225Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:722:2604], serverId# [2:723:2605], sessionId# [0:0:0] 2025-06-25T14:34:56.768472Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:56.768723Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:56.768778Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.769020Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:722:2604], serverId# [2:723:2605], sessionId# [0:0:0] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-15 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-16 >> TExportToS3Tests::ShouldRestartOnScanErrors [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSchemeTx [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnDeadShard >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True [GOOD] |80.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |80.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan >> Viewer::JsonAutocompleteStartOfDatabaseName [GOOD] >> KqpSysColV1::StreamSelectRange >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-5 [GOOD] >> DataShardVolatile::UpsertDependenciesShardsRestart-UseSink [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPrepare-UseSink [GOOD] >> TPQTest::TestWriteOffsetWithBigMessage [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberMilliSeconds [GOOD] >> TExportToS3Tests::ShouldRetryAtFinalStage [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseName [GOOD] >> DistributedEraseTests::ConditionalEraseRowsCheckLimits [GOOD] >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54 [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnDeadShard [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-16 [GOOD] >> Viewer::JsonAutocompleteEmpty [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-53 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-11 [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit [GOOD] >> DistributedEraseTests::ConditionalEraseRowsShouldNotEraseModifiedRows [GOOD] >> 
Viewer::JsonStorageListingV1 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-35 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-6 >> DataShardVolatile::DistributedUpsertRestartAfterPlan >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |80.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |80.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |80.2%| [LD] {RESULT} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |80.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |80.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::EraseRowsFromReplicatedTable [GOOD] Test command err: 2025-06-25T14:34:51.399689Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:51.399828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:51.399889Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00157e/r3tmp/tmpyYBS8U/pdisk_1.dat 2025-06-25T14:34:52.065686Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:52.070735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:52.168036Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:52.197444Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862088148976 != 1750862088148980 2025-06-25T14:34:52.252705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:52.252880Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:52.264502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:52.355574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:52.400167Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:34:52.400459Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:52.446367Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:52.446523Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:52.448190Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:52.448295Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:52.448451Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:52.448814Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:52.448988Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:52.449073Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:34:52.460645Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:52.511164Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:52.511402Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:52.511520Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:34:52.511555Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:52.511608Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:52.511718Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:52.512287Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:52.512429Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:52.512503Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:52.512574Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:52.512619Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:52.512684Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:52.513069Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:34:52.513221Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:52.513494Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:34:52.513593Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:34:52.515320Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:52.527019Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:52.527132Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:52.681846Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:34:52.690847Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:34:52.690955Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:52.691870Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:52.691923Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:52.691973Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:52.692225Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:52.692402Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:52.693019Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:52.693089Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:52.707517Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:52.708047Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:52.709773Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:52.709827Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:52.710351Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:52.710422Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:52.711877Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:52.711922Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:52.712220Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:52.712284Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:52.712364Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:52.712468Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:52.716895Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:52.718725Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:52.718796Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:52.719644Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:52.758942Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:52.759084Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: ... TablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:58.077363Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:58.077505Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00157e/r3tmp/tmpy5Xtmr/pdisk_1.dat 2025-06-25T14:34:58.368549Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T14:34:58.370126Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:58.398022Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:58.400094Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750862094247251 != 1750862094247255 2025-06-25T14:34:58.454213Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:58.454369Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:58.465844Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:58.554800Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:58.585647Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:627:2531] 2025-06-25T14:34:58.585898Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:58.641741Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:58.641892Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:58.643736Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:58.643828Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:58.643894Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:58.644251Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:58.644433Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:58.644516Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [2:643:2531] in generation 1 2025-06-25T14:34:58.658589Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:58.658673Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:58.658780Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:58.658864Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:645:2541] 2025-06-25T14:34:58.658908Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:58.658955Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:58.658994Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:58.659362Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:58.659455Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:58.659661Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:58.659712Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:58.659757Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:58.659806Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:58.660182Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:624:2529], serverId# [2:634:2535], sessionId# [0:0:0] 2025-06-25T14:34:58.660373Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:58.660573Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:34:58.660660Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:34:58.662267Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:58.672975Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:58.673100Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:58.846183Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:661:2551], serverId# [2:662:2552], sessionId# [0:0:0] 2025-06-25T14:34:58.847871Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:34:58.847939Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:58.848159Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:58.848209Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:58.848261Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:58.848562Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:58.848709Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:58.849090Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:58.849160Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:58.849614Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:58.850041Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:58.861990Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:58.862067Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:58.862633Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:58.862734Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:58.863997Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:58.864613Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:58.864665Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:58.864728Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:58.864799Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:58.864864Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 
0 2025-06-25T14:34:58.864981Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:58.867024Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:58.867115Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:58.869224Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:58.928165Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:696:2578], serverId# [2:697:2579], sessionId# [0:0:0] 2025-06-25T14:34:58.928329Z node 2 :TX_DATASHARD NOTICE: datashard__op_rows.cpp:168: Rejecting erase request on datashard: tablet# 72075186224037888, error# Can't execute erase at replicated table 2025-06-25T14:34:58.928508Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:696:2578], serverId# [2:697:2579], sessionId# [0:0:0] >> Viewer::JsonAutocompleteSimilarDatabaseNameWithLimit >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False >> KqpSysColV1::StreamSelectRange [GOOD] >> DataShardVolatile::NotCachingAbortingDeletes+UseSink >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-54 >> DistributedEraseTests::ConditionalEraseRowsShouldNotFailOnMissingRows >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-6 [GOOD] >> TPQTest::TestTimeRetention >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNameWithLimit [GOOD] >> Viewer::JsonAutocompleteEndOfDatabaseName >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-54 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-17 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-36 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-55 >> DistributedEraseTests::ConditionalEraseRowsShouldNotFailOnMissingRows [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNamePOST >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-7 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-12 >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False [GOOD] >> DataShardVolatile::DistributedUpsertRestartAfterPlan [GOOD] >> Viewer::Cluster10000Tablets [GOOD] >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex [GOOD] >> Viewer::SelectStringWithNoBase64Encoding [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-55 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-7 [GOOD] >> DataShardVolatile::NotCachingAbortingDeletes+UseSink [GOOD] >> Viewer::JsonAutocompleteEndOfDatabaseName [GOOD] >> TPQTest::TestTimeRetention [GOOD] >> Viewer::JsonAutocompleteScheme >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-8 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-17 [GOOD] >> Viewer::FuzzySearcherLimit1OutOf4 [GOOD] >> DataShardVolatile::CompactedVolatileChangesCommit >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-36 [GOOD] >> 
TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_EnableLocalDBFlatIndex_False >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-55 [GOOD] >> Viewer::FuzzySearcherLimit2OutOf4 [GOOD] >> Viewer::ExecuteQueryDoesntExecuteSchemeOperationsInsideTransation >> DataShardVolatile::NotCachingAbortingDeletes-UseSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-12 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnUint64NanoSeconds [GOOD] Test command err: 2025-06-25T14:34:42.446970Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:42.447107Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:42.447167Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015d0/r3tmp/tmpndCAsg/pdisk_1.dat 2025-06-25T14:34:42.882946Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:42.886366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:42.945500Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:42.950781Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862079433180 != 1750862079433184 2025-06-25T14:34:43.005174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:43.005320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:43.017033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:43.118474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:43.190109Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:34:43.190449Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:43.239477Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:43.239623Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:43.241571Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:43.241699Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:43.241762Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:43.242200Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:43.242367Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:43.242445Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:34:43.255425Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:43.369314Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:43.369548Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:43.369689Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:34:43.369753Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:43.369812Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:43.369868Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.370408Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:43.370521Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:43.370590Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.370632Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:43.370679Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:43.370721Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.372235Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:34:43.372451Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:43.372708Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:34:43.372819Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:34:43.374585Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:43.388946Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:43.389058Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:43.566790Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:34:43.575838Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:34:43.575948Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.586140Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.586218Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:43.586272Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:43.586589Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:43.586754Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:43.587453Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:43.587539Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:43.589500Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:43.589940Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:43.591781Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:43.591844Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.599102Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:43.599229Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.600197Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:43.600248Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:43.600334Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:43.600409Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:43.600475Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:43.600581Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:43.614112Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:43.617015Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:43.617126Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:43.618199Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:43.688785Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:43.688970Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: ... .617111Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:49.617166Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:49.617436Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:49.617581Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:49.618121Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:49.618202Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:49.618697Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:49.619157Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:49.625285Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:49.625362Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:49.625875Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:49.625956Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:49.627233Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:49.627808Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:49.627854Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:49.627906Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:49.627974Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [2:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:49.628031Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:49.628125Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:49.630361Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:49.636653Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:49.637932Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:49.726329Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:49.726506Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:703:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:49.726602Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:49.751628Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:49.779028Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:49.851633Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:49.982158Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:49.985696Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:34:50.028110Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:50.217074Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykr6kfp1s0pt4zd01g211v3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTBiODQ5ZTAtZTU4NGU2NWQtMzg0NDlmZGQtN2U5YTlhMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:34:50.219648Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:809:2640], serverId# [2:810:2641], sessionId# [0:0:0] 2025-06-25T14:34:50.220088Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-25T14:34:50.220250Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=4 2025-06-25T14:34:50.241035Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:50.303687Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:817:2647], serverId# [2:818:2648], sessionId# [0:0:0] 2025-06-25T14:34:50.304822Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:34:50.317028Z node 2 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:34:50.317124Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:50.317383Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:50.317436Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-25T14:34:50.317762Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:50.317822Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:50.317877Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:50.317953Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:50.318062Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:817:2647], serverId# [2:818:2648], sessionId# [0:0:0] 2025-06-25T14:34:50.319074Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:50.319463Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:50.319666Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-25T14:34:50.319721Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:50.319778Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-25T14:34:50.320035Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:50.320103Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:50.333053Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-25T14:34:50.333345Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:34:50.333501Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-25T14:34:50.333559Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-25T14:34:50.335261Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:34:50.335322Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-25T14:34:50.335753Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:50.335801Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:34:50.335845Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-25T14:34:50.335996Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:50.336064Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:50.336113Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnDeadShard [GOOD] Test command err: 2025-06-25T14:34:50.291684Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:50.291895Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:50.291959Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001595/r3tmp/tmpn3abCw/pdisk_1.dat 2025-06-25T14:34:50.669366Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:50.673679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:50.753864Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:50.765043Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862086902020 != 1750862086902024 2025-06-25T14:34:50.814539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:50.814700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:50.831927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:50.925466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:51.003544Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:649:2547] 2025-06-25T14:34:51.003836Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:51.066907Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:51.067184Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:51.069056Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:51.069151Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:51.069212Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:51.069663Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:51.070027Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:51.070092Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:679:2547] in generation 1 2025-06-25T14:34:51.071839Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:651:2549] 2025-06-25T14:34:51.072054Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:51.090977Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:51.091151Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:51.092734Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T14:34:51.092811Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T14:34:51.092859Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T14:34:51.093187Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:51.093513Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:51.093592Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:692:2549] in generation 1 2025-06-25T14:34:51.095039Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:657:2551] 2025-06-25T14:34:51.095242Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:51.104424Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:51.104548Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:51.106050Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-25T14:34:51.106123Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-25T14:34:51.106166Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-25T14:34:51.106491Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:51.106683Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:51.106753Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:700:2551] in generation 1 2025-06-25T14:34:51.118463Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:51.152460Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:51.152733Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:51.152880Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:704:2578] 2025-06-25T14:34:51.152928Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:51.152974Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:51.153013Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:51.153422Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:51.153525Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T14:34:51.153607Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:51.153675Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:705:2579] 2025-06-25T14:34:51.153701Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T14:34:51.153724Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T14:34:51.153749Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:34:51.154138Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:51.154175Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-25T14:34:51.154224Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:51.154284Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:706:2580] 2025-06-25T14:34:51.154321Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-25T14:34:51.154361Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-25T14:34:51.154384Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:34:51.154637Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:51.154802Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:51.155025Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:51.155071Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:51.155122Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:51.155167Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:51.155240Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-25T14:34:51.155303Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-25T14:34:51.155816Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:638:2542], serverId# [1:664:2555], sessionId# 
[0:0:0] 2025-06-25T14:34:51.155876Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:34:51.155904Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:51.155964Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-25T14:34:51.156005Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:34:51.156060Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-06-25T14:34:51.156120Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-06-25T14:34:51.156290Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:51.156577Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 720751862240 ... d__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-25T14:35:05.851224Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:35:05.851336Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-25T14:35:05.851408Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037890 source 72075186224037890 dest 72075186224037888 consumer 72075186224037888 txId 281474976715661 2025-06-25T14:35:05.851490Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:784: [DistEraser] [3:1050:2788] HandlePropose TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037888, status# 1 2025-06-25T14:35:05.851595Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715661 2025-06-25T14:35:05.851639Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-25T14:35:05.851667Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037890 2025-06-25T14:35:05.851727Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:784: [DistEraser] [3:1050:2788] HandlePropose TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037889, status# 1 2025-06-25T14:35:05.851766Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:784: [DistEraser] [3:1050:2788] HandlePropose TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037890, status# 1 2025-06-25T14:35:05.851798Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:901: [DistEraser] [3:1050:2788] Register plan: txId# 281474976715662, minStep# 1528, maxStep# 31528 2025-06-25T14:35:05.851957Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715661 2025-06-25T14:35:05.851999Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 
72075186224037890 txId 281474976715661 2025-06-25T14:35:05.884919Z node 3 :TX_DATASHARD INFO: datashard.cpp:190: OnDetach: 72075186224037888 2025-06-25T14:35:05.885072Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-25T14:35:05.887808Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037888 from 72075186224037889 is reset 2025-06-25T14:35:05.887896Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037888 from 72075186224037890 is reset 2025-06-25T14:35:05.888398Z node 3 :TX_DATASHARD ERROR: datashard_distributed_erase.cpp:167: [DistEraser] [3:1050:2788] Reply: txId# 281474976715662, status# SHARD_UNKNOWN, error# Tx state unknown: reason# lost pipe while waiting for reply (plan), txId# 281474976715662, shard# 72075186224037888 2025-06-25T14:35:05.889157Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-06-25T14:35:05.889209Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037889 2025-06-25T14:35:05.889335Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [3:1045:2784], serverId# [3:1046:2785], sessionId# [0:0:0] 2025-06-25T14:35:05.889500Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:35:05.889536Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:35:05.889569Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 1 2025-06-25T14:35:05.889605Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:35:05.908297Z node 3 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [3:1062:2799] 2025-06-25T14:35:05.908594Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:35:05.912358Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:35:05.913492Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:35:05.915540Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:35:05.915625Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:35:05.915687Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:35:05.916098Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:35:05.916439Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:35:05.916517Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [3:1077:2799] in generation 2 2025-06-25T14:35:05.928174Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:35:05.928328Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037888 2025-06-25T14:35:05.928450Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 
mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:35:05.928760Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [3:1080:2807] 2025-06-25T14:35:05.928809Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:35:05.928861Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:35:05.928900Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:35:05.929164Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:711: TxInitSchemaDefaults.Execute 2025-06-25T14:35:05.929343Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:723: TxInitSchemaDefaults.Complete 2025-06-25T14:35:05.930439Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:35:05.930513Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:35:05.930612Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1527 2025-06-25T14:35:05.930664Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:35:05.930939Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:35:05.931061Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:35:05.931110Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:35:05.931151Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 1 2025-06-25T14:35:05.931201Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:35:05.931328Z node 3 :TX_DATASHARD DEBUG: datashard__progress_resend_rs.cpp:14: Start TTxProgressResendRS at tablet 72075186224037888 2025-06-25T14:35:05.931401Z node 3 :TX_DATASHARD INFO: datashard.cpp:4101: Resend RS at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715661 2025-06-25T14:35:05.931451Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 1 at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715661 2025-06-25T14:35:05.931647Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715661 2025-06-25T14:35:05.931755Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 1527 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-25T14:35:05.931821Z node 3 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 1527:281474976715661 at 72075186224037889 2025-06-25T14:35:05.931877Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 
72075186224037889 2025-06-25T14:35:05.931945Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Ack at 72075186224037889 {TEvReadSet step# 1527 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-25T14:35:05.932024Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 1500 next step 1527 2025-06-25T14:35:05.932122Z node 3 :TX_DATASHARD DEBUG: datashard__progress_resend_rs.cpp:14: Start TTxProgressResendRS at tablet 72075186224037888 2025-06-25T14:35:05.932151Z node 3 :TX_DATASHARD INFO: datashard.cpp:4101: Resend RS at 72075186224037888 from 72075186224037888 to 72075186224037890 txId 281474976715661 2025-06-25T14:35:05.932178Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 2 at 72075186224037888 from 72075186224037888 to 72075186224037890 txId 281474976715661 2025-06-25T14:35:05.932297Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715661 2025-06-25T14:35:05.932379Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715661 2025-06-25T14:35:05.932438Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 1527 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-06-25T14:35:05.932475Z node 3 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 1527:281474976715661 at 72075186224037890 2025-06-25T14:35:05.932511Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-25T14:35:05.932549Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Ack at 72075186224037890 {TEvReadSet step# 1527 txid# 281474976715661 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-06-25T14:35:05.932684Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715661 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldFailOnSplit [GOOD] Test command err: 2025-06-25T14:34:51.024800Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:51.024996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:51.025047Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001585/r3tmp/tmpR9723I/pdisk_1.dat 2025-06-25T14:34:51.381080Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:51.384576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:51.468823Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:51.498483Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862087866427 != 1750862087866431 2025-06-25T14:34:51.554140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:51.554310Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:51.569692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:51.664117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:51.741740Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:649:2547] 2025-06-25T14:34:51.742107Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:51.806328Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:51.806587Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:51.808583Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:51.808687Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:51.808746Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:51.809219Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:51.809598Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:51.809683Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:679:2547] in generation 1 2025-06-25T14:34:51.811447Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:651:2549] 2025-06-25T14:34:51.811687Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:51.833160Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:51.833320Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:51.834973Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T14:34:51.835064Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T14:34:51.835121Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T14:34:51.835455Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:51.835817Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:51.835891Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:692:2549] in generation 1 2025-06-25T14:34:51.837591Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:657:2551] 2025-06-25T14:34:51.837804Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:51.847423Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:51.847569Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:51.849201Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-25T14:34:51.849285Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-25T14:34:51.849354Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-25T14:34:51.849697Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:51.849847Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:51.849935Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:700:2551] in generation 1 2025-06-25T14:34:51.861171Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:51.934716Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:51.934960Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:51.935090Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:704:2578] 2025-06-25T14:34:51.935132Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:51.935170Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:51.935218Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:51.935576Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:51.935679Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T14:34:51.935760Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:51.935815Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:705:2579] 2025-06-25T14:34:51.935838Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T14:34:51.935861Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T14:34:51.935883Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:34:51.936228Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:51.936261Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-25T14:34:51.936346Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:51.936401Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:706:2580] 2025-06-25T14:34:51.936450Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-25T14:34:51.936473Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-25T14:34:51.936495Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:34:51.936754Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:51.936932Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:51.937166Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:51.937209Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:51.937257Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:51.937306Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:51.937362Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-25T14:34:51.937423Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-25T14:34:51.937883Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:638:2542], serverId# [1:664:2555], sessionId# 
[0:0:0] 2025-06-25T14:34:51.937955Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:34:51.937989Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:51.938028Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-25T14:34:51.938067Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:34:51.938111Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-06-25T14:34:51.938177Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-06-25T14:34:51.938381Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:51.938641Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 720751862240 ... node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 7, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-25T14:35:05.420530Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 7, finished edge# 0, front# 0 2025-06-25T14:35:05.420899Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 8, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-25T14:35:05.420926Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 8, finished edge# 0, front# 0 2025-06-25T14:35:05.423015Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-25T14:35:05.423063Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037889, table# 1001, finished edge# 0, front# 0 2025-06-25T14:35:05.423542Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:256: 72075186224037889 snapshot complete for split OpId 281474976715663 2025-06-25T14:35:05.423738Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 3 snapshot size is 12 total snapshot size is 12 for split OpId 281474976715663 2025-06-25T14:35:05.423799Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 4 snapshot size is 12 total snapshot size is 24 for split OpId 281474976715663 2025-06-25T14:35:05.423832Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 7 snapshot size is 12 total snapshot size is 36 for split OpId 281474976715663 2025-06-25T14:35:05.423897Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 8 snapshot size is 12 total snapshot size is 48 for split OpId 281474976715663 2025-06-25T14:35:05.424100Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 1001 snapshot size is 146 total snapshot size is 194 for split OpId 281474976715663 2025-06-25T14:35:05.424298Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 
72075186224037889 BorrowSnapshot: table 3 snapshot size is 12 total snapshot size is 206 for split OpId 281474976715663 2025-06-25T14:35:05.424363Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 4 snapshot size is 12 total snapshot size is 218 for split OpId 281474976715663 2025-06-25T14:35:05.424392Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 7 snapshot size is 12 total snapshot size is 230 for split OpId 281474976715663 2025-06-25T14:35:05.424420Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 8 snapshot size is 12 total snapshot size is 242 for split OpId 281474976715663 2025-06-25T14:35:05.424535Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:332: 72075186224037889 BorrowSnapshot: table 1001 snapshot size is 155 total snapshot size is 397 for split OpId 281474976715663 2025-06-25T14:35:05.425177Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:424: 72075186224037889 Sending snapshots from src for split OpId 281474976715663 2025-06-25T14:35:05.425366Z node 3 :TX_DATASHARD DEBUG: datashard_impl.h:2342: Sending snapshot for split opId 281474976715663 from datashard 72075186224037889 to datashard 72075186224037892 size 221 2025-06-25T14:35:05.425492Z node 3 :TX_DATASHARD DEBUG: datashard_impl.h:2342: Sending snapshot for split opId 281474976715663 from datashard 72075186224037889 to datashard 72075186224037891 size 215 2025-06-25T14:35:05.425860Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [3:1154:2864], serverId# [3:1155:2865], sessionId# [0:0:0] 2025-06-25T14:35:05.425903Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037892, clientId# [3:1153:2863], serverId# [3:1156:2866], sessionId# [0:0:0] 2025-06-25T14:35:05.426063Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:175: 72075186224037891 Received snapshot for split/merge TxId 281474976715663 from tabeltId 72075186224037889 2025-06-25T14:35:05.426845Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:175: 72075186224037892 Received snapshot for split/merge TxId 281474976715663 from tabeltId 72075186224037889 2025-06-25T14:35:05.428614Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037891 ack snapshot OpId 281474976715663 2025-06-25T14:35:05.428765Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037891 2025-06-25T14:35:05.428876Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037891 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:35:05.428964Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-25T14:35:05.429028Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [3:1159:2869] 2025-06-25T14:35:05.429071Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-06-25T14:35:05.429115Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037891 2025-06-25T14:35:05.429148Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-25T14:35:05.429331Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 
72075186224037889 Received snapshot Ack from dst 72075186224037891 for split OpId 281474976715663 2025-06-25T14:35:05.430126Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037891 time 2000 2025-06-25T14:35:05.430227Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-25T14:35:05.430332Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2025-06-25T14:35:05.430363Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:05.430396Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-25T14:35:05.430430Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-06-25T14:35:05.430574Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [3:1154:2864], serverId# [3:1155:2865], sessionId# [0:0:0] 2025-06-25T14:35:05.430654Z node 3 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037892 ack snapshot OpId 281474976715663 2025-06-25T14:35:05.430734Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037892 2025-06-25T14:35:05.430797Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037892 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:35:05.430853Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-25T14:35:05.430894Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037892, actorId: [3:1161:2871] 2025-06-25T14:35:05.430918Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037892 2025-06-25T14:35:05.430948Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037892 2025-06-25T14:35:05.430972Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-25T14:35:05.431075Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 Received snapshot Ack from dst 72075186224037892 for split OpId 281474976715663 2025-06-25T14:35:05.431830Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 2000 2025-06-25T14:35:05.431863Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-25T14:35:05.431944Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [3:1153:2863], serverId# [3:1156:2866], sessionId# [0:0:0] 2025-06-25T14:35:05.432080Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037891 coordinator 72057594046316545 last step 1500 next step 2000 2025-06-25T14:35:05.432134Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037891: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-25T14:35:05.432222Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: 
TTxProgressTransaction::Execute at 72075186224037892 2025-06-25T14:35:05.432250Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:05.432277Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2025-06-25T14:35:05.432318Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-25T14:35:05.432518Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 1500 next step 2000 2025-06-25T14:35:05.432550Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037892: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-25T14:35:05.454310Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037889 ack split to schemeshard 281474976715663 2025-06-25T14:35:05.461593Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715663, at datashard: 72075186224037889, state: SplitSrcWaitForPartitioningChanged 2025-06-25T14:35:05.467409Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-06-25T14:35:05.467477Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037889 2025-06-25T14:35:05.467758Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [3:1045:2784], serverId# [3:1046:2785], sessionId# [0:0:0] 2025-06-25T14:35:05.467892Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:35:05.467937Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:21: Progress tx at non-ready tablet 72075186224037889 state 5 2025-06-25T14:35:05.468153Z node 3 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037889 ack split partitioning changed to schemeshard 281474976715663 2025-06-25T14:35:05.468240Z node 3 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:35:05.469266Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-56 >> TPQTest::TestTabletRestoreEventsOrder >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-13 >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_EnableLocalDBFlatIndex_False [GOOD] >> Viewer::ServerlessNodesPage >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-37 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-18 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-13 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-14 >> TPQTest::TestTabletRestoreEventsOrder [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False_EnableLocalDBFlatIndex_False >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-37 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-38 >> 
TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False_EnableLocalDBFlatIndex_False [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_TurnOff >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_TurnOff [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_Generations >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-55 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-56 [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_Generations [GOOD] >> TFlatTableExecutor_CachePressure::TestNotEnoughLocalCache >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-18 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-57 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-56 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-19 >> TFlatTableExecutor_CachePressure::TestNotEnoughLocalCache [GOOD] >> TFlatTableExecutor_Cold::ColdBorrowScan >> TFlatTableExecutor_Cold::ColdBorrowScan [GOOD] >> TFlatTableExecutor_ColumnGroups::TestManyRows >> TFlatTableExecutor_ColumnGroups::TestManyRows [GOOD] >> TFlatTableExecutor_CompactionScan::TestCompactionScan >> TFlatTableExecutor_CompactionScan::TestCompactionScan [GOOD] >> TFlatTableExecutor_CompressedSelectRows::TestCompressedSelectRows >> TFlatTableExecutor_CompressedSelectRows::TestCompressedSelectRows [GOOD] >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionDirect >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionDirect [GOOD] >> TFlatTableExecutorGC::TestGCVectorDeduplicaton [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsAsyncIndex [GOOD] Test command err: 2025-06-25T14:34:51.329215Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:51.329393Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:51.329453Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00158c/r3tmp/tmpX9NfPM/pdisk_1.dat 2025-06-25T14:34:51.705333Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:51.708855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:51.761458Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:51.767433Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862087713566 != 1750862087713570 2025-06-25T14:34:51.818135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:51.818417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:51.830956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:51.928201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:52.015918Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:649:2547] 2025-06-25T14:34:52.016185Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:52.055456Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:52.055817Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:52.057761Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:52.057851Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:52.057909Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:52.058381Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:52.058780Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:52.058855Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:679:2547] in generation 1 2025-06-25T14:34:52.060636Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:651:2549] 2025-06-25T14:34:52.060897Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:52.070728Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:52.070887Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:52.072773Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T14:34:52.072864Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T14:34:52.072910Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T14:34:52.073250Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:52.073611Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:52.073683Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:692:2549] in generation 1 2025-06-25T14:34:52.075349Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:657:2551] 2025-06-25T14:34:52.075547Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:52.085218Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:52.085352Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:52.086923Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-25T14:34:52.087005Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-25T14:34:52.087055Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-25T14:34:52.087359Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:52.087486Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:52.087561Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:700:2551] in generation 1 2025-06-25T14:34:52.099324Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:52.129890Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:52.130150Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:52.130313Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:704:2578] 2025-06-25T14:34:52.130396Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:52.130438Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:52.130481Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:52.130891Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:52.130941Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T14:34:52.131085Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:52.131153Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:705:2579] 2025-06-25T14:34:52.131180Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T14:34:52.131207Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T14:34:52.131232Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:34:52.131625Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:52.131663Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-25T14:34:52.131713Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:52.131768Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:706:2580] 2025-06-25T14:34:52.131803Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-25T14:34:52.131841Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-25T14:34:52.131871Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:34:52.132105Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:52.132296Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:52.132607Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:52.132660Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:52.132730Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:52.132784Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:52.132839Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-25T14:34:52.132935Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-25T14:34:52.133477Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:638:2542], serverId# [1:664:2555], sessionId# 
[0:0:0] 2025-06-25T14:34:52.133536Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:34:52.133572Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:52.133637Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-25T14:34:52.133684Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:34:52.133729Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-06-25T14:34:52.133790Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-06-25T14:34:52.133954Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:52.134262Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 720751862240 ... datashard.cpp:3990: Send RS 2 at 72075186224037891 from 72075186224037891 to 72075186224037893 txId 281474976715666 2025-06-25T14:35:13.815020Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-06-25T14:35:13.815078Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2500 : 281474976715666] from 72075186224037891 at tablet 72075186224037891 send result to client [3:1372:3004], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:35:13.815207Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037891, records: { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 11] SchemaVersion: 1 }, { Order: 5 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 11] SchemaVersion: 1 }, { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 14] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 11] SchemaVersion: 1 } 2025-06-25T14:35:13.815279Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-25T14:35:13.815728Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1372:3004] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715666, shard# 72075186224037891, status# 2 2025-06-25T14:35:13.815916Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037893 source 72075186224037891 dest 72075186224037893 producer 72075186224037891 txId 281474976715666 2025-06-25T14:35:13.816036Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037893 got read set: {TEvReadSet step# 2500 txid# 281474976715666 TabletSource# 72075186224037891 TabletDest# 72075186224037893 SetTabletProducer# 72075186224037891 ReadSet.Size()# 19 Seqno# 2 Flags# 0} 2025-06-25T14:35:13.816231Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037891 2025-06-25T14:35:13.816556Z node 3 :TX_DATASHARD DEBUG: datashard_change_sending.cpp:235: Send 3 change records: to# [3:1179:2893], at tablet# 72075186224037891 2025-06-25T14:35:13.816603Z node 3 :TX_DATASHARD INFO: 
datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 3, forgotten# 0, left# 0, at tablet# 72075186224037891 2025-06-25T14:35:13.816676Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037893 step# 2500} 2025-06-25T14:35:13.816718Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-25T14:35:13.816754Z node 3 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037893 2025-06-25T14:35:13.817023Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-25T14:35:13.817067Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:35:13.817103Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [2500:281474976715666] at 72075186224037893 for LoadAndWaitInRS 2025-06-25T14:35:13.817542Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:13.818033Z node 3 :TX_DATASHARD DEBUG: datashard_change_receiving.cpp:470: Handle TEvChangeExchange::TEvApplyRecords: origin# 72075186224037891, generation# 1, at tablet# 72075186224037892 2025-06-25T14:35:13.829433Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-25T14:35:13.829515Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2500 : 281474976715666] from 72075186224037893 at tablet 72075186224037893 send result to client [3:1372:3004], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T14:35:13.829601Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037893 {TEvReadSet step# 2500 txid# 281474976715666 TabletSource# 72075186224037891 TabletDest# 72075186224037893 SetTabletConsumer# 72075186224037893 Flags# 0 Seqno# 2} 2025-06-25T14:35:13.829642Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-25T14:35:13.829736Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1372:3004] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715666, shard# 72075186224037893, status# 2 2025-06-25T14:35:13.829770Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:165: [DistEraser] [3:1372:3004] Reply: txId# 281474976715666, status# OK, error# 2025-06-25T14:35:13.829907Z node 3 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 3, at tablet# 72075186224037891 2025-06-25T14:35:13.829934Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 4, at tablet: 72075186224037891 2025-06-25T14:35:13.830031Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 5, at tablet: 72075186224037891 2025-06-25T14:35:13.830052Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 6, at tablet: 72075186224037891 2025-06-25T14:35:13.830278Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037891 source 72075186224037891 dest 72075186224037893 consumer 72075186224037893 txId 281474976715666 2025-06-25T14:35:13.830505Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037891 
2025-06-25T14:35:13.830552Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037891 2025-06-25T14:35:13.830644Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [3:1366:2999], serverId# [3:1367:3000], sessionId# [0:0:0] 2025-06-25T14:35:13.830789Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2025-06-25T14:35:13.830821Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:13.830847Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-25T14:35:13.831720Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037893 2025-06-25T14:35:13.832002Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037893 2025-06-25T14:35:13.832135Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-25T14:35:13.832169Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:13.832229Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715667] at 72075186224037893 for WaitForStreamClearance 2025-06-25T14:35:13.832466Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:13.832561Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-25T14:35:13.833138Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037893, TxId: 281474976715667, MessageQuota: 1 2025-06-25T14:35:13.833246Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037893, TxId: 281474976715667, MessageQuota: 1 2025-06-25T14:35:13.834613Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037893 2025-06-25T14:35:13.834662Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715667, at: 72075186224037893 2025-06-25T14:35:13.834866Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-25T14:35:13.834895Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:13.834920Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715667] at 72075186224037893 for ReadTableScan 2025-06-25T14:35:13.835028Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:13.835067Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-25T14:35:13.835113Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-25T14:35:13.836216Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: 
TTxProposeTransactionBase::Execute at 72075186224037892 2025-06-25T14:35:13.836417Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037892 2025-06-25T14:35:13.836512Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-25T14:35:13.836531Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:13.836552Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715668] at 72075186224037892 for WaitForStreamClearance 2025-06-25T14:35:13.836668Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:13.836695Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-25T14:35:13.837055Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037892, TxId: 281474976715668, MessageQuota: 1 2025-06-25T14:35:13.837140Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037892, TxId: 281474976715668, MessageQuota: 1 2025-06-25T14:35:13.867600Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037892 2025-06-25T14:35:13.867651Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715668, at: 72075186224037892 2025-06-25T14:35:13.867801Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-25T14:35:13.867834Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:13.867871Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715668] at 72075186224037892 for ReadTableScan 2025-06-25T14:35:13.867984Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:13.868043Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-25T14:35:13.868082Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestTabletRestoreEventsOrder [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] 2025-06-25T14:31:16.787200Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:16.787278Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927938 is [1:157:2176] sender: 
[1:158:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-25T14:31:16.801256Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:182:2195], now have 1 active actors on pipe 2025-06-25T14:31:16.801414Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:31:16.816976Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "test" Generation: 1 Important: false } 2025-06-25T14:31:16.819952Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "test" Generation: 1 Important: false } 2025-06-25T14:31:16.820132Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:16.822541Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 PartitionIds: 2 PartitionIds: 3 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } Partitions { PartitionId: 2 } Partitions { PartitionId: 3 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } AllPartitions { PartitionId: 2 } AllPartitions { PartitionId: 3 } Consumers { Name: "user" 
ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "test" Generation: 1 Important: false } 2025-06-25T14:31:16.822671Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:3:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:31:16.822867Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:31:16.822928Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:1:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:31:16.822957Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:31:16.823475Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:3:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:16.823875Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [1:192:2203] 2025-06-25T14:31:16.826730Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--asdfgs--topic:3:Initializer] Initializing completed. 2025-06-25T14:31:16.826807Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 3 generation 2 [1:192:2203] 2025-06-25T14:31:16.826880Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 3, State: StateInit] SYNC INIT topic rt3.dc1--asdfgs--topic partitition 3 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:31:16.829085Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 3, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:16.829239Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 3, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 3 user user reinit request with generation 1 2025-06-25T14:31:16.829295Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 3, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 3 user user reinit with generation 1 done 2025-06-25T14:31:16.829347Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 3, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 3 user test reinit request with generation 1 2025-06-25T14:31:16.829379Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 3, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 3 user test reinit with generation 1 done 2025-06-25T14:31:16.829606Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 3, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 3 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:31:16.829649Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 3, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 3 user test readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:31:16.829814Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-25T14:31:16.830078Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 3, State: StateIdle] no data for compaction 2025-06-25T14:31:16.830711Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:16.830939Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-25T14:31:16.832689Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--asdfgs--topic:0:Initializer] Initializing completed. 2025-06-25T14:31:16.832759Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-25T14:31:16.832813Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--asdfgs--topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:31:16.834415Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:16.834519Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user reinit request with generation 1 2025-06-25T14:31:16.834561Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user reinit with generation 1 done 2025-06-25T14:31:16.834594Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user test reinit request with generation 1 2025-06-25T14:31:16.834612Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user test reinit with generation 1 done 2025-06-25T14:31:16.834742Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:31:16.834790Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user test readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:31:16.834951Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:31:16.835191Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-25T14:31:16.836514Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--asdfgs--topic:1:Initializer] Initializing completed. 
2025-06-25T14:31:16.836553Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:190:2201] 2025-06-25T14:31:16.836588Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--asdfgs--topic partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:31:16.838122Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-25T14:31:16.838217Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 1 user user reinit request with generation 1 2025-06-25T14:31:16.838264Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 1 user user r ... tionId: 1 } ReadRuleGenerations: 78 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 78 Important: false } 2025-06-25T14:35:04.126335Z node 79 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [79:189:2200] 2025-06-25T14:35:04.129600Z node 79 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [79:189:2200] 2025-06-25T14:35:04.132114Z node 79 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [79:190:2201] 2025-06-25T14:35:04.134326Z node 79 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [79:190:2201] 2025-06-25T14:35:04.142692Z node 79 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|714a8695-e356c41d-d030eed9-ff6ad3bb_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:35:04.181117Z node 79 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|eec7a67-133d014c-7df32ce8-7a7e3245_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:35:04.202723Z node 79 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|559d0207-a4d5c7b4-731e865e-230f61a9_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:35:04.220637Z node 79 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:35:04.224929Z node 79 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 79 actor [79:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 1000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 100 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 79 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 78 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 
Generation: 78 Important: false } 2025-06-25T14:35:04.232949Z node 79 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a24b06db-fcbfeb81-b9ce53bd-3c220efc_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:35:04.267842Z node 79 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8abc3e47-a854263c-321ac6ee-be38deb2_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:35:04.291014Z node 79 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8f3f9cec-c31b38-ea4ce338-7d8aa889_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [80:107:2057] recipient: [80:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [80:107:2057] recipient: [80:105:2137] Leader for TabletID 72057594037927937 is [80:111:2141] sender: [80:112:2057] recipient: [80:105:2137] 2025-06-25T14:35:04.734373Z node 80 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:35:04.734464Z node 80 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [80:153:2057] recipient: [80:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [80:153:2057] recipient: [80:151:2172] Leader for TabletID 72057594037927938 is [80:157:2176] sender: [80:158:2057] recipient: [80:151:2172] Leader for TabletID 72057594037927937 is [80:111:2141] sender: [80:183:2057] recipient: [80:14:2061] 2025-06-25T14:35:04.755415Z node 80 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:35:04.756397Z node 80 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 80 actor [80:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 1000 MaxSizeInPartition: 104857600 LifetimeSeconds: 1000 LowWatermark: 100 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 80 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 80 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 80 Important: false } 2025-06-25T14:35:04.757050Z node 80 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [80:189:2200] 2025-06-25T14:35:04.759805Z node 80 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [80:189:2200] 2025-06-25T14:35:04.761876Z node 80 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [80:190:2201] 2025-06-25T14:35:04.763965Z node 80 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [80:190:2201] 2025-06-25T14:35:04.778961Z node 80 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|94ad9f0a-afd73b4c-3c8916a1-c6c86a94_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 
2025-06-25T14:35:04.794461Z node 80 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|94f3e7fb-9b54188d-9e01ee84-cb0041a9_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:35:04.810654Z node 80 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e041eab9-45c2454f-54f2806b-ba614563_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:35:04.831366Z node 80 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:35:04.836827Z node 80 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 81 actor [80:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 1000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 100 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 81 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 80 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 80 Important: false } 2025-06-25T14:35:04.844087Z node 80 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|5beb4f83-a893a090-b90f753f-f927c4ab_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:35:04.875677Z node 80 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|612b8dc-5de08dff-b67a6603-77df991c_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:35:04.892766Z node 80 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3a4d0206-d7fb962e-a3302f83-7fba09ef_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:35:05.497698Z node 81 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:35:05.497787Z node 81 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:35:05.547280Z node 81 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:35:05.547382Z node 81 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:35:05.550976Z node 81 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:35:05.552492Z node 81 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 82 actor [81:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 82 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 82 ReadRuleGenerations: 82 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { 
PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 82 Important: false } Consumers { Name: "aaa" Generation: 82 Important: true } 2025-06-25T14:35:05.553352Z node 81 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [81:247:2245] 2025-06-25T14:35:05.571085Z node 81 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 3 [81:247:2245] 2025-06-25T14:35:05.573528Z node 81 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [81:249:2247] 2025-06-25T14:35:05.574804Z node 81 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 3 [81:249:2247] 2025-06-25T14:35:05.608732Z node 81 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:35:05.608809Z node 81 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:35:05.609743Z node 81 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [81:326:2307] 2025-06-25T14:35:05.611069Z node 81 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [81:328:2309] 2025-06-25T14:35:05.615603Z node 81 :PERSQUEUE INFO: partition_init.cpp:895: [topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:35:05.615680Z node 81 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 4 [81:326:2307] 2025-06-25T14:35:05.615887Z node 81 :PERSQUEUE INFO: partition_init.cpp:895: [topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-25T14:35:05.615916Z node 81 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 4 [81:328:2309] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ShouldRestartOnScanErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:17.881137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:17.881232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:17.881268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:17.881301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:17.881355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:17.881388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:17.881454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:17.881518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:17.882347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:17.882678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:17.966159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:17.966223Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:17.983296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:17.983611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:17.983730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:17.996387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:17.996682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState 
with owners number: 0 2025-06-25T14:34:17.997179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:17.997376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:18.000974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:18.001143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:18.002201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:18.002247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:18.002333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:18.002373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:18.002406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:18.002468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.008091Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:18.128739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:18.128924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.129135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:18.129181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:18.129369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:18.129432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:18.133744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.133941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:18.134149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.134231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:18.134269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:18.134301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:18.139584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.139666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:18.139705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:18.146537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.146606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:18.146647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.146698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:18.150155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:18.157592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:18.157818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:18.158789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:18.158967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:18.159031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.159290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:18.159340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:18.159488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:18.159554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:18.173401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:18.173467Z node 1 :FLAT_TX_SCHEMESHARD ... or txid 281474976710759:0 128 -> 129 2025-06-25T14:34:59.453197Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:5918 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 19C2843B-C292-489F-8E69-607CD0CF3C89 amz-sdk-request: attempt=1 content-length: 106 content-md5: heRlZdXBqq/26pCrTLfM5g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /metadata.json / / 106 2025-06-25T14:34:59.523726Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:59.523800Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710759, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-25T14:34:59.524064Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:59.524114Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 281474976710759, path id: 4 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000005 2025-06-25T14:34:59.524685Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 
2025-06-25T14:34:59.524747Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 281474976710759:0 ProgressState, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 281474976710759 2025-06-25T14:34:59.525674Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-25T14:34:59.525779Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-25T14:34:59.525821Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710759 2025-06-25T14:34:59.525860Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710759, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-25T14:34:59.525902Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:34:59.525990Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 0/1, is published: true REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:5918 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: ED71F55C-2C73-463C-B130-2FB1427D33E4 amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-25T14:34:59.542905Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710759 TestWaitNotification wait txId: 102 2025-06-25T14:34:59.543158Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:34:59.543243Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:34:59.554364Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:62: NotifyTxCompletion export in-flight, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:34:59.554452Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 102, at schemeshard: 72057594046678944 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:5918 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: FF84948E-1667-4A65-ABAE-50FF789D12AA amz-sdk-request: attempt=1 content-length: 106 content-md5: heRlZdXBqq/26pCrTLfM5g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /metadata.json / / 106 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:5918 
Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 050F783D-685A-4EA1-BD01-FDC1BF248E9B amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /scheme.pb / / 357 REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:5918 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 286E995A-0E2D-4EA1-83BF-FFA526602241 amz-sdk-request: attempt=1 content-length: 11 content-md5: bj4KQf2rit2DOGLxvSlUww== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / / 11 2025-06-25T14:35:00.319056Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 450 RawX2: 17179871601 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:35:00.319167Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0 2025-06-25T14:35:00.319434Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 450 RawX2: 17179871601 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:35:00.319621Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 450 RawX2: 17179871601 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:35:00.319726Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:35:00.319786Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:35:00.319847Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:35:00.319910Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710759:0 129 -> 240 2025-06-25T14:35:00.320133Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: 
[OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:35:00.333523Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:35:00.334046Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:35:00.334124Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-06-25T14:35:00.334327Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-25T14:35:00.334381Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-25T14:35:00.334476Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-25T14:35:00.334534Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-25T14:35:00.334588Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-06-25T14:35:00.334699Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:126:2150] message: TxId: 281474976710759 2025-06-25T14:35:00.334771Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-25T14:35:00.334825Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710759:0 2025-06-25T14:35:00.334869Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710759:0 2025-06-25T14:35:00.335022Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:35:00.343363Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-06-25T14:35:00.343511Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710759 2025-06-25T14:35:00.350969Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:35:00.351059Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:594:2548] TestWaitNotification: OK eventTxId 102 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TFlatTableExecutorGC::TestGCVectorDeduplicaton [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:26.465792Z 00000.010 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.010 II| FAKE_ENV: TNanny initiates TDummy tablet 72057594037927937 birth 00000.011 II| FAKE_ENV: Starting storage for BS group 0 00000.011 II| FAKE_ENV: Starting storage for BS group 1 00000.011 
II| FAKE_ENV: Starting storage for BS group 2 00000.011 II| FAKE_ENV: Starting storage for BS group 3 00000.016 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.016 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.154 II| TABLET_EXECUTOR: LSnap{1:2, on 2:301, 5624b, wait} done, Waste{2:0, 535572b +(0, 0b), 300 trc} 00000.179 II| TABLET_EXECUTOR: Leader{1:2:348} starting compaction 00000.179 II| TABLET_EXECUTOR: Leader{1:2:349} starting Scan{1 on 3, Compact{1.2.348, eph 1}} 00000.179 II| TABLET_EXECUTOR: Leader{1:2:349} started compaction 1 00000.179 II| TABLET_OPS_HOST: Scan{1 on 3, Compact{1.2.348, eph 1}} begin on TSubset{head 2, 1m 0p 0c} 00000.182 II| TABLET_OPS_HOST: Scan{1 on 3, Compact{1.2.348, eph 1}} end=Done, 105r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 10 of 12 ~1p 00000.182 II| OPS_COMPACT: Compact{1.2.348, eph 1} end=Done, 6 blobs 75r (max 105), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 10 +2, (174832 33478 112265)b }, ecr=1.000 00000.206 II| TABLET_EXECUTOR: Leader{1:2:350} Compact 1 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 348, product {1 parts epoch 2} done 00000.213 II| TABLET_EXECUTOR: Leader{1:2:361} starting compaction 00000.213 II| TABLET_EXECUTOR: Leader{1:2:362} starting Scan{3 on 2, Compact{1.2.361, eph 1}} 00000.213 II| TABLET_EXECUTOR: Leader{1:2:362} started compaction 3 00000.213 II| TABLET_OPS_HOST: Scan{3 on 2, Compact{1.2.361, eph 1}} begin on TSubset{head 2, 1m 0p 0c} 00000.216 II| TABLET_OPS_HOST: Scan{3 on 2, Compact{1.2.361, eph 1}} end=Done, 106r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 15 of 19 ~1p 00000.216 II| OPS_COMPACT: Compact{1.2.361, eph 1} end=Done, 11 blobs 82r (max 106), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 15 +7, (131116 20509 204420)b }, ecr=1.000 00000.218 II| TABLET_EXECUTOR: Leader{1:2:363} Compact 3 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 361, product {1 parts epoch 2} done 00000.466 II| TABLET_EXECUTOR: LSnap{1:2, on 2:601, 8460b, wait} done, Waste{2:0, 1381058b +(147, 69965b), 300 trc} 00000.491 II| TABLET_EXECUTOR: Leader{1:2:629} starting compaction 00000.491 II| TABLET_EXECUTOR: Leader{1:2:630} starting Scan{5 on 3, Compact{1.2.629, eph 2}} 00000.491 II| TABLET_EXECUTOR: Leader{1:2:630} started compaction 5 00000.491 II| TABLET_OPS_HOST: Scan{5 on 3, Compact{1.2.629, eph 2}} begin on TSubset{head 3, 1m 0p 0c} 00000.494 II| TABLET_OPS_HOST: Scan{5 on 3, Compact{1.2.629, eph 2}} end=Done, 93r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 12 of 18 ~1p 00000.494 II| OPS_COMPACT: Compact{1.2.629, eph 2} end=Done, 7 blobs 93r (max 93), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 12 +3, (129866 33172 139815)b }, ecr=1.000 00000.520 II| TABLET_EXECUTOR: Leader{1:2:630} Compact 5 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 629, product {1 parts epoch 3} done 00000.522 II| TABLET_EXECUTOR: Leader{1:2:631} starting compaction 00000.522 II| TABLET_EXECUTOR: Leader{1:2:632} starting Scan{7 on 3, Compact{1.2.631, eph 2}} 00000.522 II| TABLET_EXECUTOR: Leader{1:2:632} started compaction 7 00000.522 II| TABLET_OPS_HOST: Scan{7 on 3, Compact{1.2.631, eph 2}} begin on TSubset{head 0, 0m 2p 0c} 00000.530 II| TABLET_OPS_HOST: Scan{7 on 3, Compact{1.2.631, eph 2}} 
end=Done, 138r seen, TFwd{fetch=297KiB,saved=297KiB,usage=290KiB,after=6.81KiB,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=6}, trace 22 of 27 ~3p 00000.530 II| OPS_COMPACT: Compact{1.2.631, eph 2} end=Done, 4 blobs 109r (max 168), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 22 +0, (223945 59675 209233)b }, ecr=1.000 00000.533 II| TABLET_EXECUTOR: Leader{1:2:634} Compact 7 on TGenCompactionParams{3: gen 1 epoch 0, 2 parts} step 631, product {1 parts epoch 0} done 00000.632 II| TABLET_EXECUTOR: Leader{1:2:731} starting compaction 00000.632 II| TABLET_EXECUTOR: Leader{1:2:732} starting Scan{9 on 2, Compact{1.2.731, eph 2}} 00000.632 II| TABLET_EXECUTOR: Leader{1:2:732} started compaction 9 00000.632 II| TABLET_OPS_HOST: Scan{9 on 2, Compact{1.2.731, eph 2}} begin on TSubset{head 3, 1m 0p 0c} 00000.634 II| TABLET_OPS_HOST: Scan{9 on 2, Compact{1.2.731, eph 2}} end=Done, 105r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 9 of 12 ~1p 00000.635 II| OPS_COMPACT: Compact{1.2.731, eph 2} end=Done, 5 blobs 105r (max 105), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 9 +1, (183265 13341 88097)b }, ecr=1.000 00000.659 II| TABLET_EXECUTOR: Leader{1:2:733} Compact 9 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 731, product {1 parts epoch 3} done 00000.660 II| TABLET_EXECUTOR: Leader{1:2:734} starting compaction 00000.660 II| TABLET_EXECUTOR: Leader{1:2:735} starting Scan{11 on 2, Compact{1.2.734, eph 2}} 00000.660 II| TABLET_EXECUTOR: Leader{1:2:735} started compaction 11 00000.660 II| TABLET_OPS_HOST: Scan{11 on 2, Compact{1.2.734, eph 2}} begin on TSubset{head 0, 0m 2p 0c} 00000.666 II| TABLET_OPS_HOST: Scan{11 on 2, Compact{1.2.734, eph 2}} end=Done, 156r seen, TFwd{fetch=286KiB,saved=286KiB,usage=286KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=7}, trace 22 of 32 ~3p 00000.667 II| OPS_COMPACT: Compact{1.2.734, eph 2} end=Done, 4 blobs 129r (max 187), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 22 +0, (266693 13341 205735)b }, ecr=1.000 00000.670 II| TABLET_EXECUTOR: Leader{1:2:737} Compact 11 on TGenCompactionParams{2: gen 1 epoch 0, 2 parts} step 734, product {1 parts epoch 0} done 00000.854 II| TABLET_EXECUTOR: LSnap{1:2, on 2:901, 10132b, wait} done, Waste{2:0, 1902996b +(182, 857259b), 300 trc} 00000.974 II| TABLET_EXECUTOR: Leader{1:2:987} starting compaction 00000.974 II| TABLET_EXECUTOR: Leader{1:2:988} starting Scan{13 on 3, Compact{1.2.987, eph 3}} 00000.974 II| TABLET_EXECUTOR: Leader{1:2:988} started compaction 13 00000.974 II| TABLET_OPS_HOST: Scan{13 on 3, Compact{1.2.987, eph 3}} begin on TSubset{head 4, 1m 0p 0c} 00000.977 II| TABLET_OPS_HOST: Scan{13 on 3, Compact{1.2.987, eph 3}} end=Done, 104r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 12 of 14 ~1p 00000.978 II| OPS_COMPACT: Compact{1.2.987, eph 3} end=Done, 11 blobs 104r (max 104), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 12 +7, (142875 20082 170237)b }, ecr=1.000 00001.008 II| TABLET_EXECUTOR: Leader{1:2:989} Compact 13 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 987, product {1 parts epoch 4} done 00001.097 II| TABLET_EXECUTOR: Leader{1:2:1082} starting compaction 00001.097 II| TABLET_EXECUTOR: Leader{1:2:1083} starting Scan{15 on 2, Compact{1.2.1082, eph 3}} 00001.097 II| TABLET_EXECUTOR: Leader{1:2:1083} started compaction 15 00001.097 II| 
TABLET_OPS_HOST: Scan{15 on 2, Compact{1.2.1082, eph 3}} begin on TSubset{head 4, 1m 0p 0c} 00001.104 II| TABLET_OPS_HOST: Scan{15 on 2, Compact{1.2.1082, eph 3}} end=Done, 120r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 16 of 19 ~1p 00001.104 II| OPS_COMPACT: Compact{1.2.1082, eph 3} end=Done, 15 blobs 120r (max 120), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 16 +11, (136634 6538 230692)b }, ecr=1.000 00001.106 II| TABLET_EXECUTOR: Leader{1:2:1084} Compact 15 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 1082, product {1 parts epoch 4} done 00001.271 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1201, 11500b, wait} done, Waste{2:0, 2929711b +(156, 64701b), 300 trc} 00001.459 II| TABLET_EXECUTOR: Leader{1:2:1326} starting compaction 00001.459 II| TABLET_EXECUTOR: Leader{1:2:1327} starting Scan{17 on 3, Compact{1.2.1326, eph 4}} 00001.459 II| TABLET_EXECUTOR: Leader{1:2:1327} started compaction 17 00001.459 II| TABLET_OPS_HOST: Scan{17 on 3, Compact{1.2.1326, eph 4}} begin on TSubset{head 5, 1m 0p 0c} 00001.462 II| TABLET_OPS_HOST: Scan{17 on 3, Compact{1.2.1326, eph 4}} end=Done, 97r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 17 of 21 ~1p 00001.462 II| OPS_COMPACT: Compact{1.2.1326, eph 4} end=Done, 13 blobs 97r (max 97), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 17 +9, (118746 13469 219908)b }, ecr=1.000 00001.491 II| TABLET_EXECUTOR: Leader{1:2:1328} Compact 17 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 1326, product {1 parts epoch 5} done 00001.492 II| TABLET_EXECUTOR: Leader{1:2:1330} starting compaction 00001.492 II| TABLET_EXECUTOR: Leader{1:2:1331} starting Scan{19 on 3, Compact{1.2.1330, eph 4}} 00001.492 II| TABLET_EXECUTOR: Leader{1:2:1331} started compaction 19 00001.493 II| TABLET_OPS_HOST: Scan{19 on 3, Compact{1.2.1330, eph 4}} begin on TSubset{head 0, 0m 2p 0c} 00001.497 II| TABLET_OPS_HOST: Scan{19 on 3, Compact{1.2.1330, eph 4}} end=Done, 167r seen, TFwd{fetch=254KiB,saved=254KiB,usage=241KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=6}, trace 37 of 45 ~3p 00001.498 II| OPS_COMPACT: Compact{1.2.1330, eph 4} end=Done, 4 blobs 167r (max 201), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 37 +0, (201038 19989 320704)b }, ecr=1.000 00001.500 II| TABLET_EXECUTOR: Leader{1:2:1333} Compact 19 on TGenCompactionParams{3: gen 1 epoch 0, 2 parts} step 1330, product {1 parts epoch 0} done 00001.578 II| TABLET_EXECUTOR: Leader{1:2:1386} starting compaction 00001.578 II| TABLET_EXECUTOR: Leader{1:2:1387} starting Scan{21 on 2, Compact{1.2.1386, eph 4}} 00001.578 II| TABLET_EXECUTOR: Leader{1:2:1387} started compaction 21 00001.578 II| TABLET_OPS_HOST: Scan{21 on 2, Compact{1.2.1386, eph 4}} begin on TSubset{head 5, 1m 0p 0c} 00001.581 II| TABLET_OPS_HOST: Scan{21 on 2, Compact{1.2.1386, eph 4}} end=Done, 112r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 11 of 14 ~1p 00001.582 II| OPS_COMPACT: Compact{1.2.1386, eph 4} end=Done, 9 blobs 112r (max 112), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 11 +5, (168411 6894 140412)b }, ecr=1.000 00001.602 II| TABLET_EXECUTOR: Leader{1:2:1387} Compact 21 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 1386, product {1 parts epoch 5} done 00001.604 II| TABLET_EXECUTOR: Leader{1:2:1389} starting 
compaction 00001.604 II| TABLET_EXECUTOR: Leader{1:2:1390} starting Scan{23 on 2, Compact{1.2.1389, eph 4}} 00001.604 II| TABLET_EXECUTOR: Leader{1:2:1390} started compaction 23 00001.604 II| TABLET_OPS_HOST: Scan{23 on 2, Compact{1.2.1389, eph 4}} begin on TSubset{head 0, 0m 2p 0c} 00001.609 II| TABLET_OPS_HOST: Scan{23 on 2, Compact{1.2.1389, eph 4}} end=Done, 186r seen, TFwd{fetch=290KiB,saved=290KiB,usage=290KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=7}, trace 33 of 43 ~3p 00001.610 II| OPS_COMPACT: Compact{1.2.1389, eph 4} end=Done, 4 blobs 186r (max 232), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 33 +0, (266624 6894 286893)b }, ecr=1.000 00001.612 II| TABLET_EXECUTOR: Leader{1:2:1391} Compact 23 on TGenCompactionParams{2: gen 1 epoch 0, 2 parts} step 1389, product {1 parts epoch 0} done 00001.716 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1501, 13007b, wait} done, Waste{2:0, 3553724b +(179 ... SAUSAGECACHE: Send page collection result [1:2:251:1:12288:161:0] owner [37:418:2424] class Scan pages [ 0 ] cookie 0 00000.303 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:313:1:12288:161:0] owner [37:418:2424] 00000.303 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:313:1:12288:161:0] owner [37:418:2424] cookie 0 class Scan from cache [ 0 ] 00000.303 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:313:1:12288:161:0] owner [37:418:2424] class Scan pages [ 0 ] cookie 0 00000.313 DD| TABLET_SAUSAGECACHE: Save page collection [1:2:315:1:12288:163:0] owner [37:419:2424] compacted pages [ 2 ] 00000.313 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:315:1:12288:163:0] 00000.313 DD| TABLET_SAUSAGECACHE: Unregister owner [37:418:2424] 00000.313 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:418:2424] 00000.313 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:126:1:12288:161:0] owner [37:418:2424] 00000.313 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:251:1:12288:161:0] owner [37:418:2424] 00000.313 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:188:1:12288:161:0] owner [37:418:2424] 00000.313 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:313:1:12288:161:0] owner [37:418:2424] 00000.313 DD| TABLET_SAUSAGECACHE: Remove owner [37:418:2424] 00000.313 II| TABLET_EXECUTOR: Leader{1:2:316} Compact 63 on TGenCompactionParams{101: gen 2 epoch 0, 5 parts} step 315, product {1 parts epoch 0} done 00000.313 DD| TABLET_EXECUTOR: TGenCompactionStrategy CompactionFinished for 1: compaction 63, generation 2 00000.313 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 2, state Free, final id 0, final level 2 00000.314 DD| RESOURCE_BROKER: Finish task gen2-table-101-tablet-1 (32 by [37:30:2062]) (release resources {1, 0}) 00000.314 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_compaction_gen2 from 4.687500 to 0.000000 (remove task gen2-table-101-tablet-1 (32 by [37:30:2062])) 00000.314 DD| TABLET_SAUSAGECACHE: Attach page collection [1:2:315:1:12288:163:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:315:1:12288:163:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:313:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:313:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:251:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Remove page collection 
[1:2:251:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:188:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:188:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:126:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:126:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_EXECUTOR: Leader{1:2:317} commited cookie 3 for step 316 00000.314 DD| TABLET_EXECUTOR: Leader{1:2:317} switch applied on followers, step 316 00000.314 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:64:1:12288:161:0] owner [37:30:2062] 00000.314 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:30:2062] 00000.314 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:315:1:12288:163:0] owner [37:30:2062] pages [ 2 ] 00000.315 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:64:1:12288:161:0] owner [37:405:2414] 00000.315 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:64:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.315 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:64:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.315 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:126:1:12288:161:0] owner [37:405:2414] 00000.315 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:126:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.315 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:126:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.316 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:188:1:12288:161:0] owner [37:405:2414] 00000.316 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:188:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.316 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:188:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.316 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:251:1:12288:161:0] owner [37:405:2414] 00000.316 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:251:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.316 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:251:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.316 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:261:1:12288:161:0] owner [37:405:2414] 00000.316 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:261:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.316 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:261:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.316 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:273:1:12288:161:0] owner [37:405:2414] 00000.316 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:273:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.317 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:273:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.317 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:285:1:12288:161:0] owner [37:405:2414] 00000.317 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:285:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.317 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:285:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 
] cookie 0 00000.317 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:297:1:12288:161:0] owner [37:405:2414] 00000.317 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:297:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ] 00000.317 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:297:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0 00000.317 DD| TABLET_SAUSAGECACHE: Unregister owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:297:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:261:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:188:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:251:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:273:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:285:1:12288:161:0] owner [37:405:2414] 00000.317 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:405:2414] 00000.318 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:126:1:12288:161:0] owner [37:405:2414] 00000.318 DD| TABLET_SAUSAGECACHE: Remove owner [37:405:2414] 00000.318 DD| RESOURCE_BROKER: Finish task Scan{58 on 101}::1 (29 by [37:30:2062]) (release resources {1, 0}) 00000.318 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_scan from 11.718750 to 0.000000 (remove task Scan{58 on 101}::1 (29 by [37:30:2062])) 00000.318 II| TABLET_EXECUTOR: Leader{1:2:317} suiciding, Waste{2:0, 7661b +(30, 11928b), 16 trc, -42341b acc} 00000.319 DD| TABLET_SAUSAGECACHE: Unregister owner [37:30:2062] 00000.319 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:315:1:12288:163:0] owner [37:30:2062] 00000.319 DD| TABLET_SAUSAGECACHE: Remove owner [37:30:2062] 00000.320 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.320 NN| TABLET_SAUSAGECACHE: Poison cache serviced 38 reqs hit {38 21480b} miss {0 0b} 00000.320 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.320 II| FAKE_ENV: DS.0 gone, left {1964b, 17}, put {31647b, 317} 00000.320 II| FAKE_ENV: DS.1 gone, left {23845b, 37}, put {57239b, 346} 00000.320 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.320 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.320 II| FAKE_ENV: All BS storage groups are stopped 00000.320 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.321 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 2287}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:35:14.064024Z 00000.007 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.008 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.009 II| FAKE_ENV: Starting storage for BS group 3 00000.058 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.058 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.058 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.058 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.058 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.058 II| FAKE_ENV: DS.0 gone, left {536b, 6}, put {556b, 7} 00000.058 II| FAKE_ENV: DS.1 gone, left {30495b, 8}, put {30495b, 8} 
00000.058 II| FAKE_ENV: All BS storage groups are stopped 00000.058 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.058 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:35:14.128603Z 00000.008 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.009 II| FAKE_ENV: Starting storage for BS group 0 00000.009 II| FAKE_ENV: Starting storage for BS group 1 00000.009 II| FAKE_ENV: Starting storage for BS group 2 00000.010 II| FAKE_ENV: Starting storage for BS group 3 00000.100 CC| TABLET_EXECUTOR: Tablet 1 unhandled exception std::runtime_error: test ??+0 (0x11A98F91) __cxa_throw+221 (0x11A98DBD) NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Exceptions::TTxExecuteThrowException::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext const&)+62 (0x10D839FE) NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*)+3349 (0x17DB6105) NKikimr::NTabletFlatExecutor::TExecutor::DoExecute(TAutoPtr, NKikimr::NTabletFlatExecutor::TExecutor::ETxMode)+10562 (0x17DB1AE2) non-virtual thunk to NKikimr::NTabletFlatExecutor::TExecutor::Execute(TAutoPtr, NActors::TActorContext const&)+54 (0x17DB8D76) ??+0 (0x10D837F0) NKikimr::NFake::TDummy::Inbox(TAutoPtr&)+2810 (0x10CC50FA) NActors::IActor::Receive(TAutoPtr&)+237 (0x13299C9D) 00000.101 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.101 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.101 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.101 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {62b, 2} 00000.101 II| FAKE_ENV: DS.1 gone, left {35b, 1}, put {35b, 1} 00000.101 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.101 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.101 II| FAKE_ENV: All BS storage groups are stopped 00000.101 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.101 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 1 Error 0 Left 15}, stopped ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> DistributedEraseTests::ConditionalEraseRowsShouldNotFailOnMissingRows [GOOD] Test command err: 2025-06-25T14:34:49.922081Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:49.922230Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:49.922298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001598/r3tmp/tmpPEoisA/pdisk_1.dat 2025-06-25T14:34:50.277019Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:50.280398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:50.334458Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:50.349140Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862086851758 != 1750862086851762 2025-06-25T14:34:50.400630Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:50.400790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:50.413653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:50.497743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:50.571333Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:649:2547] 2025-06-25T14:34:50.571605Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:50.627042Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:50.627210Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:50.632559Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:50.632686Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:50.632757Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:50.633203Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:50.633575Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:50.633634Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:679:2547] in generation 1 2025-06-25T14:34:50.635184Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:651:2549] 2025-06-25T14:34:50.635381Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:50.644773Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:50.644931Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:50.646462Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T14:34:50.646552Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T14:34:50.646610Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T14:34:50.646923Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:50.647268Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:50.647332Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:692:2549] in generation 1 2025-06-25T14:34:50.648872Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:657:2551] 2025-06-25T14:34:50.649080Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:50.658396Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:50.658540Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:50.660019Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-25T14:34:50.660092Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-25T14:34:50.660149Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-25T14:34:50.660486Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:50.660628Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:50.660691Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:700:2551] in generation 1 2025-06-25T14:34:50.671779Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:50.721730Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:50.721967Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:50.722118Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:704:2578] 2025-06-25T14:34:50.722169Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:50.722207Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:50.722246Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:50.722729Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:50.722790Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T14:34:50.722898Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:50.722982Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:705:2579] 2025-06-25T14:34:50.723014Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T14:34:50.723038Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T14:34:50.723062Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:34:50.723469Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:50.723522Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-25T14:34:50.723579Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:50.723665Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:706:2580] 2025-06-25T14:34:50.723708Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-25T14:34:50.723734Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-25T14:34:50.723773Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:34:50.724029Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:50.724205Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:50.724439Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:50.724487Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:50.724527Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:50.724586Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:50.724645Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-25T14:34:50.724705Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-25T14:34:50.725126Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:638:2542], serverId# [1:664:2555], sessionId# 
[0:0:0] 2025-06-25T14:34:50.725173Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:34:50.725200Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:50.725254Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-25T14:34:50.725291Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:34:50.725331Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-06-25T14:34:50.725381Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-06-25T14:34:50.725554Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:50.725790Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 720751862240 ... 037888 2025-06-25T14:35:11.019066Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715662 2025-06-25T14:35:11.019121Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1050:2788] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037888, status# 2 2025-06-25T14:35:11.019204Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:35:11.019240Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [2000 : 281474976715662] from 72075186224037890 at tablet 72075186224037890 send result to client [3:1050:2788], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T14:35:11.019288Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037890 {TEvReadSet step# 2000 txid# 281474976715662 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletConsumer# 72075186224037890 Flags# 0 Seqno# 6} 2025-06-25T14:35:11.019316Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:35:11.019373Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:978: [DistEraser] [3:1050:2788] HandlePlan TEvDataShard::TEvProposeTransactionResult: txId# 281474976715662, shard# 72075186224037890, status# 2 2025-06-25T14:35:11.019408Z node 3 :TX_DATASHARD DEBUG: datashard_distributed_erase.cpp:165: [DistEraser] [3:1050:2788] Reply: txId# 281474976715662, status# OK, error# 2025-06-25T14:35:11.019550Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715662 2025-06-25T14:35:11.019916Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-06-25T14:35:11.019969Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037889 2025-06-25T14:35:11.020073Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [3:1045:2784], serverId# [3:1046:2785], sessionId# [0:0:0] 
2025-06-25T14:35:11.020243Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:35:11.020284Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:11.020355Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-25T14:35:11.020431Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:35:11.021664Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:35:11.022099Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-25T14:35:11.022306Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:35:11.022356Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:11.022410Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715664] at 72075186224037889 for WaitForStreamClearance 2025-06-25T14:35:11.022677Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:11.022753Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:35:11.023439Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037889, TxId: 281474976715664, MessageQuota: 1 2025-06-25T14:35:11.023717Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037889, TxId: 281474976715664, Size: 70, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:35:11.023867Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037889, TxId: 281474976715664, PendingAcks: 0 2025-06-25T14:35:11.023925Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037889, TxId: 281474976715664, MessageQuota: 0 2025-06-25T14:35:11.034422Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037889 2025-06-25T14:35:11.034496Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715664, at: 72075186224037889 2025-06-25T14:35:11.034953Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:35:11.034988Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:11.035020Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715664] at 72075186224037889 for ReadTableScan 2025-06-25T14:35:11.035133Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:11.035179Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:35:11.035218Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: 
[CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:35:11.037690Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:35:11.038022Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:35:11.038246Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:35:11.038302Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:11.038357Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715665] at 72075186224037888 for WaitForStreamClearance 2025-06-25T14:35:11.038605Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:11.038671Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:35:11.039311Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715665, MessageQuota: 1 2025-06-25T14:35:11.039547Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715665, Size: 35, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:35:11.039657Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715665, PendingAcks: 0 2025-06-25T14:35:11.039699Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715665, MessageQuota: 0 2025-06-25T14:35:11.073064Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:35:11.073162Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715665, at: 72075186224037888 2025-06-25T14:35:11.073692Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:35:11.073738Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:11.073779Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715665] at 72075186224037888 for ReadTableScan 2025-06-25T14:35:11.073940Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:11.074011Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:35:11.074072Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:35:11.076993Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037890 2025-06-25T14:35:11.077393Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037890 2025-06-25T14:35:11.077611Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-25T14:35:11.077662Z 
node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:11.077736Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715666] at 72075186224037890 for WaitForStreamClearance 2025-06-25T14:35:11.077996Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:11.078094Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:35:11.078800Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715666, MessageQuota: 1 2025-06-25T14:35:11.079058Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715666, Size: 35, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:35:11.079213Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715666, PendingAcks: 0 2025-06-25T14:35:11.079274Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715666, MessageQuota: 0 2025-06-25T14:35:11.117266Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-25T14:35:11.117367Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715666, at: 72075186224037890 2025-06-25T14:35:11.117912Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-25T14:35:11.117958Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:11.117996Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715666] at 72075186224037890 for ReadTableScan 2025-06-25T14:35:11.118161Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:11.118237Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:35:11.118290Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamSelectRange [GOOD] Test command err: Trying to start YDB, gRPC: 19225, MsgBus: 26068 2025-06-25T14:35:01.982368Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895466962934385:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:01.985298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ae0/r3tmp/tmpIaCOUs/pdisk_1.dat 2025-06-25T14:35:02.366501Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:02.418687Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2025-06-25T14:35:02.418783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:02.423793Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19225, node 1 2025-06-25T14:35:02.480052Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:02.480084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:02.480096Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:02.480258Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26068 TClient is connected to server localhost:26068 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:35:02.998078Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:03.065811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:03.078518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:35:03.092082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:35:03.296891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:03.478493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:03.547330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:05.015820Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895484142805170:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:05.015944Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:05.246038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:05.274855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:05.305981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:05.331827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:05.369265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:05.415271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:05.462806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:05.554764Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895484142805829:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:05.554855Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:05.555087Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895484142805834:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:05.557944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:05.573881Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895484142805836:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:35:05.650176Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895484142805887:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:06.786951Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862106826, txId: 281474976710672] shutting down 2025-06-25T14:35:06.982654Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895466962934385:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:06.982755Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldEraseOnDyNumberNanoSeconds [GOOD] Test command err: 2025-06-25T14:34:55.801897Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:55.802054Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:34:55.802160Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00156f/r3tmp/tmpFIxUOJ/pdisk_1.dat 2025-06-25T14:34:56.151933Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:34:56.160148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:56.228651Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:56.242324Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862092349352 != 1750862092349356 2025-06-25T14:34:56.296264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:56.296451Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:56.308143Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:34:56.415501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:34:56.465778Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:34:56.466046Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:34:56.526175Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:34:56.526344Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:34:56.528026Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:34:56.528135Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:34:56.528194Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:34:56.530232Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:34:56.530435Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:34:56.530523Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:34:56.544806Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:34:56.586498Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:34:56.586718Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:34:56.586836Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:34:56.586869Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:56.586925Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:34:56.586968Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.587444Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:34:56.587544Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:34:56.587600Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:56.587638Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:56.587677Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:34:56.587715Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:56.588079Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:34:56.588231Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:34:56.588479Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:34:56.588582Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:34:56.590282Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:56.603016Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:34:56.603122Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:34:56.770657Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:34:56.775331Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:34:56.775419Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.777372Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:56.777432Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:34:56.777492Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:34:56.777779Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:34:56.777925Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:34:56.778481Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:34:56.778547Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:34:56.780575Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:34:56.780980Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:34:56.782606Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:34:56.782664Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.783125Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:34:56.783196Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:56.783799Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:34:56.783834Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:34:56.783893Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:34:56.783952Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:34:56.784015Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:34:56.784098Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:34:56.805567Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:34:56.807641Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:34:56.807726Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:34:56.808563Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:34:56.839144Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:56.839261Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: ... .254314Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:35:07.254361Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:35:07.254595Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:35:07.254730Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:35:07.255437Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:35:07.255504Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:35:07.255928Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:35:07.256333Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:07.260025Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:35:07.260086Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:35:07.260504Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:35:07.260592Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:35:07.261669Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:35:07.261709Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:35:07.261755Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:35:07.261814Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:35:07.261868Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:35:07.261950Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:35:07.264012Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:35:07.265935Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle 
TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:35:07.265995Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:35:07.266407Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:35:07.303264Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:07.303359Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:704:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:07.303448Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:07.307650Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:07.313260Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:35:07.358031Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:07.476883Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:35:07.479283Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:35:07.513575Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:07.579778Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykr74n5cqp85kac4ywkqwtx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NDcwYjdjNjYtZjRiZWU1Yy0yYzE2NmE4NC1jY2Q3NTRiYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:07.581974Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:809:2640], serverId# [3:810:2641], sessionId# [0:0:0] 2025-06-25T14:35:07.582356Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-25T14:35:07.582531Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=4 2025-06-25T14:35:07.596918Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:35:07.622447Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:817:2647], serverId# [3:818:2648], sessionId# [0:0:0] 2025-06-25T14:35:07.623401Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-25T14:35:07.634719Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-25T14:35:07.634812Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:35:07.635090Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:35:07.636411Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 3, at: 72075186224037888 2025-06-25T14:35:07.636724Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:35:07.636790Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:07.636844Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:35:07.636902Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:35:07.636991Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:817:2647], serverId# [3:818:2648], sessionId# [0:0:0] 2025-06-25T14:35:07.637767Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:35:07.638031Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:35:07.638186Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 
2025-06-25T14:35:07.638227Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:07.638306Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-25T14:35:07.638492Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:07.638552Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:35:07.639121Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 1 2025-06-25T14:35:07.639321Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715661, Size: 37, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:35:07.639456Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715661, PendingAcks: 0 2025-06-25T14:35:07.639497Z node 3 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715661, MessageQuota: 0 2025-06-25T14:35:07.640971Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:35:07.641012Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037888 2025-06-25T14:35:07.641403Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:35:07.641439Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:35:07.641473Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for ReadTableScan 2025-06-25T14:35:07.641601Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:07.641643Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:35:07.641676Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 |80.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamInnerJoinSelectAsterisk |80.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ShouldRetryAtFinalStage [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:34:17.711594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 
600.000000s, MaxRate# 1 2025-06-25T14:34:17.711675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:17.711705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:17.711729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:17.711772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:17.711793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:17.711826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:17.711890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:17.712746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:17.713124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:17.797064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:34:17.797134Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:17.815075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:17.815524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:17.815703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:34:17.822134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:17.822550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:17.823272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:17.823561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:17.827369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:17.827574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:17.828947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:17.829021Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:17.829145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:17.829204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:17.829249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:17.829334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:34:17.836383Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:34:17.969320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:17.969543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:17.969789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:17.969852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:17.970063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:17.970152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:17.973647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:17.973888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:17.974121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:17.974187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:17.974224Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:17.974259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:17.977298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:17.977361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:17.977401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:17.979657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:17.979726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:17.979778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:17.979834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:17.983487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:17.987759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:17.987982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:17.988988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:17.989164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:34:17.989229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:17.989496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:34:17.989555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, 
operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:17.989720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:34:17.989793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:34:17.993174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:17.993239Z node 1 :FLAT_TX_SCHEMESHARD ... nerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0022 2025-06-25T14:34:57.343651Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-25T14:34:57.343848Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 70 row count 2 2025-06-25T14:34:57.343948Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Table, is column=0, is olap=0, RowCount 2, DataSize 70 2025-06-25T14:34:57.344069Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-25T14:34:57.344127Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-25T14:34:57.344173Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=0, is column=0, is olap=0, RowCount 0, DataSize 0, with borrowed parts 2025-06-25T14:34:57.344223Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-25T14:34:57.355966Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:35:01.195994Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 70 rowCount 2 cpuUsage 0.0018 2025-06-25T14:35:01.217669Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0016 2025-06-25T14:35:01.267470Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-25T14:35:01.267678Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 70 row count 2 2025-06-25T14:35:01.267768Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Table, is column=0, is olap=0, RowCount 2, DataSize 70 2025-06-25T14:35:01.267882Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-25T14:35:01.267940Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-25T14:35:01.267984Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=0, is column=0, is olap=0, RowCount 0, DataSize 0, with borrowed parts 2025-06-25T14:35:01.268095Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-25T14:35:01.278663Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:35:04.799565Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:797: [Export] [s3] Bootstrap: self# [4:576:2532], attempt# 1 2025-06-25T14:35:04.838640Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:118: [Export] [scanner] Handle TEvExportScan::TEvReset: self# [4:575:2531] 2025-06-25T14:35:04.839042Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:441: [Export] [s3] Handle TEvExportScan::TEvReady: self# [4:576:2532], sender# [4:575:2531] 2025-06-25T14:35:04.839159Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [4:575:2531] 2025-06-25T14:35:04.839369Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:459: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [4:576:2532], sender# [4:575:2531], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } 2025-06-25T14:35:04.839568Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:526: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [4:576:2532], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: [6e3e0a41fdab8add833862f1bd2954c3,1d8dd09e584ce6a47582a31b591900e2,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:28152 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 71137395-CB4F-4442-B94E-4AF00BF4C84F amz-sdk-request: attempt=1 content-length: 459 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv / uploadId=1 2025-06-25T14:35:04.844362Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:623: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [4:576:2532], result# 2025-06-25T14:35:04.844562Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [4:575:2531], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-25T14:35:04.859206Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, 
tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 449 RawX2: 17179871600 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:35:04.859287Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0 2025-06-25T14:35:04.859454Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 449 RawX2: 17179871600 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:35:04.859650Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 449 RawX2: 17179871600 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-25T14:35:04.859731Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:35:04.859781Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:35:04.859828Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:35:04.859880Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710759:0 129 -> 240 2025-06-25T14:35:04.860043Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:35:04.862630Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:35:04.863029Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-25T14:35:04.863089Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-06-25T14:35:04.863226Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-25T14:35:04.863266Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 
2025-06-25T14:35:04.863308Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-25T14:35:04.863344Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-25T14:35:04.863385Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-06-25T14:35:04.863460Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:126:2150] message: TxId: 281474976710759 2025-06-25T14:35:04.863513Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-25T14:35:04.863557Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710759:0 2025-06-25T14:35:04.863590Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710759:0 2025-06-25T14:35:04.863703Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:35:04.866345Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-06-25T14:35:04.866431Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710759 2025-06-25T14:35:04.869796Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:35:04.869860Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:596:2548] TestWaitNotification: OK eventTxId 102 >> DataShardWrite::ExecSQLUpsertImmediate+EvWrite |80.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCC |80.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |80.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |80.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-8 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-9 >> KqpJoinOrder::SortingsWithLookupJoinByPrefix-RemoveLimitOperator |80.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |80.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |80.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |80.3%| [LD] 
{RESULT} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |80.3%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |80.3%| [LD] {RESULT} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |80.3%| [LD] {RESULT} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |80.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |80.3%| [TA] {RESULT} $(B)/ydb/core/persqueue/ut/ut_with_sdk/test-results/unittest/{meta.json ... results_accumulator.log} >> OlapEstimationRowsCorrectness::TPCDS78 >> KqpJoinOrder::Sortings4Year+RemoveLimitOperator >> KqpJoinOrder::SortingsByPrefixWithConstant-RemoveLimitOperator >> KqpJoinOrder::FiveWayJoinStatsOverride+ColumnStore >> KqpScan::RemoteShardScan >> TConsoleTests::TestGetUnknownTenantStatus >> KqpScan::ScanDuringSplit10 >> TModificationsValidatorTests::TestIsValidationRequired_NONE [GOOD] >> TModificationsValidatorTests::TestIsValidationRequired_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIsValidationRequired_TENANTS [GOOD] >> TModificationsValidatorTests::TestIsValidationRequired_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_RemoveItems_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsSameScope_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_TENANTS_AND_NODE_TYPES >> Viewer::ExecuteQueryDoesntExecuteSchemeOperationsInsideTransation [GOOD] >> Viewer::FloatPointJsonQuery >> KqpJoinOrder::TPCHEveryQueryWorks-ColumnStore |80.3%| [TA] $(B)/ydb/core/tx/datashard/ut_erase_rows/test-results/unittest/{meta.json ... results_accumulator.log} >> TVersions::Wreck1 [GOOD] >> TVersions::Wreck1Reverse >> KqpScan::ScanRetryRead >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_TENANTS_AND_NODE_TYPES [GOOD] >> TNetClassifierUpdaterTest::TestGetUpdatesFromHttpServer >> TConsoleTests::TestCreateTenant >> TConsoleConfigTests::TestModifyConfigItem |80.3%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_erase_rows/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_AddItems_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_NONE [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_DOMAIN [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_TENANTS [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsExpandScope_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestIndexAndModificationsShrink_ModifyItemsNarrowScope_DOMAIN [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainAffected_DOMAIN [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainAffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainAffected_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainUnaffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_DomainUnaffected_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainAffected_DOMAIN [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainAffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainAffected_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainUnaffected_TENANTS [GOOD] >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainUnaffected_TENANTS_AND_NODE_TYPES [GOOD] |80.3%| [TA] $(B)/ydb/core/tx/schemeshard/ut_export/test-results/unittest/{meta.json ... results_accumulator.log} >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-14 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-15 |80.3%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export/test-results/unittest/{meta.json ... 
results_accumulator.log} |80.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |80.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |80.3%| [LD] {RESULT} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |80.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |80.4%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |80.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TModificationsValidatorTests::TestComputeAffectedConfigs_All_DomainUnaffected_TENANTS_AND_NODE_TYPES [GOOD] >> Viewer::SelectStringWithBase64Encoding [GOOD] >> Viewer::QueryExecuteScript >> Viewer::JsonAutocompleteSimilarDatabaseNamePOST [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNameLowerCase |80.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-38 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-39 |80.4%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant >> TConsoleConfigTests::TestModifyConfigItem [GOOD] >> TConsoleConfigTests::TestRemoveConfigItem >> BuildStatsHistogram::Many_Mixed [GOOD] >> BuildStatsHistogram::Many_Serial >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-57 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-19 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-20 >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionCreate >> TConsoleConfigTests::TestRemoveConfigItem [GOOD] >> TConsoleConfigTests::TestRemoveConfigItems >> YdbIndexTable::MultiShardTableOneIndexDataColumn [GOOD] >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap >> Viewer::JsonAutocompleteScheme [GOOD] >> Viewer::JsonAutocompleteEmptyColumns >> TConsoleTests::TestGetUnknownTenantStatus [GOOD] >> TConsoleTests::TestGetUnknownTenantStatusExtSubdomain >> DataShardWrite::ExecSQLUpsertImmediate+EvWrite [GOOD] >> DataShardWrite::ExecSQLUpsertImmediate-EvWrite >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-56 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-57 >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionCreate [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClient >> TConsoleConfigTests::TestRemoveConfigItems [GOOD] >> TConsoleConfigTests::TestConfigureOrderConflicts >> KqpSysColV1::StreamInnerJoinSelectAsterisk [GOOD] >> DataShardVolatile::CompactedVolatileChangesCommit [GOOD] >> DataShardVolatile::CompactedVolatileChangesAbort >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClient [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdates >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-9 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-10 >> TConsoleTests::TestCreateTenant [GOOD] >> 
TConsoleTests::TestCreateTenantExtSubdomain >> YdbIndexTable::MultiShardTableOneUniqIndex [GOOD] >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-15 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-16 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamInnerJoinSelectAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 4174, MsgBus: 10768 2025-06-25T14:35:17.352273Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895535059552589:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:17.352361Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001acc/r3tmp/tmpHnkpdo/pdisk_1.dat 2025-06-25T14:35:17.808862Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:17.808947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:17.822634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:17.875046Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:17.875344Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895535059552566:2080] 1750862117350669 != 1750862117350672 TServer::EnableGrpc on GrpcPort 4174, node 1 2025-06-25T14:35:17.980987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:17.986249Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:17.986275Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:17.986482Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10768 2025-06-25T14:35:18.410320Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10768 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:18.599642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:18.615134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:35:18.632673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:18.813198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:18.959904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:19.046204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:20.842097Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895547944456094:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:20.842217Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:21.198575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:21.244903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:21.293068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:21.345346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:21.407886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:21.448138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:21.523345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:21.640652Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895552239424055:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:21.640723Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:21.641067Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895552239424060:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:21.645213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:21.703705Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895552239424062:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:35:21.804841Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895552239424113:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:22.353461Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895535059552589:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:22.353550Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:35:23.822171Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862123843, txId: 281474976710672] shutting down >> TConsoleConfigTests::TestConfigureOrderConflicts [GOOD] >> TConsoleConfigTests::TestGetItems >> Viewer::FloatPointJsonQuery [GOOD] >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes >> RetryPolicy::RetryWithBatching [GOOD] >> TConsoleTests::TestGetUnknownTenantStatusExtSubdomain [GOOD] >> TConsoleTests::TestRestartConsoleAndPools >> TSubDomainTest::DatashardNotRunAtAllWhenSubDomainNodesIsStopped [GOOD] >> TNetClassifierUpdaterTest::TestGetUpdatesFromHttpServer [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsAndTags >> DataShardVolatile::NotCachingAbortingDeletes-UseSink [GOOD] >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-39 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-40 >> TConsoleConfigTests::TestGetItems [GOOD] >> TConsoleConfigTests::TestGetNodeItems >> TJaegerTracingConfiguratorTests::DefaultConfig >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59 >> DataShardWrite::ExecSQLUpsertImmediate-EvWrite [GOOD] >> DataShardWrite::DeleteImmediate >> TConsoleConfigTests::TestGetNodeItems [GOOD] >> TConsoleConfigTests::TestGetNodeConfig ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::DatashardNotRunAtAllWhenSubDomainNodesIsStopped [GOOD] Test command err: 2025-06-25T14:32:48.536491Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894893417136944:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:48.536593Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:48.507625Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894894082113137:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:48.507824Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:48.719058Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519894896663543025:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:32:48.747689Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:32:49.558463Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016be/r3tmp/tmpfbxLok/pdisk_1.dat 2025-06-25T14:32:49.828627Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:49.857172Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:32:49.888530Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:49.921250Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:50.032939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:50.818110Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:32:50.836680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:50.836788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:50.840010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:50.840067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:50.863577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:32:50.863688Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:32:50.880301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:50.889148Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:32:50.923440Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:32:50.924992Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:32:50.925967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:32:50.956774Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26216 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:32:51.732336Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519894894082113235:2143] Handle TEvNavigate describe path dc-1 2025-06-25T14:32:51.856113Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519894906967015606:2459] HANDLE EvNavigateScheme dc-1 2025-06-25T14:32:51.856320Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519894898377080555:2156], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:51.856398Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519894898377080633:2189][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519894898377080555:2156], cookie# 1 2025-06-25T14:32:51.865705Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894898377080648:2189][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894898377080645:2189], cookie# 1 2025-06-25T14:32:51.865773Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894898377080649:2189][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894898377080646:2189], cookie# 1 2025-06-25T14:32:51.865791Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519894898377080650:2189][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894898377080647:2189], cookie# 1 2025-06-25T14:32:51.865824Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894889787145619:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894898377080648:2189], cookie# 1 2025-06-25T14:32:51.865849Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894889787145622:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894898377080649:2189], cookie# 1 2025-06-25T14:32:51.865864Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519894889787145625:2059] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519894898377080650:2189], cookie# 1 2025-06-25T14:32:51.865902Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894898377080648:2189][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894889787145619:2053], cookie# 1 2025-06-25T14:32:51.865919Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894898377080649:2189][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894889787145622:2056], cookie# 1 2025-06-25T14:32:51.865930Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519894898377080650:2189][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894889787145625:2059], cookie# 1 2025-06-25T14:32:51.865961Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894898377080633:2189][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 
Partial: 0 }: sender# [1:7519894898377080645:2189], cookie# 1 2025-06-25T14:32:51.866002Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894898377080633:2189][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:32:51.866020Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894898377080633:2189][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894898377080646:2189], cookie# 1 2025-06-25T14:32:51.866030Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519894898377080633:2189][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:32:51.866051Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519894898377080633:2189][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519894898377080647:2189], cookie# 1 2025-06-25T14:32:51.866077Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519894898377080633:2189][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:32:51.866151Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519894898377080555:2156], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:32:51.892632Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519894900958510424:2110], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:32:51.892712Z node 3 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [3:7519894900958510424:2110], path# /dc-1/.metadata/initialization/migrations, domainOwnerId# 72057594046644480 2025-06-25T14:32:51.892902Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][3:7519894909548445048:2117][/dc-1/.metadata/initialization/migrations] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:32:51.904548Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519894909548445052:2117][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [1:7519894889787145619:2053] 2025-06-25T14:32:51.904603Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519894909548445053:2117][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [1:7519894889787145622:2056] 2025-06-25T14:32:51.904662Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519894909548445048:2117][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [3:7519894909548445049:2117] 2025-06-25T14:32:51.904728Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519894909548445048:2117][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: 
/dc-1/.metadata/initialization/migrations Version: 0 }: sender# [3:7519894909548445050:2117] 2025-06-25T14:32:51.904776Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:7519894909548445048:2117][/dc-1/.metadata/initialization/migrations] Set up state: owner# [3:7519894900958510424:2110], state# { Deleted: 1 Strong: 1 Versi ... ath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:35:24.948804Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519895565916017231:3557], recipient# [7:7519895565916017230:2566], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:25.396525Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519894964620592949:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:25.396679Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519894964620592949:2131], cacheItem# { Subscriber: { Subscriber: [7:7519894968915560855:2554] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:35:25.396772Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519895570210984535:3564], recipient# [7:7519895570210984534:2567], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:25.796752Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7519894973047491401:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 
2025-06-25T14:35:25.800696Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [8:7519894973047491401:2109], cacheItem# { Subscriber: { Subscriber: [8:7519894977342459086:2347] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:35:25.800859Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7519895570047946803:2759], recipient# [8:7519895570047946802:2701], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:25.860522Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7519894973047491401:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:25.860702Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [8:7519894973047491401:2109], cacheItem# { Subscriber: { Subscriber: [8:7519894977342459086:2347] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:35:25.860782Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7519894973047491401:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:25.860860Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [8:7519894973047491401:2109], cacheItem# { Subscriber: { Subscriber: [8:7519894990227361011:2355] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: 
dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:35:25.860945Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7519895570047946806:2760], recipient# [8:7519895570047946804:2702], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:25.861014Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7519895570047946807:2761], recipient# [8:7519895570047946805:2703], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:25.956685Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519894964620592949:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:25.956841Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519894964620592949:2131], cacheItem# { Subscriber: { Subscriber: [7:7519894986095430327:2758] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:35:25.956947Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519895570210984547:3569], recipient# [7:7519895570210984546:2568], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:26.404967Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7519894964620592949:2131], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: 
dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:26.405129Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7519894964620592949:2131], cacheItem# { Subscriber: { Subscriber: [7:7519894968915560855:2554] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:35:26.405268Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7519895574505951845:3570], recipient# [7:7519895574505951844:2569], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdates [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdatesAddRemove >> TJaegerTracingConfiguratorTests::DefaultConfig [GOOD] >> TJaegerTracingConfiguratorTests::GlobalRules >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-20 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-21 >> Viewer::ServerlessNodesPage [GOOD] >> Viewer::ServerlessWithExclusiveNodes >> TConsoleConfigTests::TestGetNodeConfig [GOOD] >> TConsoleConfigTests::TestAutoOrder >> TConsoleTests::TestCreateTenantExtSubdomain [GOOD] >> TConsoleTests::TestCreateSharedTenant >> TJaegerTracingConfiguratorTests::GlobalRules [GOOD] >> TJaegerTracingConfiguratorTests::ExternalTracePlusSampling >> Viewer::JsonAutocompleteSimilarDatabaseNameLowerCase [GOOD] >> Viewer::JsonAutocompleteSchemePOST >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-57 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-58 >> TConfigsCacheTests::TestNoNotificationIfConfigIsCached >> YdbIndexTable::MultiShardTableOneIndexPkOverlap [GOOD] >> KqpScan::ScanRetryRead [GOOD] >> KqpScan::ScanRetryReadRanges >> TConsoleConfigTests::TestAutoOrder [GOOD] >> TConsoleConfigTests::TestAutoSplit >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-10 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-11 >> TJaegerTracingConfiguratorTests::ExternalTracePlusSampling [GOOD] >> TJaegerTracingConfiguratorTests::RequestTypeThrottler >> KqpScan::ScanDuringSplit10 [GOOD] >> KqpScan::ScanDuringSplitThenMerge >> TConsoleTests::TestRestartConsoleAndPools [GOOD] >> TConsoleTests::TestRemoveTenantWithBorrowedStorageUnits >> TConfigsCacheTests::TestNoNotificationIfConfigIsCached [GOOD] >> TConfigsCacheTests::TestFullConfigurationRestore >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientManyUpdatesAddRemove [GOOD] >> 
TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientDeadCausesSubscriptionDeregistration >> TJaegerTracingConfiguratorTests::RequestTypeThrottler [GOOD] >> TJaegerTracingConfiguratorTests::RequestTypeSampler >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-16 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-17 >> TConsoleConfigTests::TestAutoSplit [GOOD] >> TConsoleConfigTests::TestValidation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneIndexPkOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 18535, MsgBus: 3589 2025-06-25T14:33:37.179084Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895103115151186:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:37.179402Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001644/r3tmp/tmpIASO0s/pdisk_1.dat 2025-06-25T14:33:37.805640Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:37.824069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:37.824159Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:37.846209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18535, node 1 2025-06-25T14:33:38.116839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:38.116868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:38.116879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:38.117002Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:38.216455Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3589 TClient is connected to server localhost:3589 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:33:39.236110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:39.250263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:33:39.260872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:39.519447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:39.820874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:39.945196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:42.178561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895103115151186:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:42.178640Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:42.653541Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895124589989172:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:42.653639Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:43.206489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:43.242514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:43.277802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:43.318021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:43.359235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:43.408508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:43.487936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:43.623169Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895128884957138:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:43.623248Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:43.623474Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895128884957143:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:43.627490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:43.647420Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895128884957145:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:33:43.705009Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895128884957196:3434] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:45.305559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:46.305728Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { TraceId: 01jykr4 ... WJiY2UtNDM3NDU0ZmMtZGFhODVmNjQtY2YxYzIyMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.753849Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714528. Ctx: { TraceId: 01jykr7qm46zqsg2z3tdspp06s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTBkZDgxNzctNjgyZDBlOGEtYjEyMWYzNWItZDUzZmQwOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.763438Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714529. Ctx: { TraceId: 01jykr7qmh96smkwzg70annnk2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDdkZWExYy1jZmZjNjFjZC03MjQ3MjdjYy01ZTYwMWVmMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.794688Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714530. Ctx: { TraceId: 01jykr7qp6c3s57v9ar34cn92q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU0MmQyZTEtYWJlMzVhYTktODQ0Mjg3ZGYtNzI0ZGZjYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.799635Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714531. Ctx: { TraceId: 01jykr7qp97767ex6gck0pfq3y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTBkZDgxNzctNjgyZDBlOGEtYjEyMWYzNWItZDUzZmQwOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.807174Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714532. Ctx: { TraceId: 01jykr7qpdft94mj0jewth50wn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yjk1OWJiNS1hZTU4ZGQxOS0zOWRlNGM1Ny0zYzVhNTEyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.809221Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714533. Ctx: { TraceId: 01jykr7qp9cbgwptjd7cqnxd07, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDU2NmJkYmYtZjY0N2QzNzMtZTRhNTUxNWUtOTBiODBiMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.810402Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714534. 
Ctx: { TraceId: 01jykr7qp95kmat904vcgr97x9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWY5MWJiY2UtNDM3NDU0ZmMtZGFhODVmNjQtY2YxYzIyMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.819875Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714535. Ctx: { TraceId: 01jykr7qp9cbgwptjd7cqnxd07, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDU2NmJkYmYtZjY0N2QzNzMtZTRhNTUxNWUtOTBiODBiMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.821062Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714536. Ctx: { TraceId: 01jykr7qp95kmat904vcgr97x9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWY5MWJiY2UtNDM3NDU0ZmMtZGFhODVmNjQtY2YxYzIyMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.839134Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714537. Ctx: { TraceId: 01jykr7qqdfv5p34yq74e2j4r4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDdkZWExYy1jZmZjNjFjZC03MjQ3MjdjYy01ZTYwMWVmMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.839926Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714538. Ctx: { TraceId: 01jykr7qqd72nbs5788tn0v18z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTFiOWU5ODgtOThhZjI2YzctMTk0YjkxY2UtYWVjZjk3NmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.849102Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714539. Ctx: { TraceId: 01jykr7qqd72nbs5788tn0v18z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTFiOWU5ODgtOThhZjI2YzctMTk0YjkxY2UtYWVjZjk3NmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.851870Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714540. Ctx: { TraceId: 01jykr7qqdfv5p34yq74e2j4r4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDdkZWExYy1jZmZjNjFjZC03MjQ3MjdjYy01ZTYwMWVmMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.864443Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714541. Ctx: { TraceId: 01jykr7qr0649v72zs36ayqgwc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU0MmQyZTEtYWJlMzVhYTktODQ0Mjg3ZGYtNzI0ZGZjYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.872356Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714542. Ctx: { TraceId: 01jykr7qrha7wtqgbq4y4w9m22, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yjk1OWJiNS1hZTU4ZGQxOS0zOWRlNGM1Ny0zYzVhNTEyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.881168Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714543. Ctx: { TraceId: 01jykr7qrmd2w6t6w07v5ayk3g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTBkZDgxNzctNjgyZDBlOGEtYjEyMWYzNWItZDUzZmQwOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.885787Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714544. 
Ctx: { TraceId: 01jykr7qrmd2w6t6w07v5ayk3g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTBkZDgxNzctNjgyZDBlOGEtYjEyMWYzNWItZDUzZmQwOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.888893Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714545. Ctx: { TraceId: 01jykr7qrmd2w6t6w07v5ayk3g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTBkZDgxNzctNjgyZDBlOGEtYjEyMWYzNWItZDUzZmQwOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.891423Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714546. Ctx: { TraceId: 01jykr7qs66hw0adk43j7mz3ds, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTFiOWU5ODgtOThhZjI2YzctMTk0YjkxY2UtYWVjZjk3NmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.893060Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714547. Ctx: { TraceId: 01jykr7qrx568ep8tq36qkfzcj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWY5MWJiY2UtNDM3NDU0ZmMtZGFhODVmNjQtY2YxYzIyMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.898131Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714548. Ctx: { TraceId: 01jykr7qrx568ep8tq36qkfzcj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWY5MWJiY2UtNDM3NDU0ZmMtZGFhODVmNjQtY2YxYzIyMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.901087Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714549. Ctx: { TraceId: 01jykr7qsg5tz9fswkecyrc0k6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDdkZWExYy1jZmZjNjFjZC03MjQ3MjdjYy01ZTYwMWVmMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.901681Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714550. Ctx: { TraceId: 01jykr7qsh853hexc0a4ckbx3d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWU0MmQyZTEtYWJlMzVhYTktODQ0Mjg3ZGYtNzI0ZGZjYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.937650Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714551. Ctx: { TraceId: 01jykr7qte04t57dkpqxps5e42, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDU2NmJkYmYtZjY0N2QzNzMtZTRhNTUxNWUtOTBiODBiMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.938908Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714552. Ctx: { TraceId: 01jykr7qth9da6gyvjvad0ks4w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTBkZDgxNzctNjgyZDBlOGEtYjEyMWYzNWItZDUzZmQwOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.960897Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714554. Ctx: { TraceId: 01jykr7qth45jwm8kd6hamypsm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQzMWZlOGItNTg0MGJiYjQtOTViODQ5ZDItYzI4MjA2MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.964502Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714553. 
Ctx: { TraceId: 01jykr7qtr983ky0f5gntrntqw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTFiOWU5ODgtOThhZjI2YzctMTk0YjkxY2UtYWVjZjk3NmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.975676Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714555. Ctx: { TraceId: 01jykr7qvmb5dftq6qa6efsr48, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWY5MWJiY2UtNDM3NDU0ZmMtZGFhODVmNjQtY2YxYzIyMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.984360Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714556. Ctx: { TraceId: 01jykr7qth45jwm8kd6hamypsm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmQzMWZlOGItNTg0MGJiYjQtOTViODQ5ZDItYzI4MjA2MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-25T14:35:26.990797Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714557. Ctx: { TraceId: 01jykr7qvm090be9xrt25a8kms, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yjk1OWJiNS1hZTU4ZGQxOS0zOWRlNGM1Ny0zYzVhNTEyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS 2025-06-25T14:35:26.995870Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714558. Ctx: { TraceId: 01jykr7qvx37zhe74vfeqqek9h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDU2NmJkYmYtZjY0N2QzNzMtZTRhNTUxNWUtOTBiODBiMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:26.996177Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714559. Ctx: { TraceId: 01jykr7qvm090be9xrt25a8kms, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yjk1OWJiNS1hZTU4ZGQxOS0zOWRlNGM1Ny0zYzVhNTEyNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:35:27.006026Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714560. Ctx: { TraceId: 01jykr7qvx37zhe74vfeqqek9h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDU2NmJkYmYtZjY0N2QzNzMtZTRhNTUxNWUtOTBiODBiMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-40 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-41 >> TConfigsCacheTests::TestFullConfigurationRestore [GOOD] >> TConfigsCacheTests::TestConfigurationSaveOnNotification >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientDeadCausesSubscriptionDeregistration [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientReconnectsOnConnectionLoose >> DataShardWrite::DeleteImmediate [GOOD] >> DataShardWrite::CancelImmediate >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-60 >> TJaegerTracingConfiguratorTests::RequestTypeSampler [GOOD] >> TJaegerTracingConfiguratorTests::SamplingSameScope >> TConsoleConfigTests::TestValidation [GOOD] >> TConsoleConfigTests::TestCheckConfigUpdates >> TConfigsCacheTests::TestConfigurationSaveOnNotification [GOOD] >> TConfigsCacheTests::TestOverwrittenConfigurationDoesntCauseNotification >> KqpScan::RemoteShardScan [GOOD] >> KqpScan::ScanDuringSplit >> TConsoleInMemoryConfigSubscriptionTests::TestSubscriptionClientReconnectsOnConnectionLoose [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithKnownConfig >> TConsoleTests::TestRestartConsoleAndPoolsExtSubdomain >> TJaegerTracingConfiguratorTests::SamplingSameScope [GOOD] >> TJaegerTracingConfiguratorTests::ThrottlingByDb >> TConsoleTests::TestCreateSharedTenant [GOOD] >> TConsoleTests::TestCreateServerlessTenant ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes [GOOD] Test command err: 2025-06-25T14:35:08.515462Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:654:2389], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:08.516049Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:08.516372Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:08.516510Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:651:2332], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:08.516781Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:08.517104Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:08.898856Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:09.029109Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:35:09.043485Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-25T14:35:09.464060Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 61620, node 1 TClient is connected to server localhost:22625 2025-06-25T14:35:09.691808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:09.691853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:09.691881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:09.692051Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:35:13.020225Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519895515562109194:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:13.020336Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:13.195170Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519895515562109170:2080] 1750862113019210 != 1750862113019213 2025-06-25T14:35:13.198277Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:13.202189Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:13.202356Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:13.206373Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10085, node 3 2025-06-25T14:35:13.264270Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:13.264305Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:13.264334Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-25T14:35:13.264493Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3256 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:13.632970Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:13.648329Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:35:13.661358Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-25T14:35:13.664819Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-25T14:35:13.668943Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-25T14:35:14.031207Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:15.450364Z node 3 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:15.450441Z node 3 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:16.146109Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519895528447011739:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:16.146111Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519895528447011731:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:16.146209Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:16.149889Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:16.159250Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519895528447011745:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-25T14:35:16.242649Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519895528447011796:2353] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:16.622229Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=MzhlZDQxODYtMzg4NmNmMDAtODdjN2UyMTQtYzU1NjBlYmI=, ActorId: [3:7519895528447011729:2301], ActorState: ExecuteState, TraceId: 01jykr7d9h8cejcf3f8yrfsaq3, Create QueryResponse for error on request, msg: Scheme operations cannot be executed inside transaction test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:19.765983Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:19.843810Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:19.846227Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519895542551621977:2080] 1750862119619273 != 1750862119619276 2025-06-25T14:35:19.858496Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:19.858577Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:19.859640Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19842, node 4 2025-06-25T14:35:19.924897Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:19.924925Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:19.924933Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:19.925053Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23581 2025-06-25T14:35:20.370366 ... 
4d80] received request Name# ListDatabases ok# false data# peer# 2025-06-25T14:35:31.706227Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004d4680] received request Name# RemoveDatabase ok# false data# peer# 2025-06-25T14:35:31.706387Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004d3f80] received request Name# DescribeDatabaseOptions ok# false data# peer# 2025-06-25T14:35:31.706473Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004d3880] received request Name# GetScaleRecommendation ok# false data# peer# 2025-06-25T14:35:31.706626Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004d2380] received request Name# ListEndpoints ok# false data# peer# 2025-06-25T14:35:31.706722Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004d3180] received request Name# WhoAmI ok# false data# peer# 2025-06-25T14:35:31.706874Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004d2a80] received request Name# NodeRegistration ok# false data# peer# 2025-06-25T14:35:31.706970Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004d1c80] received request Name# Scan ok# false data# peer# 2025-06-25T14:35:31.707101Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004d1580] received request Name# GetShardLocations ok# false data# peer# 2025-06-25T14:35:31.707205Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004d0e80] received request Name# DescribeTable ok# false data# peer# 2025-06-25T14:35:31.707343Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004d0780] received request Name# CreateSnapshot ok# false data# peer# 2025-06-25T14:35:31.707483Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004d0080] received request Name# RefreshSnapshot ok# false data# peer# 2025-06-25T14:35:31.707607Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004cf980] received request Name# DiscardSnapshot ok# false data# peer# 2025-06-25T14:35:31.707813Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004cf280] received request Name# List ok# false data# peer# 2025-06-25T14:35:31.707851Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004ceb80] received request Name# RateLimiter/CreateResource ok# false data# peer# 2025-06-25T14:35:31.708097Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004ce480] received request Name# RateLimiter/AlterResource ok# false data# peer# 2025-06-25T14:35:31.708113Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004cdd80] received request Name# RateLimiter/DropResource ok# false data# peer# 2025-06-25T14:35:31.708352Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004c1280] received request Name# RateLimiter/ListResources ok# false data# peer# 2025-06-25T14:35:31.708579Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004c0b80] received request Name# RateLimiter/DescribeResource ok# false data# peer# 2025-06-25T14:35:31.708838Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000348780] received request Name# RateLimiter/AcquireResource ok# false data# peer# 2025-06-25T14:35:31.709118Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004bfd80] received request Name# CreateStream ok# false data# peer# 2025-06-25T14:35:31.709358Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004be880] received request Name# ListStreams ok# false data# peer# 2025-06-25T14:35:31.709589Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004be180] received request Name# DeleteStream ok# false data# peer# 2025-06-25T14:35:31.709622Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004c0480] received request Name# DescribeStream ok# false data# peer# 2025-06-25T14:35:31.709901Z node 5 :GRPC_SERVER 
DEBUG: logger.cpp:36: [0x51b000540780] received request Name# SetWriteQuota ok# false data# peer# 2025-06-25T14:35:31.709912Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004bda80] received request Name# ListShards ok# false data# peer# 2025-06-25T14:35:31.710154Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053cf80] received request Name# UpdateStream ok# false data# peer# 2025-06-25T14:35:31.710160Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004bd380] received request Name# PutRecord ok# false data# peer# 2025-06-25T14:35:31.710392Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004bcc80] received request Name# PutRecords ok# false data# peer# 2025-06-25T14:35:31.710408Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004bc580] received request Name# GetRecords ok# false data# peer# 2025-06-25T14:35:31.710649Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004bbe80] received request Name# GetShardIterator ok# false data# peer# 2025-06-25T14:35:31.710716Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004b6380] received request Name# SubscribeToShard ok# false data# peer# 2025-06-25T14:35:31.710910Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004e1880] received request Name# DescribeLimits ok# false data# peer# 2025-06-25T14:35:31.710976Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004b7f80] received request Name# DescribeStreamSummary ok# false data# peer# 2025-06-25T14:35:31.711138Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000528d80] received request Name# DecreaseStreamRetentionPeriod ok# false data# peer# 2025-06-25T14:35:31.711239Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000527f80] received request Name# IncreaseStreamRetentionPeriod ok# false data# peer# 2025-06-25T14:35:31.711376Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000527180] received request Name# UpdateShardCount ok# false data# peer# 2025-06-25T14:35:31.711492Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000526380] received request Name# UpdateStreamMode ok# false data# peer# 2025-06-25T14:35:31.711632Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000525580] received request Name# RegisterStreamConsumer ok# false data# peer# 2025-06-25T14:35:31.711745Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000524e80] received request Name# DeregisterStreamConsumer ok# false data# peer# 2025-06-25T14:35:31.711869Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000523980] received request Name# DescribeStreamConsumer ok# false data# peer# 2025-06-25T14:35:31.711957Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000521d80] received request Name# ListStreamConsumers ok# false data# peer# 2025-06-25T14:35:31.712131Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000520f80] received request Name# AddTagsToStream ok# false data# peer# 2025-06-25T14:35:31.712197Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000520880] received request Name# DisableEnhancedMonitoring ok# false data# peer# 2025-06-25T14:35:31.712421Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000535f80] received request Name# EnableEnhancedMonitoring ok# false data# peer# 2025-06-25T14:35:31.712462Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00051fa80] received request Name# ListTagsForStream ok# false data# peer# 2025-06-25T14:35:31.712669Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053ac80] received request Name# MergeShards ok# false data# peer# 2025-06-25T14:35:31.712714Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000539e80] received request Name# RemoveTagsFromStream ok# false 
data# peer# 2025-06-25T14:35:31.712902Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000539080] received request Name# SplitShard ok# false data# peer# 2025-06-25T14:35:31.712943Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000522480] received request Name# StartStreamEncryption ok# false data# peer# 2025-06-25T14:35:31.713149Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053dd80] received request Name# StopStreamEncryption ok# false data# peer# 2025-06-25T14:35:31.713190Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000541c80] received request Name# SelfCheck ok# false data# peer# 2025-06-25T14:35:31.713390Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004e8f80] received request Name# NodeCheck ok# false data# peer# 2025-06-25T14:35:31.713432Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000526a80] received request Name# CreateSession ok# false data# peer# 2025-06-25T14:35:31.713640Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000525c80] received request Name# DeleteSession ok# false data# peer# 2025-06-25T14:35:31.713669Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00051ec80] received request Name# AttachSession ok# false data# peer# 2025-06-25T14:35:31.713895Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000524780] received request Name# BeginTransaction ok# false data# peer# 2025-06-25T14:35:31.713954Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000521680] received request Name# CommitTransaction ok# false data# peer# 2025-06-25T14:35:31.714136Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000520180] received request Name# RollbackTransaction ok# false data# peer# 2025-06-25T14:35:31.714250Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0004e7380] received request Name# ExecuteQuery ok# false data# peer# 2025-06-25T14:35:31.714394Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000528680] received request Name# ExecuteScript ok# false data# peer# 2025-06-25T14:35:31.714495Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000527880] received request Name# FetchScriptResults ok# false data# peer# 2025-06-25T14:35:31.714638Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000535880] received request Name# ExecuteTabletMiniKQL ok# false data# peer# 2025-06-25T14:35:31.714716Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000523280] received request Name# ChangeTabletSchema ok# false data# peer# 2025-06-25T14:35:31.714889Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000535180] received request Name# RestartTablet ok# false data# peer# 2025-06-25T14:35:31.714961Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00051f380] received request Name# CreateLogStore ok# false data# peer# 2025-06-25T14:35:31.715138Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053b380] received request Name# DescribeLogStore ok# false data# peer# 2025-06-25T14:35:31.715225Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000538280] received request Name# DropLogStore ok# false data# peer# 2025-06-25T14:35:31.715390Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053c180] received request Name# AlterLogStore ok# false data# peer# 2025-06-25T14:35:31.715478Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053a580] received request Name# CreateLogTable ok# false data# peer# 2025-06-25T14:35:31.715678Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000522b80] received request Name# DescribeLogTable ok# false data# peer# 2025-06-25T14:35:31.715746Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053c880] received request Name# DropLogTable ok# false data# peer# 
2025-06-25T14:35:31.715929Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00053e480] received request Name# AlterLogTable ok# false data# peer# 2025-06-25T14:35:31.715990Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000540e80] received request Name# Login ok# false data# peer# 2025-06-25T14:35:31.716222Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000542380] received request Name# DescribeReplication ok# false data# peer# 2025-06-25T14:35:31.716265Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000543180] received request Name# DescribeView ok# false data# peer# ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> RetryPolicy::RetryWithBatching [GOOD] Test command err: 2025-06-25T14:29:19.789356Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:19.789386Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:19.789405Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:29:19.789902Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-25T14:29:19.789947Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:19.789965Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:19.790958Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.005094s 2025-06-25T14:29:19.792005Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-25T14:29:19.792027Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:19.792039Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:19.792070Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.005251s 2025-06-25T14:29:19.792414Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. 
Description: 2025-06-25T14:29:19.792435Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:19.792449Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:29:19.792494Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.006662s 2025-06-25T14:29:19.824945Z :TWriteSession_TestPolicy INFO: Random seed for debugging is 1750861759824887 2025-06-25T14:29:20.309700Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519893999601486238:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:20.309765Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:29:20.403211Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519894000721998935:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:29:20.404219Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001872/r3tmp/tmpuVNxJB/pdisk_1.dat 2025-06-25T14:29:20.709270Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:29:20.757807Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:29:21.309134Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:21.351860Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:29:21.419875Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:29:21.435978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:21.436067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:21.433002Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:29:21.436341Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:29:21.440680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:29:21.440772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:29:21.453542Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:29:21.454638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:29:21.455469Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12719, node 1 2025-06-25T14:29:21.596865Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001872/r3tmp/yandexDJTAqX.tmp 2025-06-25T14:29:21.596891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001872/r3tmp/yandexDJTAqX.tmp 2025-06-25T14:29:21.597027Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001872/r3tmp/yandexDJTAqX.tmp 2025-06-25T14:29:21.597172Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:29:21.706490Z INFO: TTestServer started on Port 61192 GrpcPort 12719 TClient is connected to server localhost:61192 PQClient connected to localhost:12719 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:29:22.161800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:29:22.257657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-25T14:29:24.745094Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894017901868425:2274], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:24.745229Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519894017901868417:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:24.745394Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:29:24.752949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:29:24.776383Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519894017901868431:2275], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:29:24.913673Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519894017901868459:2133] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:29:25.266434Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519894017901868474:2279], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:29:25.267093Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519894016781356307:2305], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:29:25.267431Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NGZjYWQxZGUtYmI0YjMyODktNzg4MDlhYmItZDM5NWJjZTA=, ActorId: [1:7519894016781356268:2297], ActorState: ExecuteState, TraceId: 01jykqwpa09w0c0tne66br23h1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:29:25.268501Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=OWQ4NzdjYmMtZWM0Y2MyZjctZDI2ZDZkNWMtMmM5MWQ1MmE=, ActorId: [2:7519894017901868415:2270], ActorState: ExecuteState, TraceId: 01jykqwp44767p822z9jpffmjr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:29:25.269760Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check ... 6224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:35:24.374967Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2025-06-25T14:35:24.374988Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:35:24.375021Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-25T14:35:24.375045Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:35:24.375074Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 3 is stored on disk 2025-06-25T14:35:24.375097Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:35:24.375126Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 5, partNo: 0, Offset: 4 is stored on disk 2025-06-25T14:35:24.375151Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-25T14:35:24.375180Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 6, partNo: 0, Offset: 5 is stored on disk 2025-06-25T14:35:24.375202Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:35:24.375231Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 6 is stored on disk 2025-06-25T14:35:24.375254Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:35:24.375284Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 7 is stored on disk 2025-06-25T14:35:24.375305Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:35:24.375336Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 8 is stored on disk 2025-06-25T14:35:24.375355Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:35:24.375385Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 9 is stored on disk 2025-06-25T14:35:24.375654Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:35:24.375696Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-25T14:35:24.375763Z node 17 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=1208, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:35:24.375935Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-25T14:35:24.376080Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:35:24.376576Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 Topic 'rt3.dc1--test-topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 10 max time lag 0ms effective offset 0 2025-06-25T14:35:24.376897Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 1208 count 10 last offset 0, current partition end offset: 10 2025-06-25T14:35:24.376933Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-25T14:35:24.376991Z node 17 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 10 parts_count 0 source 1 size 1208 accessed 0 times before, last time 2025-06-25T14:35:24.000000Z 2025-06-25T14:35:24.377014Z node 17 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-25T14:35:24.377067Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-25T14:35:24.377338Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 10 count 10 size 1188 from pos 0 cbcount 10 2025-06-25T14:35:24.377423Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp done, result 1750862124356 queuesize 0 startOffset 0 2025-06-25T14:35:24.378433Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 10 parts 0 suffix '63' size 1208 2025-06-25T14:35:24.378513Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 0 offset 0 partno 0 count 10 parts 0 suffix '63' 2025-06-25T14:35:24.385918Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session got write response: sequence_numbers: 1 sequence_numbers: 2 sequence_numbers: 3 sequence_numbers: 4 sequence_numbers: 5 sequence_numbers: 6 sequence_numbers: 7 sequence_numbers: 8 sequence_numbers: 9 sequence_numbers: 10 offsets: 0 offsets: 1 offsets: 2 offsets: 3 offsets: 4 offsets: 5 offsets: 6 offsets: 7 offsets: 8 offsets: 9 already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false write_statistics { persist_duration_ms: 18 queued_in_partition_duration_ms: 3 } 2025-06-25T14:35:24.385999Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: acknoledged message 1 2025-06-25T14:35:24.386052Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: acknoledged message 2 2025-06-25T14:35:24.386083Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: acknoledged message 3 2025-06-25T14:35:24.386111Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: acknoledged message 4 2025-06-25T14:35:24.386139Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: acknoledged message 5 2025-06-25T14:35:24.386169Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: acknoledged message 6 2025-06-25T14:35:24.386195Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: acknoledged message 7 2025-06-25T14:35:24.386219Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: acknoledged message 8 2025-06-25T14:35:24.386276Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: acknoledged message 9 2025-06-25T14:35:24.386313Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: acknoledged message 10 2025-06-25T14:35:24.386783Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: close. 
Timeout = 0 ms 2025-06-25T14:35:24.386836Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session will now close 2025-06-25T14:35:24.386888Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: aborting 2025-06-25T14:35:24.387381Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:35:24.387444Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0] Write session: destroy 2025-06-25T14:35:24.388701Z node 17 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0 grpc read done: success: 0 data: 2025-06-25T14:35:24.388734Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0 grpc read failed 2025-06-25T14:35:24.388777Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0 grpc closed 2025-06-25T14:35:24.388804Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message-group-id|814524e5-bee514bc-47303fef-e533cba1_0 is DEAD 2025-06-25T14:35:24.389714Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:35:24.389861Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [17:7519895562701448073:2590] destroyed 2025-06-25T14:35:24.389922Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
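For context on the "need more data for compaction" DEBUG record above (partition_compaction.cpp:175): the message reports the data accumulated so far against two limits, cumulativeSizeLimit=8388608 bytes and bodyKeysCountLimit=300 keys, and compaction is deferred until one of them is reached. The snippet below is a minimal, self-contained C++ sketch of that threshold check, written for illustration only; the type and function names (TCompactionInput, NeedsCompaction, etc.) are hypothetical and do not reproduce the actual YDB implementation.

#include <cstdint>
#include <iostream>

// Hypothetical input/limit structures mirroring the fields printed in the log line.
struct TCompactionInput {
    uint64_t CumulativeSize = 0;   // bytes accumulated in the partition body (cumulativeSize)
    uint64_t BodyKeysCount = 0;    // body keys accumulated (count)
};

struct TCompactionLimits {
    uint64_t CumulativeSizeLimit = 8u * 1024 * 1024;  // 8388608 in the log
    uint64_t BodyKeysCountLimit = 300;                // 300 in the log
};

// Compaction starts only when accumulated size or key count reaches its limit;
// otherwise the partition keeps collecting data, which is what the DEBUG line reports.
bool NeedsCompaction(const TCompactionInput& in, const TCompactionLimits& limits) {
    return in.CumulativeSize >= limits.CumulativeSizeLimit
        || in.BodyKeysCount >= limits.BodyKeysCountLimit;
}

int main() {
    TCompactionInput in{/*CumulativeSize=*/1208, /*BodyKeysCount=*/1};  // values from the log record
    TCompactionLimits limits;
    std::cout << (NeedsCompaction(in, limits) ? "compact" : "need more data for compaction") << std::endl;
    return 0;
}

With the values from the record (1208 bytes, 1 key) neither limit is reached, so the sketch prints "need more data for compaction", matching the logged decision.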
>> Viewer::JsonAutocompleteEmptyColumns [GOOD] >> Viewer::JsonAutocompleteColumnsPOST >> TConsoleConfigTests::TestCheckConfigUpdates [GOOD] >> TConsoleConfigTests::TestManageValidators >> Viewer::QueryExecuteScript [FAIL] >> Viewer::Plan2SvgOK >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-21 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-22 >> TConfigsCacheTests::TestOverwrittenConfigurationDoesntCauseNotification [GOOD] >> TConfigsCacheTests::TestConfigurationChangeSensor >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithKnownConfig [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPending >> TJaegerTracingConfiguratorTests::ThrottlingByDb [GOOD] >> TJaegerTracingConfiguratorTests::SamplingByDb >> DataShardVolatile::CompactedVolatileChangesAbort [GOOD] >> TConsoleConfigTests::TestManageValidators [GOOD] >> TConsoleConfigTests::TestDryRun >> TConsoleConfigSubscriptionTests::TestAddConfigSubscription >> TConfigsCacheTests::TestConfigurationChangeSensor [GOOD] >> TConfigsDispatcherTests::TestSubscriptionNotification >> TConsoleTests::TestRemoveTenantWithBorrowedStorageUnits [GOOD] >> TConsoleTests::TestListTenants >> TJaegerTracingConfiguratorTests::SamplingByDb [GOOD] >> TJaegerTracingConfiguratorTests::SharedThrottlingLimits >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-58 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-59 >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin-NotNull >> TConfigsDispatcherTests::TestSubscriptionNotification [GOOD] >> TConfigsDispatcherTests::TestSubscriptionNotificationForNewSubscriberAfterUpdate >> TConsoleConfigTests::TestDryRun [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlWithoutFlag >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-11 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-12 |80.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |80.4%| [LD] {RESULT} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut >> TConsoleConfigSubscriptionTests::TestAddConfigSubscription [GOOD] >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscription >> TJaegerTracingConfiguratorTests::SharedThrottlingLimits [GOOD] >> TJaegerTracingConfiguratorTests::SharedSamplingLimits >> TConfigsDispatcherTests::TestSubscriptionNotificationForNewSubscriberAfterUpdate [GOOD] >> TConfigsDispatcherTests::TestSubscriptionNotificationForNewSubscriberDuringUpdate >> DataShardWrite::CancelImmediate [GOOD] >> DataShardWrite::DeletePrepared+Volatile >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-17 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-18 >> TJaegerTracingConfiguratorTests::SharedSamplingLimits [GOOD] >> TLogSettingsConfiguratorTests::TestNoChanges |80.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat |80.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-60 
[GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61 >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlWithoutFlag [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestart >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-41 [GOOD] >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscription [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-42 >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscriptions >> TConfigsDispatcherTests::TestSubscriptionNotificationForNewSubscriberDuringUpdate [GOOD] >> TConfigsDispatcherTests::TestRemoveSubscription |80.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller |80.4%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller >> TConsoleTests::TestRestartConsoleAndPoolsExtSubdomain [GOOD] >> TConsoleTests::TestSetDefaultStorageUnitsQuota >> BuildStatsHistogram::Many_Serial [GOOD] >> TConsoleTests::TestCreateServerlessTenant [GOOD] >> TConsoleTests::TestCreateServerlessTenantWrongSharedDb >> Viewer::JsonAutocompleteSchemePOST [GOOD] >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck [GOOD] >> TLogSettingsConfiguratorTests::TestNoChanges [GOOD] >> TLogSettingsConfiguratorTests::TestAddComponentEntries >> TConfigsDispatcherTests::TestRemoveSubscription [GOOD] >> TConfigsDispatcherTests::TestRemoveSubscriptionWhileUpdateInProcess >> TConsoleConfigSubscriptionTests::TestRemoveConfigSubscriptions [GOOD] >> TConsoleConfigSubscriptionTests::TestListConfigSubscriptions >> KqpScan::ScanRetryReadRanges [GOOD] >> TConsoleTests::TestListTenants [GOOD] >> TConsoleTests::TestListTenantsExtSubdomain >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPending [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPendingExtSubdomain >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestart [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestartSimplified >> TLogSettingsConfiguratorTests::TestAddComponentEntries [GOOD] >> TLogSettingsConfiguratorTests::TestRemoveComponentEntries >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-22 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-23 |80.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut >> TConfigsDispatcherTests::TestRemoveSubscriptionWhileUpdateInProcess [GOOD] >> TConfigsDispatcherTests::TestEmptyChangeCausesNoNotification |80.4%| [LD] {RESULT} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |80.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |80.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |80.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonAutocompleteSchemePOST [GOOD] Test command err: 2025-06-25T14:35:00.893006Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:121:2167], Scheduled retry 
for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:00.893350Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:00.893545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:01.199196Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 19123, node 1 TClient is connected to server localhost:18847 2025-06-25T14:35:09.038335Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:287:2330], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:09.038594Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:09.038704Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:09.331334Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 TServer::EnableGrpc on GrpcPort 20176, node 2 TClient is connected to server localhost:15311 2025-06-25T14:35:18.007334Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:299:2341], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:18.007704Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:18.007872Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:18.390396Z node 3 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 3 Type# 268639257 TServer::EnableGrpc on GrpcPort 22991, node 3 TClient is connected to server localhost:14201 2025-06-25T14:35:28.396130Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:117:2163], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:28.396751Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:28.396845Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:28.761173Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 11554, node 4 TClient is connected to server localhost:20212 2025-06-25T14:35:41.097906Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:284:2327], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:41.098493Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:41.098783Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:41.481223Z node 5 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 5 Type# 268639257 TServer::EnableGrpc on GrpcPort 11180, node 5 TClient is connected to server localhost:23000 |80.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |80.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |80.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup >> TConsoleConfigSubscriptionTests::TestListConfigSubscriptions [GOOD] >> TConsoleConfigSubscriptionTests::TestReplaceConfigSubscriptions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_scan/unittest >> KqpScan::ScanRetryReadRanges [GOOD] Test command err: 2025-06-25T14:35:25.209257Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:25.209877Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:25.209986Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:25.210139Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:25.210185Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:25.210255Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001590/r3tmp/tmpvy6U3Y/pdisk_1.dat 2025-06-25T14:35:25.694634Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:25.943431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:26.090712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:26.090872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:26.100228Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:26.102843Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:26.126612Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:35:26.127562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:26.127972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:26.466241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:27.178911Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1315:2791], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:27.179033Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1326:2796], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:27.179451Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:27.185481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:27.321239Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:27.321376Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:27.710041Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1329:2799], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:35:27.885365Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1454:2869] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:29.049118Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykr7r28bmabvdy6mpah1qrz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTgxYzc2NDItYWE3NzNlOTItZGQ4NDgwZi1lODEzODc4OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root -- nodeId: 2 2025-06-25T14:35:29.975309Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykr7sy8cgn571wfv3zmqzbc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjA3MDBkYWYtZGY0YzQzYjAtYTYwYzViYTAtZWEyYjk4NGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root -- EvScan [1:1533:2921] -> [2:1489:2395] -- EvScanData from [2:1537:2402]: pass 2025-06-25T14:35:30.890195Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykr7sy8cgn571wfv3zmqzbc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjA3MDBkYWYtZGY0YzQzYjAtYTYwYzViYTAtZWEyYjk4NGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root -- EvStreamData: {"ResultSet":{"columns":[{"name":"column0","type":{"optional_type":{"item":{"type_id":4}}}}],"rows":[{"items":[{"uint64_value":596400}]}]},"SeqNo":1,"QueryResultIndex":0,"ChannelId":1,"VirtualTimestamp":{"Step":2000,"TxId":281474976715661},"Finished":true} 2025-06-25T14:35:30.894010Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 2000, txId: 281474976715661] shutting down 2025-06-25T14:35:38.913760Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:38.913916Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:38.914121Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:38.914385Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:38.914791Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:38.914884Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001590/r3tmp/tmpsybYEO/pdisk_1.dat 2025-06-25T14:35:39.663926Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:39.855540Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:39.964790Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:39.964889Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:39.967937Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:39.968027Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:39.982216Z node 3 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-25T14:35:39.982613Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:39.982920Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:40.316949Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:41.030743Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1317:2793], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:41.030855Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1328:2798], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:41.030946Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:41.062902Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:41.233183Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:41.234515Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:41.685662Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:1331:2801], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:35:41.805808Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:1458:2873] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:43.652096Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykr85k44jh0t63dqz6x937j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODdkNmM3NWYtZTI4ZTk5YWItM2NmYjQ5MTktYTY5N2E4ZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root -- nodeId: 4 2025-06-25T14:35:45.041296Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykr88715wk2c2vd3r8wbk2a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Mjg5ZDMyNmYtNGVhMTI1OWMtZjk3Nzc1NjctYzJhNDllMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root -- EvScan [3:1538:2926] -> [4:1493:2395] -- EvScanData from [4:1542:2402]: pass -- EvStreamData: {"ResultSet":{"columns":[{"name":"key","type":{"optional_type":{"item":{"type_id":2}}}},{"name":"value","type":{"optional_type":{"item":{"type_id":2}}}}],"rows":[{"items":[{"uint32_value":2},{"uint32_value":22}]},{"items":[{"uint32_value":21},{"uint32_value":2121}]},{"items":[{"uint32_value":22},{"uint32_value":2222}]},{"items":[{"uint32_value":23},{"uint32_value":2323}]},{"items":[{"uint32_value":24},{"uint32_value":2424}]},{"items":[{"uint32_value":25},{"uint32_value":2525}]},{"items":[{"uint32_value":26},{"uint32_value":2626}]},{"items":[{"uint32_value":27},{"uint32_value":2727}]},{"items":[{"uint32_value":28},{"uint32_value":2828}]},{"items":[{"uint32_value":29},{"uint32_value":2929}]},{"items":[{"uint32_value":40},{"uint32_value":4040}]},{"items":[{"uint32_value":41},{"uint32_value":4141}]},{"items":[{"uint32_value":42},{"uint32_value":4242}]},{"items":[{"uint32_value":43},{"uint32_value":4343}]},{"items":[{"uint32_value":44},{"uint32_value":4444}]},{"items":[{"uint32_value":45},{"uint32_value":4545}]},{"items":[{"uint32_value":46},{"uint32_value":4646}]},{"items":[{"uint32_value":47},{"uint32_value":4747}]},{"items":[{"uint32_value":48},{"uint32_value":4848}]},{"items":[{"uint32_value":49},{"uint32_value":4949}]},{"items":[{"uint32_value":50},{"uint32_value":5050}]}]},"SeqNo":1,"QueryResultIndex":0,"ChannelId":2,"VirtualTimestamp":{"Step":2000,"TxId":281474976715661},"Finished":false} -- EvStreamData: {"ResultSet":{"columns":[{"name":"key","type":{"optional_type":{"item":{"type_id":2}}}},{"name":"value","type":{"optional_type":{"item":{"type_id":2}}}}]},"SeqNo":2,"QueryResultIndex":0,"ChannelId":2,"VirtualTimestamp":{"Step":2000,"TxId":281474976715661},"Finished":true} 2025-06-25T14:35:45.053680Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 2000, txId: 281474976715661] shutting down >> TConfigsDispatcherTests::TestEmptyChangeCausesNoNotification [GOOD] >> TConfigsDispatcherTests::TestYamlAndNonYamlCoexist ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_volatile/unittest >> DataShardVolatile::CompactedVolatileChangesAbort [GOOD] Test command err: 2025-06-25T14:30:51.879876Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] 
[TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:30:51.880000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:30:51.880043Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014fb/r3tmp/tmpfXSt5y/pdisk_1.dat 2025-06-25T14:30:52.237810Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:30:52.241383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:52.284631Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:52.290771Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861847910115 != 1750861847910119 2025-06-25T14:30:52.340578Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:30:52.341847Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:30:52.342512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:52.342645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:52.357887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:52.559006Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T14:30:52.559094Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:30:52.559270Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:603:2511] 2025-06-25T14:30:52.739591Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:603:2511] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T14:30:52.739726Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:603:2511] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:30:52.740475Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:603:2511] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:30:52.740601Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:603:2511] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:30:52.740963Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:603:2511] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:30:52.741177Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:603:2511] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 1000 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:30:52.741381Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:603:2511] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:30:52.743220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:52.743786Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:603:2511] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:30:52.744476Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:603:2511] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:30:52.744556Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:603:2511] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T14:30:52.781655Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:619:2526], Recipient [1:628:2532]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:30:52.782997Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:619:2526], Recipient [1:628:2532]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:30:52.783557Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:628:2532] 2025-06-25T14:30:52.783862Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:30:52.795169Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:619:2526], Recipient [1:628:2532]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:30:52.836119Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:30:52.836348Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:30:52.838288Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:30:52.838422Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:30:52.838499Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:30:52.838929Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T14:30:52.839106Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:30:52.839199Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:644:2532] in generation 1 2025-06-25T14:30:52.850872Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:30:52.971989Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:30:52.972205Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:30:52.972331Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:646:2542] 2025-06-25T14:30:52.972364Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:30:52.972398Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:30:52.972427Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:30:52.972675Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:628:2532], Recipient [1:628:2532]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:30:52.972740Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:30:52.973132Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:30:52.973232Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:30:52.973275Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:30:52.973308Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:30:52.973365Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:30:52.973398Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:30:52.973431Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:30:52.973468Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:30:52.973512Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:30:52.973989Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:633:2534], Recipient [1:628:2532]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:52.974036Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:52.974076Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:624:2529], serverId# [1:633:2534], 
sessionId# [0:0:0] 2025-06-25T14:30:52.974213Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:633:2534] 2025-06-25T14:30:52.974251Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:30:52.974362Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:30:52.974569Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:30:52.974621Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:30:52.974700Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:30:52.974744Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14 ... RD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:35:37.706452Z node 26 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [26:930:2736], serverId# [26:931:2737], sessionId# [0:0:0] 2025-06-25T14:35:37.706687Z node 26 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553210, Sender [26:929:2735], Recipient [26:658:2546]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 2 } CompactBorrowed: false 2025-06-25T14:35:37.706860Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} queued, type NKikimr::NDataShard::TDataShard::TTxCompactTable 2025-06-25T14:35:37.707011Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T14:35:37.707177Z node 26 :TABLET_EXECUTOR DEBUG: TCompactionLogic PrepareForceCompaction for 72075186224037888 table 1001, mode Full, forced state None, forced mode Full 2025-06-25T14:35:37.712542Z node 26 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 1 of 72075186224037888 tableId# 2 localTid# 1001, requested from [26:929:2735], partsCount# 0, memtableSize# 656, memtableWaste# 3952, memtableRows# 2 2025-06-25T14:35:37.712804Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} hope 1 -> done Change{16, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T14:35:37.712975Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:16} Tx{28, NKikimr::NDataShard::TDataShard::TTxCompactTable} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T14:35:37.713476Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy PrepareCompaction for 72075186224037888: task 1, edge 9223372036854775807/0, generation 0 2025-06-25T14:35:37.713596Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:16} starting compaction 2025-06-25T14:35:37.714161Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} starting Scan{1 on 1001, Compact{72075186224037888.1.16, eph 1}} 2025-06-25T14:35:37.714365Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} 
started compaction 1 2025-06-25T14:35:37.714496Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy PrepareCompaction for 72075186224037888 started compaction 1 generation 0 ... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 14904338818803045619 2025-06-25T14:35:37.722670Z node 26 :TABLET_EXECUTOR INFO: Leader{72075186224037888:1:17} Compact 1 on TGenCompactionParams{1001: gen 0 epoch +inf, 0 parts} step 16, product {tx status + 1 parts epoch 2} done 2025-06-25T14:35:37.723114Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CompactionFinished for 72075186224037888: compaction 1, generation 0 2025-06-25T14:35:37.723304Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CheckGeneration for 72075186224037888 generation 1, state Free, final id 0, final level 0 2025-06-25T14:35:37.723411Z node 26 :TABLET_EXECUTOR DEBUG: TGenCompactionStrategy CheckGeneration for 72075186224037888 generation 3, state Free, final id 0, final level 0 2025-06-25T14:35:37.723995Z node 26 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 1, ts 1970-01-01T00:00:01.544079Z 2025-06-25T14:35:37.724224Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} queued, type NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs 2025-06-25T14:35:37.725390Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T14:35:37.725590Z node 26 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 1, front# 1 2025-06-25T14:35:37.725768Z node 26 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001 sending TEvCompactTableResult to# [26:929:2735]pathId# [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T14:35:37.726727Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} hope 1 -> done Change{17, redo 83b alter 0b annex 0, ~{ 27 } -{ }, 0 gb} 2025-06-25T14:35:37.726899Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:18} Tx{29, NKikimr::NDataShard::TDataShard::TTxPersistFullCompactionTs} release 4194304b of static, Memory{0 dyn 0} ... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 13056541747046645770 ... blocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR cookie 14750755279968837991 ========= Starting an immediate read ========= 2025-06-25T14:35:38.115579Z node 26 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykr82cm1f04cy2y1n1m13ha, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=26&id=Nzc4YzFkNy04MDJhZmM1Ni1iMDhlZWUzYS0xZGFiNzZmYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:35:38.122571Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72075186224037888] send [26:862:2681] 2025-06-25T14:35:38.127128Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [26:862:2681] 2025-06-25T14:35:38.127713Z node 26 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [26:955:2743], Recipient [26:658:2546]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false KeysSize: 1 2025-06-25T14:35:38.128003Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline 2025-06-25T14:35:38.128171Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T14:35:38.128391Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T14:35:38.128515Z node 26 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1545/281474976715662 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-25T14:35:38.128630Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v1545/18446744073709551615 2025-06-25T14:35:38.128794Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CheckRead 2025-06-25T14:35:38.129013Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-25T14:35:38.129123Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CheckRead 2025-06-25T14:35:38.129222Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T14:35:38.129308Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T14:35:38.129405Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037888 2025-06-25T14:35:38.129510Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-25T14:35:38.129542Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T14:35:38.129565Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T14:35:38.129588Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit ExecuteRead 2025-06-25T14:35:38.129806Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: 
false } 2025-06-25T14:35:38.130125Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is DelayComplete 2025-06-25T14:35:38.130192Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T14:35:38.130297Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T14:35:38.130385Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-06-25T14:35:38.130438Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-25T14:35:38.130487Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T14:35:38.130538Z node 26 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-06-25T14:35:38.130633Z node 26 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T14:35:38.130812Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{18, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T14:35:38.130969Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:19} Tx{30, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T14:35:38.242373Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-25T14:35:38.242590Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T14:35:38.242922Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{12, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-25T14:35:38.243084Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:13} Tx{20, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T14:35:38.243998Z node 26 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:14} commited cookie 1 for step 13 2025-06-25T14:35:38.244392Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [26:493:2437] 2025-06-25T14:35:38.244512Z node 26 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [26:493:2437] >> TLogSettingsConfiguratorTests::TestRemoveComponentEntries [GOOD] >> TLogSettingsConfiguratorTests::TestChangeDefaults >> TConsoleInMemoryConfigSubscriptionTests::TestConsoleRestartSimplified [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestComplexYamlConfigChanges |80.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |80.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |80.5%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_volatile/unittest >> DataShardVolatile::GracefulShardRestartNoEarlyReadSetAck [GOOD] Test command err: 2025-06-25T14:30:51.714263Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:30:51.714435Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:30:51.714506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00150c/r3tmp/tmp0f7Ush/pdisk_1.dat 2025-06-25T14:30:52.120888Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:30:52.124963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:30:52.173773Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:52.184902Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750861847622052 != 1750861847622056 2025-06-25T14:30:52.232839Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:30:52.234167Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:30:52.234797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:52.234941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:52.251730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:52.462445Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T14:30:52.462529Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:30:52.462719Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:603:2511] 2025-06-25T14:30:52.625228Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:603:2511] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value2" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T14:30:52.625351Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:603:2511] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:30:52.626162Z node 1 :TX_PROXY DEBUG: 
schemereq.cpp:1660: Actor# [1:603:2511] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:30:52.626293Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:603:2511] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:30:52.626697Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:603:2511] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:30:52.626925Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:603:2511] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 1000 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:30:52.627132Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:603:2511] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:30:52.629230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:52.629890Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:603:2511] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:30:52.630637Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:603:2511] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:30:52.630720Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:603:2511] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T14:30:52.666313Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:619:2526], Recipient [1:628:2532]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:30:52.667834Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:619:2526], Recipient [1:628:2532]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:30:52.668458Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:628:2532] 2025-06-25T14:30:52.668830Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:30:52.680033Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:619:2526], Recipient [1:628:2532]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:30:52.717920Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:30:52.718094Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:30:52.720253Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:30:52.720380Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:30:52.720449Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 
2025-06-25T14:30:52.720919Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:30:52.721090Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:30:52.721196Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:644:2532] in generation 1 2025-06-25T14:30:52.733573Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:30:52.782242Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:30:52.782481Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:30:52.782609Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:646:2542] 2025-06-25T14:30:52.782654Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:30:52.782729Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:30:52.782781Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:30:52.783121Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:628:2532], Recipient [1:628:2532]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:30:52.783188Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:30:52.783602Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:30:52.783714Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:30:52.783784Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:30:52.783826Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:30:52.783912Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:30:52.783969Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:30:52.784024Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:30:52.784063Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:30:52.784117Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:30:52.784665Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:633:2534], Recipient [1:628:2532]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:52.784716Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:30:52.784770Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at 
leader tablet# 72075186224037888, clientId# [1:624:2529], serverId# [1:633:2534], sessionId# [0:0:0] 2025-06-25T14:30:52.784940Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:633:2534] 2025-06-25T14:30:52.784991Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:30:52.785150Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:30:52.785476Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:30:52.785574Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:30:52.785769Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:30:52.785846Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:28147 ... pp:1916: Add [0:7] at 72075186224037889 to execution unit ExecuteRead 2025-06-25T14:35:44.652296Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037889 on unit ExecuteRead 2025-06-25T14:35:44.656541Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1527 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 1000 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1000 } 2025-06-25T14:35:44.656798Z node 28 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v1527/18446744073709551615 2025-06-25T14:35:44.656850Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[28:1051:2821], 1} after executionsCount# 1 2025-06-25T14:35:44.656890Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[28:1051:2821], 1} sends rowCount# 1, bytes# 32, quota rows left# 999, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T14:35:44.656959Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[28:1051:2821], 1} finished in read 2025-06-25T14:35:44.657018Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037889 is Executed 2025-06-25T14:35:44.657049Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037889 executing on unit ExecuteRead 2025-06-25T14:35:44.657076Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037889 to execution unit CompletedOperations 2025-06-25T14:35:44.657109Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037889 on unit CompletedOperations 2025-06-25T14:35:44.657160Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037889 is Executed 2025-06-25T14:35:44.657185Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037889 executing on unit 
CompletedOperations 2025-06-25T14:35:44.657210Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037889 has finished 2025-06-25T14:35:44.657239Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-25T14:35:44.657338Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037889:1:16} Tx{33, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{16, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T14:35:44.657397Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037889:1:16} Tx{33, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T14:35:44.657440Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-25T14:35:44.658387Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72075186224037889] send [28:901:2709] 2025-06-25T14:35:44.658429Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037889] push event to server [28:901:2709] 2025-06-25T14:35:44.658803Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [28:1051:2821], Recipient [28:666:2551]: NKikimrTxDataShard.TEvReadCancel ReadId: 1 2025-06-25T14:35:44.658862Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 1 } 2025-06-25T14:35:44.658978Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037890] ::Bootstrap [28:1054:2824] 2025-06-25T14:35:44.659046Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037890] lookup [28:1054:2824] 2025-06-25T14:35:44.659152Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037890] queue send [28:1054:2824] 2025-06-25T14:35:44.659193Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037890] forward result local node, try to connect [28:1054:2824] 2025-06-25T14:35:44.659235Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037890]::SendEvent [28:1054:2824] 2025-06-25T14:35:44.659403Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [28:1055:2825], Recipient [28:1007:2793]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:35:44.659441Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:35:44.659475Z node 28 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [28:1054:2824], serverId# [28:1055:2825], sessionId# [0:0:0] 2025-06-25T14:35:44.659521Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037890] connected with status OK role: Leader [28:1054:2824] 2025-06-25T14:35:44.659558Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037890] send queued [28:1054:2824] 2025-06-25T14:35:44.659584Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037890] push event to server [28:1054:2824] 2025-06-25T14:35:44.659795Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [28:1051:2821], Recipient [28:1007:2793]: NKikimrTxDataShard.TEvRead ReadId: 2 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 
1527 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 RangesSize: 1 2025-06-25T14:35:44.659894Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline 2025-06-25T14:35:44.659945Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T14:35:44.660024Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 0 2025-06-25T14:35:44.660087Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit CheckRead 2025-06-25T14:35:44.660152Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-25T14:35:44.660182Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit CheckRead 2025-06-25T14:35:44.660211Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit BuildAndWaitDependencies 2025-06-25T14:35:44.660243Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit BuildAndWaitDependencies 2025-06-25T14:35:44.660303Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:1] at 72075186224037890 2025-06-25T14:35:44.660363Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-25T14:35:44.660389Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-25T14:35:44.660414Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit ExecuteRead 2025-06-25T14:35:44.660439Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit ExecuteRead 2025-06-25T14:35:44.660557Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037890 Execute read# 1, request: { ReadId: 2 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1527 TxId: 18446744073709551615 } ResultFormat: FORMAT_CELLVEC MaxRows: 999 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 999 } 2025-06-25T14:35:44.660774Z node 28 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037890 promoting UnprotectedReadEdge to v1527/18446744073709551615 2025-06-25T14:35:44.660818Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037890 Complete read# {[28:1051:2821], 2} after executionsCount# 1 2025-06-25T14:35:44.660855Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037890 read iterator# {[28:1051:2821], 2} sends rowCount# 1, bytes# 32, quota rows left# 998, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T14:35:44.660917Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037890 read iterator# {[28:1051:2821], 2} finished in read 2025-06-25T14:35:44.660964Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution 
status for [0:1] at 72075186224037890 is Executed 2025-06-25T14:35:44.660990Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit ExecuteRead 2025-06-25T14:35:44.661014Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit CompletedOperations 2025-06-25T14:35:44.661041Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit CompletedOperations 2025-06-25T14:35:44.661083Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-25T14:35:44.661108Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit CompletedOperations 2025-06-25T14:35:44.661131Z node 28 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:1] at 72075186224037890 has finished 2025-06-25T14:35:44.661156Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2025-06-25T14:35:44.661241Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> done Change{17, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T14:35:44.661291Z node 28 :TABLET_EXECUTOR DEBUG: Leader{72075186224037890:2:4} Tx{13, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T14:35:44.661331Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890 2025-06-25T14:35:44.661948Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72075186224037890] send [28:1054:2824] 2025-06-25T14:35:44.661982Z node 28 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037890] push event to server [28:1054:2824] 2025-06-25T14:35:44.662099Z node 28 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [28:1051:2821], Recipient [28:1007:2793]: NKikimrTxDataShard.TEvReadCancel ReadId: 2 2025-06-25T14:35:44.662142Z node 28 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037890 ReadCancel: { ReadId: 2 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 11 } items { uint32_value: 111 } }, { items { uint32_value: 21 } items { uint32_value: 21 } } >> TConfigsDispatcherTests::TestYamlAndNonYamlCoexist [GOOD] >> TConfigsDispatcherTests::TestYamlEndToEnd >> TConsoleTests::TestCreateServerlessTenantWrongSharedDb [GOOD] >> TConsoleTests::TestCreateTenantWrongName >> TConsoleConfigSubscriptionTests::TestReplaceConfigSubscriptions [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForNewSubscription >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-59 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-60 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> BuildStatsHistogram::Many_Serial [GOOD] Test command err: Got : 24000 2106439 49449 9 9 Expected: 24000 2106439 49449 9 9 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 49449 9 9 Expected: 12816 1121048 49449 9 9 Got : 24000 3547100 
81694 9 9 Expected: 24000 3547100 81694 9 9 { [1012, 1475), [1682, 1985), [2727, 3553), [3599, 3992), [5397, 7244), [9181, 9807), [9993, 10178), [12209, 14029), [15089, 15342), [16198, 16984), [17238, 18436), [21087, 21876), [23701, 23794) } Got : 9582 1425282 81694 9 9 Expected: 9582 1425282 81694 9 9 Got : 24000 2460139 23760 9 9 Expected: 24000 2460139 23760 9 9 { [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) } Got : 10440 1060767 23760 9 9 Expected: 10440 1060767 23760 9 9 Got : 24000 4054050 46562 9 9 Expected: 24000 4054050 46562 9 9 { [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) } Got : 13570 2273213 46562 9 9 Expected: 13570 2273213 46562 9 9 Got : 24000 2106459 49449 9 9 Expected: 24000 2106459 49449 9 9 Got : 24000 2460219 23555 9 9 Expected: 24000 2460219 23555 9 9 Got : 24000 4054270 46543 9 9 Expected: 24000 4054270 46543 9 9 Got : 24000 2106439 25272 38 44 Expected: 24000 2106439 25272 38 44 { [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) } Got : 12816 1121048 25272 20 23 Expected: 12816 1121048 25272 20 23 Got : 24000 3547100 49916 64 44 Expected: 24000 3547100 49916 64 44 { [1012, 1475), [1682, 1985), [2727, 3553), [3599, 3992), [5397, 7244), [9181, 9807), [9993, 10178), [12209, 14029), [15089, 15342), [16198, 16984), [17238, 18436), [21087, 21876), [23701, 23794) } Got : 9582 1425198 49916 26 17 Expected: 9582 1425198 49916 26 17 Got : 24000 2460139 13170 42 41 Expected: 24000 2460139 13170 42 41 { [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) } Got : 10440 1060798 13170 18 18 Expected: 10440 1060798 13170 18 18 Got : 24000 4054050 29361 68 43 Expected: 24000 4054050 29361 68 43 { [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) } Got : 13570 2277890 29361 38 24 Expected: 13570 2277890 29361 38 24 Got : 24000 2106459 25428 38 44 Expected: 24000 2106459 25428 38 44 Got : 24000 2460219 13482 41 41 Expected: 24000 2460219 13482 41 41 Got : 24000 4054270 29970 67 43 Expected: 24000 4054270 29970 67 43 Got : 24000 2106479 25458 38 44 Expected: 24000 2106479 25458 38 44 Got : 24000 2460259 13528 42 41 Expected: 24000 2460259 13528 42 41 Got : 24000 4054290 30013 67 43 Expected: 24000 4054290 30013 67 43 1 parts: [0:0:1:0:0:0:0] 240000 rows, 10181 pages, 7 levels: (159964, 53329) (319996, 106673) (479902, 159975) (639565, 213196) (799303, 266442) Checking BTree: Touched 0% bytes, 4 pages RowCountHistogram: 10% (actual 10%) key = (80152, 26725) value = 24033 (actual 24079 - 0% error) 10% (actual 10%) key = (160300, 53441) value = 48088 (actual 48136 - 0% error) 10% (actual 10%) key = (241096, 80373) value = 72280 (actual 72327 - 0% error) 10% (actual 10%) key = (321454, 107159) value = 96428 (actual 96478 - 0% error) 10% (actual 10%) key = (402202, 134075) value = 120604 (actual 120651 - 0% error) 10% (actual 10%) key = (482362, 160795) value 
= 144727 (actual 144775 - 0% error) 10% (actual 10%) key = (562825, 187616) value = 168893 (actual 168936 - 0% error) 10% (actual 10%) key = (642871, 214298) value = 192974 (actual 193024 - 0% error) 5% (actual 5%) key = (683260, 227761) value = 205073 (actual 205115 - 0% error) 14% (actual 14%) DataSizeHistogram: 10% (actual 10%) key = (80152, 26725) value = 2048715 (actual 2052707 - 0% error) 10% (actual 10%) key = (160300, 53441) value = 4098370 (actual 4102393 - 0% error) 10% (actual 10%) key = (241096, 80373) value = 6145924 (actual 6149966 - 0% error) 10% (actual 10%) key = (321454, 107159) value = 8194622 (actual 8198636 - 0% error) 10% (actual 10%) key = (402202, 134075) value = 10244365 (actual 10248317 - 0% error) 10% (actual 10%) key = (482362, 160795) value = 12292389 (actual 12296360 - 0% error) 10% (actual 10%) key = (562825, 187616) value = 14344066 (actual 14348128 - 0% error) 10% (actual 10%) key = (642871, 214298) value = 16393002 (actual 16396983 - 0% error) 5% (actual 5%) key = (683260, 227761) value = 17416844 (actual 17420850 - 0% error) 14% (actual 14%) Checking Flat: Touched 100% bytes, 1 pages RowCountHistogram: 10% (actual 10%) key = (80065, 26696) value = 24008 (actual 24056 - 0% error) 10% (actual 10%) key = (160045, 53356) value = 48012 (actual 48061 - 0% error) 10% (actual 10%) key = (240238, 80087) value = 72016 (actual 72061 - 0% error) 10% (actual 10%) key = (320152, 106725) value = 96035 (actual 96085 - 0% error) 10% (actual 10%) key = (400354, 133459) value = 120047 (actual 120093 - 0% error) 10% (actual 10%) key = (480133, 160052) value = 144053 (actual 144100 - 0% error) 10% (actual 10%) key = (560080, 186701) value = 168060 (actual 168102 - 0% error) 10% (actual 10%) key = (639892, 213305) value = 192073 (actual 192119 - 0% error) 10% (actual 10%) key = (719776, 239933) value = 216090 (actual 216137 - 0% error) 9% (actual 9%) DataSizeHistogram: 10% (actual 10%) key = (79732, 26585) value = 2038706 (actual 2042645 - 0% error) 10% (actual 10%) key = (159427, 53150) value = 4076220 (actual 4080259 - 0% error) 10% (actual 10%) key = (239872, 79965) value = 6113940 (actual 6117932 - 0% error) 10% (actual 10%) key = (319834, 106619) value = 8152983 (actual 8156951 - 0% error) 10% (actual 10%) key = (400105, 133376) value = 10190566 (actual 10194584 - 0% error) 10% (actual 10%) key = (479833, 159952) value = 12228261 (actual 12232212 - 0% error) 10% (actual 10%) key = (559774, 186599) value = 14265925 (actual 14269984 - 0% error) 10% (actual 10%) key = (639385, 213136) value = 16304923 (actual 16308915 - 0% error) 10% (actual 10%) key = (719437, 239820) value = 18342658 (actual 18346641 - 0% error) 9% (actual 9%) Checking Mixed: Touched 1% bytes, 51 pages RowCountHistogram: 10% (actual 10%) key = (80152, 26725) value = 24033 (actual 24079 - 0% error) 10% (actual 10%) key = (160300, 53441) value = 48088 (actual 48136 - 0% error) 10% (actual 10%) key = (241096, 80373) value = 72280 (actual 72327 - 0% error) 10% (actual 10%) key = (321454, 107159) value = 96428 (actual 96478 - 0% error) 10% (actual 10%) key = (402202, 134075) value = 120604 (actual 120651 - 0% error) 10% (actual 10%) key = (482362, 160795) value = 144727 (actual 144775 - 0% error) 10% (actual 10%) key = (562825, 187616) value = 168893 (actual 168936 - 0% error) 10% (actual 10%) key = (642871, 214298) value = 192974 (actual 193024 - 0% error) 10% (actual 10%) key = (723403, 241142) value = 217180 (actual 217228 - 0% error) 9% (actual 9%) DataSizeHistogram: 10% (actual 10%) key = (80152, 26725) 
value = 2048715 (actual 2052707 - 0% error) 10% (actual 10%) key = (160300, 53441) value = 4098370 (actual 4102393 - 0% error) 10% (actual 10%) key = (241096, 80373) value = 6145924 (actual 6149966 - 0% error) 10% (actual 10%) key = (321454, 107159) value = 8194622 (actual 8198636 - 0% error) 10% (actual 10%) key = (402202, 134075) value = 10244365 (actual 10248317 - 0% error) 10% (actual 10%) key = (482362, 160795) value = 12292389 (actual 12296360 - 0% error) 10% (actual 10%) key = (562825, 187616) value = 14344066 (actual 14348128 - 0% error) 10% (actual 10%) key = (642871, 214298) value = 16393002 (actual 16396983 - 0% error) 10% (actual 10%) key = (723403, 241142) value = 18443184 (actual 18447186 - 0% error) 9% (actual 9%) { [12965, 17271), [20685, 27602), [31405, 43682), [58051, 73731), [81074, 85635), [86559, 89297), [92588, 112654), [134937, 148111), [152568, 158136), [169526, 171272), [181381, 184364), [188301, 199001), [201179, 227534) } 1 parts: [0:0:1:0:0:0:0] 240000 rows, 10181 pages, 7 levels: (159964, 53329) (319996, 106673) (479902, 159975) (639565, 213196) (799303, 266442) Checking BTree: Touched 3% bytes, 111 pages RowCountHistogram: 6% (actual 6%) key = (80152, 26725) value = 7654 (actual 7700 - 0% error) 11% (actual 11%) key = (140245, 46756) value = 21908 (actual 21959 - 0% error) 12% (actual 12%) key = (241096, 80373) value = 37729 (actual 37776 - 0% error) 5% (actual 5%) key = (291388, 97137) value = 44561 (actual 44610 - 0% error) 14% (actual 14%) key = (361831, 120618) value = 62406 (actual 62455 - 0% error) 6% (actual 6%) key = (462178, 154067) value = 70269 (actual 70314 - 0% error) 10% (actual 10%) key = (522574, 174199) value = 83950 (actual 83996 - 0% error) 9% (actual 9%) key = (647905, 215976) value = 96207 (actual 96256 - 0% error) 11% (actual 11%) key = (703270, 234431) value = 110645 (actual 110694 - 0% error) 12% (actual 12%) DataSizeHistogram: 6% (actual 6%) key = (80152, 26725) value = 650681 (actual 654673 - 0% error) 11% (actual 11%) key = (140245, 46756) value = 1862907 (actual 1866988 - 0% error) 12% (actual 12%) key = (241096, 80373) value = 3200081 (actual 3204123 - 0% error) 5% (actual 5%) key = (291388, 97137) value = 3780473 (actual 3784554 - 0% error) 14% (actual 14%) key = (361831, 120618) value = 5294670 (actual 5298760 - 0% error) 6% (actual 6%) key = (462178, 154067) value = 5965285 (actual 5969310 - 0% error) 10% (actual 10%) key = (522574, 174199) value = 7125413 (actual 7129406 - 0% error) 9% (actual 9%) key = (647905, 215976) value = 8166922 (actual 8170966 - 0% error) 11% (actual 11%) key = (703270, 234431) value = 9391370 (actual 9395383 - 0% error) 12% (actual 12%) { [12965, 17271), [20685, 27602), [31405, 43682), [58051, 73731), [81074, 85635), [86559, 89297), [92588, 112654), [134937, 148111), [152568, 158136), [169526, 171272), [181381, 184364), [188301, 199001), [201179, 227534) } Checking Flat: Touched 100% bytes, 1 pages RowCountHistogram: 10% (actual 10%) key = (109672, 36565) value = 12716 (actual 12760 - 0% error) 10% (actual 10%) key = (200011, 66678) value = 25439 (actual 25485 - 0% error) 10% (actual 10%) key = (242497, 80840) value = 38151 (actual 38197 - 0% error) 10% (actual 10%) key = (323278, 107767) value = 50861 (actual 50910 - 0% error) 9% (actual 9%) key = (365755, 121926) value = 63568 (actual 63614 - 0% error) 10% (actual 10%) key = (482191, 160738) value = 76283 (actual 76335 - 0% error) 10% (actual 9%) key = (610882, 203635) value = 88992 (actual 89039 - 0% error) 10% (actual 10%) key = (673702, 224575) 
value = 101722 (actual 101768 - 0% error) 10% (actual 10%) key = (715753, 238592) value = 114435 (actual 114484 - 0% error) 9% (actual 9%) DataSizeHistogram: 10% (actual 10%) ... 140, NULL) (311209, NULL) (311281, NULL) (311344, NULL) (311416, NULL) [0:0:935:0:0:0:0] 100 rows, 100 pages, 4 levels: (311479, NULL) (311542, NULL) (311614, NULL) (311683, NULL) (311755, NULL) [0:0:936:0:0:0:0] 100 rows, 100 pages, 4 levels: (311821, NULL) (311890, NULL) (311956, NULL) (312034, NULL) (312100, NULL) [0:0:937:0:0:0:0] 100 rows, 100 pages, 4 levels: (312172, NULL) (312232, NULL) (312301, NULL) (312370, NULL) (312439, NULL) [0:0:938:0:0:0:0] 100 rows, 100 pages, 4 levels: (312508, NULL) (312571, NULL) (312637, NULL) (312700, NULL) (312760, NULL) [0:0:939:0:0:0:0] 100 rows, 100 pages, 4 levels: (312835, NULL) (312904, NULL) (312970, NULL) (313030, NULL) (313102, NULL) [0:0:940:0:0:0:0] 100 rows, 100 pages, 4 levels: (313174, NULL) (313240, NULL) (313300, NULL) (313366, NULL) (313429, NULL) [0:0:941:0:0:0:0] 100 rows, 100 pages, 4 levels: (313498, NULL) (313573, NULL) (313639, NULL) (313699, NULL) (313768, NULL) [0:0:942:0:0:0:0] 100 rows, 100 pages, 4 levels: (313828, NULL) (313891, NULL) (313957, NULL) (314023, NULL) (314086, NULL) [0:0:943:0:0:0:0] 100 rows, 100 pages, 4 levels: (314149, NULL) (314212, NULL) (314275, NULL) (314338, NULL) (314401, NULL) [0:0:944:0:0:0:0] 100 rows, 100 pages, 4 levels: (314464, NULL) (314530, NULL) (314590, NULL) (314656, NULL) (314719, NULL) [0:0:945:0:0:0:0] 100 rows, 100 pages, 4 levels: (314788, NULL) (314854, NULL) (314920, NULL) (314983, NULL) (315046, NULL) [0:0:946:0:0:0:0] 100 rows, 100 pages, 4 levels: (315109, NULL) (315178, NULL) (315238, NULL) (315304, NULL) (315370, NULL) [0:0:947:0:0:0:0] 100 rows, 100 pages, 4 levels: (315433, NULL) (315496, NULL) (315565, NULL) (315631, NULL) (315697, NULL) [0:0:948:0:0:0:0] 100 rows, 100 pages, 4 levels: (315766, NULL) (315826, NULL) (315889, NULL) (315952, NULL) (316024, NULL) [0:0:949:0:0:0:0] 100 rows, 100 pages, 4 levels: (316087, NULL) (316156, NULL) (316222, NULL) (316288, NULL) (316357, NULL) [0:0:950:0:0:0:0] 100 rows, 100 pages, 4 levels: (316432, NULL) (316498, NULL) (316564, NULL) (316636, NULL) (316705, NULL) [0:0:951:0:0:0:0] 100 rows, 100 pages, 4 levels: (316768, NULL) (316831, NULL) (316891, NULL) (316951, NULL) (317011, NULL) [0:0:952:0:0:0:0] 100 rows, 100 pages, 4 levels: (317080, NULL) (317143, NULL) (317218, NULL) (317287, NULL) (317356, NULL) [0:0:953:0:0:0:0] 100 rows, 100 pages, 4 levels: (317422, NULL) (317497, NULL) (317563, NULL) (317632, NULL) (317701, NULL) [0:0:954:0:0:0:0] 100 rows, 100 pages, 4 levels: (317764, NULL) (317824, NULL) (317887, NULL) (317953, NULL) (318019, NULL) [0:0:955:0:0:0:0] 100 rows, 100 pages, 4 levels: (318088, NULL) (318166, NULL) (318235, NULL) (318304, NULL) (318370, NULL) [0:0:956:0:0:0:0] 100 rows, 100 pages, 4 levels: (318442, NULL) (318511, NULL) (318574, NULL) (318640, NULL) (318703, NULL) [0:0:957:0:0:0:0] 100 rows, 100 pages, 4 levels: (318772, NULL) (318838, NULL) (318898, NULL) (318970, NULL) (319036, NULL) [0:0:958:0:0:0:0] 100 rows, 100 pages, 4 levels: (319099, NULL) (319162, NULL) (319225, NULL) (319294, NULL) (319360, NULL) [0:0:959:0:0:0:0] 100 rows, 100 pages, 4 levels: (319423, NULL) (319492, NULL) (319555, NULL) (319621, NULL) (319687, NULL) [0:0:960:0:0:0:0] 100 rows, 100 pages, 4 levels: (319753, NULL) (319828, NULL) (319900, NULL) (319963, NULL) (320035, NULL) [0:0:961:0:0:0:0] 100 rows, 100 pages, 4 levels: (320104, NULL) (320164, NULL) 
(320233, NULL) (320299, NULL) (320365, NULL) [0:0:962:0:0:0:0] 100 rows, 100 pages, 4 levels: (320428, NULL) (320500, NULL) (320569, NULL) (320629, NULL) (320698, NULL) [0:0:963:0:0:0:0] 100 rows, 100 pages, 4 levels: (320764, NULL) (320833, NULL) (320893, NULL) (320959, NULL) (321019, NULL) [0:0:964:0:0:0:0] 100 rows, 100 pages, 4 levels: (321085, NULL) (321151, NULL) (321214, NULL) (321277, NULL) (321352, NULL) [0:0:965:0:0:0:0] 100 rows, 100 pages, 4 levels: (321421, NULL) (321493, NULL) (321562, NULL) (321631, NULL) (321691, NULL) [0:0:966:0:0:0:0] 100 rows, 100 pages, 4 levels: (321757, NULL) (321823, NULL) (321886, NULL) (321949, NULL) (322009, NULL) [0:0:967:0:0:0:0] 100 rows, 100 pages, 4 levels: (322081, NULL) (322159, NULL) (322225, NULL) (322294, NULL) (322363, NULL) [0:0:968:0:0:0:0] 100 rows, 100 pages, 4 levels: (322429, NULL) (322498, NULL) (322564, NULL) (322642, NULL) (322711, NULL) [0:0:969:0:0:0:0] 100 rows, 100 pages, 4 levels: (322783, NULL) (322846, NULL) (322915, NULL) (322978, NULL) (323041, NULL) [0:0:970:0:0:0:0] 100 rows, 100 pages, 4 levels: (323104, NULL) (323164, NULL) (323230, NULL) (323305, NULL) (323368, NULL) [0:0:971:0:0:0:0] 100 rows, 100 pages, 4 levels: (323434, NULL) (323506, NULL) (323569, NULL) (323632, NULL) (323707, NULL) [0:0:972:0:0:0:0] 100 rows, 100 pages, 4 levels: (323776, NULL) (323851, NULL) (323917, NULL) (323986, NULL) (324052, NULL) [0:0:973:0:0:0:0] 100 rows, 100 pages, 4 levels: (324115, NULL) (324184, NULL) (324256, NULL) (324316, NULL) (324379, NULL) [0:0:974:0:0:0:0] 100 rows, 100 pages, 4 levels: (324442, NULL) (324502, NULL) (324568, NULL) (324631, NULL) (324703, NULL) [0:0:975:0:0:0:0] 100 rows, 100 pages, 4 levels: (324769, NULL) (324838, NULL) (324904, NULL) (324973, NULL) (325033, NULL) [0:0:976:0:0:0:0] 100 rows, 100 pages, 4 levels: (325105, NULL) (325174, NULL) (325234, NULL) (325297, NULL) (325363, NULL) [0:0:977:0:0:0:0] 100 rows, 100 pages, 4 levels: (325438, NULL) (325504, NULL) (325570, NULL) (325630, NULL) (325699, NULL) [0:0:978:0:0:0:0] 100 rows, 100 pages, 4 levels: (325771, NULL) (325834, NULL) (325900, NULL) (325966, NULL) (326032, NULL) [0:0:979:0:0:0:0] 100 rows, 100 pages, 4 levels: (326101, NULL) (326170, NULL) (326233, NULL) (326296, NULL) (326359, NULL) [0:0:980:0:0:0:0] 100 rows, 100 pages, 4 levels: (326434, NULL) (326497, NULL) (326563, NULL) (326632, NULL) (326701, NULL) [0:0:981:0:0:0:0] 100 rows, 100 pages, 4 levels: (326773, NULL) (326836, NULL) (326905, NULL) (326965, NULL) (327025, NULL) [0:0:982:0:0:0:0] 100 rows, 100 pages, 4 levels: (327097, NULL) (327169, NULL) (327232, NULL) (327301, NULL) (327364, NULL) [0:0:983:0:0:0:0] 100 rows, 100 pages, 4 levels: (327430, NULL) (327496, NULL) (327559, NULL) (327622, NULL) (327682, NULL) [0:0:984:0:0:0:0] 100 rows, 100 pages, 4 levels: (327742, NULL) (327811, NULL) (327871, NULL) (327934, NULL) (327997, NULL) [0:0:985:0:0:0:0] 100 rows, 100 pages, 4 levels: (328072, NULL) (328138, NULL) (328222, NULL) (328291, NULL) (328363, NULL) [0:0:986:0:0:0:0] 100 rows, 100 pages, 4 levels: (328432, NULL) (328501, NULL) (328573, NULL) (328648, NULL) (328717, NULL) [0:0:987:0:0:0:0] 100 rows, 100 pages, 4 levels: (328783, NULL) (328849, NULL) (328915, NULL) (328978, NULL) (329044, NULL) [0:0:988:0:0:0:0] 100 rows, 100 pages, 4 levels: (329119, NULL) (329185, NULL) (329248, NULL) (329317, NULL) (329383, NULL) [0:0:989:0:0:0:0] 100 rows, 100 pages, 4 levels: (329455, NULL) (329518, NULL) (329590, NULL) (329662, NULL) (329722, NULL) [0:0:990:0:0:0:0] 100 rows, 100 
pages, 4 levels: (329782, NULL) (329854, NULL) (329917, NULL) (329983, NULL) (330049, NULL) [0:0:991:0:0:0:0] 100 rows, 100 pages, 4 levels: (330118, NULL) (330187, NULL) (330253, NULL) (330322, NULL) (330382, NULL) [0:0:992:0:0:0:0] 100 rows, 100 pages, 4 levels: (330454, NULL) (330520, NULL) (330595, NULL) (330673, NULL) (330739, NULL) [0:0:993:0:0:0:0] 100 rows, 100 pages, 4 levels: (330808, NULL) (330874, NULL) (330940, NULL) (331003, NULL) (331072, NULL) [0:0:994:0:0:0:0] 100 rows, 100 pages, 4 levels: (331132, NULL) (331204, NULL) (331276, NULL) (331342, NULL) (331405, NULL) [0:0:995:0:0:0:0] 100 rows, 100 pages, 4 levels: (331465, NULL) (331540, NULL) (331615, NULL) (331684, NULL) (331753, NULL) [0:0:996:0:0:0:0] 100 rows, 100 pages, 4 levels: (331816, NULL) (331891, NULL) (331960, NULL) (332026, NULL) (332086, NULL) [0:0:997:0:0:0:0] 100 rows, 100 pages, 4 levels: (332152, NULL) (332215, NULL) (332284, NULL) (332350, NULL) (332419, NULL) [0:0:998:0:0:0:0] 100 rows, 100 pages, 4 levels: (332491, NULL) (332557, NULL) (332623, NULL) (332686, NULL) (332752, NULL) [0:0:999:0:0:0:0] 100 rows, 100 pages, 4 levels: (332818, NULL) (332884, NULL) (332944, NULL) (333013, NULL) (333073, NULL) [0:0:1000:0:0:0:0] 100 rows, 100 pages, 4 levels: (333148, NULL) (333214, NULL) (333274, NULL) (333340, NULL) (333403, NULL) Checking BTree: Touched 0% bytes, 0 pages RowCountHistogram: 5% (actual 6%) key = (16984, 5669) value = 5100 (actual 6998 - -1% error) 10% (actual 9%) key = (50416, 16813) value = 15100 (actual 16798 - -1% error) 10% (actual 9%) key = (83701, 27908) value = 25100 (actual 26598 - -1% error) 10% (actual 9%) key = (116986, 39003) value = 35100 (actual 36398 - -1% error) 10% (actual 9%) key = (150319, 50114) value = 45100 (actual 46198 - -1% error) 10% (actual 9%) key = (183700, 61241) value = 55100 (actual 55998 - 0% error) 10% (actual 9%) key = (217081, 72368) value = 65100 (actual 65798 - 0% error) 10% (actual 9%) key = (250486, 83503) value = 75100 (actual 75598 - 0% error) 10% (actual 9%) key = (283771, 94598) value = 85100 (actual 85398 - 0% error) 14% (actual 14%) DataSizeHistogram: 5% (actual 6%) key = (16648, 5557) value = 524891 (actual 723287 - -1% error) 10% (actual 9%) key = (50086, 16703) value = 1569936 (actual 1747238 - -1% error) 9% (actual 9%) key = (83356, 27793) value = 2610698 (actual 2767306 - -1% error) 10% (actual 9%) key = (116647, 38890) value = 3652143 (actual 3787394 - -1% error) 9% (actual 9%) key = (149656, 49893) value = 4685435 (actual 4800597 - -1% error) 10% (actual 9%) key = (183040, 61021) value = 5728420 (actual 5822785 - 0% error) 10% (actual 9%) key = (216727, 72250) value = 6776444 (actual 6848929 - 0% error) 9% (actual 9%) key = (250144, 83389) value = 7813547 (actual 7865227 - 0% error) 9% (actual 9%) key = (283444, 94489) value = 8853697 (actual 8884838 - 0% error) 14% (actual 14%) Checking Flat: Touched 100% bytes, 1000 pages RowCountHistogram: 10% (actual 11%) key = (33379, 11134) value = 10000 (actual 11800 - -1% error) 10% (actual 9%) key = (66721, 22248) value = 20000 (actual 21600 - -1% error) 10% (actual 9%) key = (100015, 33346) value = 30000 (actual 31400 - -1% error) 10% (actual 9%) key = (133258, 44427) value = 40000 (actual 41200 - -1% error) 10% (actual 9%) key = (166621, 55548) value = 50000 (actual 51000 - -1% error) 10% (actual 9%) key = (200041, 66688) value = 60000 (actual 60800 - 0% error) 10% (actual 9%) key = (233449, 77824) value = 70000 (actual 70600 - 0% error) 10% (actual 9%) key = (266824, 88949) value = 80000 (actual 
80400 - 0% error) 10% (actual 9%) key = (300073, 100032) value = 90000 (actual 90200 - 0% error) 10% (actual 9%) DataSizeHistogram: 10% (actual 11%) key = (33187, NULL) value = 1041247 (actual 1229534 - -1% error) 10% (actual 9%) key = (66517, NULL) value = 2082456 (actual 2249844 - -1% error) 10% (actual 9%) key = (99709, NULL) value = 3123684 (actual 3270138 - -1% error) 10% (actual 9%) key = (132925, NULL) value = 4164886 (actual 4290603 - -1% error) 10% (actual 9%) key = (166246, NULL) value = 5206111 (actual 5311117 - -1% error) 10% (actual 9%) key = (199678, NULL) value = 6247321 (actual 6331068 - 0% error) 10% (actual 9%) key = (233290, NULL) value = 7288529 (actual 7350869 - 0% error) 10% (actual 9%) key = (266701, NULL) value = 8329759 (actual 8371441 - 0% error) 10% (actual 9%) key = (300052, NULL) value = 9371030 (actual 9392083 - 0% error) 9% (actual 9%) Checking Mixed: Touched 0% bytes, 0 pages RowCountHistogram: 100% (actual 100%) DataSizeHistogram: 100% (actual 100%) >> TLogSettingsConfiguratorTests::TestChangeDefaults [GOOD] >> TModificationsValidatorTests::TestApplyValidators_TENANTS [GOOD] >> TModificationsValidatorTests::TestApplyValidators_TENANTS_AND_NODE_TYPES [GOOD] >> TModificationsValidatorTests::TestApplyValidatorsWithOldConfig [GOOD] >> TModificationsValidatorTests::TestChecksLimitError [GOOD] >> TModificationsValidatorTests::TestChecksLimitWarning [GOOD] |80.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |80.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |80.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-12 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-13 >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin+NotNull >> KqpJoinOrder::SortingsSimpleOrderByPKAlias-RemoveLimitOperator >> KqpJoinOrder::SortingsByPK+RemoveLimitOperator >> TConfigsDispatcherTests::TestYamlEndToEnd [GOOD] >> TConsoleConfigHelpersTests::TestConfigCourier >> OlapEstimationRowsCorrectness::TPCH10 >> TConsoleInMemoryConfigSubscriptionTests::TestComplexYamlConfigChanges [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlResend ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TModificationsValidatorTests::TestChecksLimitWarning [GOOD] Test command err: 2025-06-25T14:35:28.479255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:28.479308Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:28.535620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:30.097373Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:30.097451Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:30.140872Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo 
unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:31.492754Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:31.492817Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:31.557340Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:33.089607Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:33.089673Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:33.140358Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:34.845332Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:34.845409Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:34.927939Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:36.339463Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:36.339538Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:36.387865Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:38.257981Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:38.258047Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:38.337105Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:40.003775Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot 
subscribe to console configs 2025-06-25T14:35:40.003853Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:40.044584Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:42.010740Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:42.010808Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:42.069550Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:43.521681Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:43.521756Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:43.573536Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:45.732991Z node 11 :CMS_CONFIGS TRACE: console_configs_manager.h:259: StateWork, received event# 268637729, Sender [11:158:2168], Recipient [11:357:2298]: {TEvControllerProposeConfigRequest Record# } 2025-06-25T14:35:45.733091Z node 11 :CMS_CONFIGS TRACE: console_configs_manager.h:293: StateWork, processing event TEvBlobStorage::TEvControllerProposeConfigRequest 2025-06-25T14:35:45.745194Z node 11 :CMS_CONFIGS TRACE: console_configs_subscriber.cpp:105: StateWork, received event# 269877760, Sender [11:318:2287], Recipient [11:317:2284]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936131 Status: OK ServerId: [11:408:2344] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T14:35:45.745275Z node 11 :CMS_CONFIGS TRACE: console_configs_subscriber.cpp:115: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:35:45.775140Z node 11 :CMS_CONFIGS TRACE: console_configs_manager.h:259: StateWork, received event# 273285144, Sender [11:317:2284], Recipient [11:357:2298]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionRequest { Generation: 1 Options { NodeId: 11 Host: "ghrun-kqfvx6aroe.auto.internal" Tenant: "" NodeType: "" } ConfigItemKinds: 29 ConfigItemKinds: 1 ConfigItemKinds: 89 ConfigItemKinds: 2 ConfigItemKinds: 90 ConfigItemKinds: 32 ConfigItemKinds: 3 ConfigItemKinds: 33 ConfigItemKinds: 34 ConfigItemKinds: 6 ConfigItemKinds: 36 ConfigItemKinds: 37 ConfigItemKinds: 8 ConfigItemKinds: 38 ConfigItemKinds: 10 ConfigItemKinds: 39 ConfigItemKinds: 43 ConfigItemKinds: 73 ConfigItemKinds: 75 ConfigItemKinds: 46 ConfigItemKinds: 77 ConfigItemKinds: 80 ConfigItemKinds: 81 ConfigItemKinds: 52 ConfigItemKinds: 54 ConfigItemKinds: 25 
ConfigItemKinds: 55 ConfigItemKinds: 26 ServeYaml: true YamlApiVersion: 1 } 2025-06-25T14:35:45.775450Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.h:229: StateWork, received event# 273285144, Sender [11:317:2284], Recipient [11:364:2310]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionRequest { Generation: 1 Options { NodeId: 11 Host: "ghrun-kqfvx6aroe.auto.internal" Tenant: "" NodeType: "" } ConfigItemKinds: 29 ConfigItemKinds: 1 ConfigItemKinds: 89 ConfigItemKinds: 2 ConfigItemKinds: 90 ConfigItemKinds: 32 ConfigItemKinds: 3 ConfigItemKinds: 33 ConfigItemKinds: 34 ConfigItemKinds: 6 ConfigItemKinds: 36 ConfigItemKinds: 37 ConfigItemKinds: 8 ConfigItemKinds: 38 ConfigItemKinds: 10 ConfigItemKinds: 39 ConfigItemKinds: 43 ConfigItemKinds: 73 ConfigItemKinds: 75 ConfigItemKinds: 46 ConfigItemKinds: 77 ConfigItemKinds: 80 ConfigItemKinds: 81 ConfigItemKinds: 52 ConfigItemKinds: 54 ConfigItemKinds: 25 ConfigItemKinds: 55 ConfigItemKinds: 26 ServeYaml: true YamlApiVersion: 1 } 2025-06-25T14:35:45.775520Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.h:232: StateWork, processing event TEvConsole::TEvConfigSubscriptionRequest 2025-06-25T14:35:45.775639Z node 11 :CMS_CONFIGS DEBUG: console_configs_provider.cpp:866: TConfigsProvider registered new subscription [11:317:2284]:1 2025-06-25T14:35:45.775742Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.cpp:627: TConfigsProvider: check if update is required for volatile subscription [11:317:2284]:1 2025-06-25T14:35:45.775818Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.cpp:710: TConfigsProvider: new config found for subscription [11:317:2284]:1 version= 2025-06-25T14:35:45.775949Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.cpp:312: TSubscriptionClientSender([11:317:2284]) send TEvConfigSubscriptionResponse 2025-06-25T14:35:45.776100Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.cpp:320: StateWork, received event# 273285146, Sender [11:364:2310], Recipient [11:409:2310]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Generation: 1 Config { } MainYamlConfigNotChanged: true } 2025-06-25T14:35:45.776150Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.cpp:323: StateWork, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-25T14:35:45.776231Z node 11 :CMS_CONFIGS TRACE: console_configs_provider.cpp:379: TSubscriptionClientSender([11:317:2284]) send TEvConfigSubscriptionNotificationRequest: Order: 1 Generation: 1 Config { } MainYamlConfigNotChanged: true 2025-06-25T14:35:45.777248Z node 11 :CMS_CONFIGS TRACE: console_configs_subscriber.cpp:105: StateWork, received event# 273286169, Sender [11:409:2310], Recipient [11:317:2284]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionResponse { Generation: 1 Status { Code: SUCCESS } } 2025-06-25T14:35:45.777299Z node 11 :CMS_CONFIGS TRACE: console_configs_subscriber.cpp:111: StateWork, processing event TEvConsole::TEvConfigSubscriptionResponse 2025-06-25T14:35:45.777500Z node 11 :CMS_CONFIGS TRACE: console_configs_subscriber.cpp:105: StateWork, received event# 273285146, Sender [11:409:2310], Recipient [11:317:2284]: NKikimr::NConsole::TEvConsole::TEvConfigSubscriptionNotification { Order: 1 Generation: 1 Config { } MainYamlConfigNotChanged: true } 2025-06-25T14:35:45.777539Z node 11 :CMS_CONFIGS TRACE: console_configs_subscriber.cpp:113: StateWork, processing event TEvConsole::TEvConfigSubscriptionNotification 2025-06-25T14:35:45.795100Z node 11 :CMS_CONFIGS INFO: log_settings_configurator.cpp:86: 
TLogSettingsConfigurator: got new config: 2025-06-25T14:35:45.795237Z node 11 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component GLOBAL has been changed from WARN to NOTICE 2025-06-25T14:35:45.795306Z node 11 :CMS_CONFIGS NOTICE: log_settings_configur ... NOTICE to ALERT 2025-06-25T14:35:50.687028Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TX_CONVEYOR has been changed from DEBUG to ALERT 2025-06-25T14:35:50.687052Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component TX_CONVEYOR has been changed from 0 to 10 2025-06-25T14:35:50.687073Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component TX_LIMITER has been changed from NOTICE to ALERT 2025-06-25T14:35:50.687097Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TX_LIMITER has been changed from DEBUG to ALERT 2025-06-25T14:35:50.687167Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component TX_LIMITER has been changed from 0 to 10 2025-06-25T14:35:50.687203Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component ARROW_HELPER has been changed from NOTICE to ALERT 2025-06-25T14:35:50.687227Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component ARROW_HELPER has been changed from DEBUG to ALERT 2025-06-25T14:35:50.687250Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component ARROW_HELPER has been changed from 0 to 10 2025-06-25T14:35:50.687274Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component SSA_GRAPH_EXECUTION has been changed from NOTICE to ALERT 2025-06-25T14:35:50.687303Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component SSA_GRAPH_EXECUTION has been changed from DEBUG to ALERT 2025-06-25T14:35:50.687325Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component SSA_GRAPH_EXECUTION has been changed from 0 to 10 2025-06-25T14:35:50.687348Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component KAFKA_PROXY has been changed from NOTICE to ALERT 2025-06-25T14:35:50.687370Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component KAFKA_PROXY has been changed from DEBUG to ALERT 2025-06-25T14:35:50.687403Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component KAFKA_PROXY has been changed from 0 to 10 2025-06-25T14:35:50.687430Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component OBJECTS_MONITORING has been changed from NOTICE to ALERT 2025-06-25T14:35:50.687455Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component OBJECTS_MONITORING has been changed from DEBUG to ALERT 2025-06-25T14:35:50.687477Z node 
14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component OBJECTS_MONITORING has been changed from 0 to 10 2025-06-25T14:35:50.687501Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component STATISTICS has been changed from NOTICE to ALERT 2025-06-25T14:35:50.687524Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component STATISTICS has been changed from DEBUG to ALERT 2025-06-25T14:35:50.687544Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component STATISTICS has been changed from 0 to 10 2025-06-25T14:35:50.687576Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_REQUEST_COST has been changed from NOTICE to ALERT 2025-06-25T14:35:50.687617Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_REQUEST_COST has been changed from DEBUG to ALERT 2025-06-25T14:35:50.687641Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_REQUEST_COST has been changed from 0 to 10 2025-06-25T14:35:50.687665Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_VDISK_BALANCING has been changed from NOTICE to ALERT 2025-06-25T14:35:50.687690Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_VDISK_BALANCING has been changed from DEBUG to ALERT 2025-06-25T14:35:50.687711Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_VDISK_BALANCING has been changed from 0 to 10 2025-06-25T14:35:50.687733Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PROXY_GETBLOCK has been changed from NOTICE to ALERT 2025-06-25T14:35:50.687755Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PROXY_GETBLOCK has been changed from DEBUG to ALERT 2025-06-25T14:35:50.687778Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PROXY_GETBLOCK has been changed from 0 to 10 2025-06-25T14:35:50.687811Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_SHRED has been changed from NOTICE to ALERT 2025-06-25T14:35:50.687837Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_SHRED has been changed from DEBUG to ALERT 2025-06-25T14:35:50.687859Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_SHRED has been changed from 0 to 10 2025-06-25T14:35:50.687882Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PROXY_CHECKINTEGRITY has been changed from NOTICE to ALERT 2025-06-25T14:35:50.687905Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PROXY_CHECKINTEGRITY has been 
changed from DEBUG to ALERT 2025-06-25T14:35:50.687926Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PROXY_CHECKINTEGRITY has been changed from 0 to 10 2025-06-25T14:35:50.687964Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BS_PROXY_BRIDGE has been changed from NOTICE to ALERT 2025-06-25T14:35:50.688000Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BS_PROXY_BRIDGE has been changed from DEBUG to ALERT 2025-06-25T14:35:50.688023Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BS_PROXY_BRIDGE has been changed from 0 to 10 2025-06-25T14:35:50.688051Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component LDAP_AUTH_PROVIDER has been changed from NOTICE to ALERT 2025-06-25T14:35:50.688074Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component LDAP_AUTH_PROVIDER has been changed from DEBUG to ALERT 2025-06-25T14:35:50.688096Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component LDAP_AUTH_PROVIDER has been changed from 0 to 10 2025-06-25T14:35:50.688117Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component GROUPED_MEMORY_LIMITER has been changed from NOTICE to ALERT 2025-06-25T14:35:50.688139Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component GROUPED_MEMORY_LIMITER has been changed from DEBUG to ALERT 2025-06-25T14:35:50.688159Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component GROUPED_MEMORY_LIMITER has been changed from 0 to 10 2025-06-25T14:35:50.688195Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component DATA_INTEGRITY has been changed from NOTICE to ALERT 2025-06-25T14:35:50.689881Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component DATA_INTEGRITY has been changed from DEBUG to ALERT 2025-06-25T14:35:50.689918Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component DATA_INTEGRITY has been changed from 0 to 10 2025-06-25T14:35:50.689948Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component TX_PRIORITIES_QUEUE has been changed from NOTICE to ALERT 2025-06-25T14:35:50.689971Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component TX_PRIORITIES_QUEUE has been changed from DEBUG to ALERT 2025-06-25T14:35:50.689994Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component TX_PRIORITIES_QUEUE has been changed from 0 to 10 2025-06-25T14:35:50.690029Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BSCONFIG has been changed from NOTICE to ALERT 2025-06-25T14:35:50.690055Z node 14 :CMS_CONFIGS NOTICE: 
log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BSCONFIG has been changed from DEBUG to ALERT 2025-06-25T14:35:50.690078Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BSCONFIG has been changed from 0 to 10 2025-06-25T14:35:50.690115Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component NAMESERVICE has been changed from NOTICE to ALERT 2025-06-25T14:35:50.690140Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component NAMESERVICE has been changed from DEBUG to ALERT 2025-06-25T14:35:50.690167Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component NAMESERVICE has been changed from 0 to 10 2025-06-25T14:35:50.690203Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:192: TLogSettingsConfigurator: Priority for the component BRIDGE has been changed from NOTICE to ALERT 2025-06-25T14:35:50.690227Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:203: TLogSettingsConfigurator: Sampling priority for the component BRIDGE has been changed from DEBUG to ALERT 2025-06-25T14:35:50.690248Z node 14 :CMS_CONFIGS NOTICE: log_settings_configurator.cpp:209: TLogSettingsConfigurator: Sampling rate for the component BRIDGE has been changed from 0 to 10 2025-06-25T14:35:50.690467Z node 14 :CMS_CONFIGS TRACE: log_settings_configurator.cpp:100: TLogSettingsConfigurator: Send TEvConfigNotificationResponse: SubscriptionId: 0 ConfigId { } >> DataShardWrite::DeletePrepared+Volatile [GOOD] >> DataShardWrite::DeletePrepared-Volatile >> TConsoleTests::TestListTenantsExtSubdomain [GOOD] >> TConsoleTests::TestModifyUsedZoneKind >> TConsoleTests::TestSetDefaultStorageUnitsQuota [GOOD] >> TConsoleTests::TestSetDefaultComputationalUnitsQuota |80.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut >> Viewer::Plan2SvgOK [FAIL] >> Viewer::Plan2SvgBad >> KqpJoinOrder::ShuffleEliminationOneJoin |80.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |80.5%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |80.5%| [TA] $(B)/ydb/core/tx/datashard/ut_volatile/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpScan::ScanDuringSplitThenMerge [GOOD] >> KqpScan::ScanPg >> Viewer::JsonAutocompleteColumnsPOST [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-42 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-61 >> TConsoleConfigHelpersTests::TestConfigCourier [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriber >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62 |80.5%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_volatile/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TConsoleInMemoryConfigSubscriptionTests::TestNoYamlResend [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApply >> TConsoleConfigSubscriptionTests::TestNotificationForNewSubscription [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForNewConfigItem |80.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |80.5%| [LD] {RESULT} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |80.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-23 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-24 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-18 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-19 >> TConsoleTests::TestAlterTenantModifyStorageResourcesForPendingExtSubdomain [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunning >> TConsoleTests::TestCreateTenantWrongName [GOOD] >> TConsoleTests::TestCreateTenantWrongNameExtSubdomain >> Viewer::ServerlessWithExclusiveNodes [GOOD] >> Viewer::SharedDoesntShowExclusiveNodes >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonAutocompleteColumnsPOST [GOOD] Test command err: 2025-06-25T14:35:02.392083Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:121:2167], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:02.392459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:02.392662Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:02.717657Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 8347, node 1 TClient is connected to server localhost:18672 2025-06-25T14:35:10.912021Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:287:2330], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:10.912300Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:10.912424Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:11.196863Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 TServer::EnableGrpc on GrpcPort 3118, node 2 TClient is connected to server localhost:27599 2025-06-25T14:35:20.363161Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:299:2341], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:20.363539Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:20.363705Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:20.728388Z node 3 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 3 Type# 268639257 TServer::EnableGrpc on GrpcPort 22777, node 3 TClient is connected to server localhost:12579 2025-06-25T14:35:33.358112Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:117:2163], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:33.358606Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:33.358686Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:33.713108Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 25500, node 4 TClient is connected to server localhost:28465 2025-06-25T14:35:49.014005Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:284:2327], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:49.014626Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:49.014934Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:49.396382Z node 5 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 5 Type# 268639257 TServer::EnableGrpc on GrpcPort 5997, node 5 TClient is connected to server localhost:17261 |80.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |80.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |80.6%| [LD] {RESULT} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut >> KqpScan::ScanDuringSplit [GOOD] >> KqpScan::ScanAfterSplitSlowMetaRead >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApply [GOOD] >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithDb >> RemoteTopicReader::ReadTopic >> TConsoleConfigHelpersTests::TestConfigSubscriber [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantTenant >> TConsoleTests::TestSetDefaultComputationalUnitsQuota [GOOD] >> TConsoleTests::TestTenantConfigConsistency >> TConsoleTests::TestModifyUsedZoneKind [GOOD] >> TConsoleTests::TestMergeConfig >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithDb [GOOD] >> TVersions::Wreck1Reverse [GOOD] >> TVersions::Wreck0 >> TExtSubDomainTest::DeclareAndAlterPools-AlterDatabaseCreateHiveFirst-false >> DataShardWrite::DeletePrepared-Volatile [GOOD] >> DataShardWrite::DelayedVolatileTxAndEvWrite >> TConsoleTests::TestCreateTenantWrongNameExtSubdomain [GOOD] >> TConsoleTests::TestCreateTenantWrongPool ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TConsoleInMemoryConfigSubscriptionTests::TestSubscribeAfterConfigApplyWithDb [GOOD] Test command err: 2025-06-25T14:35:20.145948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:20.146002Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:20.259382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:22.026442Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:22.026505Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:22.070851Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, 
first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:23.388651Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:23.388718Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:23.437688Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:25.093923Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:25.093998Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:25.137447Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:26.724636Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:26.724711Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:26.799248Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:28.080664Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:28.080740Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:28.137725Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:29.730686Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:29.730761Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:29.781904Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:31.228025Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:31.228108Z node 8 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:31.284445Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:33.128455Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:33.128542Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:33.173748Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:34.965387Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:34.965478Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:35.014049Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:36.812280Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:36.812381Z node 11 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:36.873494Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:38.671079Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:38.671177Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:38.724709Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:40.761996Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:40.762078Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:40.873926Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first 
called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:43.017131Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:43.017224Z node 14 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:43.097849Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:45.445090Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:45.445173Z node 16 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:45.501783Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:48.035213Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:48.035301Z node 18 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:48.080972Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:50.556477Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:50.556569Z node 20 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:50.621793Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:52.535391Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:52.535472Z node 22 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:52.617262Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:54.792298Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:54.792426Z node 23 :IMPORT WARN: schemeshard_import.cpp:305: Table 
profiles were not loaded 2025-06-25T14:35:54.861737Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:55.650723Z node 23 :BS_CONTROLLER ERROR: {BSC26@console_interaction.cpp:112} failed to parse config obtained from Console ErrorReason# ydb/library/yaml_config/yaml_config_parser.cpp:1362: Condition violated: `config.HasDomainsConfig()' Yaml# --- metadata: kind: MainConfig cluster: "" version: 1 config: log_config: cluster_name: cluster1 allowed_labels: test: type: enum values: ? true selector_config: [] 2025-06-25T14:35:57.305774Z node 24 :BS_CONTROLLER ERROR: {BSC26@console_interaction.cpp:112} failed to parse config obtained from Console ErrorReason# ydb/library/yaml_config/yaml_config_parser.cpp:1362: Condition violated: `config.HasDomainsConfig()' Yaml# --- metadata: kind: MainConfig cluster: "" version: 1 config: log_config: cluster_name: cluster1 allowed_labels: test: type: enum values: ? true selector_config: [] >> TConsoleConfigSubscriptionTests::TestNotificationForNewConfigItem [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItem >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-60 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-61 >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantTenant [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantMultipleTenants >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-13 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-14 >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin+NotNull [GOOD] >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-true [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-19 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-20 >> TConsoleTests::TestTenantConfigConsistency [GOOD] >> TConsoleTests::TestSetConfig >> KqpScanLogs::GraceJoin+EnabledLogs [GOOD] >> KqpScanLogs::GraceJoin-EnabledLogs >> KqpScanSpilling::SpillingInRuntimeNodes+EnabledSpilling [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-61 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-62 >> Viewer::TabletMerging [GOOD] >> Viewer::StorageGroupOutputWithoutFilterNoDepends >> KqpScanSpilling::HandleErrorsCorrectly [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUtf8ToString-StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 19769, MsgBus: 17609 2025-06-25T14:35:42.382772Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895641759513885:2189];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:42.383237Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ec2/r3tmp/tmpf7ufIA/pdisk_1.dat 2025-06-25T14:35:43.426335Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:43.459789Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895641759513734:2080] 1750862142340987 != 1750862142340990 2025-06-25T14:35:43.462606Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:43.467292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:43.467384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:43.484432Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:43.497263Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19769, node 1 2025-06-25T14:35:43.748824Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:43.748844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:43.748853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:43.748981Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17609 TClient is connected to server localhost:17609 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:44.719079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:35:44.752835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:35:44.806720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:44.960842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:45.162914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:45.271925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:47.376415Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895641759513885:2189];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:47.376489Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:35:47.521210Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895663234351854:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:47.521349Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:47.967001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:48.016542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:48.097741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:48.123718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:48.149518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:48.188828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:48.274731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:48.461870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895667529319826:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:48.461950Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:48.462303Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895667529319831:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:48.466742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:48.501898Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895667529319833:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:35:48.588678Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895667529319887:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:49.973988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Oper ... TA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ec2/r3tmp/tmpTOFkrX/pdisk_1.dat 2025-06-25T14:35:52.159605Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:52.163843Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895681585237874:2080] 1750862151912657 != 1750862151912660 TServer::EnableGrpc on GrpcPort 30109, node 2 2025-06-25T14:35:52.280659Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:52.280764Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:52.303219Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:52.320791Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:52.320814Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:52.320821Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:52.320921Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8487 TClient is connected to server localhost:8487 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:35:52.915876Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:52.926904Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:52.967455Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:53.023976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:53.176811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:53.252295Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:35:56.169684Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895703060075972:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.169759Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.284746Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.349866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.402767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.456382Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.588555Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.664392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.735668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.859769Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895703060076631:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.859851Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.860068Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895703060076636:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.864683Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:56.881089Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519895703060076638:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:35:56.924470Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519895681585237895:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:56.924565Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:35:56.973525Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519895703060076692:3410] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:58.055747Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:58.150501Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunning [GOOD] >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunningExtSubdomain >> TConsoleTests::TestMergeConfig [GOOD] >> TConsoleTests::TestRemoveTenant >> TExtSubDomainTest::CreateTableInsideThenStopTenantAndForceDeleteSubDomain-AlterDatabaseCreateHiveFirst-false >> Viewer::JsonStorageListingV2 [GOOD] >> Viewer::JsonStorageListingV2GroupIdFilter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2025-06-25T14:35:55.423249Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895695859447271:2159];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:55.442219Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a64/r3tmp/tmpR8FprQ/pdisk_1.dat 2025-06-25T14:35:56.328083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:56.334200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:56.353263Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:56.355674Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:56.357151Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch 
for subscription [1:7519895695859447146:2080] 1750862155376966 != 1750862155376969 2025-06-25T14:35:56.444708Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4821 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:35:56.724570Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895695859447350:2094] Handle TEvNavigate describe path dc-1 2025-06-25T14:35:56.775304Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895700154415174:2435] HANDLE EvNavigateScheme dc-1 2025-06-25T14:35:56.775447Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519895695859447396:2120], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:56.775512Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519895700154415106:2392][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519895695859447396:2120], cookie# 1 2025-06-25T14:35:56.781046Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895700154415110:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895700154415107:2392], cookie# 1 2025-06-25T14:35:56.781109Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895700154415111:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895700154415108:2392], cookie# 1 2025-06-25T14:35:56.781125Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895700154415112:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895700154415109:2392], cookie# 1 2025-06-25T14:35:56.781170Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895695859447115:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895700154415110:2392], cookie# 1 2025-06-25T14:35:56.781195Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895695859447118:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895700154415111:2392], cookie# 1 2025-06-25T14:35:56.781223Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895695859447121:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895700154415112:2392], cookie# 1 2025-06-25T14:35:56.781252Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895700154415110:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895695859447115:2049], cookie# 1 2025-06-25T14:35:56.781265Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895700154415111:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895695859447118:2052], cookie# 1 2025-06-25T14:35:56.781334Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895700154415112:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895695859447121:2055], cookie# 1 2025-06-25T14:35:56.781369Z node 1 
:SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895700154415106:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895700154415107:2392], cookie# 1 2025-06-25T14:35:56.781394Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895700154415106:2392][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:35:56.781436Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895700154415106:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895700154415108:2392], cookie# 1 2025-06-25T14:35:56.781451Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895700154415106:2392][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:35:56.781465Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895700154415106:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895700154415109:2392], cookie# 1 2025-06-25T14:35:56.781490Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519895700154415106:2392][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:35:56.781557Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519895695859447396:2120], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:35:56.799470Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519895695859447396:2120], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519895700154415106:2392] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:35:56.804723Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519895695859447396:2120], cacheItem# { Subscriber: { Subscriber: [1:7519895700154415106:2392] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-25T14:35:56.815401Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519895700154415175:2436], recipient# [1:7519895700154415174:2435], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 
72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:35:56.820579Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519895700154415174:2435] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:35:56.890523Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519895700154415174:2435] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:35:56.894038Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519895700154415174:2435] Handle TEvDescribeSchemeResult Forward to# [1:7519895700154415173:2434] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:35:56.936899Z ... 
ath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 2 IsSync: true Partial: 0 } 2025-06-25T14:35:57.828065Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519895704449382794:2665], recipient# [1:7519895704449382793:2664], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [72057594046644480:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: RedirectLookupError Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:35:57.828093Z node 1 :TX_PROXY INFO: describe.cpp:356: Actor# [1:7519895704449382793:2664] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 1 TClient::Ls response: Status: 128 StatusCode: ERROR Issues { message: "Default error" severity: 1 } SchemeStatus: 13 ErrorReason: "Could not resolve redirected path" TClient::Ls request: /dc-1 2025-06-25T14:35:57.836480Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895695859447350:2094] Handle TEvNavigate describe path /dc-1 2025-06-25T14:35:57.860611Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895704449382796:2667] HANDLE EvNavigateScheme /dc-1 2025-06-25T14:35:57.860704Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519895695859447396:2120], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:35:57.860759Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519895700154415106:2392][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519895695859447396:2120], cookie# 4 2025-06-25T14:35:57.860815Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895700154415110:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895700154415107:2392], cookie# 4 2025-06-25T14:35:57.860829Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895700154415111:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895700154415108:2392], cookie# 4 2025-06-25T14:35:57.860841Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895700154415112:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895700154415109:2392], cookie# 4 2025-06-25T14:35:57.860862Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895695859447115:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895700154415110:2392], cookie# 4 2025-06-25T14:35:57.860882Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895695859447118:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# 
[1:7519895700154415111:2392], cookie# 4 2025-06-25T14:35:57.860896Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895695859447121:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895700154415112:2392], cookie# 4 TClient::Ls response: 2025-06-25T14:35:57.860915Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895700154415110:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519895695859447115:2049], cookie# 4 2025-06-25T14:35:57.860926Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895700154415111:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519895695859447118:2052], cookie# 4 2025-06-25T14:35:57.860955Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895700154415112:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519895695859447121:2055], cookie# 4 2025-06-25T14:35:57.860978Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895700154415106:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519895700154415107:2392], cookie# 4 2025-06-25T14:35:57.860996Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895700154415106:2392][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:35:57.861010Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895700154415106:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519895700154415108:2392], cookie# 4 2025-06-25T14:35:57.861034Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895700154415106:2392][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:35:57.861052Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895700154415106:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519895700154415109:2392], cookie# 4 2025-06-25T14:35:57.861069Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519895700154415106:2392][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:35:57.861105Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519895695859447396:2120], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:35:57.861158Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519895695859447396:2120], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519895700154415106:2392] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750862157149 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:35:57.861225Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519895695859447396:2120], cacheItem# { Subscriber: { Subscriber: 
[1:7519895700154415106:2392] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750862157149 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-25T14:35:57.861365Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519895704449382797:2668], recipient# [1:7519895704449382796:2667], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:35:57.861422Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519895704449382796:2667] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:35:57.861494Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519895704449382796:2667] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:35:57.862085Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519895704449382796:2667] Handle TEvDescribeSchemeResult Forward to# [1:7519895704449382795:2666] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 63 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862157149 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "" Kind: "storage-pool-number-1" } StoragePools { Name: "" Kind: "storage-pool-number-2" } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 
MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862157149 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750862157233 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046... (TRUNCATED) |80.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantMultipleTenants [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantDomain |80.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut |80.6%| [LD] {RESULT} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut >> TConsoleTests::TestCreateTenantWrongPool [GOOD] >> TConsoleTests::TestCreateTenantWrongPoolExtSubdomain >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsAndTags [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsOnly >> TExtSubDomainTest::DeclareAndDrop >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItem [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItemScope >> KqpScanLogs::WideCombine+EnabledLogs [GOOD] >> TExtSubDomainTest::CreateTableInsideAndLs-AlterDatabaseCreateHiveFirst-false >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-24 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-49 |80.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |80.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |80.6%| [LD] {RESULT} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut >> KqpScanSpilling::SpillingInRuntimeNodes-EnabledSpilling [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-64 >> TConsoleTests::TestSetConfig [GOOD] >> TConsoleTests::TestTenantGeneration >> KqpJoinOrder::SortingsWithLookupJoinByPrefix-RemoveLimitOperator [GOOD] >> TConsoleTests::TestCreateTenantWrongPoolExtSubdomain [GOOD] >> TConsoleTests::TestCreateTenantAlreadyExists ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SpillingInRuntimeNodes+EnabledSpilling [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/yft8/0015a9/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk7 Trying to start YDB, gRPC: 65523, MsgBus: 9199 2025-06-25T14:30:29.564605Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894297391309911:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:29.565075Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015a9/r3tmp/tmp7NdzIH/pdisk_1.dat 2025-06-25T14:30:29.959245Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:29.959351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:29.968410Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894297391309720:2080] 1750861829485409 != 1750861829485412 2025-06-25T14:30:29.968585Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:29.972324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65523, node 1 2025-06-25T14:30:30.092911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:30.092938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:30.092950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:30.093047Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9199 2025-06-25T14:30:30.484771Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9199 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:30.833881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:30:30.851342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:31.001739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:31.141647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:31.220187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:32.869198Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894310276213230:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.869315Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:33.172050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.212760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.250985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.289674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.326911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.366601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.408498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:33.467913Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894314571181184:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:33.467984Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:33.468201Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894314571181189:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:33.471516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:33.481405Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894314571181191:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:30:33.566791Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894314571181242:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:34.518173Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894297391309911:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:34.518251Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:44.955437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:30:44.955466Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '787) '('"_id" '"8e4c4cae-f6ae2ede-b5d0fda9-3461c9ad") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1 '"HashV1")) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '685) '('"_id" '"119c8431-c97a02a0-b0b4e6db-d1a7789b") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '697) '('"_id" '"d009d853-8d91c1f3-fab5f1bb-4d2083a4")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) ) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::HandleErrorsCorrectly [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/yft8/0015dc/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk4 Trying to start YDB, gRPC: 24333, MsgBus: 16015 2025-06-25T14:30:28.812952Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894291699637408:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:28.813456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015dc/r3tmp/tmpK4OgKE/pdisk_1.dat 2025-06-25T14:30:29.086351Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24333, node 1 2025-06-25T14:30:29.195379Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:29.195404Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:29.195414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:29.195511Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:30:29.200736Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:29.200829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:29.202611Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16015 TClient is connected to server localhost:16015 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:30:29.819746Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:29.871756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:29.907857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:30:30.091292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:30.254267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:30.328169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:30:31.942812Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894304584540916:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:31.942968Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.232622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.293078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.334055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.363323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.398842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.444415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.517306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.608770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894308879508876:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.608829Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894308879508881:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.608865Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.611513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:32.621632Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894308879508883:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:30:32.674985Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894308879508934:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:33.813295Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894291699637408:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:33.813359Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:44.068534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:30:44.068574Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '787) '('"_id" '"56ef05cc-fa6df16c-f0e72170-8b33acf9") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1 '"HashV1")) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '685) '('"_id" '"b5d14bae-3df873bb-ff6352f-75f76f12") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '697) '('"_id" '"d57f52b4-3ea82034-98cfd7b4-7bbdbf50")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) ) 2025-06-25T14:35:59.144553Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 0, bytes: 1401088 2025-06-25T14:35:59.168765Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 1, bytes: 84 2025-06-25T14:35:59.168817Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 2, bytes: 2402376 2025-06-25T14:35:59.169142Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. 
From: [1:7519895704743889205:7617], blobId: 3, bytes: 144 2025-06-25T14:35:59.169175Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 4, bytes: 1200936 2025-06-25T14:35:59.169326Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 5, bytes: 72 2025-06-25T14:35:59.169354Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 6, bytes: 1601312 2025-06-25T14:35:59.169574Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 7, bytes: 96 2025-06-25T14:35:59.169602Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 8, bytes: 2001584 2025-06-25T14:35:59.169896Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 9, bytes: 120 2025-06-25T14:35:59.169933Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 10, bytes: 1801952 2025-06-25T14:35:59.170186Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 11, bytes: 108 2025-06-25T14:35:59.170218Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 12, bytes: 2001792 2025-06-25T14:35:59.170473Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 13, bytes: 120 2025-06-25T14:35:59.170504Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 14, bytes: 2202288 2025-06-25T14:35:59.174794Z node 1 :KQP_COMPUTE ERROR: compute_storage_actor.cpp:79: TxId: 281474976715972. Error: [TEvError] File size limit exceeded: 1/0Mb 2025-06-25T14:35:59.179802Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 15, bytes: 132 2025-06-25T14:35:59.179887Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 16, bytes: 2002000 2025-06-25T14:35:59.180145Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7519895704743889205:7617], blobId: 17, bytes: 120 2025-06-25T14:35:59.212451Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519895704743889191:4695], TxId: 281474976715972, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=MWNhYmQ2ODAtYmY1YjVhNmMtNjIxNTAwZTctZjYzNjk5OWU=. CustomerSuppliedId : . TraceId : 01jykr8nq6fhvjz6zt2jeazw03. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: [Compute spilling][TEvError] File size limit exceeded: 1/0Mb }. 2025-06-25T14:35:59.229706Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519895704743889192:4696], TxId: 281474976715972, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=MWNhYmQ2ODAtYmY1YjVhNmMtNjIxNTAwZTctZjYzNjk5OWU=. TraceId : 01jykr8nq6fhvjz6zt2jeazw03. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: Terminate execution }. 2025-06-25T14:35:59.292105Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MWNhYmQ2ODAtYmY1YjVhNmMtNjIxNTAwZTctZjYzNjk5OWU=, ActorId: [1:7519895704743889177:4690], ActorState: ExecuteState, TraceId: 01jykr8nq6fhvjz6zt2jeazw03, Create QueryResponse for error on request, msg: >> TConsoleConfigHelpersTests::TestConfigSubscriberAutoTenantDomain [GOOD] >> TConsoleConfigHelpersTests::TestConfigSubscriptionEraser >> Viewer::Plan2SvgBad [FAIL] >> KqpScanLogs::WideCombine-EnabledLogs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanLogs::WideCombine+EnabledLogs [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/yft8/0015ba/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk2 Trying to start YDB, gRPC: 10103, MsgBus: 6813 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015ba/r3tmp/tmpcPSjGP/pdisk_1.dat TServer::EnableGrpc on GrpcPort 10103, node 1 TClient is connected to server localhost:6813 TClient is connected to server localhost:6813 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (DataType 'Uint64)) (let $4 '('('"_logical_id" '505) '('"_id" '"362581bd-7b92f13-31cf483-1d93158e") '('"_wide_channels" (StructType '('"Value" (OptionalType (DataType 'String))) '('_yql_agg_0 $3))))) (let $5 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($12) (block '( (let $13 (lambda '($15) (Member $15 '"Key") (Member $15 '"Value"))) (let $14 (lambda '($25 $26) $25 $26)) (return (FromFlow (WideCombiner (ExpandMap (ToFlow $12) $13) '-1073741824 (lambda '($16 $17) $17) (lambda '($18 $19 $20) (AggrCountInit $19)) (lambda '($21 $22 $23 $24) (AggrCountUpdate $22 $24)) $14))) ))) $4)) (let $6 (DqCnHashShuffle (TDqOutput $5 '0) '('0) '0 '"HashV1")) (let $7 (DqPhyStage '($6) (lambda '($27) (block '( (let $28 (WideCombiner (ToFlow $27) '"" (lambda '($29 $30) $29) (lambda '($31 $32 $33) $33) (lambda '($34 $35 $36 $37) (AggrAdd $36 $37)) (lambda '($38 $39) $39))) (return (FromFlow (NarrowMap $28 (lambda '($40) (AsStruct '('"column0" $40)))))) ))) '('('"_logical_id" '1265) '('"_id" '"2fb779a0-30d539e9-5fd1e570-2b484428")))) (let $8 (DqCnUnionAll (TDqOutput $7 '0))) (let $9 (DqPhyStage '($8) (lambda '($41) $41) '('('"_logical_id" '1533) '('"_id" '"c733f91f-db106e5b-fd68845-f0a21737")))) (let $10 '($5 $7 $9)) (let $11 (DqCnResult (TDqOutput $9 '0) '('"column0"))) (return (KqpPhysicalQuery '((KqpPhysicalTx $10 '($11) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType (StructType '('"column0" $3))) '0 '0)) '('('"type" '"query")))) ) >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-20 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-21 >> KqpScan::ScanPg [GOOD] >> DataShardWrite::DelayedVolatileTxAndEvWrite [GOOD] >> TExtSubDomainTest::DeclareAndDrop [GOOD] >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SpillingInRuntimeNodes-EnabledSpilling [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/yft8/0015e9/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk8 Trying to start YDB, gRPC: 61197, MsgBus: 28697 2025-06-25T14:30:28.671682Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519894293792834392:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:28.674415Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015e9/r3tmp/tmpmP1E8P/pdisk_1.dat 2025-06-25T14:30:28.968335Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:30:28.968872Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519894293792834367:2080] 1750861828667344 != 1750861828667347 TServer::EnableGrpc on GrpcPort 61197, node 1 2025-06-25T14:30:29.043379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:30:29.043492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:30:29.045164Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:30:29.045187Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:30:29.045197Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:30:29.045206Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:30:29.045304Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28697 TClient is connected to server localhost:28697 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:30:29.599197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:30:29.634492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:30:29.650685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:29.681740Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:30:29.867131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:30:30.075619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:30:30.179203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.017241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894310972705196:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.017366Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.312957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.361799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.392065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.416645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.449623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.538728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.577102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:30:32.667051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894310972705858:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.667142Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.667384Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519894310972705863:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:30:32.671111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:30:32.681859Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519894310972705865:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:30:32.785841Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519894310972705918:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:30:33.672288Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519894293792834392:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:30:33.672360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:30:43.954581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:30:43.954622Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '787) '('"_id" '"67afd931-f3686b29-b4464f8c-4a8ba5ce") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1 '"HashV1")) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '685) '('"_id" '"f4c31235-ae97afdb-db1bbac0-d095b8d2") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '697) '('"_id" '"e97e79ac-8171cccf-44632d5d-4a30648a")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) ) >> TExtSubDomainTest::DeclareAndAlterPools-AlterDatabaseCreateHiveFirst-false [GOOD] >> TExtSubDomainTest::DeclareAndAlterPools-AlterDatabaseCreateHiveFirst-true >> RemoteTopicReader::ReadTopic [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-61 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-62 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-14 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-15 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndDrop [GOOD] Test command err: 2025-06-25T14:36:04.126715Z node 1 
:METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895734336149423:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:04.126774Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a4e/r3tmp/tmpa6D0Ai/pdisk_1.dat 2025-06-25T14:36:04.453401Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:04.497377Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:04.497469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:04.503905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3947 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:36:04.746786Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895734336149435:2105] Handle TEvNavigate describe path dc-1 2025-06-25T14:36:04.768231Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895734336149711:2258] HANDLE EvNavigateScheme dc-1 2025-06-25T14:36:04.768410Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519895734336149458:2118], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:04.768446Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519895734336149458:2118], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:36:04.768614Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519895734336149712:2259][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:36:04.774447Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519895734336149167:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519895734336149716:2259] 2025-06-25T14:36:04.774527Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519895734336149167:2049] Subscribe: subscriber# [1:7519895734336149716:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:04.774595Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519895734336149170:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519895734336149717:2259] 2025-06-25T14:36:04.774610Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519895734336149170:2052] Subscribe: subscriber# [1:7519895734336149717:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:04.774642Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519895734336149173:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519895734336149718:2259] 2025-06-25T14:36:04.774657Z node 1 :SCHEME_BOARD_REPLICA INFO: 
replica.cpp:646: [1:7519895734336149173:2055] Subscribe: subscriber# [1:7519895734336149718:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:04.774694Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519895734336149716:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895734336149167:2049] 2025-06-25T14:36:04.774722Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519895734336149717:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895734336149170:2052] 2025-06-25T14:36:04.774753Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519895734336149718:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895734336149173:2055] 2025-06-25T14:36:04.774797Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519895734336149712:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895734336149713:2259] 2025-06-25T14:36:04.774829Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519895734336149712:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895734336149714:2259] 2025-06-25T14:36:04.774879Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519895734336149712:2259][/dc-1] Set up state: owner# [1:7519895734336149458:2118], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:36:04.774978Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519895734336149712:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895734336149715:2259] 2025-06-25T14:36:04.775020Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519895734336149712:2259][/dc-1] Path was already updated: owner# [1:7519895734336149458:2118], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:36:04.775050Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895734336149716:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895734336149713:2259], cookie# 1 2025-06-25T14:36:04.775062Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895734336149717:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895734336149714:2259], cookie# 1 2025-06-25T14:36:04.775073Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895734336149718:2259][/dc-1] Handle 
NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895734336149715:2259], cookie# 1 2025-06-25T14:36:04.775094Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519895734336149167:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519895734336149716:2259] 2025-06-25T14:36:04.775132Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895734336149167:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895734336149716:2259], cookie# 1 2025-06-25T14:36:04.775153Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519895734336149170:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519895734336149717:2259] 2025-06-25T14:36:04.775164Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895734336149170:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895734336149717:2259], cookie# 1 2025-06-25T14:36:04.775184Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519895734336149173:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519895734336149718:2259] 2025-06-25T14:36:04.775198Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895734336149173:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895734336149718:2259], cookie# 1 2025-06-25T14:36:04.780168Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895734336149716:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895734336149167:2049], cookie# 1 2025-06-25T14:36:04.780224Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895734336149717:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895734336149170:2052], cookie# 1 2025-06-25T14:36:04.780250Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895734336149718:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895734336149173:2055], cookie# 1 2025-06-25T14:36:04.780292Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895734336149712:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895734336149713:2259], cookie# 1 2025-06-25T14:36:04.780331Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895734336149712:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:36:04.780345Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895734336149712:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895734336149714:2259], cookie# 1 2025-06-25T14:36:04.780378Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895734336149712:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:36:04.780391Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895734336149712:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895734336149715:2259], cookie# 1 2025-06-25T14:36:04.780421Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519895734336149712:2259][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, 
half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:36:04.848073Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519895734336149458:2118], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataS ... e NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046644480 Generation: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:7519895734336149693:2242], cookie# 281474976715659 2025-06-25T14:36:05.186627Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:7519895734336149688:2239] Ack for unknown update (already acked?): sender# [1:7519895734336149693:2242], cookie# 281474976715659 2025-06-25T14:36:05.187177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715659 2025-06-25T14:36:05.187227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715659 2025-06-25T14:36:05.187299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:36:05.187326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:36:05.187379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 TClient::Ls request: /dc-1 2025-06-25T14:36:05.188195Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895734336149435:2105] Handle TEvNavigate describe path /dc-1 2025-06-25T14:36:05.209737Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895738631117099:2320] HANDLE EvNavigateScheme /dc-1 2025-06-25T14:36:05.209828Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519895734336149458:2118], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true 
ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:05.209919Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519895734336149712:2259][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519895734336149458:2118], cookie# 4 2025-06-25T14:36:05.210009Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895734336149716:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895734336149713:2259], cookie# 4 2025-06-25T14:36:05.210031Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895734336149717:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895734336149714:2259], cookie# 4 2025-06-25T14:36:05.210042Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895734336149718:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895734336149715:2259], cookie# 4 2025-06-25T14:36:05.210064Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895734336149167:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895734336149716:2259], cookie# 4 2025-06-25T14:36:05.210079Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895734336149170:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895734336149717:2259], cookie# 4 2025-06-25T14:36:05.210087Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895734336149173:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895734336149718:2259], cookie# 4 2025-06-25T14:36:05.210108Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895734336149716:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 7 Partial: 0 }: sender# [1:7519895734336149167:2049], cookie# 4 2025-06-25T14:36:05.210135Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895734336149717:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 7 Partial: 0 }: sender# [1:7519895734336149170:2052], cookie# 4 2025-06-25T14:36:05.210174Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895734336149718:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 7 Partial: 0 }: sender# [1:7519895734336149173:2055], cookie# 4 2025-06-25T14:36:05.210195Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895734336149712:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 7 Partial: 0 }: sender# [1:7519895734336149713:2259], cookie# 4 2025-06-25T14:36:05.210229Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895734336149712:2259][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:36:05.210250Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895734336149712:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 7 Partial: 0 }: sender# [1:7519895734336149714:2259], cookie# 4 2025-06-25T14:36:05.210257Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895734336149712:2259][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:36:05.210264Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: 
[main][1:7519895734336149712:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 7 Partial: 0 }: sender# [1:7519895734336149715:2259], cookie# 4 2025-06-25T14:36:05.210286Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519895734336149712:2259][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:36:05.210325Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519895734336149458:2118], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:36:05.210375Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519895734336149458:2118], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519895734336149712:2259] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750862165164 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:36:05.210448Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519895734336149458:2118], cacheItem# { Subscriber: { Subscriber: [1:7519895734336149712:2259] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750862165164 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-25T14:36:05.210591Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519895738631117100:2321], recipient# [1:7519895738631117099:2320], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:36:05.210646Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519895738631117099:2320] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:36:05.210711Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519895738631117099:2320] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:36:05.211215Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519895738631117099:2320] Handle TEvDescribeSchemeResult Forward to# [1:7519895738631117098:2319] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { 
Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862165164 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862165164 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version... (TRUNCATED) >> TConsoleConfigHelpersTests::TestConfigSubscriptionEraser [GOOD] >> FeatureFlagsConfiguratorTest::TestFeatureFlagsUpdates >> TConsoleConfigSubscriptionTests::TestNotificationForModifiedConfigItemScope [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRemovedConfigItem ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanLogs::WideCombine-EnabledLogs [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/yft8/0015f5/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk3 Trying to start YDB, gRPC: 2306, MsgBus: 31288 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015f5/r3tmp/tmp1CBZkC/pdisk_1.dat TServer::EnableGrpc on GrpcPort 2306, node 1 TClient is connected to server localhost:31288 TClient is connected to server localhost:31288 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (DataType 'Uint64)) (let $4 '('('"_logical_id" '505) '('"_id" '"85a47d31-a9dee0c6-67c915ae-2796451d") '('"_wide_channels" (StructType '('"Value" (OptionalType (DataType 'String))) '('_yql_agg_0 $3))))) (let $5 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($12) (block '( (let $13 (lambda '($15) (Member $15 '"Key") (Member $15 '"Value"))) (let $14 (lambda '($25 $26) $25 $26)) (return (FromFlow (WideCombiner (ExpandMap (ToFlow $12) $13) '-1073741824 (lambda '($16 $17) $17) (lambda '($18 $19 $20) (AggrCountInit $19)) (lambda '($21 $22 $23 $24) (AggrCountUpdate $22 $24)) $14))) ))) $4)) (let $6 (DqCnHashShuffle (TDqOutput $5 '0) '('0) '0 '"HashV1")) (let $7 (DqPhyStage '($6) (lambda '($27) (block '( (let $28 (WideCombiner (ToFlow $27) '"" (lambda '($29 $30) $29) (lambda '($31 $32 $33) $33) (lambda '($34 $35 $36 $37) (AggrAdd $36 $37)) (lambda '($38 $39) $39))) (return (FromFlow (NarrowMap $28 (lambda '($40) (AsStruct '('"column0" $40)))))) ))) '('('"_logical_id" '1265) '('"_id" '"43076f57-e2100d6b-cf155cea-8419a4fc")))) (let $8 (DqCnUnionAll (TDqOutput $7 '0))) (let $9 (DqPhyStage '($8) (lambda '($41) $41) '('('"_logical_id" '1533) '('"_id" '"3a4dc1d2-7038cdb1-efe4641-eb566cde")))) (let $10 '($5 $7 $9)) (let $11 (DqCnResult (TDqOutput $9 '0) '('"column0"))) (return (KqpPhysicalQuery '((KqpPhysicalTx $10 '($11) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType (StructType '('"column0" $3))) '0 '0)) '('('"type" '"query")))) ) >> IncrementalBackup::SimpleRestoreBackupCollection+WithIncremental ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoinByPrefix-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 6971, MsgBus: 24838 2025-06-25T14:35:18.867532Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895537119646623:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:18.867576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ed7/r3tmp/tmpzvknz7/pdisk_1.dat 2025-06-25T14:35:19.286185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-25T14:35:19.286272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:19.292492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:19.317100Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:19.360454Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895537119646605:2080] 1750862118863640 != 1750862118863643 TServer::EnableGrpc on GrpcPort 6971, node 1 2025-06-25T14:35:19.504658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:19.504680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:19.504686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:19.504782Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24838 2025-06-25T14:35:19.898647Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24838 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:20.044555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:20.057363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:35:21.992567Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895550004549140:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:21.992725Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:21.993104Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895550004549152:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:21.997118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:22.011048Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895550004549154:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:35:22.072549Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895554299516501:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:22.383687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.475473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.506522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.536150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.567437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.689758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.712619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.743586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.784971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.851345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.922230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.978375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.021217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.670179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.714419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation typ ... 
48665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.648832Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.649409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.653507Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.653773Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.654066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.654288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.658927Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.659793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.660218Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.660863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.664744Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.665310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.665813Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.666345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.674758Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.675385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.676111Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.676537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.681925Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.682393Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.682927Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.683486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.687710Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.688399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.688795Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.689256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.693288Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.693844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.695431Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.695894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.698821Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.699333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.702069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.702672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.703792Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.704579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.707479Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.708004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:35:59.709107Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.713073Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:35:59.944636Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykr7pqsf0znsyz30z5pe3p6", SessionId: ydb://session/3?node_id=1&id=YzVjYzg2ODUtNmU2ZWZmY2UtOWI3MzllZDktZjdmNjczNmM=, Slow query, duration: 34.126595s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:36:00.617539Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:00.618168Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:00.618874Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519895627313976639:4204];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-25T14:36:00.619298Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-62 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-63 |80.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |80.7%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |80.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest >> RemoteTopicReader::ReadTopic [GOOD] Test command err: 2025-06-25T14:35:56.482904Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895701054776053:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:56.483126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00174b/r3tmp/tmpriV9dO/pdisk_1.dat 2025-06-25T14:35:57.279505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:57.279576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:57.294796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:57.368305Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:57.376581Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895701054775848:2080] 1750862156441861 != 1750862156441864 2025-06-25T14:35:57.480139Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected 
event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18958 TServer::EnableGrpc on GrpcPort 13107, node 1 2025-06-25T14:35:57.997202Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:57.997229Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:57.997238Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:57.997336Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18958 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:58.606477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:58.905096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:01.141767Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895722529613221:2322], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:01.141910Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:01.142559Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895722529613240:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:01.142596Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895722529613241:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:01.146699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:01.151414Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895722529613246:2444] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:36:01.161136Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895722529613245:2333], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:36:01.161191Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895722529613244:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:36:01.230225Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895722529613293:2475] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:36:01.262731Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895722529613311:2483] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:36:01.811011Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895701054776053:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:01.842173Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:36:02.898822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:36:03.741453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:04.534066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:36:05.279448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710685:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:36:06.006235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710692:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:36:07.328800Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7519895748299417927:2831] Handshake: worker# [1:7519895709644711039:2294] 2025-06-25T14:36:07.333346Z node 1 
:REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7519895748299417927:2831] Create read session: session# [1:7519895748299417928:2293] 2025-06-25T14:36:07.333766Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519895748299417927:2831] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-25T14:36:07.354523Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7519895748299417927:2831] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_1_10573381295177628199_v1 } } 2025-06-25T14:36:07.365705Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7519895748299417927:2831] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 0 SeqNo: 1 CreateTime: 2025-06-25T14:36:07.220000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-25T14:36:07.367847Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519895748299417927:2831] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-25T14:36:07.480578Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7519895748299417927:2831] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 1 SeqNo: 2 CreateTime: 2025-06-25T14:36:07.462000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-25T14:36:07.567532Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7519895748299418026:2866] Handshake: worker# [1:7519895709644711039:2294] 2025-06-25T14:36:07.575607Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7519895748299418026:2866] Create read session: session# [1:7519895748299418027:2293] 2025-06-25T14:36:07.581684Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7519895748299418026:2866] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-25T14:36:07.602527Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7519895748299418026:2866] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_2_10961234404795199272_v1 } } 2025-06-25T14:36:07.605019Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7519895748299418026:2866] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 1 SeqNo: 2 CreateTime: 2025-06-25T14:36:07.462000Z MessageGroupId: producer ProducerId: producer }] } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_scan/unittest >> KqpScan::ScanPg [GOOD] Test command err: 2025-06-25T14:35:25.769113Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:25.769625Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:25.769907Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:25.770061Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:25.770379Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:25.770419Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001582/r3tmp/tmp2FA7fX/pdisk_1.dat 2025-06-25T14:35:26.137897Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:26.351685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:26.491608Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:215:2175] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:35:26.495555Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:26.495701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:26.497011Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:35:26.498376Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:245:2131] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:35:26.499799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:26.499900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:26.511497Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976720656 RangeEnd# 281474976725656 txAllocator# 72057594046447617 2025-06-25T14:35:26.527455Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:35:26.528028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:26.528507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:26.897782Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:215:2175] Handle TEvProposeTransaction 2025-06-25T14:35:26.897871Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:215:2175] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:35:26.898059Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:215:2175] 
Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:1154:2704] 2025-06-25T14:35:27.094356Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:1154:2704] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T14:35:27.094490Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:1154:2704] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:35:27.095268Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:1154:2704] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:35:27.095370Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:1154:2704] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:35:27.095811Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:1154:2704] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:35:27.096010Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:1154:2704] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:35:27.096105Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:1154:2704] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:35:27.096488Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:1154:2704] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:35:27.098155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:27.105547Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:1154:2704] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:35:27.105652Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:1154:2704] txid# 281474976715657 SEND to# [1:1061:2646] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T14:35:27.216629Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1205:2353] 2025-06-25T14:35:27.216906Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:35:27.282600Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:35:27.282748Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:35:27.284738Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:35:27.284829Z node 2 :TX_DATASHARD DEBUG: 
datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:35:27.284896Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:35:27.285322Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:35:27.285546Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:35:27.285663Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:1228:2353] in generation 1 2025-06-25T14:35:27.317235Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:35:27.344468Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:35:27.344704Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:35:27.344846Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:1232:2370] 2025-06-25T14:35:27.344892Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:35:27.344998Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:35:27.345040Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:35:27.345593Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:35:27.345718Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:35:27.345840Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:35:27.345889Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:27.345941Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:35:27.345985Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:35:27.409523Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1189:2734], serverId# [2:1236:2371], sessionId# [0:0:0] 2025-06-25T14:35:27.410021Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:35:27.410330Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:35:27.410449Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:35:27.412807Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:35:27.429877Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: 
TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:35:27.430040Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:35:27.797121Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1260:2752], serverId# [2:1262:2378], sessionId# [0:0:0] 2025-06-25T14:35:27.809007Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046 ... qfjvyme1srcfd1pa0. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Do not drain channelId: 1, finished 2025-06-25T14:35:51.381105Z node 3 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715664, task: 1. Tasks execution finished 2025-06-25T14:35:51.381131Z node 3 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [3:1558:2931], TxId: 281474976715664, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=MzY5YWVjYjItNTk2YWYzODUtZDFmNTMwOWYtZTc1NDBjNWU=. TraceId : 01jykr8dnqfjvyme1srcfd1pa0. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-25T14:35:51.381213Z node 3 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715664, task: 1. pass away 2025-06-25T14:35:51.381294Z node 3 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715664;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:35:51.381420Z node 3 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715664, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-25T14:35:51.381575Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1555:2892] TxId: 281474976715664. Ctx: { TraceId: 01jykr8dnqfjvyme1srcfd1pa0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MzY5YWVjYjItNTk2YWYzODUtZDFmNTMwOWYtZTc1NDBjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [3:1558:2931], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1819 Tasks { TaskId: 1 CpuTimeUs: 509 FinishTimeMs: 1750862151381 OutputRows: 1 OutputBytes: 6 ResultRows: 1 ResultBytes: 6 ComputeCpuTimeUs: 58 BuildCpuTimeUs: 451 HostName: "ghrun-kqfvx6aroe" NodeId: 3 CreateTimeMs: 1750862151378 UpdateTimeMs: 1750862151381 } MaxMemoryUsage: 1048576 } 2025-06-25T14:35:51.381618Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715664. Ctx: { TraceId: 01jykr8dnqfjvyme1srcfd1pa0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MzY5YWVjYjItNTk2YWYzODUtZDFmNTMwOWYtZTc1NDBjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [3:1558:2931] 2025-06-25T14:35:51.381709Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [3:1555:2892] TxId: 281474976715664. Ctx: { TraceId: 01jykr8dnqfjvyme1srcfd1pa0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MzY5YWVjYjItNTk2YWYzODUtZDFmNTMwOWYtZTc1NDBjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T14:35:51.381742Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2202: ActorId: [3:1555:2892] TxId: 281474976715664. Ctx: { TraceId: 01jykr8dnqfjvyme1srcfd1pa0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MzY5YWVjYjItNTk2YWYzODUtZDFmNTMwOWYtZTc1NDBjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState 2025-06-25T14:35:51.381782Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [3:1555:2892] TxId: 281474976715664. Ctx: { TraceId: 01jykr8dnqfjvyme1srcfd1pa0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MzY5YWVjYjItNTk2YWYzODUtZDFmNTMwOWYtZTc1NDBjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.001819s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-25T14:35:51.382499Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 2000, txId: 281474976715661] shutting down 2025-06-25T14:35:51.382586Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [3:215:2175] Handle TEvProposeTransaction 2025-06-25T14:35:51.382618Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [3:215:2175] TxId# 0 ProcessProposeTransaction 2025-06-25T14:35:51.382707Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:289: actor# [3:215:2175] Cookie# 0 userReqId# "" txid# 0 reqId# [3:1560:2932] SnapshotReq marker# P0 2025-06-25T14:35:51.383467Z node 3 :TX_PROXY DEBUG: resolvereq.cpp:152: Actor# [3:1563:2932] txid# 0 HANDLE EvNavigateKeySetResult TResolveTablesActor marker# P1 ErrorCount# 0 2025-06-25T14:35:51.383712Z node 3 :TX_PROXY DEBUG: resolvereq.cpp:272: Actor# [3:1563:2932] txid# 0 HANDLE EvResolveKeySetResult TResolveTablesActor marker# P2 ErrorCount# 0 2025-06-25T14:35:51.383812Z node 3 :TX_PROXY DEBUG: snapshotreq.cpp:1453: Actor# [3:1560:2932] SEND TEvDiscardVolatileSnapshotRequest to datashard 72075186224037888 marker# P3 2025-06-25T14:36:00.800196Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:00.800669Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:00.800943Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:36:00.803525Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:634:2322], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:00.804031Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:00.804239Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001582/r3tmp/tmp6n6BIr/pdisk_1.dat 2025-06-25T14:36:01.165942Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:01.341753Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:01.463551Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:01.463733Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:01.477054Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:01.477179Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:01.491109Z node 5 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-06-25T14:36:01.491964Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:01.492484Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:01.803815Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:02.434646Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:1316:2791], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:02.434776Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:1327:2796], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:02.434888Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:02.442736Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:02.557858Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:02.557990Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:02.979787Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:1330:2799], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:36:03.106500Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:1457:2871] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:36:03.991477Z node 5 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykr8tg038rpn3ck2hrpfdft, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=ZDc4OTcxNDItNjZhNjZiZTgtNTRjZGVhMjItNjdjYzZiZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:05.079761Z node 5 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykr8w1ye799qxzz7y1d12eq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=YTVjZDVhNDItMWViMzQ0ZTYtYTM2MTk2MTktZWQ2NGI4OTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:05.757488Z node 5 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykr8w1ye799qxzz7y1d12eq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=YTVjZDVhNDItMWViMzQ0ZTYtYTM2MTk2MTktZWQ2NGI4OTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:05.760393Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 2000, txId: 281474976715661] shutting down >> IncrementalBackup::SimpleBackup |80.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> TConsoleTests::TestAlterTenantModifyStorageResourcesForRunningExtSubdomain [GOOD] >> TConsoleTests::TestAlterUnknownTenant >> TReplicationTests::Create >> FeatureFlagsConfiguratorTest::TestFeatureFlagsUpdates [GOOD] >> TExtSubDomainTest::CreateTableInsideAndLs-AlterDatabaseCreateHiveFirst-false [GOOD] >> TExtSubDomainTest::CreateTableInsideAndLs-AlterDatabaseCreateHiveFirst-true >> TConsoleTests::TestRemoveTenant [GOOD] >> TConsoleTests::TestRemoveTenantExtSubdomain >> TConsoleTests::TestTenantGeneration [GOOD] >> TConsoleTests::TestTenantGenerationExtSubdomain >> TReplicationTests::CreateSequential >> TReplicationTests::Create [GOOD] >> TReplicationTests::ConsistencyLevel >> TConsoleTests::TestCreateTenantAlreadyExists [GOOD] >> TConsoleTests::TestCreateTenantAlreadyExistsExtSubdomain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> FeatureFlagsConfiguratorTest::TestFeatureFlagsUpdates [GOOD] Test command err: 2025-06-25T14:35:32.419024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:32.419078Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:32.521829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:34.105219Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 
2025-06-25T14:35:34.105281Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:34.189076Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:35.829962Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:35.830028Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:35.889284Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:37.321497Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:37.321562Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:37.381447Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:39.361623Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:39.361717Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:39.424572Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:54.095513Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:54.095598Z node 15 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:54.144592Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:55.699028Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:55.699103Z node 16 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:55.750039Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 
72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:03.657949Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:03.658038Z node 21 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:03.753824Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:05.915538Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:05.915629Z node 22 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:05.997649Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:07.947685Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:07.947781Z node 23 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:08.022065Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:10.687318Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:10.687407Z node 24 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:10.741783Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) >> LdapAuthProviderTest::LdapServerIsUnavailable >> KqpJoinOrder::SortingsByPrefixWithConstant-RemoveLimitOperator [GOOD] |80.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |80.7%| [LD] {RESULT} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |80.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut >> SystemView::ShowCreateTableDefaultLiteral >> KqpQueryPerf::Replace+QueryService+UseSink >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDontExistGroupAttribute >> TConsoleConfigSubscriptionTests::TestNotificationForRemovedConfigItem [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedClient >> 
Viewer::TenantInfo5kkTablets [GOOD] >> Viewer::UseTransactionWhenExecuteDataActionQuery >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-49 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-50 >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsFromAdLdapServer >> TReplicationTests::ConsistencyLevel [GOOD] >> TReplicationTests::Alter >> TReplicationTests::CreateSequential [GOOD] >> TReplicationTests::CreateInParallel >> TKeyValueTest::TestRewriteThenLastValueNewApi >> TConsoleTests::TestAlterUnknownTenant [GOOD] >> TConsoleTests::TestAlterUnknownTenantExtSubdomain |80.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |80.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |80.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |80.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |80.7%| [LD] {RESULT} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |80.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/engine/ut/ydb-core-engine-ut >> TReplicationTests::Alter [GOOD] >> TReplicationTests::CannotAddReplicationConfig >> TExtSubDomainTest::DeclareAndAlterPools-AlterDatabaseCreateHiveFirst-true [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-64 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-65 >> KqpJoinOrder::CanonizedJoinOrderTPCC [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-21 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-22 >> TReplicationTests::CreateInParallel [GOOD] >> TReplicationTests::CreateDropRecreate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPrefixWithConstant-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 18009, MsgBus: 19114 2025-06-25T14:35:19.328885Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895544893590893:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:19.329489Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ede/r3tmp/tmp92QVZo/pdisk_1.dat 2025-06-25T14:35:19.914625Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:19.914714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:19.923654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:19.963358Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18009, node 1 2025-06-25T14:35:20.099676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:20.099699Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:20.099719Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:20.099835Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:35:20.268504Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19114 TClient is connected to server localhost:19114 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:20.940935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:20.954262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:35:23.063847Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895562073460519:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:23.063851Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895562073460531:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:23.063980Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:23.068654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:23.081513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:35:23.083905Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895562073460533:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:35:23.177957Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895562073460584:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:23.572693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.678326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.710791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.743169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.774446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.918992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.996996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.053572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.151002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.183690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.219511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.262945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.313943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.320420Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895544893590893:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:24.320495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:35:24.937965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: E ... 
99038Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038461;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.599349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.599599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038429;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.604933Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038429;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.605687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.608157Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.608846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.610177Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.610728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.614472Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.615585Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.615630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.616167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038483;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.620929Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038483;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.621637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.626931Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.627215Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.627527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.627902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.634147Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.634903Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.636067Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.640919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.641409Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.642124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.647315Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.647551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.648231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.648633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.654612Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.655006Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.655312Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.656749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038463;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.661394Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.662176Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038463;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.662439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.662852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.668147Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.668454Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.669015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:05.674353Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:05.764214Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykr7r7c9tfxdszz0htr1erb", SessionId: ydb://session/3?node_id=1&id=YTQzYWIwMzQtODVhNWVjNS1kOTdlZTJlNC0zYTA0MDE4OQ==, Slow query, duration: 38.423298s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:36:06.007637Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:06.008147Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:06.008764Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519895660857727816:4665];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-25T14:36:06.009205Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> Viewer::SharedDoesntShowExclusiveNodes [GOOD] >> Viewer::ServerlessWithExclusiveNodesCheckTable >> TReplicationTests::CannotAddReplicationConfig [GOOD] >> TReplicationTests::CannotSetAsyncReplicaAttribute >> IncrementalBackup::SimpleRestoreBackupCollection+WithIncremental [FAIL] >> IncrementalBackup::SimpleRestoreBackupCollection-WithIncremental >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-63 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-64 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndAlterPools-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2025-06-25T14:35:58.805695Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895709582018428:2181];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a60/r3tmp/tmpH3gkCZ/pdisk_1.dat 2025-06-25T14:35:59.183825Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:35:59.613984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:59.616394Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:59.616634Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:59.648091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:59.784879Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:65184 WaitRootIsUp 
'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:35:59.958434Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895709582018494:2116] Handle TEvNavigate describe path dc-1 2025-06-25T14:36:00.008500Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895718171953572:2443] HANDLE EvNavigateScheme dc-1 2025-06-25T14:36:00.008642Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519895713876985815:2130], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:00.008673Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519895713876985815:2130], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:36:00.008827Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519895718171953573:2444][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:36:00.034138Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519895709582018214:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519895718171953577:2444] 2025-06-25T14:36:00.034230Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519895709582018214:2050] Subscribe: subscriber# [1:7519895718171953577:2444], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:00.034314Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519895709582018217:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519895718171953578:2444] 2025-06-25T14:36:00.034358Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519895709582018217:2053] Subscribe: subscriber# [1:7519895718171953578:2444], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:00.034455Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519895718171953577:2444][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895709582018214:2050] 2025-06-25T14:36:00.034479Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519895718171953578:2444][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895709582018217:2053] 2025-06-25T14:36:00.034529Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519895718171953573:2444][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895718171953574:2444] 2025-06-25T14:36:00.034559Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519895718171953573:2444][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895718171953575:2444] 2025-06-25T14:36:00.034608Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519895718171953573:2444][/dc-1] Set up state: owner# [1:7519895713876985815:2130], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: 
[OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:36:00.034778Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895718171953577:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895718171953574:2444], cookie# 1 2025-06-25T14:36:00.034790Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895718171953578:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895718171953575:2444], cookie# 1 2025-06-25T14:36:00.034803Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895718171953579:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895718171953576:2444], cookie# 1 2025-06-25T14:36:00.034825Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519895709582018214:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519895718171953577:2444] 2025-06-25T14:36:00.034844Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895709582018214:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895718171953577:2444], cookie# 1 2025-06-25T14:36:00.034866Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519895709582018217:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519895718171953578:2444] 2025-06-25T14:36:00.034884Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895709582018217:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895718171953578:2444], cookie# 1 2025-06-25T14:36:00.036683Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519895709582018220:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519895718171953579:2444] 2025-06-25T14:36:00.036749Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519895709582018220:2056] Subscribe: subscriber# [1:7519895718171953579:2444], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:00.036865Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895718171953577:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895709582018214:2050], cookie# 1 2025-06-25T14:36:00.036885Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895718171953578:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895709582018217:2053], cookie# 1 2025-06-25T14:36:00.036936Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519895718171953579:2444][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895709582018220:2056] 2025-06-25T14:36:00.036990Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895718171953573:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895718171953574:2444], cookie# 1 2025-06-25T14:36:00.037014Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895718171953573:2444][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:36:00.037045Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: 
[main][1:7519895718171953573:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895718171953575:2444], cookie# 1 2025-06-25T14:36:00.037061Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895718171953573:2444][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:36:00.037086Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519895718171953573:2444][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895718171953576:2444] 2025-06-25T14:36:00.037159Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519895718171953573:2444][/dc-1] Path was already updated: owner# [1:7519895713876985815:2130], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:36:00.037191Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895709582018220:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895718171953579:2444], cookie# 1 2025-06-25T14:36:00.037213Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519895709582018220:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519895718171953579:2444] 2025-06-25T14:36:00.037228Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895718171953579:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895709582018220:2056], cookie# 1 2025-06-25T14:36:00.037243Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895718171953573:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895718171953576:2444], cookie# 1 2025-06-25T14:36:00.037262Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519895718171953573:2444][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:36:00.141762Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519895713876985815:2130], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 
ShardsInside: 0 ShardsLimit: 200000 Resour ... .TEvSubscribe { Path: /dc-1/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [3:7519895775797449693:3000] 2025-06-25T14:36:13.177656Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7519895754322611690:2050] Upsert description: path# /dc-1/.metadata/workload_manager/running_requests 2025-06-25T14:36:13.177701Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7519895754322611690:2050] Subscribe: subscriber# [3:7519895775797449693:3000], path# /dc-1/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:13.177726Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7519895754322611693:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [3:7519895775797449694:3000] 2025-06-25T14:36:13.177733Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7519895754322611693:2053] Upsert description: path# /dc-1/.metadata/workload_manager/running_requests 2025-06-25T14:36:13.177811Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7519895754322611693:2053] Subscribe: subscriber# [3:7519895775797449694:3000], path# /dc-1/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:13.177840Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7519895754322611696:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [3:7519895775797449695:3000] 2025-06-25T14:36:13.177850Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7519895754322611696:2056] Upsert description: path# /dc-1/.metadata/workload_manager/running_requests 2025-06-25T14:36:13.177883Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7519895754322611696:2056] Subscribe: subscriber# [3:7519895775797449695:3000], path# /dc-1/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:13.177916Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519895775797449693:3000][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519895754322611690:2050] 2025-06-25T14:36:13.177938Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519895775797449694:3000][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519895754322611693:2053] 2025-06-25T14:36:13.177957Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519895775797449695:3000][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519895754322611696:2056] 2025-06-25T14:36:13.177991Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519895775797449683:3000][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519895775797449690:3000] 2025-06-25T14:36:13.178016Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][3:7519895775797449683:3000][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519895775797449691:3000] 2025-06-25T14:36:13.178040Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:7519895775797449683:3000][/dc-1/.metadata/workload_manager/running_requests] Set up state: owner# [3:7519895758617579289:2129], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:36:13.178060Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519895775797449683:3000][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519895775797449692:3000] 2025-06-25T14:36:13.178078Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:7519895775797449683:3000][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [3:7519895758617579289:2129], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:36:13.178097Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519895754322611690:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519895775797449693:3000] 2025-06-25T14:36:13.178109Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519895754322611693:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519895775797449694:3000] 2025-06-25T14:36:13.178122Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519895754322611696:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519895775797449695:3000] 2025-06-25T14:36:13.178158Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519895758617579289:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-25T14:36:13.178210Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519895758617579289:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519895775797449683:3000] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:36:13.178269Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519895758617579289:2129], cacheItem# { Subscriber: { Subscriber: [3:7519895775797449683:3000] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:36:13.178356Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# 
[3:7519895775797449696:3001], recipient# [3:7519895775797449681:2287], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:13.933040Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519895754322611790:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:13.933131Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:36:13.948930Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519895758617579289:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:13.949052Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519895758617579289:2129], cacheItem# { Subscriber: { Subscriber: [3:7519895758617579975:2611] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:36:13.949130Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895775797449715:3004], recipient# [3:7519895775797449714:2288], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:14.177098Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519895758617579289:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:14.177242Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# 
[3:7519895758617579289:2129], cacheItem# { Subscriber: { Subscriber: [3:7519895775797449669:2997] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:36:14.177329Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895780092417016:3008], recipient# [3:7519895780092417015:2289], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> KqpScan::ScanAfterSplitSlowMetaRead [GOOD] >> TConsoleTests::TestTenantGenerationExtSubdomain [GOOD] >> TConsoleTests::TestSchemeShardErrorForwarding >> TReplicationTests::CreateDropRecreate [GOOD] >> TReplicationTests::CreateWithoutCredentials >> KqpJoinOrder::Sortings4Year+RemoveLimitOperator [GOOD] >> TReplicationTests::CannotSetAsyncReplicaAttribute [GOOD] >> TReplicationTests::AlterReplicatedTable >> TExtSubDomainTest::CreateTableInsideAndLs-AlterDatabaseCreateHiveFirst-true [GOOD] |80.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |80.8%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |80.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage >> LdapAuthProviderTest::LdapServerIsUnavailable [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyHost >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-15 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-16 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-62 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-63 >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedClient [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForTimeoutedNotificationResponse >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDontExistGroupAttribute [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserLoginBad ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCC [GOOD] Test command err: Trying to start YDB, gRPC: 12713, MsgBus: 23580 2025-06-25T14:35:18.289384Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895540438524955:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:18.290874Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/000ee1/r3tmp/tmpEV2RXQ/pdisk_1.dat 2025-06-25T14:35:18.660619Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:18.663000Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895540438524932:2080] 1750862118286514 != 1750862118286517 TServer::EnableGrpc on GrpcPort 12713, node 1 2025-06-25T14:35:18.751112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:18.751733Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:18.762318Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:18.796565Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:18.796587Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:18.796595Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:18.796721Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23580 2025-06-25T14:35:19.308403Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23580 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:19.661853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:19.681064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:35:21.844373Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895553323427465:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:21.844528Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:21.844978Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895553323427477:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:21.850763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:21.869025Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895553323427479:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:35:21.936973Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895553323427530:2336] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:22.254887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.416993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.461855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.500660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.576913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.824090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.880230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.912269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.947921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:22.977414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.013079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.048807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.124968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.291892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895540438524955:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:23.291950Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:35:23.992871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
49806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.751973Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.752912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.754781Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.755335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.757756Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.758292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.760692Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.761262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.765096Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.765711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.765806Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.766317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.770798Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.770863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.771366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.772134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.775586Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.776157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.781053Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.781694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.782933Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.783518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.786582Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.787207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.788119Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.789361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.793792Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.796835Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.797544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.801285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.805906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.806685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.810434Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.811058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.815530Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.816099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.820571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.821048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:02.825535Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:02.842108Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:36:03.012714Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykr7pyg417twp5n251rr5t3", SessionId: ydb://session/3?node_id=1&id=MWM4MjkwMi1kMTgxMjYxZS0xNjI5MjM5Ny0zOTI3N2FkZg==, Slow query, duration: 36.979778s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:36:03.371510Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:36:03.372069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:36:03.372678Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519895634927822759:4274];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-25T14:36:03.373188Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KeyValueReadStorage::ReadError [GOOD] >> KeyValueReadStorage::ReadErrorWithWrongGroupId [GOOD] >> KeyValueReadStorage::ReadErrorWithUncorrectCookie >> TConsoleTests::TestCreateTenantAlreadyExistsExtSubdomain [GOOD] >> TConsoleTests::TestCreateSubSubDomain >> KeyValueReadStorage::ReadErrorWithUncorrectCookie [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsFromAdLdapServer [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGood >> TReplicationTests::CreateWithoutCredentials [GOOD] >> TReplicationTests::Describe >> TReplicationTests::AlterReplicatedTable [GOOD] >> TReplicationTests::AlterReplicatedIndexTable >> TConsoleTests::TestAlterUnknownTenantExtSubdomain [GOOD] >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorks >> TConsoleTests::TestAlterBorrowedStorage ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadErrorWithUncorrectCookie [GOOD] Test command err: 2025-06-25T14:36:18.929686Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# ERROR ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-25T14:36:18.929790Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV316@keyvalue_storage_read_request.cpp:270} Unexpected EvGetResult. KeyValue# 1 Status# ERROR Deadline# 18446744073709551 Now# 0 SentAt# 1970-01-01T00:00:00.000000Z GotAt# 1750862178928 ErrorReason# 2025-06-25T14:36:18.942984Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 2 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-25T14:36:18.943061Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV318@keyvalue_storage_read_request.cpp:240} Received EvGetResult from an unexpected storage group. 
KeyValue# 1 GroupId# 2 ExpecetedGroupId# 3 Status# OK Deadline# 18446744073709551 Now# 0 SentAt# 1970-01-01T00:00:00.000000Z GotAt# 1750862178942 ErrorReason# 2025-06-25T14:36:18.975023Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-25T14:36:18.975095Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV319@keyvalue_storage_read_request.cpp:222} Received EvGetResult with an unexpected cookie. KeyValue# 1 Cookie# 1000 SentGets# 1 GroupId# 3 Status# OK Deadline# 18446744073709551 Now# 0 GotAt# 1750862178974 ErrorReason# ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::CreateTableInsideAndLs-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2025-06-25T14:36:05.200506Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895740696549669:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:05.200565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a4b/r3tmp/tmpQQqnkF/pdisk_1.dat 2025-06-25T14:36:05.662247Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:05.679396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:05.679486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:05.684847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6739 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:36:05.882920Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895740696549871:2121] Handle TEvNavigate describe path dc-1 2025-06-25T14:36:05.899588Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895740696550335:2437] HANDLE EvNavigateScheme dc-1 2025-06-25T14:36:05.899720Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519895740696549955:2160], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:05.899750Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519895740696549955:2160], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:36:05.899903Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519895740696550336:2438][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:36:05.901597Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519895740696549579:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519895740696550340:2438] 2025-06-25T14:36:05.901653Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519895740696549579:2050] Subscribe: subscriber# [1:7519895740696550340:2438], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:05.901752Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519895740696549582:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519895740696550341:2438] 2025-06-25T14:36:05.901771Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519895740696549582:2053] Subscribe: subscriber# [1:7519895740696550341:2438], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:05.901801Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519895740696549585:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519895740696550342:2438] 2025-06-25T14:36:05.901816Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519895740696549585:2056] Subscribe: subscriber# [1:7519895740696550342:2438], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:05.901873Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519895740696550340:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895740696549579:2050] 2025-06-25T14:36:05.901907Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519895740696550341:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895740696549582:2053] 2025-06-25T14:36:05.901924Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519895740696550342:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895740696549585:2056] 2025-06-25T14:36:05.901978Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:7519895740696550336:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895740696550337:2438] 2025-06-25T14:36:05.902010Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519895740696550336:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895740696550338:2438] 2025-06-25T14:36:05.902065Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519895740696550336:2438][/dc-1] Set up state: owner# [1:7519895740696549955:2160], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:36:05.902150Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519895740696550336:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519895740696550339:2438] 2025-06-25T14:36:05.902183Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519895740696550336:2438][/dc-1] Path was already updated: owner# [1:7519895740696549955:2160], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:36:05.902225Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895740696550340:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895740696550337:2438], cookie# 1 2025-06-25T14:36:05.902233Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895740696550341:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895740696550338:2438], cookie# 1 2025-06-25T14:36:05.902240Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895740696550342:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895740696550339:2438], cookie# 1 2025-06-25T14:36:05.902266Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519895740696549579:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519895740696550340:2438] 2025-06-25T14:36:05.902282Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895740696549579:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895740696550340:2438], cookie# 1 2025-06-25T14:36:05.902293Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519895740696549582:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519895740696550341:2438] 2025-06-25T14:36:05.902308Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895740696549582:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895740696550341:2438], cookie# 1 2025-06-25T14:36:05.902319Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519895740696549585:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519895740696550342:2438] 
2025-06-25T14:36:05.902325Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895740696549585:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895740696550342:2438], cookie# 1 2025-06-25T14:36:05.904396Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895740696550340:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895740696549579:2050], cookie# 1 2025-06-25T14:36:05.904417Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895740696550341:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895740696549582:2053], cookie# 1 2025-06-25T14:36:05.904430Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895740696550342:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895740696549585:2056], cookie# 1 2025-06-25T14:36:05.904465Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895740696550336:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895740696550337:2438], cookie# 1 2025-06-25T14:36:05.904486Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895740696550336:2438][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:36:05.904501Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895740696550336:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895740696550338:2438], cookie# 1 2025-06-25T14:36:05.904540Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895740696550336:2438][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:36:05.904557Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895740696550336:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895740696550339:2438], cookie# 1 2025-06-25T14:36:05.904575Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519895740696550336:2438][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:36:05.942136Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519895740696549955:2160], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 
200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataS ... 6.372299Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519895771205657436:2132], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-25T14:36:16.372377Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519895771205657436:2132], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519895788385527719:2915] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:36:16.372480Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519895771205657436:2132], cacheItem# { Subscriber: { Subscriber: [3:7519895788385527719:2915] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:36:16.372518Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519895771205657436:2132], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-25T14:36:16.372551Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519895771205657436:2132], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519895788385527720:2916] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:36:16.372595Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519895771205657436:2132], cacheItem# { Subscriber: { Subscriber: [3:7519895788385527720:2916] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:36:16.372661Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519895788385527736:2914][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: 
/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7519895771205657126:2050] 2025-06-25T14:36:16.372678Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7519895771205657132:2056] Subscribe: subscriber# [3:7519895788385527738:2914], path# /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:36:16.372688Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519895788385527737:2914][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7519895771205657129:2053] 2025-06-25T14:36:16.372720Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519895788385527718:2914][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7519895788385527733:2914] 2025-06-25T14:36:16.372760Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519895788385527738:2914][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7519895771205657132:2056] 2025-06-25T14:36:16.372776Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519895788385527718:2914][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7519895788385527734:2914] 2025-06-25T14:36:16.372811Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:7519895788385527718:2914][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Set up state: owner# [3:7519895771205657436:2132], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:36:16.372831Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519895788385527718:2914][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [3:7519895788385527735:2914] 2025-06-25T14:36:16.372865Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:7519895788385527718:2914][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Ignore empty state: owner# [3:7519895771205657436:2132], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:36:16.372895Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519895771205657126:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519895788385527736:2914] 2025-06-25T14:36:16.372911Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519895771205657129:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519895788385527737:2914] 2025-06-25T14:36:16.372948Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519895771205657132:2056] Handle 
NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519895788385527738:2914] 2025-06-25T14:36:16.372971Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519895771205657436:2132], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 } 2025-06-25T14:36:16.373017Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519895771205657436:2132], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519895788385527718:2914] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:36:16.373048Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895788385527739:2917], recipient# [3:7519895788385527717:2285], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:16.373077Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519895771205657436:2132], cacheItem# { Subscriber: { Subscriber: [3:7519895788385527718:2914] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:36:16.373154Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895788385527743:2918], recipient# [3:7519895788385527716:2284], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:16.584511Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519895771205657436:2132], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 
2025-06-25T14:36:16.584661Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519895771205657436:2132], cacheItem# { Subscriber: { Subscriber: [3:7519895775500625532:2707] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:36:16.584773Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895788385527745:2919], recipient# [3:7519895788385527744:2286], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TExtSubDomainTest::CreateTableInsideThenStopTenantAndForceDeleteSubDomain-AlterDatabaseCreateHiveFirst-false [GOOD] >> TExtSubDomainTest::CreateTableInsideThenStopTenantAndForceDeleteSubDomain-AlterDatabaseCreateHiveFirst-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_scan/unittest >> KqpScan::ScanAfterSplitSlowMetaRead [GOOD] Test command err: 2025-06-25T14:35:26.612457Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:26.612919Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:26.613147Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:26.613278Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:26.613612Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:26.613655Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001577/r3tmp/tmp0bSw2l/pdisk_1.dat 2025-06-25T14:35:27.035888Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:27.293050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:27.453133Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:215:2175] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:35:27.455390Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:27.455548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:27.467170Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:35:27.477051Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:245:2131] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:35:27.482706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:27.482837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:27.483777Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976720656 RangeEnd# 281474976725656 txAllocator# 72057594046447617 2025-06-25T14:35:27.503950Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:35:27.504432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:27.504966Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:27.851001Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:215:2175] Handle TEvProposeTransaction 2025-06-25T14:35:27.851079Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:215:2175] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:35:27.851241Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:215:2175] 
Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:1153:2703] 2025-06-25T14:35:28.077618Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:1153:2703] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 7 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T14:35:28.077730Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:1153:2703] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:35:28.078522Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:1153:2703] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:35:28.078628Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:1153:2703] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:35:28.079041Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:1153:2703] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:35:28.079341Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:1153:2703] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:35:28.079458Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:1153:2703] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:35:28.079860Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:1153:2703] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:35:28.086809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:28.100527Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:1153:2703] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:35:28.100628Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:1153:2703] txid# 281474976715657 SEND to# [1:1061:2646] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T14:35:28.224096Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:1231:2761] 2025-06-25T14:35:28.235609Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:35:28.347845Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037894 actor [1:1233:2762] 2025-06-25T14:35:28.348100Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:35:28.393920Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:35:28.394489Z node 1 :TX_DATASHARD INFO: 
datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037892 actor [1:1237:2764] 2025-06-25T14:35:28.394675Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:35:28.411854Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:35:28.418027Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T14:35:28.418141Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T14:35:28.418205Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T14:35:28.418632Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:35:28.419674Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:35:28.419799Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:1338:2761] in generation 1 2025-06-25T14:35:28.426973Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:35:28.427399Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:35:28.437106Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037894 2025-06-25T14:35:28.437213Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037894 2025-06-25T14:35:28.437264Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037894 2025-06-25T14:35:28.437632Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:35:28.445011Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:35:28.445171Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037894 persisting started state actor id [1:1352:2762] in generation 1 2025-06-25T14:35:28.492651Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:35:28.493398Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:35:28.494875Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037892 2025-06-25T14:35:28.494941Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037892 2025-06-25T14:35:28.495002Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037892 2025-06-25T14:35:28.495404Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:35:28.497248Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:35:28.497315Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037892 persisting started state actor id [1:1376:2764] in generation 1 2025-06-25T14:35:28.520876Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1318:2362] 2025-06-25T14:35:28.521140Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:35:28.571507Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [2:1329:2363] 2025-06-25T14:35:28.571733Z node 2 
:TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:35:28.594291Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037893 actor [2:1331:2364] 2025-06-25T14:35:28.594510Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:35:28.610443Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:35:28.611268Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:35:28.613201Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 7207 ... 2025-06-25T14:36:16.236694Z node 5 :KQP_COMPUTE TRACE: dq_sync_compute_actor_base.h:36: SelfId: [5:1877:3110], TxId: 281474976715667, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykr94w938m0jt279s5eqnhb. SessionId : ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Resume execution, run status: Finished 2025-06-25T14:36:16.236740Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:393: SelfId: [5:1877:3110], TxId: 281474976715667, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykr94w938m0jt279s5eqnhb. SessionId : ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. ProcessOutputsState.Inflight: 0 2025-06-25T14:36:16.236789Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:423: SelfId: [5:1877:3110], TxId: 281474976715667, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykr94w938m0jt279s5eqnhb. SessionId : ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Do not drain channelId: 1, finished 2025-06-25T14:36:16.236852Z node 5 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715667, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-25T14:36:16.237210Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [5:1873:2928] TxId: 281474976715667. Ctx: { TraceId: 01jykr94w938m0jt279s5eqnhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [5:1877:3110], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-25T14:36:16.237258Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [5:1873:2928] TxId: 281474976715667. Ctx: { TraceId: 01jykr94w938m0jt279s5eqnhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [5:1877:3110], 2025-06-25T14:36:16.237675Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:357: ActorId: [5:1873:2928] TxId: 281474976715667. Ctx: { TraceId: 01jykr94w938m0jt279s5eqnhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Send TEvStreamData to [5:1558:2928], seqNo: 1, nRows: 1 2025-06-25T14:36:16.237883Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037890 coordinator 72057594046316545 last step 0 next step 3000 2025-06-25T14:36:16.237952Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037890: waitStep# 3000 readStep# 3000 observedStep# 3000 2025-06-25T14:36:16.238454Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [5:1873:2928] TxId: 281474976715667. Ctx: { TraceId: 01jykr94w938m0jt279s5eqnhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [5:1877:3110], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 509 Tasks { TaskId: 1 CpuTimeUs: 202 FinishTimeMs: 1750862176235 OutputRows: 1 OutputBytes: 6 ResultRows: 1 ResultBytes: 6 ComputeCpuTimeUs: 61 BuildCpuTimeUs: 141 HostName: "ghrun-kqfvx6aroe" NodeId: 5 CreateTimeMs: 1750862176234 CurrentWaitOutputTimeUs: 51 UpdateTimeMs: 1750862176235 } MaxMemoryUsage: 1048576 } 2025-06-25T14:36:16.238567Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [5:1873:2928] TxId: 281474976715667. Ctx: { TraceId: 01jykr94w938m0jt279s5eqnhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [5:1877:3110], 2025-06-25T14:36:16.238616Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 3000 2025-06-25T14:36:16.238648Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037889: waitStep# 3000 readStep# 3000 observedStep# 3000 ... response 271646822 NKikimr::NKqp::TEvKqpExecuter::TEvStreamData NKikimrKqp.TEvExecuterStreamData ResultSet { columns { name: "column0" type { optional_type { item { type_id: UINT64 } } } } rows { items { uint64_value: 596400 } } } SeqNo: 1 QueryResultIndex: 0 ChannelId: 1 VirtualTimestamp { Step: 2500 TxId: 281474976715664 } Finished: true 2025-06-25T14:36:16.239207Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:423: TxId: 281474976715667, send ack to channelId: 1, seqNo: 1, enough: 0, freeSpace: 100, to: [5:1878:3110] 2025-06-25T14:36:16.239299Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_channels.cpp:179: TxId: 281474976715667, task: 1. Received channel data ack for channelId: 1, seqNo: 1, lastSentSeqNo: 1, freeSpace: 100, early finish: 0 2025-06-25T14:36:16.239345Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_channels.cpp:207: TxId: 281474976715667, task: 1. PeerState, peerState:(freeSpace:100;inFlightBytes:0;inFlightCount:0;), sentSeqNo: 1, ackSeqNo: 1 2025-06-25T14:36:16.239369Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_channels.cpp:220: TxId: 281474976715667, task: 1. Resume compute actor 2025-06-25T14:36:16.239571Z node 5 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [5:1877:3110], TxId: 281474976715667, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykr94w938m0jt279s5eqnhb. SessionId : ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. 
CA StateFunc 271646922 2025-06-25T14:36:16.239617Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:1485: SelfId: [5:1877:3110], TxId: 281474976715667, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykr94w938m0jt279s5eqnhb. SessionId : ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Poll inputs 2025-06-25T14:36:16.239648Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:1500: SelfId: [5:1877:3110], TxId: 281474976715667, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykr94w938m0jt279s5eqnhb. SessionId : ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Poll sources 2025-06-25T14:36:16.239696Z node 5 :KQP_COMPUTE TRACE: dq_sync_compute_actor_base.h:36: SelfId: [5:1877:3110], TxId: 281474976715667, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykr94w938m0jt279s5eqnhb. SessionId : ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Resume execution, run status: Finished 2025-06-25T14:36:16.239734Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:393: SelfId: [5:1877:3110], TxId: 281474976715667, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykr94w938m0jt279s5eqnhb. SessionId : ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. ProcessOutputsState.Inflight: 0 2025-06-25T14:36:16.239765Z node 5 :KQP_COMPUTE TRACE: dq_compute_actor_impl.h:423: SelfId: [5:1877:3110], TxId: 281474976715667, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykr94w938m0jt279s5eqnhb. SessionId : ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Do not drain channelId: 1, finished 2025-06-25T14:36:16.239796Z node 5 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715667, task: 1. Tasks execution finished 2025-06-25T14:36:16.239821Z node 5 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [5:1877:3110], TxId: 281474976715667, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykr94w938m0jt279s5eqnhb. SessionId : ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-25T14:36:16.239896Z node 5 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715667, task: 1. pass away 2025-06-25T14:36:16.239977Z node 5 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715667;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:36:16.240121Z node 5 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715667, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-25T14:36:16.240438Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [5:1873:2928] TxId: 281474976715667. Ctx: { TraceId: 01jykr94w938m0jt279s5eqnhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [5:1877:3110], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1992 Tasks { TaskId: 1 CpuTimeUs: 208 FinishTimeMs: 1750862176239 OutputRows: 1 OutputBytes: 6 ResultRows: 1 ResultBytes: 6 ComputeCpuTimeUs: 67 BuildCpuTimeUs: 141 HostName: "ghrun-kqfvx6aroe" NodeId: 5 CreateTimeMs: 1750862176234 UpdateTimeMs: 1750862176239 } MaxMemoryUsage: 1048576 } 2025-06-25T14:36:16.240506Z node 5 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jykr94w938m0jt279s5eqnhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [5:1877:3110] 2025-06-25T14:36:16.240605Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [5:1873:2928] TxId: 281474976715667. Ctx: { TraceId: 01jykr94w938m0jt279s5eqnhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T14:36:16.240644Z node 5 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2202: ActorId: [5:1873:2928] TxId: 281474976715667. Ctx: { TraceId: 01jykr94w938m0jt279s5eqnhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState 2025-06-25T14:36:16.240698Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [5:1873:2928] TxId: 281474976715667. Ctx: { TraceId: 01jykr94w938m0jt279s5eqnhb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=OWUxY2EyOS1iNWQyNDdlNi1mMmU2MzBlYi00MDAwOGU4Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.001992s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 ... response 271646721 NKikimr::NKqp::NPrivateEvents::TEvQueryResponse NKikimrKqp.TEvQueryResponse Response { TxMeta { } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 877 |80.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |80.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |80.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table >> TReplicationTests::Describe [GOOD] >> TReplicationTests::CreateReplicatedTable |80.8%| [TA] $(B)/ydb/core/tx/datashard/ut_kqp_scan/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::Sortings4Year+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 24828, MsgBus: 65379 2025-06-25T14:35:19.306860Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895543417369760:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:19.307575Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ed4/r3tmp/tmp6iaKws/pdisk_1.dat 2025-06-25T14:35:19.904106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:19.904196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:19.969285Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:19.969445Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895543417369665:2080] 1750862119290731 != 1750862119290734 2025-06-25T14:35:19.990752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24828, node 1 2025-06-25T14:35:20.135594Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:20.135612Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:20.135620Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:20.135730Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:35:20.307015Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:65379 TClient is connected to server localhost:65379 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:35:20.997629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:22.940489Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895556302272196:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:22.940580Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:22.941006Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895556302272208:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:22.946145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:22.960660Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895556302272210:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:35:23.043251Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895560597239558:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:23.372469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.518726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.562216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.628158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.681980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.866650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.910967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.983385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.017035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.053519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.075087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.111786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.148506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.305177Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895543417369760:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:24.305258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:35:24.786130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
65460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:11.874531Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:11.881482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038461;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:11.891133Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038461;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:11.897479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:11.911282Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:11.911898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:11.922633Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:11.923221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:11.937833Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:11.938430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:11.950034Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:11.956846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:11.961979Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:11.962512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:11.967346Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:11.968014Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:11.972109Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:11.973015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:11.975281Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:11.975941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:11.980343Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:11.981067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:11.986447Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:11.987167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:12.002307Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:12.002891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:12.003380Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:12.003852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038487;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:12.016100Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038487;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:12.016773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038481;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:12.019281Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:12.019945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:12.026770Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038481;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:12.027450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:12.033405Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:12.034065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:12.039061Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:12.039737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:12.040254Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:12.051018Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:12.220615Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykr7r405740ejjbpyb7ny6a", SessionId: ydb://session/3?node_id=1&id=MTU5ODAyMzMtN2U5ZGFiMTMtYjJjMTNiMDItYjMwNmI3ZTg=, Slow query, duration: 44.987561s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:36:12.563795Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:12.564396Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:12.567405Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519895710921120035:5343];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-25T14:36:12.568253Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> IncrementalBackup::SimpleBackup [GOOD] >> IncrementalBackup::MultiRestore >> KeyValueReadStorage::ReadOk >> KeyValueReadStorage::ReadOk [GOOD] >> KeyValueReadStorage::ReadNotWholeBlobOk [GOOD] >> KeyValueReadStorage::ReadOneItemError [GOOD] >> TReplicationTests::AlterReplicatedIndexTable [GOOD] >> TReplicationTests::CopyReplicatedTable |80.8%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_scan/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadOneItemError [GOOD] Test command err: 2025-06-25T14:36:21.032302Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-25T14:36:21.035534Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-06-25T14:36:21.042503Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-25T14:36:21.042560Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-06-25T14:36:21.048954Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-25T14:36:21.049080Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV317@keyvalue_storage_read_request.cpp:310} Unexpected EvGetResult. 
KeyValue# 1 Status# OK Id# [1:2:3:2:0:1:0] ResponseStatus# ERROR Deadline# 586524-01-19T08:01:49.551615Z Now# 1970-01-01T00:00:00.000000Z SentAt# 1970-01-01T00:00:00.000000Z GotAt# 2025-06-25T14:36:21.048758Z ErrorReason# >> TConsoleTests::TestSchemeShardErrorForwarding [GOOD] >> TConsoleTests::TestScaleRecommenderPolicies >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOk >> TReplicationTests::CopyReplicatedTable [GOOD] >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOk >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-22 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-23 >> TConsoleTests::TestRemoveTenantExtSubdomain [GOOD] >> TConsoleTests::TestRemoveSharedTenantWoServerlessTenants >> TConsoleConfigSubscriptionTests::TestNotificationForTimeoutedNotificationResponse [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedServer >> TKeyValueTest::TestWriteTrimWithRestartsThenResponseOk >> LdapAuthProviderTest::LdapRequestWithEmptyHost [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyBaseDn >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-50 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-51 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_replication/unittest >> TReplicationTests::CopyReplicatedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:36:11.917108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:36:11.917224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:36:11.917271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:36:11.917305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:36:11.917349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:36:11.917376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:36:11.917439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:36:11.917503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:36:11.918235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources 
configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:36:11.918567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:36:12.069635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:12.069695Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:12.090032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:36:12.090239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:36:12.090415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:36:12.134133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:36:12.134418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:36:12.135078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:12.135277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:36:12.138370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:36:12.138553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:36:12.145286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:36:12.145375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:36:12.145605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:36:12.145673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:36:12.145721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:36:12.145817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:36:12.157140Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:36:12.345212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:36:12.345465Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:12.345678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:36:12.345734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:36:12.346026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:36:12.346103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:12.348509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:12.348709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:36:12.348929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:12.349007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:36:12.349051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:36:12.349084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:36:12.351774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:12.351845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:36:12.351884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:36:12.353832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:12.353895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:12.353942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:12.353997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 
2025-06-25T14:36:12.357742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:36:12.359791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:36:12.359985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:36:12.361277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:12.361422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:36:12.361468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:12.361765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:36:12.361824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:12.362039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:36:12.362132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:36:12.364515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:36:12.364564Z node 1 :FLAT_TX_SCHEMESHARD ... 
xId: 102 } 2025-06-25T14:36:22.150138Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 409 RawX2: 34359740744 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:36:22.150205Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-25T14:36:22.150372Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 409 RawX2: 34359740744 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:36:22.150454Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:36:22.150580Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 409 RawX2: 34359740744 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:36:22.150672Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:22.150748Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1056: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged CollectSchemaChanged: false 2025-06-25T14:36:22.154107Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:36:22.154416Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:36:22.167911Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 34359740662 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:36:22.167982Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:36:22.168111Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 309 RawX2: 34359740662 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:36:22.168162Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:36:22.168240Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 309 RawX2: 
34359740662 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:36:22.168301Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:22.168375Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:36:22.168422Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:36:22.168473Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:36:22.168507Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:36:22.170855Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:36:22.171349Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:36:22.171429Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 102:0ProgressState, operation type TxCopyTable 2025-06-25T14:36:22.171492Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1061: Set barrier, OperationId: 102:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-06-25T14:36:22.171559Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 102, done: 0, blocked: 1 2025-06-25T14:36:22.171668Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 102:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 102 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-06-25T14:36:22.171721Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 240 -> 240 2025-06-25T14:36:22.174278Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:36:22.174341Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:36:22.174501Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:36:22.174552Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:36:22.174606Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:36:22.174668Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:36:22.174717Z node 8 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:36:22.174816Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [8:337:2314] message: TxId: 102 2025-06-25T14:36:22.174886Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:36:22.174944Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:36:22.174994Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:36:22.175181Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:36:22.175229Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:36:22.177687Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:36:22.177763Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [8:437:2396] TestWaitNotification: OK eventTxId 102 2025-06-25T14:36:22.178447Z node 8 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/CopyTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:36:22.178743Z node 8 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/CopyTable" took 337us result status StatusSuccess 2025-06-25T14:36:22.179322Z node 8 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/CopyTable" PathDescription { Self { Name: "CopyTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "CopyTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 
WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TReplicationTests::CreateReplicatedTable [GOOD] >> TReplicationTests::DropReplicationWithInvalidCredentials >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserLoginBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserPasswordBad >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood >> TGroupMapperTest::InterlacedRacksWithoutInterlacedNodes [GOOD] >> KqpQueryPerf::Replace+QueryService+UseSink [GOOD] >> IncrementalBackup::SimpleRestoreBackupCollection-WithIncremental [GOOD] |80.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |80.8%| [LD] {RESULT} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |80.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut >> YdbIndexTable::OnlineBuild [GOOD] >> YdbIndexTable::OnlineBuildWithDataColumn >> TReplicationTests::DropReplicationWithInvalidCredentials [GOOD] >> TReplicationTests::DropReplicationWithUnknownSecret >> Viewer::UseTransactionWhenExecuteDataActionQuery [GOOD] >> ViewerTopicDataTests::TopicDataTest |80.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::InterlacedRacksWithoutInterlacedNodes [GOOD] >> Viewer::StorageGroupOutputWithoutFilterNoDepends [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-65 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-66 >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnVDiskSpaceStatus >> TGroupMapperTest::MakeDisksUnusable [GOOD] >> TPQTest::TestSourceIdDropBySourceIdCount [GOOD] >> TPQTest::TestStorageRetention >> TConsoleTests::TestAlterBorrowedStorage [GOOD] >> TConsoleTests::TestAlterStorageUnitsOfSharedTenant ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Replace+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 1401, MsgBus: 16872 2025-06-25T14:36:14.297109Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895781370119231:2205];send_to=[0:7307199536658146131:7762515]; 
2025-06-25T14:36:14.297591Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bb2/r3tmp/tmptoYdYs/pdisk_1.dat 2025-06-25T14:36:14.840476Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895781370119061:2080] 1750862174197586 != 1750862174197589 2025-06-25T14:36:14.962727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:14.962812Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:14.964814Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:14.972577Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1401, node 1 2025-06-25T14:36:15.240583Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:15.240605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:15.240614Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:15.240730Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:15.295905Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16872 TClient is connected to server localhost:16872 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:36:16.328482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:36:16.371142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:36:16.613490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:36:16.859115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:36:16.972558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:36:19.299461Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895781370119231:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:19.299534Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:36:19.658161Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895802844957198:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:19.658259Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:20.073153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:20.148269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:20.198143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:20.237745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:20.323146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:20.372995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:20.448794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:20.524973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895807139925164:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:20.525065Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:20.525367Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895807139925169:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:20.529315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:20.539304Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895807139925171:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:36:20.639569Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895807139925222:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-64 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-65 |80.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MakeDisksUnusable [GOOD] >> TReplicationTests::DropReplicationWithUnknownSecret [GOOD] >> TConsoleTests::TestCreateSubSubDomain [GOOD] >> TConsoleTests::TestCreateSubSubDomainExtSubdomain >> TGroupMapperTest::MakeDisksNonoperational [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyBaseDn [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyBindDn |80.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |80.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |80.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MakeDisksNonoperational [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_replication/unittest >> TReplicationTests::DropReplicationWithUnknownSecret [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:36:13.687476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:36:13.687557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:36:13.687592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:36:13.687621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:36:13.687661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:36:13.687682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:36:13.687740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:36:13.687828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, 
Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:36:13.695793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:36:13.696146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:36:13.781400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:13.781466Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:13.828750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:36:13.829230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:36:13.829421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:36:13.884728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:36:13.885079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:36:13.885706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:13.886006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:36:13.909248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:36:13.909477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:36:13.910579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:36:13.910640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:36:13.910789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:36:13.910831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:36:13.910868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:36:13.910944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:36:13.941342Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:36:14.448831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:36:14.449080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:14.449311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:36:14.449366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:36:14.449626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:36:14.449717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:14.457248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:14.457463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:36:14.457665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:14.457729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:36:14.457776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:36:14.457809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:36:14.469324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:14.469398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:36:14.469437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:36:14.473156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:14.473215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:14.473260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:14.473334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:36:14.478852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:36:14.492637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:36:14.492888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:36:14.493960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:14.494126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:36:14.494173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:14.494484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:36:14.494538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:14.494725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:36:14.494797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:36:14.497550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:36:14.497600Z node 1 :FLAT_TX_SCHEMESHARD ... 
ToDone TxId: 102 ready parts: 1/1 2025-06-25T14:36:25.697491Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:36:25.697540Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:36:25.697599Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-25T14:36:25.697657Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:36:25.697716Z node 9 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:36:25.697767Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:36:25.697952Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:36:25.698014Z node 9 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-25T14:36:25.698068Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:36:25.698120Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-25T14:36:25.699049Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274137603, Sender [9:209:2209], Recipient [9:129:2153]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 7 } 2025-06-25T14:36:25.699093Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-25T14:36:25.699175Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:36:25.699259Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:36:25.699309Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:36:25.699370Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:36:25.699436Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:36:25.699537Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 
2025-06-25T14:36:25.700377Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274137603, Sender [9:209:2209], Recipient [9:129:2153]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 } 2025-06-25T14:36:25.700422Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-25T14:36:25.700493Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:36:25.700575Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:36:25.700609Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:36:25.700644Z node 9 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:36:25.700680Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:36:25.700779Z node 9 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:36:25.700830Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:36:25.701212Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435084, Sender [9:129:2153], Recipient [9:129:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvCleanDroppedPaths 2025-06-25T14:36:25.701260Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvCleanDroppedPaths 2025-06-25T14:36:25.701395Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:36:25.701458Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:36:25.701555Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:36:25.703862Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:36:25.706382Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:36:25.706427Z node 9 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:36:25.706585Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:36:25.706613Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:36:25.706939Z node 9 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:36:25.707302Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:36:25.707370Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:36:25.707837Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [9:470:2416], Recipient [9:129:2153]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:25.707918Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:25.707971Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046678944 2025-06-25T14:36:25.708167Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124996, Sender [9:417:2363], Recipient [9:129:2153]: NKikimrScheme.TEvNotifyTxCompletion TxId: 102 2025-06-25T14:36:25.708214Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-25T14:36:25.708333Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:36:25.708487Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:36:25.708546Z node 9 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [9:468:2414] 2025-06-25T14:36:25.708786Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [9:470:2416], Recipient [9:129:2153]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:36:25.708838Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:36:25.708894Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-25T14:36:25.709439Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [9:471:2417], Recipient [9:129:2153]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Replication" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-25T14:36:25.709513Z node 9 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 
2025-06-25T14:36:25.709654Z node 9 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Replication" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:36:25.709901Z node 9 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Replication" took 265us result status StatusPathDoesNotExist 2025-06-25T14:36:25.710289Z node 9 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Replication\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/Replication" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesTrivial [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-16 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-17 |80.9%| [TA] $(B)/ydb/core/tx/schemeshard/ut_replication/test-results/unittest/{meta.json ... results_accumulator.log} |80.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |80.9%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication/test-results/unittest/{meta.json ... 
results_accumulator.log} |80.9%| [LD] {RESULT} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut >> YdbIndexTable::MultiShardTableUniqAndNonUniqIndex [GOOD] >> YdbIndexTable::MultiShardTableTwoIndexes |80.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesTrivial [GOOD] >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-63 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-64 >> TPQTest::TestStorageRetention [GOOD] >> TPQTest::TestSetClientOffset >> TMiniKQLEngineFlatHostTest::ShardId [GOOD] >> TMiniKQLEngineFlatHostTest::Basic |80.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |80.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |80.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk >> TConsoleTests::TestScaleRecommenderPolicies [GOOD] >> TConsoleTests::TestScaleRecommenderPoliciesValidation >> TMiniKQLEngineFlatHostTest::Basic [GOOD] >> TMiniKQLEngineFlatTest::TestAbort [GOOD] >> TMiniKQLEngineFlatTest::TestCASBoth2Fail1 >> TMiniKQLEngineFlatTest::TestCASBoth2Fail1 [GOOD] >> TMiniKQLEngineFlatTest::TestCASBoth2Fail2 >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithRemovedUserCredentialsBad >> TMiniKQLEngineFlatTest::TestCASBoth2Fail2 [GOOD] >> TMiniKQLEngineFlatTest::TestCASBoth2Fail12 >> TMiniKQLEngineFlatTest::TestSelectRangeFullWithoutColumnsNotExists >> TMiniKQLEngineFlatTest::TestCASBoth2Fail12 [GOOD] >> TMiniKQLEngineFlatTest::TestBug998 >> TMiniKQLEngineFlatTest::TestSelectRangeFullWithoutColumnsNotExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullWithoutColumnsNotExistsNullKey >> TMiniKQLEngineFlatTest::TestBug998 [GOOD] >> TMiniKQLEngineFlatTest::TestAcquireLocks >> TMiniKQLEngineFlatTest::TestSelectRangeFullWithoutColumnsNotExistsNullKey [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByItems >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts >> TMiniKQLEngineFlatTest::TestAcquireLocks [GOOD] >> TMiniKQLEngineFlatTest::NoMapPushdownMultipleConsumers [GOOD] >> TMiniKQLEngineFlatTest::NoMapPushdownNonPureLambda >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByItems [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByItemsFromNull |80.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |80.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TMiniKQLEngineFlatTest::NoMapPushdownNonPureLambda [GOOD] >> TMiniKQLEngineFlatTest::NoOrderedMapPushdown >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByItemsFromNull [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByBytes >> TMiniKQLEngineFlatTest::TestSelectRangeFullExistsTruncatedByBytes [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeNullNull >> TMiniKQLEngineFlatTest::NoOrderedMapPushdown [GOOD] >> TMiniKQLEngineFlatTest::NoMapPushdownWriteToTable >> 
TMiniKQLEngineFlatTest::NoMapPushdownWriteToTable [GOOD] >> TMiniKQLEngineFlatTest::NoMapPushdownArgClosure >> TMiniKQLEngineFlatTest::TestSelectRangeNullNull [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeToExclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeNoShards >> TMiniKQLEngineFlatTest::NoMapPushdownArgClosure [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitions >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitions [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems1 >> TConsoleTests::TestRemoveSharedTenantWoServerlessTenants [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] 2025-06-25 14:35:55,280 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 14:35:55,532 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 15978 621M 620M 615M ydb-tests-datashard-secondary_index --basetemp /home/runner/.ya/build/build_root/yft8/000aa2/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doct 17100 1.3G 1.3G 1.2G └─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/yft8/000aa2/ydb/tests/datashard/secondary_index/test-results/py3test/testing_out_stuf Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) 
File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol reports.append(call_and_report(item, "call", log)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call item.runtest() File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call pyfuncitem.retval = testfunction(**testargs) File "ydb/tests/datashard/secondary_index/test_secondary_index.py", line 162, in test_secondary_index_cover self.select_all_types(table_name, index, all_types, dml) File "ydb/tests/datashard/secondary_index/test_secondary_index.py", line 200, in select_all_types dml.transactional(process) File "ydb/tests/datashard/lib/dml_operations.py", line 17, in transactional return self.query_object.transactional(process) File "ydb/tests/sql/lib/test_query.py", line 58, in transactional return self.pool.retry_operation_sync(lambda session: fn(session)) File "contrib/python/ydb/py3/ydb/query/pool.py", line 141, in retry_operation_sync return retry_operation_sync(wrapped_callee, retry_settings) File "contrib/python/ydb/py3/ydb/retries.py", line 133, in retry_operation_sync for next_opt in opt_generator: File "contrib/python/ydb/py3/ydb/retries.py", line 94, in retry_operation_impl result = YdbRetryOperationFinalResult(callee(*args, **kwargs)) File "contrib/python/ydb/py3/ydb/query/pool.py", line 139, in wrapped_callee return callee(session, *args, **kwargs) File "ydb/tests/sql/lib/test_query.py", line 58, in return self.pool.retry_operation_sync(lambda session: fn(session)) File "ydb/tests/datashard/secondary_index/test_secondary_index.py", line 188, in process rows = self.query(select_sql, tx=session.transaction(tx_mode=ydb.QueryStaleReadOnly())) File "ydb/tests/sql/lib/test_query.py", line 52, in query for result_set in result_sets: File "contrib/python/ydb/py3/ydb/_utilities.py", line 173, in __next__ return self._next() File "contrib/python/ydb/py3/ydb/_utilities.py", line 164, in _next res = self.wrapper(next(self.it)) File 
"contrib/python/grpcio/py3/grpc/_channel.py", line 475, in __next__ return self._next() File "contrib/python/grpcio/py3/grpc/_channel.py", line 872, in _next _common.wait(self._state.condition.wait, _response_ready) File "contrib/python/grpcio/py3/grpc/_common.py", line 150, in wait _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb) File "contrib/python/grpcio/py3/grpc/_common.py", line 112, in _wait_once wait_fn(timeout=timeout) File "contrib/tools/python3/Lib/threading.py", line 359, in wait gotit = waiter.acquire(True, timeout) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Thread 0x00007f9e71c8f640 (most recent call first): File "ydb/tests/library/common/wait_for.py", line 19 in wait_for File "ydb/tests/library/harness/daemon.py", line 198 in stop File "ydb/tests/library/harness/kikimr_runner.py", line 261 in stop File "ydb/tests/library/harness/kikimr_runner.py", line 574 in __stop_node File "ydb/tests/library/harness/kikimr_runner.py", line 588 in stop_node File "contrib/tools/python3/Lib/threading.py", line 1012 in run File "contrib/tools/python3/Lib/threading.py", line 1075 in _bootstrap_inner File "contrib/tools/python3/Lib/threading.py", line 1032 in _bootstrap Current thread 0x00007f9ea2129940 (most recent call first): File "contrib/tools/python3/Lib/threading.py", line 1169 in _wait_for_tstate_lock File "contrib/tools/python3/Lib/threading.py", line 1149 in join File "ydb/tests/library/harness/kikimr_runner.py", line 599 in stop File "ydb/tests/sql/lib/test_base.py", line 75 in teardown_class File "contrib/python/pytest/py3/_pytest/python.py", line 764 in _call_with_optional_argument File "contrib/python/pytest/py3/_pytest/python.py", line 847 in xunit_setup_class_fixture File "contrib/python/pytest/py3/_pytest/fixtures.py", line 926 in _teardown_yield_fixture File "contrib/python/pytest/py3/_pytest/fixtures.py", line 1042 in finish File "contrib/python/pytest/py3/_pytest/runner.py", line 543 in teardown_exact File "contrib/python/pytest/py3/_pytest/runner.py", line 109 in pytest_sessionfinish File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/main.py", line 308 in wrap_session File "contrib/python/pytest/py3/_pytest/main.py", line 320 in pytest_cmdline_main File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121 in _multicall File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120 in _hookexec File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512 in __call__ File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175 in main File "library/python/pytest/main.py", line 101 in main Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...ary.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/000aa2/ydb/tests/datashard/secondary_index/test-results/py3test/testing_out_stuff/chunk16/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/000aa2', '--source-root', 
'/home/runner/.ya/build/build_root/yft8/000aa2/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/000aa2/ydb/tests/datashard/secondary_index/test-results/py3test/testing_out_stuff/chunk16/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/datashard/secondary_index', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '28', '--modulo-index', '16', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/datashard/secondary_index', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...ary.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/000aa2/ydb/tests/datashard/secondary_index/test-results/py3test/testing_out_stuff/chunk16/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/000aa2', '--source-root', '/home/runner/.ya/build/build_root/yft8/000aa2/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/000aa2/ydb/tests/datashard/secondary_index/test-results/py3test/testing_out_stuff/chunk16/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/datashard/secondary_index', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '28', '--modulo-index', '16', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/datashard/secondary_index', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {}) 2025-06-25 14:36:28,886 WARNING library.python.cores: Core dump dir doesn't exist: /coredumps 2025-06-25 14:36:28,886 WARNING library.python.cores: Core dump dir doesn't exist: /var/tmp/cores >> TConsoleTests::TestRemoveSharedTenantWithServerlessTenants >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems1 [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems2 >> 
SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-23 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-24 >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems2 [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems3 >> TMiniKQLEngineFlatTest::TestSelectRangeReverseWithPartitionsTruncatedByItems3 [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeNoColumns >> TMiniKQLEngineFlatTest::TestSelectRangeNoColumns [GOOD] >> TStorageTenantTest::CreateTableOutsideDatabaseFailToStartTabletsButDropIsOk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::SimpleRestoreBackupCollection-WithIncremental [GOOD] Test command err: 2025-06-25T14:36:12.910314Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:12.910449Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:12.910520Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000972/r3tmp/tmpbXJFz5/pdisk_1.dat 2025-06-25T14:36:13.372596Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:36:13.373771Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:557:2482], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:13.373854Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:13.373889Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:36:13.374029Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:554:2480], Recipient [1:373:2367]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-25T14:36:13.374074Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:36:13.539272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-25T14:36:13.539492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:36:13.539694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:36:13.539781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:36:13.540026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:36:13.540105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:13.540204Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:13.541231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:36:13.541422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:36:13.541476Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:13.541510Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-25T14:36:13.541709Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:36:13.541748Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:36:13.541828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:36:13.541873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:36:13.541961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:36:13.541998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:36:13.542093Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:13.542582Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:13.542676Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-25T14:36:13.542816Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:36:13.542849Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:36:13.542898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:36:13.542939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:36:13.542977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:36:13.543073Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:13.543462Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:13.543491Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 
2025-06-25T14:36:13.543610Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:36:13.543645Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:36:13.543687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:36:13.543714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:36:13.543753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-25T14:36:13.543780Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:13.543826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:36:13.547266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:36:13.547770Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:13.547845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:36:13.548024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:36:13.549569Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877760, Sender [1:562:2487], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:564:2488] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T14:36:13.549624Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5050: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:36:13.549689Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5787: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-25T14:36:13.549824Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269091328, Sender [1:369:2363], Recipient [1:373:2367]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-25T14:36:13.550183Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:566:2490], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:13.550227Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-25T14:36:13.550266Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:36:13.550431Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124996, Sender [1:554:2480], Recipient [1:373:2367]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-25T14:36:13.550467Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-25T14:36:13.550526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-25T14:36:13.550577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-25T14:36:13.550618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-25T14:36:13.603518Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 273285138, Sender [1:45:2092], Recipient [1:373:2367]: ... _TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:23.776515Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:36:23.776712Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269551620, Sender [2:697:2569], Recipient [2:373:2367]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 697 RawX2: 8589937161 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-25T14:36:23.776747Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4987: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-25T14:36:23.776838Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 697 RawX2: 8589937161 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-25T14:36:23.776882Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 281474976715662, tablet: 72075186224037888, partId: 0 2025-06-25T14:36:23.777029Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976715662:0, at schemeshard: 72057594046644480, message: Source { RawX1: 697 RawX2: 8589937161 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-25T14:36:23.777079Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 281474976715662:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-25T14:36:23.777169Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976715662:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 697 RawX2: 8589937161 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-25T14:36:23.777234Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 
281474976715662:0, shardIdx: 72057594046644480:1, shard: 72075186224037888, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-25T14:36:23.777276Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-25T14:36:23.777329Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715662:0, datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-06-25T14:36:23.777376Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715662:0, datashard: 72075186224037888, at schemeshard: 72057594046644480 2025-06-25T14:36:23.777445Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715662:0 129 -> 240 2025-06-25T14:36:23.777585Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:23.778142Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-25T14:36:23.778192Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:23.778238Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715662:0 2025-06-25T14:36:23.778341Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [2:907:2705] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-25T14:36:23.778410Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [2:697:2569] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-25T14:36:23.778534Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-06-25T14:36:23.778628Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:36:23.778824Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037889 state Ready 2025-06-25T14:36:23.778870Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-25T14:36:23.779037Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [2:373:2367], Recipient [2:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:36:23.779076Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:36:23.779126Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-25T14:36:23.779178Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 
281474976715662:0ProgressState, operation type TxCopyTable 2025-06-25T14:36:23.779261Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:23.779309Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1061: Set barrier, OperationId: 281474976715662:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-06-25T14:36:23.779362Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 281474976715662, done: 0, blocked: 1 2025-06-25T14:36:23.779458Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 281474976715662:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 281474976715662 Name: CopyTableBarrier }, at tablet# 72057594046644480 2025-06-25T14:36:23.779507Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715662:0 240 -> 240 2025-06-25T14:36:23.780098Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:23.780140Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715662:0 2025-06-25T14:36:23.780252Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [2:373:2367], Recipient [2:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:36:23.780866Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:36:23.780937Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-25T14:36:23.780987Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715662:0 ProgressState 2025-06-25T14:36:23.781134Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:23.781176Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715662:0 progress is 1/1 2025-06-25T14:36:23.781219Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 1/1 2025-06-25T14:36:23.781273Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715662:0 progress is 1/1 2025-06-25T14:36:23.781316Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 1/1 2025-06-25T14:36:23.781365Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715662, ready parts: 1/1, is published: true 2025-06-25T14:36:23.781451Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:887:2689] message: TxId: 281474976715662 2025-06-25T14:36:23.781514Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 1/1 2025-06-25T14:36:23.781569Z node 
2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715662:0 2025-06-25T14:36:23.781607Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715662:0 2025-06-25T14:36:23.781771Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 11] was 3 2025-06-25T14:36:23.781818Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 3 2025-06-25T14:36:23.782297Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:23.782382Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [2:887:2689] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-25T14:36:23.782764Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [2:895:2696], Recipient [2:373:2367]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:36:23.782801Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:36:23.782832Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:36:23.934645Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [2:985:2764], serverId# [2:986:2765], sessionId# [0:0:0] 2025-06-25T14:36:23.934808Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykr9fbefs9tbyemm1ef8m3c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MmZmNjEyNDItODc5OWZjMWQtOGNlOWM2NS05NDdhOGNkOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }, { items { uint32_value: 5 } items { uint32_value: 50 } } >> LdapAuthProviderTest::LdapRequestWithEmptyBindDn [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyBindPassword |80.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::NoMapPushdownArgClosure [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneIndexIndexOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 17543, MsgBus: 11139 2025-06-25T14:33:37.251942Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895105233708205:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:37.260838Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001654/r3tmp/tmpMk7Ih9/pdisk_1.dat 2025-06-25T14:33:37.792667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:37.792766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:37.794107Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:37.797955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:37.803956Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895105233708172:2080] 1750862017239337 != 1750862017239340 TServer::EnableGrpc on GrpcPort 17543, node 1 2025-06-25T14:33:37.952956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:37.952997Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:37.953016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:37.953126Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11139 2025-06-25T14:33:38.250448Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11139 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:38.930584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:38.974884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:39.170738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:39.351986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:39.429586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:42.206089Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895126708546280:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:42.206193Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:42.252529Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895105233708205:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:42.252613Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:42.485097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.542053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.582275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.642563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.699541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.743537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.800150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.876771Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519895126708546944:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:42.876819Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:42.880667Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895126708546949:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:42.884205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:42.896718Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895126708546951:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:33:43.004041Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895126708547002:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:44.218522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:45.011964Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { ... jNhYmItYTk2Zjg5NDUtM2Q0OWVkOWItNzEwNDliOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.195013Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719317. Ctx: { TraceId: 01jykr9eqwdaac8et8a56j1gzw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDIyNDM0NjYtMTFmYjU5N2EtNzhjYTE2OGQtYjIwNTYyZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.242727Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719318. Ctx: { TraceId: 01jykr9es9evka6mdzw5px156e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2UxYjkyOWMtOGQ3N2ExZGMtYjM5NDJkNy0yZjIwNjJlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.244168Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719319. Ctx: { TraceId: 01jykr9es9dhq9a1mh7k9k57wz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NDNmMTRkOGEtYzA5YTBjZGEtZGQxNDUxNmItOTNmODJlOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.244800Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719320. Ctx: { TraceId: 01jykr9es98tqnbdyhembcrf0y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MmI5ZmQ5OTYtZTIyOGI4YS1jNDNjZjJhZi1jZDFjZjc0NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.264118Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719321. Ctx: { TraceId: 01jykr9etvbv6r71g2evcy67ns, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MWEwYzRkZi0yM2FkM2M5Yy0zMTUyYTljNC03ZDdkZTA2Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.268965Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719322. Ctx: { TraceId: 01jykr9etvbv6r71g2evcy67ns, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MWEwYzRkZi0yM2FkM2M5Yy0zMTUyYTljNC03ZDdkZTA2Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.276399Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719323. 
Ctx: { TraceId: 01jykr9ev4bjvqzxa3xrp2ffab, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDBiNjNhYmItYTk2Zjg5NDUtM2Q0OWVkOWItNzEwNDliOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.276427Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719324. Ctx: { TraceId: 01jykr9ev53kgb263qj274teks, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2UxYjkyOWMtOGQ3N2ExZGMtYjM5NDJkNy0yZjIwNjJlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.277491Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719325. Ctx: { TraceId: 01jykr9ev4erwpxepv3bj9qmeh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDIyNDM0NjYtMTFmYjU5N2EtNzhjYTE2OGQtYjIwNTYyZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.286262Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719327. Ctx: { TraceId: 01jykr9ev53kgb263qj274teks, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2UxYjkyOWMtOGQ3N2ExZGMtYjM5NDJkNy0yZjIwNjJlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.286325Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719326. Ctx: { TraceId: 01jykr9ev4bjvqzxa3xrp2ffab, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDBiNjNhYmItYTk2Zjg5NDUtM2Q0OWVkOWItNzEwNDliOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.287102Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719328. Ctx: { TraceId: 01jykr9ev4erwpxepv3bj9qmeh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDIyNDM0NjYtMTFmYjU5N2EtNzhjYTE2OGQtYjIwNTYyZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.299021Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719329. Ctx: { TraceId: 01jykr9evn0p5wxy35g3e2jj4p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MmI5ZmQ5OTYtZTIyOGI4YS1jNDNjZjJhZi1jZDFjZjc0NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.309986Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719330. Ctx: { TraceId: 01jykr9ewabn6zb26c8b2xzpqx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NDNmMTRkOGEtYzA5YTBjZGEtZGQxNDUxNmItOTNmODJlOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.324299Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719331. Ctx: { TraceId: 01jykr9ewhab9fw3d9khmtbgqy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MWEwYzRkZi0yM2FkM2M5Yy0zMTUyYTljNC03ZDdkZTA2Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.325171Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719332. Ctx: { TraceId: 01jykr9ewjeg70vsh6w7663r5f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NmIxYWEyNzgtZjg3YjMwMGQtODZmZjA5MGYtNmI1MzdkNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.331085Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719333. 
Ctx: { TraceId: 01jykr9ewjeg70vsh6w7663r5f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NmIxYWEyNzgtZjg3YjMwMGQtODZmZjA5MGYtNmI1MzdkNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.332413Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719334. Ctx: { TraceId: 01jykr9ewhab9fw3d9khmtbgqy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MWEwYzRkZi0yM2FkM2M5Yy0zMTUyYTljNC03ZDdkZTA2Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.333739Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719335. Ctx: { TraceId: 01jykr9ewjeg70vsh6w7663r5f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NmIxYWEyNzgtZjg3YjMwMGQtODZmZjA5MGYtNmI1MzdkNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.343957Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719336. Ctx: { TraceId: 01jykr9ewy16xqft2qkg8s79ff, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2UxYjkyOWMtOGQ3N2ExZGMtYjM5NDJkNy0yZjIwNjJlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.359488Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719337. Ctx: { TraceId: 01jykr9exge6jwv6cc378f9rg6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDBiNjNhYmItYTk2Zjg5NDUtM2Q0OWVkOWItNzEwNDliOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.363819Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719338. Ctx: { TraceId: 01jykr9exm91bsczt2z22qfq6h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDIyNDM0NjYtMTFmYjU5N2EtNzhjYTE2OGQtYjIwNTYyZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.376596Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719339. Ctx: { TraceId: 01jykr9exm91bsczt2z22qfq6h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDIyNDM0NjYtMTFmYjU5N2EtNzhjYTE2OGQtYjIwNTYyZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.378433Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719340. Ctx: { TraceId: 01jykr9exge6jwv6cc378f9rg6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDBiNjNhYmItYTk2Zjg5NDUtM2Q0OWVkOWItNzEwNDliOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.379693Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719341. Ctx: { TraceId: 01jykr9exm91bsczt2z22qfq6h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDIyNDM0NjYtMTFmYjU5N2EtNzhjYTE2OGQtYjIwNTYyZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.381455Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719342. Ctx: { TraceId: 01jykr9exge6jwv6cc378f9rg6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZDBiNjNhYmItYTk2Zjg5NDUtM2Q0OWVkOWItNzEwNDliOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.386361Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719343. 
Ctx: { TraceId: 01jykr9eyj3t6h4fckppf3b96v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NDNmMTRkOGEtYzA5YTBjZGEtZGQxNDUxNmItOTNmODJlOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.389853Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719344. Ctx: { TraceId: 01jykr9eyq56kchf9r06kxzv8g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MWEwYzRkZi0yM2FkM2M5Yy0zMTUyYTljNC03ZDdkZTA2Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.391688Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719345. Ctx: { TraceId: 01jykr9eyj3t6h4fckppf3b96v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NDNmMTRkOGEtYzA5YTBjZGEtZGQxNDUxNmItOTNmODJlOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.395840Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719346. Ctx: { TraceId: 01jykr9eypf2sx0esqbkkytszr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NmIxYWEyNzgtZjg3YjMwMGQtODZmZjA5MGYtNmI1MzdkNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS 2025-06-25T14:36:23.398475Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719347. Ctx: { TraceId: 01jykr9eyj3t6h4fckppf3b96v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NDNmMTRkOGEtYzA5YTBjZGEtZGQxNDUxNmItOTNmODJlOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-25T14:36:23.424117Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719348. Ctx: { TraceId: 01jykr9ezgffsakw8qqyj9z94z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2UxYjkyOWMtOGQ3N2ExZGMtYjM5NDJkNy0yZjIwNjJlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:36:23.431473Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719349. Ctx: { TraceId: 01jykr9ezgffsakw8qqyj9z94z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2UxYjkyOWMtOGQ3N2ExZGMtYjM5NDJkNy0yZjIwNjJlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS |80.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TStorageTenantTest::CreateTableOutsideDatabaseFailToStartTabletsButDropIsOk [GOOD] >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit [GOOD] |80.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestSelectRangeNoColumns [GOOD] >> TConsoleConfigSubscriptionTests::TestNotificationForRestartedServer [GOOD] >> TConsoleConfigSubscriptionTests::TestAddSubscriptionIdempotency >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-51 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-52 >> TConsoleTests::TestAlterStorageUnitsOfSharedTenant [GOOD] >> TConsoleTests::TestAlterServerlessTenant |81.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> IncrementalBackup::E2EBackupCollection >> Viewer::JsonStorageListingV1 [GOOD] >> Viewer::JsonStorageListingV1GroupIdFilter >> DataShardReadTableSnapshots::ReadTableDropColumn >> DataShardReadTableSnapshots::ReadTableSplitBefore >> DataShardReadTableSnapshots::ReadTableSplitNewTxIdResolveResultReorder >> TConsoleTests::TestScaleRecommenderPoliciesValidation [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorSingle >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-65 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-66 >> ResourcePoolsDdl::TestPoolSwitchToUnlimitedState ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit [GOOD] Test command err: 2025-06-25T14:35:20.955346Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:20.955471Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:20.955518Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001183/r3tmp/tmp3O0eEK/pdisk_1.dat 2025-06-25T14:35:21.258602Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:35:21.261738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:21.320018Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:21.327168Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862117717473 != 1750862117717477 2025-06-25T14:35:21.377266Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:21.377406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:21.388864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:21.476075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:21.545201Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:35:21.546241Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:35:21.546669Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:35:21.546890Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:35:21.589661Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:35:21.590395Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:35:21.590510Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:35:21.592045Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T14:35:21.592126Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:35:21.592176Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:35:21.593405Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:35:21.593562Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:35:21.593693Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:35:21.608857Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:35:21.630492Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:35:21.630729Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:35:21.630838Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:35:21.630881Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:35:21.630922Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:35:21.630958Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:35:21.631153Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:35:21.631193Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:35:21.631503Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:35:21.631581Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:35:21.631626Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:35:21.631661Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:35:21.631703Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:35:21.631748Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:35:21.631790Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:35:21.631833Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:35:21.631867Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:35:21.632212Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:35:21.632246Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:35:21.633722Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:35:21.633873Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:35:21.633924Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:35:21.634038Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:35:21.634237Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:35:21.634287Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:35:21.634378Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:35:21.634419Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14:35:21.634455Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T14:35:21.634487Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T14:35:21.634516Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:35:21.634783Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T14:35:21.634821Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T14:35:21.634849Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T14:35:21.634893Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:35:21.634947Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T14:35:21.634975Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T14:35:21.635004Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T14:35:21.635041Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T14:35:21.635062Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T14:35:21.636269Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T14:35:21.636339Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:35:21.647064Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:35:21.647140Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:35:21.647175Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:35:21.647219Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 888 on unit CompletedOperations 2025-06-25T14:36:28.927384Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T14:36:28.927408Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T14:36:28.927432Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-25T14:36:28.927473Z node 9 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T14:36:28.927542Z node 9 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T14:36:28.945209Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [9:1571:2402], Recipient [9:1230:2353]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T14:36:28.945303Z node 9 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-25T14:36:28.945930Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [9:1573:2403], Recipient [9:1230:2353]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T14:36:28.945980Z node 9 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-25T14:36:28.954192Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [8:1556:2901], Recipient [9:1501:2397] 2025-06-25T14:36:28.954248Z node 9 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-25T14:36:28.954426Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [9:1230:2353], Recipient [9:1230:2353]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T14:36:28.954466Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T14:36:28.954528Z node 9 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-25T14:36:28.954690Z node 9 :TX_DATASHARD TRACE: 
datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true } SendingShards: 72075186224037888 ReceivingShards: 72075186224037888 Op: Commit } 2025-06-25T14:36:28.954801Z node 9 :TX_DATASHARD TRACE: key_validator.cpp:33: -- AddReadRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-25T14:36:28.954940Z node 9 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-25T14:36:28.955036Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckWrite 2025-06-25T14:36:28.955084Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T14:36:28.955115Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckWrite 2025-06-25T14:36:28.955146Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T14:36:28.955189Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T14:36:28.955230Z node 9 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v1500/18446744073709551615 ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-25T14:36:28.955287Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-25T14:36:28.955319Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T14:36:28.955344Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T14:36:28.955367Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteWrite 2025-06-25T14:36:28.955391Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteWrite 2025-06-25T14:36:28.955419Z node 9 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:7] at 72075186224037888 2025-06-25T14:36:28.955483Z node 9 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v1500/18446744073709551615 ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-25T14:36:28.955613Z node 9 :TX_DATASHARD TRACE: datashard_kqp.cpp:806: KqpCommitLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true 2025-06-25T14:36:28.955674Z node 9 :TX_DATASHARD TRACE: datashard_user_db.cpp:435: Committing changes lockId# 281474976715661 in localTid# 1001 shard# 72075186224037888 2025-06-25T14:36:28.955775Z node 9 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip 
empty write operation for [0:7] at 72075186224037888 2025-06-25T14:36:28.955962Z node 9 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-25T14:36:28.956027Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14:36:28.956058Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteWrite 2025-06-25T14:36:28.956084Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-25T14:36:28.956116Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T14:36:28.956185Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T14:36:28.956225Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-25T14:36:28.956277Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T14:36:28.956665Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-25T14:36:28.956752Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T14:36:28.956780Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T14:36:28.956811Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-25T14:36:28.966328Z node 9 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-25T14:36:28.966389Z node 9 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:7] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T14:36:28.966426Z node 9 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 7 at tablet 72075186224037888 send to client, propose latency: 1 ms, status: STATUS_COMPLETED 2025-06-25T14:36:28.966502Z node 9 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-25T14:36:28.966577Z node 9 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:36:28.968143Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270977, Sender [9:61:2064], Recipient [9:1230:2353]: {TEvNotifyPlanStep TabletId# 72075186224037888 PlanStep# 1501} 2025-06-25T14:36:28.968202Z node 9 :TX_DATASHARD TRACE: datashard_impl.h:3172: StateWork, processing event TEvMediatorTimecast::TEvNotifyPlanStep 2025-06-25T14:36:28.968264Z node 9 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-25T14:36:28.968350Z node 9 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 { items { int64_value: 0 } items { int64_value: 1000 } }, { items { int64_value: 1 } items { int64_value: 1001 } }, { items { int64_value: 2 } items { int64_value: 1002 } 
}, { items { int64_value: 3 } items { int64_value: 1003 } }, { items { int64_value: 4 } items { int64_value: 1004 } }, { items { int64_value: 5 } items { int64_value: 1005 } }, { items { int64_value: 6 } items { int64_value: 5001 } } { items { int64_value: 0 } items { int64_value: 2000 } }, { items { int64_value: 1 } items { int64_value: 2001 } }, { items { int64_value: 2 } items { int64_value: 2002 } }, { items { int64_value: 3 } items { int64_value: 2003 } }, { items { int64_value: 4 } items { int64_value: 2004 } }, { items { int64_value: 5 } items { int64_value: 2005 } }, { items { int64_value: 6 } items { int64_value: 5002 } } result_sets { columns { name: "index" type { optional_type { item { type_id: INT64 } } } } columns { name: "value" type { optional_type { item { type_id: INT64 } } } } rows { items { int64_value: 0 } items { int64_value: 1000 } } rows { items { int64_value: 1 } items { int64_value: 1001 } } rows { items { int64_value: 2 } items { int64_value: 1002 } } rows { items { int64_value: 3 } items { int64_value: 1003 } } rows { items { int64_value: 4 } items { int64_value: 1004 } } rows { items { int64_value: 5 } items { int64_value: 1005 } } rows { items { int64_value: 6 } items { int64_value: 5001 } } } result_sets { columns { name: "index" type { optional_type { item { type_id: INT64 } } } } columns { name: "value" type { optional_type { item { type_id: INT64 } } } } rows { items { int64_value: 0 } items { int64_value: 2000 } } rows { items { int64_value: 1 } items { int64_value: 2001 } } rows { items { int64_value: 2 } items { int64_value: 2002 } } rows { items { int64_value: 3 } items { int64_value: 2003 } } rows { items { int64_value: 4 } items { int64_value: 2004 } } rows { items { int64_value: 5 } items { int64_value: 2005 } } rows { items { int64_value: 6 } items { int64_value: 5002 } } } tx_meta { } >> KqpWorkloadService::WorkloadServiceDisabledByFeatureFlag |81.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> KqpWorkloadService::TestQueryCancelAfterPoolWithLimits >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-66 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67 >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithCustomGroupAttributeGood >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithRemovedUserCredentialsBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoGood |81.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TConsoleTests::TestCreateSubSubDomainExtSubdomain [GOOD] >> TConsoleTests::TestAuthorization |81.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |81.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |81.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings >> LdapAuthProviderTest::LdapRequestWithEmptyBindPassword [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsDisableRequestToAD >> TExtSubDomainTest::CreateTableInsideThenStopTenantAndForceDeleteSubDomain-AlterDatabaseCreateHiveFirst-true [GOOD] >> ReadOnlyVDisk::TestGetWithMustRestoreFirst >> TConsoleConfigSubscriptionTests::TestAddSubscriptionIdempotency [GOOD] >> TConsoleConfigSubscriptionTests::TestConfigNotificationRetries >> 
HttpRequest::Status >> ViewerTopicDataTests::TopicDataTest [GOOD] >> ReadOnlyVDisk::TestDiscover >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-17 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-18 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-64 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-65 >> TVersions::Wreck0 [GOOD] >> TVersions::Wreck0Reverse ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::CreateTableInsideThenStopTenantAndForceDeleteSubDomain-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2025-06-25T14:36:02.742509Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895729667851664:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:02.742561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a58/r3tmp/tmpw6pU0M/pdisk_1.dat 2025-06-25T14:36:03.743622Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:03.801283Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:03.803629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:03.803719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:03.835393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:03.924447Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27963 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:36:04.378911Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895729667851854:2118] Handle TEvNavigate describe path dc-1 2025-06-25T14:36:04.433891Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895738257786935:2448] HANDLE EvNavigateScheme dc-1 2025-06-25T14:36:04.434013Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519895733962819179:2134], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:04.434088Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519895733962819536:2372][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519895733962819179:2134], cookie# 1 2025-06-25T14:36:04.441448Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895733962819541:2372][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895733962819538:2372], cookie# 1 2025-06-25T14:36:04.441504Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895733962819542:2372][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895733962819539:2372], cookie# 1 2025-06-25T14:36:04.441519Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519895733962819543:2372][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895733962819540:2372], cookie# 1 2025-06-25T14:36:04.441549Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895729667851565:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895733962819541:2372], cookie# 1 2025-06-25T14:36:04.441581Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895729667851568:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895733962819542:2372], cookie# 1 2025-06-25T14:36:04.441600Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519895729667851571:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519895733962819543:2372], cookie# 1 2025-06-25T14:36:04.441627Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895733962819541:2372][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895729667851565:2050], cookie# 1 2025-06-25T14:36:04.441650Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895733962819542:2372][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895729667851568:2053], cookie# 1 2025-06-25T14:36:04.441661Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519895733962819543:2372][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895729667851571:2056], cookie# 1 2025-06-25T14:36:04.441689Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895733962819536:2372][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895733962819538:2372], cookie# 1 2025-06-25T14:36:04.441717Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: 
[main][1:7519895733962819536:2372][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:36:04.441735Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895733962819536:2372][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895733962819539:2372], cookie# 1 2025-06-25T14:36:04.441746Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519895733962819536:2372][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:36:04.441756Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519895733962819536:2372][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519895733962819540:2372], cookie# 1 2025-06-25T14:36:04.441779Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519895733962819536:2372][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:36:04.441837Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519895733962819179:2134], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:36:04.458432Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519895733962819179:2134], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519895733962819536:2372] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:36:04.458557Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519895733962819179:2134], cacheItem# { Subscriber: { Subscriber: [1:7519895733962819536:2372] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-25T14:36:04.465048Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519895738257786936:2449], recipient# [1:7519895738257786935:2448], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:36:04.465147Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# 
[1:7519895738257786935:2448] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:36:04.532204Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519895738257786935:2448] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:36:04.544779Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519895738257786935:2448] Handle TEvDescribeSchemeResult Forward to# [1:7519895738257786933:2447] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' suc ... 
X_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895853414306252:3521], recipient# [3:7519895853414306250:2311], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:31.353503Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895853414306253:3522], recipient# [3:7519895853414306251:2312], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:32.325728Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519895806169663984:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:32.325836Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519895806169663984:2130], cacheItem# { Subscriber: { Subscriber: [3:7519895810464631915:2569] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:36:32.325900Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895857709273571:3526], recipient# [3:7519895857709273570:2313], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:32.360450Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519895806169663984:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:32.360584Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519895806169663984:2130], cacheItem# { Subscriber: { Subscriber: [3:7519895827644501676:2985] DomainOwnerId: 
72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:36:32.360673Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519895806169663984:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:32.360745Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519895806169663984:2130], cacheItem# { Subscriber: { Subscriber: [3:7519895810464631915:2569] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:36:32.360811Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895857709273574:3527], recipient# [3:7519895857709273572:2314], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:32.360867Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895857709273575:3528], recipient# [3:7519895857709273573:2315], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:33.328017Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519895806169663984:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:33.328170Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519895806169663984:2130], cacheItem# { Subscriber: { Subscriber: [3:7519895810464631915:2569] 
DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:36:33.328267Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895862004240890:3532], recipient# [3:7519895862004240889:2316], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:33.361769Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519895806169663984:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:33.361876Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519895806169663984:2130], cacheItem# { Subscriber: { Subscriber: [3:7519895810464631915:2569] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:36:33.361918Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519895806169663984:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:33.361968Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519895806169663984:2130], cacheItem# { Subscriber: { Subscriber: [3:7519895827644501676:2985] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: 
false Partial: 0 } 2025-06-25T14:36:33.362018Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895862004240896:3533], recipient# [3:7519895862004240894:2317], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:36:33.362057Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519895862004240897:3534], recipient# [3:7519895862004240895:2318], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } |81.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |81.0%| [LD] {RESULT} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut >> TConsoleTxProcessorTests::TestTxProcessorSingle [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorSubProcessor >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-CreateUser-24 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-49 |81.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut >> TConsoleTests::TestRemoveSharedTenantWithServerlessTenants [GOOD] >> TConsoleTests::TestRemoveSharedTenantAfterRemoveServerlessTenant >> TKeyValueTest::TestWriteTrimWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOkNewApi >> TConsoleTests::TestAlterServerlessTenant [GOOD] >> TConsoleTests::TestAttributes >> KqpWorkloadService::WorkloadServiceDisabledByFeatureFlag [GOOD] >> KqpWorkloadService::WorkloadServiceDisabledByFeatureFlagOnServerless |81.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithCustomGroupAttributeGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsUseInvalidSearchFilterBad >> DataShardReadTableSnapshots::ReadTableSplitBefore [GOOD] >> DataShardReadTableSnapshots::ReadTableSplitFinished >> DataShardReadTableSnapshots::ReadTableSplitNewTxIdResolveResultReorder [GOOD] >> DataShardReadTableSnapshots::ReadTableUUID >> DataShardReadTableSnapshots::ReadTableDropColumn [GOOD] >> DataShardReadTableSnapshots::CorruptedDyNumber >> Viewer::ServerlessWithExclusiveNodesCheckTable [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsDisableRequestToAD [GOOD] >> ReadOnlyVDisk::TestGetWithMustRestoreFirst [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-52 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-53 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-66 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-67 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-68 ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestGetWithMustRestoreFirst [GOOD] Test command err: RandomSeed# 12545151986451263498 === Trying to put and get a blob === SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #0 to read-only === Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Write 10 blobs, expect some VDisks refuse parts but writes go through === SEND TEvPut with key [1:1:1:0:0:32768:0] 2025-06-25T14:36:36.959490Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:131072:0] 2025-06-25T14:36:36.964766Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-06-25T14:36:36.970465Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-06-25T14:36:36.975628Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:5:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:6:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:7:0:0:32768:0] 2025-06-25T14:36:36.985684Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:8:0:0:131072:0] 2025-06-25T14:36:36.989630Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:9:0:0:32768:0] 2025-06-25T14:36:36.996675Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:10:0:0:131072:0] 2025-06-25T14:36:36.999818Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# 
[1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 11 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Put 2 more VDisks to read-only === Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Write 10 more blobs, expect errors === SEND TEvPut with key [1:1:11:0:0:32768:0] 2025-06-25T14:36:39.971654Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-25T14:36:39.971822Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-25T14:36:39.971971Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-25T14:36:39.972963Z 1 00h05m30.160512s :BS_PROXY_PUT ERROR: [b240cbc7bf76ed12] Result# TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# 
UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:12:0:0:131072:0] 2025-06-25T14:36:39.974717Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-25T14:36:39.975111Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-25T14:36:39.976242Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:12:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 4 Situations# SUUUUU } { OrderNumber# 5 Situations# USUUUU } { OrderNumber# 6 Situations# UUSUUU } { OrderNumber# 7 Situations# UUUSUU } { OrderNumber# 0 Situations# UUUUEU } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUEU } { OrderNumber# 3 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:13:0:0:32768:0] 2025-06-25T14:36:39.978112Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-25T14:36:39.978814Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-25T14:36:39.979645Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:13:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 
ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:14:0:0:131072:0] 2025-06-25T14:36:39.980942Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-25T14:36:39.981991Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-25T14:36:39.982592Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:14:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 2 Situations# EUUUUU } { OrderNumber# 3 Situations# UPUUUU } { OrderNumber# 4 Situations# UUPUUU } { OrderNumber# 5 Situations# UUUPUU } { OrderNumber# 6 Situations# UUUUPU } { OrderNumber# 7 Situations# UUUUUP } { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:15:0:0:32768:0] 2025-06-25T14:36:39.983741Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-25T14:36:39.983829Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-25T14:36:39.984667Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:15:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 1 Situations# EUUUUU } { OrderNumber# 2 Situations# UEUUUU } { OrderNumber# 3 Situations# UUSUUU } { OrderNumber# 4 Situations# UUUSUU } { OrderNumber# 5 Situations# UUUUSU } { OrderNumber# 6 Situations# UUUUUS } { OrderNumber# 7 Situations# USUUUU } { OrderNumber# 0 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:16:0:0:131072:0] 2025-06-25T14:36:39.986391Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-25T14:36:39.986475Z 2 
00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-25T14:36:39.987466Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] TEvPutResult: TEvPutResult {Id# [1:1:16:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:16:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 1 Situations# EUUUUU } { OrderNumber# 2 Situations# UEUUUU } { OrderNumber# 3 Situations# UUSUUU } { OrderNumber# 4 Situations# UUUSUU } { OrderNumber# 5 Situations# UUUUSU } { OrderNumber# 6 Situations# UUUUUS } { OrderNumber# 7 Situations# USUUUU } { OrderNumber# 0 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:17:0:0:32768:0] 2025-06-25T14:36:39.989037Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-25T14:36:39.989269Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-25T14:36:39.989330Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:17:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# UEUUUU } { OrderNumber# 2 Situations# UUEUUU } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUSU } { OrderNumber# 5 Situations# UUUUUS } { OrderNumber# 6 Situations# SUUUUU } { OrderNumber# 7 Situations# UUSUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:18:0:0:131072:0] 2025-06-25T14:36:39.991406Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-25T14:36:39.991614Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] 2025-06-25T14:36:39.991725Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:18:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 
VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 7 Situations# SUUUUU } { OrderNumber# 0 Situations# UEUUUU } { OrderNumber# 1 Situations# UUEUUU } { OrderNumber# 2 Situations# UUUEUU } { OrderNumber# 3 Situations# UUUUSU } { OrderNumber# 4 Situations# UUUUUS } { OrderNumber# 5 Situations# USUUUU } { OrderNumber# 6 Situations# UUSUUU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:19:0:0:32768:0] 2025-06-25T14:36:39.994044Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-25T14:36:39.994264Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-25T14:36:39.994348Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:19:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 6 Situations# SUUUUU } { OrderNumber# 7 Situations# USUUUU } { OrderNumber# 0 Situations# UUEUUU } { OrderNumber# 1 Situations# UUUEUU } { OrderNumber# 2 Situations# UUUUEU } { OrderNumber# 3 Situations# UUUUUS } { OrderNumber# 4 Situations# UUSUUU } { OrderNumber# 5 Situations# UUUUSU } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:20:0:0:131072:0] 2025-06-25T14:36:39.996412Z 1 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5316:698] 2025-06-25T14:36:39.996525Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5330:712] 2025-06-25T14:36:39.996641Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5323:705] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:20:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvGet with key [1:1:11:0:0:32768:0] 2025-06-25T14:36:40.002341Z 1 00h05m30.160512s 
:BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:40.002544Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:40.002605Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:40.003226Z 1 00h05m30.160512s :BS_PROXY_GET ERROR: [4d7fdf59cb3bcbfa] Response# TEvGetResult {Status# ERROR ResponseSz# 1 {[1:1:11:0:0:32768:0] ERROR Size# 0 RequestedSize# 32768} ErrorReason# "TStrategyBase saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# PUUUUU } { OrderNumber# 6 Situations# UPUUUU } { OrderNumber# 7 Situations# UUPUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# AAAPAA } { OrderNumber# 4 Situations# AAAAAA } ] "} Marker# BPG29 2025-06-25T14:36:40.003389Z 2 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:40.003458Z 3 00h05m30.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] TEvGetResult: TEvGetResult {Status# ERROR ResponseSz# 1 {[1:1:11:0:0:32768:0] ERROR Size# 0 RequestedSize# 32768} ErrorReason# "TStrategyBase saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# PUUUUU } { OrderNumber# 6 Situations# UPUUUU } { OrderNumber# 7 Situations# UUPUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# AAAPAA } { OrderNumber# 4 Situations# AAAAAA } ] "} >> IncrementalBackup::E2EBackupCollection [FAIL] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> ViewerTopicDataTests::TopicDataTest [GOOD] Test command err: 2025-06-25T14:35:24.152217Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:1574:2395], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:24.152999Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:24.153346Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:24.157191Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:1571:2338], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:24.157520Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:1577:2338], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:24.157744Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:807:2335], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:24.158544Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:24.158602Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:24.158639Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:24.158780Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:24.159286Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:24.159400Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:1580:2336], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:24.159824Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:24.160104Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:24.160385Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:24.579449Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:24.813103Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:35:24.872500Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-25T14:35:25.620009Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 27189, node 1 TClient is connected to server localhost:11564 2025-06-25T14:35:26.100039Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:26.100098Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:26.100134Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:26.101829Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:14.585098Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519895779853828411:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:14.592401Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:36:15.120565Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519895779853828384:2080] 1750862174583560 != 1750862174583563 2025-06-25T14:36:15.147929Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:15.161856Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:15.162009Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 24874, node 6 2025-06-25T14:36:15.181103Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:15.409226Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will 
use file: (empty maybe) 2025-06-25T14:36:15.409260Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:15.409274Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:15.409456Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:15.625831Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5139 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:36:16.196057Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:36:16.247349Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:36:16.255259Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T14:36:16.267760Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-25T14:36:19.449398Z node 6 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:36:19.449480Z node 6 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:36:19.588538Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519895779853828411:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:19.588637Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:36:19.963044Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519895801328665545:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:19.963210Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:19.963604Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519895801328665557:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:19.968234Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:20.000615Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519895801328665559:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error ... sage(s) (1 left), first sequence number is 19 2025-06-25T14:36:31.631673Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session: try to update token 2025-06-25T14:36:31.631713Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Send 1 message(s) (0 left), first sequence number is 20 2025-06-25T14:36:31.654447Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session got write response: sequence_numbers: 15 sequence_numbers: 16 offsets: 54 offsets: 55 already_written: false already_written: false write_statistics { persist_duration_ms: 3 queued_in_partition_duration_ms: 51 } 2025-06-25T14:36:31.654501Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session: acknoledged message 15 2025-06-25T14:36:31.654528Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session: acknoledged message 16 2025-06-25T14:36:31.759092Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session got write response: sequence_numbers: 17 offsets: 56 already_written: false write_statistics { persist_duration_ms: 5 } 2025-06-25T14:36:31.759151Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session: acknoledged message 17 2025-06-25T14:36:31.761200Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session got write response: sequence_numbers: 18 offsets: 57 already_written: false write_statistics { persist_duration_ms: 5 queued_in_partition_duration_ms: 101 } 2025-06-25T14:36:31.761241Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session: acknoledged message 18 2025-06-25T14:36:31.764134Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session got write response: sequence_numbers: 19 offsets: 58 already_written: false write_statistics { persist_duration_ms: 2 } 2025-06-25T14:36:31.764179Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session: acknoledged message 19 2025-06-25T14:36:31.764381Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session got write response: sequence_numbers: 20 offsets: 59 already_written: false write_statistics { persist_duration_ms: 2 } 2025-06-25T14:36:31.764419Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session: acknoledged message 20 2025-06-25T14:36:31.812578Z :INFO: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session will now close 2025-06-25T14:36:31.812706Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session: aborting 2025-06-25T14:36:31.813425Z :INFO: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:36:31.813908Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session is aborting and will not restart 2025-06-25T14:36:31.814806Z :DEBUG: [] MessageGroupId [producer3] SessionId 
[producer3|b25e8787-9253855f-d17b6482-fdd9ad3d_0] Write session: destroy 2025-06-25T14:36:32.134002Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519895858174437363:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:32.134452Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:32.140760Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519895858174437391:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:32.150106Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:32.164801Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519895858174437393:2353], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:36:32.226531Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519895858174437447:2520] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:36:32.326119Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519895858174437456:2359], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:36:32.326549Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=YjA3YmRlMzUtNDFmNzc2OTktMjRiMzZhOGYtMjhjZjU5MDQ=, ActorId: [7:7519895858174437361:2348], ActorState: ExecuteState, TraceId: 01jykr9qg2azs4qkej56sgamfb, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:36:32.327518Z node 7 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 2025-06-25T14:36:32.747871Z :DEBUG: [] MessageGroupId [producer4] SessionId [] Write session: try to update token 2025-06-25T14:36:32.748387Z :INFO: [] MessageGroupId [producer4] SessionId [] Write session: Do CDS request 2025-06-25T14:36:32.748471Z :INFO: [] MessageGroupId [producer4] SessionId [] Start write session. Will connect to endpoint: localhost:10702 2025-06-25T14:36:32.763743Z :DEBUG: [] MessageGroupId [producer4] SessionId [] Write session: send init request: init_request { topic: "/Root/topic1" message_group_id: "producer4" } 2025-06-25T14:36:32.769221Z :INFO: [] MessageGroupId [producer4] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750862192769 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:36:32.769316Z :INFO: [] MessageGroupId [producer4] SessionId [] Write session established. Init response: session_id: "producer4|94143727-cde8d683-a43fb093-ce396a4c_0" topic: "topic1" 2025-06-25T14:36:32.772735Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|94143727-cde8d683-a43fb093-ce396a4c_0] Write 1 messages with Id from 1 to 1 2025-06-25T14:36:32.773378Z :INFO: [] MessageGroupId [producer4] SessionId [producer4|94143727-cde8d683-a43fb093-ce396a4c_0] Write session: close. 
Timeout = 18446744073709551 ms 2025-06-25T14:36:32.823285Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|94143727-cde8d683-a43fb093-ce396a4c_0] Write session: try to update token 2025-06-25T14:36:32.823350Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|94143727-cde8d683-a43fb093-ce396a4c_0] Send 1 message(s) (0 left), first sequence number is 1 2025-06-25T14:36:32.896784Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|94143727-cde8d683-a43fb093-ce396a4c_0] Write session got write response: sequence_numbers: 1 offsets: 60 already_written: false write_statistics { persist_duration_ms: 3 queued_in_partition_duration_ms: 43 } 2025-06-25T14:36:32.896838Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|94143727-cde8d683-a43fb093-ce396a4c_0] Write session: acknoledged message 1 2025-06-25T14:36:32.975051Z :INFO: [] MessageGroupId [producer4] SessionId [producer4|94143727-cde8d683-a43fb093-ce396a4c_0] Write session will now close 2025-06-25T14:36:32.975169Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|94143727-cde8d683-a43fb093-ce396a4c_0] Write session: aborting 2025-06-25T14:36:32.975965Z :INFO: [] MessageGroupId [producer4] SessionId [producer4|94143727-cde8d683-a43fb093-ce396a4c_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:36:32.976704Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|94143727-cde8d683-a43fb093-ce396a4c_0] Write session: destroy Size: 4194320 Got response:400: PathErrorUnknown Got response:400: No such partition in topic 2025-06-25T14:36:33.297802Z node 7 :PERSQUEUE ERROR: partition_read.cpp:780: [PQ: 72075186224037889, Partition: 0, State: StateIdle] reading from too big offset - topic topic1 partition 0 client $without_consumer EndOffset 61 offset 10000 Got response:400: Bad offset 2025-06-25T14:36:33.367463Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519895862469404836:2382], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:36:33.368481Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=ZmYzNzVmZjAtMmI0NWY1ODQtMjcyOGMzYmQtZDllYjAxYmQ=, ActorId: [7:7519895862469404829:2378], ActorState: ExecuteState, TraceId: 01jykr9rnj4c6fy4dkwx50qsgr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:36:33.368892Z node 7 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> ReadOnlyVDisk::TestStorageLoad >> TConsoleTxProcessorTests::TestTxProcessorSubProcessor [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorTemporary ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsDisableRequestToAD [GOOD] Test command err: 2025-06-25T14:36:13.850431Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895774928654347:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:13.850648Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f97/r3tmp/tmp3yHMvd/pdisk_1.dat 2025-06-25T14:36:14.845721Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:14.855764Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:14.855865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:14.885281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:14.895102Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:14.925132Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:14.926531Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895774928654157:2080] 1750862173761632 != 1750862173761635 TServer::EnableGrpc on GrpcPort 23553, node 1 2025-06-25T14:36:15.224953Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:15.224971Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:15.224980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:15.225090Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:15.448445Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:15.448772Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:15.448792Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:15.449958Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://unavailablehost:4007, port: 4007 2025-06-25T14:36:15.450014Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:36:15.458957Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:184: Could not start TLS. Can't contact LDAP server 2025-06-25T14:36:15.459406Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****D0YQ (4B4F5ED1) () has now retryable error message 'Could not login via LDAP (Could not start TLS. Can't contact LDAP server)' 2025-06-25T14:36:15.459571Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:15.459586Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:15.460090Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://unavailablehost:4007, port: 4007 2025-06-25T14:36:15.460120Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:36:15.472465Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:184: Could not start TLS. Can't contact LDAP server 2025-06-25T14:36:15.472606Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****D0YQ (4B4F5ED1) () has now retryable error message 'Could not login via LDAP (Could not start TLS. 
Can't contact LDAP server)' 2025-06-25T14:36:18.336973Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895795919001135:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:18.337301Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f97/r3tmp/tmpSrfTs5/pdisk_1.dat 2025-06-25T14:36:18.607260Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:18.616615Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895795919001111:2080] 1750862178333289 != 1750862178333292 2025-06-25T14:36:18.631195Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:18.631278Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:18.633862Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16459, node 2 2025-06-25T14:36:18.705040Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:18.705062Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:18.705075Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:18.705221Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:19.025845Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:19.033823Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:19.033862Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:19.034862Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****cduQ (6319E52B) () has now permanent error message 'Could not login via LDAP (List of ldap server hosts is empty)' 2025-06-25T14:36:22.981815Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519895813954092962:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:22.983843Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f97/r3tmp/tmpYogZtF/pdisk_1.dat 2025-06-25T14:36:23.276417Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:23.276517Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:23.277921Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:23.284520Z node 3 
:CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519895813954092765:2080] 1750862182939283 != 1750862182939286 2025-06-25T14:36:23.294080Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21894, node 3 2025-06-25T14:36:23.385364Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:23.385394Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:23.385402Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:23.385536Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:23.556511Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:23.561517Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:23.561547Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:23.562255Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****EsRQ (83910766) () has now permanent error message 'Could not login via LDAP (Parameter BaseDn is empty)' 2025-06-25T14:36:26.723740Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519895829906688152:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:26.725870Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f97/r3tmp/tmpXZUdX5/pdisk_1.dat 2025-06-25T14:36:26.870270Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:26.874665Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519895829906688121:2080] 1750862186715626 != 1750862186715629 TServer::EnableGrpc on GrpcPort 5727, node 4 2025-06-25T14:36:26.898477Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:26.898590Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:26.900107Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:26.984453Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:26.984475Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:26.984484Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:26.984636Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:27.054505Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:27.060699Z node 
4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:27.060736Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:27.061517Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****qtRA (D1073A91) () has now permanent error message 'Could not login via LDAP (Parameter BindDn is empty)' 2025-06-25T14:36:30.636785Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519895846626520192:2246];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f97/r3tmp/tmp7SnzCf/pdisk_1.dat 2025-06-25T14:36:30.756774Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:36:30.837663Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:30.839513Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519895846626519956:2080] 1750862190526283 != 1750862190526286 2025-06-25T14:36:30.861964Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:30.862090Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:30.865655Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1752, node 5 2025-06-25T14:36:31.128657Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:31.128682Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:31.128690Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:31.128832Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:31.384549Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:31.388025Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:31.388061Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:31.389017Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****k6oA (EC9B967D) () has now permanent error message 'Could not login via LDAP (Parameter BindPassword is empty)' 2025-06-25T14:36:31.528488Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:35.358798Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519895868604407238:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:35.358879Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f97/r3tmp/tmpuBTyoP/pdisk_1.dat 2025-06-25T14:36:35.638474Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:35.638571Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:35.641031Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:35.643388Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:35.645762Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519895868604407219:2080] 1750862195356958 != 1750862195356961 TServer::EnableGrpc on GrpcPort 2560, node 6 2025-06-25T14:36:35.778614Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:35.778637Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:35.778646Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:35.778807Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:36.012510Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:36.015039Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:36.015070Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:36.015867Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:31706, port: 31706 2025-06-25T14:36:36.015982Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:36.084673Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:36:36.133396Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****mE5w (DD2BA98B) () has now valid token of ldapuser@ldap |81.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |81.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes >> TConsoleTests::TestAuthorization [GOOD] >> TConsoleTests::TestAuthorizationExtSubdomain |81.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::ServerlessWithExclusiveNodesCheckTable [GOOD] Test command err: 2025-06-25T14:34:50.443308Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895417212777979:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:50.443503Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:34:51.027652Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:51.027780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:51.029497Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:51.036491Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895417212777779:2080] 1750862090386852 != 1750862090386855 2025-06-25T14:34:51.055767Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16928, node 1 2025-06-25T14:34:51.320911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:51.322629Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:51.322676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:51.322871Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:34:51.428714Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4557 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:51.939222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:34:51.977377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:34:52.009112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:34:52.013142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T14:34:52.021189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-25T14:34:55.264642Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895438687614947:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:55.264692Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895438687614955:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:55.264737Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:55.272811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:55.294178Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895438687614961:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:34:55.356908Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895438687615012:2359] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:34:55.445489Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895417212777979:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:55.445565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:34:58.370076Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895452164265722:2254];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:34:58.431007Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:34:58.544389Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:58.544461Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:58.558464Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895452164265480:2080] 1750862098210399 != 1750862098210402 2025-06-25T14:34:58.569849Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:58.572072Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3195, node 2 2025-06-25T14:34:58.696982Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:58.697010Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:58.697017Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:58.697162Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4798 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:59.062594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:59.092473Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:34:59.122612Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:34:59.127515Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T14:34:59.257679Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:02.222519Z node 2 :KQP ... .sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:10.654862Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:35:10.670924Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-25T14:35:10.674310Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-25T14:35:11.032358Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:14.041361Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519895521641469573:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:14.041377Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519895521641469578:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:14.041479Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:14.046341Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:14.061719Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519895521641469587:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-25T14:35:14.123768Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519895521641469638:2355] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:24.879985Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:434:2390], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:24.880580Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:24.880733Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:25.359530Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:25.512045Z node 5 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:35:25.581909Z node 5 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-25T14:35:26.324123Z node 5 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 17302, node 5 TClient is connected to server localhost:11993 2025-06-25T14:35:26.877049Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:26.877141Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:26.877223Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:26.877723Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:35:47.110826Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:510:2390], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:47.111361Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:47.111504Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:47.876240Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:48.079877Z node 7 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:35:48.173412Z node 7 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-25T14:35:49.350544Z node 7 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 27613, node 7 TClient is connected to server localhost:30206 2025-06-25T14:35:50.279762Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:50.279887Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:50.279977Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:50.281131Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:08.094910Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:505:2387], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:08.095569Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:08.095780Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:36:08.709825Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:08.873310Z node 10 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:36:08.945747Z node 10 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-25T14:36:10.295749Z node 10 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 16782, node 10 TClient is connected to server localhost:18885 2025-06-25T14:36:11.331962Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:11.332082Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:11.332167Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:11.333230Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:31.057237Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [13:592:2391], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:31.057783Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:31.058050Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:36:31.705410Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:31.866845Z node 13 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:36:31.961494Z node 13 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-25T14:36:33.074305Z node 13 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 8211, node 13 TClient is connected to server localhost:4883 2025-06-25T14:36:34.121406Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:34.121533Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:34.121624Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:34.122149Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> KqpJoinOrder::SortingsSimpleOrderByPKAlias-RemoveLimitOperator [GOOD] >> ResourcePoolsDdl::TestPoolSwitchToUnlimitedState [GOOD] >> ResourcePoolsDdl::TestResourcePoolAcl >> TNetClassifierUpdaterTest::TestFiltrationByNetboxCustomFieldsOnly [GOOD] >> TNetClassifierUpdaterTest::TestFiltrationByNetboxTags >> ReadOnlyVDisk::TestGarbageCollect |81.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestDiscover [GOOD] |81.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |81.0%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |81.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg >> KqpJoinOrder::SortingsByPK+RemoveLimitOperator [GOOD] >> ReadOnlyVDisk::TestReads ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestDiscover [GOOD] Test command err: RandomSeed# 2666614288339090219 SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:1:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 3 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with 
key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-06-25T14:36:38.435000Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5315:698] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-06-25T14:36:38.900838Z 1 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5315:698] 2025-06-25T14:36:38.901932Z 2 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5322:705] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] SEND TEvPut with key [1:1:5:0:0:32768:0] 2025-06-25T14:36:39.333196Z 3 00h02m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5329:712] 2025-06-25T14:36:39.334177Z 1 00h02m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5315:698] 2025-06-25T14:36:39.334817Z 2 00h02m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5322:705] 2025-06-25T14:36:39.335100Z 1 00h02m30.110512s :BS_PROXY_PUT ERROR: [97439c33856ecde7] Result# TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:5:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 2 Situations# EUUUUU } { OrderNumber# 3 Situations# UPUUUU } { OrderNumber# 4 Situations# UUPUUU } { OrderNumber# 5 Situations# UUUPUU } { OrderNumber# 6 Situations# UUUUPU } { OrderNumber# 7 Situations# UUUUUP } { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:5:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 2 Situations# 
EUUUUU } { OrderNumber# 3 Situations# UPUUUU } { OrderNumber# 4 Situations# UUPUUU } { OrderNumber# 5 Situations# UUUPUU } { OrderNumber# 6 Situations# UUUUPU } { OrderNumber# 7 Situations# UUUUUP } { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0.999988} === Putting VDisk #3 to read-only === Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #4 to read-only === Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #5 to read-only === Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 
{[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #6 to read-only === Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] === Read all 6 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} === Putting VDisk #0 to normal === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Putting VDisk #1 to normal === Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] === Putting VDisk #2 to normal === Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Putting VDisk #3 to normal === Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Putting VDisk #4 to normal === Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Putting VDisk #5 to normal === Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Putting VDisk #6 to normal === Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] SEND TEvPut with key [1:1:6:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] >> ReadOnlyVDisk::TestSync >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-18 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-19 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsSimpleOrderByPKAlias-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 7499, MsgBus: 16227 2025-06-25T14:35:52.058831Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895684349042144:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:52.059139Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ebb/r3tmp/tmpLFJsJB/pdisk_1.dat 2025-06-25T14:35:52.754054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:52.754168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Disconnected -> Connecting 2025-06-25T14:35:52.763966Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:52.828900Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:52.832433Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895680054074663:2080] 1750862152013685 != 1750862152013688 TServer::EnableGrpc on GrpcPort 7499, node 1 2025-06-25T14:35:53.045477Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:53.095346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:53.095366Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:53.095374Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:53.095494Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16227 TClient is connected to server localhost:16227 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:54.285365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:54.308478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:35:56.989222Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895701528911794:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.989362Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.996456Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895701528911806:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:57.004182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:57.028505Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895701528911808:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:35:57.049941Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895684349042144:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:57.050009Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:35:57.092501Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895705823879155:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:57.582404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:57.715275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:57.750315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:57.782193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:57.822231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:57.978665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:58.064626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:58.099669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:58.139534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:58.185583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:58.220128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:58.242273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:58.272637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:58.951175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subopera ... 
31018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.634160Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.634607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.635469Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.635890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.638920Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.639454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.640132Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.640906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.645706Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.646326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.651606Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.652603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.653242Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.653672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.657371Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.657813Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.658027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.658311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.662790Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.663381Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.670192Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.670863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.674304Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.674794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.683721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.684245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.694711Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.695202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.700470Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.701102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.705427Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.706008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.710570Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.711066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.716504Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.717086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.721424Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.722796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:35.727735Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.758417Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:35.812971Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykr8t205wxzs6rh5fcys1zb", SessionId: ydb://session/3?node_id=1&id=NzA2YWRkZjAtMzQxYWQxMWYtZTNjMjc1NTgtMTJkYmNjYjg=, Slow query, duration: 33.827886s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:36:36.074647Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:36.074918Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519895796018208843:4297];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-25T14:36:36.075165Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:36.075216Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> ReadOnlyVDisk::TestWrites >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-65 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-66 >> DataShardReadTableSnapshots::CorruptedDyNumber [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorTemporary [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorRandom ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] Test command err: 2025-06-25T14:36:14.720227Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895777452531813:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:14.729281Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f82/r3tmp/tmpGFcMC5/pdisk_1.dat 2025-06-25T14:36:15.490069Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895777452531630:2080] 1750862174680410 != 1750862174680413 2025-06-25T14:36:15.553884Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:15.559847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:15.559937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:15.568733Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4403, node 1 2025-06-25T14:36:15.718088Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:15.788840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:15.788859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:15.788866Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:15.788953Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:16.099978Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:16.104925Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:16.104955Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:16.105624Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:31603, port: 31603 2025-06-25T14:36:16.106202Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:16.144806Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:36:16.188750Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:36:16.237181Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****VGdw (5FE7F979) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f82/r3tmp/tmpQ5HORx/pdisk_1.dat 2025-06-25T14:36:19.499359Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895802140550848:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:19.512744Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:36:19.597690Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:19.597754Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:19.603582Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:19.603985Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895802140550722:2080] 1750862179164489 != 1750862179164492 2025-06-25T14:36:19.621484Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5583, node 2 2025-06-25T14:36:19.826892Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:19.826911Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T14:36:19.826917Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:19.827017Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:19.994638Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:19.994900Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:19.994914Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:19.995532Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:8924, port: 8924 2025-06-25T14:36:19.995595Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:20.016453Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:36:20.068523Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:36:20.069076Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:36:20.069141Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:20.112710Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:20.160526Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:20.161308Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****fVVg (8BD648C4) () has now valid token of ldapuser@ldap 2025-06-25T14:36:20.292460Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:23.965398Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519895816307072450:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:23.965547Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f82/r3tmp/tmphGbiWa/pdisk_1.dat TServer::EnableGrpc on GrpcPort 2656, node 3 2025-06-25T14:36:24.384582Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:24.384706Z node 3 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:24.385935Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:24.393368Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:24.408433Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519895816307072241:2080] 1750862183954971 != 1750862183954974 2025-06-25T14:36:24.441040Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:24.441060Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:24.441066Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:24.441173Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:24.596441Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:24.600614Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:24.600639Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:24.601253Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:15748, port: 15748 2025-06-25T14:36:24.601307Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:24.617177Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:36:24.661280Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****jvRg (21DC8A54) () has now valid token of ldapuser@ldap 2025-06-25T14:36:24.954166Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f82/r3tmp/tmpdCTg3P/pdisk_1.dat 2025-06-25T14:36:29.132015Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:29.256371Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:29.286777Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:29.286852Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:29.288300Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519895839114045092:2080] 1750862188974026 != 1750862188974029 2025-06-25T14:36:29.292894Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16468, node 4 2025-06-25T14:36:29.448860Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:29.448881Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:29.448889Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:29.449009Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:29.610157Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:29.613361Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:29.613388Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:29.614024Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://qqq:63483 ldap://localhost:63483 ldap://localhost:11111, port: 63483 2025-06-25T14:36:29.614111Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:29.656856Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:36:29.704335Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:36:29.704938Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:36:29.704987Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:29.748886Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:29.792620Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:29.793646Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****ahlw (D0E1AD9B) () has now valid token of ldapuser@ldap 2025-06-25T14:36:30.024901Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:33.998575Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519895859052776076:2163];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:34.108510Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/000f82/r3tmp/tmpMigaAj/pdisk_1.dat 2025-06-25T14:36:34.291147Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:34.291223Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:34.295989Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:34.302183Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519895859052775949:2080] 1750862193954265 != 1750862193954268 2025-06-25T14:36:34.313669Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20327, node 5 2025-06-25T14:36:34.464981Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:34.465000Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:34.465007Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:34.465129Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:34.796435Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:34.807507Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:34.807536Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:34.808336Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:29546, port: 29546 2025-06-25T14:36:34.808419Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:34.827890Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-25T14:36:34.872596Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:36:34.873134Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:36:34.873181Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-25T14:36:34.924526Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-25T14:36:34.974074Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: 
(|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-25T14:36:34.975108Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****DsPw (B5A7F716) () has now valid token of ldapuser@ldap 2025-06-25T14:36:34.997389Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f82/r3tmp/tmpnpezcd/pdisk_1.dat 2025-06-25T14:36:39.848381Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:39.952484Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:39.952578Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:39.953260Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:39.954883Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519895887464231656:2080] 1750862199676276 != 1750862199676279 2025-06-25T14:36:39.965649Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19961, node 6 2025-06-25T14:36:40.135909Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:40.135929Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:40.135936Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:40.136060Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:40.299980Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:40.300256Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:40.300272Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:40.300971Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:29102, port: 29102 2025-06-25T14:36:40.301056Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:40.336947Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: &(uid=ldapuser)(), attributes: memberOf 2025-06-25T14:36:40.337036Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:29102. Bad search filter 2025-06-25T14:36:40.337242Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****53Fw (9A2B6D6A) () has now permanent error message 'Could not login via LDAP (Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:29102. 
Bad search filter)' |81.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoDisableNestedGroupsGood ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPK+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 14691, MsgBus: 20238 2025-06-25T14:35:52.132348Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895682725248910:2131];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ebd/r3tmp/tmp6liyUo/pdisk_1.dat 2025-06-25T14:35:52.418144Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:35:52.581954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:52.582058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:52.584843Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:52.622121Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:52.624497Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895682725248817:2080] 1750862152008554 != 1750862152008557 TServer::EnableGrpc on GrpcPort 14691, node 1 2025-06-25T14:35:52.812994Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:52.813012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:52.813018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:52.813163Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20238 2025-06-25T14:35:53.136445Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20238 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:53.488796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:53.508967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:35:55.384536Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895695610151344:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:55.384679Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:55.385222Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895695610151356:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:55.388998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:55.401974Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895695610151358:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:35:55.474738Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895695610151411:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:55.827734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:55.958221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.010888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.050302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.101024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.329177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.420967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.452506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.481069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.520159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.551193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.574907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:56.608940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:57.057583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895682725248910:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:57.057638Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:35:57.481200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.154382Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.155723Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038429;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.156483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038477;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.165045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.174679Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038477;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.175325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.182864Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.183481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.188703Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.188810Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.189299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.189373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.199502Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.201072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-25T14:36:37.206657Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.207404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.208085Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.208802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.214796Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.215499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.217821Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.218497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.221377Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.222081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.227835Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.229416Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.230034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.232748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.239859Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.242369Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.242886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.245611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038469;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.257661Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.258324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038487;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.261424Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038469;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.261994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.264612Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038487;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.265354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038463;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.271557Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038463;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.272225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:36:37.272394Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.278623Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:37.472619Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykr8r8z0q4y5grvcadk0pqr", SessionId: 
ydb://session/3?node_id=1&id=ODQ1NGY0NjctYWUxMGU0YjEtYjM0YzFlOTItN2VmNWZhNg==, Slow query, duration: 37.312903s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:36:37.841112Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:37.841574Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:37.842493Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> DataShardReadTableSnapshots::ReadTableUUID [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-49 [FAIL] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-50 >> DataShardReadTableSnapshots::ReadTableSplitFinished [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::CorruptedDyNumber [GOOD] Test command err: 2025-06-25T14:36:35.730501Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:35.730713Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:35.730785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001aaa/r3tmp/tmpgo6kpg/pdisk_1.dat 2025-06-25T14:36:36.095713Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:36:36.098500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:36.159695Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:36.166015Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862192433041 != 1750862192433045 2025-06-25T14:36:36.212024Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:36:36.213180Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:36:36.213773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:36.213920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:36.225568Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:36.322513Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T14:36:36.322594Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:36:36.322763Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T14:36:36.468192Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T14:36:36.468509Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:36:36.469130Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:36:36.469240Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:36:36.469581Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:36:36.469813Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:36:36.469987Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:36:36.470308Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:36:36.471880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:36.473072Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:36:36.473144Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T14:36:36.514293Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:36:36.515471Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:36:36.516055Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:36:36.516366Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:36:36.563429Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:36:36.564198Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:36:36.564937Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:36:36.566884Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:36:36.566969Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:36:36.567031Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:36:36.567412Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T14:36:36.567564Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:36:36.567739Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:36:36.579821Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:36:36.605331Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:36:36.605583Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:36:36.605735Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:36:36.605781Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:36:36.605823Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:36:36.605864Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:36:36.606131Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:36.606197Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:36.606606Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:36:36.606718Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:36:36.606792Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:36:36.606853Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:36:36.606913Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:36:36.606972Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:36:36.607020Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:36:36.607061Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:36:36.607120Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:36:36.607591Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:36.607633Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:36.607683Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], 
sessionId# [0:0:0] 2025-06-25T14:36:36.607794Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:36:36.607842Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:36:36.607961Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:36:36.608223Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:36:36.608284Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:36:36.609026Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:36:36.609113Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14: ... ng event TEvTxProcessing::TEvStreamClearancePending 2025-06-25T14:36:44.972693Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:702:2583], Recipient [2:627:2531]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715659 Cleared: true 2025-06-25T14:36:44.972730Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-25T14:36:44.972851Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:627:2531], Recipient [2:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:44.972869Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:44.972897Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:36:44.972922Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:36:44.972952Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715659] at 72075186224037888 for WaitForStreamClearance 2025-06-25T14:36:44.972976Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit WaitForStreamClearance 2025-06-25T14:36:44.973004Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [0:281474976715659] at 72075186224037888 2025-06-25T14:36:44.973027Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Executed 2025-06-25T14:36:44.973046Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit WaitForStreamClearance 2025-06-25T14:36:44.973064Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715659] at 72075186224037888 to execution unit ReadTableScan 2025-06-25T14:36:44.973081Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit ReadTableScan 2025-06-25T14:36:44.973215Z node 2 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Continue 2025-06-25T14:36:44.973231Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:36:44.973268Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T14:36:44.973289Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:36:44.973308Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:36:44.973345Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:36:44.973615Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:732:2600], Recipient [2:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-25T14:36:44.973643Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-25T14:36:44.973703Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:732:2600], Recipient [2:702:2583]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715659 ShardId: 72075186224037888 2025-06-25T14:36:44.973728Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:702:2583] TxId# 281474976715658] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-25T14:36:44.973948Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:701:2583], Recipient [2:702:2583]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715658 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-25T14:36:44.973992Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:702:2583] TxId# 281474976715658] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-25T14:36:44.974025Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:702:2583] TxId# 281474976715658] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-25T14:36:44.974072Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715659, MessageQuota: 1 2025-06-25T14:36:44.974154Z node 2 :TX_DATASHARD ERROR: read_table_scan.cpp:681: Got scan fatal error: Invalid DyNumber binary representation 2025-06-25T14:36:44.974186Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715659, MessageQuota: 1 2025-06-25T14:36:44.974303Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:36:44.974329Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715659, at: 72075186224037888 2025-06-25T14:36:44.974422Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287429, Sender [2:732:2600], Recipient [2:702:2583]: NKikimrTx.TEvStreamQuotaRelease TxId: 281474976715659 ShardId: 72075186224037888 2025-06-25T14:36:44.974447Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:702:2583] TxId# 281474976715658] Received TEvStreamQuotaRelease from ShardId# 72075186224037888 2025-06-25T14:36:44.974475Z node 2 :TX_PROXY DEBUG: 
read_table_impl.cpp:2188: [ReadTable [2:702:2583] TxId# 281474976715658] Released quota 1 reserved messages from ShardId# 72075186224037888 2025-06-25T14:36:44.974572Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:627:2531], Recipient [2:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:44.974606Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:44.974648Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:36:44.974680Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:36:44.974749Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715659] at 72075186224037888 for ReadTableScan 2025-06-25T14:36:44.974774Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit ReadTableScan 2025-06-25T14:36:44.974806Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715659] at 72075186224037888 error: Invalid DyNumber binary representation, IsFatalError: 1 2025-06-25T14:36:44.974847Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Executed 2025-06-25T14:36:44.974879Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit ReadTableScan 2025-06-25T14:36:44.974904Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715659] at 72075186224037888 to execution unit FinishPropose 2025-06-25T14:36:44.974928Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit FinishPropose 2025-06-25T14:36:44.974948Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is DelayComplete 2025-06-25T14:36:44.974969Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit FinishPropose 2025-06-25T14:36:44.974995Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715659] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T14:36:44.975022Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit CompletedOperations 2025-06-25T14:36:44.975056Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Executed 2025-06-25T14:36:44.975073Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T14:36:44.975091Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715659] at 72075186224037888 has finished 2025-06-25T14:36:44.975117Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:36:44.975141Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 
72075186224037888 2025-06-25T14:36:44.975173Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:36:44.975198Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:36:44.975241Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:36:44.975268Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715659] at 72075186224037888 on unit FinishPropose 2025-06-25T14:36:44.975294Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715659 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: EXEC_ERROR 2025-06-25T14:36:44.975327Z node 2 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715659 at tablet 72075186224037888 status: EXEC_ERROR errors: PROGRAM_ERROR (Invalid DyNumber binary representation) | 2025-06-25T14:36:44.975378Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:36:44.975564Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:627:2531], Recipient [2:702:2583]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037888 Status: EXEC_ERROR Error { Kind: PROGRAM_ERROR Reason: "Invalid DyNumber binary representation" } TxId: 281474976715659 Step: 0 OrderId: 281474976715659 ExecLatency: 0 ProposeLatency: 0 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037888 CpuTimeUsec: 302 } } CommitVersion { Step: 0 TxId: 281474976715659 } 2025-06-25T14:36:44.975588Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1921: [ReadTable [2:702:2583] TxId# 281474976715658] Received TEvProposeTransactionResult Status# EXEC_ERROR ShardId# 72075186224037888 2025-06-25T14:36:44.975639Z node 2 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [2:702:2583] TxId# 281474976715658] RESPONSE Status# ExecError shard: 72075186224037888 table: /Root/Table 2025-06-25T14:36:44.975874Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:702:2583], Recipient [2:627:2531]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1500 TxId: 281474976715658 |81.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::ReadTableUUID [GOOD] Test command err: 2025-06-25T14:36:36.178774Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:36.178917Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:36.178965Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001acb/r3tmp/tmpTUirfQ/pdisk_1.dat 2025-06-25T14:36:36.551727Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:36:36.554861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:36.618949Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:36.630270Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862192518543 != 1750862192518547 2025-06-25T14:36:36.684692Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:36:36.685846Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:36:36.686413Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:36.686528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:36.699654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:36.788719Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T14:36:36.788791Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:36:36.788968Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T14:36:36.966340Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T14:36:36.966471Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:36:36.967104Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:36:36.967200Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:36:36.967529Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:36:36.967804Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:36:36.967998Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:36:36.968348Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:36:36.969898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:36.971061Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:36:36.971143Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T14:36:37.014293Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:36:37.015510Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:36:37.016071Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:36:37.016444Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:36:37.064019Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:36:37.064833Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:36:37.064979Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:36:37.066912Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:36:37.067003Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:36:37.067067Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:36:37.067484Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T14:36:37.067659Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:36:37.067813Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:36:37.081006Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:36:37.141497Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:36:37.141738Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:36:37.141864Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:36:37.141907Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:36:37.141947Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:36:37.141987Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:36:37.142255Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:37.142314Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:37.142694Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:36:37.142787Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:36:37.142860Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:36:37.142915Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:36:37.142964Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:36:37.143023Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:36:37.143059Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:36:37.143096Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:36:37.143169Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:36:37.143594Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:37.143633Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:37.143679Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], 
sessionId# [0:0:0] 2025-06-25T14:36:37.143771Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:36:37.143845Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:36:37.143960Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:36:37.144172Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:36:37.144223Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:36:37.144303Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:36:37.144397Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14: ... 86224037888 to execution unit ReadTableScan 2025-06-25T14:36:45.500118Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037888 on unit ReadTableScan 2025-06-25T14:36:45.510710Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037888 is Continue 2025-06-25T14:36:45.510784Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:36:45.510820Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T14:36:45.510852Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:36:45.510882Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:36:45.510975Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:36:45.511535Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:829:2657], Recipient [2:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-25T14:36:45.511584Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-25T14:36:45.511652Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:817:2646] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-25T14:36:45.512002Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:817:2646] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-25T14:36:45.512074Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:817:2646] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-25T14:36:45.512145Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-25T14:36:45.512356Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, 
TxId: 281474976715662, Size: 38, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:36:45.512460Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:817:2646] TxId# 281474976715661] Received stream data from ShardId# 72075186224037888 2025-06-25T14:36:45.512543Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715662, PendingAcks: 0 2025-06-25T14:36:45.512640Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:817:2646] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-25T14:36:45.513041Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:817:2646] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-25T14:36:45.513073Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:817:2646] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-25T14:36:45.513115Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-25T14:36:45.513184Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715662, Size: 38, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:36:45.513236Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:817:2646] TxId# 281474976715661] Received stream data from ShardId# 72075186224037888 2025-06-25T14:36:45.513279Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715662, PendingAcks: 0 2025-06-25T14:36:45.513309Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:817:2646] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-25T14:36:45.513538Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:817:2646] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-25T14:36:45.513581Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:817:2646] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-25T14:36:45.513633Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-25T14:36:45.513685Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715662, Size: 38, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:36:45.513739Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:817:2646] TxId# 281474976715661] Received stream data from ShardId# 72075186224037888 2025-06-25T14:36:45.513780Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715662, PendingAcks: 0 2025-06-25T14:36:45.513808Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:817:2646] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-25T14:36:45.514009Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:817:2646] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-25T14:36:45.514034Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:817:2646] TxId# 281474976715661] Reserving 
quota 1 messages for ShardId# 72075186224037888 2025-06-25T14:36:45.514068Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-25T14:36:45.514118Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-25T14:36:45.514289Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:36:45.514341Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715662, at: 72075186224037888 2025-06-25T14:36:45.514419Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:817:2646] TxId# 281474976715661] Received TEvStreamQuotaRelease from ShardId# 72075186224037888 2025-06-25T14:36:45.514462Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2188: [ReadTable [2:817:2646] TxId# 281474976715661] Released quota 1 reserved messages from ShardId# 72075186224037888 2025-06-25T14:36:45.514645Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:627:2531], Recipient [2:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:45.514707Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:45.514786Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:36:45.514850Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:36:45.516332Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715662] at 72075186224037888 for ReadTableScan 2025-06-25T14:36:45.516407Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037888 on unit ReadTableScan 2025-06-25T14:36:45.516480Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715662] at 72075186224037888 error: , IsFatalError: 0 2025-06-25T14:36:45.516536Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037888 is Executed 2025-06-25T14:36:45.516583Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037888 executing on unit ReadTableScan 2025-06-25T14:36:45.516622Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715662] at 72075186224037888 to execution unit FinishPropose 2025-06-25T14:36:45.516660Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037888 on unit FinishPropose 2025-06-25T14:36:45.516718Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037888 is DelayComplete 2025-06-25T14:36:45.516756Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037888 executing on unit FinishPropose 2025-06-25T14:36:45.516796Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715662] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T14:36:45.516830Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037888 on 
unit CompletedOperations 2025-06-25T14:36:45.516882Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037888 is Executed 2025-06-25T14:36:45.516907Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T14:36:45.516934Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715662] at 72075186224037888 has finished 2025-06-25T14:36:45.516978Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:36:45.517019Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T14:36:45.517056Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:36:45.517096Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:36:45.517182Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:36:45.517220Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715662] at 72075186224037888 on unit FinishPropose 2025-06-25T14:36:45.517261Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715662 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-25T14:36:45.517347Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:36:45.517537Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1850: [ReadTable [2:817:2646] TxId# 281474976715661] Received stream complete from ShardId# 72075186224037888 2025-06-25T14:36:45.517612Z node 2 :TX_PROXY INFO: read_table_impl.cpp:2933: [ReadTable [2:817:2646] TxId# 281474976715661] RESPONSE Status# ExecComplete prepare time: 0.013883s execute time: 0.125903s total time: 0.139786s 2025-06-25T14:36:45.518008Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:817:2646], Recipient [2:627:2531]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::E2EBackupCollection [FAIL] Test command err: 2025-06-25T14:36:15.314593Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:15.314753Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:15.314826Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00099f/r3tmp/tmpjvq85u/pdisk_1.dat 2025-06-25T14:36:15.680958Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:36:15.682135Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:557:2482], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:15.682205Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:15.682244Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:36:15.682383Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:554:2480], Recipient [1:373:2367]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-25T14:36:15.682434Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:36:15.955756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-25T14:36:15.955996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:36:15.956207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:36:15.956264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:36:15.956519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:36:15.956611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:15.956713Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:15.957446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:36:15.957648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:36:15.957696Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:15.957728Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-25T14:36:15.957912Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:36:15.957948Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:36:15.958019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:36:15.958068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:36:15.958147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:36:15.958183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:36:15.958290Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:15.958703Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:15.958784Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-25T14:36:15.958905Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:36:15.958935Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:36:15.958982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:36:15.959015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:36:15.959050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:36:15.959115Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:15.959447Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:15.959475Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 
2025-06-25T14:36:15.959566Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:36:15.959590Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:36:15.959625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:36:15.959663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:36:15.959709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-25T14:36:15.959736Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:15.959767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:36:15.978641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:36:15.979306Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:15.979373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:36:15.979550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:36:15.990853Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877760, Sender [1:562:2487], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:564:2488] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T14:36:15.990942Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5050: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:36:15.991000Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5787: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-25T14:36:15.991178Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269091328, Sender [1:369:2363], Recipient [1:373:2367]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-25T14:36:15.991638Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:566:2490], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:15.991688Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-25T14:36:15.991722Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:36:15.991902Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124996, Sender [1:554:2480], Recipient [1:373:2367]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-25T14:36:15.991944Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-25T14:36:15.992017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-25T14:36:15.992053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-25T14:36:15.992087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-25T14:36:16.046943Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 273285138, Sender [1:45:2092], Recipient [1:373:2367]: ... EvPrivate::TEvCompleteBarrier { TxId: 281474976715668 Name: CopyTableBarrier }, at tablet# 72057594046644480 2025-06-25T14:36:39.985520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715668:2 1 -> 240 2025-06-25T14:36:39.986145Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:39.986184Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715668:0 2025-06-25T14:36:39.986214Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715668:2 2025-06-25T14:36:39.986330Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:36:39.986361Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:36:39.986436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-25T14:36:39.986489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715668:0 ProgressState 2025-06-25T14:36:39.986599Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:39.986635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:0 progress is 2/3 2025-06-25T14:36:39.986662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 2/3 2025-06-25T14:36:39.986693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:0 progress is 2/3 2025-06-25T14:36:39.986719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 2/3 2025-06-25T14:36:39.986750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 2/3, is published: true 2025-06-25T14:36:39.986992Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:36:39.987023Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:36:39.987068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715668:2, at schemeshard: 72057594046644480 2025-06-25T14:36:39.987097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715668:2 ProgressState 2025-06-25T14:36:39.987147Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:36:39.987177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:2 progress is 3/3 2025-06-25T14:36:39.987224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 3/3 2025-06-25T14:36:39.987256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:2 progress is 3/3 2025-06-25T14:36:39.987285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 3/3 2025-06-25T14:36:39.987317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 3/3, is published: true 2025-06-25T14:36:39.987402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:1382:3050] message: TxId: 281474976715668 2025-06-25T14:36:39.987466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 3/3 2025-06-25T14:36:39.987522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:0 2025-06-25T14:36:39.987561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715668:0 2025-06-25T14:36:39.987708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 16] was 3 2025-06-25T14:36:39.987790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 3 2025-06-25T14:36:39.987833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:1 2025-06-25T14:36:39.987854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715668:1 2025-06-25T14:36:39.987888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 15] was 2 2025-06-25T14:36:39.987910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:2 2025-06-25T14:36:39.987929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715668:2 2025-06-25T14:36:39.987966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 9] was 3 2025-06-25T14:36:39.992580Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:39.992854Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:36:39.992970Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [1:1382:3050] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715668 at schemeshard: 72057594046644480 2025-06-25T14:36:39.993336Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [1:1389:3056], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:36:39.993375Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:36:39.993403Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:36:40.023119Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [1:1461:3110], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:36:40.023193Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:36:40.023230Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:36:40.023390Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [1:1465:3114], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:36:40.023414Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:36:40.023441Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:36:40.184978Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:36:40.185068Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:36:40.185201Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:36:40.185237Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:36:40.445696Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037893, clientId# 
[1:1497:3142], serverId# [1:1498:3143], sessionId# [0:0:0] 2025-06-25T14:36:40.445884Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jykr9zfx5j31025930a15esf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTliNTBiYjYtOTAyMzAxNDUtNGZmNWVkODgtMTBiNWQ4NmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } assertion failed at ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp:925, virtual void NKikimr::NTestSuiteIncrementalBackup::TTestCaseE2EBackupCollection::Execute_(NUnitTest::TTestContext &): (expected == actual) failed: ("{ items { uint32_value: 2 } items { uint32_value: 200 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }" != "{ items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }") , with diff: "{ items { uint32_value: (|1 } items { uint3)2(|_value:) (|10 )} (|}, { )items { uint32_value: 2(0| } items { uint32_value: 2)0 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }" TBackTrace::Capture()+28 (0x19D0DECC) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x1A1CCF90) NKikimr::NTestSuiteIncrementalBackup::TTestCaseE2EBackupCollection::Execute_(NUnitTest::TTestContext&)+10537 (0x19918669) std::__y1::__function::__func, void ()>::operator()()+280 (0x199216C8) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x1A204176) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x1A1D3B19) NKikimr::NTestSuiteIncrementalBackup::TCurrentTest::Execute()+1204 (0x19920574) NUnitTest::TTestFactory::Execute()+2438 (0x1A1D53E6) NUnitTest::RunMain(int, char**)+5213 (0x1A1FE6ED) ??+0 (0x7F19C251CD90) __libc_start_main+128 (0x7F19C251CE40) _start+41 (0x16F11029) >> TConsoleTests::TestAttributes [GOOD] >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunning >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-53 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-54 >> TConsoleTests::TestRemoveSharedTenantAfterRemoveServerlessTenant [GOOD] >> TConsoleTests::TestRemoveServerlessTenant >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::ReadTableSplitFinished [GOOD] Test command err: 2025-06-25T14:36:36.149737Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:36.149899Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:36.149950Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001aec/r3tmp/tmpXXJQ2c/pdisk_1.dat 2025-06-25T14:36:36.515361Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:36:36.518596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:36.581110Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:36.589068Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862192400534 != 1750862192400538 2025-06-25T14:36:36.641347Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:36:36.642338Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:36:36.642854Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:36.642974Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:36.657516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:36.744989Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T14:36:36.745058Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:36:36.745189Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T14:36:36.919206Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T14:36:36.919330Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:36:36.919955Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:36:36.920043Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:36:36.920420Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:36:36.920632Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:36:36.920804Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:36:36.921121Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:36:36.922626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:36.923877Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:36:36.923945Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T14:36:36.971017Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:36:36.972162Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:36:36.976844Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:36:36.977179Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:36:37.067955Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:36:37.072952Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:36:37.073134Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:36:37.074849Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:36:37.074938Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:36:37.075002Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:36:37.075351Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T14:36:37.075497Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:36:37.075642Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:36:37.088915Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:36:37.174759Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:36:37.174996Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:36:37.175115Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:36:37.175152Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:36:37.175186Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:36:37.175245Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:36:37.175477Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:37.175528Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:37.175958Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:36:37.176058Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:36:37.176125Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:36:37.176177Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:36:37.176231Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:36:37.176281Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:36:37.184396Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:36:37.184498Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:36:37.184580Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:36:37.185151Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:37.185205Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:37.185254Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], 
sessionId# [0:0:0] 2025-06-25T14:36:37.185369Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:36:37.185413Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:36:37.185539Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:36:37.185781Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:36:37.185832Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:36:37.185908Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:36:37.185965Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14: ... D DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037896, TxId: 281474976715664, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:36:45.883598Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:1309:3022], Recipient [2:1037:2810]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037896 Status: RESPONSE_DATA TxId: 281474976715664 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\006\000\000\000b\005\035B\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 1 DataLastKey: "\001\000\004\000\000\000\006\000\000\000" 2025-06-25T14:36:45.883643Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:1037:2810] TxId# 281474976715663] Received stream data from ShardId# 72075186224037896 2025-06-25T14:36:45.883672Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:1037:2810] TxId# 281474976715663] Sending TEvStreamDataAck to [2:1309:3022] ShardId# 72075186224037896 2025-06-25T14:36:45.883762Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:1309:3022], Recipient [2:1037:2810]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715664 ShardId: 72075186224037896 2025-06-25T14:36:45.883787Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:1037:2810] TxId# 281474976715663] Received TEvStreamQuotaRequest from ShardId# 72075186224037896 2025-06-25T14:36:45.883821Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037896, TxId: 281474976715664, PendingAcks: 0 2025-06-25T14:36:45.884175Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:1036:2810], Recipient [2:1037:2810]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715663 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-25T14:36:45.884215Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:1037:2810] TxId# 281474976715663] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-25T14:36:45.884246Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:1037:2810] TxId# 281474976715663] Reserving quota 1 messages for ShardId# 72075186224037896 
2025-06-25T14:36:45.884290Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037896, TxId: 281474976715664, MessageQuota: 1 2025-06-25T14:36:45.889270Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037896, TxId: 281474976715664, MessageQuota: 1 2025-06-25T14:36:45.889524Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287429, Sender [2:1309:3022], Recipient [2:1037:2810]: NKikimrTx.TEvStreamQuotaRelease TxId: 281474976715664 ShardId: 72075186224037896 2025-06-25T14:36:45.889562Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:1037:2810] TxId# 281474976715663] Received TEvStreamQuotaRelease from ShardId# 72075186224037896 2025-06-25T14:36:45.889591Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2188: [ReadTable [2:1037:2810] TxId# 281474976715663] Released quota 1 reserved messages from ShardId# 72075186224037896 2025-06-25T14:36:45.889688Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037896 2025-06-25T14:36:45.889732Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715664, at: 72075186224037896 2025-06-25T14:36:45.889900Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:1208:2943], Recipient [2:1208:2943]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:45.889934Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:36:45.890007Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037896 2025-06-25T14:36:45.890042Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037896 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:36:45.890079Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715664] at 72075186224037896 for ReadTableScan 2025-06-25T14:36:45.890147Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715664] at 72075186224037896 on unit ReadTableScan 2025-06-25T14:36:45.890183Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715664] at 72075186224037896 error: , IsFatalError: 0 2025-06-25T14:36:45.890223Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715664] at 72075186224037896 is Executed 2025-06-25T14:36:45.890253Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715664] at 72075186224037896 executing on unit ReadTableScan 2025-06-25T14:36:45.890281Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715664] at 72075186224037896 to execution unit FinishPropose 2025-06-25T14:36:45.890309Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715664] at 72075186224037896 on unit FinishPropose 2025-06-25T14:36:45.890342Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715664] at 72075186224037896 is DelayComplete 2025-06-25T14:36:45.890366Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715664] at 72075186224037896 executing on unit FinishPropose 2025-06-25T14:36:45.890389Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715664] at 
72075186224037896 to execution unit CompletedOperations 2025-06-25T14:36:45.890412Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715664] at 72075186224037896 on unit CompletedOperations 2025-06-25T14:36:45.890481Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715664] at 72075186224037896 is Executed 2025-06-25T14:36:45.890510Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715664] at 72075186224037896 executing on unit CompletedOperations 2025-06-25T14:36:45.890531Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715664] at 72075186224037896 has finished 2025-06-25T14:36:45.890559Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037896 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:36:45.890585Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037896 2025-06-25T14:36:45.890611Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037896 has no attached operations 2025-06-25T14:36:45.890635Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037896 2025-06-25T14:36:45.890704Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037896 2025-06-25T14:36:45.890735Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715664] at 72075186224037896 on unit FinishPropose 2025-06-25T14:36:45.890769Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715664 at tablet 72075186224037896 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-25T14:36:45.890834Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037896 2025-06-25T14:36:45.891125Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:1208:2943], Recipient [2:1037:2810]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037896 Status: COMPLETE TxId: 281474976715664 Step: 0 OrderId: 281474976715664 ExecLatency: 0 ProposeLatency: 0 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037896 CpuTimeUsec: 361 } } CommitVersion { Step: 0 TxId: 281474976715664 } 2025-06-25T14:36:45.891161Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1850: [ReadTable [2:1037:2810] TxId# 281474976715663] Received stream complete from ShardId# 72075186224037896 2025-06-25T14:36:45.891223Z node 2 :TX_PROXY INFO: read_table_impl.cpp:2933: [ReadTable [2:1037:2810] TxId# 281474976715663] RESPONSE Status# ExecComplete prepare time: 0.038929s execute time: 0.852040s total time: 0.890969s 2025-06-25T14:36:45.891642Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1037:2810], Recipient [2:837:2662]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-25T14:36:45.891966Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1037:2810], Recipient [2:947:2744]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 
2025-06-25T14:36:45.892169Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1037:2810], Recipient [2:952:2746]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-25T14:36:45.892548Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1037:2810], Recipient [2:1203:2941]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-25T14:36:45.892729Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1037:2810], Recipient [2:1208:2943]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-25T14:36:45.892960Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1312:3025], Recipient [2:1094:2859]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:45.892989Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:45.893027Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037893, clientId# [2:1310:3023], serverId# [2:1312:3025], sessionId# [0:0:0] 2025-06-25T14:36:45.893149Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1037:2810], Recipient [2:1094:2859]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-25T14:36:45.893276Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1313:3026], Recipient [2:1097:2861]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:45.893303Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:36:45.893329Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1311:3024], serverId# [2:1313:3026], sessionId# [0:0:0] 2025-06-25T14:36:45.893443Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:1037:2810], Recipient [2:1097:2861]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 >> ColumnStatistics::CountMinSketchStatistics |81.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |81.1%| [LD] {RESULT} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |81.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-67 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-68 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-68 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69 |81.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |81.1%| [LD] {RESULT} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |81.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut >> TConsoleTests::TestAuthorizationExtSubdomain 
[GOOD] |81.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TConsoleTests::TestAttributesExtSubdomain |81.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-true |81.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TPQTest::TestSetClientOffset [GOOD] >> TPQTest::TestStatusWithMultipleConsumers >> TSchemeShardUserAttrsTest::SpecialAttributes >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnVDiskSpaceStatus [GOOD] >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnUsage >> ResourcePoolsDdl::TestResourcePoolAcl [GOOD] >> ResourcePoolsDdl::TestWorkloadConfigOnServerless >> KqpWorkloadService::WorkloadServiceDisabledByFeatureFlagOnServerless [GOOD] >> KqpWorkloadService::WorkloadServiceDisabledByInvalidDatabasePath >> KqpPg::ReadPgArray >> KqpPg::NoTableQuery+useSink >> TPQTest::TestStatusWithMultipleConsumers [GOOD] >> KqpPg::ReadPgArray [GOOD] >> KqpPg::TableArrayInsert+useSink >> TSchemeShardUserAttrsTest::SpecialAttributes [GOOD] >> ReadOnlyVDisk::TestWrites [GOOD] >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-false [GOOD] >> KqpWorkloadService::TestQueryCancelAfterPoolWithLimits [GOOD] >> KqpWorkloadService::TestLargeConcurrentQueryLimit >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-19 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-20 >> KqpPg::TypeCoercionBulkUpsert >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorks [GOOD] >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorksNewApi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::SpecialAttributes [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:36:50.870735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:36:50.870839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:36:50.870891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:36:50.870927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:36:50.870977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:36:50.871023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:36:50.871080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 
1.000000s, InflightLimit# 10 2025-06-25T14:36:50.871150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:36:50.871933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:36:50.872270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:36:50.961408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:50.961496Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:50.983692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:36:50.984201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:36:50.984383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:36:50.991336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:36:50.991761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:36:50.992503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:50.992818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:36:50.996863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:36:50.997085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:36:50.998342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:36:50.998405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:36:50.998543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:36:50.998596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:36:50.998642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:36:50.998728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:36:51.009229Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: 
[1:15:2062] 2025-06-25T14:36:51.163355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:36:51.163563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:51.163780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:36:51.163829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:36:51.164089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:36:51.164168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:51.166931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:51.167199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:36:51.167410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:51.167517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:36:51.167592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:36:51.167646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:36:51.170431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:51.170546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:36:51.170593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:36:51.173525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:51.173583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:51.173653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:51.173715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:36:51.185514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:36:51.187667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:36:51.187846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:36:51.188838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:51.188975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:36:51.189020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:51.189379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:36:51.189441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:51.189613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:36:51.189693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:36:51.192016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:36:51.192060Z node 1 :FLAT_TX_SCHEMESHARD ... 
] was 2 2025-06-25T14:36:51.242543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:36:51.242621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:36:51.242650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:36:51.242676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-25T14:36:51.242703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:36:51.242761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-25T14:36:51.245165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-25T14:36:51.245318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000002 2025-06-25T14:36:51.245831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:51.245959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:36:51.246016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 102:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000002, at schemeshard: 72057594046678944 2025-06-25T14:36:51.246169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 240 2025-06-25T14:36:51.246333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:36:51.246410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:36:51.247162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:36:51.248995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:36:51.250378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:36:51.250431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:36:51.250564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:36:51.250667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:36:51.250701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-25T14:36:51.250747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:36:51.251053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:36:51.251101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:36:51.251182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:36:51.251228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:36:51.251281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:36:51.251311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:36:51.251345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-25T14:36:51.251380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:36:51.251409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:36:51.251439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:36:51.251527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:36:51.251582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-25T14:36:51.251622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, 
LocalPathId: 1], 5 2025-06-25T14:36:51.251645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T14:36:51.252337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:36:51.252433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:36:51.252468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:36:51.252516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:36:51.252566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:36:51.253225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:36:51.253320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:36:51.253348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:36:51.253509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:36:51.253545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:36:51.253603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:36:51.256844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:36:51.256949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 2025-06-25T14:36:51.259804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: "DirD" } AlterUserAttributes { UserAttributes { Key: "__extra_path_symbols_allowed" Value: "./_" } } } 
TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:36:51.260256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/DirD, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:36:51.260384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 103:1, propose status:StatusInvalidParameter, reason: UserAttributes: attribute '__extra_path_symbols_allowed' has invalid value './_', forbidden symbols are found, at schemeshard: 72057594046678944 2025-06-25T14:36:51.262883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 103, response: Status: StatusInvalidParameter Reason: "UserAttributes: attribute \'__extra_path_symbols_allowed\' has invalid value \'./_\', forbidden symbols are found" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:36:51.263152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: UserAttributes: attribute '__extra_path_symbols_allowed' has invalid value './_', forbidden symbols are found, operation: CREATE DIRECTORY, path: /MyRoot/DirD TestModificationResult got TxId: 103, wait until txId: 103 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestWrites [GOOD] Test command err: RandomSeed# 2064534554739857765 === Trying to put and get a blob === SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #0 to read-only === Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Write 10 blobs, expect some VDisks refuse parts but writes go through === SEND TEvPut with key [1:1:1:0:0:32768:0] 2025-06-25T14:36:47.101399Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:2:0:0:131072:0] 2025-06-25T14:36:47.113752Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-06-25T14:36:47.123978Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-06-25T14:36:47.131075Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:5:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] 
Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:6:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:7:0:0:32768:0] 2025-06-25T14:36:47.144047Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:8:0:0:131072:0] 2025-06-25T14:36:47.146989Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:9:0:0:32768:0] 2025-06-25T14:36:47.154820Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:10:0:0:131072:0] 2025-06-25T14:36:47.162669Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 11 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Put 2 more VDisks to read-only === Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Write 10 more blobs, expect errors === SEND TEvPut 
with key [1:1:11:0:0:32768:0] 2025-06-25T14:36:48.489978Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:48.490113Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:48.490235Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:48.491173Z 1 00h03m30.110512s :BS_PROXY_PUT ERROR: [aed6df1309e0cc5b] Result# TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:11:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 5 Situations# SUUUUU } { OrderNumber# 6 Situations# USUUUU } { OrderNumber# 7 Situations# UUSUUU } { OrderNumber# 0 Situations# UUUEUU } { OrderNumber# 1 Situations# UUUUEU } { OrderNumber# 2 Situations# UUUUUE } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:12:0:0:131072:0] 2025-06-25T14:36:48.493109Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:48.493270Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:48.494413Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:12:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part 
situations# [ { OrderNumber# 4 Situations# SUUUUU } { OrderNumber# 5 Situations# USUUUU } { OrderNumber# 6 Situations# UUSUUU } { OrderNumber# 7 Situations# UUUSUU } { OrderNumber# 0 Situations# UUUUEU } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUEU } { OrderNumber# 3 Situations# UUUUUS } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:13:0:0:32768:0] 2025-06-25T14:36:48.496287Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:48.497452Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:48.498366Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:32768:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:13:0:0:32768:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:14:0:0:131072:0] 2025-06-25T14:36:48.499685Z 3 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:48.500915Z 1 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:48.501540Z 2 00h03m30.110512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:14:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mo ... 
ey [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:11:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:11:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:12:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:12:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:13:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:13:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:14:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:14:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:15:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:15:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:16:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:16:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:17:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:17:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:18:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:18:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:19:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:19:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:20:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:20:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #0 === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Write 10 blobs, expect some VDisks refuse parts but the writes still go through === SEND TEvPut with key [1:1:21:0:0:32768:0] 2025-06-25T14:36:51.105717Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:51.105908Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:22:0:0:131072:0] 2025-06-25T14:36:51.109861Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:51.111383Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only 
Sender# [1:5331:713] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:23:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:24:0:0:131072:0] 2025-06-25T14:36:51.116216Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:25:0:0:32768:0] 2025-06-25T14:36:51.119384Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:51.119512Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:26:0:0:131072:0] 2025-06-25T14:36:51.122682Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:51.122789Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:27:0:0:32768:0] 2025-06-25T14:36:51.127276Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:51.127388Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:28:0:0:131072:0] 2025-06-25T14:36:51.130436Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:51.130707Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:29:0:0:32768:0] 2025-06-25T14:36:51.134009Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:51.134158Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} SEND TEvPut with key [1:1:30:0:0:131072:0] 2025-06-25T14:36:51.137614Z 3 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:51.137765Z 2 00h08m00.161536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:131072:0] Status# OK StatusFlags# { } 
ApproximateFreeSpaceShare# 0.999963} === Read all 31 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:7:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:7:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:8:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:8:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:9:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:9:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:10:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:10:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:11:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:11:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:12:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:12:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:13:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:13:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:14:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:14:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:15:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:15:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:16:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:16:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:17:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:17:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:18:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:18:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:19:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:19:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:20:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:20:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:21:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:21:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:22:0:0:131072:0] 
TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:22:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:23:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:23:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:24:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:24:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:25:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:25:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:26:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:26:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:27:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:27:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:28:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:28:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:29:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:29:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:30:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:30:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:36:48.670253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:36:48.670352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:36:48.670396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:36:48.670431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:36:48.670474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:36:48.670504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:36:48.670557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:36:48.670628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-06-25T14:36:48.671374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:36:48.671714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:36:48.752908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:48.752977Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:48.775149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:36:48.775573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:36:48.775756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:36:48.781746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:36:48.782109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:36:48.782746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:48.783022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:36:48.786857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:36:48.787056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:36:48.788281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:36:48.788360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:36:48.788518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:36:48.788572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:36:48.788619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:36:48.788724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:36:48.795463Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:36:48.940046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" 
Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:36:48.940275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:48.940505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:36:48.940558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:36:48.940821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:36:48.940893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:48.943217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:48.943410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:36:48.943613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:48.943704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:36:48.943752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:36:48.943798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:36:48.946309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:48.946386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:36:48.946430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:36:48.948137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:48.948192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:48.948262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:48.948330Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:36:48.951811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:36:48.953857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:36:48.954038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:36:48.954928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:48.955059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:36:48.955110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:48.955442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:36:48.955497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:48.955675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:36:48.955768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:36:48.957894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:36:48.957941Z node 1 :FLAT_TX_SCHEMESHARD ... 
RDINATOR: Add transaction: 175 at step: 5000076 FAKE_COORDINATOR: advance: minStep5000076 State->FrontStep: 5000075 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 175 at step: 5000076 2025-06-25T14:36:51.359269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000076, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:51.359348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 175 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000076 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:36:51.359380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:157: TDropExtSubdomain TPropose, operationId: 175:0 HandleReply TEvOperationPlan, step: 5000076, at schemeshard: 72057594046678944 2025-06-25T14:36:51.359447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5302: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 26] name: USER_0 type: EPathTypeExtSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 175 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:36:51.359487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 26] 2025-06-25T14:36:51.359512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 175:0 128 -> 134 2025-06-25T14:36:51.360117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-25T14:36:51.362626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-25T14:36:51.363133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-25T14:36:51.363168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:104: TDropExtSubdomain TDeleteExternalShards, operationId: 175:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:36:51.363268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 175:0 134 -> 135 2025-06-25T14:36:51.363446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:36:51.363489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 FAKE_COORDINATOR: Erasing txId 175 2025-06-25T14:36:51.365036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:36:51.365079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:36:51.365189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 
72057594046678944, LocalPathId: 26] 2025-06-25T14:36:51.365272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:36:51.365299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 175, path id: 1 2025-06-25T14:36:51.365329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 175, path id: 26 2025-06-25T14:36:51.365587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-25T14:36:51.365626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:400: [72057594046678944] TDeleteParts opId# 175:0 ProgressState 2025-06-25T14:36:51.365652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 175:0 135 -> 240 2025-06-25T14:36:51.366287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-25T14:36:51.366358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-25T14:36:51.366391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2025-06-25T14:36:51.366413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 103 2025-06-25T14:36:51.366441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:36:51.367073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-25T14:36:51.367130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-25T14:36:51.367147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2025-06-25T14:36:51.367173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 26], version: 18446744073709551615 2025-06-25T14:36:51.367193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 3 2025-06-25T14:36:51.367241Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 175, ready parts: 0/1, is published: true 2025-06-25T14:36:51.368909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-25T14:36:51.368968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 175:0 ProgressState 2025-06-25T14:36:51.369047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#175:0 progress is 1/1 2025-06-25T14:36:51.369070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-25T14:36:51.369106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#175:0 progress is 1/1 2025-06-25T14:36:51.369129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-25T14:36:51.369172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 175, ready parts: 1/1, is published: true 2025-06-25T14:36:51.369209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-25T14:36:51.369232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 175:0 2025-06-25T14:36:51.369249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 175:0 2025-06-25T14:36:51.369289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 2025-06-25T14:36:51.369520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:36:51.369549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-25T14:36:51.369592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 1 2025-06-25T14:36:51.370025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:36:51.370071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-25T14:36:51.370123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:36:51.370457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-25T14:36:51.371715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-25T14:36:51.374329Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:36:51.374439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 175, wait until txId: 175 TestWaitNotification wait txId: 175 2025-06-25T14:36:51.375581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 175: send EvNotifyTxCompletion 2025-06-25T14:36:51.375622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 175 2025-06-25T14:36:51.376885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 175, at schemeshard: 72057594046678944 2025-06-25T14:36:51.376989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 175: got EvNotifyTxCompletionResult 2025-06-25T14:36:51.377032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 175: satisfy waiter [1:2611:4600] TestWaitNotification: OK eventTxId 175 >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEventsWithSlowInitialGC >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-66 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-67 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestStatusWithMultipleConsumers [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] 2025-06-25T14:31:23.592641Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:23.592728Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927938 is [1:157:2176] sender: [1:158:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-25T14:31:23.615985Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:23.637060Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 
1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:31:23.638230Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-25T14:31:23.640733Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-25T14:31:23.643094Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-25T14:31:23.644894Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:190:2201] 2025-06-25T14:31:23.653034Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|8aed1a8a-74347428-5a17700d-93ca3776_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 2025-06-25T14:31:23.653580Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|8c844626-8012f38f-35d47d5e-45d2a543_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-06-25T14:31:23.677114Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|502f838-24d6bfc3-d883cbd5-7afdfcb_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:112:2057] recipient: [2:105:2137] 2025-06-25T14:31:24.204784Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:24.204876Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:158:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:183:2057] recipient: [2:14:2061] 2025-06-25T14:31:24.222525Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:24.223570Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 2 actor [2:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-25T14:31:24.224181Z node 2 :PERSQUEUE INFO: 
partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:189:2200] 2025-06-25T14:31:24.226426Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:189:2200] 2025-06-25T14:31:24.229808Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:190:2201] 2025-06-25T14:31:24.231283Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:190:2201] 2025-06-25T14:31:24.237789Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|f4424169-4e71bb18-657e22bf-a02503ea_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 2025-06-25T14:31:24.238278Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|f8334dce-90e3bfa9-7b713300-82ccf3d5_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-06-25T14:31:24.262974Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|eacf4688-a2110ebb-60c779a5-c5ad7d2c_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:112:2057] recipient: [3:105:2137] 2025-06-25T14:31:24.579107Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:24.579178Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927938 is [3:157:2176] sender: [3:158:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:183:2057] recipient: [3:14:2061] 2025-06-25T14:31:24.600563Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:24.601313Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 3 actor [3:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-06-25T14:31:24.601841Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:189:2200] 2025-06-25T14:31:24.603997Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 
'rt3.dc1--asdfgs--topic' partition 0 generation 2 [3:189:2200] 2025-06-25T14:31:24.605673Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [3:190:2201] 2025-06-25T14:31:24.607347Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [3:190:2201] 2025-06-25T14:31:24.612150Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|6c8df4e7-4d5c0e68-a14d7e69-d82c1f97_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 2025-06-25T14:31:24.612526Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner2|e5678b10-31eb5f7a-e51600ee-e2dc8edd_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner2 2025-06-25T14:31:24.631698Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|960309d7-57f68e6-2077f133-243b062c_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner owner1 Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:107:2057] recipient: [4:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:107:2057] recipient: [4:105:2137] Leader for TabletID 72057594037927937 is [4:111:2141] sender: [4:112:2057] recipient: [4:105:2137] 2025-06-25T14:31:25.091178Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:25.091251Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:153:2057] recipient: [4:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:153:2057] recipient: [4:151:2172] Leader for TabletID 72057594037927938 is [4:157:2176] sender: [4:158:2057] recipient: [4:151:2172] Leader for TabletID 72057594037927937 is [4:111:2141] sender: [4:183:2057] recipient: [4:14:2061] 2025-06-25T14:31:25.108117Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:31:25.109063Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 4 actor [4:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 4 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 4 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 4 Important: false } 2025-06-25T14:31:25.109701Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: State ... 
4:462:2454] connected; active server actors: 1 2025-06-25T14:36:50.796986Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:467:2459] connected; active server actors: 1 2025-06-25T14:36:50.799610Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:472:2464] connected; active server actors: 1 2025-06-25T14:36:50.802444Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:477:2469] connected; active server actors: 1 2025-06-25T14:36:50.804973Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:482:2474] connected; active server actors: 1 2025-06-25T14:36:50.807604Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:487:2479] connected; active server actors: 1 2025-06-25T14:36:50.810221Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:492:2484] connected; active server actors: 1 2025-06-25T14:36:50.812454Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:497:2489] connected; active server actors: 1 2025-06-25T14:36:50.816933Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:502:2494] connected; active server actors: 1 2025-06-25T14:36:50.819911Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:507:2499] connected; active server actors: 1 2025-06-25T14:36:50.821784Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:512:2504] connected; active server actors: 1 2025-06-25T14:36:50.823882Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:517:2509] connected; active server actors: 1 2025-06-25T14:36:50.826543Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:522:2514] connected; active server actors: 1 2025-06-25T14:36:50.829034Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:527:2519] connected; active server actors: 1 2025-06-25T14:36:50.833345Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:532:2524] connected; active server actors: 1 2025-06-25T14:36:50.835802Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:537:2529] connected; active server actors: 1 2025-06-25T14:36:50.843480Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:542:2534] connected; active server actors: 1 2025-06-25T14:36:50.846593Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:547:2539] connected; active server actors: 1 2025-06-25T14:36:50.849900Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:552:2544] connected; active server actors: 1 
2025-06-25T14:36:50.854242Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:557:2549] connected; active server actors: 1 2025-06-25T14:36:50.857585Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:562:2554] connected; active server actors: 1 2025-06-25T14:36:50.866016Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:567:2559] connected; active server actors: 1 2025-06-25T14:36:50.869686Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:572:2564] connected; active server actors: 1 2025-06-25T14:36:50.872918Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:577:2569] connected; active server actors: 1 2025-06-25T14:36:50.875804Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:582:2574] connected; active server actors: 1 2025-06-25T14:36:50.878478Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:587:2579] connected; active server actors: 1 2025-06-25T14:36:50.880992Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:592:2584] connected; active server actors: 1 2025-06-25T14:36:50.883498Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:597:2589] connected; active server actors: 1 2025-06-25T14:36:50.886082Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:602:2594] connected; active server actors: 1 2025-06-25T14:36:50.888368Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:607:2599] connected; active server actors: 1 2025-06-25T14:36:50.892594Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:612:2604] connected; active server actors: 1 2025-06-25T14:36:50.895079Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:617:2609] connected; active server actors: 1 2025-06-25T14:36:50.897412Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:622:2614] connected; active server actors: 1 2025-06-25T14:36:50.899597Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:627:2619] connected; active server actors: 1 2025-06-25T14:36:50.901813Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:632:2624] connected; active server actors: 1 2025-06-25T14:36:50.904053Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:637:2629] connected; active server actors: 1 2025-06-25T14:36:50.909000Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:642:2634] connected; active server actors: 1 2025-06-25T14:36:50.911758Z node 174 :PERSQUEUE_READ_BALANCER 
INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:647:2639] connected; active server actors: 1 2025-06-25T14:36:50.914719Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:652:2644] connected; active server actors: 1 2025-06-25T14:36:50.917122Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:657:2649] connected; active server actors: 1 2025-06-25T14:36:50.922814Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:662:2654] connected; active server actors: 1 2025-06-25T14:36:50.925411Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:667:2659] connected; active server actors: 1 2025-06-25T14:36:50.927754Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:672:2664] connected; active server actors: 1 2025-06-25T14:36:50.930175Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:677:2669] connected; active server actors: 1 2025-06-25T14:36:50.932522Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:682:2674] connected; active server actors: 1 2025-06-25T14:36:50.936695Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:687:2679] connected; active server actors: 1 2025-06-25T14:36:50.939829Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:692:2684] connected; active server actors: 1 2025-06-25T14:36:50.944070Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:697:2689] connected; active server actors: 1 2025-06-25T14:36:50.946964Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:702:2694] connected; active server actors: 1 2025-06-25T14:36:50.952916Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:707:2699] connected; active server actors: 1 2025-06-25T14:36:50.956509Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:712:2704] connected; active server actors: 1 2025-06-25T14:36:50.959358Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:717:2709] connected; active server actors: 1 2025-06-25T14:36:50.961986Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:722:2714] connected; active server actors: 1 2025-06-25T14:36:50.964700Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:727:2719] connected; active server actors: 1 2025-06-25T14:36:50.967149Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:732:2724] connected; active server actors: 1 2025-06-25T14:36:50.969553Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: 
[72057594037927938][rt3.dc1--topic] pipe [174:737:2729] connected; active server actors: 1 2025-06-25T14:36:50.972399Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:742:2734] connected; active server actors: 1 2025-06-25T14:36:50.975468Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:747:2739] connected; active server actors: 1 2025-06-25T14:36:50.977989Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:752:2744] connected; active server actors: 1 2025-06-25T14:36:50.980771Z node 174 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [174:757:2749], now have 1 active actors on pipe 2025-06-25T14:36:50.993910Z node 174 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [174:760:2752], now have 1 active actors on pipe 2025-06-25T14:36:50.995201Z node 174 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [174:763:2755], now have 1 active actors on pipe 2025-06-25T14:36:50.996511Z node 174 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [174:766:2758] connected; active server actors: 1 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-50 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-51 >> ReadOnlyVDisk::TestGarbageCollect [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-54 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-55 >> KqpPg::JoinWithQueryService+StreamLookup >> KqpPg::TypeCoercionInsert-useSink >> KqpPg::CreateTableBulkUpsertAndRead >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEventsWithSlowInitialGC [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-70 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestGarbageCollect [GOOD] Test command err: RandomSeed# 67301576857641405 SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} SEND TEvPut with key [1:1:1:0:0:32768:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 2 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:1:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:2:0:0:131072:0] 2025-06-25T14:36:44.625911Z 1 00h01m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} 2025-06-25T14:36:44.630609Z 1 
00h01m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] SEND TEvGet with key [1:1:2:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-06-25T14:36:45.698260Z 1 00h03m20.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:45.699144Z 2 00h03m20.160512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} 2025-06-25T14:36:46.274116Z 1 00h04m20.161024s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:46.274321Z 2 00h04m20.161024s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] SEND TEvGet with key [1:1:3:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-06-25T14:36:46.618217Z 1 00h05m00.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:46.619470Z 2 00h05m00.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:46.620609Z 3 00h05m00.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:46.621027Z 1 00h05m00.200000s :BS_PROXY_PUT ERROR: [2e7fe36a708f3aa2] Result# TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:4:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999988} GroupId# 2181038080 Marker# BPP12 TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:4:0:0:131072:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 1 VDiskId# [82000000:1:0:1:0] NodeId# 2 ErrorReasons# [ "VDisk is in read-only mode", ] } { OrderNumber# 2 VDiskId# [82000000:1:0:2:0] NodeId# 3 ErrorReasons# [ "VDisk is in read-only mode", ] } ] Part situations# [ { OrderNumber# 3 Situations# 
PUUUUU } { OrderNumber# 4 Situations# UPUUUU } { OrderNumber# 5 Situations# UUPUUU } { OrderNumber# 6 Situations# UUUPUU } { OrderNumber# 7 Situations# UUUUPU } { OrderNumber# 0 Situations# UUUUUE } { OrderNumber# 1 Situations# UUUUUE } { OrderNumber# 2 Situations# UUUUUE } ] " ApproximateFreeSpaceShare# 0.999988} 2025-06-25T14:36:47.060211Z 1 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:47.060428Z 2 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:47.060482Z 3 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] === Putting VDisk #3 to read-only === Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] 2025-06-25T14:36:47.787970Z 1 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:47.788176Z 2 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:47.788242Z 3 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:47.788290Z 4 00h07m40.260512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5338:720] === Putting VDisk #4 to read-only === Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] 2025-06-25T14:36:48.144431Z 1 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:48.144644Z 2 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:48.144722Z 3 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:48.144775Z 4 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5338:720] 2025-06-25T14:36:48.144826Z 5 00h08m20.262048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5345:727] === Putting VDisk #5 to read-only === Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] 2025-06-25T14:36:48.479030Z 1 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:48.479270Z 2 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:48.479336Z 3 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:48.479395Z 4 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5338:720] 2025-06-25T14:36:48.479455Z 5 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5345:727] 
2025-06-25T14:36:48.479509Z 6 00h09m00.310512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5352:734] === Putting VDisk #6 to read-only === Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] 2025-06-25T14:36:48.724814Z 1 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5317:699] 2025-06-25T14:36:48.725036Z 2 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:48.725099Z 3 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:48.725155Z 4 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5338:720] 2025-06-25T14:36:48.725212Z 5 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5345:727] 2025-06-25T14:36:48.725267Z 6 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5352:734] 2025-06-25T14:36:48.725323Z 7 00h09m40.312048s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5359:741] === Putting VDisk #0 to normal === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] 2025-06-25T14:36:48.986179Z 2 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:5324:706] 2025-06-25T14:36:48.986280Z 3 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:48.986340Z 4 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5338:720] 2025-06-25T14:36:48.986395Z 5 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5345:727] 2025-06-25T14:36:48.986450Z 6 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5352:734] 2025-06-25T14:36:48.986504Z 7 00h10m20.360512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5359:741] === Putting VDisk #1 to normal === Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] 2025-06-25T14:36:49.392762Z 3 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:5331:713] 2025-06-25T14:36:49.392889Z 4 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5338:720] 2025-06-25T14:36:49.392956Z 5 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5345:727] 2025-06-25T14:36:49.393018Z 6 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5352:734] 2025-06-25T14:36:49.393078Z 7 00h11m00.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5359:741] === Putting 
VDisk #2 to normal === Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] 2025-06-25T14:36:49.815967Z 4 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:5338:720] 2025-06-25T14:36:49.816068Z 5 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5345:727] 2025-06-25T14:36:49.816125Z 6 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5352:734] 2025-06-25T14:36:49.816178Z 7 00h11m40.410512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5359:741] === Putting VDisk #3 to normal === Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] 2025-06-25T14:36:50.258806Z 5 00h12m20.450512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:5345:727] 2025-06-25T14:36:50.258906Z 6 00h12m20.450512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5352:734] 2025-06-25T14:36:50.258965Z 7 00h12m20.450512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5359:741] Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] 2025-06-25T14:36:51.422596Z 6 00h14m00.461536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:5352:734] 2025-06-25T14:36:51.422693Z 7 00h14m00.461536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5359:741] SEND TEvGet with key [1:1:3:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] 2025-06-25T14:36:51.960684Z 7 00h14m40.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:5359:741] SEND TEvGet with key [1:1:3:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}} Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] SEND TEvGet with key [1:1:3:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:1:0] NODATA Size# 0}} SEND TEvPut with key [1:1:4:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} SEND TEvGet with key [1:1:4:0:0:1:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:1:0] NODATA Size# 0}} >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-68 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-69 >> KqpPg::CreateTableSerialColumns+useSink >> KqpPg::NoTableQuery+useSink [GOOD] >> KqpPg::NoTableQuery-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEventsWithSlowInitialGC [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 
72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! 
new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! 
new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:86:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:88:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:88:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! 
new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:88:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:91:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:92:2057] recipient: [11:90:2118] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:94:2057] recipient: [11:90:2118] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:93:2119] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:113:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:59:2099]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:89:2057] recipient: [12:38:2085] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:92:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:93:2057] recipient: [12:91:2119] Leader for TabletID 72057594037927937 is [12:94:2120] sender: [12:95:2057] recipient: [12:91:2119] !Reboot 72057594037927937 (actor [12:59:2099]) rebooted! !Reboot 72057594037927937 (actor [12:59:2099]) tablet resolver refreshed! new actor is[12:94:2120] Leader for TabletID 72057594037927937 is [12:94:2120] sender: [12:114:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:60:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:77:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:92:2057] recipient: [13:38:2085] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:95:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:96:2057] recipient: [13:94:2122] Leader for TabletID 72057594037927937 is [13:97:2123] sender: [13:98:2057] recipient: [13:94:2122] !Reboot 72057594037927937 (actor [13:59:2099]) rebooted! !Reboot 72057594037927937 (actor [13:59:2099]) tablet resolver refreshed! 
new actor is[13:97:2123] Leader for TabletID 72057594037927937 is [13:97:2123] sender: [13:183:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:60:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:77:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:92:2057] recipient: [14:38:2085] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:94:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:96:2057] recipient: [14:95:2122] Leader for TabletID 72057594037927937 is [14:97:2123] sender: [14:98:2057] recipient: [14:95:2122] !Reboot 72057594037927937 (actor [14:59:2099]) rebooted! !Reboot 72057594037927937 (actor [14:59:2099]) tablet resolver refreshed! new actor is[14:97:2123] Leader for TabletID 72057594037927937 is [14:97:2123] sender: [14:183:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:60:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:77:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:93:2057] recipient: [15:38:2085] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:96:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:97:2057] recipient: [15:95:2122] Leader for TabletID 72057594037927937 is [15:98:2123] sender: [15:99:2057] recipient: [15:95:2122] !Reboot 72057594037927937 (actor [15:59:2099]) rebooted! !Reboot 72057594037927937 (actor [15:59:2099]) tablet resolver refreshed! 
new actor is[15:98:2123] Leader for TabletID 72057594037927937 is [15:98:2123] sender: [15:184:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:60:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:77:2057] recipient: [16:14:2061] >> TConsoleTests::TestRemoveServerlessTenant [GOOD] >> TConsoleTests::TestRegisterComputationalUnitsForPending >> KqpWorkloadService::WorkloadServiceDisabledByInvalidDatabasePath [GOOD] >> KqpWorkloadService::TestZeroQueueSizeManyQueries >> ReadOnlyVDisk::TestReads [GOOD] >> TConsoleTests::TestAttributesExtSubdomain [GOOD] >> TConsoleTests::TestDatabaseQuotas >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-true [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestReads [GOOD] Test command err: RandomSeed# 12193052465750692501 === Trying to put and get a blob === SEND TEvPut with key [1:1:0:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #0 to read-only === Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #1 to read-only === Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #2 to read-only === Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #3 to read-only === Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #4 to read-only === Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #5 to read-only === Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Putting VDisk #6 to read-only === Setting VDisk read-only to 1 for position 6 
Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #0 === Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #1 === Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #2 === Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #3 === Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #4 === Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #5 === Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} === Restoring to normal VDisk #6 === Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] === Read all 1 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} >> KqpPg::InsertFromSelect_Simple+useSink >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterExtSubdomain-ExternalSchemeShard-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:36:49.987142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:36:49.987242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: 
Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:36:49.987282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:36:49.987314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:36:49.987354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:36:49.987381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:36:49.987429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:36:49.987501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:36:49.988186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:36:49.991924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:36:50.085010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:50.085069Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:50.107674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:36:50.108109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:36:50.108291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:36:50.114576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:36:50.114855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:36:50.115487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:50.115736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:36:50.118955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:36:50.119140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:36:50.120225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:36:50.120290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:36:50.120462Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:36:50.120529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:36:50.120576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:36:50.120669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:36:50.128460Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:36:50.254127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:36:50.254331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:50.254535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:36:50.254588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:36:50.254776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:36:50.254842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:50.257613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:50.257846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:36:50.258075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:50.258175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:36:50.258221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 
2025-06-25T14:36:50.258275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:36:50.260605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:50.260680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:36:50.260724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:36:50.263342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:50.263425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:36:50.263508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:50.263590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:36:50.266476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:36:50.268340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:36:50.268585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:36:50.269593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:36:50.269734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:36:50.269797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:50.270126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:36:50.270191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:36:50.270377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:36:50.270486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:36:50.273494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:36:50.273544Z node 1 :FLAT_TX_SCHEMESHARD ... -25T14:36:56.265277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 175, path id: 1 2025-06-25T14:36:56.265323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 175, path id: 26 2025-06-25T14:36:56.265614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-25T14:36:56.265652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:400: [72057594046678944] TDeleteParts opId# 175:0 ProgressState 2025-06-25T14:36:56.265689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 175:0 135 -> 240 2025-06-25T14:36:56.266854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-25T14:36:56.266955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-25T14:36:56.266992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2025-06-25T14:36:56.267018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 103 2025-06-25T14:36:56.267067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:36:56.268599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-25T14:36:56.268712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-25T14:36:56.268740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 175 2025-06-25T14:36:56.268767Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 26], version: 18446744073709551615 2025-06-25T14:36:56.268805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 6 2025-06-25T14:36:56.268873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 175, ready parts: 0/1, is published: true 2025-06-25T14:36:56.270735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:74 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:36:56.270768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:73 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:36:56.270783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:75 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:36:56.270936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-25T14:36:56.270964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 175:0 ProgressState 2025-06-25T14:36:56.271018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#175:0 progress is 1/1 2025-06-25T14:36:56.271038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-25T14:36:56.271070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#175:0 progress is 1/1 2025-06-25T14:36:56.271091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-25T14:36:56.271128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 175, ready parts: 1/1, is published: true 2025-06-25T14:36:56.271152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-25T14:36:56.271185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 175:0 2025-06-25T14:36:56.271204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 175:0 2025-06-25T14:36:56.271310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 5 2025-06-25T14:36:56.272231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-25T14:36:56.272788Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 74 TxId_Deprecated: 74 TabletID: 72075186233409619 2025-06-25T14:36:56.274157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 74 ShardOwnerId: 72057594046678944 ShardLocalIdx: 74, at schemeshard: 72057594046678944 2025-06-25T14:36:56.274349Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 4 Forgetting tablet 72075186233409619 2025-06-25T14:36:56.274945Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 73 TxId_Deprecated: 73 TabletID: 72075186233409618 2025-06-25T14:36:56.275650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:36:56.277942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 73 ShardOwnerId: 72057594046678944 ShardLocalIdx: 73, at schemeshard: 72057594046678944 2025-06-25T14:36:56.278150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 3 2025-06-25T14:36:56.278502Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 75 TxId_Deprecated: 75 TabletID: 72075186233409620 Forgetting tablet 72075186233409618 Forgetting tablet 72075186233409620 2025-06-25T14:36:56.279753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 75 ShardOwnerId: 72057594046678944 ShardLocalIdx: 75, at schemeshard: 72057594046678944 2025-06-25T14:36:56.279900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 2025-06-25T14:36:56.280790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:36:56.280830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-25T14:36:56.280907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 1 2025-06-25T14:36:56.281808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-25T14:36:56.281964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:36:56.282001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-25T14:36:56.282055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:36:56.285144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:74 2025-06-25T14:36:56.285206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:74 tabletId 72075186233409619 2025-06-25T14:36:56.285372Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:73 2025-06-25T14:36:56.285419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:73 tabletId 72075186233409618 2025-06-25T14:36:56.285645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:75 2025-06-25T14:36:56.285701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:75 tabletId 72075186233409620 2025-06-25T14:36:56.288222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:36:56.288423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 175, wait until txId: 175 TestWaitNotification wait txId: 175 2025-06-25T14:36:56.290087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 175: send EvNotifyTxCompletion 2025-06-25T14:36:56.290153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 175 2025-06-25T14:36:56.291897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 175, at schemeshard: 72057594046678944 2025-06-25T14:36:56.292021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 175: got EvNotifyTxCompletionResult 2025-06-25T14:36:56.292059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 175: satisfy waiter [1:6744:7722] TestWaitNotification: OK eventTxId 175 >> KqpPg::InsertNoTargetColumns_Simple+useSink |81.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |81.2%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-20 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-21 |81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] Test command err: 2025-06-25T14:36:14.373489Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895781223540817:2180];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:14.373547Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f77/r3tmp/tmpvpx0Z7/pdisk_1.dat 2025-06-25T14:36:15.160166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:15.176496Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-25T14:36:15.189614Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:15.273427Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:15.280639Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895781223540675:2080] 1750862174317037 != 1750862174317040 TServer::EnableGrpc on GrpcPort 32759, node 1 2025-06-25T14:36:15.388744Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:15.520905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:15.520929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:15.520935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:15.521038Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:15.783238Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:15.783596Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:15.783622Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:15.785012Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:27389, port: 27389 2025-06-25T14:36:15.785095Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:15.876755Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-25T14:36:15.925274Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****ORJQ (7D62FD80) () has now valid token of ldapuser@ldap 2025-06-25T14:36:18.909047Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895796819680469:2151];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f77/r3tmp/tmpEpbDcw/pdisk_1.dat 2025-06-25T14:36:19.265932Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:36:19.322679Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:19.338561Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:19.344896Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:19.353214Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10245, node 2 2025-06-25T14:36:19.544685Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is 
empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:19.544704Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:19.544713Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:19.544803Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:19.868749Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:19.869059Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:19.869112Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:19.869806Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:24618, port: 24618 2025-06-25T14:36:19.869863Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=invalidRobouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:19.948691Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:24618. Invalid credentials 2025-06-25T14:36:19.949152Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:19.949515Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****Sr_w (07278A2F) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:24618. 
Invalid credentials)' 2025-06-25T14:36:23.924991Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519895817450935151:2203];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f77/r3tmp/tmpS9ByFs/pdisk_1.dat 2025-06-25T14:36:24.014818Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:36:24.232678Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:24.232747Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:24.235779Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519895817450934964:2080] 1750862183800681 != 1750862183800684 2025-06-25T14:36:24.242994Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:24.262169Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18201, node 3 2025-06-25T14:36:24.420464Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:24.420485Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:24.420503Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:24.420633Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:24.559936Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:24.560224Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:24.560252Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:24.561019Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:31780, port: 31780 2025-06-25T14:36:24.561097Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:24.632873Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:31780. Invalid credentials 2025-06-25T14:36:24.633560Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****JZ4w (DE01A20A) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:31780. 
Invalid credentials)' test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f77/r3tmp/tmpkAwgst/pdisk_1.dat 2025-06-25T14:36:28.865918Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519895839264608909:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:28.991052Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:36:29.071017Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:29.076492Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519895839264608701:2080] 1750862188775225 != 1750862188775228 2025-06-25T14:36:29.085133Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:29.085232Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:29.087733Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10779, node 4 2025-06-25T14:36:29.213020Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:29.213041Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:29.213047Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:29.213151Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:29.444427Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:29.446839Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:29.446863Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:29.447552Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:2161, port: 2161 2025-06-25T14:36:29.447636Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2 ... 
le: (empty maybe) 2025-06-25T14:36:34.549243Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:34.712453Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:34.720689Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:34.720737Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:34.721460Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:27859, port: 27859 2025-06-25T14:36:34.721521Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:34.815212Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:36:34.860766Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:36:34.861469Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:36:34.861516Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:34.908755Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:34.960608Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:34.964024Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****eGDQ (8AC507DA) () has now valid token of ldapuser@ldap 2025-06-25T14:36:35.112456Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:39.082291Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519895865582125676:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:39.082394Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:36:39.118617Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****eGDQ (8AC507DA) 2025-06-25T14:36:39.118989Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:27859, port: 27859 2025-06-25T14:36:39.119082Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:39.180931Z node 5 
:LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:36:39.240451Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:36:39.241370Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:36:39.241402Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:39.284698Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:39.332553Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:39.333874Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****eGDQ (8AC507DA) () has now valid token of ldapuser@ldap 2025-06-25T14:36:43.128461Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****eGDQ (8AC507DA) 2025-06-25T14:36:43.128890Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:27859, port: 27859 2025-06-25T14:36:43.128977Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:43.208989Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:36:43.256677Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:36:43.260520Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:36:43.260588Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:43.308681Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:43.352777Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:36:43.354207Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****eGDQ (8AC507DA) () has now valid token of ldapuser@ldap 2025-06-25T14:36:46.217191Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519895915144953665:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:46.237457Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f77/r3tmp/tmppwY0wE/pdisk_1.dat 2025-06-25T14:36:46.475981Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:46.476123Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:46.477787Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:46.479107Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519895915144953553:2080] 1750862206191747 != 1750862206191750 2025-06-25T14:36:46.492058Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3853, node 6 2025-06-25T14:36:46.641450Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:46.641476Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:46.641484Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:46.641611Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:46.840441Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:36:46.841641Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:36:46.841679Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:36:46.842374Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:22067, port: 22067 2025-06-25T14:36:46.842455Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:46.924797Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:36:46.969291Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****-Jug (E6534DDC) () has now valid token of ldapuser@ldap 2025-06-25T14:36:47.240477Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:51.216435Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519895915144953665:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:51.216520Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:36:51.228431Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: 
Refreshing ticket eyJh****-Jug (E6534DDC) 2025-06-25T14:36:51.228535Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:22067, port: 22067 2025-06-25T14:36:51.228635Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:51.300726Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:36:51.345624Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****-Jug (E6534DDC) () has now valid token of ldapuser@ldap 2025-06-25T14:36:56.240186Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****-Jug (E6534DDC) 2025-06-25T14:36:56.240484Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:22067, port: 22067 2025-06-25T14:36:56.240566Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:36:56.309991Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:36:56.357268Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****-Jug (E6534DDC) () has now valid token of ldapuser@ldap >> KqpPg::JoinWithQueryService+StreamLookup [GOOD] >> KqpPg::Insert_Serial+useSink >> KqpPg::EmptyQuery+useSink >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-67 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-68 >> Yq_1::Basic_Null >> KqpPg::NoTableQuery-useSink [GOOD] >> KqpPg::PgCreateTable >> SystemView::ShowCreateTableDefaultLiteral [GOOD] >> SystemView::ShowCreateTableColumn >> KqpPg::CreateTableSerialColumns+useSink [GOOD] >> KqpPg::CreateTableSerialColumns-useSink >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-55 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-56 >> Yq_1::DeleteConnections >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-51 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-52 >> TConsoleTests::TestRegisterComputationalUnitsForPending [GOOD] >> TConsoleTests::TestNotifyOperationCompletion >> TConsoleTests::TestDatabaseQuotas [GOOD] >> TConsoleTests::TestDatabaseQuotasBadOverallQuota >> KqpPg::InsertNoTargetColumns_Simple+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Simple-useSink >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-69 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-70 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-70 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-71 >> KqpPg::EmptyQuery+useSink [GOOD] >> KqpPg::EmptyQuery-useSink >> KqpPg::Insert_Serial+useSink [GOOD] >> KqpPg::Insert_Serial-useSink >> HttpRequest::Status [GOOD] >> KqpWorkloadService::TestZeroQueueSizeManyQueries [GOOD] >> KqpWorkloadServiceActors::TestCreateDefaultPool >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOkNewApi [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-21 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-22 >> ReadOnlyVDisk::TestSync [GOOD] >> 
ResourcePoolsDdl::TestWorkloadConfigOnServerless [GOOD] >> ResourcePoolsSysView::TestResourcePoolsSysViewOnServerless >> KqpPg::CreateTableSerialColumns-useSink [GOOD] >> KqpPg::DropIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::Status [GOOD] Test command err: 2025-06-25T14:36:40.150175Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:40.150476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:40.150567Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d4a/r3tmp/tmpNLkH79/pdisk_1.dat 2025-06-25T14:36:40.694356Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62027, node 1 2025-06-25T14:36:41.063248Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:41.063306Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:41.063354Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:41.063962Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:41.066344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:41.222413Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:41.222548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:41.241806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18108 2025-06-25T14:36:41.991958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:36:45.799078Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:36:45.897380Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:45.897502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:45.949837Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:36:45.957273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:46.202382Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:46.242543Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:46.243135Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:46.243660Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:46.243796Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:46.244019Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:46.244108Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:46.244185Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:46.244258Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:46.244353Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:46.469585Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:46.469692Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:46.486126Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:46.721450Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:46.809744Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:36:46.809865Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:36:46.951691Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:36:46.951925Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:36:46.952140Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:36:46.952208Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:36:46.952259Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:36:46.952348Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:36:46.952416Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:36:46.952480Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:36:46.953047Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:36:46.991810Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:36:46.991922Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:36:47.014740Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:36:47.030019Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T14:36:47.030346Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T14:36:47.054114Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:36:47.110208Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:36:47.110267Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:36:47.110346Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:36:47.138819Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:47.161454Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:36:47.161612Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:36:47.427554Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:36:47.645537Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:36:47.720936Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:36:48.338497Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:48.603043Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:48.603182Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:48.631256Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:36:49.082143Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2293:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:36:49.082392Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2293:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:36:49.082762Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2293:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:36:49.082899Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2293:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:36:49.083028Z node 2 :TX_COLUMNSHARD WARN: ... MNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:36:51.852578Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:36:51.853061Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:36:51.853503Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:36:51.854464Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:36:51.854942Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:36:51.856647Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:36:51.857111Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:36:51.857536Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:36:52.813857Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3711:3185], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:52.814308Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:52.818218Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-25T14:36:52.893000Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:36:52.893802Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:36:52.894675Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:36:52.896099Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:36:52.896979Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:36:52.897843Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:36:52.898307Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:36:52.899565Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:36:52.900166Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:36:52.901464Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:36:53.673112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3875:3232], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:53.673640Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:53.678131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715662:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-25T14:36:53.732778Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:36:53.733389Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:36:53.733887Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:36:53.734355Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:36:53.734803Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:36:53.735808Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:36:53.736278Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:36:53.737105Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:36:53.737535Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:36:53.738819Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; waiting actualization: 0/0.000013s 2025-06-25T14:37:03.196883Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:5773:5384] 2025-06-25T14:37:03.199664Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:478: [72075186224037894] Send TEvStatistics::TEvAnalyzeStatusResponse. 
Status STATUS_NO_OPERATION Answer: 'No analyze operation' FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; >> TConsoleTests::TestDatabaseQuotasBadOverallQuota [GOOD] >> TConsoleTests::TestDatabaseQuotasBadStorageQuota ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOkNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] 
recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! 
new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! 
new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:79:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:82:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:83:2057] recipient: [10:81:2112] Leader for TabletID 72057594037927937 is [10:84:2113] sender: [10:85:2057] recipient: [10:81:2112] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:84:2113] Leader for TabletID 72057594037927937 is [10:84:2113] sender: [10:170:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:79:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:82:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:83:2057] recipient: [11:81:2112] Leader for TabletID 72057594037927937 is [11:84:2113] sender: [11:85:2057] recipient: [11:81:2112] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! 
new actor is[11:84:2113] Leader for TabletID 72057594037927937 is [11:84:2113] sender: [11:170:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:80:2057] recipient: [12:38:2085] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:83:2057] recipient: [12:82:2112] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:84:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:85:2113] sender: [12:86:2057] recipient: [12:82:2112] !Reboot 72057594037927937 (actor [12:59:2099]) rebooted! !Reboot 72057594037927937 (actor [12:59:2099]) tablet resolver refreshed! new actor is[12:85:2113] Leader for TabletID 72057594037927937 is [12:85:2113] sender: [12:171:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:60:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:77:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:83:2057] recipient: [13:38:2085] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:86:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:87:2057] recipient: [13:85:2115] Leader for TabletID 72057594037927937 is [13:88:2116] sender: [13:89:2057] recipient: [13:85:2115] !Reboot 72057594037927937 (actor [13:59:2099]) rebooted! !Reboot 72057594037927937 (actor [13:59:2099]) tablet resolver refreshed! new actor is[13:88:2116] Leader for TabletID 72057594037927937 is [13:88:2116] sender: [13:174:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:60:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:77:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:83:2057] recipient: [14:38:2085] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:86:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:87:2057] recipient: [14:85:2115] Leader for TabletID 72057594037927937 is [14:88:2116] sender: [14:89:2057] recipient: [14:85:2115] !Reboot 72057594037927937 (actor [14:59:2099]) rebooted! !Reboot 72057594037927937 (actor [14:59:2099]) tablet resolver refreshed! 
new actor is[14:88:2116] Leader for TabletID 72057594037927937 is [14:88:2116] sender: [14:174:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:60:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:77:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:84:2057] recipient: [15:38:2085] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:87:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:88:2057] recipient: [15:86:2115] Leader for TabletID 72057594037927937 is [15:89:2116] sender: [15:90:2057] recipient: [15:86:2115] !Reboot 72057594037927937 (actor [15:59:2099]) rebooted! !Reboot 72057594037927937 (actor [15:59:2099]) tablet resolver refreshed! new actor is[15:89:2116] Leader for TabletID 72057594037927937 is [15:89:2116] sender: [15:175:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:60:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:77:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:87:2057] recipient: [16:38:2085] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:90:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:91:2057] recipient: [16:89:2118] Leader for TabletID 72057594037927937 is [16:92:2119] sender: [16:93:2057] recipient: [16:89:2118] !Reboot 72057594037927937 (actor [16:59:2099]) rebooted! !Reboot 72057594037927937 (actor [16:59:2099]) tablet resolver refreshed! new actor is[16:92:2119] Leader for TabletID 72057594037927937 is [16:92:2119] sender: [16:178:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:57:2057] recipient: [17:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:57:2057] recipient: [17:52:2097] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:60:2057] recipient: [17:52:2097] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:77:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:87:2057] recipient: [17:38:2085] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:90:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:91:2057] recipient: [17:89:2118] Leader for TabletID 72057594037927937 is [17:92:2119] sender: [17:93:2057] recipient: [17:89:2118] !Reboot 72057594037927937 (actor [17:59:2099]) rebooted! !Reboot 72057594037927937 (actor [17:59:2099]) tablet resolver refreshed! 
new actor is[17:92:2119] Leader for TabletID 72057594037927937 is [17:92:2119] sender: [17:178:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:57:2057] recipient: [18:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:57:2057] recipient: [18:53:2097] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:60:2057] recipient: [18:53:2097] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:77:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:88:2057] recipient: [18:38:2085] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:91:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:92:2057] recipient: [18:90:2118] Leader for TabletID 72057594037927937 is [18:93:2119] sender: [18:94:2057] recipient: [18:90:2118] !Reboot 72057594037927937 (actor [18:59:2099]) rebooted! !Reboot 72057594037927937 (actor [18:59:2099]) tablet resolver refreshed! new actor is[18:93:2119] Leader for TabletID 72057594037927937 is [18:93:2119] sender: [18:179:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:60:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:77:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:91:2057] recipient: [19:38:2085] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:94:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:95:2057] recipient: [19:93:2121] Leader for TabletID 72057594037927937 is [19:96:2122] sender: [19:97:2057] recipient: [19:93:2121] !Reboot 72057594037927937 (actor [19:59:2099]) rebooted! !Reboot 72057594037927937 (actor [19:59:2099]) tablet resolver refreshed! new actor is[19:96:2122] Leader for TabletID 72057594037927937 is [19:96:2122] sender: [19:182:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:60:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:77:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:91:2057] recipient: [20:38:2085] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:94:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:95:2057] recipient: [20:93:2121] Leader for TabletID 72057594037927937 is [20:96:2122] sender: [20:97:2057] recipient: [20:93:2121] !Reboot 72057594037927937 (actor [20:59:2099]) rebooted! !Reboot 72057594037927937 (actor [20:59:2099]) tablet resolver refreshed! 
new actor is[20:96:2122] Leader for TabletID 72057594037927937 is [20:96:2122] sender: [20:182:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:60:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:77:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:92:2057] recipient: [21:38:2085] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:95:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:96:2057] recipient: [21:94:2121] Leader for TabletID 72057594037927937 is [21:97:2122] sender: [21:98:2057] recipient: [21:94:2121] !Reboot 72057594037927937 (actor [21:59:2099]) rebooted! !Reboot 72057594037927937 (actor [21:59:2099]) tablet resolver refreshed! new actor is[21:97:2122] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:60:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:77:2057] recipient: [22:14:2061] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestSync [GOOD] Test command err: RandomSeed# 11143916088400946939 Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] SEND TEvPut with key [1:1:0:0:0:131072:0] 2025-06-25T14:36:46.143083Z 1 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:8817:940] 2025-06-25T14:36:46.143449Z 2 00h02m00.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:8824:947] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 1 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] SEND TEvPut with key [1:1:1:0:0:32768:0] 2025-06-25T14:36:48.945214Z 3 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) Unavailable in read-only Sender# [1:8831:954] 2025-06-25T14:36:48.945400Z 2 00h06m00.210512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Unavailable in read-only Sender# [1:8824:947] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 1 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:1:0] Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] Setting VDisk read-only to 1 
for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] SEND TEvPut with key [1:1:2:0:0:131072:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 2 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:2:0] Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] Setting VDisk read-only to 1 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] SEND TEvPut with key [1:1:3:0:0:32768:0] 2025-06-25T14:36:54.342456Z 5 00h14m00.361536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:8845:968] 2025-06-25T14:36:54.342574Z 4 00h14m00.361536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) Unavailable in read-only Sender# [1:8838:961] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 3 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:3:0] Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 1 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] SEND TEvPut with key [1:1:4:0:0:131072:0] 2025-06-25T14:36:57.295533Z 6 00h18m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:8852:975] 2025-06-25T14:36:57.295653Z 5 00h18m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) Unavailable in read-only Sender# [1:8845:968] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 4 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:4:0] Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] Setting VDisk read-only to 1 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] SEND TEvPut with key [1:1:5:0:0:32768:0] 2025-06-25T14:37:00.253787Z 7 00h22m00.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in read-only Sender# [1:8859:982] 2025-06-25T14:37:00.253923Z 6 00h22m00.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) Unavailable in read-only Sender# [1:8852:975] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:32768:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 5 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:5:0] Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] Setting VDisk read-only to 1 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:6:0:0:131072:0] 2025-06-25T14:37:03.357132Z 7 00h26m00.561536s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) Unavailable in 
read-only Sender# [1:8859:982] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:131072:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] === Read all 7 blob(s) === SEND TEvGet with key [1:1:0:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:0:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:1:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:1:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:2:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:2:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:3:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:3:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:4:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:4:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} SEND TEvGet with key [1:1:5:0:0:32768:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:5:0:0:32768:0] OK Size# 32768 RequestedSize# 32768}} SEND TEvGet with key [1:1:6:0:0:131072:0] TEvGetResult: TEvGetResult {Status# OK ResponseSz# 1 {[1:1:6:0:0:131072:0] OK Size# 131072 RequestedSize# 131072}} >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-68 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-69 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-56 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-57 >> KqpPg::InsertNoTargetColumns_Simple-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Serial-useSink >> KqpPg::EmptyQuery-useSink [GOOD] >> KqpPg::DuplicatedColumns+useSink >> Yq_1::CreateConnection_With_Existing_Name >> Yq_1::CreateQuery_With_Idempotency >> Yq_1::ModifyConnections >> KqpPg::TypeCoercionBulkUpsert [GOOD] >> KqpPg::TypeCoercionInsert+useSink >> TConsoleTests::TestNotifyOperationCompletion [GOOD] >> TConsoleTests::TestNotifyOperationCompletionExtSubdomain >> KqpPg::Insert_Serial-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultText+useSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-52 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-53 >> KqpWorkloadService::TestLargeConcurrentQueryLimit [GOOD] >> KqpWorkloadService::TestLessConcurrentQueryLimit >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-70 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-71 >> TConsoleTests::TestDatabaseQuotasBadStorageQuota [GOOD] >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorksNewApi [GOOD] |81.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |81.2%| [LD] {RESULT} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut >> KqpPg::DropIndex [GOOD] >> KqpPg::CreateUniqPgColumn+useSink |81.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects |81.2%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects >> 
SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-71 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 |81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects >> KqpWorkloadServiceActors::TestCreateDefaultPool [GOOD] >> KqpWorkloadServiceActors::TestCpuLoadActor ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! 
new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:83:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:83:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! 
new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:86:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:88:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:88:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! 
new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:89:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:93:2057] recipient: [11:91:2119] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:95:2057] recipient: [11:91:2119] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:94:2120] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... bletID 72057594037927937 is [13:59:2099] sender: [13:90:2057] recipient: [13:38:2085] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:93:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:94:2057] recipient: [13:92:2119] Leader for TabletID 72057594037927937 is [13:95:2120] sender: [13:96:2057] recipient: [13:92:2119] !Reboot 72057594037927937 (actor [13:59:2099]) rebooted! !Reboot 72057594037927937 (actor [13:59:2099]) tablet resolver refreshed! 
new actor is[13:95:2120] Leader for TabletID 72057594037927937 is [13:95:2120] sender: [13:181:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:60:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:77:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:60:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:77:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:60:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:77:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:79:2057] recipient: [16:38:2085] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:82:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:83:2057] recipient: [16:81:2112] Leader for TabletID 72057594037927937 is [16:84:2113] sender: [16:85:2057] recipient: [16:81:2112] !Reboot 72057594037927937 (actor [16:59:2099]) rebooted! !Reboot 72057594037927937 (actor [16:59:2099]) tablet resolver refreshed! new actor is[16:84:2113] Leader for TabletID 72057594037927937 is [16:84:2113] sender: [16:170:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:57:2057] recipient: [17:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:57:2057] recipient: [17:52:2097] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:60:2057] recipient: [17:52:2097] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:77:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:79:2057] recipient: [17:38:2085] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:82:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:83:2057] recipient: [17:81:2112] Leader for TabletID 72057594037927937 is [17:84:2113] sender: [17:85:2057] recipient: [17:81:2112] !Reboot 72057594037927937 (actor [17:59:2099]) rebooted! !Reboot 72057594037927937 (actor [17:59:2099]) tablet resolver refreshed! 
new actor is[17:84:2113] Leader for TabletID 72057594037927937 is [17:84:2113] sender: [17:170:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:57:2057] recipient: [18:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:57:2057] recipient: [18:53:2097] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:60:2057] recipient: [18:53:2097] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:77:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:80:2057] recipient: [18:38:2085] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:82:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:84:2057] recipient: [18:83:2112] Leader for TabletID 72057594037927937 is [18:85:2113] sender: [18:86:2057] recipient: [18:83:2112] !Reboot 72057594037927937 (actor [18:59:2099]) rebooted! !Reboot 72057594037927937 (actor [18:59:2099]) tablet resolver refreshed! new actor is[18:85:2113] Leader for TabletID 72057594037927937 is [18:85:2113] sender: [18:171:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:60:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:77:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:83:2057] recipient: [19:38:2085] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:86:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:87:2057] recipient: [19:85:2115] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:89:2057] recipient: [19:85:2115] !Reboot 72057594037927937 (actor [19:59:2099]) rebooted! !Reboot 72057594037927937 (actor [19:59:2099]) tablet resolver refreshed! new actor is[19:88:2116] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:174:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:60:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:77:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:83:2057] recipient: [20:38:2085] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:86:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:87:2057] recipient: [20:85:2115] Leader for TabletID 72057594037927937 is [20:88:2116] sender: [20:89:2057] recipient: [20:85:2115] !Reboot 72057594037927937 (actor [20:59:2099]) rebooted! !Reboot 72057594037927937 (actor [20:59:2099]) tablet resolver refreshed! 
new actor is[20:88:2116] Leader for TabletID 72057594037927937 is [20:88:2116] sender: [20:174:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:60:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:77:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:84:2057] recipient: [21:38:2085] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:87:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:88:2057] recipient: [21:86:2115] Leader for TabletID 72057594037927937 is [21:89:2116] sender: [21:90:2057] recipient: [21:86:2115] !Reboot 72057594037927937 (actor [21:59:2099]) rebooted! !Reboot 72057594037927937 (actor [21:59:2099]) tablet resolver refreshed! new actor is[21:89:2116] Leader for TabletID 72057594037927937 is [21:89:2116] sender: [21:107:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:60:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:77:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:86:2057] recipient: [22:38:2085] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:89:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:90:2057] recipient: [22:88:2117] Leader for TabletID 72057594037927937 is [22:91:2118] sender: [22:92:2057] recipient: [22:88:2117] !Reboot 72057594037927937 (actor [22:59:2099]) rebooted! !Reboot 72057594037927937 (actor [22:59:2099]) tablet resolver refreshed! new actor is[22:91:2118] Leader for TabletID 72057594037927937 is [22:91:2118] sender: [22:177:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:57:2057] recipient: [23:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:57:2057] recipient: [23:53:2097] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:60:2057] recipient: [23:53:2097] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:77:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:86:2057] recipient: [23:38:2085] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:89:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:90:2057] recipient: [23:88:2117] Leader for TabletID 72057594037927937 is [23:91:2118] sender: [23:92:2057] recipient: [23:88:2117] !Reboot 72057594037927937 (actor [23:59:2099]) rebooted! !Reboot 72057594037927937 (actor [23:59:2099]) tablet resolver refreshed! 
new actor is[23:91:2118] Leader for TabletID 72057594037927937 is [23:91:2118] sender: [23:177:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:60:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:77:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:87:2057] recipient: [24:38:2085] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:90:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:91:2057] recipient: [24:89:2117] Leader for TabletID 72057594037927937 is [24:92:2118] sender: [24:93:2057] recipient: [24:89:2117] !Reboot 72057594037927937 (actor [24:59:2099]) rebooted! !Reboot 72057594037927937 (actor [24:59:2099]) tablet resolver refreshed! new actor is[24:92:2118] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:57:2057] recipient: [25:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:57:2057] recipient: [25:54:2097] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:60:2057] recipient: [25:54:2097] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:77:2057] recipient: [25:14:2061] >> KqpJoinOrder::FiveWayJoinStatsOverride+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TConsoleTests::TestDatabaseQuotasBadStorageQuota [GOOD] Test command err: 2025-06-25T14:35:20.312391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:20.312479Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:20.363076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:21.609857Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:35:21.610278Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpwsmcNJ/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:35:21.610952Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpwsmcNJ/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpwsmcNJ/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 6600662314614400560 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:35:21.762307Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:35:21.767318Z node 2 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000000:_:0:0:0]: (2147483648) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] 
StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpwsmcNJ/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:35:21.801920Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:35:21.810792Z node 2 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000001:_:0:0:0]: (2147483649) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpwsmcNJ/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:35:21.810960Z node 2 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000002:_:0:0:0]: (2147483650) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpwsmcNJ/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:35:21.897486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:1, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:35:22.062414Z node 5 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:35:22.062900Z node 5 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpwsmcNJ/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:35:22.063156Z node 5 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpwsmcNJ/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpwsmcNJ/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 11664209413570165727 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimS ... Weight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:36:59.288818Z node 152 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:36:59.289317Z node 152 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpvyXK9v/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:36:59.289488Z node 152 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpvyXK9v/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpvyXK9v/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 1506415746881139073 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:36:59.392292Z node 146 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:36:59.403547Z node 146 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpvyXK9v/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:36:59.403734Z node 146 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpvyXK9v/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpvyXK9v/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 9724410552971757461 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:36:59.457813Z node 147 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:36:59.458240Z node 147 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpvyXK9v/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:36:59.458407Z node 147 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpvyXK9v/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpvyXK9v/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 1480886589122423072 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:36:59.528275Z node 149 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:36:59.528783Z node 149 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpvyXK9v/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:36:59.531881Z node 149 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpvyXK9v/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000eed/r3tmp/tmpvyXK9v/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 9109708152570796539 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:36:59.916015Z node 145 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:59.916100Z node 145 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:00.016110Z node 145 :STATISTICS WARN: tx_init.cpp:287: [72075186233409554] TTxInit::Complete. 
EnableColumnStatistics=false 2025-06-25T14:37:03.205482Z node 154 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:37:03.205580Z node 154 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:03.270074Z node 154 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:37:07.245987Z node 163 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:37:07.246090Z node 163 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:07.313483Z node 163 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) >> TestProgram::CountUIDByVAT >> TVersions::Wreck0Reverse [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-22 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-23 >> KqpPg::InsertNoTargetColumns_Serial-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefault+useSink >> TestProgram::CountUIDByVAT [GOOD] >> KqpPg::DuplicatedColumns+useSink [GOOD] >> KqpPg::DuplicatedColumns-useSink >> PrivateApi::PingTask >> KqpPg::InsertValuesFromTableWithDefaultText+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultText-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::CountUIDByVAT [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { GroupBy { Aggregates { Column { Id: 10001 } Function { Id: 2 Arguments { Id: 2 } } } KeyColumns { Id: 4 } } } Command { Projection { Columns { Id: 10001 } Columns { Id: 4 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { GroupBy { Aggregates { Column { Id: 10001 } Function { Id: 2 Arguments { Id: 2 } } } KeyColumns { Id: 4 } } } Command { Projection { Columns { Id: 10001 } Columns { Id: 4 } } } ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2},{\"name\":\"vat\",\"id\":4}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"2,4\",\"o\":\"10001\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N2(9):{\"i\":\"4\",\"p\":{\"address\":{\"name\":\"vat\",\"id\":4}},\"o\":\"4\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"10001,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N4 -> N5[label="1"]; N0 -> N5[label="2"]; 
N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2},{\"name\":\"vat\",\"id\":4}]},\"o\":\"2,4\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N4->N2->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":4},{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2},{"name":"vat","id":4}]},"o":"2,4","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"uid","id":2},{"name":"vat","id":4}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"10001,4","t":"Projection"},"w":27,"id":5},"4":{"p":{"i":"4","p":{"address":{"name":"vat","id":4}},"o":"4","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"2,4","o":"10001","t":"Aggregation"},"w":18,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-69 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-70 >> Yq_1::DescribeJob >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-57 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-58 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinStatsOverride+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 29809, MsgBus: 14993 2025-06-25T14:35:19.386433Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895543230506058:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:19.386867Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ecb/r3tmp/tmpRbLUA2/pdisk_1.dat 2025-06-25T14:35:19.873549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:19.873655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:19.890806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:19.928423Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895543230505912:2080] 1750862119333156 != 1750862119333159 2025-06-25T14:35:19.948265Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29809, node 1 2025-06-25T14:35:20.140401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:20.140422Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:20.140429Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:20.140547Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:35:20.388450Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14993 TClient is connected to server localhost:14993 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:21.003216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:35:23.354205Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895560410375738:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:23.354279Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895560410375747:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:23.354340Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:23.366587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:23.384571Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895560410375752:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:35:23.481842Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895560410375805:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:23.841126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:35:24.082893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519895560410376031:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:35:24.083126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519895560410376031:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:35:24.083409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519895560410376031:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:35:24.083511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519895560410376031:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:35:24.083610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519895560410376031:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:35:24.083712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519895560410376031:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:35:24.083830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519895560410376031:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:35:24.083949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519895560410376031:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:35:24.084062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519895560410376031:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:35:24.084166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[1:7519895560410376031:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:35:24.084255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519895560410376031:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:35:24.097802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519895560410376050:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:35:24.097901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519895560410376050:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:35:24.098166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519895560410376050:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:35:24.098274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519895560410376050:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:35:24.098400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519895560410376050:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:35:24.098527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519895560410376050:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:35:24.098633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519895560410376050:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:35:24.098833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519895560410376050:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:35:24.098949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519895560410376050:2320];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:35:24.099061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519895560410376050:2320];tablet_id=72075186224037899;process=TTxInitSchema:: ... 
15850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.818466Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.819687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.824078Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.824126Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.824689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.824689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.829302Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.829819Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.834162Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.834683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.835260Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.835670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039251;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.839193Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.839731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.840129Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039251;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.840934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.844563Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.845084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.845360Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.845817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.849638Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.850144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.850282Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.850729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.855170Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.855782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.860462Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.861132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.864283Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.865915Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.866511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.869002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.874765Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.875387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.876679Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.877218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.879721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.880264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:57.881980Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:57.893290Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:58.080961Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykr99jv6g3xbmhnqdbs7mb4", SessionId: ydb://session/3?node_id=1&id=OWRlMjQwZTAtMmMzOWE0ZDMtMmY3MDdkZjItNzcwZWRiYzU=, Slow query, duration: 40.196221s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:36:58.419480Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:58.420014Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:58.420375Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519895869648067516:8538];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-25T14:36:58.421515Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TConsoleTests::TestNotifyOperationCompletionExtSubdomain [GOOD] >> TConsoleTests::TestRemoveAttributes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet_flat/ut/unittest >> TVersions::Wreck0Reverse [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-25T14:33:20.048874Z 00000.010 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.010 II| FAKE_ENV: Starting storage for BS group 0 00000.011 II| FAKE_ENV: Starting storage for BS group 1 00000.011 II| FAKE_ENV: Starting storage for BS group 2 00000.011 II| FAKE_ENV: Starting storage for BS group 3 00000.016 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.016 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.017 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.018 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} release 4194304b of static, Memory{0 dyn 0} 00000.019 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 00000.019 NN| TABLET_SAUSAGECACHE: Update config MemoryLimit: 8388608 ReplacementPolicy: ThreeLeveledLRU 00000.019 NN| TABLET_SAUSAGECACHE: Switch replacement policy from S3FIFO to ThreeLeveledLRU 00000.019 NN| TABLET_SAUSAGECACHE: Switch replacement policy done from S3FIFO to ThreeLeveledLRU 00000.019 II| TABLET_SAUSAGECACHE: Limit memory consumer with 
16777216TiB 00000.020 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.020 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.020 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{2, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.020 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.021 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.021 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.021 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.021 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{3, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 1 for step 4 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.022 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{4, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 1 for step 5 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.023 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{5, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 1 for step 6 00000.024 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{6, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.025 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 1 for step 7 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{7, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.026 DD| TABLET_EXECUTOR: Leader{1:2:9} commited cookie 1 for step 8 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{8, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.027 DD| TABLET_EXECUTOR: Leader{1:2:10} commited cookie 1 for step 9 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{9, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.028 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:11} commited cookie 1 for step 10 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{10, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.029 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:12} commited cookie 1 for step 11 00000.030 DD| TABLET_EXECUTOR: 
Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{11, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.030 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:13} commited cookie 1 for step 12 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.031 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{12, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:14} commited cookie 1 for step 13 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.032 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{13, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:15} commited cookie 1 for step 14 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.033 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{14, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.034 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.034 DD| TABLET_EXECUTOR: Leader{ ... ageCollection: [1:0:256:0:0:0:1] Pages: [ 1 3 ] Cookie: 4 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 3 ] Cookie: 4 2025-06-25T14:33:29.163966Z node 35 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1265: Bootstrap with config MemoryLimit: 456 AsyncQueueInFlyLimit: 19 ... 
waiting for NKikimr::NSharedCache::TEvRequest 2025-06-25T14:33:29.164651Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:1] 2025-06-25T14:33:29.164756Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [35:5:2052] 2025-06-25T14:33:29.164873Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:5:2052] cookie 1 class AsyncLoad from cache [ ] already requested [ ] to request [ 1 2 3 4 5 ] 2025-06-25T14:33:29.164937Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 1 2 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 2025-06-25T14:33:29.165230Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:5:2052] cookie 2 class AsyncLoad from cache [ ] already requested [ ] to request [ 6 7 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-25T14:33:29.165413Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:2] 2025-06-25T14:33:29.165471Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:2] owner [35:5:2052] 2025-06-25T14:33:29.165640Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [35:5:2052] cookie 3 class AsyncLoad from cache [ ] already requested [ ] to request [ 10 11 12 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-25T14:33:29.165847Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [35:6:2053] 2025-06-25T14:33:29.165958Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:6:2053] cookie 4 class AsyncLoad from cache [ ] already requested [ 1 5 ] to request [ 9 10 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) Checking fetches#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 2 ] Cookie: 20 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 2 ] Cookie: 20 ... 
waiting for NKikimr::NSharedCache::TEvUnregister 2025-06-25T14:33:29.166238Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:803: Unregister owner [35:5:2052] 2025-06-25T14:33:29.166302Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:2] owner [35:5:2052] class AsyncLoad error RACE cookie 3 2025-06-25T14:33:29.166364Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:816: Remove page collection [1:0:256:0:0:0:2] owner [35:5:2052] 2025-06-25T14:33:29.166413Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:1] owner [35:5:2052] class AsyncLoad error RACE cookie 1 2025-06-25T14:33:29.166441Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:1] owner [35:5:2052] class AsyncLoad error RACE cookie 2 2025-06-25T14:33:29.166465Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:816: Remove page collection [1:0:256:0:0:0:1] owner [35:5:2052] 2025-06-25T14:33:29.166487Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:823: Remove owner [35:5:2052] ... waiting for NKikimr::NSharedCache::TEvUnregister (done) ... waiting for results #4 ... waiting for results #4 (done) Checking results#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 1 PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 1 PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 ... waiting for fetches #4 2025-06-25T14:33:29.166898Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 1 2 ] 2025-06-25T14:33:29.166972Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:1] class AsyncLoad cookie 1 2025-06-25T14:33:29.167036Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 5 9 ] ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 ... waiting for fetches #4 (done) Checking fetches#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 5 9 ] Cookie: 20 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 5 9 ] Cookie: 20 ... waiting for fetches #4 2025-06-25T14:33:29.167343Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 5 9 ] 2025-06-25T14:33:29.167409Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:1] class AsyncLoad cookie 2 2025-06-25T14:33:29.167490Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 10 ] 2025-06-25T14:33:29.167615Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:2] class AsyncLoad cookie 3 2025-06-25T14:33:29.167703Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1012: Drop page collection [1:0:256:0:0:0:1] pages [ 2 ] owner [35:6:2053] ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 ... 
waiting for fetches #4 (done) Checking fetches#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 10 ] Cookie: 10 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 10 ] Cookie: 10 ... waiting for results #4 2025-06-25T14:33:29.167983Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 10 ] 2025-06-25T14:33:29.168038Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:1050: Send page collection result [1:0:256:0:0:0:1] owner [35:6:2053] class AsyncLoad pages [ 1 5 9 10 ] cookie 4 ... waiting for results #4 (done) Checking results#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 5 9 10 ] Cookie: 4 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 5 9 10 ] Cookie: 4 2025-06-25T14:33:29.372341Z node 36 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1265: Bootstrap with config MemoryLimit: 456 AsyncQueueInFlyLimit: 19 ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-25T14:33:29.373153Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:1] 2025-06-25T14:33:29.373232Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [36:5:2052] 2025-06-25T14:33:29.373425Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [36:5:2052] cookie 1 class AsyncLoad from cache [ ] already requested [ ] to request [ 1 ] 2025-06-25T14:33:29.373492Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 1 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-25T14:33:29.373725Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:2] 2025-06-25T14:33:29.373775Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:2] owner [36:6:2053] 2025-06-25T14:33:29.373875Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [36:6:2053] cookie 2 class AsyncLoad from cache [ ] already requested [ ] to request [ 10 11 ] 2025-06-25T14:33:29.373957Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:2] async queue pages [ 10 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 2025-06-25T14:33:29.374241Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [36:6:2053] cookie 3 class AsyncLoad from cache [ ] already requested [ ] to request [ 12 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) Checking fetches#3 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 10 PageCollection: [1:0:256:0:0:0:2] Pages: [ 10 ] Cookie: 10 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 10 PageCollection: [1:0:256:0:0:0:2] Pages: [ 10 ] Cookie: 10 ... 
waiting for NKikimr::NSharedCache::TEvUnregister 2025-06-25T14:33:29.374838Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:803: Unregister owner [36:6:2053] 2025-06-25T14:33:29.374906Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:2] owner [36:6:2053] class AsyncLoad error RACE cookie 2 2025-06-25T14:33:29.374969Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:2] owner [36:6:2053] class AsyncLoad error RACE cookie 3 2025-06-25T14:33:29.375003Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:816: Remove page collection [1:0:256:0:0:0:2] owner [36:6:2053] 2025-06-25T14:33:29.375047Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:823: Remove owner [36:6:2053] ... waiting for NKikimr::NSharedCache::TEvUnregister (done) ... waiting for results #3 ... waiting for results #3 (done) Checking results#3 Expected: PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 Actual: PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 ... waiting for results #3 2025-06-25T14:33:29.375438Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 1 ] 2025-06-25T14:33:29.375512Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:1050: Send page collection result [1:0:256:0:0:0:1] owner [36:5:2052] class AsyncLoad pages [ 1 ] cookie 1 2025-06-25T14:33:29.375612Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:2] class AsyncLoad cookie 2 2025-06-25T14:33:29.375678Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:2] class AsyncLoad cookie 3 ... 
waiting for results #3 (done) Checking results#3 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 1 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 1 Checking fetches#3 Expected: Actual: 2025-06-25T14:33:29.389175Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:2] status OK pages [ 10 ] Checking results#3 Expected: Actual: Checking fetches#3 Expected: Actual: >> Yq_1::DescribeConnection >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-53 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-54 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-71 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-72 >> Yq_1::Basic |81.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |81.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops >> Yq_1::ListConnections >> KqpPg::DuplicatedColumns-useSink [GOOD] >> KqpPg::InsertFromSelect_NoReorder+useSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 [FAIL] >> KqpPg::InsertValuesFromTableWithDefault+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefault-useSink |81.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |81.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |81.3%| [LD] {RESULT} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorksNewApi >> Yq_1::DeleteConnections [GOOD] >> Yq_1::Create_And_Modify_The_Same_Connection >> ResourcePoolsSysView::TestResourcePoolsSysViewOnServerless [GOOD] >> ResourcePoolsSysView::TestResourcePoolsSysViewFilters >> ReadOnlyVDisk::TestStorageLoad [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-23 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-24 >> KqpPg::CreateUniqPgColumn+useSink [GOOD] >> KqpPg::CreateUniqPgColumn-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/unittest >> ReadOnlyVDisk::TestStorageLoad [GOOD] Test command err: RandomSeed# 13781067130791914022 Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] 2025-06-25T14:36:48.960934Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:48.963977Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:48.967463Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:48.972938Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:48.973629Z 1 00h02m38.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:48.999017Z 1 00h02m38.200000s :BS_SKELETON 
ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:49.086789Z 1 00h02m38.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:49.424740Z 1 00h02m38.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:49.437382Z 1 00h02m38.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:49.706482Z 1 00h02m38.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:49.781645Z 1 00h02m38.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:49.843884Z 1 00h02m39.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:49.844626Z 1 00h02m39.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:49.888908Z 1 00h02m39.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.188222Z 1 00h02m39.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.206120Z 1 00h02m39.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.236149Z 1 00h02m39.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.248462Z 1 00h02m39.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.503407Z 1 00h02m40.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.528987Z 1 00h02m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.531129Z 1 00h02m40.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.544487Z 1 00h02m40.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.582116Z 1 00h02m40.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.597445Z 1 00h02m40.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.611338Z 1 00h02m40.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.622197Z 1 00h02m40.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.630769Z 1 00h02m40.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only 
Sender# [1:5321:700] 2025-06-25T14:36:50.822518Z 1 00h02m40.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.945021Z 1 00h02m40.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.955411Z 1 00h02m41.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.967737Z 1 00h02m41.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:50.968339Z 1 00h02m41.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:51.235306Z 1 00h02m41.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:51.288961Z 1 00h02m41.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:51.323245Z 1 00h02m41.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:51.351516Z 1 00h02m41.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:51.395073Z 1 00h02m41.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:51.417623Z 1 00h02m42.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:51.470746Z 1 00h02m42.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:51.476810Z 1 00h02m42.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:51.542291Z 1 00h02m42.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:51.638966Z 1 00h02m42.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:51.943183Z 1 00h02m42.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:51.981943Z 1 00h02m42.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.009705Z 1 00h02m42.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.020491Z 1 00h02m43.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.110964Z 1 00h02m43.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.114298Z 1 00h02m43.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.131039Z 1 00h02m43.200000s :BS_SKELETON ERROR: 
PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.227120Z 1 00h02m43.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.475067Z 1 00h02m43.400000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.488864Z 1 00h02m43.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.505921Z 1 00h02m43.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.521410Z 1 00h02m43.700000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.534732Z 1 00h02m43.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.600575Z 1 00h02m43.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.744232Z 1 00h02m44.000000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.766032Z 1 00h02m44.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.783850Z 1 00h02m44.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.863321Z 1 00h02m44.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:52.899254Z 1 00h02m44.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:53.110771Z 1 00h02m44.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:53.241815Z 1 00h02m44.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:53.258656Z 1 00h02m44.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:53.372268Z 1 00h02m45.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:53.565776Z 1 00h02m45.200000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:53.583873Z 1 00h02m45.300000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:53.727640Z 1 00h02m45.500000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:53.743240Z 1 00h02m45.600000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:53.775823Z 1 00h02m45.800000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# 
[1:5321:700] 2025-06-25T14:36:53.788830Z 1 00h02m45.900000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:5321:700] 2025-06-25T14:36:54.052409Z 1 00h02m46.100000s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [ ... k [82000000:1:0:5:0] Setting VDisk read-only to 0 for position 6 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:6:0] 2025-06-25T14:37:07.566296Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:07.570446Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:07.576117Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:07.584347Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:07.585055Z 8 00h20m54.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:07.854096Z 8 00h20m54.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:08.032006Z 8 00h20m54.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:08.063346Z 8 00h20m54.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:08.147176Z 8 00h20m54.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:08.412626Z 8 00h20m55.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:08.435937Z 8 00h20m55.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:08.616703Z 8 00h20m55.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:08.619203Z 8 00h20m55.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:08.639188Z 8 00h20m55.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:08.671850Z 8 00h20m55.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:08.698553Z 8 00h20m55.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:08.812210Z 8 00h20m56.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:08.829081Z 8 00h20m56.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.055947Z 8 00h20m56.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) 
Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.081785Z 8 00h20m56.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.083109Z 8 00h20m56.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.190319Z 8 00h20m56.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.223233Z 8 00h20m56.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.240862Z 8 00h20m56.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.376888Z 8 00h20m56.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.395965Z 8 00h20m57.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.414418Z 8 00h20m57.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.428821Z 8 00h20m57.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.447002Z 8 00h20m57.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.507923Z 8 00h20m57.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.509871Z 8 00h20m57.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.712531Z 8 00h20m57.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:09.909756Z 8 00h20m57.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.042567Z 8 00h20m57.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.061584Z 8 00h20m58.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.187689Z 8 00h20m58.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.203603Z 8 00h20m58.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.224848Z 8 00h20m58.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.226588Z 8 00h20m58.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.326608Z 8 00h20m58.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.524110Z 8 
00h20m58.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.549828Z 8 00h20m58.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.693722Z 8 00h20m59.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.846672Z 8 00h20m59.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.867784Z 8 00h20m59.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.894457Z 8 00h20m59.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.896115Z 8 00h20m59.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:10.929121Z 8 00h20m59.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:11.007160Z 8 00h20m59.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:11.222199Z 8 00h20m59.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:11.387646Z 8 00h21m00.012560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:11.503803Z 8 00h21m00.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:11.531919Z 8 00h21m00.312560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:11.647007Z 8 00h21m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:11.649360Z 8 00h21m00.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:11.668483Z 8 00h21m00.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:11.783968Z 8 00h21m00.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:11.938407Z 8 00h21m00.712560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.018467Z 8 00h21m00.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.033746Z 8 00h21m00.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.074560Z 8 00h21m01.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.099042Z 8 00h21m01.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) 
Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.350509Z 8 00h21m01.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.373538Z 8 00h21m01.512560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.391216Z 8 00h21m01.612560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.453672Z 8 00h21m01.812560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.655962Z 8 00h21m01.912560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.694738Z 8 00h21m02.112560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.786118Z 8 00h21m02.212560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.873393Z 8 00h21m02.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.873953Z 8 00h21m02.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] 2025-06-25T14:37:12.877439Z 8 00h21m02.412560s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Unavailable in read-only Sender# [1:5370:749] >> KqpPg::InsertValuesFromTableWithDefaultText-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull+useSink >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-70 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-71 >> KqpPg::TableArrayInsert+useSink [GOOD] >> KqpPg::TableArrayInsert-useSink |81.3%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} |81.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |81.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |81.3%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/test-results/unittest/{meta.json ... 
results_accumulator.log} |81.3%| [LD] {RESULT} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-58 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-59 >> TNetClassifierUpdaterTest::TestFiltrationByNetboxTags [GOOD] >> KqpPg::CreateTableBulkUpsertAndRead [GOOD] >> KqpPg::CopyTableSerialColumns+useSink >> TConsoleTests::TestRemoveAttributes [GOOD] >> TConsoleTests::TestRemoveAttributesExtSubdomain |81.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |81.3%| [LD] {RESULT} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut >> Yq_1::Basic_Null [GOOD] >> Yq_1::Basic_TaggedLiteral |81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-72 [GOOD] |81.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> KqpPg::InsertFromSelect_Simple+useSink [GOOD] >> KqpPg::InsertFromSelect_Simple-useSink >> KqpPg::InsertValuesFromTableWithDefault-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast+useSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-54 [FAIL] >> KqpPg::InsertFromSelect_NoReorder+useSink [GOOD] >> KqpPg::DropTablePg ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TNetClassifierUpdaterTest::TestFiltrationByNetboxTags [GOOD] Test command err: 2025-06-25T14:35:19.952653Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895541324465567:2182];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:19.952944Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmpNwTz3l/pdisk_1.dat 2025-06-25T14:35:20.552382Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895541324465423:2080] 1750862119882925 != 1750862119882928 2025-06-25T14:35:20.562739Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:20.566566Z node 1 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#26,[::1]:11629) connection closed with error: Connection refused 2025-06-25T14:35:20.567007Z node 1 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:35:20.567881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:20.567949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:20.570107Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:20.871012Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:23.637237Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895561519813116:2062];send_to=[0:7307199536658146131:7762515]; 
2025-06-25T14:35:23.637275Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmprzdqaI/pdisk_1.dat 2025-06-25T14:35:23.952091Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:23.956958Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:23.957030Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:23.957935Z node 2 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#28,[::1]:11237) connection closed with error: Connection refused 2025-06-25T14:35:23.969320Z node 2 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:35:23.971637Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:24.716666Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:27.540985Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519895577397192015:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:27.541168Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmpoUFD5E/pdisk_1.dat 2025-06-25T14:35:27.689392Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:27.708516Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519895577397191828:2080] 1750862127443837 != 1750862127443840 2025-06-25T14:35:27.710638Z node 3 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#26,[::1]:13362) connection closed with error: Connection refused 2025-06-25T14:35:27.717218Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:27.717301Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:27.717551Z node 3 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:35:27.725228Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:28.472452Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:31.825718Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519895592531864370:2244];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmpU9WBNl/pdisk_1.dat 2025-06-25T14:35:31.938385Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:35:32.008239Z node 4 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#28,[::1]:6222) connection closed with error: Connection refused 2025-06-25T14:35:32.011475Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:32.012140Z node 4 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:35:32.016505Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519895592531864137:2080] 1750862131746563 != 1750862131746566 2025-06-25T14:35:32.026882Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:32.026957Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:32.094024Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:32.812459Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:36.924934Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519895617426405191:2177];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmpl1Za8a/pdisk_1.dat 2025-06-25T14:35:37.081225Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:35:37.258937Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:37.264921Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519895617426405039:2080] 1750862136822564 != 1750862136822567 2025-06-25T14:35:37.291913Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:37.291996Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:37.316262Z node 5 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#30,[::1]:20911) connection closed with error: Connection refused 2025-06-25T14:35:37.320931Z node 5 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:35:37.322025Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:37.908585Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:43.416826Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519895644083369608:2140];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmpiqJcDy/pdisk_1.dat 2025-06-25T14:35:43.581401Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:35:43.716921Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:43.719401Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519895644083369504:2080] 1750862143320816 != 1750862143320819 2025-06-25T14:35:43.735628Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:43.735707Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:43.744847Z node 6 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#32,[::1]:15339) connection closed with error: Connection refused 2025-06-25T14:35:43.745266Z node 6 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:35:43.746228Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:44.402144Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:49.109489Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519895672391185985:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:49.109537Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmptChvYh/pdisk_1.dat 2025-06-25T14:35:49.519809Z node 7 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#34,[::1]:15598) connection closed with error: Connection refused 2025-06-25T14:35:49.537368Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:49.538844Z node 7 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:35:49.551666Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:49.551754Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:49.559945Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:50.204589Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:54.445349Z node 8 :ME ... 
log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[17:7519895897414137919:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:42.926674Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmpGKCbBJ/pdisk_1.dat 2025-06-25T14:36:43.147974Z node 17 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [17:7519895897414137723:2080] 1750862202737424 != 1750862202737427 2025-06-25T14:36:43.265253Z node 17 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#26,[::1]:7600) connection closed with error: Connection refused 2025-06-25T14:36:43.268354Z node 17 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:36:43.270491Z node 17 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:43.270598Z node 17 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:43.284695Z node 17 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:43.300770Z node 17 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:43.744648Z node 17 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:48.904464Z node 18 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[18:7519895927007566326:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:48.904709Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmpuZb4uL/pdisk_1.dat 2025-06-25T14:36:49.117721Z node 18 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:49.135698Z node 18 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:49.135830Z node 18 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:49.136869Z node 18 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#28,[::1]:5529) connection closed with error: Connection refused 2025-06-25T14:36:49.137193Z node 18 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:36:49.184218Z node 18 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:49.912438Z node 18 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:54.343175Z node 19 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[19:7519895951004024945:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:54.343297Z node 19 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmpK4nXcr/pdisk_1.dat 2025-06-25T14:36:54.554484Z node 19 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#30,[::1]:64152) connection closed with error: Connection refused 2025-06-25T14:36:54.556752Z node 19 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:36:54.557963Z node 19 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:54.569950Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:54.570056Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:54.571958Z node 19 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [19:7519895951004024924:2080] 1750862214342311 != 1750862214342314 2025-06-25T14:36:54.577662Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:55.444770Z node 19 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:59.812085Z node 20 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[20:7519895974500833763:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:59.812131Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmpUpf6ts/pdisk_1.dat 2025-06-25T14:37:00.105488Z node 20 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:00.107198Z node 20 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [20:7519895974500833743:2080] 1750862219809268 != 1750862219809271 2025-06-25T14:37:00.124608Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:00.124713Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:00.128406Z node 20 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#32,[::1]:21343) connection closed with error: Connection refused 2025-06-25T14:37:00.129008Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:00.129803Z node 20 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:37:00.848053Z node 20 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:05.779462Z node 21 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[21:7519895997392113918:2061];send_to=[0:7307199536658146131:7762515]; 
2025-06-25T14:37:05.779577Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmpfOFtjl/pdisk_1.dat 2025-06-25T14:37:05.967591Z node 21 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:05.972792Z node 21 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [21:7519895997392113897:2080] 1750862225775677 != 1750862225775680 2025-06-25T14:37:05.989013Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:05.989125Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:05.991033Z node 21 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#34,[::1]:6636) connection closed with error: Connection refused 2025-06-25T14:37:05.992301Z node 21 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:37:05.992703Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:06.800856Z node 21 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:11.770005Z node 22 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[22:7519896023996382672:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:11.770072Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmpQkqo0P/pdisk_1.dat 2025-06-25T14:37:12.042808Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:12.042917Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:12.055879Z node 22 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:12.057822Z node 22 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [22:7519896023996382643:2080] 1750862231769172 != 1750862231769175 2025-06-25T14:37:12.083276Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:12.084099Z node 22 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#36,[::1]:14014) connection closed with error: Connection refused 2025-06-25T14:37:12.087204Z node 22 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:37:12.836727Z node 22 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:18.232156Z node 23 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[23:7519896056027825267:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:18.242425Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f26/r3tmp/tmp8RqvVA/pdisk_1.dat 2025-06-25T14:37:18.433490Z node 23 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:18.449682Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:18.449789Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:18.453347Z node 23 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#38,[::1]:19735) connection closed with error: Connection refused 2025-06-25T14:37:18.456964Z node 23 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T14:37:18.457903Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:19.244446Z node 23 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |81.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut |81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut |81.3%| [LD] {RESULT} $(B)/ydb/core/ymq/actor/cloud_events/cloud_events_ut/ydb-core-ymq-actor-cloud_events-cloud_events_ut >> Yq_1::CreateConnection_With_Existing_Name [GOOD] >> Yq_1::CreateConnections_With_Idempotency >> KqpWorkloadServiceActors::TestCpuLoadActor [GOOD] >> TestProgram::YqlKernelEndsWithScalar ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-72 [GOOD] Test command err: Starting YDB, grpc: 28308, msgbus: 12886 2025-06-25T14:33:55.784116Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895182223886687:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:55.784167Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000994/r3tmp/tmpkJzf36/pdisk_1.dat 2025-06-25T14:33:56.723943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:56.724064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:56.795228Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:56.796434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path 
status: LookupError; 2025-06-25T14:33:56.805262Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:56.832435Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 28308, node 1 2025-06-25T14:33:57.168775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:57.168797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:57.168806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:57.168916Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12886 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:33:57.629699Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895182223886822:2116] Handle TEvNavigate describe path dc-1 2025-06-25T14:33:57.659853Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895190813821951:2457] HANDLE EvNavigateScheme dc-1 2025-06-25T14:33:57.668835Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519895190813821951:2457] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:57.736532Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519895190813821951:2457] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-25T14:33:57.763193Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519895190813821951:2457] Handle TEvDescribeSchemeResult Forward to# [1:7519895190813821950:2456] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 
MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:33:57.828914Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519895182223886822:2116] Handle TEvProposeTransaction 2025-06-25T14:33:57.828947Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519895182223886822:2116] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:33:57.829043Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519895182223886822:2116] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7519895190813821967:2465] 2025-06-25T14:33:57.992097Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519895190813821967:2465] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:33:57.992202Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519895190813821967:2465] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:33:57.992428Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519895190813821967:2465] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:33:57.992507Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519895190813821967:2465] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:33:57.992755Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519895190813821967:2465] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:57.992857Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519895190813821967:2465] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:33:57.992923Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519895190813821967:2465] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:33:57.993039Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519895190813821967:2465] txid# 281474976715657 HANDLE 
EvClientConnected 2025-06-25T14:33:57.993752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:33:57.997851Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519895190813821967:2465] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:33:57.997900Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519895190813821967:2465] txid# 281474976715657 SEND to# [1:7519895190813821966:2464] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 2025-06-25T14:33:58.022443Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519895182223886822:2116] Handle TEvProposeTransaction 2025-06-25T14:33:58.022472Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519895182223886822:2116] TxId# 281474976715658 ProcessProposeTransaction 2025-06-25T14:33:58.022500Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519895182223886822:2116] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519895195108789307:2502] 2025-06-25T14:33:58.024693Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519895195108789307:2502] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:33:58.024742Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519895195108789307:2502] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:33:58.024756Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519895195108789307:2502] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:33:58.024795Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519895195108789307:2502] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:33:58.025016Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519895195108789307:2502] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:58.025137Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519895195108789307:2502] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:33:58.025171Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519895195108789307:2502] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:33:58.025303Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519895195108789307:2502] txid# 281474976715658 H ... 
74949401268:2304], DatabaseId: /dc-1, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:37:23.840412Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896053474563941:2114] Handle TEvProposeTransaction 2025-06-25T14:37:23.840458Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896053474563941:2114] TxId# 281474976710661 ProcessProposeTransaction 2025-06-25T14:37:23.840520Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896053474563941:2114] Cookie# 0 userReqId# "" txid# 281474976710661 SEND to# [59:7519896074949401342:2578] 2025-06-25T14:37:23.844038Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896074949401342:2578] txid# 281474976710661 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/dc-1" 2025-06-25T14:37:23.844096Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896074949401342:2578] txid# 281474976710661 Bootstrap, UserSID: metadata@system CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:23.844120Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896074949401342:2578] txid# 281474976710661 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 0 2025-06-25T14:37:23.852381Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519896074949401342:2578] txid# 281474976710661 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-25T14:37:23.852472Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519896074949401342:2578] txid# 281474976710661 HandleResolveDatabase, UserSID: metadata@system CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-25T14:37:23.853582Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [59:7519896074949401342:2578] txid# 281474976710661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:37:23.853700Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896074949401342:2578] txid# 281474976710661 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:23.853939Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896074949401342:2578] txid# 281474976710661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:23.854141Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896074949401342:2578] HANDLE EvNavigateKeySetResult, txid# 281474976710661 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 
72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:23.854204Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896074949401342:2578] txid# 281474976710661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710661 TabletId# 72057594046644480} 2025-06-25T14:37:23.854366Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896074949401342:2578] txid# 281474976710661 HANDLE EvClientConnected 2025-06-25T14:37:23.870132Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896074949401342:2578] txid# 281474976710661 Status StatusAlreadyExists HANDLE {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710661 Reason# Check failed: path: '/dc-1/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)} 2025-06-25T14:37:23.870300Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519896074949401342:2578] txid# 281474976710661, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:23.870342Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896074949401342:2578] txid# 281474976710661 SEND to# [59:7519896074949401268:2304] Source {TEvProposeTransactionStatus txid# 281474976710661 Status# 48} 2025-06-25T14:37:23.917015Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896053474563941:2114] Handle TEvProposeTransaction 2025-06-25T14:37:23.917054Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896053474563941:2114] TxId# 281474976710662 ProcessProposeTransaction 2025-06-25T14:37:23.917106Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896053474563941:2114] Cookie# 0 userReqId# "" txid# 281474976710662 SEND to# [59:7519896074949401366:2590] 2025-06-25T14:37:23.924494Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896074949401366:2590] txid# 281474976710662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:58958" 2025-06-25T14:37:23.924603Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896074949401366:2590] txid# 281474976710662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:23.924634Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896074949401366:2590] txid# 281474976710662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:37:23.924699Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896074949401366:2590] txid# 281474976710662 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:23.925142Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896074949401366:2590] txid# 281474976710662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:23.925300Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896074949401366:2590] 
HANDLE EvNavigateKeySetResult, txid# 281474976710662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:23.925369Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896074949401366:2590] txid# 281474976710662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710662 TabletId# 72057594046644480} 2025-06-25T14:37:23.925556Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896074949401366:2590] txid# 281474976710662 HANDLE EvClientConnected 2025-06-25T14:37:24.184774Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896074949401366:2590] txid# 281474976710662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710662} 2025-06-25T14:37:24.184856Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896074949401366:2590] txid# 281474976710662 SEND to# [59:7519896074949401365:2296] Source {TEvProposeTransactionStatus txid# 281474976710662 Status# 48} 2025-06-25T14:37:24.256835Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896053474563941:2114] Handle TEvProposeTransaction 2025-06-25T14:37:24.256874Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896053474563941:2114] TxId# 281474976710663 ProcessProposeTransaction 2025-06-25T14:37:24.256922Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896053474563941:2114] Cookie# 0 userReqId# "" txid# 281474976710663 SEND to# [59:7519896079244368702:2609] 2025-06-25T14:37:24.259759Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896079244368702:2609] txid# 281474976710663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "targetuser" MissingOk: false } } } } UserToken: "\n\024ordinaryuser@builtin\022\030\022\026\n\024all-users@well-known\032\024ordinaryuser@builtin\"\007Builtin*\027ordi****ltin (32520BBF)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:58970" 2025-06-25T14:37:24.259849Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896079244368702:2609] txid# 281474976710663 Bootstrap, UserSID: ordinaryuser@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:24.259875Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896079244368702:2609] txid# 281474976710663 Bootstrap, UserSID: ordinaryuser@builtin IsClusterAdministrator: 0 2025-06-25T14:37:24.260052Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519896079244368702:2609] txid# 281474976710663 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-25T14:37:24.260100Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519896079244368702:2609] txid# 281474976710663 HandleResolveDatabase, UserSID: ordinaryuser@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-25T14:37:24.260164Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896079244368702:2609] txid# 281474976710663 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:24.260553Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896079244368702:2609] txid# 281474976710663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 
2025-06-25T14:37:24.260592Z node 59 :TX_PROXY ERROR: schemereq.cpp:1103: Actor# [59:7519896079244368702:2609] txid# 281474976710663, Access denied for ordinaryuser@builtin, attempt to manage user 2025-06-25T14:37:24.260701Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519896079244368702:2609] txid# 281474976710663, issues: { message: "Access denied for ordinaryuser@builtin" issue_code: 200000 severity: 1 } 2025-06-25T14:37:24.260734Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896079244368702:2609] txid# 281474976710663 SEND to# [59:7519896079244368701:2316] Source {TEvProposeTransactionStatus Status# 5} 2025-06-25T14:37:24.261097Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=YjlmZTY3Y2QtZDU0YTI0NjYtNTM2YWYxMDAtN2JlMDEwZTQ=, ActorId: [59:7519896079244368687:2316], ActorState: ExecuteState, TraceId: 01jykrbac7ahrtapbrgejkj3x2, Create QueryResponse for error on request, msg: 2025-06-25T14:37:24.261630Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519896053474563941:2114] Handle TEvExecuteKqpTransaction 2025-06-25T14:37:24.261649Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519896053474563941:2114] TxId# 281474976710664 ProcessProposeKqpTransaction >> Yq_1::ModifyConnections [GOOD] >> Yq_1::ModifyQuery >> TestProgram::YqlKernelEndsWithScalar [GOOD] >> TSchemeShardUserAttrsTest::MkDir ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEndsWithScalar [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Bytes: "amet." } } } Command { Assign { Column { Id: 16 } Function { Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\000\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\020EndsWith?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Bytes: "amet." } } } Command { Assign { Column { Id: 16 } Function { Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\000\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\020EndsWith?\034? 
\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"amet.\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"7,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:7,15"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"7\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"7,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"string","id":7}]},"o":"7","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"string","id":7}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"amet."},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceActors::TestCpuLoadActor [GOOD] Test command err: 2025-06-25T14:36:33.449393Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895861624572853:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:33.449779Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a6c/r3tmp/tmpCoQKo2/pdisk_1.dat 2025-06-25T14:36:34.042752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:34.049753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:34.128388Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895861624572658:2080] 
1750862193373586 != 1750862193373589 2025-06-25T14:36:34.138819Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:34.141580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28699, node 1 2025-06-25T14:36:34.380777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:34.380798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:34.380812Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:34.380926Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:34.444686Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6277 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:36:35.068669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:36:35.090546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:36:37.619496Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=YjExZDE0MjYtZWVmNjU2OWEtN2UyMzAzNzEtYTRkZTkxMTM=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YjExZDE0MjYtZWVmNjU2OWEtN2UyMzAzNzEtYTRkZTkxMTM= 2025-06-25T14:36:37.619866Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=YjExZDE0MjYtZWVmNjU2OWEtN2UyMzAzNzEtYTRkZTkxMTM=, ActorId: [1:7519895878804442459:2289], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:36:37.619931Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:36:37.619944Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:114: [WorkloadService] [Service] Resource pools was disabled 2025-06-25T14:36:37.633900Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI= 2025-06-25T14:36:37.634188Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:36:37.634389Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: ReadyState, TraceId: 01jykr9ww2fr6m45gt047k3zmz, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7519895878804442460:2298] database: Root databaseId: /Root pool id: 2025-06-25T14:36:37.634538Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: ExecuteState, TraceId: 01jykr9ww2fr6m45gt047k3zmz, Sending CompileQuery request 2025-06-25T14:36:38.115878Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: ExecuteState, TraceId: 01jykr9ww2fr6m45gt047k3zmz, ExecutePhyTx, tx: 0x000050C0002F7418 literal: 0 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-25T14:36:38.115957Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: ExecuteState, TraceId: 01jykr9ww2fr6m45gt047k3zmz, Sending to Executer TraceId: 0 8 2025-06-25T14:36:38.116134Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: ExecuteState, TraceId: 01jykr9ww2fr6m45gt047k3zmz, Created new KQP executer: [1:7519895883099409762:2290] isRollback: 0 2025-06-25T14:36:38.160760Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1852: SessionId: 
ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: ExecuteState, TraceId: 01jykr9ww2fr6m45gt047k3zmz, Forwarded TEvStreamData to [1:7519895878804442460:2298] 2025-06-25T14:36:38.166051Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: ExecuteState, TraceId: 01jykr9ww2fr6m45gt047k3zmz, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-25T14:36:38.166260Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: ExecuteState, TraceId: 01jykr9ww2fr6m45gt047k3zmz, txInfo Status: Committed Kind: Pure TotalDuration: 50.53 ServerDuration: 50.441 QueriesCount: 2 2025-06-25T14:36:38.166315Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: ExecuteState, TraceId: 01jykr9ww2fr6m45gt047k3zmz, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-25T14:36:38.166508Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: ExecuteState, TraceId: 01jykr9ww2fr6m45gt047k3zmz, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:36:38.166546Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: ExecuteState, TraceId: 01jykr9ww2fr6m45gt047k3zmz, EndCleanup, isFinal: 1 2025-06-25T14:36:38.166595Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: ExecuteState, TraceId: 01jykr9ww2fr6m45gt047k3zmz, Sent query response back to proxy, proxyRequestId: 3, proxyId: [1:7519895861624572860:2230] 2025-06-25T14:36:38.166637Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: unknown state, TraceId: 01jykr9ww2fr6m45gt047k3zmz, Cleanup temp tables: 0 2025-06-25T14:36:38.166983Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=1&id=YjUyZjc1MGItMzI2NzA4YTUtNDc1NWIwMWEtM2ZlNzUxYWI=, ActorId: [1:7519895878804442461:2290], ActorState: unknown state, TraceId: 01jykr9ww2fr6m45gt047k3zmz, Session actor destroyed 2025-06-25T14:36:38.194066Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=1&id=YjExZDE0MjYtZWVmNjU2OWEtN2UyMzAzNzEtYTRkZTkxMTM=, ActorId: [1:7519895878804442459:2289], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:36:38.194118Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=1&id=YjExZDE0MjYtZWVmNjU2OWEtN2UyMzAzNzEtYTRkZTkxMTM=, ActorId: [1:7519895878804442459:2289], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] 
WorkloadServiceCleanup: 0 2025-06-25T14:36:38.194138Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=1&id=YjExZDE0MjYtZWVmNjU2OWEtN2UyMzAzNzEtYTRkZTkxMTM=, ActorId: [1:7519895878804442459:2289], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:36:38.194175Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=1&id=YjExZDE0MjYtZWVmNjU2OWEtN2UyMzAzNzEtYTRkZTkxMTM=, ActorId: [1:7519895878804442459:2289], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:36:38.194248Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=1&id=YjExZDE0MjYtZWVmNjU2OWEtN2UyMzAzNzEtYTRkZTkxMTM=, ActorId: [1:7519895878804442459:2289], ActorState: unknown state, Session actor destroyed 2025-06-25T14:36:39.989758Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895884560030536:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:39.989855Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: S ... Query SELECT SUM(CpuThreads) AS ThreadsCount, SUM(CpuThreads * (1.0 - CpuIdle)) AS TotalLoad FROM `.sys/nodes`; rpcActor: [8:7519896070420975128:2330] database: /Root databaseId: /Root pool id: default 2025-06-25T14:37:22.999988Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [8:7519896070420975127:2329], DatabaseId: /Root, PoolId: default, SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ= 2025-06-25T14:37:23.000037Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [8:7519896070420975131:2332], DatabaseId: /Root, PoolId: default, SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, Start pool fetching 2025-06-25T14:37:23.000059Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519896070420975132:2333], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-25T14:37:23.000262Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519896070420975130:2331], DatabaseId: /Root, PoolId: default, Pool info successfully fetched 2025-06-25T14:37:23.000369Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519896070420975132:2333], DatabaseId: /Root, PoolId: default, Pool info successfully fetched 2025-06-25T14:37:23.000393Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool default, DatabaseId: /Root 2025-06-25T14:37:23.000450Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:107: [WorkloadService] [TPoolResolverActor] ActorId: [8:7519896070420975131:2332], DatabaseId: /Root, PoolId: default, SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, Pool info successfully resolved 2025-06-25T14:37:23.000509Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:279: [WorkloadService] [Service] Successfully fetched pool default, DatabaseId: /Root, SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ= 
2025-06-25T14:37:23.000599Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:203: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7519896070420975070:2316], DatabaseId: /Root, PoolId: default, Received new request, worker id: [8:7519896070420975127:2329], session id: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ= 2025-06-25T14:37:23.000640Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:313: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7519896070420975070:2316], DatabaseId: /Root, PoolId: default, Reply continue success to [8:7519896070420975127:2329], session id: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, local in flight: 1 2025-06-25T14:37:23.000663Z node 8 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:290: [WorkloadService] [Service] Request placed into pool, DatabaseId: /Root, PoolId: default, SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ= 2025-06-25T14:37:23.000716Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:527: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ExecuteState, TraceId: 01jykrb95qfjfk0b4rrt3g5y2q, continue request, pool id: default 2025-06-25T14:37:23.000960Z node 8 :KQP_SESSION INFO: kqp_query_state.cpp:78: Scheme error, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], status: PathNotTable 2025-06-25T14:37:23.685076Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ExecuteState, TraceId: 01jykrb95qfjfk0b4rrt3g5y2q, ExecutePhyTx, tx: 0x000050C0003442D8 literal: 0 commit: 0 txCtx.DeferredEffects.size(): 0 2025-06-25T14:37:23.685131Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ExecuteState, TraceId: 01jykrb95qfjfk0b4rrt3g5y2q, Sending to Executer TraceId: 0 8 2025-06-25T14:37:23.685211Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ExecuteState, TraceId: 01jykrb95qfjfk0b4rrt3g5y2q, Created new KQP executer: [8:7519896074715942454:2329] isRollback: 0 2025-06-25T14:37:23.704218Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ExecuteState, TraceId: 01jykrb95qfjfk0b4rrt3g5y2q, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-06-25T14:37:23.704536Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ExecuteState, TraceId: 01jykrb95qfjfk0b4rrt3g5y2q, ExecutePhyTx, tx: 0x000050C00069DB58 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-25T14:37:23.705119Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ExecuteState, TraceId: 01jykrb95qfjfk0b4rrt3g5y2q, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 
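For reference, the statement executed by the TCpuLoadFetcherActor in the trace above is a plain aggregate over the `.sys/nodes` system view. The query text below is copied from the log entry above and is shown only as an illustrative sketch of what the fetcher runs (comments added; not an additional test step).

-- Cluster CPU load estimate used by the CPU load fetcher (query text as it appears in the trace above):
--   ThreadsCount = total CPU threads across all nodes
--   TotalLoad    = threads weighted by the busy share (1 - CpuIdle)
SELECT
    SUM(CpuThreads)                   AS ThreadsCount,
    SUM(CpuThreads * (1.0 - CpuIdle)) AS TotalLoad
FROM `.sys/nodes`;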
2025-06-25T14:37:23.705254Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ExecuteState, TraceId: 01jykrb95qfjfk0b4rrt3g5y2q, txInfo Status: Committed Kind: ReadOnly TotalDuration: 20.27 ServerDuration: 20.208 QueriesCount: 2 2025-06-25T14:37:23.705351Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ExecuteState, TraceId: 01jykrb95qfjfk0b4rrt3g5y2q, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-25T14:37:23.705418Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ExecuteState, TraceId: 01jykrb95qfjfk0b4rrt3g5y2q, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 1 2025-06-25T14:37:23.705771Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:233: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7519896070420975070:2316], DatabaseId: /Root, PoolId: default, Received cleanup request, worker id: [8:7519896070420975127:2329], session id: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, duration: 0.705298s, cpu consumed: 0.001836s 2025-06-25T14:37:23.705802Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:437: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7519896070420975070:2316], DatabaseId: /Root, PoolId: default, Reply cleanup success to [8:7519896070420975127:2329], session id: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, local in flight: 0 2025-06-25T14:37:23.705854Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: CleanupState, TraceId: 01jykrb95qfjfk0b4rrt3g5y2q, EndCleanup, isFinal: 0 2025-06-25T14:37:23.705909Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: CleanupState, TraceId: 01jykrb95qfjfk0b4rrt3g5y2q, Sent query response back to proxy, proxyRequestId: 6, proxyId: [8:7519896027471301182:2198] 2025-06-25T14:37:23.706233Z node 8 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:335: [WorkloadService] [Service] Request finished in pool, DatabaseId: /Root, PoolId: default, Duration: 0.705298s, CpuConsumed: 0.001836s, AdjustCpuQuota: 0 2025-06-25T14:37:23.706699Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TCpuLoadFetcherActor] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, TxId: 2025-06-25T14:37:23.706796Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TCpuLoadFetcherActor] Finish with SUCCESS, SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, TxId: 2025-06-25T14:37:23.707160Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ReadyState, Session 
closed due to explicit close event 2025-06-25T14:37:23.707193Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:37:23.707221Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:37:23.707245Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:37:23.707307Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=ZjkwODIwNTEtZjI1YmQ4MzItMWY1YjA1NDctOTIxMWU0MjQ=, ActorId: [8:7519896070420975127:2329], ActorState: unknown state, Session actor destroyed 2025-06-25T14:37:23.721951Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=8&id=NjNlN2YxMmItMzIzZGZjMTItMWQwN2UxZjEtNjg0NmYzZmU=, ActorId: [8:7519896066126007587:2302], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:37:23.721998Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=NjNlN2YxMmItMzIzZGZjMTItMWQwN2UxZjEtNjg0NmYzZmU=, ActorId: [8:7519896066126007587:2302], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:37:23.722039Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=NjNlN2YxMmItMzIzZGZjMTItMWQwN2UxZjEtNjg0NmYzZmU=, ActorId: [8:7519896066126007587:2302], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:37:23.722070Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=NjNlN2YxMmItMzIzZGZjMTItMWQwN2UxZjEtNjg0NmYzZmU=, ActorId: [8:7519896066126007587:2302], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:37:23.722139Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=NjNlN2YxMmItMzIzZGZjMTItMWQwN2UxZjEtNjg0NmYzZmU=, ActorId: [8:7519896066126007587:2302], ActorState: unknown state, Session actor destroyed >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull-useSink >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-24 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-71 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-72 >> TSchemeShardUserAttrsTest::Boot >> TSchemeShardUserAttrsTest::MkDir [GOOD] >> TConsoleTxProcessorTests::TestTxProcessorRandom [GOOD] >> TImmediateControlsConfiguratorTests::TestControlsInitialization ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::MkDir [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 
72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:37:29.855101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:37:29.855201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:37:29.855236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:37:29.855284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:37:29.855334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:37:29.855361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:37:29.855437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:37:29.855520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:37:29.856196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:37:29.856542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:37:29.985114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:37:29.985175Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:30.009960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:37:30.010196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:37:30.010329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:37:30.064917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:37:30.065238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:37:30.065846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:37:30.066038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:37:30.080725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 
2025-06-25T14:37:30.080933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:37:30.082007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:37:30.082067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:37:30.082245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:37:30.082293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:37:30.082348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:37:30.082431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:37:30.097379Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:37:30.255474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:37:30.255824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:30.256033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:37:30.256093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:37:30.257129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:37:30.257216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:37:30.259809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:37:30.259992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:37:30.260213Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:30.260263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:37:30.260303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:37:30.260356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:37:30.262366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:30.262427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:37:30.262487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:37:30.264534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:30.264582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:30.264625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:37:30.264694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:37:30.268111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:37:30.270293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:37:30.270484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:37:30.271512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:37:30.271672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:37:30.271723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: 
NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:37:30.272002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:37:30.272070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:37:30.272239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:37:30.272390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:37:30.274555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:37:30.274605Z node 1 :FLAT_TX_SCHEMESHARD ... : 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:37:30.429565Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 
2025-06-25T14:37:30.429747Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 181us result status StatusSuccess 2025-06-25T14:37:30.430087Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 } ChildrenExist: true } Children { Name: "SubDirA" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrA1" Value: "ValA1" } UserAttributes { Key: "AttrA2" Value: "ValA2" } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:37:30.430692Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:37:30.430894Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirB" took 180us result status StatusSuccess 2025-06-25T14:37:30.431266Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirB" PathDescription { Self { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 
72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrB1" Value: "ValB1" } UserAttributes { Key: "AttrB2" Value: "ValB2" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:37:30.440981Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/SubDirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:37:30.441282Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/SubDirA" took 318us result status StatusSuccess 2025-06-25T14:37:30.441707Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/SubDirA" PathDescription { Self { Name: "SubDirA" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "DirB" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 103 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrAA1" Value: "ValAA1" } 
UserAttributes { Key: "AttrAA2" Value: "ValAA2" } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:37:30.442406Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/SubDirA/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:37:30.442620Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/SubDirA/DirB" took 218us result status StatusSuccess 2025-06-25T14:37:30.442960Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/SubDirA/DirB" PathDescription { Self { Name: "DirB" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 103 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrAB1" Value: "ValAB1" } UserAttributes { Key: "AttrAB2" Value: "ValAB2" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardUserAttrsTest::UserConditionsAtAlter >> TSchemeShardUserAttrsTest::Boot [GOOD] >> KqpJoinOrder::ShuffleEliminationOneJoin [GOOD] >> TImmediateControlsConfiguratorTests::TestControlsInitialization [GOOD] >> TImmediateControlsConfiguratorTests::TestModifiedControls >> PrivateApi::PingTask [GOOD] >> PrivateApi::GetTask >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-59 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-60 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::Boot [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:37:31.077578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: 
BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:37:31.077665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:37:31.077703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:37:31.077741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:37:31.077790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:37:31.077817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:37:31.077894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:37:31.077968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:37:31.078728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:37:31.079070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:37:31.187246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:37:31.187310Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:31.199650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:37:31.199903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:37:31.200063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:37:31.216172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:37:31.216489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:37:31.217199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:37:31.217406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:37:31.220331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:37:31.220527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:37:31.221722Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:37:31.221787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:37:31.221953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:37:31.222005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:37:31.222087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:37:31.222175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:37:31.229591Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:37:31.428659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:37:31.434700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:31.435053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:37:31.435114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:37:31.435468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:37:31.435575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:37:31.443367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:37:31.443610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:37:31.443868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:31.443924Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:37:31.443976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:37:31.444016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:37:31.453245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:31.453337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:37:31.453388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:37:31.461528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:31.461610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:31.461671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:37:31.461773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:37:31.481751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:37:31.494546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:37:31.494862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:37:31.495953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:37:31.496149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:37:31.496205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:37:31.496579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change 
state for txid 1:0 128 -> 240 2025-06-25T14:37:31.496638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:37:31.496824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:37:31.496946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:37:31.509777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:37:31.509846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:37:31.510116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:37:31.510166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:37:31.510513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:31.510568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:37:31.510679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:37:31.510717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:37:31.510772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:37:31.510835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:37:31.510878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:37:31.510919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:37:31.510961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:37:31.510994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:37:31.511070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:37:31.511110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:37:31.511155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: 
Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:37:31.513219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:37:31.513373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:37:31.513441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:37:31.513486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:37:31.513538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:37:31.513674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:37:31.551227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:37:31.551857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::Plan2SvgBad [FAIL] Test command err: 2025-06-25T14:34:55.177508Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895441683092554:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:34:55.177560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:34:55.729347Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:55.734414Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895441683092327:2080] 1750862095114780 != 1750862095114783 2025-06-25T14:34:55.759912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:55.760184Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:55.773915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28696, node 1 2025-06-25T14:34:56.041919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:56.041951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T14:34:56.042198Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:56.042377Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:34:56.154186Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23239 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:34:56.560834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:34:56.586775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:34:56.600975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:34:56.605731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T14:34:56.611129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-25T14:34:59.543630Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895458862962205:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:59.543739Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895458862962200:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:59.543806Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:34:59.547910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:34:59.565514Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895458862962214:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-25T14:34:59.644613Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895458862962265:2361] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:00.173965Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895441683092554:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:00.174030Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:35:02.193706Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895470844980937:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:02.193764Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:02.370867Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:02.371891Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895470844980916:2080] 1750862102193022 != 1750862102193025 2025-06-25T14:35:02.390531Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:02.390623Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:02.393255Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19686, node 2 2025-06-25T14:35:02.553069Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:02.553098Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:02.553108Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:02.553255Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12902 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:02.919623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:02.946411Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-25T14:35:02.949530Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-25T14:35:03.207806Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:35:05.985489Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895483729883483:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, ... 
0210Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:26.610260Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:26.701852Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:26.701903Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:26.780053Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:26.780102Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:26.900110Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:26.900164Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:27.013721Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:27.013781Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:27.107474Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:27.107524Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:27.220096Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:27.220146Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:27.403525Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:27.403578Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:27.585376Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:27.585422Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:27.792289Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:27.792354Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:27.979802Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:27.979856Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:28.076067Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:28.076116Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:28.167668Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:28.167719Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-25T14:35:28.288112Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:28.288171Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult 
success 2025-06-25T14:35:28.297603Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:28.299718Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:28.322445Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:29.900367Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-25T14:35:29.900424Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success assertion failed at ydb/core/viewer/viewer_ut.cpp:1914, virtual void NTestSuiteViewer::TTestCaseQueryExecuteScript::Execute_(NUnitTest::TTestContext &): (json["metadata"].GetMap().at("exec_stats").GetMap().contains("process_cpu_time_us")) {"metadata":{"result_sets_meta":[{"finished":true,"columns":[{"name":"Key","type":{"optional_type":{"item":{"type_id":"UINT64"}}}},{"name":"Value","type":{"optional_type":{"item":{"type_id":"STRING"}}}}],"number_rows":"15"}],"execution_id":"2b5b97e4-504c161a-b2ebdc8c-7fc4adc1","exec_stats":{"query_plan":"{}"},"script_content":{"text":"SELECT * FROM `/Root/Test`;"},"exec_mode":"EXEC_MODE_EXECUTE","exec_status":"EXEC_STATUS_RUNNING","@type":"type.googleapis.com/Ydb.Query.ExecuteScriptMetadata"},"status":"SUCCESS","id":"ydb://scriptexec/9?id=2b5b97e4-504c161a-b2ebdc8c-7fc4adc1"} TBackTrace::Capture()+28 (0x1995C6EC) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x19E181A0) NTestSuiteViewer::TTestCaseQueryExecuteScript::Execute_(NUnitTest::TTestContext&)+11601 (0x194FBED1) std::__y1::__function::__func, void ()>::operator()()+280 (0x19511C58) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x19E4F386) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19E1ED29) NTestSuiteViewer::TCurrentTest::Execute()+1204 (0x19510B04) NUnitTest::TTestFactory::Execute()+2438 (0x19E205F6) NUnitTest::RunMain(int, char**)+5213 (0x19E498FD) ??+0 (0x7F0AFB745D90) __libc_start_main+128 (0x7F0AFB745E40) _start+41 (0x16D40029) 2025-06-25T14:35:39.011516Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519895624613630805:2249];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:39.011894Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: 
SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:39.207772Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:39.207888Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:39.209444Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:39.227910Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22170, node 6 2025-06-25T14:35:39.457183Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:39.457214Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:39.457228Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:39.457400Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:35:39.832537Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4794 2025-06-25T14:35:43.824450Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519895624613630805:2249];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:43.824564Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; (TSystemError) (Error 11: Resource temporarily unavailable) util/network/socket.cpp:910: can not read from socket input stream 2025-06-25T14:35:52.858508Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519895683873377395:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:52.858617Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:53.127426Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519895683873377214:2080] 1750862152649923 != 1750862152649926 2025-06-25T14:35:53.127710Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:53.168272Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:53.168640Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:53.173949Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9071, node 7 2025-06-25T14:35:53.385332Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:53.385359Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
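The assertion at viewer_ut.cpp:1914 quoted above fails because the returned script metadata still reports exec_status EXEC_STATUS_RUNNING and its exec_stats map carries only "query_plan", so the expected "process_cpu_time_us" key is absent. Below is a minimal sketch of that check, assuming it is compiled inside the ydb source tree against library/cpp/json; the payload is a trimmed copy of the JSON from the failure message, and main() plus the variable names are illustrative rather than part of the test itself.

// Sketch only: reproduces the contains("process_cpu_time_us") check that failed.
#include <library/cpp/json/json_reader.h>
#include <util/generic/string.h>
#include <util/stream/output.h>

int main() {
    // Trimmed copy of the metadata returned while the script was still running.
    const TString payload =
        R"({"metadata":{"exec_stats":{"query_plan":"{}"},)"
        R"("exec_status":"EXEC_STATUS_RUNNING"},"status":"SUCCESS"})";

    NJson::TJsonValue json;
    NJson::ReadJsonTree(payload, &json, /* throwOnError = */ true);

    // Same lookup chain as the assertion in the unit test.
    const auto& execStats = json["metadata"].GetMap().at("exec_stats").GetMap();
    const bool hasCpuTime = execStats.contains("process_cpu_time_us");

    Cout << "process_cpu_time_us present: " << (hasCpuTime ? "yes" : "no") << Endl;
    return hasCpuTime ? 0 : 1;  // exits 1 for this payload, mirroring the test failure
}
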
2025-06-25T14:35:53.385371Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:53.385587Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:35:53.754284Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31308 2025-06-25T14:35:57.714813Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519895683873377395:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:57.714921Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; (TSystemError) (Error 11: Resource temporarily unavailable) util/network/socket.cpp:910: can not read from socket input stream |81.3%| [TA] $(B)/ydb/core/tablet_flat/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardUserAttrsTest::UserConditionsAtAlter [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast-useSink >> KqpPg::CopyTableSerialColumns+useSink [GOOD] >> KqpPg::CopyTableSerialColumns-useSink |81.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys >> Yq_1::CreateQuery_With_Idempotency [GOOD] >> Yq_1::CreateQuery_Without_Connection ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::UserConditionsAtAlter [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:37:32.517506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:37:32.517588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:37:32.517634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:37:32.517679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:37:32.517725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:37:32.517770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:37:32.517821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-25T14:37:32.517886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:37:32.518608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:37:32.518940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:37:32.616173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:37:32.616266Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:32.635822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:37:32.636339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:37:32.636519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:37:32.643016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:37:32.643373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:37:32.644105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:37:32.644409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:37:32.648339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:37:32.648535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:37:32.649666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:37:32.649725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:37:32.649855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:37:32.649905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:37:32.649947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:37:32.650031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:37:32.656664Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 
2025-06-25T14:37:32.823124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:37:32.823362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:32.823548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:37:32.823593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:37:32.823822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:37:32.823906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:37:32.832722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:37:32.832940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:37:32.833140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:32.833204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:37:32.833268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:37:32.833351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:37:32.835650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:32.835721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:37:32.835781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:37:32.841135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:32.841205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:37:32.841273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:37:32.841339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:37:32.866009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:37:32.868414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:37:32.868624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:37:32.869635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:37:32.869771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:37:32.869818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:37:32.870137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:37:32.870190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:37:32.870350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:37:32.870449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:37:32.872823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:37:32.872867Z node 1 :FLAT_TX_SCHEMESHARD ... 
{ PathId: 2 PathVersion: 4 } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:37:33.025945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_user_attrs.cpp:26: TAlterUserAttrs Propose, path: /MyRoot/DirA, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:37:33.026073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-25T14:37:33.026125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 103:0 type: TxAlterUserAttributes target path: [OwnerId: 72057594046678944, LocalPathId: 2] source path: 2025-06-25T14:37:33.026257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:37:33.026329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 103:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-25T14:37:33.028587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:37:33.028804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAccepted, operation: ALTER USER ATTRIBUTES, path: /MyRoot/DirA 2025-06-25T14:37:33.029025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:37:33.029082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_user_attrs.cpp:97: TAlterUserAttrs ProgressState, opId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:37:33.029158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 103 ready parts: 1/1 2025-06-25T14:37:33.029278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 103 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:37:33.031346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816 2025-06-25T14:37:33.031497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 2025-06-25T14:37:33.031838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 
72057594046678944 2025-06-25T14:37:33.031956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:37:33.032041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_user_attrs.cpp:114: TAlterUserAttrs HandleReply TEvOperationPlan, opId: 103:0, stepId:5000004, at schemeshard: 72057594046678944 2025-06-25T14:37:33.032246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:37:33.032283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:37:33.032335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:37:33.032380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:37:33.032454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:37:33.032522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-25T14:37:33.032579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:37:33.032615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:37:33.032652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:37:33.032685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:37:33.032740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:37:33.032779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 103, publications: 1, subscribers: 0 2025-06-25T14:37:33.032811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-25T14:37:33.051615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:37:33.051712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:37:33.051911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:37:33.051952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at 
schemeshard: 72057594046678944, txId: 103, path id: 2 FAKE_COORDINATOR: Erasing txId 103 2025-06-25T14:37:33.052553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:37:33.052654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:37:33.052693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:37:33.052752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T14:37:33.052809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:37:33.052912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-25T14:37:33.063093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:37:33.063465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:37:33.063515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:37:33.063906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:37:33.063997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:37:33.064043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:354:2343] TestWaitNotification: OK eventTxId 103 2025-06-25T14:37:33.064627Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:37:33.064864Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 238us result status StatusSuccess 2025-06-25T14:37:33.065226Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" 
PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 3 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrA2" Value: "ValA2" } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TConsoleTests::TestRemoveAttributesExtSubdomain [GOOD] >> KqpPg::CreateUniqPgColumn-useSink [GOOD] >> KqpPg::CreateUniqComplexPgColumn+useSink >> TImmediateControlsConfiguratorTests::TestModifiedControls [GOOD] >> TImmediateControlsConfiguratorTests::TestResetToDefault >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnUsage [GOOD] >> Yq_1::DescribeConnection [GOOD] >> TImmediateControlsConfiguratorTests::TestResetToDefault [GOOD] >> Viewer::SimpleFeatureFlags >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull-useSink [GOOD] >> Yq_1::DeleteQuery >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull+useSink >> KqpPg::DropTablePg [GOOD] >> TImmediateControlsConfiguratorTests::TestMaxLimit >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorksNewApi [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-72 [GOOD] >> KqpPg::DropTablePgMultiple >> TImmediateControlsConfiguratorTests::TestMaxLimit [GOOD] >> TImmediateControlsConfiguratorTests::TestDynamicMap >> Yq_1::ListConnections [GOOD] >> Yq_1::ListConnectionsOnEmptyConnectionsTable >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn [GOOD] >> ResourcePoolsSysView::TestResourcePoolsSysViewFilters [GOOD] >> Yq_1::Create_And_Modify_The_Same_Connection [GOOD] |81.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/tools/kqprun/kqprun |81.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |81.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/kqprun ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationOneJoin [GOOD] Test command err: Trying to start YDB, gRPC: 30675, MsgBus: 14328 2025-06-25T14:35:52.637520Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895685535135421:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:52.638174Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/000eb2/r3tmp/tmpga8JsY/pdisk_1.dat 2025-06-25T14:35:53.072962Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895685535135311:2080] 1750862152627365 != 1750862152627368 2025-06-25T14:35:53.087385Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:53.088911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:53.089011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:53.095304Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30675, node 1 2025-06-25T14:35:53.221235Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:53.221264Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:53.221277Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:53.221391Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14328 2025-06-25T14:35:53.658176Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14328 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:54.016973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:54.039512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:35:56.476072Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895702715005136:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.476190Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.476497Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895702715005148:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.479741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:56.494130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:35:56.494306Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895702715005150:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:35:56.573366Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895702715005201:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:56.874861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:35:57.089352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519895702715005446:2310];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:35:57.089548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519895702715005446:2310];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:35:57.089772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519895702715005446:2310];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:35:57.089866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519895702715005446:2310];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:35:57.089952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519895702715005446:2310];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:35:57.090038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519895702715005446:2310];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:35:57.090135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519895702715005446:2310];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:35:57.090232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519895702715005446:2310];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:35:57.090314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519895702715005446:2310];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:35:57.090394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037894;self_id=[1:7519895702715005446:2310];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:35:57.090481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519895702715005446:2310];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:35:57.099473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519895707009972751:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:35:57.099536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519895707009972751:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:35:57.099763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519895707009972751:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:35:57.108774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519895707009972751:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:35:57.108958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519895707009972751:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:35:57.109116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519895707009972751:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:35:57.109219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519895707009972751:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:35:57.109337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519895707009972751:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:35:57.109488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=7207518622 ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.239195Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.239936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.242239Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.242762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.247855Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.252211Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.252609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.256948Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.257543Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.257543Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.263377Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.263437Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.264097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.264537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-25T14:37:17.269984Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.270993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.273253Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.274350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.279195Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.283081Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.283906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.284691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.293116Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.293432Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.294029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.294034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.299457Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.299458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.300248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.300248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.305915Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.305914Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.306645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.306854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.311869Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.313474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.315730Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.317334Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.318808Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.322539Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.424109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039270;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:17.429574Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039270;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:17.582516Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykra2aq0pm5c11psg8mb6ze", SessionId: 
ydb://session/3?node_id=1&id=MmM3MDcwNTQtY2IyOWIzYzMtNDY0ZTU4YjktNzdhM2EwMDM=, Slow query, duration: 34.358673s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:37:17.897639Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:37:17.898089Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:37:17.898208Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; |81.4%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/kqprun ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TConsoleTests::TestRemoveAttributesExtSubdomain [GOOD] Test command err: 2025-06-25T14:35:20.340649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:20.340732Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:20.388481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:24.104912Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:24.104986Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:24.174576Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:25.298031Z node 12 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:35:25.298506Z node 12 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000f47/r3tmp/tmpg5nVCM/pdisk_1.dat": unknown reason, errno# 0. 
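[editor's note] For readability only: the DDL quoted (with \n escapes) in the KQP_SLOW_LOG "Slow query" entry above, expanded verbatim from the logged query text. This is a reading aid; the log line itself is reproduced unchanged, and no statements beyond those quoted in the entry are added.

    CREATE TABLE t1 (
        id1 Int32 NOT NULL,
        PRIMARY KEY (id1)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t2 (
        id2 Int64 NOT NULL,
        t1_id1 Int64 NOT NULL,
        -- random_field2 Int32
        PRIMARY KEY (id2)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t3 (
        id3 Int16 NOT NULL,
        -- random_field3 Int32
        PRIMARY KEY (id3)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);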
PDiskId# 1000 2025-06-25T14:35:25.299072Z node 12 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000f47/r3tmp/tmpg5nVCM/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000f47/r3tmp/tmpg5nVCM/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 15593240644466973760 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:35:27.655939Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:27.656011Z node 19 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:27.717564Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:29.150638Z node 19 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 19 Type# 268639257 2025-06-25T14:35:29.210203Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:1, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:35:29.412921Z node 23 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:35:29.413417Z node 23 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000f47/r3tmp/tmpxL73Ap/pdisk_1.dat": unknown reason, errno# 0. 
PDiskId# 1000 2025-06-25T14:35:29.413632Z node 23 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000f47/r3tmp/tmpxL73Ap/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000f47/r3tmp/tmpxL73Ap/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 15226366310242609380 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:35:29.447392Z node 24 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:35:29.447907Z node 24 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000f47/r3tmp/tmpxL73Ap/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:35:29.448075Z node 24 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000f47/r3tmp/tmpxL73Ap/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000f47/r3tmp/tmpxL73Ap/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 13372573840544061406 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:35:29.567444Z node 21 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:35:29.567937Z node 21 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000f47/r3tmp/tmpxL73Ap/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:35:29.568390Z node 21 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000f47/r3tmp/tmpxL73Ap/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000f47/r3tmp/tmpxL73Ap/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 11311720304708015978 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:35:29.679028Z node 20 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:35:29.679517Z node 20 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_imp ... 
2057594046578944, txId: 281474976715661, path id: [OwnerId: 72057594046578944, LocalPathId: 3] 2025-06-25T14:37:31.678014Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046578944 2025-06-25T14:37:31.678054Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [163:714:2237], at schemeshard: 72057594046578944, txId: 281474976715661, path id: 3 2025-06-25T14:37:31.678226Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5906: Handle TEvUpdateTenantSchemeShard, at schemeshard: 72075186233409546, msg: TabletId: 72057594046578944 Generation: 2 UserAttributes { Key: "name1" Value: "value1" } UserAttributesVersion: 3 2025-06-25T14:37:31.678315Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:79: TTxUpdateTenant DoExecute, msg: TabletId: 72057594046578944 Generation: 2 UserAttributes { Key: "name1" Value: "value1" } UserAttributesVersion: 3, at schemeshard: 72075186233409546 2025-06-25T14:37:31.678629Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:590: Cannot publish paths for unknown operation id#0 2025-06-25T14:37:31.679206Z node 163 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-25T14:37:31.679299Z node 163 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:16] persistent tx 281474976715661 for mediator 72057594046382081 tablet 72057594046578944 removed=1 2025-06-25T14:37:31.679334Z node 163 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:16] persistent tx 281474976715661 for mediator 72057594046382081 acknowledged 2025-06-25T14:37:31.679366Z node 163 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:16] persistent tx 281474976715661 acknowledged 2025-06-25T14:37:31.681089Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046578944, msg: Owner: 72057594046578944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046578944, cookie: 281474976715661 2025-06-25T14:37:31.681191Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046578944, msg: Owner: 72057594046578944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046578944, cookie: 281474976715661 2025-06-25T14:37:31.681244Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046578944, txId: 281474976715661 2025-06-25T14:37:31.681345Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046578944, txId: 281474976715661, pathId: [OwnerId: 72057594046578944, LocalPathId: 3], version: 7 2025-06-25T14:37:31.681467Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046578944, LocalPathId: 3] was 11 2025-06-25T14:37:31.681618Z node 163 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046578944, txId: 281474976715661, subscribers: 1 2025-06-25T14:37:31.681714Z node 163 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046578944, to actorId: [163:1988:2389] 2025-06-25T14:37:31.689132Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5893: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046578944, msg: DomainSchemeShard: 72057594046578944 DomainPathId: 3 TabletID: 72075186233409546 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 3 UserAttributesVersion: 3 TenantHive: 18446744073709551615 TenantSysViewProcessor: 72075186233409553 TenantRootACL: "" TenantStatisticsAggregator: 72075186233409554 TenantGraphShard: 18446744073709551615 2025-06-25T14:37:31.689229Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046578944, LocalPathId: 3], at schemeshard: 72057594046578944 2025-06-25T14:37:31.689316Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046578944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046578944, LocalPathId: 3], Generation: 2, ActorId:[163:1434:2611], EffectiveACLVersion: 0, SubdomainVersion: 3, UserAttributesVersion: 3, TenantHive: 18446744073709551615, TenantSysViewProcessor: 72075186233409553, TenantStatisticsAggregator: 72075186233409554, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 3, tenantHive: 18446744073709551615, tenantSysViewProcessor: 72075186233409553, at schemeshard: 72057594046578944 2025-06-25T14:37:31.689446Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-25T14:37:31.689482Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-25T14:37:31.689645Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-25T14:37:31.689678Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [163:1733:2828], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-25T14:37:31.691664Z node 163 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72075186233409546, cookie: 0 2025-06-25T14:37:31.698835Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046578944, cookie: 281474976715661 2025-06-25T14:37:31.698949Z node 163 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046578944, LocalPathId: 3], at schemeshard: 72057594046578944 Reply: Status: StatusSuccess Path: "/dc-1/users/tenant-1" PathDescription { Self { Name: "tenant-1" PathId: 3 SchemeshardId: 72057594046578944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976715657 CreateStep: 1000 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: 
"" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 3 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 10 Coordinators: 72075186233409547 Coordinators: 72075186233409548 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 Mediators: 72075186233409551 Mediators: 72075186233409552 SchemeShard: 72075186233409546 SysViewProcessor: 72075186233409553 StatisticsAggregator: 72075186233409554 } DomainKey { SchemeShard: 72057594046578944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 9 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046578944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "name1" Value: "value1" } } PathId: 3 PathOwnerId: 72057594046578944 Reply: Status: StatusSuccess Path: "/dc-1/users/tenant-1" PathDescription { Self { Name: "tenant-1" PathId: 3 SchemeshardId: 72057594046578944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976715657 CreateStep: 1000 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 3 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046578944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 10 Coordinators: 72075186233409547 Coordinators: 72075186233409548 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 Mediators: 72075186233409551 Mediators: 72075186233409552 SchemeShard: 72075186233409546 SysViewProcessor: 72075186233409553 StatisticsAggregator: 72075186233409554 } DomainKey { SchemeShard: 72057594046578944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 9 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046578944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 
MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "name1" Value: "value1" } } PathId: 3 PathOwnerId: 72057594046578944 >> YdbIndexTable::MultiShardTableTwoIndexes [GOOD] >> TImmediateControlsConfiguratorTests::TestDynamicMap [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! 
new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! 
new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:87:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:91:2057] recipient: [8:89:2118] Leader for TabletID 72057594037927937 is [8:92:2119] sender: [8:93:2057] recipient: [8:89:2118] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:92:2119] Leader for TabletID 72057594037927937 is [8:92:2119] sender: [8:178:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:87:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:91:2057] recipient: [9:89:2118] Leader for TabletID 72057594037927937 is [9:92:2119] sender: [9:93:2057] recipient: [9:89:2118] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! 
new actor is[9:92:2119] Leader for TabletID 72057594037927937 is [9:92:2119] sender: [9:178:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:88:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:92:2057] recipient: [10:90:2118] Leader for TabletID 72057594037927937 is [10:93:2119] sender: [10:94:2057] recipient: [10:90:2118] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:93:2119] Leader for TabletID 72057594037927937 is [10:93:2119] sender: [10:179:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:91:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:94:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:95:2057] recipient: [11:93:2121] Leader for TabletID 72057594037927937 is [11:96:2122] sender: [11:97:2057] recipient: [11:93:2121] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:96:2122] Leader for TabletID 72057594037927937 is [11:96:2122] sender: [11:182:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 057594037927937 is [25:59:2099] sender: [25:106:2057] recipient: [25:38:2085] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:109:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:110:2057] recipient: [25:108:2131] Leader for TabletID 72057594037927937 is [25:111:2132] sender: [25:112:2057] recipient: [25:108:2131] !Reboot 72057594037927937 (actor [25:59:2099]) rebooted! !Reboot 72057594037927937 (actor [25:59:2099]) tablet resolver refreshed! 
new actor is[25:111:2132] Leader for TabletID 72057594037927937 is [25:111:2132] sender: [25:197:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:57:2057] recipient: [26:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:57:2057] recipient: [26:54:2097] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:60:2057] recipient: [26:54:2097] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:77:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:57:2057] recipient: [27:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:57:2057] recipient: [27:53:2097] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:60:2057] recipient: [27:53:2097] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:77:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:57:2057] recipient: [28:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:57:2057] recipient: [28:53:2097] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:60:2057] recipient: [28:53:2097] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:77:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:79:2057] recipient: [28:38:2085] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:82:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:83:2057] recipient: [28:81:2112] Leader for TabletID 72057594037927937 is [28:84:2113] sender: [28:85:2057] recipient: [28:81:2112] !Reboot 72057594037927937 (actor [28:59:2099]) rebooted! !Reboot 72057594037927937 (actor [28:59:2099]) tablet resolver refreshed! new actor is[28:84:2113] Leader for TabletID 72057594037927937 is [28:84:2113] sender: [28:170:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:57:2057] recipient: [29:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:57:2057] recipient: [29:54:2097] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:60:2057] recipient: [29:54:2097] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:77:2057] recipient: [29:14:2061] !Reboot 72057594037927937 (actor [29:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:79:2057] recipient: [29:38:2085] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:82:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:83:2057] recipient: [29:81:2112] Leader for TabletID 72057594037927937 is [29:84:2113] sender: [29:85:2057] recipient: [29:81:2112] !Reboot 72057594037927937 (actor [29:59:2099]) rebooted! !Reboot 72057594037927937 (actor [29:59:2099]) tablet resolver refreshed! 
new actor is[29:84:2113] Leader for TabletID 72057594037927937 is [29:84:2113] sender: [29:170:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:60:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:77:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:80:2057] recipient: [30:38:2085] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:83:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:84:2057] recipient: [30:82:2112] Leader for TabletID 72057594037927937 is [30:85:2113] sender: [30:86:2057] recipient: [30:82:2112] !Reboot 72057594037927937 (actor [30:59:2099]) rebooted! !Reboot 72057594037927937 (actor [30:59:2099]) tablet resolver refreshed! new actor is[30:85:2113] Leader for TabletID 72057594037927937 is [30:85:2113] sender: [30:171:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:57:2057] recipient: [31:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:57:2057] recipient: [31:53:2097] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:60:2057] recipient: [31:53:2097] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:77:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:83:2057] recipient: [31:38:2085] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:86:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:87:2057] recipient: [31:85:2115] Leader for TabletID 72057594037927937 is [31:88:2116] sender: [31:89:2057] recipient: [31:85:2115] !Reboot 72057594037927937 (actor [31:59:2099]) rebooted! !Reboot 72057594037927937 (actor [31:59:2099]) tablet resolver refreshed! new actor is[31:88:2116] Leader for TabletID 72057594037927937 is [31:88:2116] sender: [31:174:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:57:2057] recipient: [32:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:57:2057] recipient: [32:53:2097] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:60:2057] recipient: [32:53:2097] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:77:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:83:2057] recipient: [32:38:2085] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:86:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:87:2057] recipient: [32:85:2115] Leader for TabletID 72057594037927937 is [32:88:2116] sender: [32:89:2057] recipient: [32:85:2115] !Reboot 72057594037927937 (actor [32:59:2099]) rebooted! !Reboot 72057594037927937 (actor [32:59:2099]) tablet resolver refreshed! 
new actor is[32:88:2116] Leader for TabletID 72057594037927937 is [32:88:2116] sender: [32:174:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:57:2057] recipient: [33:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:57:2057] recipient: [33:53:2097] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:60:2057] recipient: [33:53:2097] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:77:2057] recipient: [33:14:2061] !Reboot 72057594037927937 (actor [33:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:84:2057] recipient: [33:38:2085] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:87:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:88:2057] recipient: [33:86:2115] Leader for TabletID 72057594037927937 is [33:89:2116] sender: [33:90:2057] recipient: [33:86:2115] !Reboot 72057594037927937 (actor [33:59:2099]) rebooted! !Reboot 72057594037927937 (actor [33:59:2099]) tablet resolver refreshed! new actor is[33:89:2116] Leader for TabletID 72057594037927937 is [33:89:2116] sender: [33:107:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:57:2057] recipient: [34:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:57:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:60:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:77:2057] recipient: [34:14:2061] !Reboot 72057594037927937 (actor [34:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:86:2057] recipient: [34:38:2085] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:89:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:90:2057] recipient: [34:88:2117] Leader for TabletID 72057594037927937 is [34:91:2118] sender: [34:92:2057] recipient: [34:88:2117] !Reboot 72057594037927937 (actor [34:59:2099]) rebooted! !Reboot 72057594037927937 (actor [34:59:2099]) tablet resolver refreshed! new actor is[34:91:2118] Leader for TabletID 72057594037927937 is [34:91:2118] sender: [34:177:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:57:2057] recipient: [35:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:57:2057] recipient: [35:53:2097] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:60:2057] recipient: [35:53:2097] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:77:2057] recipient: [35:14:2061] !Reboot 72057594037927937 (actor [35:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:86:2057] recipient: [35:38:2085] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:89:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:90:2057] recipient: [35:88:2117] Leader for TabletID 72057594037927937 is [35:91:2118] sender: [35:92:2057] recipient: [35:88:2117] !Reboot 72057594037927937 (actor [35:59:2099]) rebooted! !Reboot 72057594037927937 (actor [35:59:2099]) tablet resolver refreshed! 
new actor is[35:91:2118] Leader for TabletID 72057594037927937 is [35:91:2118] sender: [35:177:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:57:2057] recipient: [36:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:57:2057] recipient: [36:53:2097] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:60:2057] recipient: [36:53:2097] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:77:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:87:2057] recipient: [36:38:2085] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:90:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:91:2057] recipient: [36:89:2117] Leader for TabletID 72057594037927937 is [36:92:2118] sender: [36:93:2057] recipient: [36:89:2117] !Reboot 72057594037927937 (actor [36:59:2099]) rebooted! !Reboot 72057594037927937 (actor [36:59:2099]) tablet resolver refreshed! new actor is[36:92:2118] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:57:2057] recipient: [37:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:57:2057] recipient: [37:54:2097] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:60:2057] recipient: [37:54:2097] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:77:2057] recipient: [37:14:2061] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-60 [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast-useSink [GOOD] |81.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |81.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |81.4%| [LD] {RESULT} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |81.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication >> KqpPg::TypeCoercionInsert+useSink [GOOD] >> KqpPg::TableSelect+useSink >> TestProgram::YqlKernel >> KqpPg::CopyTableSerialColumns-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull+useSink [GOOD] >> KqpPg::TypeCoercionInsert-useSink [GOOD] >> OlapEstimationRowsCorrectness::TPCDS78 [GOOD] >> Yq_1::DescribeJob [GOOD] >> KqpPg::DropTablePgMultiple [GOOD] >> KqpWorkloadService::TestLessConcurrentQueryLimit [GOOD] >> Yq_1::CreateConnections_With_Idempotency [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull-useSink >> KqpPg::CreateIndex >> KqpPg::InsertValuesFromTableWithDefaultBool+useSink >> KqpPg::V1CreateTable >> Yq_1::DescribeQuery >> KqpWorkloadService::TestCpuLoadThreshold >> KqpPg::DropTableIfExists >> TestProgram::YqlKernel [GOOD] >> KqpPg::CreateUniqComplexPgColumn+useSink [GOOD] >> KqpPg::CreateUniqComplexPgColumn-useSink |81.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |81.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |81.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TImmediateControlsConfiguratorTests::TestDynamicMap [GOOD] Test command err: 2025-06-25T14:35:38.691734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: 
Cannot subscribe to console configs 2025-06-25T14:35:38.691797Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:38.744086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:40.442074Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:35:40.493934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:1, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:35:40.678447Z node 4 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:35:40.678965Z node 4 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:35:40.679592Z node 4 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 17997059103849416535 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:35:40.721116Z node 3 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:35:40.721664Z node 3 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:35:40.721871Z node 3 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 17690882792072425278 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:35:40.838496Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:35:40.839007Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:35:40.839213Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 129093749999298474 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:35:40.854202Z node 2 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000000:_:0:0:0]: (2147483648) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1000 bootstrapped to the 
StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:35:40.924277Z node 6 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:35:40.924837Z node 6 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:35:40.925067Z node 6 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000ecc/r3tmp/tmpzGyIYZ/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 8299202909258922468 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpe ... istered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TxLimitControls.PerRequestDataSizeLimit was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TxLimitControls.PerShardReadSizeLimit was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TxLimitControls.PerShardIncomingReadSetSizeLimit was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TxLimitControls.DefaultTimeoutMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control CoordinatorControls.EnableLeaderLeases was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control CoordinatorControls.MinLeaderLeaseDurationUs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control CoordinatorControls.VolatilePlanLeaseMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control CoordinatorControls.PlanAheadTimeShiftMs was registered before TImmediateControlsConfigurator creation. 
A default value may have been used before it was configured. WARNING: immediate control CoordinatorControls.MinPlanResolutionMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control SchemeShardControls.ForceShardSplitDataSize was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control SchemeShardControls.DisableForceShardSplit was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TCMallocControls.ProfileSamplingRate was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TCMallocControls.GuardedSamplingRate was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TCMallocControls.PageCacheTargetSize was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TCMallocControls.PageCacheReleaseRate was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.EnableLocalSyncLogDataCutting was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.EnableSyncLogChunkCompressionHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.EnableSyncLogChunkCompressionSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.MaxSyncLogChunksInFlightHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.MaxSyncLogChunksInFlightSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.BurstThresholdNsHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.BurstThresholdNsSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.BurstThresholdNsNVME was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.DiskTimeAvailableScaleHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.DiskTimeAvailableScaleSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.DiskTimeAvailableScaleNVME was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. 
WARNING: immediate control VDiskControls.DefaultHugeGarbagePerMille was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.HugeDefragFreeSpaceBorderPerMille was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.MaxChunksToDefragInflight was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingDryRun was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinLevel0SstCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMaxLevel0SstCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinInplacedSizeHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMaxInplacedSizeHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinInplacedSizeSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMaxInplacedSizeSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinOccupancyPerMille was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMaxOccupancyPerMille was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMinLogChunkCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.ThrottlingMaxLogChunkCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control VDiskControls.MaxInProgressSyncCount was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TabletControls.MaxCommitRedoMB was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.SlowDiskThreshold was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.PredictedDelayMultiplier was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. 
WARNING: immediate control DSProxyControls.LongRequestThresholdMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.MaxNumOfSlowDisks was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.SlowDiskThresholdHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.PredictedDelayMultiplierHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.MaxNumOfSlowDisksHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.SlowDiskThresholdSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.PredictedDelayMultiplierSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.MaxNumOfSlowDisksSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.RequestReportingSettings.BucketSize was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.RequestReportingSettings.LeakDurationMs was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control DSProxyControls.RequestReportingSettings.LeakRate was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control PDiskControls.MaxCommonLogChunksHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control PDiskControls.MaxCommonLogChunksSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control PDiskControls.UseNoopSchedulerHDD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control PDiskControls.UseNoopSchedulerSSD was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control BlobStorageControllerControls.EnableSelfHealWithDegraded was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TableServiceControls.EnableMergeDatashardReads was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. WARNING: immediate control TestShardControls.DisableWrites was registered before TImmediateControlsConfigurator creation. A default value may have been used before it was configured. 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernel [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 3 } Arguments { Id: 4 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\002\213\002?\000\001\235?\002\001\235?\004\001\002\000\t\211\002?\n\235?\000\001\002\000\t\251\000?\020\014Arg\000\000\t\211\002?\014?\020\002\000\t\211\006?\020\203\005@?\020?\020$BlockFunc\000\003?\034\006Add?\026?\026\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 3 } Arguments { Id: 4 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\002\213\002?\000\001\235?\002\001\235?\004\001\002\000\t\211\002?\n\235?\000\001\002\000\t\251\000?\020\014Arg\000\000\t\211\002?\014?\020\002\000\t\211\006?\020\203\005@?\020?\020$BlockFunc\000\003?\034\006Add?\026?\026\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"sum\",\"id\":3},{\"name\":\"vat\",\"id\":4}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"3,4\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:3,4"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"3\",\"p\":{\"address\":{\"name\":\"sum\",\"id\":3}},\"o\":\"3\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"4\",\"p\":{\"address\":{\"name\":\"vat\",\"id\":4}},\"o\":\"4\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"sum\",\"id\":3},{\"name\":\"vat\",\"id\":4}]},\"o\":\"3,4\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"3","p":{"address":{"name":"sum","id":3}},"o":"3","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"sum","id":3},{"name":"vat","id":4}]},"o":"3,4","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"sum","id":3},{"name":"vat","id":4}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"4","p":{"address":{"name":"vat","id":4}},"o":"4","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"3,4","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int32TypeE; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> ResourcePoolsSysView::TestResourcePoolsSysViewFilters [GOOD] Test command err: 2025-06-25T14:36:33.139900Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895860689738485:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:33.140231Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a57/r3tmp/tmpQYy8lO/pdisk_1.dat 2025-06-25T14:36:33.595374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:33.595485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:33.598236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:33.603912Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30192, node 1 2025-06-25T14:36:33.748839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:33.748877Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:33.748884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:33.748984Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25032 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:36:34.104426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:36:34.209001Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:36.912489Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:36:36.912700Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:36:36.912741Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:36:36.928631Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZmYyMWM2ODEtMjkyN2MyYTAtNDc1MDllMWQtNGFlOTc5Zg==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZmYyMWM2ODEtMjkyN2MyYTAtNDc1MDllMWQtNGFlOTc5Zg== 2025-06-25T14:36:36.929194Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519895873574640973:2290], Start check tables existence, number paths: 2 2025-06-25T14:36:36.929256Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZmYyMWM2ODEtMjkyN2MyYTAtNDc1MDllMWQtNGFlOTc5Zg==, ActorId: [1:7519895873574640974:2291], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:36:36.929317Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-25T14:36:36.934810Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519895873574640973:2290], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:36:36.934875Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519895873574640973:2290], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:36:36.934917Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519895873574640973:2290], Successfully finished 2025-06-25T14:36:36.934985Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:36:36.941729Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895873574640991:2301], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:36:36.947932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:36.959167Z node 1 
:KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895873574640991:2301], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-25T14:36:36.961026Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895873574640991:2301], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-25T14:36:36.970061Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895873574640991:2301], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:36:37.028236Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895873574640991:2301], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:36:37.032286Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895877869608339:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:36:37.035945Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895873574640991:2301], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-25T14:36:37.039117Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MjMwOGI1NWItOGQwYjg0NzAtMmRiNTBmY2YtOWQ1Y2FjMGY=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MjMwOGI1NWItOGQwYjg0NzAtMmRiNTBmY2YtOWQ1Y2FjMGY= 2025-06-25T14:36:37.039317Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=MjMwOGI1NWItOGQwYjg0NzAtMmRiNTBmY2YtOWQ1Y2FjMGY=, ActorId: [1:7519895877869608346:2292], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:36:37.039405Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-25T14:36:37.039418Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-25T14:36:37.039481Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895877869608348:2293], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-25T14:36:37.039540Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=MjMwOGI1NWItOGQwYjg0NzAtMmRiNTBmY2YtOWQ1Y2FjMGY=, ActorId: [1:7519895877869608346:2292], ActorState: ReadyState, TraceId: 01jykr9w9f7v0pj1jk64fy8h1a, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7519895877869608345:2339] database: Root databaseId: /Root pool id: sample_pool_id 2025-06-25T14:36:37.039620Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [1:7519895877869608346:2292], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=MjMwOGI1NWItOGQwYjg0NzAtMmRiNTBmY2YtOWQ1Y2FjMGY= 2025-06-25T14:36:37.039669Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519895877869608349:2294], Database: /Root, Start database fetching 2025-06-25T14:36:37.040644Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519895877869608349:2294], Database: /Root, Database info successfully fetched, serverless: 0 2025-06-25T14:36:37.040763Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895877869608348:2293], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully 
fetched 2025-06-25T14:36:37.040799Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-06-25T14:36:37.040843Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025-06-25T14:36:37.040855Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-06-25T14:36:37.041094Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7519895877869608360:2296], DatabaseId: /Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-06-25T14:36:37.041154Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [1:7519895877869608359:2295], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=MjMwOGI1NWItOGQwYjg0NzAtMmRiNTBmY2YtOWQ1Y2FjMGY=, Start pool fetching 2025-06-25T14:36:37.041181Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895877869608361:2297], DatabaseId: /Root, Pool ... ExecuteState, TraceId: 01jykrbp1j6wdqnhzmryza9bcc, ExecutePhyTx, tx: 0x000050C000399658 literal: 0 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-25T14:37:36.552956Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=10&id=NjBhOWE0N2ItNjNiODRiYTUtYjhlMjdkNjMtYWNmMTc3ZTk=, ActorId: [10:7519896132355760257:2371], ActorState: ExecuteState, TraceId: 01jykrbp1j6wdqnhzmryza9bcc, Sending to Executer TraceId: 0 8 2025-06-25T14:37:36.553062Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=10&id=NjBhOWE0N2ItNjNiODRiYTUtYjhlMjdkNjMtYWNmMTc3ZTk=, ActorId: [10:7519896132355760257:2371], ActorState: ExecuteState, TraceId: 01jykrbp1j6wdqnhzmryza9bcc, Created new KQP executer: [10:7519896132355760263:2371] isRollback: 0 2025-06-25T14:37:36.636685Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1852: SessionId: ydb://session/3?node_id=10&id=NjBhOWE0N2ItNjNiODRiYTUtYjhlMjdkNjMtYWNmMTc3ZTk=, ActorId: [10:7519896132355760257:2371], ActorState: ExecuteState, TraceId: 01jykrbp1j6wdqnhzmryza9bcc, Forwarded TEvStreamData to [9:7519896131004155529:3380] 2025-06-25T14:37:36.638038Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=10&id=NjBhOWE0N2ItNjNiODRiYTUtYjhlMjdkNjMtYWNmMTc3ZTk=, ActorId: [10:7519896132355760257:2371], ActorState: ExecuteState, TraceId: 01jykrbp1j6wdqnhzmryza9bcc, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-25T14:37:36.638193Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=10&id=NjBhOWE0N2ItNjNiODRiYTUtYjhlMjdkNjMtYWNmMTc3ZTk=, ActorId: [10:7519896132355760257:2371], ActorState: ExecuteState, TraceId: 01jykrbp1j6wdqnhzmryza9bcc, txInfo Status: Committed Kind: ReadOnly TotalDuration: 85.427 ServerDuration: 85.338 QueriesCount: 2 2025-06-25T14:37:36.638266Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=10&id=NjBhOWE0N2ItNjNiODRiYTUtYjhlMjdkNjMtYWNmMTc3ZTk=, ActorId: [10:7519896132355760257:2371], ActorState: ExecuteState, TraceId: 01jykrbp1j6wdqnhzmryza9bcc, Create QueryResponse for action: QUERY_ACTION_EXECUTE 
with SUCCESS status 2025-06-25T14:37:36.638723Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=10&id=NjBhOWE0N2ItNjNiODRiYTUtYjhlMjdkNjMtYWNmMTc3ZTk=, ActorId: [10:7519896132355760257:2371], ActorState: ExecuteState, TraceId: 01jykrbp1j6wdqnhzmryza9bcc, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:37:36.638761Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=10&id=NjBhOWE0N2ItNjNiODRiYTUtYjhlMjdkNjMtYWNmMTc3ZTk=, ActorId: [10:7519896132355760257:2371], ActorState: ExecuteState, TraceId: 01jykrbp1j6wdqnhzmryza9bcc, EndCleanup, isFinal: 1 2025-06-25T14:37:36.638823Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=10&id=NjBhOWE0N2ItNjNiODRiYTUtYjhlMjdkNjMtYWNmMTc3ZTk=, ActorId: [10:7519896132355760257:2371], ActorState: ExecuteState, TraceId: 01jykrbp1j6wdqnhzmryza9bcc, Sent query response back to proxy, proxyRequestId: 5, proxyId: [10:7519896097996020712:2149] 2025-06-25T14:37:36.638846Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=10&id=NjBhOWE0N2ItNjNiODRiYTUtYjhlMjdkNjMtYWNmMTc3ZTk=, ActorId: [10:7519896132355760257:2371], ActorState: unknown state, TraceId: 01jykrbp1j6wdqnhzmryza9bcc, Cleanup temp tables: 0 2025-06-25T14:37:36.639325Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=10&id=NjBhOWE0N2ItNjNiODRiYTUtYjhlMjdkNjMtYWNmMTc3ZTk=, ActorId: [10:7519896132355760257:2371], ActorState: unknown state, TraceId: 01jykrbp1j6wdqnhzmryza9bcc, Session actor destroyed 2025-06-25T14:37:36.655542Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE= 2025-06-25T14:37:36.656031Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:37:36.656196Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ReadyState, TraceId: 01jykrbpgg6p340gxbheybt9h1, received request, proxyRequestId: 6 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT * FROM `.sys/resource_pools` WHERE Name >= "default" rpcActor: [9:7519896131004155540:3382] database: /Root/test-dedicated databaseId: /Root/test-dedicated pool id: default 2025-06-25T14:37:36.656232Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ReadyState, TraceId: 01jykrbpgg6p340gxbheybt9h1, request placed into pool from cache: default 2025-06-25T14:37:36.656645Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ExecuteState, TraceId: 01jykrbpgg6p340gxbheybt9h1, Sending CompileQuery request 2025-06-25T14:37:36.844757Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: 
SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ExecuteState, TraceId: 01jykrbpgg6p340gxbheybt9h1, ExecutePhyTx, tx: 0x000050C00009AC18 literal: 0 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-25T14:37:36.844823Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ExecuteState, TraceId: 01jykrbpgg6p340gxbheybt9h1, Sending to Executer TraceId: 0 8 2025-06-25T14:37:36.844921Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ExecuteState, TraceId: 01jykrbpgg6p340gxbheybt9h1, Created new KQP executer: [10:7519896132355760288:2379] isRollback: 0 2025-06-25T14:37:36.862962Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1852: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ExecuteState, TraceId: 01jykrbpgg6p340gxbheybt9h1, Forwarded TEvStreamData to [9:7519896131004155540:3382] 2025-06-25T14:37:36.864541Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ExecuteState, TraceId: 01jykrbpgg6p340gxbheybt9h1, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-25T14:37:36.864689Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ExecuteState, TraceId: 01jykrbpgg6p340gxbheybt9h1, txInfo Status: Committed Kind: ReadOnly TotalDuration: 20.073 ServerDuration: 19.972 QueriesCount: 2 2025-06-25T14:37:36.864757Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ExecuteState, TraceId: 01jykrbpgg6p340gxbheybt9h1, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-25T14:37:36.865114Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ExecuteState, TraceId: 01jykrbpgg6p340gxbheybt9h1, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:37:36.865142Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ExecuteState, TraceId: 01jykrbpgg6p340gxbheybt9h1, EndCleanup, isFinal: 1 2025-06-25T14:37:36.865194Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: ExecuteState, TraceId: 01jykrbpgg6p340gxbheybt9h1, Sent query response back to proxy, proxyRequestId: 6, proxyId: [10:7519896097996020712:2149] 2025-06-25T14:37:36.865214Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: 
ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: unknown state, TraceId: 01jykrbpgg6p340gxbheybt9h1, Cleanup temp tables: 0 2025-06-25T14:37:36.870700Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=10&id=MzAzMjUwZjMtNDhkYWQ3ZDgtOGYyZDM5MjgtMmJiOTg1NTE=, ActorId: [10:7519896132355760280:2379], ActorState: unknown state, TraceId: 01jykrbpgg6p340gxbheybt9h1, Session actor destroyed 2025-06-25T14:37:36.879216Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=9&id=OWMzMDc2MTctNDk5NGM5ZmYtOTFlNzg5YTQtZmI2NzJiYmQ=, ActorId: [9:7519896092349448693:2295], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:37:36.879270Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=9&id=OWMzMDc2MTctNDk5NGM5ZmYtOTFlNzg5YTQtZmI2NzJiYmQ=, ActorId: [9:7519896092349448693:2295], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:37:36.879299Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=9&id=OWMzMDc2MTctNDk5NGM5ZmYtOTFlNzg5YTQtZmI2NzJiYmQ=, ActorId: [9:7519896092349448693:2295], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:37:36.879325Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=9&id=OWMzMDc2MTctNDk5NGM5ZmYtOTFlNzg5YTQtZmI2NzJiYmQ=, ActorId: [9:7519896092349448693:2295], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:37:36.879413Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=9&id=OWMzMDc2MTctNDk5NGM5ZmYtOTFlNzg5YTQtZmI2NzJiYmQ=, ActorId: [9:7519896092349448693:2295], ActorState: unknown state, Session actor destroyed 2025-06-25T14:37:36.881681Z node 9 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 11 2025-06-25T14:37:36.882148Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:37:36.882311Z node 9 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 10 2025-06-25T14:37:36.882530Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:37:37.268451Z node 10 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [10:7519896097996020629:2075], processor id# 72075186224037891, database# /Root/test-dedicated ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableTwoIndexes [GOOD] Test command err: Trying to start YDB, gRPC: 4150, MsgBus: 21373 2025-06-25T14:33:38.313659Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895110684219765:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:38.320656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001605/r3tmp/tmpFdTg0L/pdisk_1.dat 2025-06-25T14:33:38.981675Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:38.981758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:38.989482Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:39.007057Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895110684219593:2080] 1750862018221648 != 1750862018221651 2025-06-25T14:33:39.034064Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4150, node 1 2025-06-25T14:33:39.308819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:39.308854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:39.308862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:39.308973Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:39.328445Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21373 TClient is connected to server localhost:21373 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:40.806115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:40.836604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:33:40.854282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:33:41.134586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:41.434959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:41.581113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:43.292410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895110684219765:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:43.292476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:44.006048Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895132159057728:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:44.006150Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:44.378654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:44.420528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:44.453193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:44.530437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:44.570462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:44.658886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:44.748704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:44.924198Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895136454025695:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:44.924273Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:44.924631Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895136454025700:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:44.929344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:44.947052Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895136454025702:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:33:45.044548Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895140748993051:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:46.339619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... mZjODktM2EzZjgwMGItNmNkM2U4ZjUtYjM0ZWZkZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.470616Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719480. Ctx: { TraceId: 01jykrbmbwcze50pfzzt7maqsk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTZiM2UxOGQtY2VlZWYyOWEtNzVhNzY1ZTEtYzIyYWQyYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.470676Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719481. Ctx: { TraceId: 01jykrbmbecqt62xjrtx795sqq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjdhZmZjODktM2EzZjgwMGItNmNkM2U4ZjUtYjM0ZWZkZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.482288Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719482. Ctx: { TraceId: 01jykrbmc0cba69p9v8hrx88xc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWY4MDhmMDctYjY0NzcyZTEtZjc1MDNhNzQtNTE3OTJmMjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.491174Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719483. Ctx: { TraceId: 01jykrbmcpf5v0rv934gj3gbt3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmEzODRmNDItNjU4MGJlZTQtYTgyMWJkZDEtM2VlYTNiYTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.493699Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719484. Ctx: { TraceId: 01jykrbmc0cba69p9v8hrx88xc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWY4MDhmMDctYjY0NzcyZTEtZjc1MDNhNzQtNTE3OTJmMjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.507125Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719485. Ctx: { TraceId: 01jykrbmcw1qq02a7tybyhz9a8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2JlNmJlMmItZmFmYWJlZmUtZGJjYzk2NzQtNjc3Njc2YzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.508047Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719486. Ctx: { TraceId: 01jykrbmd5dc7r4q9b003cmxdh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODMwYmE4ZTUtN2FhMWZmOWEtN2M3YmE4ODAtNWZlMDYxNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.516582Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719487. 
Ctx: { TraceId: 01jykrbmcw1qq02a7tybyhz9a8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2JlNmJlMmItZmFmYWJlZmUtZGJjYzk2NzQtNjc3Njc2YzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.523588Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719488. Ctx: { TraceId: 01jykrbmd5dc7r4q9b003cmxdh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODMwYmE4ZTUtN2FhMWZmOWEtN2M3YmE4ODAtNWZlMDYxNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.529399Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719489. Ctx: { TraceId: 01jykrbmdb4fywfn8st2jzms5k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTZiM2UxOGQtY2VlZWYyOWEtNzVhNzY1ZTEtYzIyYWQyYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.534783Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719490. Ctx: { TraceId: 01jykrbme14sjj3w3s302jhw73, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjdhZmZjODktM2EzZjgwMGItNmNkM2U4ZjUtYjM0ZWZkZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.536948Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719491. Ctx: { TraceId: 01jykrbmdb4fywfn8st2jzms5k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTZiM2UxOGQtY2VlZWYyOWEtNzVhNzY1ZTEtYzIyYWQyYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.545207Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719492. Ctx: { TraceId: 01jykrbme14sjj3w3s302jhw73, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjdhZmZjODktM2EzZjgwMGItNmNkM2U4ZjUtYjM0ZWZkZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.549263Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719493. Ctx: { TraceId: 01jykrbme5fcmp1a0tv36e46s6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmEzODRmNDItNjU4MGJlZTQtYTgyMWJkZDEtM2VlYTNiYTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.558514Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719494. Ctx: { TraceId: 01jykrbme5fcmp1a0tv36e46s6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmEzODRmNDItNjU4MGJlZTQtYTgyMWJkZDEtM2VlYTNiYTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.567014Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719495. Ctx: { TraceId: 01jykrbmexc5wxnbtfjwj36yt6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2JlNmJlMmItZmFmYWJlZmUtZGJjYzk2NzQtNjc3Njc2YzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.576231Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719496. Ctx: { TraceId: 01jykrbmexc5wxnbtfjwj36yt6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2JlNmJlMmItZmFmYWJlZmUtZGJjYzk2NzQtNjc3Njc2YzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.588075Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719497. 
Ctx: { TraceId: 01jykrbmfd1t3tt812faqv3c23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODMwYmE4ZTUtN2FhMWZmOWEtN2M3YmE4ODAtNWZlMDYxNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.596402Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719498. Ctx: { TraceId: 01jykrbmfd1t3tt812faqv3c23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODMwYmE4ZTUtN2FhMWZmOWEtN2M3YmE4ODAtNWZlMDYxNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.598930Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719499. Ctx: { TraceId: 01jykrbmft356mkk2ezwcc78re, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjdhZmZjODktM2EzZjgwMGItNmNkM2U4ZjUtYjM0ZWZkZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.599436Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719500. Ctx: { TraceId: 01jykrbmft36why9h7xwsrkn05, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTZiM2UxOGQtY2VlZWYyOWEtNzVhNzY1ZTEtYzIyYWQyYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.600037Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719501. Ctx: { TraceId: 01jykrbmfd1t3tt812faqv3c23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODMwYmE4ZTUtN2FhMWZmOWEtN2M3YmE4ODAtNWZlMDYxNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.611160Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719502. Ctx: { TraceId: 01jykrbmft36why9h7xwsrkn05, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTZiM2UxOGQtY2VlZWYyOWEtNzVhNzY1ZTEtYzIyYWQyYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.611917Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719503. Ctx: { TraceId: 01jykrbmft356mkk2ezwcc78re, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjdhZmZjODktM2EzZjgwMGItNmNkM2U4ZjUtYjM0ZWZkZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.622588Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719504. Ctx: { TraceId: 01jykrbmft36why9h7xwsrkn05, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTZiM2UxOGQtY2VlZWYyOWEtNzVhNzY1ZTEtYzIyYWQyYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.631468Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719505. Ctx: { TraceId: 01jykrbmgk9788jae6405ejv86, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWY4MDhmMDctYjY0NzcyZTEtZjc1MDNhNzQtNTE3OTJmMjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.632159Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719506. Ctx: { TraceId: 01jykrbmgmda316v9ymqfcvr3t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmEzODRmNDItNjU4MGJlZTQtYTgyMWJkZDEtM2VlYTNiYTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS 2025-06-25T14:37:34.665402Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719507. Ctx: { TraceId: 01jykrbmhh9acp057qyzn7pjtp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2JlNmJlMmItZmFmYWJlZmUtZGJjYzk2NzQtNjc3Njc2YzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.668559Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719508. Ctx: { TraceId: 01jykrbmj315smh0dxjtvm2pjk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZGY4M2JlZDQtOGIxODgyN2ItZmFkOTNlY2EtMmVmNGFiZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.676292Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719509. Ctx: { TraceId: 01jykrbmjb0zje6bzrrznq83bx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODMwYmE4ZTUtN2FhMWZmOWEtN2M3YmE4ODAtNWZlMDYxNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.676611Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719510. Ctx: { TraceId: 01jykrbmjb7ja2m98471thvhz1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWY4MDhmMDctYjY0NzcyZTEtZjc1MDNhNzQtNTE3OTJmMjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.678796Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719511. Ctx: { TraceId: 01jykrbmhh9acp057qyzn7pjtp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2JlNmJlMmItZmFmYWJlZmUtZGJjYzk2NzQtNjc3Njc2YzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:34.689583Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976719512. Ctx: { TraceId: 01jykrbmj315smh0dxjtvm2pjk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZGY4M2JlZDQtOGIxODgyN2ItZmFkOTNlY2EtMmVmNGFiZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS finished with status: SUCCESS finished with status: SUCCESS ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 29894, MsgBus: 18429 2025-06-25T14:33:37.001161Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895101500495556:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:37.001212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001664/r3tmp/tmpq1JJOw/pdisk_1.dat 2025-06-25T14:33:37.702558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:37.702651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:37.712050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:37.733744Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:37.752460Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895101500495538:2080] 1750862016992149 != 1750862016992152 TServer::EnableGrpc on GrpcPort 29894, node 1 2025-06-25T14:33:37.892474Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:37.892510Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:37.892519Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:37.892682Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:33:38.095116Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18429 TClient is connected to server localhost:18429 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:33:38.723937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:38.767759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:33:38.778916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:39.018356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:33:39.232413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:39.332358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:41.414479Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895122975333676:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:41.414588Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:42.004515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895101500495556:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:42.004575Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:42.128101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.202541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.300847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.378026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.436917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.490844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.568526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.666095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519895127270301639:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:42.666161Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:42.666513Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895127270301644:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:42.670465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:42.687160Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895127270301646:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:33:42.776774Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895127270301698:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:44.460519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... jU5OTMtYTNiZWJhYjQtNzQwNjNmODMtNDNjZTRlMjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.005034Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727125. Ctx: { TraceId: 01jykrbhyw1bawhfxzkqwbt9a1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTQyYjY1NjUtMWI0NmM1NWYtOWEzNzIwMi0yN2RjNWQ0YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.007945Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727126. Ctx: { TraceId: 01jykrbhywbks445n2g19ykz8s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTNiYWJhMmYtODFiZDcyMDktOGQ4OGMwYjYtMTljMWE4M2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.018278Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727127. Ctx: { TraceId: 01jykrbhyw1bawhfxzkqwbt9a1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTQyYjY1NjUtMWI0NmM1NWYtOWEzNzIwMi0yN2RjNWQ0YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.021321Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727128. Ctx: { TraceId: 01jykrbhywbks445n2g19ykz8s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTNiYWJhMmYtODFiZDcyMDktOGQ4OGMwYjYtMTljMWE4M2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.025235Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727130. Ctx: { TraceId: 01jykrbhywbks445n2g19ykz8s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTNiYWJhMmYtODFiZDcyMDktOGQ4OGMwYjYtMTljMWE4M2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.027581Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727129. Ctx: { TraceId: 01jykrbhzg4re1qzj21nrt0h9t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzM2ZmU4MzEtOTlkZWU4ZDQtNDE5YzBiYTQtZWM4NjY3ZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.036958Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727131. Ctx: { TraceId: 01jykrbhzmfc704mf3q10bp67s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGMzMGQzY2EtZGE1ZTFlOTItMTg1ZjFlYmEtMzMzNzgzMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.043492Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727132. 
Ctx: { TraceId: 01jykrbhzmfc704mf3q10bp67s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGMzMGQzY2EtZGE1ZTFlOTItMTg1ZjFlYmEtMzMzNzgzMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.048859Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727133. Ctx: { TraceId: 01jykrbhzmfc704mf3q10bp67s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGMzMGQzY2EtZGE1ZTFlOTItMTg1ZjFlYmEtMzMzNzgzMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.054691Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727134. Ctx: { TraceId: 01jykrbhzmfc704mf3q10bp67s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGMzMGQzY2EtZGE1ZTFlOTItMTg1ZjFlYmEtMzMzNzgzMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.072385Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727135. Ctx: { TraceId: 01jykrbj0s3wea0y5x14ex88sv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODE0MDg2NDktYjhhOTgwMDEtZTlhZTQwM2MtNDc2ZTIwZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.080123Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727136. Ctx: { TraceId: 01jykrbj111hpcp2qvh28rzz2c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTQyYjY1NjUtMWI0NmM1NWYtOWEzNzIwMi0yN2RjNWQ0YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.085394Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727137. Ctx: { TraceId: 01jykrbj1abmrp46c8yrajg76p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjQyNjU5OTMtYTNiZWJhYjQtNzQwNjNmODMtNDNjZTRlMjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.091226Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727139. Ctx: { TraceId: 01jykrbj1abmrp46c8yrajg76p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjQyNjU5OTMtYTNiZWJhYjQtNzQwNjNmODMtNDNjZTRlMjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.092785Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727138. Ctx: { TraceId: 01jykrbj111hpcp2qvh28rzz2c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTQyYjY1NjUtMWI0NmM1NWYtOWEzNzIwMi0yN2RjNWQ0YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.095311Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727141. Ctx: { TraceId: 01jykrbj1k4nw41nszfh2hdsjp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzM2ZmU4MzEtOTlkZWU4ZDQtNDE5YzBiYTQtZWM4NjY3ZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.095779Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727140. Ctx: { TraceId: 01jykrbj1k862dy0tm3vgnpgwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTNiYWJhMmYtODFiZDcyMDktOGQ4OGMwYjYtMTljMWE4M2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.099163Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727142. 
Ctx: { TraceId: 01jykrbj111hpcp2qvh28rzz2c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTQyYjY1NjUtMWI0NmM1NWYtOWEzNzIwMi0yN2RjNWQ0YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.103675Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727143. Ctx: { TraceId: 01jykrbj1k862dy0tm3vgnpgwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTNiYWJhMmYtODFiZDcyMDktOGQ4OGMwYjYtMTljMWE4M2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.106345Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727144. Ctx: { TraceId: 01jykrbj1k4nw41nszfh2hdsjp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzM2ZmU4MzEtOTlkZWU4ZDQtNDE5YzBiYTQtZWM4NjY3ZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.113571Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727145. Ctx: { TraceId: 01jykrbj1k862dy0tm3vgnpgwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTNiYWJhMmYtODFiZDcyMDktOGQ4OGMwYjYtMTljMWE4M2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.120241Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727146. Ctx: { TraceId: 01jykrbj1k4nw41nszfh2hdsjp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzM2ZmU4MzEtOTlkZWU4ZDQtNDE5YzBiYTQtZWM4NjY3ZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.122297Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727147. Ctx: { TraceId: 01jykrbj1k862dy0tm3vgnpgwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTNiYWJhMmYtODFiZDcyMDktOGQ4OGMwYjYtMTljMWE4M2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.134663Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727148. Ctx: { TraceId: 01jykrbj26bm573h42gh77zvyt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGMzMGQzY2EtZGE1ZTFlOTItMTg1ZjFlYmEtMzMzNzgzMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-25T14:37:32.141999Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727149. Ctx: { TraceId: 01jykrbj1k4nw41nszfh2hdsjp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzM2ZmU4MzEtOTlkZWU4ZDQtNDE5YzBiYTQtZWM4NjY3ZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.148874Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727151. Ctx: { TraceId: 01jykrbj26bm573h42gh77zvyt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGMzMGQzY2EtZGE1ZTFlOTItMTg1ZjFlYmEtMzMzNzgzMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.148884Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727150. Ctx: { TraceId: 01jykrbj30a39rcs4r13632d8t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODE0MDg2NDktYjhhOTgwMDEtZTlhZTQwM2MtNDc2ZTIwZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root finished with status: SUCCESS 2025-06-25T14:37:32.154530Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727152. Ctx: { TraceId: 01jykrbj26bm573h42gh77zvyt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGMzMGQzY2EtZGE1ZTFlOTItMTg1ZjFlYmEtMzMzNzgzMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.161583Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727153. Ctx: { TraceId: 01jykrbj30a39rcs4r13632d8t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODE0MDg2NDktYjhhOTgwMDEtZTlhZTQwM2MtNDc2ZTIwZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.168833Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727154. Ctx: { TraceId: 01jykrbj30a39rcs4r13632d8t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODE0MDg2NDktYjhhOTgwMDEtZTlhZTQwM2MtNDc2ZTIwZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS 2025-06-25T14:37:32.212257Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727155. Ctx: { TraceId: 01jykrbj401jbyawcxcerfj1dm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjQyNjU5OTMtYTNiZWJhYjQtNzQwNjNmODMtNDNjZTRlMjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.221871Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727156. Ctx: { TraceId: 01jykrbj401jbyawcxcerfj1dm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjQyNjU5OTMtYTNiZWJhYjQtNzQwNjNmODMtNDNjZTRlMjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:37:32.227822Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976727157. Ctx: { TraceId: 01jykrbj401jbyawcxcerfj1dm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjQyNjU5OTMtYTNiZWJhYjQtNzQwNjNmODMtNDNjZTRlMjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::Create_And_Modify_The_Same_Connection [GOOD] Test command err: 2025-06-25T14:37:01.700373Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895981808765119:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:01.701558Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0625 14:37:02.348876468 234968 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:02.349028076 234968 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:02.464434Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:30242: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:30242 2025-06-25T14:37:02.717260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:02.742053Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:03.485360Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:30242: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:30242 } ] 2025-06-25T14:37:03.721001Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:04.728829Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:05.197021Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:30242: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:30242 } ] 2025-06-25T14:37:05.732932Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:06.200002Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896003283601939:2271], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:06.200292Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:06.278232Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896003283601939:2271], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ffe/r3tmp/tmpJWcgUu/pdisk_1.dat 2025-06-25T14:37:06.452489Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896003283601939:2271], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:06.632783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:06.632877Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:06.642836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:06.686589Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 30242, node 1 2025-06-25T14:37:06.700013Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895981808765119:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:06.700086Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:30974 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:07.142244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... E0625 14:37:07.350857066 235096 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:07.351017899 235096 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:07.414080Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:07.423554Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:07.425874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:07.425887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:07.426064Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:08.037186Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/queries". 
Create session OK 2025-06-25T14:37:08.037233Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/queries" 2025-06-25T14:37:08.037242Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/queries" 2025-06-25T14:37:08.062953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:37:08.070283Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create session OK 2025-06-25T14:37:08.070317Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:08.070322Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:08.087989Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". Create session OK 2025-06-25T14:37:08.088016Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-25T14:37:08.088022Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-25T14:37:08.101972Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". Create session OK 2025-06-25T14:37:08.102005Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-25T14:37:08.102011Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-25T14:37:08.107179Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-25T14:37:08.107209Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-25T14:37:08.107215Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-25T14:37:08.116519Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-25T14:37:08.116549Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-25T14:37:08.116555Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-25T14:37:08.117008Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "Root/yq" 2025-06-25T14:37:08.117026Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "Root/yq": 2025-06-25T14:37:08.117052Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". Create session OK 2025-06-25T14:37:08.117063Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-25T14:37:08.117069Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" 2025-06-25T14:37:08.119417Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". Create session OK 2025-06-25T14:37:08.119432Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-25T14:37:08.119438Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-25T14:37:08.125339Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". 
Create session OK 2025-06-25T14:37:08.125367Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-25T14:37:08.125373Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-25T14:37:08.132962Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". Create session OK 2025-06-25T14:37:08.132994Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:08.133002Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:08.135389Z node 1 :YQ_CONTROL_PLANE ... EwMTI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. About to drain async output 0. FreeSpace: 67108864, allowedOvercommit: 4194304, toSend: 71303168, finished: 0 2025-06-25T14:37:36.638369Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715707, task: 1. Tasks execution finished 2025-06-25T14:37:36.638384Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519896131955919480:2680], TxId: 281474976715707, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=NDBmM2M3OWItNmY4ZTBjYmItODMyNTAwNGEtNmFmODEwMTI=. CustomerSuppliedId : . TraceId : 01jykrbp68byxkgw4m12wcq3e9. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-25T14:37:36.638487Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3311: SelfId: [4:7519896131955919485:2681], TxId: 281474976715707, task: 2. Add data: 234 / 234 2025-06-25T14:37:36.638521Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715707, task: 1. pass away 2025-06-25T14:37:36.638553Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3280: SelfId: [4:7519896131955919485:2681], TxId: 281474976715707, task: 2. Send data=234, closed=1, bufferActorId=[4:7519896131955919473:2537] 2025-06-25T14:37:36.638574Z node 4 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:372: SelfId: [4:7519896131955919481:2681], TxId: 281474976715707, task: 2. Ctx: { TraceId : 01jykrbp68byxkgw4m12wcq3e9. SessionId : ydb://session/3?node_id=4&id=NDBmM2M3OWItNmY4ZTBjYmItODMyNTAwNGEtNmFmODEwMTI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Drain async output 0. Free space decreased: -9223372036787666944, sent data from buffer: 234 2025-06-25T14:37:36.638603Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715707, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-25T14:37:36.638611Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715707, task: 2. Tasks execution finished 2025-06-25T14:37:36.638625Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [4:7519896131955919481:2681], TxId: 281474976715707, task: 2. Ctx: { TraceId : 01jykrbp68byxkgw4m12wcq3e9. SessionId : ydb://session/3?node_id=4&id=NDBmM2M3OWItNmY4ZTBjYmItODMyNTAwNGEtNmFmODEwMTI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Waiting finish of sink[0] 2025-06-25T14:37:36.638629Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715707;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:37:36.638838Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1860: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Create new TableWriteActor for table `Root/yq/connections` ([72057594046644480:12:1]). lockId=281474976715700. ActorId=[4:7519896131955919487:2537] 2025-06-25T14:37:36.638879Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:388: Table: `Root/yq/connections` ([72057594046644480:12:1]), SessionActorId: [4:7519896110481081995:2537]Open: token=0 2025-06-25T14:37:36.638900Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1987: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], ProcessRequestQueue [OwnerId: 72057594046644480, LocalPathId: 12] NOT READY queue=1 2025-06-25T14:37:36.638949Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:394: SelfId: [4:7519896131955919487:2537], Table: `Root/yq/connections` ([72057594046644480:12:1]), SessionActorId: [4:7519896110481081995:2537]Write: token=0 2025-06-25T14:37:36.639057Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:402: SelfId: [4:7519896131955919487:2537], Table: `Root/yq/connections` ([72057594046644480:12:1]), SessionActorId: [4:7519896110481081995:2537]Close: token=0 2025-06-25T14:37:36.639106Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3180: SelfId: [4:7519896131955919485:2681], TxId: 281474976715707, task: 2. TKqpForwardWriteActor recieve EvBufferWriteResult from [4:7519896131955919473:2537] 2025-06-25T14:37:36.639120Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3198: SelfId: [4:7519896131955919485:2681], TxId: 281474976715707, task: 2. Finished 2025-06-25T14:37:36.639137Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896131955919481:2681], TxId: 281474976715707, task: 2. Ctx: { TraceId : 01jykrbp68byxkgw4m12wcq3e9. SessionId : ydb://session/3?node_id=4&id=NDBmM2M3OWItNmY4ZTBjYmItODMyNTAwNGEtNmFmODEwMTI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:37:36.639168Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715707, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-25T14:37:36.639177Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715707, task: 2. Tasks execution finished 2025-06-25T14:37:36.639189Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519896131955919481:2681], TxId: 281474976715707, task: 2. Ctx: { TraceId : 01jykrbp68byxkgw4m12wcq3e9. SessionId : ydb://session/3?node_id=4&id=NDBmM2M3OWItNmY4ZTBjYmItODMyNTAwNGEtNmFmODEwMTI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-25T14:37:36.639243Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715707, task: 2. pass away 2025-06-25T14:37:36.639296Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715707;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:37:36.639637Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2087: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Start prepare for distributed commit 2025-06-25T14:37:36.639649Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:909: SelfId: [4:7519896131955919487:2537], Table: `Root/yq/connections` ([72057594046644480:12:1]), SessionActorId: [4:7519896110481081995:2537]SetPrepare; txId=281474976715707 2025-06-25T14:37:36.639666Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2052: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Flush data 2025-06-25T14:37:36.639798Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1050: SelfId: [4:7519896131955919487:2537], Table: `Root/yq/connections` ([72057594046644480:12:1]), SessionActorId: [4:7519896110481081995:2537]Send EvWrite to ShardID=72075186224037893, isPrepare=1, isImmediateCommit=0, TxId=281474976715707, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715700 DataShard: 72075186224037893 Generation: 1 Counter: 1 SchemeShard: 72057594046644480 PathId: 12, Size=324, Cookie=1, OperationsCount=1, IsFinal=1, Attempts=0, Mode=1, BufferMemory=324 2025-06-25T14:37:36.639892Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2196: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Send EvWrite (external) to ShardID=72075186224037896, isPrepare=1, isImmediateCommit=0, TxId=281474976715707, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715700 DataShard: 72075186224037896 Generation: 1 Counter: 1 SchemeShard: 72057594046644480 PathId: 15, Size=0, Cookie=0, OperationsCount=0, IsFinal=1, Attempts=0 2025-06-25T14:37:36.640411Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [4:7519896131955919487:2537], Table: `Root/yq/connections` ([72057594046644480:12:1]), SessionActorId: [4:7519896110481081995:2537]Recv EvWriteResult from ShardID=72075186224037893, Status=STATUS_PREPARED, TxId=281474976715707, Locks= , Cookie=1 2025-06-25T14:37:36.640454Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2052: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Flush data 2025-06-25T14:37:36.640485Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2586: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Recv EvWriteResult (external) from ShardID=72075186224037896, Status=STATUS_PREPARED, TxId=281474976715707, Locks= , Cookie=0 2025-06-25T14:37:36.640499Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2840: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Got prepared result TxId=281474976715707, TabletId=72075186224037896, Cookie=0 2025-06-25T14:37:36.640521Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2130: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Start distributed commit with TxId=281474976715707 2025-06-25T14:37:36.640531Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:917: SelfId: [4:7519896131955919487:2537], Table: `Root/yq/connections` ([72057594046644480:12:1]), SessionActorId: [4:7519896110481081995:2537]SetDistributedCommit; txId=281474976715707 2025-06-25T14:37:36.640568Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2297: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Execute planned transaction, coordinator: 72057594046316545, volitale: 1, shards: 2 2025-06-25T14:37:36.640724Z node 4 :KQP_COMPUTE DEBUG: 
kqp_write_actor.cpp:2353: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Got transaction status, status: 16 2025-06-25T14:37:36.645131Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2353: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Got transaction status, status: 17 2025-06-25T14:37:36.649296Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2586: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Recv EvWriteResult (external) from ShardID=72075186224037896, Status=STATUS_COMPLETED, TxId=281474976715707, Locks= , Cookie=0 2025-06-25T14:37:36.649319Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2873: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Got completed result TxId=281474976715707, TabletId=72075186224037896, Cookie=0, Locks= 2025-06-25T14:37:36.649753Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [4:7519896131955919487:2537], Table: `Root/yq/connections` ([72057594046644480:12:1]), SessionActorId: [4:7519896110481081995:2537]Recv EvWriteResult from ShardID=72075186224037893, Status=STATUS_COMPLETED, TxId=281474976715707, Locks= , Cookie=0 2025-06-25T14:37:36.649776Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [4:7519896131955919487:2537], Table: `Root/yq/connections` ([72057594046644480:12:1]), SessionActorId: [4:7519896110481081995:2537]Got completed result TxId=281474976715707, TabletId=72075186224037893, Cookie=0, Mode=2, Locks= 2025-06-25T14:37:36.649792Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [4:7519896131955919473:2537], SessionActorId: [4:7519896110481081995:2537], Committed TxId=281474976715707 2025-06-25T14:37:36.754230Z node 4 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:1237: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:1237 2025-06-25T14:37:37.174373Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:1237: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:1237 |81.4%| [TA] {RESULT} $(B)/ydb/core/tablet_flat/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpImmediateEffects::UpsertExistingKey >> KqpEffects::InsertRevert_Literal_Success >> KqpImmediateEffects::InsertDuplicates+UseSink >> KqpImmediateEffects::Delete >> KqpEffects::DeletePkPrefixWithIndex |81.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |81.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |81.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::CreateConnections_With_Idempotency [GOOD] Test command err: 2025-06-25T14:37:09.098610Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896014252312036:2242];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:09.098694Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0625 14:37:09.576601598 236818 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:09.576740162 236818 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:10.096593Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:10.117406Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:10.625641Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:7269: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:7269 } ] 2025-06-25T14:37:10.811760Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:7269: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:7269 2025-06-25T14:37:11.119775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:12.124633Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:12.544573Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:7269: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:7269 } ] 2025-06-25T14:37:13.131325Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:14.102220Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896014252312036:2242];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:14.102285Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:14.136486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:14.325679Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896035727148727:2273], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:14.338250Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:14.389824Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896035727148727:2273], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ff3/r3tmp/tmpkpj7Ld/pdisk_1.dat 2025-06-25T14:37:14.540488Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896035727148727:2273], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } E0625 14:37:14.561122724 237040 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:14.561267381 237040 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:14.781300Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 7269, node 1 2025-06-25T14:37:15.044761Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/quotas". Create session OK 2025-06-25T14:37:15.044816Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/quotas" 2025-06-25T14:37:15.044839Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/quotas" 2025-06-25T14:37:15.106180Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create session OK 2025-06-25T14:37:15.106216Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:15.106224Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:15.108352Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-25T14:37:15.108368Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-25T14:37:15.108376Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-25T14:37:15.132072Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-25T14:37:15.132105Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-25T14:37:15.132113Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-25T14:37:15.135306Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-25T14:37:15.135330Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-25T14:37:15.135337Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-25T14:37:15.140817Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". Create session OK 2025-06-25T14:37:15.140832Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-25T14:37:15.140838Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-25T14:37:15.141890Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/queries". Create session OK 2025-06-25T14:37:15.141904Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/queries" 2025-06-25T14:37:15.141911Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/queries" 2025-06-25T14:37:15.152085Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". Create session OK 2025-06-25T14:37:15.152085Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/jobs". 
Create session OK 2025-06-25T14:37:15.152104Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/jobs" 2025-06-25T14:37:15.152111Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/jobs" 2025-06-25T14:37:15.152116Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-25T14:37:15.152130Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-25T14:37:15.155055Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". Create session OK 2025-06-25T14:37:15.155082Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-25T14:37:15.155088Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" 2025-06-25T14:37:15.155239Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/compute_databases". Create session OK 2025-06-25T14:37:15.155253Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:15.155266Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:15.157882Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". Create session OK 2025-06-25T14:37:15.157907Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:15.157914Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:15.163119Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". Create session OK 2025-06-25T14:37:15.163146Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-25T14:37:15.163152Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" TClient is connected to server localhost:15559 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:37:15.172879Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896040022116546:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:15.172987Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:15.180044Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896040022116558:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:15.180658Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896040022116563:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:15.186062Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896040022116561:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:15.187258Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp: ... pp:648: SyncQuota finished with error: 2025-06-25T14:37:42.356550Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.357188Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.357506Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.357596Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.357677Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.357756Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.357836Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.357917Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375051Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375143Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375220Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375349Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375381Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375459Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375505Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375558Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375606Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375660Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375691Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375785Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375844Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375912Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.375959Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376007Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376050Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376099Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376132Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376166Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376230Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376268Z node 4 
:FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376333Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376386Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376435Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376484Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376525Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376571Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376615Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376651Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376703Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376733Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376797Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376833Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376892Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376927Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.376988Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377025Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377079Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377121Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377182Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377219Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377301Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377369Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377442Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377491Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377546Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377597Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377642Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377707Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377758Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377822Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377875Z node 4 :FQ_QUOTA_SERVICE 
ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377925Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.377975Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378014Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378062Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378112Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378159Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378205Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378253Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378300Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378346Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378394Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378441Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378489Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378538Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378600Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378641Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378704Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378739Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378796Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378844Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378890Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378943Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.378987Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379041Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379082Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379136Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379174Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379229Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379272Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379327Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379370Z node 4 :FQ_QUOTA_SERVICE ERROR: 
quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379417Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379459Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379507Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379553Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379598Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379628Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379701Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379734Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379782Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379838Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379868Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:42.379927Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: >> Viewer::SimpleFeatureFlags [GOOD] >> OlapEstimationRowsCorrectness::TPCH10 [GOOD] |81.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |81.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |81.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |81.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::CountWithNulls >> Yq_1::Basic [GOOD] >> Yq_1::Basic_EmptyList >> KqpImmediateEffects::ConflictingKeyW1RR2 >> KqpImmediateEffects::Insert >> KqpImmediateEffects::ConflictingKeyR1WR2 >> KqpInplaceUpdate::Negative_BatchUpdate+UseSink >> KqpImmediateEffects::ConflictingKeyR1WRR2 >> KqpPg::InsertValuesFromTableWithDefaultBool+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultBool-useSink |81.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |81.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |81.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats >> TestProgram::CountWithNulls [GOOD] >> KqpPg::CreateIndex [GOOD] >> KqpPg::CreateNotNullPgColumn |81.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::CountWithNulls [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { GroupBy { Aggregates { Column { Id: 10001 } Function { Id: 2 Arguments { Id: 2 } } } } } Command { Projection { Columns { Id: 10001 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { GroupBy { Aggregates { Column { Id: 10001 } Function { Id: 2 Arguments { Id: 2 } } } } } Command { Projection { Columns { Id: 10001 } } } ; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N3(15):{\"i\":\"2\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"10001\",\"t\":\"Calculation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N1[shape=box, label="N1(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N4 -> N1[label="1"]; N2[shape=box, label="N2(7):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N1 -> N2[label="1"]; N3[shape=box, label="N4(15):{\"i\":\"10001\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N3[label="1"]; N4[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N4->N1->N2->N0->N3[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":4}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[{"from":0}]},{"owner_id":4,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2}]},"o":"2","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"i":"10001","t":"Projection"},"w":15,"id":3},"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":7,"id":2},"4":{"p":{"p":{"data":[{"name":"uid","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":4},"0":{"p":{"i":"2","p":{"kernel":{"class_name":"SIMPLE"}},"o":"10001","t":"Calculation"},"w":15,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCDS78 [GOOD] Test command err: Trying to start YDB, gRPC: 27800, MsgBus: 13183 2025-06-25T14:35:19.350233Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895542175689338:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:19.350301Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ece/r3tmp/tmp6jzKz1/pdisk_1.dat 2025-06-25T14:35:19.845181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:19.845261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:19.850587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:19.938479Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27800, node 1 2025-06-25T14:35:20.076935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:20.076956Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:20.076965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:20.077082Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13183 2025-06-25T14:35:20.384582Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13183 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:20.891769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:20.920482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:35:22.945497Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895555060591826:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:22.945648Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:22.950953Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895555060591838:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:22.957551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:22.970619Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895555060591840:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:35:23.066639Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895559355559188:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:23.330457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:35:23.556289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895559355559421:2317];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:35:23.558482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895559355559421:2317];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:35:23.558830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895559355559421:2317];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:35:23.558943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895559355559421:2317];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:35:23.559215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895559355559421:2317];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:35:23.559366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895559355559421:2317];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:35:23.559469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895559355559421:2317];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:35:23.559556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895559355559421:2317];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:35:23.559684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895559355559421:2317];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:35:23.560074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037892;self_id=[1:7519895559355559421:2317];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:35:23.560176Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519895559355559421:2317];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:35:23.602222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895559355559407:2313];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:35:23.602315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895559355559407:2313];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:35:23.602516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895559355559407:2313];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:35:23.602609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895559355559407:2313];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:35:23.602703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895559355559407:2313];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:35:23.602794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895559355559407:2313];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:35:23.602875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895559355559407:2313];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:35:23.603019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895559355559407:2313];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:35:23.603125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895559355559407:2313];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:35:23.603227Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895559355559407:2313];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline= ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:55.998272Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:55.998886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.000700Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.001289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.004851Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.005469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.006081Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.007685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.011595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.012237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.012956Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.013841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.019054Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.019564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-25T14:36:56.019991Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.020585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.023081Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.023586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039259;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.028171Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039259;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.028783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.034063Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.034807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.037545Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.038278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.040052Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.040612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.045683Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.046144Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.046470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.046792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.058447Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.058525Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.059017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.059031Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.064504Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.065227Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.065457Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.070692Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.189749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039236;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.195845Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039236;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.204168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:36:56.210011Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:36:56.242329Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykr9aaaavdb2meean89gtf0", SessionId: 
ydb://session/3?node_id=1&id=ZDkwYTBmYmYtNTkwNjQ0MjYtOTM1NTMxOGItODllMmFjZWY=, Slow query, duration: 37.607445s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:36:56.942515Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:56.945149Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:36:56.946374Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase+useSink |81.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> Yq_1::Basic_TaggedLiteral [GOOD] |81.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> KqpPg::DropTableIfExists [GOOD] >> KqpPg::DropTableIfExists_GenericQuery |81.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/config/ut/ydb-services-config-ut |81.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/config/ut/ydb-services-config-ut |81.5%| [LD] {RESULT} $(B)/ydb/services/config/ut/ydb-services-config-ut >> TestProgram::YqlKernelStartsWithScalar >> TestProgram::YqlKernelStartsWithScalar [GOOD] >> KqpInplaceUpdate::SingleRowStr-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::Basic_TaggedLiteral [GOOD] Test command err: 2025-06-25T14:37:00.452188Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895976580888942:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:00.452236Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0625 14:37:01.280573317 234425 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:01.280739546 234425 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:01.451414Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:01.461864Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:02.092982Z node 1 :YQL_NODES_MANAGER ERROR: 
nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4100: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:4100 2025-06-25T14:37:02.101889Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4100: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:4100 } ] 2025-06-25T14:37:02.460838Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:03.462784Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:03.949759Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4100: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:4100 } ] 2025-06-25T14:37:04.468593Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:04.740285Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:04.753363Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519895993760758287:2273], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:04.824682Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519895993760758287:2273], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:04.952762Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519895993760758287:2273], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00200c/r3tmp/tmpRfDPJq/pdisk_1.dat 2025-06-25T14:37:05.232505Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519895993760758287:2273], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:05.289379Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:05.337847Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 4100, node 1 2025-06-25T14:37:05.387558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:05.387658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:05.390702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:05.392214Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:05.392222Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:05.392229Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:05.392374Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:05.453630Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895976580888942:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:05.453842Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:14579 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:05.785707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... E0625 14:37:06.257187296 234575 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:06.257348529 234575 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:06.348697Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/jobs". 
Create session OK 2025-06-25T14:37:06.348732Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/jobs" 2025-06-25T14:37:06.348740Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/jobs" 2025-06-25T14:37:06.360637Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create session OK 2025-06-25T14:37:06.360665Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:06.360671Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:06.401948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:37:06.402047Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". Create session OK 2025-06-25T14:37:06.402075Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-25T14:37:06.402083Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-25T14:37:06.404957Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/compute_databases". Create session OK 2025-06-25T14:37:06.404982Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:06.404988Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:06.405609Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-25T14:37:06.405624Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-25T14:37:06.405631Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-25T14:37:06.408147Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". Create session OK 2025-06-25T14:37:06.408168Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-25T14:37:06.408174Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" 2025-06-25T14:37:06.410688Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-25T14:37:06.410702Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-25T14:37:06.410707Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-25T14:37:06.412722Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". Create session OK 2025-06-25T14:37:06.412746Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:06.412773Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:06.419352Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "Root/yq" 2025-06-25T14:37:06.419381Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "Root/yq": 2025-06-25T14:37:06.419439Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". 
Create session OK 2025-06-25T14:37:06.419450Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-25T14:37:06.419461Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-25T14:37:06.450814Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-25T14:37:06.4508 ... MmYtNDFjMmFhMGItNzg1YzVkNTc=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-25T14:37:44.991901Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715774, task: 1. pass away 2025-06-25T14:37:44.991966Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715774;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:37:44.992346Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2110: SelfId: [4:7519896166869097077:2350], SessionActorId: [4:7519896115329484870:2350], Start immediate commit 2025-06-25T14:37:44.992358Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:923: SelfId: [4:7519896166869097087:2350], Table: `Root/yq/quotas` ([72057594046644480:7:1]), SessionActorId: [4:7519896115329484870:2350]SetImmediateCommit 2025-06-25T14:37:44.992373Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2052: SelfId: [4:7519896166869097077:2350], SessionActorId: [4:7519896115329484870:2350], Flush data 2025-06-25T14:37:44.992519Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1050: SelfId: [4:7519896166869097087:2350], Table: `Root/yq/quotas` ([72057594046644480:7:1]), SessionActorId: [4:7519896115329484870:2350]Send EvWrite to ShardID=72075186224037888, isPrepare=0, isImmediateCommit=1, TxId=0, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976715770 DataShard: 72075186224037888 Generation: 1 Counter: 19 SchemeShard: 72057594046644480 PathId: 7, Size=136, Cookie=1, OperationsCount=1, IsFinal=1, Attempts=0, Mode=3, BufferMemory=136 2025-06-25T14:37:44.992577Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [4:7519896166869097068:2545], Table: `Root/yq/quotas` ([72057594046644480:7:1]), SessionActorId: [4:7519896132509356153:2545]Recv EvWriteResult from ShardID=72075186224037888, Status=STATUS_COMPLETED, TxId=41, Locks= , Cookie=1 2025-06-25T14:37:44.992601Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [4:7519896166869097068:2545], Table: `Root/yq/quotas` ([72057594046644480:7:1]), SessionActorId: [4:7519896132509356153:2545]Got completed result TxId=41, TabletId=72075186224037888, Cookie=1, Mode=3, Locks= 2025-06-25T14:37:44.992641Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [4:7519896166869097051:2545], SessionActorId: [4:7519896132509356153:2545], Committed TxId=0 2025-06-25T14:37:45.011657Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [4:7519896166869097087:2350], Table: `Root/yq/quotas` ([72057594046644480:7:1]), SessionActorId: [4:7519896115329484870:2350]Recv EvWriteResult from ShardID=72075186224037888, Status=STATUS_COMPLETED, TxId=43, Locks= , Cookie=1 2025-06-25T14:37:45.011704Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [4:7519896166869097087:2350], Table: `Root/yq/quotas` ([72057594046644480:7:1]), SessionActorId: [4:7519896115329484870:2350]Got completed result TxId=43, TabletId=72075186224037888, Cookie=1, Mode=3, Locks= 2025-06-25T14:37:45.011756Z node 4 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [4:7519896166869097077:2350], SessionActorId: [4:7519896115329484870:2350], Committed TxId=0 2025-06-25T14:37:45.044487Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:45.064120Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:45.222344Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:31512: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:31512 2025-06-25T14:37:46.085719Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.095631Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.095891Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.096091Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.096277Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.096573Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.096839Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.097092Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.097350Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.097537Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.097672Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.097767Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.097863Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.097954Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098031Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098108Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098191Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098271Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098352Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098443Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098518Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098593Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098672Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098750Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098822Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098897Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.098968Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.099041Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.099115Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.099189Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.099271Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: 
SyncQuota finished with error: 2025-06-25T14:37:46.099348Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.099431Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.099508Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.099636Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.099729Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.099830Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.099977Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.100071Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.100534Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.100646Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.100722Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.100799Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.100872Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.100946Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101021Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101094Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101171Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101252Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101352Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101428Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101499Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101575Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101657Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101741Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101823Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101897Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.101973Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.102048Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.102126Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.102200Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.102275Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished 
with error: 2025-06-25T14:37:46.102351Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.102429Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.102510Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.102605Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.102708Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.102790Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.102871Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.102955Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:46.103033Z node 4 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: |81.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |81.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH10 [GOOD] Test command err: Trying to start YDB, gRPC: 25743, MsgBus: 4468 2025-06-25T14:35:52.140890Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895685332186683:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:52.157733Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000eb5/r3tmp/tmpYzzQj8/pdisk_1.dat 2025-06-25T14:35:52.801068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:35:52.801150Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:52.812127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:52.917252Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:52.932531Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895685332186654:2080] 1750862152119708 != 1750862152119711 TServer::EnableGrpc on GrpcPort 25743, node 1 2025-06-25T14:35:53.103033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:53.103051Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:53.103062Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:53.103162Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:35:53.156449Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4468 TClient is connected to 
server localhost:4468 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:54.471800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:54.513117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:35:56.822127Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895702512056488:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.822252Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.822701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895702512056500:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:56.826549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:56.851274Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895702512056502:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:35:56.917996Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895702512056553:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:57.147522Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895685332186683:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:57.215126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:35:57.342523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:35:57.687366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519895706807024101:2319];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:35:57.687578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519895706807024101:2319];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:35:57.687868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519895706807024101:2319];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:35:57.687988Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519895706807024101:2319];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:35:57.688116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519895706807024101:2319];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:35:57.688241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519895706807024101:2319];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:35:57.688408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519895706807024101:2319];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:35:57.688522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519895706807024101:2319];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:35:57.688649Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519895706807024101:2319];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:35:57.688770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519895706807024101:2319];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:35:57.688790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895706807024103:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:35:57.688853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895706807024103:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:35:57.688888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519895706807024101:2319];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:35:57.689006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895706807024103:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:35:57.689097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895706807024103:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:35:57.689219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895706807024103:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:35:57.689330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895706807024103:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:35:57.689428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895706807024103:2321];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:35:57.689539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519895706807024103:2321];tablet_ ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.733649Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.734248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.736752Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.737328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.738221Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.738775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.742881Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.744075Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.744761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.744989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.750808Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.751491Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.753894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.755382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-25T14:37:27.760612Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.762103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.762214Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.762987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.767906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.769125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.769241Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.769874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.775275Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.775758Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.776109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.776516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.783321Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.783894Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.784110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.784773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.790797Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.790798Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.791593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.791736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.797737Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.798043Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.798583Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.799502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.804881Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.805895Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.863787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039225;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:37:27.870343Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039225;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:37:27.958275Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykra9b0833g9sy6vd3nfb0s", SessionId: 
ydb://session/3?node_id=1&id=MzEzMDVjZTYtOGQwYTc5MGQtMTU3ZGIxZGMtNzE1MmQzMzQ=, Slow query, duration: 37.553131s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:37:28.236237Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:37:28.236237Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:37:28.236863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; |81.5%| [LD] {RESULT} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelStartsWithScalar [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Bytes: "Lorem" } } } Command { Assign { Column { Id: 16 } Function { Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\000\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\024StartsWith?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Bytes: "Lorem" } } } Command { Assign { Column { Id: 16 } Function { Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\000\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\024StartsWith?\034? 
\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"Lorem\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"7,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:7,15"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"7\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"7,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"string","id":7}]},"o":"7","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"string","id":7}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"Lorem"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; >> SystemView::ShowCreateTableColumn [GOOD] >> SystemView::ShowCreateTableKeyBloomFilter >> PrivateApi::GetTask [GOOD] >> PrivateApi::Nodes >> KqpEffects::InsertAbort_Literal_Duplicates+UseSink >> Yq_1::ListConnectionsOnEmptyConnectionsTable [GOOD] >> KqpEffects::UpdateOn_Literal >> KqpEffects::InsertAbort_Literal_Conflict+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::ListConnectionsOnEmptyConnectionsTable [GOOD] Test command err: 2025-06-25T14:37:20.381604Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896063548491891:2169];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:20.381806Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0625 14:37:21.381867697 239867 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:21.382016423 239867 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 
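The TestProgram::YqlKernelStartsWithScalar output above encodes a projection that applies the YQL StartsWith kernel to a string column (id 7, named "string" in the execution graph) against the scalar constant "Lorem". A minimal YQL sketch of an equivalent expression is given below; the table name `source` is hypothetical, and the column name `string` is taken only from the column metadata in the graph, not from the test itself:

    SELECT StartsWith(string, "Lorem") AS starts_with_lorem
    FROM source;

In the unit test the same predicate is evaluated block-wise by the columnshard engine through the serialized kernel program shown above rather than through a SQL query.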
2025-06-25T14:37:21.391094Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:21.400685Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:17581: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:17581 } ] 2025-06-25T14:37:21.402956Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:22.270067Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:17581: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:17581 2025-06-25T14:37:22.273616Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:17581: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:17581 } ] 2025-06-25T14:37:22.420126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:23.422000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:24.165181Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:17581: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:17581 } ] 2025-06-25T14:37:24.424245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:25.292477Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896063548491891:2169];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:25.292545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:25.426760Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fa0/r3tmp/tmptUHmKo/pdisk_1.dat 2025-06-25T14:37:26.040636Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896089318296269:2278], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:26.040888Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:26.086915Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 17581, node 1 2025-06-25T14:37:26.344411Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:26.351871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:26.351891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:26.351900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:26.359657Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration E0625 14:37:26.385131991 240041 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:26.385296860 240041 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// TClient is connected to server localhost:64019 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:26.542218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:26.567742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:37:27.108068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:37:27.114245Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". 
Create session OK 2025-06-25T14:37:27.114281Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-25T14:37:27.114289Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-25T14:37:27.125282Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". Create session OK 2025-06-25T14:37:27.125323Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:27.125331Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:27.126762Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/quotas". Create session OK 2025-06-25T14:37:27.126779Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/quotas" 2025-06-25T14:37:27.126785Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/quotas" 2025-06-25T14:37:27.132552Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". Create session OK 2025-06-25T14:37:27.132570Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-25T14:37:27.132576Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-25T14:37:27.133719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:27.134266Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create session OK 2025-06-25T14:37:27.134290Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:27.134296Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:27.135350Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/compute_databases". Create session OK 2025-06-25T14:37:27.135361Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:27.135365Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:27.137647Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-25T14:37:27.137660Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-25T14:37:27.137664Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-25T14:37:27.139420Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-25T14:37:27.139432Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-25T14:37:27.139437Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-25T14:37:27.148723Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". 
Create session OK 2025-06-25T14:37:27.148761Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-25T14:37:27.148770Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" 2025-06-25T14:37:27.151149Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". Create session OK 2025-06-25T14:37:27.151166Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-25T14:37:27.151170Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-25T14:37:27.153286Z node 1 ... 6357 RawX2: 4503616807242246 } } DstEndpoint { ActorId { RawX1: 7519896178112016358 RawX2: 4503616807242247 } } InMemory: true DstStageId: 1 } Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 2 SrcEndpoint { ActorId { RawX1: 7519896178112016358 RawX2: 4503616807242247 } } DstEndpoint { ActorId { RawX1: 7519896178112016351 RawX2: 4503616807242233 } } InMemory: true } 2025-06-25T14:37:47.591334Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1081: SelfId: [4:7519896178112016358:2567], TxId: 281474976715689, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . TraceId : 01jykrc0sy6tzgkfxbne3psgk2. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Update input channelId: 1, peer: [4:7519896178112016357:2566] 2025-06-25T14:37:47.591401Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896178112016358:2567], TxId: 281474976715689, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . TraceId : 01jykrc0sy6tzgkfxbne3psgk2. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646926 2025-06-25T14:37:47.591491Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7519896178112016358:2567], TxId: 281474976715689, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . TraceId : 01jykrc0sy6tzgkfxbne3psgk2. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 DstTaskId: 2 SrcEndpoint { ActorId { RawX1: 7519896178112016357 RawX2: 4503616807242246 } } DstEndpoint { ActorId { RawX1: 7519896178112016358 RawX2: 4503616807242247 } } InMemory: true DstStageId: 1 } Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 2 SrcEndpoint { ActorId { RawX1: 7519896178112016358 RawX2: 4503616807242247 } } DstEndpoint { ActorId { RawX1: 7519896178112016351 RawX2: 4503616807242233 } } InMemory: true } 2025-06-25T14:37:47.591538Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896178112016358:2567], TxId: 281474976715689, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . TraceId : 01jykrc0sy6tzgkfxbne3psgk2. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-25T14:37:47.591586Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:958: TxId: 281474976715689, task: 1, CA Id [4:7519896178112016357:2566]. 
Recv TEvReadResult from ShardID=72075186224037894, ReadId=0, Status=SUCCESS, Finished=1, RowCount=0, TxLocks= , BrokenTxLocks= 2025-06-25T14:37:47.591602Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1050: TxId: 281474976715689, task: 1, CA Id [4:7519896178112016357:2566]. Taken 0 locks 2025-06-25T14:37:47.591616Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1064: TxId: 281474976715689, task: 1, CA Id [4:7519896178112016357:2566]. new data for read #0 seqno = 1 finished = 1 2025-06-25T14:37:47.591634Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896178112016357:2566], TxId: 281474976715689, task: 1. Ctx: { TraceId : 01jykrc0sy6tzgkfxbne3psgk2. SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 276037645 2025-06-25T14:37:47.591664Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896178112016357:2566], TxId: 281474976715689, task: 1. Ctx: { TraceId : 01jykrc0sy6tzgkfxbne3psgk2. SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:37:47.591682Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715689, task: 1, CA Id [4:7519896178112016357:2566]. enter getasyncinputdata results size 1, freeSpace 8388608 2025-06-25T14:37:47.591698Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1227: TxId: 281474976715689, task: 1, CA Id [4:7519896178112016357:2566]. enter pack cells method shardId: 72075186224037894 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-25T14:37:47.591713Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1308: TxId: 281474976715689, task: 1, CA Id [4:7519896178112016357:2566]. exit pack cells method shardId: 72075186224037894 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-25T14:37:47.591724Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1365: TxId: 281474976715689, task: 1, CA Id [4:7519896178112016357:2566]. returned 0 rows; processed 0 rows 2025-06-25T14:37:47.591770Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1402: TxId: 281474976715689, task: 1, CA Id [4:7519896178112016357:2566]. dropping batch for read #0 2025-06-25T14:37:47.591779Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715689, task: 1, CA Id [4:7519896178112016357:2566]. effective maxinflight 1 sorted 1 2025-06-25T14:37:47.591790Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715689, task: 1, CA Id [4:7519896178112016357:2566]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-06-25T14:37:47.591819Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715689, task: 1, CA Id [4:7519896178112016357:2566]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-25T14:37:47.591890Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7519896178112016357:2566], TxId: 281474976715689, task: 1. Ctx: { TraceId : 01jykrc0sy6tzgkfxbne3psgk2. SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. 
Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T14:37:47.591915Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896178112016358:2567], TxId: 281474976715689, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . TraceId : 01jykrc0sy6tzgkfxbne3psgk2. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646923 2025-06-25T14:37:47.591935Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715689, task: 2. Finish input channelId: 1, from: [4:7519896178112016357:2566] 2025-06-25T14:37:47.591960Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896178112016358:2567], TxId: 281474976715689, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . TraceId : 01jykrc0sy6tzgkfxbne3psgk2. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-25T14:37:47.592001Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7519896178112016358:2567], TxId: 281474976715689, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . TraceId : 01jykrc0sy6tzgkfxbne3psgk2. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T14:37:47.592015Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896178112016357:2566], TxId: 281474976715689, task: 1. Ctx: { TraceId : 01jykrc0sy6tzgkfxbne3psgk2. SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646927 2025-06-25T14:37:47.592032Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896178112016357:2566], TxId: 281474976715689, task: 1. Ctx: { TraceId : 01jykrc0sy6tzgkfxbne3psgk2. SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:37:47.592049Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715689, task: 1. Tasks execution finished 2025-06-25T14:37:47.592061Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519896178112016357:2566], TxId: 281474976715689, task: 1. Ctx: { TraceId : 01jykrc0sy6tzgkfxbne3psgk2. SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-25T14:37:47.592153Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715689, task: 1. pass away 2025-06-25T14:37:47.592242Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715689;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:37:47.592547Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896178112016358:2567], TxId: 281474976715689, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . TraceId : 01jykrc0sy6tzgkfxbne3psgk2. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-25T14:37:47.592582Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715689, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-25T14:37:47.592590Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715689, task: 2. Tasks execution finished 2025-06-25T14:37:47.592598Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519896178112016358:2567], TxId: 281474976715689, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzEwNWIyMzctNGQzOGFkOTItODdlNTI0NWUtOWJlOGFmNWY=. CustomerSuppliedId : . TraceId : 01jykrc0sy6tzgkfxbne3psgk2. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-25T14:37:47.592640Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715689, task: 2. pass away 2025-06-25T14:37:47.592692Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715689;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:37:47.764548Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:25315: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:25315 2025-06-25T14:37:48.697196Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:25315: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:25315 >> KqpImmediateEffects::UpsertExistingKey [GOOD] >> KqpImmediateEffects::WriteThenReadWithCommit >> KqpEffects::DeletePkPrefixWithIndex [GOOD] >> KqpEffects::AlterDuringUpsertTransaction+UseSink >> KqpImmediateEffects::Delete [GOOD] >> KqpImmediateEffects::ConflictingKeyW1WRR2 >> KqpImmediateEffects::InsertDuplicates+UseSink [GOOD] >> KqpImmediateEffects::InsertConflictTxAborted >> KqpEffects::InsertRevert_Literal_Success [GOOD] >> KqpEffects::InsertRevert_Literal_Duplicates >> Viewer::JsonStorageListingV2GroupIdFilter [GOOD] >> Viewer::JsonStorageListingV2NodeIdFilter >> KqpImmediateEffects::Insert [GOOD] >> KqpImmediateEffects::ImmediateUpdateSelect ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::SimpleFeatureFlags [GOOD] Test command err: BASE_PERF = 3.934276612 2025-06-25T14:36:09.286338Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:121:2167], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:09.286674Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:36:09.286895Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:36:09.629196Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 18978, node 1 TClient is connected to server localhost:32558 2025-06-25T14:36:22.255760Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:287:2330], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:22.256072Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:22.256181Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:36:22.621173Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 TServer::EnableGrpc on GrpcPort 1397, node 2 TClient is connected to server localhost:26951 2025-06-25T14:36:34.366195Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:299:2341], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:34.366565Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:34.366727Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:36:34.816997Z node 3 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 3 Type# 268639257 TServer::EnableGrpc on GrpcPort 10336, node 3 TClient is connected to server localhost:10151 2025-06-25T14:36:47.135967Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:117:2163], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:47.136574Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:36:47.136708Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:36:47.510879Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 6159, node 4 TClient is connected to server localhost:1703 2025-06-25T14:36:59.779874Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:284:2327], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:59.780420Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:59.780702Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:37:00.180955Z node 5 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 5 Type# 268639257 TServer::EnableGrpc on GrpcPort 12503, node 5 TClient is connected to server localhost:26524 2025-06-25T14:37:14.005585Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:299:2341], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:14.005982Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:14.006212Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:37:14.521142Z node 6 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 6 Type# 268639257 TServer::EnableGrpc on GrpcPort 10085, node 6 TClient is connected to server localhost:27493 2025-06-25T14:37:30.440091Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:299:2341], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:30.440690Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:30.441122Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:37:30.971257Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 7898, node 7 TClient is connected to server localhost:14224 2025-06-25T14:37:36.009503Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7519896132115710052:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:36.009655Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:37:36.675850Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:36.692506Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519896127820742737:2080] 1750862255985339 != 1750862255985342 2025-06-25T14:37:36.747212Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:36.757499Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:36.792257Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8015, node 8 2025-06-25T14:37:37.004580Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:37.004613Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:37.004634Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:37.004838Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:37.060300Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5354 2025-06-25T14:37:41.009509Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7519896132115710052:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:41.009610Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpPg::CreateNotNullPgColumn [GOOD] >> KqpPg::CreateSequence >> KqpImmediateEffects::ConflictingKeyR1WR2 [GOOD] >> KqpImmediateEffects::ConflictingKeyR1RWR2 >> KqpImmediateEffects::ConflictingKeyW1RR2 
[GOOD] >> KqpImmediateEffects::ConflictingKeyRW1WRR2 >> KqpImmediateEffects::ConflictingKeyR1WRR2 [GOOD] >> KqpImmediateEffects::ConflictingKeyRW1RR2 >> KqpInplaceUpdate::Negative_BatchUpdate+UseSink [GOOD] >> KqpInplaceUpdate::BigRow >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink >> KqpPg::InsertValuesFromTableWithDefaultBool-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull+useSink >> KqpPg::CreateUniqComplexPgColumn-useSink [GOOD] >> KqpPg::CreateTempTable >> TKeyValueTest::TestRewriteThenLastValueNewApi [GOOD] >> TKeyValueTest::TestSetExecutorFastLogPolicy >> KqpPg::DropTableIfExists_GenericQuery [GOOD] >> KqpPg::EquiJoin+useSink |81.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |81.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |81.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats >> KqpInplaceUpdate::SingleRowStr-UseSink [GOOD] >> KqpWrite::CastValues >> KqpEffects::InsertAbort_Literal_Duplicates+UseSink [GOOD] >> KqpEffects::InsertAbort_Literal_Duplicates-UseSink >> KqpEffects::UpdateOn_Literal [GOOD] >> KqpEffects::UpdateOn_Params >> Yq_1::ModifyQuery [GOOD] >> KqpPg::CreateSequence [GOOD] >> KqpPg::AlterSequence >> KqpEffects::AlterDuringUpsertTransaction+UseSink [GOOD] >> KqpEffects::AlterDuringUpsertTransaction-UseSink |81.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |81.6%| [LD] {RESULT} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |81.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |81.6%| [LD] {RESULT} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut >> KqpEffects::InsertAbort_Literal_Conflict+UseSink [GOOD] >> KqpEffects::EmptyUpdate+UseSink >> KqpImmediateEffects::WriteThenReadWithCommit [GOOD] >> KqpImmediateEffects::ImmediateUpdateSelect [GOOD] >> KqpWorkloadService::TestCpuLoadThreshold [GOOD] >> KqpWorkloadService::TestCpuLoadThresholdRefresh >> KqpEffects::InsertRevert_Literal_Duplicates [GOOD] >> KqpImmediateEffects::ConflictingKeyW1WRR2 [GOOD] >> KqpImmediateEffects::InsertConflictTxAborted [GOOD] >> TQuorumTrackerTests::Erasure4Plus2BlockNotIncludingMyFailDomain_8_2 [GOOD] >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_4_2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::ModifyQuery [GOOD] Test command err: 2025-06-25T14:37:09.581795Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896016681453799:2174];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:09.581886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0625 14:37:09.950306764 237132 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:09.950478471 237132 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 
2025-06-25T14:37:10.589564Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:10.595370Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:11.021718Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4899: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:4899 2025-06-25T14:37:11.028378Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4899: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:4899 } ] 2025-06-25T14:37:11.601907Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:12.604767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:12.832563Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:4899: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:4899 } ] 2025-06-25T14:37:13.615409Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:13.774052Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:13.800330Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896033861323224:2273], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:13.899619Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896033861323224:2273], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fd5/r3tmp/tmpqQwygo/pdisk_1.dat 2025-06-25T14:37:14.042049Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896033861323224:2273], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:14.192063Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 4899, node 1 TClient is connected to server localhost:7096 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:37:14.580586Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896016681453799:2174];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:14.580663Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:14.654580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... E0625 14:37:14.959411641 237354 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:14.959567453 237354 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:15.019407Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:15.019816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:15.019834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:15.019842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:15.019989Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:15.300616Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/jobs". Create session OK 2025-06-25T14:37:15.300644Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/jobs" 2025-06-25T14:37:15.300654Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/jobs" 2025-06-25T14:37:15.306069Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". 
Create session OK 2025-06-25T14:37:15.306098Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-25T14:37:15.306104Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-25T14:37:15.312236Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-25T14:37:15.312243Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". Create session OK 2025-06-25T14:37:15.312258Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-25T14:37:15.312259Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:15.312275Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-25T14:37:15.312283Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:15.315271Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". Create session OK 2025-06-25T14:37:15.315320Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-25T14:37:15.315330Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-25T14:37:15.320425Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/compute_databases". Create session OK 2025-06-25T14:37:15.320445Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:15.320452Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:15.323093Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/queries". Create session OK 2025-06-25T14:37:15.323115Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/queries" 2025-06-25T14:37:15.323121Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/queries" 2025-06-25T14:37:15.329951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:37:15.344639Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-25T14:37:15.344667Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-25T14:37:15.344673Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-25T14:37:15.358328Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "Root/yq" 2025-06-25T14:37:15.358363Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "Root/yq": 2025-06-25T14:37:15.360829Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". Create session OK 2025-06-25T14:37:15.360845Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-25T14:37:15.360851Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-25T14:37:15.377697Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/quotas". 
Create session OK 2025-06-25T14:37:15.377727Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/quotas" 2025-06-25T14:37:15.377734Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/quotas" 2025-06-25T14:37:15.379506Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-25T14:37:15.379520Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-25T14:37:15.379525Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-25T14:37:15.385601Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create sess ... } } InMemory: true DstStageId: 1 } 2025-06-25T14:37:47.141136Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715776, task: 1, CA Id [4:7519896179957993534:2901]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-06-25T14:37:47.141147Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715776, task: 1, CA Id [4:7519896179957993534:2901]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-06-25T14:37:47.141165Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896179957993534:2901], TxId: 281474976715776, task: 1. Ctx: { TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-25T14:37:47.141177Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715776, task: 1, CA Id [4:7519896179957993534:2901]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-06-25T14:37:47.141191Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715776, task: 1, CA Id [4:7519896179957993534:2901]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-06-25T14:37:47.141237Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:134: SelfId: [4:7519896179957993535:2902], TxId: 281474976715776, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Start compute actor [4:7519896179957993535:2902], task: 4 2025-06-25T14:37:47.141252Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:141: SelfId: [4:7519896179957993535:2902], TxId: 281474976715776, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Set execution timeout 299.996365s 2025-06-25T14:37:47.141492Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1365: SelfId: [4:7519896179957993535:2902], TxId: 281474976715776, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. 
Create source for input 0 { Source { Type: "KqpReadRangesSource" Settings { type_url: "type.googleapis.com/NKikimrTxDataShard.TKqpReadRangesSourceSettings" value: "\n%\n\014\010\200\202\224\204\200\200\200\200\001\020\017\022\017Root/yq/queries\030\001*\0000\001\032?\022=\002\000\037\000\000\000yandexcloud://Execute_folder_id\024\000\000\000utque7j6ed0gaecu7u75\"\027\010\022\022\rmeta_revision\030\003(\0000\000\"\023\010\002\022\010query_id\030\201 (\0000\001\"\020\010\001\022\005scope\030\201 (\0000\001(\0000\000@\201 @\201 H\001R\022\010\305\205\230\274\3722\020\377\377\377\377\377\377\377\377\377\001`\000h\376\247\200\200\200\200@p\004z\000z\000\240\001\000\270\001\000" } WatermarksMode: WATERMARKS_MODE_DISABLED } } 2025-06-25T14:37:47.141565Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896179957993535:2902], TxId: 281474976715776, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-06-25T14:37:47.141625Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7519896179957993535:2902], TxId: 281474976715776, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 4 DstTaskId: 5 SrcEndpoint { ActorId { RawX1: 7519896179957993535 RawX2: 4503616807242582 } } DstEndpoint { } InMemory: true SrcStageId: 3 DstStageId: 4 } 2025-06-25T14:37:47.141667Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:444: TxId: 281474976715776, task: 4, CA Id [4:7519896179957993535:2902]. Shards State: TShardState{ TabletId: 0, Last Key , Ranges: [], Points: [# 0: (String : yandexcloud://Execute_folder_id, String : utque7j6ed0gaecu7u75)], RetryAttempt: 0, ResolveAttempt: 0 } 2025-06-25T14:37:47.141726Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:511: TxId: 281474976715776, task: 4, CA Id [4:7519896179957993535:2902]. Sending TEvResolveKeySet update for table 'Root/yq/queries', range: [(String : yandexcloud://Execute_folder_id, String : utque7j6ed0gaecu7u75) ; (String : yandexcloud://Execute_folder_id, String : utque7j6ed0gaecu7u75)], attempt #1 2025-06-25T14:37:47.141764Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896179957993535:2902], TxId: 281474976715776, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-06-25T14:37:47.141823Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7519896179957993535:2902], TxId: 281474976715776, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. 
Received channels info: Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 4 DstTaskId: 5 SrcEndpoint { ActorId { RawX1: 7519896179957993535 RawX2: 4503616807242582 } } DstEndpoint { ActorId { RawX1: 7519896179957993538 RawX2: 4503616807242585 } } InMemory: true SrcStageId: 3 DstStageId: 4 } 2025-06-25T14:37:47.141853Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1093: SelfId: [4:7519896179957993535:2902], TxId: 281474976715776, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Update output channelId: 2, peer: [4:7519896179957993538:2905] 2025-06-25T14:37:47.141867Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715776, task: 4, CA Id [4:7519896179957993535:2902]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-06-25T14:37:47.141880Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715776, task: 4, CA Id [4:7519896179957993535:2902]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-06-25T14:37:47.141904Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896179957993535:2902], TxId: 281474976715776, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646926 2025-06-25T14:37:47.141963Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [4:7519896179957993535:2902], TxId: 281474976715776, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Received channels info: Update { Id: 2 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 4 DstTaskId: 5 SrcEndpoint { ActorId { RawX1: 7519896179957993535 RawX2: 4503616807242582 } } DstEndpoint { ActorId { RawX1: 7519896179957993538 RawX2: 4503616807242585 } } InMemory: true SrcStageId: 3 DstStageId: 4 } 2025-06-25T14:37:47.141973Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715776, task: 4, CA Id [4:7519896179957993535:2902]. enter getasyncinputdata results size 0, freeSpace 8388608 2025-06-25T14:37:47.141984Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715776, task: 4, CA Id [4:7519896179957993535:2902]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-06-25T14:37:47.142002Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896179957993535:2902], TxId: 281474976715776, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:37:47.142011Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715776, task: 4, CA Id [4:7519896179957993535:2902]. 
enter getasyncinputdata results size 0, freeSpace 8388608 2025-06-25T14:37:47.142023Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715776, task: 4, CA Id [4:7519896179957993535:2902]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 0 has limit 0 limit reached 0 2025-06-25T14:37:47.142092Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:134: SelfId: [4:7519896179957993536:2903], TxId: 281474976715776, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Start compute actor [4:7519896179957993536:2903], task: 2 2025-06-25T14:37:47.142117Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:141: SelfId: [4:7519896179957993536:2903], TxId: 281474976715776, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Set execution timeout 299.996204s 2025-06-25T14:37:47.142318Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1452: SelfId: [4:7519896179957993536:2903], TxId: 281474976715776, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=MjJlMzM4MGEtM2ZjOWY5ZTctY2U4MzRjZDAtYTU3NWUxYWE=. TraceId : 01jykrc0qzewppe598v5t7jf7q. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Create sink for output 0 { Sink { Type: "KqpTableSink" Settings { type_url: "type.googleapis.com/NKikimrKqp.TKqpTableSinkSettings" value: "\032%\n\025Root/yq/pending_small\020\200\202\224\204\200\200\200\200\001\030\014(\001\"\r\n\006tenant\020\n \201 \"\014\n\005scope\020\001 \201 \"\017\n\010query_id\020\002 \201 *\024\n\016assigned_until\020\013 2*\022\n\014last_seen_at\020\003 2*\014\n\005owner\020\t \201 *\017\n\010query_id\020\002 \201 *\023\n\rretry_counter\020\004 \004*\036\n\030retry_counter_updated_at\020\005 2*\020\n\nretry_rate\020\014 *\014\n\005scope\020\001 \201 *\r\n\006tenant\020\n \201 0\376\247\200\200\200\200@8\004@\000H\001R\022\t9\2206\312\273\t\\h\021\351\t\000\000\004\000\020\000X\000`\000h\007h\003h\006h\002h\004h\005h\010h\001h\000x\000" } } } >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink [GOOD] >> KqpInplaceUpdate::BigRow [GOOD] >> KqpImmediateEffects::ConflictingKeyR1RWR2 [GOOD] >> KqpImmediateEffects::ConflictingKeyRW1WRR2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ImmediateUpdateSelect [GOOD] Test command err: Trying to start YDB, gRPC: 7942, MsgBus: 28795 2025-06-25T14:37:46.139959Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896175264134791:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:46.144332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001878/r3tmp/tmpIqZKpj/pdisk_1.dat 2025-06-25T14:37:46.710057Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:46.710698Z node 1 :CONFIGS_DISPATCHER ERROR: 
configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896175264134762:2080] 1750862266135011 != 1750862266135014 2025-06-25T14:37:46.721944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:46.722031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:46.729573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7942, node 1 2025-06-25T14:37:46.916434Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:46.916451Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:46.916458Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:46.916707Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28795 2025-06-25T14:37:47.152780Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28795 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:47.523443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:47.539299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:37:47.548052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:37:47.685883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:47.860071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:47.955106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:49.444548Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896188149038299:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.444635Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.702545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.736357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.766521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.792879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.831403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.873832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.918692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.008571Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896192444006256:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.008627Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.008909Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896192444006261:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.013105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:50.022444Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896192444006263:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:50.117159Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896192444006314:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:51.079355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:51.140391Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896175264134791:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:51.142768Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 28862, MsgBus: 17516 2025-06-25T14:37:52.596236Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896200507174303:2192];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:52.671382Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001878/r3tmp/tmp5hw6F0/pdisk_1.dat 2025-06-25T14:37:52.761967Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:52.781904Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:52.781979Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:52.785436Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28862, node 2 2025-06-25T14:37:53.012898Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:53.012919Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:53.012926Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:53.013029Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17516 TClient is connected to server localhost:17516 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:53.529628Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:53.538698Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:37:53.547385Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.575525Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:53.636938Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.847983Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.932296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:37:55.919121Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896213392077680:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:55.919201Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:55.967346Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:55.998911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.038986Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.103065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.135246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.190563Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.245202Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.346989Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896217687045640:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.347072Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.347418Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896217687045645:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.350827Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:56.361006Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896217687045647:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:37:56.432744Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896217687045698:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:57.504487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.640264Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896200507174303:2192];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:57.640381Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |81.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TQuorumTrackerTests::ErasureMirror3IncludingMyFailDomain_4_2 [GOOD] >> KqpPg::CreateTempTable [GOOD] >> KqpPg::CreateTempTableSerial |81.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap |81.6%| [LD] {RESULT} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap |81.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::WriteThenReadWithCommit [GOOD] Test command err: Trying to start YDB, gRPC: 27860, MsgBus: 29930 2025-06-25T14:37:44.657551Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896164263891048:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:44.658080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001895/r3tmp/tmpkDj3zB/pdisk_1.dat 2025-06-25T14:37:45.171313Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:45.171450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:45.180550Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:45.185993Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27860, node 1 2025-06-25T14:37:45.380859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:45.380883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:45.380895Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:45.380993Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:45.658374Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29930 TClient is connected to server localhost:29930 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:46.076794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:46.106838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:46.327579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:46.540271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:37:46.673046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.250603Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896181443761751:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.250730Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.662277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.699185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.731749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.756118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.792448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.839633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.915718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.994081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896181443762415:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.994181Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.994311Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896181443762420:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.998207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:49.008597Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896181443762422:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:49.071011Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896185738729769:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:49.661143Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896164263891048:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:49.661195Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:50.520277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 12506, MsgBus: 30929 2025-06-25T14:37:52.119324Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896202080922085:2146];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.y ... tabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 2, stages: 1 2025-06-25T14:37:58.202726Z node 2 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 1, OutputsCount: 1 2025-06-25T14:37:58.203070Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715676. Resolved key sets: 1 2025-06-25T14:37:58.203186Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715676. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 17] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 3 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint64 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-25T14:37:58.203255Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2035: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Stage [0,0] AST: ( (return (lambda '($1) $1)) ) 2025-06-25T14:37:58.203454Z node 2 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:241: Create result channelId: 1 from task: 1 with index: 0 2025-06-25T14:37:58.203692Z node 2 :KQP_EXECUTER DEBUG: kqp_shards_resolver.cpp:76: [ShardsResolver] TxId: 281474976715676. 
Shard resolve complete, resolved shards: 1 2025-06-25T14:37:58.203727Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:273: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolved, success: 1, failed: 0 2025-06-25T14:37:58.203758Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:296: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards on nodes: node 2: [72075186224037922] 2025-06-25T14:37:58.203794Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: true, 1 scan tasks on 1 nodes, localComputeTasks: 0, snapshot: {18446744073709551615, 1750862278000} 2025-06-25T14:37:58.203998Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [2:7519896227850728618:2474] 2025-06-25T14:37:58.204026Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [2:7519896227850728618:2474], channels: 1 2025-06-25T14:37:58.204051Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-25T14:37:58.204073Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:7519896227850728618:2474], 2025-06-25T14:37:58.204095Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:7519896227850728618:2474], 2025-06-25T14:37:58.204112Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-25T14:37:58.207132Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:7519896227850728618:2474], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-25T14:37:58.207166Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:7519896227850728618:2474], 2025-06-25T14:37:58.207193Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:7519896227850728618:2474], 2025-06-25T14:37:58.218567Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:7519896227850728618:2474], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 5870 DurationUs: 2000 Tasks { TaskId: 1 CpuTimeUs: 1930 FinishTimeMs: 1750862278214 OutputRows: 1 OutputBytes: 22 Tables { TablePath: "/Root/TestImmediateEffects" ReadRows: 1 ReadBytes: 22 AffectedPartitions: 1 } IngressRows: 1 ResultRows: 1 ResultBytes: 22 ComputeCpuTimeUs: 1817 BuildCpuTimeUs: 113 HostName: "ghrun-kqfvx6aroe" NodeId: 2 StartTimeMs: 1750862278212 CreateTimeMs: 1750862278204 UpdateTimeMs: 1750862278214 } MaxMemoryUsage: 1048576 } 2025-06-25T14:37:58.218683Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:7519896227850728618:2474] 2025-06-25T14:37:58.218855Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. 
Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T14:37:58.218885Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:7519896227850728614:2474] TxId: 281474976715676. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.005870s ReadRows: 1 ReadBytes: 22 ru: 3 rate limiter was not found force flag: 1 2025-06-25T14:37:58.221170Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715677. Resolved key sets: 0 2025-06-25T14:37:58.221251Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715677. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {18446744073709551615, 1750862278000} 2025-06-25T14:37:58.221275Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:7519896227850728622:2474] TxId: 281474976715677. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-25T14:37:58.221311Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:276: ActorId: [2:7519896227850728622:2474] TxId: 281474976715677. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send Commit to BufferActor=[2:7519896227850728605:2474] 2025-06-25T14:37:58.221367Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:7519896227850728622:2474] TxId: 281474976715677. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-25T14:37:58.223907Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:7519896227850728622:2474] TxId: 281474976715677. Ctx: { TraceId: 01jykrcbe59mjgwmarjhfxr6bf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTRkOWFmM2ItNGVlYmYxNmQtNDg4ZmM4MS0zZDI1NjRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyW1WRR2 [GOOD] Test command err: Trying to start YDB, gRPC: 4972, MsgBus: 28658 2025-06-25T14:37:44.650347Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896165512442504:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:44.650390Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001882/r3tmp/tmp1uq0Ba/pdisk_1.dat 2025-06-25T14:37:45.169804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:45.169949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:45.174881Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:45.187174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4972, node 1 2025-06-25T14:37:45.372213Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:45.372232Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:45.372248Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:45.372367Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:45.672437Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28658 TClient is connected to server localhost:28658 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:46.138006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:37:46.155803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:37:46.178358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:37:46.351409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:46.590519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:46.700775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:48.305204Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896182692313302:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.305330Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.664227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.703321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.737094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.779099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.810755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.879626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.905315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.994283Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896182692313965:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.994360Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.994730Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896182692313970:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.998966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:49.012144Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896182692313972:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:49.091237Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896186987281319:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:49.650821Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896165512442504:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:49.650910Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:50.528869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 25960, MsgBus: 32514 2025-06-25T14:37:52.246217Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: f ... node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:52.544856Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:52.544961Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32514 TClient is connected to server localhost:32514 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:53.183589Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:37:53.192713Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:37:53.203025Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.269453Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:53.308469Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.478959Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.571801Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:55.865256Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896211083292341:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:55.865337Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:55.941395Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.029038Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.084042Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.115298Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.145873Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.181096Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.213919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.289337Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896215378260295:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.289468Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.290184Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896215378260301:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.294391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:56.337042Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896215378260303:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:56.414687Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896215378260354:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:57.247628Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896198198388856:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:57.247715Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:57.783012Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:58.356186Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [2:7519896223968195492:2476], TxId: 281474976710677, task: 1. Ctx: { SessionId : ydb://session/3?node_id=2&id=YmY2YzlmY2QtZWQ4NTk2YzAtOWFhZTg3OWEtMzgxZjlmZTQ=. CustomerSuppliedId : . TraceId : 01jykrcbka9bjha81hmsmftesq. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Source[0] fatal error: {
: Error: Read request aborted subissue: {
: Error: Read conflict with concurrent transaction (shard# 72075186224037922 node# 2 state# Ready) } } 2025-06-25T14:37:58.368539Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7519896223968195492:2476], TxId: 281474976710677, task: 1. Ctx: { SessionId : ydb://session/3?node_id=2&id=YmY2YzlmY2QtZWQ4NTk2YzAtOWFhZTg3OWEtMzgxZjlmZTQ=. CustomerSuppliedId : . TraceId : 01jykrcbka9bjha81hmsmftesq. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: ABORTED DEFAULT_ERROR: {
: Error: Read request aborted subissue: {
: Error: Read conflict with concurrent transaction (shard# 72075186224037922 node# 2 state# Ready) } }. 2025-06-25T14:37:58.369297Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YmY2YzlmY2QtZWQ4NTk2YzAtOWFhZTg3OWEtMzgxZjlmZTQ=, ActorId: [2:7519896219673227923:2476], ActorState: ExecuteState, TraceId: 01jykrcbka9bjha81hmsmftesq, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertRevert_Literal_Duplicates [GOOD] Test command err: Trying to start YDB, gRPC: 32639, MsgBus: 24627 2025-06-25T14:37:44.636430Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896165468313493:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:44.647002Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018a7/r3tmp/tmpHl08Du/pdisk_1.dat 2025-06-25T14:37:45.168545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:45.168653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:45.181919Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:45.196156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32639, node 1 2025-06-25T14:37:45.374024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:45.374041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:45.374075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:45.374200Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:45.656520Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24627 TClient is connected to server localhost:24627 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:37:46.152678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:46.169721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:37:46.176711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:46.391709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:46.572922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:46.655597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:48.086559Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896182648184268:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.086640Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.662758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.753877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.786109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.827087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.854849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.922430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.963672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.027874Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896186943152226:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.027949Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.028516Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896186943152231:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.031943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:49.044440Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896186943152233:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:49.108265Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896186943152286:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:49.627614Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896165468313493:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:49.635000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 62604, MsgBus: 62490 2025-06-25T14:37:52.339383Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896199195895868:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:52.339431Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018a7/r3tmp/tmprOUGcW/pdisk_1.dat 2025-06-25T14:37:52.455786Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:52.493878Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:52.493956Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 62604, node 2 2025-06-25T14:37:52.496519Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:52.596595Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:52.596613Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:52.596620Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:52.596739Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62490 TClient is connected to server localhost:62490 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:53.117916Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:53.126908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:37:53.140282Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.224477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.354924Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:53.426275Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.523934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:37:55.856808Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896212080799367:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:55.856925Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:55.912284Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:55.947554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.019564Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.052779Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.091128Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.127496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.162528Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.248718Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896216375767324:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.248799Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.249017Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896216375767329:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.253069Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:56.266273Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896216375767331:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:56.367612Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896216375767382:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:57.340438Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896199195895868:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:57.340520Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpImmediateEffects::ConflictingKeyRW1RR2 [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::InsertConflictTxAborted [GOOD] Test command err: Trying to start YDB, gRPC: 5714, MsgBus: 3367 2025-06-25T14:37:44.638941Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896165807212759:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:44.647240Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00188f/r3tmp/tmp0y4eYU/pdisk_1.dat 2025-06-25T14:37:45.175985Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:45.192033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:45.192140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:45.192251Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896165807212738:2080] 1750862264635331 != 1750862264635334 2025-06-25T14:37:45.194168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5714, node 1 2025-06-25T14:37:45.370481Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:45.370511Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:45.370531Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:45.370679Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:45.668764Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3367 TClient is connected to server localhost:3367 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:46.169027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:46.206839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:37:46.215618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:46.356435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:46.543384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:46.636683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:48.110071Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896182987083567:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.110168Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.663211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.701344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.733366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.802472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.835178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.874073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.916985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.010214Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896187282051529:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.010398Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.010801Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896187282051536:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.015131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:49.050227Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896187282051538:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:49.104183Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896187282051589:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:49.636658Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896165807212759:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:49.636742Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:50.538059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work ... tIsUp 'Root' success. 2025-06-25T14:37:53.206317Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:53.216736Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:37:53.220535Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:53.230657Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.296156Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.456122Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:37:53.549222Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:55.980906Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896210952251626:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:55.980968Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.051001Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.124434Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.158463Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.188017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.225201Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.265119Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.359425Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.458823Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896215247219593:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.458913Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.459146Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896215247219598:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.463415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:56.485990Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:37:56.486238Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896215247219600:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:56.566676Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896215247219651:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:57.204560Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896198067348139:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:57.214581Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:57.763744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:58.090007Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=4; 2025-06-25T14:37:58.090229Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 4 at tablet 72075186224037922 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:37:58.090391Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 4 at tablet 72075186224037922 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:37:58.091322Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [2:7519896223837154731:2474], Table: `/Root/TestImmediateEffects` ([72057594046644480:17:1]), SessionActorId: [2:7519896219542187217:2474]Got CONSTRAINT VIOLATION for table `/Root/TestImmediateEffects`. ShardID=72075186224037922, Sink=[2:7519896223837154731:2474].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:37:58.091399Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519896223837154721:2474], SessionActorId: [2:7519896219542187217:2474], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/TestImmediateEffects`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[2:7519896219542187217:2474]. isRollback=0 2025-06-25T14:37:58.091612Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=M2YxZWVkYTItNTM3ZTQzYWQtMThhNThkYTUtNzI2MWE1MDU=, ActorId: [2:7519896219542187217:2474], ActorState: ExecuteState, TraceId: 01jykrcbb49g3y06egsrcvvzrf, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [2:7519896223837154722:2474] from: [2:7519896223837154721:2474] 2025-06-25T14:37:58.091686Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519896223837154722:2474] TxId: 281474976710674. Ctx: { TraceId: 01jykrcbb49g3y06egsrcvvzrf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2YxZWVkYTItNTM3ZTQzYWQtMThhNThkYTUtNzI2MWE1MDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/TestImmediateEffects`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:37:58.091860Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=M2YxZWVkYTItNTM3ZTQzYWQtMThhNThkYTUtNzI2MWE1MDU=, ActorId: [2:7519896219542187217:2474], ActorState: ExecuteState, TraceId: 01jykrcbb49g3y06egsrcvvzrf, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 12567, MsgBus: 9021 2025-06-25T14:36:54.388932Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895952504543009:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:54.391637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a93/r3tmp/tmpf1bPGc/pdisk_1.dat 2025-06-25T14:36:54.812182Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12567, node 1 2025-06-25T14:36:54.818313Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:54.818429Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:54.822629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:54.956950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:54.956973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:54.956980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:54.957091Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9021 2025-06-25T14:36:55.392431Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9021 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:36:55.660867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:36:55.674358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:36:57.649564Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895965389445428:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:57.649726Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:57.693161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:57.856686Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895965389445535:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:57.856794Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:57.872027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:57.916346Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895965389445613:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:57.916424Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:57.916831Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895965389445618:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:57.920508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:57.930608Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895965389445620:2319], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:36:57.995581Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895965389445671:2443] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 1187, MsgBus: 1720 2025-06-25T14:36:59.437599Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895972783610934:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:59.526216Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a93/r3tmp/tmpAb36VQ/pdisk_1.dat 2025-06-25T14:36:59.652438Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:59.654725Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519895972783610895:2080] 1750862219432913 != 1750862219432916 2025-06-25T14:36:59.672402Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:59.672481Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:59.675267Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1187, node 2 2025-06-25T14:36:59.760882Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:59.760905Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:59.760913Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:59.761041Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1720 TClient is connected to server localhost:1720 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:37:00.303926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:00.309734Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:37:00.465748Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:02.800232Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895985668513401:2291], DatabaseId: /Root, PoolId: default, Failed to fetch ... CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:48.523937Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:52.134972Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519896200752947164:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:52.135080Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:52.135520Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519896200752947176:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:52.140527Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:52.153292Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519896200752947178:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:37:52.246037Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519896200752947229:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:52.285343Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7519896200752947238:2300], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" 2025-06-25T14:37:52.286805Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=10&id=MzQzYTAyMjAtMjc4YTlkYWYtOGRhNGY4NDAtNDU4MWY5OGE=, ActorId: [10:7519896200752947162:2291], ActorState: ExecuteState, TraceId: 01jykrc241659z5crnh2c8x3gj, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" 2025-06-25T14:37:52.429700Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519896179278110067:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:52.429801Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4547, MsgBus: 13024 2025-06-25T14:37:53.501161Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7519896205515191483:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:53.529821Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a93/r3tmp/tmpQScDG3/pdisk_1.dat 2025-06-25T14:37:53.698810Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:53.698931Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:53.717168Z node 11 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:53.719088Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:53.719470Z node 11 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [11:7519896205515191354:2080] 1750862273469543 != 1750862273469546 TServer::EnableGrpc on GrpcPort 4547, node 11 2025-06-25T14:37:53.796990Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:53.797021Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:53.797035Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:53.797225Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13024 2025-06-25T14:37:54.501363Z node 11 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13024 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:54.746143Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:54.757618Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:37:58.488141Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519896205515191483:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:58.488252Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:58.559217Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896226990028475:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:58.559340Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:58.559983Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896226990028487:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:58.567780Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:58.590405Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519896226990028489:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:37:58.692410Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519896226990028541:2339] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:58.767139Z node 11 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [11:7519896226990028550:2301], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" 2025-06-25T14:37:58.768553Z node 11 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=11&id=NGMwZGNkZjUtODYwNjdmZjUtN2M5NzNiNjgtYmVjNjc0NTM=, ActorId: [11:7519896226990028449:2292], ActorState: ExecuteState, TraceId: 01jykrc86z0tmqdfe4xb24pc8n, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" >> Yq_1::DescribeQuery [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyRW1WRR2 [GOOD] Test command err: Trying to start YDB, gRPC: 12453, MsgBus: 17804 2025-06-25T14:37:46.135989Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896175898185954:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:46.164578Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001870/r3tmp/tmp0q8StN/pdisk_1.dat 2025-06-25T14:37:46.607764Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896175898185840:2080] 1750862266098529 != 1750862266098532 2025-06-25T14:37:46.640776Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:46.641142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:46.641229Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:46.647292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12453, node 1 2025-06-25T14:37:46.803705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:46.803733Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:46.803742Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:46.803878Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17804 2025-06-25T14:37:47.132658Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17804 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:37:47.524672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:37:47.565009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:47.722257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:47.876783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:47.961059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:49.516809Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896188783089358:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.516923Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.862107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.895083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.924997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.996303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.024117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.099033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.174795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.263861Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896193078057323:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.263931Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.263974Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896193078057328:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.267337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:50.278625Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896193078057330:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:50.369227Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896193078057381:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:51.129260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896175898185954:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:51.129337Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:51.418130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 19406, MsgBus: 9628 test_client.cpp: SetPath # /home/runner/.ya/build/build_roo ... e(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:53.254444Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19406, node 2 2025-06-25T14:37:53.378977Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:53.379001Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:53.379007Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:53.379115Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9628 TClient is connected to server localhost:9628 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:37:53.881911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:53.891199Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:37:53.907497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:54.048956Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:54.071155Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:54.276106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:54.371109Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:56.578922Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896218179417699:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.579023Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.724979Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.780097Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.839710Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.924048Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.974100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.087630Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.174530Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.260784Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896222474385659:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.260896Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.261160Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896222474385664:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.265162Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:57.279136Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896222474385666:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:37:57.342829Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896222474385717:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:58.439354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:59.415064Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [2:7519896231064320873:2476], TxId: 281474976715678, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykrccmxdc8xh6smger83jp5. SessionId : ydb://session/3?node_id=2&id=Njg3YjM2YzYtOTc1NTZhMGQtMzJlMmU5ZS1hN2NkNTk3. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Source[0] fatal error: {
: Error: Read request aborted subissue: {
: Error: Read conflict with concurrent transaction (shard# 72075186224037922 node# 2 state# Ready) } } 2025-06-25T14:37:59.415587Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7519896231064320873:2476], TxId: 281474976715678, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykrccmxdc8xh6smger83jp5. SessionId : ydb://session/3?node_id=2&id=Njg3YjM2YzYtOTc1NTZhMGQtMzJlMmU5ZS1hN2NkNTk3. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: ABORTED DEFAULT_ERROR: {
: Error: Read request aborted subissue: {
: Error: Read conflict with concurrent transaction (shard# 72075186224037922 node# 2 state# Ready) } }. 2025-06-25T14:37:59.416184Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=Njg3YjM2YzYtOTc1NTZhMGQtMzJlMmU5ZS1hN2NkNTk3, ActorId: [2:7519896226769353285:2476], ActorState: ExecuteState, TraceId: 01jykrccmxdc8xh6smger83jp5, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::BigRow [GOOD] Test command err: Trying to start YDB, gRPC: 25574, MsgBus: 21699 2025-06-25T14:37:46.260436Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896173131011520:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:46.260545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186f/r3tmp/tmp3xTTVe/pdisk_1.dat 2025-06-25T14:37:46.808768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:46.808846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:46.817412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:46.860268Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896173131011320:2080] 1750862266191381 != 1750862266191384 2025-06-25T14:37:46.889733Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25574, node 1 2025-06-25T14:37:46.973530Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:46.973562Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:46.973598Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:46.973734Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21699 2025-06-25T14:37:47.223176Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21699 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:47.598014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:47.616327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:47.783539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:47.991393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:48.091996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:49.738597Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896186015914847:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.738748Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.062772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.092471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.120988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.155899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.193439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.265491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.292940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.390216Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896190310882813:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.390313Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.390512Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896190310882818:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.394191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:50.409086Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896190310882820:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:50.486212Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896190310882871:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:51.225003Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896173131011520:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:51.225070Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:51.441573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 4756, MsgBus: 12208 2025-06-25T14:37:53.325036Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896206046881204:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:53.325105Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186f/r3tmp/tmpbdlMyi/pdisk_1.dat 2025-06-25T14:37:53.455315Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:53.469332Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:53.469408Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:53.471005Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4756, node 2 2025-06-25T14:37:53.565838Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:53.565861Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:53.565869Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:53.565982Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12208 TClient is connected to server localhost:12208 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:54.118830Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:37:54.137426Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:54.224705Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:54.353363Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:54.410737Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:54.496210Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:56.615501Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896218931784697:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.615588Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.671388Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.712456Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.755851Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.837844Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.880983Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.940326Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.009801Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.132547Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896223226752651:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.132672Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.134876Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896223226752656:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.139481Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:57.153468Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:37:57.154370Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896223226752658:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:37:57.232964Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896223226752709:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:58.329524Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896206046881204:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:58.329613Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:58.362170Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TestProgram::JsonExistsBinary ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyR1RWR2 [GOOD] Test command err: Trying to start YDB, gRPC: 31143, MsgBus: 64956 2025-06-25T14:37:46.128532Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896172503110840:2166];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:46.129125Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00187c/r3tmp/tmpKeSEZW/pdisk_1.dat 2025-06-25T14:37:46.612724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:46.612812Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:46.617915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:46.653799Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896172503110710:2080] 1750862266112680 != 1750862266112683 2025-06-25T14:37:46.661690Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31143, node 1 2025-06-25T14:37:46.783697Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:46.783720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:46.783728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:46.783863Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64956 2025-06-25T14:37:47.128522Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64956 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:47.384220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:47.401322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:37:47.420239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:37:47.611573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:47.806024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:47.874097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:37:49.570375Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896185388014237:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.570476Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.867472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.903175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.939160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.966499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.993906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.053008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.084741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.167317Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896189682982195:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.167394Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.168495Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896189682982200:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.172716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:50.186233Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896189682982203:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:37:50.286161Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896189682982255:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:51.128436Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896172503110840:2166];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:51.128539Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:51.422163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 12047, MsgBus: 1514 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00187c/r3tmp/tmpUOJrlG/pdisk_1.dat 2025-06-25T14:37:53.202294Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896199180819321:2080] 1750862272950707 != 1750862272950710 2025-06-25T14:37:53.202426Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:53.203347Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12047, node 2 2025-06-25T14:37:53.233348Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:53.233423Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:53.242941Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:53.284990Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:53.285010Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:53.285016Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:53.285114Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1514 TClient is connected to server localhost:1514 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:53.774266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:53.798353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.863898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.964874Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:54.045760Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:37:54.150961Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.605935Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896216360690124:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.606036Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:56.674396Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.733331Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.778580Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.866795Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.904877Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:56.990213Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.064776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.194998Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896220655658077:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.195104Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.195434Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896220655658082:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.200013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:57.220152Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896220655658084:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:37:57.278149Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896220655658135:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:58.488720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyRW1RR2 [GOOD] Test command err: Trying to start YDB, gRPC: 28002, MsgBus: 26032 2025-06-25T14:37:46.286363Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896174934789854:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:46.286734Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001869/r3tmp/tmpizAZOz/pdisk_1.dat 2025-06-25T14:37:46.846040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:46.846170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:46.849751Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:46.859042Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:46.860468Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896174934789672:2080] 1750862266252650 != 1750862266252653 TServer::EnableGrpc on GrpcPort 28002, node 1 2025-06-25T14:37:46.978250Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:46.978270Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:46.978278Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:46.982563Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26032 2025-06-25T14:37:47.276177Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26032 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:47.665437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:47.678028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:37:47.686952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:47.840035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:48.018419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:37:48.081826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:49.721071Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896187819693201:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:49.721192Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.030450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.069037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.101235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.146737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.191947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.239004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.305942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:50.394326Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896192114661165:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.394400Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.394550Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896192114661170:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:50.398551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:50.411580Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896192114661172:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:50.519294Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896192114661223:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:51.276256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896174934789854:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:51.276353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:51.519916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896202637813292:2056];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:53.147895Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001869/r3tmp/tmp1bd4be/pdisk_1.dat 2025-06-25T14:37:53.398809Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:53.398882Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:53.402571Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:53.428496Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896202637813277:2080] 1750862273140777 != 1750862273140780 2025-06-25T14:37:53.445211Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13458, node 2 2025-06-25T14:37:53.573604Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:53.573623Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:53.573633Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:53.573738Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2736 TClient is connected to server localhost:2736 2025-06-25T14:37:54.183512Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:54.229411Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:54.242409Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:37:54.255144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:54.318967Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:54.539596Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:37:54.619813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.137669Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896219817684096:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.137761Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.221875Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.296799Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.366091Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.397550Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.466855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.512226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.611223Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:57.729953Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896219817684764:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.730043Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.730275Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896219817684769:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:57.734759Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:57.747329Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896219817684771:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:57.837063Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896219817684822:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:58.190081Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896202637813292:2056];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:58.190219Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:59.441344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) |81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonExistsBinary [GOOD] >> KqpWrite::CastValues [GOOD] >> KqpInplaceUpdate::SingleRowSimple-UseSink >> KqpPg::PgCreateTable [GOOD] >> KqpPg::PgUpdate+useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonExistsBinary [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\203\021H\214\n\210\203\001H\214\002?6\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?J\203\005@\200\203\005@\202\022\000\003?d6Json2.JsonDocumentSqlExists\202\003?f\000\002\017\003?L\000\003?N\000\003?P\000\003?R\000\027?T?<\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?|\203\005@\200\203\005@\202\022\000\003?\210\"Json2.CompilePath\202\003?\212\000\002\017\003?~\000\003?\200\000\003?\202\000\003?\204\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\203\021H\214\n\210\203\001H\214\002?6\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?J\203\005@\200\203\005@\202\022\000\003?d6Json2.JsonDocumentSqlExists\202\003?f\000\002\017\003?L\000\003?N\000\003?P\000\003?R\000\027?T?<\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?|\203\005@\200\203\005@\202\022\000\003?\210\"Json2.CompilePath\202\003?\212\000\002\017\003?~\000\003?\200\000\003?\202\000\003?\204\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; 
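The program dump above shows how TestProgram::JsonExistsBinary lowers a JSON path existence check: the constant path "$.key" (node N0) and the assembled json_binary column feed a Calculation node that applies the Json2.JsonDocumentSqlExists kernel, and the final Projection keeps only the resulting column. A minimal YQL sketch of a query with the same shape; only the json_binary column name and the "$.key" path come from the dump, the table path and alias are illustrative:

    -- Existence check of a JSON path over a binary JSON (JsonDocument) column.
    SELECT
        JSON_EXISTS(json_binary, "$.key") AS has_key   -- has_key is an assumed alias
    FROM `/Root/some_table`;                           -- assumed table path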
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 010100000000000000000000 ] FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; >> KqpImmediateEffects::TxWithReadAtTheEnd-UseSink >> KqpImmediateEffects::TxWithWriteAtTheEnd-UseSink >> KqpImmediateEffects::UpdateAfterUpsert >> Yq_1::DeleteQuery [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v1] >> KqpEffects::UpdateOn_Params [GOOD] |81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::DescribeQuery [GOOD] Test command err: 2025-06-25T14:37:15.809984Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896040771895164:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:15.815857Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:37:16.837187Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; E0625 14:37:16.963032993 238804 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:16.963163556 238804 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:16.982190Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:17.828197Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13545: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:13545 2025-06-25T14:37:17.912200Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13545: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:13545 } ] 2025-06-25T14:37:17.989079Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:18.990129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:19.689683Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:13545: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:13545 } ] 2025-06-25T14:37:20.003444Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:20.755896Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896040771895164:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:20.755971Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:21.004682Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fc1/r3tmp/tmp26Td4f/pdisk_1.dat 2025-06-25T14:37:21.793302Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896066541699642:2277], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:21.793443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:21.793639Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 13545, node 1 2025-06-25T14:37:21.801258Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 E0625 14:37:21.942951694 238923 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:21.943133859 238923 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:22.212498Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-25T14:37:22.212536Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-25T14:37:22.212547Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-25T14:37:22.214251Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/jobs". Create session OK 2025-06-25T14:37:22.214264Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/jobs" 2025-06-25T14:37:22.214270Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/jobs" 2025-06-25T14:37:22.217149Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". Create session OK 2025-06-25T14:37:22.217174Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-25T14:37:22.217179Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-25T14:37:22.232756Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/compute_databases". Create session OK 2025-06-25T14:37:22.232793Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:22.232800Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:22.235065Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create session OK 2025-06-25T14:37:22.235086Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:22.235095Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:22.237410Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-25T14:37:22.237425Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-25T14:37:22.237431Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-25T14:37:22.240762Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". 
Create session OK 2025-06-25T14:37:22.240785Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:22.240791Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:22.244155Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". Create session OK 2025-06-25T14:37:22.244196Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-25T14:37:22.244202Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-25T14:37:22.245002Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/quotas". Create session OK 2025-06-25T14:37:22.245016Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/quotas" 2025-06-25T14:37:22.245026Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/quotas" 2025-06-25T14:37:22.251513Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". Create session OK 2025-06-25T14:37:22.251557Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-25T14:37:22.251564Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" TClient is connected to server localhost:12917 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:37:22.278927Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-25T14:37:22.278956Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-25T14:37:22.278962Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-25T14:37:22.304550Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". Create session OK 2025-06-25T14:37:22.304565Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-25T14:37:22.304571Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-25T14:37:22.380765Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/queries". Create session OK 2025-06-25T14:37:22.380790Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/queries" 2025-06-25T14:37:22.380798Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/queries" 2025-06-25T14:37:22.438203Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:22.438226Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:22.438278Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:22.438416Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:22.462227Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:22.466521Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896070836667143:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:22.466633Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896070836667144:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:22.466665Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896070836667118:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:22.467098Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:22.481258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operatio ... -06-25T14:37:58.746570Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:632: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. Processing resolved ShardId# 72075186224037892, partition range: [(String : yandexcloud://some_folder_id, String : utque7j6e67fijcvm2a8) ; ()), i: 0, state ranges: 0, points: 1 2025-06-25T14:37:58.746592Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:670: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. Add point to new shardId: 72075186224037892 2025-06-25T14:37:58.746690Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:714: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. Pending shards States: TShardState{ TabletId: 72075186224037892, Last Key , Ranges: [], Points: [# 0: (String : yandexcloud://some_folder_id, String : utque7j6e67fijcvm2a8)], RetryAttempt: 0, ResolveAttempt: 0 }; In Flight shards States: TShardState{ TabletId: 0, Last Key , Ranges: [], Points: [# 0: (String : yandexcloud://some_folder_id, String : utque7j6e67fijcvm2a8)], RetryAttempt: 0, ResolveAttempt: 1 }; 2025-06-25T14:37:58.746704Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. effective maxinflight 1024 sorted 0 2025-06-25T14:37:58.746717Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:462: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. BEFORE: 1.0 2025-06-25T14:37:58.746760Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:884: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. Send EvRead to shardId: 72075186224037892, tablePath: Root/yq/queries, ranges: , limit: (empty maybe), readId = 0, reverse = 0, snapshot = (txid=0,step=0), lockTxId = 0, lockNodeId = 0 2025-06-25T14:37:58.746790Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:476: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. AFTER: 0.1 2025-06-25T14:37:58.746800Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. Scheduled table scans, in flight: 1 shards. pending shards to read: 0, 2025-06-25T14:37:58.749077Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:958: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. Recv TEvReadResult from ShardID=72075186224037892, ReadId=0, Status=SUCCESS, Finished=1, RowCount=1, TxLocks= , BrokenTxLocks= 2025-06-25T14:37:58.749107Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1050: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. Taken 0 locks 2025-06-25T14:37:58.749124Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1064: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. new data for read #0 seqno = 1 finished = 1 2025-06-25T14:37:58.749148Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896227559246031:2946], TxId: 281474976715797, task: 1. Ctx: { TraceId : 01jykrcbkebynaesagjcc25h3s. SessionId : ydb://session/3?node_id=4&id=NjNmYTkyMjAtZmQ1MGU1Mi04OTY3ODQ2NS03ZDYyYzBiZQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 276037645 2025-06-25T14:37:58.749167Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896227559246031:2946], TxId: 281474976715797, task: 1. Ctx: { TraceId : 01jykrcbkebynaesagjcc25h3s. 
SessionId : ydb://session/3?node_id=4&id=NjNmYTkyMjAtZmQ1MGU1Mi04OTY3ODQ2NS03ZDYyYzBiZQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:37:58.749186Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. enter getasyncinputdata results size 1, freeSpace 8388608 2025-06-25T14:37:58.749203Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1227: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. enter pack cells method shardId: 72075186224037892 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-25T14:37:58.749229Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1308: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. exit pack cells method shardId: 72075186224037892 processedRows: 0 packed rows: 1 freeSpace: 8386368 2025-06-25T14:37:58.749251Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1365: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. returned 1 rows; processed 1 rows 2025-06-25T14:37:58.749287Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1402: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. dropping batch for read #0 2025-06-25T14:37:58.749300Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. effective maxinflight 1024 sorted 0 2025-06-25T14:37:58.749312Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-06-25T14:37:58.749327Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715797, task: 1, CA Id [4:7519896227559246031:2946]. returned async data processed rows 1 left freeSpace 8386368 received rows 1 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-25T14:37:58.749542Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7519896227559246031:2946], TxId: 281474976715797, task: 1. Ctx: { TraceId : 01jykrcbkebynaesagjcc25h3s. SessionId : ydb://session/3?node_id=4&id=NjNmYTkyMjAtZmQ1MGU1Mi04OTY3ODQ2NS03ZDYyYzBiZQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T14:37:58.749563Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896227559246031:2946], TxId: 281474976715797, task: 1. Ctx: { TraceId : 01jykrcbkebynaesagjcc25h3s. SessionId : ydb://session/3?node_id=4&id=NjNmYTkyMjAtZmQ1MGU1Mi04OTY3ODQ2NS03ZDYyYzBiZQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:37:58.749599Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715797, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-25T14:37:58.749617Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896227559246032:2947], TxId: 281474976715797, task: 2. Ctx: { TraceId : 01jykrcbkebynaesagjcc25h3s. SessionId : ydb://session/3?node_id=4&id=NjNmYTkyMjAtZmQ1MGU1Mi04OTY3ODQ2NS03ZDYyYzBiZQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. 
CA StateFunc 271646923 2025-06-25T14:37:58.749646Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715797, task: 2. Finish input channelId: 1, from: [4:7519896227559246031:2946] 2025-06-25T14:37:58.749682Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896227559246032:2947], TxId: 281474976715797, task: 2. Ctx: { TraceId : 01jykrcbkebynaesagjcc25h3s. SessionId : ydb://session/3?node_id=4&id=NjNmYTkyMjAtZmQ1MGU1Mi04OTY3ODQ2NS03ZDYyYzBiZQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:37:58.749855Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7519896227559246032:2947], TxId: 281474976715797, task: 2. Ctx: { TraceId : 01jykrcbkebynaesagjcc25h3s. SessionId : ydb://session/3?node_id=4&id=NjNmYTkyMjAtZmQ1MGU1Mi04OTY3ODQ2NS03ZDYyYzBiZQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T14:37:58.749874Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896227559246031:2946], TxId: 281474976715797, task: 1. Ctx: { TraceId : 01jykrcbkebynaesagjcc25h3s. SessionId : ydb://session/3?node_id=4&id=NjNmYTkyMjAtZmQ1MGU1Mi04OTY3ODQ2NS03ZDYyYzBiZQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646927 2025-06-25T14:37:58.749903Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896227559246031:2946], TxId: 281474976715797, task: 1. Ctx: { TraceId : 01jykrcbkebynaesagjcc25h3s. SessionId : ydb://session/3?node_id=4&id=NjNmYTkyMjAtZmQ1MGU1Mi04OTY3ODQ2NS03ZDYyYzBiZQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:37:58.749922Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715797, task: 1. Tasks execution finished 2025-06-25T14:37:58.749934Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519896227559246031:2946], TxId: 281474976715797, task: 1. Ctx: { TraceId : 01jykrcbkebynaesagjcc25h3s. SessionId : ydb://session/3?node_id=4&id=NjNmYTkyMjAtZmQ1MGU1Mi04OTY3ODQ2NS03ZDYyYzBiZQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-25T14:37:58.750044Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715797, task: 1. pass away 2025-06-25T14:37:58.750134Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715797;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:37:58.750528Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896227559246032:2947], TxId: 281474976715797, task: 2. Ctx: { TraceId : 01jykrcbkebynaesagjcc25h3s. SessionId : ydb://session/3?node_id=4&id=NjNmYTkyMjAtZmQ1MGU1Mi04OTY3ODQ2NS03ZDYyYzBiZQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:37:58.750562Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715797, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-25T14:37:58.750570Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715797, task: 2. Tasks execution finished 2025-06-25T14:37:58.750581Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519896227559246032:2947], TxId: 281474976715797, task: 2. Ctx: { TraceId : 01jykrcbkebynaesagjcc25h3s. SessionId : ydb://session/3?node_id=4&id=NjNmYTkyMjAtZmQ1MGU1Mi04OTY3ODQ2NS03ZDYyYzBiZQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-25T14:37:58.750628Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715797, task: 2. pass away 2025-06-25T14:37:58.750676Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715797;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:37:58.983158Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:12421: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:12421 2025-06-25T14:37:59.942141Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (1): CANCELLED
: Error: Grpc error response on endpoint [::]:12421 >> KqpEffects::EmptyUpdate+UseSink [GOOD] >> KqpEffects::EmptyUpdate-UseSink >> KqpEffects::InsertAbort_Literal_Duplicates-UseSink [GOOD] >> KqpEffects::InsertAbort_Literal_Conflict-UseSink >> KqpPg::EquiJoin+useSink [GOOD] >> KqpPg::EquiJoin-useSink >> KqpEffects::AlterAfterUpsertTransaction+UseSink |81.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |81.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |81.7%| [LD] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut >> KqpInplaceUpdate::Negative_SingleRowWithKeyCast+UseSink >> KqpWrite::Insert |81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpWrite::CastValues [GOOD] Test command err: Trying to start YDB, gRPC: 13955, MsgBus: 26044 2025-06-25T14:37:49.099481Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896188483339734:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:49.102776Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001867/r3tmp/tmpCIXb7x/pdisk_1.dat 2025-06-25T14:37:49.495649Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13955, node 1 2025-06-25T14:37:49.543011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:49.543543Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:49.545480Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:49.584776Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:49.584801Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:49.584838Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:49.584985Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26044 TClient is connected to server localhost:26044 WaitRootIsUp 'Root'... 
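The block around this point is the cluster bootstrap for KqpWrite::CastValues (reported [GOOD] in the header above). The query under test is not captured in the log; judging by the name, it writes parameter values that must be cast to the declared column types. A hedged YQL sketch of such a write, with hypothetical table and column names:

    -- Illustrative only: parameters of narrower types written into wider column types.
    DECLARE $key AS Int32;
    DECLARE $value AS Utf8;

    UPSERT INTO `/Root/TestTable` (Key, Value)                    -- assumed table and columns
    VALUES (CAST($key AS Uint64), CAST($value AS String));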
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:37:50.106555Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:50.127337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:50.150003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:50.330903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:50.474103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:50.555655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:52.230183Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896201368243245:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:52.230285Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:52.552053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:52.585177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:52.617959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:52.657326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:52.707232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:52.741819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:52.794178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:52.878613Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896201368243900:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:52.878693Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:52.878929Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896201368243905:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:52.882858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:52.899684Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896201368243907:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:37:52.956912Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896201368243958:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:54.104420Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896188483339734:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:54.111381Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:54.451963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 24865, MsgBus: 23526 2025-06-25T14:37:55.890357Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896213930681255:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:55.897265Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001867/r3tmp/tmpHvCHJk/pdisk_1.dat 2025-06-25T14:37:56.016453Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896213930681209:2080] 1750862275873998 != 1750862275874001 2025-06-25T14:37:56.022281Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:56.035380Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:56.035458Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 24865, node 2 2025-06-25T14:37:56.039519Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:56.089590Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:56.089611Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:56.089618Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:56.089759Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23526 TClient is connected to server localhost:23526 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:56.614065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:56.624994Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:37:56.650748Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:56.759471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:56.905698Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:57.016256Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:57.136584Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
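The "Resource pool default not found or you don't have access permissions" warnings repeated throughout this run are emitted while the metadata pool /Root/.metadata/workload_manager/pools/default does not exist yet; the bootstrap then issues ESchemeOpCreateResourcePool, and a concurrent creation attempt is answered with "path exist, request accepts it". A pool can also be created explicitly; a sketch assuming the CREATE RESOURCE POOL statement of recent YDB versions, with setting names given only as examples:

    -- Sketch: create a workload-manager resource pool instead of relying on
    -- the automatically created "default" pool.
    CREATE RESOURCE POOL test_pool WITH (
        CONCURRENT_QUERY_LIMIT = 10,   -- assumed setting name
        QUEUE_SIZE = 100               -- assumed setting name
    );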
2025-06-25T14:37:59.812562Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896231110552010:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:59.812654Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:59.866719Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:59.942782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:59.980696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:00.071373Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:00.120932Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:00.192928Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:00.292092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:00.379898Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896235405519970:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:00.379989Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:00.380393Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896235405519975:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:00.385100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:00.406200Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896235405519977:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:00.498299Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896235405520028:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:00.890271Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896213930681255:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:00.890346Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> Yq_1::Basic_EmptyList [GOOD] >> Yq_1::Basic_EmptyDict >> ttl_delete_s3.py::TestDeleteS3Ttl::test_data_unchanged_after_ttl_change ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::UpdateOn_Params [GOOD] Test command err: Trying to start YDB, gRPC: 31139, MsgBus: 22862 2025-06-25T14:37:50.646521Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896189712858823:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:50.647314Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185f/r3tmp/tmpwvusBE/pdisk_1.dat 2025-06-25T14:37:51.019881Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:51.021089Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896189712858787:2080] 1750862270644965 != 1750862270644968 TServer::EnableGrpc on GrpcPort 31139, node 1 2025-06-25T14:37:51.061868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:51.067684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:51.094461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:51.176849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:51.176872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:51.176879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:51.177021Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22862 2025-06-25T14:37:51.661731Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22862 WaitRootIsUp 'Root'... 
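The surrounding bootstrap belongs to KqpEffects::UpdateOn_Params (reported [GOOD] in the header of this block). The actual query is not in the log; the test name suggests a parameterized blind UPDATE ... ON, i.e. an update by primary key without a preceding read. A YQL sketch under that assumption, with hypothetical table and column names:

    -- Sketch of a parameterized UPDATE ... ON: values are written for the given keys
    -- without reading the existing rows first.
    DECLARE $id AS Uint64;
    DECLARE $name AS Utf8;

    UPDATE `/Root/TestTable` ON
    SELECT $id AS Key, $name AS Value;   -- Key/Value are assumed column names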
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:51.899316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:51.939381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:52.151227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:52.300436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:52.372942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.950146Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896202597762306:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:53.950291Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:54.255145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:54.288049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:54.368276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:54.414562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:54.458253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:54.536810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:54.576095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:54.635000Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896206892730262:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:54.635130Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:54.635378Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896206892730267:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:54.639089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:54.661896Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896206892730269:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:37:54.746390Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896206892730322:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:55.648579Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896189712858823:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:55.649708Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 6378, MsgBus: 12041 2025-06-25T14:37:57.100238Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896221158664940:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:57.120891Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185f/r3tmp/tmpXOKrvY/pdisk_1.dat 2025-06-25T14:37:57.292477Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:57.294011Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896221158664902:2080] 1750862277083068 != 1750862277083071 TServer::EnableGrpc on GrpcPort 6378, node 2 2025-06-25T14:37:57.328157Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:57.328340Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:57.337600Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:57.388776Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:57.388797Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:57.388804Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:57.388912Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12041 TClient is connected to server localhost:12041 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:57.930331Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:57.934872Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:37:57.945914Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:58.030975Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:58.123763Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:58.252462Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:58.357502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:00.852614Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896234043568418:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:00.852710Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:00.933992Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:00.966945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:01.038159Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:01.068943Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:01.104911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:01.148152Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:01.180216Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:01.273760Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896238338536372:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:01.273884Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:01.274217Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896238338536377:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:01.277716Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:01.288285Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896238338536379:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:01.352150Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896238338536430:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:02.103122Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896221158664940:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:02.103200Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::DeleteQuery [GOOD] Test command err: 2025-06-25T14:37:17.701152Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896048767804081:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:17.701563Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0625 14:37:18.319524027 239258 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:18.342518806 239258 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:18.695075Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:18.707368Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:19.409343Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14263: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:14263 } ] 2025-06-25T14:37:19.653819Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14263: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:14263 2025-06-25T14:37:19.709315Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:20.713081Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:21.145457Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14263: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:14263 } ] 2025-06-25T14:37:21.714996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:22.704589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896048767804081:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:22.704661Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:22.714553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; E0625 14:37:23.320924570 239351 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:23.321085981 239351 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fb5/r3tmp/tmpAobh5o/pdisk_1.dat 2025-06-25T14:37:23.678462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:23.728860Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896074537608255:2277], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:23.743884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:23.760506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:23.877368Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 14263, node 1 2025-06-25T14:37:23.883931Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14263: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:14263 } ] 2025-06-25T14:37:23.916236Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create directory "Root/yq" 2025-06-25T14:37:23.916270Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:383: Call create directory "Root/yq" 2025-06-25T14:37:23.929474Z node 1 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14263: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:14263 2025-06-25T14:37:23.929520Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:14263: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:14263 2025-06-25T14:37:24.231432Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:24.231457Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:24.231464Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:24.231599Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:24.231979Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:29049 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:24.416945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:27.371025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:27.371137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:27.381472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected E0625 14:37:28.324606370 239351 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:28.324760844 239351 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:28.325991Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-25T14:37:28.326022Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-25T14:37:28.326050Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-25T14:37:28.350039Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". 
Create session OK 2025-06-25T14:37:28.350067Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-25T14:37:28.350074Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-25T14:37:28.352304Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-25T14:37:28.352348Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-25T14:37:28.352357Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-25T14:37:28.365935Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-25T14:37:28.365965Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-25T14:37:28.365971Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-25T14:37:28.366091Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". Create session OK 2025-06-25T14:37:28.366118Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-25T14:37:28.366125Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" 2025-06-25T14:37:28.369096Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create session OK 2025-06-25T14:37:28.369132Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:28.369139Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:28.370821Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". Create session OK 2025-06-25T14:37:28.370866Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 20 ... tZWIxZjE3ZTE=. TraceId : 01jykrcdch4tq077epb4209kcg. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:38:00.556371Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:527: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. Received TEvResolveKeySetResult update for table 'Root/yq/queries' 2025-06-25T14:38:00.556454Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:632: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. Processing resolved ShardId# 72075186224037892, partition range: [(String : yandexcloud://Execute_folder_id, String : utque7j6e6h0kl0omk80) ; ()), i: 0, state ranges: 0, points: 1 2025-06-25T14:38:00.556474Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:670: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. Add point to new shardId: 72075186224037892 2025-06-25T14:38:00.556576Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:714: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. 
Pending shards States: TShardState{ TabletId: 72075186224037892, Last Key , Ranges: [], Points: [# 0: (String : yandexcloud://Execute_folder_id, String : utque7j6e6h0kl0omk80)], RetryAttempt: 0, ResolveAttempt: 0 }; In Flight shards States: TShardState{ TabletId: 0, Last Key , Ranges: [], Points: [# 0: (String : yandexcloud://Execute_folder_id, String : utque7j6e6h0kl0omk80)], RetryAttempt: 0, ResolveAttempt: 1 }; 2025-06-25T14:38:00.556590Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. effective maxinflight 1024 sorted 0 2025-06-25T14:38:00.556604Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:462: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. BEFORE: 1.0 2025-06-25T14:38:00.556648Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:884: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. Send EvRead to shardId: 72075186224037892, tablePath: Root/yq/queries, ranges: , limit: (empty maybe), readId = 0, reverse = 0, snapshot = (txid=0,step=0), lockTxId = 0, lockNodeId = 0 2025-06-25T14:38:00.556689Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:476: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. AFTER: 0.1 2025-06-25T14:38:00.556704Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. Scheduled table scans, in flight: 1 shards. pending shards to read: 0, 2025-06-25T14:38:00.557468Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:958: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. Recv TEvReadResult from ShardID=72075186224037892, ReadId=0, Status=SUCCESS, Finished=1, RowCount=0, TxLocks= , BrokenTxLocks= 2025-06-25T14:38:00.557491Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1050: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. Taken 0 locks 2025-06-25T14:38:00.557507Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1064: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. new data for read #0 seqno = 1 finished = 1 2025-06-25T14:38:00.557530Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896235529613280:3099], TxId: 281474976715810, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjYyNWU4NzQtOGY1MzU3NTItY2JkNDMxNGItZWIxZjE3ZTE=. TraceId : 01jykrcdch4tq077epb4209kcg. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 276037645 2025-06-25T14:38:00.557552Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896235529613280:3099], TxId: 281474976715810, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjYyNWU4NzQtOGY1MzU3NTItY2JkNDMxNGItZWIxZjE3ZTE=. TraceId : 01jykrcdch4tq077epb4209kcg. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-25T14:38:00.557579Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1331: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. enter getasyncinputdata results size 1, freeSpace 8388608 2025-06-25T14:38:00.557599Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1227: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. enter pack cells method shardId: 72075186224037892 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-25T14:38:00.557616Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1308: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. 
exit pack cells method shardId: 72075186224037892 processedRows: 0 packed rows: 0 freeSpace: 8388608 2025-06-25T14:38:00.557629Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1365: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. returned 0 rows; processed 0 rows 2025-06-25T14:38:00.557666Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1402: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. dropping batch for read #0 2025-06-25T14:38:00.557679Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:458: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. effective maxinflight 1024 sorted 0 2025-06-25T14:38:00.557691Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:480: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-06-25T14:38:00.557709Z node 4 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1427: TxId: 281474976715810, task: 1, CA Id [4:7519896235529613280:3099]. returned async data processed rows 0 left freeSpace 8388608 received rows 0 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-25T14:38:00.557798Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7519896235529613280:3099], TxId: 281474976715810, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjYyNWU4NzQtOGY1MzU3NTItY2JkNDMxNGItZWIxZjE3ZTE=. TraceId : 01jykrcdch4tq077epb4209kcg. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T14:38:00.557826Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896235529613281:3100], TxId: 281474976715810, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjYyNWU4NzQtOGY1MzU3NTItY2JkNDMxNGItZWIxZjE3ZTE=. TraceId : 01jykrcdch4tq077epb4209kcg. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646923 2025-06-25T14:38:00.557850Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715810, task: 2. Finish input channelId: 1, from: [4:7519896235529613280:3099] 2025-06-25T14:38:00.557879Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896235529613281:3100], TxId: 281474976715810, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjYyNWU4NzQtOGY1MzU3NTItY2JkNDMxNGItZWIxZjE3ZTE=. TraceId : 01jykrcdch4tq077epb4209kcg. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:38:00.557923Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [4:7519896235529613281:3100], TxId: 281474976715810, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjYyNWU4NzQtOGY1MzU3NTItY2JkNDMxNGItZWIxZjE3ZTE=. TraceId : 01jykrcdch4tq077epb4209kcg. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T14:38:00.557939Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896235529613280:3099], TxId: 281474976715810, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjYyNWU4NzQtOGY1MzU3NTItY2JkNDMxNGItZWIxZjE3ZTE=. TraceId : 01jykrcdch4tq077epb4209kcg. CustomerSuppliedId : . CurrentExecutionId : . 
DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646927 2025-06-25T14:38:00.557961Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896235529613280:3099], TxId: 281474976715810, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjYyNWU4NzQtOGY1MzU3NTItY2JkNDMxNGItZWIxZjE3ZTE=. TraceId : 01jykrcdch4tq077epb4209kcg. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-25T14:38:00.557981Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715810, task: 1. Tasks execution finished 2025-06-25T14:38:00.557997Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519896235529613280:3099], TxId: 281474976715810, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjYyNWU4NzQtOGY1MzU3NTItY2JkNDMxNGItZWIxZjE3ZTE=. TraceId : 01jykrcdch4tq077epb4209kcg. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-25T14:38:00.558123Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715810, task: 1. pass away 2025-06-25T14:38:00.558232Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715810;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:38:00.558649Z node 4 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [4:7519896235529613281:3100], TxId: 281474976715810, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjYyNWU4NzQtOGY1MzU3NTItY2JkNDMxNGItZWIxZjE3ZTE=. TraceId : 01jykrcdch4tq077epb4209kcg. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:38:00.558698Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715810, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-25T14:38:00.558710Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715810, task: 2. Tasks execution finished 2025-06-25T14:38:00.558723Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [4:7519896235529613281:3100], TxId: 281474976715810, task: 2. Ctx: { SessionId : ydb://session/3?node_id=4&id=YjYyNWU4NzQtOGY1MzU3NTItY2JkNDMxNGItZWIxZjE3ZTE=. TraceId : 01jykrcdch4tq077epb4209kcg. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-25T14:38:00.558783Z node 4 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715810, task: 2. pass away 2025-06-25T14:38:00.558844Z node 4 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715810;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:38:00.564543Z node 4 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: DescribeQueryRequest - DescribeQueryResult: {query_id: "utque7j6e6h0kl0omk80" } ERROR: {
: Error: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp:664: Query does not exist or permission denied. Please check the id of the query or your access rights, code: 1000 } 2025-06-25T14:38:01.411471Z node 4 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:22628: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:22628 |81.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> KqpImmediateEffects::DeleteOnAfterInsertWithIndex >> KqpPg::AlterSequence [GOOD] >> KqpPg::AlterColumnSetDefaultFromSequence >> KqpEffects::AlterDuringUpsertTransaction-UseSink [GOOD] >> PrivateApi::Nodes [GOOD] >> KqpWrite::UpsertNullKey >> KqpImmediateEffects::InsertExistingKey-UseSink >> KqpImmediateEffects::ConflictingKeyW1WR2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::AlterDuringUpsertTransaction-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 27349, MsgBus: 16570 2025-06-25T14:37:44.651616Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896167162070292:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:44.652339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001889/r3tmp/tmp9rjuq4/pdisk_1.dat 2025-06-25T14:37:45.168262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:45.169454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:45.178830Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:45.188640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27349, node 1 2025-06-25T14:37:45.365006Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:45.365031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:45.367650Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:45.367828Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:45.655389Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16570 TClient is connected to server localhost:16570 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:46.063511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:46.111887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:46.331501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:46.526681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:46.619880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:47.881281Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896180046973709:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:47.881385Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.664277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.708854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.739711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.774308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.808668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.850161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.880179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:48.987178Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896184341941668:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.987235Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.987285Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896184341941673:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:48.989645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:48.997128Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896184341941675:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:49.070405Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896188636909022:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:49.647781Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896167162070292:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:49.647847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:50.561982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 29407, MsgBus: 1732 2025-06-25T14:37:52.061098Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896199367708317:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:52.061137Z node 2 :METADAT ... 2025-06-25T14:37:59.225033Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3026, node 3 2025-06-25T14:37:59.356693Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:59.356722Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:59.356732Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:59.356886Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13330 2025-06-25T14:37:59.836788Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13330 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:37:59.957483Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:59.963457Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:37:59.978576Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:00.070101Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:00.319884Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:00.438505Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:02.984524Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896245086441196:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:02.984665Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:03.063243Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:03.128647Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:03.173120Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:03.221614Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:03.263281Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:03.339394Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:03.377242Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:03.469896Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896249381409154:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:03.470057Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:03.470411Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896249381409159:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:03.475255Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:03.491270Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:38:03.491635Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519896249381409161:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:03.566764Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519896249381409212:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:04.959061Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:05.193685Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:38:05.223970Z node 3 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037922 cannot parse tx 281474976710674: Table '/Root/TestTable' scheme changed. 2025-06-25T14:38:05.224228Z node 3 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [3:7519896257971344177:2465] TxId: 281474976710674. Ctx: { TraceId: 01jykrcjd56df0h2fd4zbezw3n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzQ3NTY3ZjktYmQyNDA2YzgtNzk4ZTI3MjgtYzA1ZGJiMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ERROR: [SCHEME_CHANGED] Table '/Root/TestTable' scheme changed.; 2025-06-25T14:38:05.232602Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NzQ3NTY3ZjktYmQyNDA2YzgtNzk4ZTI3MjgtYzA1ZGJiMjU=, ActorId: [3:7519896253676376739:2465], ActorState: ExecuteState, TraceId: 01jykrcjd56df0h2fd4zbezw3n, Create QueryResponse for error on request, msg: >> KqpInplaceUpdate::Negative_SingleRowListFromRange+UseSink >> KqpImmediateEffects::UpsertDuplicates ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> PrivateApi::Nodes [GOOD] Test command err: 2025-06-25T14:37:14.970697Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896036277327631:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:14.970754Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0625 14:37:15.726447444 238543 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:15.726580010 238543 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:15.973665Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:16.052499Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:16.702410Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:11191: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:11191 } ] 2025-06-25T14:37:16.778264Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:11191: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:11191 2025-06-25T14:37:16.976681Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:17.978877Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:18.116432Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:11191: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:11191 } ] 2025-06-25T14:37:18.980137Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fca/r3tmp/tmpEipxTt/pdisk_1.dat 2025-06-25T14:37:19.883909Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896057752164656:2274], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:19.976573Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896036277327631:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:19.976645Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:19.996468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:20.020495Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896057752164656:2274], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:20.028453Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:20.091535Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:20.091832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:20.116909Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:20.131927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:20.277338Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 11191, node 1 2025-06-25T14:37:20.319390Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:20.319411Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:20.319418Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:20.319566Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20254 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: E0625 14:37:20.723517800 238725 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:20.723671526 238725 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:20.834322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:37:20.836205Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". 
Create session OK 2025-06-25T14:37:20.836244Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-25T14:37:20.836253Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-25T14:37:20.836737Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". Create session OK 2025-06-25T14:37:20.836760Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:20.836767Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:20.840448Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". Create session OK 2025-06-25T14:37:20.840480Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-25T14:37:20.840485Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-25T14:37:20.843155Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/jobs". Create session OK 2025-06-25T14:37:20.843179Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/jobs" 2025-06-25T14:37:20.843184Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/jobs" waiting... 2025-06-25T14:37:20.850165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:20.851605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:20.852281Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-25T14:37:20.852306Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-25T14:37:20.852365Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-25T14:37:20.852689Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create session OK 2025-06-25T14:37:20.852715Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:20.852722Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:20.857949Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/quotas". Create session OK 2025-06-25T14:37:20.857984Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/quotas" 2025-06-25T14:37:20.857994Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/quotas" 2025-06-25T14:37:20.862509Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". 
Create session OK 2025-06-25T14:37:20.862538Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-25T14:37:20.862544Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-25T14:37:20.864848Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". Create session OK 2025-06-25T14:37:20.864870Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-25T14:37:20.864876Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-25T14:37:20.867392Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schem ... . PoolId : default. Database : . }. Set execution timeout 299.996725s 2025-06-25T14:38:02.973879Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1452: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Create sink for output 0 { Sink { Type: "KqpTableSink" Settings { type_url: "type.googleapis.com/NKikimrKqp.TKqpTableSinkSettings" value: "\032\035\n\rRoot/yq/nodes\020\200\202\224\204\200\200\200\200\001\030\014(\001\"\r\n\006tenant\020\001 \201 \"\r\n\007node_id\020\003 \002*\024\n\016active_workers\020\005 \004*\022\n\013data_center\020\013 \201 *\017\n\texpire_at\020\010 2*\017\n\010hostname\020\004 \201 *\022\n\013instance_id\020\002 \201 *\027\n\021interconnect_port\020\t \002*\026\n\020memory_allocated\020\007 \004*\022\n\014memory_limit\020\006 \004*\023\n\014node_address\020\n \201 *\r\n\007node_id\020\003 \002*\r\n\006tenant\020\001 \201 0\231\200\200\200\200\200@8\007@\000H\001R\022\t\033\366`\223\312\t\\h\021\'\t\000\000\007\000\020\000X\000`\000h\004h\nh\007h\003h\002h\010h\006h\005h\th\001h\000x\000" } } } 2025-06-25T14:38:02.974035Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646926 2025-06-25T14:38:02.974061Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Received channels info: 2025-06-25T14:38:02.974130Z node 7 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:358: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. About to drain async output 0. FreeSpace: 67108864, allowedOvercommit: 4194304, toSend: 71303168, finished: 0 2025-06-25T14:38:02.974237Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3311: TxId: 281474976710682, task: 1. 
Add data: 101 / 101 2025-06-25T14:38:02.974285Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3280: TxId: 281474976710682, task: 1. Send data=101, closed=1, bufferActorId=[7:7519896243462534683:2343] 2025-06-25T14:38:02.974306Z node 7 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:372: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Drain async output 0. Free space decreased: -9223372036787666944, sent data from buffer: 101 2025-06-25T14:38:02.974323Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710682, task: 1. Tasks execution finished 2025-06-25T14:38:02.974334Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Waiting finish of sink[0] 2025-06-25T14:38:02.974359Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646926 2025-06-25T14:38:02.974377Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Received channels info: 2025-06-25T14:38:02.974390Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710682, task: 1. Tasks execution finished 2025-06-25T14:38:02.974397Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Waiting finish of sink[0] 2025-06-25T14:38:02.974432Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-25T14:38:02.974446Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710682, task: 1. Tasks execution finished 2025-06-25T14:38:02.974458Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1587: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . 
DatabaseId : /Root. PoolId : default. Database : . }. Waiting finish of sink[0] 2025-06-25T14:38:02.974519Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1860: SelfId: [7:7519896243462534683:2343], SessionActorId: [7:7519896213397761340:2343], Create new TableWriteActor for table `Root/yq/nodes` ([72057594046644480:12:1]). lockId=281474976710681. ActorId=[7:7519896243462534691:2343] 2025-06-25T14:38:02.974565Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:388: Table: `Root/yq/nodes` ([72057594046644480:12:1]), SessionActorId: [7:7519896213397761340:2343]Open: token=0 2025-06-25T14:38:02.974588Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1987: SelfId: [7:7519896243462534683:2343], SessionActorId: [7:7519896213397761340:2343], ProcessRequestQueue [OwnerId: 72057594046644480, LocalPathId: 12] NOT READY queue=1 2025-06-25T14:38:02.974636Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:394: SelfId: [7:7519896243462534691:2343], Table: `Root/yq/nodes` ([72057594046644480:12:1]), SessionActorId: [7:7519896213397761340:2343]Write: token=0 2025-06-25T14:38:02.974726Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:402: SelfId: [7:7519896243462534691:2343], Table: `Root/yq/nodes` ([72057594046644480:12:1]), SessionActorId: [7:7519896213397761340:2343]Close: token=0 2025-06-25T14:38:02.974755Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3180: SelfId: [7:7519896243462534690:2343], TxId: 281474976710682, task: 1. TKqpForwardWriteActor recieve EvBufferWriteResult from [7:7519896243462534683:2343] 2025-06-25T14:38:02.974768Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:3198: SelfId: [7:7519896243462534690:2343], TxId: 281474976710682, task: 1. Finished 2025-06-25T14:38:02.974784Z node 7 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646922 2025-06-25T14:38:02.974803Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710682, task: 1. Tasks execution finished 2025-06-25T14:38:02.974816Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [7:7519896243462534688:2343], TxId: 281474976710682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=MTIxNWI1ZDktZDk4NGI4NmItYjJhOTRhNmEtZDM1YzBkMmI=. CustomerSuppliedId : . TraceId : 01jykrcg6s5ykr2j227ewgh9yf. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Compute state finished. All channels and sinks finished 2025-06-25T14:38:02.974885Z node 7 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710682, task: 1. pass away 2025-06-25T14:38:02.974954Z node 7 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710682;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:38:02.975302Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2110: SelfId: [7:7519896243462534683:2343], SessionActorId: [7:7519896213397761340:2343], Start immediate commit 2025-06-25T14:38:02.975315Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:923: SelfId: [7:7519896243462534691:2343], Table: `Root/yq/nodes` ([72057594046644480:12:1]), SessionActorId: [7:7519896213397761340:2343]SetImmediateCommit 2025-06-25T14:38:02.975330Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2052: SelfId: [7:7519896243462534683:2343], SessionActorId: [7:7519896213397761340:2343], Flush data 2025-06-25T14:38:02.975464Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:1050: SelfId: [7:7519896243462534691:2343], Table: `Root/yq/nodes` ([72057594046644480:12:1]), SessionActorId: [7:7519896213397761340:2343]Send EvWrite to ShardID=72075186224037893, isPrepare=0, isImmediateCommit=1, TxId=0, LockTxId=0, LockNodeId=0, Locks= LockId: 281474976710681 DataShard: 72075186224037893 Generation: 1 Counter: 2 SchemeShard: 72057594046644480 PathId: 12, Size=212, Cookie=1, OperationsCount=1, IsFinal=1, Attempts=0, Mode=3, BufferMemory=212 2025-06-25T14:38:02.983552Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:617: SelfId: [7:7519896243462534691:2343], Table: `Root/yq/nodes` ([72057594046644480:12:1]), SessionActorId: [7:7519896213397761340:2343]Recv EvWriteResult from ShardID=72075186224037893, Status=STATUS_COMPLETED, TxId=7, Locks= , Cookie=1 2025-06-25T14:38:02.983592Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:869: SelfId: [7:7519896243462534691:2343], Table: `Root/yq/nodes` ([72057594046644480:12:1]), SessionActorId: [7:7519896213397761340:2343]Got completed result TxId=7, TabletId=72075186224037893, Cookie=1, Mode=3, Locks= 2025-06-25T14:38:02.983658Z node 7 :KQP_COMPUTE DEBUG: kqp_write_actor.cpp:2912: SelfId: [7:7519896243462534683:2343], SessionActorId: [7:7519896213397761340:2343], Committed TxId=0 2025-06-25T14:38:03.182834Z node 7 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:9908: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:9908 2025-06-25T14:38:04.154124Z node 7 :FQ_PENDING_FETCHER ERROR: pending_fetcher.cpp:259: Error with GetTask:
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv6:%5B::%5D:9908: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint [::]:9908 >> KqpImmediateEffects::InteractiveTxWithWriteAtTheEnd |81.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> KqpPg::CreateTempTableSerial [GOOD] >> KqpPg::DropSequence >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink [GOOD] >> KqpPg::InsertFromSelect_Simple-useSink [GOOD] >> KqpPg::InsertFromSelect_NoReorder-useSink |81.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |81.7%| [LD] {RESULT} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |81.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut >> KqpInplaceUpdate::SingleRowArithm-UseSink >> KqpImmediateEffects::DeleteAfterUpsert |81.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |81.7%| [LD] {RESULT} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |81.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut >> KqpEffects::EmptyUpdate-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 7519, MsgBus: 28281 2025-06-25T14:36:58.226703Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895968138398912:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:58.226848Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a49/r3tmp/tmp4FW714/pdisk_1.dat 2025-06-25T14:36:58.645081Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7519, node 1 2025-06-25T14:36:58.737727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:58.737839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:58.763391Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:58.796028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:58.796047Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:58.796053Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:58.796163Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28281 2025-06-25T14:36:59.247387Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28281 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:36:59.527225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:01.603209Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895981023301414:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:01.603329Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:01.603594Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895981023301426:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:01.607591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:01.621272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:37:01.621514Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895981023301428:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:37:01.688581Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895981023301479:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:01.775350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 24463, MsgBus: 19895 2025-06-25T14:37:02.933115Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895984785419702:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:02.933163Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a49/r3tmp/tmpvQOOlG/pdisk_1.dat 2025-06-25T14:37:03.147994Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:03.148069Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:03.165454Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:03.166682Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24463, node 2 2025-06-25T14:37:03.352799Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:03.352820Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:03.352825Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:03.352930Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19895 TClient is connected to server localhost:19895 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:37:03.957617Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:37:03.966567Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:37:06.451989Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896001965289473:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:06.452045Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896001965289463:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:06.452141Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:06.455383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:06.474671Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896001965289492:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:37:06.538892Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896001965289543:2334] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:06.575153Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, ... 4: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519896203565533747:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:58.784239Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:59.340897Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519896229335338152:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:59.340982Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519896229335338144:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:59.341375Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:59.346657Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:59.395130Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519896229335338158:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:37:59.449058Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519896229335338209:2340] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:59.491550Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:59.649608Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:59.762124Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7519896229335338447:2323], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Missing not null column in input: c. All not null columns should be initialized, code: 2032 2025-06-25T14:37:59.764612Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=10&id=ZTljMWQxZGUtNzYxOTI3YmUtODE5OGEzMjQtMTIzODYxMzc=, ActorId: [10:7519896229335338445:2322], ActorState: ExecuteState, TraceId: 01jykrcd0zbrs0xy8ynek0b7hc, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: Trying to start YDB, gRPC: 65194, MsgBus: 2443 2025-06-25T14:38:01.187191Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7519896240595847509:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:01.187247Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a49/r3tmp/tmpuS13J9/pdisk_1.dat 2025-06-25T14:38:01.387265Z node 11 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:01.392476Z node 11 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [11:7519896240595847475:2080] 1750862281186372 != 1750862281186375 2025-06-25T14:38:01.413098Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:01.413229Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:01.417209Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65194, node 11 2025-06-25T14:38:01.569014Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:01.569045Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:01.569058Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:01.569241Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2443 2025-06-25T14:38:02.195916Z node 11 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2443 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:02.570080Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:02.581152Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:06.188472Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519896240595847509:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:06.188554Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:07.342459Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896266365651886:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:07.342715Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:07.343470Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896266365651913:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:07.349645Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:07.372003Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519896266365651915:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:38:07.468190Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519896266365651966:2343] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:07.508531Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:07.664582Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:07.795561Z node 11 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [11:7519896266365652204:2325], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Missing not null column in input: c. All not null columns should be initialized, code: 2032 2025-06-25T14:38:07.796510Z node 11 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=11&id=NmM2N2Q5ZTgtYTI5OGJkMzQtOGJmZjEyOWEtNTUzMmEwZDc=, ActorId: [11:7519896266365652202:2324], ActorState: ExecuteState, TraceId: 01jykrcmwk3rxpggbn99g6jyzs, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: >> KqpInplaceUpdate::SingleRowIf-UseSink |81.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |81.7%| [LD] {RESULT} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |81.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut >> KqpInplaceUpdate::SingleRowSimple-UseSink [GOOD] >> KqpInplaceUpdate::SingleRowStr+UseSink >> KqpPg::PgUpdate+useSink [GOOD] >> KqpPg::PgUpdate-useSink >> KqpImmediateEffects::TxWithWriteAtTheEnd-UseSink [GOOD] >> KqpImmediateEffects::UnobservedUncommittedChangeConflict >> KqpWorkloadService::TestCpuLoadThresholdRefresh [GOOD] >> KqpWorkloadService::TestHandlerActorCleanup >> KqpImmediateEffects::TxWithReadAtTheEnd-UseSink [GOOD] >> KqpImmediateEffects::TxWithWriteAtTheEnd+UseSink >> KqpEffects::InsertAbort_Literal_Conflict-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::EmptyUpdate-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 2182, MsgBus: 27902 2025-06-25T14:37:51.476435Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896196239877314:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:51.476551Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001857/r3tmp/tmptlr7C1/pdisk_1.dat 2025-06-25T14:37:51.856420Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896196239877289:2080] 1750862271475625 != 1750862271475628 2025-06-25T14:37:51.856453Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2182, node 1 2025-06-25T14:37:51.915732Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:51.918454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:51.922056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:51.996910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:51.996934Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:51.996942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:51.997100Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27902 2025-06-25T14:37:52.491904Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for 
task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27902 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:52.751614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:52.780583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:52.992692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.188117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.280446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:54.997253Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896209124780812:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:54.997369Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:55.393067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:55.474198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:55.571728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:55.603097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:55.642164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:55.715803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:55.787165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:55.864122Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896213419748778:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:55.864203Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:55.864445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896213419748783:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:55.868058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:55.880657Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896213419748785:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:55.967697Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896213419748838:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:56.546831Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896196239877314:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:56.546905Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:57.450849Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-25T14:37:57.476936Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:37:57.477112Z node 1 :TX_DATASHARD ERROR: finish_pr ... ACE: datashard_pipeline.cpp:1910: Advance execution plan for [1750862289792:281474976715664] at 72075186224037888 executing on unit DropCdcStream 2025-06-25T14:38:09.751131Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1750862289792:281474976715664] at 72075186224037888 to execution unit CreateIncrementalRestoreSrc 2025-06-25T14:38:09.751141Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1750862289792:281474976715664] at 72075186224037888 on unit CreateIncrementalRestoreSrc 2025-06-25T14:38:09.751150Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1750862289792:281474976715664] at 72075186224037888 is Executed 2025-06-25T14:38:09.751158Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1750862289792:281474976715664] at 72075186224037888 executing on unit CreateIncrementalRestoreSrc 2025-06-25T14:38:09.751165Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1750862289792:281474976715664] at 72075186224037888 to execution unit CompleteOperation 2025-06-25T14:38:09.751173Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1750862289792:281474976715664] at 72075186224037888 on unit CompleteOperation 2025-06-25T14:38:09.751329Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1750862289792:281474976715664] at 72075186224037888 is DelayComplete 2025-06-25T14:38:09.751344Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1750862289792:281474976715664] at 72075186224037888 executing on unit CompleteOperation 2025-06-25T14:38:09.751364Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1750862289792:281474976715664] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T14:38:09.751377Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1750862289792:281474976715664] at 72075186224037888 on unit CompletedOperations 2025-06-25T14:38:09.751397Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1750862289792:281474976715664] at 
72075186224037888 is Executed 2025-06-25T14:38:09.751406Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1750862289792:281474976715664] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T14:38:09.751416Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1750862289792:281474976715664] at 72075186224037888 has finished 2025-06-25T14:38:09.751426Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:38:09.751435Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T14:38:09.751443Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:38:09.751453Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:38:09.760776Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [3:7519896269372901783:2340], Recipient [3:7519896269372901840:2305]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T14:38:09.760826Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T14:38:09.765629Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [3:7519896269372901783:2340], Recipient [3:7519896269372901762:2299]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T14:38:09.765670Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:38:09.765953Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1750862289792} 2025-06-25T14:38:09.765993Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:38:09.766048Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:38:09.766070Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1750862289792:281474976715664] at 72075186224037888 on unit DropTable 2025-06-25T14:38:09.766083Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1750862289792:281474976715664] at 72075186224037888 on unit CompleteOperation 2025-06-25T14:38:09.766131Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750862289792 : 281474976715664] from 72075186224037888 at tablet 72075186224037888 send result to client [3:7519896247898064900:2144], exec latency: 1 ms, propose latency: 16 ms 2025-06-25T14:38:09.766164Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715664 state PreOffline TxInFly 0 2025-06-25T14:38:09.766209Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:38:09.766312Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:7519896269372901754:2325], Recipient [3:7519896269372901762:2299]: 
NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-25T14:38:09.772728Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [3:7519896273667869326:2331], Recipient [3:7519896269372901762:2299]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046644480 Status: OK ServerId: [3:7519896273667869329:2502] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T14:38:09.772767Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:38:09.774761Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [3:7519896247898064900:2144], Recipient [3:7519896269372901762:2299]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715664 2025-06-25T14:38:09.774797Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-25T14:38:09.774816Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715664 datashard 72075186224037888 state PreOffline 2025-06-25T14:38:09.774870Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:38:09.778582Z node 3 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037888 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:38:09.778670Z node 3 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037888 Initiating switch from PreOffline to Offline state 2025-06-25T14:38:09.780491Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:7519896273667869326:2331], Recipient [3:7519896269372901762:2299]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046644480 ClientId: [3:7519896273667869326:2331] ServerId: [3:7519896273667869329:2502] } 2025-06-25T14:38:09.780514Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:38:09.785721Z node 3 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037888 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:38:09.785857Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:7519896269372901754:2325], Recipient [3:7519896269372901762:2299]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-25T14:38:09.786304Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [3:7519896273667869334:2332], Recipient [3:7519896269372901762:2299]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046644480 Status: OK ServerId: [3:7519896273667869335:2507] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T14:38:09.786327Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:38:09.787387Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [3:7519896247898064900:2144], Recipient [3:7519896269372901762:2299]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046644480 State: 4 2025-06-25T14:38:09.787415Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-25T14:38:09.787429Z node 3 :TX_DATASHARD DEBUG: 
datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037888 state Offline 2025-06-25T14:38:09.787532Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:7519896273667869334:2332], Recipient [3:7519896269372901762:2299]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046644480 ClientId: [3:7519896273667869334:2332] ServerId: [3:7519896273667869335:2507] } 2025-06-25T14:38:09.787548Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:38:09.791845Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829699, Sender [3:7519896269372901754:2325], Recipient [3:7519896269372901762:2299]: NKikimrTabletBase.TEvTabletStop TabletID: 72075186224037888 Reason: ReasonStop 2025-06-25T14:38:09.791895Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037888 reason = ReasonStop 2025-06-25T14:38:09.791954Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [3:7519896273667869289:2468], Recipient [3:7519896269372901762:2299]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:38:09.791971Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:38:09.791993Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:7519896273667869288:2467], serverId# [3:7519896273667869289:2468], sessionId# [0:0:0] 2025-06-25T14:38:09.792463Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-25T14:38:09.792648Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [3:7519896269372901754:2325], Recipient [3:7519896269372901762:2299]: NKikimr::TEvTablet::TEvTabletDead 2025-06-25T14:38:09.792917Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-25T14:38:09.792990Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-25T14:38:09.794582Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 271843840, Sender [3:7519896247898064565:2064], Recipient [3:7519896269372901840:2305]: NKikimr::TEvPipeCache::TEvDeliveryProblem 2025-06-25T14:38:09.794607Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3167: StateWork, processing event TEvPipeCache::TEvDeliveryProblem 2025-06-25T14:38:09.794621Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037888 from 72075186224037889 is reset >> KqpImmediateEffects::UpdateAfterUpsert [GOOD] >> KqpImmediateEffects::UpdateAfterInsert >> KqpEffects::AlterAfterUpsertTransaction+UseSink [GOOD] >> KqpEffects::AlterAfterUpsertTransaction-UseSink >> KqpInplaceUpdate::Negative_SingleRowWithKeyCast+UseSink [GOOD] >> KqpInplaceUpdate::Negative_SingleRowListFromRange-UseSink >> KqpWrite::Insert [GOOD] >> KqpWrite::CastValuesOptional >> KqpImmediateEffects::ConflictingKeyRW1WR2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertAbort_Literal_Conflict-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 63407, MsgBus: 20217 2025-06-25T14:37:49.908777Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896185858805142:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:49.908838Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001860/r3tmp/tmpHsCSoP/pdisk_1.dat 2025-06-25T14:37:50.304873Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:50.305490Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896185858805108:2080] 1750862269907771 != 1750862269907774 TServer::EnableGrpc on GrpcPort 63407, node 1 2025-06-25T14:37:50.322269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:50.324516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:50.326585Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:50.375840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:50.375867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:50.375878Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:50.376015Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20217 TClient is connected to server localhost:20217 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:37:50.932438Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:50.948329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:37:50.980468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:51.137192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:51.322538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:51.395918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:37:53.333238Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896203038675939:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:53.333341Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:53.665844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:53.709259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:53.744974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:53.831315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:53.902103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:53.944842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:53.994913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:54.080658Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896207333643897:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:54.080761Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:54.081156Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896207333643902:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:54.085624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:54.132254Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896207333643904:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:37:54.226471Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896207333643955:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:54.980799Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896185858805142:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:54.980899Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:55.427933Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-25T14:37:55.443283Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:37:55.443457Z node 1 :TX_DATASHARD ERROR: finish_ ... cted 2025-06-25T14:38:04.130192Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:04.132767Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8498, node 3 2025-06-25T14:38:04.171064Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:04.171087Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:04.171096Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:04.171225Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3826 TClient is connected to server localhost:3826 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:38:04.858064Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:04.876565Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:04.898708Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:04.960575Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:05.101253Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:05.477094Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:05.616845Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:08.545757Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896269680992755:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.545866Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.607393Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.645030Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.682936Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.712019Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.783188Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.858761Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.956572Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.991990Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519896248206154664:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:08.994361Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:09.057773Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[3:7519896273975960719:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.057880Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.058103Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896273975960724:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.062055Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:09.076699Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519896273975960726:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:09.166401Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519896273975960779:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:10.847992Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7519896278270928356:2482], TxId: 281474976710673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=NDc2ZTkyZDAtNGI4YWRmNjQtNDI0ZWVjOGEtY2E4Y2QwZg==. TraceId : 01jykrcqn0eds1xpr7nvxbqg21. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-25T14:38:10.848211Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519896278270928358:2483], TxId: 281474976710673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=NDc2ZTkyZDAtNGI4YWRmNjQtNDI0ZWVjOGEtY2E4Y2QwZg==. TraceId : 01jykrcqn0eds1xpr7nvxbqg21. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [3:7519896278270928353:2469], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-25T14:38:10.848476Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NDc2ZTkyZDAtNGI4YWRmNjQtNDI0ZWVjOGEtY2E4Y2QwZg==, ActorId: [3:7519896278270928321:2469], ActorState: ExecuteState, TraceId: 01jykrcqn0eds1xpr7nvxbqg21, Create QueryResponse for error on request, msg: >> KqpPg::TableSelect+useSink [GOOD] >> KqpPg::TableSelect-useSink >> KqpPg::EquiJoin-useSink [GOOD] >> KqpPg::ExplainColumnsReorder >> KqpInplaceUpdate::SingleRowArithm+UseSink >> Yq_1::CreateQuery_Without_Connection [GOOD] >> KqpWrite::UpsertNullKey [GOOD] >> KqpWrite::ProjectReplace-UseSink >> KqpImmediateEffects::ConflictingKeyW1WR2 [GOOD] >> KqpImmediateEffects::ConflictingKeyW1RWR2 >> KqpInplaceUpdate::Negative_SingleRowListFromRange+UseSink [GOOD] >> KqpInplaceUpdate::Negative_BatchUpdate-UseSink >> TEvLocalSyncDataTests::SqueezeBlocks3 [GOOD] >> TQuorumTrackerTests::Erasure4Plus2BlockIncludingMyFailDomain_8_2 [GOOD] >> KqpImmediateEffects::UpsertDuplicates [GOOD] >> KqpImmediateEffects::UpsertConflictInteractiveTxAborted >> KqpImmediateEffects::InsertExistingKey-UseSink [GOOD] >> KqpImmediateEffects::Interactive >> KqpPg::InsertFromSelect_NoReorder-useSink [GOOD] >> KqpPg::InsertFromSelect_Serial+useSink >> KqpImmediateEffects::InteractiveTxWithWriteAtTheEnd [GOOD] >> KqpImmediateEffects::ManyFlushes |81.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TQuorumTrackerTests::Erasure4Plus2BlockIncludingMyFailDomain_8_2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::CreateQuery_Without_Connection [GOOD] Test command err: 2025-06-25T14:37:09.323593Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896013673626460:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:09.323765Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0625 14:37:09.527717913 237041 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:09.527885691 237041 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:10.327767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:10.337735Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:10.555851Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10982: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:10982 2025-06-25T14:37:10.555997Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10982: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:10982 } ] 2025-06-25T14:37:11.332781Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:12.154034Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:10982: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:10982 } ] 2025-06-25T14:37:12.353667Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:13.356673Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:13.969020Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896030853495985:2272], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:13.969161Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:14.037128Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896030853495985:2272], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fe1/r3tmp/tmpzTXAWd/pdisk_1.dat 2025-06-25T14:37:14.152577Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896030853495985:2272], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:14.328137Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896013673626460:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:14.328213Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:14.342236Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 10982, node 1 E0625 14:37:14.528349326 237147 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:14.528506992 237147 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// TClient is connected to server localhost:26895 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:37:14.734830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:14.734861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:14.734869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:14.735026Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:14.741674Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient::Ls response: 2025-06-25T14:37:14.877114Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/queries". Create session OK 2025-06-25T14:37:14.877154Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/queries" 2025-06-25T14:37:14.877164Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/queries" 2025-06-25T14:37:14.880390Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create session OK 2025-06-25T14:37:14.880412Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:14.880418Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:14.885903Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/jobs". Create session OK 2025-06-25T14:37:14.885922Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/jobs" 2025-06-25T14:37:14.885928Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/jobs" 2025-06-25T14:37:14.897484Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". Create session OK 2025-06-25T14:37:14.897516Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:14.897523Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:14.900464Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". 
Create session OK 2025-06-25T14:37:14.900494Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-25T14:37:14.900499Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-25T14:37:14.901954Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/compute_databases". Create session OK 2025-06-25T14:37:14.901977Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:14.901983Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:14.904650Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-25T14:37:14.904664Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-25T14:37:14.904669Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-25T14:37:14.907645Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/quotas". Create session OK 2025-06-25T14:37:14.907658Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/quotas" 2025-06-25T14:37:14.907662Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/quotas" 2025-06-25T14:37:14.910234Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-25T14:37:14.910249Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-25T14:37:14.910254Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-25T14:37:14.913336Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". Create session OK 2025-06-25T14:37:14.913357Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-25T14:37:14.913364Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-25T14:37:14.917974Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-25T14:37:14.917998Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-25T14:37:14.918004Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-25T14:37:14.921472Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". Create session OK 2025-06-25T14:37:14.921486Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-25T14:37:14.921492Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-25T14:37:14.923944Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". 
Create session OK 2025-06-25T14:37:14.923959Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-25T14:37:14.923964Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" 2025-06-25T14:37:14.926143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:37:14.935044Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "Root/yq" 2025-06-25T14:37:14.935082Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "Root/yq": Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: ... .405030Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.405132Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.405224Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.405340Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.405440Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.405581Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.405673Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.405826Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.406010Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.406148Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.406307Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.406419Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.406512Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.406592Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.406701Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.406837Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.406998Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.407118Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.407241Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 
2025-06-25T14:37:51.407334Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.407435Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.407577Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.407701Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.407824Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.408024Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.408174Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.408292Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.408374Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.408453Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.408523Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.408604Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.408716Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.409011Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.409368Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.409539Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.409693Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.409789Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.409888Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.409967Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.410063Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.410165Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.410344Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.410442Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.410516Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.410633Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.410736Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.410819Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.410955Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.411027Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.411137Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 
2025-06-25T14:37:51.411225Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.411292Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.411361Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.411493Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.411665Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.411821Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.411910Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.412351Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.412496Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.412626Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.412734Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.412851Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.413077Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.413202Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.413307Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.413432Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.413538Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.413613Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.413712Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.413859Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.413964Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.414056Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.414121Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.414213Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.414351Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.414528Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.414597Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.414683Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.414763Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.414851Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.414981Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 
2025-06-25T14:37:51.415044Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.415129Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.415237Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.415324Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.415409Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.415519Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.415589Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.415648Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.415772Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.415843Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.415936Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.416068Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.416132Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.416228Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.416299Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.417853Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.417951Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.418028Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.418099Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.418170Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.418265Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.418336Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:37:51.418453Z node 1 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: [good] Yq_1::CreateQuery_Without_Connection >> TKeyValueTest::TestSetExecutorFastLogPolicy [GOOD] >> KqpPg::DropSequence [GOOD] >> KqpPg::DeleteWithQueryService+useSink >> KqpPg::AlterColumnSetDefaultFromSequence [GOOD] >> KqpPg::CreateTableIfNotExists_GenericQuery >> KqpInplaceUpdate::SingleRowArithm-UseSink [GOOD] >> KqpInplaceUpdate::SingleRowIf+UseSink |81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest |81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> KqpImmediateEffects::DeleteAfterUpsert [GOOD] >> KqpImmediateEffects::DeleteAfterInsert >> TBoardSubscriberTest::SimpleSubscriber >> KqpInplaceUpdate::SingleRowStr+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestSetExecutorFastLogPolicy [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] 
sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! 
!Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! 
new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:107:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:86:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:88:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:88:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! 
new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:88:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:91:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:92:2057] recipient: [11:90:2118] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:94:2057] recipient: [11:90:2118] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:93:2119] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:113:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (a ... D 72057594037927937 is [57:59:2099] sender: [57:141:2057] recipient: [57:38:2085] Leader for TabletID 72057594037927937 is [57:59:2099] sender: [57:144:2057] recipient: [57:14:2061] Leader for TabletID 72057594037927937 is [57:59:2099] sender: [57:145:2057] recipient: [57:143:2158] Leader for TabletID 72057594037927937 is [57:146:2159] sender: [57:147:2057] recipient: [57:143:2158] !Reboot 72057594037927937 (actor [57:59:2099]) rebooted! !Reboot 72057594037927937 (actor [57:59:2099]) tablet resolver refreshed! new actor is[57:146:2159] Leader for TabletID 72057594037927937 is [0:0:0] sender: [58:57:2057] recipient: [58:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [58:57:2057] recipient: [58:54:2097] Leader for TabletID 72057594037927937 is [58:59:2099] sender: [58:60:2057] recipient: [58:54:2097] Leader for TabletID 72057594037927937 is [58:59:2099] sender: [58:77:2057] recipient: [58:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [59:57:2057] recipient: [59:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [59:57:2057] recipient: [59:52:2097] Leader for TabletID 72057594037927937 is [59:59:2099] sender: [59:60:2057] recipient: [59:52:2097] Leader for TabletID 72057594037927937 is [59:59:2099] sender: [59:77:2057] recipient: [59:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [60:57:2057] recipient: [60:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [60:57:2057] recipient: [60:53:2097] Leader for TabletID 72057594037927937 is [60:59:2099] sender: [60:60:2057] recipient: [60:53:2097] Leader for TabletID 72057594037927937 is [60:59:2099] sender: [60:77:2057] recipient: [60:14:2061] !Reboot 72057594037927937 (actor [60:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [60:59:2099] sender: [60:79:2057] recipient: [60:38:2085] Leader for TabletID 72057594037927937 is [60:59:2099] sender: [60:82:2057] recipient: [60:14:2061] Leader for TabletID 72057594037927937 is [60:59:2099] sender: [60:83:2057] recipient: [60:81:2112] Leader for TabletID 72057594037927937 is [60:84:2113] sender: [60:85:2057] recipient: [60:81:2112] !Reboot 72057594037927937 (actor [60:59:2099]) rebooted! !Reboot 72057594037927937 (actor [60:59:2099]) tablet resolver refreshed! new actor is[60:84:2113] Leader for TabletID 72057594037927937 is [60:84:2113] sender: [60:170:2057] recipient: [60:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [61:57:2057] recipient: [61:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [61:57:2057] recipient: [61:53:2097] Leader for TabletID 72057594037927937 is [61:59:2099] sender: [61:60:2057] recipient: [61:53:2097] Leader for TabletID 72057594037927937 is [61:59:2099] sender: [61:77:2057] recipient: [61:14:2061] !Reboot 72057594037927937 (actor [61:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [61:59:2099] sender: [61:79:2057] recipient: [61:38:2085] Leader for TabletID 72057594037927937 is [61:59:2099] sender: [61:82:2057] recipient: [61:14:2061] Leader for TabletID 72057594037927937 is [61:59:2099] sender: [61:83:2057] recipient: [61:81:2112] Leader for TabletID 72057594037927937 is [61:84:2113] sender: [61:85:2057] recipient: [61:81:2112] !Reboot 72057594037927937 (actor [61:59:2099]) rebooted! !Reboot 72057594037927937 (actor [61:59:2099]) tablet resolver refreshed! new actor is[61:84:2113] Leader for TabletID 72057594037927937 is [61:84:2113] sender: [61:170:2057] recipient: [61:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [62:57:2057] recipient: [62:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [62:57:2057] recipient: [62:53:2097] Leader for TabletID 72057594037927937 is [62:59:2099] sender: [62:60:2057] recipient: [62:53:2097] Leader for TabletID 72057594037927937 is [62:59:2099] sender: [62:77:2057] recipient: [62:14:2061] !Reboot 72057594037927937 (actor [62:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [62:59:2099] sender: [62:80:2057] recipient: [62:38:2085] Leader for TabletID 72057594037927937 is [62:59:2099] sender: [62:83:2057] recipient: [62:14:2061] Leader for TabletID 72057594037927937 is [62:59:2099] sender: [62:84:2057] recipient: [62:82:2112] Leader for TabletID 72057594037927937 is [62:85:2113] sender: [62:86:2057] recipient: [62:82:2112] !Reboot 72057594037927937 (actor [62:59:2099]) rebooted! !Reboot 72057594037927937 (actor [62:59:2099]) tablet resolver refreshed! new actor is[62:85:2113] Leader for TabletID 72057594037927937 is [62:85:2113] sender: [62:171:2057] recipient: [62:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [63:57:2057] recipient: [63:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [63:57:2057] recipient: [63:53:2097] Leader for TabletID 72057594037927937 is [63:59:2099] sender: [63:60:2057] recipient: [63:53:2097] Leader for TabletID 72057594037927937 is [63:59:2099] sender: [63:77:2057] recipient: [63:14:2061] !Reboot 72057594037927937 (actor [63:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [63:59:2099] sender: [63:82:2057] recipient: [63:38:2085] Leader for TabletID 72057594037927937 is [63:59:2099] sender: [63:84:2057] recipient: [63:14:2061] Leader for TabletID 72057594037927937 is [63:59:2099] sender: [63:86:2057] recipient: [63:85:2114] Leader for TabletID 72057594037927937 is [63:87:2115] sender: [63:88:2057] recipient: [63:85:2114] !Reboot 72057594037927937 (actor [63:59:2099]) rebooted! !Reboot 72057594037927937 (actor [63:59:2099]) tablet resolver refreshed! new actor is[63:87:2115] Leader for TabletID 72057594037927937 is [63:87:2115] sender: [63:173:2057] recipient: [63:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [64:57:2057] recipient: [64:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [64:57:2057] recipient: [64:53:2097] Leader for TabletID 72057594037927937 is [64:59:2099] sender: [64:60:2057] recipient: [64:53:2097] Leader for TabletID 72057594037927937 is [64:59:2099] sender: [64:77:2057] recipient: [64:14:2061] !Reboot 72057594037927937 (actor [64:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [64:59:2099] sender: [64:82:2057] recipient: [64:38:2085] Leader for TabletID 72057594037927937 is [64:59:2099] sender: [64:85:2057] recipient: [64:14:2061] Leader for TabletID 72057594037927937 is [64:59:2099] sender: [64:86:2057] recipient: [64:84:2114] Leader for TabletID 72057594037927937 is [64:87:2115] sender: [64:88:2057] recipient: [64:84:2114] !Reboot 72057594037927937 (actor [64:59:2099]) rebooted! !Reboot 72057594037927937 (actor [64:59:2099]) tablet resolver refreshed! new actor is[64:87:2115] Leader for TabletID 72057594037927937 is [64:87:2115] sender: [64:173:2057] recipient: [64:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [65:57:2057] recipient: [65:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [65:57:2057] recipient: [65:53:2097] Leader for TabletID 72057594037927937 is [65:59:2099] sender: [65:60:2057] recipient: [65:53:2097] Leader for TabletID 72057594037927937 is [65:59:2099] sender: [65:77:2057] recipient: [65:14:2061] !Reboot 72057594037927937 (actor [65:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [65:59:2099] sender: [65:83:2057] recipient: [65:38:2085] Leader for TabletID 72057594037927937 is [65:59:2099] sender: [65:86:2057] recipient: [65:14:2061] Leader for TabletID 72057594037927937 is [65:59:2099] sender: [65:87:2057] recipient: [65:85:2114] Leader for TabletID 72057594037927937 is [65:88:2115] sender: [65:89:2057] recipient: [65:85:2114] !Reboot 72057594037927937 (actor [65:59:2099]) rebooted! !Reboot 72057594037927937 (actor [65:59:2099]) tablet resolver refreshed! new actor is[65:88:2115] Leader for TabletID 72057594037927937 is [65:88:2115] sender: [65:174:2057] recipient: [65:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [66:57:2057] recipient: [66:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [66:57:2057] recipient: [66:54:2097] Leader for TabletID 72057594037927937 is [66:59:2099] sender: [66:60:2057] recipient: [66:54:2097] Leader for TabletID 72057594037927937 is [66:59:2099] sender: [66:77:2057] recipient: [66:14:2061] !Reboot 72057594037927937 (actor [66:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [66:59:2099] sender: [66:86:2057] recipient: [66:38:2085] Leader for TabletID 72057594037927937 is [66:59:2099] sender: [66:89:2057] recipient: [66:14:2061] Leader for TabletID 72057594037927937 is [66:59:2099] sender: [66:90:2057] recipient: [66:88:2117] Leader for TabletID 72057594037927937 is [66:91:2118] sender: [66:92:2057] recipient: [66:88:2117] !Reboot 72057594037927937 (actor [66:59:2099]) rebooted! !Reboot 72057594037927937 (actor [66:59:2099]) tablet resolver refreshed! new actor is[66:91:2118] Leader for TabletID 72057594037927937 is [66:91:2118] sender: [66:177:2057] recipient: [66:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [67:57:2057] recipient: [67:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [67:57:2057] recipient: [67:53:2097] Leader for TabletID 72057594037927937 is [67:59:2099] sender: [67:60:2057] recipient: [67:53:2097] Leader for TabletID 72057594037927937 is [67:59:2099] sender: [67:77:2057] recipient: [67:14:2061] !Reboot 72057594037927937 (actor [67:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [67:59:2099] sender: [67:86:2057] recipient: [67:38:2085] Leader for TabletID 72057594037927937 is [67:59:2099] sender: [67:89:2057] recipient: [67:14:2061] Leader for TabletID 72057594037927937 is [67:59:2099] sender: [67:90:2057] recipient: [67:88:2117] Leader for TabletID 72057594037927937 is [67:91:2118] sender: [67:92:2057] recipient: [67:88:2117] !Reboot 72057594037927937 (actor [67:59:2099]) rebooted! !Reboot 72057594037927937 (actor [67:59:2099]) tablet resolver refreshed! new actor is[67:91:2118] Leader for TabletID 72057594037927937 is [67:91:2118] sender: [67:177:2057] recipient: [67:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [68:57:2057] recipient: [68:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [68:57:2057] recipient: [68:53:2097] Leader for TabletID 72057594037927937 is [68:59:2099] sender: [68:60:2057] recipient: [68:53:2097] Leader for TabletID 72057594037927937 is [68:59:2099] sender: [68:77:2057] recipient: [68:14:2061] !Reboot 72057594037927937 (actor [68:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [68:59:2099] sender: [68:87:2057] recipient: [68:38:2085] Leader for TabletID 72057594037927937 is [68:59:2099] sender: [68:90:2057] recipient: [68:14:2061] Leader for TabletID 72057594037927937 is [68:59:2099] sender: [68:91:2057] recipient: [68:89:2117] Leader for TabletID 72057594037927937 is [68:92:2118] sender: [68:93:2057] recipient: [68:89:2117] !Reboot 72057594037927937 (actor [68:59:2099]) rebooted! !Reboot 72057594037927937 (actor [68:59:2099]) tablet resolver refreshed! 
new actor is[68:92:2118] Leader for TabletID 72057594037927937 is [68:92:2118] sender: [68:178:2057] recipient: [68:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [69:57:2057] recipient: [69:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [69:57:2057] recipient: [69:53:2097] Leader for TabletID 72057594037927937 is [69:59:2099] sender: [69:60:2057] recipient: [69:53:2097] Leader for TabletID 72057594037927937 is [69:59:2099] sender: [69:77:2057] recipient: [69:14:2061] >> KqpImmediateEffects::Replace >> KqpImmediateEffects::DeleteOnAfterInsertWithIndex [GOOD] >> KqpImmediateEffects::ForceImmediateEffectsExecution+UseSink >> KqpEffects::AlterAfterUpsertBeforeUpsertTransaction+UseSink >> KqpInplaceUpdate::SingleRowIf-UseSink [GOOD] >> KqpInplaceUpdate::SingleRowPgNotNull+UseSink >> KqpImmediateEffects::TxWithWriteAtTheEnd+UseSink [GOOD] >> KqpImmediateEffects::UnobservedUncommittedChangeConflict [GOOD] >> KqpImmediateEffects::UpdateAfterInsert [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::SingleRowStr+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 32493, MsgBus: 3495 2025-06-25T14:38:03.170526Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896248161484800:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:03.178088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001855/r3tmp/tmpim9jZD/pdisk_1.dat 2025-06-25T14:38:03.715275Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:03.716573Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896248161484768:2080] 1750862283162255 != 1750862283162258 2025-06-25T14:38:03.742063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:03.742186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:03.784932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32493, node 1 2025-06-25T14:38:03.901834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:03.901859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:03.901869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:03.901982Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3495 2025-06-25T14:38:04.212882Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3495 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:04.701987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:04.729344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:04.746467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:04.918797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:05.144712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:05.291479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:07.692628Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896265341355601:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:07.692728Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.073852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.114445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.154295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.171974Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896248161484800:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:08.172050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:08.184144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.232612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.299985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.339315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.405702Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519896269636323561:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.405779Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.407252Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896269636323566:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.411737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:08.422637Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896269636323568:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:08.521063Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896269636323620:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:10.008583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896281897965000:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:11.410186Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001855/r3tmp/tmprnfiXE/pdisk_1.dat 2025-06-25T14:38:11.582808Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:11.586105Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896281897964979:2080] 1750862291409591 != 1750862291409594 2025-06-25T14:38:11.623205Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:11.623271Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:11.625104Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16481, node 2 2025-06-25T14:38:11.749254Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:11.749274Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:11.749280Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:11.749378Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6808 TClient is connected to server localhost:6808 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:12.310513Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:12.321320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:12.333764Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:12.416103Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.418083Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:38:12.571625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:12.643954Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:14.656046Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896294782868500:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:14.656127Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:14.720137Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.756089Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.785899Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.822252Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.858522Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.901294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.976483Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.056585Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896299077836451:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.056678Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.057024Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896299077836456:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.061231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:15.073329Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896299077836458:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:15.151068Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896299077836509:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:16.160707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.410443Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896281897965000:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:16.410516Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpWrite::CastValuesOptional [GOOD] >> KqpEffects::AlterAfterUpsertTransaction-UseSink [GOOD] >> KqpEffects::AlterAfterUpsertBeforeUpsertTransaction-UseSink >> TBoardSubscriberTest::SimpleSubscriber [GOOD] >> KqpImmediateEffects::Upsert >> KqpEffects::InsertAbort_Select_Duplicates+UseSink >> KqpInplaceUpdate::Negative_SingleRowListFromRange-UseSink [GOOD] >> KqpPg::PgUpdate-useSink [GOOD] >> KqpPg::JoinWithQueryService-StreamLookup |81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::SimpleSubscriber [GOOD] >> KqpImmediateEffects::ConflictingKeyRW1WR2 [GOOD] >> KqpImmediateEffects::ConflictingKeyRW1RWR2 >> KqpInplaceUpdate::Negative_SingleRowWithKeyCast-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::TxWithWriteAtTheEnd+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 14255, MsgBus: 3007 2025-06-25T14:38:03.407394Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896248256880473:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:03.418272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184d/r3tmp/tmprzOi8D/pdisk_1.dat 2025-06-25T14:38:03.961285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:03.961368Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:03.969592Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:03.981667Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14255, node 1 2025-06-25T14:38:04.125962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-25T14:38:04.125986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:04.125996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:04.126116Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:04.421234Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3007 TClient is connected to server localhost:3007 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:05.096663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:05.125518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:05.142863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:05.538999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:05.802251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:06.035040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:08.271872Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896269731718550:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.272042Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.408700Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896248256880473:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:08.408759Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:08.583973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.619649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.651242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.699452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.741178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.786992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.860041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.966207Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519896269731719217:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.966266Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.966346Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896269731719222:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.973286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:08.986414Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896269731719224:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:09.053461Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896274026686572:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:10.494008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 9726, MsgBus: 25985 2025-06-25T14:38:12.068916Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896288216855855:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:12.069011Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184d/r3tmp/tmpEs7b2v/pdisk_1.dat 2025-06-25T14:38:12.198227Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896288216855839:2080] 1750862292068395 != 1750862292068398 2025-06-25T14:38:12.206928Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:12.219100Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:12.219176Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:12.222448Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9726, node 2 2025-06-25T14:38:12.425038Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:12.425063Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:12.425072Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:12.425205Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25985 TClient is connected to server localhost:25985 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:12.925342Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:12.940770Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:12.955611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.033876Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.124785Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:13.253690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.334044Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:15.556456Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896301101759350:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.556543Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.606370Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.642188Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.708734Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.784544Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.817625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.853010Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.922417Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.984813Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896301101760015:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.984894Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.985154Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896301101760020:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.988912Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:15.999848Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896301101760022:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:16.081468Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896305396727369:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:17.017816Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:17.069288Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896288216855855:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:17.069369Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpWrite::CastValuesOptional [GOOD] Test command err: Trying to start YDB, gRPC: 19659, MsgBus: 18151 2025-06-25T14:38:04.340512Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896253721902262:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:04.346497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001843/r3tmp/tmpHTN7UM/pdisk_1.dat 2025-06-25T14:38:05.008429Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896253721902218:2080] 1750862284330480 != 1750862284330483 2025-06-25T14:38:05.021668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:05.021757Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:05.022350Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:05.038554Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19659, node 1 2025-06-25T14:38:05.261300Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:05.261322Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:05.261329Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:05.261436Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:05.402134Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected 
to server localhost:18151 TClient is connected to server localhost:18151 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:06.825280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:06.855597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:07.026581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:07.243343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:07.403757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:09.230530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896275196740334:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.230663Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.348516Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896253721902262:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:09.348605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:09.845146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.913161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.950895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.996063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.031279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.120390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.169951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.226896Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519896279491708294:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.226970Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.227215Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896279491708299:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.230008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:10.238435Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896279491708301:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:10.332987Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896279491708352:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:11.484101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:11.713029Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;stat ... subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:38:11.844347Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=Y2ZjNjRmNDktOTJjMTJmZjktOTU0OTJhMTgtZmMyNzE3NDE=, ActorId: [1:7519896283786675923:2480], ActorState: ExecuteState, TraceId: 01jykrcrs39cm4cz5gmexq708p, Create QueryResponse for error on request, msg:
: Error: Constraint violated. Table: `/Root/KeyValue`., code: 2012
: Error: Conflict with existing key., code: 2012 Trying to start YDB, gRPC: 11993, MsgBus: 1559 2025-06-25T14:38:12.971569Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896286241141974:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:12.971619Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001843/r3tmp/tmpa1zIny/pdisk_1.dat 2025-06-25T14:38:13.097922Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:13.101042Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896286241141952:2080] 1750862292970704 != 1750862292970707 TServer::EnableGrpc on GrpcPort 11993, node 2 2025-06-25T14:38:13.125519Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:13.125618Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:13.127943Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:13.174105Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:13.174123Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:13.174131Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:13.174225Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1559 TClient is connected to server localhost:1559 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:13.642883Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:38:13.655045Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.722731Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.870392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.989711Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:13.990812Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:16.108581Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896303421012762:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.108676Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.157007Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.188203Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.217962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.245336Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.283096Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.332913Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.382668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.442447Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896303421013415:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.442511Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896303421013420:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.442574Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.446225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:16.457647Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896303421013422:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:16.531497Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896303421013473:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:17.972422Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896286241141974:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:17.972507Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::UnobservedUncommittedChangeConflict [GOOD] Test command err: Trying to start YDB, gRPC: 28317, MsgBus: 12817 2025-06-25T14:38:03.483824Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896247876652234:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:03.484167Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001853/r3tmp/tmpyZ1AU9/pdisk_1.dat 2025-06-25T14:38:03.884928Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:03.886669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:03.886769Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:03.900487Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896247876652104:2080] 1750862283455608 != 1750862283455611 2025-06-25T14:38:03.903216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28317, node 1 2025-06-25T14:38:04.052891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:04.052915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:04.052922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:04.053061Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12817 2025-06-25T14:38:04.484937Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12817 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:05.038113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:05.090280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:05.105164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:05.456690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:05.937518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:06.112115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:08.271038Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896269351490226:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.271206Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.484580Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896247876652234:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:08.484669Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:08.690445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.733343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.816937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.846075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.881587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.918656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.999894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.108052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519896273646458193:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.108153Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.108585Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896273646458198:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.113241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:09.131269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:38:09.131474Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896273646458200:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:09.197033Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896273646458251:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:10.477609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is u ... thVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:12.836548Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:12.842216Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:12.847191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:12.922816Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.015196Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:13.055546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
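The TPoolCreatorActor retry ("Transaction ... completed, doublechecking") followed by the TX_PROXY "path exist, request accepts it" entry above is the usual outcome when several sessions race to create /Root/.metadata/workload_manager/pools/default: the losing creator treats "already exists" as success. A minimal Python sketch of that create-if-absent-with-retry idea, assuming hypothetical create_pool/AlreadyExists/Transient stand-ins rather than any real YDB API:

# Sketch only: generic "create, treat 'already exists' as success, retry
# transient outcomes" pattern suggested by the log entries above.
# create_pool(), AlreadyExists and Transient are hypothetical stand-ins.
import time


class AlreadyExists(Exception):
    """Path already exists; for an idempotent create this counts as success."""


class Transient(Exception):
    """Retryable outcome, e.g. 'transaction completed, doublechecking'."""


def create_pool(path: str) -> None:
    # Stand-in for the real scheme operation; here it always reports a race.
    raise AlreadyExists(path)


def ensure_default_pool(path: str, attempts: int = 5, backoff: float = 0.1) -> None:
    for attempt in range(1, attempts + 1):
        try:
            create_pool(path)
            return                         # created by this caller
        except AlreadyExists:
            return                         # created concurrently: also fine
        except Transient:
            time.sleep(backoff * attempt)  # brief backoff before re-checking
    raise RuntimeError(f"could not ensure {path} after {attempts} attempts")


if __name__ == "__main__":
    ensure_default_pool("/Root/.metadata/workload_manager/pools/default")
    print("pool ensured")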
2025-06-25T14:38:13.120242Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:15.448969Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896300391242235:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.449036Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.505383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.543627Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.578914Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.614464Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.656204Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.712080Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.788970Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.874749Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896300391242900:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.874859Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.875193Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896300391242905:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.879441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:15.895868Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896300391242907:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:15.975097Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896300391242958:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:16.947399Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896283211371436:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:16.947460Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:17.063148Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:17.578790Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976715676; 2025-06-25T14:38:17.590274Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [2:7519896308981178086:2502], Table: `/Root/TestImmediateEffects` ([72057594046644480:17:1]), SessionActorId: [2:7519896308981178030:2502]Got LOCKS BROKEN for table `/Root/TestImmediateEffects`. ShardID=72075186224037922, Sink=[2:7519896308981178086:2502].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-25T14:38:17.590881Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519896308981178079:2502], SessionActorId: [2:7519896308981178030:2502], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[2:7519896308981178030:2502]. isRollback=0 2025-06-25T14:38:17.591147Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=MWMzOThhMTQtYTk3ZmQ1NmMtMWMyMWYyNmYtNzFjODIwMTM=, ActorId: [2:7519896308981178030:2502], ActorState: ExecuteState, TraceId: 01jykrcyf54d7r2mnmznc8mgp9, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519896308981178080:2502] from: [2:7519896308981178079:2502] 2025-06-25T14:38:17.591237Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519896308981178080:2502] TxId: 281474976715676. Ctx: { TraceId: 01jykrcyf54d7r2mnmznc8mgp9, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWMzOThhMTQtYTk3ZmQ1NmMtMWMyMWYyNmYtNzFjODIwMTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T14:38:17.591422Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MWMzOThhMTQtYTk3ZmQ1NmMtMWMyMWYyNmYtNzFjODIwMTM=, ActorId: [2:7519896308981178030:2502], ActorState: ExecuteState, TraceId: 01jykrcyf54d7r2mnmznc8mgp9, Create QueryResponse for error on request, msg: |81.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |81.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |81.8%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::UpdateAfterInsert [GOOD] Test command err: Trying to start YDB, gRPC: 15310, MsgBus: 9037 2025-06-25T14:38:03.539036Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896248259765297:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:03.539271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184e/r3tmp/tmp8uzR6J/pdisk_1.dat 2025-06-25T14:38:04.055923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:04.056056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:04.060292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:04.085015Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15310, node 1 2025-06-25T14:38:04.224884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:04.224913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:04.224923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:04.225095Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:04.556504Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9037 TClient is connected to server localhost:9037 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:05.871607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:05.962355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:05.981050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:06.295858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:06.599546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:06.792344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:08.530239Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896269734603417:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.530316Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:08.541015Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896248259765297:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:08.541094Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:08.923823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.004142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.043146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.086016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.158447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.238846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.329312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.476517Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519896274029571385:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.476620Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.480434Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896274029571390:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.488879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:09.505746Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896274029571392:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:09.603590Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896274029571447:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:10.820654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 17178, MsgBus: 9400 2025-06-25T14:38:12.316665Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896284814379423:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:12.316808Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184e/r3tmp/tmpuctSOb/pdisk_1.dat 2025-06-25T14:38:12.488253Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:12.488364Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:12.497312Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:12.510001Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17178, node 2 2025-06-25T14:38:12.600514Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:12.600538Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:12.600546Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:12.600673Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9400 TClient is connected to server localhost:9400 WaitRootIsUp 'Root'... 
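The captured entries above share a fixed "TIMESTAMP node N :COMPONENT LEVEL: file.cpp:line: message" shape, so a run like this can be triaged by counting entries per component and severity. A rough stdlib-only sketch, assuming the regex below only approximates that format (it is not part of the test harness):

# Rough triage helper for output shaped like the blocks above.
import re
from collections import Counter

ENTRY = re.compile(r"node \d+ :(?P<component>[A-Z_]+) (?P<level>WARN|ERROR|INFO|DEBUG):")


def count_entries(text: str) -> Counter:
    """Count (component, level) pairs in a chunk of captured test output."""
    return Counter((m["component"], m["level"]) for m in ENTRY.finditer(text))


if __name__ == "__main__":
    sample = (
        "2025-06-25T14:38:15.448969Z node 2 :KQP_WORKLOAD_SERVICE WARN: "
        "scheme_actors.cpp:225: Failed to fetch pool info, NOT_FOUND "
        "2025-06-25T14:38:15.975097Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: ..."
    )
    for (component, level), n in count_entries(sample).most_common():
        print(f"{component:22s} {level:5s} {n}")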
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:13.045079Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:13.052579Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.128644Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.263934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.329621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.394356Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:15.721731Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896297699282908:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.721824Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.791494Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.819712Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.861979Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.923705Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.958618Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.032329Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.107269Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.167313Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896301994250867:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.167413Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896301994250872:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.167428Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.170797Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:16.179307Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896301994250874:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:16.249829Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896301994250925:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:17.314700Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:17.318649Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896284814379423:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:17.318704Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpImmediateEffects::InteractiveTxWithReadAtTheEnd+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::Negative_SingleRowListFromRange-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 26014, MsgBus: 17808 2025-06-25T14:38:04.467470Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896252481553544:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:04.504787Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001841/r3tmp/tmpf7lLLE/pdisk_1.dat 2025-06-25T14:38:05.470970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:05.471063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:05.489820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:05.508007Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:05.517700Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:05.520553Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896252481553338:2080] 1750862284445009 != 1750862284445012 TServer::EnableGrpc on GrpcPort 26014, node 1 2025-06-25T14:38:05.736829Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:05.736851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:05.736869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:05.736980Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17808 TClient is connected to server localhost:17808 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:06.893986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:06.926537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:07.093769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:07.272516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:07.358092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:09.042262Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896273956391456:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.042365Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.376302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.405195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.450295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.467893Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896252481553544:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:09.467970Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:09.501140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.550901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.643230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.731148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.828462Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519896273956392117:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.828539Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.828755Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896273956392122:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.837159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:09.864080Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896273956392124:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:09.969947Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896273956392175:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:11.138639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 26953, MsgBus: 6436 2025-06-25T14:38:12.758274Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896286766061101:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:12.758309Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001841/r3tmp/tmpWPIaMn/pdisk_1.dat 2025-06-25T14:38:12.894586Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:12.897225Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896286766061079:2080] 1750862292757457 != 1750862292757460 TServer::EnableGrpc on GrpcPort 26953, node 2 2025-06-25T14:38:12.933419Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:12.933504Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:12.935363Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:12.947347Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:12.947368Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:12.947374Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:12.947489Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6436 TClient is connected to server localhost:6436 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:13.346253Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:13.352976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:38:13.364977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:13.414089Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.559185Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.651249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
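In the UnobservedUncommittedChangeConflict output earlier, STATUS_LOCKS_BROKEN surfaces to the session as ABORTED with "Transaction locks invalidated ... code: 2001"; such optimistic-lock conflicts are typically handled on the client by re-running the whole transaction body. A minimal Python sketch of that retry loop, with TxAborted and run_transfer as hypothetical stand-ins (this is not the YDB SDK API):

# Sketch of bounded client-side retries on an ABORTED / broken-locks outcome.
import random
import time


class TxAborted(Exception):
    """Stand-in for an ABORTED status caused by invalidated optimistic locks."""


def run_transfer() -> str:
    # Stand-in transaction body; simulates a lock conflict about half the time.
    if random.random() < 0.5:
        raise TxAborted("Transaction locks invalidated, code: 2001")
    return "committed"


def with_tx_retries(body, attempts: int = 5, backoff: float = 0.05) -> str:
    for attempt in range(1, attempts + 1):
        try:
            return body()                  # the whole body re-executes on conflict
        except TxAborted:
            if attempt == attempts:
                raise                      # give up after the last attempt
            time.sleep(backoff * attempt)
    raise AssertionError("unreachable")


if __name__ == "__main__":
    print(with_tx_retries(run_transfer))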
2025-06-25T14:38:13.785785Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:15.787895Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896299650964584:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.787959Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.834261Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.882323Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.917504Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.950334Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.004761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.046162Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.079886Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.134321Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896303945932536:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.134434Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.134500Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896303945932541:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.138276Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:16.147058Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896303945932543:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:16.235581Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896303945932594:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:17.649482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:17.760769Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896286766061101:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:17.760859Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpInplaceUpdate::SingleRowArithm+UseSink [GOOD] >> KqpInplaceUpdate::Negative_SingleRowWithValueCast-UseSink >> KqpPg::ExplainColumnsReorder [GOOD] >> KqpWrite::ProjectReplace-UseSink [GOOD] >> KqpImmediateEffects::ConflictingKeyW1RWR2 [GOOD] >> KqpEffects::InsertAbort_Literal_Success >> KqpImmediateEffects::MultipleEffectsWithIndex |81.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> KqpImmediateEffects::UpsertConflictInteractiveTxAborted [GOOD] |81.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |81.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |81.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpWrite::ProjectReplace-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 12308, MsgBus: 62817 2025-06-25T14:38:07.482585Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896266076484548:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:07.482652Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001838/r3tmp/tmpt8Aqw0/pdisk_1.dat 2025-06-25T14:38:07.927577Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:08.000596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:08.000691Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:08.017583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12308, node 1 
2025-06-25T14:38:08.079428Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:08.079448Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:08.079470Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:08.079621Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62817 2025-06-25T14:38:08.492627Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62817 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:08.786637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:08.805158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:08.957524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:09.167921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:09.280940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:11.245098Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896283256355323:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:11.245194Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:11.650604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:11.682168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:11.722358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:11.755318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:11.797128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:11.881179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:11.961973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.047149Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896287551323281:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.047229Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.047492Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896287551323286:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.050751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:12.059241Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896287551323288:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:12.145698Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896287551323339:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:12.484349Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896266076484548:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:12.484413Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21265, MsgBus: 20963 2025-06-25T14:38:14.696627Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896296675199700:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:14.699608Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001838/r3tmp/tmpwmpmkj/pdisk_1.dat 2025-06-25T14:38:14.826647Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21265, node 2 2025-06-25T14:38:14.905056Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:14.905167Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:14.916614Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:15.024919Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:15.024947Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:15.024959Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:15.025096Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20963 TClient is connected to server localhost:20963 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:15.572271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:15.582177Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:15.590623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:15.684484Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.738110Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:15.835093Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:15.889815Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:18.120351Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896313855070425:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.120433Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.165163Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.242856Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.288097Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.357647Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.391130Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.424640Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.456631Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.527736Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896313855071082:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.527798Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.528188Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896313855071087:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.531576Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:18.543562Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896313855071089:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:18.618208Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896313855071140:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:19.700299Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896296675199700:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:19.700384Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpImmediateEffects::Interactive [GOOD] >> KqpInplaceUpdate::Negative_BatchUpdate-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::ExplainColumnsReorder [GOOD] Test command err: Trying to start YDB, gRPC: 29541, MsgBus: 12661 2025-06-25T14:36:59.641693Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895971764416957:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:59.646754Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a3d/r3tmp/tmpYoYzA0/pdisk_1.dat 2025-06-25T14:37:00.106397Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:00.112504Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:00.112596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:00.117597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29541, node 1 2025-06-25T14:37:00.288721Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:00.288746Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:00.288755Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:00.288900Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12661 2025-06-25T14:37:00.641793Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12661 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:37:01.060054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:03.151302Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895988944286651:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:03.151480Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:03.151924Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895988944286663:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:03.156103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:03.176001Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895988944286665:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:37:03.266505Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895988944286716:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 64170, MsgBus: 17090 2025-06-25T14:37:03.978179Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895990021456819:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:03.980732Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a3d/r3tmp/tmpFEGLbR/pdisk_1.dat 2025-06-25T14:37:04.187472Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:04.202736Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:04.202805Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:04.209181Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64170, node 2 2025-06-25T14:37:04.303744Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:04.303761Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:04.303779Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:04.303885Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17090 TClient is connected to server localhost:17090 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:37:04.912686Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:37:04.921291Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:37:04.996442Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:07.455433Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896007201326605:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:07.455527Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:07.455805Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896007201326617:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:07.459673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:07.471894Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896007201326619:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:37:07.534791Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896007201326670:2334] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 63628, MsgBus: 4878 2025-06-25T14:37:08.419003Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519896010596428480:2145];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a3d/r3tmp/tmpgoN2Fz/pdisk_1.dat 2025-06-25T14:37:08.730682Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T1 ... 204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:04.705486Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:05.096484Z node 11 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17047 TClient is connected to server localhost:17047 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:06.619337Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:38:06.633022Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:09.088453Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519896251541419161:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:09.095961Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:10.790093Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896277311223557:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.790232Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.817702Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.892400Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.995305Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896277311223733:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.995487Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.996241Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896277311223740:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:11.001944Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:11.016595Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519896277311223742:2318], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T14:38:11.111531Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519896281606191091:2449] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:19.090044Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:274:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:19.090504Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:19.090739Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a3d/r3tmp/tmp6gCPY8/pdisk_1.dat 2025-06-25T14:38:19.551138Z node 12 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 12 Type# 268639257 2025-06-25T14:38:19.553574Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:19.611839Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:19.615834Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:33:2080] 1750862293919848 != 1750862293919851 2025-06-25T14:38:19.672563Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:19.672744Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:19.684584Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:19.786782Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:605:2513], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.786933Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:615:2518], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.787051Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.795137Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:19.944057Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:619:2521], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-25T14:38:19.965616Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:20.005436Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:689:2560] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } PreparedQuery: "c7002799-ffa2242e-74ac12a1-4be771a5" QueryAst: "(\n(let $1 (PgType \'int4))\n(let $2 \'(\'(\'\"_logical_id\" \'218) \'(\'\"_id\" \'\"b086359d-38c4c1b8-eb0a7608-f3741905\") \'(\'\"_partition_mode\" \'\"single\")))\n(let $3 (DqPhyStage \'() (lambda \'() (Iterator (AsList (AsStruct \'(\'\"x\" (PgConst \'1 $1)) \'(\'\"y\" (PgConst \'2 $1)))))) $2))\n(let $4 (DqCnResult (TDqOutput $3 \'\"0\") \'(\'\"y\" \'\"x\")))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($3) \'($4) \'() \'(\'(\'\"type\" \'\"generic\")))) \'((KqpTxResultBinding (ListType (StructType \'(\'\"x\" $1) \'(\'\"y\" $1))) \'\"0\" \'\"0\")) \'(\'(\'\"type\" \'\"query\"))))\n)\n" QueryPlan: "{\"Plan\":{\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":1,\"Operators\":[{\"Inputs\":[],\"Iterator\":\"[{x: \\\"1\\\",y: \\\"2\\\"}]\",\"Name\":\"Iterator\"}],\"Node Type\":\"ConstantExpr\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"ResourcePoolId\":\"default\"},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"tables\":[],\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"OptimizerStats\":{\"EquiJoinsCount\":0,\"JoinsCount\":0},\"PlanNodeType\":\"Query\"}}" YdbResults { columns { name: "y" type { pg_type { oid: 23 } } } columns { name: "x" type { pg_type { oid: 23 } } } } QueryDiagnostics: "" >> KqpPg::InsertFromSelect_Serial+useSink [GOOD] >> KqpPg::InsertFromSelect_Serial-useSink >> KqpImmediateEffects::ForceImmediateEffectsExecution-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyW1RWR2 [GOOD] Test command err: Trying to start YDB, gRPC: 22630, MsgBus: 28455 2025-06-25T14:38:08.004216Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896270783867664:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:08.004298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001834/r3tmp/tmpb3vx5Z/pdisk_1.dat 2025-06-25T14:38:08.352752Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22630, node 1 2025-06-25T14:38:08.400477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:08.401999Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:08.424584Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Connecting -> Connected 2025-06-25T14:38:08.488937Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:08.488957Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:08.488964Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:08.489060Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28455 TClient is connected to server localhost:28455 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:38:09.014229Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:09.066302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:09.091869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:09.239209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.446710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:09.552786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:11.520260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896283668771143:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:11.520376Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:11.870894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:11.928014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:11.984945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.035265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.062836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.138498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.219461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.356471Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896287963739101:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.356597Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.356949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896287963739106:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.361424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:12.410837Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896287963739108:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:12.496689Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896287963739161:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:13.005268Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896270783867664:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:13.005330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:13.551539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 30429, MsgBus: 11926 2025-06-25T14:38:14.797768Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896294766106760:2130];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:14.801268Z node 2 :METADA ... icated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:15.635216Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:15.656862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:15.665895Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:15.747480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:15.822353Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:15.886882Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:15.965485Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:18.268440Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896311945977506:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.268538Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.338585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.377094Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.410860Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.482684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.521292Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.570717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.616079Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.706950Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896311945978170:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.707020Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.707078Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896311945978175:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.711578Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:18.732367Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896311945978177:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:18.810117Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896311945978228:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:19.798545Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896294766106760:2130];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:19.798606Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:19.918658Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.512878Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because it cannot acquire locks;tx_id=6; 2025-06-25T14:38:20.522183Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 6 at tablet 72075186224037922 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-25T14:38:20.522348Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 6 at tablet 72075186224037922 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-25T14:38:20.522583Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [2:7519896320535913358:2476], Table: `/Root/TestImmediateEffects` ([72057594046644480:17:1]), SessionActorId: [2:7519896316240945797:2476]Got LOCKS BROKEN for table `/Root/TestImmediateEffects`. ShardID=72075186224037922, Sink=[2:7519896320535913358:2476].{
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } 2025-06-25T14:38:20.523085Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519896320535913322:2476], SessionActorId: [2:7519896316240945797:2476], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001
: Error: Operation is aborting because it cannot acquire locks, code: 2001 . sessionActorId=[2:7519896316240945797:2476]. isRollback=0 2025-06-25T14:38:20.523377Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=ZjFkZTJkNTItZDYwMGMzMGEtZjRlYTViNDAtNGFkOGJhMTg=, ActorId: [2:7519896316240945797:2476], ActorState: ExecuteState, TraceId: 01jykrd16afjsckbg03hsyyjf5, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519896320535913352:2476] from: [2:7519896320535913322:2476] 2025-06-25T14:38:20.523465Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519896320535913352:2476] TxId: 281474976715676. Ctx: { TraceId: 01jykrd16afjsckbg03hsyyjf5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjFkZTJkNTItZDYwMGMzMGEtZjRlYTViNDAtNGFkOGJhMTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001 subissue: {
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } } 2025-06-25T14:38:20.523722Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZjFkZTJkNTItZDYwMGMzMGEtZjRlYTViNDAtNGFkOGJhMTg=, ActorId: [2:7519896316240945797:2476], ActorState: ExecuteState, TraceId: 01jykrd16afjsckbg03hsyyjf5, Create QueryResponse for error on request, msg: >> KqpImmediateEffects::ManyFlushes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::UpsertConflictInteractiveTxAborted [GOOD] Test command err: Trying to start YDB, gRPC: 6128, MsgBus: 16349 2025-06-25T14:38:08.329048Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896269485720117:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:08.343659Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00182b/r3tmp/tmp6nn2vF/pdisk_1.dat 2025-06-25T14:38:08.842325Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896269485720074:2080] 1750862288303442 != 1750862288303445 2025-06-25T14:38:08.851078Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:08.856429Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:08.856532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 6128, node 1 2025-06-25T14:38:08.884957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:08.969461Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:08.969484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:08.969491Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:08.969601Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16349 2025-06-25T14:38:09.363041Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16349 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:10.003001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:10.027222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:10.039081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:10.292283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:10.532175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:10.667065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:12.356694Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896286665590905:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.356783Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.755125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.783373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.809601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.877346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.945696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:13.020665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:13.056466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:13.135508Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896290960558873:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:13.135570Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:13.135728Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896290960558878:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:13.138379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:13.145868Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896290960558880:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:13.216243Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896290960558931:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:13.331266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896269485720117:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:13.331324Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:14.203619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896299108847526:2235];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00182b/r3tmp/tmp8m7GRF/pdisk_1.dat 2025-06-25T14:38:15.460714Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:38:15.562261Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:15.568094Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896299108847316:2080] 1750862295355658 != 1750862295355661 2025-06-25T14:38:15.583158Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:15.583233Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:15.585581Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14574, node 2 2025-06-25T14:38:15.625354Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:15.625372Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:15.625379Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:15.625482Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32141 TClient is connected to server localhost:32141 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:16.077643Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:16.083220Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:16.094956Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:16.176809Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.295930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:16.380565Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:16.384092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:18.701852Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896311993750831:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.701943Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.765410Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.808356Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.844832Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.878837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.963000Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.017724Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.062260Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.123473Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896316288718785:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.123619Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.123815Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896316288718790:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.132815Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:19.156853Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896316288718792:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:19.252291Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896316288718843:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:20.314070Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.382566Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896299108847526:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:20.382640Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> SystemView::ShowCreateTableKeyBloomFilter [GOOD] >> SystemView::ShowCreateTable >> KqpInplaceUpdate::SingleRowIf+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::Negative_BatchUpdate-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 24557, MsgBus: 17635 2025-06-25T14:38:08.234181Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896269332899828:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:08.234479Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00182a/r3tmp/tmpBzQItv/pdisk_1.dat 2025-06-25T14:38:08.674439Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:08.688663Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896269332899711:2080] 1750862288208426 != 1750862288208429 2025-06-25T14:38:08.734832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:08.734984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:08.738659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24557, node 1 2025-06-25T14:38:08.866691Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:08.866724Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:08.866735Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:08.866939Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:17635 2025-06-25T14:38:09.217397Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17635 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:09.899983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:09.926742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:09.940189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:10.143564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:10.303956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:10.410009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:12.117788Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896286512770537:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.117904Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.489033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.532770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.570051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.641256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.672353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.704027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.736693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.822695Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896286512771197:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.822770Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.823177Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896286512771202:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.826993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:12.836255Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896286512771204:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:12.931471Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896286512771255:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:13.232699Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896269332899828:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:13.232818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:13.884615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 06-25T14:38:15.423665Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00182a/r3tmp/tmp88HVMq/pdisk_1.dat 2025-06-25T14:38:15.508553Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:15.512470Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896296998635895:2080] 1750862295283637 != 1750862295283640 2025-06-25T14:38:15.535249Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:15.535335Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:15.540101Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25745, node 2 2025-06-25T14:38:15.635973Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:15.636001Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:15.636009Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:15.636121Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18584 TClient is connected to server localhost:18584 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:16.119841Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:16.128205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:16.142033Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:16.211222Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.338478Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:16.377417Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:16.403500Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:18.785574Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896309883539418:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.785661Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.843210Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.914009Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.000387Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.040652Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.085172Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.173513Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.236239Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.322384Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896314178507375:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.322466Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.322698Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896314178507380:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.327020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:19.340540Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:38:19.341035Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896314178507382:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:19.396599Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896314178507433:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:20.354109Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896296998636115:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:20.354182Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:20.723757Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::Interactive [GOOD] Test command err: Trying to start YDB, gRPC: 17415, MsgBus: 7355 2025-06-25T14:38:08.000648Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896269237315564:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:08.000751Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001836/r3tmp/tmpHq1QNV/pdisk_1.dat 2025-06-25T14:38:08.395133Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:08.398205Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896264942348039:2080] 1750862287966417 != 1750862287966420 2025-06-25T14:38:08.410508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:08.410617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 17415, node 1 2025-06-25T14:38:08.412666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:08.526662Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:08.526682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:08.526689Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:08.526793Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7355 2025-06-25T14:38:08.930228Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7355 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:09.293234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:09.331482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:09.339168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:09.550504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:09.908916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:10.119336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:11.909439Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896282122218870:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:11.909560Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.401613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.463676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.504547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.542349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.592920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.636775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.677780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.742392Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896286417186825:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.742470Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.742743Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896286417186830:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.746797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:12.763721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:38:12.763964Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896286417186832:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:12.842096Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896286417186883:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:13.001046Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896269237315564:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:13.001140Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:13.971351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo ... :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896298125522891:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:15.568567Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001836/r3tmp/tmpejm6DY/pdisk_1.dat 2025-06-25T14:38:15.734950Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:15.740760Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896298125522860:2080] 1750862295551935 != 1750862295551938 2025-06-25T14:38:15.754466Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:15.754548Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:15.759325Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13768, node 2 2025-06-25T14:38:15.816901Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:15.816924Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:15.816931Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:15.817044Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18531 TClient is connected to server localhost:18531 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:16.286641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:16.291609Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:16.301020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:16.358079Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:16.512348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:16.570311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:16.582560Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:18.694929Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896311010426365:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.695037Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.761687Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.806005Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.844941Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.882225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.915113Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.988370Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.041344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.155213Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896315305394324:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.155324Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.155632Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896315305394329:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.159753Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:19.196051Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896315305394331:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:19.277619Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896315305394382:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:20.315518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.555626Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896298125522891:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:20.555714Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpPg::TableArrayInsert-useSink [GOOD] >> KqpPg::Returning+useSink >> KqpImmediateEffects::DeleteAfterInsert [GOOD] >> KqpImmediateEffects::ReplaceExistingKey >> KqpImmediateEffects::ForceImmediateEffectsExecution+UseSink [GOOD] >> KqpPg::CreateTableIfNotExists_GenericQuery [GOOD] >> KqpPg::AlterColumnSetDefaultFromSequenceWithSchemaname >> KqpWrite::InsertRevert >> KqpImmediateEffects::Replace [GOOD] >> KqpImmediateEffects::ReplaceDuplicates ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ManyFlushes [GOOD] Test command err: Trying to start YDB, gRPC: 6891, MsgBus: 24904 2025-06-25T14:38:08.458679Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896268111511370:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:08.458726Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00182c/r3tmp/tmpF8nxI5/pdisk_1.dat 2025-06-25T14:38:08.927854Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:08.928440Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896268111511347:2080] 1750862288456174 != 1750862288456177 2025-06-25T14:38:08.959853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:08.959950Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:08.962921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6891, node 1 2025-06-25T14:38:09.078034Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:09.078070Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from 
file: (empty maybe) 2025-06-25T14:38:09.078091Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:09.078407Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24904 2025-06-25T14:38:09.483581Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24904 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:09.874226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:09.920966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:10.110919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:10.291686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:10.387322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:12.182008Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896285291382156:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.182097Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:12.701986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.733556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.773983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.802656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.839604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.914349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.988296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:13.081717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896289586350122:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:13.081807Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:13.082003Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896289586350127:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:13.086289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:13.097808Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896289586350129:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:13.189448Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896289586350182:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:13.459089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896268111511370:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:13.459141Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:14.220648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 12617, MsgBus: 15951 2025-06-25T14:38:15.693191Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896298330883738:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:15.693244Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00182c/r3tmp/tmpuLwrwm/pdisk_1.dat 2025-06-25T14:38:15.892428Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896298330883720:2080] 1750862295687032 != 1750862295687035 2025-06-25T14:38:15.900745Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12617, node 2 2025-06-25T14:38:15.917506Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:15.917592Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:15.928338Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:15.982718Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:15.982745Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:15.982752Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:15.982869Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15951 TClient is connected to server localhost:15951 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:16.412389Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:16.424100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:16.500512Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:16.655229Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:16.723031Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:16.728460Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:19.191120Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896315510754524:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.191202Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.277016Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.318815Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.403402Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.449107Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.494284Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.577482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.618475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.709658Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896315510755184:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.709711Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.709962Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896315510755189:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:19.714095Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:19.732358Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896315510755191:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:19.831176Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896315510755242:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:20.712737Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896298330883738:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:20.712849Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:20.931862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpPg::DeleteWithQueryService+useSink [GOOD] >> KqpPg::DeleteWithQueryService-useSink >> KqpEffects::InsertAbort_Params_Success >> KqpImmediateEffects::InsertDuplicates-UseSink >> KqpEffects::AlterAfterUpsertBeforeUpsertTransaction+UseSink [GOOD] >> KqpEffects::AlterAfterUpsertBeforeUpsertSelectTransaction+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::SingleRowIf+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 19281, MsgBus: 25844 2025-06-25T14:38:10.484710Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896275797787964:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:10.498343Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001820/r3tmp/tmp1wkOvT/pdisk_1.dat 2025-06-25T14:38:10.933561Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:10.937104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:10.937186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:10.955175Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896275797787939:2080] 1750862290473719 != 1750862290473722 2025-06-25T14:38:10.972170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19281, node 1 2025-06-25T14:38:11.072941Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:11.072962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:11.072972Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from 
file: (empty maybe) 2025-06-25T14:38:11.073086Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25844 2025-06-25T14:38:11.504938Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25844 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:11.726769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:11.760304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:11.966303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:12.124291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:12.213589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:13.920300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896288682691470:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:13.920407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:14.191138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.218855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.248602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.276857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.313415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.374897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.400997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.453009Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896292977659423:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:14.453092Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:14.453237Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896292977659428:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:14.456536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:14.467086Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896292977659430:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:14.527257Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896292977659481:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:15.485545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896275797787964:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:15.485629Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:15.779789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 14737, MsgBus: 19799 2025-06-25T14:38:17.122768Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896308553083989:2087];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:17.129706Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001820/r3tmp/tmpdAR1nX/pdisk_1.dat 2025-06-25T14:38:17.328987Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:17.342789Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:17.342874Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:17.346537Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14737, node 2 2025-06-25T14:38:17.408903Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:17.408931Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:17.408939Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:17.409059Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19799 TClient is connected to server localhost:19799 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:38:17.922467Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:17.942587Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.023797Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:18.142959Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:18.248049Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:18.322897Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:20.471877Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896321437987446:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:20.471961Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:20.526293Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.590273Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.636102Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.674870Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.719829Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.795391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.879754Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.948772Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896321437988115:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:20.948864Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:20.949255Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896321437988120:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:20.953517Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:20.974397Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896321437988122:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:21.067646Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896325732955469:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:22.123051Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896308553083989:2087];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:22.123232Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:22.166462Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
>> KqpInplaceUpdate::SingleRowPgNotNull+UseSink [GOOD]
>> KqpEffects::UpdateOn_Select
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::DeleteAfterInsert [GOOD]
Test command err: Trying to start YDB, gRPC: 1922, MsgBus: 16403 2025-06-25T14:38:10.573739Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896277603060474:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:10.573804Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00181e/r3tmp/tmpGJ4aqv/pdisk_1.dat 2025-06-25T14:38:10.950915Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1922, node 1 2025-06-25T14:38:11.021180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:11.021270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:11.035829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:11.149099Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:11.149127Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:11.149138Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:11.149325Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16403 TClient is connected to server localhost:16403 WaitRootIsUp 'Root'...
TClient::Ls request: Root 2025-06-25T14:38:11.594444Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:11.718969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:11.736454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:11.746507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:11.923058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:12.132552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:12.247293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:14.060073Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896294782931258:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:14.060179Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:14.310092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.384588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.420164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.450874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.480972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.521946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.592055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.644881Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896294782931921:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:14.644955Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:14.645197Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896294782931926:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:14.649259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:14.664039Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896294782931928:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:14.759641Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896294782931979:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:15.573840Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896277603060474:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:15.573929Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:15.878004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 27650, MsgBus: 13485 2025-06-25T14:38:17.208233Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896309286694838:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:17.208322Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00181e/r3tmp/tmpDXFWRG/pdisk_1.dat 2025-06-25T14:38:17.345233Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:17.356454Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896309286694818:2080] 1750862297205832 != 1750862297205835 TServer::EnableGrpc on GrpcPort 27650, node 2 2025-06-25T14:38:17.378168Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:17.378246Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:17.379962Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:17.504832Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:17.504858Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:17.504867Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:17.504983Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13485 TClient is connected to server localhost:13485 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:18.230008Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:18.232648Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:18.248784Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:18.328817Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:18.512789Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:18.598567Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:20.772996Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896322171598342:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:20.773069Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:20.854455Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.919695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.953821Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:20.995141Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.042509Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.099637Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.151157Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.254213Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896326466566293:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.254318Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.254600Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896326466566298:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.258775Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:21.278897Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896326466566300:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:21.342591Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896326466566351:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:22.210608Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896309286694838:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:22.210696Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:22.432648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ForceImmediateEffectsExecution+UseSink [GOOD]
Test command err: Trying to start YDB, gRPC: 23862, MsgBus: 3585 2025-06-25T14:38:06.461392Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896261993982634:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:06.461769Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183c/r3tmp/tmp8IcxOg/pdisk_1.dat 2025-06-25T14:38:06.984131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:06.984268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:06.989314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:07.007805Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:07.012599Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896261993982467:2080] 1750862286395397 != 1750862286395400 TServer::EnableGrpc on GrpcPort 23862, node 1 2025-06-25T14:38:07.144889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:07.144914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:07.144922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:07.145088Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3585 2025-06-25T14:38:07.472520Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event
for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3585 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:07.782858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:07.800213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:38:07.824436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:08.036487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:08.244370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:38:08.329255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.209203Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896279173853297:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.209320Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.554471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.585520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.643315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.680021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.713929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.750057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.796381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.911499Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896279173853959:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.911580Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.911850Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896279173853964:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.915102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:10.924004Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896279173853966:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:11.012512Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896283468821313:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:11.459362Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896261993982634:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:11.459435Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:12.069592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... art proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 10992, MsgBus: 7080 2025-06-25T14:38:17.774009Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896306826004902:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:17.782891Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183c/r3tmp/tmpfbHND8/pdisk_1.dat 2025-06-25T14:38:18.008499Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:18.008602Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:18.009064Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:18.021022Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10992, node 2 2025-06-25T14:38:18.100699Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:18.100725Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:18.100733Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:18.100859Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7080 TClient is connected to server localhost:7080 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:18.629845Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:18.648819Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:18.726806Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:18.814661Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:18.911497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:18.994603Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:21.133349Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896324005875694:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.133480Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.190782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.251753Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.305991Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.375562Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.405614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.472604Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.520560Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.575529Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896324005876356:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.575601Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.575811Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896324005876361:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.578832Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:21.586568Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896324005876363:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:21.644252Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896324005876414:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:22.773923Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896306826004902:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:22.774319Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:22.802065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpInplaceUpdate::SingleRowSimple+UseSink >> KqpEffects::AlterAfterUpsertBeforeUpsertTransaction-UseSink [GOOD] >> KqpImmediateEffects::Upsert [GOOD] >> KqpImmediateEffects::UpdateOn >> KqpImmediateEffects::UpsertAfterInsert >> KqpEffects::InsertAbort_Select_Duplicates+UseSink [GOOD] >> KqpEffects::InsertAbort_Select_Conflict+UseSink >> KqpImmediateEffects::ConflictingKeyRW1RWR2 [GOOD] >> KqpPg::JoinWithQueryService-StreamLookup [GOOD] >> KqpPg::PgAggregate+useSink >> KqpInplaceUpdate::Negative_SingleRowWithKeyCast-UseSink [GOOD] >> KqpInplaceUpdate::Negative_SingleRowWithValueCast+UseSink >> KqpEffects::InsertAbort_Select_Success ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::SingleRowPgNotNull+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 24818, MsgBus: 19077 2025-06-25T14:38:11.391588Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896283124875736:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:11.394775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001815/r3tmp/tmpKtR90x/pdisk_1.dat 2025-06-25T14:38:11.762486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:11.762602Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:11.785411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:11.867893Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896283124875552:2080] 1750862291372218 != 1750862291372221 2025-06-25T14:38:11.896384Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24818, node 1 2025-06-25T14:38:12.036852Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:12.036874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:12.036882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:12.037009Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:12.384487Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19077 TClient is connected to server localhost:19077 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:12.885679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:12.912244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:12.926044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:38:13.074278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:13.223922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:13.300235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:15.017225Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896300304746368:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.017330Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.326121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.366795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.406853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.462538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.492923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.529461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.577250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:15.679150Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896300304747023:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.679259Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.679517Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896300304747028:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:15.683272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:15.693627Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896300304747030:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:15.792650Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896300304747081:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:16.389676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896283124875736:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:16.389754Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:16.886193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 14990, MsgBus: 64981 2025-06-25T14:38:18.456716Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896311044952773:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:18.456764Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001815/r3tmp/tmpQwMFvh/pdisk_1.dat 2025-06-25T14:38:18.598697Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14990, node 2 2025-06-25T14:38:18.614749Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:18.614838Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:18.616163Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:18.672820Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:18.672845Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:18.672854Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:18.672966Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64981 TClient is connected to server localhost:64981 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:19.245535Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:19.253574Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:19.266885Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:19.351191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:19.459154Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:19.502085Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:19.586659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:22.069439Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896328224823575:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.069515Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.137126Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.180364Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.221619Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.254650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.297247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.374977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.474129Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.547579Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896328224824242:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.547652Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.547789Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896328224824247:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.551151Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:22.561829Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896328224824249:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:22.613627Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896328224824300:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:23.456962Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896311044952773:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:23.457026Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:23.778894Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TSchemeShardUserAttrsTest::UserConditionsAtCreateDropOps |81.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest |81.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> KqpInplaceUpdate::Negative_SingleRowWithValueCast-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::AlterAfterUpsertBeforeUpsertTransaction-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 27756, MsgBus: 23451 2025-06-25T14:38:04.344878Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896250119003334:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:04.379922Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001845/r3tmp/tmp3fuih7/pdisk_1.dat 2025-06-25T14:38:04.948986Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896250119003304:2080] 1750862284291355 != 1750862284291358 2025-06-25T14:38:04.954266Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:05.006590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:05.006671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:05.021654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27756, node 1 2025-06-25T14:38:05.296461Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:05.382116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:05.382145Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:05.382152Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:05.382259Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23451 TClient is connected to server localhost:23451 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:06.852200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:06.881613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:07.169221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:07.443501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:07.528830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:09.241423Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896271593841429:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.241544Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:09.352463Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896250119003334:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:09.362676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:09.662245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.716271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.781894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.862922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:09.975417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.035466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.086643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:10.212280Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519896275888809384:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.212430Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.212915Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896275888809389:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:10.216974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:10.232884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:38:10.233055Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896275888809391:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:10.333791Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896275888809444:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:11.354652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... mismatch for subscription [3:7519896311966010192:2080] 1750862298946464 != 1750862298946467 2025-06-25T14:38:19.233958Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:19.234032Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:19.241371Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1736, node 3 2025-06-25T14:38:19.333264Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:19.333282Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:19.333290Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:19.333400Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31207 TClient is connected to server localhost:31207 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:19.920082Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:38:19.926350Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:19.937699Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:20.008620Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:20.045071Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:20.233578Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:20.310470Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:22.720477Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896329145881015:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.720586Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.790093Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.826566Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.861990Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.918861Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.949271Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.988145Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.022594Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.086955Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896333440848972:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.087049Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.087101Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896333440848977:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.092579Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:23.102825Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519896333440848979:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:23.170456Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519896333440849030:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:23.976293Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519896311966010409:2240];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:23.976377Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:24.513516Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.795801Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:38:24.980296Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ODI0MDU0MWMtZjA5YTUyNGEtOTA1MWJhM2ItMTEwYTZjMWU=, ActorId: [3:7519896337735816556:2465], ActorState: ExecuteState, TraceId: 01jykrd5pe8hhmgsmp3d7z7gka, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyRW1RWR2 [GOOD] Test command err: Trying to start YDB, gRPC: 18028, MsgBus: 25010 2025-06-25T14:38:13.364603Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896289525742011:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:13.364640Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001814/r3tmp/tmpzxEO6P/pdisk_1.dat 2025-06-25T14:38:13.880290Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:13.883776Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896289525741992:2080] 1750862293361654 != 1750862293361657 TServer::EnableGrpc on GrpcPort 18028, node 1 2025-06-25T14:38:13.894786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:13.894872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:13.901846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Connecting -> Connected 2025-06-25T14:38:13.962018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:13.962038Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:13.962050Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:13.962194Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25010 TClient is connected to server localhost:25010 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:38:14.392373Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:14.480682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:14.508777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:14.625869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:14.779456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:14.859655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:16.434381Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896302410645524:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.434472Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:16.754390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.790996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.834789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.872405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.919395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.956149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:16.986919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:17.073227Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896306705613478:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:17.073296Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:17.073549Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896306705613483:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:17.076595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:17.085821Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896306705613485:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:17.142812Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896306705613536:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:18.206198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.365039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896289525742011:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:18.365123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 31937, MsgBus: 25877 2025-06-25T14:38:19.705519Z node 2 :METADATA_PROVIDER WARN ... icated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:20.561626Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:20.567986Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:20.581822Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:20.716771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:20.736402Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:20.915732Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:20.993527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:22.905261Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896331082183039:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.905337Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.957866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.989355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.030554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.101768Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.149041Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.188261Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.277078Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.374642Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896335377151000:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.374758Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.375084Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896335377151005:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.379473Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:23.392206Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896335377151007:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:23.452438Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896335377151058:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:24.479791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.705901Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896318197279562:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:24.705979Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:25.135370Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because it cannot acquire locks;tx_id=7; 2025-06-25T14:38:25.146222Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 7 at tablet 72075186224037922 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-25T14:38:25.146424Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 7 at tablet 72075186224037922 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-25T14:38:25.146693Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [2:7519896343967086193:2475], Table: `/Root/TestImmediateEffects` ([72057594046644480:17:1]), SessionActorId: [2:7519896339672118625:2475]Got LOCKS BROKEN for table `/Root/TestImmediateEffects`. ShardID=72075186224037922, Sink=[2:7519896343967086193:2475].{
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } 2025-06-25T14:38:25.147383Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519896339672118863:2475], SessionActorId: [2:7519896339672118625:2475], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001
: Error: Operation is aborting because it cannot acquire locks, code: 2001 . sessionActorId=[2:7519896339672118625:2475]. isRollback=0 2025-06-25T14:38:25.147687Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=NWI4ZGVkNGMtOTExMzBhOTEtMjgyYjU2MTEtMmMzNGU0OWE=, ActorId: [2:7519896339672118625:2475], ActorState: ExecuteState, TraceId: 01jykrd5ra15nkm7afm59yk012, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519896343967086187:2475] from: [2:7519896339672118863:2475] 2025-06-25T14:38:25.147780Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519896343967086187:2475] TxId: 281474976710677. Ctx: { TraceId: 01jykrd5ra15nkm7afm59yk012, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWI4ZGVkNGMtOTExMzBhOTEtMjgyYjU2MTEtMmMzNGU0OWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001 subissue: {
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } } 2025-06-25T14:38:25.148031Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NWI4ZGVkNGMtOTExMzBhOTEtMjgyYjU2MTEtMmMzNGU0OWE=, ActorId: [2:7519896339672118625:2475], ActorState: ExecuteState, TraceId: 01jykrd5ra15nkm7afm59yk012, Create QueryResponse for error on request, msg: >> KqpImmediateEffects::InteractiveTxWithReadAtTheEnd+UseSink [GOOD] >> KqpImmediateEffects::InteractiveTxWithReadAtTheEnd-UseSink >> KqpEffects::InsertAbort_Literal_Success [GOOD] >> KqpEffects::InsertAbort_Params_Conflict+UseSink >> TSchemeShardUserAttrsTest::UserConditionsAtCreateDropOps [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::Negative_SingleRowWithValueCast-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 24585, MsgBus: 24311 2025-06-25T14:38:14.192621Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896294631079761:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:14.192758Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001813/r3tmp/tmpFWhWbO/pdisk_1.dat 2025-06-25T14:38:14.508293Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24585, node 1 2025-06-25T14:38:14.580973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:14.581004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:14.581013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:14.581161Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:14.616906Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:14.617061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:14.623269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24311 TClient is connected to server localhost:24311 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:38:15.222277Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:15.350692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:15.398661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:15.556018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:15.716512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:15.782935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:17.234615Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896307515983258:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:17.234705Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:17.640541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:17.694956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:17.746543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:17.792844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:17.832031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:17.923425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:17.986680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:18.059746Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896311810951215:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.059827Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.060179Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896311810951220:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:18.063898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:18.082831Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896311810951222:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:18.160610Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896311810951273:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:19.198744Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896294631079761:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:19.199698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:19.315046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 30531, MsgBus: 18814 2025-06-25T14:38:20.924194Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896320339043530:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:20.958744Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001813/r3tmp/tmpyigbaK/pdisk_1.dat 2025-06-25T14:38:21.085082Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:21.085165Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:21.087408Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:21.090976Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:21.096525Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896320339043340:2080] 1750862300864915 != 1750862300864918 TServer::EnableGrpc on GrpcPort 30531, node 2 2025-06-25T14:38:21.193056Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:21.193078Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:21.193085Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:21.193184Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18814 TClient is connected to server localhost:18814 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:21.641376Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:21.662047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:21.724341Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:21.884455Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:21.888457Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:21.996807Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:23.880136Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896333223946875:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.880217Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.941948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.981523Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.055796Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.091787Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.123863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.211866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.248742Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.375356Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896337518914842:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:24.375434Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:24.375598Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896337518914847:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:24.378623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:24.389083Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896337518914849:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:24.489327Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896337518914900:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:25.719320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.879158Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896320339043530:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:25.879209Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TSchemeShardUserAttrsTest::VariousUse ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::UserConditionsAtCreateDropOps [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:38:27.196618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:38:27.196726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:27.196773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:38:27.196811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:38:27.196879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:38:27.196914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:38:27.196975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:27.197046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:38:27.197841Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:38:27.198223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:38:27.265494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:38:27.265573Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:27.284217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:38:27.284643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:38:27.284789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:38:27.290778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:38:27.291099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:38:27.291744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:27.291991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:38:27.295556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:27.295751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:38:27.296894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:27.296957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:27.297081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:38:27.297127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:27.297172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:38:27.297268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:38:27.306164Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:38:27.456437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T14:38:27.456729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:27.456952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:38:27.456998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:38:27.457266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:38:27.457348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:27.461585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:27.461840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:38:27.462058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:27.462124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:38:27.462191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:38:27.462254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:38:27.464719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:27.464778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:38:27.464814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:38:27.466476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:27.466514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:27.466571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:27.466630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:38:27.469226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:38:27.471123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:38:27.471319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:38:27.472270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:27.472445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:27.472497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:27.472861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:38:27.472919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:27.473136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:27.473250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:38:27.477706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:27.477782Z node 1 :FLAT_TX_SCHEMESHARD ... 
is published: false 2025-06-25T14:38:27.617017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-25T14:38:27.617066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:0 2025-06-25T14:38:27.617094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 105:0 2025-06-25T14:38:27.617149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:38:27.617182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 105, publications: 2, subscribers: 0 2025-06-25T14:38:27.617210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-25T14:38:27.617236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 105, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-25T14:38:27.617727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:38:27.618074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:38:27.620026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:27.620087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:27.620290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-25T14:38:27.620451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:27.620488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 105, path id: 1 2025-06-25T14:38:27.620530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 105, path id: 4 FAKE_COORDINATOR: Erasing txId 105 2025-06-25T14:38:27.621183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:38:27.621267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:38:27.621300Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:38:27.621360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-25T14:38:27.621418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-25T14:38:27.621897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:38:27.621981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:38:27.622008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:38:27.622041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-25T14:38:27.622074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-25T14:38:27.622155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-25T14:38:27.622361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:38:27.622404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-25T14:38:27.622481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:38:27.624795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:38:27.627894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:38:27.628027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-25T14:38:27.628437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-25T14:38:27.628482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-25T14:38:27.629067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-25T14:38:27.629159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:38:27.629197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:408:2397] TestWaitNotification: OK eventTxId 105 2025-06-25T14:38:27.629894Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirC" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:38:27.630107Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirC" took 254us result status StatusPathDoesNotExist 2025-06-25T14:38:27.630294Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/DirC\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/DirC" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:38:27.630922Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:38:27.631176Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 281us result status StatusSuccess 2025-06-25T14:38:27.631762Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 9 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardUserAttrsTest::SetAttrs |81.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterSubdomain |81.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> KqpImmediateEffects::ForceImmediateEffectsExecution-UseSink [GOOD] >> KqpImmediateEffects::ImmediateUpdate >> Viewer::JsonStorageListingV1GroupIdFilter [GOOD] >> Viewer::JsonStorageListingV1NodeIdFilter >> KqpPg::InsertFromSelect_Serial-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder+useSink >> TSchemeShardUserAttrsTest::VariousUse [GOOD] >> TSchemeShardUserAttrsTest::SetAttrs [GOOD] >> KqpImmediateEffects::ReplaceDuplicates [GOOD] >> TSchemeShardAuditSettings::CreateSubdomain |81.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut |81.9%| [LD] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut |81.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut >> KqpImmediateEffects::ReplaceExistingKey [GOOD] >> KqpImmediateEffects::TxWithReadAtTheEnd+UseSink >> TSchemeShardAuditSettings::CreateExtSubdomain >> KqpEffects::InsertAbort_Params_Success [GOOD] >> KqpEffects::InsertAbort_Params_Duplicates+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::VariousUse [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:38:28.778155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:38:28.778275Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:28.778321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:38:28.778353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:38:28.778401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:38:28.778431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:38:28.778481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:28.778548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:38:28.779309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:38:28.779638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:38:28.859019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:38:28.859099Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:28.875378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:38:28.875791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:38:28.875934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:38:28.881486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:38:28.881804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:38:28.882497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:28.882748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:38:28.886000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:28.886188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:38:28.887336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:28.887396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:28.887530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:38:28.887576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:28.887619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:38:28.887700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:38:28.894208Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:38:29.011576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:38:29.011820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.012011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:38:29.012054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:38:29.012284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:38:29.012387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:29.014782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:29.014957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:38:29.015149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.015219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:38:29.015261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:38:29.015318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:38:29.017519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.017597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:38:29.017646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:38:29.020909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.020968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.021024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:29.021081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:38:29.023693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:38:29.025449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:38:29.025607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:38:29.026305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:29.026419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:29.026460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:29.026737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:38:29.026776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T14:38:29.026938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:29.027016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:38:29.033545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:29.033607Z node 1 :FLAT_TX_SCHEMESHARD ... shToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-25T14:38:29.292857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:29.292894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:29.293048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:38:29.293121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-25T14:38:29.293257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:29.293289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 1 2025-06-25T14:38:29.293351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 3 2025-06-25T14:38:29.293379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 4 FAKE_COORDINATOR: Erasing txId 112 2025-06-25T14:38:29.294055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:29.294138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:29.294172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 112 2025-06-25T14:38:29.294219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], 
version: 9 2025-06-25T14:38:29.294261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:38:29.294810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:29.294877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:29.294905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 112 2025-06-25T14:38:29.294931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-25T14:38:29.294958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:38:29.296159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:29.296227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:29.296253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 112 2025-06-25T14:38:29.296278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-25T14:38:29.296304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-25T14:38:29.296396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 112, subscribers: 0 2025-06-25T14:38:29.296620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:38:29.296665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-25T14:38:29.296775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:38:29.298795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-25T14:38:29.299147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-25T14:38:29.302239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-25T14:38:29.302358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 112, wait until txId: 112 TestWaitNotification wait txId: 112 2025-06-25T14:38:29.302760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 112: send EvNotifyTxCompletion 2025-06-25T14:38:29.302800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 112 2025-06-25T14:38:29.303403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 112, at schemeshard: 72057594046678944 2025-06-25T14:38:29.303503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2025-06-25T14:38:29.303537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [1:500:2489] TestWaitNotification: OK eventTxId 112 2025-06-25T14:38:29.304262Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:38:29.304528Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirB" took 262us result status StatusSuccess 2025-06-25T14:38:29.304914Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirB" PathDescription { Self { Name: "DirB" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000008 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 
MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrB1" Value: "ValB1" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 113 2025-06-25T14:38:29.307991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "DirB" } ApplyIf { PathId: 2 PathVersion: 8 } ApplyIf { PathId: 3 PathVersion: 7 } ApplyIf { PathId: 4 PathVersion: 3 } } TxId: 113 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:38:29.308170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:28: TRmDir Propose, path: /MyRoot/DirB, pathId: 0, opId: 113:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.308293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 113:1, propose status:StatusPreconditionFailed, reason: fail user constraint: ApplyIf section: no path with id [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-25T14:38:29.310780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 113, response: Status: StatusPreconditionFailed Reason: "fail user constraint: ApplyIf section: no path with id [OwnerId: 72057594046678944, LocalPathId: 4]" TxId: 113 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:29.311020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 113, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: fail user constraint: ApplyIf section: no path with id [OwnerId: 72057594046678944, LocalPathId: 4], operation: DROP DIRECTORY, path: /MyRoot/DirB TestModificationResult got TxId: 113, wait until txId: 113 |81.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_user_attributes/unittest >> TSchemeShardUserAttrsTest::SetAttrs [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:38:29.079610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:38:29.079719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:29.079755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:38:29.079782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:38:29.079817Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:38:29.079842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:38:29.079878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:29.079933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:38:29.080562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:38:29.080803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:38:29.150090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:38:29.150151Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:29.169119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:38:29.169565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:38:29.169713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:38:29.176175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:38:29.176582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:38:29.177241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:29.177513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:38:29.181568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:29.181730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:38:29.182666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:29.182742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:29.182911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:38:29.182966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:29.183015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:38:29.183097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.189533Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:38:29.322791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:38:29.323004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.323191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:38:29.323244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:38:29.323486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:38:29.323542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:29.329504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:29.329749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:38:29.329975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.330033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:38:29.330085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:38:29.330149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:38:29.335028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.335110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:38:29.335155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:38:29.341500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.341593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.341663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:29.341729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:38:29.345646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:38:29.349169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:38:29.349373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:38:29.350393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:29.350553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:29.350607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:29.350930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:38:29.350993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:29.351179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:29.351291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:38:29.354908Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:29.354965Z node 1 :FLAT_TX_SCHEMESHARD ... .cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:29.458811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 103:0 type: TxAlterUserAttributes target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:38:29.458960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:38:29.459036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 103:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-25T14:38:29.465181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:29.465355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, subject: , status: StatusAccepted, operation: ALTER USER ATTRIBUTES, path: MyRoot 2025-06-25T14:38:29.465538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.465587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_user_attrs.cpp:97: TAlterUserAttrs ProgressState, opId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.465633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 103 ready parts: 1/1 2025-06-25T14:38:29.465750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 103 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:38:29.468238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816 2025-06-25T14:38:29.468418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 2025-06-25T14:38:29.468809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:29.468928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } 
Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:29.468982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_user_attrs.cpp:114: TAlterUserAttrs HandleReply TEvOperationPlan, opId: 103:0, stepId:5000004, at schemeshard: 72057594046678944 2025-06-25T14:38:29.469197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:38:29.469263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:38:29.469303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:38:29.469335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:38:29.469399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:38:29.469466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-25T14:38:29.469525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:38:29.469592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:38:29.469632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:38:29.469681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:38:29.469743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:38:29.469782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 103, publications: 1, subscribers: 0 2025-06-25T14:38:29.469814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 6 2025-06-25T14:38:29.476272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:29.476366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:29.476620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:29.476673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 1 FAKE_COORDINATOR: Erasing txId 103 2025-06-25T14:38:29.477400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 
72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:38:29.477516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:38:29.477555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:38:29.477598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 6 2025-06-25T14:38:29.477643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:38:29.477751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-25T14:38:29.489225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:38:29.489559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:38:29.489624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:38:29.490146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:38:29.490258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:38:29.490299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:346:2335] TestWaitNotification: OK eventTxId 103 2025-06-25T14:38:29.490826Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:38:29.491040Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 233us result status StatusSuccess 2025-06-25T14:38:29.491586Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 2 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 
SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "AttrRoot" Value: "ValRoot" } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpEffects::AlterAfterUpsertBeforeUpsertSelectTransaction+UseSink [GOOD] >> KqpEffects::AlterAfterUpsertBeforeUpsertSelectTransaction-UseSink |81.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> KqpEffects::UpdateOn_Select [GOOD] >> KqpImmediateEffects::AlreadyBrokenImmediateEffects ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ReplaceDuplicates [GOOD] Test command err: Trying to start YDB, gRPC: 8031, MsgBus: 61491 2025-06-25T14:38:17.853263Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896307015454457:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:17.864814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001809/r3tmp/tmpmdzhcr/pdisk_1.dat 2025-06-25T14:38:18.336077Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8031, node 1 2025-06-25T14:38:18.342257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:18.342394Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:18.345310Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:18.420894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:18.420921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:18.420940Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:18.421054Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61491 2025-06-25T14:38:18.778695Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:61491 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:19.022525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:19.053144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:19.227817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:19.420075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:19.490175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:21.027845Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896324195325162:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.027951Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.326985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.357609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.393954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.425219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.453937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.507741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.578008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.663495Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896324195325825:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.663563Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.663622Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896324195325830:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.666907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:21.675613Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896324195325832:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:21.758368Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896324195325883:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:22.849882Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896307015454457:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:22.849957Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:22.922193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 14329, MsgBus: 13190 2025-06-25T14:38:24.276278Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896337834049224:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:24.276355Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001809/r3tmp/tmpRsiKXs/pdisk_1.dat 2025-06-25T14:38:24.411811Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14329, node 2 2025-06-25T14:38:24.439941Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:24.440043Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:24.444840Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:24.496642Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:24.496664Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:24.496671Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:24.496788Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13190 TClient is connected to server localhost:13190 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:24.998353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:25.005294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:25.016458Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:25.101295Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:25.239013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:25.290977Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:25.319531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:27.359983Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896350718952704:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.360035Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.401372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.433318Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.476262Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.509950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.543975Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.582221Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.640076Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.702538Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896350718953365:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.702653Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.703149Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896350718953370:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.707423Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:27.718102Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896350718953372:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:27.814961Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896350718953423:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:28.766760Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.278104Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896337834049224:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:29.278207Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpImmediateEffects::InsertDuplicates-UseSink [GOOD] >> KqpImmediateEffects::InsertExistingKey+UseSink >> KqpWrite::InsertRevert [GOOD] >> KqpWrite::ProjectReplace+UseSink >> TSchemeShardAuditSettings::CreateSubdomain [GOOD] >> Yq_1::Basic_EmptyDict [GOOD] >> TSchemeShardAuditSettings::CreateExtSubdomain [GOOD] |81.9%| [TA] $(B)/ydb/core/tx/schemeshard/ut_user_attributes/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpPg::DeleteWithQueryService-useSink [GOOD] >> KqpImmediateEffects::UpdateOn [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::CreateSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:38:30.380254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:38:30.380365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:30.380407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:38:30.380442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:38:30.380495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:38:30.380525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:38:30.380575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:30.380638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:38:30.381407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:38:30.381756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:38:30.463099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:38:30.463168Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:30.492044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:38:30.492577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:38:30.492767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:38:30.499060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:38:30.499408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:38:30.500107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:30.500386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:38:30.503992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:30.504209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:38:30.505470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:30.505537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:30.505688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:38:30.505737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:30.505782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:38:30.505874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:38:30.512739Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary 
subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:38:30.635455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:38:30.635671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:30.635865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:38:30.635913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:38:30.636141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:38:30.636213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:30.638223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:30.638424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:38:30.638587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:30.638654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:38:30.638710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:38:30.638765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:38:30.640356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:30.640432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:38:30.640479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:38:30.649121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:38:30.649184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:30.649263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:30.649320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:38:30.652710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:38:30.654673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:38:30.654857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:38:30.655808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:30.655931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:30.655976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:30.656285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:38:30.656366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:30.656519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:30.656615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:38:30.658430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:30.658475Z node 1 :FLAT_TX_SCHEMESHARD ... 
eration_side_effects.cpp:654: Send tablet strongly msg operationId: 112:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:112 msg type: 269090816 2025-06-25T14:38:31.016853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 112, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 112 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 112 at step: 5000013 2025-06-25T14:38:31.017340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000013, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:31.017450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 112 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000013 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:31.017536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_unsafe.cpp:47: TDropForceUnsafe TPropose, operationId: 112:0 HandleReply TEvOperationPlan, step: 5000013, at schemeshard: 72057594046678944 2025-06-25T14:38:31.017595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5302: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 7] name: USER_0 type: EPathTypeSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 112 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:31.017629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-25T14:38:31.017735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 112:0 128 -> 130 2025-06-25T14:38:31.017907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:31.017967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-25T14:38:31.018943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-25T14:38:31.020037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 FAKE_COORDINATOR: Erasing txId 112 2025-06-25T14:38:31.021135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:31.021174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:31.021283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-25T14:38:31.021395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard 
DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:31.021431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 1 2025-06-25T14:38:31.021467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 7 2025-06-25T14:38:31.021518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2025-06-25T14:38:31.021555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:418: [72057594046678944] TDeleteParts opId# 112:0 ProgressState 2025-06-25T14:38:31.021636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#112:0 progress is 1/1 2025-06-25T14:38:31.021662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-25T14:38:31.021711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#112:0 progress is 1/1 2025-06-25T14:38:31.021753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-25T14:38:31.021783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 112, ready parts: 1/1, is published: false 2025-06-25T14:38:31.021808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-25T14:38:31.021837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 112:0 2025-06-25T14:38:31.021864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 112:0 2025-06-25T14:38:31.021928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-25T14:38:31.021966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 112, publications: 2, subscribers: 0 2025-06-25T14:38:31.021998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 112, [OwnerId: 72057594046678944, LocalPathId: 1], 27 2025-06-25T14:38:31.022026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 112, [OwnerId: 72057594046678944, LocalPathId: 7], 18446744073709551615 2025-06-25T14:38:31.022809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 27 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:31.022921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 27 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:31.022963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, 
txId: 112 2025-06-25T14:38:31.022993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 27 2025-06-25T14:38:31.023025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:38:31.023703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:31.023844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:31.023886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 112 2025-06-25T14:38:31.023928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 18446744073709551615 2025-06-25T14:38:31.023968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-25T14:38:31.024046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 112, subscribers: 0 2025-06-25T14:38:31.024348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:38:31.024403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-25T14:38:31.024516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 1 2025-06-25T14:38:31.024917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:38:31.024952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-25T14:38:31.025004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:31.028057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-25T14:38:31.028302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-25T14:38:31.029436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:38:31.029520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 112, wait until txId: 112 TestWaitNotification wait txId: 112 2025-06-25T14:38:31.029854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 112: send EvNotifyTxCompletion 2025-06-25T14:38:31.029896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 112 2025-06-25T14:38:31.030507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 112, at schemeshard: 72057594046678944 2025-06-25T14:38:31.030590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2025-06-25T14:38:31.030627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [1:664:2653] TestWaitNotification: OK eventTxId 112 >> KqpEffects::InsertAbort_Select_Conflict+UseSink [GOOD] >> KqpEffects::InsertAbort_Select_Conflict-UseSink >> KqpInplaceUpdate::SingleRowSimple+UseSink [GOOD] >> KqpInplaceUpdate::SingleRowPgNotNull-UseSink |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> KqpInplaceUpdate::Negative_SingleRowWithValueCast+UseSink [GOOD] >> KqpImmediateEffects::MultipleEffectsWithIndex [GOOD] >> KqpImmediateEffects::MultiShardUpsertAfterRead >> KqpImmediateEffects::UpsertAfterInsert [GOOD] >> KqpImmediateEffects::UpsertAfterInsertWithIndex |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::CreateExtSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:38:30.626922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:38:30.626981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:30.627006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:38:30.627027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:38:30.627056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: 
type TxMergeTablePartition, limit 10000 2025-06-25T14:38:30.627074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:38:30.627102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:30.627145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:38:30.627660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:38:30.627890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:38:30.692642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:38:30.692697Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:30.708014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:38:30.708377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:38:30.708562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:38:30.717598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:38:30.717950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:38:30.718579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:30.718838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:38:30.738048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:30.738236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:38:30.739418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:30.739471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:30.739636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:38:30.739686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:30.739731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
2025-06-25T14:38:30.739875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:38:30.757067Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:38:30.872582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:38:30.872790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:30.873006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:38:30.873046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:38:30.873262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:38:30.873325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:30.877078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:30.877238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:38:30.877376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:30.877442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:38:30.877492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:38:30.877533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:38:30.887540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:30.887609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:38:30.887645Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:38:30.892982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:30.893043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:30.893116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:30.893158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:38:30.896622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:38:30.905149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:38:30.905395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:38:30.906193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:30.906318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:30.906360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:30.906642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:38:30.906693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:30.906862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:30.907021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:38:30.910175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-25T14:38:30.910222Z node 1 :FLAT_TX_SCHEMESHARD ... 46316545 FAKE_COORDINATOR: Add transaction: 112 at step: 5000013 FAKE_COORDINATOR: advance: minStep5000013 State->FrontStep: 5000012 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 112 at step: 5000013 2025-06-25T14:38:31.298643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000013, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:31.298725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 112 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000013 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:31.298762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:157: TDropExtSubdomain TPropose, operationId: 112:0 HandleReply TEvOperationPlan, step: 5000013, at schemeshard: 72057594046678944 2025-06-25T14:38:31.298823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5302: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 7] name: USER_0 type: EPathTypeExtSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 112 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:31.298847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-25T14:38:31.298887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 112:0 128 -> 134 2025-06-25T14:38:31.300459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-25T14:38:31.300713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-25T14:38:31.302775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2025-06-25T14:38:31.302821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:104: TDropExtSubdomain TDeleteExternalShards, operationId: 112:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:38:31.302938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 112:0 134 -> 135 2025-06-25T14:38:31.303066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:31.303130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 FAKE_COORDINATOR: Erasing txId 112 2025-06-25T14:38:31.304758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:31.304799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:31.304908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard 
DescribePath, at schemeshard: 72057594046678944, txId: 112, path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-25T14:38:31.304996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:31.305040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 1 2025-06-25T14:38:31.305078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 112, path id: 7 2025-06-25T14:38:31.305349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2025-06-25T14:38:31.305396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:400: [72057594046678944] TDeleteParts opId# 112:0 ProgressState 2025-06-25T14:38:31.305421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 112:0 135 -> 240 2025-06-25T14:38:31.306272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 27 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:31.306364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 27 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:31.306393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 112 2025-06-25T14:38:31.306425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 27 2025-06-25T14:38:31.306453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:38:31.307335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:31.307415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 112 2025-06-25T14:38:31.307450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 112 2025-06-25T14:38:31.307481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 112, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 18446744073709551615 2025-06-25T14:38:31.307518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 
72057594046678944, LocalPathId: 7] was 3 2025-06-25T14:38:31.307579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 112, ready parts: 0/1, is published: true 2025-06-25T14:38:31.308966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 112:0, at schemeshard: 72057594046678944 2025-06-25T14:38:31.309008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 112:0 ProgressState 2025-06-25T14:38:31.309087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#112:0 progress is 1/1 2025-06-25T14:38:31.309112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-25T14:38:31.309140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#112:0 progress is 1/1 2025-06-25T14:38:31.309177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-25T14:38:31.309211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 112, ready parts: 1/1, is published: true 2025-06-25T14:38:31.309242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 112 ready parts: 1/1 2025-06-25T14:38:31.309267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 112:0 2025-06-25T14:38:31.309312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 112:0 2025-06-25T14:38:31.309362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-25T14:38:31.309717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:38:31.309756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-25T14:38:31.309802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 1 2025-06-25T14:38:31.310141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:38:31.310174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-25T14:38:31.310223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:31.310930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 112 2025-06-25T14:38:31.311789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 
72057594046678944, cookie: 112 2025-06-25T14:38:31.313842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:38:31.313926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 112, wait until txId: 112 TestWaitNotification wait txId: 112 2025-06-25T14:38:31.314274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 112: send EvNotifyTxCompletion 2025-06-25T14:38:31.314327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 112 2025-06-25T14:38:31.314910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 112, at schemeshard: 72057594046678944 2025-06-25T14:38:31.315000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 112: got EvNotifyTxCompletionResult 2025-06-25T14:38:31.315042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 112: satisfy waiter [1:654:2643] TestWaitNotification: OK eventTxId 112 >> TSchemeShardAuditSettings::AlterSubdomain [GOOD] >> KqpEffects::InsertAbort_Select_Success [GOOD] >> KqpEffects::InsertAbort_Select_Duplicates-UseSink >> BasicStatistics::TwoNodes >> KqpPg::Returning+useSink [GOOD] >> KqpPg::Returning-useSink >> KqpPg::PgAggregate+useSink [GOOD] >> KqpPg::PgAggregate-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::UpdateOn [GOOD] Test command err: Trying to start YDB, gRPC: 11559, MsgBus: 25164 2025-06-25T14:38:19.456030Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896314934791065:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:19.456271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001805/r3tmp/tmpnbyqsY/pdisk_1.dat 2025-06-25T14:38:19.878928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:19.879038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:19.903797Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896314934790877:2080] 1750862299415754 != 1750862299415757 2025-06-25T14:38:19.906106Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:19.910306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11559, node 1 2025-06-25T14:38:20.036182Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:20.036211Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize 
from file: (empty maybe) 2025-06-25T14:38:20.036330Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:20.036474Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25164 2025-06-25T14:38:20.432523Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25164 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:20.755284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:20.772063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:20.813637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:21.003511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:21.194369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:21.281149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:22.899055Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896327819694386:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.899157Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.193623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.227271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.261230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.292241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.320411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.392148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.426620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.489397Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896332114662346:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.489488Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.489718Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896332114662351:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.493246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:23.504395Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896332114662353:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:23.560617Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896332114662406:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:24.444994Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896314934791065:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:24.445049Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:24.510528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 22119, MsgBus: 14941 2025-06-25T14:38:25.888258Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896343217339558:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:25.888357Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001805/r3tmp/tmpJcJ8NS/pdisk_1.dat 2025-06-25T14:38:26.009431Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:26.010572Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896343217339535:2080] 1750862305887532 != 1750862305887535 2025-06-25T14:38:26.035059Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:26.035143Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 22119, node 2 2025-06-25T14:38:26.043229Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:26.160937Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:26.160962Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:26.160970Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:26.161077Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14941 TClient is connected to server localhost:14941 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:26.619997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:26.641388Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:26.695746Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:26.867186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:26.910995Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:26.932466Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:28.873189Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896356102243072:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.873284Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.937184Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.964019Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.988785Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.016178Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.042639Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.075480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.109609Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.198145Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896360397211031:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.198238Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.198485Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896360397211036:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.201857Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:29.212042Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896360397211038:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:29.272285Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896360397211089:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:30.358141Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.888543Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896343217339558:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:30.888618Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_auditsettings/unittest >> TSchemeShardAuditSettings::AlterSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:38:29.243023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:38:29.243120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:29.243164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:38:29.243215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:38:29.243262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:38:29.243291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:38:29.243342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:29.243410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:38:29.244166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:38:29.244529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:38:29.330659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:38:29.330739Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:29.352276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:38:29.352815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:38:29.353022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:38:29.360278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:38:29.360619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:38:29.361089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:29.361309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:38:29.364785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:29.364978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:38:29.366191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:29.366250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:29.366386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:38:29.366434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:29.366477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:38:29.366557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.375056Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:38:29.532254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-06-25T14:38:29.532520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.532756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:38:29.532805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:38:29.533043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:38:29.533116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:29.543351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:29.543600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:38:29.543823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.543898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:38:29.543952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:38:29.544002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:38:29.550757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.550841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:38:29.550898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:38:29.558956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.559036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:29.559830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:29.559900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , 
TxId: 1 ready parts: 1/1 2025-06-25T14:38:29.570588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:38:29.577257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:38:29.577518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:38:29.578544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:29.578705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:29.578778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:29.579083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:38:29.579133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:29.579351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:29.579477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:38:29.589416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:29.589479Z node 1 :FLAT_TX_SCHEMESHARD ... 
s.cpp:654: Send tablet strongly msg operationId: 175:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:175 msg type: 269090816 2025-06-25T14:38:31.964498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 175, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 175 at step: 5000076 FAKE_COORDINATOR: advance: minStep5000076 State->FrontStep: 5000075 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 175 at step: 5000076 2025-06-25T14:38:31.967323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000076, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:31.967453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 175 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000076 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:31.967514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_unsafe.cpp:47: TDropForceUnsafe TPropose, operationId: 175:0 HandleReply TEvOperationPlan, step: 5000076, at schemeshard: 72057594046678944 2025-06-25T14:38:31.967569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5302: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 26] name: USER_0 type: EPathTypeSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 175 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:31.967605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 26] 2025-06-25T14:38:31.967727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 175:0 128 -> 130 2025-06-25T14:38:31.967923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:31.968008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 2025-06-25T14:38:31.969133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-25T14:38:31.969273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 FAKE_COORDINATOR: Erasing txId 175 2025-06-25T14:38:31.971966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:31.972008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:31.972172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 175, path id: [OwnerId: 72057594046678944, LocalPathId: 26] 2025-06-25T14:38:31.972296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2025-06-25T14:38:31.972353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 175, path id: 1 2025-06-25T14:38:31.972415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 175, path id: 26 2025-06-25T14:38:31.972776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 175:0, at schemeshard: 72057594046678944 2025-06-25T14:38:31.972818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:418: [72057594046678944] TDeleteParts opId# 175:0 ProgressState 2025-06-25T14:38:31.972886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#175:0 progress is 1/1 2025-06-25T14:38:31.972913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-25T14:38:31.972946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#175:0 progress is 1/1 2025-06-25T14:38:31.972970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-25T14:38:31.973017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 175, ready parts: 1/1, is published: false 2025-06-25T14:38:31.973055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 175 ready parts: 1/1 2025-06-25T14:38:31.973083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 175:0 2025-06-25T14:38:31.973113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 175:0 2025-06-25T14:38:31.973192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 3 2025-06-25T14:38:31.973228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 175, publications: 2, subscribers: 0 2025-06-25T14:38:31.973255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 175, [OwnerId: 72057594046678944, LocalPathId: 1], 103 2025-06-25T14:38:31.973290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 175, [OwnerId: 72057594046678944, LocalPathId: 26], 18446744073709551615 2025-06-25T14:38:31.974049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-25T14:38:31.974155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 103 PathOwnerId: 72057594046678944, cookie: 175 2025-06-25T14:38:31.974193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 175 
2025-06-25T14:38:31.974223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 103 2025-06-25T14:38:31.974254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:38:31.975447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-25T14:38:31.975561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 26 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 175 2025-06-25T14:38:31.975595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 175 2025-06-25T14:38:31.975626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 175, pathId: [OwnerId: 72057594046678944, LocalPathId: 26], version: 18446744073709551615 2025-06-25T14:38:31.975657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 2 2025-06-25T14:38:31.975744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 175, subscribers: 0 2025-06-25T14:38:31.976104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:38:31.976150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-25T14:38:31.976237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 26] was 1 2025-06-25T14:38:31.993182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:38:31.993255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 26], at schemeshard: 72057594046678944 2025-06-25T14:38:31.993346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:31.997770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-25T14:38:31.998309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 175 2025-06-25T14:38:32.009450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:38:32.009585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 175, wait until txId: 175 TestWaitNotification wait txId: 175 2025-06-25T14:38:32.010786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 175: send EvNotifyTxCompletion 2025-06-25T14:38:32.010828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 175 2025-06-25T14:38:32.012267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 175, at schemeshard: 72057594046678944 2025-06-25T14:38:32.012492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 175: got EvNotifyTxCompletionResult 2025-06-25T14:38:32.012542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 175: satisfy waiter [1:2483:4472] TestWaitNotification: OK eventTxId 175 >> KqpPg::AlterColumnSetDefaultFromSequenceWithSchemaname [GOOD] >> KqpPg::CheckPgAutoParams+useSink >> ColumnStatistics::CountMinSketchServerlessStatistics ------- [TM] {asan, default-linux-x86_64, release} ydb/services/fq/ut_integration/unittest >> Yq_1::Basic_EmptyDict [GOOD] Test command err: 2025-06-25T14:37:18.699000Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896054359403025:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:18.712392Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; E0625 14:37:19.491191725 239611 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:19.491312590 239611 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:19.704847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:19.748950Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:20.472660Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:25566: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:25566 } ] 2025-06-25T14:37:20.740517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:20.759926Z node 1 :YQL_NODES_MANAGER ERROR: nodes_manager.cpp:364: ydb/core/fq/libs/actors/nodes_manager.cpp:322: TRANSPORT_UNAVAILABLE
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:25566: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:25566 2025-06-25T14:37:21.746142Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:21.946225Z node 1 :YQ_CONTROL_PLANE_STORAGE ERROR: schema.cpp:160: Create directory "Root/yq" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:25566: Failed to connect to remote host: Connection refused } {
: Error: Grpc error response on endpoint localhost:25566 } ] 2025-06-25T14:37:22.747038Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:23.700452Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896054359403025:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:23.700509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:23.749369Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fad/r3tmp/tmp15ADlk/pdisk_1.dat 2025-06-25T14:37:24.244591Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:24.248305Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896080129207251:2275], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:24.343426Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 25566, node 1 E0625 14:37:24.497177280 239688 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 14:37:24.497374773 239688 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// 2025-06-25T14:37:24.537281Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/compute_databases". Create session OK 2025-06-25T14:37:24.537320Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:24.537328Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/compute_databases" 2025-06-25T14:37:24.541734Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/mappings". Create session OK 2025-06-25T14:37:24.541766Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/mappings" 2025-06-25T14:37:24.541772Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/mappings" 2025-06-25T14:37:24.545373Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/jobs". Create session OK 2025-06-25T14:37:24.545407Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/jobs" 2025-06-25T14:37:24.545413Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/jobs" 2025-06-25T14:37:24.547823Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenant_acks". Create session OK 2025-06-25T14:37:24.547835Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:24.547840Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenant_acks" 2025-06-25T14:37:24.563378Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/quotas". Create session OK 2025-06-25T14:37:24.563405Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/quotas" 2025-06-25T14:37:24.563411Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/quotas" 2025-06-25T14:37:24.569556Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/connections". Create session OK 2025-06-25T14:37:24.569580Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/connections" 2025-06-25T14:37:24.569586Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/connections" 2025-06-25T14:37:24.595361Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/result_sets". Create session OK 2025-06-25T14:37:24.595378Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/result_sets" 2025-06-25T14:37:24.595384Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/result_sets" 2025-06-25T14:37:24.598376Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/tenants". 
Create session OK 2025-06-25T14:37:24.598422Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/tenants" 2025-06-25T14:37:24.598438Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/tenants" 2025-06-25T14:37:24.607248Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/nodes". Create session OK 2025-06-25T14:37:24.607270Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/nodes" 2025-06-25T14:37:24.607276Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/nodes" 2025-06-25T14:37:24.614519Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/bindings". Create session OK 2025-06-25T14:37:24.614551Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/bindings" 2025-06-25T14:37:24.614556Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/bindings" 2025-06-25T14:37:24.614651Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/pending_small". Create session OK 2025-06-25T14:37:24.614671Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/pending_small" 2025-06-25T14:37:24.614677Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/pending_small" 2025-06-25T14:37:24.629117Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/queries". Create session OK 2025-06-25T14:37:24.629181Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/queries" 2025-06-25T14:37:24.629188Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/queries" 2025-06-25T14:37:24.642423Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "Root/yq/idempotency_keys". Create session OK 2025-06-25T14:37:24.642457Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:24.642463Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "Root/yq/idempotency_keys" 2025-06-25T14:37:24.850447Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896080129207539:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:24.851997Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:24.852327Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896080129207551:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:24.855081Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "Root/yq" 2025-06-25T14:37:24.855121Z node 1 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "Root/yq": 2025-06-25T14:37:24.862409Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896080129207582:2353], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:24.862478Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896080129207584:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:24.862518Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:24.868456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateR ... pp:648: SyncQuota finished with error: 2025-06-25T14:38:29.656302Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.661340Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.661397Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.661595Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.661623Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.661792Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.661820Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.661981Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.662006Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.662106Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.662242Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.662332Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.662424Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.662462Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.662549Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.662642Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.662735Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.662852Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.662944Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.663107Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.663136Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.663326Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.663355Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.663510Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.663535Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.663629Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.663713Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.663858Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.663970Z node 7 
:FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.664064Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.664101Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.664189Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.664284Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.665149Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.665345Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.665376Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.665533Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.665559Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.665709Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.665734Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.665886Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.665913Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.666007Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.666099Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.666201Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.666346Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.666437Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.666472Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.666560Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.666653Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.666745Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.666949Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.666977Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.667113Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.667209Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.667710Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.667876Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.667913Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.667996Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.668083Z node 7 :FQ_QUOTA_SERVICE 
ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.668240Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.668367Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.668463Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.668498Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.668584Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.668668Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.668748Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.668923Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.668956Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.669095Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.669120Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.669276Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.669299Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.669464Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.669488Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.669571Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.669655Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.669744Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.669952Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.670051Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.670088Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.670174Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.670256Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.670428Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.670455Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.670624Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.670650Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.670806Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.670904Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.671022Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.671119Z node 7 :FQ_QUOTA_SERVICE ERROR: 
quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.671162Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.671276Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.671362Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.671443Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.671596Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.671695Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.671731Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.671826Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.671987Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.672014Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.672169Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.672196Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: 2025-06-25T14:38:29.672290Z node 7 :FQ_QUOTA_SERVICE ERROR: quota_manager.cpp:648: SyncQuota finished with error: >> KqpEffects::InsertAbort_Params_Conflict+UseSink [GOOD] >> KqpEffects::InsertAbort_Params_Conflict-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::Negative_SingleRowWithValueCast+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 1964, MsgBus: 3711 2025-06-25T14:38:19.752614Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896315715176626:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:19.752859Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f9/r3tmp/tmplnWveP/pdisk_1.dat 2025-06-25T14:38:20.216240Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:20.234846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:20.234937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:20.237223Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1964, node 1 2025-06-25T14:38:20.378195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:20.378215Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:20.378234Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:20.378332Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is 
connected to server localhost:3711 2025-06-25T14:38:20.784511Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3711 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:21.139790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:21.157076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:21.169667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:21.414918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:21.606608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:21.688398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:23.272099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896332895047412:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.272179Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.589457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.625072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.660345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.701431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.732197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.771922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.800654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.889328Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896332895048070:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.889370Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.889545Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896332895048075:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.892989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:23.901637Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896332895048077:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:23.978704Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896332895048128:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:24.752680Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896315715176626:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:24.752743Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:25.087083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 9827, MsgBus: 18797 2025-06-25T14:38:26.519963Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896344350206062:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:26.519996Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f9/r3tmp/tmpB1sI4c/pdisk_1.dat 2025-06-25T14:38:26.670616Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896344350206042:2080] 1750862306516829 != 1750862306516832 2025-06-25T14:38:26.683909Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9827, node 2 2025-06-25T14:38:26.701632Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:26.701751Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:26.711130Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:26.739217Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:26.739258Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:26.739267Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:26.739393Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18797 TClient is connected to server localhost:18797 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:27.169695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:27.182266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:27.252947Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:27.379484Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.490008Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:27.600869Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:29.420961Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896357235109574:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.421064Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.474901Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.544444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.591357Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.630071Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.666351Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.704167Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.739263Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.822296Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896357235110242:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.822416Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.822599Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896357235110247:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.826096Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:29.836440Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896357235110249:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:29.896658Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896357235110300:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:30.953066Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:31.520439Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896344350206062:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:31.520521Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::DeleteWithQueryService-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 11606, MsgBus: 21688 2025-06-25T14:36:55.609420Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895957346624890:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:55.613727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a66/r3tmp/tmpqrkPOW/pdisk_1.dat 2025-06-25T14:36:56.240619Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:56.244526Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895957346624866:2080] 1750862215591997 != 1750862215592000 2025-06-25T14:36:56.255764Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:56.255862Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:56.258941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11606, node 1 2025-06-25T14:36:56.428941Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:56.428970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:56.428977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:56.429099Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:56.625028Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is 
connected to server localhost:21688 TClient is connected to server localhost:21688 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:36:57.251078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:36:59.041854Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895974526494697:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:59.041991Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:59.108920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:59.229799Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895974526494829:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:59.230072Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:59.230295Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895974526494834:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:59.234518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:59.250003Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895974526494836:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:36:59.335163Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895974526494887:2415] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 1 1 1 Trying to start YDB, gRPC: 20560, MsgBus: 22616 2025-06-25T14:37:00.829759Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895975308114717:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:00.829840Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a66/r3tmp/tmpUdyQ3g/pdisk_1.dat 2025-06-25T14:37:00.956117Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20560, node 2 2025-06-25T14:37:00.998346Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:37:00.998420Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:37:00.999983Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:37:01.079173Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:37:01.079199Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:37:01.079206Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:37:01.079305Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22616 TClient is connected to server localhost:22616 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:37:01.633961Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:37:01.850171Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:04.214724Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895992487984498:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:04.214901Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:04.222488Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:04.310939Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895992487984634:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:04.311058Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found ... SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:18.223240Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:18.237441Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:21.844419Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519896304018256442:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:21.844505Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:22.601911Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896329788060826:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.602039Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.629197Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.761008Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896329788060933:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.761168Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.761579Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896329788060938:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.767254Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:22.792761Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519896329788060940:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:38:22.851245Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519896329788060992:2400] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 16282, MsgBus: 13443 2025-06-25T14:38:24.573394Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519896338673390470:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:24.573489Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a66/r3tmp/tmpSHeGni/pdisk_1.dat 2025-06-25T14:38:24.814829Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:24.815524Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519896338673390443:2080] 1750862304572717 != 1750862304572720 2025-06-25T14:38:24.838578Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:24.838705Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:24.840733Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16282, node 12 2025-06-25T14:38:24.919101Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:24.919128Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:24.919140Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:24.919325Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13443 2025-06-25T14:38:25.602476Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13443 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:25.858642Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:25.867800Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:29.575094Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519896338673390470:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:29.575190Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:29.992283Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519896360148227566:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.992451Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:30.026259Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.203019Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519896364443194968:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:30.203172Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:30.203497Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519896364443194973:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:30.209065Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:30.223245Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519896364443194975:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:38:30.307696Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519896364443195026:2399] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpImmediateEffects::InteractiveTxWithReadAtTheEnd-UseSink [GOOD] >> BasicStatistics::TwoTables |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> TSyncBrokerTests::ShouldReturnTokensWithSameVDiskId |82.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |82.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |82.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/test-results/unittest/{meta.json ... results_accumulator.log} |82.0%| [LD] {RESULT} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut >> TBoardSubscriberTest::NotAvailableByShutdown >> KqpImmediateEffects::ImmediateUpdate [GOOD] |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> TSyncBrokerTests::ShouldReturnTokensWithSameVDiskId [GOOD] >> TSyncNeighborsTests::SerDes1 [GOOD] >> BasicStatistics::TwoDatabases |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::InteractiveTxWithReadAtTheEnd-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 4773, MsgBus: 21218 2025-06-25T14:38:20.811674Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896320553645263:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:20.812100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f6/r3tmp/tmpeC3lia/pdisk_1.dat 2025-06-25T14:38:21.221375Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896320553645164:2080] 1750862300793180 != 1750862300793183 2025-06-25T14:38:21.237472Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:21.238848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:21.238910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:21.243659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4773, node 1 2025-06-25T14:38:21.406234Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:21.406262Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:21.406269Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to 
initialize from file: (empty maybe) 2025-06-25T14:38:21.406385Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21218 2025-06-25T14:38:21.812669Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21218 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:22.103246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:22.125018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:22.141963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:22.317473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:38:22.484422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.576077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:24.099445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896337733515991:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:24.099544Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:24.408954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.442865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.474448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.513242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.552197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.632095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.685916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.770052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896337733516648:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:24.770116Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:24.770252Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896337733516653:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:24.774076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:24.793729Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896337733516655:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:24.898682Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896337733516708:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:25.814468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896320553645263:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:25.818614Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:25.890606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 11918, MsgBus: 3538 2025-06-25T14:38:27.451364Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896351067265219:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:27.451410Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f6/r3tmp/tmphQdIPp/pdisk_1.dat 2025-06-25T14:38:27.589298Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:27.589478Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896351067265198:2080] 1750862307450536 != 1750862307450539 TServer::EnableGrpc on GrpcPort 11918, node 2 2025-06-25T14:38:27.631168Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:27.631257Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:27.634568Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:27.655781Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:27.655802Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:27.655810Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:27.655918Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3538 TClient is connected to server localhost:3538 WaitRootIsUp 'Root'... 
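Editor's note: each node start in this log also prints the NET_CLASSIFIER fallback chain: "distributable config is empty, broken or outdated, will use file", then "failed to initialize from file", then "got bad distributable configuration", after which the test proceeds without a classifier. The snippet below is an editorial illustration of that two-source fallback in generic C++; loadDistributable and loadFromFile are hypothetical helpers, not the actual net classifier code.

```cpp
// Editorial sketch of a two-source config fallback, in the same order as the
// NET_CLASSIFIER warnings above. Helper names are hypothetical.
#include <iostream>
#include <optional>
#include <string>

std::optional<std::string> loadDistributable() { return std::nullopt; }  // "empty, broken or outdated"
std::optional<std::string> loadFromFile()      { return std::nullopt; }  // "(empty maybe)"

std::optional<std::string> loadClassifierConfig() {
    if (auto cfg = loadDistributable())
        return cfg;                               // preferred, cluster-distributed source
    std::cerr << "distributable config is empty, broken or outdated, will use file\n";
    if (auto cfg = loadFromFile())
        return cfg;                               // local file fallback
    std::cerr << "got bad distributable configuration\n";
    return std::nullopt;                          // callers keep running without a classifier
}

int main() {
    std::cout << (loadClassifierConfig() ? "config loaded\n" : "no config\n");
}
```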
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:28.156798Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:28.176399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:28.245729Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:28.403903Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:28.479578Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:28.482625Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:30.556916Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896363952168729:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:30.556993Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:30.600290Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.628197Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.654960Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.683753Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.710222Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.744375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.815024Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.903216Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896363952169392:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:30.903329Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:30.903372Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896363952169397:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:30.906948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:30.919578Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896363952169399:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:31.012681Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896368247136746:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:32.178618Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:32.452435Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896351067265219:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:32.452510Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpPg::InsertNoTargetColumns_ColumnOrder+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder-useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TSyncNeighborsTests::SerDes1 [GOOD] Test command err: 2025-06-25T14:38:34.859259Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:64: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:5:2052], token sent, active: 1, waiting: 0 2025-06-25T14:38:34.859421Z node 1 :BS_SYNCER DEBUG: blobstorage_syncer_broker.cpp:50: TEvQuerySyncToken, VDisk actor id: [0:1:1], actor id: [1:6:2053], token sent, active: 1, waiting: 0 >> TBoardSubscriberTest::NotAvailableByShutdown [GOOD] |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::Analyze |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::NotAvailableByShutdown [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ImmediateUpdate [GOOD] Test command err: Trying to start YDB, gRPC: 23634, MsgBus: 25579 2025-06-25T14:38:22.800518Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896328358846615:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:22.800649Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017e1/r3tmp/tmpX6W9Pq/pdisk_1.dat 2025-06-25T14:38:23.206298Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:23.208945Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896328358846594:2080] 1750862302799195 != 1750862302799198 TServer::EnableGrpc on GrpcPort 23634, node 1 2025-06-25T14:38:23.227998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:23.228107Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:23.232715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:23.296676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:23.296704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:23.296720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:23.296873Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25579 TClient is connected to server localhost:25579 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:38:23.834429Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:23.875390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:23.891088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:24.021114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:24.185081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:24.273033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:25.859544Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896341243750114:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:25.859631Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:26.183522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:26.213995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:26.248268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:26.280980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:26.359494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:26.392231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:26.474472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:26.530017Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896345538718074:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:26.530074Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:26.530126Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896345538718079:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:26.533154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:26.545914Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896345538718081:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:26.603570Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896345538718132:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:27.795435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.801912Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896328358846615:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:27.806157Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 63022, MsgBus: 1057 2025-06-25T14:38:29.020629Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896358041414630:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:29.020715Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017e1/r3tmp/tmphSevoT/pdisk_1.dat 2025-06-25T14:38:29.117715Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:29.118495Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896358041414597:2080] 1750862309019977 != 1750862309019980 TServer::EnableGrpc on GrpcPort 63022, node 2 2025-06-25T14:38:29.165226Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:29.165305Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:29.168090Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:29.190056Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:29.190077Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:29.190087Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:29.190201Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1057 TClient is connected to server localhost:1057 WaitRootIsUp 'Root'... 
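Editor's note: every node in this run walks the same HIVE sequence, VolatileState: Unknown -> Disconnected -> Connecting -> Connected. The enum below is an editorial illustration of such a forward-only connection state machine; it is not the HIVE implementation.

```cpp
// Editorial sketch of the Unknown -> Disconnected -> Connecting -> Connected
// progression logged by HIVE for each starting node. Not the HIVE code.
#include <initializer_list>
#include <iostream>

enum class VolatileState { Unknown, Disconnected, Connecting, Connected };

// Allow only the single forward step seen in the log transitions.
bool canTransition(VolatileState from, VolatileState to) {
    return static_cast<int>(to) == static_cast<int>(from) + 1;
}

int main() {
    VolatileState s = VolatileState::Unknown;
    for (VolatileState next : {VolatileState::Disconnected,
                               VolatileState::Connecting,
                               VolatileState::Connected}) {
        if (canTransition(s, next)) s = next;
    }
    std::cout << "reached Connected: " << (s == VolatileState::Connected) << "\n";
}
```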
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:38:29.716601Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:29.728930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:29.800503Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:29.929197Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:30.004371Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:30.052347Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:32.097517Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896370926318122:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:32.097601Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:32.148100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:32.175290Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:32.204546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:32.280381Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:32.315520Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:32.357244Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:32.426373Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:32.488047Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896370926318788:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:32.488148Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:32.488676Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896370926318793:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:32.492817Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:32.508486Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896370926318795:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:32.609937Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896370926318846:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:33.631820Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.024457Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896358041414630:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:34.030439Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpImmediateEffects::TxWithReadAtTheEnd+UseSink [GOOD] >> KqpEffects::InsertAbort_Params_Duplicates+UseSink [GOOD] >> KqpEffects::InsertAbort_Params_Duplicates-UseSink >> KqpWrite::ProjectReplace+UseSink [GOOD] >> unstable_connection.py::TestUnstableConnection::test |82.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::ServerlessGlobalIndex >> KqpImmediateEffects::InsertExistingKey+UseSink [GOOD] >> BasicStatistics::SimpleGlobalIndex |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.1%| [TA] $(B)/ydb/core/blobstorage/vdisk/syncer/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpEffects::AlterAfterUpsertBeforeUpsertSelectTransaction-UseSink [GOOD] >> KqpImmediateEffects::AlreadyBrokenImmediateEffects [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::TxWithReadAtTheEnd+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 24520, MsgBus: 18515 2025-06-25T14:38:24.052517Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896336163981957:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:24.052828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017e0/r3tmp/tmpwCjKm5/pdisk_1.dat 2025-06-25T14:38:24.507923Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:24.508130Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896336163981861:2080] 1750862304018974 != 1750862304018977 2025-06-25T14:38:24.517716Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:24.517843Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:24.519682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24520, node 1 2025-06-25T14:38:24.593811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:24.593835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:24.593849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:24.593965Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18515 2025-06-25T14:38:25.054748Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18515 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:38:25.210667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:25.236594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:25.371715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:25.520091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:25.621535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:27.345646Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896349048885389:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.345751Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.712429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.741754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.780019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.829082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.865793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.901034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.942880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.999292Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896349048886043:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.999358Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.999538Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896349048886048:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.002907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:28.013265Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896349048886050:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:28.076855Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896353343853399:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:29.038917Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896336163981957:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:29.039588Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:29.104797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 61054, MsgBus: 28963 2025-06-25T14:38:30.354801Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896364374896816:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:30.354841Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017e0/r3tmp/tmpdCWOVH/pdisk_1.dat 2025-06-25T14:38:30.484481Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:30.492836Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896364374896795:2080] 1750862310354254 != 1750862310354257 TServer::EnableGrpc on GrpcPort 61054, node 2 2025-06-25T14:38:30.525845Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:30.525917Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:30.530677Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:30.577011Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:30.577034Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:30.577042Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:30.577162Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28963 TClient is connected to server localhost:28963 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:31.030783Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:31.045569Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:31.127254Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:31.368177Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:31.415378Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:38:31.464269Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:33.491739Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896377259800323:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:33.491802Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:33.546053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:33.584080Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:33.659985Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:33.735014Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:33.801516Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:33.872727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:33.959094Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.022087Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896381554768292:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.022178Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.022264Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896381554768297:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.025086Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:34.036268Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896381554768299:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:34.123019Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896381554768350:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:34.958525Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.355252Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896364374896816:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:35.355325Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> KqpInplaceUpdate::SingleRowPgNotNull-UseSink [GOOD] >> KqpImmediateEffects::MultiShardUpsertAfterRead [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::InsertExistingKey+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 27052, MsgBus: 23840 2025-06-25T14:38:24.692403Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896337933602119:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:24.692456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017c6/r3tmp/tmpeFcEwI/pdisk_1.dat 2025-06-25T14:38:25.048469Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896337933602100:2080] 1750862304688505 != 1750862304688508 2025-06-25T14:38:25.060564Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27052, node 1 2025-06-25T14:38:25.096762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:25.097349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:25.100298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:25.151049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:25.151070Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:25.151077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty 
maybe) 2025-06-25T14:38:25.151228Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23840 TClient is connected to server localhost:23840 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:38:25.705440Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:25.839619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:25.864226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:25.899525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:26.178951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:26.330513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:26.411130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:27.849613Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896350818505638:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.849721Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.121811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.149462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.182947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.249465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.292215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.337489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.372642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.434032Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896355113473596:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.434119Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.434576Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896355113473601:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.439321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:28.450832Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896355113473603:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:28.526426Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896355113473654:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:29.691936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896337933602119:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:29.692068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:29.702081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:31.977787Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:31.993427Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:32.008363Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:32.084632Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:32.227918Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:32.311995Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:32.321380Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:34.396977Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896380843281096:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.397054Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.443645Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.472118Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.498682Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.527955Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.556039Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.586802Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.616301Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.665786Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896380843281754:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.665872Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.665884Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896380843281759:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.668882Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:34.715434Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896380843281761:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:34.778935Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896380843281812:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:35.720934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:36.063047Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=4; 2025-06-25T14:38:36.063228Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 4 at tablet 72075186224037922 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:38:36.063402Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 4 at tablet 72075186224037922 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:38:36.063569Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [2:7519896389433216801:2473], Table: `/Root/TestImmediateEffects` ([72057594046644480:17:1]), SessionActorId: [2:7519896385138249378:2473]Got CONSTRAINT VIOLATION for table `/Root/TestImmediateEffects`. ShardID=72075186224037922, Sink=[2:7519896389433216801:2473].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:38:36.063673Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519896389433216785:2473], SessionActorId: [2:7519896385138249378:2473], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/TestImmediateEffects`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[2:7519896385138249378:2473]. isRollback=0 2025-06-25T14:38:36.063899Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=NDBiYjIxYTktNmMzNmVlMDUtM2MxOGM1YzItZTc2ZjA0YWM=, ActorId: [2:7519896385138249378:2473], ActorState: ExecuteState, TraceId: 01jykrdg9z0gfqmgemq659mmw2, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [2:7519896389433216795:2473] from: [2:7519896389433216785:2473] 2025-06-25T14:38:36.063999Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519896389433216795:2473] TxId: 281474976715675. Ctx: { TraceId: 01jykrdg9z0gfqmgemq659mmw2, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDBiYjIxYTktNmMzNmVlMDUtM2MxOGM1YzItZTc2ZjA0YWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/TestImmediateEffects`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:38:36.064169Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NDBiYjIxYTktNmMzNmVlMDUtM2MxOGM1YzItZTc2ZjA0YWM=, ActorId: [2:7519896385138249378:2473], ActorState: ExecuteState, TraceId: 01jykrdg9z0gfqmgemq659mmw2, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpWrite::ProjectReplace+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 18412, MsgBus: 6495 2025-06-25T14:38:24.279182Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896335680325013:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:24.279325Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d6/r3tmp/tmpQfhk0e/pdisk_1.dat 2025-06-25T14:38:24.667181Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896335680324915:2080] 1750862304272023 != 1750862304272026 2025-06-25T14:38:24.674739Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18412, node 1 2025-06-25T14:38:24.721765Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:24.721848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:24.736166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:24.807348Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:24.807376Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:24.807386Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:24.807525Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6495 TClient is connected to server localhost:6495 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:38:25.288081Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:25.380986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:25.403921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:25.555674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:25.742446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:25.863427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:27.714756Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896348565228449:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.714858Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.002524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.033171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.096959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.168114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.195413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.228030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.254679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.341957Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896352860196412:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.342022Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.342343Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896352860196417:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.346201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:28.358814Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896352860196419:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:28.455040Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896352860196470:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:29.276069Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896335680325013:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:29.276166Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:29.433387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 22107, MsgBus: 21510 2025-06-25T14:38:31.409924Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896368932882145:2245];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:31.410483Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d6/r3tmp/tmpXl8AIa/pdisk_1.dat 2025-06-25T14:38:31.569591Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:31.570556Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896368932881909:2080] 1750862311349757 != 1750862311349760 2025-06-25T14:38:31.586985Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:31.587081Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:31.592273Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22107, node 2 2025-06-25T14:38:31.641124Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:31.641218Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:31.641224Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:31.641303Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21510 TClient is connected to server localhost:21510 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:38:32.073455Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:32.093449Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:32.174098Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:32.344624Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:32.409219Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:32.417291Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:34.393518Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896381817785416:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.393644Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.446553Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.469364Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.506428Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.535304Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.566173Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.598336Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.630433Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.684656Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896381817786072:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.684756Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.684806Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896381817786077:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.687921Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:34.698728Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896381817786079:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:34.781073Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896381817786130:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |82.1%| [TA] $(B)/ydb/core/tx/schemeshard/ut_auditsettings/test-results/unittest/{meta.json ... results_accumulator.log} |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::AlreadyBrokenImmediateEffects [GOOD] Test command err: Trying to start YDB, gRPC: 11987, MsgBus: 22826 2025-06-25T14:38:25.250167Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896340220323699:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:25.250226Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b4/r3tmp/tmpb6JEVQ/pdisk_1.dat 2025-06-25T14:38:25.655835Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:25.676978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:25.677084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:25.679002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11987, node 1 2025-06-25T14:38:25.843321Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:25.843345Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:25.843356Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:25.843512Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22826 2025-06-25T14:38:26.257908Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22826 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:26.466341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:26.483051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:26.494307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:26.634522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:26.767575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:26.830770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:28.325313Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896353105227196:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.325439Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.630121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.700392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.737823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.769873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.802486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.884593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.911837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.962372Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896353105227854:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.962447Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.962697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896353105227859:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.965976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:28.973903Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896353105227861:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:29.052365Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896357400195208:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:30.252722Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896340220323699:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:30.253501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 6421, MsgBus: 17230 2025-06-25T14:38:31.329491Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896367047451618:2192];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b4/r3tmp/tmputyZJN/pdisk_1.dat 2025-06-25T14:38:31.399064Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/in ... 511Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:31.550824Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:31.556466Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896367047451439:2080] 1750862311243034 != 1750862311243037 2025-06-25T14:38:31.560661Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6421, node 2 2025-06-25T14:38:31.603520Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:31.603538Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:31.603546Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:31.603637Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17230 TClient is connected to server localhost:17230 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:32.021120Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:32.028427Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:32.035907Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:32.109288Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:32.274347Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:32.328780Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-25T14:38:32.357516Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.609496Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896379932354973:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.609578Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.649630Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.676010Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.702760Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.766637Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.789054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.816006Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.845220Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.895089Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896379932355630:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.895169Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.895177Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896379932355635:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.898721Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:34.909266Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896379932355637:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:34.989165Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896379932355688:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:35.820347Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:36.273223Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896367047451618:2192];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:36.273267Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:36.428291Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=OWNlNWM3MDMtNWFjZTRiMTctYjdhMDRkZjMtMzAwMzRlZTc=, ActorId: [2:7519896388522290764:2501], ActorState: ExecuteState, TraceId: 01jykrdgtjbh7e337c4b01s4ck, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken 2025-06-25T14:38:36.438232Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=OWNlNWM3MDMtNWFjZTRiMTctYjdhMDRkZjMtMzAwMzRlZTc=, ActorId: [2:7519896388522290764:2501], ActorState: ReadyState, TraceId: 01jykrdgwnevmgy393p9jgcrc7, Create QueryResponse for error on request, msg: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::AlterAfterUpsertBeforeUpsertSelectTransaction-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 2316, MsgBus: 10577 2025-06-25T14:38:18.358091Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896311574744012:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:18.362344Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001807/r3tmp/tmpA7PoPs/pdisk_1.dat 2025-06-25T14:38:18.854754Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:18.854868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:18.856901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:18.874388Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:18.875045Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896311574743983:2080] 1750862298354792 != 1750862298354795 TServer::EnableGrpc on GrpcPort 2316, node 1 
2025-06-25T14:38:18.964818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:18.971718Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:18.971756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:18.971903Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10577 2025-06-25T14:38:19.360799Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10577 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:19.635293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:19.647860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:19.663185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:19.793100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:19.978713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:20.091929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:21.832030Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896324459647509:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.832145Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.100931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.131387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.203579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.254661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.305967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.345723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.416664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:22.476734Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896328754615473:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.476792Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.476924Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896328754615478:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.479914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:22.496896Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896328754615480:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:22.586163Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896328754615531:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:23.358423Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896311574744012:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:23.358527Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:23.706109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... 8:30.858191Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9262, node 3 2025-06-25T14:38:30.899554Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:30.899625Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:30.901473Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:30.925053Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:30.925069Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:30.925076Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:30.925196Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21555 TClient is connected to server localhost:21555 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:38:31.450242Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:31.456394Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:38:31.474779Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:31.566569Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:31.734589Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:31.767459Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:31.798964Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:34.267784Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896379500706470:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.267858Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.322176Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.349514Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.391613Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.423227Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.453097Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.482917Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.514649Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:34.566922Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896379500707124:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.567012Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.567038Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896379500707129:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:34.570121Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:34.580486Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519896379500707131:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:34.657688Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519896379500707182:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:35.755465Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519896362320835673:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:35.755524Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:35.906634Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:36.137200Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:38:36.332114Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ODEzNWE1NS1kMjRmM2U0Zi1jYmQ3MmY4OC1iNGQ5OWY4NQ==, ActorId: [3:7519896383795674709:2465], ActorState: ExecuteState, TraceId: 01jykrdgm3d7xe0dcgh4d1bg4e, Create QueryResponse for error on request, msg: >> HttpRequest::AnalyzeServerless >> KqpEffects::InsertAbort_Select_Conflict-UseSink [GOOD] >> KqpEffects::InsertAbort_Select_Duplicates-UseSink [GOOD] >> KqpEffects::InsertRevert_Literal_Conflict |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::SingleRowPgNotNull-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 23234, MsgBus: 16541 2025-06-25T14:38:25.748727Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896342084430255:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:25.768092Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00179f/r3tmp/tmpRjD1Mr/pdisk_1.dat 2025-06-25T14:38:26.217744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:26.217847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:26.220672Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:26.238880Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896342084430155:2080] 1750862305726981 != 1750862305726984 2025-06-25T14:38:26.247615Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23234, node 1 2025-06-25T14:38:26.302904Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:26.302926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:26.302936Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:26.303073Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16541 TClient is connected to server localhost:16541 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:38:26.741031Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:26.864441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:38:26.900369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.029356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:27.177123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:27.259194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:28.974982Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896354969333688:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.975064Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.294249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.326285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.356569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.383609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.457110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.536853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.614696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.702920Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896359264301653:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.703004Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.703060Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896359264301658:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.706437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:29.716138Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896359264301660:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:29.807941Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896359264301713:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:30.741935Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896342084430255:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:30.741997Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:30.824035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 31418, MsgBus: 23777 2025-06-25T14:38:32.129511Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896370829216701:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:32.129559Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00179f/r3tmp/tmpzwMhLo/pdisk_1.dat 2025-06-25T14:38:32.274464Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:32.276084Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896370829216679:2080] 1750862312128637 != 1750862312128640 2025-06-25T14:38:32.297029Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:32.297112Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:32.297833Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31418, node 2 2025-06-25T14:38:32.364868Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:32.364892Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:32.364900Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:32.365021Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23777 TClient is connected to server localhost:23777 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:32.873100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:32.883390Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:32.898531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:32.974147Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:33.107441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:33.163370Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:33.207393Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:35.175366Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896383714120193:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.175487Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.229011Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.263156Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.292342Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.319256Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.345406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.372848Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.400410Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.450653Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896383714120853:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.450789Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.451023Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896383714120858:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.465755Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:35.473056Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896383714120860:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:35.564036Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896383714120911:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:36.522514Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:37.130820Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896370829216701:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:37.130906Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::MultiShardUpsertAfterRead [GOOD] Test command err: Trying to start YDB, gRPC: 18201, MsgBus: 14214 2025-06-25T14:38:22.076770Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896331039568584:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:22.078881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017e8/r3tmp/tmpIVxs6l/pdisk_1.dat 2025-06-25T14:38:22.491211Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:22.492364Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:22.492460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 18201, node 1 2025-06-25T14:38:22.493031Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896326744601150:2080] 1750862301994453 != 1750862301994456 2025-06-25T14:38:22.494873Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:22.551427Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:22.551444Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:22.551457Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:22.551544Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14214 TClient is connected to server localhost:14214 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:38:23.052769Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:23.142552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:23.171982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:23.335495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:23.516435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:23.588095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:25.163328Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896343924471989:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:25.163429Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:25.484412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.518349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.552391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.603213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.663151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.748048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.810678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.915927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896343924472649:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:25.916010Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:25.916223Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896343924472654:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:25.921334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:25.936552Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896343924472656:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:26.020964Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896348219440003:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:27.005224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896331039568584:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:27.005282Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:27.041732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.080490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation pa ... ome/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 16875, MsgBus: 4839 2025-06-25T14:38:32.281501Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896372390162466:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:32.281574Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017e8/r3tmp/tmpdTqzox/pdisk_1.dat 2025-06-25T14:38:32.485949Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:32.486806Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896372390162442:2080] 1750862312280775 != 1750862312280778 2025-06-25T14:38:32.496229Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:32.496332Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:32.499604Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16875, node 2 2025-06-25T14:38:32.575247Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:32.575284Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:32.575296Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:32.575448Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4839 TClient is connected to server localhost:4839 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:33.163507Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:33.179756Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:33.269625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:33.348228Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:33.460099Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:33.540059Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:35.389380Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896385275065957:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.389450Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.438745Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.467234Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.537876Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.565390Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.589940Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.657927Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.711207Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.763318Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896385275066618:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.763397Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.763421Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896385275066623:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.766575Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:35.774747Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896385275066625:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:35.861070Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896385275066676:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:36.704204Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:37.281625Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896372390162466:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:37.281704Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::NotFullStatisticsColumnshard >> KqpPg::PgAggregate-useSink [GOOD] >> KqpPg::MkqlTerminate |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::ProbeServerless >> KqpEffects::InsertAbort_Params_Conflict-UseSink [GOOD] >> HttpRequest::Probe ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertAbort_Select_Conflict-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 61926, MsgBus: 17384 2025-06-25T14:38:19.382716Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896316016251971:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:19.382792Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017fd/r3tmp/tmpZO4FX3/pdisk_1.dat 2025-06-25T14:38:19.831482Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61926, node 1 2025-06-25T14:38:19.865148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:19.865241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:19.871796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:19.976528Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:19.976566Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:19.976577Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:19.976716Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got 
bad distributable configuration TClient is connected to server localhost:17384 2025-06-25T14:38:20.398830Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17384 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:20.632148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:20.671825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:20.810772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:20.991483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:21.078051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:22.782952Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896328901155468:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:22.783206Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.073141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.107031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.134067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.161717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.196200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.274408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.347949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:23.440949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896333196123434:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.441056Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.441363Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896333196123439:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:23.445089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:23.456829Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896333196123441:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:23.535825Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896333196123492:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:24.383000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896316016251971:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:24.383066Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:24.552076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:24.832090Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-25T14:38:24.841032Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 a ... 94037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1858, node 3 2025-06-25T14:38:32.288161Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:32.288189Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:32.288199Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:32.288350Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25542 TClient is connected to server localhost:25542 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:38:32.753812Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:32.781043Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:32.865319Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:33.035699Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:33.110049Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:33.117775Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:35.447589Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896384656510753:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.447669Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.503228Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.536819Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.564410Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.596465Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.628784Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.698654Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.759429Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.811988Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896384656511413:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.812062Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.812082Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896384656511418:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.816571Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:35.825241Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519896384656511420:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:35.898028Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519896384656511471:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:37.082170Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:37.101586Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519896371771607249:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:37.101670Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:38:37.555938Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7519896393246446431:2493], TxId: 281474976715676, task: 1. Ctx: { SessionId : ydb://session/3?node_id=3&id=Njk5NjM3YzktMWI1YjAxMzUtMTdkNmE3ZWMtZGNiNzI1YjY=. CustomerSuppliedId : . TraceId : 01jykrdhnqamdmvm8frhxn35sa. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-25T14:38:37.557235Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519896393246446432:2494], TxId: 281474976715676, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=Njk5NjM3YzktMWI1YjAxMzUtMTdkNmE3ZWMtZGNiNzI1YjY=. TraceId : 01jykrdhnqamdmvm8frhxn35sa. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [3:7519896393246446428:2464], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-25T14:38:37.559794Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=Njk5NjM3YzktMWI1YjAxMzUtMTdkNmE3ZWMtZGNiNzI1YjY=, ActorId: [3:7519896393246446292:2464], ActorState: ExecuteState, TraceId: 01jykrdhnqamdmvm8frhxn35sa, Create QueryResponse for error on request, msg: |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::Serverless |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |82.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |82.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpImmediateEffects::UpsertAfterInsertWithIndex [GOOD] >> BasicStatistics::Simple >> KqpPg::V1CreateTable [GOOD] >> KqpEffects::InsertAbort_Params_Duplicates-UseSink [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_NotOneSize+useSink |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> KqpPg::ValuesInsert+useSink |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertAbort_Params_Conflict-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 13983, MsgBus: 7185 2025-06-25T14:38:21.788731Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896322771246324:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:21.789836Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f1/r3tmp/tmpZ8AGkV/pdisk_1.dat 2025-06-25T14:38:22.139690Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13983, node 1 2025-06-25T14:38:22.185559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:22.185694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:22.187522Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:22.275542Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:22.275569Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:22.275578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:22.275743Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got 
bad distributable configuration TClient is connected to server localhost:7185 TClient is connected to server localhost:7185 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:38:22.804494Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:22.923526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:22.945192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:22.961037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:23.102027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:23.251376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:23.328013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:24.997885Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896335656149812:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:24.997974Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:25.305428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.335542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.362214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.395339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.424114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.470698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.506058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:25.616384Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896339951117768:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:25.616490Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:25.618295Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896339951117773:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:25.622713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:25.641538Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896339951117775:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:25.701756Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896339951117826:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:26.788926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896322771246324:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:26.789019Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21144, MsgBus: 28449 2025-06-25T14:38:27.836689Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896351214820345:2155];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f1/r3tmp/tmpJMXtDw/pdisk_1.dat 2025-06-25T14:38:27.879827Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/init ... d 2025-06-25T14:38:33.634777Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:33.637662Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26809, node 3 2025-06-25T14:38:33.680817Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:33.680855Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:33.680864Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:33.681004Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11397 TClient is connected to server localhost:11397 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:38:34.222831Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:34.231057Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:34.239329Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:34.315471Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:34.469370Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:34.510567Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:34.526244Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:36.573532Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896388923538886:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:36.573634Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:36.610886Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:36.636681Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:36.666450Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:36.695441Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:36.725758Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:36.760339Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:36.791155Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:36.869082Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896388923539542:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:36.869163Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896388923539547:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:36.869166Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:36.871910Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:36.881329Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519896388923539549:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:36.954458Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519896388923539600:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:38.381974Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7519896397513474455:2476], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jykrdjj21s4av4kjeysvb51m. SessionId : ydb://session/3?node_id=3&id=M2ViYzM3MGMtNzJiOWVhZjEtYjNlMTZmYzMtOTUwODJmOGY=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-25T14:38:38.382280Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519896397513474457:2477], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jykrdjj21s4av4kjeysvb51m. SessionId : ydb://session/3?node_id=3&id=M2ViYzM3MGMtNzJiOWVhZjEtYjNlMTZmYzMtOTUwODJmOGY=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:7519896397513474452:2464], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-25T14:38:38.382580Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=M2ViYzM3MGMtNzJiOWVhZjEtYjNlMTZmYzMtOTUwODJmOGY=, ActorId: [3:7519896397513474421:2464], ActorState: ExecuteState, TraceId: 01jykrdjj21s4av4kjeysvb51m, Create QueryResponse for error on request, msg: 2025-06-25T14:38:38.502498Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519896376038635393:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:38.502578Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> KqpPg::TableSelect-useSink [GOOD] >> KqpPg::TableInsert+useSink >> KqpPg::Returning-useSink [GOOD] >> BasicStatistics::NotFullStatisticsDatashard >> KqpEffects::InsertRevert_Literal_Conflict [GOOD] >> KqpPg::SelectIndex+useSink >> BasicStatistics::TwoServerlessDbs >> BasicStatistics::TwoServerlessTwoSharedDbs |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.2%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.2%| [LD] {RESULT} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertAbort_Params_Duplicates-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 24425, MsgBus: 14345 2025-06-25T14:38:24.475617Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896338453314285:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:24.478884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d0/r3tmp/tmpInHuP2/pdisk_1.dat 2025-06-25T14:38:24.901153Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:24.935163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:24.935272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 24425, node 1 2025-06-25T14:38:24.952041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:25.024480Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:25.024743Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:25.024760Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:25.024880Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14345 TClient is connected to server localhost:14345 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:38:25.438623Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:38:25.535214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:25.613354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:25.830782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:26.022641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:26.095219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:27.541184Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896351338217696:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.541301Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:27.851969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.879907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.954628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.996063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.028738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.058391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.128206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:28.183343Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896355633185652:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.183444Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.183645Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896355633185657:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:28.186973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:28.196283Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896355633185659:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:28.291542Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896355633185710:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:29.471285Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896338453314285:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:29.471357Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 22240, MsgBus: 25124 2025-06-25T14:38:30.473105Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896364132992528:2141];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d0/r3tmp/tmpUtSQxs/pdisk_1.dat 2025-06-25T14:38:30.533948Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:38:30.566013Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38 ... 11451519:2080] 1750862316128746 != 1750862316128749 TServer::EnableGrpc on GrpcPort 11441, node 3 2025-06-25T14:38:36.259085Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:36.259176Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:36.261002Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:36.279573Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:36.279595Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:36.279603Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:36.279708Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4013 TClient is connected to server localhost:4013 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:36.814369Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:36.822759Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:36.867560Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:36.988622Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:37.040929Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:37.184406Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:38.966897Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896397801387730:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:38.967015Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:39.013371Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:39.040031Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:39.103462Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:39.125386Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:39.148815Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:39.201693Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:39.236341Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:39.286635Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896402096355685:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:39.286760Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:39.286826Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896402096355690:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:39.290255Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:39.298886Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519896402096355692:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:39.354683Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519896402096355743:3411] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:40.833451Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7519896406391323302:2476], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=M2MyOTY2MWEtYjg5OTFmZDktYTMwYzI1NDgtYTc0OTBmN2I=. TraceId : 01jykrdmxmaz3rz3p2sg35btve. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Duplicated keys found., code: 2012 }. 2025-06-25T14:38:40.833750Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519896406391323303:2477], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=3&id=M2MyOTY2MWEtYjg5OTFmZDktYTMwYzI1NDgtYTc0OTBmN2I=. TraceId : 01jykrdmxmaz3rz3p2sg35btve. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [3:7519896406391323299:2464], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-25T14:38:40.834046Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=M2MyOTY2MWEtYjg5OTFmZDktYTMwYzI1NDgtYTc0OTBmN2I=, ActorId: [3:7519896406391323268:2464], ActorState: ExecuteState, TraceId: 01jykrdmxmaz3rz3p2sg35btve, Create QueryResponse for error on request, msg: 2025-06-25T14:38:41.130315Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519896389211451546:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:41.130391Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::UpsertAfterInsertWithIndex [GOOD] Test command err: Trying to start YDB, gRPC: 27760, MsgBus: 5361 2025-06-25T14:38:26.057434Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896344101260876:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:26.057468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001765/r3tmp/tmpUyWsAr/pdisk_1.dat 2025-06-25T14:38:26.450527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:26.450726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:26.452850Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:26.460479Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:26.461980Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896344101260858:2080] 1750862306040906 != 1750862306040909 TServer::EnableGrpc on GrpcPort 27760, node 1 2025-06-25T14:38:26.551346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:26.551381Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:26.551392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:26.551506Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5361 TClient is connected to server localhost:5361 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:38:27.124382Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:27.140961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:27.164504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:27.278217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:38:27.439687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:27.520275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:29.158146Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896356986164383:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.158279Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.497227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.539613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.588910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.668983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.704351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.728024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.753296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:29.831470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896356986165044:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.831576Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.831641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896356986165049:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.835334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:29.848112Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896356986165051:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:29.913982Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896356986165102:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:30.988385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:31.057976Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896344101260876:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:31.058040Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 2186, MsgBus: 61522 2025-06-25T14:38:32.267511Z node 2 :METADATA_PROVIDER WARN: lo ... 32.393682Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896371484694885:2080] 1750862312266839 != 1750862312266842 TServer::EnableGrpc on GrpcPort 2186, node 2 2025-06-25T14:38:32.425510Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:32.425607Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:32.427546Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:32.460244Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:32.460271Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:32.460279Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:32.460420Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61522 TClient is connected to server localhost:61522 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:32.963266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:32.978336Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:33.057501Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:33.219063Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:33.286865Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:33.321086Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:35.275255Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896384369598395:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.275385Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.330858Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.360249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.387922Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.416092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.445449Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.473742Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.503781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:35.555148Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896384369599052:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.555199Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.555247Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519896384369599057:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:35.558826Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:35.571836Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519896384369599059:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:35.624720Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519896384369599110:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:36.570041Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:36.645226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:36.674544Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:37.267892Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896371484694918:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:37.267968Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> KqpPg::MkqlTerminate [GOOD] >> KqpPg::NoSelectFullScan ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertRevert_Literal_Conflict [GOOD] Test command err: Trying to start YDB, gRPC: 20733, MsgBus: 21194 2025-06-25T14:38:26.608305Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896347734756287:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:26.608582Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001762/r3tmp/tmpRDDtmZ/pdisk_1.dat TServer::EnableGrpc on GrpcPort 20733, node 1 2025-06-25T14:38:26.990311Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:27.008798Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896347734756268:2080] 1750862306607360 != 1750862306607363 2025-06-25T14:38:27.040266Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:27.051750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:27.064075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:27.090624Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:27.090643Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:27.090653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:27.090814Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21194 TClient is connected to server localhost:21194 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:38:27.617592Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:27.716697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:27.730539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:27.742549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:38:27.945615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:28.073809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:28.142339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:29.691892Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896360619659790:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.691991Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:29.978990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.005138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.034634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.061199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.103166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.140414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.220594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:30.316600Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896364914627749:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:30.316674Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:30.316824Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896364914627754:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:30.320102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:30.339105Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896364914627756:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:38:30.435099Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896364914627807:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:31.463005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:31.608111Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896347734756287:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:31.608182Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot de ... tabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7519896394490183871:2464], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-25T14:38:37.699552Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZjYwMTMxNjItYWJmNTk1MDgtYTE5YzJjMi03Mzk5MGQ1YQ==, ActorId: [2:7519896394490183735:2464], ActorState: ExecuteState, TraceId: 01jykrdhpeayyx9m58vn6r9bf4, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 14887, MsgBus: 61960 2025-06-25T14:38:38.775429Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519896397169041916:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:38.775504Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001762/r3tmp/tmphlmZIo/pdisk_1.dat 2025-06-25T14:38:38.908723Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:38.909877Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519896397169041893:2080] 1750862318774879 != 1750862318774882 2025-06-25T14:38:38.930959Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:38.931084Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:38.933924Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14887, node 3 2025-06-25T14:38:38.984926Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:38.984953Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:38.984965Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:38.985105Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61960 TClient is connected to server localhost:61960 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:38:39.442462Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:38:39.452987Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:39.510535Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:39.659371Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:39.736100Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:38:39.907606Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:42.058433Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896414348912719:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:42.058523Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:42.107839Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:42.143174Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:42.215545Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:42.254304Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:42.322092Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:42.395333Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:42.427812Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:42.500326Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896414348913380:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:42.500408Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:42.500461Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519896414348913385:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:42.504325Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:42.515130Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519896414348913387:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:38:42.603270Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519896414348913438:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:43.775822Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519896397169041916:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:43.775897Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-60 [GOOD] Test command err: Starting YDB, grpc: 17444, msgbus: 7316 2025-06-25T14:34:10.053604Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895244166829510:2146];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000993/r3tmp/tmp2gKdMU/pdisk_1.dat 2025-06-25T14:34:10.437475Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:34:10.826532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:34:10.826659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:34:10.845602Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:10.859850Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17444, node 1 2025-06-25T14:34:11.050333Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:34:11.060890Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:34:11.060917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:34:11.060928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:34:11.061046Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7316 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:34:11.412488Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895248461796812:2111] Handle TEvNavigate describe path dc-1 2025-06-25T14:34:11.455972Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895252756764749:2452] HANDLE EvNavigateScheme dc-1 2025-06-25T14:34:11.459647Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519895252756764749:2452] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:34:11.504793Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519895252756764749:2452] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-25T14:34:11.515745Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519895252756764749:2452] Handle TEvDescribeSchemeResult Forward to# [1:7519895252756764730:2446] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:34:11.575001Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519895248461796812:2111] Handle TEvProposeTransaction 2025-06-25T14:34:11.575029Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519895248461796812:2111] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:34:11.575140Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519895248461796812:2111] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7519895252756764759:2458] 2025-06-25T14:34:11.715451Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519895252756764759:2458] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:34:11.715534Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519895252756764759:2458] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:34:11.715558Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519895252756764759:2458] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:34:11.715633Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519895252756764759:2458] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:34:11.715953Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519895252756764759:2458] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:34:11.716076Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519895252756764759:2458] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:34:11.716130Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519895252756764759:2458] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:34:11.716281Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519895252756764759:2458] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:34:11.717537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:11.720455Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519895252756764759:2458] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:34:11.720502Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519895252756764759:2458] txid# 281474976715657 SEND to# [1:7519895252756764758:2457] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 
2025-06-25T14:34:11.734027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:34:11.740671Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519895248461796812:2111] Handle TEvProposeTransaction 2025-06-25T14:34:11.740695Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519895248461796812:2111] TxId# 281474976715658 ProcessProposeTransaction 2025-06-25T14:34:11.740742Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519895248461796812:2111] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519895252756764800:2495] 2025-06-25T14:34:11.742948Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519895252756764800:2495] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:34:11.742991Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519895252756764800:2495] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:34:11.743006Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519895252756764800:2495] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:34:11.743055Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519895252756764800:2495] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:34:11.743341Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519895252756764800:2495] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:34:11.743421Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519895252756764800:2495] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:34:11.748369Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519895252756764800:2495] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:34:11.748544Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519895252756764800:2495] txid# 281474976715658 HANDLE EvClientConnected 2025-06-25T14:34:11.74895 ... 
ainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:38.683062Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896138871436717:2571] txid# 281474976715661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715661 TabletId# 72057594046644480} 2025-06-25T14:37:38.683167Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896138871436717:2571] txid# 281474976715661 HANDLE EvClientConnected 2025-06-25T14:37:38.686842Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896138871436717:2571] txid# 281474976715661 Status StatusAlreadyExists HANDLE {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715661 Reason# Check failed: path: '/dc-1/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)} 2025-06-25T14:37:38.686984Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519896138871436717:2571] txid# 281474976715661, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:38.687019Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896138871436717:2571] txid# 281474976715661 SEND to# [59:7519896138871436647:2303] Source {TEvProposeTransactionStatus txid# 281474976715661 Status# 48} 2025-06-25T14:37:38.707549Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896117396599305:2111] Handle TEvProposeTransaction 2025-06-25T14:37:38.707587Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896117396599305:2111] TxId# 281474976715662 ProcessProposeTransaction 2025-06-25T14:37:38.707640Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896117396599305:2111] Cookie# 0 userReqId# "" txid# 281474976715662 SEND to# [59:7519896138871436741:2583] 2025-06-25T14:37:38.709788Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896138871436741:2583] txid# 281474976715662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:41222" 2025-06-25T14:37:38.709871Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896138871436741:2583] txid# 281474976715662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:38.709894Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896138871436741:2583] txid# 281474976715662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:37:38.709948Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896138871436741:2583] txid# 281474976715662 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:38.710266Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896138871436741:2583] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 
0 2025-06-25T14:37:38.710395Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896138871436741:2583] HANDLE EvNavigateKeySetResult, txid# 281474976715662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:38.710453Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896138871436741:2583] txid# 281474976715662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715662 TabletId# 72057594046644480} 2025-06-25T14:37:38.710631Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896138871436741:2583] txid# 281474976715662 HANDLE EvClientConnected 2025-06-25T14:37:38.718448Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896138871436741:2583] txid# 281474976715662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715662} 2025-06-25T14:37:38.718516Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896138871436741:2583] txid# 281474976715662 SEND to# [59:7519896138871436740:2295] Source {TEvProposeTransactionStatus txid# 281474976715662 Status# 48} 2025-06-25T14:37:38.733707Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896117396599305:2111] Handle TEvProposeTransaction 2025-06-25T14:37:38.733749Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896117396599305:2111] TxId# 281474976715663 ProcessProposeTransaction 2025-06-25T14:37:38.733809Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896117396599305:2111] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [59:7519896138871436754:2592] 2025-06-25T14:37:38.736655Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896138871436754:2592] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "" NewOwner: "db_admin@builtin" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:41234" 2025-06-25T14:37:38.736735Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896138871436754:2592] txid# 281474976715663 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:38.736759Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896138871436754:2592] txid# 281474976715663 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:37:38.736824Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896138871436754:2592] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:38.737239Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896138871436754:2592] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:38.737365Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896138871436754:2592] HANDLE EvNavigateKeySetResult, txid# 281474976715663 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:38.737430Z node 59 :TX_PROXY 
DEBUG: schemereq.cpp:103: Actor# [59:7519896138871436754:2592] txid# 281474976715663 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715663 TabletId# 72057594046644480} 2025-06-25T14:37:38.737608Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896138871436754:2592] txid# 281474976715663 HANDLE EvClientConnected 2025-06-25T14:37:38.738069Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:37:38.740591Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896138871436754:2592] txid# 281474976715663 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715663} 2025-06-25T14:37:38.740667Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896138871436754:2592] txid# 281474976715663 SEND to# [59:7519896138871436753:2308] Source {TEvProposeTransactionStatus txid# 281474976715663 Status# 48} 2025-06-25T14:37:38.803581Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896117396599305:2111] Handle TEvProposeTransaction 2025-06-25T14:37:38.803620Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896117396599305:2111] TxId# 281474976715664 ProcessProposeTransaction 2025-06-25T14:37:38.803721Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896117396599305:2111] Cookie# 0 userReqId# "" txid# 281474976715664 SEND to# [59:7519896138871436788:2608] 2025-06-25T14:37:38.806512Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896138871436788:2608] txid# 281474976715664 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "targetuser" MissingOk: false } } } } UserToken: "\n\020db_admin@builtin\022\030\022\026\n\024all-users@well-known\032\020db_admin@builtin\"\007Builtin*\027db_a****ltin (DEFA2CD5)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:41276" 2025-06-25T14:37:38.806605Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896138871436788:2608] txid# 281474976715664 Bootstrap, UserSID: db_admin@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:38.806629Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896138871436788:2608] txid# 281474976715664 Bootstrap, UserSID: db_admin@builtin IsClusterAdministrator: 0 2025-06-25T14:37:38.806825Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519896138871436788:2608] txid# 281474976715664 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-25T14:37:38.806873Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519896138871436788:2608] txid# 281474976715664 HandleResolveDatabase, UserSID: db_admin@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 1 DatabaseOwner: db_admin@builtin 2025-06-25T14:37:38.806919Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896138871436788:2608] txid# 281474976715664 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:38.807187Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896138871436788:2608] txid# 281474976715664 HANDLE EvNavigateKeySetResult 
TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:38.807306Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896138871436788:2608] HANDLE EvNavigateKeySetResult, txid# 281474976715664 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:38.807359Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896138871436788:2608] txid# 281474976715664 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715664 TabletId# 72057594046644480} 2025-06-25T14:37:38.807490Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896138871436788:2608] txid# 281474976715664 HANDLE EvClientConnected 2025-06-25T14:37:38.810198Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896138871436788:2608] txid# 281474976715664 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715664} 2025-06-25T14:37:38.810256Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896138871436788:2608] txid# 281474976715664 SEND to# [59:7519896138871436787:2314] Source {TEvProposeTransactionStatus txid# 281474976715664 Status# 48} |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-DropUser-72 [GOOD] Test command err: Starting YDB, grpc: 27125, msgbus: 8324 2025-06-25T14:33:54.652725Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895179245238740:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:54.652798Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00099a/r3tmp/tmpnPC4HL/pdisk_1.dat 2025-06-25T14:33:55.538076Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:55.559357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:55.559460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:55.615312Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:55.616381Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 27125, node 1 2025-06-25T14:33:55.848998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:55.849036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:55.849045Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:55.849185Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:8324 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:33:56.345038Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895179245238743:2116] Handle TEvNavigate describe path dc-1 2025-06-25T14:33:56.386955Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895187835173881:2452] HANDLE EvNavigateScheme dc-1 2025-06-25T14:33:56.387284Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519895187835173881:2452] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:56.450322Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519895187835173881:2452] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-25T14:33:56.469915Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519895187835173881:2452] Handle TEvDescribeSchemeResult Forward to# [1:7519895187835173880:2451] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { 
Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:33:56.528569Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519895179245238743:2116] Handle TEvProposeTransaction 2025-06-25T14:33:56.528600Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519895179245238743:2116] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:33:56.528700Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519895179245238743:2116] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7519895187835173888:2458] 2025-06-25T14:33:56.826098Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519895187835173888:2458] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:33:56.826199Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519895187835173888:2458] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:33:56.826217Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519895187835173888:2458] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:33:56.826304Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519895187835173888:2458] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:33:56.826638Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519895187835173888:2458] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:56.826763Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519895187835173888:2458] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:33:56.826831Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519895187835173888:2458] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:33:56.826989Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519895187835173888:2458] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:33:56.827915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:33:56.838305Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519895187835173888:2458] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:33:56.838363Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519895187835173888:2458] txid# 281474976715657 SEND to# [1:7519895187835173887:2457] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 
waiting... 2025-06-25T14:33:56.901538Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519895179245238743:2116] Handle TEvProposeTransaction 2025-06-25T14:33:56.901560Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519895179245238743:2116] TxId# 281474976715658 ProcessProposeTransaction 2025-06-25T14:33:56.901589Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519895179245238743:2116] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519895187835173933:2495] 2025-06-25T14:33:56.904009Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519895187835173933:2495] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:33:56.904052Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519895187835173933:2495] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:33:56.904066Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519895187835173933:2495] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:33:56.904127Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519895187835173933:2495] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:33:56.904422Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519895187835173933:2495] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:56.904496Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519895187835173933:2495] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:33:56.904531Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519895187835173933:2495] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:33:56.904723Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519895187835173933:2495] txid# 281474976715658 HANDLE EvClientConnected 2025-06-25T14:33:56.905274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715 ... 
Request# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:36.886703Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896130543997557:2591] txid# 281474976715661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715661 TabletId# 72057594046644480} 2025-06-25T14:37:36.886886Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896130543997557:2591] txid# 281474976715661 HANDLE EvClientConnected 2025-06-25T14:37:36.897179Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896130543997557:2591] txid# 281474976715661 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715661} 2025-06-25T14:37:36.897252Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896130543997557:2591] txid# 281474976715661 SEND to# [59:7519896130543997556:2295] Source {TEvProposeTransactionStatus txid# 281474976715661 Status# 48} 2025-06-25T14:37:37.230055Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896104774192820:2114] Handle TEvProposeTransaction 2025-06-25T14:37:37.230102Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896104774192820:2114] TxId# 281474976715662 ProcessProposeTransaction 2025-06-25T14:37:37.230160Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896104774192820:2114] Cookie# 0 userReqId# "" txid# 281474976715662 SEND to# [59:7519896134838964879:2608] 2025-06-25T14:37:37.232840Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896134838964879:2608] txid# 281474976715662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\022\010\001\022\016\032\014ordinaryuser\n\032\010\000\022\026\010\001\020\200\200\002\032\014ordinaryuser \000" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:35830" 2025-06-25T14:37:37.232924Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896134838964879:2608] txid# 281474976715662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:37.232950Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896134838964879:2608] txid# 281474976715662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:37:37.233011Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896134838964879:2608] txid# 281474976715662 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:37.233388Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896134838964879:2608] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:37.233496Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896134838964879:2608] HANDLE EvNavigateKeySetResult, txid# 281474976715662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:37.233549Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896134838964879:2608] txid# 281474976715662 SEND 
to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715662 TabletId# 72057594046644480} 2025-06-25T14:37:37.233743Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896134838964879:2608] txid# 281474976715662 HANDLE EvClientConnected 2025-06-25T14:37:37.234316Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:37:37.243799Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896134838964879:2608] txid# 281474976715662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715662} 2025-06-25T14:37:37.243863Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896134838964879:2608] txid# 281474976715662 SEND to# [59:7519896134838964878:2311] Source {TEvProposeTransactionStatus txid# 281474976715662 Status# 48} 2025-06-25T14:37:37.299493Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896104774192820:2114] Handle TEvProposeTransaction 2025-06-25T14:37:37.299528Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896104774192820:2114] TxId# 281474976715663 ProcessProposeTransaction 2025-06-25T14:37:37.299580Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896104774192820:2114] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [59:7519896134838964911:2626] 2025-06-25T14:37:37.302733Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896134838964911:2626] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:35840" 2025-06-25T14:37:37.302812Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896134838964911:2626] txid# 281474976715663 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:37.302840Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896134838964911:2626] txid# 281474976715663 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:37:37.302893Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896134838964911:2626] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:37.303322Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896134838964911:2626] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:37.303436Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896134838964911:2626] HANDLE EvNavigateKeySetResult, txid# 281474976715663 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:37.303497Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896134838964911:2626] txid# 281474976715663 SEND to# 
72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715663 TabletId# 72057594046644480} 2025-06-25T14:37:37.303653Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896134838964911:2626] txid# 281474976715663 HANDLE EvClientConnected 2025-06-25T14:37:37.318461Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896134838964911:2626] txid# 281474976715663 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715663} 2025-06-25T14:37:37.318531Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896134838964911:2626] txid# 281474976715663 SEND to# [59:7519896134838964910:2313] Source {TEvProposeTransactionStatus txid# 281474976715663 Status# 48} 2025-06-25T14:37:37.381273Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896104774192820:2114] Handle TEvProposeTransaction 2025-06-25T14:37:37.381303Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896104774192820:2114] TxId# 281474976715664 ProcessProposeTransaction 2025-06-25T14:37:37.381355Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896104774192820:2114] Cookie# 0 userReqId# "" txid# 281474976715664 SEND to# [59:7519896134838964939:2638] 2025-06-25T14:37:37.384220Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896134838964939:2638] txid# 281474976715664 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "targetuser" MissingOk: false } } } } UserToken: "\n\014ordinaryuser\022\030\022\026\n\024all-users@well-known\032\334\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDkwNTQ1NywiaWF0IjoxNzUwODYyMjU3LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.lnbuWgz21w40hzj2k10PNpokaRKFlxQf64MyWztYRhH0C94W2ynfbpbROBFmfiT1NjoNMcaSLv2CxNZfH1rDKzWVtqC2sLkrq-EwSq020mJvO7c6HBEdVgFHvKh3pZG6cNU1SP4YgHPQpcCKDLj_yo8iN7BybughhVNamJBOdwYcKHK_OasocfXK9obyrEmjMq7ldRFvaZ103NpZrcZYzSDCFxZWvQ1-hdPN46TPIkNY573E7xIUkoqTZY6ce2tcahdhCAtRm0bc-beLnDHc7WK0YhiyD3LuELTTMGsAiWEZhvEzLbiGDZMWcVuVEuXlul1__9RsNUqCTSgKnRFGNA\"\005Login*\210\001eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDkwNTQ1NywiaWF0IjoxNzUwODYyMjU3LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.**" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:35866" 2025-06-25T14:37:37.384295Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896134838964939:2638] txid# 281474976715664 Bootstrap, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:37.384342Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896134838964939:2638] txid# 281474976715664 Bootstrap, UserSID: ordinaryuser IsClusterAdministrator: 0 2025-06-25T14:37:37.384491Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519896134838964939:2638] txid# 281474976715664 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-25T14:37:37.384537Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519896134838964939:2638] txid# 281474976715664 HandleResolveDatabase, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-25T14:37:37.384581Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896134838964939:2638] txid# 281474976715664 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:37.384831Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896134838964939:2638] txid# 
281474976715664 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:37.384854Z node 59 :TX_PROXY ERROR: schemereq.cpp:1103: Actor# [59:7519896134838964939:2638] txid# 281474976715664, Access denied for ordinaryuser, attempt to manage user 2025-06-25T14:37:37.384940Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519896134838964939:2638] txid# 281474976715664, issues: { message: "Access denied for ordinaryuser" issue_code: 200000 severity: 1 } 2025-06-25T14:37:37.384966Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896134838964939:2638] txid# 281474976715664 SEND to# [59:7519896134838964938:2325] Source {TEvProposeTransactionStatus Status# 5} 2025-06-25T14:37:37.385225Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=ZjUyMDNhMzctYmJhN2Q3YWMtODE5MzA5NTItZTU1Mjk4ZjU=, ActorId: [59:7519896134838964929:2325], ActorState: ExecuteState, TraceId: 01jykrbq6hf641a28ysn6t965q, Create QueryResponse for error on request, msg: 2025-06-25T14:37:37.386565Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519896104774192820:2114] Handle TEvExecuteKqpTransaction 2025-06-25T14:37:37.386598Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519896104774192820:2114] TxId# 281474976715665 ProcessProposeKqpTransaction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 [FAIL] Test command err: Starting YDB, grpc: 10771, msgbus: 14224 2025-06-25T14:33:53.221781Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895174786330506:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:53.221830Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00099d/r3tmp/tmp8YJ01T/pdisk_1.dat 2025-06-25T14:33:54.073495Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:54.192861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:54.194072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:54.204800Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:33:54.215992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10771, node 1 2025-06-25T14:33:54.383399Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:54.383421Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:54.383428Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:54.385257Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14224 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:33:54.605010Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895174786330513:2097] Handle TEvNavigate describe path dc-1 2025-06-25T14:33:54.627473Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895179081298359:2435] HANDLE EvNavigateScheme dc-1 2025-06-25T14:33:54.627831Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519895179081298359:2435] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:54.668937Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519895179081298359:2435] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-25T14:33:54.698414Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519895179081298359:2435] Handle TEvDescribeSchemeResult Forward to# [1:7519895179081298358:2434] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:33:54.743386Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519895174786330513:2097] Handle TEvProposeTransaction 2025-06-25T14:33:54.743418Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519895174786330513:2097] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:33:54.743546Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519895174786330513:2097] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7519895179081298366:2441] 2025-06-25T14:33:54.883385Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519895179081298366:2441] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:33:54.883478Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519895179081298366:2441] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:33:54.883498Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519895179081298366:2441] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:33:54.892448Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519895179081298366:2441] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:33:54.892803Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519895179081298366:2441] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:54.892954Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519895179081298366:2441] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:33:54.893002Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519895179081298366:2441] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:33:54.893143Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519895179081298366:2441] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:33:54.893984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:33:54.898263Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519895179081298366:2441] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:33:54.898309Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519895179081298366:2441] txid# 281474976715657 SEND to# [1:7519895179081298365:2440] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 
2025-06-25T14:33:54.930188Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519895174786330513:2097] Handle TEvProposeTransaction 2025-06-25T14:33:54.930213Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519895174786330513:2097] TxId# 281474976715658 ProcessProposeTransaction 2025-06-25T14:33:54.930243Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519895174786330513:2097] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519895179081298409:2477] 2025-06-25T14:33:54.932630Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519895179081298409:2477] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:33:54.932680Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519895179081298409:2477] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:33:54.932693Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519895179081298409:2477] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:33:54.932836Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519895179081298409:2477] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:33:54.933128Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519895179081298409:2477] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:54.933216Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519895179081298409:2477] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:33:54.933281Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519895179081298409:2477] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:33:54.933404Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519895179081298409:2477] txid# 281474976715658 HANDLE EvClientConnected 2025-06-25T14:33:54.933869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 2814749767 ... 
axTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:37:12.307928Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896023557055989:2111] Handle TEvProposeTransaction 2025-06-25T14:37:12.307961Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896023557055989:2111] TxId# 281474976710657 ProcessProposeTransaction 2025-06-25T14:37:12.308078Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896023557055989:2111] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [59:7519896027852023976:2451] 2025-06-25T14:37:12.310766Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896027852023976:2451] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:37:12.311011Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896027852023976:2451] txid# 281474976710657 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:12.311038Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896027852023976:2451] txid# 281474976710657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:37:12.311101Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896027852023976:2451] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:12.311444Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896027852023976:2451] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:12.311553Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896027852023976:2451] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:37:12.311606Z node 59 :TX_PROXY 
DEBUG: schemereq.cpp:103: Actor# [59:7519896027852023976:2451] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-06-25T14:37:12.311755Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896027852023976:2451] txid# 281474976710657 HANDLE EvClientConnected 2025-06-25T14:37:12.312444Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:37:12.329963Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896027852023976:2451] txid# 281474976710657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710657} 2025-06-25T14:37:12.330031Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896027852023976:2451] txid# 281474976710657 SEND to# [59:7519896027852023975:2450] Source {TEvProposeTransactionStatus txid# 281474976710657 Status# 53} waiting... 2025-06-25T14:37:12.357677Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896023557055989:2111] Handle TEvProposeTransaction 2025-06-25T14:37:12.357708Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896023557055989:2111] TxId# 281474976710658 ProcessProposeTransaction 2025-06-25T14:37:12.357744Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896023557055989:2111] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [59:7519896027852024016:2487] 2025-06-25T14:37:12.360199Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896027852024016:2487] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:37:12.360274Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896027852024016:2487] txid# 281474976710658 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:12.360295Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896027852024016:2487] txid# 281474976710658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:37:12.360385Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896027852024016:2487] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:12.360715Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896027852024016:2487] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:12.360813Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896027852024016:2487] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:12.360864Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# 
[59:7519896027852024016:2487] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-25T14:37:12.361011Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896027852024016:2487] txid# 281474976710658 HANDLE EvClientConnected 2025-06-25T14:37:12.361487Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:37:12.364970Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896027852024016:2487] txid# 281474976710658 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710658} 2025-06-25T14:37:12.365036Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896027852024016:2487] txid# 281474976710658 SEND to# [59:7519896027852024015:2486] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 48} 2025-06-25T14:37:12.619926Z node 59 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:16.620788Z node 59 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[59:7519896023557056146:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:16.620860Z node 59 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:17.598257Z node 59 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jykraytjcf1xcc337qa6wng6", Request deadline has expired for 0.197908s seconds assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:30013 TBackTrace::Capture()+28 (0x1971395C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x19BD1770) NKikimr::NTxProxyUT::CreateLocalUser(NKikimr::NTxProxyUT::TTestEnv const&, TBasicString> const&, TBasicString> const&, TBasicString> const&)+2057 (0x192E9F69) NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase)+1859 (0x19300373) std::__y1::__bind_return, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase>, std::__y1::tuple, __is_valid_bind_return, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase>, std::__y1::tuple>::value>::type std::__y1::__bind const&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase const&>::operator()[abi:fe200000](NUnitTest::TTestContext&)+588 (0x1933F20C) std::__y1::__function::__func, void ()>::operator()()+280 (0x1932F258) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x19C08976) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19BD82F9) NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TCurrentTest::Execute()+1204 (0x1932E104) NUnitTest::TTestFactory::Execute()+2438 (0x19BD9BC6) NUnitTest::RunMain(int, char**)+5213 (0x19C02EED) ??+0 (0x7F03E3118D90) __libc_start_main+128 (0x7F03E3118E40) _start+41 (0x16BB6029) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-54 [FAIL] Test command err: Starting YDB, grpc: 12562, msgbus: 1935 2025-06-25T14:33:50.941071Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895159492364116:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:50.941110Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009b2/r3tmp/tmprD1J5j/pdisk_1.dat 2025-06-25T14:33:51.700011Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:51.735449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:51.735538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:51.759866Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12562, node 1 2025-06-25T14:33:52.061247Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:33:52.128924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:52.128944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:52.128951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:52.129075Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:1935 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:33:52.557728Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895163787331635:2126] Handle TEvNavigate describe path dc-1 2025-06-25T14:33:52.596901Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895168082299442:2449] HANDLE EvNavigateScheme dc-1 2025-06-25T14:33:52.597306Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519895168082299442:2449] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:52.683931Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519895168082299442:2449] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-25T14:33:52.700615Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519895168082299442:2449] Handle TEvDescribeSchemeResult Forward to# [1:7519895168082299441:2448] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 
72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:33:52.723385Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519895163787331635:2126] Handle TEvProposeTransaction 2025-06-25T14:33:52.723418Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519895163787331635:2126] TxId# 281474976710657 ProcessProposeTransaction 2025-06-25T14:33:52.723536Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519895163787331635:2126] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7519895168082299449:2455] 2025-06-25T14:33:52.848834Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519895168082299449:2455] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:33:52.848974Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519895168082299449:2455] txid# 281474976710657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:33:52.849017Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519895168082299449:2455] txid# 281474976710657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:33:52.849142Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519895168082299449:2455] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:33:52.853455Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519895168082299449:2455] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:52.853691Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519895168082299449:2455] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:33:52.853775Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519895168082299449:2455] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-06-25T14:33:52.853934Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519895168082299449:2455] txid# 281474976710657 HANDLE EvClientConnected 2025-06-25T14:33:52.875070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:33:52.881993Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519895168082299449:2455] txid# 281474976710657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710657} 2025-06-25T14:33:52.882048Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519895168082299449:2455] txid# 281474976710657 SEND to# [1:7519895168082299448:2454] Source {TEvProposeTransactionStatus txid# 281474976710657 Status# 53} 2025-06-25T14:33:52.962565Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519895163787331635:2126] Handle TEvProposeTransaction 2025-06-25T14:33:52.962593Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519895163787331635:2126] TxId# 281474976710658 ProcessProposeTransaction 2025-06-25T14:33:52.962627Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519895163787331635:2126] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519895168082299490:2492] 2025-06-25T14:33:52.965173Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519895168082299490:2492] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:33:52.965236Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519895168082299490:2492] txid# 281474976710658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:33:52.965253Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519895168082299490:2492] txid# 281474976710658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:33:52.965313Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519895168082299490:2492] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:33:52.965616Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519895168082299490:2492] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:52.965708Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519895168082299490:2492] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:33:52.965769Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519895168082299490:2492] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-25T14:33:52.965895Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519895168082299490:2492] txid# 281474976710658 HANDLE EvClientConnected 2025-06-25T14:33:52.966404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710 ... 
CastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:37:18.863214Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896054139521707:2439] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:37:18.863341Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896054139521707:2439] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:37:18.864030Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:37:18.869928Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896054139521707:2439] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:37:18.869988Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896054139521707:2439] txid# 281474976715657 SEND to# [59:7519896054139521704:2437] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 2025-06-25T14:37:18.908464Z node 59 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:37:18.909029Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896049844553744:2115] Handle TEvProposeTransaction 2025-06-25T14:37:18.909050Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896049844553744:2115] TxId# 281474976715658 ProcessProposeTransaction 2025-06-25T14:37:18.909086Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896049844553744:2115] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [59:7519896054139521754:2476] 2025-06-25T14:37:18.911936Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896054139521754:2476] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:37:18.912001Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896054139521754:2476] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:37:18.912023Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896054139521754:2476] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:37:18.912077Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896054139521754:2476] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:18.912409Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896054139521754:2476] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:18.912511Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896054139521754:2476] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# 
[OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:18.912560Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896054139521754:2476] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:37:18.912721Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896054139521754:2476] txid# 281474976715658 HANDLE EvClientConnected 2025-06-25T14:37:18.913166Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:37:18.916109Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896054139521754:2476] txid# 281474976715658 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715658} 2025-06-25T14:37:18.916162Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896054139521754:2476] txid# 281474976715658 SEND to# [59:7519896054139521753:2475] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 48} 2025-06-25T14:37:18.969265Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896049844553744:2115] Handle TEvProposeTransaction 2025-06-25T14:37:18.969292Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896049844553744:2115] TxId# 281474976715659 ProcessProposeTransaction 2025-06-25T14:37:18.969331Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896049844553744:2115] Cookie# 0 userReqId# "" txid# 281474976715659 SEND to# [59:7519896054139521772:2486] 2025-06-25T14:37:18.972036Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896054139521772:2486] txid# 281474976715659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\033\010\001\022\027\032\025cluster_admin@builtin\n#\010\000\022\037\010\001\020\200\200\002\032\025cluster_admin@builtin \000" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:43930" 2025-06-25T14:37:18.972109Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896054139521772:2486] txid# 281474976715659 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:37:18.972131Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896054139521772:2486] txid# 281474976715659 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:37:18.972187Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896054139521772:2486] txid# 281474976715659 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:18.972959Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896054139521772:2486] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:18.973073Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896054139521772:2486] HANDLE EvNavigateKeySetResult, txid# 
281474976715659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:18.973127Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896054139521772:2486] txid# 281474976715659 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715659 TabletId# 72057594046644480} 2025-06-25T14:37:18.973262Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896054139521772:2486] txid# 281474976715659 HANDLE EvClientConnected 2025-06-25T14:37:18.973802Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:37:18.976892Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896054139521772:2486] txid# 281474976715659 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715659} 2025-06-25T14:37:18.976942Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896054139521772:2486] txid# 281474976715659 SEND to# [59:7519896054139521771:2279] Source {TEvProposeTransactionStatus txid# 281474976715659 Status# 48} 2025-06-25T14:37:22.858347Z node 59 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[59:7519896049844553715:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:22.858451Z node 59 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:37:24.831213Z node 59 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jykrb58h7jz8bhbp5w9htx9f", Request deadline has expired for 0.838858s seconds assertion failed at ydb/core/tx/tx_proxy/schemereq_ut.cpp:256, void NKikimr::NTxProxyUT::CreateLocalUser(const TTestEnv &, const TString &, const TString &, const TString &): (sessionResult.IsSuccess())
: Error: GRpc error: (4): Deadline Exceeded
: Error: Grpc error response on endpoint localhost:8624 TBackTrace::Capture()+28 (0x1971395C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x19BD1770) NKikimr::NTxProxyUT::CreateLocalUser(NKikimr::NTxProxyUT::TTestEnv const&, TBasicString> const&, TBasicString> const&, TBasicString> const&)+2057 (0x192E9F69) NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::AlterLoginProtect_RootDB(NUnitTest::TTestContext&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase)+2799 (0x1930071F) std::__y1::__bind_return, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase>, std::__y1::tuple, __is_valid_bind_return, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase>, std::__y1::tuple>::value>::type std::__y1::__bind const&, NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TAlterLoginTestCase const&>::operator()[abi:fe200000](NUnitTest::TTestContext&)+588 (0x1933F20C) std::__y1::__function::__func, void ()>::operator()()+280 (0x1932F258) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x19C08976) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19BD82F9) NKikimr::NTxProxyUT::NTestSuiteSchemeReqAccess::TCurrentTest::Execute()+1204 (0x1932E104) NUnitTest::TTestFactory::Execute()+2438 (0x19BD9BC6) NUnitTest::RunMain(int, char**)+5213 (0x19C02EED) ??+0 (0x7FA610F7ED90) __libc_start_main+128 (0x7FA610F7EE40) _start+41 (0x16BB6029) |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-24 [GOOD] Test command err: Starting YDB, grpc: 63230, msgbus: 65336 2025-06-25T14:33:51.839099Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895166144066018:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:51.839219Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009a0/r3tmp/tmpcDkJ8M/pdisk_1.dat 2025-06-25T14:33:52.704857Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:52.807000Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:33:52.855492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:52.855590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:52.857214Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:33:52.889468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63230, node 1 2025-06-25T14:33:53.180901Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:53.180919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:53.180928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:53.181027Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65336 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:33:53.688511Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519895166144066038:2118] Handle TEvNavigate describe path dc-1 2025-06-25T14:33:53.766702Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519895174734001147:2446] HANDLE EvNavigateScheme dc-1 2025-06-25T14:33:53.767036Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519895174734001147:2446] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:53.801009Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519895174734001147:2446] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-25T14:33:53.815778Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519895174734001147:2446] Handle TEvDescribeSchemeResult Forward to# [1:7519895174734001146:2445] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 
PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:33:53.859200Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519895166144066038:2118] Handle TEvProposeTransaction 2025-06-25T14:33:53.859227Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519895166144066038:2118] TxId# 281474976710657 ProcessProposeTransaction 2025-06-25T14:33:53.859344Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519895166144066038:2118] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7519895174734001154:2452] 2025-06-25T14:33:54.008442Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519895174734001154:2452] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:33:54.008520Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519895174734001154:2452] txid# 281474976710657 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 0 2025-06-25T14:33:54.008538Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519895174734001154:2452] txid# 281474976710657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:33:54.008602Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519895174734001154:2452] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:33:54.008883Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519895174734001154:2452] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:54.008994Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519895174734001154:2452] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:33:54.009050Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519895174734001154:2452] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-06-25T14:33:54.009168Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519895174734001154:2452] txid# 281474976710657 HANDLE EvClientConnected 2025-06-25T14:33:54.009826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:33:54.014192Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519895174734001154:2452] txid# 281474976710657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710657} 2025-06-25T14:33:54.014249Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519895174734001154:2452] txid# 281474976710657 SEND to# [1:7519895174734001153:2451] Source {TEvProposeTransactionStatus txid# 281474976710657 Status# 53} waiting... 2025-06-25T14:33:54.056589Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519895166144066038:2118] Handle TEvProposeTransaction 2025-06-25T14:33:54.056613Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519895166144066038:2118] TxId# 281474976710658 ProcessProposeTransaction 2025-06-25T14:33:54.056647Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519895166144066038:2118] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519895179028968502:2493] 2025-06-25T14:33:54.058845Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519895179028968502:2493] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:33:54.058888Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519895179028968502:2493] txid# 281474976710658 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 0 2025-06-25T14:33:54.058901Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519895179028968502:2493] txid# 281474976710658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:33:54.058944Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519895179028968502:2493] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:33:54.059202Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519895179028968502:2493] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:33:54.059278Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519895179028968502:2493] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:33:54.059312Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519895179028968502:2493] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-25T14:33:54.059423Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519895179028968502:2493] txid# 281474976710658 H ... 
T14:37:27.725801Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896094117185333:2582] txid# 281474976710660 Status StatusAlreadyExists HANDLE {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976710660 Reason# Check failed: path: '/dc-1/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)} 2025-06-25T14:37:27.725967Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519896094117185333:2582] txid# 281474976710660, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:37:27.726003Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896094117185333:2582] txid# 281474976710660 SEND to# [59:7519896094117185249:2303] Source {TEvProposeTransactionStatus txid# 281474976710660 Status# 48} 2025-06-25T14:37:27.754642Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896072642347983:2115] Handle TEvProposeTransaction 2025-06-25T14:37:27.754675Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896072642347983:2115] TxId# 281474976710661 ProcessProposeTransaction 2025-06-25T14:37:27.754723Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896072642347983:2115] Cookie# 0 userReqId# "" txid# 281474976710661 SEND to# [59:7519896094117185357:2594] 2025-06-25T14:37:27.757455Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896094117185357:2594] txid# 281474976710661 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "ordinaryuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:56230" 2025-06-25T14:37:27.757548Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896094117185357:2594] txid# 281474976710661 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:27.757575Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896094117185357:2594] txid# 281474976710661 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:37:27.757633Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896094117185357:2594] txid# 281474976710661 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:27.757991Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896094117185357:2594] txid# 281474976710661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:27.758142Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896094117185357:2594] HANDLE EvNavigateKeySetResult, txid# 281474976710661 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:27.758206Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896094117185357:2594] txid# 281474976710661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 
281474976710661 TabletId# 72057594046644480} 2025-06-25T14:37:27.758384Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896094117185357:2594] txid# 281474976710661 HANDLE EvClientConnected 2025-06-25T14:37:27.769146Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896094117185357:2594] txid# 281474976710661 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710661} 2025-06-25T14:37:27.769218Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896094117185357:2594] txid# 281474976710661 SEND to# [59:7519896094117185356:2294] Source {TEvProposeTransactionStatus txid# 281474976710661 Status# 48} 2025-06-25T14:37:27.875993Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896072642347983:2115] Handle TEvProposeTransaction 2025-06-25T14:37:27.876030Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896072642347983:2115] TxId# 281474976710662 ProcessProposeTransaction 2025-06-25T14:37:27.876106Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896072642347983:2115] Cookie# 0 userReqId# "" txid# 281474976710662 SEND to# [59:7519896094117185379:2610] 2025-06-25T14:37:27.879080Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896094117185379:2610] txid# 281474976710662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\022\010\001\022\016\032\014ordinaryuser\n\032\010\000\022\026\010\001\020\200\200\002\032\014ordinaryuser \000" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:56236" 2025-06-25T14:37:27.879177Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896094117185379:2610] txid# 281474976710662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:27.879202Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896094117185379:2610] txid# 281474976710662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:37:27.879269Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896094117185379:2610] txid# 281474976710662 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:27.879694Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896094117185379:2610] txid# 281474976710662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:27.879825Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519896094117185379:2610] HANDLE EvNavigateKeySetResult, txid# 281474976710662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:37:27.879906Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519896094117185379:2610] txid# 281474976710662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710662 TabletId# 72057594046644480} 2025-06-25T14:37:27.880134Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519896094117185379:2610] txid# 281474976710662 HANDLE EvClientConnected 2025-06-25T14:37:27.880766Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpModifyACL, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:37:27.884611Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519896094117185379:2610] txid# 281474976710662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710662} 2025-06-25T14:37:27.884673Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896094117185379:2610] txid# 281474976710662 SEND to# [59:7519896094117185378:2309] Source {TEvProposeTransactionStatus txid# 281474976710662 Status# 48} 2025-06-25T14:37:27.980837Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519896072642347983:2115] Handle TEvProposeTransaction 2025-06-25T14:37:27.980871Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519896072642347983:2115] TxId# 281474976710663 ProcessProposeTransaction 2025-06-25T14:37:27.980916Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519896072642347983:2115] Cookie# 0 userReqId# "" txid# 281474976710663 SEND to# [59:7519896094117185427:2637] 2025-06-25T14:37:27.983550Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519896094117185427:2637] txid# 281474976710663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014ordinaryuser\022\030\022\026\n\024all-users@well-known\032\334\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDkwNTQ0NywiaWF0IjoxNzUwODYyMjQ3LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.lIcJs0OsI5-arnEheoRUgBltDlknazBWteADHf5Ig0-GCRJ0d_sjaUNv71u2aKDJRDvzOMhhepG5QKSrCvQ5LXFz5Tg_HTlWU6A3JhcE9S3Z8AQ1-oPRCeq7iFPvwSTcwdFm2cdy_AHmOdAX2YLF_JFkrFz5jAZa6ib531DGYImU-2nKA81bXBFMkazSHj3mfX9wmnVK9-Ft7MPGdf57CFgre0EgEpMzhXA9punJxSwDGwrPgkZil0wtS7TAuBN6pTT0ZYaut0FOvWmFUFAHmQ2AnKmsJAt5jHJJaQg2Xp4WW-hjpnvA-FyjFtv9f61lg8eP92JvpXjcikQoZ-9xVw\"\005Login*\210\001eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDkwNTQ0NywiaWF0IjoxNzUwODYyMjQ3LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.**" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:56268" 2025-06-25T14:37:27.983610Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519896094117185427:2637] txid# 281474976710663 Bootstrap, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:37:27.983631Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519896094117185427:2637] txid# 281474976710663 Bootstrap, UserSID: ordinaryuser IsClusterAdministrator: 0 2025-06-25T14:37:27.983774Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519896094117185427:2637] txid# 281474976710663 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-25T14:37:27.983813Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519896094117185427:2637] txid# 281474976710663 HandleResolveDatabase, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-25T14:37:27.983855Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519896094117185427:2637] txid# 281474976710663 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:37:27.984104Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519896094117185427:2637] 
txid# 281474976710663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:37:27.984132Z node 59 :TX_PROXY ERROR: schemereq.cpp:1103: Actor# [59:7519896094117185427:2637] txid# 281474976710663, Access denied for ordinaryuser, attempt to manage user 2025-06-25T14:37:27.984216Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519896094117185427:2637] txid# 281474976710663, issues: { message: "Access denied for ordinaryuser" issue_code: 200000 severity: 1 } 2025-06-25T14:37:27.984248Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519896094117185427:2637] txid# 281474976710663 SEND to# [59:7519896094117185426:2315] Source {TEvProposeTransactionStatus Status# 5} 2025-06-25T14:37:27.986161Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=YzczYmYxYzAtNjRkYzQ4N2MtMTU1OWNiMjMtMmU4Zjk5MTM=, ActorId: [59:7519896094117185406:2315], ActorState: ExecuteState, TraceId: 01jykrbe0mevpvq5a8mmyqh3nc, Create QueryResponse for error on request, msg: 2025-06-25T14:37:27.986622Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519896072642347983:2115] Handle TEvExecuteKqpTransaction 2025-06-25T14:37:27.986642Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519896072642347983:2115] TxId# 281474976710664 ProcessProposeKqpTransaction |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TBSV::CleanupDroppedVolumesOnRestart |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest |82.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TRtmrTest::CreateWithoutTimeCastBuckets >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterDrop >> KqpPg::CheckPgAutoParams+useSink [GOOD] >> KqpPg::CheckPgAutoParams-useSink >> KqpWorkloadService::TestQueueSizeSimple >> ResourcePoolsDdl::TestCreateResourcePool >> ResourcePoolClassifiersDdl::TestResourcePoolClassifiersPermissions >> KqpPg::InsertNoTargetColumns_NotOneSize+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_NotOneSize-useSink >> KqpWorkloadServiceTables::TestTablesIsNotCreatingForUnlimitedPool |82.3%| [TA] $(B)/ydb/core/kqp/ut/effects/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpWorkloadServiceActors::TestPoolFetcher >> DataShardReadTableSnapshots::ReadTableSnapshot >> DataShardReadTableSnapshots::ReadTableDropColumnLatePropose |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> KqpWorkloadServiceDistributed::TestDistributedQueue >> KqpPg::SelectIndex+useSink [GOOD] >> KqpPg::SelectIndex-useSink |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TBSV::CleanupDroppedVolumesOnRestart [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistWithoutColumns >> TMiniKQLProtoTestYdb::TestExportVoidTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportUuidTypeYdb >> TMiniKQLEngineFlatTest::TestSelectRowWithoutColumnsNotExists >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistWithoutColumns [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistSetPayload >> TMiniKQLProtoTestYdb::TestExportUuidTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportTupleTypeYdb >> TMiniKQLEngineFlatTest::TestSelectRowWithoutColumnsNotExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowWithoutColumnsExists >> TMiniKQLEngineFlatTest::TestEmptyProgram [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRow >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistSetPayload [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistSetPayloadNullValue [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistErasePayload >> TMiniKQLProtoTestYdb::TestExportTupleTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportStructTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantTupleTypeYdb >> TMiniKQLEngineFlatTest::TestSelectRowWithoutColumnsExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowPayload [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowPayloadNullKey >> TMiniKQLEngineFlatTest::TestEraseRow [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowNullKey >> TMiniKQLEngineFlatTest::TestUpdateRowNotExistErasePayload [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowExistChangePayload >> TMiniKQLProtoTestYdb::TestExportVariantTupleTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantStructTypeYdb >> TMiniKQLEngineFlatTest::TestSelectRowPayloadNullKey [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeToInclusive >> TMiniKQLEngineFlatTest::TestEraseRowNullKey [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowManyShards [GOOD] >> TMiniKQLEngineFlatTest::TestCASBoth2Success >> TMiniKQLEngineFlatTest::TestUpdateRowExistChangePayload [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowExistErasePayload ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::CleanupDroppedVolumesOnRestart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:38:49.511644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:38:49.511787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:49.511828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:38:49.511885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:38:49.511943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:38:49.511973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:38:49.512089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:49.512162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:38:49.512972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:38:49.528996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:38:49.743401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:38:49.743475Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:49.784949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:38:49.785525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:38:49.785724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:38:49.811453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:38:49.811940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:38:49.829695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:49.838597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:38:49.903005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:49.912001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:38:49.976801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:49.976912Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:49.977099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:38:49.977155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:49.977237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:38:49.977333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:38:49.985927Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:38:50.164741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:38:50.182839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.190365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:38:50.190473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:38:50.198874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:38:50.199052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:50.211751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:50.220103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:38:50.220376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.220487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-06-25T14:38:50.220528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:38:50.220575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:38:50.222737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.222805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:38:50.222847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:38:50.229874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.229938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.230010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:50.230064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:38:50.241936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:38:50.243989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:38:50.252807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:38:50.254383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:50.254564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:50.254639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:50.263600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:38:50.263705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:50.263894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:50.263978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:38:50.270126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:50.270192Z node 1 :FLAT_TX_SCHEMESHARD ... sistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:408:2382] sender: [1:475:2058] recipient: [1:15:2062] 2025-06-25T14:38:50.912418Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:38:50.912643Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/BSVolume" took 271us result status StatusPathDoesNotExist 2025-06-25T14:38:50.912828Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:38:50.913828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:408:2382] sender: [1:476:2058] recipient: [1:106:2139] Leader for TabletID 72057594046678944 is [1:408:2382] sender: [1:479:2058] recipient: [1:478:2435] Leader for TabletID 72057594046678944 is [1:408:2382] sender: [1:480:2058] recipient: [1:15:2062] Leader for TabletID 72057594046678944 is [1:481:2436] sender: [1:482:2058] recipient: [1:478:2435] 2025-06-25T14:38:50.953287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:38:50.953389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:50.953434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: 
StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:38:50.953470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:38:50.953511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:38:50.953562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:38:50.953660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:50.953734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:38:50.954517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:38:50.954798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:38:50.967859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:38:50.969037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:38:50.969210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:38:50.969346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:38:50.969380Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:50.969645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:38:50.970237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:50.970342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.970392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.970791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.970871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:38:50.971031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.971088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.971260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.971354Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.971439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.971565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.971788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.971898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.972206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.972256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.972469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.972559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.972614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.972761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.972837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.972965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.973120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.973203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.973355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.973406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.973470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:38:50.977876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:38:50.980032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:50.980094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:50.980272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:38:50.980339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:50.980397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:38:50.980572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:481:2436] sender: [1:540:2058] recipient: [1:15:2062] 2025-06-25T14:38:51.016170Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:38:51.016438Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/BSVolume" took 292us result status StatusPathDoesNotExist 2025-06-25T14:38:51.016619Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TMiniKQLProtoTestYdb::TestExportVariantStructTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportVoidYdb >> TMiniKQLEngineFlatTest::TestSelectRangeToInclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowManyShards [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRowNoShards >> TMiniKQLEngineFlatTest::TestCASBoth2Success [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowNoShards >> TMiniKQLEngineFlatTest::TestUpdateRowExistErasePayload [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowManyShards [GOOD] >> TMiniKQLEngineFlatTest::TestUpdateRowNoShards >> TMiniKQLProtoTestYdb::TestExportVoidYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportStringYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportUuidYdb >> TMiniKQLEngineFlatTest::TestSelectRowNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitions >> TMiniKQLEngineFlatTest::TestEraseRowNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestDiagnostics >> TMiniKQLEngineFlatTest::TestUpdateRowNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestTopSortPushdownPk >> TMiniKQLProtoTestYdb::TestExportUuidYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportTupleYdb >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitions [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByItems [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByBytes >> KqpWorkloadServiceActors::TestPoolFetcher [GOOD] >> 
KqpWorkloadServiceActors::TestPoolFetcherAclValidation >> TMiniKQLEngineFlatTest::TestDiagnostics [GOOD] >> TMiniKQLEngineFlatTest::TestCombineByKeyPushdown >> TMiniKQLEngineFlatTest::TestTopSortPushdownPk [GOOD] >> TMiniKQLEngineFlatTest::TestTopSortPushdown |82.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/graph/ut/ydb-core-graph-ut >> TMiniKQLProtoTestYdb::TestExportTupleYdb [GOOD] |82.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/graph/ut/ydb-core-graph-ut >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterDrop [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeWithPartitionsTruncatedByBytes [GOOD] >> TRtmrTest::CreateWithoutTimeCastBuckets [GOOD] >> TMiniKQLEngineFlatTest::TestCombineByKeyPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestTopSortPushdown [GOOD] >> TMiniKQLProtoTestYdb::TestExportStructYdb [GOOD] >> KqpWorkloadServiceTables::TestTablesIsNotCreatingForUnlimitedPool [GOOD] >> KqpWorkloadServiceTables::TestCreateWorkloadSerivceTables >> TMiniKQLEngineFlatTest::TestSomePushDown >> TMiniKQLProtoTestYdb::TestExportVariantYdb >> TMiniKQLEngineFlatTest::TestCombineByKeyNoPushdown >> TMiniKQLProgramBuilderTest::TestEraseRowDynamicKey >> ResourcePoolClassifiersDdl::TestCreateResourcePoolClassifier >> TMiniKQLEngineFlatTest::TestSomePushDown [GOOD] >> TMiniKQLProtoTestYdb::TestExportVariantYdb [GOOD] >> TMiniKQLProgramBuilderTest::TestEraseRowDynamicKey [GOOD] >> TMiniKQLEngineFlatTest::TestCombineByKeyNoPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestTakePushdown [GOOD] >> TMiniKQLEngineFlatTest::TestTopSortNonImmediatePushdown >> TMiniKQLProgramBuilderTest::TestAcquireLocks [GOOD] >> TMiniKQLProgramBuilderTest::TestDiagnostics >> TMiniKQLEngineFlatTest::TestLengthPushdown [GOOD] >> TMiniKQLProgramBuilderTest::TestDiagnostics [GOOD] >> TMiniKQLEngineFlatTest::TestInternalResult >> TMiniKQLEngineFlatTest::TestTopSortNonImmediatePushdown [GOOD] >> TMiniKQLEngineFlatTest::TestInternalResult [GOOD] >> TMiniKQLEngineFlatTest::TestIndependentSelects >> TMiniKQLEngineFlatTest::TestIndependentSelects [GOOD] >> TMiniKQLEngineFlatTest::TestCrossTableRs >> TMiniKQLEngineFlatTest::TestCrossTableRs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TRtmrTest::CreateWithoutTimeCastBuckets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:38:50.893146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:38:50.893239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:50.893273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:38:50.893331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:38:50.893401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:38:50.893443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:38:50.893500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:38:50.893572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:38:50.894290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:38:50.908155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:38:51.320628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:38:51.320707Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:51.451076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:38:51.451754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:38:51.477292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:38:51.521505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:38:51.521882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:38:51.556676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:51.575156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:38:51.603987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:51.604221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:38:51.657737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:51.657840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:51.658025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:38:51.658081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:51.658128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:38:51.667467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:38:51.674673Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:38:51.816326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:38:51.861647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:51.883111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:38:51.883208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:38:51.883497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:38:51.883616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:51.889430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:51.912034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:38:51.921502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:51.921596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:38:51.921640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:38:51.921686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:38:51.924077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:51.924160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:38:51.924202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:38:51.926135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:51.926180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:38:51.936519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:51.936672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:38:51.960173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:38:51.963529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:38:51.963770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:38:51.964808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:51.964964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:51.965014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:51.983781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:38:51.983899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:38:52.000847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:52.001016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:38:52.003767Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:52.003826Z node 1 :FLAT_TX_SCHEMESHARD ... d propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 100 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:38:52.134418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 100:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:100 msg type: 269090816 2025-06-25T14:38:52.134523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 100, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 100 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000002 2025-06-25T14:38:52.134747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:38:52.134846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 100 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:38:52.134902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_rtmr.cpp:130: TCreateRTMR TPropose, operationId: 100:0 HandleReply TEvOperationPlan, at schemeshard: 72057594046678944 2025-06-25T14:38:52.134989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 100:0 128 -> 240 2025-06-25T14:38:52.135142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:38:52.135189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:38:52.136649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:38:52.136681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:38:52.136830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:38:52.136916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:38:52.136945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-25T14:38:52.136981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: 
[1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 2 FAKE_COORDINATOR: Erasing txId 100 2025-06-25T14:38:52.137276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-25T14:38:52.137310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-25T14:38:52.137385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:38:52.137417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:38:52.137444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:38:52.137484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:38:52.137511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-25T14:38:52.137544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:38:52.137576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 100:0 2025-06-25T14:38:52.137599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 100:0 2025-06-25T14:38:52.137654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:38:52.137686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-25T14:38:52.137711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:38:52.137730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-25T14:38:52.138282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:38:52.138345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:38:52.138369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:38:52.138397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:38:52.138438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, 
LocalPathId: 1] was 2 2025-06-25T14:38:52.138957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:38:52.139036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:38:52.139079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:38:52.139108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-25T14:38:52.139138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:38:52.139207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-25T14:38:52.142985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-25T14:38:52.143191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-25T14:38:52.160406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-25T14:38:52.160505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-25T14:38:52.160943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:38:52.161033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:38:52.161060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:316:2305] TestWaitNotification: OK eventTxId 100 2025-06-25T14:38:52.161567Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/rtmr1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:38:52.161735Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/rtmr1" took 226us result status StatusSuccess 2025-06-25T14:38:52.161974Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/rtmr1" PathDescription { Self { Name: "rtmr1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeRtmrVolume CreateFinished: 
true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 RTMRVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } RtmrVolumeDescription { Name: "rtmr1" PathId: 2 PartitionsCount: 0 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |82.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestExportVariantYdb [GOOD] |82.4%| [LD] {RESULT} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |82.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |82.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestTopSortNonImmediatePushdown [GOOD] |82.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel |82.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProgramBuilderTest::TestDiagnostics [GOOD] |82.4%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |82.4%| [LD] {RESULT} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel ------- [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestCrossTableRs [GOOD] Test command err: SetProgram (370): ydb/core/engine/mkql_engine_flat.cpp:183: ExtractResultType(): requirement !label.StartsWith(TxInternalResultPrefix) failed. Label can't be used in SetResult as it's reserved for internal purposes: __cantuse PrepareShardPrograms (491): too many shard readsets (1 > 0), src tables: [200:301:0], dst tables: [200:302:0] Type { Kind: Struct } >> KqpPg::NoSelectFullScan [GOOD] >> TMiniKQLProgramBuilderTest::TestEraseRowStaticKey >> KqpPg::LongDomainName >> TMiniKQLProgramBuilderTest::TestEraseRowStaticKey [GOOD] >> TMiniKQLProgramBuilderTest::TestEraseRowPartialDynamicKey |82.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_rtmr/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.4%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/effects/test-results/unittest/{meta.json ... results_accumulator.log} |82.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr/test-results/unittest/{meta.json ... results_accumulator.log} >> TMiniKQLProgramBuilderTest::TestEraseRowPartialDynamicKey [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectRow [GOOD] >> TMiniKQLProgramBuilderTest::TestUpdateRowDynamicKey >> TMiniKQLProgramBuilderTest::TestUpdateRowDynamicKey [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectFromInclusiveRange >> TMiniKQLProgramBuilderTest::TestSelectFromInclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectFromExclusiveRange >> DataShardReadTableSnapshots::ReadTableSnapshot [GOOD] >> DataShardReadTableSnapshots::ReadTableDropColumnLatePropose [GOOD] >> DataShardReadTableSnapshots::ReadTableMaxRows >> DataShardReadTableSnapshots::ReadTableSplitAfter >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunning [GOOD] >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunningExtSubdomain >> TMiniKQLProgramBuilderTest::TestSelectFromExclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectToInclusiveRange >> TMiniKQLProgramBuilderTest::TestSelectToInclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectToExclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectBothFromInclusiveToInclusiveRange >> TMiniKQLProgramBuilderTest::TestSelectBothFromInclusiveToInclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestSelectBothFromExclusiveToExclusiveRange >> TMiniKQLProgramBuilderTest::TestSelectBothFromExclusiveToExclusiveRange [GOOD] >> TMiniKQLProgramBuilderTest::TestInvalidParameterName >> ResourcePoolsDdl::TestCreateResourcePool [GOOD] >> ResourcePoolsDdl::TestCreateResourcePoolOnServerless >> TMiniKQLProgramBuilderTest::TestInvalidParameterName [GOOD] >> TMiniKQLProgramBuilderTest::TestInvalidParameterType [GOOD] >> TMiniKQLEngineFlatTest::TestPureProgram >> KqpWorkloadService::TestQueueSizeSimple [GOOD] >> KqpWorkloadService::TestQueueSizeManyQueries >> TMiniKQLProgramBuilderTest::TestUpdateRowStaticKey [GOOD] >> TMiniKQLProtoTestYdb::TestExportDataTypeYdb >> TMiniKQLEngineFlatTest::TestPureProgram [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFullExists [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFromInclusive >> TMiniKQLProtoTestYdb::TestExportDataTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalTypeYdb >> TMiniKQLEngineFlatTest::TestSelectRangeFromInclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeFromExclusive >> TMiniKQLProtoTestYdb::TestExportDecimalTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDictTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportBoolYdb >> TMiniKQLEngineFlatTest::TestSelectRangeFromExclusive [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromIncTo [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothExcFromIncTo >> TMiniKQLProtoTestYdb::TestExportBoolYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDoubleYdb >> TMiniKQLEngineFlatTest::TestSelectRangeBothExcFromIncTo [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromExcTo >> KqpPg::InsertNoTargetColumns_NotOneSize-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Alter+useSink >> TMiniKQLProtoTestYdb::TestExportDoubleYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalNegativeYdb >> TMiniKQLEngineFlatTest::TestSelectRangeBothIncFromExcTo [GOOD] >> TMiniKQLEngineFlatTest::TestSelectRangeBothExcFromExcTo |82.5%| [TM] {asan, default-linux-x86_64, 
release} ydb/core/engine/ut/unittest >> TMiniKQLProgramBuilderTest::TestInvalidParameterType [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalNegativeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDecimalHugeYdb >> TMiniKQLEngineFlatTest::TestSelectRangeBothExcFromExcTo [GOOD] >> TMiniKQLEngineFlatTest::TestMapsPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestNoOrderedTakePushdown >> TMiniKQLProtoTestYdb::TestExportDecimalHugeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportEmptyOptionalOptionalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportDictYdb >> TMiniKQLEngineFlatTest::TestNoOrderedTakePushdown [GOOD] >> TMiniKQLEngineFlatTest::TestNoAggregatedPushdown >> TMiniKQLProtoTestYdb::TestExportDictYdb [GOOD] >> TMiniKQLProtoTestYdb::TestCellsFromTuple [GOOD] >> TMiniKQLEngineFlatTest::TestNoAggregatedPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestNoPartialSortPushdown >> TMiniKQLEngineFlatTest::TestNoPartialSortPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestMultiRSPerDestination [GOOD] >> KqpWorkloadServiceActors::TestPoolFetcherAclValidation [GOOD] >> KqpWorkloadServiceActors::TestPoolFetcherNotExistingPool >> KqpPg::SelectIndex-useSink [GOOD] >> KqpPg::TableDeleteAllData+useSink |82.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestCellsFromTuple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestMultiRSPerDestination [GOOD] Test command err: PrepareShardPrograms (491): too many shard readsets (2 > 1), src tables: [200:301:0], dst tables: [200:301:0] Type { Kind: Struct } |82.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |82.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export >> ResourcePoolClassifiersDdl::TestResourcePoolClassifiersPermissions [GOOD] >> ResourcePoolClassifiersDdl::TestResourcePoolClassifierRanks |82.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |82.5%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction >> YdbIndexTable::OnlineBuildWithDataColumn [GOOD] >> HttpRequest::Analyze [GOOD] >> DataShardReadTableSnapshots::ReadTableSplitAfter [GOOD] >> DataShardReadTableSnapshots::ReadTableMaxRows [GOOD] >> KqpWorkloadServiceActors::TestPoolFetcherNotExistingPool [GOOD] >> KqpWorkloadServiceActors::TestDefaultPoolUsePermissions >> KqpPg::LongDomainName [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::OnlineBuildWithDataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 26487, MsgBus: 17060 2025-06-25T14:33:35.380879Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895098129216410:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:35.385429Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00166c/r3tmp/tmpUh7NMp/pdisk_1.dat 2025-06-25T14:33:35.858175Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:33:35.858265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:33:35.866230Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:33:35.879003Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:33:35.879161Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895098129216381:2080] 1750862015358953 != 1750862015358956 TServer::EnableGrpc on GrpcPort 26487, node 1 2025-06-25T14:33:35.980779Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:33:35.980797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:33:35.980805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:33:35.980899Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17060 2025-06-25T14:33:36.382963Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17060 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:33:36.727541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:33:36.755702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:33:36.923065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:37.080174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:37.151867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:33:38.923936Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895111014119917:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:38.924097Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:39.471705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:39.542722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:39.598573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:39.658973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:39.760855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:39.844239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:39.934522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:40.094202Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895119604055174:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:40.094284Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:40.094676Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895119604055179:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:33:40.099483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:33:40.135180Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895119604055181:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:33:40.238659Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895119604055232:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:33:40.387072Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895098129216410:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:33:40.387138Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:33:41.651540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:33:42.485294Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { ... Ctx: { TraceId: 01jykre2cfcjz20rwytsjaw0q4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.360348Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720637. Ctx: { TraceId: 01jykre2cfcjz20rwytsjaw0q4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.390700Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720638. Ctx: { TraceId: 01jykre2dhffbs8wna57bjtf7t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.395959Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720639. Ctx: { TraceId: 01jykre2dhffbs8wna57bjtf7t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.426441Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720640. Ctx: { TraceId: 01jykre2ep13yh7cafbzc492kg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.464562Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720641. Ctx: { TraceId: 01jykre2ep13yh7cafbzc492kg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.497123Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720642. 
Ctx: { TraceId: 01jykre2gw38d2b68mjy9e0rnx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.501982Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720643. Ctx: { TraceId: 01jykre2gw38d2b68mjy9e0rnx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.531546Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720644. Ctx: { TraceId: 01jykre2hycyqcwvryp2626tj9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.536821Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720645. Ctx: { TraceId: 01jykre2hycyqcwvryp2626tj9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.560712Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720646. Ctx: { TraceId: 01jykre2jwdkakgf7yv194f71f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.566921Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720647. Ctx: { TraceId: 01jykre2jwdkakgf7yv194f71f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.606965Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720648. Ctx: { TraceId: 01jykre2mbcxa32s7v2dw04gnh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.611377Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720649. Ctx: { TraceId: 01jykre2mbcxa32s7v2dw04gnh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.640119Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720650. Ctx: { TraceId: 01jykre2nc3yxcrjjgpg9pag9g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.644626Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720651. Ctx: { TraceId: 01jykre2nc3yxcrjjgpg9pag9g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.673953Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720652. 
Ctx: { TraceId: 01jykre2pd04tnmpkbjj2f646c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.679744Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720653. Ctx: { TraceId: 01jykre2pd04tnmpkbjj2f646c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.712647Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720654. Ctx: { TraceId: 01jykre2qm9hfat9mvpyyn0awt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.719322Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720655. Ctx: { TraceId: 01jykre2qm9hfat9mvpyyn0awt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.800544Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720656. Ctx: { TraceId: 01jykre2t65yp844jtdw1ne6aj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.808758Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720657. Ctx: { TraceId: 01jykre2t65yp844jtdw1ne6aj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.844873Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720658. Ctx: { TraceId: 01jykre2vp8wxm1wnfpwzh0wxa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.850745Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720659. Ctx: { TraceId: 01jykre2vp8wxm1wnfpwzh0wxa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.878945Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720660. Ctx: { TraceId: 01jykre2wt3nqvbybd7x09anb9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.885940Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720661. Ctx: { TraceId: 01jykre2wt3nqvbybd7x09anb9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.922937Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720662. 
Ctx: { TraceId: 01jykre2y6ceh2kaxxeak0ska2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.955129Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720663. Ctx: { TraceId: 01jykre2y6ceh2kaxxeak0ska2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:54.990182Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720664. Ctx: { TraceId: 01jykre309a1dj2e296k0jysbx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:55.003566Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720665. Ctx: { TraceId: 01jykre309a1dj2e296k0jysbx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:55.040590Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720666. Ctx: { TraceId: 01jykre31weq62xv4tmcpd3w0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:55.047191Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720667. Ctx: { TraceId: 01jykre31weq62xv4tmcpd3w0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmI4MGU2MzctNDU2NDZkMTAtZDY0ZmUyM2EtYTNlNThmODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:55.081061Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720668. Ctx: { TraceId: 01jykre33429k2dcqhe4szb1zw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:38:55.088204Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976720669. Ctx: { TraceId: 01jykre33429k2dcqhe4szb1zw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc1YTA3ZC03OTZiN2UwYi0xYmU4MWE1NS1kOWNhZmI3Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::Analyze [GOOD] Test command err: 2025-06-25T14:38:38.474629Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:38.475004Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:38.475085Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001cf5/r3tmp/tmpT8WZEJ/pdisk_1.dat 2025-06-25T14:38:38.790585Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11390, node 1 2025-06-25T14:38:39.008628Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:39.008688Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:39.008722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:39.009089Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:39.011196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:39.110509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:39.110684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:39.126713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2029 2025-06-25T14:38:39.700732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:42.717429Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:42.746314Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:42.746420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:42.784130Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:42.785588Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:42.972603Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:42.996028Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.996568Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.997053Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.997268Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.997360Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.997447Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.997537Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.997626Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.997683Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:43.168402Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:43.168500Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:43.180947Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:43.341334Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:43.382490Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:43.382557Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:43.404352Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:43.405959Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:43.406226Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:43.406286Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:43.406351Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:43.406420Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:43.406473Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:43.406522Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:43.407050Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:43.436976Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:43.437114Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1788:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:43.442348Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2571] 2025-06-25T14:38:43.446097Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1831:2586] 2025-06-25T14:38:43.446556Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1831:2586], schemeshard id = 72075186224037897 2025-06-25T14:38:43.450562Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:38:43.463834Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:43.463880Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:43.463952Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:38:43.474576Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:43.479423Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:43.479523Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:43.640909Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:43.773378Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:43.851838Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:38:44.482303Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:44.725428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2142:3018], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:44.725584Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:44.747035Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:38:45.037082Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2277:2830];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:38:45.037329Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2277:2830];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:38:45.037626Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2277:2830];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:38:45.037770Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2277:2830];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:38:45.037918Z node 2 :TX_COLUMNSHARD WARN: ... ogressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:38:48.012006Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:38:48.012282Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:38:48.012792Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:38:48.013095Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:38:48.013448Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:38:48.014309Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T14:38:48.878765Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3706:3175], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:48.878888Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:48.897472Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-25T14:38:48.952780Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:38:48.953449Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:38:48.955664Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:38:48.956347Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:38:48.956963Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:38:48.957495Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:38:48.958277Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:38:48.958902Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:38:48.959450Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:38:48.960652Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:38:49.713236Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3868:3221], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:49.713412Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:49.735326Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715662:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-25T14:38:49.797136Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:49.797978Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:49.799003Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:49.799562Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:49.800139Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:49.800751Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:49.801292Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:49.802628Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:49.803344Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:49.804441Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; waiting actualization: 0/0.000019s 2025-06-25T14:38:57.521300Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:5743:5374] 2025-06-25T14:38:57.526318Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:5740:3811] , Record { OperationId: "\000\000\000\000\034\030\t\tz^\343\345.\270\267\312" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } } 2025-06-25T14:38:57.526385Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:47: [72075186224037894] TTxAnalyze::Execute. Create new force traversal operation, OperationId=  z^. 
2025-06-25T14:38:57.526417Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:65: [72075186224037894] TTxAnalyze::Execute. Create new force traversal table, OperationId=  z^. , PathId [OwnerId: 72075186224037897, LocalPathId: 4] Answer: 'Analyze sent. OperationId: 000000070r144qmqq3wmqbhdya' FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::ReadTableMaxRows [GOOD] Test command err: 2025-06-25T14:38:51.846973Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:51.847114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:51.847164Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a9e/r3tmp/tmpcRgYWC/pdisk_1.dat 2025-06-25T14:38:52.151472Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:38:52.154711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:52.193412Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:52.201913Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862329217077 != 1750862329217081 2025-06-25T14:38:52.246339Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:38:52.247292Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:38:52.247814Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:52.247925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:52.259327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:52.337290Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T14:38:52.337360Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:38:52.337511Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T14:38:52.467417Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T14:38:52.467550Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:38:52.468114Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:38:52.468194Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:38:52.468529Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:38:52.468717Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:38:52.468861Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:38:52.469161Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:38:52.470456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:52.471506Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:38:52.471570Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T14:38:52.501537Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:38:52.502610Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:38:52.503080Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:38:52.503327Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:38:52.548555Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:38:52.549214Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:38:52.549325Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:38:52.551024Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:38:52.551096Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:38:52.551146Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:38:52.551491Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T14:38:52.551619Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:38:52.551746Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:38:52.562428Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:38:52.587887Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:38:52.588124Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:38:52.588256Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:38:52.588297Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:38:52.588348Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:38:52.588391Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:38:52.588631Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:38:52.588706Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:38:52.589060Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:38:52.589161Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:38:52.589242Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:38:52.589299Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:38:52.589375Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:38:52.589432Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:38:52.589460Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:38:52.589493Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:38:52.589533Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:38:52.589916Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:38:52.589956Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:38:52.589999Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], 
sessionId# [0:0:0] 2025-06-25T14:38:52.590083Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:38:52.590125Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:38:52.590248Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:38:52.590464Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:38:52.590516Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:38:52.590608Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:38:52.590678Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14: ... 224037890 2025-06-25T14:38:58.489245Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:38:58.489894Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:953:2752], Recipient [2:927:2728]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715663 ShardId: 72075186224037890 2025-06-25T14:38:58.489938Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:927:2728] TxId# 281474976715662] Received TEvStreamQuotaRequest from ShardId# 72075186224037890 2025-06-25T14:38:58.489987Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:927:2728] TxId# 281474976715662] Reserving quota 1 messages for ShardId# 72075186224037890 ... 
observed row limit of 2 rows at [2:953:2752] 2025-06-25T14:38:58.490080Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715663, MessageQuota: 1 2025-06-25T14:38:58.490417Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:953:2752], Recipient [2:842:2664]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-25T14:38:58.490462Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-25T14:38:58.490644Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715663, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:38:58.490877Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:953:2752], Recipient [2:927:2728]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: RESPONSE_DATA TxId: 281474976715663 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\004\000\000\000b\005\035,\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 1 DataLastKey: "\001\000\004\000\000\000\004\000\000\000" 2025-06-25T14:38:58.490919Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:927:2728] TxId# 281474976715662] Received stream data from ShardId# 72075186224037890 2025-06-25T14:38:58.490951Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:927:2728] TxId# 281474976715662] Sending TEvStreamDataAck to [2:953:2752] ShardId# 72075186224037890 2025-06-25T14:38:58.491023Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715663, PendingAcks: 0 2025-06-25T14:38:58.491122Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:953:2752], Recipient [2:927:2728]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715663 ShardId: 72075186224037890 2025-06-25T14:38:58.491153Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:927:2728] TxId# 281474976715662] Received TEvStreamQuotaRequest from ShardId# 72075186224037890 2025-06-25T14:38:58.491533Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:926:2728], Recipient [2:927:2728]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715662 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-25T14:38:58.491573Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:927:2728] TxId# 281474976715662] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-25T14:38:58.491606Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:927:2728] TxId# 281474976715662] Reserving quota 1 messages for ShardId# 72075186224037890 ... 
observed row limit of 1 rows at [2:953:2752] 2025-06-25T14:38:58.491669Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715663, MessageQuota: 1 2025-06-25T14:38:58.491751Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715663, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:38:58.491904Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:953:2752], Recipient [2:927:2728]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: RESPONSE_DATA TxId: 281474976715663 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\005\000\000\000b\005\0357\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 2 DataLastKey: "\001\000\004\000\000\000\005\000\000\000" 2025-06-25T14:38:58.491937Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:927:2728] TxId# 281474976715662] Received stream data from ShardId# 72075186224037890 2025-06-25T14:38:58.491968Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:927:2728] TxId# 281474976715662] Sending TEvStreamDataAck to [2:953:2752] ShardId# 72075186224037890 2025-06-25T14:38:58.492053Z node 2 :TX_PROXY INFO: read_table_impl.cpp:2933: [ReadTable [2:927:2728] TxId# 281474976715662] RESPONSE Status# ExecComplete prepare time: 0.015302s execute time: 0.184199s total time: 0.199501s 2025-06-25T14:38:58.492255Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715663, PendingAcks: 0 2025-06-25T14:38:58.492302Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715663, MessageQuota: 0 2025-06-25T14:38:58.536470Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:927:2728], Recipient [2:837:2662]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715662 2025-06-25T14:38:58.536763Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-25T14:38:58.536808Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715663, at: 72075186224037890 2025-06-25T14:38:58.537200Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:842:2664], Recipient [2:842:2664]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:38:58.537240Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:38:58.537295Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-25T14:38:58.537333Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:38:58.537376Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715663] at 72075186224037890 for ReadTableScan 2025-06-25T14:38:58.537411Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715663] at 72075186224037890 on unit ReadTableScan 2025-06-25T14:38:58.537449Z node 2 :TX_DATASHARD TRACE: 
read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715663] at 72075186224037890 error: , IsFatalError: 0 2025-06-25T14:38:58.537495Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715663] at 72075186224037890 is Executed 2025-06-25T14:38:58.537531Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715663] at 72075186224037890 executing on unit ReadTableScan 2025-06-25T14:38:58.537561Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715663] at 72075186224037890 to execution unit FinishPropose 2025-06-25T14:38:58.537591Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715663] at 72075186224037890 on unit FinishPropose 2025-06-25T14:38:58.537625Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715663] at 72075186224037890 is DelayComplete 2025-06-25T14:38:58.537652Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715663] at 72075186224037890 executing on unit FinishPropose 2025-06-25T14:38:58.537680Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715663] at 72075186224037890 to execution unit CompletedOperations 2025-06-25T14:38:58.537711Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715663] at 72075186224037890 on unit CompletedOperations 2025-06-25T14:38:58.537755Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715663] at 72075186224037890 is Executed 2025-06-25T14:38:58.537780Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715663] at 72075186224037890 executing on unit CompletedOperations 2025-06-25T14:38:58.537806Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715663] at 72075186224037890 has finished 2025-06-25T14:38:58.537834Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:38:58.537864Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-25T14:38:58.537896Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-25T14:38:58.537925Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-25T14:38:58.537984Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:38:58.538019Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715663] at 72075186224037890 on unit FinishPropose 2025-06-25T14:38:58.538057Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715663 at tablet 72075186224037890 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-25T14:38:58.538122Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:38:58.538401Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549569, Sender [2:927:2728], Recipient [2:842:2664]: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715663 
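The ReadTableMaxRows output above shows the proxy metering the scan one message at a time ("Reserving quota 1 messages", "MessageQuota: 1" dropping to 0 after each response) and cutting the read off at the observed row limit. The following is a minimal, self-contained sketch of that quota-driven flow control; it is an illustration only, not YDB's actual proxy/DataShard code, and every name in it is invented for the example.

// Conceptual sketch (assumptions only): a scan that sends one row per granted
// quota message and stops once the client's row limit is exhausted.
#include <cstdio>
#include <vector>

struct Proxy {
    int remainingRows;                                        // row limit requested by the client
    int GrantQuota() { return remainingRows > 0 ? 1 : 0; }    // one message per grant
    void OnBatch(int rows) { remainingRows -= rows; }         // account delivered rows
};

int main() {
    std::vector<int> tableRows = {1, 2, 3, 4, 5};             // pretend shard contents
    Proxy proxy{2};                                           // mirrors "observed row limit of 2 rows"
    size_t pos = 0;
    while (pos < tableRows.size()) {
        int quota = proxy.GrantQuota();                       // analogue of a stream quota response
        if (quota == 0) break;                                // limit reached -> finish scan
        // Send one response message holding a single row; message quota drops to 0 again.
        std::printf("send row key=%d (quota left: %d)\n", tableRows[pos], quota - 1);
        proxy.OnBatch(1);
        ++pos;                                                // wait for the next grant before continuing
    }
    std::printf("scan finished after %zu rows\n", pos);
}

Running this prints two rows and then "scan finished after 2 rows", which is the behaviour the test output above asserts when the row limit is 2.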
2025-06-25T14:38:58.538449Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3174: StateWork, processing event TEvDataShard::TEvCancelTransactionProposal 2025-06-25T14:38:58.538489Z node 2 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:73: Got TEvDataShard::TEvCancelTransactionProposal 72075186224037890 txId 281474976715663 2025-06-25T14:38:58.538548Z node 2 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:44: Start TTxCancelTransactionProposal at tablet 72075186224037890 txId 281474976715663 2025-06-25T14:38:58.538725Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287431, Sender [2:927:2728], Recipient [2:842:2664]: NKikimrTx.TEvInterruptTransaction TxId: 281474976715663 2025-06-25T14:38:58.538760Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvTxProcessing::TEvInterruptTransaction 2025-06-25T14:38:58.538867Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:927:2728], Recipient [2:842:2664]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715662 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::ReadTableSplitAfter [GOOD] Test command err: 2025-06-25T14:38:52.005738Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:52.005880Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:52.005937Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a8f/r3tmp/tmpBgQPpY/pdisk_1.dat 2025-06-25T14:38:52.313467Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:38:52.316593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:52.355870Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:52.363433Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862329257530 != 1750862329257534 2025-06-25T14:38:52.407731Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:38:52.408815Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:38:52.409254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:52.409353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:52.420760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:52.498116Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T14:38:52.498182Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:38:52.498286Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T14:38:52.607779Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T14:38:52.607890Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:38:52.608447Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:38:52.608552Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:38:52.608895Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:38:52.609082Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:38:52.609239Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:38:52.609747Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:38:52.611124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:52.612145Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:38:52.612209Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T14:38:52.641739Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:38:52.642649Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:38:52.642991Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:38:52.643236Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:38:52.682666Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:38:52.683231Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:38:52.683328Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:38:52.684786Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:38:52.684838Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:38:52.684907Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:38:52.685159Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T14:38:52.685268Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:38:52.685436Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:38:52.696152Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:38:52.725658Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:38:52.725866Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:38:52.725996Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:38:52.726036Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:38:52.726069Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:38:52.726112Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:38:52.726337Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:38:52.726390Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:38:52.726779Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:38:52.726897Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:38:52.726973Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:38:52.727026Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:38:52.727077Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:38:52.727126Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:38:52.727179Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:38:52.727212Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:38:52.727257Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:38:52.727682Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:38:52.727743Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:38:52.727791Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], 
sessionId# [0:0:0] 2025-06-25T14:38:52.727906Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:38:52.727949Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:38:52.728047Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:38:52.728254Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:38:52.728325Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:38:52.728399Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:38:52.728459Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14: ... 2646]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: RESPONSE_DATA TxId: 281474976715662 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\003\000\000\000b\005\035!\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 1 DataLastKey: "\001\000\004\000\000\000\003\000\000\000" 2025-06-25T14:38:58.335027Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:817:2646] TxId# 281474976715661] Received stream data from ShardId# 72075186224037890 2025-06-25T14:38:58.335054Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:817:2646] TxId# 281474976715661] Sending TEvStreamDataAck to [2:954:2752] ShardId# 72075186224037890 2025-06-25T14:38:58.335130Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:954:2752], Recipient [2:817:2646]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715662 ShardId: 72075186224037890 2025-06-25T14:38:58.335153Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:817:2646] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037890 2025-06-25T14:38:58.335214Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715662, PendingAcks: 0 2025-06-25T14:38:58.335548Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:816:2646], Recipient [2:817:2646]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715661 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-25T14:38:58.335578Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:817:2646] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-25T14:38:58.335606Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:817:2646] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037890 2025-06-25T14:38:58.335645Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715662, MessageQuota: 1 2025-06-25T14:38:58.335732Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 
281474976715662, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-25T14:38:58.335843Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:954:2752], Recipient [2:817:2646]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: RESPONSE_DATA TxId: 281474976715662 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\004\000\000\000b\005\035,\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 2 DataLastKey: "\001\000\004\000\000\000\004\000\000\000" 2025-06-25T14:38:58.335887Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:817:2646] TxId# 281474976715661] Received stream data from ShardId# 72075186224037890 2025-06-25T14:38:58.335916Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:817:2646] TxId# 281474976715661] Sending TEvStreamDataAck to [2:954:2752] ShardId# 72075186224037890 2025-06-25T14:38:58.335968Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715662, PendingAcks: 0 2025-06-25T14:38:58.336027Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:954:2752], Recipient [2:817:2646]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715662 ShardId: 72075186224037890 2025-06-25T14:38:58.336049Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:817:2646] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037890 2025-06-25T14:38:58.336264Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:816:2646], Recipient [2:817:2646]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715661 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-25T14:38:58.336291Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:817:2646] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-25T14:38:58.336378Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:817:2646] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037890 2025-06-25T14:38:58.336420Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715662, MessageQuota: 1 2025-06-25T14:38:58.336474Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715662, MessageQuota: 1 2025-06-25T14:38:58.336623Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287429, Sender [2:954:2752], Recipient [2:817:2646]: NKikimrTx.TEvStreamQuotaRelease TxId: 281474976715662 ShardId: 72075186224037890 2025-06-25T14:38:58.336654Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:817:2646] TxId# 281474976715661] Received TEvStreamQuotaRelease from ShardId# 72075186224037890 2025-06-25T14:38:58.336698Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2188: [ReadTable [2:817:2646] TxId# 281474976715661] Released quota 1 reserved messages from ShardId# 72075186224037890 2025-06-25T14:38:58.336742Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-25T14:38:58.336768Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715662, at: 72075186224037890 2025-06-25T14:38:58.336893Z node 2 :TX_DATASHARD TRACE: 
datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:857:2677], Recipient [2:857:2677]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:38:58.336921Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:38:58.336963Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-25T14:38:58.336993Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:38:58.337022Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715662] at 72075186224037890 for ReadTableScan 2025-06-25T14:38:58.337046Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037890 on unit ReadTableScan 2025-06-25T14:38:58.337074Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715662] at 72075186224037890 error: , IsFatalError: 0 2025-06-25T14:38:58.337107Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037890 is Executed 2025-06-25T14:38:58.337131Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037890 executing on unit ReadTableScan 2025-06-25T14:38:58.337155Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715662] at 72075186224037890 to execution unit FinishPropose 2025-06-25T14:38:58.337180Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037890 on unit FinishPropose 2025-06-25T14:38:58.337210Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037890 is DelayComplete 2025-06-25T14:38:58.337232Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037890 executing on unit FinishPropose 2025-06-25T14:38:58.337256Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715662] at 72075186224037890 to execution unit CompletedOperations 2025-06-25T14:38:58.337282Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037890 on unit CompletedOperations 2025-06-25T14:38:58.337320Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037890 is Executed 2025-06-25T14:38:58.337343Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037890 executing on unit CompletedOperations 2025-06-25T14:38:58.337363Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715662] at 72075186224037890 has finished 2025-06-25T14:38:58.337388Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:38:58.337412Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-25T14:38:58.337440Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-25T14:38:58.337463Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-25T14:38:58.337512Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:38:58.337540Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715662] at 72075186224037890 on unit FinishPropose 2025-06-25T14:38:58.337576Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715662 at tablet 72075186224037890 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-25T14:38:58.337635Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:38:58.337870Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:857:2677], Recipient [2:817:2646]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: COMPLETE TxId: 281474976715662 Step: 0 OrderId: 281474976715662 ExecLatency: 0 ProposeLatency: 0 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037890 CpuTimeUsec: 297 } } CommitVersion { Step: 0 TxId: 281474976715662 } 2025-06-25T14:38:58.337905Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1850: [ReadTable [2:817:2646] TxId# 281474976715661] Received stream complete from ShardId# 72075186224037890 2025-06-25T14:38:58.337969Z node 2 :TX_PROXY INFO: read_table_impl.cpp:2933: [ReadTable [2:817:2646] TxId# 281474976715661] RESPONSE Status# ExecComplete prepare time: 0.013400s execute time: 0.273159s total time: 0.286559s 2025-06-25T14:38:58.338304Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:817:2646], Recipient [2:627:2531]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 2025-06-25T14:38:58.338470Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:817:2646], Recipient [2:852:2675]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 2025-06-25T14:38:58.338767Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:817:2646], Recipient [2:857:2677]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 >> KqpWorkloadServiceDistributed::TestDistributedQueue [GOOD] >> KqpWorkloadServiceDistributed::TestNodeDisconnect >> ResourcePoolClassifiersDdl::TestCreateResourcePoolClassifier [GOOD] >> ResourcePoolClassifiersDdl::TestCreateResourcePoolClassifierOnServerless |82.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |82.5%| [LD] {RESULT} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |82.5%| [TA] $(B)/ydb/core/tx/datashard/ut_read_table/test-results/unittest/{meta.json ... results_accumulator.log} |82.5%| [TA] $(B)/ydb/services/fq/ut_integration/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::LongDomainName [GOOD] Test command err: Trying to start YDB, gRPC: 18326, MsgBus: 17043 2025-06-25T14:36:51.518873Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895936830099037:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:51.525659Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ac5/r3tmp/tmpne1qIg/pdisk_1.dat 2025-06-25T14:36:52.053313Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895936830098940:2080] 1750862211506676 != 1750862211506679 2025-06-25T14:36:52.066359Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:52.076837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:52.077116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:52.080919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18326, node 1 2025-06-25T14:36:52.165785Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:52.165813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:52.165830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:52.165957Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17043 2025-06-25T14:36:52.512753Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17043 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
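The TX_DATASHARD traces in the blocks above repeatedly walk each operation through a chain of execution units ("Trying to execute ... on unit ReadTableScan", then FinishPropose, then CompletedOperations), where a unit either reports Executed or defers part of its work to the Complete phase via DelayComplete. Below is a hedged, standalone sketch of that pipeline pattern under those assumptions; it is not the real execution-unit code and the types are invented for illustration.

// Conceptual sketch (assumptions only): operations advance through units in
// order; a DelayComplete unit runs its Complete part after the Execute pass.
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

enum class EStatus { Executed, DelayComplete };

struct Unit {
    std::string name;
    std::function<EStatus()> execute;   // Execute phase (inside the transaction)
    std::function<void()> complete;     // Complete phase (after commit); may be empty
};

int main() {
    std::vector<Unit> plan = {
        {"ReadTableScan", [] { std::puts("scan rows"); return EStatus::Executed; }, {}},
        {"FinishPropose", [] { return EStatus::DelayComplete; },
                          [] { std::puts("send result to client"); }},
        {"CompletedOperations", [] { return EStatus::Executed; }, {}},
    };

    std::vector<const Unit*> delayed;
    for (const Unit& u : plan) {
        std::printf("Trying to execute on unit %s\n", u.name.c_str());
        if (u.execute() == EStatus::DelayComplete)
            delayed.push_back(&u);                 // run its Complete part later
        std::printf("Advance execution plan past %s\n", u.name.c_str());
    }
    std::puts("Execution plan has finished");
    for (const Unit* u : delayed) u->complete();   // "Complete execution ... on unit FinishPropose"
}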
2025-06-25T14:36:52.867515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:36:54.618280Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895949715001482:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:54.618364Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895949715001474:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:54.618523Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:54.622727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:54.646250Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895949715001488:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:36:54.735678Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895949715001539:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 22900, MsgBus: 19851 2025-06-25T14:36:55.748684Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519895957135537209:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:55.748788Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ac5/r3tmp/tmpv7s8oB/pdisk_1.dat 2025-06-25T14:36:55.986340Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:55.986411Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:55.991271Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:55.996122Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22900, node 2 2025-06-25T14:36:56.128809Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:56.128831Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:56.128838Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:56.128949Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19851 TClient is connected to server localhost:19851 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:36:56.819905Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
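Several services in these logs recover from transient startup races by scheduling retries ("Scheduled retry for error: Retry LookupError ..." while metadata tables are still being created, and the "Transaction ... completed, doublechecking" step after creating the default resource pool). The sketch below shows the general retry-with-backoff pattern implied by those lines; the delays, attempt counts, and helper names are assumptions, not values taken from YDB.

// Conceptual sketch (assumptions only): retry a transiently failing lookup a
// few times with exponential backoff instead of failing the request outright.
#include <chrono>
#include <cstdio>
#include <string>
#include <thread>

// Pretend operation: fails with a transient error until the dependency exists.
static bool TryFetchPool(int attempt, std::string& error) {
    if (attempt < 3) {                              // early attempts race table/pool creation
        error = "Retry LookupError for table .metadata/...";
        return false;
    }
    return true;                                    // dependency finally visible
}

int main() {
    auto delay = std::chrono::milliseconds(50);     // initial backoff (assumed value)
    for (int attempt = 1; attempt <= 5; ++attempt) {
        std::string error;
        if (TryFetchPool(attempt, error)) {
            std::printf("attempt %d succeeded\n", attempt);
            return 0;
        }
        std::printf("Scheduled retry for error: %s (attempt %d, wait %lld ms)\n",
                    error.c_str(), attempt,
                    static_cast<long long>(delay.count()));
        std::this_thread::sleep_for(delay);
        delay *= 2;                                 // exponential backoff
    }
    std::puts("giving up after max retries");
    return 1;
}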
2025-06-25T14:36:56.826422Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:36:56.835371Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:36:59.217369Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895974315406998:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:59.217457Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:59.217672Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519895974315407010:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:59.221152Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:59.241661Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:36:59.245100Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519895974315407012:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:36:59.351151Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519895974315407063:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 13903, MsgBus: 32038 2025-06-25T14:37:00.415082Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519895978756397890:2 ... , first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:50.650801Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519896447515388467:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:38:50.710248Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519896447515388518:2333] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:50.750419Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:50.870897Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519896426040551382:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:50.870983Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["pgbench_accounts"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["aid (null, 3)","aid [7, 7]"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/pgbench_accounts","E-Rows":"0","Table":"pgbench_accounts","ReadRangesKeys":["aid"],"ReadColumns":["abalance"],"E-Cost":"0","ReadRangesExpectedSize":2}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":2}],"SortBy":"input.abalance","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/pgbench_accounts","reads":[{"columns":["abalance"],"scan_by":["aid (null, 3)","aid [7, 7]"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["aid (null, 3)","aid [7, 7]"],"Name":"TableRangeScan","Path":"\/Root\/pgbench_accounts","E-Rows":"0","Table":"pgbench_accounts","ReadRangesKeys":["aid"],"ReadColumns":["abalance"],"E-Cost":"0","ReadRangesExpectedSize":2}],"Node Type":"TableRangeScan"}],"Operators":[{"SortBy":"input.abalance","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["pgbench_accounts"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","ReadRange":["aid (4, 3)"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/pgbench_accounts","E-Rows":"1","Table":"pgbench_accounts","ReadColumns":["abalance"],"E-Cost":"0"}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node 
Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/pgbench_accounts","reads":[{"columns":["abalance"],"scan_by":["aid (4, 3)"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Operators":[{"Scan":"Parallel","ReadRange":["aid (4, 3)"],"E-Size":"0","Name":"TableRangeScan","Path":"\/Root\/pgbench_accounts","E-Rows":"1","Table":"pgbench_accounts","ReadColumns":["abalance"],"E-Cost":"0"}],"Node Type":"TableRangeScan"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 18200, MsgBus: 1068 2025-06-25T14:38:53.774713Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7519896460379525009:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:53.774795Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ac5/r3tmp/tmpTcoAJ1/pdisk_1.dat 2025-06-25T14:38:53.941178Z node 11 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:53.943450Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:53.943565Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:53.959866Z node 11 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [11:7519896460379524989:2080] 1750862333774228 != 1750862333774231 2025-06-25T14:38:53.963630Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18200, node 11 2025-06-25T14:38:54.014360Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:54.014390Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:54.014402Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:54.014562Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1068 TClient is connected to server localhost:1068 WaitRootIsUp 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'... 
TClient::Ls request: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_D... (TRUNCATED) WaitRootIsUp 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' success. waiting... 2025-06-25T14:38:54.790342Z node 11 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:54.790520Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:54.801726Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:58.241470Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896481854362107:2292], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:58.241470Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896481854362115:2295], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:58.241595Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:58.246167Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:58.263989Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519896481854362121:2296], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:38:58.343694Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519896481854362172:2336] txid# 281474976715659, issues: { message: "Check failed: path: \'/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:58.375320Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:58.775325Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519896460379525009:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:58.775434Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/initialization/migrations;error=timeout; >> KqpWorkloadService::TestQueueSizeManyQueries [GOOD] >> KqpWorkloadService::TestZeroQueueSize >> TBoardSubscriberTest::DropByDisconnect |82.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |82.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence >> TBSV::ShardsNotLeftInShardsToDelete |82.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |82.5%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_table/test-results/unittest/{meta.json ... results_accumulator.log} |82.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |82.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence >> TBoardSubscriberTest::DropByDisconnect [GOOD] >> KqpPg::InsertNoTargetColumns_Alter+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Alter-useSink >> SystemView::ShowCreateTable [GOOD] >> SystemView::ShowCreateTableChangefeeds >> HttpRequest::AnalyzeServerless [GOOD] |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest |82.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::DropByDisconnect [GOOD] |82.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |82.6%| [TA] $(B)/ydb/core/kqp/ut/idx_test/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view >> KqpWorkloadServiceActors::TestDefaultPoolUsePermissions [GOOD] >> KqpWorkloadServiceActors::TestDefaultPoolAdminPermissions >> TBSV::ShardsNotLeftInShardsToDelete [GOOD] |82.6%| [TA] {RESULT} $(B)/ydb/services/fq/ut_integration/test-results/unittest/{meta.json ... results_accumulator.log} |82.6%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/idx_test/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::ShardsNotLeftInShardsToDelete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:39:02.416475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:39:02.416577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:02.416619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:39:02.416655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:39:02.416704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:39:02.416732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:39:02.416783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:02.416858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:39:02.417595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:39:02.417909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:39:02.496714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:39:02.496789Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:02.519578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:39:02.523866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:39:02.524049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: 
UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:39:02.534167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:39:02.534548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:02.535300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:02.535828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:39:02.539299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:02.539472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:39:02.540773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:02.540834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:02.540886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:39:02.540930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:02.540969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:39:02.541178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:39:02.547983Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:39:02.665279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:02.665522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:02.665752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:39:02.665802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:39:02.666041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 
72057594046678944 2025-06-25T14:39:02.666130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:39:02.668753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:02.668944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:39:02.669166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:02.669248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:39:02.669305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:02.669358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:39:02.671455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:02.671516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:02.671560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:39:02.673428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:02.673480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:02.673541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:02.673607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:02.677536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:02.679600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:02.679828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 
4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:02.680809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:02.680960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:02.681020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:02.681305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:02.681358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:02.681535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:02.681626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:02.684018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:02.684066Z node 1 :FLAT_TX_SCHEMESHARD ... 
7594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:02.872054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_bsv.cpp:40: TDropBlockStoreVolume TPropose, operationId: 102:0 HandleReply TEvOperationPlan, step: 5000003, at schemeshard: 72057594046678944 2025-06-25T14:39:02.872171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:39:02.872324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:39:02.872363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:39:02.872404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:39:02.872435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:39:02.872501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:02.872562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:39:02.872598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-25T14:39:02.872645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:39:02.872680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:39:02.872720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:39:02.872849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:39:02.872895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-25T14:39:02.872928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:39:02.872967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-25T14:39:02.873817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:39:02.873869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:39:02.874209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:39:02.874255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:39:02.875445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:39:02.875490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:39:02.875665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:02.875698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:02.875856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:39:02.876010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:02.876043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-25T14:39:02.876094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:39:02.876540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:39:02.876628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:39:02.876674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:39:02.876736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:39:02.876780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:39:02.877075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:39:02.877119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:39:02.877204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:39:02.877694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle 
TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:39:02.877777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:39:02.877805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:39:02.877834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:39:02.877861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:02.877923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:39:02.878130Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 2025-06-25T14:39:02.878641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:02.878883Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 2025-06-25T14:39:02.879071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:39:02.881392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:39:02.882398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:39:02.883024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:39:02.883593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:39:02.883672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:39:02.884033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:39:02.884073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:39:02.884458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: 
NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:39:02.884557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:39:02.884599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:401:2379] TestWaitNotification: OK eventTxId 102 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted 2025-06-25T14:39:02.884979Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-25T14:39:02.885085Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 { Type { Kind: Struct Struct { Member { Name: "ShardsToDelete" Type { Kind: Optional Optional { Item { Kind: Struct Struct { Member { Name: "List" Type { Kind: List List { Item { Kind: Struct Struct { Member { Name: "ShardIdx" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } Member { Name: "Truncated" Type { Kind: Data Data { Scheme: 6 } } } } } } } } } } Value { Struct { Optional { Struct { } Struct { Bool: false } } } } } |82.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |82.6%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |82.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::AnalyzeServerless [GOOD] Test command err: 2025-06-25T14:38:41.569205Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:41.569528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:41.569629Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001cc0/r3tmp/tmpV2ei6k/pdisk_1.dat 2025-06-25T14:38:41.925870Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14287, node 1 2025-06-25T14:38:42.194503Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:42.194564Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:42.194606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:42.195219Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:42.203347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:42.313207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:42.313357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:42.328753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9506 2025-06-25T14:38:42.862910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:45.952902Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:45.992560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:45.992686Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:46.031141Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:46.033444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:46.264732Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:46.300022Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.300638Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.301222Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.301392Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.301609Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.301713Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.301783Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.301890Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.301955Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.514331Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:46.514465Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:46.527499Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:46.667012Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:46.715349Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:46.715451Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:46.750844Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:46.751058Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:46.751244Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:46.751286Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:46.751340Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:46.751395Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:46.751433Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:46.751468Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:46.751882Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:46.774092Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:46.774211Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:46.783459Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:38:46.790661Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T14:38:46.790973Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T14:38:46.800839Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-25T14:38:46.815817Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:46.815881Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:46.815959Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-25T14:38:46.825350Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:46.832867Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:46.833040Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:47.029406Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:47.175216Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:47.251616Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T14:38:47.779345Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:47.806649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:48.415498Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:48.593138Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7894: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-25T14:38:48.593203Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7910: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-25T14:38:48.593299Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:2504:2905], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-25T14:38:48.594790Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2506:2907] 2025-06-25T14:38:48.595057Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2506:2907], schemeshard id = 72075186224037899 2025-06-25T14:38:49.717913Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2626:3193], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:49.718067Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06- ... =TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:52.717641Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:52.717987Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:52.718344Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:52.718696Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:52.719003Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:52.719286Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037914;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T14:38:54.045450Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:4224:3361], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:54.045863Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:54.049262Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715663:0, at schemeshard: 72075186224037899, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-25T14:38:54.100849Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:38:54.101725Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:38:54.102948Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037912;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:38:54.104063Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:38:54.104652Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:38:54.105201Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:38:54.105770Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037914;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:38:54.107655Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:38:54.108208Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:38:54.108796Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:38:54.780704Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:4385:3404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:54.781093Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:54.785677Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715664:0, at schemeshard: 72075186224037899, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-25T14:38:54.838277Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:38:54.838796Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:38:54.839185Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:38:54.839555Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:38:54.839908Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:38:54.840753Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037914;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:38:54.841245Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037912;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:38:54.842293Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:38:54.842792Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:38:54.843305Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; waiting actualization: 0/0.000015s 2025-06-25T14:39:02.143542Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:6212:5565] 2025-06-25T14:39:02.147535Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. 
ReplyToActorId [1:6208:3971] , Record { OperationId: "\000\000\000\000!\345O\301\021\275+> ResourcePoolsDdl::TestCreateResourcePoolOnServerless [GOOD] >> ResourcePoolsDdl::TestDefaultPoolRestrictions >> KqpPg::CheckPgAutoParams-useSink [GOOD] |82.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |82.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |82.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign >> TStorageTenantTest::Empty [GOOD] >> KqpWorkloadServiceTables::TestCreateWorkloadSerivceTables [GOOD] >> KqpWorkloadServiceTables::TestPoolStateFetcherActor |82.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TStorageTenantTest::Empty [GOOD] >> TSchemeShardExtSubDomainTest::CreateItemsInsideExtSubdomainAtGSSwithoutTSS >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::Create >> TSchemeShardExtSubDomainTest::CreateAndWait >> TSchemeShardExtSubDomainTest::Fake [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithOnlyDotsNotAllowed >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::CheckPgAutoParams-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 6695, MsgBus: 17709 2025-06-25T14:36:54.686759Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895952276841183:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:54.687158Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a7e/r3tmp/tmp9PsbeN/pdisk_1.dat 2025-06-25T14:36:55.166240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:55.166333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:55.177908Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:55.250215Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895952276841000:2080] 1750862214675182 != 1750862214675185 2025-06-25T14:36:55.282909Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6695, node 1 2025-06-25T14:36:55.476944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:55.476975Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:55.476984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:55.477108Z node 1 :NET_CLASSIFIER 
ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:55.682148Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17709 TClient is connected to server localhost:17709 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:36:56.233729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:36:56.249297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:36:58.209152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:58.412475Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill \x62797465612030 \x62797465612030 \x62797465612031 \x62797465612031 \x62797465612032 \x62797465612032 \x62797465612033 \x62797465612033 \x62797465612034 \x62797465612034 \x62797465612035 \x62797465612035 \x62797465612036 \x62797465612036 \x62797465612037 \x62797465612037 \x62797465612038 \x62797465612038 \x62797465612039 \x62797465612039 2025-06-25T14:36:58.546823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:58.639763Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill \x62797465612030 \x62797465612030 \x62797465612031 \x62797465612031 \x62797465612032 \x62797465612032 \x62797465612033 \x62797465612033 \x62797465612034 \x62797465612034 \x62797465612035 \x62797465612035 \x62797465612036 \x62797465612036 
\x62797465612037 \x62797465612037 \x62797465612038 \x62797465612038 \x62797465612039 \x62797465612039 2025-06-25T14:36:58.717083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:58.772292Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {"\\x6130","\\x623130"} {"\\x6130","\\x623130"} {"\\x6131","\\x623131"} {"\\x6131","\\x623131"} {"\\x6132","\\x623132"} {"\\x6132","\\x623132"} {"\\x6133","\\x623133"} {"\\x6133","\\x623133"} {"\\x6134","\\x623134"} {"\\x6134","\\x623134"} {"\\x6135","\\x623135"} {"\\x6135","\\x623135"} {"\\x6136","\\x623136"} {"\\x6136","\\x623136"} {"\\x6137","\\x623137"} {"\\x6137","\\x623137"} {"\\x6138","\\x623138"} {"\\x6138","\\x623138"} {"\\x6139","\\x623139"} {"\\x6139","\\x623139"} 2025-06-25T14:36:58.832836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {"\\x6130","\\x623130"} {"\\x6130","\\x623130"} {"\\x6131","\\x623131"} {"\\x6131","\\x623131"} {"\\x6132","\\x623132"} {"\\x6132","\\x623132"} {"\\x6133","\\x623133"} {"\\x6133","\\x623133"} {"\\x6134","\\x623134"} {"\\x6134","\\x623134"} {"\\x6135","\\x623135"} {"\\x6135","\\x623135"} {"\\x6136","\\x623136"} {"\\x6136","\\x623136"} {"\\x6137","\\x623137"} {"\\x6137","\\x623137"} {"\\x6138","\\x623138"} {"\\x6138","\\x623138"} {"\\x6139","\\x623139"} {"\\x6139","\\x623139"} 2025-06-25T14:36:59.002591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:59.054054Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill f f t t 2025-06-25T14:36:59.160687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:59.230993Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill f f t t 2025-06-25T14:36:59.283024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:59.391990Z node 
1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {f,f} {f,f} {t,t} {t,t} 2025-06-25T14:36:59.449366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710686:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {f,f} {f,f} {t,t} {t,t} 2025-06-25T14:36:59.557459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710689:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:59.620196Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2025-06-25T14:36:59.671643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:59.688839Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895952276841183:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:59.688912Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:36:59.746723Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2025-06-25T14:36:59.790052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710697:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:59.849057Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {0,0} {0,0} {1,1} {1,1} {2,2} {2,2} {3,3} {3,3} {4,4} {4,4} {5,5} {5,5} {6,6} {6,6} {7,7} {7,7} {8,8} {8,8} {9,9} {9,9} 2025-06-25T14:36:59.886327Z node 1 :FLAT_TX_ ... on: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:38:57.286062Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:39:00.961427Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[14:7519896469401480366:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:39:00.961534Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:39:01.141132Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7519896495171284751:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:39:01.141196Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7519896495171284763:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:39:01.141275Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:39:01.146838Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:39:01.161436Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [14:7519896495171284780:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:39:01.261186Z node 14 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [14:7519896495171284832:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:39:01.303707Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:39:01.680721Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:39:02.356923Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:468: Get parsing result with error, self: [14:7519896499466252468:2359], owner: [14:7519896495171284731:2284], statement id: 0 2025-06-25T14:39:02.357179Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=14&id=NmVjMDA2NzQtNmZmMjU0YmItMWE1ZTQzMWUtNmFjMzQ1ZDA=, ActorId: [14:7519896499466252466:2358], ActorState: ExecuteState, TraceId: 01jykrea6g03gj6emmfz27ev7p, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:39:02.726336Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7519896499466252494:2369], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: RemovePrefixMembers, At function: PgSelect, At tuple, At tuple, At tuple, At function: PgSetItem, At tuple
: Error: At tuple
:1:1: Error: At function: PgWhere, At lambda
:2:55: Error: At function: PgOp
:2:55: Error: Unable to find an overload for operator = with given argument type(s): (text,int4) 2025-06-25T14:39:02.728259Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=14&id=ZTc2NzA2NDEtOTExZmZlOGYtODIwOWE3N2MtY2FkZDFkYzQ=, ActorId: [14:7519896499466252491:2367], ActorState: ExecuteState, TraceId: 01jykreah59qm08725d4wqfzsk, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:39:02.800910Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7519896499466252506:2375], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: RemovePrefixMembers, At function: PgSelect, At tuple, At tuple, At tuple, At function: PgSetItem, At tuple
: Error: At tuple
:1:1: Error: At function: PgWhere, At lambda
:2:57: Error: At function: PgAnd
:2:67: Error: At function: PgOp
:2:67: Error: Unable to find an overload for operator = with given argument type(s): (text,int4) 2025-06-25T14:39:02.803111Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=14&id=ZjQ3ODQ5ZjUtMWY5MzllZGMtYmMzNGJlZmItNzgyNTdlYg==, ActorId: [14:7519896499466252503:2373], ActorState: ExecuteState, TraceId: 01jykreajq3fjadp0nn4z48tq4, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:39:02.824337Z node 14 :KQP_EXECUTER CRIT: kqp_literal_executer.cpp:112: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jykreamza07tnc5rpvp9v7d2, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=YTdiMzEzZmMtMTA1MDNhYTItNzRhNDU4ZDgtMjk2OWFlNmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, unexpected exception caught: (NKikimr::NMiniKQL::TTerminateException) Terminate was called, reason(51): ERROR: invalid input syntax for type integer: "a" 2025-06-25T14:39:02.824999Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=YTdiMzEzZmMtMTA1MDNhYTItNzRhNDU4ZDgtMjk2OWFlNmE=, ActorId: [14:7519896499466252515:2379], ActorState: ExecuteState, TraceId: 01jykreamza07tnc5rpvp9v7d2, Create QueryResponse for error on request, msg: 2025-06-25T14:39:02.880031Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:39:02.980123Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:39:03.069196Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7519896503761219983:2406], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: values have 3 columns, INSERT INTO expects: 2 2025-06-25T14:39:03.069476Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=14&id=ODJkZDQwNzUtNmIzYTNkY2ItOTAzMzdmOWMtZDgxMThmNjk=, ActorId: [14:7519896503761219980:2404], ActorState: ExecuteState, TraceId: 01jykreaw1a04h7g6yck2g6w0r, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:39:03.111328Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7519896503761219995:2412], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Failed to convert type: List> to List>
:1:1: Error: Failed to convert 'id': pgunknown to Optional
:1:1: Error: Row type mismatch for table: db.[/Root/PgTable2] 2025-06-25T14:39:03.114424Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=14&id=MmJmYmViOTUtZGNkNjEyMTgtMmU0ODlkYWYtMmYzZGIxMTk=, ActorId: [14:7519896503761219992:2410], ActorState: ExecuteState, TraceId: 01jykreax95kvh6phqna32t2g2, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:39:03.564763Z node 14 :KQP_EXECUTER CRIT: kqp_literal_executer.cpp:112: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jykreayqfz69km69nv1q6sby, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=MmRkOGRmZDktZGU5MDVmMzQtNjIyYzNhMWQtOWM4MDFmMWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, unexpected exception caught: (NKikimr::NMiniKQL::TTerminateException) Terminate was called, reason(51): ERROR: invalid input syntax for type integer: "a" 2025-06-25T14:39:03.565399Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=MmRkOGRmZDktZGU5MDVmMzQtNjIyYzNhMWQtOWM4MDFmMWI=, ActorId: [14:7519896503761220004:2416], ActorState: ExecuteState, TraceId: 01jykreayqfz69km69nv1q6sby, Create QueryResponse for error on request, msg: 2025-06-25T14:39:03.615591Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:39:04.236766Z node 14 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 14, TabletId: 72075186224037892 not found 2025-06-25T14:39:04.273815Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) |82.7%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/test-results/unittest/{meta.json ... results_accumulator.log} |82.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |82.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |82.7%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/test-results/unittest/{meta.json ... 
results_accumulator.log} |82.7%| [LD] {RESULT} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut >> KqpWorkloadServiceActors::TestDefaultPoolAdminPermissions [GOOD] >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit >> KqpWorkloadService::TestZeroQueueSize [GOOD] >> KqpWorkloadService::TestQueryCancelAfterUnlimitedPool >> TMiniKQLProtoTestYdb::TestExportOptionalTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportListTypeYdb >> TMiniKQLProtoTestYdb::TestExportListTypeYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportIntegralYdb >> TMiniKQLProtoTestYdb::TestExportIntegralYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportEmptyOptionalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalNotEmptyYdb >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalNotEmptyYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalYdb >> TMiniKQLProtoTestYdb::TestExportOptionalYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportListYdb >> ResourcePoolsDdl::TestDefaultPoolRestrictions [GOOD] >> ResourcePoolsDdl::TestAlterResourcePool >> TMiniKQLProtoTestYdb::TestExportListYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantNotNullYdb >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantNotNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNullYdb >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNotNullYdb >> TMiniKQLProtoTestYdb::TestExportMultipleOptionalVariantOptionalNotNullYdb [GOOD] >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalYdbType >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalYdbType [GOOD] >> KqpPg::InsertNoTargetColumns_Alter-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Serial+useSink |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/engine/ut/unittest >> TMiniKQLProtoTestYdb::TestExportOptionalVariantOptionalYdbType [GOOD] |82.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |82.7%| [LD] {RESULT} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |82.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut >> ResourcePoolClassifiersDdl::TestResourcePoolClassifierRanks [GOOD] >> ResourcePoolClassifiersDdl::TestExplicitPoolId |82.7%| [TA] $(B)/ydb/core/engine/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeShardExtSubDomainTest::Create [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::CreateItemsInsideExtSubdomainAtGSSwithoutTSS [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithExtraPathSymbolsAllowed-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndWait [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-ExternalHive >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateWithOnlyDotsNotAllowed [GOOD] >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst-ExternalHive |82.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::Drop >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice >> TSchemeShardExtSubDomainTest::CreateAndAlter [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-ExternalHive >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-true |82.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |82.7%| [LD] {BAZEL_UPLOAD, 
SKIPPED} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |82.7%| [TA] {RESULT} $(B)/ydb/core/engine/ut/test-results/unittest/{meta.json ... results_accumulator.log} |82.7%| [LD] {RESULT} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable >> TBSV::ShouldLimitBlockStoreVolumeDropRate >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-ExternalHive >> TSchemeShardExtSubDomainTest::CreateWithExtraPathSymbolsAllowed-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithExtraPathSymbolsAllowed-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-ExternalHive >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-ExternalHive >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::Drop [GOOD] >> TSchemeShardExtSubDomainTest::Drop-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlter-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst |82.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init |82.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init |82.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-true >> KqpWorkloadServiceTables::TestPoolStateFetcherActor [GOOD] >> KqpWorkloadServiceTables::TestCleanupOnServiceRestart >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::CreateWithExtraPathSymbolsAllowed-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::CreateNameConflicts-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst [GOOD] >> 
TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::Drop-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-false >> TBSV::ShouldLimitBlockStoreVolumeDropRate [GOOD] >> KqpPg::TableInsert+useSink [GOOD] >> KqpPg::TableInsert-useSink >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::CreateNameConflicts-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateNameConflicts-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TBSV::ShouldLimitBlockStoreVolumeDropRate [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:39:15.045223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:39:15.045329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:15.045369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:39:15.045405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:39:15.045455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:39:15.045485Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:39:15.045536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:15.045604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:39:15.046374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:39:15.046728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:39:15.122528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:39:15.122600Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:15.139131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:39:15.139593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:39:15.139765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:39:15.146222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:39:15.146635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:15.147331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:15.147627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:39:15.151685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:15.151875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:39:15.153131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:15.153192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:15.153335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:39:15.153391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:15.153440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:39:15.153534Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:39:15.160058Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:39:15.325929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:15.326190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:15.326464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:39:15.326520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:39:15.326784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:39:15.326907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:39:15.334863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:15.335084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:39:15.335335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:15.335413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:39:15.335515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:15.335578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:39:15.338903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:15.338973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:15.339035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for 
txid 1:0 3 -> 128 2025-06-25T14:39:15.340954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:15.341005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:15.341058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:15.341119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:15.344525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:15.346439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:15.346633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:15.347530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:15.347663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:15.347727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:15.348050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:15.348120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:15.348546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:15.348647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:15.351111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:15.351160Z node 1 :FLAT_TX_SCHEMESHARD ... 
883031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 129, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 129 at step: 5000028 FAKE_COORDINATOR: advance: minStep5000028 State->FrontStep: 5000027 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 129 at step: 5000028 2025-06-25T14:39:16.883544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000028, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:16.883649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 129 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000028 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:16.883704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_bsv.cpp:40: TDropBlockStoreVolume TPropose, operationId: 129:0 HandleReply TEvOperationPlan, step: 5000028, at schemeshard: 72057594046678944 2025-06-25T14:39:16.883820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 2 2025-06-25T14:39:16.883942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#129:0 progress is 1/1 2025-06-25T14:39:16.883991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-25T14:39:16.884052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#129:0 progress is 1/1 2025-06-25T14:39:16.884103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-25T14:39:16.884168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:16.884246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 1 2025-06-25T14:39:16.884290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 129, ready parts: 1/1, is published: false 2025-06-25T14:39:16.884361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-25T14:39:16.884401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 129:0 2025-06-25T14:39:16.884454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 129:0 2025-06-25T14:39:16.884610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 2 2025-06-25T14:39:16.884658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 129, publications: 2, subscribers: 0 2025-06-25T14:39:16.884694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 1], 54 2025-06-25T14:39:16.884748Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 13], 18446744073709551615 2025-06-25T14:39:16.887908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-06-25T14:39:16.887977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:24 tabletId 72075186233409569 2025-06-25T14:39:16.890094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 2025-06-25T14:39:16.890160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:23 tabletId 72075186233409568 2025-06-25T14:39:16.891039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:24 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:39:16.891081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:23 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:39:16.891283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:16.891323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:16.891464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 13] 2025-06-25T14:39:16.891582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:16.891615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 129, path id: 1 2025-06-25T14:39:16.891689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 129, path id: 13 FAKE_COORDINATOR: Erasing txId 129 2025-06-25T14:39:16.892179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 13 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 129 2025-06-25T14:39:16.892260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 13 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 129 2025-06-25T14:39:16.892293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 129 2025-06-25T14:39:16.892345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 13], version: 18446744073709551615 
2025-06-25T14:39:16.892414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 1 2025-06-25T14:39:16.892947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:39:16.892991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 13], at schemeshard: 72057594046678944 2025-06-25T14:39:16.893064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:39:16.893552Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 24 TxId_Deprecated: 24 2025-06-25T14:39:16.893698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 54 PathOwnerId: 72057594046678944, cookie: 129 2025-06-25T14:39:16.893770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 54 PathOwnerId: 72057594046678944, cookie: 129 2025-06-25T14:39:16.893800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 129 2025-06-25T14:39:16.893827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 54 2025-06-25T14:39:16.893883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:16.893964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 129, subscribers: 0 2025-06-25T14:39:16.894173Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 23 TxId_Deprecated: 23 2025-06-25T14:39:16.894355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 24 ShardOwnerId: 72057594046678944 ShardLocalIdx: 24, at schemeshard: 72057594046678944 2025-06-25T14:39:16.894796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 23 ShardOwnerId: 72057594046678944 ShardLocalIdx: 23, at schemeshard: 72057594046678944 2025-06-25T14:39:16.899082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-25T14:39:16.901471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:39:16.901544Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-25T14:39:16.901601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-06-25T14:39:16.901643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 TestModificationResult got TxId: 129, wait until txId: 129 TestWaitNotification wait txId: 129 2025-06-25T14:39:16.902061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 129: send EvNotifyTxCompletion 2025-06-25T14:39:16.902090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 129 2025-06-25T14:39:16.902643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 129, at schemeshard: 72057594046678944 2025-06-25T14:39:16.902736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 129: got EvNotifyTxCompletionResult 2025-06-25T14:39:16.902762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 129: satisfy waiter [1:1674:3542] TestWaitNotification: OK eventTxId 129 >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlter-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:39:10.460384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:39:10.460578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.460626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:39:10.460687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:39:10.460731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-25T14:39:10.460756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:39:10.469231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.469344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:39:10.469993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:39:10.487485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:39:10.863368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:39:10.863429Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:10.945528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:39:10.945995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:39:10.973561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:39:10.985094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:39:10.995421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:11.024615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.056175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:39:11.116269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.151824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:39:11.218226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.218310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.227567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:39:11.227629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:11.227678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:39:11.227766Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.278692Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:39:11.549611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:11.549856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.550079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:39:11.550134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:39:11.550419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:39:11.550516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:39:11.557788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.558058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:39:11.560918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.561015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:39:11.561054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:11.561097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:39:11.569661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.579285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:11.579367Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:39:11.583241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.583285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.596508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.596630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:11.620680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:11.630936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:11.631131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:11.632190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.632334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:11.632384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.632646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:11.632693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.632966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:11.647794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:11.657070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-25T14:39:11.657119Z node 1 :FLAT_TX_SCHEMESHARD ... 594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:39:17.392934Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:39:17.392959Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:39:17.392985Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-25T14:39:17.393037Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-25T14:39:17.394522Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72075186233409546 at ss 72057594046678944 2025-06-25T14:39:17.394568Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72075186233409546 at ss 72057594046678944 2025-06-25T14:39:17.394595Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72075186233409546 at ss 72057594046678944 2025-06-25T14:39:17.394618Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72075186233409546 at ss 72057594046678944 2025-06-25T14:39:17.395068Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:39:17.395114Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-25T14:39:17.395229Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:39:17.395271Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:39:17.395318Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:39:17.395360Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:39:17.395404Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-25T14:39:17.395452Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:39:17.395494Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:39:17.395528Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:39:17.395696Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:39:17.396548Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:39:17.396757Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-25T14:39:17.396893Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:39:17.396966Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186234409547 2025-06-25T14:39:17.397112Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:17.397365Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:39:17.398369Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:39:17.398565Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186234409547 2025-06-25T14:39:17.398995Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186234409546 2025-06-25T14:39:17.400120Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:17.403913Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186234409548 2025-06-25T14:39:17.404230Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:39:17.404542Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:39:17.405083Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:39:17.405226Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186234409546 Forgetting tablet 72075186234409548 2025-06-25T14:39:17.406825Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:39:17.406882Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: 
PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:39:17.407019Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:39:17.407664Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:39:17.407717Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:39:17.407786Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:17.409187Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:39:17.409243Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:39:17.409319Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:39:17.409338Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186234409547 2025-06-25T14:39:17.411193Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:39:17.411231Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186234409546 2025-06-25T14:39:17.411286Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:39:17.411326Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186234409548 2025-06-25T14:39:17.411479Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:39:17.411569Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:39:17.411839Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:39:17.411881Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:39:17.412242Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:39:17.412346Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:39:17.412391Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy 
waiter [7:585:2524] TestWaitNotification: OK eventTxId 103 2025-06-25T14:39:17.412885Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:39:17.413046Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 200us result status StatusPathDoesNotExist 2025-06-25T14:39:17.413188Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent [GOOD] >> TSchemeShardExtSubDomainTest::CreateNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD] >> ResourcePoolClassifiersDdl::TestCreateResourcePoolClassifierOnServerless [GOOD] >> ResourcePoolClassifiersDdl::TestAlterResourcePoolClassifier >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:39:10.461218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:39:10.461382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 
1.000000s, InflightLimit# 10 2025-06-25T14:39:10.461432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:39:10.461486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:39:10.461537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:39:10.461570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:39:10.470392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.470524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:39:10.471341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:39:10.487486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:39:10.853053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:39:10.853135Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:10.945542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:39:10.945954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:39:10.975567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:39:10.982298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:39:10.994495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:11.024635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.056631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:39:11.118325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.151156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:39:11.218512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.218610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.228093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:39:11.228172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:11.228242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:39:11.228390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.266415Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:39:11.497201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:11.497579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.497806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:39:11.497857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:39:11.506074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:39:11.506202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:39:11.522826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.540630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:39:11.560951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.561058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:39:11.561126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:11.561167Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:39:11.569796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.580555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:11.580663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:39:11.583140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.583217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.596514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.596624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:11.612131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:11.624994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:11.625169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:11.683579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.683744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:11.683802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.684094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:11.684143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.684305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path 
for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:11.684424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:11.688566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.688621Z node 1 :FLAT_TX_SCHEMESHARD ... 678944, LocalPathId: 2], Generation: 2, ActorId:[7:401:2369], EffectiveACLVersion: 0, SubdomainVersion: 4, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 4, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:39:18.028238Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186234409546 2025-06-25T14:39:18.028271Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186234409546, txId: 0, path id: [OwnerId: 72075186234409546, LocalPathId: 1] 2025-06-25T14:39:18.028409Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186234409546 2025-06-25T14:39:18.028444Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:493:2434], at schemeshard: 72075186234409546, txId: 0, path id: 1 2025-06-25T14:39:18.029234Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186234409546, msg: Owner: 72075186234409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186234409546, cookie: 0 2025-06-25T14:39:18.029678Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:39:18.029733Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-25T14:39:18.029866Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:39:18.029908Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:39:18.029961Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:39:18.030019Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:39:18.030072Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-25T14:39:18.030123Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:39:18.030168Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the 
parts is done, operation id: 103:0 2025-06-25T14:39:18.030208Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:39:18.030278Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:39:18.031453Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:39:18.031533Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:39:18.032934Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:39:18.032988Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:39:18.033391Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:39:18.033476Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:39:18.033522Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:570:2509] TestWaitNotification: OK eventTxId 103 2025-06-25T14:39:18.034037Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:39:18.034238Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 224us result status StatusSuccess 2025-06-25T14:39:18.034689Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 
PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:18.035169Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:39:18.035323Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 175us result status StatusSuccess 2025-06-25T14:39:18.035707Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:18.036272Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186234409546 2025-06-25T14:39:18.036449Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186234409546 describe path "/MyRoot/USER_0" took 193us result status StatusSuccess 
2025-06-25T14:39:18.036827Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "MyRoot/USER_0" PathId: 1 SchemeshardId: 72075186234409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/USER_0" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186234409546, at schemeshard: 72075186234409546 |82.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |82.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |82.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |82.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |82.7%| [LD] {RESULT} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |82.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:39:10.460919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:39:10.461069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.461117Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:39:10.461157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:39:10.461212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:39:10.461239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:39:10.471993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.472135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:39:10.472890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:39:10.488036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:39:10.850917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:39:10.851013Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:10.945474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:39:10.945961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:39:10.974645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:39:10.988677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:39:10.996642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:11.024939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.056724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:39:11.119944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.151148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:39:11.218608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.218694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.229133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 
2025-06-25T14:39:11.229199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:11.229272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:39:11.229378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.281422Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:39:11.476767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:11.484718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.493238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:39:11.493324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:39:11.504688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:39:11.504841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:39:11.525177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.540913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:39:11.562370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.562464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:39:11.562505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:11.562550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 
2025-06-25T14:39:11.573698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.580894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:11.580975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:39:11.585539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.585612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.596780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.596884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:11.616647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:11.622629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:11.622835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:11.623922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.624063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:11.624129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.632638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:11.632703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.632906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:11.645985Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:11.651815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.651858Z node 1 :FLAT_TX_SCHEMESHARD ... , msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:39:18.382020Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:39:18.382053Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:39:18.382082Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:39:18.382557Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:39:18.382635Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:39:18.382663Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:39:18.382687Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:39:18.382715Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:39:18.382768Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-25T14:39:18.384693Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:39:18.384740Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:39:18.384759Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:39:18.385009Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:39:18.385044Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-25T14:39:18.385148Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: 
Part operation is done id#103:0 progress is 1/1 2025-06-25T14:39:18.385183Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:39:18.385230Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:39:18.385265Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:39:18.385317Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-25T14:39:18.385360Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:39:18.385408Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:39:18.385443Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:39:18.385564Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:39:18.386422Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:39:18.386632Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-25T14:39:18.391432Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:18.391804Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-06-25T14:39:18.392973Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:18.396958Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-25T14:39:18.397144Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 Forgetting tablet 72075186233409548 2025-06-25T14:39:18.398226Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:39:18.398443Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:39:18.398704Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409547 2025-06-25T14:39:18.399700Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:39:18.399879Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:39:18.400606Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:39:18.400661Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:39:18.400781Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:39:18.401555Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:39:18.401625Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:39:18.401712Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:18.404786Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:39:18.404864Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:39:18.404962Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:39:18.404986Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:39:18.405352Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:39:18.405397Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:39:18.406621Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:39:18.406725Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:39:18.407027Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:39:18.407091Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:39:18.407543Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: 
NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:39:18.407638Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:39:18.407679Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:542:2489] TestWaitNotification: OK eventTxId 103 2025-06-25T14:39:18.412618Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:39:18.412842Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 256us result status StatusPathDoesNotExist 2025-06-25T14:39:18.413034Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:39:10.461321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:39:10.461518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.461570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:39:10.461612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:39:10.461657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 
10000 2025-06-25T14:39:10.461687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:39:10.472181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.472302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:39:10.473138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:39:10.488200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:39:10.888059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:39:10.888135Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:10.945531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:39:10.946011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:39:10.975570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:39:10.988878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:39:10.994710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:11.024641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.056208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:39:11.128902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.150684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:39:11.217745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.217858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.227081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:39:11.227163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:11.227234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:39:11.227346Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.265607Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:39:11.725396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:11.725787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.726206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:39:11.726293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:39:11.726615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:39:11.726757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:39:11.730463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.730884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:39:11.731209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.731292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:39:11.731349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:11.731414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:39:11.734303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.734391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:11.734440Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:39:11.736528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.736604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.736669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.736773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:11.756917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:11.765223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:11.765477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:11.766704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.766885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:11.766954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.767379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:11.767459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.767671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:11.767756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:11.777651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-25T14:39:11.777710Z node 1 :FLAT_TX_SCHEMESHARD ... n_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:18.407140Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:39:18.408951Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:18.409019Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:18.409073Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:39:18.410678Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:18.410726Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:18.410788Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:18.410856Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:18.411025Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:18.412566Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:18.412797Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:18.413708Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:18.413828Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 30064773229 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:18.413889Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:18.414197Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:18.414272Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:18.414482Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:18.414586Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:18.416385Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:18.416453Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:18.416694Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:18.416772Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:209:2209], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:39:18.417215Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:18.417293Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:39:18.417463Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:39:18.417520Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:39:18.417594Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:39:18.417644Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:39:18.417696Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:39:18.417763Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:39:18.417817Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:39:18.417858Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:39:18.417955Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:39:18.418015Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:39:18.418074Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:39:18.418779Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at 
schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:39:18.418914Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:39:18.418968Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:39:18.419025Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:39:18.419082Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:18.419210Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:39:18.422181Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:39:18.422764Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T14:39:18.423465Z node 7 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [7:272:2261] Bootstrap 2025-06-25T14:39:18.456054Z node 7 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [7:272:2261] Become StateWork (SchemeCache [7:277:2266]) 2025-06-25T14:39:18.459723Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_1" ExternalSchemeShard: true } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:18.459923Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 101:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_1" ExternalSchemeShard: true } 2025-06-25T14:39:18.460002Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 101:0, path /MyRoot/USER_1 2025-06-25T14:39:18.460183Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), at schemeshard: 72057594046678944 2025-06-25T14:39:18.460246Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusPathDoesNotExist, reason: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest 
resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), at schemeshard: 72057594046678944 2025-06-25T14:39:18.463310Z node 7 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [7:272:2261] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:39:18.471592Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusPathDoesNotExist Reason: "Invalid AlterExtSubDomain request: Check failed: path: \'/MyRoot/USER_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:18.471946Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), operation: ALTER DATABASE, path: /MyRoot/USER_1 2025-06-25T14:39:18.472828Z node 7 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> KqpPg::InsertNoTargetColumns_Serial+useSink [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-true [GOOD] >> ResourcePoolsDdl::TestAlterResourcePool [GOOD] >> ResourcePoolsDdl::TestPoolSwitchToLimitedState ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:39:10.460390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:39:10.460597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.460660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:39:10.460703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:39:10.460755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:39:10.460799Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:39:10.469266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.469402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:39:10.470249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:39:10.488605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:39:10.860619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:39:10.860738Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:10.945543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:39:10.946121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:39:10.977014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:39:10.987412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:39:10.994732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:11.024666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.056574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:39:11.124561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.150659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:39:11.219916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.220020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.230164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:39:11.230260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:11.230335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:39:11.230462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: 
TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.273256Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:39:11.564625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:11.564895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.565129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:39:11.565193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:39:11.565480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:39:11.565567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:39:11.568614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.568841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:39:11.569097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.569173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:39:11.569222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:11.569262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:39:11.571356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.580983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:11.581083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 
2025-06-25T14:39:11.590254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.590317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.596971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.597097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:11.678164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:11.685287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:11.685540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:11.686510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.686681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:11.686734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.687009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:11.687069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.687251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:11.687359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:11.690006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.690053Z node 1 :FLAT_TX_SCHEMESHARD ... 
hardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186234409547 2025-06-25T14:39:19.857196Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:39:19.859739Z node 6 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186234409546 2025-06-25T14:39:19.860072Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:39:19.860360Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186234409547 2025-06-25T14:39:19.860803Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:39:19.860980Z node 6 :TX_DATASHARD ERROR: datashard.cpp:3573: Datashard's schemeshard pipe destroyed while no messages to sent at 72075186234409549 2025-06-25T14:39:19.861984Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:19.866086Z node 6 :TX_DATASHARD ERROR: datashard.cpp:3573: Datashard's schemeshard pipe destroyed while no messages to sent at 72075186234409550 2025-06-25T14:39:19.867068Z node 6 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186234409548 Forgetting tablet 72075186234409546 2025-06-25T14:39:19.868125Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:39:19.868337Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186234409548 2025-06-25T14:39:19.869335Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:39:19.869489Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:39:19.870188Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:39:19.870260Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:39:19.870409Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:39:19.870949Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: 
TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:39:19.871015Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:39:19.871097Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:19.873994Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:39:19.874076Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:39:19.874456Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:39:19.874497Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186234409547 2025-06-25T14:39:19.875190Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:39:19.875221Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186234409546 2025-06-25T14:39:19.875842Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:39:19.875908Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186234409548 2025-06-25T14:39:19.876120Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:39:19.876206Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-25T14:39:19.876580Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-25T14:39:19.876640Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-25T14:39:19.877218Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-25T14:39:19.877339Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:39:19.877391Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [6:797:2702] TestWaitNotification: OK eventTxId 105 2025-06-25T14:39:19.878102Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 
72057594046678944 2025-06-25T14:39:19.878346Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir/table_1" took 297us result status StatusPathDoesNotExist 2025-06-25T14:39:19.878838Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/dir/table_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/dir/table_1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:39:19.879481Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:39:19.879684Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 221us result status StatusPathDoesNotExist 2025-06-25T14:39:19.879847Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:39:19.880376Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:39:19.880586Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 234us result status StatusSuccess 2025-06-25T14:39:19.881027Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpPg::ValuesInsert+useSink [GOOD] >> KqpPg::ValuesInsert-useSink >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:39:10.460853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:39:10.460999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.461046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:39:10.461088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:39:10.461128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:39:10.461161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:39:10.471180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, 
WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.471270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:39:10.471893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:39:10.487745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:39:10.849908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:39:10.850001Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:10.946172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:39:10.946631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:39:10.976749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:39:10.986336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:39:10.996908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:11.024973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.055846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:39:11.129323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.151857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:39:11.218527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.218653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.228662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:39:11.228731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:11.228793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:39:11.228897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.272548Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] 
recipient: [1:15:2062] 2025-06-25T14:39:11.671831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:11.672070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.672244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:39:11.672285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:39:11.676628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:39:11.676726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:39:11.684951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.685147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:39:11.685303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.685354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:39:11.685410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:11.685456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:39:11.693088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.693171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:11.693211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:39:11.701040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.701098Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.701154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.701216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:11.704552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:11.707003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:11.707252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:11.710629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.710920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:11.711023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.711470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:11.711564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.711891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:11.712011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:11.722870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.722953Z node 1 :FLAT_TX_SCHEMESHARD ... 
D DEBUG: schemeshard__operation_common_subdomain.cpp:120: NSubDomainState::TConfigureParts operationId# 102:0 Got OK TEvConfigureStatus from tablet# 72075186233409549 shardIdx# 72057594046678944:4 at schemeshard# 72057594046678944 2025-06-25T14:39:20.006157Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 3 -> 128 2025-06-25T14:39:20.009178Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:39:20.009374Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:39:20.009432Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:39:20.009497Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 102:0, at tablet# 72057594046678944 2025-06-25T14:39:20.009564Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 102 ready parts: 1/1 2025-06-25T14:39:20.009733Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:20.011347Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-25T14:39:20.011505Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-25T14:39:20.011861Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:20.012010Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 34359740525 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:20.012069Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:0, at tablet# 72057594046678944 2025-06-25T14:39:20.012419Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 240 2025-06-25T14:39:20.012490Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:0, at tablet# 72057594046678944 2025-06-25T14:39:20.012626Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 
72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:39:20.012751Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[8:363:2335], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 72075186233409549, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:39:20.014513Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:20.014579Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:39:20.014793Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:20.014846Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [8:208:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:39:20.015259Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:39:20.015336Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 102:0, ProgressState, NeedSyncHive: 0 2025-06-25T14:39:20.015387Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 240 -> 240 2025-06-25T14:39:20.016287Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:39:20.016446Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:39:20.016502Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:39:20.016551Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-25T14:39:20.016610Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-25T14:39:20.016710Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-25T14:39:20.020183Z node 8 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:39:20.020243Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:39:20.020395Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:39:20.020445Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:39:20.020493Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:39:20.020538Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:39:20.020586Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:39:20.020677Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [8:304:2293] message: TxId: 102 2025-06-25T14:39:20.020735Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:39:20.020790Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:39:20.020833Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:39:20.021028Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:39:20.022001Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:39:20.023169Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:39:20.023229Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [8:511:2448] TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 2025-06-25T14:39:20.026377Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "USER_0" ExternalStatisticsAggregator: false } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:20.026581Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 103:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "USER_0" ExternalStatisticsAggregator: false } 2025-06-25T14:39:20.026699Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 103:0, path /MyRoot/USER_0 2025-06-25T14:39:20.026845Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 103:0, 
explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, at schemeshard: 72057594046678944 2025-06-25T14:39:20.026901Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 103:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, at schemeshard: 72057594046678944 2025-06-25T14:39:20.029315Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 103, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:20.029624Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, operation: ALTER DATABASE, path: /MyRoot/USER_0 TestModificationResult got TxId: 103, wait until txId: 103 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:39:10.460375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:39:10.460594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.460660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:39:10.460717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:39:10.460757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:39:10.460791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:39:10.470703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.470809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:39:10.471636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:39:10.487979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:39:10.894369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:39:10.894445Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:10.948510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:39:10.948986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:39:10.976515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:39:10.996803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:39:10.997170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:11.028623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.056616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:39:11.134350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.148511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:39:11.218244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.218885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.229637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:39:11.229706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:11.229777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:39:11.229887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.265302Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:39:11.711999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:11.712405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.712710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:39:11.712759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:39:11.713094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:39:11.713316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:39:11.716486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.716786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:39:11.717086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.717185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:39:11.717232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:11.717287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:39:11.719723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.719807Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:11.719854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:39:11.721752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.721801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.721875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.721947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:11.725669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:11.727793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:11.728013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:11.729108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.729275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:11.729326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.729662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:11.729720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.729915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:11.730006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:11.732455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.732507Z node 1 :FLAT_TX_SCHEMESHARD ... 
etails: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 6 2025-06-25T14:39:20.052602Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5906: Handle TEvUpdateTenantSchemeShard, at schemeshard: 72075186233409546, msg: TabletId: 72057594046678944 Generation: 2 UserAttributes { Key: "user__attr_1" Value: "value" } UserAttributesVersion: 2 2025-06-25T14:39:20.052748Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:79: TTxUpdateTenant DoExecute, msg: TabletId: 72057594046678944 Generation: 2 UserAttributes { Key: "user__attr_1" Value: "value" } UserAttributesVersion: 2, at schemeshard: 72075186233409546 2025-06-25T14:39:20.052971Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:590: Cannot publish paths for unknown operation id#0 2025-06-25T14:39:20.053292Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:20.053350Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:39:20.053583Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:20.053658Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:209:2209], at schemeshard: 72057594046678944, txId: 104, path id: 2 FAKE_COORDINATOR: Erasing txId 104 2025-06-25T14:39:20.054359Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:39:20.054490Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:39:20.054541Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:39:20.054591Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 6 2025-06-25T14:39:20.054648Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:39:20.054754Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-25T14:39:20.057432Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5893: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 2 TabletID: 72075186233409546 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 3 UserAttributesVersion: 2 TenantHive: 18446744073709551615 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 
2025-06-25T14:39:20.057529Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:39:20.057646Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[7:353:2330], EffectiveACLVersion: 0, SubdomainVersion: 3, UserAttributesVersion: 2, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 2, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:39:20.057758Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-25T14:39:20.057791Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-25T14:39:20.057944Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-25T14:39:20.057975Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:450:2400], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-25T14:39:20.059226Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186233409546, cookie: 0 2025-06-25T14:39:20.059390Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:39:20.059496Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-25T14:39:20.059812Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-25T14:39:20.059868Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-25T14:39:20.060412Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-25T14:39:20.060532Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:39:20.060582Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [7:547:2495] TestWaitNotification: OK eventTxId 104 2025-06-25T14:39:20.061241Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: 
Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:39:20.061497Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 291us result status StatusSuccess 2025-06-25T14:39:20.062024Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 2 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } StoragePools { Name: "pool-2" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "user__attr_1" Value: "value" } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:20.062782Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-25T14:39:20.062994Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/USER_0" took 239us result status StatusSuccess 2025-06-25T14:39:20.063441Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "MyRoot/USER_0" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 2 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } StoragePools { Name: "pool-2" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/USER_0" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } UserAttributes { Key: "user__attr_1" Value: "value" } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateThenDropChangesParent-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:39:10.460849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:39:10.461007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.461061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:39:10.461104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:39:10.461137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:39:10.461165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:39:10.470351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.470461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-06-25T14:39:10.472534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:39:10.487891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:39:10.864864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:39:10.864921Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:10.945462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:39:10.945972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:39:10.975760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:39:10.989661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:39:10.997460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:11.024615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.055379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:39:11.133643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.151538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:39:11.223920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.224034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.230704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:39:11.231012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:11.231078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:39:11.231185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.291780Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:39:11.596029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" 
Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:11.596801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.597093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:39:11.597185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:39:11.597584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:39:11.597706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:39:11.608222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.608478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:39:11.608719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.608782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:39:11.608833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:11.608869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:39:11.610868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.610936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:11.610977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:39:11.612709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.612757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.612830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.612892Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:11.628778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:11.637536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:11.637816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:11.639017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.639124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:11.639159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.639408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:11.639451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.639598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:11.648583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:11.656954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.656995Z node 1 :FLAT_TX_SCHEMESHARD ... 
.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:39:20.345278Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 134 2025-06-25T14:39:20.346065Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:39:20.346713Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:39:20.347779Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:39:20.347840Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_extsubdomain.cpp:104: TDropExtSubdomain TDeleteExternalShards, operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:20.347948Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 134 -> 135 2025-06-25T14:39:20.348206Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:20.348286Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:39:20.350784Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:20.350819Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:20.350933Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:39:20.351071Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:20.351099Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:207:2207], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-25T14:39:20.351129Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:207:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:39:20.351327Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:39:20.351383Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:400: [72057594046678944] TDeleteParts opId# 102:0 ProgressState 2025-06-25T14:39:20.351437Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 135 -> 240 2025-06-25T14:39:20.352175Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 
72057594046678944, cookie: 102 2025-06-25T14:39:20.352239Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:39:20.352272Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:39:20.352323Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:39:20.352363Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:39:20.352910Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:39:20.352975Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:39:20.353008Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:39:20.353032Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:39:20.353057Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:39:20.353107Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-25T14:39:20.355737Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:39:20.355798Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:39:20.355958Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:39:20.355999Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:39:20.356049Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:39:20.356090Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:39:20.356132Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:39:20.356174Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 
102 ready parts: 1/1 2025-06-25T14:39:20.356212Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:39:20.356253Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:39:20.356336Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:39:20.356747Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:39:20.356797Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:39:20.356865Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:39:20.357051Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:39:20.357099Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:39:20.357181Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:20.357701Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:39:20.359192Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:39:20.361194Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:39:20.361284Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:39:20.361478Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:39:20.361524Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:39:20.361904Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:39:20.361999Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:39:20.362044Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [7:342:2331] TestWaitNotification: OK eventTxId 102 2025-06-25T14:39:20.362572Z 
node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:39:20.362761Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 233us result status StatusPathDoesNotExist 2025-06-25T14:39:20.362920Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |82.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |82.8%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |82.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertNoTargetColumns_Serial+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 62033, MsgBus: 14176 2025-06-25T14:36:57.439227Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895964914825722:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:57.440291Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a57/r3tmp/tmpOsZKFr/pdisk_1.dat 2025-06-25T14:36:57.796577Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:57.840744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:57.840818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 62033, node 1 2025-06-25T14:36:57.847944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:57.910369Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:57.910389Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:57.910395Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:57.910494Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is 
connected to server localhost:14176 TClient is connected to server localhost:14176 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:36:58.445725Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:36:58.481098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 16 2025-06-25T14:37:00.455391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:00.624259Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:37:00.632837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:00.687887Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:37:00.733799Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895977799728376:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:00.733896Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:00.734270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895977799728388:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:37:00.738196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:37:00.753649Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895977799728390:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:37:00.849802Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895977799728441:2451] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } f f t t 18 2025-06-25T14:37:01.238077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:01.291909Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:37:01.296586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:01.354279Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 21 2025-06-25T14:37:01.821007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:01.936552Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:37:01.949873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:02.047265Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:37:02.440472Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895964914825722:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:37:02.440537Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 23 2025-06-25T14:37:02.475176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 
2025-06-25T14:37:02.569795Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:37:02.576646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:02.642502Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 20 2025-06-25T14:37:03.014658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:03.130200Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:37:03.135544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715686:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:37:03.192460Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 700 2025-06-25T14:37:03.690508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__oper ... is connected to server localhost:32658 2025-06-25T14:39:03.971240Z node 11 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32658 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:39:04.167792Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:39:04.182164Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:39:07.958186Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519896500966622854:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:39:07.966035Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:39:08.554950Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896526736427241:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:39:08.555006Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7519896526736427220:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:39:08.555175Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:39:08.560252Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:39:08.576889Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7519896526736427258:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:39:08.664138Z node 11 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [11:7519896526736427309:2340] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:39:08.704691Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:39:09.468955Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) Trying to start YDB, gRPC: 21546, MsgBus: 20439 2025-06-25T14:39:11.553714Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519896538805541405:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:39:11.553785Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a57/r3tmp/tmpjJ6vVD/pdisk_1.dat 2025-06-25T14:39:12.086013Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:12.087066Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519896538805541380:2080] 1750862351550254 != 1750862351550257 2025-06-25T14:39:12.150573Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:39:12.150726Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:39:12.158508Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21546, node 12 2025-06-25T14:39:12.370242Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:39:12.370279Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:39:12.370296Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:39:12.370537Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:39:12.540545Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20439 TClient is connected to server localhost:20439 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:39:13.295495Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:39:16.554221Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519896538805541405:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:39:16.554319Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:39:18.272924Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519896568870313094:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:39:18.273157Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:39:18.273308Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519896568870313121:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:39:18.280155Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:39:18.300688Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519896568870313123:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:39:18.371260Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519896568870313174:2343] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:39:18.490535Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-ExternalHive >> ResourcePoolClassifiersDdl::TestExplicitPoolId [GOOD] >> ResourcePoolClassifiersDdl::TestMultiGroupClassification >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst >> KqpWorkloadService::TestQueryCancelAfterUnlimitedPool [GOOD] >> KqpWorkloadService::TestStartQueryAfterCancel >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-ExternalHive |82.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |82.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |82.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst |82.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |82.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |82.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-true [GOOD] >> ttl_unavailable_s3.py::TestUnavailableS3::test >> TBoardSubscriberTest::ReconnectReplica >> TBoardSubscriberTest::ReconnectReplica [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 
72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:39:10.460420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:39:10.460631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.460688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:39:10.460723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:39:10.460778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:39:10.460809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:39:10.469273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:10.469383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:39:10.470301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:39:10.488567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:39:10.859925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:39:10.859995Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:10.946166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:39:10.946575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:39:10.975490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:39:10.984378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:39:10.994505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:11.024535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.055811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at 
schemeshard: 72057594046678944 2025-06-25T14:39:11.120448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.151468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:39:11.217724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.217829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:11.226470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:39:11.226549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:11.226625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:39:11.226744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.268254Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:39:11.610830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:11.611038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.611228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:39:11.611264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:39:11.611518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:39:11.611596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:39:11.614749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.614946Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:39:11.615167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.615229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:39:11.615262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:11.615367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:39:11.617126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.617175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:11.617216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:39:11.618769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.618807Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:11.618849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.618900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:11.622264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:11.678006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:11.678221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:11.679279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:11.679528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 
72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:11.679576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.679818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:11.679864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:11.680087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:11.680158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:11.683009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:11.683128Z node 1 :FLAT_TX_SCHEMESHARD ... HARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 116, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-25T14:39:26.326238Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 116, path id: [OwnerId: 72075186233409546, LocalPathId: 9] 2025-06-25T14:39:26.326346Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-25T14:39:26.326422Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:725:2625], at schemeshard: 72075186233409546, txId: 116, path id: 1 2025-06-25T14:39:26.326484Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:725:2625], at schemeshard: 72075186233409546, txId: 116, path id: 9 2025-06-25T14:39:26.327085Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 116:0, at schemeshard: 72075186233409546 2025-06-25T14:39:26.327162Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 116:0 ProgressState, operation type: TxCreateTable, at tablet# 72075186233409546 2025-06-25T14:39:26.327444Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 116:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72075186233409546 OwnerIdx: 11 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 9 BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-25T14:39:26.328298Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 1 Version: 16 PathOwnerId: 
72075186233409546, cookie: 116 2025-06-25T14:39:26.328467Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 1 Version: 16 PathOwnerId: 72075186233409546, cookie: 116 2025-06-25T14:39:26.328519Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 116 2025-06-25T14:39:26.328576Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 116, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 16 2025-06-25T14:39:26.328641Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 13 2025-06-25T14:39:26.329675Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 9 Version: 1 PathOwnerId: 72075186233409546, cookie: 116 2025-06-25T14:39:26.329747Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 9 Version: 1 PathOwnerId: 72075186233409546, cookie: 116 2025-06-25T14:39:26.329778Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 116 2025-06-25T14:39:26.329804Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 116, pathId: [OwnerId: 72075186233409546, LocalPathId: 9], version: 1 2025-06-25T14:39:26.329834Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 9] was 4 2025-06-25T14:39:26.329899Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 116, ready parts: 0/1, is published: true 2025-06-25T14:39:26.332580Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 116:0 from tablet: 72075186233409546 to tablet: 72057594037968897 cookie: 72075186233409546:11 msg type: 268697601 2025-06-25T14:39:26.332740Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 116, partId: 0, tablet: 72057594037968897 2025-06-25T14:39:26.332795Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1806: TOperation RegisterRelationByShardIdx, TxId: 116, shardIdx: 72075186233409546:11, partId: 0 2025-06-25T14:39:26.333175Z node 7 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72075186233409546 OwnerIdx: 11 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 9 BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-25T14:39:26.354204Z node 7 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72075186233409546, OwnerIdx 11, type DataShard, boot 
OK, tablet id 72075186233409556 2025-06-25T14:39:26.354742Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5951: Handle TEvCreateTabletReply at schemeshard: 72075186233409546 message: Status: OK Owner: 72075186233409546 OwnerIdx: 11 TabletID: 72075186233409556 Origin: 72057594037968897 2025-06-25T14:39:26.354809Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1820: TOperation FindRelatedPartByShardIdx, TxId: 116, shardIdx: 72075186233409546:11, partId: 0 2025-06-25T14:39:26.354976Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 116:0, at schemeshard: 72075186233409546, message: Status: OK Owner: 72075186233409546 OwnerIdx: 11 TabletID: 72075186233409556 Origin: 72057594037968897 2025-06-25T14:39:26.355039Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 116:0 HandleReply TEvCreateTabletReply, at tabletId: 72075186233409546 2025-06-25T14:39:26.355137Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:180: TCreateParts opId# 116:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72075186233409546 OwnerIdx: 11 TabletID: 72075186233409556 Origin: 72057594037968897 2025-06-25T14:39:26.355248Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 116:0 2 -> 3 2025-06-25T14:39:26.356331Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 116 2025-06-25T14:39:26.359540Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 116 2025-06-25T14:39:26.360664Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 116:0, at schemeshard: 72075186233409546 2025-06-25T14:39:26.360912Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 116:0, at schemeshard: 72075186233409546 2025-06-25T14:39:26.360977Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_table.cpp:197: TCreateTable TConfigureParts operationId# 116:0 ProgressState at tabletId# 72075186233409546 2025-06-25T14:39:26.361073Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:217: TCreateTable TConfigureParts operationId# 116:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409556 seqNo: 3:8 2025-06-25T14:39:26.361463Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:233: TCreateTable TConfigureParts operationId# 116:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409556 message: TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 674 RawX2: 30064773657 } TxBody: "\n\236\004\n\007Table11\020\t\032\r\n\003key\030\002 \001(\000@\000\032\020\n\005Value\030\200$ \002(\000@\000(\001:\262\003\022\253\003\010\200\200\200\002\020\254\002\030\364\003 \200\200\200\010(\0000\200\200\200 8\200\200\200\010@\2008H\000RX\010\000\020\000\030\010 \010(\200\200\200@0\377\377\377\377\0178\001B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen1P\nX\200\200\001`nh\000p\000Rb\010\001\020\200\200\200\024\030\005 
\020(\200\200\200\200\0020\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen2P\nX\200\200\001`nh\200\200\200\004p\200\200\200\004Rc\010\002\020\200\200\200\310\001\030\005 \020(\200\200\200\200@0\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen3P\nX\200\200\001`nh\200\200\200(p\200\200\200(X\001`\005j$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionr\017compaction_gen0z\017compaction_gen0\202\001\004scan\210\001\200\200\200\010\220\001\364\003\230\0012\270\001\2008\300\001\006R\002\020\001J\026/MyRoot/USER_0/Table11\242\001\006\001\000\000\000\000\200\252\001\000\260\001\001\270\001\000\210\002\001\222\002\013\t\n\000\220\000\000\020\000\001\020\t:\004\010\003\020\010" TxId: 116 ExecLevel: 0 Flags: 0 SchemeShardId: 72075186233409546 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } SubDomainPathId: 1 2025-06-25T14:39:26.370786Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 116:0 from tablet: 72075186233409546 to tablet: 72075186233409556 cookie: 72075186233409546:11 msg type: 269549568 2025-06-25T14:39:26.370980Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 116, partId: 0, tablet: 72075186233409556 TestModificationResult got TxId: 116, wait until txId: 116 TestModificationResults wait txId: 117 2025-06-25T14:39:26.399381Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table12" Columns { Name: "key" Type: "Uint32" } Columns { Name: "Value" Type: "Utf8" } KeyColumnNames: "key" } } TxId: 117 TabletId: 72075186233409546 , at schemeshard: 72075186233409546 2025-06-25T14:39:26.401783Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 117, response: Status: StatusQuotaExceeded Reason: "Request exceeded a limit on the number of schema operations, try again later." TxId: 117 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:39:26.402078Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 117, database: /MyRoot/USER_0, subject: , status: StatusQuotaExceeded, reason: Request exceeded a limit on the number of schema operations, try again later., operation: CREATE TABLE, path: /MyRoot/USER_0/Table12 TestModificationResult got TxId: 117, wait until txId: 117 |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ReconnectReplica [GOOD] >> KqpWorkloadServiceTables::TestCleanupOnServiceRestart [GOOD] >> KqpWorkloadServiceTables::TestLeaseExpiration >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TGroupMapperTest::MakeDisksForbidden |82.8%| [TA] $(B)/ydb/core/base/ut_board_subscriber/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TGroupMapperTest::NonUniformCluster >> TGroupMapperTest::NonUniformClusterDifferentSlotsPerDisk >> TGroupMapperTest::CheckNotToBreakFailModel >> TGroupMapperTest::Block42_1disk >> TGroupMapperTest::NonUniformClusterDifferentSlotsPerDisk [GOOD] >> TGroupMapperTest::MakeDisksForbidden [GOOD] >> TGroupMapperTest::CheckNotToBreakFailModel [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:39:21.503871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:39:21.503972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:21.504023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:39:21.504062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:39:21.504099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:39:21.504128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:39:21.504202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:39:21.504273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:39:21.505144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:39:21.505515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:39:21.585288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:39:21.585414Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:21.609016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:39:21.609476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:39:21.609649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 
2025-06-25T14:39:21.629945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:39:21.630340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:39:21.631003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:21.632032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:39:21.657134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:21.657371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:39:21.658697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:21.658765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:21.658954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:39:21.659002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:39:21.659049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:39:21.659131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:39:21.677527Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:39:21.911841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:39:21.912083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:21.912290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:39:21.912387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:39:21.912632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:39:21.912704Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:39:21.915255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:21.915482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:39:21.915709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:21.915860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:39:21.915907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:39:21.915943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:39:21.918044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:21.918109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:39:21.918152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:39:21.920017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:21.920070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:39:21.920115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:21.920184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:39:21.923913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:39:21.926496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:39:21.926673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at 
step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:39:21.927705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:21.927847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:21.927893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:21.928175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:39:21.928229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:39:21.928441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:39:21.928514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:39:21.930824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:21.930873Z node 1 :FLAT_TX_SCHEMESHARD ... 
p Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:39:28.769054Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 126 RawX2: 30064773222 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:39:28.769122Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 103:0, at tablet# 72057594046678944 2025-06-25T14:39:28.769385Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 103:0 128 -> 240 2025-06-25T14:39:28.769437Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 103:0, at tablet# 72057594046678944 2025-06-25T14:39:28.769569Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:39:28.769867Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:577: Send TEvUpdateTenantSchemeShard, to actor: [7:392:2361], msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 }, at schemeshard: 72057594046678944 2025-06-25T14:39:28.775276Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5906: Handle TEvUpdateTenantSchemeShard, at schemeshard: 72075186234409546, msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } 2025-06-25T14:39:28.775443Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:79: TTxUpdateTenant DoExecute, msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 }, at schemeshard: 72075186234409546 2025-06-25T14:39:28.775634Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:590: Cannot publish paths for unknown operation id#0 
FAKE_COORDINATOR: Erasing txId 103 2025-06-25T14:39:28.775985Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:39:28.776025Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:39:28.776192Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:39:28.776237Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:207:2207], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-25T14:39:28.776583Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:39:28.776647Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 103:0, ProgressState, NeedSyncHive: 0 2025-06-25T14:39:28.776705Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 103:0 240 -> 240 2025-06-25T14:39:28.777800Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:39:28.777941Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:39:28.777998Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:39:28.778053Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 6 2025-06-25T14:39:28.778111Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-25T14:39:28.778217Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-25T14:39:28.782087Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5893: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 2 TabletID: 72075186234409546 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 4 UserAttributesVersion: 1 TenantHive: 72075186233409546 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-06-25T14:39:28.782221Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:39:28.782373Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, 
pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[7:392:2361], EffectiveACLVersion: 0, SubdomainVersion: 4, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 4, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:39:28.782784Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186234409546 2025-06-25T14:39:28.782829Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186234409546, txId: 0, path id: [OwnerId: 72075186234409546, LocalPathId: 1] 2025-06-25T14:39:28.782978Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186234409546 2025-06-25T14:39:28.783018Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:483:2423], at schemeshard: 72075186234409546, txId: 0, path id: 1 2025-06-25T14:39:28.784004Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:39:28.784070Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-25T14:39:28.784243Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:39:28.784305Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:39:28.786183Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:39:28.786236Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:39:28.786283Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-25T14:39:28.786359Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:39:28.786418Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:39:28.786466Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:39:28.786564Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:39:28.787849Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186234409546, msg: Owner: 72075186234409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186234409546, cookie: 0 2025-06-25T14:39:28.788365Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:39:28.788463Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 104 TestModificationResults wait txId: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 2025-06-25T14:39:28.790664Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:39:28.790724Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:39:28.791212Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:39:28.791318Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:39:28.791363Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:570:2508] TestWaitNotification: OK eventTxId 103 |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformClusterDifferentSlotsPerDisk [GOOD] |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::CheckNotToBreakFailModel [GOOD] |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MakeDisksForbidden [GOOD] >> ResourcePoolsDdl::TestPoolSwitchToLimitedState [GOOD] >> ResourcePoolsDdl::TestDropResourcePool >> TGroupMapperTest::NonUniformClusterMirror3dc >> TGroupMapperTest::SimplestErasureNone [GOOD] |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::SimplestErasureNone [GOOD] >> TGroupMapperTest::NonUniformClusterMirror3dc [GOOD] |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |82.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformClusterMirror3dc [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> TGroupMapperTest::Mirror3dc >> ResourcePoolClassifiersDdl::TestAlterResourcePoolClassifier [GOOD] >> ResourcePoolClassifiersDdl::TestDropResourcePoolClassifier |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::Mirror3dc [GOOD] |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::Mirror3dc [GOOD] >> ColumnStatistics::CountMinSketchStatistics [GOOD] >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains [GOOD] |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::Mirror3dc3Nodes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/statistics/service/ut/unittest >> ColumnStatistics::CountMinSketchStatistics [GOOD] Test command err: 2025-06-25T14:36:51.736720Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:51.737185Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:51.737253Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d43/r3tmp/tmpKZvPtE/pdisk_1.dat 2025-06-25T14:36:52.135655Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65040, node 1 2025-06-25T14:36:52.542122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:52.542177Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:52.542206Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:52.542536Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:52.548371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:52.674753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:52.674907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:52.698614Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12646 2025-06-25T14:36:53.323520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:36:56.859463Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:36:56.935247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:56.935370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:56.986285Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:36:56.993096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:57.245124Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:57.272083Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:57.272656Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:57.273917Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:57.274268Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:57.274363Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:57.274444Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:57.274525Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:57.274606Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:57.274687Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:36:57.474055Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:57.474171Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:57.490606Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:57.711269Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:57.761307Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:36:57.761405Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:36:57.796752Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:36:57.798659Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:36:57.798882Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:36:57.798948Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:36:57.799002Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:36:57.799053Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:36:57.799101Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:36:57.799152Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:36:57.799716Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:36:57.843772Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:36:57.843852Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1788:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:36:57.857695Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2571] 2025-06-25T14:36:57.866334Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1831:2586] 2025-06-25T14:36:57.867159Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1831:2586], schemeshard id = 72075186224037897 2025-06-25T14:36:57.886276Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:36:57.926605Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:36:57.926665Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:36:57.926738Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:36:57.946518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:57.961351Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:36:57.961546Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:36:58.181181Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:36:58.330009Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:36:58.484933Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:36:59.229434Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:59.475427Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2145:3020], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:59.475614Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:59.495295Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:36:59.633335Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2795];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:36:59.633577Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2795];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:36:59.633875Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2795];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:36:59.634013Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2795];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:36:59.634120Z node 2 :TX_COLUMNSHARD WARN: ... ats size# 51 2025-06-25T14:39:29.253538Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T14:39:30.455670Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:39:30.455759Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:39:30.455811Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-25T14:39:30.455855Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:39:30.456245Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T14:39:30.468502Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:39:30.471885Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7208:5292], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:39:30.471982Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7218:5297], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:39:30.472047Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:39:30.487919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:39:30.569710Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7222:5300], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T14:39:30.785591Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7318:5346] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:39:31.054090Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7347:5361]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:39:31.054323Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:39:31.054391Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7349:5363] 2025-06-25T14:39:31.054460Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7349:5363] 2025-06-25T14:39:31.054676Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7350:5364] 2025-06-25T14:39:31.054774Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7350:5364], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T14:39:31.054826Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T14:39:31.054951Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7349:5363], server id = [2:7350:5364], tablet id = 72075186224037894, status = OK 2025-06-25T14:39:31.055002Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:39:31.055060Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7347:5361], StatRequests.size() = 1 2025-06-25T14:39:31.574875Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MzllMzEwOTgtM2UzNmQyMWQtYTU5ODczYjctNzY2MGQ2OGU=, TxId: 2025-06-25T14:39:31.574969Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MzllMzEwOTgtM2UzNmQyMWQtYTU5ODczYjctNzY2MGQ2OGU=, TxId: 2025-06-25T14:39:31.575549Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:39:31.596799Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:39:31.596868Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:39:31.650710Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T14:39:31.650795Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T14:39:31.714876Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7349:5363], schemeshard count = 1 2025-06-25T14:39:33.931807Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:39:33.931868Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:39:33.931922Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T14:39:33.931963Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:39:33.935071Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T14:39:33.958733Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T14:39:33.959266Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T14:39:33.959353Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T14:39:33.960191Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T14:39:33.973410Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T14:39:33.973639Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T14:39:33.984442Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7458:5423], server id = [2:7459:5424], tablet id = 72075186224037899, status = OK 2025-06-25T14:39:33.984843Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7458:5423], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:39:34.007397Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T14:39:34.007511Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T14:39:34.007780Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T14:39:34.008000Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T14:39:34.008174Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7458:5423], server id = [2:7459:5424], tablet id = 72075186224037899 2025-06-25T14:39:34.008206Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:39:34.008418Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T14:39:34.042034Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T14:39:34.094131Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7479:5443]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:39:34.094385Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:39:34.094425Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7479:5443], StatRequests.size() = 1 2025-06-25T14:39:34.339984Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YjRmZWMzODctOGY4YzRlNGUtYTZjYjVlNTAtNjE4MjUyNDY=, TxId: 2025-06-25T14:39:34.340043Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YjRmZWMzODctOGY4YzRlNGUtYTZjYjVlNTAtNjE4MjUyNDY=, TxId: 2025-06-25T14:39:34.340826Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:39:34.341838Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:7492:5538]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T14:39:34.342070Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:39:34.342129Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T14:39:34.344151Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:39:34.344221Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T14:39:34.344283Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T14:39:34.361573Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains [GOOD] >> TGroupMapperTest::SimplestMirror3dc |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::Mirror3dc3Nodes [GOOD] |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_bsvolume/unittest >> TGroupMapperTest::SimplestMirror3dc [GOOD] |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::SimplestMirror3dc [GOOD] >> ResourcePoolClassifiersDdl::TestMultiGroupClassification [GOOD] >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewOnServerless 
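[editor's note] For readability, the statistics upsert that [TQueryBase] logs above (the RunDataQuery against `.metadata/_statistics`) can be laid out as a standalone YQL sketch. The element types of $column_tags and $data are not printed in the log, so Uint32 and String below are assumptions:

DECLARE $owner_id AS Uint64;
DECLARE $local_path_id AS Uint64;
DECLARE $stat_type AS Uint32;
DECLARE $column_tags AS List<Uint32>;  -- element type assumed, not shown in the log
DECLARE $data AS List<String>;         -- element type assumed, not shown in the log

UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data)
VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
       ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);

Each traversal of a column table thus writes one row per collected column tag into the serverless database's own `.metadata/_statistics` table, which is what the surrounding TTxFinishTraversal entries then mark as completed.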
|82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains_and_one_small_node [GOOD] >> TGroupMapperTest::DifferentGroupSizeInUnits [GOOD] >> TMultiversionObjectMap::MonteCarlo >> TGroupMapperTest::MonteCarlo |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::when_one_server_per_rack_in_4_racks_then_can_construct_group_with_4_domains_and_one_small_node [GOOD] |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::DifferentGroupSizeInUnits [GOOD] >> TGroupMapperTest::NonUniformClusterMirror3dcWithUnusableDomain >> ResourcePoolsDdl::TestDropResourcePool [GOOD] >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesEmpty [GOOD] >> TGroupMapperTest::NonUniformClusterMirror3dcWithUnusableDomain [GOOD] |82.9%| [TA] $(B)/ydb/core/tx/schemeshard/ut_bsvolume/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpWorkloadService::TestStartQueryAfterCancel [GOOD] >> KqpWorkloadService::TestZeroConcurrentQueryLimit |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesEmpty [GOOD] |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformClusterMirror3dcWithUnusableDomain [GOOD] >> TGroupMapperTest::ReassignGroupTest3dc |82.9%| [TA] $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/test-results/unittest/{meta.json ... results_accumulator.log} |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesHuge >> PartitionStats::CollectorOverload ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> ResourcePoolsDdl::TestDropResourcePool [GOOD] Test command err: 2025-06-25T14:38:48.689328Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896439506156855:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:48.689859Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001989/r3tmp/tmpqZMMgI/pdisk_1.dat 2025-06-25T14:38:49.077917Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:49.079697Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896439506156758:2080] 1750862328675361 != 1750862328675364 TServer::EnableGrpc on GrpcPort 28165, node 1 2025-06-25T14:38:49.141140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:49.141278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:49.142842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:49.154728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will 
use file: (empty maybe) 2025-06-25T14:38:49.154748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:49.154754Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:49.154850Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22134 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:49.452975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
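[editor's note] The remainder of this block exercises the resource pool lifecycle: the session below issues CREATE RESOURCE POOL, routes a query through the pool, and the later "Got delete notification" / NOT_FOUND fetch errors correspond to the pool having been removed. A minimal YQL sketch of that sequence, with the DROP statement assumed from the test name (it is not printed in the log):

CREATE RESOURCE POOL my_pool WITH (
    CONCURRENT_QUERY_LIMIT = 1,  -- at most one query executes in the pool at a time
    QUEUE_SIZE = 0               -- no waiting queue: excess queries are rejected immediately
);

SELECT 42;                       -- request routed to my_pool, as in the "pool id: my_pool" entries below

DROP RESOURCE POOL my_pool;      -- assumed step; afterwards pool fetches fail with NOT_FOUND, as logged below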
2025-06-25T14:38:49.693089Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:51.467710Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:38:51.481902Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NGIxMGI1OTMtMzM0M2ZmZjItZGMyZmM1ZDUtNmMxYjVlNjQ=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NGIxMGI1OTMtMzM0M2ZmZjItZGMyZmM1ZDUtNmMxYjVlNjQ= 2025-06-25T14:38:51.485831Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896452391059258:2287], Start check tables existence, number paths: 2 2025-06-25T14:38:51.485951Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NGIxMGI1OTMtMzM0M2ZmZjItZGMyZmM1ZDUtNmMxYjVlNjQ=, ActorId: [1:7519896452391059259:2288], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:38:51.486529Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-25T14:38:51.486546Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:38:51.486560Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:38:51.488482Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896452391059258:2287], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:38:51.488536Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896452391059258:2287], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:38:51.488569Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896452391059258:2287], Successfully finished 2025-06-25T14:38:51.488638Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:38:51.500541Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452391059286:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:51.503664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:51.507107Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452391059286:2298], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976715658 2025-06-25T14:38:51.508434Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452391059286:2298], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully 
connected 2025-06-25T14:38:51.520580Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452391059286:2298], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:38:51.576856Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452391059286:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:51.580188Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896452391059337:2330] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:51.580271Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452391059286:2298], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-25T14:38:51.586964Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-06-25T14:38:51.587007Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-25T14:38:51.587052Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896452391059346:2292], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-25T14:38:51.587238Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=NGIxMGI1OTMtMzM0M2ZmZjItZGMyZmM1ZDUtNmMxYjVlNjQ=, ActorId: [1:7519896452391059259:2288], ActorState: ReadyState, TraceId: 01jykrdzp2eg6qe9tzpd8zmev4, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: CREATE RESOURCE POOL my_pool WITH ( CONCURRENT_QUERY_LIMIT=1, QUEUE_SIZE=0 ); rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-06-25T14:38:51.589147Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896452391059346:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:51.589267Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:51.834120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:51.839873Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=1&id=NGIxMGI1OTMtMzM0M2ZmZjItZGMyZmM1ZDUtNmMxYjVlNjQ=, ActorId: [1:7519896452391059259:2288], ActorState: ExecuteState, TraceId: 01jykrdzp2eg6qe9tzpd8zmev4, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [1:7519896452391059355:2288] WorkloadServiceCleanup: 0 2025-06-25T14:38:51.841304Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=1&id=NGIxMGI1OTMtMzM0M2ZmZjItZGMyZmM1ZDUtNmMxYjVlNjQ=, ActorId: [1:7519896452391059259:2288], ActorState: CleanupState, TraceId: 01jykrdzp2eg6qe9tzpd8zmev4, EndCleanup, isFinal: 0 2025-06-25T14:38:51.841353Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=1&id=NGIxMGI1OTMtMzM0M2ZmZjItZGMyZmM1ZDUtNmMxYjVlNjQ=, ActorId: [1:7519896452391059259:2288], ActorState: CleanupState, TraceId: 01jykrdzp2eg6qe9tzpd8zmev4, Sent query response back to proxy, proxyRequestId: 3, proxyId: [1:7519896439506156991:2237] 2025-06-25T14:38:51.844656Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=YWIzN2MzYy05MDYxNzY1YS01ZTkyMjkwOS1mMTBlNWE1OA==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YWIzN2MzYy05MDYxNzY1YS01ZTkyMjkwOS1mMTBlNWE1OA== 2025-06-25T14:38:51.844741Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=YWIzN2MzYy05MDYxNzY1YS01ZTkyMjkwOS1mMTBlNWE1OA==, ActorId: [1:7519896452391059379:22 ... 
39:38.271724Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=8&id=ZDgxYjU5MWUtNWNjNGU2MjItMWI3NzZkMjQtNjQ4ZWQ3Y2Q=, ActorId: [8:7519896656891399559:2403], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:39:38.271753Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=ZDgxYjU5MWUtNWNjNGU2MjItMWI3NzZkMjQtNjQ4ZWQ3Y2Q=, ActorId: [8:7519896656891399559:2403], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:39:38.271776Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=ZDgxYjU5MWUtNWNjNGU2MjItMWI3NzZkMjQtNjQ4ZWQ3Y2Q=, ActorId: [8:7519896656891399559:2403], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:39:38.271796Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=ZDgxYjU5MWUtNWNjNGU2MjItMWI3NzZkMjQtNjQ4ZWQ3Y2Q=, ActorId: [8:7519896656891399559:2403], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:39:38.271857Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=ZDgxYjU5MWUtNWNjNGU2MjItMWI3NzZkMjQtNjQ4ZWQ3Y2Q=, ActorId: [8:7519896656891399559:2403], ActorState: unknown state, Session actor destroyed 2025-06-25T14:39:38.281233Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7519896639711529948:2300], DatabaseId: /Root, PoolId: my_pool, Got delete notification 2025-06-25T14:39:38.281331Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: my_pool 2025-06-25T14:39:38.281386Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519896656891399618:2417], DatabaseId: /Root, PoolId: my_pool, Start pool fetching 2025-06-25T14:39:38.281721Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519896656891399618:2417], DatabaseId: /Root, PoolId: my_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-25T14:39:38.281816Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool my_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-25T14:39:38.285209Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=N2EzZmJiZjYtOWU0YTQ2YTYtZmM4MjgyN2QtZjE2Mzk1NmE=, ActorId: [8:7519896639711529840:2292], ActorState: ExecuteState, TraceId: 01jykrfd8j36rzbwshp8c8fp5d, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [8:7519896656891399587:2292] WorkloadServiceCleanup: 0 2025-06-25T14:39:38.287789Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=N2EzZmJiZjYtOWU0YTQ2YTYtZmM4MjgyN2QtZjE2Mzk1NmE=, ActorId: [8:7519896639711529840:2292], ActorState: CleanupState, TraceId: 01jykrfd8j36rzbwshp8c8fp5d, EndCleanup, isFinal: 0 2025-06-25T14:39:38.287856Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=8&id=N2EzZmJiZjYtOWU0YTQ2YTYtZmM4MjgyN2QtZjE2Mzk1NmE=, ActorId: [8:7519896639711529840:2292], ActorState: CleanupState, TraceId: 01jykrfd8j36rzbwshp8c8fp5d, Sent query response back to proxy, proxyRequestId: 17, proxyId: [8:7519896622531660151:2160] 2025-06-25T14:39:38.297328Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ= 2025-06-25T14:39:38.297633Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ=, ActorId: [8:7519896656891399630:2418], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:39:38.297744Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: my_pool 2025-06-25T14:39:38.297801Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519896656891399632:2419], DatabaseId: /Root, PoolId: my_pool, Start pool fetching 2025-06-25T14:39:38.297830Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ=, ActorId: [8:7519896656891399630:2418], ActorState: ReadyState, TraceId: 01jykrfd9s901aea0r0cb12te9, received request, proxyRequestId: 19 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [8:7519896656891399629:2608] database: Root databaseId: /Root pool id: my_pool 2025-06-25T14:39:38.297886Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [8:7519896656891399630:2418], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ= 2025-06-25T14:39:38.297934Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [8:7519896656891399633:2420], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ=, Start pool fetching 2025-06-25T14:39:38.297988Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519896656891399634:2421], DatabaseId: /Root, PoolId: my_pool, Start pool fetching 2025-06-25T14:39:38.300962Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: 
[WorkloadService] [TPoolFetcherActor] ActorId: [8:7519896656891399632:2419], DatabaseId: /Root, PoolId: my_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-25T14:39:38.301050Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519896656891399634:2421], DatabaseId: /Root, PoolId: my_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-25T14:39:38.301113Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool my_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-25T14:39:38.301228Z node 8 :KQP_WORKLOAD_SERVICE ERROR: scheme_actors.cpp:56: [WorkloadService] [TPoolResolverActor] ActorId: [8:7519896656891399633:2420], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ=, Failed to fetch pool info NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-25T14:39:38.301350Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:114: [WorkloadService] [TPoolResolverActor] ActorId: [8:7519896656891399633:2420], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ=, Failed to resolve pool, NOT_FOUND, issues: {
: Error: Failed to resolve pool id my_pool subissue: {
: Error: Resource pool my_pool not found or you don't have access permissions } } 2025-06-25T14:39:38.301492Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:546: [WorkloadService] [Service] Reply continue error NOT_FOUND to [8:7519896656891399630:2418]: {
: Error: Failed to resolve pool id my_pool subissue: {
: Error: Resource pool my_pool not found or you don't have access permissions } } 2025-06-25T14:39:38.301597Z node 8 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ=, ActorId: [8:7519896656891399630:2418], ActorState: ExecuteState, TraceId: 01jykrfd9s901aea0r0cb12te9, Create QueryResponse for error on request, msg: Query failed during adding/waiting in workload pool 2025-06-25T14:39:38.301755Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ=, ActorId: [8:7519896656891399630:2418], ActorState: ExecuteState, TraceId: 01jykrfd9s901aea0r0cb12te9, Cleanup start, isFinal: 1 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 1 2025-06-25T14:39:38.301893Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:189: [WorkloadService] [Service] Finished request with worker actor [8:7519896656891399630:2418], DatabaseId: /Root, PoolId: my_pool, SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ= 2025-06-25T14:39:38.301945Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ=, ActorId: [8:7519896656891399630:2418], ActorState: CleanupState, TraceId: 01jykrfd9s901aea0r0cb12te9, EndCleanup, isFinal: 1 2025-06-25T14:39:38.302053Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ=, ActorId: [8:7519896656891399630:2418], ActorState: CleanupState, TraceId: 01jykrfd9s901aea0r0cb12te9, Sent query response back to proxy, proxyRequestId: 19, proxyId: [8:7519896622531660151:2160] 2025-06-25T14:39:38.302087Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ=, ActorId: [8:7519896656891399630:2418], ActorState: unknown state, TraceId: 01jykrfd9s901aea0r0cb12te9, Cleanup temp tables: 0 2025-06-25T14:39:38.302214Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=OGY3OTI0NmYtODkxNjk2YzQtZDVjOGFlMDYtZGJkOTg3YjQ=, ActorId: [8:7519896656891399630:2418], ActorState: unknown state, TraceId: 01jykrfd9s901aea0r0cb12te9, Session actor destroyed 2025-06-25T14:39:38.315446Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=8&id=N2EzZmJiZjYtOWU0YTQ2YTYtZmM4MjgyN2QtZjE2Mzk1NmE=, ActorId: [8:7519896639711529840:2292], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:39:38.315493Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=N2EzZmJiZjYtOWU0YTQ2YTYtZmM4MjgyN2QtZjE2Mzk1NmE=, ActorId: [8:7519896639711529840:2292], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:39:38.315520Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=N2EzZmJiZjYtOWU0YTQ2YTYtZmM4MjgyN2QtZjE2Mzk1NmE=, ActorId: [8:7519896639711529840:2292], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:39:38.315542Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=N2EzZmJiZjYtOWU0YTQ2YTYtZmM4MjgyN2QtZjE2Mzk1NmE=, ActorId: [8:7519896639711529840:2292], ActorState: unknown state, 
Cleanup temp tables: 0 2025-06-25T14:39:38.315621Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=N2EzZmJiZjYtOWU0YTQ2YTYtZmM4MjgyN2QtZjE2Mzk1NmE=, ActorId: [8:7519896639711529840:2292], ActorState: unknown state, Session actor destroyed >> PartitionStats::CollectorOverload [GOOD] >> ResultFormatter::Void >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesHuge [GOOD] >> ResultFormatter::Tuple >> ResultFormatter::StructWithNoFields >> ResultFormatter::FormatEmptySchema [GOOD] >> ResultFormatter::FormatNonEmptySchema [GOOD] >> ResultFormatter::Void [GOOD] >> ResultFormatter::VariantTuple [GOOD] >> ResultFormatter::Tuple [GOOD] >> ResultFormatter::Tagged [GOOD] >> ResultFormatter::StructWithNoFields [GOOD] >> ResultFormatter::StructTypeNameAsString [GOOD] |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> PartitionStats::CollectorOverload [GOOD] >> TGroupMapperTest::MapperSequentialCalls |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesHuge [GOOD] |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::VariantTuple [GOOD] |82.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Tagged [GOOD] |83.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::FormatNonEmptySchema [GOOD] >> TGroupMapperTest::NonUniformCluster2 |83.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::StructTypeNameAsString [GOOD] >> SystemView::ShowCreateTableChangefeeds [GOOD] >> SystemView::ShowCreateTableColumnAlterColumn >> KqpWorkloadServiceDistributed::TestNodeDisconnect [GOOD] >> KqpWorkloadServiceDistributed::TestDistributedLessConcurrentQueryLimit >> KqpJoinOrder::TPCHEveryQueryWorks-ColumnStore [GOOD] >> KqpWorkloadService::TestZeroConcurrentQueryLimit [GOOD] >> TKeyValueTest::TestConcatWorks >> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOk >> KeyValueReadStorage::ReadRangeOk1Key >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEvents >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOk >> TKeyValueTest::TestWriteDeleteThenReadRemaining >> TKeyValueTest::TestWrite200KDeleteThenResponseError >> KeyValueReadStorage::ReadRangeOk1Key [GOOD] >> KeyValueReadStorage::ReadRangeOk [GOOD] >> KeyValueReadStorage::ReadRangeNoData [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCHEveryQueryWorks-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 27106, MsgBus: 8292 2025-06-25T14:35:19.706935Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895541734477020:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:19.707130Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ec5/r3tmp/tmp1rZYAb/pdisk_1.dat 2025-06-25T14:35:20.277530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
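[editor's note] The slow-query entry later in this block logs TPC-H Q21 rewritten with YQL's `left semi join` / `left only join`, presumably because correlated EXISTS / NOT EXISTS subqueries are not supported there. For comparison only, the reference form of Q21 with the same 'EGYPT' substitution is sketched below (standard TPC-H text, not taken from the log):

select
    s_name,
    count(*) as numwait
from
    supplier, lineitem l1, orders, nation
where
    s_suppkey = l1.l_suppkey
    and o_orderkey = l1.l_orderkey
    and o_orderstatus = 'F'
    and l1.l_receiptdate > l1.l_commitdate
    and exists (                      -- rewritten as "left semi join ... l2" in the logged query
        select * from lineitem l2
        where l2.l_orderkey = l1.l_orderkey
          and l2.l_suppkey <> l1.l_suppkey)
    and not exists (                  -- rewritten as "left only join ... l3" in the logged query
        select * from lineitem l3
        where l3.l_orderkey = l1.l_orderkey
          and l3.l_suppkey <> l1.l_suppkey
          and l3.l_receiptdate > l3.l_commitdate)
    and s_nationkey = n_nationkey
    and n_name = 'EGYPT'
group by s_name
order by numwait desc, s_name
limit 100;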
2025-06-25T14:35:20.277657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:35:20.285656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:35:20.309152Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:20.312793Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895541734476860:2080] 1750862119667098 != 1750862119667101 TServer::EnableGrpc on GrpcPort 27106, node 1 2025-06-25T14:35:20.432822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:20.432842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:20.432848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:20.432959Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8292 2025-06-25T14:35:20.715421Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8292 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:35:21.129983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:35:21.164984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:35:23.079769Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895558914346687:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:23.079855Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:23.079905Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895558914346699:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:35:23.083425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:35:23.097134Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895558914346701:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:35:23.198073Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895558914346752:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:35:23.617717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.800281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.847274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.880652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.918595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:23.951523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.028832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.094800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:35:24.700414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895541734477020:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:35:24.700478Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:35:35.208547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:35:35.208574Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:37:54.001516Z node 1 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037892 Cancelled read: {[1:7519896203159446790:3611], 0} 2025-06-25T14:38:16.064868Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrcjt5djmj3zzn36pz161z", SessionId: ydb://session/3?node_id=1&id=Y2I2Y2Y0MTAtZTRjNTJmMjctMjAzYWQ2YzktOTVhZTUyZDc=, Slow query, duration: 10.426432s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n\n-- TPC-H/TPC-R Suppliers Who Kept Orders Waiting Query (Q21)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 0)\n-- using 1680793381 as a seed to the RNG\n\nselect\n `/Root/supplier`.s_name as s_name,\n count(*) as numwait\nfrom\n `/Root/supplier`\n cross join `/Root/lineitem` l1\n cross join `/Root/orders`\n cross join `/Root/nation`\n left semi join (\n select\n l2.l_orderkey as l_orderkey\n from\n `/Root/lineitem` l2\n cross join `/Root/supplier`\n cross join `/Root/lineitem` l1\n cross join `/Root/orders`\n cross join `/Root/nation`\n where\n s_suppkey = l1.l_suppkey\n and o_orderkey = l1.l_orderkey\n and o_orderstatus = 'F'\n and l1.l_receiptdate > l1.l_commitdate\n and s_nationkey = n_nationkey\n and n_name = 'EGYPT'\n and l2.l_orderkey = l1.l_orderkey\n and l2.l_suppkey <> l1.l_suppkey\n ) as l2 on l2.l_orderkey = l1.l_orderkey\n left only join (\n select\n l3.l_orderkey as l_orderkey\n from\n `/Root/lineitem` l3\n cross join `/Root/supplier`\n cross join `/Root/lineitem` l1\n cross join `/Root/orders`\n cross join `/Root/nation`\n where\n s_suppkey = l1.l_suppkey\n and o_orderkey = l1.l_orderkey\n and o_orderstatus = 'F'\n and l1.l_receiptdate > l1.l_commitdate\n and s_nationkey = n_nationkey\n and n_name = 'EGYPT'\n and l3.l_orderkey = l1.l_orderkey\n and l3.l_suppkey <> l1.l_suppkey\n and l3.l_receiptdate > l3.l_commitdate\n ) as l3 on l3.l_orderkey = l1.l_orderkey\nwhere\n s_suppkey = l1.l_suppkey\n and o_orderkey = l1.l_orderkey\n and o_orderstatus = 'F'\n and l1.l_receiptdate > l1.l_commitdate\n and s_nationkey = n_nationkey\n and n_name = 'EGYPT'\ngroup by\n `/Root/supplier`.s_name\norder by\n numwait desc,\n s_name\nlimit 100;\n", parameters: 0b 2025-06-25T14:38:33.569685Z node 1 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037891 Cancelled read: {[1:7519896374958140198:3926], 0} 2025-06-25T14:39:09.282960Z node 1 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037889 Cancelled read: {[1:7519896529576964147:4258], 0} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadRangeNoData [GOOD] Test command err: 2025-06-25T14:39:44.147524Z 1 
00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-25T14:39:44.158085Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-06-25T14:39:44.172260Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 2 ErrorReason# ReadRequestCookie# 0 2025-06-25T14:39:44.172332Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-06-25T14:39:44.177322Z 1 00h00m00.000000s :KEYVALUE INFO: {KV320@keyvalue_storage_read_request.cpp:122} Inline read request KeyValue# 1 Status# OK 2025-06-25T14:39:44.177371Z 1 00h00m00.000000s :KEYVALUE DEBUG: {KV322@keyvalue_storage_read_request.cpp:134} Expected OK or UNKNOWN and given OK readCount# 0 2025-06-25T14:39:44.177405Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 >> TGroupMapperTest::NonUniformCluster2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadService::TestZeroConcurrentQueryLimit [GOOD] Test command err: 2025-06-25T14:38:48.710460Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896440513858230:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:48.716360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019a5/r3tmp/tmpBl9tfS/pdisk_1.dat 2025-06-25T14:38:49.112023Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896440513858130:2080] 1750862328700396 != 1750862328700399 2025-06-25T14:38:49.121628Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24396, node 1 2025-06-25T14:38:49.149148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:49.149526Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:49.151273Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:49.170272Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:49.170309Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:49.170343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:49.170497Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20109 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:49.446860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:49.716455Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:51.370699Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:38:51.381932Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896453398760637:2289], Start check tables existence, number paths: 2 2025-06-25T14:38:51.382080Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-25T14:38:51.382106Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:38:51.382133Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:38:51.382937Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896453398760637:2289], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:38:51.383008Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896453398760637:2289], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:38:51.383081Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896453398760637:2289], Successfully finished 2025-06-25T14:38:51.383170Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:38:51.383771Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZDhkZmVjZTEtNWQwMTE2OC0zOTBlNzBhOC05Njc1MGEzNw==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZDhkZmVjZTEtNWQwMTE2OC0zOTBlNzBhOC05Njc1MGEzNw== 2025-06-25T14:38:51.383830Z node 1 :KQP_SESSION DEBUG: 
kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZDhkZmVjZTEtNWQwMTE2OC0zOTBlNzBhOC05Njc1MGEzNw==, ActorId: [1:7519896453398760654:2290], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:38:51.410623Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896453398760656:2297], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:51.413768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:51.414800Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896453398760656:2297], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-25T14:38:51.414915Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896453398760656:2297], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-25T14:38:51.425389Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896453398760656:2297], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:38:51.498959Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896453398760656:2297], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:51.502825Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896453398760707:2329] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:51.502938Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896453398760656:2297], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-25T14:38:51.505599Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MTU1ZTFhMzAtZDZjNTYwNDItMWE4YWFhNDgtY2NjNjg1NDI=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MTU1ZTFhMzAtZDZjNTYwNDItMWE4YWFhNDgtY2NjNjg1NDI= 2025-06-25T14:38:51.505732Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=MTU1ZTFhMzAtZDZjNTYwNDItMWE4YWFhNDgtY2NjNjg1NDI=, ActorId: [1:7519896453398760715:2291], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:38:51.505961Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-25T14:38:51.505982Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-25T14:38:51.506203Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=MTU1ZTFhMzAtZDZjNTYwNDItMWE4YWFhNDgtY2NjNjg1NDI=, ActorId: [1:7519896453398760715:2291], ActorState: ReadyState, TraceId: 01jykrdzkj5q0ynj3reyfm276e, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7519896453398760714:2335] database: Root databaseId: /Root pool id: sample_pool_id 2025-06-25T14:38:51.506257Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [1:7519896453398760715:2291], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=MTU1ZTFhMzAtZDZjNTYwNDItMWE4YWFhNDgtY2NjNjg1NDI= 2025-06-25T14:38:51.506310Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896453398760717:2292], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-25T14:38:51.506394Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519896453398760718:2293], Database: /Root, Start database fetching 2025-06-25T14:38:51.507229Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519896453398760718:2293], Database: /Root, Database info successfully fetched, serverless: 0 2025-06-25T14:38:51.507316Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-06-25T14:38:51.507380Z node 1 
:KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [1:7519896453398760727:2294], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=MTU1ZTFhMzAtZDZjNTYwNDItMWE4YWFhNDgtY2NjNjg1NDI=, Start pool fetching 2025-06-25T14:38:51.507419Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896453398760728:2295], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-25T14:38:51.507832Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896453398760728:2295], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-25T14:38:51.507839Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896453398760717:2292], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-25T14:38:51.507870Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:107: [WorkloadService] [TPoolResolverActor] ActorId: [1:7519896453398760727:2294], DatabaseId: /Root, Po ... N DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=6&id=MTAxNTVlZGQtMzNjMjYzYjktM2JmYWE2NGUtNTk1MWMwNWQ=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MTAxNTVlZGQtMzNjMjYzYjktM2JmYWE2NGUtNTk1MWMwNWQ= 2025-06-25T14:39:43.213684Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=6&id=MTAxNTVlZGQtMzNjMjYzYjktM2JmYWE2NGUtNTk1MWMwNWQ=, ActorId: [6:7519896676021385000:2291], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:39:43.215218Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519896676021385002:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:39:43.218812Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:39:43.220069Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519896676021385002:2298], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976715658 2025-06-25T14:39:43.220221Z node 6 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519896676021385002:2298], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-25T14:39:43.227798Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519896676021385002:2298], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:39:43.282040Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519896676021385002:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:39:43.285171Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519896676021385053:2330] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:39:43.285273Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519896676021385002:2298], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-25T14:39:43.288697Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM= 2025-06-25T14:39:43.288816Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM=, ActorId: [6:7519896676021385060:2292], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:39:43.289102Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-25T14:39:43.289132Z node 6 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-25T14:39:43.289197Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519896676021385062:2293], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-25T14:39:43.289195Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM=, ActorId: [6:7519896676021385060:2292], ActorState: ReadyState, TraceId: 01jykrfj5s0rqjryb42qy8djjk, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [6:7519896676021385059:2335] database: Root databaseId: /Root pool id: sample_pool_id 2025-06-25T14:39:43.289248Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [6:7519896676021385060:2292], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM= 2025-06-25T14:39:43.289311Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [6:7519896676021385063:2294], Database: /Root, Start database fetching 2025-06-25T14:39:43.289525Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [6:7519896676021385063:2294], Database: /Root, Database info successfully fetched, serverless: 0 2025-06-25T14:39:43.289584Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-06-25T14:39:43.289632Z node 6 
:KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [6:7519896676021385069:2295], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM=, Start pool fetching 2025-06-25T14:39:43.289660Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519896676021385070:2296], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-25T14:39:43.290671Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519896676021385062:2293], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-25T14:39:43.290672Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519896676021385070:2296], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-25T14:39:43.290732Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:107: [WorkloadService] [TPoolResolverActor] ActorId: [6:7519896676021385069:2295], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM=, Pool info successfully resolved 2025-06-25T14:39:43.290734Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025-06-25T14:39:43.290753Z node 6 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-06-25T14:39:43.290934Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:279: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM= 2025-06-25T14:39:43.290951Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519896676021385076:2297], DatabaseId: /Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-06-25T14:39:43.291041Z node 6 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:290: [WorkloadService] [Service] Request placed into pool, DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM= 2025-06-25T14:39:43.291100Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM=, ActorId: [6:7519896676021385060:2292], ActorState: ExecuteState, TraceId: 01jykrfj5s0rqjryb42qy8djjk, Create QueryResponse for error on request, msg: Query failed during adding/waiting in workload pool sample_pool_id 2025-06-25T14:39:43.291199Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM=, ActorId: [6:7519896676021385060:2292], ActorState: ExecuteState, TraceId: 01jykrfj5s0rqjryb42qy8djjk, Cleanup start, isFinal: 1 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 1 2025-06-25T14:39:43.291239Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:189: [WorkloadService] [Service] Finished request with worker actor [6:7519896676021385060:2292], DatabaseId: /Root, PoolId: sample_pool_id, 
SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM= 2025-06-25T14:39:43.291276Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM=, ActorId: [6:7519896676021385060:2292], ActorState: CleanupState, TraceId: 01jykrfj5s0rqjryb42qy8djjk, EndCleanup, isFinal: 1 2025-06-25T14:39:43.291356Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM=, ActorId: [6:7519896676021385060:2292], ActorState: CleanupState, TraceId: 01jykrfj5s0rqjryb42qy8djjk, Sent query response back to proxy, proxyRequestId: 3, proxyId: [6:7519896658841515333:2179] 2025-06-25T14:39:43.291402Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM=, ActorId: [6:7519896676021385060:2292], ActorState: unknown state, TraceId: 01jykrfj5s0rqjryb42qy8djjk, Cleanup temp tables: 0 2025-06-25T14:39:43.291497Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=6&id=ZjY4ZDE2MzItZTY0ZmU0NWQtMTI0MzRjMDYtOWQ0NTJiMDM=, ActorId: [6:7519896676021385060:2292], ActorState: unknown state, TraceId: 01jykrfj5s0rqjryb42qy8djjk, Session actor destroyed 2025-06-25T14:39:43.292063Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519896676021385076:2297], DatabaseId: /Root, PoolId: sample_pool_id, Got watch notification 2025-06-25T14:39:43.299262Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=6&id=MTAxNTVlZGQtMzNjMjYzYjktM2JmYWE2NGUtNTk1MWMwNWQ=, ActorId: [6:7519896676021385000:2291], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:39:43.299300Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=6&id=MTAxNTVlZGQtMzNjMjYzYjktM2JmYWE2NGUtNTk1MWMwNWQ=, ActorId: [6:7519896676021385000:2291], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:39:43.299318Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=6&id=MTAxNTVlZGQtMzNjMjYzYjktM2JmYWE2NGUtNTk1MWMwNWQ=, ActorId: [6:7519896676021385000:2291], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:39:43.299343Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=6&id=MTAxNTVlZGQtMzNjMjYzYjktM2JmYWE2NGUtNTk1MWMwNWQ=, ActorId: [6:7519896676021385000:2291], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:39:43.299418Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=6&id=MTAxNTVlZGQtMzNjMjYzYjktM2JmYWE2NGUtNTk1MWMwNWQ=, ActorId: [6:7519896676021385000:2291], ActorState: unknown state, Session actor destroyed >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEvents [GOOD] >> TKeyValueTest::TestWriteLongKey |83.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformCluster2 [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorSingle >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOkNewApi >> TKeyValueCollectorTest::TestKeyValueCollectorSingle [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorSingleWithOneError >> 
TKeyValueCollectorTest::TestKeyValueCollectorSingleWithOneError [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorMultiple >> TKeyValueCollectorTest::TestKeyValueCollectorMultiple [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorEmpty >> TKeyValueCollectorTest::TestKeyValueCollectorEmpty [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorMany >> TKeyValueTest::TestCopyRangeWorks |83.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueCollectorTest::TestKeyValueCollectorMultiple [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorMany [GOOD] >> KeyValueReadStorage::ReadWithTwoPartsOk [GOOD] >> TGroupMapperTest::MapperSequentialCalls [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadWithTwoPartsOk [GOOD] Test command err: 2025-06-25T14:39:47.289481Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 2 ErrorReason# ReadRequestCookie# 0 2025-06-25T14:39:47.291652Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 >> ResourcePoolClassifiersDdl::TestDropResourcePoolClassifier [GOOD] >> ResourcePoolClassifiersDdl::TestDropResourcePool >> KqpWorkloadService::TestHandlerActorCleanup [GOOD] |83.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MapperSequentialCalls [GOOD] >> TKeyValueTest::TestObtainLockNewApi >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOk >> KqpPg::TableDeleteAllData+useSink [GOOD] >> KqpPg::TableDeleteAllData-useSink >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit [GOOD] >> TKeyValueTest::TestCleanUpDataOnEmptyTablet >> TGroupMapperTest::NonUniformCluster [GOOD] |83.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::NonUniformCluster [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadService::TestHandlerActorCleanup [GOOD] Test command err: 2025-06-25T14:36:33.858719Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895861803297285:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:33.859075Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a4f/r3tmp/tmpaWX2G0/pdisk_1.dat 2025-06-25T14:36:34.340475Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895861803297087:2080] 1750862193819474 != 1750862193819477 2025-06-25T14:36:34.369891Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:34.377531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:34.377623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:34.380922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on 
GrpcPort 19356, node 1 2025-06-25T14:36:34.624967Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:34.624994Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:34.625018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:34.625127Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:36:34.861292Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6449 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:36:35.145303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:36:37.576450Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:36:37.596496Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-25T14:36:37.596536Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:36:37.596553Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:36:37.653344Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519895878983166896:2290], Start check tables existence, number paths: 2 2025-06-25T14:36:37.658671Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MTkwYWM2YmYtOTRhYjc1OWMtNjgxYzA1ZWQtNzk4OGU4OTc=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MTkwYWM2YmYtOTRhYjc1OWMtNjgxYzA1ZWQtNzk4OGU4OTc= 2025-06-25T14:36:37.658869Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519895878983166896:2290], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:36:37.658919Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519895878983166896:2290], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:36:37.658956Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519895878983166896:2290], Successfully finished 2025-06-25T14:36:37.659036Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:36:37.664421Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=MTkwYWM2YmYtOTRhYjc1OWMtNjgxYzA1ZWQtNzk4OGU4OTc=, ActorId: [1:7519895878983166912:2291], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:36:37.680891Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895878983166914:2303], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:36:37.685888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:37.688669Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895878983166914:2303], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-25T14:36:37.689306Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895878983166914:2303], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-25T14:36:37.704344Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895878983166914:2303], 
DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:36:37.772223Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895878983166914:2303], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:36:37.777512Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895878983166965:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:36:37.777681Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895878983166914:2303], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-25T14:36:37.787082Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=YmE4NTc2MmMtYzUwOTk5YzQtN2VhZjJkNzEtZjUwZmM3ZTI=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YmE4NTc2MmMtYzUwOTk5YzQtN2VhZjJkNzEtZjUwZmM3ZTI= 2025-06-25T14:36:37.787491Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=YmE4NTc2MmMtYzUwOTk5YzQtN2VhZjJkNzEtZjUwZmM3ZTI=, ActorId: [1:7519895878983166973:2292], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:36:37.787706Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=YmE4NTc2MmMtYzUwOTk5YzQtN2VhZjJkNzEtZjUwZmM3ZTI=, ActorId: [1:7519895878983166973:2292], ActorState: ReadyState, TraceId: 01jykr9x0vcmb92wd396ary0p2, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7519895878983166972:2341] database: Root databaseId: /Root pool id: sample_pool_id 2025-06-25T14:36:37.787753Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-25T14:36:37.787765Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-25T14:36:37.787845Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [1:7519895878983166973:2292], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=YmE4NTc2MmMtYzUwOTk5YzQtN2VhZjJkNzEtZjUwZmM3ZTI= 2025-06-25T14:36:37.787893Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895878983166975:2293], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-25T14:36:37.787981Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519895878983166976:2294], Database: /Root, Start database fetching 2025-06-25T14:36:37.790751Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519895878983166976:2294], Database: /Root, Database info successfully fetched, serverless: 0 2025-06-25T14:36:37.790917Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895878983166975:2293], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully 
fetched 2025-06-25T14:36:37.790965Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-06-25T14:36:37.791004Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025-06-25T14:36:37.791017Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-06-25T14:36:37.791284Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7519895878983166987:2296], DatabaseId: /Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-06-25T14:36:37.791340Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [1:7519895878983166986:2295], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=YmE4NTc2MmMtYzUwOTk5YzQtN2VhZjJkNzEtZjUwZmM3ZTI=, Start ... 2228] 2025-06-25T14:38:21.062175Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=6&id=ODQ4Zjg5Y2EtZmM1YTI0N2YtNGM3NjhlOGItNWY5YjdmNTg=, ActorId: [6:7519896320458447911:2406], ActorState: unknown state, TraceId: 01jykrd1svc6dpjqzm863354xv, Cleanup temp tables: 0 2025-06-25T14:38:21.062444Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=6&id=ODQ4Zjg5Y2EtZmM1YTI0N2YtNGM3NjhlOGItNWY5YjdmNTg=, ActorId: [6:7519896320458447911:2406], ActorState: unknown state, TraceId: 01jykrd1svc6dpjqzm863354xv, Session actor destroyed 2025-06-25T14:38:21.075949Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=6&id=ZDFiOTlhZi1iMWM3OWQzNy0yNjk4YTA0MC03OTY4NDliYQ==, ActorId: [6:7519896303278578186:2290], ActorState: ReadyState, TraceId: 01jykrd1we4qqk7q4g17g65vn1, received request, proxyRequestId: 18 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: DROP RESOURCE POOL sample_pool_id; DROP RESOURCE POOL default; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-06-25T14:38:21.138533Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519896307573545584:2298], DatabaseId: /Root, PoolId: sample_pool_id, Got delete notification 2025-06-25T14:38:21.138640Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-25T14:38:21.138702Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519896324753415271:2420], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-25T14:38:21.141366Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519896324753415271:2420], DatabaseId: /Root, PoolId: sample_pool_id, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool sample_pool_id not found or you don't have access permissions } 2025-06-25T14:38:21.141478Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool sample_pool_id, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool sample_pool_id not found or you don't have access permissions } 2025-06-25T14:38:21.156596Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519896307573545781:2322], DatabaseId: /Root, PoolId: default, Got delete notification 2025-06-25T14:38:21.156712Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-06-25T14:38:21.156774Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519896324753415294:2421], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-25T14:38:21.157649Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519896324753415294:2421], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.157745Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:21.163483Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=6&id=ZDFiOTlhZi1iMWM3OWQzNy0yNjk4YTA0MC03OTY4NDliYQ==, ActorId: [6:7519896303278578186:2290], ActorState: ExecuteState, TraceId: 01jykrd1we4qqk7q4g17g65vn1, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [6:7519896324753415262:2290] WorkloadServiceCleanup: 0 2025-06-25T14:38:21.166650Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=6&id=ZDFiOTlhZi1iMWM3OWQzNy0yNjk4YTA0MC03OTY4NDliYQ==, ActorId: [6:7519896303278578186:2290], ActorState: CleanupState, TraceId: 01jykrd1we4qqk7q4g17g65vn1, EndCleanup, isFinal: 0 2025-06-25T14:38:21.166733Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=6&id=ZDFiOTlhZi1iMWM3OWQzNy0yNjk4YTA0MC03OTY4NDliYQ==, ActorId: [6:7519896303278578186:2290], ActorState: CleanupState, TraceId: 01jykrd1we4qqk7q4g17g65vn1, Sent query response back to proxy, proxyRequestId: 18, proxyId: [6:7519896286098708590:2228] Wait pool handlers 0.000016s: number handlers = 2 Wait pool handlers 1.000148s: number handlers = 2 Wait pool handlers 2.000536s: number handlers = 2 Wait pool handlers 3.001328s: number handlers = 2 Wait pool handlers 4.001707s: number handlers = 2 Wait pool handlers 5.001823s: number handlers = 2 Wait pool handlers 6.004403s: number handlers = 2 2025-06-25T14:38:27.532714Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:38:27.532752Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Wait pool handlers 7.004534s: number handlers = 2 Wait pool handlers 8.004690s: number handlers = 2 Wait pool handlers 9.004832s: number handlers = 2 Wait pool handlers 10.004959s: number handlers = 2 Wait pool handlers 11.005191s: number handlers = 2 Wait pool handlers 12.005341s: number handlers = 2 Wait pool handlers 13.006156s: number handlers = 2 2025-06-25T14:38:34.254267Z node 6 :KQP_WORKLOAD_SERVICE TRACE: pool_handlers_actors.cpp:689: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519896307573545584:2298], DatabaseId: /Root, PoolId: sample_pool_id, Try to start scheduled refresh Wait pool handlers 14.006289s: number handlers = 2 Wait pool handlers 15.009335s: number handlers = 2 Wait pool handlers 16.009473s: number handlers = 2 Wait pool handlers 17.009596s: number handlers = 2 Wait pool handlers 18.009727s: number handlers = 2 Wait pool handlers 19.012611s: number handlers = 2 Wait pool handlers 20.014910s: number handlers = 2 Wait pool handlers 21.015051s: number handlers = 2 Wait pool handlers 22.015363s: number handlers = 2 Wait pool handlers 23.015483s: number handlers = 2 Wait pool handlers 24.015607s: number handlers = 2 Wait pool handlers 25.015746s: number handlers = 2 Wait pool handlers 26.015882s: number handlers = 2 Wait pool handlers 27.016343s: number handlers = 2 Wait pool handlers 28.016610s: number handlers = 2 Wait pool handlers 29.016740s: number handlers = 2 Wait pool handlers 30.016876s: number handlers = 2 Wait pool handlers 31.016969s: number handlers = 2 Wait pool handlers 32.017055s: number handlers = 2 Wait pool handlers 33.017178s: number handlers = 2 Wait pool handlers 34.017313s: number handlers = 2 Wait pool handlers 35.018185s: number handlers = 2 Wait pool handlers 36.018313s: number handlers = 2 Wait pool handlers 37.018438s: number 
handlers = 2 Wait pool handlers 38.018539s: number handlers = 2 Wait pool handlers 39.021356s: number handlers = 2 Wait pool handlers 40.021481s: number handlers = 2 Wait pool handlers 41.025349s: number handlers = 2 Wait pool handlers 42.025449s: number handlers = 2 Wait pool handlers 43.029343s: number handlers = 2 Wait pool handlers 44.033339s: number handlers = 2 Wait pool handlers 45.033456s: number handlers = 2 Wait pool handlers 46.037333s: number handlers = 2 Wait pool handlers 47.041339s: number handlers = 2 Wait pool handlers 48.045339s: number handlers = 2 Wait pool handlers 49.049338s: number handlers = 2 Wait pool handlers 50.053342s: number handlers = 2 Wait pool handlers 51.054607s: number handlers = 2 Wait pool handlers 52.054945s: number handlers = 2 Wait pool handlers 53.057354s: number handlers = 2 Wait pool handlers 54.057551s: number handlers = 2 Wait pool handlers 55.058463s: number handlers = 2 Wait pool handlers 56.059738s: number handlers = 2 Wait pool handlers 57.059855s: number handlers = 2 Wait pool handlers 58.061362s: number handlers = 2 Wait pool handlers 59.061492s: number handlers = 2 Wait pool handlers 60.065339s: number handlers = 2 Wait pool handlers 61.066172s: number handlers = 2 Wait pool handlers 62.066287s: number handlers = 2 Wait pool handlers 63.066454s: number handlers = 2 Wait pool handlers 64.066585s: number handlers = 2 Wait pool handlers 65.066690s: number handlers = 2 Wait pool handlers 66.066815s: number handlers = 2 Wait pool handlers 67.067039s: number handlers = 2 Wait pool handlers 68.067168s: number handlers = 2 Wait pool handlers 69.068923s: number handlers = 2 Wait pool handlers 70.069319s: number handlers = 2 Wait pool handlers 71.073351s: number handlers = 2 Wait pool handlers 72.073468s: number handlers = 2 Wait pool handlers 73.073873s: number handlers = 2 Wait pool handlers 74.074055s: number handlers = 2 Wait pool handlers 75.076165s: number handlers = 2 Wait pool handlers 76.076312s: number handlers = 2 Wait pool handlers 77.076421s: number handlers = 2 Wait pool handlers 78.076543s: number handlers = 2 Wait pool handlers 79.076651s: number handlers = 2 Wait pool handlers 80.076791s: number handlers = 2 Wait pool handlers 81.076924s: number handlers = 2 Wait pool handlers 82.077039s: number handlers = 2 Wait pool handlers 83.077151s: number handlers = 2 Wait pool handlers 84.077257s: number handlers = 2 Wait pool handlers 85.077338s: number handlers = 2 2025-06-25T14:39:47.071440Z node 6 :KQP_WORKLOAD_SERVICE INFO: pool_handlers_actors.cpp:178: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519896307573545584:2298], DatabaseId: /Root, PoolId: sample_pool_id, Got stop pool handler request, waiting for 0 requests 2025-06-25T14:39:47.071466Z node 6 :KQP_WORKLOAD_SERVICE INFO: pool_handlers_actors.cpp:178: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7519896307573545781:2322], DatabaseId: /Root, PoolId: default, Got stop pool handler request, waiting for 0 requests 2025-06-25T14:39:47.071628Z node 6 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:425: [WorkloadService] [Service] Got stop pool handler response, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-25T14:39:47.071667Z node 6 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:425: [WorkloadService] [Service] Got stop pool handler response, DatabaseId: /Root, PoolId: default 2025-06-25T14:39:47.260212Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=6&id=ZDFiOTlhZi1iMWM3OWQzNy0yNjk4YTA0MC03OTY4NDliYQ==, 
ActorId: [6:7519896303278578186:2290], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:39:47.260295Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=6&id=ZDFiOTlhZi1iMWM3OWQzNy0yNjk4YTA0MC03OTY4NDliYQ==, ActorId: [6:7519896303278578186:2290], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:39:47.260357Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=6&id=ZDFiOTlhZi1iMWM3OWQzNy0yNjk4YTA0MC03OTY4NDliYQ==, ActorId: [6:7519896303278578186:2290], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:39:47.260391Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=6&id=ZDFiOTlhZi1iMWM3OWQzNy0yNjk4YTA0MC03OTY4NDliYQ==, ActorId: [6:7519896303278578186:2290], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:39:47.260531Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=6&id=ZDFiOTlhZi1iMWM3OWQzNy0yNjk4YTA0MC03OTY4NDliYQ==, ActorId: [6:7519896303278578186:2290], ActorState: unknown state, Session actor destroyed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit [GOOD] Test command err: 2025-06-25T14:38:49.032191Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896445697422801:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:49.032235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00195e/r3tmp/tmphQ7WSx/pdisk_1.dat 2025-06-25T14:38:49.346596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:49.346737Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:49.349932Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:49.351314Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896445697422700:2080] 1750862329020204 != 1750862329020207 2025-06-25T14:38:49.363141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23424, node 1 2025-06-25T14:38:49.440824Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:49.440849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:49.440860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:49.441005Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27593 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:49.790947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:49.812501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:38:50.065153Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:51.803026Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:38:51.816259Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896454287357912:2289], Start check tables existence, number paths: 2 2025-06-25T14:38:51.816393Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:38:51.816417Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:38:51.816462Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-25T14:38:51.817761Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896454287357912:2289], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:38:51.817835Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896454287357912:2289], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:38:51.817876Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896454287357912:2289], Successfully finished 2025-06-25T14:38:51.817937Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:38:51.818706Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: 
ydb://session/3?node_id=1&id=OGY5M2JiOC1mYTkxMWIxMS04YjI5MTU4YS0xMDBlMTI3Nw==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id OGY5M2JiOC1mYTkxMWIxMS04YjI5MTU4YS0xMDBlMTI3Nw== 2025-06-25T14:38:51.818827Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=OGY5M2JiOC1mYTkxMWIxMS04YjI5MTU4YS0xMDBlMTI3Nw==, ActorId: [1:7519896454287357928:2290], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:38:51.832835Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896454287357930:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:51.835980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:51.837177Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896454287357930:2298], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976715658 2025-06-25T14:38:51.837301Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896454287357930:2298], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-25T14:38:51.845132Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896454287357930:2298], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:38:51.923746Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896454287357930:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:51.927747Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896454287357982:2331] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:51.927833Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896454287357930:2298], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-25T14:38:51.928125Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896454287357989:2337], DatabaseId: Root, PoolId: sample_pool_id, Start pool fetching 2025-06-25T14:38:51.929179Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896454287357989:2337], DatabaseId: Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-25T14:38:51.937258Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=1&id=OGY5M2JiOC1mYTkxMWIxMS04YjI5MTU4YS0xMDBlMTI3Nw==, ActorId: [1:7519896454287357928:2290], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:38:51.937328Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=1&id=OGY5M2JiOC1mYTkxMWIxMS04YjI5MTU4YS0xMDBlMTI3Nw==, ActorId: [1:7519896454287357928:2290], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:38:51.937362Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=1&id=OGY5M2JiOC1mYTkxMWIxMS04YjI5MTU4YS0xMDBlMTI3Nw==, ActorId: [1:7519896454287357928:2290], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:38:51.937405Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=1&id=OGY5M2JiOC1mYTkxMWIxMS04YjI5MTU4YS0xMDBlMTI3Nw==, ActorId: [1:7519896454287357928:2290], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:38:51.937537Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=1&id=OGY5M2JiOC1mYTkxMWIxMS04YjI5MTU4YS0xMDBlMTI3Nw==, ActorId: [1:7519896454287357928:2290], ActorState: unknown state, Session actor destroyed 2025-06-25T14:38:52.367601Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896456739167612:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:52.367660Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00195e/r3tmp/tmpoT3OqA/pdisk_1.dat 2025-06-25T14:38:52.470097Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:52.471156Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: 
Notification cookie mismatch for subscription [2:7519896456739167593:2080] 1750862332367275 != 1750862332367278 TServer::EnableGrpc on GrpcPort 21260, node 2 2025-06-25T14:38:52.506671Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:52.506746Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:52.508431Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:52.517997Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:52.518019Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:52.518025Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initia ... actionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:39:48.094835Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=6&id=MmVkODU4ZDctMTQ1ZGE0YzQtYWU3OGRkZDctODRjMDY2ZTI=, ActorId: [6:7519896553097497085:2295], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:39:48.094865Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=6&id=MmVkODU4ZDctMTQ1ZGE0YzQtYWU3OGRkZDctODRjMDY2ZTI=, ActorId: [6:7519896553097497085:2295], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:39:48.094950Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=6&id=MmVkODU4ZDctMTQ1ZGE0YzQtYWU3OGRkZDctODRjMDY2ZTI=, ActorId: [6:7519896553097497085:2295], ActorState: unknown state, Session actor destroyed 2025-06-25T14:39:48.107581Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpvkd37ymp0defszsxe7, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-25T14:39:48.107704Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpvkd37ymp0defszsxe7, txInfo Status: Committed Kind: ReadWrite TotalDuration: 22.698 ServerDuration: 22.286 QueriesCount: 2 2025-06-25T14:39:48.107790Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpvkd37ymp0defszsxe7, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-25T14:39:48.107837Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpvkd37ymp0defszsxe7, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:39:48.107865Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 
01jykrfpvkd37ymp0defszsxe7, EndCleanup, isFinal: 0 2025-06-25T14:39:48.107907Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpvkd37ymp0defszsxe7, Sent query response back to proxy, proxyRequestId: 560, proxyId: [8:7519896532875017173:2221] 2025-06-25T14:39:48.108769Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, TxId: 2025-06-25T14:39:48.108867Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:197: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, RunDataQuery: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); 2025-06-25T14:39:48.109258Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ReadyState, TraceId: 01jykrfpwddhest9zn76w2zv5s, received request, proxyRequestId: 561 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); rpcActor: [8:7519896700378750788:4917] database: /Root databaseId: /Root pool id: default 2025-06-25T14:39:48.109296Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ReadyState, TraceId: 01jykrfpwddhest9zn76w2zv5s, request placed into pool from cache: default 2025-06-25T14:39:48.109891Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpwddhest9zn76w2zv5s, ExecutePhyTx, tx: 0x000050C00054AB58 literal: 0 commit: 0 txCtx.DeferredEffects.size(): 0 2025-06-25T14:39:48.109941Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpwddhest9zn76w2zv5s, Sending to Executer TraceId: 0 8 
2025-06-25T14:39:48.110022Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpwddhest9zn76w2zv5s, Created new KQP executer: [8:7519896700378750791:4912] isRollback: 0 2025-06-25T14:39:48.118700Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpwddhest9zn76w2zv5s, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-06-25T14:39:48.118803Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpwddhest9zn76w2zv5s, ExecutePhyTx, tx: 0x000050C00054A918 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-25T14:39:48.121086Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpwddhest9zn76w2zv5s, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-25T14:39:48.121226Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpwddhest9zn76w2zv5s, txInfo Status: Committed Kind: ReadOnly TotalDuration: 11.431 ServerDuration: 11.348 QueriesCount: 2 2025-06-25T14:39:48.121310Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpwddhest9zn76w2zv5s, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-25T14:39:48.121358Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpwddhest9zn76w2zv5s, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:39:48.121380Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpwddhest9zn76w2zv5s, EndCleanup, isFinal: 0 2025-06-25T14:39:48.121414Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ExecuteState, TraceId: 01jykrfpwddhest9zn76w2zv5s, Sent query response back to proxy, proxyRequestId: 561, proxyId: [8:7519896532875017173:2221] 2025-06-25T14:39:48.121776Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, TxId: 2025-06-25T14:39:48.121850Z node 8 
:KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, Finish with SUCCESS, SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, TxId: 2025-06-25T14:39:48.121958Z node 8 :KQP_WORKLOAD_SERVICE TRACE: pool_handlers_actors.cpp:746: [WorkloadService] [TPoolHandlerActorBase] ActorId: [8:7519896554349853786:2275], DatabaseId: /Root, PoolId: sample_pool_id, succefully refreshed pool state, in flight: 0, delayed: 0 2025-06-25T14:39:48.122209Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:39:48.122258Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:39:48.122304Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:39:48.122334Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:39:48.122456Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=OTY1OWNmMWEtYmE1OTc0NzctYTc0YWI3ZGEtMzY1YjFhYWY=, ActorId: [8:7519896700378750767:4912], ActorState: unknown state, Session actor destroyed >> TKeyValueTest::TestWriteDeleteThenReadRemaining [GOOD] >> TKeyValueTest::TestWriteAndRenameWithCreationUnixTime >> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOkNewApi >> TKeyValueTest::TestWriteAndRenameWithCreationUnixTime [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteAndRenameWithCreationUnixTime [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:452:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:455:2057] recipient: [2:454:2379] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:456:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:457:2380] sender: [2:458:2057] recipient: [2:454:2379] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:457:2380] Leader for TabletID 72057594037927937 is [2:457:2380] sender: [2:543:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:452:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:455:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:456:2057] recipient: [3:454:2379] Leader for TabletID 72057594037927937 is [3:457:2380] sender: [3:458:2057] recipient: [3:454:2379] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:457:2380] Leader for TabletID 72057594037927937 is [3:457:2380] sender: [3:543:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:453:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:456:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:457:2057] recipient: [4:455:2379] Leader for TabletID 72057594037927937 is [4:458:2380] sender: [4:459:2057] recipient: [4:455:2379] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! 
new actor is[4:458:2380] Leader for TabletID 72057594037927937 is [4:458:2380] sender: [4:544:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOkNewApi >> TKeyValueTest::TestRenameWorks >> TKeyValueTest::TestWriteReadPatchRead >> Viewer::JsonStorageListingV2NodeIdFilter [GOOD] >> Viewer::JsonStorageListingV2PDiskIdFilter >> TKeyValueTest::TestWriteReadPatchRead [GOOD] >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOkWithNewApi >> TGroupMapperTest::SanitizeGroupTest3dc >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewOnServerless [GOOD] >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewFilters >> TMultiversionObjectMap::MonteCarlo [GOOD] |83.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TMultiversionObjectMap::MonteCarlo [GOOD] >> TKeyValueTest::TestWriteLongKey [GOOD] >> ResourcePoolClassifiersDdl::TestDropResourcePool [GOOD] |83.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |83.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |83.0%| [TA] {RESULT} $(B)/ydb/core/base/ut_board_subscriber/test-results/unittest/{meta.json ... results_accumulator.log} |83.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/test-results/unittest/{meta.json ... results_accumulator.log} |83.0%| [LD] {RESULT} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteLongKey [GOOD] >> TKeyValueTest::TestWrite200KDeleteThenResponseError [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! 
new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:79:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:81:2112] Leader for TabletID 72057594037927937 is [4:84:2113] sender: [4:85:2057] recipient: [4:81:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:84:2113] Leader for TabletID 72057594037927937 is [4:84:2113] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:80:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:84:2057] recipient: [5:82:2112] Leader for TabletID 72057594037927937 is [5:85:2113] sender: [5:86:2057] recipient: [5:82:2112] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:85:2113] Leader for TabletID 72057594037927937 is [5:85:2113] sender: [5:171:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! 
new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:83:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:85:2115] Leader for TabletID 72057594037927937 is [7:88:2116] sender: [7:89:2057] recipient: [7:85:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:88:2116] Leader for TabletID 72057594037927937 is [7:88:2116] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:84:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:87:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:88:2057] recipient: [8:86:2115] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:90:2057] recipient: [8:86:2115] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! 
new actor is[8:89:2116] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:175:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] >> TKeyValueTest::TestWrite200KDeleteThenResponseErrorNewApi >> TKeyValueTest::TestIncorrectRequestThenResponseError >> TKeyValueTest::TestRewriteThenLastValue >> TKeyValueTest::TestIncorrectRequestThenResponseError [GOOD] >> TKeyValueTest::TestIncrementalKeySet >> TKeyValueTest::TestIncrementalKeySet [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> ResourcePoolClassifiersDdl::TestDropResourcePool [GOOD] Test command err: 2025-06-25T14:38:48.854551Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896439570900428:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:48.858107Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00196b/r3tmp/tmp6jRrsP/pdisk_1.dat 2025-06-25T14:38:49.163590Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896439570900398:2080] 1750862328834414 != 1750862328834417 2025-06-25T14:38:49.184596Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:49.187435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:49.187520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:49.192324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4480, node 1 2025-06-25T14:38:49.271881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:49.271900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:49.271922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:49.272052Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29869 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:49.529022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:49.866367Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:51.547407Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:38:51.557828Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-25T14:38:51.557879Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:38:51.557918Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:38:51.557999Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896452455802908:2289], Start check tables existence, number paths: 2 2025-06-25T14:38:51.559098Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896452455802908:2289], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:38:51.559142Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896452455802908:2289], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:38:51.559169Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896452455802908:2289], Successfully finished 2025-06-25T14:38:51.559215Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:38:51.559805Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NmU5MTRjOGYtZmYxZGI5MzgtMzAzYzZkNDctODM1Njg5ODI=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NmU5MTRjOGYtZmYxZGI5MzgtMzAzYzZkNDctODM1Njg5ODI= 2025-06-25T14:38:51.559963Z node 1 :KQP_SESSION DEBUG: 
kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NmU5MTRjOGYtZmYxZGI5MzgtMzAzYzZkNDctODM1Njg5ODI=, ActorId: [1:7519896452455802924:2290], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:38:51.583999Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452455802926:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:51.587424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:51.588855Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452455802926:2298], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-25T14:38:51.589016Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452455802926:2298], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-25T14:38:51.599140Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452455802926:2298], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:38:51.670149Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452455802926:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:51.674532Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896452455802977:2330] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:51.674689Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452455802926:2298], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-25T14:38:51.676929Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ODNlZmQ1YmEtMTdlZGRkNjQtZmZhOTgxZjctNWZlOGY5Y2U=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ODNlZmQ1YmEtMTdlZGRkNjQtZmZhOTgxZjctNWZlOGY5Y2U= 2025-06-25T14:38:51.677249Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ODNlZmQ1YmEtMTdlZGRkNjQtZmZhOTgxZjctNWZlOGY5Y2U=, ActorId: [1:7519896452455802984:2291], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:38:51.677413Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=ODNlZmQ1YmEtMTdlZGRkNjQtZmZhOTgxZjctNWZlOGY5Y2U=, ActorId: [1:7519896452455802984:2291], ActorState: ReadyState, TraceId: 01jykrdzrx4ywf5x3qwdpd4w30, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [1:7519896452455802983:2335] database: Root databaseId: /Root pool id: sample_pool_id 2025-06-25T14:38:51.677451Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-25T14:38:51.677465Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-25T14:38:51.677511Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:169: [WorkloadService] [Service] Recieved new request from [1:7519896452455802984:2291], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=ODNlZmQ1YmEtMTdlZGRkNjQtZmZhOTgxZjctNWZlOGY5Y2U= 2025-06-25T14:38:51.677556Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896452455802986:2292], DatabaseId: /Root, PoolId: sample_pool_id, Start pool fetching 2025-06-25T14:38:51.677628Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519896452455802987:2293], Database: /Root, Start database fetching 2025-06-25T14:38:51.678663Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [1:7519896452455802987:2293], Database: /Root, Database info successfully fetched, serverless: 0 2025-06-25T14:38:51.678704Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:240: [WorkloadService] [Service] Successfully fetched database info, DatabaseId: /Root, Serverless: 0 2025-06-25T14:38:51.678808Z node 1 
:KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896452455802986:2292], DatabaseId: /Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-25T14:38:51.678849Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:44: [WorkloadService] [TPoolResolverActor] ActorId: [1:7519896452455802997:2294], DatabaseId: /Root, PoolId: sample_pool_id, SessionId: ydb://session/3?node_id=1&id=ODNlZmQ1YmEtMTdlZGRkNjQtZmZhOTgxZjctNWZlOGY5Y2U=, Start pool fetching 2025-06-25T14:38:51.678864Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: /Root 2025-06-25T14:38:51.678893Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-06-25T14:38:51.679057Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7519896452455802999:2296], DatabaseId: /Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, L ... c5NDRlYTQtZjM4ZDg4MGItNGQzMzliMjYtZWIxYjI4YWM=, ActorId: [8:7519896738225097823:2613], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 1 TransactionsToBeAborted.size(): 1 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:39:57.713475Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=ZTc5NDRlYTQtZjM4ZDg4MGItNGQzMzliMjYtZWIxYjI4YWM=, ActorId: [8:7519896738225097823:2613], ActorState: CleanupState, EndCleanup, isFinal: 1 2025-06-25T14:39:57.713523Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=ZTc5NDRlYTQtZjM4ZDg4MGItNGQzMzliMjYtZWIxYjI4YWM=, ActorId: [8:7519896738225097823:2613], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:39:57.713672Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=ZTc5NDRlYTQtZjM4ZDg4MGItNGQzMzliMjYtZWIxYjI4YWM=, ActorId: [8:7519896738225097823:2613], ActorState: unknown state, Session actor destroyed 2025-06-25T14:39:57.713998Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=ZGYwNDU1YmYtMTc3NDNjMjAtN2UzNTFhZjItNWMxYTM2OWQ=, ActorId: [8:7519896712455292789:2291], ActorState: ExecuteState, TraceId: 01jykrg07wccqa2vq4xf55pka3, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [8:7519896738225097843:2291] WorkloadServiceCleanup: 0 2025-06-25T14:39:57.716429Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=ZGYwNDU1YmYtMTc3NDNjMjAtN2UzNTFhZjItNWMxYTM2OWQ=, ActorId: [8:7519896712455292789:2291], ActorState: CleanupState, TraceId: 01jykrg07wccqa2vq4xf55pka3, EndCleanup, isFinal: 0 2025-06-25T14:39:57.716486Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=8&id=ZGYwNDU1YmYtMTc3NDNjMjAtN2UzNTFhZjItNWMxYTM2OWQ=, ActorId: [8:7519896712455292789:2291], ActorState: CleanupState, TraceId: 01jykrg07wccqa2vq4xf55pka3, Sent query response back to proxy, proxyRequestId: 55, proxyId: [8:7519896699570390427:2169] 2025-06-25T14:39:57.720635Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id 
Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM= 2025-06-25T14:39:57.720722Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:39:57.720996Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: my_pool 2025-06-25T14:39:57.721042Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519896738225097885:2628], DatabaseId: /Root, PoolId: my_pool, Start pool fetching 2025-06-25T14:39:57.721133Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ReadyState, TraceId: 01jykrg08scy221geysxfg7dwg, received request, proxyRequestId: 56 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_GENERIC_QUERY text: SELECT 42; rpcActor: [8:7519896738225097883:2885] database: Root databaseId: /Root pool id: default 2025-06-25T14:39:57.721166Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ReadyState, TraceId: 01jykrg08scy221geysxfg7dwg, request placed into pool from cache: default 2025-06-25T14:39:57.721261Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ExecuteState, TraceId: 01jykrg08scy221geysxfg7dwg, Sending CompileQuery request 2025-06-25T14:39:57.721274Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519896738225097885:2628], DatabaseId: /Root, PoolId: my_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-25T14:39:57.721324Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool my_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool my_pool not found or you don't have access permissions } 2025-06-25T14:39:57.797696Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ExecuteState, TraceId: 01jykrg08scy221geysxfg7dwg, ExecutePhyTx, tx: 0x000050C000258DD8 literal: 0 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-25T14:39:57.797756Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ExecuteState, TraceId: 01jykrg08scy221geysxfg7dwg, Sending to Executer TraceId: 0 8 2025-06-25T14:39:57.797851Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ExecuteState, TraceId: 01jykrg08scy221geysxfg7dwg, Created new KQP executer: [8:7519896738225097890:2627] isRollback: 0 2025-06-25T14:39:57.799291Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1852: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ExecuteState, TraceId: 01jykrg08scy221geysxfg7dwg, Forwarded TEvStreamData to [8:7519896738225097883:2885] 2025-06-25T14:39:57.799946Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ExecuteState, TraceId: 01jykrg08scy221geysxfg7dwg, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-25T14:39:57.800110Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ExecuteState, TraceId: 01jykrg08scy221geysxfg7dwg, txInfo Status: Committed Kind: Pure TotalDuration: 2.482 ServerDuration: 2.426 QueriesCount: 2 2025-06-25T14:39:57.800191Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ExecuteState, TraceId: 01jykrg08scy221geysxfg7dwg, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-25T14:39:57.800420Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ExecuteState, TraceId: 01jykrg08scy221geysxfg7dwg, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:39:57.800463Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ExecuteState, TraceId: 01jykrg08scy221geysxfg7dwg, EndCleanup, isFinal: 1 2025-06-25T14:39:57.800514Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: ExecuteState, TraceId: 01jykrg08scy221geysxfg7dwg, Sent query response back to proxy, proxyRequestId: 56, proxyId: [8:7519896699570390427:2169] 
2025-06-25T14:39:57.800549Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: unknown state, TraceId: 01jykrg08scy221geysxfg7dwg, Cleanup temp tables: 0 2025-06-25T14:39:57.800905Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=Y2JjNzZiYWQtMzBhZDVkZGQtN2U5NDlkNjUtZmE2ZjRhMzM=, ActorId: [8:7519896738225097884:2627], ActorState: unknown state, TraceId: 01jykrg08scy221geysxfg7dwg, Session actor destroyed 2025-06-25T14:39:57.808671Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=8&id=ZGYwNDU1YmYtMTc3NDNjMjAtN2UzNTFhZjItNWMxYTM2OWQ=, ActorId: [8:7519896712455292789:2291], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:39:57.808729Z node 8 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=8&id=ZGYwNDU1YmYtMTc3NDNjMjAtN2UzNTFhZjItNWMxYTM2OWQ=, ActorId: [8:7519896712455292789:2291], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:39:57.808757Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=8&id=ZGYwNDU1YmYtMTc3NDNjMjAtN2UzNTFhZjItNWMxYTM2OWQ=, ActorId: [8:7519896712455292789:2291], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:39:57.808780Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=8&id=ZGYwNDU1YmYtMTc3NDNjMjAtN2UzNTFhZjItNWMxYTM2OWQ=, ActorId: [8:7519896712455292789:2291], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:39:57.808866Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=8&id=ZGYwNDU1YmYtMTc3NDNjMjAtN2UzNTFhZjItNWMxYTM2OWQ=, ActorId: [8:7519896712455292789:2291], ActorState: unknown state, Session actor destroyed 2025-06-25T14:39:57.952329Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=8&id=ZWM3MDczYmEtNGI5MTBiNWUtM2EwNTVlOWItNDNkNWIyZjk=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZWM3MDczYmEtNGI5MTBiNWUtM2EwNTVlOWItNDNkNWIyZjk= 2025-06-25T14:39:57.952543Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=8&id=ZWM3MDczYmEtNGI5MTBiNWUtM2EwNTVlOWItNDNkNWIyZjk=, ActorId: [8:7519896738225097901:2632], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:39:57.953153Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=8&id=ZWM3MDczYmEtNGI5MTBiNWUtM2EwNTVlOWItNDNkNWIyZjk=, ActorId: [8:7519896738225097901:2632], ActorState: ReadyState, TraceId: 01jykrg0g197bk5ptwafkjs50f, received request, proxyRequestId: 58 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: SELECT * FROM `//Root/.metadata/initialization/migrations`; rpcActor: [8:7519896738225097902:2633] database: /Root databaseId: /Root pool id: default 2025-06-25T14:39:57.953192Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=8&id=ZWM3MDczYmEtNGI5MTBiNWUtM2EwNTVlOWItNDNkNWIyZjk=, ActorId: [8:7519896738225097901:2632], ActorState: ReadyState, TraceId: 01jykrg0g197bk5ptwafkjs50f, request placed into pool from cache: default 2025-06-25T14:39:57.953301Z node 8 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: 
ydb://session/3?node_id=8&id=ZWM3MDczYmEtNGI5MTBiNWUtM2EwNTVlOWItNDNkNWIyZjk=, ActorId: [8:7519896738225097901:2632], ActorState: ExecuteState, TraceId: 01jykrg0g197bk5ptwafkjs50f, Sending CompileQuery request >> TKeyValueTest::TestGetStatusWorksNewApi >> TGroupMapperTest::SanitizeGroupTest3dc [GOOD] >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorks >> TGroupMapperTest::ReassignGroupTest3dc [GOOD] |83.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::SanitizeGroupTest3dc [GOOD] |83.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/test-results/unittest/{meta.json ... results_accumulator.log} |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::ReassignGroupTest3dc [GOOD] >> TGroupMapperTest::Block42_1disk [GOOD] >> TKeyValueTest::TestBasicWriteRead |83.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::Block42_1disk [GOOD] |83.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |83.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |83.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme >> KqpWorkloadServiceTables::TestLeaseExpiration [GOOD] >> KqpWorkloadServiceTables::TestLeaseUpdates >> KqpWorkloadServiceDistributed::TestDistributedLessConcurrentQueryLimit [GOOD] >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscription >> TKeyValueTest::TestGetStatusWorksNewApi [GOOD] |83.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |83.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |83.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestGetStatusWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is 
[4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:79:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:81:2112] Leader for TabletID 72057594037927937 is [4:84:2113] sender: [4:85:2057] recipient: [4:81:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:84:2113] Leader for TabletID 72057594037927937 is [4:84:2113] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvKeyValue::TEvGetStorageChannelStatus ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:79:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:82:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:81:2112] Leader for TabletID 72057594037927937 is [5:84:2113] sender: [5:85:2057] recipient: [5:81:2112] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:84:2113] Leader for TabletID 72057594037927937 is [5:84:2113] sender: [5:170:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:80:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:84:2057] recipient: [6:82:2112] Leader for TabletID 72057594037927937 is [6:85:2113] sender: [6:86:2057] recipient: [6:82:2112] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! 
new actor is[6:85:2113] Leader for TabletID 72057594037927937 is [6:85:2113] sender: [6:171:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] >> SystemView::ShowCreateTableColumnAlterColumn [GOOD] >> SystemView::ShowCreateTableColumnUpsertOptions >> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestWriteReadWhileWriteWorks |83.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |83.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |83.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscription [GOOD] >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAlter >> TKeyValueTest::TestCopyRangeWorks [GOOD] >> TKeyValueTest::TestCopyRangeWorksNewApi >> TKeyValueTest::TestRenameWorks [GOOD] >> TKeyValueTest::TestRenameToLongKey >> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorks >> KqpPg::ValuesInsert-useSink [GOOD] >> PgCatalog::PgType >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewFilters [GOOD] >> KqpPg::TableInsert-useSink [GOOD] >> KqpPg::TempTablesSessionsIsolation |83.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |83.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |83.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots >> TKeyValueTest::TestWrite200KDeleteThenResponseErrorNewApi [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> ResourcePoolClassifiersSysView::TestResourcePoolClassifiersSysViewFilters [GOOD] Test command err: 2025-06-25T14:38:48.831147Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896440419624374:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:48.831197Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019c7/r3tmp/tmpPDHrVO/pdisk_1.dat 2025-06-25T14:38:49.127217Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6447, node 1 2025-06-25T14:38:49.190647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:49.191135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:49.202879Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:49.224931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:49.224954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:49.224964Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:49.225134Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29818 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:49.524841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:38:49.546846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:38:49.838092Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:51.358688Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:38:51.362387Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=YjYwYmEzYjEtNTAwZDE5ODEtMWM4MDI3YjEtNWRmYmYyMWU=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YjYwYmEzYjEtNTAwZDE5ODEtMWM4MDI3YjEtNWRmYmYyMWU= 2025-06-25T14:38:51.363000Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896453304526860:2289], Start check tables existence, number paths: 2 2025-06-25T14:38:51.363093Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=YjYwYmEzYjEtNTAwZDE5ODEtMWM4MDI3YjEtNWRmYmYyMWU=, ActorId: [1:7519896453304526861:2290], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:38:51.363525Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-25T14:38:51.363546Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:38:51.363567Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:38:51.366843Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896453304526860:2289], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:38:51.366913Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896453304526860:2289], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:38:51.366945Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896453304526860:2289], Successfully finished 2025-06-25T14:38:51.366993Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:38:51.389063Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896453304526878:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:51.393723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:51.397209Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896453304526878:2298], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-25T14:38:51.399479Z node 1 
:KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896453304526878:2298], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-25T14:38:51.408996Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896453304526878:2298], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:38:51.489797Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896453304526878:2298], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:51.493746Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896453304526929:2330] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:51.493852Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896453304526878:2298], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-25T14:38:51.502158Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-06-25T14:38:51.502193Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-25T14:38:51.502277Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=YjYwYmEzYjEtNTAwZDE5ODEtMWM4MDI3YjEtNWRmYmYyMWU=, ActorId: [1:7519896453304526861:2290], ActorState: ReadyState, TraceId: 01jykrdzkdb1zqz7z2q64c8gaw, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: GRANT DESCRIBE SCHEMA ON `/Root` TO `user@test`; GRANT DESCRIBE SCHEMA, SELECT ROW ON `/Root/.metadata/workload_manager/pools/sample_pool_id` TO `user@test`; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-06-25T14:38:51.502293Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896453304526938:2292], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-25T14:38:51.507006Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896453304526938:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:51.507147Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:51.724924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:38:51.730393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:38:51.732283Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=1&id=YjYwYmEzYjEtNTAwZDE5ODEtMWM4MDI3YjEtNWRmYmYyMWU=, ActorId: [1:7519896453304526861:2290], ActorState: ExecuteState, TraceId: 01jykrdzkdb1zqz7z2q64c8gaw, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [1:7519896453304526939:2290] WorkloadServiceCleanup: 0 2025-06-25T14:38:51.734229Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=1&id=YjYwYmEzYjEtNTAwZDE5ODEtMWM4MDI3YjEtNWRmYmYyMWU=, ActorId: [1:7519896453304526861:2290], ActorState: CleanupState, TraceId: 01jykrdzkdb1zqz7z2q64c8gaw, EndCleanup, isFinal: 0 2025-06-25T14:38:51.734279Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=1&id=YjYwYmEzYjEtNTAwZDE5ODEtMWM4MDI3YjEtNWRmYmYyMWU=, ActorId: [1:7519896453304526861:2290], ActorState: CleanupState, TraceId: 01jykrdzkdb1zqz7z2q64c8gaw, Sent query response back to proxy, proxyRequestId: 3, proxyId: [1:7519896440419624583:2237] 2025-06-25T14:38:51.739771Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: S ... e_id=9&id=M2NmODUzMWUtMzY2NDEwNjItZDhhZGU1ZGUtNmQ4OTNlY2M=, ActorId: [9:7519896805587071045:2939], ActorState: unknown state, Session actor destroyed 2025-06-25T14:40:13.635473Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:991: [main][9:7519896762637395301:2612][/Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers] Sync is incomplete in one of the ring groups: cookie# 42 2025-06-25T14:40:13.635563Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:991: [main][9:7519896762637395301:2612][/Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers] Sync is incomplete in one of the ring groups: cookie# 43 2025-06-25T14:40:13.636470Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7519896805587071054:2945], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]
: Error: LookupError, code: 2005 2025-06-25T14:40:13.636695Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=9&id=MmNkNzFmYWYtMmQ4YTZmMmItZTc1YTAyMy1lZTc0NWRmMw==, ActorId: [9:7519896805587071051:2943], ActorState: ExecuteState, TraceId: 01jykrgfsq87w8y1q9rj00wdx1, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: 2025-06-25T14:40:13.636744Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=9&id=MmNkNzFmYWYtMmQ4YTZmMmItZTc1YTAyMy1lZTc0NWRmMw==, ActorId: [9:7519896805587071051:2943], ActorState: ExecuteState, TraceId: 01jykrgfsq87w8y1q9rj00wdx1, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:40:13.636770Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=9&id=MmNkNzFmYWYtMmQ4YTZmMmItZTc1YTAyMy1lZTc0NWRmMw==, ActorId: [9:7519896805587071051:2943], ActorState: ExecuteState, TraceId: 01jykrgfsq87w8y1q9rj00wdx1, EndCleanup, isFinal: 0 2025-06-25T14:40:13.636918Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=9&id=MmNkNzFmYWYtMmQ4YTZmMmItZTc1YTAyMy1lZTc0NWRmMw==, ActorId: [9:7519896805587071051:2943], ActorState: ExecuteState, TraceId: 01jykrgfsq87w8y1q9rj00wdx1, Sent query response back to proxy, proxyRequestId: 100, proxyId: [9:7519896749752492516:2171] 2025-06-25T14:40:13.637596Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ; 2025-06-25T14:40:13.637984Z node 9 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]
: Error: LookupError, code: 2005 2025-06-25T14:40:13.638054Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=9&id=MmNkNzFmYWYtMmQ4YTZmMmItZTc1YTAyMy1lZTc0NWRmMw==, ActorId: [9:7519896805587071051:2943], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:40:13.638098Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=9&id=MmNkNzFmYWYtMmQ4YTZmMmItZTc1YTAyMy1lZTc0NWRmMw==, ActorId: [9:7519896805587071051:2943], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:40:13.638131Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=9&id=MmNkNzFmYWYtMmQ4YTZmMmItZTc1YTAyMy1lZTc0NWRmMw==, ActorId: [9:7519896805587071051:2943], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:40:13.638167Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=9&id=MmNkNzFmYWYtMmQ4YTZmMmItZTc1YTAyMy1lZTc0NWRmMw==, ActorId: [9:7519896805587071051:2943], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:40:13.638240Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=9&id=MmNkNzFmYWYtMmQ4YTZmMmItZTc1YTAyMy1lZTc0NWRmMw==, ActorId: [9:7519896805587071051:2943], ActorState: unknown state, Session actor destroyed 2025-06-25T14:40:13.677533Z node 9 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [9:7519896749752492482:2142], processor id# 72075186224037891, database# /Root/test-dedicated 2025-06-25T14:40:13.967925Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI= 2025-06-25T14:40:13.968416Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:40:13.968658Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: ReadyState, TraceId: 01jykrgg4g9e7xq69kbreh5y4v, received request, proxyRequestId: 102 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: SELECT * FROM `//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers`; rpcActor: [9:7519896805587071073:2952] database: /Root/test-shared databaseId: /Root/test-shared pool id: default 2025-06-25T14:40:13.968694Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: ReadyState, TraceId: 01jykrgg4g9e7xq69kbreh5y4v, request placed into pool from cache: default 2025-06-25T14:40:13.968785Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: ExecuteState, TraceId: 01jykrgg4g9e7xq69kbreh5y4v, Sending CompileQuery request 2025-06-25T14:40:13.981101Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:991: 
[main][9:7519896762637395301:2612][/Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers] Sync is incomplete in one of the ring groups: cookie# 44 2025-06-25T14:40:13.981200Z node 9 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:991: [main][9:7519896762637395301:2612][/Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers] Sync is incomplete in one of the ring groups: cookie# 45 2025-06-25T14:40:13.982008Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7519896805587071075:2953], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]
: Error: LookupError, code: 2005 2025-06-25T14:40:13.982164Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: ExecuteState, TraceId: 01jykrgg4g9e7xq69kbreh5y4v, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: 2025-06-25T14:40:13.982198Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: ExecuteState, TraceId: 01jykrgg4g9e7xq69kbreh5y4v, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:40:13.982214Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: ExecuteState, TraceId: 01jykrgg4g9e7xq69kbreh5y4v, EndCleanup, isFinal: 0 2025-06-25T14:40:13.982308Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: ExecuteState, TraceId: 01jykrgg4g9e7xq69kbreh5y4v, Sent query response back to proxy, proxyRequestId: 102, proxyId: [9:7519896749752492516:2171] 2025-06-25T14:40:13.982897Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ; 2025-06-25T14:40:13.983096Z node 9 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/workload_manager/classifiers/resource_pool_classifiers]
: Error: LookupError, code: 2005 2025-06-25T14:40:13.983180Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:40:13.983205Z node 9 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:40:13.983219Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:40:13.983236Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:40:13.983278Z node 9 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=9&id=Zjc4MTc5ODktNTYwZTA2NjQtNGE3NGJmY2UtZDQ1MDVjZWI=, ActorId: [9:7519896805587071072:2951], ActorState: unknown state, Session actor destroyed |83.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |83.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |83.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |83.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots |83.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots |83.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots |83.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |83.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |83.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOkWithNewApi [GOOD] |83.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |83.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |83.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |83.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |83.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |83.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots >> TKeyValueTest::TestConcatWorks [GOOD] >> TKeyValueTest::TestConcatWorksNewApi ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOkWithNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:79:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:81:2112] Leader for TabletID 72057594037927937 is [4:84:2113] sender: [4:85:2057] recipient: [4:81:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:84:2113] Leader for TabletID 72057594037927937 is [4:84:2113] sender: [4:170:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:80:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:84:2057] recipient: [5:82:2112] Leader for TabletID 72057594037927937 is [5:85:2113] sender: [5:86:2057] recipient: [5:82:2112] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:85:2113] Leader for TabletID 72057594037927937 is [5:85:2113] sender: [5:171:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:83:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:85:2115] Leader for TabletID 72057594037927937 is [7:88:2116] sender: [7:89:2057] recipient: [7:85:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:88:2116] Leader for TabletID 72057594037927937 is [7:88:2116] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! 
Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:84:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:87:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:88:2057] recipient: [8:86:2115] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:90:2057] recipient: [8:86:2115] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:89:2116] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:107:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:89:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:89:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:86:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:88:2117] Leader for TabletID 72057594037927937 is [10:91:2118] sender: [10:92:2057] recipient: [10:88:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:91:2118] Leader for TabletID 72057594037927937 is [10:91:2118] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:87:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:90:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:91:2057] recipient: [11:89:2117] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:93:2057] recipient: [11:89:2117] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:92:2118] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:178:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:59:2099]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:88:2057] recipient: [12:38:2085] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:91:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:92:2057] recipient: [12:90:2118] Leader for TabletID 72057594037927937 is [12:93:2119] sender: [12:94:2057] recipient: [12:90:2118] !Reboot 72057594037927937 (actor [12:59:2099]) rebooted! !Reboot 72057594037927937 (actor [12:59:2099]) tablet resolver refreshed! new actor is[12:93:2119] Leader for TabletID 72057594037927937 is [12:93:2119] sender: [12:113:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:60:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:77:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:59:2099]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:89:2057] recipient: [13:38:2085] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:92:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:93:2057] recipient: [13:91:2119] Leader for TabletID 72057594037927937 is [13:94:2120] sender: [13:95:2057] recipient: [13:91:2119] !Reboot 72057594037927937 (actor [13:59:2099]) rebooted! !Reboot 72057594037927937 (actor [13:59:2099]) tablet resolver refreshed! new actor is[13:94:2120] Leader for TabletID 72057594037927937 is [13:94:2120] sender: [13:114:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:60:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:77:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:92:2057] recipient: [14:38:2085] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:95:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:96:2057] recipient: [14:94:2122] Leader for TabletID 72057594037927937 is [14:97:2123] sender: [14:98:2057] recipient: [14:94:2122] !Reboot 72057594037927937 (actor [14:59:2099]) rebooted! !Reboot 72057594037927937 (actor [14:59:2099]) tablet resolver refreshed! new actor is[14:97:2123] Leader for TabletID 72057594037927937 is [14:97:2123] sender: [14:183:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:60:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:77:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:59:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:92:2057] recipient: [15:38:2085] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:95:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:96:2057] recipient: [15:94:2122] Leader for TabletID 72057594037927937 is [15:97:2123] sender: [15:98:2057] recipient: [15:94:2122] !Reboot 72057594037927937 (actor [15:59:2099]) rebooted! !Reboot 72057594037927937 (actor [15:59:2099]) tablet resolver refreshed! new actor is[15:97:2123] Leader for TabletID 72057594037927937 is [15:97:2123] sender: [15:183:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:60:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:77:2057] recipient: [16:14:2061] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWrite200KDeleteThenResponseErrorNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] 2025-06-25T14:40:06.144736Z node 3 :KEYVALUE ERROR: 
keyvalue_state.cpp:3023: KeyValue# 72057594037927937 PrepareExecuteTransactionRequest return flase, Marker# KV73 Submsg# KeyValue# 72057594037927937 Can't delete Range, in DeleteRange, total limit of deletions per request (100000) reached, Marker# KV90 Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] 2025-06-25T14:40:14.667866Z node 4 :KEYVALUE ERROR: keyvalue_state.cpp:3023: KeyValue# 72057594037927937 PrepareExecuteTransactionRequest return flase, Marker# KV73 Submsg# KeyValue# 72057594037927937 Can't delete Range, in DeleteRange, total limit of deletions per request (100000) reached, Marker# KV90 >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAlter [GOOD] >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAclChange |83.1%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |83.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |83.2%| [LD] {RESULT} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |83.2%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |83.2%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |83.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOk >> TKeyValueTest::TestBasicWriteRead [GOOD] >> TKeyValueTest::TestBasicWriteReadOverrun >> PgCatalog::PgType [GOOD] >> PgCatalog::InformationSchema >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOk >> KqpPg::TempTablesSessionsIsolation [GOOD] >> KqpPg::TempTablesDrop >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestInlineCopyRangeWorks >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAclChange [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterAclChange [GOOD] Test command err: 2025-06-25T14:38:49.943462Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896445659095491:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:49.943522Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:38:49.995288Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896446071476820:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:49.995874Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001940/r3tmp/tmpAtfhTO/pdisk_1.dat 
2025-06-25T14:38:50.374386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:50.374505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:50.378312Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:50.380462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:50.405329Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:50.414587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:50.414662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 13079, node 1 2025-06-25T14:38:50.424395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:50.427779Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:38:50.427824Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:38:50.463325Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:50.463353Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:50.463359Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:50.463507Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30185 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:38:50.802390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:38:50.972530Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:51.005241Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:52.982805Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:38:52.983017Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:38:52.983044Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:38:52.983996Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 2 2025-06-25T14:38:52.984083Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896458543998206:2294], Start check tables existence, number paths: 2 2025-06-25T14:38:52.985911Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896458543998206:2294], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:38:52.985974Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896458543998206:2294], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:38:52.986010Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896458543998206:2294], Successfully finished 2025-06-25T14:38:52.986065Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:38:52.986187Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=OTc2ZjA5NC1hM2U0ZWY5My0zNTYwOWM1NS1hZDliODdl, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id OTc2ZjA5NC1hM2U0ZWY5My0zNTYwOWM1NS1hZDliODdl 2025-06-25T14:38:52.986270Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=OTc2ZjA5NC1hM2U0ZWY5My0zNTYwOWM1NS1hZDliODdl, ActorId: [1:7519896458543998222:2295], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:38:52.995778Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896458543998224:2520], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:52.999360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:53.002047Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896458543998224:2520], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-25T14:38:53.002189Z node 1 :KQP_WORKLOAD_SERVICE TRACE: 
scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896458543998224:2520], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-25T14:38:53.015483Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896458543998224:2520], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:38:53.089416Z node 2 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:38:53.097953Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519896463251346255:2267], Start check tables existence, number paths: 2 2025-06-25T14:38:53.098493Z node 2 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 2 2025-06-25T14:38:53.098534Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:38:53.098565Z node 2 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:38:53.099893Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519896463251346255:2267], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:38:53.099933Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519896463251346255:2267], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:38:53.099953Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519896463251346255:2267], Successfully finished 2025-06-25T14:38:53.100104Z node 2 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:38:53.103355Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896458543998224:2520], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:53.107860Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896462838965597:2578] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:53.107954Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896458543998224:2520], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-25T14:38:53.110218Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=YWY5NThkN2QtZDI0NWVhMjYtMWFhZGZkNDctYjYwOTI0YQ==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YWY5NThkN2QtZDI0NWVhMjYtMWFhZGZkNDctYjYwOTI0YQ== 2025-06-25T14:38:53.110317Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=YWY5NThkN2QtZDI0NWVhMjYtMWFhZGZkNDctYjYwOTI0YQ==, ActorId: [1:7519896462838965607:2296], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:38:53.110502Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: sample_pool_id 2025-06-25T14:38:53.110519Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new databa ... 
cated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:17.517735Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:18.132246Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:20.206743Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:40:20.206976Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:40:20.206997Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:40:20.207031Z node 12 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-25T14:40:20.207066Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7519896835521507131:2290], Start check tables existence, number paths: 2 2025-06-25T14:40:20.212787Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7519896835521507131:2290], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:40:20.212883Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7519896835521507131:2290], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:40:20.212939Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7519896835521507131:2290], Successfully finished 2025-06-25T14:40:20.213011Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:40:20.215393Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=12&id=ZjE3YTg4MWQtZTBiNTU2YTUtNGY5OTBkNmQtZWNjMGIzYTU=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZjE3YTg4MWQtZTBiNTU2YTUtNGY5OTBkNmQtZWNjMGIzYTU= 2025-06-25T14:40:20.215472Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=12&id=ZjE3YTg4MWQtZTBiNTU2YTUtNGY5OTBkNmQtZWNjMGIzYTU=, ActorId: [12:7519896835521507155:2291], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:40:20.216803Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519896835521507157:2297], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:40:20.220023Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:20.221175Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519896835521507157:2297], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976715658 2025-06-25T14:40:20.221292Z node 12 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519896835521507157:2297], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-25T14:40:20.228125Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519896835521507157:2297], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:40:20.289804Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519896835521507157:2297], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:40:20.292687Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519896835521507208:2329] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:20.292772Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519896835521507157:2297], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-25T14:40:20.293032Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: Root, PoolId: sample_pool_id 2025-06-25T14:40:20.293055Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id Root 2025-06-25T14:40:20.293130Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519896835521507215:2292], DatabaseId: Root, PoolId: sample_pool_id, Start pool fetching 2025-06-25T14:40:20.294687Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519896835521507215:2292], DatabaseId: Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-25T14:40:20.294766Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: Root 2025-06-25T14:40:20.294796Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-06-25T14:40:20.294997Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [12:7519896835521507224:2293], DatabaseId: Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-06-25T14:40:20.296111Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [12:7519896835521507224:2293], DatabaseId: Root, PoolId: sample_pool_id, Got watch notification 2025-06-25T14:40:20.301823Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-06-25T14:40:20.301846Z node 12 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-25T14:40:20.301885Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=12&id=ZjE3YTg4MWQtZTBiNTU2YTUtNGY5OTBkNmQtZWNjMGIzYTU=, ActorId: [12:7519896835521507155:2291], ActorState: ReadyState, TraceId: 01jykrgpad07n2rrs62p1sqt8e, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: GRANT ALL ON `/Root/.metadata/workload_manager/pools/sample_pool_id` TO `test@user`; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: 
default 2025-06-25T14:40:20.301895Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519896835521507236:2295], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-25T14:40:20.303357Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519896835521507236:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:20.303457Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:20.314939Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:40:20.316597Z node 12 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=12&id=ZjE3YTg4MWQtZTBiNTU2YTUtNGY5OTBkNmQtZWNjMGIzYTU=, ActorId: [12:7519896835521507155:2291], ActorState: ExecuteState, TraceId: 01jykrgpad07n2rrs62p1sqt8e, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [12:7519896835521507244:2291] WorkloadServiceCleanup: 0 2025-06-25T14:40:20.317758Z node 12 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [12:7519896835521507224:2293], DatabaseId: Root, PoolId: sample_pool_id, Got watch notification 2025-06-25T14:40:20.318399Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=12&id=ZjE3YTg4MWQtZTBiNTU2YTUtNGY5OTBkNmQtZWNjMGIzYTU=, ActorId: [12:7519896835521507155:2291], ActorState: CleanupState, TraceId: 01jykrgpad07n2rrs62p1sqt8e, EndCleanup, isFinal: 0 2025-06-25T14:40:20.318436Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=12&id=ZjE3YTg4MWQtZTBiNTU2YTUtNGY5OTBkNmQtZWNjMGIzYTU=, ActorId: [12:7519896835521507155:2291], ActorState: CleanupState, TraceId: 01jykrgpad07n2rrs62p1sqt8e, Sent query response back to proxy, proxyRequestId: 3, proxyId: [12:7519896822636604798:2188] 2025-06-25T14:40:20.325548Z node 12 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=12&id=ZjE3YTg4MWQtZTBiNTU2YTUtNGY5OTBkNmQtZWNjMGIzYTU=, ActorId: [12:7519896835521507155:2291], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:40:20.325583Z node 12 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=12&id=ZjE3YTg4MWQtZTBiNTU2YTUtNGY5OTBkNmQtZWNjMGIzYTU=, ActorId: [12:7519896835521507155:2291], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:40:20.325609Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=12&id=ZjE3YTg4MWQtZTBiNTU2YTUtNGY5OTBkNmQtZWNjMGIzYTU=, ActorId: [12:7519896835521507155:2291], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:40:20.325633Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=12&id=ZjE3YTg4MWQtZTBiNTU2YTUtNGY5OTBkNmQtZWNjMGIzYTU=, ActorId: [12:7519896835521507155:2291], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:40:20.325688Z node 12 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=12&id=ZjE3YTg4MWQtZTBiNTU2YTUtNGY5OTBkNmQtZWNjMGIzYTU=, ActorId: [12:7519896835521507155:2291], ActorState: unknown state, Session actor destroyed >> PgCatalog::InformationSchema [GOOD] >> PgCatalog::CheckSetConfig >> TKeyValueTest::TestObtainLockNewApi [GOOD] >> TKeyValueTest::TestLargeWriteAndDelete >> KqpPg::TempTablesDrop [GOOD] |83.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest |83.2%| [TM] {asan, 
default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest >> KqpPg::TempTablesWithCache |83.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest |83.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest >> TKeyValueTest::TestCleanUpDataOnEmptyTablet [GOOD] >> TKeyValueTest::TestCleanUpDataOnEmptyTabletResetGeneration >> TKeyValueTest::TestCleanUpDataOnEmptyTabletResetGeneration [GOOD] >> TKeyValueTest::TestCleanUpDataWithMockDisk >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorks [GOOD] >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorksNewApi >> Viewer::JsonStorageListingV1NodeIdFilter [GOOD] >> Viewer::JsonStorageListingV1PDiskIdFilter >> TKeyValueTest::TestBasicWriteReadOverrun [GOOD] >> TKeyValueTest::TestBlockedEvGetRequest >> KqpPg::TempTablesWithCache [GOOD] >> KqpPg::TableDeleteWhere+useSink >> TKeyValueTest::TestBlockedEvGetRequest [GOOD] >> KqpWorkloadServiceTables::TestLeaseUpdates [GOOD] >> KqpPg::TableDeleteAllData-useSink [GOOD] >> KqpPg::PgUpdateCompoundKey+useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestBlockedEvGetRequest [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRead ! 
Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:79:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:82:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:83:2057] recipient: [10:81:2112] Leader for TabletID 72057594037927937 is [10:84:2113] sender: [10:85:2057] recipient: [10:81:2112] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! 
new actor is[10:84:2113] Leader for TabletID 72057594037927937 is [10:84:2113] sender: [10:170:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:79:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:82:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:83:2057] recipient: [11:81:2112] Leader for TabletID 72057594037927937 is [11:84:2113] sender: [11:85:2057] recipient: [11:81:2112] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:84:2113] Leader for TabletID 72057594037927937 is [11:84:2113] sender: [11:170:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:80:2057] recipient: [12:38:2085] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:83:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:84:2057] recipient: [12:82:2112] Leader for TabletID 72057594037927937 is [12:85:2113] sender: [12:86:2057] recipient: [12:82:2112] !Reboot 72057594037927937 (actor [12:59:2099]) rebooted! !Reboot 72057594037927937 (actor [12:59:2099]) tablet resolver refreshed! new actor is[12:85:2113] Leader for TabletID 72057594037927937 is [12:85:2113] sender: [12:171:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:60:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:77:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:83:2057] recipient: [13:38:2085] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:86:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:87:2057] recipient: [13:85:2115] Leader for TabletID 72057594037927937 is [13:88:2116] sender: [13:89:2057] recipient: [13:85:2115] !Reboot 72057594037927937 (actor [13:59:2099]) rebooted! !Reboot 72057594037927937 (actor [13:59:2099]) tablet resolver refreshed! 
new actor is[13:88:2116] Leader for TabletID 72057594037927937 is [13:88:2116] sender: [13:174:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:60:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:77:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:59:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:83:2057] recipient: [14:38:2085] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:86:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:87:2057] recipient: [14:85:2115] Leader for TabletID 72057594037927937 is [14:88:2116] sender: [14:89:2057] recipient: [14:85:2115] !Reboot 72057594037927937 (actor [14:59:2099]) rebooted! !Reboot 72057594037927937 (actor [14:59:2099]) tablet resolver refreshed! new actor is[14:88:2116] Leader for TabletID 72057594037927937 is [14:88:2116] sender: [14:174:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:60:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:77:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:84:2057] recipient: [15:38:2085] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:87:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:88:2057] recipient: [15:86:2115] Leader for TabletID 72057594037927937 is [15:89:2116] sender: [15:90:2057] recipient: [15:86:2115] !Reboot 72057594037927937 (actor [15:59:2099]) rebooted! !Reboot 72057594037927937 (actor [15:59:2099]) tablet resolver refreshed! new actor is[15:89:2116] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:60:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:77:2057] recipient: [16:14:2061] 2025-06-25T14:40:29.676143Z node 17 :KEYVALUE ERROR: keyvalue_storage_read_request.cpp:254: {KV323@keyvalue_storage_read_request.cpp:254} Received BLOCKED EvGetResult. 
KeyValue# 72057594037927937 Status# BLOCKED Deadline# 18446744073709551 Now# 0 SentAt# 1970-01-01T00:00:00.000000Z GotAt# 0 ErrorReason# block race detected 2025-06-25T14:40:29.678899Z node 17 :TABLET_MAIN ERROR: tablet_sys.cpp:934: Tablet: 72057594037927937 HandleBlockBlobStorageResult, msg->Status: ALREADY, not discovered Marker# TSYS21 2025-06-25T14:40:29.678943Z node 17 :TABLET_MAIN ERROR: tablet_sys.cpp:1849: Tablet: 72057594037927937 Type: KeyValue, EReason: ReasonBootBSError, SuggestedGeneration: 0, KnownGeneration: 3 Marker# TSYS31 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceTables::TestLeaseUpdates [GOOD] Test command err: 2025-06-25T14:38:48.612958Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896440065174548:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:38:48.613556Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a40/r3tmp/tmpQX09js/pdisk_1.dat 2025-06-25T14:38:48.989068Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22218, node 1 2025-06-25T14:38:49.092784Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:49.101408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:49.104114Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:49.159316Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:49.159341Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:49.159346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:49.159445Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63865 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:38:49.447247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:38:49.587430Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:51.465365Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-25T14:38:51.477848Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-25T14:38:51.477887Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-25T14:38:51.477907Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-25T14:38:51.481048Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NGQ3MDZkNDAtNDU4NGRmMi0yZTI4MGJmNC04NDdlZjExYg==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NGQ3MDZkNDAtNDU4NGRmMi0yZTI4MGJmNC04NDdlZjExYg== 2025-06-25T14:38:51.481613Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896452950076844:2289], Start check tables existence, number paths: 2 2025-06-25T14:38:51.481730Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NGQ3MDZkNDAtNDU4NGRmMi0yZTI4MGJmNC04NDdlZjExYg==, ActorId: [1:7519896452950076846:2290], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:38:51.487601Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896452950076844:2289], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-25T14:38:51.487665Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896452950076844:2289], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-25T14:38:51.487691Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7519896452950076844:2289], Successfully finished 2025-06-25T14:38:51.487765Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-25T14:38:51.509056Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452950076863:2297], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:51.512376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:38:51.515913Z node 1 
:KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452950076863:2297], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976710658 2025-06-25T14:38:51.517200Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452950076863:2297], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-25T14:38:51.528814Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452950076863:2297], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:38:51.624676Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452950076863:2297], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-25T14:38:51.628846Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896452950076916:2330] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:38:51.628969Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896452950076863:2297], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-25T14:38:51.629422Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: Root, PoolId: sample_pool_id 2025-06-25T14:38:51.629455Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id Root 2025-06-25T14:38:51.629547Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896452950076923:2292], DatabaseId: Root, PoolId: sample_pool_id, Start pool fetching 2025-06-25T14:38:51.630577Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896452950076923:2292], DatabaseId: Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-25T14:38:51.630662Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: Root 2025-06-25T14:38:51.630688Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-06-25T14:38:51.630938Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7519896452950076932:2293], DatabaseId: Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-06-25T14:38:51.631874Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7519896452950076932:2293], DatabaseId: Root, PoolId: sample_pool_id, Got watch notification 2025-06-25T14:38:51.643500Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-06-25T14:38:51.643524Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-25T14:38:51.643586Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896452950076944:2295], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-25T14:38:51.643795Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=NGQ3MDZkNDAtNDU4NGRmMi0yZTI4MGJmNC04NDdlZjExYg==, ActorId: [1:7519896452950076846:2290], ActorState: ReadyState, TraceId: 01jykrdzqten7kedzsvajjqv0p, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: 
QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: DROP RESOURCE POOL sample_pool_id; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-06-25T14:38:51.645044Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896452950076944:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:51.645173Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:51.889342Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7519896452950076932:2293], DatabaseId: Root, PoolId: sample_pool_id, Got delete notification 2025-06-25T14:38:51.892367Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=1&id=NGQ3MDZkNDAtNDU4NGRmMi0yZTI4MGJmNC04NDdlZjExYg==, ActorId: [1:7519896452950076846:2290], ActorState: ExecuteState, TraceId: 01jykrdzqten7kedzsvajjqv0p, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted ... _SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgz9t8f0j8s2mv3n5wxm3, txInfo Status: Committed Kind: ReadWrite TotalDuration: 7.224 ServerDuration: 6.873 QueriesCount: 2 2025-06-25T14:40:29.506596Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgz9t8f0j8s2mv3n5wxm3, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-25T14:40:29.506640Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgz9t8f0j8s2mv3n5wxm3, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:40:29.506665Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgz9t8f0j8s2mv3n5wxm3, EndCleanup, isFinal: 0 2025-06-25T14:40:29.506703Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgz9t8f0j8s2mv3n5wxm3, Sent query response back to proxy, proxyRequestId: 28, proxyId: [10:7519896779612130290:2161] 2025-06-25T14:40:29.506946Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, TxId: 2025-06-25T14:40:29.507039Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:197: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, RunDataQuery: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); 
2025-06-25T14:40:29.507313Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ReadyState, TraceId: 01jykrgza38j9tr4s899ydknpj, received request, proxyRequestId: 29 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); rpcActor: [10:7519896874101412159:2504] database: /Root databaseId: /Root pool id: default 2025-06-25T14:40:29.507337Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ReadyState, TraceId: 01jykrgza38j9tr4s899ydknpj, request placed into pool from cache: default 2025-06-25T14:40:29.507742Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgza38j9tr4s899ydknpj, ExecutePhyTx, tx: 0x000050C00048EE18 literal: 0 commit: 0 txCtx.DeferredEffects.size(): 0 2025-06-25T14:40:29.507790Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1503: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgza38j9tr4s899ydknpj, Sending to Executer TraceId: 0 8 2025-06-25T14:40:29.507842Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1561: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgza38j9tr4s899ydknpj, Created new KQP executer: [10:7519896874101412162:2498] isRollback: 0 2025-06-25T14:40:29.511317Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgza38j9tr4s899ydknpj, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-06-25T14:40:29.511388Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1352: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgza38j9tr4s899ydknpj, ExecutePhyTx, tx: 0x000050C00086F098 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-25T14:40:29.512045Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgza38j9tr4s899ydknpj, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-25T14:40:29.512143Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: 
ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgza38j9tr4s899ydknpj, txInfo Status: Committed Kind: ReadOnly TotalDuration: 4.498 ServerDuration: 4.414 QueriesCount: 2 2025-06-25T14:40:29.512240Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgza38j9tr4s899ydknpj, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-25T14:40:29.512299Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgza38j9tr4s899ydknpj, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:40:29.512339Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgza38j9tr4s899ydknpj, EndCleanup, isFinal: 0 2025-06-25T14:40:29.512379Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ExecuteState, TraceId: 01jykrgza38j9tr4s899ydknpj, Sent query response back to proxy, proxyRequestId: 29, proxyId: [10:7519896779612130290:2161] 2025-06-25T14:40:29.512628Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, TxId: 2025-06-25T14:40:29.512723Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, Finish with SUCCESS, SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, TxId: 2025-06-25T14:40:29.512832Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:40:29.512861Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:40:29.512879Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:40:29.512899Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: unknown state, Cleanup temp tables: 0 
2025-06-25T14:40:29.512954Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=10&id=MjY1NzVhOGYtODc0M2UyZmUtMWEwYjQ3NzItZTIwZjYxM2I=, ActorId: [10:7519896874101412131:2498], ActorState: unknown state, Session actor destroyed 2025-06-25T14:40:29.518372Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=10&id=YmE5YjVkMWQtZDYzZjczMGItNzFmODA3MjMtNDk0ZTZhZjk=, ActorId: [10:7519896796791999978:2291], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:40:29.518406Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=10&id=YmE5YjVkMWQtZDYzZjczMGItNzFmODA3MjMtNDk0ZTZhZjk=, ActorId: [10:7519896796791999978:2291], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:40:29.518423Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=10&id=YmE5YjVkMWQtZDYzZjczMGItNzFmODA3MjMtNDk0ZTZhZjk=, ActorId: [10:7519896796791999978:2291], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:40:29.518443Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=10&id=YmE5YjVkMWQtZDYzZjczMGItNzFmODA3MjMtNDk0ZTZhZjk=, ActorId: [10:7519896796791999978:2291], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:40:29.518493Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=10&id=YmE5YjVkMWQtZDYzZjczMGItNzFmODA3MjMtNDk0ZTZhZjk=, ActorId: [10:7519896796791999978:2291], ActorState: unknown state, Session actor destroyed >> TKeyValueTest::TestRenameToLongKey [GOOD] >> TKeyValueTest::TestWriteReadWhileWriteWorks [GOOD] |83.2%| [TA] $(B)/ydb/core/kqp/workload_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestRenameToLongKey [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! 
new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! 
new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:86:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:86:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:85:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! 
new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:110:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:88:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:88:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:111:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:89:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:92:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:93:2057] recipient: [10:91:2120] Leader for TabletID 72057594037927937 is [10:94:2121] sender: [10:95:2057] recipient: [10:91:2120] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:94:2121] Leader for TabletID 72057594037927937 is [10:94:2121] sender: [10:180:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:89:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:93:2057] recipient: [11:91:2120] Leader for TabletID 72057594037927937 is [11:94:2121] sender: [11:95:2057] recipient: [11:91:2120] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! 
new actor is[11:94:2121] Leader for TabletID 72057594037927937 is [11:94:2121] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:59:209 ... is [12:59:2099] sender: [12:93:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:94:2057] recipient: [12:92:2120] Leader for TabletID 72057594037927937 is [12:95:2121] sender: [12:96:2057] recipient: [12:92:2120] !Reboot 72057594037927937 (actor [12:59:2099]) rebooted! !Reboot 72057594037927937 (actor [12:59:2099]) tablet resolver refreshed! new actor is[12:95:2121] Leader for TabletID 72057594037927937 is [12:95:2121] sender: [12:181:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:60:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:77:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:60:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:77:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:60:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:77:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:79:2057] recipient: [15:38:2085] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:82:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:83:2057] recipient: [15:81:2112] Leader for TabletID 72057594037927937 is [15:84:2113] sender: [15:85:2057] recipient: [15:81:2112] !Reboot 72057594037927937 (actor [15:59:2099]) rebooted! !Reboot 72057594037927937 (actor [15:59:2099]) tablet resolver refreshed! new actor is[15:84:2113] Leader for TabletID 72057594037927937 is [15:84:2113] sender: [15:170:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:60:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:77:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:59:2099]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! 
Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:79:2057] recipient: [16:38:2085] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:82:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:83:2057] recipient: [16:81:2112] Leader for TabletID 72057594037927937 is [16:84:2113] sender: [16:85:2057] recipient: [16:81:2112] !Reboot 72057594037927937 (actor [16:59:2099]) rebooted! !Reboot 72057594037927937 (actor [16:59:2099]) tablet resolver refreshed! new actor is[16:84:2113] Leader for TabletID 72057594037927937 is [16:84:2113] sender: [16:170:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:57:2057] recipient: [17:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:57:2057] recipient: [17:52:2097] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:60:2057] recipient: [17:52:2097] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:77:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:80:2057] recipient: [17:38:2085] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:83:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:84:2057] recipient: [17:82:2112] Leader for TabletID 72057594037927937 is [17:85:2113] sender: [17:86:2057] recipient: [17:82:2112] !Reboot 72057594037927937 (actor [17:59:2099]) rebooted! !Reboot 72057594037927937 (actor [17:59:2099]) tablet resolver refreshed! new actor is[17:85:2113] Leader for TabletID 72057594037927937 is [17:85:2113] sender: [17:171:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:57:2057] recipient: [18:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:57:2057] recipient: [18:53:2097] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:60:2057] recipient: [18:53:2097] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:77:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:83:2057] recipient: [18:38:2085] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:86:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:87:2057] recipient: [18:85:2115] Leader for TabletID 72057594037927937 is [18:88:2116] sender: [18:89:2057] recipient: [18:85:2115] !Reboot 72057594037927937 (actor [18:59:2099]) rebooted! !Reboot 72057594037927937 (actor [18:59:2099]) tablet resolver refreshed! new actor is[18:88:2116] Leader for TabletID 72057594037927937 is [18:88:2116] sender: [18:174:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:60:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:77:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:83:2057] recipient: [19:38:2085] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:86:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:87:2057] recipient: [19:85:2115] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:89:2057] recipient: [19:85:2115] !Reboot 72057594037927937 (actor [19:59:2099]) rebooted! !Reboot 72057594037927937 (actor [19:59:2099]) tablet resolver refreshed! new actor is[19:88:2116] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:174:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:60:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:77:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:84:2057] recipient: [20:38:2085] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:87:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:88:2057] recipient: [20:86:2115] Leader for TabletID 72057594037927937 is [20:89:2116] sender: [20:90:2057] recipient: [20:86:2115] !Reboot 72057594037927937 (actor [20:59:2099]) rebooted! !Reboot 72057594037927937 (actor [20:59:2099]) tablet resolver refreshed! new actor is[20:89:2116] Leader for TabletID 72057594037927937 is [20:89:2116] sender: [20:175:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:60:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:77:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:87:2057] recipient: [21:38:2085] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:90:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:91:2057] recipient: [21:89:2118] Leader for TabletID 72057594037927937 is [21:92:2119] sender: [21:93:2057] recipient: [21:89:2118] !Reboot 72057594037927937 (actor [21:59:2099]) rebooted! !Reboot 72057594037927937 (actor [21:59:2099]) tablet resolver refreshed! new actor is[21:92:2119] Leader for TabletID 72057594037927937 is [21:92:2119] sender: [21:178:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:60:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:77:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:87:2057] recipient: [22:38:2085] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:90:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:91:2057] recipient: [22:89:2118] Leader for TabletID 72057594037927937 is [22:92:2119] sender: [22:93:2057] recipient: [22:89:2118] !Reboot 72057594037927937 (actor [22:59:2099]) rebooted! !Reboot 72057594037927937 (actor [22:59:2099]) tablet resolver refreshed! new actor is[22:92:2119] Leader for TabletID 72057594037927937 is [22:92:2119] sender: [22:178:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:57:2057] recipient: [23:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:57:2057] recipient: [23:53:2097] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:60:2057] recipient: [23:53:2097] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:77:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:88:2057] recipient: [23:38:2085] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:91:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:92:2057] recipient: [23:90:2118] Leader for TabletID 72057594037927937 is [23:93:2119] sender: [23:94:2057] recipient: [23:90:2118] !Reboot 72057594037927937 (actor [23:59:2099]) rebooted! !Reboot 72057594037927937 (actor [23:59:2099]) tablet resolver refreshed! new actor is[23:93:2119] Leader for TabletID 72057594037927937 is [23:93:2119] sender: [23:179:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:60:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:77:2057] recipient: [24:14:2061] >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorks [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadWhileWriteWorks [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:83:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:83:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:86:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:86:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:86:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:89:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:89:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:88:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:88:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:89:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:93:2057] recipient: [11:91:2119] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:95:2057] recipient: [11:91:2119] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:94:2120] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... TabletID 72057594037927937 is [13:59:2099] sender: [13:93:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:94:2057] recipient: [13:92:2119] Leader for TabletID 72057594037927937 is [13:95:2120] sender: [13:96:2057] recipient: [13:92:2119] !Reboot 72057594037927937 (actor [13:59:2099]) rebooted! !Reboot 72057594037927937 (actor [13:59:2099]) tablet resolver refreshed! new actor is[13:95:2120] Leader for TabletID 72057594037927937 is [13:95:2120] sender: [13:181:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:60:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:77:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:60:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:77:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:60:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:77:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:79:2057] recipient: [16:38:2085] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:82:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:83:2057] recipient: [16:81:2112] Leader for TabletID 72057594037927937 is [16:84:2113] sender: [16:85:2057] recipient: [16:81:2112] !Reboot 72057594037927937 (actor [16:59:2099]) rebooted! !Reboot 72057594037927937 (actor [16:59:2099]) tablet resolver refreshed! 
new actor is[16:84:2113] Leader for TabletID 72057594037927937 is [16:84:2113] sender: [16:170:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:57:2057] recipient: [17:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:57:2057] recipient: [17:52:2097] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:60:2057] recipient: [17:52:2097] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:77:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:79:2057] recipient: [17:38:2085] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:82:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:83:2057] recipient: [17:81:2112] Leader for TabletID 72057594037927937 is [17:84:2113] sender: [17:85:2057] recipient: [17:81:2112] !Reboot 72057594037927937 (actor [17:59:2099]) rebooted! !Reboot 72057594037927937 (actor [17:59:2099]) tablet resolver refreshed! new actor is[17:84:2113] Leader for TabletID 72057594037927937 is [17:84:2113] sender: [17:170:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:57:2057] recipient: [18:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:57:2057] recipient: [18:53:2097] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:60:2057] recipient: [18:53:2097] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:77:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:80:2057] recipient: [18:38:2085] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:83:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:84:2057] recipient: [18:82:2112] Leader for TabletID 72057594037927937 is [18:85:2113] sender: [18:86:2057] recipient: [18:82:2112] !Reboot 72057594037927937 (actor [18:59:2099]) rebooted! !Reboot 72057594037927937 (actor [18:59:2099]) tablet resolver refreshed! new actor is[18:85:2113] Leader for TabletID 72057594037927937 is [18:85:2113] sender: [18:171:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:60:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:77:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:83:2057] recipient: [19:38:2085] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:86:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:87:2057] recipient: [19:85:2115] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:89:2057] recipient: [19:85:2115] !Reboot 72057594037927937 (actor [19:59:2099]) rebooted! !Reboot 72057594037927937 (actor [19:59:2099]) tablet resolver refreshed! 
new actor is[19:88:2116] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:174:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:60:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:77:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:83:2057] recipient: [20:38:2085] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:86:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:87:2057] recipient: [20:85:2115] Leader for TabletID 72057594037927937 is [20:88:2116] sender: [20:89:2057] recipient: [20:85:2115] !Reboot 72057594037927937 (actor [20:59:2099]) rebooted! !Reboot 72057594037927937 (actor [20:59:2099]) tablet resolver refreshed! new actor is[20:88:2116] Leader for TabletID 72057594037927937 is [20:88:2116] sender: [20:174:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:60:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:77:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:84:2057] recipient: [21:38:2085] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:87:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:88:2057] recipient: [21:86:2115] Leader for TabletID 72057594037927937 is [21:89:2116] sender: [21:90:2057] recipient: [21:86:2115] !Reboot 72057594037927937 (actor [21:59:2099]) rebooted! !Reboot 72057594037927937 (actor [21:59:2099]) tablet resolver refreshed! new actor is[21:89:2116] Leader for TabletID 72057594037927937 is [21:89:2116] sender: [21:175:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:60:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:77:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:87:2057] recipient: [22:38:2085] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:90:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:91:2057] recipient: [22:89:2118] Leader for TabletID 72057594037927937 is [22:92:2119] sender: [22:93:2057] recipient: [22:89:2118] !Reboot 72057594037927937 (actor [22:59:2099]) rebooted! !Reboot 72057594037927937 (actor [22:59:2099]) tablet resolver refreshed! 
new actor is[22:92:2119] Leader for TabletID 72057594037927937 is [22:92:2119] sender: [22:178:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:57:2057] recipient: [23:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:57:2057] recipient: [23:53:2097] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:60:2057] recipient: [23:53:2097] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:77:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:87:2057] recipient: [23:38:2085] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:90:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:91:2057] recipient: [23:89:2118] Leader for TabletID 72057594037927937 is [23:92:2119] sender: [23:93:2057] recipient: [23:89:2118] !Reboot 72057594037927937 (actor [23:59:2099]) rebooted! !Reboot 72057594037927937 (actor [23:59:2099]) tablet resolver refreshed! new actor is[23:92:2119] Leader for TabletID 72057594037927937 is [23:92:2119] sender: [23:178:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:60:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:77:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:88:2057] recipient: [24:38:2085] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:91:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:92:2057] recipient: [24:90:2118] Leader for TabletID 72057594037927937 is [24:93:2119] sender: [24:94:2057] recipient: [24:90:2118] !Reboot 72057594037927937 (actor [24:59:2099]) rebooted! !Reboot 72057594037927937 (actor [24:59:2099]) tablet resolver refreshed! 
new actor is[24:93:2119] Leader for TabletID 72057594037927937 is [24:93:2119] sender: [24:179:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:57:2057] recipient: [25:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:57:2057] recipient: [25:54:2097] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:60:2057] recipient: [25:54:2097] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:77:2057] recipient: [25:14:2061] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorks [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! 
new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! 
new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:107:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:86:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:89:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:89:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! 
new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:90:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:93:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:94:2057] recipient: [11:92:2120] Leader for TabletID 72057594037927937 is [11:95:2121] sender: [11:96:2057] recipient: [11:92:2120] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:95:2121] Leader for TabletID 72057594037927937 is [11:95:2121] sender: [11:181:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 7205759403 ... TabletID 72057594037927937 is [12:59:2099] sender: [12:92:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:94:2057] recipient: [12:93:2120] Leader for TabletID 72057594037927937 is [12:95:2121] sender: [12:96:2057] recipient: [12:93:2120] !Reboot 72057594037927937 (actor [12:59:2099]) rebooted! !Reboot 72057594037927937 (actor [12:59:2099]) tablet resolver refreshed! 
new actor is[12:95:2121] Leader for TabletID 72057594037927937 is [12:95:2121] sender: [12:181:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:60:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:77:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:60:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:77:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:60:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:77:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:79:2057] recipient: [15:38:2085] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:82:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:83:2057] recipient: [15:81:2112] Leader for TabletID 72057594037927937 is [15:84:2113] sender: [15:85:2057] recipient: [15:81:2112] !Reboot 72057594037927937 (actor [15:59:2099]) rebooted! !Reboot 72057594037927937 (actor [15:59:2099]) tablet resolver refreshed! new actor is[15:84:2113] Leader for TabletID 72057594037927937 is [15:84:2113] sender: [15:170:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:60:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:77:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:79:2057] recipient: [16:38:2085] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:82:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:83:2057] recipient: [16:81:2112] Leader for TabletID 72057594037927937 is [16:84:2113] sender: [16:85:2057] recipient: [16:81:2112] !Reboot 72057594037927937 (actor [16:59:2099]) rebooted! !Reboot 72057594037927937 (actor [16:59:2099]) tablet resolver refreshed! 
new actor is[16:84:2113] Leader for TabletID 72057594037927937 is [16:84:2113] sender: [16:170:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:57:2057] recipient: [17:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:57:2057] recipient: [17:52:2097] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:60:2057] recipient: [17:52:2097] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:77:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:80:2057] recipient: [17:38:2085] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:83:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:84:2057] recipient: [17:82:2112] Leader for TabletID 72057594037927937 is [17:85:2113] sender: [17:86:2057] recipient: [17:82:2112] !Reboot 72057594037927937 (actor [17:59:2099]) rebooted! !Reboot 72057594037927937 (actor [17:59:2099]) tablet resolver refreshed! new actor is[17:85:2113] Leader for TabletID 72057594037927937 is [17:85:2113] sender: [17:171:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:57:2057] recipient: [18:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:57:2057] recipient: [18:53:2097] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:60:2057] recipient: [18:53:2097] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:77:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:83:2057] recipient: [18:38:2085] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:86:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:87:2057] recipient: [18:85:2115] Leader for TabletID 72057594037927937 is [18:88:2116] sender: [18:89:2057] recipient: [18:85:2115] !Reboot 72057594037927937 (actor [18:59:2099]) rebooted! !Reboot 72057594037927937 (actor [18:59:2099]) tablet resolver refreshed! new actor is[18:88:2116] Leader for TabletID 72057594037927937 is [18:88:2116] sender: [18:174:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:60:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:77:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:83:2057] recipient: [19:38:2085] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:86:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:87:2057] recipient: [19:85:2115] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:89:2057] recipient: [19:85:2115] !Reboot 72057594037927937 (actor [19:59:2099]) rebooted! !Reboot 72057594037927937 (actor [19:59:2099]) tablet resolver refreshed! 
new actor is[19:88:2116] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:174:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:60:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:77:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:84:2057] recipient: [20:38:2085] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:87:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:88:2057] recipient: [20:86:2115] Leader for TabletID 72057594037927937 is [20:89:2116] sender: [20:90:2057] recipient: [20:86:2115] !Reboot 72057594037927937 (actor [20:59:2099]) rebooted! !Reboot 72057594037927937 (actor [20:59:2099]) tablet resolver refreshed! new actor is[20:89:2116] Leader for TabletID 72057594037927937 is [20:89:2116] sender: [20:175:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:60:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:77:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:86:2057] recipient: [21:38:2085] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:89:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:90:2057] recipient: [21:88:2117] Leader for TabletID 72057594037927937 is [21:91:2118] sender: [21:92:2057] recipient: [21:88:2117] !Reboot 72057594037927937 (actor [21:59:2099]) rebooted! !Reboot 72057594037927937 (actor [21:59:2099]) tablet resolver refreshed! new actor is[21:91:2118] Leader for TabletID 72057594037927937 is [21:91:2118] sender: [21:177:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:60:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:77:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:86:2057] recipient: [22:38:2085] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:89:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:90:2057] recipient: [22:88:2117] Leader for TabletID 72057594037927937 is [22:91:2118] sender: [22:92:2057] recipient: [22:88:2117] !Reboot 72057594037927937 (actor [22:59:2099]) rebooted! !Reboot 72057594037927937 (actor [22:59:2099]) tablet resolver refreshed! 
new actor is[22:91:2118] Leader for TabletID 72057594037927937 is [22:91:2118] sender: [22:177:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:57:2057] recipient: [23:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:57:2057] recipient: [23:53:2097] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:60:2057] recipient: [23:53:2097] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:77:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:87:2057] recipient: [23:38:2085] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:90:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:91:2057] recipient: [23:89:2117] Leader for TabletID 72057594037927937 is [23:92:2118] sender: [23:93:2057] recipient: [23:89:2117] !Reboot 72057594037927937 (actor [23:59:2099]) rebooted! !Reboot 72057594037927937 (actor [23:59:2099]) tablet resolver refreshed! new actor is[23:92:2118] Leader for TabletID 72057594037927937 is [23:92:2118] sender: [23:178:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:60:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:77:2057] recipient: [24:14:2061] >> Sharding::XXUsage >> Sharding::XXUsage [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/sharding/ut/unittest >> Sharding::XXUsage [GOOD] Test command err: 8278356707735932879 13854416475599308532 7358228729739969623 10803012486657556593 17211108684699061020 14720204143641002276 462202664314785784 11685052263248848615 1832300482336121892 3343339729682483040 7814970665086387503 17244733234845521980 7422904454355384000 3001311100943765579 3260944796143333595 1400300483346638670 10314289303138473667 6720633673296234181 12899713205348439998 10662326420138750698 5420872468942746807 12487378229493739372 6893871292857352302 5581121822195329482 3715312376806542756 7144677705155712463 15338295285359074120 1101043971185540239 1848484455953783663 15082200474052529199 17099506110185566985 10786799683747182316 8826367415604130491 13459823845697343787 9076005820618857901 10723020107217592137 17907786371217648905 13865239583975478798 16935901824759250282 5437584422182162020 12340834484481963313 4701635631370648020 12272830864923792160 11585228026683895423 1864733083670980396 16873449921748817592 9629756591895670768 11174653977235451804 11353368686875971436 9522985878205981760 14562244668011624869 2263478404865651328 10773656467999567106 8886273492844052715 5072072586550255203 4574413871335798290 16078964594387328130 4154384915339061224 420299252068965014 6789133867507576775 15796999906988321883 13255855957386505947 10666768972087503467 6243197007449839747 12712916631386637286 12751877164228269091 5107723857970822170 16787528921609769449 2168385800523155287 2251998392647937844 18403692451088658834 16099921548389715096 13865542323441225777 12273248121776673339 3411112478258368643 3447210281047183088 10987807272221913628 7992233903678958512 13579025475816504303 4399355476769052215 9742656992869668786 438998350693111114 8692849443676544528 
13466778401950329464 18352763080644449246 1328416328313305610 2544209762330429628 9771663725606270015 8371018376315958378 14256427574989795825 |83.2%| [TA] $(B)/ydb/core/tx/sharding/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> SystemView::ShowCreateTableColumnUpsertOptions [GOOD] >> SystemView::ShowCreateTableColumnUpsertIndex |83.3%| [TA] {RESULT} $(B)/ydb/core/kqp/workload_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |83.3%| [TA] {RESULT} $(B)/ydb/core/tx/sharding/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestInlineWriteReadWithRestartsWithNotCorrectUTF8NewApi >> KqpPg::PgUpdateCompoundKey+useSink [GOOD] >> KqpPg::PgUpdateCompoundKey-useSink >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> TKeyValueTest::TestCopyRangeWorksNewApi [GOOD] >> TKeyValueTest::TestCopyRangeToLongKey >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:86:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:89:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:89:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:90:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:93:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:94:2057] recipient: [11:92:2120] Leader for TabletID 72057594037927937 is [11:95:2121] sender: [11:96:2057] recipient: [11:92:2120] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:95:2121] Leader for TabletID 72057594037927937 is [11:95:2121] sender: [11:181:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... boot 72057594037927937 (actor [19:59:2099]) tablet resolver refreshed! 
new actor is[19:88:2116] Leader for TabletID 72057594037927937 is [19:88:2116] sender: [19:174:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:60:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:77:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:59:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:83:2057] recipient: [20:38:2085] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:86:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:87:2057] recipient: [20:85:2115] Leader for TabletID 72057594037927937 is [20:88:2116] sender: [20:89:2057] recipient: [20:85:2115] !Reboot 72057594037927937 (actor [20:59:2099]) rebooted! !Reboot 72057594037927937 (actor [20:59:2099]) tablet resolver refreshed! new actor is[20:88:2116] Leader for TabletID 72057594037927937 is [20:88:2116] sender: [20:174:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:60:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:77:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:84:2057] recipient: [21:38:2085] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:87:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:88:2057] recipient: [21:86:2115] Leader for TabletID 72057594037927937 is [21:89:2116] sender: [21:90:2057] recipient: [21:86:2115] !Reboot 72057594037927937 (actor [21:59:2099]) rebooted! !Reboot 72057594037927937 (actor [21:59:2099]) tablet resolver refreshed! new actor is[21:89:2116] Leader for TabletID 72057594037927937 is [21:89:2116] sender: [21:107:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:60:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:77:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:86:2057] recipient: [22:38:2085] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:89:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:90:2057] recipient: [22:88:2117] Leader for TabletID 72057594037927937 is [22:91:2118] sender: [22:92:2057] recipient: [22:88:2117] !Reboot 72057594037927937 (actor [22:59:2099]) rebooted! !Reboot 72057594037927937 (actor [22:59:2099]) tablet resolver refreshed! 
new actor is[22:91:2118] Leader for TabletID 72057594037927937 is [22:91:2118] sender: [22:177:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:57:2057] recipient: [23:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:57:2057] recipient: [23:53:2097] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:60:2057] recipient: [23:53:2097] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:77:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:86:2057] recipient: [23:38:2085] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:89:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:90:2057] recipient: [23:88:2117] Leader for TabletID 72057594037927937 is [23:91:2118] sender: [23:92:2057] recipient: [23:88:2117] !Reboot 72057594037927937 (actor [23:59:2099]) rebooted! !Reboot 72057594037927937 (actor [23:59:2099]) tablet resolver refreshed! new actor is[23:91:2118] Leader for TabletID 72057594037927937 is [23:91:2118] sender: [23:177:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:60:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:77:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:87:2057] recipient: [24:38:2085] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:90:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:91:2057] recipient: [24:89:2117] Leader for TabletID 72057594037927937 is [24:92:2118] sender: [24:93:2057] recipient: [24:89:2117] !Reboot 72057594037927937 (actor [24:59:2099]) rebooted! !Reboot 72057594037927937 (actor [24:59:2099]) tablet resolver refreshed! new actor is[24:92:2118] Leader for TabletID 72057594037927937 is [24:92:2118] sender: [24:110:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:57:2057] recipient: [25:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:57:2057] recipient: [25:54:2097] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:60:2057] recipient: [25:54:2097] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:77:2057] recipient: [25:14:2061] !Reboot 72057594037927937 (actor [25:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:89:2057] recipient: [25:38:2085] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:92:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:93:2057] recipient: [25:91:2119] Leader for TabletID 72057594037927937 is [25:94:2120] sender: [25:95:2057] recipient: [25:91:2119] !Reboot 72057594037927937 (actor [25:59:2099]) rebooted! !Reboot 72057594037927937 (actor [25:59:2099]) tablet resolver refreshed! 
new actor is[25:94:2120] Leader for TabletID 72057594037927937 is [25:94:2120] sender: [25:180:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:57:2057] recipient: [26:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:57:2057] recipient: [26:54:2097] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:60:2057] recipient: [26:54:2097] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:77:2057] recipient: [26:14:2061] !Reboot 72057594037927937 (actor [26:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:89:2057] recipient: [26:38:2085] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:92:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:93:2057] recipient: [26:91:2119] Leader for TabletID 72057594037927937 is [26:94:2120] sender: [26:95:2057] recipient: [26:91:2119] !Reboot 72057594037927937 (actor [26:59:2099]) rebooted! !Reboot 72057594037927937 (actor [26:59:2099]) tablet resolver refreshed! new actor is[26:94:2120] Leader for TabletID 72057594037927937 is [26:94:2120] sender: [26:180:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:57:2057] recipient: [27:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:57:2057] recipient: [27:53:2097] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:60:2057] recipient: [27:53:2097] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:77:2057] recipient: [27:14:2061] !Reboot 72057594037927937 (actor [27:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:90:2057] recipient: [27:38:2085] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:92:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:94:2057] recipient: [27:93:2119] Leader for TabletID 72057594037927937 is [27:95:2120] sender: [27:96:2057] recipient: [27:93:2119] !Reboot 72057594037927937 (actor [27:59:2099]) rebooted! !Reboot 72057594037927937 (actor [27:59:2099]) tablet resolver refreshed! new actor is[27:95:2120] Leader for TabletID 72057594037927937 is [27:95:2120] sender: [27:181:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:57:2057] recipient: [28:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:57:2057] recipient: [28:53:2097] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:60:2057] recipient: [28:53:2097] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:77:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:93:2057] recipient: [28:38:2085] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:96:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:97:2057] recipient: [28:95:2122] Leader for TabletID 72057594037927937 is [28:98:2123] sender: [28:99:2057] recipient: [28:95:2122] !Reboot 72057594037927937 (actor [28:59:2099]) rebooted! !Reboot 72057594037927937 (actor [28:59:2099]) tablet resolver refreshed! 
new actor is[28:98:2123] Leader for TabletID 72057594037927937 is [28:98:2123] sender: [28:184:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:57:2057] recipient: [29:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:57:2057] recipient: [29:54:2097] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:60:2057] recipient: [29:54:2097] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:77:2057] recipient: [29:14:2061] !Reboot 72057594037927937 (actor [29:59:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:93:2057] recipient: [29:38:2085] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:96:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:97:2057] recipient: [29:95:2122] Leader for TabletID 72057594037927937 is [29:98:2123] sender: [29:99:2057] recipient: [29:95:2122] !Reboot 72057594037927937 (actor [29:59:2099]) rebooted! !Reboot 72057594037927937 (actor [29:59:2099]) tablet resolver refreshed! new actor is[29:98:2123] Leader for TabletID 72057594037927937 is [29:98:2123] sender: [29:184:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:60:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:77:2057] recipient: [30:14:2061] >> SystemView::StoragePoolsRanges >> SystemView::VSlotsFields >> SystemView::AuthGroups_Access >> ShowCreateView::WithTablePathPrefix >> SystemView::PartitionStatsOneSchemeShard >> KqpPg::PgUpdateCompoundKey-useSink [GOOD] >> PgCatalog::CheckSetConfig [GOOD] >> PgCatalog::PgDatabase+useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::PgUpdateCompoundKey-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 18438, MsgBus: 61686 2025-06-25T14:36:51.646488Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895937355923040:2113];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:51.676961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ae0/r3tmp/tmpNRoD7h/pdisk_1.dat 2025-06-25T14:36:51.996185Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18438, node 1 2025-06-25T14:36:52.007809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:52.007901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:52.045291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:52.184937Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:52.184966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from 
file: (empty maybe) 2025-06-25T14:36:52.184973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:52.185355Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61686 TClient is connected to server localhost:61686 2025-06-25T14:36:52.676521Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:36:52.853416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 16 2025-06-25T14:36:54.866876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:55.012926Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1000_b (key, value) VALUES ( '0'::int2, ARRAY ['false'::bool, 'false'::bool] ); 2025-06-25T14:36:55.062941Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895954535792866:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:55.063020Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:55.063091Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895954535792878:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:55.067502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:55.083335Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895954535792880:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T14:36:55.187091Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895954535792931:2397] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } --!syntax_pg INSERT INTO Pg1000_b (key, value) VALUES ( '1'::int2, ARRAY ['true'::bool, 'true'::bool] ); 18 2025-06-25T14:36:55.963165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:56.032449Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::"char", '0'::"char"] ); --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::"char", '1'::"char"] ); --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::"char", '2'::"char"] ); 21 2025-06-25T14:36:56.647655Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895937355923040:2113];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:56.648021Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:36:56.677160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int2, '0'::int2] ); --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int2, '1'::int2] ); --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int2, '2'::int2] ); 23 2025-06-25T14:36:57.246998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:57.304353Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int4, '0'::int4] ); --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int4, '1'::int4] ); --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int4, '2'::int4] ); 20 2025-06-25T14:36:57.836390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation 
type: ESchemeOpCreateTable, opId: 281474976715682:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:57.898544Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int8, '0'::int8] ); --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int8, '1'::int8] ); --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int8, '2'::int8] ); 700 2025-06-25T14:36:58.413096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715688:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:58.459105Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '0'::int2, ARRAY ['0.5'::float4, '0.5'::float4] ); --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '1'::int2, ARRAY ['1.5'::float4, '1.5'::float4] ); --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '2'::int2, ARRAY ['2.5'::float4, '2.5'::float4] ); 701 2025-06-25T14:36:58.940109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715694:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:59.000665Z node 1 :READ_TABLE_API WARN: rpc_read_table.cp ... ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:33.806766Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7519896892749044509:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:33.806842Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:33.806963Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7519896892749044514:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:33.811342Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:33.844654Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [9:7519896892749044516:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:40:33.899064Z node 9 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [9:7519896892749044567:2389] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:34.066628Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7519896897044011908:2319], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Cannot update primary key column: key1
:1:1: Error: Cannot update primary key column: key2 2025-06-25T14:40:34.066806Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=9&id=ODg2YmU2ZDctNjQ3NDcyOGMtY2Y5NGYyMmQtYTE5ZTY2ZTE=, ActorId: [9:7519896897044011901:2315], ActorState: ExecuteState, TraceId: 01jykrh3qs9104w619teyve6x2, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:40:34.100401Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 29643, MsgBus: 30416 2025-06-25T14:40:35.601826Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519896902142291787:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:35.601933Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ae0/r3tmp/tmpUZBoJ3/pdisk_1.dat 2025-06-25T14:40:35.718266Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:35.726998Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519896902142291768:2080] 1750862435601415 != 1750862435601418 TServer::EnableGrpc on GrpcPort 29643, node 10 2025-06-25T14:40:35.739799Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:35.739888Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:35.741699Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:35.771778Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:35.771798Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:35.771807Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:35.771940Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30416 TClient is connected to server localhost:30416 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:36.394163Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:36.607885Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:39.432306Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519896919322161585:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:39.432503Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:39.441450Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:39.553972Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519896919322161690:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:39.554044Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519896919322161695:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:39.554049Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:39.557660Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:39.566793Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519896919322161697:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:40:39.648622Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519896919322161750:2392] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:40.050312Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7519896923617129111:2327], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Cannot update primary key column: key1
:1:1: Error: Cannot update primary key column: key2 2025-06-25T14:40:40.050602Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=10&id=YTEzYTE4NzQtZTIxOWI1NDMtM2RmYjViNzQtODhmMWQ0Y2Y=, ActorId: [10:7519896923617129104:2323], ActorState: ExecuteState, TraceId: 01jykrh9jke4gcazjsen99f96t, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:40:40.056065Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:40:40.601988Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519896902142291787:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:40.602063Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorksNewApi [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadRangeDataLimitThenLimitWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:85:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:86:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:86:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:87:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:87:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:86:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:88:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:88:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:57:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:60:2057] recipient: [13:52:2097] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:77:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:79:2057] recipient: [13:38:2085] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:82:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:59:2099] sender: [13:83:2057] recipient: [13:81:2112] Leader for TabletID 72057594037927937 is [13:84:2113] sender: [13:85:2057] recipient: [13:81:2112] !Reboot 72057594037927937 (actor [13:59:2099]) rebooted! 
!Reboot 72057594037927937 (actor [13:59:2099]) tablet resolver refreshed! new actor is[13:84:2113] Leader for TabletID 72057594037927937 is [13:84:2113] sender: [13:170:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:57:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:60:2057] recipient: [14:54:2097] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:77:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:79:2057] recipient: [14:38:2085] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:81:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:59:2099] sender: [14:83:2057] recipient: [14:82:2112] Leader for TabletID 72057594037927937 is [14:84:2113] sender: [14:85:2057] recipient: [14:82:2112] !Reboot 72057594037927937 (actor [14:59:2099]) rebooted! !Reboot 72057594037927937 (actor [14:59:2099]) tablet resolver refreshed! new actor is[14:84:2113] Leader for TabletID 72057594037927937 is [14:84:2113] sender: [14:170:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:57:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:60:2057] recipient: [15:53:2097] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:77:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:80:2057] recipient: [15:38:2085] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:83:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:59:2099] sender: [15:84:2057] recipient: [15:82:2112] Leader for TabletID 72057594037927937 is [15:85:2113] sender: [15:86:2057] recipient: [15:82:2112] !Reboot 72057594037927937 (actor [15:59:2099]) rebooted! !Reboot 72057594037927937 (actor [15:59:2099]) tablet resolver refreshed! new actor is[15:85:2113] Leader for TabletID 72057594037927937 is [15:85:2113] sender: [15:171:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:57:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:60:2057] recipient: [16:53:2097] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:77:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:83:2057] recipient: [16:38:2085] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:86:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:59:2099] sender: [16:87:2057] recipient: [16:85:2115] Leader for TabletID 72057594037927937 is [16:88:2116] sender: [16:89:2057] recipient: [16:85:2115] !Reboot 72057594037927937 (actor [16:59:2099]) rebooted! !Reboot 72057594037927937 (actor [16:59:2099]) tablet resolver refreshed! 
new actor is[16:88:2116] Leader for TabletID 72057594037927937 is [16:88:2116] sender: [16:174:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:57:2057] recipient: [17:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:57:2057] recipient: [17:52:2097] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:60:2057] recipient: [17:52:2097] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:77:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:83:2057] recipient: [17:38:2085] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:86:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:59:2099] sender: [17:87:2057] recipient: [17:85:2115] Leader for TabletID 72057594037927937 is [17:88:2116] sender: [17:89:2057] recipient: [17:85:2115] !Reboot 72057594037927937 (actor [17:59:2099]) rebooted! !Reboot 72057594037927937 (actor [17:59:2099]) tablet resolver refreshed! new actor is[17:88:2116] Leader for TabletID 72057594037927937 is [17:88:2116] sender: [17:174:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:57:2057] recipient: [18:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:57:2057] recipient: [18:53:2097] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:60:2057] recipient: [18:53:2097] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:77:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:84:2057] recipient: [18:38:2085] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:87:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:88:2057] recipient: [18:86:2115] Leader for TabletID 72057594037927937 is [18:89:2116] sender: [18:90:2057] recipient: [18:86:2115] !Reboot 72057594037927937 (actor [18:59:2099]) rebooted! !Reboot 72057594037927937 (actor [18:59:2099]) tablet resolver refreshed! new actor is[18:89:2116] Leader for TabletID 72057594037927937 is [18:89:2116] sender: [18:107:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:60:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:77:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:86:2057] recipient: [19:38:2085] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:89:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:90:2057] recipient: [19:88:2117] Leader for TabletID 72057594037927937 is [19:91:2118] sender: [19:92:2057] recipient: [19:88:2117] !Reboot 72057594037927937 (actor [19:59:2099]) rebooted! !Reboot 72057594037927937 (actor [19:59:2099]) tablet resolver refreshed! 
new actor is[19:91:2118] Leader for TabletID 72057594037927937 is [19:91:2118] sender: [19:177:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:60:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:77:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:86:2057] recipient: [20:38:2085] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:89:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:90:2057] recipient: [20:88:2117] Leader for TabletID 72057594037927937 is [20:91:2118] sender: [20:92:2057] recipient: [20:88:2117] !Reboot 72057594037927937 (actor [20:59:2099]) rebooted! !Reboot 72057594037927937 (actor [20:59:2099]) tablet resolver refreshed! new actor is[20:91:2118] Leader for TabletID 72057594037927937 is [20:91:2118] sender: [20:177:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:60:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:77:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:87:2057] recipient: [21:38:2085] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:90:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:91:2057] recipient: [21:89:2117] Leader for TabletID 72057594037927937 is [21:92:2118] sender: [21:93:2057] recipient: [21:89:2117] !Reboot 72057594037927937 (actor [21:59:2099]) rebooted! !Reboot 72057594037927937 (actor [21:59:2099]) tablet resolver refreshed! new actor is[21:92:2118] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:60:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:77:2057] recipient: [22:14:2061] >> SystemView::ShowCreateTablePartitionAtKeys >> SystemView::PartitionStatsOneSchemeShard [GOOD] >> SystemView::PartitionStatsOneSchemeShardDataQuery >> TKeyValueTest::TestConcatWorksNewApi [GOOD] >> TKeyValueTest::TestConcatToLongKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/runtime/unittest >> KqpScanLogs::GraceJoin-EnabledLogs 2025-06-25 14:40:27,643 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 14:40:27,759 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination: pid rss ref pdirt 130271 48.7M 48.6M 25.2M test_tool run_ut @/home/runner/.ya/build/build_root/yft8/0015af/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1/testing_out_stuff/test_tool.args 130751 1.8G 1.8G 1.7G └─ ydb-core-kqp-ut-runtime --trace-path-append /home/runner/.ya/build/build_root/yft8/0015af/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1/ytest.re Test command err: cwd: /home/runner/.ya/build/build_root/yft8/0015af/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1 Trying to start YDB, gRPC: 17939, MsgBus: 16457 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015af/r3tmp/tmp0VmPaM/pdisk_1.dat TServer::EnableGrpc on GrpcPort 17939, node 1 TClient is connected to server localhost:16457 TClient is connected to server localhost:16457 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '778) '('"_id" '"6da9f94b-25172ef1-c2d3dcaf-1e15e662") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1 '"HashV1")) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '676) '('"_id" '"ac9af41a-d39718b9-fcd79489-833f888c") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '688) '('"_id" '"f5bb2ea1-f17294f9-c79f785a-9084d84f")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) ) cwd: /home/runner/.ya/build/build_root/yft8/0015af/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1 Trying to start YDB, gRPC: 13373, MsgBus: 15686 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015af/r3tmp/tmpgIopZ9/pdisk_1.dat TServer::EnableGrpc on GrpcPort 13373, node 2 TClient is connected to server localhost:15686 TClient is connected to server localhost:15686 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/0015af/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/0015af/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDontExistGroupAttribute >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsDisableRequestToAD >> LdapAuthProviderTest_nonSecure::LdapRefreshRemoveUserBad >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGood >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsFromAdLdapServer >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithoutLoginPlaceholders [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnames [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromIpV4List [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromIpV6List [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesLdapsScheme [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesUnknownScheme [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshRemoveUserBad >> SystemView::VSlotsFields [GOOD] >> SystemView::TopPartitionsByCpuTables >> SystemView::StoragePoolsRanges [GOOD] >> SystemView::TopPartitionsByCpuFields |83.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesUnknownScheme [GOOD] >> SystemView::AuthGroups_Access [GOOD] >> SystemView::AuthGroups_ResultOrder >> SystemView::Nodes |83.3%| [TA] $(B)/ydb/core/kqp/ut/runtime/test-results/unittest/{meta.json ... 
results_accumulator.log} >> SystemView::CollectPreparedQueries >> TKeyValueTest::TestInlineWriteReadWithRestartsWithNotCorrectUTF8NewApi [GOOD] >> TAsyncIndexTests::CdcAndSplitWithReboots[TabletReboots] [GOOD] >> TKeyValueTest::TestLargeWriteAndDelete [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineWriteReadWithRestartsWithNotCorrectUTF8NewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:87:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:91:2057] recipient: [8:89:2118] Leader for TabletID 72057594037927937 is [8:92:2119] sender: [8:93:2057] recipient: [8:89:2118] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:92:2119] Leader for TabletID 72057594037927937 is [8:92:2119] sender: [8:178:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:87:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:91:2057] recipient: [9:89:2118] Leader for TabletID 72057594037927937 is [9:92:2119] sender: [9:93:2057] recipient: [9:89:2118] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:92:2119] Leader for TabletID 72057594037927937 is [9:92:2119] sender: [9:178:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:88:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:92:2057] recipient: [10:90:2118] Leader for TabletID 72057594037927937 is [10:93:2119] sender: [10:94:2057] recipient: [10:90:2118] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:93:2119] Leader for TabletID 72057594037927937 is [10:93:2119] sender: [10:179:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:91:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:94:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:95:2057] recipient: [11:93:2121] Leader for TabletID 72057594037927937 is [11:96:2122] sender: [11:97:2057] recipient: [11:93:2121] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:96:2122] Leader for TabletID 72057594037927937 is [11:96:2122] sender: [11:182:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061 ... 9:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:106:2057] recipient: [25:38:2085] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:109:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:110:2057] recipient: [25:108:2131] Leader for TabletID 72057594037927937 is [25:111:2132] sender: [25:112:2057] recipient: [25:108:2131] !Reboot 72057594037927937 (actor [25:59:2099]) rebooted! !Reboot 72057594037927937 (actor [25:59:2099]) tablet resolver refreshed! new actor is[25:111:2132] Leader for TabletID 72057594037927937 is [25:111:2132] sender: [25:129:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:57:2057] recipient: [26:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:57:2057] recipient: [26:54:2097] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:60:2057] recipient: [26:54:2097] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:77:2057] recipient: [26:14:2061] !Reboot 72057594037927937 (actor [26:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:108:2057] recipient: [26:38:2085] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:111:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:112:2057] recipient: [26:110:2133] Leader for TabletID 72057594037927937 is [26:113:2134] sender: [26:114:2057] recipient: [26:110:2133] !Reboot 72057594037927937 (actor [26:59:2099]) rebooted! !Reboot 72057594037927937 (actor [26:59:2099]) tablet resolver refreshed! new actor is[26:113:2134] Leader for TabletID 72057594037927937 is [26:113:2134] sender: [26:199:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:57:2057] recipient: [27:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:57:2057] recipient: [27:53:2097] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:60:2057] recipient: [27:53:2097] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:77:2057] recipient: [27:14:2061] !Reboot 72057594037927937 (actor [27:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:108:2057] recipient: [27:38:2085] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:111:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:112:2057] recipient: [27:110:2133] Leader for TabletID 72057594037927937 is [27:113:2134] sender: [27:114:2057] recipient: [27:110:2133] !Reboot 72057594037927937 (actor [27:59:2099]) rebooted! !Reboot 72057594037927937 (actor [27:59:2099]) tablet resolver refreshed! new actor is[27:113:2134] Leader for TabletID 72057594037927937 is [27:113:2134] sender: [27:199:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:57:2057] recipient: [28:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:57:2057] recipient: [28:53:2097] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:60:2057] recipient: [28:53:2097] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:77:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:109:2057] recipient: [28:38:2085] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:112:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:113:2057] recipient: [28:111:2133] Leader for TabletID 72057594037927937 is [28:114:2134] sender: [28:115:2057] recipient: [28:111:2133] !Reboot 72057594037927937 (actor [28:59:2099]) rebooted! !Reboot 72057594037927937 (actor [28:59:2099]) tablet resolver refreshed! 
new actor is[28:114:2134] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:57:2057] recipient: [29:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:57:2057] recipient: [29:54:2097] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:60:2057] recipient: [29:54:2097] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:77:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:60:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:77:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:57:2057] recipient: [31:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:57:2057] recipient: [31:53:2097] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:60:2057] recipient: [31:53:2097] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:77:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:79:2057] recipient: [31:38:2085] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:82:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:83:2057] recipient: [31:81:2112] Leader for TabletID 72057594037927937 is [31:84:2113] sender: [31:85:2057] recipient: [31:81:2112] !Reboot 72057594037927937 (actor [31:59:2099]) rebooted! !Reboot 72057594037927937 (actor [31:59:2099]) tablet resolver refreshed! new actor is[31:84:2113] Leader for TabletID 72057594037927937 is [31:84:2113] sender: [31:170:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:57:2057] recipient: [32:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:57:2057] recipient: [32:53:2097] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:60:2057] recipient: [32:53:2097] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:77:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:79:2057] recipient: [32:38:2085] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:82:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:83:2057] recipient: [32:81:2112] Leader for TabletID 72057594037927937 is [32:84:2113] sender: [32:85:2057] recipient: [32:81:2112] !Reboot 72057594037927937 (actor [32:59:2099]) rebooted! !Reboot 72057594037927937 (actor [32:59:2099]) tablet resolver refreshed! 
new actor is[32:84:2113] Leader for TabletID 72057594037927937 is [32:84:2113] sender: [32:170:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:57:2057] recipient: [33:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:57:2057] recipient: [33:53:2097] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:60:2057] recipient: [33:53:2097] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:77:2057] recipient: [33:14:2061] !Reboot 72057594037927937 (actor [33:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:80:2057] recipient: [33:38:2085] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:83:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:84:2057] recipient: [33:82:2112] Leader for TabletID 72057594037927937 is [33:85:2113] sender: [33:86:2057] recipient: [33:82:2112] !Reboot 72057594037927937 (actor [33:59:2099]) rebooted! !Reboot 72057594037927937 (actor [33:59:2099]) tablet resolver refreshed! new actor is[33:85:2113] Leader for TabletID 72057594037927937 is [33:85:2113] sender: [33:171:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:57:2057] recipient: [34:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:57:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:60:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:77:2057] recipient: [34:14:2061] !Reboot 72057594037927937 (actor [34:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:83:2057] recipient: [34:38:2085] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:86:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:87:2057] recipient: [34:85:2115] Leader for TabletID 72057594037927937 is [34:88:2116] sender: [34:89:2057] recipient: [34:85:2115] !Reboot 72057594037927937 (actor [34:59:2099]) rebooted! !Reboot 72057594037927937 (actor [34:59:2099]) tablet resolver refreshed! new actor is[34:88:2116] Leader for TabletID 72057594037927937 is [34:88:2116] sender: [34:174:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:57:2057] recipient: [35:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:57:2057] recipient: [35:53:2097] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:60:2057] recipient: [35:53:2097] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:77:2057] recipient: [35:14:2061] !Reboot 72057594037927937 (actor [35:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:83:2057] recipient: [35:38:2085] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:86:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:87:2057] recipient: [35:85:2115] Leader for TabletID 72057594037927937 is [35:88:2116] sender: [35:89:2057] recipient: [35:85:2115] !Reboot 72057594037927937 (actor [35:59:2099]) rebooted! !Reboot 72057594037927937 (actor [35:59:2099]) tablet resolver refreshed! 
new actor is[35:88:2116] Leader for TabletID 72057594037927937 is [35:88:2116] sender: [35:174:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:57:2057] recipient: [36:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:57:2057] recipient: [36:53:2097] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:60:2057] recipient: [36:53:2097] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:77:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:84:2057] recipient: [36:38:2085] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:87:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:88:2057] recipient: [36:86:2115] Leader for TabletID 72057594037927937 is [36:89:2116] sender: [36:90:2057] recipient: [36:86:2115] !Reboot 72057594037927937 (actor [36:59:2099]) rebooted! !Reboot 72057594037927937 (actor [36:59:2099]) tablet resolver refreshed! new actor is[36:89:2116] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:57:2057] recipient: [37:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:57:2057] recipient: [37:54:2097] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:60:2057] recipient: [37:54:2097] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:77:2057] recipient: [37:14:2061] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDontExistGroupAttribute [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserLoginBad ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestLargeWriteAndDelete [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! 
new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! 
new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvGetStorageChannelStatus ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:85:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:89:2057] recipient: [7:87:2117] Leader for TabletID 72057594037927937 is [7:90:2118] sender: [7:91:2057] recipient: [7:87:2117] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:90:2118] Leader for TabletID 72057594037927937 is [7:90:2118] sender: [7:176:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:85:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:89:2057] recipient: [8:87:2117] Leader for TabletID 72057594037927937 is [8:90:2118] sender: [8:91:2057] recipient: [8:87:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! 
new actor is[8:90:2118] Leader for TabletID 72057594037927937 is [8:90:2118] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:87:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:91:2057] recipient: [9:89:2119] Leader for TabletID 72057594037927937 is [9:92:2120] sender: [9:93:2057] recipient: [9:89:2119] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:92:2120] Leader for TabletID 72057594037927937 is [9:92:2120] sender: [9:178:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:90:2119] Leader for TabletID 72057594037927937 is [10:92:2120] sender: [10:93:2057] recipient: [10:90:2119] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:92:2120] Leader for TabletID 72057594037927937 is [10:92:2120] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:89:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:93:2057] recipient: [11:91:2121] Leader for TabletID 72057594037927937 is [11:94:2122] sender: [11:95:2057] recipient: [11:91:2121] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! 
new actor is[11:94:2122] Leader for TabletID 72057594037927937 is [11:94:2122] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Re ... [18:59:2099] sender: [18:98:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:59:2099] sender: [18:99:2057] recipient: [18:97:2125] Leader for TabletID 72057594037927937 is [18:100:2126] sender: [18:101:2057] recipient: [18:97:2125] !Reboot 72057594037927937 (actor [18:59:2099]) rebooted! !Reboot 72057594037927937 (actor [18:59:2099]) tablet resolver refreshed! new actor is[18:100:2126] Leader for TabletID 72057594037927937 is [18:100:2126] sender: [18:186:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:57:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:60:2057] recipient: [19:53:2097] Leader for TabletID 72057594037927937 is [19:59:2099] sender: [19:77:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:57:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:60:2057] recipient: [20:54:2097] Leader for TabletID 72057594037927937 is [20:59:2099] sender: [20:77:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:57:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:60:2057] recipient: [21:52:2097] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:77:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:79:2057] recipient: [21:38:2085] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:82:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:59:2099] sender: [21:83:2057] recipient: [21:81:2112] Leader for TabletID 72057594037927937 is [21:84:2113] sender: [21:85:2057] recipient: [21:81:2112] !Reboot 72057594037927937 (actor [21:59:2099]) rebooted! !Reboot 72057594037927937 (actor [21:59:2099]) tablet resolver refreshed! new actor is[21:84:2113] Leader for TabletID 72057594037927937 is [21:84:2113] sender: [21:170:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:57:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:60:2057] recipient: [22:53:2097] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:77:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:59:2099]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! 
Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:79:2057] recipient: [22:38:2085] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:82:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:59:2099] sender: [22:83:2057] recipient: [22:81:2112] Leader for TabletID 72057594037927937 is [22:84:2113] sender: [22:85:2057] recipient: [22:81:2112] !Reboot 72057594037927937 (actor [22:59:2099]) rebooted! !Reboot 72057594037927937 (actor [22:59:2099]) tablet resolver refreshed! new actor is[22:84:2113] Leader for TabletID 72057594037927937 is [22:84:2113] sender: [22:170:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:57:2057] recipient: [23:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:57:2057] recipient: [23:53:2097] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:60:2057] recipient: [23:53:2097] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:77:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:80:2057] recipient: [23:38:2085] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:83:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:59:2099] sender: [23:84:2057] recipient: [23:82:2112] Leader for TabletID 72057594037927937 is [23:85:2113] sender: [23:86:2057] recipient: [23:82:2112] !Reboot 72057594037927937 (actor [23:59:2099]) rebooted! !Reboot 72057594037927937 (actor [23:59:2099]) tablet resolver refreshed! new actor is[23:85:2113] Leader for TabletID 72057594037927937 is [23:85:2113] sender: [23:171:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:60:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:77:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:83:2057] recipient: [24:38:2085] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:86:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:87:2057] recipient: [24:85:2115] Leader for TabletID 72057594037927937 is [24:88:2116] sender: [24:89:2057] recipient: [24:85:2115] !Reboot 72057594037927937 (actor [24:59:2099]) rebooted! !Reboot 72057594037927937 (actor [24:59:2099]) tablet resolver refreshed! new actor is[24:88:2116] Leader for TabletID 72057594037927937 is [24:88:2116] sender: [24:174:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:57:2057] recipient: [25:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:57:2057] recipient: [25:54:2097] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:60:2057] recipient: [25:54:2097] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:77:2057] recipient: [25:14:2061] !Reboot 72057594037927937 (actor [25:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:83:2057] recipient: [25:38:2085] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:86:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:87:2057] recipient: [25:85:2115] Leader for TabletID 72057594037927937 is [25:88:2116] sender: [25:89:2057] recipient: [25:85:2115] !Reboot 72057594037927937 (actor [25:59:2099]) rebooted! !Reboot 72057594037927937 (actor [25:59:2099]) tablet resolver refreshed! new actor is[25:88:2116] Leader for TabletID 72057594037927937 is [25:88:2116] sender: [25:174:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:57:2057] recipient: [26:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:57:2057] recipient: [26:54:2097] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:60:2057] recipient: [26:54:2097] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:77:2057] recipient: [26:14:2061] !Reboot 72057594037927937 (actor [26:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:84:2057] recipient: [26:38:2085] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:87:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:88:2057] recipient: [26:86:2115] Leader for TabletID 72057594037927937 is [26:89:2116] sender: [26:90:2057] recipient: [26:86:2115] !Reboot 72057594037927937 (actor [26:59:2099]) rebooted! !Reboot 72057594037927937 (actor [26:59:2099]) tablet resolver refreshed! new actor is[26:89:2116] Leader for TabletID 72057594037927937 is [26:89:2116] sender: [26:175:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:57:2057] recipient: [27:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:57:2057] recipient: [27:53:2097] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:60:2057] recipient: [27:53:2097] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:77:2057] recipient: [27:14:2061] !Reboot 72057594037927937 (actor [27:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:87:2057] recipient: [27:38:2085] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:90:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:91:2057] recipient: [27:89:2118] Leader for TabletID 72057594037927937 is [27:92:2119] sender: [27:93:2057] recipient: [27:89:2118] !Reboot 72057594037927937 (actor [27:59:2099]) rebooted! !Reboot 72057594037927937 (actor [27:59:2099]) tablet resolver refreshed! new actor is[27:92:2119] Leader for TabletID 72057594037927937 is [27:92:2119] sender: [27:178:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:57:2057] recipient: [28:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:57:2057] recipient: [28:53:2097] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:60:2057] recipient: [28:53:2097] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:77:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:87:2057] recipient: [28:38:2085] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:90:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:91:2057] recipient: [28:89:2118] Leader for TabletID 72057594037927937 is [28:92:2119] sender: [28:93:2057] recipient: [28:89:2118] !Reboot 72057594037927937 (actor [28:59:2099]) rebooted! !Reboot 72057594037927937 (actor [28:59:2099]) tablet resolver refreshed! new actor is[28:92:2119] Leader for TabletID 72057594037927937 is [28:92:2119] sender: [28:178:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:57:2057] recipient: [29:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:57:2057] recipient: [29:54:2097] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:60:2057] recipient: [29:54:2097] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:77:2057] recipient: [29:14:2061] !Reboot 72057594037927937 (actor [29:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:88:2057] recipient: [29:38:2085] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:91:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:92:2057] recipient: [29:90:2118] Leader for TabletID 72057594037927937 is [29:93:2119] sender: [29:94:2057] recipient: [29:90:2118] !Reboot 72057594037927937 (actor [29:59:2099]) rebooted! !Reboot 72057594037927937 (actor [29:59:2099]) tablet resolver refreshed! new actor is[29:93:2119] Leader for TabletID 72057594037927937 is [29:93:2119] sender: [29:179:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:60:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:77:2057] recipient: [30:14:2061] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsDisableRequestToAD [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithRemovedUserCredentialsBad >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsFromAdLdapServer [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGood >> DbCounters::TabletsSimple >> PgCatalog::PgDatabase+useSink [GOOD] >> PgCatalog::PgDatabase-useSink |83.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::CdcAndSplitWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: 
[1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:34:57.249835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:34:57.249958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:57.250004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:34:57.250046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:34:57.250094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:34:57.250134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:34:57.250191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:34:57.250280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:34:57.251195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:34:57.251569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:34:57.348900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:34:57.348970Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:34:57.350119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:34:57.364747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:34:57.367597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:34:57.367752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, 
schemeshardId: 72057594046678944 2025-06-25T14:34:57.381234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:34:57.381497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:34:57.382298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:57.382620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:34:57.385740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:57.385961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:34:57.387179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:34:57.387270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:34:57.387397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:34:57.387453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:34:57.387498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:34:57.387640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:34:57.396045Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:34:57.563733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:34:57.564017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:57.564271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:34:57.565376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain 
target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:34:57.565719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:34:57.565835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:34:57.570809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:57.571034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:34:57.571262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:57.571341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:34:57.571393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:34:57.571430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:34:57.576004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:57.576102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:34:57.576159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:34:57.578840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:57.578914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:34:57.578996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:34:57.579077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:34:57.583390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:34:57.585879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 
1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:34:57.586106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:34:57.587214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:34:57.587376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... ue BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 2 IsBackup: false CdcStreams { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 6 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\001\000\004\000\000\0002\000\000\000" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 
PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:40:46.136954Z node 118 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409550:2][72075186233409546][118:1089:2872] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:40:46.137072Z node 118 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][118:1047:2872] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:40:46.137245Z node 118 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409550:2][72075186233409546][118:1089:2872] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750862446092387 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750862446092387 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 5 Group: 1750862446092387 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:40:46.141422Z node 118 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409550:2][72075186233409546][118:1089:2872] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 5 Status: STATUS_OK Reason: REASON_NONE } 
LastRecordOrder: 5 2025-06-25T14:40:46.141524Z node 118 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][118:1047:2872] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:40:46.412232Z node 118 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:40:46.412539Z node 118 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 340us result status StatusSuccess 2025-06-25T14:40:46.413204Z node 118 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 
ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOk [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:83:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:83:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:86:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:86:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:107:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:86:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:89:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:89:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:88:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:88:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:110:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:89:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:93:2057] recipient: [11:91:2119] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:95:2057] recipient: [11:91:2119] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:94:2120] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (a ... 927937 (actor [23:59:2099]) tablet resolver refreshed! new actor is[23:88:2116] Leader for TabletID 72057594037927937 is [23:88:2116] sender: [23:174:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:57:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:60:2057] recipient: [24:53:2097] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:77:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:84:2057] recipient: [24:38:2085] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:87:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:59:2099] sender: [24:88:2057] recipient: [24:86:2115] Leader for TabletID 72057594037927937 is [24:89:2116] sender: [24:90:2057] recipient: [24:86:2115] !Reboot 72057594037927937 (actor [24:59:2099]) rebooted! !Reboot 72057594037927937 (actor [24:59:2099]) tablet resolver refreshed! new actor is[24:89:2116] Leader for TabletID 72057594037927937 is [24:89:2116] sender: [24:175:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:57:2057] recipient: [25:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:57:2057] recipient: [25:54:2097] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:60:2057] recipient: [25:54:2097] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:77:2057] recipient: [25:14:2061] !Reboot 72057594037927937 (actor [25:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:87:2057] recipient: [25:38:2085] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:89:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:59:2099] sender: [25:91:2057] recipient: [25:90:2118] Leader for TabletID 72057594037927937 is [25:92:2119] sender: [25:93:2057] recipient: [25:90:2118] !Reboot 72057594037927937 (actor [25:59:2099]) rebooted! !Reboot 72057594037927937 (actor [25:59:2099]) tablet resolver refreshed! 
new actor is[25:92:2119] Leader for TabletID 72057594037927937 is [25:92:2119] sender: [25:178:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:57:2057] recipient: [26:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:57:2057] recipient: [26:54:2097] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:60:2057] recipient: [26:54:2097] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:77:2057] recipient: [26:14:2061] !Reboot 72057594037927937 (actor [26:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:87:2057] recipient: [26:38:2085] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:90:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [26:59:2099] sender: [26:91:2057] recipient: [26:89:2118] Leader for TabletID 72057594037927937 is [26:92:2119] sender: [26:93:2057] recipient: [26:89:2118] !Reboot 72057594037927937 (actor [26:59:2099]) rebooted! !Reboot 72057594037927937 (actor [26:59:2099]) tablet resolver refreshed! new actor is[26:92:2119] Leader for TabletID 72057594037927937 is [26:92:2119] sender: [26:178:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:57:2057] recipient: [27:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:57:2057] recipient: [27:53:2097] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:60:2057] recipient: [27:53:2097] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:77:2057] recipient: [27:14:2061] !Reboot 72057594037927937 (actor [27:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:88:2057] recipient: [27:38:2085] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:90:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:92:2057] recipient: [27:91:2118] Leader for TabletID 72057594037927937 is [27:93:2119] sender: [27:94:2057] recipient: [27:91:2118] !Reboot 72057594037927937 (actor [27:59:2099]) rebooted! !Reboot 72057594037927937 (actor [27:59:2099]) tablet resolver refreshed! new actor is[27:93:2119] Leader for TabletID 72057594037927937 is [27:93:2119] sender: [27:179:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:57:2057] recipient: [28:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:57:2057] recipient: [28:53:2097] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:60:2057] recipient: [28:53:2097] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:77:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:91:2057] recipient: [28:38:2085] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:93:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:95:2057] recipient: [28:94:2121] Leader for TabletID 72057594037927937 is [28:96:2122] sender: [28:97:2057] recipient: [28:94:2121] !Reboot 72057594037927937 (actor [28:59:2099]) rebooted! !Reboot 72057594037927937 (actor [28:59:2099]) tablet resolver refreshed! 
new actor is[28:96:2122] Leader for TabletID 72057594037927937 is [28:96:2122] sender: [28:182:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:57:2057] recipient: [29:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:57:2057] recipient: [29:54:2097] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:60:2057] recipient: [29:54:2097] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:77:2057] recipient: [29:14:2061] !Reboot 72057594037927937 (actor [29:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:91:2057] recipient: [29:38:2085] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:94:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:95:2057] recipient: [29:93:2121] Leader for TabletID 72057594037927937 is [29:96:2122] sender: [29:97:2057] recipient: [29:93:2121] !Reboot 72057594037927937 (actor [29:59:2099]) rebooted! !Reboot 72057594037927937 (actor [29:59:2099]) tablet resolver refreshed! new actor is[29:96:2122] Leader for TabletID 72057594037927937 is [29:96:2122] sender: [29:182:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:60:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:77:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:92:2057] recipient: [30:38:2085] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:95:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:96:2057] recipient: [30:94:2121] Leader for TabletID 72057594037927937 is [30:97:2122] sender: [30:98:2057] recipient: [30:94:2121] !Reboot 72057594037927937 (actor [30:59:2099]) rebooted! !Reboot 72057594037927937 (actor [30:59:2099]) tablet resolver refreshed! new actor is[30:97:2122] Leader for TabletID 72057594037927937 is [30:97:2122] sender: [30:183:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:57:2057] recipient: [31:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:57:2057] recipient: [31:53:2097] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:60:2057] recipient: [31:53:2097] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:77:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:94:2057] recipient: [31:38:2085] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:96:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:98:2057] recipient: [31:97:2123] Leader for TabletID 72057594037927937 is [31:99:2124] sender: [31:100:2057] recipient: [31:97:2123] !Reboot 72057594037927937 (actor [31:59:2099]) rebooted! !Reboot 72057594037927937 (actor [31:59:2099]) tablet resolver refreshed! 
new actor is[31:99:2124] Leader for TabletID 72057594037927937 is [31:99:2124] sender: [31:185:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:57:2057] recipient: [32:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:57:2057] recipient: [32:53:2097] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:60:2057] recipient: [32:53:2097] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:77:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:94:2057] recipient: [32:38:2085] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:97:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:98:2057] recipient: [32:96:2123] Leader for TabletID 72057594037927937 is [32:99:2124] sender: [32:100:2057] recipient: [32:96:2123] !Reboot 72057594037927937 (actor [32:59:2099]) rebooted! !Reboot 72057594037927937 (actor [32:59:2099]) tablet resolver refreshed! new actor is[32:99:2124] Leader for TabletID 72057594037927937 is [32:99:2124] sender: [32:185:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:57:2057] recipient: [33:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:57:2057] recipient: [33:53:2097] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:60:2057] recipient: [33:53:2097] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:77:2057] recipient: [33:14:2061] !Reboot 72057594037927937 (actor [33:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:95:2057] recipient: [33:38:2085] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:98:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:99:2057] recipient: [33:97:2123] Leader for TabletID 72057594037927937 is [33:100:2124] sender: [33:101:2057] recipient: [33:97:2123] !Reboot 72057594037927937 (actor [33:59:2099]) rebooted! !Reboot 72057594037927937 (actor [33:59:2099]) tablet resolver refreshed! 
new actor is[33:100:2124] Leader for TabletID 72057594037927937 is [33:100:2124] sender: [33:186:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:57:2057] recipient: [34:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:57:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:60:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:77:2057] recipient: [34:14:2061] >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunningExtSubdomain [GOOD] >> ShowCreateView::WithTablePathPrefix [GOOD] >> ShowCreateView::WithSingleQuotedTablePathPrefix >> SystemView::CollectPreparedQueries [GOOD] >> SystemView::CollectScanQueries >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserLoginBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserPasswordBad >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithRemovedUserCredentialsBad [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoGood >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TConsoleTests::TestAlterTenantTooManyStorageResourcesForRunningExtSubdomain [GOOD] Test command err: 2025-06-25T14:35:23.064660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:23.064712Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:23.121075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:24.354515Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:24.354578Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:24.413540Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:25.369098Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:25.369154Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:25.413389Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at 
schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:29.812204Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:29.812271Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:29.853458Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:34.325811Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:34.325879Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:34.376526Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:35.439511Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:35.439579Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:35.497550Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:36.205315Z node 6 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 7 2025-06-25T14:35:36.205797Z node 6 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 7 2025-06-25T14:35:36.205861Z node 6 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037936131] NodeDisconnected NodeId# 7 2025-06-25T14:35:36.205941Z node 7 :TX_PROXY WARN: proxy_impl.cpp:227: actor# [7:352:2089] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2025-06-25T14:35:37.613817Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:37.613921Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:37.689794Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:38.627155Z node 8 :BS_CONTROLLER ERROR: {BSC26@console_interaction.cpp:112} failed to parse config obtained from Console ErrorReason# ydb/library/yaml_config/yaml_config_parser.cpp:1362: Condition violated: `config.HasDomainsConfig()' Yaml# --- 
metadata: kind: MainConfig cluster: "" version: 1 config: log_config: cluster_name: cluster1 allowed_labels: test: type: enum values: ? true selector_config: [] 2025-06-25T14:35:40.204613Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:40.204702Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:40.358416Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:42.047818Z node 11 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:35:42.048302Z node 11 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/000ee4/r3tmp/tmp713sYZ/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:35:42.054546Z node 11 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000ee4/r3tmp/tmp713sYZ/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/000ee4/r3tmp/tmp713sYZ/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 6434732269944350959 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:35:42.369359Z node 9 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 9 Type# 268639257 2025-06-25T14:35:42.369781Z node 9 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 9 Type# 268639257 2025-06-25T14:35:42.386205Z node 11 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000000:_:0:0:0]: (2147483648) LocalRecovery FINISHED: 
{RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/000ee4/r3tmp/tmp713sYZ/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:35:42.405349Z node 11 :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1000 VDISK[80000001:_:0:0:0]: (2147483649) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyn ... 
2025-06-25T14:39:42.750356Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-1) cannot create pool '/dc-1/users/tenant-1:hdd-1' (0): Group fit error BoxId# 1 StoragePoolId# 2 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:39:42.750582Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-1 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 2 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:39:48.791311Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-1) cannot create pool '/dc-1/users/tenant-1:hdd-1' (0): Group fit error BoxId# 1 StoragePoolId# 2 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:39:48.791498Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-1 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 2 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:39:54.764666Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-1) cannot create pool '/dc-1/users/tenant-1:hdd-1' (0): Group fit error BoxId# 1 StoragePoolId# 2 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:39:54.764898Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-1 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 2 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:39:54.876571Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:39:54.876922Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# 
{[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:00.545002Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:00.545384Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:06.570823Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:06.571056Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:12.645570Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:12.645905Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:18.540598Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:18.540850Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool 
/dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:24.484663Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:24.484883Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:30.369695Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:30.369909Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:36.186782Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:36.186932Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:42.216543Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# 
{[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:42.216787Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:48.708787Z node 108 :CMS_TENANTS ERROR: console_tenants_manager.cpp:252: TPoolManip(/dc-1/users/tenant-1:hdd-2) cannot create pool '/dc-1/users/tenant-1:hdd-2' (0): Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} 2025-06-25T14:40:48.709128Z node 108 :CMS_TENANTS CRIT: console_tenants_manager.cpp:3514: Couldn't update storage pool /dc-1/users/tenant-1:hdd-2 for tenant /dc-1/users/tenant-1: Group fit error BoxId# 1 StoragePoolId# 3 Error# failed to allocate group: no group options PDisks# {[(108:1-s[16/16])(109:1000-s[16/16]o)(110:1000-s[16/16]o)(111:1000-s[16/16]o)(112:1000-s[16/16]o)(113:1000-s[16/16]o)(114:1000-s[16/16]o)(115:1000-s[16/16]o)(116:1000-s[16/16]o)]} >> TConsoleConfigSubscriptionTests::TestConfigNotificationRetries [GOOD] >> TConsoleConfigSubscriptionTests::TestConfigSubscriptionsCleanup >> SystemView::PartitionStatsOneSchemeShardDataQuery [GOOD] >> SystemView::PgTablesOneSchemeShardDataQuery >> BasicStatistics::TwoTables [GOOD] >> TxUsage::TwoSessionOneConsumer_Table >> TxUsage::WriteToTopic_Demo_20_RestartNo_Table >> TxUsage::WriteToTopic_Demo_3_Table >> TxUsage::WriteToTopic_Demo_2_Table >> TxUsage::Sinks_Oltp_WriteToTopic_1_Table >> TxUsage::Sinks_Oltp_WriteToTopic_2_Table >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithRemovedUserCredentialsBad >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDontExistGroupAttribute ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoTables [GOOD] Test command err: 2025-06-25T14:38:36.743073Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:36.743414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:36.743530Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d27/r3tmp/tmpeTkcyi/pdisk_1.dat 2025-06-25T14:38:37.062073Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6101, node 1 2025-06-25T14:38:37.333459Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:37.333525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:37.333575Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:37.334236Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:37.336969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:37.442279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:37.442452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:37.459210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26998 2025-06-25T14:38:38.043048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:41.131667Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:41.166225Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:41.166408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:41.208181Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:41.210266Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:41.430544Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:41.465993Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:41.466629Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:41.467253Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:41.467409Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:41.467649Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:41.467763Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:41.467834Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:41.467931Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:41.468007Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:41.658246Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:41.658391Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:41.671904Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:41.853396Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:41.897081Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:41.897196Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:41.950708Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:41.950936Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:41.951171Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:41.951232Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:41.951280Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:41.951344Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:41.951400Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:41.951449Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:41.951960Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:41.974565Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:41.974684Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:41.984046Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:38:41.990144Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T14:38:41.990444Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T14:38:41.999795Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:38:42.019168Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:42.019234Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:42.019311Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:38:42.031907Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:42.037883Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:42.038061Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:42.234089Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:42.417060Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:42.478508Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:38:43.013342Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:43.231773Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:43.231889Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:43.284394Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:43.638532Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2288:3059], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:43.638728Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:43.639863Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2293:3063]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:38:43.640008Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:38:43.640062Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2295:3065] 2025-06-25T14:38:43.640140Z nod ... 885Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:40:45.436936Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is data table. 2025-06-25T14:40:45.436987Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-25T14:40:45.437277Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T14:40:45.458873Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:40:45.462551Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6655:4754], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:45.462650Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6665:4759], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:45.463157Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:45.480369Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:45.549131Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6669:4762], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T14:40:45.715176Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:6765:4808] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:45.775726Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:6794:4823]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:45.776012Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-25T14:40:45.776066Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:6794:4823], StatRequests.size() = 1 2025-06-25T14:40:45.918727Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YWNlNGFjZjQtMjI0NmQyMTgtN2Y4M2NhZGQtYzAzNTk5ODQ=, TxId: 2025-06-25T14:40:45.918831Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YWNlNGFjZjQtMjI0NmQyMTgtN2Y4M2NhZGQtYzAzNTk5ODQ=, TxId: 2025-06-25T14:40:45.919433Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:40:45.944340Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-25T14:40:45.944420Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:40:46.487473Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:6822:4839]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:46.487805Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-25T14:40:46.487845Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:6822:4839], StatRequests.size() = 1 2025-06-25T14:40:47.914741Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:6859:4857]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:47.915038Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-25T14:40:47.915081Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:6859:4857], StatRequests.size() = 1 2025-06-25T14:40:48.641361Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:40:48.652303Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:40:48.652384Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:40:48.652424Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-25T14:40:48.652459Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. 
Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:40:48.652698Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T14:40:48.655323Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:40:48.669034Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZTY2NDkzMTYtOTkxZjY0NTItN2IxZDQxNTMtMzQ0OTkwOTM=, TxId: 2025-06-25T14:40:48.669089Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZTY2NDkzMTYtOTkxZjY0NTItN2IxZDQxNTMtMzQ0OTkwOTM=, TxId: 2025-06-25T14:40:48.669492Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:40:48.684017Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:40:48.684078Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:40:49.323664Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:6921:4893]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:49.323941Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-25T14:40:49.323982Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:6921:4893], StatRequests.size() = 1 2025-06-25T14:40:50.796350Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 125 ], ReplyToActorId[ [2:6960:4913]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:50.796660Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 125 ] 2025-06-25T14:40:50.796704Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 125, ReplyToActorId = [2:6960:4913], StatRequests.size() = 1 2025-06-25T14:40:51.555704Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:40:51.556107Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:40:51.557072Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:40:51.568107Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:40:51.568169Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:40:51.568213Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-25T14:40:51.568250Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:40:51.568536Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T14:40:51.571516Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:40:51.581835Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MmM5Y2UzLWU1M2FmZjYxLWExMGFiNTMtODM4NTdiZjM=, TxId: 2025-06-25T14:40:51.581902Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MmM5Y2UzLWU1M2FmZjYxLWExMGFiNTMtODM4NTdiZjM=, TxId: 2025-06-25T14:40:51.582421Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:40:51.599734Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:40:51.599798Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:40:52.214222Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 126 ], ReplyToActorId[ [2:7017:4944]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:52.214430Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 126 ] 2025-06-25T14:40:52.214458Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 126, ReplyToActorId = [2:7017:4944], StatRequests.size() = 1 2025-06-25T14:40:52.214917Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 127 ], ReplyToActorId[ [2:7019:4946]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:52.242999Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 127 ] 2025-06-25T14:40:52.243070Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 127, ReplyToActorId = [2:7019:4946], StatRequests.size() = 1 >> SystemView::AuthGroups_ResultOrder [GOOD] >> SystemView::AuthGroups_TableRange >> SystemView::CollectScanQueries [GOOD] >> SystemView::AuthUsers >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts >> SystemView::Nodes [GOOD] >> SystemView::PartitionStatsFields >> TKeyValueTest::TestCopyRangeToLongKey [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshRemoveUserBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoWithError >> LdapAuthProviderTest_LdapsScheme::LdapRefreshRemoveUserBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoWithError >> PgCatalog::PgDatabase-useSink [GOOD] >> PgCatalog::PgRoles >> TxUsage::WriteToTopic_Demo_4_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestCopyRangeToLongKey [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: 
[1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:82:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:83:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:83:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! 
new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! 
new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:87:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:91:2057] recipient: [8:89:2118] Leader for TabletID 72057594037927937 is [8:92:2119] sender: [8:93:2057] recipient: [8:89:2118] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:92:2119] Leader for TabletID 72057594037927937 is [8:92:2119] sender: [8:178:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:87:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:91:2057] recipient: [9:89:2118] Leader for TabletID 72057594037927937 is [9:92:2119] sender: [9:93:2057] recipient: [9:89:2118] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:92:2119] Leader for TabletID 72057594037927937 is [9:92:2119] sender: [9:178:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:88:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:92:2057] recipient: [10:90:2118] Leader for TabletID 72057594037927937 is [10:93:2119] sender: [10:94:2057] recipient: [10:90:2118] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! 
new actor is[10:93:2119] Leader for TabletID 72057594037927937 is [10:93:2119] sender: [10:179:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:91:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:94:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:95:2057] recipient: [11:93:2121] Leader for TabletID 72057594037927937 is [11:96:2122] sender: [11:97:2057] recipient: [11:93:2121] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:96:2122] Leader for TabletID 72057594037927937 is [11:96:2122] sender: [11:182:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 7 is [27:59:2099] sender: [27:92:2057] recipient: [27:38:2085] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:94:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:59:2099] sender: [27:96:2057] recipient: [27:95:2121] Leader for TabletID 72057594037927937 is [27:97:2122] sender: [27:98:2057] recipient: [27:95:2121] !Reboot 72057594037927937 (actor [27:59:2099]) rebooted! !Reboot 72057594037927937 (actor [27:59:2099]) tablet resolver refreshed! new actor is[27:97:2122] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:57:2057] recipient: [28:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:57:2057] recipient: [28:53:2097] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:60:2057] recipient: [28:53:2097] Leader for TabletID 72057594037927937 is [28:59:2099] sender: [28:77:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:57:2057] recipient: [29:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:57:2057] recipient: [29:54:2097] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:60:2057] recipient: [29:54:2097] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:77:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:60:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:77:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:79:2057] recipient: [30:38:2085] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:82:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:83:2057] recipient: [30:81:2112] Leader for TabletID 72057594037927937 is [30:84:2113] sender: [30:85:2057] recipient: [30:81:2112] !Reboot 72057594037927937 (actor [30:59:2099]) rebooted! !Reboot 72057594037927937 (actor [30:59:2099]) tablet resolver refreshed! new actor is[30:84:2113] Leader for TabletID 72057594037927937 is [30:84:2113] sender: [30:170:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:57:2057] recipient: [31:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:57:2057] recipient: [31:53:2097] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:60:2057] recipient: [31:53:2097] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:77:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:59:2099]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:79:2057] recipient: [31:38:2085] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:82:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:83:2057] recipient: [31:81:2112] Leader for TabletID 72057594037927937 is [31:84:2113] sender: [31:85:2057] recipient: [31:81:2112] !Reboot 72057594037927937 (actor [31:59:2099]) rebooted! !Reboot 72057594037927937 (actor [31:59:2099]) tablet resolver refreshed! new actor is[31:84:2113] Leader for TabletID 72057594037927937 is [31:84:2113] sender: [31:170:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:57:2057] recipient: [32:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:57:2057] recipient: [32:53:2097] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:60:2057] recipient: [32:53:2097] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:77:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:80:2057] recipient: [32:38:2085] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:82:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:84:2057] recipient: [32:83:2112] Leader for TabletID 72057594037927937 is [32:85:2113] sender: [32:86:2057] recipient: [32:83:2112] !Reboot 72057594037927937 (actor [32:59:2099]) rebooted! !Reboot 72057594037927937 (actor [32:59:2099]) tablet resolver refreshed! new actor is[32:85:2113] Leader for TabletID 72057594037927937 is [32:85:2113] sender: [32:171:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:57:2057] recipient: [33:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:57:2057] recipient: [33:53:2097] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:60:2057] recipient: [33:53:2097] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:77:2057] recipient: [33:14:2061] !Reboot 72057594037927937 (actor [33:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:83:2057] recipient: [33:38:2085] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:86:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:87:2057] recipient: [33:85:2115] Leader for TabletID 72057594037927937 is [33:88:2116] sender: [33:89:2057] recipient: [33:85:2115] !Reboot 72057594037927937 (actor [33:59:2099]) rebooted! !Reboot 72057594037927937 (actor [33:59:2099]) tablet resolver refreshed! new actor is[33:88:2116] Leader for TabletID 72057594037927937 is [33:88:2116] sender: [33:174:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:57:2057] recipient: [34:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:57:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:60:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:77:2057] recipient: [34:14:2061] !Reboot 72057594037927937 (actor [34:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:83:2057] recipient: [34:38:2085] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:86:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:87:2057] recipient: [34:85:2115] Leader for TabletID 72057594037927937 is [34:88:2116] sender: [34:89:2057] recipient: [34:85:2115] !Reboot 72057594037927937 (actor [34:59:2099]) rebooted! !Reboot 72057594037927937 (actor [34:59:2099]) tablet resolver refreshed! new actor is[34:88:2116] Leader for TabletID 72057594037927937 is [34:88:2116] sender: [34:174:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:57:2057] recipient: [35:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:57:2057] recipient: [35:53:2097] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:60:2057] recipient: [35:53:2097] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:77:2057] recipient: [35:14:2061] !Reboot 72057594037927937 (actor [35:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:84:2057] recipient: [35:38:2085] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:87:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:88:2057] recipient: [35:86:2115] Leader for TabletID 72057594037927937 is [35:89:2116] sender: [35:90:2057] recipient: [35:86:2115] !Reboot 72057594037927937 (actor [35:59:2099]) rebooted! !Reboot 72057594037927937 (actor [35:59:2099]) tablet resolver refreshed! new actor is[35:89:2116] Leader for TabletID 72057594037927937 is [35:89:2116] sender: [35:175:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:57:2057] recipient: [36:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:57:2057] recipient: [36:53:2097] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:60:2057] recipient: [36:53:2097] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:77:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:87:2057] recipient: [36:38:2085] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:90:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:91:2057] recipient: [36:89:2118] Leader for TabletID 72057594037927937 is [36:92:2119] sender: [36:93:2057] recipient: [36:89:2118] !Reboot 72057594037927937 (actor [36:59:2099]) rebooted! !Reboot 72057594037927937 (actor [36:59:2099]) tablet resolver refreshed! new actor is[36:92:2119] Leader for TabletID 72057594037927937 is [36:92:2119] sender: [36:178:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:57:2057] recipient: [37:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:57:2057] recipient: [37:54:2097] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:60:2057] recipient: [37:54:2097] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:77:2057] recipient: [37:14:2061] !Reboot 72057594037927937 (actor [37:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:87:2057] recipient: [37:38:2085] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:90:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:91:2057] recipient: [37:89:2118] Leader for TabletID 72057594037927937 is [37:92:2119] sender: [37:93:2057] recipient: [37:89:2118] !Reboot 72057594037927937 (actor [37:59:2099]) rebooted! !Reboot 72057594037927937 (actor [37:59:2099]) tablet resolver refreshed! new actor is[37:92:2119] Leader for TabletID 72057594037927937 is [37:92:2119] sender: [37:178:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:57:2057] recipient: [38:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:57:2057] recipient: [38:53:2097] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:60:2057] recipient: [38:53:2097] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:77:2057] recipient: [38:14:2061] !Reboot 72057594037927937 (actor [38:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:88:2057] recipient: [38:38:2085] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:91:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:92:2057] recipient: [38:90:2118] Leader for TabletID 72057594037927937 is [38:93:2119] sender: [38:94:2057] recipient: [38:90:2118] !Reboot 72057594037927937 (actor [38:59:2099]) rebooted! !Reboot 72057594037927937 (actor [38:59:2099]) tablet resolver refreshed! 
new actor is[38:93:2119] Leader for TabletID 72057594037927937 is [38:93:2119] sender: [38:179:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:57:2057] recipient: [39:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:57:2057] recipient: [39:53:2097] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:60:2057] recipient: [39:53:2097] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:77:2057] recipient: [39:14:2061] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithRemovedUserCredentialsBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoGood >> TxUsage::WriteToTopic_Invalid_Session_Table >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithCustomGroupAttributeGood >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDontExistGroupAttribute [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserLoginBad >> KqpPg::TableDeleteWhere+useSink [GOOD] >> KqpPg::TableDeleteWhere-useSink >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoDisableNestedGroupsGood >> SystemView::TopPartitionsByCpuFields [GOOD] >> SystemView::TopPartitionsByCpuFollowers >> SystemView::PgTablesOneSchemeShardDataQuery [GOOD] >> SystemView::QueryStats >> SystemView::TopPartitionsByCpuTables [GOOD] >> SystemView::TopPartitionsByCpuRanges >> BasicStatistics::Simple [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithCustomGroupAttributeGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsUseInvalidSearchFilterBad >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserLoginBad [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserPasswordBad >> TxUsage::TwoSessionOneConsumer_Table [GOOD] >> TKeyValueTest::TestConcatToLongKey [GOOD] |83.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots |83.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots >> TxUsage::TwoSessionOneConsumer_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::Simple [GOOD] Test command err: 2025-06-25T14:38:44.115920Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:44.116192Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:44.116277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c9a/r3tmp/tmpcxY4om/pdisk_1.dat 2025-06-25T14:38:44.402300Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10876, node 1 2025-06-25T14:38:44.617566Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:44.617617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:44.617658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:44.618219Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:44.620331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:44.716073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:44.716211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:44.730917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16587 2025-06-25T14:38:45.258054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:48.195304Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:48.227657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:48.227753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:48.265541Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:48.267665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:48.483553Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:48.517915Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:48.518349Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:48.518855Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:48.518975Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:48.519164Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:48.519277Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:48.519346Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:48.519435Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:48.519505Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:48.801861Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:48.801978Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:48.845407Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:48.982041Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:49.026732Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:49.026832Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:49.060029Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:49.060218Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:49.060443Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:49.060502Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:49.060577Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:49.060623Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:49.060669Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:49.060716Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:49.061175Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:49.088753Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:49.088865Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:49.098129Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:38:49.105144Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T14:38:49.105318Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T14:38:49.111325Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:38:49.126868Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:49.126933Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:49.127015Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:38:49.137854Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:49.145741Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:49.145861Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:49.376173Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:49.512085Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:49.572210Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:38:50.136244Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:50.377490Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:50.377615Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:50.393146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:50.697558Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2288:3059], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:50.697735Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:50.699020Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2293:3063]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:38:50.699165Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:38:50.699226Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2295:3065] 2025-06-25T14:38:50.699282Z no ... CS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-25T14:40:54.022145Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [2:6412:4622]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:54.022409Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ] 2025-06-25T14:40:54.022451Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [2:6412:4622], StatRequests.size() = 1 2025-06-25T14:40:54.829045Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:40:54.829371Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:40:54.829581Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:40:54.862802Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-25T14:40:54.862873Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 212.000000s, at schemeshard: 72075186224037897 2025-06-25T14:40:54.863168Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 49 2025-06-25T14:40:54.880965Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T14:40:55.502295Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:6445:4638]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:55.502534Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-25T14:40:55.502570Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:6445:4638], StatRequests.size() = 1 2025-06-25T14:40:56.288890Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:40:56.288973Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:40:56.289029Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-25T14:40:56.289070Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:40:56.289443Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T14:40:56.315909Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:40:56.327751Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6468:4657], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:56.327880Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6478:4662], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:56.328034Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:56.345405Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:56.409689Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6482:4665], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T14:40:56.557311Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:6577:4713] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:56.608275Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:6606:4728]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:56.608648Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-25T14:40:56.608724Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:6606:4728], StatRequests.size() = 1 2025-06-25T14:40:56.740561Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZGViNTFiYWEtY2JhNTg3YzUtNmQ3NTA2ZDktOTk4YWJkYjA=, TxId: 2025-06-25T14:40:56.740638Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZGViNTFiYWEtY2JhNTg3YzUtNmQ3NTA2ZDktOTk4YWJkYjA=, TxId: 2025-06-25T14:40:56.741158Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:40:56.777454Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:40:56.777524Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:40:57.358356Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:6637:4744]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:57.358690Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-25T14:40:57.358742Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:6637:4744], StatRequests.size() = 1 2025-06-25T14:40:58.855336Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:6672:4762]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:58.855611Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-25T14:40:58.855654Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:6672:4762], StatRequests.size() = 1 2025-06-25T14:40:59.625062Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:40:59.635957Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:40:59.636017Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:40:59.636078Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-25T14:40:59.636115Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. 
Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:40:59.636996Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T14:40:59.640415Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:40:59.652137Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YWMwMTIyMzMtMWJmNjE3Zi04MTE4NDg2Ni00MzIyN2EyNg==, TxId: 2025-06-25T14:40:59.652202Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YWMwMTIyMzMtMWJmNjE3Zi04MTE4NDg2Ni00MzIyN2EyNg==, TxId: 2025-06-25T14:40:59.652704Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:40:59.670374Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:40:59.670438Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:41:00.341789Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:6736:4798]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:00.342136Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-25T14:41:00.342201Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:6736:4798], StatRequests.size() = 1 2025-06-25T14:41:01.870629Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 125 ], ReplyToActorId[ [2:6775:4818]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:01.870925Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 125 ] 2025-06-25T14:41:01.870972Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 125, ReplyToActorId = [2:6775:4818], StatRequests.size() = 1 2025-06-25T14:41:02.655673Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:41:02.656087Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:41:02.656451Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:41:02.669207Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:02.669284Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-25T14:41:03.314450Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 126 ], ReplyToActorId[ [2:6808:4834]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:03.314714Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 126 ] 2025-06-25T14:41:03.314750Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 126, ReplyToActorId = [2:6808:4834], StatRequests.size() = 1 >> BasicStatistics::TwoNodes [GOOD] >> SystemView::PartitionStatsFields [GOOD] >> SystemView::ConcurrentScans |83.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |83.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots >> PgCatalog::PgRoles [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoWithError [GOOD] |83.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |83.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |83.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |83.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |83.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots >> TxUsage::WriteToTopic_Demo_20_RestartNo_Table [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoWithError [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_2_Table [GOOD] >> TxUsage::WriteToTopic_Demo_1_Table >> TxUsage::Sinks_Oltp_WriteToTopic_1_Table [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoGood [GOOD] >> PgCatalog::PgTables >> LdapAuthProviderTest_StartTls::LdapFetchGroupsFromAdLdapServer >> TxUsage::Sinks_Oltp_WriteToTopic_1_Query >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoDisableNestedGroupsGood >> TLdapUtilsSearchFilterCreatorTest::GetDefaultFilter [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartNo_Table >> TxUsage::Sinks_Oltp_WriteToTopic_2_Query |83.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |83.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore |83.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |83.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |83.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore |83.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore >> SystemView::AuthUsers [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsFromAdLdapServer [GOOD] >> SystemView::AuthUsers_LockUnlock >> BasicStatistics::SimpleGlobalIndex [GOOD] >> SystemView::ShowCreateTableColumnUpsertIndex [GOOD] >> 
ShowCreateView::WithSingleQuotedTablePathPrefix [GOOD] >> unstable_connection.py::TestUnstableConnection::test [GOOD] >> TxUsage::WriteToTopic_Invalid_Session_Table [GOOD] >> TxUsage::WriteToTopic_Demo_3_Table [GOOD] >> SystemView::AuthGroups_TableRange [GOOD] >> SystemView::ConcurrentScans [GOOD] >> SystemView::ShowCreateTablePartitionAtKeys [GOOD] >> TKeyValueTest::TestInlineCopyRangeWorks [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshRemoveUserBad >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithOneLoginPlaceholder [GOOD] >> SystemView::ShowCreateTableColumnAlterObject >> ShowCreateView::WithTwoTablePathPrefixes >> TxUsage::WriteToTopic_Demo_3_Query >> SystemView::AuthOwners >> SystemView::PDisksFields >> SystemView::ShowCreateTablePartitionByHash >> TKeyValueTest::TestInlineCopyRangeWorksNewApi >> TxUsage::WriteToTopic_Invalid_Session_Query >> LdapAuthProviderTest_StartTls::LdapFetchGroupsDisableRequestToAD >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithSearchAttribute [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithFewLoginPlaceholders [GOOD] >> TxUsage::WriteToTopic_Demo_2_Table [GOOD] |83.3%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |83.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |83.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |83.4%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/runtime/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] Test command err: 2025-06-25T14:40:45.083981Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896945318359599:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:45.084058Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f55/r3tmp/tmpGaMxP5/pdisk_1.dat 2025-06-25T14:40:45.444439Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15502, node 1 2025-06-25T14:40:45.466533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:45.466600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:45.468420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:45.595301Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:45.595322Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:45.595333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:45.595444Z node 
1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:45.869601Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:45.872831Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:45.872864Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:45.874042Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:16672, port: 16672 2025-06-25T14:40:45.874114Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:45.936170Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:45.987482Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:40:46.035137Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****TylA (61AE3F9D) () has now valid token of ldapuser@ldap 2025-06-25T14:40:46.090696Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:48.177292Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896958151914540:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:48.298102Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f55/r3tmp/tmp89xA01/pdisk_1.dat 2025-06-25T14:40:48.428403Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:48.430819Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:48.430907Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:48.432520Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896958151914504:2080] 1750862448137901 != 1750862448137904 2025-06-25T14:40:48.441399Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62707, node 2 2025-06-25T14:40:48.573670Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:48.573689Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:48.573694Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:48.573791Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:48.662937Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for 
/Root keys 1 2025-06-25T14:40:48.665460Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:48.665486Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:48.666153Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:23680, port: 23680 2025-06-25T14:40:48.666242Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:48.716825Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:48.760627Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:40:48.761467Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:40:48.761527Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:48.808667Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:48.860668Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:48.861677Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****Biqw (3DF2D338) () has now valid token of ldapuser@ldap 2025-06-25T14:40:51.756136Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519896967496923576:2086];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:51.756232Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f55/r3tmp/tmpXHKL0n/pdisk_1.dat 2025-06-25T14:40:51.880282Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7073, node 3 2025-06-25T14:40:51.900112Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:51.900197Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:51.910493Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:51.957874Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use 
file: (empty maybe) 2025-06-25T14:40:51.958142Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:51.958150Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:51.958329Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:52.099334Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:52.102428Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:52.102461Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:52.103100Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:21768, port: 21768 2025-06-25T14:40:52.103163Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:52.161185Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:52.209151Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****DZYA (E11926F4) () has now valid token of ldapuser@ldap 2025-06-25T14:40:55.428480Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519896984491441390:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:55.428564Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f55/r3tmp/tmpbj4Mu6/pdisk_1.dat 2025-06-25T14:40:55.590590Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519896984491441368:2080] 1750862455427362 != 1750862455427365 2025-06-25T14:40:55.600158Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:55.602741Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:55.602804Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:55.608721Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29653, node 4 2025-06-25T14:40:55.740964Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:55.740987Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:55.740996Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:55.741142Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:55.900441Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:55.903097Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: 
CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:55.903123Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:55.903854Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://qqq:5903 ldaps://localhost:5903 ldaps://localhost:11111, port: 5903 2025-06-25T14:40:55.903943Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:55.989069Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:56.044635Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:40:56.045635Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:40:56.045685Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:56.092787Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:56.140815Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:56.142139Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****mA6Q (A3E1271C) () has now valid token of ldapuser@ldap 2025-06-25T14:41:00.568138Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519897007057298011:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:00.568191Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f55/r3tmp/tmpeSQQpU/pdisk_1.dat 2025-06-25T14:41:00.805179Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:00.807526Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519897007057297904:2080] 1750862460562107 != 1750862460562110 2025-06-25T14:41:00.821786Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:00.821879Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:00.825714Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20780, node 
5 2025-06-25T14:41:00.953049Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:00.953072Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:00.953080Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:00.953211Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:01.080852Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:41:01.082247Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:01.082275Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:01.083038Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:31552, port: 31552 2025-06-25T14:41:01.083120Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:01.144730Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-25T14:41:01.196547Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:41:01.197161Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:41:01.197224Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-25T14:41:01.244602Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-25T14:41:01.289055Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-25T14:41:01.290643Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****P9hw (06BA0298) () has now valid token of ldapuser@ldap 2025-06-25T14:41:05.761847Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519897027584670357:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:05.762005Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f55/r3tmp/tmpkw2qfS/pdisk_1.dat 2025-06-25T14:41:06.355984Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not 
loaded 2025-06-25T14:41:06.369877Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519897027584670251:2080] 1750862465724809 != 1750862465724812 2025-06-25T14:41:06.398782Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:06.398879Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:06.402672Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26865, node 6 2025-06-25T14:41:06.620999Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:06.621027Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:06.621036Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:06.621184Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:06.824453Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:07.333428Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:41:07.346986Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:07.347025Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:07.347816Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:7098, port: 7098 2025-06-25T14:41:07.347890Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:07.432877Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: &(uid=ldapuser)(), attributes: memberOf 2025-06-25T14:41:07.432999Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter &(uid=ldapuser)() on server ldaps://localhost:7098. Bad search filter 2025-06-25T14:41:07.433619Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****mwdg (9466F4CD) () has now permanent error message 'Could not login via LDAP (Could not perform search for filter &(uid=ldapuser)() on server ldaps://localhost:7098. 
Bad search filter)' ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] Test command err: 2025-06-25T14:40:45.083943Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896941590091727:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:45.083997Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f6d/r3tmp/tmpuho0AV/pdisk_1.dat 2025-06-25T14:40:45.401335Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31454, node 1 2025-06-25T14:40:45.464033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:45.464138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:45.465881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:45.599585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:45.599611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:45.599619Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:45.599754Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:45.939821Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:45.943195Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:45.943229Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:45.944606Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:3031, port: 3031 2025-06-25T14:40:45.944695Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:40:45.973093Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:46.017497Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:46.064648Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:40:46.065252Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:40:46.065321Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: 
(|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:46.112712Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:46.160841Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:46.163153Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****2OXQ (E9CCE8ED) () has now valid token of ldapuser@ldap 2025-06-25T14:40:46.163341Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:48.093227Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896954758343455:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:48.093319Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f6d/r3tmp/tmpOYf3eR/pdisk_1.dat 2025-06-25T14:40:48.335611Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:48.346908Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:48.347004Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:48.350386Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14073, node 2 2025-06-25T14:40:48.503336Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:48.503359Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:48.503364Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:48.503442Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:48.576587Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:48.577885Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:48.577933Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:48.578748Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:26557, port: 26557 2025-06-25T14:40:48.578852Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:40:48.590318Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: 
cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:48.632760Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:48.678910Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****VzuA (33CFD211) () has now valid token of ldapuser@ldap 2025-06-25T14:40:51.753077Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519896970778654363:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:51.753201Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f6d/r3tmp/tmpdF4eV0/pdisk_1.dat 2025-06-25T14:40:51.872158Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23689, node 3 2025-06-25T14:40:51.913373Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:51.913464Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:51.914978Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:51.934802Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:51.934825Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:51.934831Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:51.934953Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:52.021174Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:52.024219Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:52.024264Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:52.024955Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://qqq:27861 ldap://localhost:27861 ldap://localhost:11111, port: 27861 2025-06-25T14:40:52.025044Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:40:52.038157Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:52.080809Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:52.124592Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:40:52.125071Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:40:52.125116Z node 3 :LDAP_AUTH_PROVIDER 
DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:52.172655Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:52.216662Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:52.217871Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****I-JQ (1BEDEF5F) () has now valid token of ldapuser@ldap 2025-06-25T14:40:55.068782Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519896984156876012:2086];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:55.071454Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f6d/r3tmp/tmpAswjMd/pdisk_1.dat 2025-06-25T14:40:55.163898Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20909, node 4 2025-06-25T14:40:55.202620Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:55.202773Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:55.218440Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:55.256183Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:55.256202Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:55.256210Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:55.256365Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:55.395826Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:55.399720Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:55.399749Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:55.400472Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:29704, port: 29704 2025-06-25T14:40:55.400551Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:40:55.410257Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:55.456811Z node 4 
:LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-25T14:40:55.501389Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****AxiQ (59B04498) () has now valid token of ldapuser@ldap 2025-06-25T14:41:00.573923Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519897007267582720:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:00.574232Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f6d/r3tmp/tmpAHQNNy/pdisk_1.dat 2025-06-25T14:41:00.768018Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:00.776568Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519897007267582619:2080] 1750862460525575 != 1750862460525578 2025-06-25T14:41:00.790148Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:00.790230Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:00.793920Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4884, node 5 2025-06-25T14:41:00.884929Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:00.884951Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:00.884959Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:00.885111Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:01.176712Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:41:01.177019Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:01.177035Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:01.177719Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:5579, port: 5579 2025-06-25T14:41:01.177778Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:41:01.190007Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=invalidRobouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:01.237062Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:5579. Invalid credentials 2025-06-25T14:41:01.237647Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****Gayw (CC6CFCFE) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:5579. 
Invalid credentials)' 2025-06-25T14:41:06.480862Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519897034266649190:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:06.492874Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f6d/r3tmp/tmpk5N0h0/pdisk_1.dat 2025-06-25T14:41:06.856713Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519897034266649161:2080] 1750862466469051 != 1750862466469054 2025-06-25T14:41:06.913455Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:06.913546Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:06.914687Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:06.922021Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6775, node 6 2025-06-25T14:41:07.107852Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:07.107876Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:07.107886Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:07.108040Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:07.491115Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:41:07.501667Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:07.501704Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:07.502613Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:63421, port: 63421 2025-06-25T14:41:07.502695Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:41:07.542936Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:07.584619Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:63421. Invalid credentials 2025-06-25T14:41:07.585040Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:07.585410Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****DhUg (669E0BA3) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:63421. 
Invalid credentials)' ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithFewLoginPlaceholders [GOOD] Test command err: 2025-06-25T14:40:45.083924Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896941859403228:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:45.084010Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f44/r3tmp/tmpg8Q2N4/pdisk_1.dat 2025-06-25T14:40:45.401326Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63047, node 1 2025-06-25T14:40:45.492951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:45.493127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:45.499256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:45.595196Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:45.595215Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:45.595222Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:45.595333Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:45.948473Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:45.949403Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:45.949437Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:45.950203Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:8879, port: 8879 2025-06-25T14:40:45.950890Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:45.956457Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:46.004686Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:40:46.005234Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:40:46.005307Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 
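The LDAP_AUTH_PROVIDER entries around this point all trace the same group-resolution pattern: a bind as the robot user, a search for the login (uid=ldapuser) requesting the group attribute (memberOf or groupDN), a matching-rule-in-chain query (member:1.2.840.113556.1.4.1941:=...) for the user's groups, and then the "tree traversal" rounds that look up the parents of each newly found group by entryDn until no new groups appear. Below is a minimal sketch of that traversal; the SearchFn callback, the FetchNestedGroups name and the std::set bookkeeping are illustrative assumptions and are not taken from ldap_auth_provider.cpp.

```cpp
// Minimal sketch of the nested-group resolution traced in the log above.
// Assumption: a generic search() callback stands in for the real LDAP client;
// it returns matched entry DNs for the first query and parent-group DNs
// (the requested group attribute values) for the traversal queries.
#include <functional>
#include <set>
#include <string>
#include <vector>

using SearchFn = std::function<std::vector<std::string>(const std::string& filter,
                                                        const std::string& attribute)>;

std::set<std::string> FetchNestedGroups(const std::string& userDn,
                                        const std::string& groupAttribute, // "memberOf" or "groupDN"
                                        const SearchFn& search) {
    // Direct membership via the LDAP_MATCHING_RULE_IN_CHAIN extensible match
    // seen in the log ("attributes: 1.1" requests no attributes, only DNs).
    std::set<std::string> known;
    std::vector<std::string> frontier =
        search("(member:1.2.840.113556.1.4.1941:=" + userDn + ")", "1.1");
    known.insert(frontier.begin(), frontier.end());

    // Tree traversal: look up the parents of every newly discovered group
    // with an (|(entryDn=...)(entryDn=...)) filter until nothing new appears.
    while (!frontier.empty()) {
        std::string filter = "(|";
        for (const auto& dn : frontier) {
            filter += "(entryDn=" + dn + ")";
        }
        filter += ")";

        std::vector<std::string> parents = search(filter, groupAttribute);
        frontier.clear();
        for (const auto& dn : parents) {
            if (known.insert(dn).second) {
                frontier.push_back(dn); // only recurse into groups not seen before
            }
        }
    }
    return known;
}
```

Each iteration of the loop corresponds to one of the (|(entryDn=...)...) search lines in the log, and the traversal stops exactly when a round (here the cn=people,ou=groups query) yields no unseen parent groups.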
2025-06-25T14:40:46.052728Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:46.100685Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:46.101630Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:46.109300Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****cj8A (263E6E7A) () has now valid token of ldapuser@ldap 2025-06-25T14:40:50.084471Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896941859403228:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:50.084572Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:40:50.114244Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****cj8A (263E6E7A) 2025-06-25T14:40:50.114371Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:8879, port: 8879 2025-06-25T14:40:50.114494Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:50.119115Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:50.119423Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:8879 return no entries 2025-06-25T14:40:50.119611Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****cj8A (263E6E7A) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. 
LDAP search for filter uid=ldapuser on server ldap://localhost:8879 return no entries)' 2025-06-25T14:40:55.116693Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****cj8A (263E6E7A) test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f44/r3tmp/tmprYl5nU/pdisk_1.dat 2025-06-25T14:40:56.947215Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896989560840830:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:57.129427Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:57.328098Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:57.328193Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:57.343873Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:57.358089Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:57.360516Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896989560840710:2080] 1750862456853095 != 1750862456853098 TServer::EnableGrpc on GrpcPort 2576, node 2 2025-06-25T14:40:57.612991Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:57.613012Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:57.613018Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:57.613165Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:57.944545Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:58.180512Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:58.190472Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:58.190498Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:58.191272Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:32558, port: 32558 2025-06-25T14:40:58.191352Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:58.204485Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:58.205101Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:32558. 
Server is busy 2025-06-25T14:40:58.205330Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****IB_g (434D7BA3) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:32558. Server is busy)' 2025-06-25T14:40:58.205613Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:58.205631Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:58.206600Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:32558, port: 32558 2025-06-25T14:40:58.206673Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:58.224482Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:58.228864Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:32558. Server is busy 2025-06-25T14:40:58.229088Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****IB_g (434D7BA3) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:32558. Server is busy)' 2025-06-25T14:40:59.900880Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****IB_g (434D7BA3) 2025-06-25T14:40:59.901233Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:59.901265Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:59.909881Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:32558, port: 32558 2025-06-25T14:40:59.909992Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:59.913525Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:59.913958Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:32558. Server is busy 2025-06-25T14:40:59.914139Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****IB_g (434D7BA3) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:32558. 
Server is busy)' 2025-06-25T14:41:01.900558Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519896989560840830:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:01.900679Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:03.905047Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****IB_g (434D7BA3) 2025-06-25T14:41:03.905302Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:03.905318Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:03.907427Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:32558, port: 32558 2025-06-25T14:41:03.907525Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:03.926008Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:03.971028Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:41:03.971717Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:41:03.971769Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:04.018974Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:04.062627Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:04.063531Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****IB_g (434D7BA3) () has now valid token of ldapuser@ldap 2025-06-25T14:41:07.913361Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****IB_g (434D7BA3) 2025-06-25T14:41:07.916608Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:32558, port: 32558 2025-06-25T14:41:07.917056Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:07.941617Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:07.992687Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: 
dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:41:07.993192Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:41:07.993235Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:08.036782Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:08.084480Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:08.085392Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****IB_g (434D7BA3) () has now valid token of ldapuser@ldap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::SimpleGlobalIndex [GOOD] Test command err: 2025-06-25T14:38:39.644133Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:39.644544Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:39.644673Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001cce/r3tmp/tmp4LJmYD/pdisk_1.dat 2025-06-25T14:38:40.025385Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13893, node 1 2025-06-25T14:38:40.261896Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:40.261942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:40.261973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:40.262409Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:40.263970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:40.356878Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:40.357049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:40.373482Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29956 2025-06-25T14:38:40.936256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:43.958076Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:43.990655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:43.990745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:44.029919Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:44.036163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:44.241964Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:44.276878Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.277387Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.278028Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.278191Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.278406Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.278511Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.278582Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.278675Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.278742Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.458995Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:44.459089Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:44.471998Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:44.624786Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:44.669543Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:44.669625Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:44.703863Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:44.704065Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:44.704295Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:44.704382Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:44.704435Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:44.704488Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:44.704539Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:44.704589Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:44.705064Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:44.728582Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:44.728723Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1794:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:44.736159Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:38:44.740060Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1830:2584] 2025-06-25T14:38:44.740801Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1830:2584], schemeshard id = 72075186224037897 2025-06-25T14:38:44.751553Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:38:44.772011Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:44.772075Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:44.772144Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:38:44.786021Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:44.793120Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:44.793241Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:45.008282Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:45.147781Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:45.215546Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:38:45.737061Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:45.971915Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2155:3027], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:45.972085Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:45.989765Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:46.329732Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2363:3071], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:46.329937Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:46.331376Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2368:3075]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:38:46.331670Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:38:46.331766Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2370:3077] 2025-06-25T14:38:46.331834Z no ... pp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:6694:4740], StatRequests.size() = 1 2025-06-25T14:41:04.796004Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:04.796103Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:04.796165Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-25T14:41:04.796213Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:41:04.796608Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T14:41:04.813589Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:41:04.818957Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6721:4762], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:04.819075Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6731:4767], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:04.819202Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:04.834296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:04.903223Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6735:4770], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T14:41:05.114742Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:6833:4818] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:05.181288Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:6862:4833]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:05.181585Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-25T14:41:05.181639Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:6862:4833], StatRequests.size() = 1 2025-06-25T14:41:05.331901Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NmMxNDhhYjktYWYwODM0MmYtYWIxOTc3N2UtMmJiZDJkNDc=, TxId: 2025-06-25T14:41:05.331996Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NmMxNDhhYjktYWYwODM0MmYtYWIxOTc3N2UtMmJiZDJkNDc=, TxId: 2025-06-25T14:41:05.332911Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:05.354287Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:41:05.354369Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:41:05.929127Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:6890:4849]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:05.929432Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-25T14:41:05.929473Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:6890:4849], StatRequests.size() = 1 2025-06-25T14:41:07.469082Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:6925:4867]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:07.469417Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-25T14:41:07.469464Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:6925:4867], StatRequests.size() = 1 2025-06-25T14:41:08.324994Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:41:08.335900Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:08.335975Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:08.336017Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 6] is data table. 2025-06-25T14:41:08.336051Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. 
Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 6] 2025-06-25T14:41:08.336407Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T14:41:08.339600Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:41:08.373991Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:04.000000Z, event interval end# 2025-06-25T14:41:06.000000Z 2025-06-25T14:41:08.374908Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODk2Nzc2YWEtMzZmNjZlNGEtY2NkMWI1NDktMTI3MWZkYmY=, TxId: 2025-06-25T14:41:08.374978Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODk2Nzc2YWEtMzZmNjZlNGEtY2NkMWI1NDktMTI3MWZkYmY=, TxId: 2025-06-25T14:41:08.389095Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:08.413653Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 6] 2025-06-25T14:41:08.413710Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:41:09.073833Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:6991:4903]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:09.074176Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-25T14:41:09.074224Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:6991:4903], StatRequests.size() = 1 2025-06-25T14:41:10.514935Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 125 ], ReplyToActorId[ [2:7030:4923]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:10.515300Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 125 ] 2025-06-25T14:41:10.515343Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 125, ReplyToActorId = [2:7030:4923], StatRequests.size() = 1 2025-06-25T14:41:11.243928Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:41:11.244346Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:41:11.244663Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:41:11.257226Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:11.257290Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:11.257333Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 
2025-06-25T14:41:11.257367Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:41:11.257615Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T14:41:11.263735Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:41:11.273247Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YTczYmYzNWUtM2MxZGE1ZGUtY2QwYjhiZTEtNTQ3NjUzNGQ=, TxId: 2025-06-25T14:41:11.273318Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YTczYmYzNWUtM2MxZGE1ZGUtY2QwYjhiZTEtNTQ3NjUzNGQ=, TxId: 2025-06-25T14:41:11.274217Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:11.290910Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:41:11.290967Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:41:11.921914Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 126 ], ReplyToActorId[ [2:7089:4956]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:11.922251Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 126 ] 2025-06-25T14:41:11.922297Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 126, ReplyToActorId = [2:7089:4956], StatRequests.size() = 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoNodes [GOOD] Test command err: 2025-06-25T14:38:35.312635Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:497:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:35.312961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:35.313098Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d2b/r3tmp/tmpjzuN1u/pdisk_1.dat 2025-06-25T14:38:35.673954Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11675, node 1 2025-06-25T14:38:35.885319Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:35.885376Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:35.885409Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:35.886066Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:35.888393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:36.001556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:36.001697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:36.021189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4439 2025-06-25T14:38:36.595889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:42.398820Z node 3 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 3 2025-06-25T14:38:42.399596Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:42.482363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:42.482460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:42.482967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:42.483031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:42.534116Z node 1 
:HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:42.534326Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:38:42.537978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:42.538374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:42.729442Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:42.763806Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.764215Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.764557Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.764646Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.764786Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.764851Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.764903Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.764951Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.765000Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.932054Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:42.932146Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:42.932783Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:42.932863Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:42.946311Z node 3 :HIVE WARN: hive_impl.cpp:781: HIVE#72075186224037888 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:42.947408Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:42.950237Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:43.079142Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:43.114720Z node 3 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:43.114792Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:43.135324Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:34: 
[72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:43.136403Z node 3 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:43.136587Z node 3 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:43.136632Z node 3 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:43.136676Z node 3 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:43.136736Z node 3 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:43.136797Z node 3 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:43.136839Z node 3 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:43.137302Z node 3 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:43.166478Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:43.166584Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [3:2172:2565], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:43.172151Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:2184:2574] 2025-06-25T14:38:43.179128Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:2218:2590] 2025-06-25T14:38:43.179525Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [3:2218:2590], schemeshard id = 72075186224037897 2025-06-25T14:38:43.181005Z node 3 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:38:43.199281Z node 3 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:43.199342Z node 3 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:43.199410Z node 3 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:38:43.216675Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976725657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:43.224621Z node 3 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976725657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:43.224763Z node 3 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976725657 2025-06-25T14:38:43.453265Z node 3 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:43.637991Z node 3 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. 
Request: create. Transaction completed: 281474976725657. Doublechecking... 2025-06-25T14:38:43.737592Z node 3 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:38:44.299132Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:44.299203Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:44.520986Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2552:3033], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:44.521159Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:44.538061Z node 3 :FL ... ts.size() = 1 2025-06-25T14:40:57.244740Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:40:57.244827Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:40:57.244891Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-25T14:40:57.244958Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:40:57.245321Z node 3 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T14:40:57.288211Z node 3 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:40:57.292527Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7408:4332], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:57.292671Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7419:4337], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:57.292776Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:57.340205Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976725658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:57.440582Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7422:4340], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976725658 completed, doublechecking } 2025-06-25T14:40:57.701907Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7518:4386] txid# 281474976725659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:57.774114Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [3:7548:4402]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:57.774397Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:40:57.774486Z node 3 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [3:7550:4404] 2025-06-25T14:40:57.774544Z node 3 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [3:7550:4404] 2025-06-25T14:40:57.774871Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:7551:4405] 2025-06-25T14:40:57.775079Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:7550:4404], server id = [3:7551:4405], tablet id = 72075186224037894, status = OK 2025-06-25T14:40:57.775171Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [3:7551:4405], node id = 3, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T14:40:57.775249Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 3, schemeshard count = 1 2025-06-25T14:40:57.775423Z node 3 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 3 2025-06-25T14:40:57.775503Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [3:7548:4402], StatRequests.size() = 1 2025-06-25T14:40:58.171320Z node 3 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=ODdiZWM0YTUtMTcxNTkyYjMtZjBjN2I2ZGMtMmYxMmMzMWE=, TxId: 2025-06-25T14:40:58.171397Z node 3 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=ODdiZWM0YTUtMTcxNTkyYjMtZjBjN2I2ZGMtMmYxMmMzMWE=, TxId: 2025-06-25T14:40:58.189248Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:40:58.209135Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:40:58.209231Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:40:58.290254Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T14:40:58.290326Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T14:40:58.362920Z node 3 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [3:7550:4404], schemeshard count = 1 2025-06-25T14:40:58.791329Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:7582:3191]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:40:58.791655Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-25T14:40:58.791694Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:7582:3191], StatRequests.size() = 1 2025-06-25T14:41:00.198046Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:7622:3201]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:00.198329Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-25T14:41:00.198574Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:7622:3201], StatRequests.size() = 1 2025-06-25T14:41:00.976722Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:41:00.989249Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:00.989310Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:00.989347Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-25T14:41:00.989381Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:41:00.989769Z node 3 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T14:41:00.992446Z node 3 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:41:01.007796Z node 3 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=ZjFkODEzMTYtYTQ3OWFiMGYtZGI5YzdjOGItNzg2M2QyMDk=, TxId: 2025-06-25T14:41:01.007862Z node 3 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=ZjFkODEzMTYtYTQ3OWFiMGYtZGI5YzdjOGItNzg2M2QyMDk=, TxId: 2025-06-25T14:41:01.010481Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:01.035301Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:41:01.035360Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:41:01.714806Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:7690:3211]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:01.715083Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-25T14:41:01.715121Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:7690:3211], StatRequests.size() = 1 2025-06-25T14:41:03.318465Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:7735:3221]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:03.318679Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-25T14:41:03.318711Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:7735:3221], StatRequests.size() = 1 2025-06-25T14:41:04.195695Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 3, schemeshard count = 1 2025-06-25T14:41:04.196021Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:41:04.196320Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:41:04.196389Z node 3 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 3 2025-06-25T14:41:04.207744Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:04.207808Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:04.886192Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 125 ], ReplyToActorId[ [2:7772:3227]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:04.886463Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 125 ] 2025-06-25T14:41:04.886502Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 125, ReplyToActorId = [2:7772:3227], StatRequests.size() = 1 2025-06-25T14:41:04.887249Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [3:7774:4483]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:04.890740Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:41:04.890799Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [3:7774:4483], StatRequests.size() = 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestConcatToLongKey [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] 
Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:85:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:86:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:86:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:87:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:91:2057] recipient: [8:89:2118] Leader for TabletID 72057594037927937 is [8:92:2119] sender: [8:93:2057] recipient: [8:89:2118] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:92:2119] Leader for TabletID 72057594037927937 is [8:92:2119] sender: [8:178:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:87:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:91:2057] recipient: [9:89:2118] Leader for TabletID 72057594037927937 is [9:92:2119] sender: [9:93:2057] recipient: [9:89:2118] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:92:2119] Leader for TabletID 72057594037927937 is [9:92:2119] sender: [9:178:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:88:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:92:2057] recipient: [10:91:2118] Leader for TabletID 72057594037927937 is [10:93:2119] sender: [10:94:2057] recipient: [10:91:2118] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:93:2119] Leader for TabletID 72057594037927937 is [10:93:2119] sender: [10:179:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:90:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:93:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:94:2057] recipient: [11:92:2120] Leader for TabletID 72057594037927937 is [11:95:2121] sender: [11:96:2057] recipient: [11:92:2120] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:95:2121] Leader for TabletID 72057594037927937 is [11:95:2121] sender: [11:181:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... s [33:59:2099] sender: [33:95:2057] recipient: [33:38:2085] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:98:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:99:2057] recipient: [33:97:2123] Leader for TabletID 72057594037927937 is [33:100:2124] sender: [33:101:2057] recipient: [33:97:2123] !Reboot 72057594037927937 (actor [33:59:2099]) rebooted! !Reboot 72057594037927937 (actor [33:59:2099]) tablet resolver refreshed! new actor is[33:100:2124] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:57:2057] recipient: [34:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:57:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:60:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:77:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:57:2057] recipient: [35:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:57:2057] recipient: [35:53:2097] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:60:2057] recipient: [35:53:2097] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:77:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:57:2057] recipient: [36:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:57:2057] recipient: [36:53:2097] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:60:2057] recipient: [36:53:2097] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:77:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:79:2057] recipient: [36:38:2085] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:82:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:83:2057] recipient: [36:81:2112] Leader for TabletID 72057594037927937 is [36:84:2113] sender: [36:85:2057] recipient: [36:81:2112] !Reboot 72057594037927937 (actor [36:59:2099]) rebooted! !Reboot 72057594037927937 (actor [36:59:2099]) tablet resolver refreshed! 
new actor is[36:84:2113] Leader for TabletID 72057594037927937 is [36:84:2113] sender: [36:170:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:57:2057] recipient: [37:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:57:2057] recipient: [37:54:2097] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:60:2057] recipient: [37:54:2097] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:77:2057] recipient: [37:14:2061] !Reboot 72057594037927937 (actor [37:59:2099]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:79:2057] recipient: [37:38:2085] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:82:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:83:2057] recipient: [37:81:2112] Leader for TabletID 72057594037927937 is [37:84:2113] sender: [37:85:2057] recipient: [37:81:2112] !Reboot 72057594037927937 (actor [37:59:2099]) rebooted! !Reboot 72057594037927937 (actor [37:59:2099]) tablet resolver refreshed! new actor is[37:84:2113] Leader for TabletID 72057594037927937 is [37:84:2113] sender: [37:170:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:57:2057] recipient: [38:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:57:2057] recipient: [38:53:2097] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:60:2057] recipient: [38:53:2097] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:77:2057] recipient: [38:14:2061] !Reboot 72057594037927937 (actor [38:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:80:2057] recipient: [38:38:2085] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:83:2057] recipient: [38:82:2112] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:84:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [38:85:2113] sender: [38:86:2057] recipient: [38:82:2112] !Reboot 72057594037927937 (actor [38:59:2099]) rebooted! !Reboot 72057594037927937 (actor [38:59:2099]) tablet resolver refreshed! new actor is[38:85:2113] Leader for TabletID 72057594037927937 is [38:85:2113] sender: [38:171:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:57:2057] recipient: [39:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:57:2057] recipient: [39:53:2097] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:60:2057] recipient: [39:53:2097] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:77:2057] recipient: [39:14:2061] !Reboot 72057594037927937 (actor [39:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:83:2057] recipient: [39:38:2085] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:86:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:87:2057] recipient: [39:85:2115] Leader for TabletID 72057594037927937 is [39:88:2116] sender: [39:89:2057] recipient: [39:85:2115] !Reboot 72057594037927937 (actor [39:59:2099]) rebooted! !Reboot 72057594037927937 (actor [39:59:2099]) tablet resolver refreshed! 
new actor is[39:88:2116] Leader for TabletID 72057594037927937 is [39:88:2116] sender: [39:174:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:57:2057] recipient: [40:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:57:2057] recipient: [40:53:2097] Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:60:2057] recipient: [40:53:2097] Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:77:2057] recipient: [40:14:2061] !Reboot 72057594037927937 (actor [40:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:83:2057] recipient: [40:38:2085] Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:86:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:87:2057] recipient: [40:85:2115] Leader for TabletID 72057594037927937 is [40:88:2116] sender: [40:89:2057] recipient: [40:85:2115] !Reboot 72057594037927937 (actor [40:59:2099]) rebooted! !Reboot 72057594037927937 (actor [40:59:2099]) tablet resolver refreshed! new actor is[40:88:2116] Leader for TabletID 72057594037927937 is [40:88:2116] sender: [40:174:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:57:2057] recipient: [41:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:57:2057] recipient: [41:54:2097] Leader for TabletID 72057594037927937 is [41:59:2099] sender: [41:60:2057] recipient: [41:54:2097] Leader for TabletID 72057594037927937 is [41:59:2099] sender: [41:77:2057] recipient: [41:14:2061] !Reboot 72057594037927937 (actor [41:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [41:59:2099] sender: [41:84:2057] recipient: [41:38:2085] Leader for TabletID 72057594037927937 is [41:59:2099] sender: [41:87:2057] recipient: [41:14:2061] Leader for TabletID 72057594037927937 is [41:59:2099] sender: [41:88:2057] recipient: [41:86:2115] Leader for TabletID 72057594037927937 is [41:89:2116] sender: [41:90:2057] recipient: [41:86:2115] !Reboot 72057594037927937 (actor [41:59:2099]) rebooted! !Reboot 72057594037927937 (actor [41:59:2099]) tablet resolver refreshed! new actor is[41:89:2116] Leader for TabletID 72057594037927937 is [41:89:2116] sender: [41:175:2057] recipient: [41:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [42:57:2057] recipient: [42:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [42:57:2057] recipient: [42:52:2097] Leader for TabletID 72057594037927937 is [42:59:2099] sender: [42:60:2057] recipient: [42:52:2097] Leader for TabletID 72057594037927937 is [42:59:2099] sender: [42:77:2057] recipient: [42:14:2061] !Reboot 72057594037927937 (actor [42:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [42:59:2099] sender: [42:87:2057] recipient: [42:38:2085] Leader for TabletID 72057594037927937 is [42:59:2099] sender: [42:90:2057] recipient: [42:14:2061] Leader for TabletID 72057594037927937 is [42:59:2099] sender: [42:91:2057] recipient: [42:89:2118] Leader for TabletID 72057594037927937 is [42:92:2119] sender: [42:93:2057] recipient: [42:89:2118] !Reboot 72057594037927937 (actor [42:59:2099]) rebooted! !Reboot 72057594037927937 (actor [42:59:2099]) tablet resolver refreshed! 
new actor is[42:92:2119] Leader for TabletID 72057594037927937 is [42:92:2119] sender: [42:178:2057] recipient: [42:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [43:57:2057] recipient: [43:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [43:57:2057] recipient: [43:54:2097] Leader for TabletID 72057594037927937 is [43:59:2099] sender: [43:60:2057] recipient: [43:54:2097] Leader for TabletID 72057594037927937 is [43:59:2099] sender: [43:77:2057] recipient: [43:14:2061] !Reboot 72057594037927937 (actor [43:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [43:59:2099] sender: [43:87:2057] recipient: [43:38:2085] Leader for TabletID 72057594037927937 is [43:59:2099] sender: [43:90:2057] recipient: [43:14:2061] Leader for TabletID 72057594037927937 is [43:59:2099] sender: [43:91:2057] recipient: [43:89:2118] Leader for TabletID 72057594037927937 is [43:92:2119] sender: [43:93:2057] recipient: [43:89:2118] !Reboot 72057594037927937 (actor [43:59:2099]) rebooted! !Reboot 72057594037927937 (actor [43:59:2099]) tablet resolver refreshed! new actor is[43:92:2119] Leader for TabletID 72057594037927937 is [43:92:2119] sender: [43:178:2057] recipient: [43:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [44:57:2057] recipient: [44:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [44:57:2057] recipient: [44:53:2097] Leader for TabletID 72057594037927937 is [44:59:2099] sender: [44:60:2057] recipient: [44:53:2097] Leader for TabletID 72057594037927937 is [44:59:2099] sender: [44:77:2057] recipient: [44:14:2061] !Reboot 72057594037927937 (actor [44:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [44:59:2099] sender: [44:88:2057] recipient: [44:38:2085] Leader for TabletID 72057594037927937 is [44:59:2099] sender: [44:91:2057] recipient: [44:14:2061] Leader for TabletID 72057594037927937 is [44:59:2099] sender: [44:92:2057] recipient: [44:90:2118] Leader for TabletID 72057594037927937 is [44:93:2119] sender: [44:94:2057] recipient: [44:90:2118] !Reboot 72057594037927937 (actor [44:59:2099]) rebooted! !Reboot 72057594037927937 (actor [44:59:2099]) tablet resolver refreshed! 
new actor is[44:93:2119] Leader for TabletID 72057594037927937 is [44:93:2119] sender: [44:179:2057] recipient: [44:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [45:57:2057] recipient: [45:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [45:57:2057] recipient: [45:53:2097] Leader for TabletID 72057594037927937 is [45:59:2099] sender: [45:60:2057] recipient: [45:53:2097] Leader for TabletID 72057594037927937 is [45:59:2099] sender: [45:77:2057] recipient: [45:14:2061] >> DbCounters::TabletsSimple [GOOD] >> LabeledDbCounters::OneTablet >> LdapAuthProviderTest_StartTls::LdapFetchGroupsDisableRequestToAD [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithCustomGroupAttributeGood >> TxUsage::WriteToTopic_Demo_2_Query >> TKeyValueTest::TestCleanUpDataWithMockDisk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestCleanUpDataWithMockDisk [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! 
new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvCleanUpDataRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! 
new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvForceTabletDataCleanup ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:83:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:85:2115] Leader for TabletID 72057594037927937 is [7:88:2116] sender: [7:89:2057] recipient: [7:85:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:88:2116] Leader for TabletID 72057594037927937 is [7:88:2116] sender: [7:174:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTablet::TEvFollowerGcApplied ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:88:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:91:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:92:2057] recipient: [8:90:2119] Leader for TabletID 72057594037927937 is [8:93:2120] sender: [8:94:2057] recipient: [8:90:2119] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:93:2120] Leader for TabletID 72057594037927937 is [8:93:2120] sender: [8:179:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:92:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:95:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:96:2057] recipient: [9:94:2123] Leader for TabletID 72057594037927937 is [9:97:2124] sender: [9:98:2057] recipient: [9:94:2123] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! 
new actor is[9:97:2124] Leader for TabletID 72057594037927937 is [9:97:2124] sender: [9:183:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvCleanUpDataRequest ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:92:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:95:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:96:2057] recipient: [10:94:2123] Leader for TabletID 72057594037927937 is [10:97:2124] sender: [10:98:2057] recipient: [10:94:2123] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:97:2124] Leader for TabletID 72057594037927937 is [10:97:2124] sender: [10:183:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:94:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:97:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:98:2057] recipient: [11:96:2125] Leader for TabletID 72057594037927937 is [11:99:2126] sender: [11:100:2057] recipient: [11:96:2125] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:99:2126] Leader for TabletID 72057594037927937 is [11:99:2126] sender: [11:185:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12: ... 927937 is [35:102:2127] sender: [35:188:2057] recipient: [35:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:59:2057] recipient: [36:56:2099] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:59:2057] recipient: [36:56:2099] Leader for TabletID 72057594037927937 is [36:61:2101] sender: [36:62:2057] recipient: [36:56:2099] Leader for TabletID 72057594037927937 is [36:61:2101] sender: [36:79:2057] recipient: [36:17:2064] !Reboot 72057594037927937 (actor [36:61:2101]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [36:61:2101] sender: [36:98:2057] recipient: [36:41:2088] Leader for TabletID 72057594037927937 is [36:61:2101] sender: [36:101:2057] recipient: [36:17:2064] Leader for TabletID 72057594037927937 is [36:61:2101] sender: [36:102:2057] recipient: [36:100:2126] Leader for TabletID 72057594037927937 is [36:103:2127] sender: [36:104:2057] recipient: [36:100:2126] !Reboot 72057594037927937 (actor [36:61:2101]) rebooted! !Reboot 72057594037927937 (actor [36:61:2101]) tablet resolver refreshed! new actor is[36:103:2127] Leader for TabletID 72057594037927937 is [36:103:2127] sender: [36:189:2057] recipient: [36:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:59:2057] recipient: [37:55:2099] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:59:2057] recipient: [37:55:2099] Leader for TabletID 72057594037927937 is [37:61:2101] sender: [37:62:2057] recipient: [37:55:2099] Leader for TabletID 72057594037927937 is [37:61:2101] sender: [37:79:2057] recipient: [37:17:2064] !Reboot 72057594037927937 (actor [37:61:2101]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [37:61:2101] sender: [37:101:2057] recipient: [37:41:2088] Leader for TabletID 72057594037927937 is [37:61:2101] sender: [37:104:2057] recipient: [37:17:2064] Leader for TabletID 72057594037927937 is [37:61:2101] sender: [37:105:2057] recipient: [37:103:2129] Leader for TabletID 72057594037927937 is [37:106:2130] sender: [37:107:2057] recipient: [37:103:2129] !Reboot 72057594037927937 (actor [37:61:2101]) rebooted! !Reboot 72057594037927937 (actor [37:61:2101]) tablet resolver refreshed! new actor is[37:106:2130] Leader for TabletID 72057594037927937 is [37:106:2130] sender: [37:192:2057] recipient: [37:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:59:2057] recipient: [38:56:2099] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:59:2057] recipient: [38:56:2099] Leader for TabletID 72057594037927937 is [38:61:2101] sender: [38:62:2057] recipient: [38:56:2099] Leader for TabletID 72057594037927937 is [38:61:2101] sender: [38:79:2057] recipient: [38:17:2064] !Reboot 72057594037927937 (actor [38:61:2101]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [38:61:2101] sender: [38:101:2057] recipient: [38:41:2088] Leader for TabletID 72057594037927937 is [38:61:2101] sender: [38:104:2057] recipient: [38:17:2064] Leader for TabletID 72057594037927937 is [38:61:2101] sender: [38:105:2057] recipient: [38:103:2129] Leader for TabletID 72057594037927937 is [38:106:2130] sender: [38:107:2057] recipient: [38:103:2129] !Reboot 72057594037927937 (actor [38:61:2101]) rebooted! !Reboot 72057594037927937 (actor [38:61:2101]) tablet resolver refreshed! new actor is[38:106:2130] Leader for TabletID 72057594037927937 is [38:106:2130] sender: [38:192:2057] recipient: [38:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:59:2057] recipient: [39:56:2099] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:59:2057] recipient: [39:56:2099] Leader for TabletID 72057594037927937 is [39:61:2101] sender: [39:62:2057] recipient: [39:56:2099] Leader for TabletID 72057594037927937 is [39:61:2101] sender: [39:79:2057] recipient: [39:17:2064] !Reboot 72057594037927937 (actor [39:61:2101]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [39:61:2101] sender: [39:102:2057] recipient: [39:41:2088] Leader for TabletID 72057594037927937 is [39:61:2101] sender: [39:105:2057] recipient: [39:17:2064] Leader for TabletID 72057594037927937 is [39:61:2101] sender: [39:106:2057] recipient: [39:104:2129] Leader for TabletID 72057594037927937 is [39:107:2130] sender: [39:108:2057] recipient: [39:104:2129] !Reboot 72057594037927937 (actor [39:61:2101]) rebooted! !Reboot 72057594037927937 (actor [39:61:2101]) tablet resolver refreshed! new actor is[39:107:2130] Leader for TabletID 72057594037927937 is [39:107:2130] sender: [39:193:2057] recipient: [39:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:59:2057] recipient: [40:55:2099] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:59:2057] recipient: [40:55:2099] Leader for TabletID 72057594037927937 is [40:61:2101] sender: [40:62:2057] recipient: [40:55:2099] Leader for TabletID 72057594037927937 is [40:61:2101] sender: [40:79:2057] recipient: [40:17:2064] !Reboot 72057594037927937 (actor [40:61:2101]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [40:61:2101] sender: [40:103:2057] recipient: [40:41:2088] Leader for TabletID 72057594037927937 is [40:61:2101] sender: [40:106:2057] recipient: [40:17:2064] Leader for TabletID 72057594037927937 is [40:61:2101] sender: [40:107:2057] recipient: [40:105:2130] Leader for TabletID 72057594037927937 is [40:108:2131] sender: [40:109:2057] recipient: [40:105:2130] !Reboot 72057594037927937 (actor [40:61:2101]) rebooted! !Reboot 72057594037927937 (actor [40:61:2101]) tablet resolver refreshed! new actor is[40:108:2131] Leader for TabletID 72057594037927937 is [40:108:2131] sender: [40:128:2057] recipient: [40:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:59:2057] recipient: [41:55:2099] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:59:2057] recipient: [41:55:2099] Leader for TabletID 72057594037927937 is [41:61:2101] sender: [41:62:2057] recipient: [41:55:2099] Leader for TabletID 72057594037927937 is [41:61:2101] sender: [41:79:2057] recipient: [41:17:2064] !Reboot 72057594037927937 (actor [41:61:2101]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [41:61:2101] sender: [41:104:2057] recipient: [41:41:2088] Leader for TabletID 72057594037927937 is [41:61:2101] sender: [41:107:2057] recipient: [41:17:2064] Leader for TabletID 72057594037927937 is [41:61:2101] sender: [41:108:2057] recipient: [41:106:2131] Leader for TabletID 72057594037927937 is [41:109:2132] sender: [41:110:2057] recipient: [41:106:2131] !Reboot 72057594037927937 (actor [41:61:2101]) rebooted! !Reboot 72057594037927937 (actor [41:61:2101]) tablet resolver refreshed! new actor is[41:109:2132] Leader for TabletID 72057594037927937 is [41:109:2132] sender: [41:129:2057] recipient: [41:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [42:59:2057] recipient: [42:54:2099] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [42:59:2057] recipient: [42:54:2099] Leader for TabletID 72057594037927937 is [42:61:2101] sender: [42:62:2057] recipient: [42:54:2099] Leader for TabletID 72057594037927937 is [42:61:2101] sender: [42:79:2057] recipient: [42:17:2064] !Reboot 72057594037927937 (actor [42:61:2101]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [42:61:2101] sender: [42:107:2057] recipient: [42:41:2088] Leader for TabletID 72057594037927937 is [42:61:2101] sender: [42:110:2057] recipient: [42:17:2064] Leader for TabletID 72057594037927937 is [42:61:2101] sender: [42:111:2057] recipient: [42:109:2134] Leader for TabletID 72057594037927937 is [42:112:2135] sender: [42:113:2057] recipient: [42:109:2134] !Reboot 72057594037927937 (actor [42:61:2101]) rebooted! !Reboot 72057594037927937 (actor [42:61:2101]) tablet resolver refreshed! new actor is[42:112:2135] Leader for TabletID 72057594037927937 is [42:112:2135] sender: [42:198:2057] recipient: [42:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [43:59:2057] recipient: [43:56:2099] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [43:59:2057] recipient: [43:56:2099] Leader for TabletID 72057594037927937 is [43:61:2101] sender: [43:62:2057] recipient: [43:56:2099] Leader for TabletID 72057594037927937 is [43:61:2101] sender: [43:79:2057] recipient: [43:17:2064] !Reboot 72057594037927937 (actor [43:61:2101]) on event NKikimr::TEvKeyValue::TEvCleanUpDataRequest ! Leader for TabletID 72057594037927937 is [43:61:2101] sender: [43:107:2057] recipient: [43:41:2088] Leader for TabletID 72057594037927937 is [43:61:2101] sender: [43:110:2057] recipient: [43:17:2064] Leader for TabletID 72057594037927937 is [43:61:2101] sender: [43:111:2057] recipient: [43:109:2134] Leader for TabletID 72057594037927937 is [43:112:2135] sender: [43:113:2057] recipient: [43:109:2134] !Reboot 72057594037927937 (actor [43:61:2101]) rebooted! !Reboot 72057594037927937 (actor [43:61:2101]) tablet resolver refreshed! new actor is[43:112:2135] Leader for TabletID 72057594037927937 is [43:112:2135] sender: [43:198:2057] recipient: [43:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [44:59:2057] recipient: [44:55:2099] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [44:59:2057] recipient: [44:55:2099] Leader for TabletID 72057594037927937 is [44:61:2101] sender: [44:62:2057] recipient: [44:55:2099] Leader for TabletID 72057594037927937 is [44:61:2101] sender: [44:79:2057] recipient: [44:17:2064] !Reboot 72057594037927937 (actor [44:61:2101]) on event NKikimr::TEvKeyValue::TEvForceTabletDataCleanup ! Leader for TabletID 72057594037927937 is [44:61:2101] sender: [44:107:2057] recipient: [44:41:2088] Leader for TabletID 72057594037927937 is [44:61:2101] sender: [44:109:2057] recipient: [44:17:2064] Leader for TabletID 72057594037927937 is [44:61:2101] sender: [44:111:2057] recipient: [44:110:2134] Leader for TabletID 72057594037927937 is [44:112:2135] sender: [44:113:2057] recipient: [44:110:2134] !Reboot 72057594037927937 (actor [44:61:2101]) rebooted! !Reboot 72057594037927937 (actor [44:61:2101]) tablet resolver refreshed! new actor is[44:112:2135] Leader for TabletID 72057594037927937 is [44:112:2135] sender: [44:198:2057] recipient: [44:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [45:59:2057] recipient: [45:55:2099] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [45:59:2057] recipient: [45:55:2099] Leader for TabletID 72057594037927937 is [45:61:2101] sender: [45:62:2057] recipient: [45:55:2099] Leader for TabletID 72057594037927937 is [45:61:2101] sender: [45:79:2057] recipient: [45:17:2064] !Reboot 72057594037927937 (actor [45:61:2101]) on event NKikimr::TEvTablet::TEvFollowerGcApplied ! 
Leader for TabletID 72057594037927937 is [45:61:2101] sender: [45:112:2057] recipient: [45:41:2088] Leader for TabletID 72057594037927937 is [45:61:2101] sender: [45:115:2057] recipient: [45:17:2064] Leader for TabletID 72057594037927937 is [45:61:2101] sender: [45:116:2057] recipient: [45:114:2138] Leader for TabletID 72057594037927937 is [45:117:2139] sender: [45:118:2057] recipient: [45:114:2138] !Reboot 72057594037927937 (actor [45:61:2101]) rebooted! !Reboot 72057594037927937 (actor [45:61:2101]) tablet resolver refreshed! new actor is[45:117:2139] Leader for TabletID 72057594037927937 is [45:117:2139] sender: [45:203:2057] recipient: [45:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [46:59:2057] recipient: [46:55:2099] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [46:59:2057] recipient: [46:55:2099] Leader for TabletID 72057594037927937 is [46:61:2101] sender: [46:62:2057] recipient: [46:55:2099] Leader for TabletID 72057594037927937 is [46:61:2101] sender: [46:79:2057] recipient: [46:17:2064] |83.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |83.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |83.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |83.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |83.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |83.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |83.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |83.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |83.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots >> SystemView::QueryStats [GOOD] >> SystemView::QueryStatsFields |83.4%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |83.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |83.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots >> TxUsage::TwoSessionOneConsumer_Query [GOOD] >> TxUsage::WriteToTopic_Demo_4_Table [GOOD] >> TxUsage::WriteToTopic_Demo_10_Table >> TxUsage::WriteToTopic_Demo_4_Query >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithCustomGroupAttributeGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsUseInvalidSearchFilterBad |83.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |83.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |83.5%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestGetStatusWorks |83.5%| [LD] {default-linux-x86_64, release, asan} 
$(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |83.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |83.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore >> PgCatalog::PgTables [GOOD] >> SystemView::PDisksFields [GOOD] >> SystemView::GroupsFields >> TxUsage::WriteToTopic_Demo_21_RestartNo_Table [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartNo_Query >> TxUsage::Sinks_Oltp_WriteToTopic_2_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> PgCatalog::PgTables [GOOD] Test command err: Trying to start YDB, gRPC: 12647, MsgBus: 14213 2025-06-25T14:36:54.475845Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895950802802948:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:54.475899Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000aa3/r3tmp/tmp5ArRN5/pdisk_1.dat 2025-06-25T14:36:54.904027Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:54.904106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:54.906570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:36:54.961369Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12647, node 1 2025-06-25T14:36:55.101001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:55.101025Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:55.101044Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:55.101218Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14213 2025-06-25T14:36:55.496738Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14213 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:36:55.887162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:36:55.917039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 1042 2025-06-25T14:36:57.782416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce_pgbpchar_17472595041006102391_17823623939509273229' Typemod mismatch, got type pgbpchar for column value, type mod , but expected 2 --!syntax_pg INSERT INTO Coerce_pgbpchar_17472595041006102391_17823623939509273229 (key, value) VALUES ( '0'::int2, 'abcd'::bpchar ) 2025-06-25T14:36:58.006436Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895967982672821:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:58.006571Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:58.006878Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895967982672833:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:58.020350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:58.035080Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895967982672835:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:36:58.118475Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895967982672886:2394] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:36:58.495183Z node 1 :TX_DATASHARD CRIT: execute_kqp_data_tx_unit.cpp:449: Exception while executing KQP transaction [0:281474976715663] at 72075186224037888: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-25T14:36:58.497376Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715663 at tablet 72075186224037888 status: EXEC_ERROR errors: UNKNOWN (Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ) | 2025-06-25T14:36:58.497600Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [1:7519895967982672937:2300] TxId: 281474976715663. Ctx: { TraceId: 01jykragrj6z9e7f047bv3188s, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2ZkN2I1NjktNjE4ZTMzZGUtMWU0NjJiNmUtZmUwMzkwMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ; 2025-06-25T14:36:58.508290Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=N2ZkN2I1NjktNjE4ZTMzZGUtMWU0NjJiNmUtZmUwMzkwMzQ=, ActorId: [1:7519895963687705522:2300], ActorState: ExecuteState, TraceId: 01jykragrj6z9e7f047bv3188s, Create QueryResponse for error on request, msg:
: Error: Error executing transaction (ExecError): Execution failed
: Error: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-25T14:36:58.560741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce__pgbpchar_17472595041006102391_5352544928909966465' Typemod mismatch, got type _pgbpchar for column value, type mod , but expected 2 --!syntax_pg INSERT INTO Coerce__pgbpchar_17472595041006102391_5352544928909966465 (key, value) VALUES ( '0'::int2, '{abcd,abcd}'::_bpchar ) 2025-06-25T14:36:58.930140Z node 1 :TX_DATASHARD CRIT: execute_kqp_data_tx_unit.cpp:449: Exception while executing KQP transaction [0:281474976715668] at 72075186224037889: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-25T14:36:58.932111Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715668 at tablet 72075186224037889 status: EXEC_ERROR errors: UNKNOWN (Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ) | 2025-06-25T14:36:58.932329Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [1:7519895967982673072:2334] TxId: 281474976715668. Ctx: { TraceId: 01jykrahdsbgrmfk2b7zpy4x56, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTUxZTNjNjktZjRjMGZmOGUtZmNmNDEzMDctMjNlZTk1YmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ; 2025-06-25T14:36:58.932553Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=OTUxZTNjNjktZjRjMGZmOGUtZmNmNDEzMDctMjNlZTk1YmE=, ActorId: [1:7519895967982673029:2334], ActorState: ExecuteState, TraceId: 01jykrahdsbgrmfk2b7zpy4x56, Create QueryResponse for error on request, msg:
: Error: Error executing transaction (ExecError): Execution failed
: Error: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 1042 2025-06-25T14:36:58.978297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, firs ... T_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:00.235924Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:03.024493Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519897000743254446:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:03.024614Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:06.053964Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897035102993438:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:06.054146Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:06.058672Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897035102993459:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:06.066759Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:06.116651Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519897035102993461:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:41:06.218051Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519897035102993516:2349] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 62067, MsgBus: 31396 2025-06-25T14:41:08.422040Z node 14 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[14:7519897042946818625:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:08.422117Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000aa3/r3tmp/tmpjBvGzo/pdisk_1.dat 2025-06-25T14:41:08.690538Z node 14 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:08.710613Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:08.710772Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:08.716156Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62067, node 14 2025-06-25T14:41:08.789205Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:08.789236Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:08.789256Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:08.789465Z node 14 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31396 2025-06-25T14:41:09.447363Z node 14 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31396 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:41:09.676102Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:13.424550Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[14:7519897042946818625:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:13.424653Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:14.396333Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7519897068716622991:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:14.396419Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7519897068716623010:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:14.396544Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:14.402074Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:14.420785Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [14:7519897068716623027:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:41:14.516673Z node 14 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [14:7519897068716623082:2343] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:14.714771Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:14.823560Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:19.674308Z node 14 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 14, TabletId: 72075186224037888 not found 2025-06-25T14:41:19.706813Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:20.123072Z node 14 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [14:7519897094486427415:2408], TxId: 281474976715672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=14&id=ZTA0Y2JlMzItYWE3YWNiOGQtOGEzMjk4YWYtYWYxODY3Nzc=. TraceId : 01jykrjgd828aww88gma46cn07. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED DEFAULT_ERROR: {
: Error: Terminate was called, reason(57): ERROR: invalid input syntax for type boolean: "pg_proc" }. 2025-06-25T14:41:20.123758Z node 14 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [14:7519897094486427416:2409], TxId: 281474976715672, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jykrjgd828aww88gma46cn07. SessionId : ydb://session/3?node_id=14&id=ZTA0Y2JlMzItYWE3YWNiOGQtOGEzMjk4YWYtYWYxODY3Nzc=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [14:7519897094486427412:2405], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-25T14:41:20.124580Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=ZTA0Y2JlMzItYWE3YWNiOGQtOGEzMjk4YWYtYWYxODY3Nzc=, ActorId: [14:7519897090191460107:2405], ActorState: ExecuteState, TraceId: 01jykrjgd828aww88gma46cn07, Create QueryResponse for error on request, msg: >> TxUsage::Sinks_Oltp_WriteToTopic_3_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_StartTls::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] Test command err: 2025-06-25T14:40:45.083915Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896944929617585:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:45.084011Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ee6/r3tmp/tmpLtIAbL/pdisk_1.dat 2025-06-25T14:40:45.405982Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896944929617561:2080] 1750862445080647 != 1750862445080650 2025-06-25T14:40:45.419893Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26114, node 1 2025-06-25T14:40:45.466619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:45.466707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:45.468409Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:45.595400Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:45.595418Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:45.595429Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:45.595541Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:45.864644Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:45.868935Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:45.868969Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:45.870210Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:14097, port: 14097 2025-06-25T14:40:45.870279Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:45.932834Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:45.986129Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: 
(member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:40:45.986710Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:40:45.986759Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:46.037152Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:46.084670Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:46.095267Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****GMAg (92CA952E) () has now valid token of ldapuser@ldap 2025-06-25T14:40:46.095727Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:50.087537Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896944929617585:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:50.087642Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:40:51.099794Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****GMAg (92CA952E) 2025-06-25T14:40:51.100154Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:14097, port: 14097 2025-06-25T14:40:51.100219Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:51.152773Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:51.156705Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldaps://localhost:14097 return no entries 2025-06-25T14:40:51.157256Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****GMAg (92CA952E) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. 
LDAP search for filter uid=ldapuser on server ldaps://localhost:14097 return no entries)' 2025-06-25T14:40:55.105086Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****GMAg (92CA952E) 2025-06-25T14:40:57.235663Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896994237474683:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:57.236261Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ee6/r3tmp/tmpSB2F4r/pdisk_1.dat 2025-06-25T14:40:57.497417Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:57.497507Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:57.512461Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896994237474513:2080] 1750862457056938 != 1750862457056941 2025-06-25T14:40:57.534906Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:57.536780Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19102, node 2 2025-06-25T14:40:57.704020Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:57.704038Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:57.704043Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:57.704137Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:58.105191Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:58.402065Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:58.406906Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:58.406936Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:58.407610Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:1749, port: 1749 2025-06-25T14:40:58.407674Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:58.476818Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:58.480750Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldaps://localhost:1749. 
Server is busy 2025-06-25T14:40:58.481202Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****WQfg (518D8B6F) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldaps://localhost:1749. Server is busy)' 2025-06-25T14:40:58.481457Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:58.481477Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:58.482341Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:1749, port: 1749 2025-06-25T14:40:58.482411Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:58.544848Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:58.545437Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldaps://localhost:1749. Server is busy 2025-06-25T14:40:58.545764Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****WQfg (518D8B6F) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldaps://localhost:1749. Server is busy)' 2025-06-25T14:41:00.096197Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****WQfg (518D8B6F) 2025-06-25T14:41:00.096522Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:00.096541Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:00.105408Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:1749, port: 1749 2025-06-25T14:41:00.105503Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:00.161106Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:00.162096Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldaps://localhost:1749. Server is busy 2025-06-25T14:41:00.162508Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****WQfg (518D ... 
impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:09.516051Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:09.516679Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:10225, port: 10225 2025-06-25T14:41:09.516741Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:41:09.531053Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:09.576925Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:09.624606Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:41:09.669243Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****skgQ (E17D7247) () has now valid token of ldapuser@ldap 2025-06-25T14:41:12.535803Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897060470940397:2170];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:12.536353Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ee6/r3tmp/tmpxXqkE8/pdisk_1.dat 2025-06-25T14:41:12.670200Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:12.671177Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519897060470940253:2080] 1750862472520300 != 1750862472520303 TServer::EnableGrpc on GrpcPort 32601, node 4 2025-06-25T14:41:12.695317Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:12.695605Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:12.703730Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:12.739730Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:12.739753Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:12.739764Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:12.739913Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:12.892724Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:41:12.895636Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:12.895663Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 
2025-06-25T14:41:12.896607Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:21643, port: 21643 2025-06-25T14:41:12.896707Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:41:12.908228Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:12.956737Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:13.001107Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****ydaQ (27DB4D7C) () has now valid token of ldapuser@ldap 2025-06-25T14:41:15.686157Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519897071461525028:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:15.686244Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ee6/r3tmp/tmpyKSrlB/pdisk_1.dat 2025-06-25T14:41:15.780229Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:15.781330Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519897071461525009:2080] 1750862475685647 != 1750862475685650 TServer::EnableGrpc on GrpcPort 6726, node 5 2025-06-25T14:41:15.819717Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:15.819780Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:15.821024Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:15.831885Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:15.832090Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:15.832099Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:15.832247Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:15.926258Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:41:15.929634Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:15.929668Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:15.930379Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:21475, port: 21475 2025-06-25T14:41:15.930464Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:41:15.945314Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:15.988757Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: 
dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-25T14:41:16.037480Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:41:16.039989Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:41:16.040049Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-25T14:41:16.088695Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-25T14:41:16.132677Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-25T14:41:16.133709Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****B7qA (FBAC6EF6) () has now valid token of ldapuser@ldap 2025-06-25T14:41:18.889814Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519897085367482515:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:18.889917Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ee6/r3tmp/tmpEnHIe2/pdisk_1.dat 2025-06-25T14:41:19.017463Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:19.018403Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519897085367482494:2080] 1750862478889086 != 1750862478889089 2025-06-25T14:41:19.038016Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:19.038107Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:19.039853Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18271, node 6 2025-06-25T14:41:19.089115Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:19.089144Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:19.089154Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:19.089326Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:19.198380Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 
2025-06-25T14:41:19.201728Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:19.201773Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:19.202538Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:28471, port: 28471 2025-06-25T14:41:19.202626Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:41:19.221125Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:19.268859Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: &(uid=ldapuser)(), attributes: memberOf 2025-06-25T14:41:19.268952Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:28471. Bad search filter 2025-06-25T14:41:19.269423Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****fApg (D82F9F38) () has now permanent error message 'Could not login via LDAP (Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:28471. Bad search filter)' ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] Test command err: 2025-06-25T14:40:45.084037Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896944461366049:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:45.084110Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f62/r3tmp/tmpJhxyC6/pdisk_1.dat 2025-06-25T14:40:45.432975Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13921, node 1 2025-06-25T14:40:45.450711Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:45.450826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:45.454193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:45.595420Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:45.595449Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:45.595465Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:45.595590Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:45.832454Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:45.838525Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 
2025-06-25T14:40:45.838567Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:45.839406Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:9359, port: 9359 2025-06-25T14:40:45.842237Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:45.849331Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-25T14:40:45.898058Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****Yz9w (1FC6C7C8) () has now valid token of ldapuser@ldap 2025-06-25T14:40:47.975271Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896950306446281:2236];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f62/r3tmp/tmpnqjVMt/pdisk_1.dat 2025-06-25T14:40:48.010525Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:48.151782Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:48.155028Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896950306446070:2080] 1750862447942323 != 1750862447942326 2025-06-25T14:40:48.164441Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:48.164537Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:48.165852Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14646, node 2 2025-06-25T14:40:48.237933Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:48.237983Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:48.237993Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:48.238093Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:48.388422Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:48.389817Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:48.389840Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:48.390446Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:18425, port: 18425 2025-06-25T14:40:48.390535Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=invalidRobouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:48.395234Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn 
cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:18425. Invalid credentials 2025-06-25T14:40:48.395430Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****ySpA (676B4EF0) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:18425. Invalid credentials)' 2025-06-25T14:40:51.583119Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519896968470893851:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:51.583183Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f62/r3tmp/tmpfKuD4h/pdisk_1.dat 2025-06-25T14:40:51.689874Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:51.692465Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519896968470893825:2080] 1750862451580996 != 1750862451580999 TServer::EnableGrpc on GrpcPort 29251, node 3 2025-06-25T14:40:51.717588Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:51.717671Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:51.719671Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:51.840859Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:51.840880Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:51.840887Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:51.841010Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:51.990113Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:51.990698Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:51.990721Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:51.991352Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:14972, port: 14972 2025-06-25T14:40:51.991407Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:51.994344Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:14972. Invalid credentials 2025-06-25T14:40:51.994663Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****wFSA (89AAB450) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:14972. 
Invalid credentials)' test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f62/r3tmp/tmppifIGJ/pdisk_1.dat 2025-06-25T14:40:55.079562Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:40:55.089379Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:55.090722Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519896980952511997:2080] 1750862454878303 != 1750862454878306 2025-06-25T14:40:55.102892Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:55.102976Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 10508, node 4 2025-06-25T14:40:55.104746Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:55.165830Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:55.165848Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:55.165855Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:55.165951Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:55.265512Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:55.269120Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:55.269151Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:55.269831Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:63224, port: 63224 2025-06-25T14:40:55.269932Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:55.272973Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:55.273301Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:63224 return no entries 2025-06-25T14:40:55.273509Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****CYDg (3ABCE7AA) () has now permanent error message 'Could no ... 
00.186413Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:00.186420Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:00.186551Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:00.280396Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:41:00.285040Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:00.285077Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:00.285786Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:3282, port: 3282 2025-06-25T14:41:00.285855Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:00.305012Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:00.356843Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:41:00.357524Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:41:00.357589Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:00.400652Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:00.452590Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:00.454397Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****0ExA (AC3C0421) () has now valid token of ldapuser@ldap 2025-06-25T14:41:00.808196Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:04.788627Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519897004939269867:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:04.788682Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:05.811713Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****0ExA (AC3C0421) 2025-06-25T14:41:05.826934Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: 
ldap://localhost:3282, port: 3282 2025-06-25T14:41:05.827024Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:05.838264Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:05.892552Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:41:05.893062Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:41:05.893093Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:05.937859Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:05.992584Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:05.994432Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****0ExA (AC3C0421) () has now valid token of ldapuser@ldap 2025-06-25T14:41:08.820052Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****0ExA (AC3C0421) 2025-06-25T14:41:08.820153Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:3282, port: 3282 2025-06-25T14:41:08.820230Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:08.832053Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:08.876605Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:41:08.877202Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:41:08.877242Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:08.921316Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:08.968646Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:08.973271Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****0ExA (AC3C0421) () has now valid 
token of ldapuser@ldap 2025-06-25T14:41:11.193640Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519897053838302068:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:11.193964Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f62/r3tmp/tmpSGRQli/pdisk_1.dat 2025-06-25T14:41:11.373461Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:11.389000Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:11.389114Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:11.392329Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3928, node 6 2025-06-25T14:41:11.440138Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:11.440159Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:11.440166Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:11.440339Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:11.592441Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:41:11.595312Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:11.595350Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:11.596020Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:28901, port: 28901 2025-06-25T14:41:11.596096Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:11.604444Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:11.653124Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****BcUQ (ECA1305F) () has now valid token of ldapuser@ldap 2025-06-25T14:41:12.204846Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:15.202108Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****BcUQ (ECA1305F) 2025-06-25T14:41:15.202215Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:28901, port: 28901 2025-06-25T14:41:15.202306Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:15.214343Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 
2025-06-25T14:41:15.260821Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****BcUQ (ECA1305F) () has now valid token of ldapuser@ldap 2025-06-25T14:41:16.195965Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519897053838302068:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:16.196032Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:18.203465Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****BcUQ (ECA1305F) 2025-06-25T14:41:18.203546Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:28901, port: 28901 2025-06-25T14:41:18.203617Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:18.216134Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:18.264944Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****BcUQ (ECA1305F) () has now valid token of ldapuser@ldap >> SystemView::AuthOwners [GOOD] >> SystemView::AuthOwners_Access >> TxUsage::WriteToTopic_Invalid_Session_Query [GOOD] >> TxUsage::WriteToTopic_Two_WriteSession_Table >> TxUsage::Sinks_Oltp_WriteToTopic_1_Query [GOOD] >> SystemView::QueryStatsFields [GOOD] >> SystemView::PartitionStatsTtlFields |83.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/apps/pgwire/pgwire |83.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/pgwire/pgwire |83.5%| [LD] {RESULT} $(B)/ydb/apps/pgwire/pgwire >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Table >> SystemView::TopPartitionsByCpuFollowers [GOOD] >> SystemView::SystemViewFailOps >> LdapAuthProviderTest_StartTls::LdapRefreshRemoveUserBad [GOOD] >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoWithError >> SystemView::AuthUsers_LockUnlock [GOOD] >> SystemView::AuthUsers_Access >> BasicStatistics::ServerlessGlobalIndex [GOOD] >> TxUsage::WriteToTopic_Demo_1_Table [GOOD] >> BasicUsage::ReadWithoutConsumerWithRestarts [GOOD] >> BasicUsage::ReadWithRestarts ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::ServerlessGlobalIndex [GOOD] Test command err: 2025-06-25T14:38:39.615623Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:39.615944Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:39.616047Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ce4/r3tmp/tmpgyoCH8/pdisk_1.dat 2025-06-25T14:38:39.987985Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29997, node 1 2025-06-25T14:38:40.221367Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:40.221431Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:40.221473Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:40.222084Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:40.224424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:40.318449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:40.318594Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:40.333905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61321 2025-06-25T14:38:40.886056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:43.988876Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:44.026343Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:44.026469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:44.066820Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:44.069530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:44.281684Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:44.317458Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.317987Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.318622Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.318799Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.319027Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.319131Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.319201Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.319297Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.319362Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:44.504162Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:44.504276Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:44.517437Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:44.657134Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:44.703736Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:44.703836Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:44.739410Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:44.739612Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:44.739815Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:44.739870Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:44.739934Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:44.739988Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:44.740051Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:44.740104Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:44.740667Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:44.763894Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:44.764028Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1794:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:44.771491Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:38:44.776065Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1830:2584] 2025-06-25T14:38:44.776817Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1830:2584], schemeshard id = 72075186224037897 2025-06-25T14:38:44.790410Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-25T14:38:44.809902Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:44.809957Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:44.810013Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-25T14:38:44.821568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:44.830203Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:44.830368Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:45.023233Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:45.178754Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:45.236706Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T14:38:45.767650Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:45.802915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:46.411913Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:46.564479Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7894: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-25T14:38:46.564548Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7910: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-25T14:38:46.564634Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:2510:2907], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-25T14:38:46.566138Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2512:2909] 2025-06-25T14:38:46.566420Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2512:2909], schemeshard id = 72075186224037899 2025-06-25T14:38:47.674463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2634:3200], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:47.674654Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06 ... :41:19.344222Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7785:5543], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:19.344418Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7795:5548], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:19.344675Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Shared, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:19.360030Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:19.455305Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7799:5551], DatabaseId: /Root/Shared, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T14:41:19.546612Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [2:7894:5599]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:19.546871Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ] 2025-06-25T14:41:19.546910Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [2:7894:5599], StatRequests.size() = 1 2025-06-25T14:41:19.626176Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7899:5601] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:19.673890Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:7928:5616]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:19.674154Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-25T14:41:19.674424Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224037894] EvRequestStats, node id = 2, schemeshard count = 1, urgent = 0 2025-06-25T14:41:19.674477Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T14:41:19.674620Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:41:19.674689Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:7928:5616], StatRequests.size() = 1 2025-06-25T14:41:19.801011Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NmIwMGE4MTktZjdmNTA0ZDktN2FjN2E3YTEtOWJlN2YwZmU=, TxId: 2025-06-25T14:41:19.801093Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NmIwMGE4MTktZjdmNTA0ZDktN2FjN2E3YTEtOWJlN2YwZmU=, TxId: 2025-06-25T14:41:19.801759Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:19.816054Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:41:19.816121Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:41:19.870386Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T14:41:19.870458Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T14:41:19.935578Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:3142:3132], schemeshard count = 1 2025-06-25T14:41:20.237542Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037899 2025-06-25T14:41:20.237616Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 221.000000s, at schemeshard: 72075186224037899 2025-06-25T14:41:20.237855Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 50 2025-06-25T14:41:20.251794Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T14:41:21.147538Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:7991:5655]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:21.147810Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-25T14:41:21.147854Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:7991:5655], StatRequests.size() = 1 2025-06-25T14:41:22.402214Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:41:22.413288Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:22.413348Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:22.413392Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 4] is data table. 2025-06-25T14:41:22.413437Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 4] 2025-06-25T14:41:22.413831Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-25T14:41:22.416331Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:41:22.427942Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NzQ2OTY0ZGQtOTUzNzJiMjMtMzg0ZjI1YjgtZDdiOTk4NTg=, TxId: 2025-06-25T14:41:22.428004Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NzQ2OTY0ZGQtOTUzNzJiMjMtMzg0ZjI1YjgtZDdiOTk4NTg=, TxId: 2025-06-25T14:41:22.428418Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:22.442643Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 4] 2025-06-25T14:41:22.442693Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:41:22.499745Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:8057:5695]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:22.500010Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-25T14:41:22.500050Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:8057:5695], StatRequests.size() = 1 2025-06-25T14:41:23.902289Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:8103:5720]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:23.902617Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-25T14:41:23.902675Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:8103:5720], StatRequests.size() = 1 2025-06-25T14:41:25.153109Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 2 2025-06-25T14:41:25.153597Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:41:25.153926Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:41:25.169145Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:25.169216Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:25.169262Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is data table. 2025-06-25T14:41:25.169298Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T14:41:25.169555Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-25T14:41:25.172385Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:41:25.200817Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NGY1MTE5ODYtN2M4N2E4MGQtN2Y5ZTIyMTAtMzljMThkMWQ=, TxId: 2025-06-25T14:41:25.200901Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NGY1MTE5ODYtN2M4N2E4MGQtN2Y5ZTIyMTAtMzljMThkMWQ=, TxId: 2025-06-25T14:41:25.201560Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:25.225721Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T14:41:25.225779Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:41:25.302721Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:8167:5757]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:25.303007Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-25T14:41:25.303054Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:8167:5757], StatRequests.size() = 1 >> TxUsage::WriteToTopic_Demo_1_Query >> SystemView::TopPartitionsByCpuRanges [GOOD] >> SystemView::TopPartitionsByTliFields >> TxUsage::WriteToTopic_Demo_3_Query [GOOD] >> SystemView::GroupsFields [GOOD] >> SystemView::Describe >> ttl_delete_s3.py::TestDeleteS3Ttl::test_data_unchanged_after_ttl_change [GOOD] >> ttl_delete_s3.py::TestDeleteS3Ttl::test_delete_s3_tiering >> TxUsage::WriteToTopic_Demo_10_Table [GOOD] >> TxUsage::WriteToTopic_Demo_32_Table >> TxUsage::WriteToTopic_Demo_10_Query >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v1] [GOOD] >> TxUsage::WriteToTopic_Demo_2_Query [GOOD] >> HttpRequest::ProbeServerless [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v0] >> TKeyValueTest::TestGetStatusWorks [GOOD] >> BasicStatistics::Serverless [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartNo_Query [GOOD] >> TxUsage::WriteToTopic_Demo_24_Table |83.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/stress_tool/ydb_stress_tool |83.5%| [LD] {RESULT} $(B)/ydb/tools/stress_tool/ydb_stress_tool |83.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/stress_tool/ydb_stress_tool >> ShowCreateView::WithTwoTablePathPrefixes [GOOD] >> SystemView::AuthGroups >> KqpQueryPerf::UpdateOn+QueryService-UseSink >> KqpQueryPerf::IndexInsert-QueryService-UseSink >> KqpQueryPerf::Replace-QueryService+UseSink >> KqpQueryPerf::IndexReplace-QueryService-UseSink >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup-QueryService >> KqpQueryPerf::Insert+QueryService-UseSink >> KqpWorkload::STOCK >> KqpQueryPerf::IndexLookupJoin+EnableStreamLookup-QueryService >> TxUsage::WriteToTopic_Demo_4_Query [GOOD] >> KqpQueryPerf::IndexInsert+QueryService-UseSink >> KqpQueryPerf::IdxLookupJoinThreeWay+QueryService >> SystemView::SystemViewFailOps [GOOD] 
>> SystemView::TabletsFields ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestGetStatusWorks [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:86:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:89:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:89:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:89:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:93:2057] recipient: [11:91:2119] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:95:2057] recipient: [11:91:2119] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! new actor is[11:94:2120] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... or TabletID 72057594037927937 is [29:59:2099] sender: [29:92:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:59:2099] sender: [29:93:2057] recipient: [29:91:2119] Leader for TabletID 72057594037927937 is [29:94:2120] sender: [29:95:2057] recipient: [29:91:2119] !Reboot 72057594037927937 (actor [29:59:2099]) rebooted! !Reboot 72057594037927937 (actor [29:59:2099]) tablet resolver refreshed! new actor is[29:94:2120] Leader for TabletID 72057594037927937 is [29:94:2120] sender: [29:180:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:57:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:60:2057] recipient: [30:52:2097] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:77:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:90:2057] recipient: [30:38:2085] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:92:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:59:2099] sender: [30:94:2057] recipient: [30:93:2119] Leader for TabletID 72057594037927937 is [30:95:2120] sender: [30:96:2057] recipient: [30:93:2119] !Reboot 72057594037927937 (actor [30:59:2099]) rebooted! !Reboot 72057594037927937 (actor [30:59:2099]) tablet resolver refreshed! new actor is[30:95:2120] Leader for TabletID 72057594037927937 is [30:95:2120] sender: [30:181:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:57:2057] recipient: [31:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:57:2057] recipient: [31:53:2097] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:60:2057] recipient: [31:53:2097] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:77:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:93:2057] recipient: [31:38:2085] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:96:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:59:2099] sender: [31:97:2057] recipient: [31:95:2122] Leader for TabletID 72057594037927937 is [31:98:2123] sender: [31:99:2057] recipient: [31:95:2122] !Reboot 72057594037927937 (actor [31:59:2099]) rebooted! !Reboot 72057594037927937 (actor [31:59:2099]) tablet resolver refreshed! new actor is[31:98:2123] Leader for TabletID 72057594037927937 is [31:98:2123] sender: [31:184:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:57:2057] recipient: [32:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:57:2057] recipient: [32:53:2097] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:60:2057] recipient: [32:53:2097] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:77:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:59:2099]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:93:2057] recipient: [32:38:2085] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:96:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:59:2099] sender: [32:97:2057] recipient: [32:95:2122] Leader for TabletID 72057594037927937 is [32:98:2123] sender: [32:99:2057] recipient: [32:95:2122] !Reboot 72057594037927937 (actor [32:59:2099]) rebooted! !Reboot 72057594037927937 (actor [32:59:2099]) tablet resolver refreshed! 
new actor is[32:98:2123] Leader for TabletID 72057594037927937 is [32:98:2123] sender: [32:184:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:57:2057] recipient: [33:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:57:2057] recipient: [33:53:2097] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:60:2057] recipient: [33:53:2097] Leader for TabletID 72057594037927937 is [33:59:2099] sender: [33:77:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:57:2057] recipient: [34:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:57:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:60:2057] recipient: [34:53:2097] Leader for TabletID 72057594037927937 is [34:59:2099] sender: [34:77:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:57:2057] recipient: [35:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:57:2057] recipient: [35:53:2097] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:60:2057] recipient: [35:53:2097] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:77:2057] recipient: [35:14:2061] !Reboot 72057594037927937 (actor [35:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:79:2057] recipient: [35:38:2085] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:82:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [35:59:2099] sender: [35:83:2057] recipient: [35:81:2112] Leader for TabletID 72057594037927937 is [35:84:2113] sender: [35:85:2057] recipient: [35:81:2112] !Reboot 72057594037927937 (actor [35:59:2099]) rebooted! !Reboot 72057594037927937 (actor [35:59:2099]) tablet resolver refreshed! new actor is[35:84:2113] Leader for TabletID 72057594037927937 is [35:84:2113] sender: [35:170:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:57:2057] recipient: [36:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:57:2057] recipient: [36:53:2097] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:60:2057] recipient: [36:53:2097] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:77:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:79:2057] recipient: [36:38:2085] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:82:2057] recipient: [36:81:2112] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:83:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:84:2113] sender: [36:85:2057] recipient: [36:81:2112] !Reboot 72057594037927937 (actor [36:59:2099]) rebooted! !Reboot 72057594037927937 (actor [36:59:2099]) tablet resolver refreshed! 
new actor is[36:84:2113] Leader for TabletID 72057594037927937 is [36:84:2113] sender: [36:170:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:57:2057] recipient: [37:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:57:2057] recipient: [37:54:2097] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:60:2057] recipient: [37:54:2097] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:77:2057] recipient: [37:14:2061] !Reboot 72057594037927937 (actor [37:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:80:2057] recipient: [37:38:2085] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:83:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:84:2057] recipient: [37:82:2112] Leader for TabletID 72057594037927937 is [37:85:2113] sender: [37:86:2057] recipient: [37:82:2112] !Reboot 72057594037927937 (actor [37:59:2099]) rebooted! !Reboot 72057594037927937 (actor [37:59:2099]) tablet resolver refreshed! new actor is[37:85:2113] Leader for TabletID 72057594037927937 is [37:85:2113] sender: [37:171:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:57:2057] recipient: [38:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:57:2057] recipient: [38:53:2097] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:60:2057] recipient: [38:53:2097] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:77:2057] recipient: [38:14:2061] !Reboot 72057594037927937 (actor [38:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:82:2057] recipient: [38:38:2085] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:85:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:86:2057] recipient: [38:84:2114] Leader for TabletID 72057594037927937 is [38:87:2115] sender: [38:88:2057] recipient: [38:84:2114] !Reboot 72057594037927937 (actor [38:59:2099]) rebooted! !Reboot 72057594037927937 (actor [38:59:2099]) tablet resolver refreshed! new actor is[38:87:2115] Leader for TabletID 72057594037927937 is [38:87:2115] sender: [38:173:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:57:2057] recipient: [39:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:57:2057] recipient: [39:53:2097] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:60:2057] recipient: [39:53:2097] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:77:2057] recipient: [39:14:2061] !Reboot 72057594037927937 (actor [39:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:82:2057] recipient: [39:38:2085] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:85:2057] recipient: [39:84:2114] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:86:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [39:87:2115] sender: [39:88:2057] recipient: [39:84:2114] !Reboot 72057594037927937 (actor [39:59:2099]) rebooted! !Reboot 72057594037927937 (actor [39:59:2099]) tablet resolver refreshed! 
new actor is[39:87:2115] Leader for TabletID 72057594037927937 is [39:87:2115] sender: [39:173:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:57:2057] recipient: [40:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:57:2057] recipient: [40:53:2097] Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:60:2057] recipient: [40:53:2097] Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:77:2057] recipient: [40:14:2061] !Reboot 72057594037927937 (actor [40:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:83:2057] recipient: [40:38:2085] Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:86:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:87:2057] recipient: [40:85:2114] Leader for TabletID 72057594037927937 is [40:88:2115] sender: [40:89:2057] recipient: [40:85:2114] !Reboot 72057594037927937 (actor [40:59:2099]) rebooted! !Reboot 72057594037927937 (actor [40:59:2099]) tablet resolver refreshed! new actor is[40:88:2115] Leader for TabletID 72057594037927937 is [40:88:2115] sender: [40:174:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:57:2057] recipient: [41:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:57:2057] recipient: [41:54:2097] Leader for TabletID 72057594037927937 is [41:59:2099] sender: [41:60:2057] recipient: [41:54:2097] Leader for TabletID 72057594037927937 is [41:59:2099] sender: [41:77:2057] recipient: [41:14:2061] >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Table >> TxUsage::WriteToTopic_Two_WriteSession_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::ProbeServerless [GOOD] Test command err: 2025-06-25T14:38:42.365295Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:42.365522Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:42.365614Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001cb5/r3tmp/tmpRSVmIw/pdisk_1.dat 2025-06-25T14:38:42.698844Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26083, node 1 2025-06-25T14:38:42.910795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:42.910860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:42.910900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:42.911483Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:42.918431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:43.018647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:43.018775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:43.033777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8737 2025-06-25T14:38:43.530724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:46.453826Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:46.489343Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:46.489456Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:46.527842Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:46.529384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:46.732502Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:46.767215Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.767916Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.768396Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.768550Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.768807Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.768923Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.769020Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.769100Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.769233Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.953954Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:46.954097Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:46.967001Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:47.131785Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:47.183341Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:47.183440Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:47.222799Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:47.223105Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:47.223354Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:47.223423Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:47.223484Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:47.223560Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:47.223617Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:47.223673Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:47.224493Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:47.252430Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:47.252547Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1797:2560], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:47.257470Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1807:2568] 2025-06-25T14:38:47.263048Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1843:2583] 2025-06-25T14:38:47.263693Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1843:2583], schemeshard id = 72075186224037897 2025-06-25T14:38:47.269875Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-25T14:38:47.319003Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:47.319075Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:47.319144Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-25T14:38:47.346816Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:47.355878Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:47.356038Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:47.538596Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:47.677474Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:47.745602Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T14:38:48.305557Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:48.336045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:49.049508Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:49.220988Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7894: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-25T14:38:49.221049Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7910: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-25T14:38:49.221129Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:2518:2908], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-25T14:38:49.222985Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2520:2909] 2025-06-25T14:38:49.223232Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2520:2909], schemeshard id = 72075186224037899 2025-06-25T14:38:50.301132Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2623:3198], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:50.301275Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06- ... _distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T14:41:32.186415Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T14:41:32.187159Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11498:8540], server id = [2:11503:8545], tablet id = 72075186224037905, status = OK 2025-06-25T14:41:32.187255Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11498:8540], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T14:41:32.187520Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11499:8541], server id = [2:11504:8546], tablet id = 72075186224037906, status = OK 2025-06-25T14:41:32.187572Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11499:8541], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T14:41:32.193596Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11500:8542], server id = [2:11507:8549], tablet id = 72075186224037907, status = OK 2025-06-25T14:41:32.193710Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11500:8542], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T14:41:32.193880Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11501:8543], server id = [2:11505:8547], tablet id = 72075186224037908, status = OK 2025-06-25T14:41:32.193928Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11501:8543], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T14:41:32.194755Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11502:8544], server id = [2:11506:8548], tablet id = 72075186224037909, status = OK 2025-06-25T14:41:32.194812Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11502:8544], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T14:41:32.195705Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-25T14:41:32.203277Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11498:8540], server id = [2:11503:8545], tablet id = 72075186224037905 2025-06-25T14:41:32.203335Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:41:32.204075Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037906 2025-06-25T14:41:32.205003Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037908 2025-06-25T14:41:32.205114Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11499:8541], server id = [2:11504:8546], tablet id = 72075186224037906 2025-06-25T14:41:32.205138Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:41:32.205502Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037907 2025-06-25T14:41:32.205839Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037909 
2025-06-25T14:41:32.206084Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11501:8543], server id = [2:11505:8547], tablet id = 72075186224037908 2025-06-25T14:41:32.206111Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:41:32.206341Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11512:8554], server id = [2:11515:8557], tablet id = 72075186224037910, status = OK 2025-06-25T14:41:32.206430Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11512:8554], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T14:41:32.207252Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11500:8542], server id = [2:11507:8549], tablet id = 72075186224037907 2025-06-25T14:41:32.207280Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:41:32.207405Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11502:8544], server id = [2:11506:8548], tablet id = 72075186224037909 2025-06-25T14:41:32.207434Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:41:32.207649Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11514:8556], server id = [2:11517:8559], tablet id = 72075186224037911, status = OK 2025-06-25T14:41:32.207714Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11514:8556], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T14:41:32.207788Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11516:8558], server id = [2:11519:8561], tablet id = 72075186224037912, status = OK 2025-06-25T14:41:32.207825Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11516:8558], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T14:41:32.208601Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11518:8560], server id = [2:11522:8564], tablet id = 72075186224037913, status = OK 2025-06-25T14:41:32.208657Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11518:8560], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T14:41:32.209464Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11520:8562], server id = [2:11521:8563], tablet id = 72075186224037914, status = OK 2025-06-25T14:41:32.209516Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11520:8562], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T14:41:32.210216Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037910 2025-06-25T14:41:32.211133Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11512:8554], server id = [2:11515:8557], tablet id = 72075186224037910 2025-06-25T14:41:32.211163Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:41:32.211758Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037912 2025-06-25T14:41:32.212352Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037911 
2025-06-25T14:41:32.212931Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11516:8558], server id = [2:11519:8561], tablet id = 72075186224037912 2025-06-25T14:41:32.212966Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:41:32.213074Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037913 2025-06-25T14:41:32.213151Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11514:8556], server id = [2:11517:8559], tablet id = 72075186224037911 2025-06-25T14:41:32.213175Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:41:32.213423Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037914 2025-06-25T14:41:32.213465Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T14:41:32.213675Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T14:41:32.213836Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T14:41:32.214155Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared 2025-06-25T14:41:32.215974Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11518:8560], server id = [2:11522:8564], tablet id = 72075186224037913 2025-06-25T14:41:32.216001Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:41:32.216271Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11520:8562], server id = [2:11521:8563], tablet id = 72075186224037914 2025-06-25T14:41:32.216295Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:41:32.217139Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T14:41:32.245675Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YjZjYWRiZS0xOTQ1OTNjOS1kNjhkZTk0My02MzU3MjcyNw==, TxId: 2025-06-25T14:41:32.245743Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YjZjYWRiZS0xOTQ1OTNjOS1kNjhkZTk0My02MzU3MjcyNw==, TxId: 2025-06-25T14:41:32.246473Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:05.000000Z, event interval end# 2025-06-25T14:41:30.000000Z 2025-06-25T14:41:32.246614Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:32.269761Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T14:41:32.269840Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: 
[72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=dmq%678[, ActorId=[1:6285:4005] 2025-06-25T14:41:32.271090Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:11550:6215]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:32.271378Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:41:32.271434Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T14:41:32.276640Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:41:32.276774Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-25T14:41:32.276842Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 2 ] 2025-06-25T14:41:32.289785Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 Answer: '/Root/Database/Table1[Value]=4' >> TxUsage::WriteToTopic_Demo_41_Table |83.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |83.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::Serverless [GOOD] Test command err: 2025-06-25T14:38:43.078633Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:43.078853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:43.078941Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001caa/r3tmp/tmp2M3jvG/pdisk_1.dat 2025-06-25T14:38:43.376655Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13880, node 1 2025-06-25T14:38:43.586981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:43.587044Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:43.587089Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:43.587580Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:43.589926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:43.683470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:43.683622Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:43.698574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18231 2025-06-25T14:38:44.242710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:46.932885Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:46.963776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:46.963878Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:47.005617Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:47.008475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:47.206804Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:47.242299Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.243026Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.243554Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.243747Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.243980Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.244080Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.244169Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.244268Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.244355Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.447842Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:47.447948Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:47.463707Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:47.630751Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:47.682828Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:47.682928Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:47.717950Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:47.718160Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:47.718340Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:47.718384Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:47.718429Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:47.718479Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:47.718520Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:47.718561Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:47.718977Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:47.740168Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:47.740297Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1794:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:47.746390Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:38:47.749760Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1830:2584] 2025-06-25T14:38:47.750358Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1830:2584], schemeshard id = 72075186224037897 2025-06-25T14:38:47.761346Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-25T14:38:47.777812Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:47.777868Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:47.777924Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-25T14:38:47.789076Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:47.795236Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:47.795371Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:47.960798Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:48.125488Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:48.193547Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T14:38:48.740995Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:48.780435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:49.401277Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:49.562480Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7894: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-25T14:38:49.562565Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7910: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-25T14:38:49.562660Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:2527:2915], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-25T14:38:49.565132Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2532:2917] 2025-06-25T14:38:49.565677Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2532:2917], schemeshard id = 72075186224037899 2025-06-25T14:38:50.685845Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2634:3200], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:50.686004Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06 ... ats size# 25 2025-06-25T14:41:24.849844Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T14:41:24.896088Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 118 ], ReplyToActorId[ [2:7524:5410]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:24.896401Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 118 ] 2025-06-25T14:41:24.896448Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 118, ReplyToActorId = [2:7524:5410], StatRequests.size() = 1 2025-06-25T14:41:26.168721Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:26.168812Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:26.168883Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-25T14:41:26.168935Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:41:26.169372Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared 2025-06-25T14:41:26.188363Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:41:26.193050Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7560:5436], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:26.193168Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7570:5441], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:26.193411Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Shared, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:26.209586Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:26.295006Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7574:5444], DatabaseId: /Root/Shared, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T14:41:26.395353Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [2:7669:5492]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:26.395643Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ] 2025-06-25T14:41:26.395685Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [2:7669:5492], StatRequests.size() = 1 2025-06-25T14:41:26.498957Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7674:5494] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:26.552204Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:7703:5509]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:26.552561Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-25T14:41:26.552990Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224037894] EvRequestStats, node id = 2, schemeshard count = 1, urgent = 0 2025-06-25T14:41:26.553041Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T14:41:26.553272Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:41:26.553358Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:7703:5509], StatRequests.size() = 1 2025-06-25T14:41:26.703881Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OTg3NGU2YmYtNmNlOTM2YjctNmVhMWRlM2MtNjU3ZWJjMmE=, TxId: 2025-06-25T14:41:26.703979Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OTg3NGU2YmYtNmNlOTM2YjctNmVhMWRlM2MtNjU3ZWJjMmE=, TxId: 2025-06-25T14:41:26.704630Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:26.725949Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:41:26.726014Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
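Note: the [TQueryBase] RunDataQuery records above print the traversal cleanup statement flattened onto a single line. Reformatted here for readability only; the statement text and parameter names are copied verbatim from the DEBUG output, nothing is added:

  -- cleanup issued by [TQueryBase] before the next scheduled traversal
  DECLARE $owner_id AS Uint64;
  DECLARE $local_path_id AS Uint64;
  DELETE FROM `.metadata/_statistics`
  WHERE owner_id = $owner_id AND local_path_id = $local_path_id;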
2025-06-25T14:41:26.772381Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T14:41:26.772463Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T14:41:26.872972Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:2969:3063], schemeshard count = 1 2025-06-25T14:41:27.150137Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-25T14:41:27.150250Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 195.000000s, at schemeshard: 72075186224037899 2025-06-25T14:41:27.150454Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 26 2025-06-25T14:41:27.167088Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T14:41:28.220688Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:7764:5546]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:28.220987Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-25T14:41:28.221029Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:7764:5546], StatRequests.size() = 1 2025-06-25T14:41:29.680945Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:41:29.693193Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:29.693261Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:29.693300Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is data table. 2025-06-25T14:41:29.693334Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T14:41:29.693607Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-25T14:41:29.696225Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:41:29.708991Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MTljNmIyYzItNTc1NmJmN2YtMzk1OTM5MTItYjM4N2M3Yzk=, TxId: 2025-06-25T14:41:29.709063Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MTljNmIyYzItNTc1NmJmN2YtMzk1OTM5MTItYjM4N2M3Yzk=, TxId: 2025-06-25T14:41:29.709617Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:29.724839Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T14:41:29.724896Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:41:29.775625Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:7830:5586]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:29.775929Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-25T14:41:29.775970Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:7830:5586], StatRequests.size() = 1 2025-06-25T14:41:31.218048Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:7876:5611]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:31.218406Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-25T14:41:31.218453Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:7876:5611], StatRequests.size() = 1 2025-06-25T14:41:32.597391Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 2 2025-06-25T14:41:32.597889Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:41:32.598219Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:41:32.610896Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:32.610961Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
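For comparison, the force-traversal run at the start of this section persists per-column statistics with an UPSERT. The statement below is likewise copied verbatim from the earlier RunDataQuery record and only re-wrapped; the element types of the two List parameters are not visible in the log output:

  -- persist issued by [TQueryBase] after TTxAggregateStatisticsResponse completes
  DECLARE $owner_id AS Uint64;
  DECLARE $local_path_id AS Uint64;
  DECLARE $stat_type AS Uint32;
  DECLARE $column_tags AS List;  -- element type not shown in the log
  DECLARE $data AS List;         -- element type not shown in the log
  UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data)
  VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
         ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);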
2025-06-25T14:41:32.683783Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:7916:5633]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:32.684096Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-25T14:41:32.684142Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:7916:5633], StatRequests.size() = 1 >> TxUsage::Sinks_Oltp_WriteToTopic_3_Table [GOOD] >> TxUsage::WriteToTopic_Two_WriteSession_Query |83.5%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |83.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots >> TxUsage::Sinks_Oltp_WriteToTopic_3_Query |83.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |83.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots >> KqpWorkload::KV |83.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |83.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoWithError [GOOD] >> KqpQueryPerf::MultiDeleteFromTable-QueryService+UseSink >> KqpQueryPerf::UpdateOn-QueryService-UseSink |83.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |83.6%| [LD] {RESULT} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |83.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/query_replay_yt/query_replay_yt |83.6%| [LD] {RESULT} $(B)/ydb/tools/query_replay_yt/query_replay_yt |83.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/query_replay_yt/query_replay_yt >> TKeyValueTest::TestInlineCopyRangeWorksNewApi [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_StartTls::LdapRefreshGroupsInfoWithError [GOOD] Test command err: 2025-06-25T14:40:45.083962Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896943625335585:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:45.084024Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f03/r3tmp/tmpSvP9ad/pdisk_1.dat 2025-06-25T14:40:45.401382Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63378, node 1 2025-06-25T14:40:45.484909Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:45.485015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:45.497865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:45.595242Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:45.595277Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:45.595293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:45.595398Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:45.976363Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:45.979410Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:45.979444Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:45.980120Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:11907, port: 11907 2025-06-25T14:40:45.980750Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:45.985396Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:46.033215Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****ybxw (CEE54EAA) () has now valid token of ldapuser@ldap 2025-06-25T14:40:46.091608Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:48.065105Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896957962228446:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:48.065153Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f03/r3tmp/tmpGjTT4I/pdisk_1.dat 2025-06-25T14:40:48.330281Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519896957962228423:2080] 1750862448062456 != 1750862448062459 2025-06-25T14:40:48.332791Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:48.343698Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:48.343778Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:48.345618Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32287, node 2 2025-06-25T14:40:48.410333Z 
node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:48.410364Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:48.410372Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:48.410513Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:48.684421Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:48.686602Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:48.686628Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:48.687253Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:11967, port: 11967 2025-06-25T14:40:48.687311Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:40:48.696289Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:48.745375Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:48.745823Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:11967 return no entries 2025-06-25T14:40:48.747190Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****fQAA (814157D0) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. 
LDAP search for filter uid=ldapuser on server ldap://localhost:11967 return no entries)' 2025-06-25T14:40:51.596009Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519896969609928159:2171];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:51.597070Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f03/r3tmp/tmpKQiOBG/pdisk_1.dat 2025-06-25T14:40:51.701875Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11639, node 3 2025-06-25T14:40:51.726261Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:51.726373Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:51.728821Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:51.748859Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:51.748877Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:51.748884Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:51.749003Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:51.845950Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:40:51.849014Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:40:51.849042Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:40:51.849682Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:20822, port: 20822 2025-06-25T14:40:51.849761Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:40:51.858752Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:51.900746Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:51.948852Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:40:51.952457Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:40:51.952516Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:51.996615Z node 3 
:LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:52.048639Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:40:52.053442Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****rseA (9BA18F1F) () has now valid token of ldapuser@ldap 2025-06-25T14:40:52.597482Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:55.599424Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****rseA (9BA18F1F) 2025-06-25T14:40:55.600403Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:20822, port: 20822 2025-06-25T14:40:55.600480Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:40:55.622941Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:40:55.668955Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:40:55.716557Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:40:55.717049Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:40:55.717078Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025- ... _provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:11806 return no entries 2025-06-25T14:41:21.209683Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****ylZQ (58B71F52) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. 
LDAP search for filter uid=ldapuser on server ldap://localhost:11806 return no entries)' 2025-06-25T14:41:24.146954Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****ylZQ (58B71F52) 2025-06-25T14:41:26.327760Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519897118220887629:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:26.327858Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000f03/r3tmp/tmpPlWcsQ/pdisk_1.dat 2025-06-25T14:41:26.546949Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:26.547050Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:26.563066Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:26.564536Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519897118220887606:2080] 1750862486326554 != 1750862486326557 2025-06-25T14:41:26.570772Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7691, node 6 2025-06-25T14:41:26.779818Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:26.779844Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:26.779853Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:26.780001Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:26.997028Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-25T14:41:26.999746Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:26.999774Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:27.000451Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:22730, port: 22730 2025-06-25T14:41:27.000546Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:41:27.044192Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:27.092883Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:27.093546Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:22730. 
Server is busy 2025-06-25T14:41:27.094088Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****bXew (424F1D32) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:22730. Server is busy)' 2025-06-25T14:41:27.094400Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:27.094420Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:27.095336Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:22730, port: 22730 2025-06-25T14:41:27.095418Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:41:27.117116Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:27.161053Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:27.162914Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:22730. Server is busy 2025-06-25T14:41:27.163432Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****bXew (424F1D32) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:22730. Server is busy)' 2025-06-25T14:41:27.366397Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:29.374057Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****bXew (424F1D32) 2025-06-25T14:41:29.374406Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:29.374436Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:29.375386Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:22730, port: 22730 2025-06-25T14:41:29.375451Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:41:29.400133Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:29.452803Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:29.453388Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:22730. Server is busy 2025-06-25T14:41:29.454138Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****bXew (424F1D32) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:22730. 
Server is busy)' 2025-06-25T14:41:31.332758Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519897118220887629:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:31.332849Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:32.377432Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****bXew (424F1D32) 2025-06-25T14:41:32.377689Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-25T14:41:32.377707Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:41:32.378440Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:22730, port: 22730 2025-06-25T14:41:32.378505Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:41:32.391495Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:32.437107Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:32.480672Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:41:32.481271Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:41:32.481323Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:32.532715Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:32.580802Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:32.582015Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****bXew (424F1D32) () has now valid token of ldapuser@ldap 2025-06-25T14:41:36.389045Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****bXew (424F1D32) 2025-06-25T14:41:36.389176Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:22730, port: 22730 2025-06-25T14:41:36.389244Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-25T14:41:36.414063Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:41:36.464887Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: 
dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-25T14:41:36.512724Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-25T14:41:36.513362Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-25T14:41:36.513414Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:36.560778Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:36.608748Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-25T14:41:36.610042Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****bXew (424F1D32) () has now valid token of ldapuser@ldap >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Table [GOOD] |83.6%| [TA] $(B)/ydb/core/security/ldap_auth_provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineCopyRangeWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! 
new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! 
new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:86:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! 
new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:89:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:89:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:89:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:93:2057] recipient: [11:91:2119] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:95:2057] recipient: [11:91:2119] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! 
new actor is[11:94:2120] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 46:81:2112] !Reboot 72057594037927937 (actor [46:59:2099]) rebooted! !Reboot 72057594037927937 (actor [46:59:2099]) tablet resolver refreshed! new actor is[46:84:2113] Leader for TabletID 72057594037927937 is [46:84:2113] sender: [46:170:2057] recipient: [46:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [47:57:2057] recipient: [47:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [47:57:2057] recipient: [47:53:2097] Leader for TabletID 72057594037927937 is [47:59:2099] sender: [47:60:2057] recipient: [47:53:2097] Leader for TabletID 72057594037927937 is [47:59:2099] sender: [47:77:2057] recipient: [47:14:2061] !Reboot 72057594037927937 (actor [47:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [47:59:2099] sender: [47:80:2057] recipient: [47:38:2085] Leader for TabletID 72057594037927937 is [47:59:2099] sender: [47:83:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [47:59:2099] sender: [47:84:2057] recipient: [47:82:2112] Leader for TabletID 72057594037927937 is [47:85:2113] sender: [47:86:2057] recipient: [47:82:2112] !Reboot 72057594037927937 (actor [47:59:2099]) rebooted! !Reboot 72057594037927937 (actor [47:59:2099]) tablet resolver refreshed! new actor is[47:85:2113] Leader for TabletID 72057594037927937 is [47:85:2113] sender: [47:171:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:57:2057] recipient: [48:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:57:2057] recipient: [48:53:2097] Leader for TabletID 72057594037927937 is [48:59:2099] sender: [48:60:2057] recipient: [48:53:2097] Leader for TabletID 72057594037927937 is [48:59:2099] sender: [48:77:2057] recipient: [48:14:2061] !Reboot 72057594037927937 (actor [48:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [48:59:2099] sender: [48:83:2057] recipient: [48:38:2085] Leader for TabletID 72057594037927937 is [48:59:2099] sender: [48:86:2057] recipient: [48:14:2061] Leader for TabletID 72057594037927937 is [48:59:2099] sender: [48:87:2057] recipient: [48:85:2115] Leader for TabletID 72057594037927937 is [48:88:2116] sender: [48:89:2057] recipient: [48:85:2115] !Reboot 72057594037927937 (actor [48:59:2099]) rebooted! !Reboot 72057594037927937 (actor [48:59:2099]) tablet resolver refreshed! 
new actor is[48:88:2116] Leader for TabletID 72057594037927937 is [48:88:2116] sender: [48:174:2057] recipient: [48:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:57:2057] recipient: [49:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:57:2057] recipient: [49:53:2097] Leader for TabletID 72057594037927937 is [49:59:2099] sender: [49:60:2057] recipient: [49:53:2097] Leader for TabletID 72057594037927937 is [49:59:2099] sender: [49:77:2057] recipient: [49:14:2061] !Reboot 72057594037927937 (actor [49:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [49:59:2099] sender: [49:83:2057] recipient: [49:38:2085] Leader for TabletID 72057594037927937 is [49:59:2099] sender: [49:86:2057] recipient: [49:14:2061] Leader for TabletID 72057594037927937 is [49:59:2099] sender: [49:87:2057] recipient: [49:85:2115] Leader for TabletID 72057594037927937 is [49:88:2116] sender: [49:89:2057] recipient: [49:85:2115] !Reboot 72057594037927937 (actor [49:59:2099]) rebooted! !Reboot 72057594037927937 (actor [49:59:2099]) tablet resolver refreshed! new actor is[49:88:2116] Leader for TabletID 72057594037927937 is [49:88:2116] sender: [49:174:2057] recipient: [49:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [50:57:2057] recipient: [50:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [50:57:2057] recipient: [50:52:2097] Leader for TabletID 72057594037927937 is [50:59:2099] sender: [50:60:2057] recipient: [50:52:2097] Leader for TabletID 72057594037927937 is [50:59:2099] sender: [50:77:2057] recipient: [50:14:2061] !Reboot 72057594037927937 (actor [50:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [50:59:2099] sender: [50:84:2057] recipient: [50:38:2085] Leader for TabletID 72057594037927937 is [50:59:2099] sender: [50:87:2057] recipient: [50:14:2061] Leader for TabletID 72057594037927937 is [50:59:2099] sender: [50:88:2057] recipient: [50:86:2115] Leader for TabletID 72057594037927937 is [50:89:2116] sender: [50:90:2057] recipient: [50:86:2115] !Reboot 72057594037927937 (actor [50:59:2099]) rebooted! !Reboot 72057594037927937 (actor [50:59:2099]) tablet resolver refreshed! new actor is[50:89:2116] Leader for TabletID 72057594037927937 is [50:89:2116] sender: [50:175:2057] recipient: [50:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [51:57:2057] recipient: [51:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [51:57:2057] recipient: [51:53:2097] Leader for TabletID 72057594037927937 is [51:59:2099] sender: [51:60:2057] recipient: [51:53:2097] Leader for TabletID 72057594037927937 is [51:59:2099] sender: [51:77:2057] recipient: [51:14:2061] !Reboot 72057594037927937 (actor [51:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [51:59:2099] sender: [51:87:2057] recipient: [51:38:2085] Leader for TabletID 72057594037927937 is [51:59:2099] sender: [51:90:2057] recipient: [51:14:2061] Leader for TabletID 72057594037927937 is [51:59:2099] sender: [51:91:2057] recipient: [51:89:2118] Leader for TabletID 72057594037927937 is [51:92:2119] sender: [51:93:2057] recipient: [51:89:2118] !Reboot 72057594037927937 (actor [51:59:2099]) rebooted! !Reboot 72057594037927937 (actor [51:59:2099]) tablet resolver refreshed! 
new actor is[51:92:2119] Leader for TabletID 72057594037927937 is [51:92:2119] sender: [51:178:2057] recipient: [51:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [52:57:2057] recipient: [52:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [52:57:2057] recipient: [52:53:2097] Leader for TabletID 72057594037927937 is [52:59:2099] sender: [52:60:2057] recipient: [52:53:2097] Leader for TabletID 72057594037927937 is [52:59:2099] sender: [52:77:2057] recipient: [52:14:2061] !Reboot 72057594037927937 (actor [52:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [52:59:2099] sender: [52:87:2057] recipient: [52:38:2085] Leader for TabletID 72057594037927937 is [52:59:2099] sender: [52:90:2057] recipient: [52:14:2061] Leader for TabletID 72057594037927937 is [52:59:2099] sender: [52:91:2057] recipient: [52:89:2118] Leader for TabletID 72057594037927937 is [52:92:2119] sender: [52:93:2057] recipient: [52:89:2118] !Reboot 72057594037927937 (actor [52:59:2099]) rebooted! !Reboot 72057594037927937 (actor [52:59:2099]) tablet resolver refreshed! new actor is[52:92:2119] Leader for TabletID 72057594037927937 is [52:92:2119] sender: [52:178:2057] recipient: [52:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:57:2057] recipient: [53:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:57:2057] recipient: [53:53:2097] Leader for TabletID 72057594037927937 is [53:59:2099] sender: [53:60:2057] recipient: [53:53:2097] Leader for TabletID 72057594037927937 is [53:59:2099] sender: [53:77:2057] recipient: [53:14:2061] !Reboot 72057594037927937 (actor [53:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [53:59:2099] sender: [53:88:2057] recipient: [53:38:2085] Leader for TabletID 72057594037927937 is [53:59:2099] sender: [53:91:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [53:59:2099] sender: [53:92:2057] recipient: [53:90:2118] Leader for TabletID 72057594037927937 is [53:93:2119] sender: [53:94:2057] recipient: [53:90:2118] !Reboot 72057594037927937 (actor [53:59:2099]) rebooted! !Reboot 72057594037927937 (actor [53:59:2099]) tablet resolver refreshed! new actor is[53:93:2119] Leader for TabletID 72057594037927937 is [53:93:2119] sender: [53:179:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:57:2057] recipient: [54:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:57:2057] recipient: [54:54:2097] Leader for TabletID 72057594037927937 is [54:59:2099] sender: [54:60:2057] recipient: [54:54:2097] Leader for TabletID 72057594037927937 is [54:59:2099] sender: [54:77:2057] recipient: [54:14:2061] !Reboot 72057594037927937 (actor [54:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [54:59:2099] sender: [54:91:2057] recipient: [54:38:2085] Leader for TabletID 72057594037927937 is [54:59:2099] sender: [54:94:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [54:59:2099] sender: [54:95:2057] recipient: [54:93:2121] Leader for TabletID 72057594037927937 is [54:96:2122] sender: [54:97:2057] recipient: [54:93:2121] !Reboot 72057594037927937 (actor [54:59:2099]) rebooted! !Reboot 72057594037927937 (actor [54:59:2099]) tablet resolver refreshed! 
new actor is[54:96:2122] Leader for TabletID 72057594037927937 is [54:96:2122] sender: [54:182:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:57:2057] recipient: [55:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:57:2057] recipient: [55:54:2097] Leader for TabletID 72057594037927937 is [55:59:2099] sender: [55:60:2057] recipient: [55:54:2097] Leader for TabletID 72057594037927937 is [55:59:2099] sender: [55:77:2057] recipient: [55:14:2061] !Reboot 72057594037927937 (actor [55:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [55:59:2099] sender: [55:91:2057] recipient: [55:38:2085] Leader for TabletID 72057594037927937 is [55:59:2099] sender: [55:94:2057] recipient: [55:14:2061] Leader for TabletID 72057594037927937 is [55:59:2099] sender: [55:95:2057] recipient: [55:93:2121] Leader for TabletID 72057594037927937 is [55:96:2122] sender: [55:97:2057] recipient: [55:93:2121] !Reboot 72057594037927937 (actor [55:59:2099]) rebooted! !Reboot 72057594037927937 (actor [55:59:2099]) tablet resolver refreshed! new actor is[55:96:2122] Leader for TabletID 72057594037927937 is [55:96:2122] sender: [55:182:2057] recipient: [55:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [56:57:2057] recipient: [56:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [56:57:2057] recipient: [56:53:2097] Leader for TabletID 72057594037927937 is [56:59:2099] sender: [56:60:2057] recipient: [56:53:2097] Leader for TabletID 72057594037927937 is [56:59:2099] sender: [56:77:2057] recipient: [56:14:2061] !Reboot 72057594037927937 (actor [56:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [56:59:2099] sender: [56:92:2057] recipient: [56:38:2085] Leader for TabletID 72057594037927937 is [56:59:2099] sender: [56:94:2057] recipient: [56:14:2061] Leader for TabletID 72057594037927937 is [56:59:2099] sender: [56:96:2057] recipient: [56:95:2121] Leader for TabletID 72057594037927937 is [56:97:2122] sender: [56:98:2057] recipient: [56:95:2121] !Reboot 72057594037927937 (actor [56:59:2099]) rebooted! !Reboot 72057594037927937 (actor [56:59:2099]) tablet resolver refreshed! new actor is[56:97:2122] Leader for TabletID 72057594037927937 is [0:0:0] sender: [57:57:2057] recipient: [57:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [57:57:2057] recipient: [57:53:2097] Leader for TabletID 72057594037927937 is [57:59:2099] sender: [57:60:2057] recipient: [57:53:2097] Leader for TabletID 72057594037927937 is [57:59:2099] sender: [57:77:2057] recipient: [57:14:2061] |83.6%| [TA] {RESULT} $(B)/ydb/core/security/ldap_auth_provider/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Query >> SystemView::Describe [GOOD] >> SystemView::DescribeSystemFolder |83.6%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/tools/query_replay/ydb_query_replay |83.6%| [LD] {RESULT} $(B)/ydb/tools/query_replay/ydb_query_replay |83.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/query_replay/ydb_query_replay >> KqpQueryPerf::IndexDeleteOn-QueryService-UseSink >> SystemView::AuthOwners_Access [GOOD] >> SystemView::AuthOwners_ResultOrder >> TxUsage::WriteToTopic_Demo_32_Table [GOOD] >> BasicUsage::ReadWithRestarts [GOOD] >> BasicUsage::ConflictingWrites >> SystemView::TabletsFields [GOOD] >> SystemView::TabletsShards >> TxUsage::WriteToTopic_Demo_33_Table >> KqpQueryPerf::Update-QueryService+UseSink >> KqpQueryPerf::Replace-QueryService+UseSink [GOOD] >> KqpQueryPerf::Insert+QueryService-UseSink [GOOD] >> KqpQueryPerf::Insert+QueryService+UseSink >> KqpQueryPerf::IndexLookupJoin+EnableStreamLookup-QueryService [GOOD] >> KqpQueryPerf::IndexLookupJoin+EnableStreamLookup+QueryService >> KqpQueryPerf::UpdateOn+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup-QueryService [GOOD] >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup+QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Replace-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 9976, MsgBus: 21506 2025-06-25T14:41:36.571491Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897161934244419:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:36.571560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b79/r3tmp/tmpLm2Ogr/pdisk_1.dat 2025-06-25T14:41:37.570835Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:37.808683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:37.808788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:37.831905Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:37.855690Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:37.856262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9976, node 1 2025-06-25T14:41:38.314486Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:38.314509Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:38.314523Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:38.314667Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
TClient is connected to server localhost:21506 TClient is connected to server localhost:21506 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:39.975170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:39.997901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:40.020718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.311782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.654926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.775488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:41:41.571004Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897161934244419:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:41.571066Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:41.787038Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897183409082361:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:41.787187Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.770362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.827239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.874151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.926083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.983162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.054982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.088272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.184021Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897191999017625:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.184126Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.184694Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897191999017630:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.196965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:43.212287Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897191999017632:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:43.283707Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897191999017683:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |83.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |83.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |83.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots >> KqpQueryPerf::IdxLookupJoinThreeWay+QueryService [GOOD] >> KqpQueryPerf::IdxLookupJoinThreeWay-QueryService >> TxUsage::WriteToTopic_Demo_10_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::UpdateOn+QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 27447, MsgBus: 10299 2025-06-25T14:41:36.576885Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897164364216254:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:36.578865Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b9b/r3tmp/tmpNgUm9g/pdisk_1.dat 2025-06-25T14:41:37.579586Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:37.656755Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:37.937255Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:38.055140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:38.055243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:38.207060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27447, node 1 2025-06-25T14:41:38.362984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:38.363009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:38.363017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:38.363137Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10299 TClient is connected to server localhost:10299 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:40.294363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:40.309202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:40.319388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.673661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.977154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:41:41.073717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:41.594629Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897164364216254:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:41.594952Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:42.278226Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897190134021570:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.278381Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.759814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.797046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.871559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.954950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.998005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.047476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.081469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.178745Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897194428989528:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.178854Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.179229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897194428989533:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.194192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:43.214099Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897194428989535:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:43.312533Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897194428989589:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpQueryPerf::UpdateOn-QueryService-UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_1_Query [GOOD] >> SystemView::AuthUsers_Access [GOOD] >> SystemView::AuthUsers_ResultOrder >> KqpPg::TableDeleteWhere-useSink [GOOD] >> KqpQueryPerf::MultiDeleteFromTable-QueryService+UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_11_Table >> KqpQueryPerf::IndexInsert-QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexInsert-QueryService+UseSink >> KqpQueryPerf::IndexInsert+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexInsert+QueryService+UseSink >> BasicStatistics::TwoServerlessDbs [GOOD] >> KqpQueryPerf::IndexReplace-QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexReplace-QueryService+UseSink >> TxUsage::WriteToTopic_Demo_18_RestartNo_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::UpdateOn-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 14576, MsgBus: 18451 2025-06-25T14:41:40.021626Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897179260707372:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:40.021680Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b39/r3tmp/tmpTA5InD/pdisk_1.dat 2025-06-25T14:41:41.008458Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897179260707335:2080] 1750862500016821 != 1750862500016824 2025-06-25T14:41:41.034372Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:41.057963Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:41.063023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:41.063103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:41.071111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:41.072473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 14576, node 1 2025-06-25T14:41:41.300792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:41.300815Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:41.300821Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty 
maybe) 2025-06-25T14:41:41.300933Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18451 TClient is connected to server localhost:18451 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:42.226315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:42.242670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:42.257151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:42.478660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:42.695194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:42.771166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:41:44.343012Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897196440578172:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:44.343141Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:44.779232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.819044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.869553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.910149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.951142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:45.032735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897179260707372:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:45.033212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:45.047201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:45.128703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:45.206725Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519897200735546133:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:45.206851Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:45.207230Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897200735546138:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:45.211233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:45.234168Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897200735546140:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:45.329547Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897200735546193:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpQueryPerf::Update+QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::MultiDeleteFromTable-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 28206, MsgBus: 21478 2025-06-25T14:41:39.889940Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897176704509394:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:39.889987Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b3a/r3tmp/tmpEyEwRT/pdisk_1.dat 2025-06-25T14:41:40.552497Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897176704509206:2080] 1750862499794646 != 1750862499794649 2025-06-25T14:41:40.631155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:40.631243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:40.653739Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:40.654088Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28206, node 1 2025-06-25T14:41:40.892744Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:40.959174Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:40.959194Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:40.959200Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:40.961616Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21478 TClient is connected to server localhost:21478 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:41.763709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:41.796484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:41.813168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:42.003236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:41:42.180152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.304293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:44.188664Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897198179347338:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:44.188828Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:44.551963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.619771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.661521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.702693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.773222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.859185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.895481Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897176704509394:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:44.895559Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:44.944972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:45.006269Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519897202474315302:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:45.006366Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:45.006501Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897202474315307:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:45.015367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:45.028338Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897202474315309:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:45.104786Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897202474315360:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TKeyValueTest::TestRewriteThenLastValue [GOOD] >> TKeyValueTest::TestRenameWorksNewApi >> TxUsage::WriteToTopic_Two_WriteSession_Query [GOOD] >> KqpQueryPerf::KvRead+QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/pg/unittest >> KqpPg::TableDeleteWhere-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 24418, MsgBus: 28611 2025-06-25T14:36:52.171388Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895940926732798:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:52.171425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ab2/r3tmp/tmp5UOsTv/pdisk_1.dat 2025-06-25T14:36:52.720274Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519895940926732763:2080] 1750862212170198 != 1750862212170201 2025-06-25T14:36:52.729304Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:52.735043Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:52.735177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:52.759927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24418, node 1 2025-06-25T14:36:52.840252Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:52.840281Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:52.840288Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:52.840448Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28611 2025-06-25T14:36:53.186202Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28611 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:36:53.398018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:36:53.444870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:36:55.587078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce_pgbpchar_17472595041006102391_17823623939509273229' Unable to coerce value for pgbpchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-25T14:36:55.825161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce__pgbpchar_17472595041006102391_5352544928909966465' Unable to coerce value for _pgbpchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-25T14:36:55.947336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) abcd 2025-06-25T14:36:56.191606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {abcd,abcd} 2025-06-25T14:36:56.454538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) abcd 2025-06-25T14:36:56.688345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {"abcd ","abcd "} 2025-06-25T14:36:56.852554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce_pgvarchar_17472595041006102391_17823623939509273229' Unable to coerce value for pgvarchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character varying(2) 2025-06-25T14:36:56.957945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce__pgvarchar_17472595041006102391_5352544928909966465' Unable to coerce value for _pgvarchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character varying(2) 2025-06-25T14:36:57.107572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:36:57.172964Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895940926732798:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:57.175108Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; abcd 2025-06-25T14:36:57.287912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {abcd,abcd} 2025-06-25T14:36:57.472078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) abcd 2025-06-25T14:36:57.641575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) {abcd,abcd} 2025-06-25T14:36:57.795124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710686:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce_pgbit_17472595041006102391_5866627432374416336' Unable to coerce value for pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(2) 2025-06-25T14:36:57.926446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710687:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Coerce__pgbit_17472595041006102391_11087201080355820517' Unable to coerce value for _pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(2) 2025-06-25T14:36:58.084168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:19 ... eration_create_table.cpp:664) 829 2025-06-25T14:41:42.905381Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:41:42.945506Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715853:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.090400Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715854:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 774 2025-06-25T14:41:43.242006Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715855:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.380260Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:41:43.415905Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715857:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2950 2025-06-25T14:41:43.579321Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715858:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.745192Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:41:43.783531Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715860:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.913297Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:41:43.915542Z node 11 :TX_DATASHARD ERROR: read_table_scan.cpp:460: TReadTableScan: undelivered event TxId: 281474976715862 
2025-06-25T14:41:43.915867Z node 11 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. txid 281474976715862 at tablet 72075186224037955 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976715862] at 72075186224037955 while waiting for scan finish) | 2025-06-25T14:41:43.916598Z node 11 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715862 at tablet 72075186224037955 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976715862] at 72075186224037955 while waiting for scan finish) | 114 2025-06-25T14:41:43.966555Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715863:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.134003Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:41:44.201841Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715865:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.384479Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 3802 2025-06-25T14:41:44.429435Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715867:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.570463Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:41:44.627297Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715869:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.743662Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 4072 2025-06-25T14:41:44.781336Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715871:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.886696Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:41:44.922620Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715873:0, at schemeshard: 
72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:45.006027Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 142 2025-06-25T14:41:45.043400Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715875:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:45.175594Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:41:45.197481Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715877:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:45.304811Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:41:45.307838Z node 11 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. txid 281474976715879 at tablet 72075186224037963 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976715879] at 72075186224037963 while waiting for stream clearance) | 3615 2025-06-25T14:41:45.308798Z node 11 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715879 at tablet 72075186224037963 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976715879] at 72075186224037963 while waiting for stream clearance) | 2025-06-25T14:41:45.339985Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715880:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:45.464962Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:41:45.505043Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715882:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:45.689361Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 3614 2025-06-25T14:41:45.730061Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715884:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:45.860617Z node 11 :READ_TABLE_API WARN: 
rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:41:45.898133Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715886:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:46.020627Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 22 2025-06-25T14:41:46.066884Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715888:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:46.252424Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:41:46.294611Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715891:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:46.438538Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> TxUsage::WriteToTopic_Demo_5_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoServerlessDbs [GOOD] Test command err: 2025-06-25T14:38:47.652506Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:47.652885Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:47.652994Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c88/r3tmp/tmpuB4Ctb/pdisk_1.dat 2025-06-25T14:38:47.986764Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8868, node 1 2025-06-25T14:38:48.208244Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:48.208286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:48.208340Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:48.208786Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:48.210369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:48.300536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:48.300680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:48.321750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62589 2025-06-25T14:38:48.972784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:52.040093Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:52.075010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:52.075110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:52.114911Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:52.118319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:52.332806Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:52.369021Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.369594Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.370228Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.370424Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.370644Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.370745Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.370820Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.370950Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.371025Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.558334Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:52.558453Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:52.571587Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:52.751995Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:52.808701Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:52.808801Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:52.846057Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:52.846257Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:52.846493Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:52.846561Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:52.846625Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:52.846678Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:52.846750Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:52.846798Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:52.847333Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:52.877795Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:52.877913Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1794:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:52.886013Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:38:52.889849Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1830:2584] 2025-06-25T14:38:52.890567Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1830:2584], schemeshard id = 72075186224037897 2025-06-25T14:38:52.901422Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-25T14:38:52.927324Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:52.927382Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:52.927447Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-25T14:38:52.941208Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:52.948571Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:52.948707Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:53.131500Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:53.282040Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:53.339403Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T14:38:53.871234Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:53.901820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:54.491718Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:54.691602Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7894: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-25T14:38:54.691667Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7910: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-25T14:38:54.691732Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:2514:2911], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-25T14:38:54.694275Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2516:2913] 2025-06-25T14:38:54.694734Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2516:2913], schemeshard id = 72075186224037899 2025-06-25T14:38:55.501188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:56.010736Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:56.231752Z node 2 :STATISTICS DEBUG: schemeshard_impl.c ... Pool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:39.484666Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:8851:6352], DatabaseId: /Root/Shared, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T14:41:39.689230Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:8951:6401] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:39.737229Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 118 ], ReplyToActorId[ [2:8980:6416]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:39.737521Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 118 ] 2025-06-25T14:41:39.737793Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224037894] EvRequestStats, node id = 2, schemeshard count = 1, urgent = 0 2025-06-25T14:41:39.737853Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T14:41:39.738020Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:41:39.738091Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 118, ReplyToActorId = [2:8980:6416], StatRequests.size() = 1 2025-06-25T14:41:39.870241Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NjMwNGYzNGItOWE1OTU0ZDQtYjc1OTM1NTgtYzkzYTNjMDI=, TxId: 2025-06-25T14:41:39.870313Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NjMwNGYzNGItOWE1OTU0ZDQtYjc1OTM1NTgtYzkzYTNjMDI=, TxId: 2025-06-25T14:41:39.870861Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:39.885416Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T14:41:39.887343Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:41:39.933216Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T14:41:39.933269Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T14:41:40.011081Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:3643:3351], schemeshard count = 1 2025-06-25T14:41:40.337411Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-25T14:41:40.337517Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 197.000000s, at schemeshard: 72075186224037899 2025-06-25T14:41:40.337740Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 26 2025-06-25T14:41:40.366055Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T14:41:40.595514Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [2:9014:6438]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:40.595904Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ] 2025-06-25T14:41:40.595955Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [2:9014:6438], StatRequests.size() = 1 2025-06-25T14:41:42.161932Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:9061:6466]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:42.162349Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-25T14:41:42.162401Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:9061:6466], StatRequests.size() = 1 2025-06-25T14:41:42.761617Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037905 2025-06-25T14:41:42.761703Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 212.000000s, at schemeshard: 72075186224037905 2025-06-25T14:41:42.762108Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037905, stats size# 26 2025-06-25T14:41:42.786267Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T14:41:43.062807Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:41:43.074237Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:43.074307Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:43.074352Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is data table. 2025-06-25T14:41:43.074389Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T14:41:43.074693Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-25T14:41:43.077361Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:41:43.090956Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=N2QyYmNmOTEtNGQxYTBmOGItNzA1MjY2OGEtYWI3N2YzNDU=, TxId: 2025-06-25T14:41:43.091017Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=N2QyYmNmOTEtNGQxYTBmOGItNzA1MjY2OGEtYWI3N2YzNDU=, TxId: 2025-06-25T14:41:43.091538Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:43.106366Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T14:41:43.106431Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:41:43.797501Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:9136:6515]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:43.797818Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-25T14:41:43.797864Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:9136:6515], StatRequests.size() = 1 2025-06-25T14:41:45.546122Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:9187:6544]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:45.546458Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-25T14:41:45.546505Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:9187:6544], StatRequests.size() = 1 2025-06-25T14:41:46.361236Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 3 2025-06-25T14:41:46.361519Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:41:46.362050Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:41:46.377312Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:46.377399Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:46.377449Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037905, LocalPathId: 2] is data table. 2025-06-25T14:41:46.377489Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037905, LocalPathId: 2] 2025-06-25T14:41:46.377833Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-25T14:41:46.381499Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:41:46.411088Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NmYyZDkzMTEtZDZkMGU3YWQtNWYwNmU2MzktY2IyZTEzMWU=, TxId: 2025-06-25T14:41:46.411167Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NmYyZDkzMTEtZDZkMGU3YWQtNWYwNmU2MzktY2IyZTEzMWU=, TxId: 2025-06-25T14:41:46.411854Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:46.430365Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037905, LocalPathId: 2] 2025-06-25T14:41:46.430440Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:41:47.163178Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:9259:6589]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:47.163508Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-25T14:41:47.163551Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:9259:6589], StatRequests.size() = 1 2025-06-25T14:41:47.164283Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:9261:6591]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:47.169324Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-25T14:41:47.169414Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:9261:6591], StatRequests.size() = 1 >> KqpQueryPerf::Update-QueryService+UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_24_Table [GOOD] >> KqpQueryPerf::DeleteOn+QueryService-UseSink >> KqpQueryPerf::IndexDeleteOn-QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexDeleteOn-QueryService+UseSink >> ttl_unavailable_s3.py::TestUnavailableS3::test [GOOD] >> SystemView::TabletsShards [GOOD] >> SystemView::TabletsFollowers >> KqpQueryPerf::Delete-QueryService-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Update-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 13598, MsgBus: 16209 2025-06-25T14:41:44.225196Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897195906888577:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:44.234725Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b34/r3tmp/tmpx0FPjb/pdisk_1.dat 2025-06-25T14:41:44.696240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:44.696407Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:44.714686Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:44.744453Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897195906888543:2080] 1750862504210202 != 1750862504210205 2025-06-25T14:41:44.756853Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13598, node 1 2025-06-25T14:41:44.805677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:44.805701Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:44.805707Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:44.805794Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16209 2025-06-25T14:41:45.232863Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16209 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:45.383465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:45.407962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:41:45.587216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:45.773501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:45.860403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:47.476095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897208791792075:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:47.476208Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:47.902581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:47.941782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:47.991102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:48.022273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:48.099197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:48.201477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:48.321991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:48.423816Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897213086760031:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:48.423953Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:48.428038Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897213086760036:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:48.432674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:48.451715Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897213086760038:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:48.517344Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897213086760089:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:49.228921Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897195906888577:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:49.239939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Table [GOOD] >> KqpQueryPerf::IndexUpsert+QueryService-UseSink |83.7%| [TA] $(B)/ydb/core/kqp/ut/pg/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpQueryPerf::IdxLookupJoin+QueryService |83.7%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/pg/test-results/unittest/{meta.json ... results_accumulator.log} >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Query >> KqpQueryPerf::IndexLookupJoin+EnableStreamLookup+QueryService [GOOD] >> TxUsage::WriteToTopic_Demo_41_Table [GOOD] >> KqpQueryPerf::Insert+QueryService+UseSink [GOOD] >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup+QueryService [GOOD] >> SystemView::AuthGroups [GOOD] >> SystemView::AuthGroupMembers >> SystemView::ShowCreateTablePartitionByHash [GOOD] >> SystemView::ShowCreateTablePartitionSettings >> KqpQueryPerf::Upsert+QueryService+UseSink >> TxUsage::WriteToTopic_Demo_33_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Query [GOOD] >> TxUsage::WriteToTopic_Demo_24_Query >> TxUsage::WriteToTopic_Demo_41_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexLookupJoin+EnableStreamLookup+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 26049, MsgBus: 24920 2025-06-25T14:41:36.580690Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897162381856423:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:36.584851Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b42/r3tmp/tmpL527Wy/pdisk_1.dat 2025-06-25T14:41:37.580572Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:37.778734Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:37.780914Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:37.812923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:37.813011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:37.853474Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26049, node 1 2025-06-25T14:41:38.318706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:38.318731Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:38.318738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:38.318841Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24920 TClient is connected to server localhost:24920 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:40.132241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:40.164668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:41:40.176025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.422778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:41:40.750809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.842817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:41.580745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897162381856423:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:41.617382Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:41.934538Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897183856694388:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:41.934643Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.762306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.833818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.917231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.994015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.040894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.124168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.176152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.228853Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897192446629651:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.228923Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.229089Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897192446629656:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.232905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:43.245102Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897192446629658:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:41:43.304740Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897192446629709:3435] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 28387, MsgBus: 4043 2025-06-25T14:41:46.496057Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897204133314763:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:46.496917Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b42/r3tmp/tmpYk4rSx/pdisk_1.dat 2025-06-25T14:41:46.596443Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897204133314567:2080] 1750862506413565 != 1750862506413568 2025-06-25T14:41:46.601920Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28387, node 2 2025-06-25T14:41:46.612232Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:46.612331Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:46.672796Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:46.698262Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:46.698282Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:46.698291Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:46.698414Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4043 TClient is connected to server localhost:4043 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:41:47.234017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:47.249471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:47.331836Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:47.501457Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:41:47.556220Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:47.644015Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:50.356358Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897221313185378:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:50.356467Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:50.428715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.499235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.578128Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.633900Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.696475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.784336Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.869242Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.984943Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897221313186049:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:50.985034Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:50.985274Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897221313186054:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:50.989920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:51.009982Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:41:51.011604Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897221313186056:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:41:51.096739Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897225608153403:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:51.466078Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897204133314763:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:51.466144Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexLookupJoin-EnableStreamLookup+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 14811, MsgBus: 9601 2025-06-25T14:41:36.580698Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897164179208292:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:36.581064Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b49/r3tmp/tmp1JhAU1/pdisk_1.dat 2025-06-25T14:41:37.600503Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:37.730942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:37.731042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:37.753072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:37.834186Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:37.921864Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:38.055910Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.127301s 2025-06-25T14:41:38.055981Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.127382s TServer::EnableGrpc on GrpcPort 14811, node 1 2025-06-25T14:41:38.344851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:38.344875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:38.344881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:38.344987Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:9601 TClient is connected to server localhost:9601 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:39.889655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:40.009792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.315462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.683429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.799388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:41:41.577485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897164179208292:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:41.578652Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:41.657357Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897185654046240:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:41.657471Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.759611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.811202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.841091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.888760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.935321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.029121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.124459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.260702Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897194243981513:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.260784Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.262499Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897194243981518:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.266113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:43.276966Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897194243981520:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:43.374180Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897194243981571:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 18111, MsgBus: 10346 2025-06-25T14:41:46.669636Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897204377213346:2240];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b49/r3tmp/tmpRwUsPj/pdisk_1.dat 2025-06-25T14:41:46.709338Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:41:46.807943Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:46.809327Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897204377213128:2080] 1750862506590142 != 1750862506590145 2025-06-25T14:41:46.833885Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:46.833962Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 18111, node 2 2025-06-25T14:41:46.841396Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:47.008899Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:47.008922Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:47.008929Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:47.009063Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10346 TClient is connected to server localhost:10346 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:41:47.620360Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:47.634666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:47.641379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:41:47.652263Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:47.767972Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:47.954821Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:48.030997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:50.240510Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897221557083956:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:50.240605Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:50.303731Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.352633Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.435235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.494658Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.553470Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.630963Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.718355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.803978Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897221557084617:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:50.804057Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:50.804278Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897221557084622:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:50.807944Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:50.820939Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897221557084624:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:41:50.888409Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897221557084675:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:51.624404Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897204377213346:2240];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:51.624477Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Insert+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 32461, MsgBus: 24454 2025-06-25T14:41:36.584895Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897162776819574:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:36.592647Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b66/r3tmp/tmpBWXwjW/pdisk_1.dat 2025-06-25T14:41:37.588746Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:37.740107Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:37.793369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:37.827174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:37.830980Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:37.846013Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32461, node 1 2025-06-25T14:41:38.322948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:38.322966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:38.322971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:38.323075Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24454 TClient is connected to server localhost:24454 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:39.956694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:40.009381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.479117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.736025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:41:40.817634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:41.550116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897184251657647:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:41.550245Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:41.568626Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897162776819574:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:41.568733Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:42.759610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.802055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.843396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.923175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.992492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.075172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.162581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.283730Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519897192841592918:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.283799Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.283981Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897192841592923:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.287585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:43.304198Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897192841592925:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:43.380079Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897192841592978:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 4576, MsgBus: 9407 2025-06-25T14:41:46.296001Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897204879522371:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:46.296142Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b66/r3tmp/tmpMwtPER/pdisk_1.dat 2025-06-25T14:41:46.537470Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:46.538309Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897204879522352:2080] 1750862506295528 != 1750862506295531 2025-06-25T14:41:46.546855Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:46.546929Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:46.549087Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4576, node 2 2025-06-25T14:41:46.660811Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:46.660832Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:46.660839Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:46.660950Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9407 TClient is connected to server localhost:9407 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:41:47.152039Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:47.159632Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:41:47.171620Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:47.260946Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:47.418725Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:47.504195Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:47.603151Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:50.521323Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897222059393177:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:50.521390Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:50.658543Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.733198Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.778283Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.823727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.863082Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.936602Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:51.104972Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:51.231599Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897226354361132:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:51.231725Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:51.231982Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897226354361137:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:51.242859Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:51.268341Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897226354361139:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:41:51.300412Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897204879522371:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:51.300490Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:51.327807Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897226354361190:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Table >> KqpQueryPerf::IdxLookupJoinThreeWay-QueryService [GOOD] >> TxUsage::WriteToTopic_Demo_33_Query |83.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |83.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |83.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol >> SystemView::TopPartitionsByTliFields [GOOD] >> ViewQuerySplit::Basic [GOOD] >> ViewQuerySplit::WithPragmaTablePathPrefix [GOOD] >> ViewQuerySplit::WithPairedPragmaTablePathPrefix [GOOD] >> ViewQuerySplit::WithComments [GOOD] >> ViewQuerySplit::Joins >> ColumnStatistics::CountMinSketchServerlessStatistics [GOOD] >> ViewQuerySplit::Joins [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IdxLookupJoinThreeWay-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 63562, MsgBus: 12059 2025-06-25T14:41:36.564185Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897161677938602:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:36.581057Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b65/r3tmp/tmpnvQ1FY/pdisk_1.dat 2025-06-25T14:41:37.608179Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:37.808262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:37.810852Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:37.816064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:37.838799Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:37.844550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63562, node 1 2025-06-25T14:41:38.316914Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:38.316940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:38.316954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:38.317063Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12059 TClient is connected to server localhost:12059 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:40.083688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:40.098203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:40.113401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.395319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.618521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:41:40.688239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:41.551137Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897183152776681:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:41.551244Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:41.564569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897161677938602:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:41.564681Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:42.762306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.841135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.885303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.938510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.973715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.097771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.154125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.233210Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519897191742711941:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.233302Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.237500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897191742711946:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.246822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:43.261311Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897191742711948:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:43.357618Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897191742711999:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 17612, MsgBus: 1523 2025-06-25T14:41:46.997739Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897203654509491:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:46.997810Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b65/r3tmp/tmpvhsigK/pdisk_1.dat 2025-06-25T14:41:47.195891Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897203654509469:2080] 1750862506996484 != 1750862506996487 2025-06-25T14:41:47.201538Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:47.207690Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:47.208068Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:47.210252Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17612, node 2 2025-06-25T14:41:47.352939Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:47.352977Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:47.352987Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:47.353107Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1523 TClient is connected to server localhost:1523 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:41:48.023726Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:48.106633Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:48.113568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:41:48.119066Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:48.214656Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:48.475812Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:48.659401Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:51.322321Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897225129347567:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:51.322410Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:51.406541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:51.489239Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:51.555789Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:51.643227Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:51.742765Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:51.843181Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:51.953376Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:52.000509Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897203654509491:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:52.000733Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:52.042707Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519897229424315527:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:52.042793Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:52.043201Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897229424315532:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:52.047079Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:52.059639Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897229424315534:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:41:52.123500Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897229424315586:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpQueryPerf::Update+QueryService+UseSink [GOOD] >> SystemView::DescribeSystemFolder [GOOD] >> SystemView::DescribeAccessDenied >> KqpQueryPerf::KvRead+QueryService [GOOD] >> KqpQueryPerf::KvRead-QueryService >> KqpQueryPerf::IndexUpdateOn-QueryService-UseSink >> KqpQueryPerf::IndexUpsert-QueryService-UseSink >> KqpQueryPerf::Delete+QueryService-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Update+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 2619, MsgBus: 22838 2025-06-25T14:41:49.347722Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897218238146765:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:49.347765Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b31/r3tmp/tmpPDkn7f/pdisk_1.dat 2025-06-25T14:41:49.908333Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:49.910195Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:49.910351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:49.915466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2619, node 1 2025-06-25T14:41:50.144942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:50.144988Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:50.145002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:50.145110Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:50.343939Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22838 TClient is connected to server localhost:22838 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:51.204160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:51.237492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:51.517012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:51.771782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:51.904239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:54.079528Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897239712984854:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:54.079626Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:54.352523Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897218238146765:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:54.352607Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:54.434314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.469309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.510094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.574101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.616407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.658018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.707931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.775581Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519897239712985513:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:54.775684Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:54.776026Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897239712985518:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:54.779782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:54.799848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:41:54.800246Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897239712985520:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:54.858194Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897239712985571:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpQueryPerf::IndexInsert-QueryService+UseSink [GOOD] >> KqpQueryPerf::Delete-QueryService-UseSink [GOOD] >> KqpQueryPerf::Delete-QueryService+UseSink >> KqpQueryPerf::IndexInsert+QueryService+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> ColumnStatistics::CountMinSketchServerlessStatistics [GOOD] Test command err: 2025-06-25T14:38:36.085396Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:582:2380], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:36.085843Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:36.085929Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d29/r3tmp/tmpDsj8F1/pdisk_1.dat 2025-06-25T14:38:36.467073Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64096, node 1 2025-06-25T14:38:36.703410Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:36.703468Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:36.703503Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:36.704434Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:36.707160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:36.806389Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:36.806530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:36.834594Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4947 2025-06-25T14:38:37.391426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:40.320363Z node 4 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 4 2025-06-25T14:38:40.359476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:40.359605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:40.401341Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-25T14:38:40.403714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:40.639586Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:40.664276Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:40.665300Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:40.665562Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:40.665692Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:40.665904Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:40.666006Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:40.666066Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:40.666122Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:40.666168Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:40.855980Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:40.856093Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:40.869805Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:41.025898Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:41.078519Z node 4 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:41.078639Z node 4 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:41.109602Z node 4 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:41.110977Z node 4 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:41.111205Z node 4 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:41.111265Z node 4 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:41.111354Z node 4 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:41.111410Z node 4 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:41.111480Z node 4 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:41.111554Z node 4 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:41.111999Z node 4 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:41.149248Z node 4 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:41.149397Z node 4 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [4:1959:2561], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:41.157746Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [4:1974:2572] 2025-06-25T14:38:41.171328Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [4:2008:2588] 2025-06-25T14:38:41.172055Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [4:2008:2588], schemeshard id = 72075186224037897 2025-06-25T14:38:41.181996Z node 4 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-25T14:38:41.199045Z node 4 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:41.199110Z node 4 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:41.199177Z node 4 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-25T14:38:41.213653Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:41.230432Z node 4 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:41.230620Z node 4 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:41.394810Z node 4 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:41.557645Z node 4 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:41.624382Z node 4 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T14:38:42.311280Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:42.357988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:45.258362Z node 3 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 3 2025-06-25T14:38:45.303055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:45.303177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:45.303736Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:45.303804Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:45.350515Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:38:45.351902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:45.352644Z node 4 :HIVE WARN: hive_impl.cpp:781: HIVE#72075186224037888 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:38:45.356380Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:45.434382Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:45.601772Z node 4 :STATISTICS DEBUG: schemeshard_impl.cpp:7894: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-25T14:38:45.601847Z node 4 :STATISTICS DEBUG: schemeshard_impl.cpp:7910: Handle TEvTxProxySchem ... 50.353748Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:50.353792Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 2025-06-25T14:41:50.353842Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T14:41:50.367465Z node 4 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T14:41:50.394212Z node 4 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T14:41:50.394924Z node 4 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T14:41:50.395033Z node 4 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T14:41:50.396173Z node 4 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T14:41:50.412580Z node 4 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T14:41:50.412860Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 4, Round: 2, current Round: 0 2025-06-25T14:41:50.413700Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:12193:7523], server id = [4:12194:7524], tablet id = 72075186224037911, status = OK 2025-06-25T14:41:50.414077Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:12193:7523], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T14:41:50.418470Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037911 2025-06-25T14:41:50.418591Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 4 2025-06-25T14:41:50.418810Z node 4 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T14:41:50.419002Z node 4 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T14:41:50.419449Z node 4 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared 2025-06-25T14:41:50.422091Z node 4 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 4, client id = [4:12193:7523], server id = [4:12194:7524], tablet id = 72075186224037911 2025-06-25T14:41:50.422159Z node 4 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:41:50.422877Z node 4 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T14:41:50.471635Z node 4 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [4:12214:7543]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:50.471979Z node 4 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:41:50.472044Z node 4 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [4:12214:7543], StatRequests.size() = 1 2025-06-25T14:41:50.660624Z node 4 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 4, interval end# 1970-01-01T00:02:04.000000Z, event interval end# 2025-06-25T14:41:48.000000Z 2025-06-25T14:41:50.661488Z node 4 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=YWExNTU0MzktODQ2YjIxMGEtMjI5MTQzNDMtNTU2YzU3MmI=, TxId: 2025-06-25T14:41:50.661541Z node 4 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=YWExNTU0MzktODQ2YjIxMGEtMjI5MTQzNDMtNTU2YzU3MmI=, TxId: 2025-06-25T14:41:50.662550Z node 4 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:50.677484Z node 4 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] 
TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T14:41:50.677551Z node 4 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T14:41:51.494943Z node 4 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-25T14:41:51.495227Z node 4 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T14:41:54.153864Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T14:41:54.154345Z node 4 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 4 2025-06-25T14:41:54.180782Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:41:54.180857Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:41:54.180896Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037905, LocalPathId: 2] is column table. 2025-06-25T14:41:54.180929Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037905, LocalPathId: 2] 2025-06-25T14:41:54.184838Z node 4 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T14:41:54.210030Z node 4 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T14:41:54.210666Z node 4 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T14:41:54.210743Z node 4 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T14:41:54.211342Z node 4 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T14:41:54.240870Z node 4 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T14:41:54.241158Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 4, Round: 3, current Round: 0 2025-06-25T14:41:54.241968Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:12358:7614], server id = [4:12359:7615], tablet id = 72075186224037912, status = OK 2025-06-25T14:41:54.242063Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:12358:7614], path = { OwnerId: 72075186224037905 LocalId: 2 } 2025-06-25T14:41:54.246089Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037912 2025-06-25T14:41:54.246192Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 4 2025-06-25T14:41:54.246365Z node 4 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T14:41:54.246525Z node 4 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T14:41:54.246809Z node 4 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-25T14:41:54.250731Z node 4 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 4, client id = [4:12358:7614], server id = [4:12359:7615], tablet id = 72075186224037912 2025-06-25T14:41:54.250775Z node 4 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:41:54.251537Z node 4 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T14:41:54.279116Z node 4 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=NDA5NDc2YTYtMTg3YTY1YWEtZjQxZWU4MDAtYmZmNWRlZGM=, TxId: 2025-06-25T14:41:54.279172Z node 4 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=NDA5NDc2YTYtMTg3YTY1YWEtZjQxZWU4MDAtYmZmNWRlZGM=, TxId: 2025-06-25T14:41:54.280389Z node 4 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:41:54.281605Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:12382:6181]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:54.281951Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:41:54.282010Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T14:41:54.286861Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:41:54.286953Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-25T14:41:54.287047Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T14:41:54.306104Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 2025-06-25T14:41:54.307503Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:12382:6181]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T14:41:54.307998Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:41:54.308075Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T14:41:54.308591Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:41:54.308659Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-25T14:41:54.308731Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 
72075186224037905, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T14:41:54.314230Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 >> KqpQueryPerf::DeleteOn+QueryService-UseSink [GOOD] >> KqpQueryPerf::DeleteOn+QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> ViewQuerySplit::Joins [GOOD] Test command err: 2025-06-25T14:40:38.764448Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896911750007006:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:38.764544Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012e6/r3tmp/tmpHNbtw8/pdisk_1.dat 2025-06-25T14:40:39.051703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:39.051828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:39.053369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:39.065907Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896911750006982:2080] 1750862438763721 != 1750862438763724 2025-06-25T14:40:39.069090Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13262, node 1 2025-06-25T14:40:39.115774Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:39.115794Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:39.115801Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:39.115918Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7056 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:40:39.315517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:39.772152Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:40.467647Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896920339942213:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:40.467649Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896920339942202:2287], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:40.467732Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:40.469954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:40.476106Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896920339942216:2291], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:40:40.529617Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896920339942267:2329] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:41.194773Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrh8z4a1f40gkphjcthj6c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODU1NTI3NTgtN2M2YzU3NjgtYzJmZDhhNGYtMTc3ZjRkMmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:40:41.256180Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519896924634909608:2300], owner: [1:7519896924634909604:2298], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:41.266008Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519896924634909608:2300], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:40:41.289493Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519896924634909608:2300], row count: 1, finished: 1 2025-06-25T14:40:41.289632Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519896924634909608:2300], owner: [1:7519896924634909604:2298], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:41.306459Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862441171, txId: 281474976715660] shutting down 2025-06-25T14:40:42.413038Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrhbt81zjs4sj7ks0331aj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmRjOWYzZmMtYmFiMjM3NjMtYzE5NzRhMjktYjVjM2I1ZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:40:42.415049Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519896928929876948:2314], owner: [1:7519896928929876944:2312], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:42.415425Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519896928929876948:2314], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:40:42.415601Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519896928929876948:2314], row count: 1, finished: 1 2025-06-25T14:40:42.415654Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519896928929876948:2314], owner: [1:7519896928929876944:2312], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:42.416933Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862442412, txId: 281474976715662] shutting down 2025-06-25T14:40:43.540667Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykrhcwz0tfvnt9yhgabf6ph, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDVhNzIzZDktNmYwYzg0ZGYtYTVkYmU5NmQtOTgyY2ViODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:40:43.542288Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519896933224844283:2326], owner: [1:7519896933224844279:2324], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:43.542606Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519896933224844283:2326], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:40:43.542763Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519896933224844283:2326], row count: 1, finished: 1 2025-06-25T14:40:43.542800Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519896933224844283:2326], owner: [1:7519896933224844279:2324], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:43.543849Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862443539, txId: 281474976715664] shutting down 2025-06-25T14:40:43.764517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896911750007006:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:43.764601Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:40:44.652449Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. 
Ctx: { TraceId: 01jykrhe050ke72rhnjhb9wm8b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTFjNTM5MzYtMWY4ZTBmNzAtMmRjZTdmY2ItZWQ3MmE0Yzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:40:44.654233Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519896937519811620:2339], owner: [1:7519896937519811617:2337], scan id: 0, sys view info: Type: EVSlots SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:44.654584Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519896937519811620:2339], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:40:44.654773Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519896937519811620:2339], row count: 2, fini ... d# 2025-06-25T14:41:52.000000Z, query count# 0 2025-06-25T14:41:52.539914Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 8, interval end# 2025-06-25T14:41:52.000000Z, query count# 0, persisted# 0 2025-06-25T14:41:52.539938Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 10, interval end# 2025-06-25T14:41:52.000000Z, query count# 0, persisted# 0 2025-06-25T14:41:52.539968Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 12, interval end# 2025-06-25T14:41:52.000000Z, query count# 0, persisted# 0 2025-06-25T14:41:52.539990Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 14, interval end# 2025-06-25T14:41:52.000000Z, query count# 0, persisted# 0 2025-06-25T14:41:52.540012Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 9, interval end# 2025-06-25T15:00:00.000000Z, query count# 0, persisted# 0 2025-06-25T14:41:52.540032Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 11, interval end# 2025-06-25T15:00:00.000000Z, query count# 0, persisted# 0 2025-06-25T14:41:52.540053Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 13, interval end# 2025-06-25T15:00:00.000000Z, query count# 0, persisted# 0 2025-06-25T14:41:52.540074Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 15, interval end# 2025-06-25T15:00:00.000000Z, query count# 0, persisted# 0 2025-06-25T14:41:52.543201Z node 15 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:110: [72075186224037893] TTxAggregate::Complete 2025-06-25T14:41:52.554311Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710681. Ctx: { TraceId: 01jykrkf0gczxj69hg4m175hgf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=MzE2OTFiZjAtYjI5ZmExYmEtNmM1MGQ5NjQtOTFiMjEyY2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:41:52.557846Z node 12 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862511946, txId: 281474976710679] shutting down 2025-06-25T14:41:52.704538Z node 14 :SYSTEM_VIEWS DEBUG: partition_stats.cpp:510: NSysView::TPartitionStatsCollector: TEvProcessOverloaded , top size by CPU # 1, top size by TLI # 1, time# 2025-06-25T14:41:52.704401Z 2025-06-25T14:41:52.704824Z node 14 :SYSTEM_VIEWS DEBUG: tx_top_partitions.cpp:125: [72075186224037899] TTxTopPartitions::Execute: , partition by CPU count# 1, partition by TLI count# 1 2025-06-25T14:41:52.716733Z node 14 :SYSTEM_VIEWS DEBUG: tx_top_partitions.cpp:137: [72075186224037899] TTxTopPartitions::Complete 2025-06-25T14:41:52.856452Z node 15 :SYSTEM_VIEWS DEBUG: partition_stats.cpp:510: NSysView::TPartitionStatsCollector: TEvProcessOverloaded , top size by CPU # 1, top size by TLI # 1, time# 2025-06-25T14:41:52.855704Z 2025-06-25T14:41:52.856797Z node 15 :SYSTEM_VIEWS DEBUG: tx_top_partitions.cpp:125: [72075186224037893] TTxTopPartitions::Execute: , partition by CPU count# 1, partition by TLI count# 1 2025-06-25T14:41:52.865229Z node 15 :SYSTEM_VIEWS DEBUG: tx_top_partitions.cpp:137: [72075186224037893] TTxTopPartitions::Complete 2025-06-25T14:41:52.872774Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710683. Ctx: { TraceId: 01jykrkgdwd8a2rhd4r5b0cjxm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=ZTEwNzI2YWMtZGViYTJhNWEtMjc5ZWFhOTktZDYyNDFiNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:41:52.876577Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [12:7519897233057768347:2439], owner: [12:7519897233057768344:2437], scan id: 0, sys view info: Type: ETopPartitionsByTliOneMinute SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 2025-06-25T14:41:52.877677Z node 15 :SYSTEM_VIEWS DEBUG: db_counters.cpp:542: [72075186224037893] TEvApplyCounters: services count# 1 2025-06-25T14:41:52.878414Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [12:7519897233057768347:2439], schemeshard id: 72075186224037888, hive id: 72057594037968897, database: /Root/Tenant1, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 2], database node count: 2 2025-06-25T14:41:52.884447Z node 15 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:641: [72075186224037893] Reply batch: range# From { IntervalEndUs: 1750862512000000 Rank: 0 } InclusiveFrom: true To { IntervalEndUs: 1750862512000000 Rank: 4294967295 } InclusiveTo: true Type: TOP_PARTITIONS_BY_TLI_ONE_MINUTE , rows# 1, bytes# 63, next# 2025-06-25T14:41:52.885072Z node 12 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [12:7519897233057768347:2439], row count: 1, finished: 1 2025-06-25T14:41:52.885159Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [12:7519897233057768347:2439], owner: [12:7519897233057768344:2437], scan id: 0, sys view info: Type: ETopPartitionsByTliOneMinute SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 2025-06-25T14:41:52.900594Z node 12 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862512871, txId: 281474976710682] shutting down 2025-06-25T14:41:52.929155Z node 16 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# 
[16:7519897145887253849:2073], processor id# 72075186224037893, database# /Root/Tenant1 2025-06-25T14:41:52.929439Z node 16 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [16:7519897145887253849:2073], database# /Root/Tenant1, processor id# 72075186224037893 2025-06-25T14:41:52.925268Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15 2025-06-25T14:41:52.926166Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:41:52.926398Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 13 2025-06-25T14:41:52.926611Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:41:52.926712Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 16 2025-06-25T14:41:52.926934Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:41:52.928479Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 14 2025-06-25T14:41:52.929303Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:41:52.931670Z node 13 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [13:7519897145256073208:2076], processor id# 72075186224037899, database# /Root/Tenant2 2025-06-25T14:41:52.929029Z node 15 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [15:7519897147120687291:2073], processor id# 72075186224037893, database# /Root/Tenant1 2025-06-25T14:41:52.930715Z node 14 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [14:7519897146310903941:2075], processor id# 72075186224037899, database# /Root/Tenant2 2025-06-25T14:41:52.952077Z node 15 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [15:7519897147120687291:2073], database# /Root/Tenant1, processor id# 72075186224037893 2025-06-25T14:41:52.950108Z node 12 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[14:7519897146310904127:2109], Type=268959746 2025-06-25T14:41:52.953484Z node 13 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [13:7519897145256073208:2076], database# /Root/Tenant2, processor id# 72075186224037899 2025-06-25T14:41:53.018391Z node 14 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [14:7519897146310903941:2075], database# /Root/Tenant2, processor id# 72075186224037899 2025-06-25T14:41:54.000231Z node 16 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [16:7519897145887253849:2073], interval end# 2025-06-25T14:41:54.000000Z, event interval end# 2025-06-25T14:41:54.000000Z 2025-06-25T14:41:54.000298Z node 16 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [16:7519897145887253849:2073], query logs count# 0, processor ids count# 1, processor id to database count# 1 2025-06-25T14:41:54.000335Z node 16 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# 
[16:7519897137297319183:2064], interval end# 2025-06-25T14:41:54.000000Z, event interval end# 2025-06-25T14:41:54.000000Z 2025-06-25T14:41:54.000365Z node 16 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [16:7519897137297319183:2064], query logs count# 0, processor ids count# 0, processor id to database count# 0 2025-06-25T14:41:54.001024Z node 15 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [15:7519897138530752622:2064], interval end# 2025-06-25T14:41:54.000000Z, event interval end# 2025-06-25T14:41:54.000000Z 2025-06-25T14:41:54.001065Z node 15 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [15:7519897138530752622:2064], query logs count# 0, processor ids count# 0, processor id to database count# 0 2025-06-25T14:41:54.001138Z node 15 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [15:7519897147120687291:2073], interval end# 2025-06-25T14:41:54.000000Z, event interval end# 2025-06-25T14:41:54.000000Z 2025-06-25T14:41:54.001155Z node 15 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [15:7519897147120687291:2073], query logs count# 0, processor ids count# 1, processor id to database count# 1 2025-06-25T14:41:54.000231Z node 13 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [13:7519897145256073208:2076], interval end# 2025-06-25T14:41:54.000000Z, event interval end# 2025-06-25T14:41:54.000000Z 2025-06-25T14:41:54.000328Z node 13 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [13:7519897145256073208:2076], query logs count# 0, processor ids count# 1, processor id to database count# 1 2025-06-25T14:41:54.004476Z node 13 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [13:7519897132371171230:2064], interval end# 2025-06-25T14:41:54.000000Z, event interval end# 2025-06-25T14:41:54.000000Z 2025-06-25T14:41:54.004505Z node 13 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [13:7519897132371171230:2064], query logs count# 0, processor ids count# 0, processor id to database count# 0 2025-06-25T14:41:55.112468Z node 15 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [15:7519897138530752622:2064] 2025-06-25T14:41:55.272193Z node 13 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [13:7519897145256073208:2076] 2025-06-25T14:41:55.480475Z node 13 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [13:7519897132371171230:2064] |83.7%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test >> SystemView::TabletsFollowers [GOOD] >> SystemView::TabletsRanges >> KqpQueryPerf::IndexReplace-QueryService+UseSink [GOOD] |83.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |83.8%| [LD] {RESULT} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test >> KqpQueryPerf::Update-QueryService-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexInsert-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 8310, MsgBus: 1666 2025-06-25T14:41:36.589104Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897160413654684:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:36.589328Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b56/r3tmp/tmpuzA8Ik/pdisk_1.dat 2025-06-25T14:41:37.592541Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:37.769993Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:37.777740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:37.796683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:37.819352Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:37.822137Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:38.005769Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.101070s 2025-06-25T14:41:38.005847Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.101174s TServer::EnableGrpc on GrpcPort 8310, node 1 2025-06-25T14:41:38.348873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:38.348896Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:38.348903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:38.349021Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1666 TClient is connected to server localhost:1666 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:41:40.617124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:40.642159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:40.655957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.999179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:41.225114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:41.325944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:41.588707Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897160413654684:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:41.588789Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:42.434261Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897186183459957:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.434369Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.809993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.862803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.918021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.962930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.029165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.085350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.173786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.259563Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897190478427919:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.259627Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.260284Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897190478427924:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.265168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:43.287513Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897190478427926:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:43.355268Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897190478427977:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathState ... nown -> Disconnected 2025-06-25T14:41:48.858337Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:48.861233Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:48.872071Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:48.880526Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897215523373113:2080] 1750862508569507 != 1750862508569510 TServer::EnableGrpc on GrpcPort 6796, node 2 2025-06-25T14:41:49.028803Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:49.028823Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:49.028832Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:49.028941Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28770 2025-06-25T14:41:49.612891Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28770 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:49.686175Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:41:49.709838Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:49.799267Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:49.959826Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:50.061040Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:53.035836Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897232703243912:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.035977Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.059865Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.109136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.149214Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.181872Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.217832Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.306561Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.392068Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.479027Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897236998211870:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.479146Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.479523Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897236998211876:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.483599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:53.498469Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897236998211878:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:41:53.598638Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897236998211929:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:53.617708Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897215523373340:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:53.617767Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:54.846875Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.933477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:55.043505Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexInsert+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 12968, MsgBus: 12105 2025-06-25T14:41:36.576119Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897162429535216:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:36.581786Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b3f/r3tmp/tmpjs0dLd/pdisk_1.dat 2025-06-25T14:41:37.575489Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:37.757097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:37.790190Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:37.790340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-25T14:41:37.793573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:37.955451Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12968, node 1 2025-06-25T14:41:38.321195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:38.321217Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:38.321223Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:38.321318Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12105 TClient is connected to server localhost:12105 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:39.945185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:39.976476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:40.010994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.309133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:41:40.704541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.815079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:41.575838Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897162429535216:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:41.575909Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:41.753337Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897183904373158:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:41.753458Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.760399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.804169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.869270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.910095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.951335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.005836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.054678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.180818Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897192494308421:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.180902Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.181202Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897192494308426:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.193806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:43.219207Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897192494308428:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:43.293538Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897192494308479:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:45.064765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first calle ... 5-06-25T14:41:49.008434Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:49.008511Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:49.022728Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9506, node 2 2025-06-25T14:41:49.216880Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:49.216908Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:49.216918Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:49.217056Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10745 2025-06-25T14:41:49.733916Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10745 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:41:49.906272Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:41:49.913812Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:41:49.924495Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:50.019652Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:50.211029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:50.304415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:53.088114Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897233244174045:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.088209Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.178459Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.258645Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.307392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.368943Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.409139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.456330Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.517890Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.624526Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897233244174704:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.624658Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.628443Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897233244174709:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.636862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:53.655458Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897233244174711:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:53.735385Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897233244174762:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:53.737078Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897211769336128:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:53.737135Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:54.952519Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:55.019982Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:55.103659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> SystemView::AuthOwners_ResultOrder [GOOD] >> SystemView::AuthOwners_TableRange ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexReplace-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 2392, MsgBus: 11908 2025-06-25T14:41:36.583264Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897161439778724:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:36.583503Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b7a/r3tmp/tmpkxvVeM/pdisk_1.dat 2025-06-25T14:41:37.575627Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:37.792814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:37.807689Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:37.809648Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:37.809727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:37.811514Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:38.025219Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.119830s 2025-06-25T14:41:38.025287Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.119910s TServer::EnableGrpc on GrpcPort 2392, node 1 2025-06-25T14:41:38.316692Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:38.316722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:38.316734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:38.316874Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11908 TClient is connected to server localhost:11908 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:40.262623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:40.301903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:40.311678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:41:40.528966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.778766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:40.880212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:41.581755Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897161439778724:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:41.581818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:41.989001Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897182914616662:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:41.989111Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.762022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.828936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.872807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.936671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.987854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.061375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.103739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.184726Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897191504551918:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.184823Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.185243Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897191504551923:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.194335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:43.210094Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897191504551925:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:43.306091Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897191504551976:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathSt ... c on GrpcPort 10476, node 2 2025-06-25T14:41:49.231063Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:49.276667Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:49.304065Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897211742962108:2080] 1750862508902354 != 1750862508902357 2025-06-25T14:41:49.383943Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:49.383964Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:49.383972Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:49.384082Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3718 TClient is connected to server localhost:3718 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:41:49.961136Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:49.985511Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:41:50.000591Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:50.022157Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:50.105169Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:50.277326Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:50.365360Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:53.225560Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897233217800236:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.225676Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.298402Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.332667Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.361795Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.395722Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.426771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.459862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.533638Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.653845Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897233217800901:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.653947Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.654177Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897233217800906:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:53.658942Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:53.670859Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897233217800908:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:53.726547Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897233217800959:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:53.905480Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897211742962130:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:53.905547Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:55.056903Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:55.167763Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:55.278089Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpQueryPerf::Upsert-QueryService-UseSink >> KqpQueryPerf::RangeRead-QueryService >> KqpQueryPerf::IdxLookupJoin+QueryService [GOOD] >> KqpQueryPerf::IdxLookupJoin-QueryService >> KqpQueryPerf::MultiDeleteFromTable+QueryService-UseSink >> KqpQueryPerf::IndexDeleteOn-QueryService+UseSink [GOOD] >> KqpQueryPerf::Upsert+QueryService+UseSink [GOOD] >> KqpQueryPerf::UpdateOn-QueryService+UseSink >> SystemView::ShowCreateTableColumnAlterObject [GOOD] |83.8%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots |83.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots |83.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots >> KqpQueryPerf::UpdateOn+QueryService+UseSink >> KqpQueryPerf::Upsert-QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexDeleteOn-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 8647, MsgBus: 32015 2025-06-25T14:41:42.572353Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897187675525509:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:42.572598Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b36/r3tmp/tmpoDA6Nj/pdisk_1.dat 2025-06-25T14:41:42.932338Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:42.936542Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897187675525399:2080] 1750862502556481 != 1750862502556484 TServer::EnableGrpc on GrpcPort 8647, node 1 2025-06-25T14:41:42.979943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:42.980253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:43.022006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:43.053392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:43.053414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:43.053420Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:43.053549Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32015 TClient is connected to server localhost:32015 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:41:43.572462Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:43.611434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:41:43.640332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:43.829113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:44.007970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:44.103483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:45.793249Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897200560428920:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:45.793400Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:46.134354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:46.180095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:46.262779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:46.344388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:46.388475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:46.428580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:46.478619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:46.594577Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897204855396881:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:46.594665Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:46.594839Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897204855396886:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:46.598825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:46.610141Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897204855396888:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:46.700095Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897204855396939:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:47.559224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897187675525509:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:47.559344Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:47.945065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:48.000470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part ... known -> Disconnected 2025-06-25T14:41:51.788941Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:51.793286Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:51.814337Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:51.816538Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897224735962592:2080] 1750862511439952 != 1750862511439955 TServer::EnableGrpc on GrpcPort 65077, node 2 2025-06-25T14:41:52.024447Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:52.024471Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:52.024480Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:52.024593Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9760 2025-06-25T14:41:52.474146Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9760 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:53.153896Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:53.179478Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:53.357475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:53.642908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:53.753143Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:56.000128Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897241915833413:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.000232Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.061939Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.103966Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.145402Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.176648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.227919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.277928Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.319324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.404647Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897246210801366:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.404784Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.408716Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897246210801371:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.413569Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:56.444546Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897224735962611:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:56.444614Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:56.446215Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897246210801373:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:56.515356Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897246210801424:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:57.599999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.662380Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.778181Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpQueryPerf::IndexUpsert+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexUpsert+QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Upsert+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 22903, MsgBus: 20372 2025-06-25T14:41:54.730074Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897241479799966:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:54.749570Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b1a/r3tmp/tmpwEVZMd/pdisk_1.dat 2025-06-25T14:41:55.421297Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897241479799774:2080] 1750862514703988 != 1750862514703991 2025-06-25T14:41:55.463757Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:55.464335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:55.464398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:55.470579Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22903, node 1 2025-06-25T14:41:55.554398Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:55.554422Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:55.554434Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:55.554570Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:55.696560Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20372 TClient is connected to server localhost:20372 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:56.167214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:56.187903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:56.204012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:56.365346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:56.584578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:41:56.669600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:58.436431Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897258659670598:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:58.436566Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:58.784573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:58.811553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:58.845931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:58.881650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:58.956065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:59.030025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:59.073304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:59.151294Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897262954638557:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:59.151372Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:59.151705Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897262954638562:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:59.155650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:59.173857Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897262954638564:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:59.260929Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897262954638615:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:59.755890Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897241479799966:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:59.756224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TxUsage::WriteToTopic_Demo_18_RestartNo_Table [GOOD] >> KqpQueryPerf::KvRead-QueryService [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartNo_Query >> KqpQueryPerf::Delete+QueryService-UseSink [GOOD] >> KqpQueryPerf::Delete+QueryService+UseSink >> KqpQueryPerf::AggregateToScalar+QueryService >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v0] [GOOD] >> KqpQueryPerf::Update+QueryService-UseSink >> KqpQueryPerf::Delete-QueryService+UseSink [GOOD] >> SystemView::AuthUsers_ResultOrder [GOOD] >> SystemView::AuthUsers_TableRange >> KqpQueryPerf::DeleteOn+QueryService+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::KvRead-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 6733, MsgBus: 19998 2025-06-25T14:41:49.958468Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897219606134392:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:49.958958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b2e/r3tmp/tmpk89v9W/pdisk_1.dat 2025-06-25T14:41:50.349322Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6733, node 1 2025-06-25T14:41:50.421524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:50.421627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:50.428239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:50.564976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:50.565003Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:50.565009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:50.565125Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19998 2025-06-25T14:41:50.960501Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19998 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:51.470494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:51.509459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:51.514770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:51.710590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:51.939026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:52.053962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:41:54.240027Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897241080972398:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:54.240168Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:54.669186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.753760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.828861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.864678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.907904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:54.948988Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897219606134392:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:54.949045Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:55.017769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:55.081119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:55.238253Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519897245375940357:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:55.238336Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:55.238755Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897245375940362:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:55.244645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:55.267075Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897245375940364:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:55.350281Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897245375940415:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 6123, MsgBus: 26866 2025-06-25T14:41:57.576192Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897253347443284:2149];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b2e/r3tmp/tmpJ2woOi/pdisk_1.dat 2025-06-25T14:41:57.662942Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:41:57.743306Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:57.746158Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897253347443151:2080] 1750862517515530 != 1750862517515533 2025-06-25T14:41:57.759674Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:57.759751Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 6123, node 2 2025-06-25T14:41:57.761233Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:57.929127Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:57.929152Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:57.929161Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:57.929289Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26866 TClient is connected to server localhost:26866 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:41:58.560427Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:58.597668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:58.604098Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:41:58.611553Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:58.680724Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:58.913049Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:58.989566Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:01.052546Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897270527313947:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.052635Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.106718Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.141674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.179747Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.217047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.292524Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.355286Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.405054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.500063Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897270527314610:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.500124Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897270527314615:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.500191Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.504182Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:01.518435Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897270527314617:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:01.579460Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897270527314668:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:02.554405Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897253347443284:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:02.554577Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::Update-QueryService-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Delete-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 15973, MsgBus: 17210 2025-06-25T14:41:51.897429Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897227789978783:2218];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:51.897883Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b20/r3tmp/tmpTJFKSL/pdisk_1.dat 2025-06-25T14:41:52.352412Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897227789978594:2080] 1750862511853795 != 1750862511853798 2025-06-25T14:41:52.356657Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:52.378466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:52.378563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 15973, node 1 2025-06-25T14:41:52.394464Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:52.548740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:52.548765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:52.548772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:52.548902Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17210 2025-06-25T14:41:52.899433Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17210 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:53.290269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:53.318566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:53.491964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:53.618840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:53.704770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:55.442481Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897244969849426:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:55.442638Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:55.739946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:55.776633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:55.828589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:55.915543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:55.949701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.006230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.065672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.133943Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897249264817385:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.134040Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.134413Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897249264817390:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.140436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:56.161710Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897249264817392:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:56.251050Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897249264817443:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:56.888464Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897227789978783:2218];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:56.888540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4808, MsgBus: 8793 2025-06-25T14:41:58.602175Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897257260436923:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:58.602215Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b20/r3tmp/tmpxuxDKn/pdisk_1.dat 2025-06-25T14:41:58.773029Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:58.775685Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897257260436889:2080] 1750862518590040 != 1750862518590043 2025-06-25T14:41:58.805083Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:58.805207Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:58.808224Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4808, node 2 2025-06-25T14:41:58.987901Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:58.987919Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:58.987925Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:58.988021Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8793 TClient is connected to server localhost:8793 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:41:59.638797Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:59.642571Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:59.657411Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:41:59.675219Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:59.741350Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:59.908605Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:59.979958Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:02.063917Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897274440307713:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:02.064004Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:02.101007Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.129314Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.158947Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.235452Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.267265Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.299991Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.373402Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.434867Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897274440308374:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:02.434962Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:02.435605Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897274440308379:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:02.439699Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:02.452554Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897274440308381:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:02.506979Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897274440308432:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:03.603040Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897257260436923:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:03.603108Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::DeleteOn+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 28584, MsgBus: 15673 2025-06-25T14:41:51.663934Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897227803986209:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:51.725423Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b2b/r3tmp/tmp4gB4tY/pdisk_1.dat 2025-06-25T14:41:52.193980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:52.194084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:52.317872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:52.352475Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897227803986180:2080] 1750862511656742 != 1750862511656745 2025-06-25T14:41:52.370902Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28584, node 1 2025-06-25T14:41:52.592888Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:52.592907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:52.592916Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:52.593014Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:52.704433Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15673 TClient is connected to server localhost:15673 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:53.523578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:53.542390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:53.551319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:41:53.739177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:53.917340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:54.026951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:55.816982Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897244983857000:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:55.817103Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.147989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.245358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.280387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.318943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.363483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.425071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.470743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:56.559981Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897249278824958:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.560062Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.560320Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897249278824963:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.565409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:56.580913Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897249278824965:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:56.647210Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897249278825016:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:56.664424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897227803986209:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:56.664485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 9177, MsgBus: 17096 2025-06-25T14:41:58.801863Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897255065180734:2235];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b2b/r3tmp/tmprgnXJ3/pdisk_1.dat 2025-06-25T14:41:58.857478Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:41:58.910437Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:58.912537Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897255065180524:2080] 1750862518688478 != 1750862518688481 2025-06-25T14:41:58.929268Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:58.929340Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:58.930510Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9177, node 2 2025-06-25T14:41:59.104080Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:59.104103Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:59.104112Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:59.104242Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17096 TClient is connected to server localhost:17096 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:41:59.688612Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:59.712837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:59.717921Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:41:59.735340Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:59.796672Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:59.934442Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:00.007509Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:02.384832Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897272245051335:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:02.384904Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:02.432510Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.492268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.525660Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.553426Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.586888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.634949Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.677051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.752640Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897272245051991:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:02.752741Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:02.753120Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897272245051996:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:02.757344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:02.769672Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897272245051998:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:02.848009Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897272245052049:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:03.750125Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897255065180734:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:03.750192Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::IndexUpsert-QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexUpsert-QueryService+UseSink >> KqpQueryPerf::IndexUpdateOn-QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexUpdateOn-QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Update-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 23887, MsgBus: 2922 2025-06-25T14:41:59.146354Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897262345513270:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:59.153103Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b07/r3tmp/tmpdxrch0/pdisk_1.dat 2025-06-25T14:41:59.665932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:59.666033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:59.686116Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:59.688471Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897262345513237:2080] 1750862519120196 != 1750862519120199 2025-06-25T14:41:59.705830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23887, node 1 2025-06-25T14:41:59.830253Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:59.830269Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:59.830274Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:59.830387Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2922 2025-06-25T14:42:00.159472Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2922 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:00.438831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:00.468099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:00.662462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:00.829275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:00.928035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:02.623686Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897275230416764:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:02.623756Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:02.910603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.947129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.979861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:03.008045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:03.036422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:03.100396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:03.141006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:03.236468Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897279525384726:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:03.236545Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:03.236683Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897279525384731:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:03.240970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:03.252485Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897279525384733:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:03.328524Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897279525384784:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:04.158327Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897262345513270:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:04.158381Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::MultiRead+QueryService >> KqpQueryPerf::Upsert-QueryService-UseSink [GOOD] >> KqpQueryPerf::RangeRead-QueryService [GOOD] >> TxUsage::WriteToTopic_Demo_5_Table [GOOD] >> TxUsage::WriteToTopic_Demo_33_Query [GOOD] >> BasicUsage::ConflictingWrites [GOOD] >> Describe::LocationWithKillTablets >> KqpQueryPerf::UpdateOn-QueryService+UseSink [GOOD] >> KqpQueryPerf::IdxLookupJoin-QueryService [GOOD] >> TxUsage::WriteToTopic_Demo_24_Query [GOOD] >> KqpQueryPerf::MultiDeleteFromTable+QueryService-UseSink [GOOD] >> KqpQueryPerf::MultiDeleteFromTable+QueryService+UseSink >> KqpQueryPerf::Upsert-QueryService+UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_34_Table >> KqpQueryPerf::RangeRead+QueryService >> BasicStatistics::TwoDatabases [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Upsert-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 28995, MsgBus: 5667 2025-06-25T14:42:00.400793Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897263258810392:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:00.400844Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b05/r3tmp/tmpj70OIj/pdisk_1.dat 2025-06-25T14:42:00.882647Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:00.933350Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:00.933452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:00.950245Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28995, node 1 2025-06-25T14:42:01.119132Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:01.119171Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:01.119180Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:01.119300Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:5667 2025-06-25T14:42:01.403661Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5667 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:01.733293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:01.764963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:01.979389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:02.146617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.227228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.873883Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897276143713883:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:03.873991Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.199184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.237406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.264036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.292630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.341561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.421968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.464704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.564526Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897280438681846:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.564601Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.564612Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897280438681851:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.568658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:04.583512Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897280438681853:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:04.687588Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897280438681904:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:05.401288Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897263258810392:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:05.401354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::RangeRead-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 4045, MsgBus: 4205 2025-06-25T14:42:00.621221Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897264275837255:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:00.632771Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b01/r3tmp/tmpSqPwdx/pdisk_1.dat 2025-06-25T14:42:01.092740Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897264275837234:2080] 1750862520607578 != 1750862520607581 2025-06-25T14:42:01.108188Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:01.120285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:01.120405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:01.155633Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4045, node 1 2025-06-25T14:42:01.214908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:01.214949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:01.214962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:01.215107Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4205 2025-06-25T14:42:01.647210Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4205 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:01.911398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:01.931519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:42:01.946699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:02.107401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:02.307172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:02.393438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.992022Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897277160740752:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:03.992143Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.306456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.383068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.428250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.466654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.501132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.550665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.607888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.690004Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897281455708707:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.690086Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.690380Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897281455708712:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.694337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:04.705111Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897281455708714:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:04.760558Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897281455708765:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:05.621483Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897264275837255:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:05.621671Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::Insert-QueryService-UseSink >> KqpQueryPerf::UpdateOn+QueryService+UseSink [GOOD] >> SystemView::TabletsRanges [GOOD] >> SystemView::TabletsRangesPredicateExtractDisabled >> TxUsage::Sinks_Oltp_WriteToTopic_4_Table >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Query [GOOD] >> KqpQueryPerf::DeleteOn-QueryService-UseSink >> TxUsage::WriteToTopic_Demo_25_Table >> TxUsage::WriteToTopic_Demo_5_Query >> SystemView::AuthGroupMembers [GOOD] >> SystemView::AuthGroupMembers_Access ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::UpdateOn-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 11980, MsgBus: 14483 2025-06-25T14:42:01.864384Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897270162239274:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:01.867393Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000afb/r3tmp/tmpe66YiD/pdisk_1.dat 2025-06-25T14:42:02.192574Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897270162239164:2080] 1750862521856479 != 1750862521856482 2025-06-25T14:42:02.197829Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11980, node 1 2025-06-25T14:42:02.256168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:02.256262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:02.259701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:02.270835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:02.270852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:02.270859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:02.270975Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14483 TClient is connected to server 
localhost:14483 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:42:02.865856Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:02.914003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:02.934274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.128398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.270451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.334676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:04.901150Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897283047142684:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.901233Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.187218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.251909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.318441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.354999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.386506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.418520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.455396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.510390Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897287342110640:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.510470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897287342110645:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.510473Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.513059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:05.521401Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897287342110647:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:05.610128Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897287342110698:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:06.859617Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897270162239274:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:06.859694Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IdxLookupJoin-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 23937, MsgBus: 30046 2025-06-25T14:41:53.520812Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897234916241055:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:53.546853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b1b/r3tmp/tmpwMPkFo/pdisk_1.dat 2025-06-25T14:41:53.993356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:53.993465Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:54.005150Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:54.045780Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23937, node 1 2025-06-25T14:41:54.192838Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:54.192864Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:54.192872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:54.192975Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30046 2025-06-25T14:41:54.560514Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30046 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:54.958645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:54.989243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:55.014486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:55.288386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:55.503972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:55.614816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:57.341846Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897252096111835:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:57.341971Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:57.625133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.661750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.742724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.820384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.877087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.977288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:58.025859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:58.097338Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897256391079795:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:58.097412Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:58.097643Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897256391079800:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:58.101163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:58.112699Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897256391079802:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:58.191197Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897256391079853:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:58.521284Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897234916241055:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:58.521359Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21911, MsgBus: 13959 2025-06-25T14:42:00.688931Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897263784867461:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:00.688972Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b1b/r3tmp/tmpWbN3ZF/pdisk_1.dat 2025-06-25T14:42:00.834227Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897263784867440:2080] 1750862520687956 != 1750862520687959 2025-06-25T14:42:00.842247Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:00.847265Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:00.847340Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 21911, node 2 2025-06-25T14:42:00.848991Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:00.972956Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:00.972976Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:00.972984Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:00.973090Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13959 TClient is connected to server localhost:13959 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:42:01.485548Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:42:01.519179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:01.600531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:01.709359Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:01.767696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:01.859742Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:04.147446Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897280964738262:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.147541Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.211123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.254888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.295593Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.325490Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.393367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.471449Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.505619Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.574225Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897280964738926:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.574301Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.574491Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897280964738931:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.577890Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:04.589735Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897280964738933:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:04.682194Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897280964738984:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:05.689134Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897263784867461:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:05.689208Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Upsert-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 29723, MsgBus: 18495 2025-06-25T14:42:02.399030Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897273007300231:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:02.400050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000af1/r3tmp/tmpgQJjUY/pdisk_1.dat 2025-06-25T14:42:02.686037Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29723, node 1 2025-06-25T14:42:02.754790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:02.754811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:02.754816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:02.754930Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:42:02.784614Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:02.784737Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:02.786324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18495 TClient is connected to server localhost:18495 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:03.315379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:03.333283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.423125Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:42:03.508425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.643080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:03.707543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:05.121501Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897285892203709:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.121626Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.448137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.479303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.517979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.552055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.598255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.631032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.699679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.788366Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897285892204377:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.788444Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.788656Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897285892204382:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.792766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:05.807703Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897285892204384:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:05.868148Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897285892204435:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TKeyValueTest::TestRenameWorksNewApi [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Table >> TxUsage::WriteToTopic_Demo_41_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::UpdateOn+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 28663, MsgBus: 18956 2025-06-25T14:42:02.076781Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897274042692234:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:02.076880Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000af3/r3tmp/tmp1kmWan/pdisk_1.dat 2025-06-25T14:42:02.501145Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897274042692214:2080] 1750862522075876 != 1750862522075879 2025-06-25T14:42:02.508543Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:02.519361Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:02.519457Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 28663, node 1 2025-06-25T14:42:02.527634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:02.604293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:02.604364Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:02.604376Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:02.604489Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18956 TClient is connected to server localhost:18956 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:42:03.140565Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:03.255101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:03.274828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.427084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.598729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.679149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:05.396988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897286927595756:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.397094Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.698989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.743905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.785837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.823193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.855651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.890619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.969438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:06.063286Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897291222563717:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:06.063377Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:06.063698Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897291222563722:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:06.067418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:06.080872Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897291222563724:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:06.178084Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897291222563777:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:07.078344Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897274042692234:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:07.078399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoDatabases [GOOD] Test command err: 2025-06-25T14:38:38.115237Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:336:2218], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:38.115699Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:38:38.115836Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d07/r3tmp/tmpVkeaF2/pdisk_1.dat 2025-06-25T14:38:38.503007Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17129, node 1 2025-06-25T14:38:38.704293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:38.704386Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:38.704425Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:38.704901Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:38.707797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:38.799792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:38.799951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:38.815291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3232 2025-06-25T14:38:39.356063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:42.511005Z node 3 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 3 2025-06-25T14:38:42.544437Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:42.544580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:42.596625Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:38:42.598304Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:42.809335Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:42.842991Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.843408Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.843810Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.843907Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.843986Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.844137Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.844193Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.844272Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:42.844353Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:43.040453Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:43.040618Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:43.053877Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:43.189249Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:43.233700Z node 3 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:43.233837Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:43.270711Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:43.272335Z node 3 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:43.272558Z node 3 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:43.272617Z node 3 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:43.272674Z node 3 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:43.272738Z node 3 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:43.272799Z node 3 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:43.272852Z node 3 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:43.273389Z node 3 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:43.301728Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:43.301838Z node 3 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [3:1872:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:43.308457Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:1884:2571] 2025-06-25T14:38:43.309699Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:1898:2581] 2025-06-25T14:38:43.309954Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [3:1898:2581], schemeshard id = 72075186224037897 2025-06-25T14:38:43.319016Z node 3 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database1 2025-06-25T14:38:43.338399Z node 3 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:43.338453Z node 3 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:43.338532Z node 3 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database1/.metadata/_statistics 2025-06-25T14:38:43.352908Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:43.359827Z node 3 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:43.359962Z node 3 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:43.539391Z node 3 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:43.699507Z node 3 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:43.765295Z node 3 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T14:38:44.338780Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:44.468166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:47.553902Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:47.588949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:47.589064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:47.628981Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:47.630829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:47.836822Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.837330Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.837964Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.838178Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.838451Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.838549Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.838634Z node 2 :HIVE WARN: tx__cr ... nt64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:42:00.163359Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:10182:4408], DatabaseId: /Root/Database2, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:00.163492Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:10193:4413], DatabaseId: /Root/Database2, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:00.163612Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database2, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:00.185279Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976730658:2, at schemeshard: 72075186224038898, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:00.285194Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:10196:4416], DatabaseId: /Root/Database2, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976730658 completed, doublechecking } 2025-06-25T14:42:00.457806Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:10286:4465] txid# 281474976730659, issues: { message: "Check failed: path: \'/Root/Database2/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224038898, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:00.491932Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:10315:4480]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:42:00.492271Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:42:00.492397Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:10317:4482] 2025-06-25T14:42:00.492491Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:10317:4482] 2025-06-25T14:42:00.493080Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224038895] EvServerConnected, pipe server id = [2:10318:4483] 2025-06-25T14:42:00.493287Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224038895] EvConnectNode, pipe server id = [2:10318:4483], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T14:42:00.493364Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224038895] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T14:42:00.493527Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10317:4482], server id = [2:10318:4483], tablet id = 72075186224038895, status = OK 2025-06-25T14:42:00.493662Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:42:00.493744Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:10315:4480], StatRequests.size() = 1 2025-06-25T14:42:00.645340Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OTUwMjNmZTEtZmE2ZjUzY2MtZjI0ODcxYjYtN2Y2NGNjYmM=, TxId: 2025-06-25T14:42:00.645414Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OTUwMjNmZTEtZmE2ZjUzY2MtZjI0ODcxYjYtN2Y2NGNjYmM=, TxId: 2025-06-25T14:42:00.646098Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224038895] TTxFinishTraversal::Execute 2025-06-25T14:42:00.663477Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224038895] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224038898, LocalPathId: 3] 2025-06-25T14:42:00.663539Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224038895] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:42:00.721199Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224038895] EvFastPropagateCheck 2025-06-25T14:42:00.721281Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224038895] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T14:42:00.845232Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:10317:4482], schemeshard count = 1 2025-06-25T14:42:01.668169Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:42:01.679463Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:42:01.679530Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:42:01.679566Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-25T14:42:01.679601Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:42:01.679929Z node 3 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database1 2025-06-25T14:42:01.685505Z node 3 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:42:01.699403Z node 3 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=NGM5YzIyMzEtNjEyOWIwNTQtODRkZWE0NjItMWVjZWUzOWE=, TxId: 2025-06-25T14:42:01.699458Z node 3 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=NGM5YzIyMzEtNjEyOWIwNTQtODRkZWE0NjItMWVjZWUzOWE=, TxId: 2025-06-25T14:42:01.699826Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:42:01.714963Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:42:01.715016Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:42:01.779318Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [3:10407:4759]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:42:01.779623Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-25T14:42:01.779670Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [3:10407:4759], StatRequests.size() = 1 2025-06-25T14:42:04.062832Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [3:10468:4779]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:42:04.063230Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-25T14:42:04.063277Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [3:10468:4779], StatRequests.size() = 1 2025-06-25T14:42:04.924982Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224038895] ScheduleNextTraversal 2025-06-25T14:42:04.925047Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224038895] ScheduleNextTraversal. No force traversals. 2025-06-25T14:42:04.925087Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224038895] IsColumnTable. Path [OwnerId: 72075186224038898, LocalPathId: 4] is data table. 2025-06-25T14:42:04.925120Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224038895] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224038898, LocalPathId: 4] 2025-06-25T14:42:04.925397Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database2 2025-06-25T14:42:04.928219Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:42:04.947092Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YWRhOTcxMjAtOTBjYTI0ZGQtNmUxZjZmNTQtYzczM2ZmMzA=, TxId: 2025-06-25T14:42:04.947158Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YWRhOTcxMjAtOTBjYTI0ZGQtNmUxZjZmNTQtYzczM2ZmMzA=, TxId: 2025-06-25T14:42:04.947879Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224038895] TTxFinishTraversal::Execute 2025-06-25T14:42:04.963216Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224038895] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224038898, LocalPathId: 4] 2025-06-25T14:42:04.963258Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224038895] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:42:06.016744Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 2 2025-06-25T14:42:06.016979Z node 3 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 3 2025-06-25T14:42:06.017349Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:42:06.028635Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:42:06.028700Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:42:06.089069Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [3:10547:4793]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:42:06.089485Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-25T14:42:06.089533Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [3:10547:4793], StatRequests.size() = 1 2025-06-25T14:42:06.090402Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:10549:4553]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:42:06.094404Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:42:06.094461Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:10549:4553], StatRequests.size() = 1 >> KqpQueryPerf::Delete+QueryService+UseSink [GOOD] >> KqpQueryPerf::IndexUpdateOn+QueryService-UseSink >> KqpQueryPerf::RangeLimitRead-QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestRenameWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:57:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:60:2057] recipient: [1:52:2097] Leader for TabletID 72057594037927937 is [1:59:2099] sender: [1:77:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:57:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:60:2057] recipient: [2:53:2097] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:77:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:79:2057] recipient: [2:38:2085] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:59:2099] sender: [2:83:2057] recipient: [2:82:2112] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:85:2057] recipient: [2:82:2112] !Reboot 72057594037927937 (actor [2:59:2099]) rebooted! !Reboot 72057594037927937 (actor [2:59:2099]) tablet resolver refreshed! 
new actor is[2:84:2113] Leader for TabletID 72057594037927937 is [2:84:2113] sender: [2:170:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:57:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:60:2057] recipient: [3:52:2097] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:77:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:79:2057] recipient: [3:38:2085] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:82:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:59:2099] sender: [3:83:2057] recipient: [3:81:2112] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:85:2057] recipient: [3:81:2112] !Reboot 72057594037927937 (actor [3:59:2099]) rebooted! !Reboot 72057594037927937 (actor [3:59:2099]) tablet resolver refreshed! new actor is[3:84:2113] Leader for TabletID 72057594037927937 is [3:84:2113] sender: [3:170:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:57:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:60:2057] recipient: [4:53:2097] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:77:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:80:2057] recipient: [4:38:2085] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:83:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:59:2099] sender: [4:84:2057] recipient: [4:82:2112] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:86:2057] recipient: [4:82:2112] !Reboot 72057594037927937 (actor [4:59:2099]) rebooted! !Reboot 72057594037927937 (actor [4:59:2099]) tablet resolver refreshed! new actor is[4:85:2113] Leader for TabletID 72057594037927937 is [4:85:2113] sender: [4:171:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:57:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:60:2057] recipient: [5:53:2097] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:77:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:83:2057] recipient: [5:38:2085] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:86:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:59:2099] sender: [5:87:2057] recipient: [5:85:2115] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:89:2057] recipient: [5:85:2115] !Reboot 72057594037927937 (actor [5:59:2099]) rebooted! !Reboot 72057594037927937 (actor [5:59:2099]) tablet resolver refreshed! 
new actor is[5:88:2116] Leader for TabletID 72057594037927937 is [5:88:2116] sender: [5:174:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:57:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:60:2057] recipient: [6:52:2097] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:77:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:83:2057] recipient: [6:38:2085] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:86:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:59:2099] sender: [6:87:2057] recipient: [6:85:2115] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:89:2057] recipient: [6:85:2115] !Reboot 72057594037927937 (actor [6:59:2099]) rebooted! !Reboot 72057594037927937 (actor [6:59:2099]) tablet resolver refreshed! new actor is[6:88:2116] Leader for TabletID 72057594037927937 is [6:88:2116] sender: [6:174:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:57:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:60:2057] recipient: [7:53:2097] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:77:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:84:2057] recipient: [7:38:2085] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:87:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:59:2099] sender: [7:88:2057] recipient: [7:86:2115] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:90:2057] recipient: [7:86:2115] !Reboot 72057594037927937 (actor [7:59:2099]) rebooted! !Reboot 72057594037927937 (actor [7:59:2099]) tablet resolver refreshed! new actor is[7:89:2116] Leader for TabletID 72057594037927937 is [7:89:2116] sender: [7:175:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:57:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:60:2057] recipient: [8:54:2097] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:77:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:86:2057] recipient: [8:38:2085] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:59:2099] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:59:2099]) rebooted! !Reboot 72057594037927937 (actor [8:59:2099]) tablet resolver refreshed! 
new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:57:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:60:2057] recipient: [9:53:2097] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:77:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:59:2099]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:86:2057] recipient: [9:38:2085] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:89:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:59:2099] sender: [9:90:2057] recipient: [9:88:2117] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:92:2057] recipient: [9:88:2117] !Reboot 72057594037927937 (actor [9:59:2099]) rebooted! !Reboot 72057594037927937 (actor [9:59:2099]) tablet resolver refreshed! new actor is[9:91:2118] Leader for TabletID 72057594037927937 is [9:91:2118] sender: [9:177:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:57:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:60:2057] recipient: [10:54:2097] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:77:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:87:2057] recipient: [10:38:2085] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:59:2099] sender: [10:91:2057] recipient: [10:89:2117] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:93:2057] recipient: [10:89:2117] !Reboot 72057594037927937 (actor [10:59:2099]) rebooted! !Reboot 72057594037927937 (actor [10:59:2099]) tablet resolver refreshed! new actor is[10:92:2118] Leader for TabletID 72057594037927937 is [10:92:2118] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:57:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:60:2057] recipient: [11:53:2097] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:77:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:59:2099]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:88:2057] recipient: [11:38:2085] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:91:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:59:2099] sender: [11:92:2057] recipient: [11:90:2118] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:94:2057] recipient: [11:90:2118] !Reboot 72057594037927937 (actor [11:59:2099]) rebooted! !Reboot 72057594037927937 (actor [11:59:2099]) tablet resolver refreshed! 
new actor is[11:93:2119] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:113:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:57:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:60:2057] recipient: [12:54:2097] Leader for TabletID 72057594037927937 is [12:59:2099] sender: [12:77:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:59:2 ... recipient: [60:81:2112] !Reboot 72057594037927937 (actor [60:59:2099]) rebooted! !Reboot 72057594037927937 (actor [60:59:2099]) tablet resolver refreshed! new actor is[60:84:2113] Leader for TabletID 72057594037927937 is [60:84:2113] sender: [60:170:2057] recipient: [60:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [61:57:2057] recipient: [61:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [61:57:2057] recipient: [61:53:2097] Leader for TabletID 72057594037927937 is [61:59:2099] sender: [61:60:2057] recipient: [61:53:2097] Leader for TabletID 72057594037927937 is [61:59:2099] sender: [61:77:2057] recipient: [61:14:2061] !Reboot 72057594037927937 (actor [61:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [61:59:2099] sender: [61:79:2057] recipient: [61:38:2085] Leader for TabletID 72057594037927937 is [61:59:2099] sender: [61:82:2057] recipient: [61:14:2061] Leader for TabletID 72057594037927937 is [61:59:2099] sender: [61:83:2057] recipient: [61:81:2112] Leader for TabletID 72057594037927937 is [61:84:2113] sender: [61:85:2057] recipient: [61:81:2112] !Reboot 72057594037927937 (actor [61:59:2099]) rebooted! !Reboot 72057594037927937 (actor [61:59:2099]) tablet resolver refreshed! new actor is[61:84:2113] Leader for TabletID 72057594037927937 is [61:84:2113] sender: [61:170:2057] recipient: [61:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [62:57:2057] recipient: [62:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [62:57:2057] recipient: [62:53:2097] Leader for TabletID 72057594037927937 is [62:59:2099] sender: [62:60:2057] recipient: [62:53:2097] Leader for TabletID 72057594037927937 is [62:59:2099] sender: [62:77:2057] recipient: [62:14:2061] !Reboot 72057594037927937 (actor [62:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [62:59:2099] sender: [62:80:2057] recipient: [62:38:2085] Leader for TabletID 72057594037927937 is [62:59:2099] sender: [62:83:2057] recipient: [62:14:2061] Leader for TabletID 72057594037927937 is [62:59:2099] sender: [62:84:2057] recipient: [62:82:2112] Leader for TabletID 72057594037927937 is [62:85:2113] sender: [62:86:2057] recipient: [62:82:2112] !Reboot 72057594037927937 (actor [62:59:2099]) rebooted! !Reboot 72057594037927937 (actor [62:59:2099]) tablet resolver refreshed! 
new actor is[62:85:2113] Leader for TabletID 72057594037927937 is [62:85:2113] sender: [62:171:2057] recipient: [62:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [63:57:2057] recipient: [63:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [63:57:2057] recipient: [63:53:2097] Leader for TabletID 72057594037927937 is [63:59:2099] sender: [63:60:2057] recipient: [63:53:2097] Leader for TabletID 72057594037927937 is [63:59:2099] sender: [63:77:2057] recipient: [63:14:2061] !Reboot 72057594037927937 (actor [63:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [63:59:2099] sender: [63:83:2057] recipient: [63:38:2085] Leader for TabletID 72057594037927937 is [63:59:2099] sender: [63:86:2057] recipient: [63:14:2061] Leader for TabletID 72057594037927937 is [63:59:2099] sender: [63:87:2057] recipient: [63:85:2115] Leader for TabletID 72057594037927937 is [63:88:2116] sender: [63:89:2057] recipient: [63:85:2115] !Reboot 72057594037927937 (actor [63:59:2099]) rebooted! !Reboot 72057594037927937 (actor [63:59:2099]) tablet resolver refreshed! new actor is[63:88:2116] Leader for TabletID 72057594037927937 is [63:88:2116] sender: [63:174:2057] recipient: [63:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [64:57:2057] recipient: [64:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [64:57:2057] recipient: [64:53:2097] Leader for TabletID 72057594037927937 is [64:59:2099] sender: [64:60:2057] recipient: [64:53:2097] Leader for TabletID 72057594037927937 is [64:59:2099] sender: [64:77:2057] recipient: [64:14:2061] !Reboot 72057594037927937 (actor [64:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [64:59:2099] sender: [64:83:2057] recipient: [64:38:2085] Leader for TabletID 72057594037927937 is [64:59:2099] sender: [64:85:2057] recipient: [64:14:2061] Leader for TabletID 72057594037927937 is [64:59:2099] sender: [64:87:2057] recipient: [64:86:2115] Leader for TabletID 72057594037927937 is [64:88:2116] sender: [64:89:2057] recipient: [64:86:2115] !Reboot 72057594037927937 (actor [64:59:2099]) rebooted! !Reboot 72057594037927937 (actor [64:59:2099]) tablet resolver refreshed! new actor is[64:88:2116] Leader for TabletID 72057594037927937 is [64:88:2116] sender: [64:174:2057] recipient: [64:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [65:57:2057] recipient: [65:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [65:57:2057] recipient: [65:53:2097] Leader for TabletID 72057594037927937 is [65:59:2099] sender: [65:60:2057] recipient: [65:53:2097] Leader for TabletID 72057594037927937 is [65:59:2099] sender: [65:77:2057] recipient: [65:14:2061] !Reboot 72057594037927937 (actor [65:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [65:59:2099] sender: [65:84:2057] recipient: [65:38:2085] Leader for TabletID 72057594037927937 is [65:59:2099] sender: [65:87:2057] recipient: [65:14:2061] Leader for TabletID 72057594037927937 is [65:59:2099] sender: [65:88:2057] recipient: [65:86:2115] Leader for TabletID 72057594037927937 is [65:89:2116] sender: [65:90:2057] recipient: [65:86:2115] !Reboot 72057594037927937 (actor [65:59:2099]) rebooted! !Reboot 72057594037927937 (actor [65:59:2099]) tablet resolver refreshed! 
new actor is[65:89:2116] Leader for TabletID 72057594037927937 is [65:89:2116] sender: [65:175:2057] recipient: [65:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [66:57:2057] recipient: [66:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [66:57:2057] recipient: [66:54:2097] Leader for TabletID 72057594037927937 is [66:59:2099] sender: [66:60:2057] recipient: [66:54:2097] Leader for TabletID 72057594037927937 is [66:59:2099] sender: [66:77:2057] recipient: [66:14:2061] !Reboot 72057594037927937 (actor [66:59:2099]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [66:59:2099] sender: [66:85:2057] recipient: [66:38:2085] Leader for TabletID 72057594037927937 is [66:59:2099] sender: [66:88:2057] recipient: [66:14:2061] Leader for TabletID 72057594037927937 is [66:59:2099] sender: [66:89:2057] recipient: [66:87:2116] Leader for TabletID 72057594037927937 is [66:90:2117] sender: [66:91:2057] recipient: [66:87:2116] !Reboot 72057594037927937 (actor [66:59:2099]) rebooted! !Reboot 72057594037927937 (actor [66:59:2099]) tablet resolver refreshed! new actor is[66:90:2117] Leader for TabletID 72057594037927937 is [66:90:2117] sender: [66:110:2057] recipient: [66:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [67:57:2057] recipient: [67:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [67:57:2057] recipient: [67:53:2097] Leader for TabletID 72057594037927937 is [67:59:2099] sender: [67:60:2057] recipient: [67:53:2097] Leader for TabletID 72057594037927937 is [67:59:2099] sender: [67:77:2057] recipient: [67:14:2061] !Reboot 72057594037927937 (actor [67:59:2099]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [67:59:2099] sender: [67:86:2057] recipient: [67:38:2085] Leader for TabletID 72057594037927937 is [67:59:2099] sender: [67:88:2057] recipient: [67:14:2061] Leader for TabletID 72057594037927937 is [67:59:2099] sender: [67:90:2057] recipient: [67:89:2117] Leader for TabletID 72057594037927937 is [67:91:2118] sender: [67:92:2057] recipient: [67:89:2117] !Reboot 72057594037927937 (actor [67:59:2099]) rebooted! !Reboot 72057594037927937 (actor [67:59:2099]) tablet resolver refreshed! new actor is[67:91:2118] Leader for TabletID 72057594037927937 is [67:91:2118] sender: [67:111:2057] recipient: [67:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [68:57:2057] recipient: [68:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [68:57:2057] recipient: [68:53:2097] Leader for TabletID 72057594037927937 is [68:59:2099] sender: [68:60:2057] recipient: [68:53:2097] Leader for TabletID 72057594037927937 is [68:59:2099] sender: [68:77:2057] recipient: [68:14:2061] !Reboot 72057594037927937 (actor [68:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [68:59:2099] sender: [68:89:2057] recipient: [68:38:2085] Leader for TabletID 72057594037927937 is [68:59:2099] sender: [68:92:2057] recipient: [68:14:2061] Leader for TabletID 72057594037927937 is [68:59:2099] sender: [68:93:2057] recipient: [68:91:2120] Leader for TabletID 72057594037927937 is [68:94:2121] sender: [68:95:2057] recipient: [68:91:2120] !Reboot 72057594037927937 (actor [68:59:2099]) rebooted! !Reboot 72057594037927937 (actor [68:59:2099]) tablet resolver refreshed! 
new actor is[68:94:2121] Leader for TabletID 72057594037927937 is [68:94:2121] sender: [68:180:2057] recipient: [68:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [69:57:2057] recipient: [69:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [69:57:2057] recipient: [69:53:2097] Leader for TabletID 72057594037927937 is [69:59:2099] sender: [69:60:2057] recipient: [69:53:2097] Leader for TabletID 72057594037927937 is [69:59:2099] sender: [69:77:2057] recipient: [69:14:2061] !Reboot 72057594037927937 (actor [69:59:2099]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [69:59:2099] sender: [69:89:2057] recipient: [69:38:2085] Leader for TabletID 72057594037927937 is [69:59:2099] sender: [69:92:2057] recipient: [69:14:2061] Leader for TabletID 72057594037927937 is [69:59:2099] sender: [69:93:2057] recipient: [69:91:2120] Leader for TabletID 72057594037927937 is [69:94:2121] sender: [69:95:2057] recipient: [69:91:2120] !Reboot 72057594037927937 (actor [69:59:2099]) rebooted! !Reboot 72057594037927937 (actor [69:59:2099]) tablet resolver refreshed! new actor is[69:94:2121] Leader for TabletID 72057594037927937 is [69:94:2121] sender: [69:180:2057] recipient: [69:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [70:57:2057] recipient: [70:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [70:57:2057] recipient: [70:54:2097] Leader for TabletID 72057594037927937 is [70:59:2099] sender: [70:60:2057] recipient: [70:54:2097] Leader for TabletID 72057594037927937 is [70:59:2099] sender: [70:77:2057] recipient: [70:14:2061] !Reboot 72057594037927937 (actor [70:59:2099]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [70:59:2099] sender: [70:90:2057] recipient: [70:38:2085] Leader for TabletID 72057594037927937 is [70:59:2099] sender: [70:93:2057] recipient: [70:14:2061] Leader for TabletID 72057594037927937 is [70:59:2099] sender: [70:94:2057] recipient: [70:92:2120] Leader for TabletID 72057594037927937 is [70:95:2121] sender: [70:96:2057] recipient: [70:92:2120] !Reboot 72057594037927937 (actor [70:59:2099]) rebooted! !Reboot 72057594037927937 (actor [70:59:2099]) tablet resolver refreshed! new actor is[70:95:2121] Leader for TabletID 72057594037927937 is [0:0:0] sender: [71:57:2057] recipient: [71:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [71:57:2057] recipient: [71:52:2097] Leader for TabletID 72057594037927937 is [71:59:2099] sender: [71:60:2057] recipient: [71:52:2097] Leader for TabletID 72057594037927937 is [71:59:2099] sender: [71:77:2057] recipient: [71:14:2061] >> TxUsage::WriteToTopic_Demo_42_Table >> KqpQueryPerf::AggregateToScalar+QueryService [GOOD] >> KqpQueryPerf::AggregateToScalar-QueryService >> SystemView::DescribeAccessDenied [GOOD] >> SystemView::CollectScriptingQueries >> KqpQueryPerf::Upsert+QueryService-UseSink >> KqpQueryPerf::IndexUpsert+QueryService+UseSink [GOOD] >> KqpQueryPerf::IndexDeleteOn+QueryService-UseSink >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Table [GOOD] >> KqpQueryPerf::IndexReplace+QueryService-UseSink >> KqpQueryPerf::Update+QueryService-UseSink [GOOD] |83.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> unstable_connection.py::TestUnstableConnection::test [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> ttl_unavailable_s3.py::TestUnavailableS3::test [GOOD] Test command err: !!! 
simulating S3 hang up -- sending SIGSTOP !!! simulating S3 recovery -- sending SIGCONT ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Delete+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 17233, MsgBus: 30200 2025-06-25T14:41:57.783434Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897253675903702:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:57.783479Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b14/r3tmp/tmpCQnzNF/pdisk_1.dat 2025-06-25T14:41:58.246852Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17233, node 1 2025-06-25T14:41:58.266271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:58.266352Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:58.268043Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:58.314981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:58.315009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:58.315024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:58.315139Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30200 TClient is connected to server localhost:30200 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:41:58.800991Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:41:58.893872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:58.956371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:59.131437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:59.342710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:59.437289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:01.086164Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897270855774489:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.086280Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.340977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.376230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.444807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.476909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.508467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.545575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.617934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.675416Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897270855775153:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.675477Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.675481Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897270855775158:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.678576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:01.687448Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897270855775160:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:01.777715Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897270855775211:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:02.784266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897253675903702:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:02.796243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 29173, MsgBus: 9223 2025-06-25T14:42:03.989772Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897277608955406:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:03.989843Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b14/r3tmp/tmpPDJXLI/pdisk_1.dat 2025-06-25T14:42:04.135807Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29173, node 2 2025-06-25T14:42:04.175605Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:04.175683Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:04.200175Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:04.271798Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:04.271821Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:04.271828Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:04.271925Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9223 TClient is connected to server localhost:9223 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:42:04.708682Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:42:04.726418Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:04.800874Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.958867Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:05.006633Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:05.062948Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:07.144442Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897294788826164:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:07.144513Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:07.198156Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.233251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.262116Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.288614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.369998Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.437643Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.510254Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.569925Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897294788826825:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:07.570039Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:07.570292Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897294788826830:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:07.574246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:07.591449Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897294788826832:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:07.666676Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897294788826883:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:08.990273Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897277608955406:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:08.990364Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> Viewer::JsonStorageListingV2PDiskIdFilter [GOOD] >> KqpQueryPerf::MultiDeleteFromTable-QueryService-UseSink >> KqpQueryPerf::MultiRead-QueryService >> KqpQueryPerf::Replace-QueryService-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexUpsert+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 7229, MsgBus: 5138 2025-06-25T14:41:52.853710Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897232929530985:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:52.853750Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b1d/r3tmp/tmpcmt3AI/pdisk_1.dat 2025-06-25T14:41:53.526678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:53.526828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:53.534083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:53.560425Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897232929530964:2080] 1750862512831990 != 1750862512831993 2025-06-25T14:41:53.563955Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7229, node 1 2025-06-25T14:41:53.649781Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:53.649817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:53.649828Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:53.649950Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:53.885769Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5138 TClient is connected to server localhost:5138 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:54.381865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:54.401867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:54.411507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:54.558921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:54.713716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:54.808899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:56.837176Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897250109401772:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:56.837266Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:57.116616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.173001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.208781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.252119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.284371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.339996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.417948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:57.492147Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897254404369728:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:57.492246Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:57.492609Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897254404369733:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:57.496955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:57.508581Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897254404369735:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:41:57.582085Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897254404369786:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:57.854428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897232929530985:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:57.854478Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:58.849718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work ... own -> Disconnected 2025-06-25T14:42:02.674745Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:02.677256Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:02.684444Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897273290561901:2080] 1750862522421466 != 1750862522421469 2025-06-25T14:42:02.687998Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27980, node 2 2025-06-25T14:42:02.798678Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:02.798696Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:02.798704Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:02.798827Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28998 TClient is connected to server localhost:28998 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:42:03.326339Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:03.346136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.431650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.448643Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:03.650609Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:03.752558Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:05.787046Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897286175465439:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.787105Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:05.830517Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.866674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.896985Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.923479Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.958935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:05.996202Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:06.068269Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:06.173925Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897290470433399:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:06.174034Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:06.174292Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897290470433404:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:06.177456Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:06.195770Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897290470433406:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:06.292494Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897290470433457:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:07.337404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.399775Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.438861Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897273290561949:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:07.438967Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:07.472779Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Update+QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 15341, MsgBus: 29898 2025-06-25T14:42:04.490284Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897283041335334:2135];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ae8/r3tmp/tmpnN7GAa/pdisk_1.dat 2025-06-25T14:42:04.742775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:42:04.867654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:04.867776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:04.882994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:04.886281Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:04.886660Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for 
subscription [1:7519897283041335237:2080] 1750862524461393 != 1750862524461396 TServer::EnableGrpc on GrpcPort 15341, node 1 2025-06-25T14:42:05.061832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:05.061857Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:05.061872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:05.062000Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29898 2025-06-25T14:42:05.490329Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29898 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:05.631816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:05.645728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:42:05.650785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:05.851469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:05.998491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:06.079441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.760743Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897295926238746:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:07.760881Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:08.114393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:08.186014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:08.257509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:08.287310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:08.319018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:08.391069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:08.467116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:08.571843Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897300221206715:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:08.571909Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:08.572663Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897300221206720:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:08.576006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:08.587675Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897300221206722:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:08.655578Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897300221206773:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:09.470312Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897283041335334:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:09.470364Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::MultiRead+QueryService [GOOD] |83.9%| [TA] $(B)/ydb/core/keyvalue/ut/test-results/unittest/{meta.json ... results_accumulator.log} |83.9%| [TA] {RESULT} $(B)/ydb/core/keyvalue/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Query >> KqpQueryPerf::ComputeLength+QueryService >> KqpQueryPerf::RangeLimitRead+QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::MultiRead+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 61026, MsgBus: 31429 2025-06-25T14:42:06.729689Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897289230375327:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:06.729912Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ae1/r3tmp/tmpN6dWL0/pdisk_1.dat 2025-06-25T14:42:07.029199Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897289230375308:2080] 1750862526728605 != 1750862526728608 2025-06-25T14:42:07.057247Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61026, node 1 2025-06-25T14:42:07.139051Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:07.139251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:07.141250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:07.152910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:07.152940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:07.152948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:07.153066Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31429 TClient is connected to server localhost:31429 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:07.652472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:07.671867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:07.739856Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:07.793349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:07.933014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:08.000572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:09.728921Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897302115278836:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:09.729082Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:10.045191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.074507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.111305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.142844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.210969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.282030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.314283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.417028Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897306410246801:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:10.417133Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:10.417460Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897306410246806:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:10.421135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:10.432581Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897306410246808:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:10.508176Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897306410246859:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:11.729955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897289230375327:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:11.730241Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::MultiDeleteFromTable+QueryService+UseSink [GOOD] >> KqpQueryPerf::RangeRead+QueryService [GOOD] >> KqpQueryPerf::Insert-QueryService-UseSink [GOOD] >> KqpQueryPerf::Insert-QueryService+UseSink >> KqpQueryPerf::DeleteOn-QueryService-UseSink [GOOD] >> KqpQueryPerf::DeleteOn-QueryService+UseSink >> KqpQueryPerf::IndexUpsert-QueryService+UseSink [GOOD] |83.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |83.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |83.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |83.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |83.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |83.9%| [LD] {RESULT} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::MultiDeleteFromTable+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 9150, MsgBus: 20438 2025-06-25T14:42:01.081423Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897268186504771:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:01.082285Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000afc/r3tmp/tmplekCnU/pdisk_1.dat 2025-06-25T14:42:01.415319Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9150, node 1 2025-06-25T14:42:01.484015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:01.486176Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:01.494764Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:01.556642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:01.556664Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:01.556671Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:01.556803Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20438 TClient is connected to server localhost:20438 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:42:02.089444Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:02.187130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:02.200958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:42:02.213034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:02.391074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:02.599728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:02.670008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:04.239390Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897281071408251:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.239659Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.581069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.607266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.631310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.657841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.690025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.763208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.799869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:04.883364Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897281071408913:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.883437Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.883643Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897281071408918:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:04.887588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:04.896712Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897281071408920:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:04.996116Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897281071408971:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:06.082876Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897268186504771:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:06.082947Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 13122, MsgBus: 13716 2025-06-25T14:42:07.729523Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897295128577457:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:07.729567Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000afc/r3tmp/tmpTtOjjK/pdisk_1.dat 2025-06-25T14:42:07.848263Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13122, node 2 2025-06-25T14:42:07.892903Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:07.893006Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:07.894972Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:07.941474Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:07.941491Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:07.941498Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:07.941602Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13716 TClient is connected to server localhost:13716 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:08.429196Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:08.434265Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:42:08.443838Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:08.528954Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:08.718426Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:08.766899Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-25T14:42:08.799774Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.852397Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897308013480942:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:10.852489Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:10.895834Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.939394Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.981908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.028611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.106097Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.161383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.210905Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.296209Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897312308448898:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.296283Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.296355Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897312308448903:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.300183Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:11.316951Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897312308448905:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:11.404448Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897312308448956:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:12.753810Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897295128577457:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:12.754176Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::IndexUpdateOn-QueryService+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::RangeRead+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 10499, MsgBus: 21084 2025-06-25T14:42:07.900786Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897296960399715:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:07.900874Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000adf/r3tmp/tmpf8LCRx/pdisk_1.dat 2025-06-25T14:42:08.340850Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:08.340961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:08.344768Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:08.371610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10499, node 1 2025-06-25T14:42:08.543372Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:08.543414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:08.543426Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:08.543575Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21084 2025-06-25T14:42:08.945363Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21084 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:09.235811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:09.249801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:42:09.264806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:09.402645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:09.576816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:09.650537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:11.192744Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897314140270486:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.192866Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.474291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.503538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.582943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.623144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.716397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.790873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.847182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.938258Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897314140271152:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.938342Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.938583Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897314140271157:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.942429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:11.959213Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897314140271159:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:12.024770Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897318435238508:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:12.901860Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897296960399715:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:12.901934Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexUpsert-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 16059, MsgBus: 7367 2025-06-25T14:41:57.777690Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897254432788884:2155];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:57.795902Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b0d/r3tmp/tmpTscQRD/pdisk_1.dat 2025-06-25T14:41:58.251517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:58.251645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:58.255276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:58.271952Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:58.272433Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897254432788767:2080] 1750862517744978 != 1750862517744981 TServer::EnableGrpc on GrpcPort 16059, node 1 2025-06-25T14:41:58.322940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:58.322961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:58.322971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:58.323094Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7367 2025-06-25T14:41:58.776424Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7367 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:59.025037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:59.083849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:59.250011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:59.437462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:59.503056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:01.166581Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897271612659602:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.166685Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.514408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.548000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.582412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.620691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.698355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.733156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.769839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.872575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897271612660266:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.872668Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.873075Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897271612660271:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.877134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:01.889810Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897271612660273:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:01.960917Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897271612660324:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:02.764051Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897254432788884:2155];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:02.764130Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:03.214856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:03.253760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part ... 2025-06-25T14:42:06.551490Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:06.554509Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:06.575856Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:06.575878Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:06.575887Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:06.576016Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9905 TClient is connected to server localhost:9905 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:42:07.128132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:07.134863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:42:07.140679Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:07.223230Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:07.374396Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:07.428790Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:07.463755Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:09.684450Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897305132877605:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:09.684554Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:09.734105Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:09.774750Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:09.809102Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:09.841419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:09.877533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:09.922814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:09.998213Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.088183Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897309427845572:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:10.088268Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:10.088554Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897309427845577:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:10.094884Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:10.108403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:42:10.109000Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897309427845579:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:10.203901Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897309427845630:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:11.247819Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.290031Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.397771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.399576Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897292247974197:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:11.399625Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> SystemView::TabletsRangesPredicateExtractDisabled [GOOD] >> KqpQueryPerf::RangeLimitRead-QueryService [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexUpdateOn-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 11993, MsgBus: 1693 2025-06-25T14:41:57.722230Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897252942778127:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:57.722418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b12/r3tmp/tmp1KQH7P/pdisk_1.dat 2025-06-25T14:41:58.205837Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:58.208586Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897252942778108:2080] 1750862517720382 != 1750862517720385 2025-06-25T14:41:58.215219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected TServer::EnableGrpc on GrpcPort 11993, node 1 2025-06-25T14:41:58.224402Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:58.226428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:58.340874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:58.340899Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:58.340906Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:58.341020Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1693 2025-06-25T14:41:58.740173Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1693 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:59.080860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:59.116510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:59.311385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:41:59.482443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:59.564811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:01.311771Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897270122648944:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.311866Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.584344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.656424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.682262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.709343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.737064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.775925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.819788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:01.891377Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897270122649603:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.891475Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.892773Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897270122649608:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:01.896425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:01.916213Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897270122649610:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:01.990960Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897270122649661:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:02.723567Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897252942778127:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:02.723627Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:03.190350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:03.296172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part ... rver::EnableGrpc on GrpcPort 9439, node 2 2025-06-25T14:42:06.750395Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:06.750481Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:06.751999Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:06.777906Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:06.777926Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:06.777934Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:06.778041Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27545 TClient is connected to server localhost:27545 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:42:07.290379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:07.297605Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:42:07.303056Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:07.392518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:07.567658Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:07.583588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:07.659946Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:09.819445Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897304832838995:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:09.819529Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:09.867594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:09.898477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:09.975788Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.007158Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.042092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.082071Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.154675Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:10.251971Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897309127806958:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:10.252084Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:10.252304Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897309127806963:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:10.255461Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:10.281549Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897309127806965:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:10.365333Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897309127807016:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:11.443964Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.521450Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.565082Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.568484Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897291947935618:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:11.568919Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::AggregateToScalar-QueryService [GOOD] |83.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |83.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |83.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots >> SystemView::AuthOwners_TableRange [GOOD] >> SystemView::AuthPermissions >> KqpQueryPerf::Upsert+QueryService-UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartNo_Query [GOOD] >> KqpQueryPerf::MultiRead-QueryService [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::RangeLimitRead-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 15109, MsgBus: 32019 2025-06-25T14:42:09.939522Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897304231122824:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:09.939673Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/000ac7/r3tmp/tmpPZzQ7t/pdisk_1.dat 2025-06-25T14:42:10.327425Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:10.337770Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897304231122802:2080] 1750862529938081 != 1750862529938084 TServer::EnableGrpc on GrpcPort 15109, node 1 2025-06-25T14:42:10.342409Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:10.342504Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:10.344700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:10.399185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:10.399206Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:10.399214Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:10.399363Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32019 TClient is connected to server localhost:32019 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:42:10.959301Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:11.080718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:42:11.098481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:42:11.106147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:11.280938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:11.470960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:11.559581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:13.169452Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897321410993619:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.169540Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.502393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.573548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.632438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.671246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.701847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.781963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.829032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.916358Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897321410994280:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.916451Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.916524Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897321410994285:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.919985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:13.928898Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897321410994287:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:14.001555Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897321410994340:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:14.940518Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897304231122824:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:14.940567Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::MultiDeleteFromTable-QueryService-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::TabletsRangesPredicateExtractDisabled [GOOD] Test command err: 2025-06-25T14:40:38.764388Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896912341911501:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:38.764744Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012cf/r3tmp/tmpMveI0e/pdisk_1.dat 2025-06-25T14:40:39.040695Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:39.041457Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896912341911481:2080] 1750862438763739 != 1750862438763742 2025-06-25T14:40:39.051454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:39.051615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:39.053522Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15899, node 1 2025-06-25T14:40:39.115024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:39.115048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:39.115056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:39.115181Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28879 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:39.325556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:39.774078Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:40.691076Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896920931846701:2287], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:40.691082Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896920931846711:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:40.691157Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:40.693901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:40.700511Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896920931846715:2291], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:40:40.779698Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896920931846768:2330] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:41.194762Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrh8z434x4xd9vkhs0tyhs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzRhYTYzNS1kNWFmZDg5NC1kMDEyM2NhMS00YmQ2ZjYzMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:40:41.255962Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519896925226814107:2300], owner: [1:7519896925226814103:2298], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:41.257396Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519896925226814107:2300], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:40:41.267664Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519896925226814107:2300], row count: 0, finished: 1 2025-06-25T14:40:41.267783Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519896925226814107:2300], owner: [1:7519896925226814103:2298], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:41.290963Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862441171, txId: 281474976715660] shutting down 2025-06-25T14:40:42.364159Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrhbt2ezv6yy8b6vydawa1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjhiYzQzZDMtYjlhZDFkYzAtYTI4OGVkNDctMzdmZTJkYTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:40:42.365390Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519896929521781447:2314], owner: [1:7519896929521781443:2312], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:42.365932Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519896929521781447:2314], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:40:42.366076Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519896929521781447:2314], row count: 0, finished: 1 2025-06-25T14:40:42.366130Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519896929521781447:2314], owner: [1:7519896929521781443:2312], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:42.367178Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862442363, txId: 281474976715662] shutting down 2025-06-25T14:40:43.420860Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykrhcvcfsw9ye50cj4fxea4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWFiYjZlNGMtZTA2YjU5MWUtM2ZmZGNkYTctZGQ3MDA5YTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:40:43.421897Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519896933816748780:2325], owner: [1:7519896933816748776:2323], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:43.422446Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519896933816748780:2325], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:40:43.422619Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519896933816748780:2325], row count: 0, finished: 1 2025-06-25T14:40:43.422703Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519896933816748780:2325], owner: [1:7519896933816748776:2323], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:43.423698Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862443419, txId: 281474976715664] shutting down 2025-06-25T14:40:43.764608Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896912341911501:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:43.764687Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:40:44.495679Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. 
Ctx: { TraceId: 01jykrhdwd10xeba8763f3e3ka, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTE4ZDIyNDUtNWY2MzQ4OGYtZWE0NTY1MTktZTMyYjYzMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:40:44.497656Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519896938111716117:2338], owner: [1:7519896938111716114:2336], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:44.498086Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519896938111716117:2338], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:40:44.506253Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:75 ... Id: 1 } 2025-06-25T14:42:06.667388Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [20:7519897288967083940:2408], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:42:06.667793Z node 20 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [20:7519897288967083940:2408], row count: 4, finished: 1 2025-06-25T14:42:06.667872Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [20:7519897288967083940:2408], owner: [20:7519897288967083937:2406], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:06.670695Z node 20 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862526662, txId: 281474976710677] shutting down 2025-06-25T14:42:06.900110Z node 20 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710680. Ctx: { TraceId: 01jykrky6k7q7qv4qf1j9rtvxf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=20&id=ZTA1NzEzODItOTc2MmQwYTktZjQ3OWU0NTQtODQxYzQ5MmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:42:06.964721Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [20:7519897288967083973:2417], owner: [20:7519897288967083970:2415], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:06.969943Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [20:7519897288967083973:2417], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:42:06.970415Z node 20 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [20:7519897288967083973:2417], row count: 4, finished: 1 2025-06-25T14:42:06.970533Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [20:7519897288967083973:2417], owner: [20:7519897288967083970:2415], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:06.975457Z node 20 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862526899, txId: 281474976710679] shutting down 2025-06-25T14:42:08.293976Z node 21 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[21:7519897300524182306:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:08.299435Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012cf/r3tmp/tmpPjGOsC/pdisk_1.dat 2025-06-25T14:42:08.551070Z node 21 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [21:7519897300524182290:2080] 1750862528293458 != 1750862528293461 2025-06-25T14:42:08.571428Z node 21 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:08.579257Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:08.579374Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:08.585556Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26016, node 21 2025-06-25T14:42:08.693046Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:08.693069Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:08.693081Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:08.693211Z node 21 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26304 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:09.212686Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:09.221473Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:42:09.315394Z node 21 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:13.294020Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[21:7519897300524182306:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:13.294128Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:14.016305Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.149568Z node 21 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [21:7519897326293986970:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.149710Z node 21 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.149939Z node 21 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [21:7519897326293986982:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.156382Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:14.171578Z node 21 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [21:7519897326293986984:2317], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:42:14.256079Z node 21 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [21:7519897326293987038:2496] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:14.560706Z node 21 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jykrm5fz55ajdh4jjafencjw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=21&id=ZWI4MzkwZTMtYzFmM2M5NDgtMTk1MGM1N2QtZjdjNDgxNjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:42:14.562975Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [21:7519897326293987085:2331], owner: [21:7519897326293987084:2330], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:14.563503Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [21:7519897326293987085:2331], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:42:14.563962Z node 21 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [21:7519897326293987085:2331], row count: 4, finished: 1 2025-06-25T14:42:14.564029Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [21:7519897326293987085:2331], owner: [21:7519897326293987084:2330], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:14.564197Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [21:7519897326293987091:2334], owner: [21:7519897326293987084:2330], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:14.565407Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [21:7519897326293987091:2334], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:42:14.565690Z node 21 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [21:7519897326293987091:2334], row count: 4, finished: 1 2025-06-25T14:42:14.565744Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [21:7519897326293987091:2334], owner: [21:7519897326293987084:2330], scan id: 0, sys view info: Type: ETablets SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:14.567540Z node 21 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862534558, txId: 281474976710661] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::AggregateToScalar-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 4436, MsgBus: 18639 2025-06-25T14:42:04.282036Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897283075158654:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:04.287829Z node 1 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ae9/r3tmp/tmp5VjmJV/pdisk_1.dat 2025-06-25T14:42:04.648944Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:04.672564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:04.672664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 4436, node 1 2025-06-25T14:42:04.674438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:04.709347Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:04.709388Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:04.709396Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:04.709533Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18639 TClient is connected to server localhost:18639 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:05.228240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:05.251490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:05.283398Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:05.373457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:05.529077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:05.606167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:07.243376Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897295960061983:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:07.243481Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:07.656691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.726925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.758387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.792105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.828268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.903015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:07.953009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:08.020146Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897300255029942:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:08.020253Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:08.020445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897300255029947:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:08.024865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:08.037065Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897300255029949:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:08.102192Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897300255030000:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:09.320583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897283075158654:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:09.320640Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 5651, MsgBus: 21106 2025-06-25T14:42:10.451251Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897307731636000:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:10.451342Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ae9/r3tmp/tmpuAS7gu/pdisk_1.dat 2025-06-25T14:42:10.568584Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:10.572499Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897307731635981:2080] 1750862530447616 != 1750862530447619 2025-06-25T14:42:10.609675Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:10.609757Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:10.614544Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5651, node 2 2025-06-25T14:42:10.760586Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:10.760608Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:10.760627Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:10.760735Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21106 TClient is connected to server localhost:21106 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:11.325956Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:11.343129Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:42:11.351851Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:11.428657Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:11.489724Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:11.574501Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:11.662674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:13.700549Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897320616539487:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.700641Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.763758Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.834781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.868345Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.899828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.926216Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.961024Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.005081Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.096233Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897324911507445:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.096343Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.096565Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897324911507450:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.100634Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:14.115412Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897324911507452:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:14.195718Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897324911507503:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:15.448696Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897307731636000:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:15.448769Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Table >> KqpQueryPerf::Replace-QueryService-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Upsert+QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 22730, MsgBus: 26893 2025-06-25T14:42:10.677170Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897306473303407:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:10.680286Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000abe/r3tmp/tmpi2pca3/pdisk_1.dat 2025-06-25T14:42:11.097958Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:11.098864Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897306473303386:2080] 1750862530674066 != 1750862530674069 2025-06-25T14:42:11.117831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:11.117924Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:11.125322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22730, node 1 2025-06-25T14:42:11.308042Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:11.308067Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:11.308077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:11.308204Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26893 2025-06-25T14:42:11.697484Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26893 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:12.035541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:12.079242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:12.224793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:12.384763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:12.454735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:14.118386Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897323653174197:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.118515Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.429943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.464286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.496270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.527909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.556282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.598330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.630421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.719186Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897323653174860:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.719254Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.719475Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897323653174865:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.723290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:14.735415Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897323653174867:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:14.826889Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897323653174918:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:15.675014Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897306473303407:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:15.675089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryPerf::IndexUpdateOn+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexUpdateOn+QueryService+UseSink >> SystemView::AuthUsers_TableRange [GOOD] >> SystemView::AuthPermissions_ResultOrder ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::MultiRead-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 25722, MsgBus: 27718 2025-06-25T14:42:11.345495Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897310697497459:2099];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:11.353507Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000aab/r3tmp/tmpHWh2xa/pdisk_1.dat 2025-06-25T14:42:11.724839Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897310697497386:2080] 1750862531290301 != 1750862531290304 2025-06-25T14:42:11.787256Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:11.789489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:11.789615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:11.793572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25722, node 1 2025-06-25T14:42:11.895185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:11.895220Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:11.895227Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:11.895357Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27718 2025-06-25T14:42:12.355893Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27718 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:12.534882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:12.575249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:42:12.588197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:12.727329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:12.876514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:12.949689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:14.452121Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897323582400903:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.452238Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.719455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.749627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.772815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.796483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.861362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.904812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.976274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:15.080196Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897327877368867:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:15.080281Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897327877368872:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:15.080343Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:15.084065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:15.095726Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897327877368874:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:15.168138Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897327877368925:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:16.327352Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897310697497459:2099];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:16.327417Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |83.9%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |83.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |84.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots >> SystemView::CollectScriptingQueries [GOOD] >> KqpQueryPerf::IndexDeleteOn+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexDeleteOn+QueryService+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::MultiDeleteFromTable-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 1573, MsgBus: 62018 2025-06-25T14:42:11.272744Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897313386549061:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:11.272844Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000aa4/r3tmp/tmpfMhcR6/pdisk_1.dat 2025-06-25T14:42:11.600806Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897313386549043:2080] 1750862531271572 != 1750862531271575 2025-06-25T14:42:11.631742Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:11.636077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:11.636199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:11.639025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1573, node 1 2025-06-25T14:42:11.780837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:11.780869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:11.780879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:11.781036Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is 
connected to server localhost:62018 TClient is connected to server localhost:62018 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:42:12.287195Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:12.302981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:12.322053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:12.491226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:12.647790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:12.723867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.386245Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897326271452563:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.386400Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.689323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.719464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.748499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.775075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.805399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.840940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.915420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.973553Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897326271453225:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.973674Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.973825Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897326271453230:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.977329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:14.987887Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897326271453232:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:15.073471Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897330566420579:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:16.272654Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897313386549061:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:16.272742Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> LabeledDbCounters::OneTablet [GOOD] >> LabeledDbCounters::OneTabletRemoveCounters >> KqpQueryPerf::RangeLimitRead+QueryService [GOOD] >> KqpQueryPerf::IndexReplace+QueryService-UseSink [GOOD] >> KqpQueryPerf::IndexReplace+QueryService+UseSink >> KqpQueryPerf::ComputeLength+QueryService [GOOD] >> KqpQueryPerf::ComputeLength-QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Replace-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 20067, MsgBus: 15760 2025-06-25T14:42:11.975317Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897311435133059:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:11.975369Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a9e/r3tmp/tmpdEM3O6/pdisk_1.dat 2025-06-25T14:42:12.321190Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:12.324495Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897311435132839:2080] 1750862531920421 != 1750862531920424 TServer::EnableGrpc on GrpcPort 20067, node 1 2025-06-25T14:42:12.396816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:12.397533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:12.404833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:12.472845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:12.472875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:12.472882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:12.473020Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15760 TClient is connected to server localhost:15760 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:42:12.972738Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:13.031094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:13.060662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:13.236244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:13.403166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.489727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:15.205270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897328615003676:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:15.205390Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:15.474454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:15.500459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:15.527969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:15.554105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:15.580720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:15.655006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:15.728088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:15.782517Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897328615004335:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:15.782582Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:15.782609Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897328615004340:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:15.786065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:15.795173Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897328615004342:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:15.869098Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897328615004393:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:16.975280Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897311435133059:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:16.975360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TxUsage::WriteToTopic_Demo_34_Table [GOOD] >> KqpQueryPerf::Insert-QueryService+UseSink [GOOD] >> KqpQueryPerf::DeleteOn-QueryService+UseSink [GOOD] |84.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection |84.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection |84.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_failure_injection/ydb-core-tx-schemeshard-ut_failure_injection |84.0%| [LD] {default-linux-x86_64, release, asan} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |84.0%| [LD] {RESULT} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |84.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::RangeLimitRead+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 12916, MsgBus: 25284 2025-06-25T14:42:13.105465Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897319961785014:2122];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:13.105551Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a87/r3tmp/tmpOhtTDj/pdisk_1.dat 2025-06-25T14:42:13.482371Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:13.482487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:13.485579Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:13.488901Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897319961784932:2080] 1750862533098459 != 1750862533098462 2025-06-25T14:42:13.506844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12916, node 1 2025-06-25T14:42:13.713935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:13.713956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize 
from file: (empty maybe) 2025-06-25T14:42:13.713967Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:13.714115Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25284 2025-06-25T14:42:14.117781Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25284 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:14.283563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:14.300501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:14.442362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:42:14.580783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.650171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:16.102491Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897332846688462:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.102558Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.343471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.372925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.394476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.417743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.438648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.464365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.500178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.578350Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897332846689122:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.578409Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.578433Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897332846689127:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.581371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:16.590297Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897332846689129:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:16.644406Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897332846689180:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TxUsage::WriteToTopic_Demo_34_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::CollectScriptingQueries [GOOD] Test command err: 2025-06-25T14:40:46.090520Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896947931121669:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:46.090637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012a0/r3tmp/tmpzmdvvy/pdisk_1.dat 2025-06-25T14:40:46.469876Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:46.483567Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:46.483665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:46.489111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19417, node 1 2025-06-25T14:40:46.585004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:46.585028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:46.585037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:46.585170Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8908 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:40:46.951695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:47.021720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-25T14:40:47.046356Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519896950688820805:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:47.046410Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:47.106220Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:47.112486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:40:47.216165Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519896951100439273:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:47.245783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:47.245866Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:47.246298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:47.246348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:47.254153Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-25T14:40:47.254189Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-25T14:40:47.264611Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:47.266138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:47.464938Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:47.592436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 
2025-06-25T14:40:47.621842Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519896950150420773:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:47.621905Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:47.627991Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896952472153209:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:47.628047Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:47.682494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:47.682666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:47.691960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:47.692042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:47.692123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:40:47.692694Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 waiting... 
2025-06-25T14:40:47.695825Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:40:47.697557Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:47.697964Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:48.177111Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:48.473746Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:48.636554Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:48.630239Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:51.096524Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896947931121669:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:51.096591Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:40:51.621370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:51.873974Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896969405959485:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:51.874333Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896969405959477:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:51.874407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:51.877550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo ... : Error: Access denied 2025-06-25T14:42:06.428254Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/Tenant1
: Error: Access denied 2025-06-25T14:42:06.480585Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/.sys
: Error: Access denied 2025-06-25T14:42:06.557118Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/Tenant1/.sys
: Error: Access denied 2025-06-25T14:42:06.645474Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/.sys/partition_stats
: Error: Access denied 2025-06-25T14:42:06.691755Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/Tenant1/.sys/partition_stats
: Error: Access denied 2025-06-25T14:42:06.705371Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 33 2025-06-25T14:42:06.705871Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(33, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:06.706044Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 34 2025-06-25T14:42:06.706598Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:06.708023Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 36 2025-06-25T14:42:06.708600Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(36, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:06.724944Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 35 2025-06-25T14:42:06.725537Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(35, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:06.731177Z node 32 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[35:7519897261690459526:2106], Type=268959746 2025-06-25T14:42:06.731214Z node 32 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[35:7519897261690459526:2106], Type=268959746 2025-06-25T14:42:06.731238Z node 32 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[35:7519897261690459526:2106], Type=268959746 2025-06-25T14:42:06.737620Z node 32 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[35:7519897261690459526:2106], Type=268959746 2025-06-25T14:42:10.695747Z node 37 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[37:7519897306142557586:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:10.695828Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012a0/r3tmp/tmppFuRU0/pdisk_1.dat 2025-06-25T14:42:10.867136Z node 37 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:10.883612Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:10.883738Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:10.889653Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17964, node 37 2025-06-25T14:42:10.981290Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:10.981314Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:10.981326Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:10.981491Z node 37 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14125 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:11.423399Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:11.433456Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:42:11.438983Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:11.707318Z node 37 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:15.696531Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[37:7519897306142557586:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:15.696642Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:16.141183Z node 37 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [37:7519897331912362038:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.141274Z node 37 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [37:7519897331912362033:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.141510Z node 37 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.146278Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:16.159543Z node 37 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [37:7519897331912362047:2300], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:42:16.234739Z node 37 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [37:7519897331912362098:2392] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:16.352372Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrm7eaeq2skc3jnfbjjhnj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=NTk3YjYyM2ItYjVhZjE3N2UtNDRiMDdjYmEtYTE2ODJmMDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:42:16.651455Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrm7sveksawaf9422n7ap2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=OTAzZDZkOTktODJlMWRlMjAtOWEyMmRlZjUtMzFmNDA4Y2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:42:16.735757Z node 37 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862536696, txId: 281474976715662] shutting down 2025-06-25T14:42:16.989172Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykrm82t9mb61kxzfedzaaea, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=ZWIyODJkMC00NWE1YzJhMi04ODEwNTFjMi01ZDVmYWI5Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:42:16.992097Z node 37 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [37:7519897331912362238:2334], owner: [37:7519897331912362235:2332], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:16.993022Z node 37 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [37:7519897331912362238:2334], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:42:16.993417Z node 37 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [37:7519897331912362238:2334], row count: 2, finished: 1 2025-06-25T14:42:16.993492Z node 37 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [37:7519897331912362238:2334], owner: [37:7519897331912362235:2332], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:16.995645Z node 37 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862536987, txId: 281474976715664] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Insert-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 11840, MsgBus: 13218 2025-06-25T14:42:08.098370Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897299109616883:2056];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:08.098409Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ad0/r3tmp/tmpEkvgje/pdisk_1.dat 2025-06-25T14:42:08.459745Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:08.461277Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897299109616868:2080] 1750862528097602 != 1750862528097605 TServer::EnableGrpc on GrpcPort 11840, node 1 2025-06-25T14:42:08.500392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:08.500507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:08.502416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:08.525375Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:08.525395Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:08.525402Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:08.525531Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13218 TClient is connected to server localhost:13218 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:42:09.114665Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:09.163056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:42:09.193175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:42:09.202973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:09.337298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:09.487323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:09.584490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:11.204926Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897311994520388:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.205032Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.475117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.505123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.533792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.572267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.607163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.684169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.717861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.795834Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897311994521047:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.795904Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.796068Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897311994521052:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.799599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:11.813864Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897311994521054:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:11.880294Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897311994521105:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:13.098577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897299109616883:2056];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:13.098678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 26513, MsgBus: 30919 2025-06-25T14:42:14.191648Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897325017034383:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:14.191737Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ad0/r3tmp/tmpoH54n4/pdisk_1.dat 2025-06-25T14:42:14.293854Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26513, node 2 2025-06-25T14:42:14.337440Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:14.337594Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:14.339786Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:14.357612Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:14.357633Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:14.357640Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:14.357754Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30919 TClient is connected to server localhost:30919 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:14.826174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:42:14.840580Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.922182Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:15.123556Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:15.200639Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:15.218695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:17.197088Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897337901937877:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:17.197247Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:17.246928Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.270552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.294357Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.318637Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.343159Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.368239Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.395027Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.439773Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897337901938534:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:17.439840Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:17.439902Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897337901938539:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:17.442863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:17.450712Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897337901938541:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:17.532622Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897337901938592:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::DeleteOn-QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 5791, MsgBus: 16050 2025-06-25T14:42:08.467011Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897298117751951:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:08.467074Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000acf/r3tmp/tmpP33e0t/pdisk_1.dat 2025-06-25T14:42:08.787207Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:08.791594Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897298117751932:2080] 1750862528464661 != 1750862528464664 2025-06-25T14:42:08.832214Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected TServer::EnableGrpc on GrpcPort 5791, node 1 2025-06-25T14:42:08.834739Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:08.845845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:08.928588Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:08.928618Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:08.928628Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:08.928732Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16050 TClient is connected to server localhost:16050 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:42:09.461837Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:09.526837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:09.540972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:42:09.554190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:09.729809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:09.866508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:09.930592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:11.566335Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897311002655459:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.566441Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:11.881751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.920647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:11.967505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:12.003058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:12.070248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:12.146824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:12.224099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:12.320691Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897315297623425:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:12.320772Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:12.320996Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897315297623430:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:12.325142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:12.343295Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897315297623432:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:12.428028Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897315297623483:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:13.466610Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897298117751951:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:13.466664Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 1914, MsgBus: 3359 2025-06-25T14:42:14.520641Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897324892672373:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:14.520699Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000acf/r3tmp/tmpeR3UYm/pdisk_1.dat 2025-06-25T14:42:14.683106Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:14.684175Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897324892672349:2080] 1750862534517877 != 1750862534517880 2025-06-25T14:42:14.700512Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:14.700601Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:14.703217Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1914, node 2 2025-06-25T14:42:14.761730Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:14.761755Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:14.761763Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:14.761873Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3359 TClient is connected to server localhost:3359 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:15.211044Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:42:15.243222Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:15.320300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:15.446544Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:15.507510Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:15.530259Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:17.303711Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897337777575862:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:17.303786Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:17.343687Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.370480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.394835Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.419379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.445787Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.473069Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.543045Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:17.593675Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897337777576526:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:17.593738Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897337777576531:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:17.593739Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:17.596992Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:17.605662Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897337777576533:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:17.706210Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897337777576584:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonStorageListingV2PDiskIdFilter [GOOD] Test command err: 2025-06-25T14:35:43.634463Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:2805:2396], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:43.636774Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:43.637097Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:2207:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:43.637868Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:43.638200Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:2219:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:43.638944Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:43.639114Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:2210:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:43.639915Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:43.640052Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:43.641142Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:43.641247Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:43.642162Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:43.643423Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:2808:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:43.644151Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:2799:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:43.645667Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:43.645765Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:43.645853Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:43.646064Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:43.647039Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:2204:2337], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:43.647569Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:2216:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:43.648243Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:43.648415Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:2213:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:35:43.649035Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:43.649479Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:43.649657Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:35:43.649717Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:35:43.650534Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:35:44.325503Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:44.812854Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:35:44.864586Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-25T14:35:46.212957Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 63683, node 1 TClient is connected to server localhost:21864 2025-06-25T14:35:47.067963Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:35:47.068041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:35:47.068089Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:35:47.073122Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:37:30.113221Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:30.114788Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:2629:2396], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:30.114968Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:30.115395Z node 17 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [17:2796:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:30.117494Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:2590:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:30.117841Z node 15 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [15:2790:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:30.117961Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:30.118011Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:30.118273Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:2786:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:37:30.119137Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:30.119438Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:30.119978Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:30.120036Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:37:30.120367Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:30.120866Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:37:30.122830Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [Wo ... ath status: LookupError; 2025-06-25T14:39:27.515586Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:39:27.515662Z node 24 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:39:27.515764Z node 27 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:39:27.516914Z node 24 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:39:27.524589Z node 25 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [25:2632:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:39:27.527116Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:39:27.527865Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:39:27.528783Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [20:1435:2181], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:39:27.530033Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:39:27.531601Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:39:28.351396Z node 19 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:39:28.659360Z node 19 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:39:28.696741Z node 19 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-25T14:39:29.612113Z node 19 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 16480, node 19 TClient is connected to server localhost:13118 2025-06-25T14:39:30.381891Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:39:30.381993Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:39:30.382074Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:39:30.382848Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:32.700632Z node 28 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [28:2838:2399], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:41:32.703062Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:32.704177Z node 35 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [35:1868:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:41:32.705039Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:41:32.706075Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:32.707248Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:41:32.707483Z node 36 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [36:1871:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:41:32.714191Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:32.715713Z node 32 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [32:1172:2180], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:41:32.716125Z node 33 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [33:1862:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:41:32.716303Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:41:32.717687Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:41:32.718888Z node 29 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [29:2832:2342], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:41:32.719202Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:32.720110Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:32.720170Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:41:32.721963Z node 30 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [30:2841:2342], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:41:32.722649Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:32.722716Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:41:32.723844Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:32.724114Z node 31 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [31:2793:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:41:32.725853Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:41:32.726191Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:32.727495Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:41:32.727718Z node 34 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [34:1865:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:41:32.729298Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:32.729358Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:41:34.001825Z node 28 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:34.352174Z node 28 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:41:34.420753Z node 28 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-25T14:41:35.944086Z node 28 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 63779, node 28 TClient is connected to server localhost:27247 2025-06-25T14:41:36.830149Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:36.830272Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:36.830371Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:36.831297Z node 28 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> SystemView::AuthGroupMembers_Access [GOOD] >> SystemView::AuthGroupMembers_ResultOrder >> TxUsage::Sinks_Oltp_WriteToTopic_4_Table [GOOD] >> Describe::LocationWithKillTablets [GOOD] >> Describe::DescribePartitionPermissions >> KqpQueryPerf::ComputeLength-QueryService [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_4_Query >> TxUsage::WriteToTopic_Demo_25_Table [GOOD] |84.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v0] [GOOD] >> SystemView::ShowCreateTablePartitionSettings [GOOD] >> SystemView::ShowCreateTableReadReplicas ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::ComputeLength-QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 10605, MsgBus: 13208 2025-06-25T14:42:13.062546Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897320161042834:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:13.062638Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a94/r3tmp/tmpRxUoSn/pdisk_1.dat 2025-06-25T14:42:13.423584Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897320161042811:2080] 1750862533060672 != 1750862533060675 2025-06-25T14:42:13.434252Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table 
profiles were not loaded 2025-06-25T14:42:13.461458Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:13.461590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 10605, node 1 2025-06-25T14:42:13.463296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:13.511500Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:13.511520Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:13.511538Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:13.511658Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13208 TClient is connected to server localhost:13208 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:42:14.074908Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:14.228029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:14.241168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:42:14.256362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:14.379941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:14.505149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:14.559578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:16.182150Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897333045946351:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.182239Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.487745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.510564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.536862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.562978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.590374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.631167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.701218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:16.757389Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897333045947010:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.757453Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.757469Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897333045947015:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:16.760011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:16.768416Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897333045947017:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:16.857322Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897333045947068:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:18.062867Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897320161042834:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:18.062953Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4249, MsgBus: 9173 2025-06-25T14:42:18.581710Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897343626946105:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:18.581752Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a94/r3tmp/tmpggGhtO/pdisk_1.dat 2025-06-25T14:42:18.691297Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4249, node 2 2025-06-25T14:42:18.732070Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:18.732156Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:18.733526Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:18.764551Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:18.764575Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:18.764586Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:18.764725Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9173 TClient is connected to server localhost:9173 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:19.133937Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:19.138775Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:42:19.143434Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:19.212815Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:19.345353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:19.417621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:19.586718Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:21.119516Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897356511849607:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.119614Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.168750Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.189894Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.211881Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.233148Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.256386Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.282334Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.348106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.390986Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897356511850264:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.391043Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.391112Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897356511850269:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.394040Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:21.401236Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897356511850271:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:21.461879Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897356511850322:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpQueryPerf::IndexUpdateOn+QueryService+UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_25_Query >> TxUsage::WriteToTopic_Demo_5_Query [GOOD] >> KqpQueryPerf::IndexReplace+QueryService+UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Table [GOOD] >> KqpQueryPerf::IndexDeleteOn+QueryService+UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_42_Table [GOOD] >> TxUsage::WriteToTopic_Demo_6_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexUpdateOn+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 1780, MsgBus: 12399 2025-06-25T14:42:09.895983Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897304049951380:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:09.896048Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ac6/r3tmp/tmp19LiA5/pdisk_1.dat 2025-06-25T14:42:10.292415Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897304049951360:2080] 1750862529894706 != 1750862529894709 2025-06-25T14:42:10.300537Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1780, node 1 2025-06-25T14:42:10.342173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:10.342315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:10.346258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:10.362869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:10.362891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:10.362902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:10.363008Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12399 TClient is connected to server localhost:12399 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:42:10.923605Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:11.076082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:11.102141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:42:11.110891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:11.271685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:11.473635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:11.562750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:13.152349Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897321229822169:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.152487Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.391502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.423726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.468966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.533624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.565599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.627720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.668285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:13.764759Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897321229822833:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.764842Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.765070Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897321229822838:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.769087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:13.786978Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897321229822840:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:13.888843Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897321229822891:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:14.901123Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897304049951380:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:14.901169Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:15.023912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... 17.800231Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897337211755313:2080] 1750862537713012 != 1750862537713015 TServer::EnableGrpc on GrpcPort 8714, node 2 2025-06-25T14:42:17.842557Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:17.842578Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:17.842586Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:17.842688Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:42:17.851287Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:17.851346Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:17.852514Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25356 TClient is connected to server localhost:25356 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:42:18.203993Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:18.214081Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:18.260702Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:18.429353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:18.487605Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:18.746231Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:20.478420Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897350096658835:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:20.478527Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:20.541877Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:20.568546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:20.596252Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:20.625693Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:20.655433Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:20.687525Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:20.717173Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:20.772412Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897350096659495:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:20.772471Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897350096659500:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:20.772475Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:20.774894Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:20.783372Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897350096659502:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:20.857102Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897350096659553:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:21.669489Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.701015Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.736916Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:22.713572Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897337211755332:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:22.713644Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexReplace+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 14614, MsgBus: 17149 2025-06-25T14:42:10.946829Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897306538023667:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:10.946909Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ab3/r3tmp/tmpyBlrt4/pdisk_1.dat 2025-06-25T14:42:11.320735Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:11.322982Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897306538023645:2080] 1750862530945309 != 1750862530945312 2025-06-25T14:42:11.342164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:11.342281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-25T14:42:11.371990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14614, node 1 2025-06-25T14:42:11.412092Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:11.412122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:11.412130Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:11.412260Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17149 TClient is connected to server localhost:17149 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:42:11.959886Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:11.995325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:12.025146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:42:12.035388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:12.190405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:12.321952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:12.387122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:13.966477Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897319422927176:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:13.966572Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.248666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.273931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.296779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.323470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.357093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.388266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.452009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.529886Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897323717895136:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.529977Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.530079Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897323717895141:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.533287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:14.541712Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897323717895143:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:14.631239Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897323717895194:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:15.680332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:15.760714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/yd ... 8.578304Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897341634943448:2080] 1750862538481159 != 1750862538481162 2025-06-25T14:42:18.623949Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:18.624056Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 29551, node 2 2025-06-25T14:42:18.637974Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:18.668235Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:18.668257Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:18.668266Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:18.668398Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28144 TClient is connected to server localhost:28144 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:42:19.116231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:19.133803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:19.206123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:19.355519Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:19.431403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:19.561295Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:21.080922Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897354519846974:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.081032Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.146128Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.168722Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.190888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.211868Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.235244Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.260774Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.285288Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.362133Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897354519847628:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.362208Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897354519847633:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.362231Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.365247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:21.374959Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897354519847635:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:21.468896Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897354519847686:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:22.176425Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:22.208081Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:22.276408Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:23.482418Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897341634943471:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:23.482481Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::IndexDeleteOn+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 1803, MsgBus: 30217 2025-06-25T14:42:10.799060Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897309749451951:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:10.799128Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000abd/r3tmp/tmpSrPSJs/pdisk_1.dat 2025-06-25T14:42:11.297518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:11.297705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:11.317345Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:11.325264Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:11.330065Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for 
subscription [1:7519897309749451930:2080] 1750862530798151 != 1750862530798154 TServer::EnableGrpc on GrpcPort 1803, node 1 2025-06-25T14:42:11.460883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:11.460907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:11.460916Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:11.461033Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30217 2025-06-25T14:42:11.820854Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30217 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:12.137026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:12.170823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:42:12.312588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:12.476694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:12.561936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:14.031987Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897326929322762:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.032074Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.287275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.319065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.354091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.383850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.414193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.444459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.511940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:14.566274Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897326929323423:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.566362Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.566631Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897326929323428:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:14.569977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:14.578625Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897326929323430:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:14.675410Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897326929323481:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:15.799304Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897309749451951:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:15.799356Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:15.799482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:15.827913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part ... # /home/runner/.ya/build/build_root/yft8/000abd/r3tmp/tmpn18iLQ/pdisk_1.dat 2025-06-25T14:42:18.267175Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:18.287638Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:18.287704Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:18.289328Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65323, node 2 2025-06-25T14:42:18.320229Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:18.320247Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:18.320255Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:18.320388Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21107 TClient is connected to server localhost:21107 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:18.682866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:18.692906Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:18.742720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:18.895706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:18.971045Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:19.162163Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:21.037836Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897355005265714:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.037940Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.090101Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.116434Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.144527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.174173Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.239810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.285201Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.313431Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:21.367341Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897355005266378:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.367416Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897355005266383:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.367432Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:21.370860Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:21.380442Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897355005266385:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:21.462945Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897355005266436:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:22.176209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:22.205132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:22.234283Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:23.156265Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897342120362230:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:23.156350Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TxUsage::WriteToTopic_Demo_42_Query >> SystemView::AuthPermissions [GOOD] >> SystemView::AuthPermissions_Access >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Query [GOOD] >> SystemView::AuthPermissions_ResultOrder [GOOD] >> SystemView::AuthPermissions_Selects >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::ShowCreateTableColumnAlterObject [GOOD] Test command err: 2025-06-25T14:36:14.801874Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519895778647163561:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:14.802038Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001315/r3tmp/tmpfu6AC2/pdisk_1.dat 2025-06-25T14:36:15.646650Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:36:15.646745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:36:15.700412Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-25T14:36:15.708026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23712, node 1 2025-06-25T14:36:15.821492Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:36:15.828475Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:36:16.048842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:16.048861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:16.048868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:16.048968Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7580 TClient is connected to server localhost:7580 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:36:17.181210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:36:19.572588Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:276: Subscribed for config changes 2025-06-25T14:36:19.572627Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:331: Updated config 2025-06-25T14:36:19.659314Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895800122001099:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:19.659410Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:19.659589Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519895800122001111:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:36:19.662667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:36:19.721782Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519895800122001113:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:36:19.760422Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519895778647163561:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:36:19.760499Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:36:19.814786Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519895800122001193:2767] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:36:19.815993Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-25T14:36:19.816091Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:413: Perform request, TraceId.SpanIdPtr: 0x000050F000031148 2025-06-25T14:36:19.816124Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:423: Received compile request, sender: [1:7519895800122001092:2301], queryUid: , queryText: "\n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n ", keepInCache: 1, split: 0{ TraceId: 01jykr9ba952efxwzyrabff81n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzBlYjQ5NTctNDRlZTE1ZDQtZjE1NTJmOS1lMTA5YzA3Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default} 2025-06-25T14:36:19.816238Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: 
QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-25T14:36:19.816294Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:519: Added request to queue, sender: [1:7519895800122001092:2301], queueSize: 1 2025-06-25T14:36:19.816861Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:880: Created compile actor, sender: [1:7519895800122001092:2301], compileActor: [1:7519895800122001204:2312] 2025-06-25T14:36:20.192460Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykr9ba952efxwzyrabff81n, SessionId: CompileActor 2025-06-25 14:36:20.192 INFO ydb-core-sys_view-ut(pid=221105, tid=0x00007FE741A00640) [core dq] kqp_host.cpp:1375: Good place to weld in 2025-06-25T14:36:20.193726Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykr9ba952efxwzyrabff81n, SessionId: CompileActor 2025-06-25 14:36:20.193 INFO ydb-core-sys_view-ut(pid=221105, tid=0x00007FE741A00640) [core dq] kqp_host.cpp:1380: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake.fake/olap-tier1") '('"source_type" '"ObjectStorage"))) (return (Write! $2 (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/tier1")) '('typeId (String '"EXTERNAL_DATA_SOURCE"))) (Void) '('('mode 'createObject) '('features $3)))) ) 2025-06-25T14:36:20.194417Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykr9ba952efxwzyrabff81n, SessionId: CompileActor 2025-06-25 14:36:20.193 INFO ydb-core-sys_view-ut(pid=221105, tid=0x00007FE741A00640) [KQP] kqp_host.cpp:1386: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typ ... 
eId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.615 TRACE ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [KQP] kqp_transform.cpp:33: TxsPeephole: ( (let $1 (KqpTable '"//Root/.metadata/initialization/migrations" '"72057594046644480:7" '"" '1)) (let $2 '('"componentId" '"instant" '"modificationId")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '1)) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"componentId" $5) '('"instant" (OptionalType (DataType 'Uint32))) '('"modificationId" $5))) (let $7 '('('"_logical_id" '338) '('"_id" '"ff542fcf-6b62e9df-97a5b6f8-8e1b948e") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"componentId") (Member $14 '"instant") (Member $14 '"modificationId"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (NarrowMap (Take (ToFlow $15) $3) (lambda '($16 $17 $18) (AsStruct '('"componentId" $16) '('"instant" $17) '('"modificationId" $18)))))) '('('"_logical_id" '351) '('"_id" '"e896922c-1e206cc5-ec2f42df-9fe77404")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) ) 2025-06-25T14:41:59.617597Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.617 DEBUG ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [perf] type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 952us 2025-06-25T14:41:59.617856Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.617 DEBUG ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [perf] yql_expr_constraint.cpp:3279: Execution of [ConstraintTransformer::DoTransform] took 196us 2025-06-25T14:41:59.618837Z node 41 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.617 TRACE ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [KQP] kqp_transform.cpp:33: TxsPeephole: ( (let $1 (KqpTable '"//Root/.metadata/initialization/migrations" '"72057594046644480:7" '"" '1)) (let $2 '('"componentId" '"instant" '"modificationId")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '1)) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"componentId" $5) '('"instant" (OptionalType (DataType 'Uint32))) '('"modificationId" $5))) (let $7 '('('"_logical_id" '338) '('"_id" '"ff542fcf-6b62e9df-97a5b6f8-8e1b948e") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"componentId") (Member $14 '"instant") (Member $14 '"modificationId"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (NarrowMap (Take (ToFlow $15) $3) (lambda '($16 $17 $18) (AsStruct '('"componentId" $16) '('"instant" $17) '('"modificationId" $18)))))) '('('"_logical_id" '351) '('"_id" '"e896922c-1e206cc5-ec2f42df-9fe77404")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) ) 
2025-06-25T14:41:59.618906Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.618 DEBUG ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [perf] type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 12us 2025-06-25T14:41:59.619227Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.619 DEBUG ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [perf] yql_expr_constraint.cpp:3279: Execution of [ConstraintTransformer::DoTransform] took 271us 2025-06-25T14:41:59.620156Z node 41 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.619 TRACE ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [KQP] kqp_transform.cpp:33: TxsPeephole: ( (let $1 (KqpTable '"//Root/.metadata/initialization/migrations" '"72057594046644480:7" '"" '1)) (let $2 '('"componentId" '"instant" '"modificationId")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '1)) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"componentId" $5) '('"instant" (OptionalType (DataType 'Uint32))) '('"modificationId" $5))) (let $7 '('('"_logical_id" '338) '('"_id" '"ff542fcf-6b62e9df-97a5b6f8-8e1b948e") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"componentId") (Member $14 '"instant") (Member $14 '"modificationId"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (NarrowMap (Take (ToFlow $15) $3) (lambda '($16 $17 $18) (AsStruct '('"componentId" $16) '('"instant" $17) '('"modificationId" $18)))))) '('('"_logical_id" '351) '('"_id" '"e896922c-1e206cc5-ec2f42df-9fe77404")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) ) 2025-06-25T14:41:59.620229Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.620 DEBUG ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [perf] type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 9us 2025-06-25T14:41:59.620469Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.620 DEBUG ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [perf] yql_expr_constraint.cpp:3279: Execution of [ConstraintTransformer::DoTransform] took 198us 2025-06-25T14:41:59.620814Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.620 DEBUG ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [perf] yql_expr_csee.cpp:620: Execution of [UpdateCompletness] took 277us 2025-06-25T14:41:59.621677Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.621 DEBUG ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [perf] yql_expr_csee.cpp:633: Execution of [EliminateCommonSubExpressionsForSubGraph] took 809us 2025-06-25T14:41:59.622752Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.621 DEBUG ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [KQP] 
kqp_opt_peephole.cpp:489: >>> TKqpTxPeepholeTransformer[skip]: ( (let $1 (KqpTable '"//Root/.metadata/initialization/migrations" '"72057594046644480:7" '"" '1)) (let $2 '('"componentId" '"instant" '"modificationId")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '1)) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"componentId" $5) '('"instant" (OptionalType (DataType 'Uint32))) '('"modificationId" $5))) (let $7 '('('"_logical_id" '338) '('"_id" '"ff542fcf-6b62e9df-97a5b6f8-8e1b948e") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"componentId") (Member $14 '"instant") (Member $14 '"modificationId"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (NarrowMap (Take (ToFlow $15) $3) (lambda '($16 $17 $18) (AsStruct '('"componentId" $16) '('"instant" $17) '('"modificationId" $18)))))) '('('"_logical_id" '351) '('"_id" '"e896922c-1e206cc5-ec2f42df-9fe77404")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) ) 2025-06-25T14:41:59.624015Z node 41 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrkpzp39jsksqh46x0hhex, SessionId: CompileActor 2025-06-25 14:41:59.622 TRACE ydb-core-sys_view-ut(pid=221105, tid=0x00007FE73293D640) [KQP] kqp_transform.cpp:33: PhysicalPeepholeTransformer: ( (let $1 (KqpTable '"//Root/.metadata/initialization/migrations" '"72057594046644480:7" '"" '1)) (let $2 '('"componentId" '"instant" '"modificationId")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '1)) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"componentId" $5) '('"instant" (OptionalType (DataType 'Uint32))) '('"modificationId" $5))) (let $7 '('('"_logical_id" '338) '('"_id" '"ff542fcf-6b62e9df-97a5b6f8-8e1b948e") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"componentId") (Member $14 '"instant") (Member $14 '"modificationId"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (NarrowMap (Take (ToFlow $15) $3) (lambda '($16 $17 $18) (AsStruct '('"componentId" $16) '('"instant" $17) '('"modificationId" $18)))))) '('('"_logical_id" '351) '('"_id" '"e896922c-1e206cc5-ec2f42df-9fe77404")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) '((KqpTxResultBinding (ListType $6) '"0" '"0")) '('('"type" '"data_query")))) ) 2025-06-25T14:41:59.631830Z node 41 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: metadata@system, Text: SELECT * FROM `//Root/.metadata/secrets/values`;\n, Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_DML}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-25T14:41:59.631954Z node 41 :KQP_COMPILE_SERVICE DEBUG: 
kqp_compile_service.cpp:413: Perform request, TraceId.SpanIdPtr: 0x000050F001B5F528 2025-06-25T14:41:59.632006Z node 41 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:423: Received compile request, sender: [41:7519897261041406257:3371], queryUid: , queryText: "SELECT * FROM `//Root/.metadata/secrets/values`;\n", keepInCache: 0, split: 0{ TraceId: 01jykrkqaf67s1ch5rk5kqxa64, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=NWI3ODJmY2QtYWM3MTE0My1jNWM5MmE3YS02ZThkNTY4OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default} 2025-06-25T14:41:59.632223Z node 41 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: metadata@system, Text: SELECT * FROM `//Root/.metadata/secrets/values`;\n, Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_DML}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} >> KqpQueryPerf::Replace+QueryService-UseSink >> SystemView::AuthGroupMembers_ResultOrder [GOOD] >> SystemView::AuthGroupMembers_TableRange >> TxUsage::WriteToTopic_Demo_11_Table [GOOD] >> TxUsage::WriteToTopic_Demo_34_Query [GOOD] >> TxUsage::WriteToTopic_Demo_11_Query >> TxUsage::WriteToTopic_Demo_35_Table >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Table [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Query >> KqpQueryPerf::Replace+QueryService-UseSink [GOOD] >> Describe::DescribePartitionPermissions [GOOD] >> DirectReadWithServer::KillPQTablet ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpQueryPerf::Replace+QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 12889, MsgBus: 18989 2025-06-25T14:42:28.839790Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897387700300071:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:28.839844Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a7c/r3tmp/tmp29Mike/pdisk_1.dat 2025-06-25T14:42:29.067024Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:29.067241Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897387700300051:2080] 1750862548839158 != 1750862548839161 TServer::EnableGrpc on GrpcPort 12889, node 1 2025-06-25T14:42:29.100674Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:29.100696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:29.100705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:29.100826Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:42:29.168780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:29.168877Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:29.170747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18989 TClient is connected to server localhost:18989 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:29.433773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:29.446494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:29.535584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:29.657524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:29.706757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:29.856202Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:30.687762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897396290236272:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:30.687869Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:30.863376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:30.883686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:30.905336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:30.925692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:30.946347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:30.991128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:31.014527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:31.087142Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897400585204226:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:31.087203Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:31.087215Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897400585204231:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:31.089837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:31.097082Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897400585204233:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:31.172223Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897400585204284:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TxUsage::Sinks_Oltp_WriteToTopic_4_Query [GOOD] >> SystemView::AuthPermissions_Selects [GOOD] >> SystemView::AuthPermissions_Access [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_5_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::AuthPermissions_Selects [GOOD] Test command err: 2025-06-25T14:40:46.296387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896949148833331:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:46.296437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001297/r3tmp/tmpHcuYly/pdisk_1.dat 2025-06-25T14:40:46.752791Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:46.764472Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896949148833312:2080] 1750862446294015 != 1750862446294018 TServer::EnableGrpc on GrpcPort 14861, node 1 2025-06-25T14:40:46.793191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:46.793483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:46.801098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:46.915298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:46.915321Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:46.915328Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:46.915462Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61401 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:47.249796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:40:47.280338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:47.326049Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:49.284719Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896962033735925:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:49.285100Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896962033735914:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:49.285189Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:49.289552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:49.300921Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896962033735928:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:40:49.354019Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896962033735981:2387] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:49.729495Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrhjm225xeepyyzhmj6nnb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWVkMTg1YTYtZWU2MjIxZmMtMTA1MDM4YmQtNTRhOTRmYWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:40:49.985218Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrhk9t5bx2bdy293ym1v61, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTIxYzYxYjItZmQ5ZDJlODItYmI3YzRiOTEtMWIyZTUzYzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:40:50.265620Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykrhkas1q57efhdv95cwwka, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzhkZmUyNWQtMjBlY2UxMTItNjgwOWViZi1lZGM1ZTVmMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:40:50.279019Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519896966328703382:2326], owner: [1:7519896966328703378:2324], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:50.285854Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519896966328703382:2326], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:40:50.287678Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519896966328703382:2326], row count: 2, finished: 1 2025-06-25T14:40:50.287799Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519896966328703382:2326], owner: [1:7519896966328703378:2324], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:40:50.291264Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862450264, txId: 281474976715663] shutting down 2025-06-25T14:40:51.023705Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896968108300422:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:51.023749Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001297/r3tmp/tmpInxAXO/pdisk_1.dat 2025-06-25T14:40:51.243752Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:51.243844Z node 2 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:51.247171Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:51.263967Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16571, node 2 2025-06-25T14:40:51.363507Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:51.363529Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:51.363537Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:51.363670Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27513 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:51.618455Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:51.628798Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:40:51.632815Z node 2 :FLAT_TX_SCHEM ... 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:33.503364Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [SubDir1,SubDir2] }] } 2025-06-25T14:42:33.503389Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7519897405310354058:2378], row count: 0, finished: 0 2025-06-25T14:42:33.503444Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:33.503594Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-25T14:42:33.503653Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7519897405310354058:2378], row count: 2, finished: 0 2025-06-25T14:42:33.503776Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [33:7519897405310354058:2378], owner: [33:7519897405310354055:2376], scan id: 0, sys view info: Type: EAuthPermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:33.505036Z node 33 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [33:7519897383835515563:2144], database# , query hash# 3187945588805523718, cpu time# 129921 2025-06-25T14:42:33.505677Z node 33 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862553498, txId: 281474976710687] shutting down 2025-06-25T14:42:33.674984Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[37:7519897386041603404:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:33.675062Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:33.676783Z node 33 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: 
TxId: 281474976710690. Ctx: { TraceId: 01jykrmrda53wsr4n0h60ftqfg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=33&id=MTliMzg1NGYtZWE4ZGU0ZDEtMzk1ZTEwMi04Nzk5Yjc1NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:42:33.679576Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[36:7519897387045176057:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:33.679634Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:33.679980Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [33:7519897405310354095:2387], owner: [33:7519897405310354090:2385], scan id: 0, sys view info: Type: EAuthPermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:33.680531Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [33:7519897405310354095:2387], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:42:33.680565Z node 33 :SYSTEM_VIEWS DEBUG: auth_scan_base.h:100: ProceedToScan, tenant name: /Root tenant owner: root@builtin subject sid: empty require admin access: 0 is admin: 1 2025-06-25T14:42:33.680640Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:33.680871Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } Children [.metadata,Dir1,Table0,Tenant1,Tenant2] }] } 2025-06-25T14:42:33.680922Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7519897405310354095:2387], row count: 0, finished: 0 2025-06-25T14:42:33.681022Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:33.681234Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath 
DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [SubDir1,SubDir2] }] } 2025-06-25T14:42:33.681274Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7519897405310354095:2387], row count: 0, finished: 0 2025-06-25T14:42:33.681359Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:33.681533Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-25T14:42:33.681591Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [33:7519897405310354095:2387], row count: 1, finished: 0 2025-06-25T14:42:33.681739Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [33:7519897405310354095:2387], owner: [33:7519897405310354090:2385], scan id: 0, sys view info: Type: EAuthPermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:33.683513Z node 33 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [33:7519897383835515563:2144], database# , query hash# 15123460272068726277, cpu time# 156398 2025-06-25T14:42:33.684119Z node 33 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862553675, txId: 281474976710689] shutting down 2025-06-25T14:42:33.689362Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 37 2025-06-25T14:42:33.689626Z node 36 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:42:33.689649Z node 34 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:42:33.689975Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:33.690244Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 35 2025-06-25T14:42:33.690801Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(35, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:33.690952Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 36 
2025-06-25T14:42:33.691277Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(36, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:33.691435Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 34 2025-06-25T14:42:33.691902Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:33.835242Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[35:7519897383648049384:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:33.835367Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:33.839375Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[34:7519897386613041400:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:33.839459Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::AuthPermissions_Access [GOOD] Test command err: 2025-06-25T14:40:38.922393Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896913126086870:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:38.922462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012c2/r3tmp/tmpPt5b2X/pdisk_1.dat 2025-06-25T14:40:39.145639Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:39.180930Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 25558, node 1 2025-06-25T14:40:39.209700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:39.209721Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:39.209729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:39.209867Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:39.245883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:39.245989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:39.248896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16151 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:39.445805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:39.618441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "Tenant1" } } TxId: 281474976710658 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:40:39.618652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_extsubdomain.cpp:58: TCreateExtSubDomain Propose, path/Root/Tenant1, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:40:39.618705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: Tenant1, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-25T14:40:39.618806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-25T14:40:39.618822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710658:0 type: TxCreateExtSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-25T14:40:39.618907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:40:39.618968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:40:39.618985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 
2025-06-25T14:40:39.619025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:40:39.619042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:40:39.620673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710658, response: Status: StatusAccepted TxId: 281474976710658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-06-25T14:40:39.620842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710658, database: /Root, subject: root@builtin, status: StatusAccepted, operation: CREATE DATABASE, path: /Root/Tenant1 2025-06-25T14:40:39.621020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:40:39.621041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-25T14:40:39.621151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T14:40:39.621238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:40:39.621256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519896917421054706:2376], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 1 waiting... 
2025-06-25T14:40:39.621270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519896917421054706:2376], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 2 2025-06-25T14:40:39.621328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:40:39.621347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:40:39.621370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710658:0, at tablet# 72057594046644480 2025-06-25T14:40:39.621393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976710658 ready parts: 1/1 2025-06-25T14:40:39.624292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710658 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:40:39.625564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-25T14:40:39.625626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-25T14:40:39.625635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710658 2025-06-25T14:40:39.625664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710658, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], version: 4 2025-06-25T14:40:39.625677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-25T14:40:39.625846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-25T14:40:39.625900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-25T14:40:39.625906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710658 2025-06-25T14:40:39.625912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 
281474976710658, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 2 2025-06-25T14:40:39.625918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-25T14:40:39.625944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710658, ready parts: 0/1, is published: true 2025-06-25T14:40:39.626073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:40:39.626102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 2814 ... ull> }] } 2025-06-25T14:42:33.625350Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata TableId: [72057594046644480:5:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [workload_manager] }] } 2025-06-25T14:42:33.625389Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897407381754594:2385], row count: 0, finished: 0 2025-06-25T14:42:33.625773Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:33.626097Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager TableId: [72057594046644480:6:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [pools] }] } 2025-06-25T14:42:33.626138Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897407381754594:2385], row count: 0, finished: 0 2025-06-25T14:42:33.626741Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:33.626928Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet 
[{ Path: Root/.metadata/workload_manager/pools TableId: [72057594046644480:7:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [default] }] } 2025-06-25T14:42:33.626966Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897407381754594:2385], row count: 0, finished: 0 2025-06-25T14:42:33.627035Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools/default TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:33.627250Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools/default TableId: [72057594046644480:8:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindResourcePool DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:42:33.627342Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897407381754594:2385], row count: 6, finished: 0 2025-06-25T14:42:33.627440Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:33.627635Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-25T14:42:33.627681Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897407381754594:2385], row count: 1, finished: 0 2025-06-25T14:42:33.628197Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir2 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath 
Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:33.628675Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir2 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-25T14:42:33.628734Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897407381754594:2385], row count: 0, finished: 0 2025-06-25T14:42:33.628991Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:33.629423Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [72057594046644480:4:1] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:42:33.629459Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897407381754594:2385], row count: 0, finished: 0 2025-06-25T14:42:33.629603Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7519897407381754594:2385], owner: [41:7519897407381754591:2383], scan id: 0, sys view info: Type: EAuthPermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:33.630557Z node 41 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [41:7519897381611948732:2144], database# , query hash# 12107705915200741666, cpu time# 120216 2025-06-25T14:42:33.631271Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862553619, txId: 281474976715692] shutting down 2025-06-25T14:42:33.637790Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 43 2025-06-25T14:42:33.638030Z node 42 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:42:33.638612Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(43, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:33.638848Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 44 
2025-06-25T14:42:33.639329Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:42:33.639408Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(44, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:33.641003Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 42 2025-06-25T14:42:33.641763Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:33.642037Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 45 2025-06-25T14:42:33.642705Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(45, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:33.643264Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[45:7519897383533305167:2101], Type=268959746 2025-06-25T14:42:33.643330Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[45:7519897383533305167:2101], Type=268959746 2025-06-25T14:42:33.643358Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[45:7519897383533305167:2101], Type=268959746 2025-06-25T14:42:33.643382Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[42:7519897386650937434:2105], Type=268959746 2025-06-25T14:42:33.643411Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[42:7519897386650937434:2105], Type=268959746 2025-06-25T14:42:33.643439Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[42:7519897386650937434:2105], Type=268959746 2025-06-25T14:42:33.643473Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[42:7519897386650937434:2105], Type=268959746 >> KqpWorkload::STOCK [GOOD] >> TxUsage::WriteToTopic_Demo_26_Table >> IncrementalBackup::BackupRestore >> IncrementalBackup::SimpleRestore >> TxUsage::WriteToTopic_Demo_6_Table [GOOD] >> TxUsage::WriteToTopic_Demo_42_Query [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query [GOOD] |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> SystemView::AuthGroupMembers_TableRange [GOOD] >> SystemView::AuthEffectivePermissions >> TxUsage::WriteToTopic_Demo_6_Query |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpWorkload::STOCK [GOOD] Test command err: Trying to start YDB, gRPC: 8421, MsgBus: 22781 2025-06-25T14:41:36.578974Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897164113555736:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:36.579362Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b51/r3tmp/tmpJ9CsYH/pdisk_1.dat 2025-06-25T14:41:37.580494Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:41:37.879204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:37.879311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:37.906558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:38.055690Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:41:38.082986Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:38.217263Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.112796s 2025-06-25T14:41:38.217330Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.112891s TServer::EnableGrpc on GrpcPort 8421, node 1 2025-06-25T14:41:38.408917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:38.408940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:38.408948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:38.409075Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22781 TClient is connected to server localhost:22781 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:40.415847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:40.492393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:41.580569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897164113555736:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:41.600922Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:42.243787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897189883359970:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.243908Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.759956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:42.875390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.569643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:44.227099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897198473298473:2598], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:44.227165Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:44.227392Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897198473298478:2601], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:44.230461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:44.242890Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897198473298480:2602], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:41:44.339207Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897198473298535:4888] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:52.846835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:41:52.846866Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded took: 0.390012s took: 0.390907s took: 0.392360s took: 0.396192s took: 0.397685s took: 0.403106s took: 0.403682s took: 0.404007s took: 0.404379s took: 0.404735s took: 4.296048s took: 4.363855s 2025-06-25T14:42:32.185772Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976711076; 2025-06-25T14:42:32.196418Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [1:7519897404631739306:4985], Table: `/Root/stock` ([72057594046644480:2:1]), SessionActorId: [1:7519897383156901280:4985]Got LOCKS BROKEN for table `/Root/stock`. ShardID=72075186224037888, Sink=[1:7519897404631739306:4985].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-25T14:42:32.197014Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519897400336771182:4985], SessionActorId: [1:7519897383156901280:4985], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/stock`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[1:7519897383156901280:4985]. isRollback=0 2025-06-25T14:42:32.197305Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=ZWVmYzk3NDMtOWQ0ZjhlNzItOTE1MTBiZGYtMWI4MDRmOGM=, ActorId: [1:7519897383156901280:4985], ActorState: ExecuteState, TraceId: 01jykrmk86a0s2efdfq1c2kxff, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519897404631739139:4985] from: [1:7519897400336771182:4985] 2025-06-25T14:42:32.197388Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519897404631739139:4985] TxId: 281474976711076. Ctx: { TraceId: 01jykrmk86a0s2efdfq1c2kxff, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWVmYzk3NDMtOWQ0ZjhlNzItOTE1MTBiZGYtMWI4MDRmOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/stock`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T14:42:32.197613Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZWVmYzk3NDMtOWQ0ZjhlNzItOTE1MTBiZGYtMWI4MDRmOGM=, ActorId: [1:7519897383156901280:4985], ActorState: ExecuteState, TraceId: 01jykrmk86a0s2efdfq1c2kxff, Create QueryResponse for error on request, msg: 2025-06-25T14:42:32.198910Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_ABORTED;details=Distributed transaction aborted due to commit failure;tx_id=281474976711076; 2025-06-25T14:42:32.199062Z node 1 :TX_DATASHARD ERROR: datashard.cpp:751: Complete volatile write [1750862552230 : 281474976711076] from 72075186224037899 at tablet 72075186224037899, error: Status: STATUS_ABORTED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } 2025-06-25T14:42:32.204121Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event= ... abletStatus from node 1, TabletId: 72075186224037904 not found 2025-06-25T14:42:37.160846Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037914 not found 2025-06-25T14:42:37.160864Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037909 not found 2025-06-25T14:42:37.160871Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037927 not found 2025-06-25T14:42:37.160878Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037917 not found 2025-06-25T14:42:37.165678Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037929 not found 2025-06-25T14:42:37.165703Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037926 not found 2025-06-25T14:42:37.165711Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037910 not found 2025-06-25T14:42:37.165737Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037903 not found 2025-06-25T14:42:37.165745Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037922 not found 2025-06-25T14:42:37.165957Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-25T14:42:37.166021Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037906 not found 2025-06-25T14:42:37.166044Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037900 not found 2025-06-25T14:42:37.166054Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-25T14:42:37.166064Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037921 not found 2025-06-25T14:42:37.166072Z node 1 :HIVE WARN: 
hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037925 not found 2025-06-25T14:42:37.166080Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037928 not found 2025-06-25T14:42:37.166100Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found 2025-06-25T14:42:37.166114Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037899 not found 2025-06-25T14:42:37.166125Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037905 not found 2025-06-25T14:42:37.166134Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037896 not found 2025-06-25T14:42:37.166143Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037915 not found 2025-06-25T14:42:37.166155Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037901 not found 2025-06-25T14:42:37.166493Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found 2025-06-25T14:42:37.166512Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037927 not found 2025-06-25T14:42:37.166527Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037911 not found 2025-06-25T14:42:37.167071Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found 2025-06-25T14:42:37.167549Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037918 not found 2025-06-25T14:42:37.167856Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037894 not found 2025-06-25T14:42:37.170036Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037917 not found 2025-06-25T14:42:37.258489Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037942 not found 2025-06-25T14:42:37.263935Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037934 not found 2025-06-25T14:42:37.263965Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037959 not found 2025-06-25T14:42:37.263973Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037940 not found 2025-06-25T14:42:37.263984Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037933 not found 2025-06-25T14:42:37.263994Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, 
TabletId: 72075186224037954 not found 2025-06-25T14:42:37.264022Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037953 not found 2025-06-25T14:42:37.264050Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037968 not found 2025-06-25T14:42:37.264064Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037944 not found 2025-06-25T14:42:37.264071Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037937 not found 2025-06-25T14:42:37.264077Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037957 not found 2025-06-25T14:42:37.264083Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037963 not found 2025-06-25T14:42:37.264089Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037948 not found 2025-06-25T14:42:37.264095Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037950 not found 2025-06-25T14:42:37.264102Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037946 not found 2025-06-25T14:42:37.264108Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037952 not found 2025-06-25T14:42:37.264117Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037949 not found 2025-06-25T14:42:37.264746Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037967 not found 2025-06-25T14:42:37.264765Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037945 not found 2025-06-25T14:42:37.282983Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037966 not found 2025-06-25T14:42:37.283009Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037931 not found 2025-06-25T14:42:37.283017Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037930 not found 2025-06-25T14:42:37.283026Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037962 not found 2025-06-25T14:42:37.283034Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037958 not found 2025-06-25T14:42:37.283042Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037951 not found 2025-06-25T14:42:37.283050Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037941 not found 2025-06-25T14:42:37.283057Z node 1 :HIVE WARN: 
hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037955 not found 2025-06-25T14:42:37.283066Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037932 not found 2025-06-25T14:42:37.283112Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037936 not found 2025-06-25T14:42:37.283122Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037965 not found 2025-06-25T14:42:37.283130Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037969 not found 2025-06-25T14:42:37.283143Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037956 not found 2025-06-25T14:42:37.283154Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037938 not found 2025-06-25T14:42:37.283170Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037961 not found 2025-06-25T14:42:37.283179Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037935 not found 2025-06-25T14:42:37.283187Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037939 not found 2025-06-25T14:42:37.283195Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037947 not found 2025-06-25T14:42:37.283202Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037964 not found 2025-06-25T14:42:37.286437Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037943 not found 2025-06-25T14:42:37.286489Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037960 not found >> KqpBatchUpdate::Large_3 >> KqpBatchDelete::Large_1 >> KqpBatchUpdate::ManyPartitions_2 >> KqpBatchDelete::MultiStatement >> KqpBatchUpdate::ManyPartitions_1 >> KqpBatchDelete::ManyPartitions_3 >> KqpBatchUpdate::TableWithIndex >> KqpBatchUpdate::MultiStatement >> KqpBatchUpdate::SimpleOnePartition >> KqpBatchUpdate::ColumnTable >> KqpBatchUpdate::TableNotExists >> KqpBatchDelete::ColumnTable >> KqpBatchDelete::UnknownColumn >> KqpBatchUpdate::ManyPartitions_3 >> KqpBatchDelete::Large_3 >> TxUsage::WriteToTopic_Demo_22_RestartNo_Table >> TxUsage::WriteToTopic_Demo_43_Table |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::SimplePartitions >> KqpBatchUpdate::HasTxControl >> KqpBatchDelete::SimplePartitions >> KqpBatchUpdate::Returning |84.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::ManyPartitions_1 >> KqpBatchDelete::Returning >> KqpBatchDelete::HasTxControl >> KqpBatchUpdate::UpdateOn >> KqpBatchUpdate::UnknownColumn >> KqpBatchDelete::DeleteOn >> TxUsage::WriteToTopic_Demo_35_Table [GOOD] >> TxUsage::WriteToTopic_Demo_35_Query |84.1%| 
[TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::TableNotExists >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Table [GOOD] >> DirectReadWithServer::KillPQTablet [GOOD] >> DirectReadWithServer::KillPQRBTablet [GOOD] >> LocalPartition::Restarts >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Query >> KqpBatchUpdate::NotIdempotent >> TxUsage::Sinks_Oltp_WriteToTopic_5_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_5_Query >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Query [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Table >> SystemView::ShowCreateTableReadReplicas [GOOD] >> SystemView::ShowCreateTableTtlSettings >> SystemView::AuthEffectivePermissions [GOOD] >> KqpBatchUpdate::UnknownColumn [GOOD] >> KqpBatchDelete::Returning [GOOD] >> KqpBatchDelete::DeleteOn [GOOD] >> KqpBatchDelete::UnknownColumn [GOOD] >> KqpBatchUpdate::UpdateOn [GOOD] >> IncrementalBackup::SimpleRestore [GOOD] >> IncrementalBackup::SimpleBackupBackupCollection+WithIncremental >> KqpBatchDelete::TableNotExists [GOOD] >> KqpBatchUpdate::Returning [GOOD] >> KqpBatchUpdate::NotIdempotent [GOOD] >> KqpBatchUpdate::MultiStatement [GOOD] >> BasicStatistics::TwoServerlessTwoSharedDbs [GOOD] >> KqpBatchDelete::MultiStatement [GOOD] >> KqpBatchDelete::HasTxControl [GOOD] >> KqpBatchUpdate::TableNotExists [GOOD] >> KqpBatchUpdate::HasTxControl [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::UnknownColumn [GOOD] Test command err: Trying to start YDB, gRPC: 31607, MsgBus: 13935 2025-06-25T14:42:40.790167Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897436097131731:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.790228Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bc8/r3tmp/tmp9qokU7/pdisk_1.dat 2025-06-25T14:42:41.544517Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897436097131712:2080] 1750862560789407 != 1750862560789410 2025-06-25T14:42:41.569134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.569240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.572623Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.616901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31607, node 1 2025-06-25T14:42:41.801080Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:42.716402Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.716431Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.716438Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:42:42.718165Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13935 TClient is connected to server localhost:13935 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.398949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.530524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.614532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.790427Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897436097131731:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.790489Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828370Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897457571969842:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828513Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.941212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.007102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.029370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.053486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.080059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.105900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.171604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.244807Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897470456872411:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.244835Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897470456872416:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.244858Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.246994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.253132Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897470456872418:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.314593Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897470456872469:3441] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.140650Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897474751840041:2489], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:34: Error: At lambda, At function: Coalesce, At function: And
:4:41: Error: At function: ==
:4:27: Error: At function: Member
:4:27: Error: Member not found: UnknownColumn 2025-06-25T14:42:49.142484Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=M2MxYzgzMDItNjdmMTllODgtNDE3ZDJmYTEtOTYzNzcxZGE=, ActorId: [1:7519897474751840032:2483], ActorState: ExecuteState, TraceId: 01jykrn7ma80ryqwkt43m5gjm5, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:42:49.179749Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897474751840050:2493], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:4:43: Error: At function: KiUpdateTable!
:4:43: Error: Column 'UnknownColumn' does not exist in table '/Root/Test'., code: 2017 2025-06-25T14:42:49.179981Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=M2MxYzgzMDItNjdmMTllODgtNDE3ZDJmYTEtOTYzNzcxZGE=, ActorId: [1:7519897474751840032:2483], ActorState: ExecuteState, TraceId: 01jykrn7nv88325pekabdbnhyk, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::Returning [GOOD] Test command err: Trying to start YDB, gRPC: 2496, MsgBus: 26096 2025-06-25T14:42:40.813849Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897436852154992:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.813973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bc2/r3tmp/tmpcVQtRM/pdisk_1.dat 2025-06-25T14:42:41.541531Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897436852154971:2080] 1750862560813040 != 1750862560813043 2025-06-25T14:42:41.556774Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.556906Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.565392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.576454Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2496, node 1 2025-06-25T14:42:41.832104Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:42.725840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.725899Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.725909Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.726090Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26096 TClient is connected to server localhost:26096 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.994072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.124938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.393905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.503634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.589732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.814052Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897436852154992:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.814132Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.837104Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897458326993094:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.837476Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.968047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.992204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.015663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.038278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.068399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.095772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.168446Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471211895656:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.168583Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.168825Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471211895661:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.174151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.184626Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897471211895663:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.260384Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897471211895714:3437] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.107807Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897475506863290:2490], status: GENERIC_ERROR, issues:
:2:22: Error: BATCH DELETE is unsupported with RETURNING 2025-06-25T14:42:49.108490Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OGVhOGVjMDAtZjBiNDFkNjgtOGQ2NzAzMTEtODIxMDc1ODU=, ActorId: [1:7519897475506863281:2484], ActorState: ExecuteState, TraceId: 01jykrn7kz2n554fw7wb0j1c30, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::DeleteOn [GOOD] Test command err: Trying to start YDB, gRPC: 7330, MsgBus: 1575 2025-06-25T14:42:40.840708Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897437903815978:2114];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.849271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bc5/r3tmp/tmpz89Fcp/pdisk_1.dat 2025-06-25T14:42:41.529770Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897437903815902:2080] 1750862560834683 != 1750862560834686 2025-06-25T14:42:41.543554Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.548155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.548274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.598619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.719026Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.101372s 2025-06-25T14:42:41.719099Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.101485s TServer::EnableGrpc on GrpcPort 7330, node 1 2025-06-25T14:42:41.857641Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:42.717068Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.717108Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.717115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.717295Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1575 TClient is connected to server localhost:1575 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.419354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.562434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.627727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.834012Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897459378654034:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.834115Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.842785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897437903815978:2114];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.842852Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:47.941007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.963557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.984229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.004982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.028411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.059259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.129569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.203717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519897472263556605:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.203776Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.203903Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472263556610:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.206472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.213345Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897472263556612:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.279853Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897472263556663:3444] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.104291Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476558524237:2490], status: GENERIC_ERROR, issues:
:2:22: Error: BATCH DELETE is unsupported with ON 2025-06-25T14:42:49.104514Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YmE0MjNiMTItNWU4MjU2YS02Mjc2ZTI2Mi01YjgyOTFhOQ==, ActorId: [1:7519897476558524228:2484], ActorState: ExecuteState, TraceId: 01jykrn7kyeh672eamcjee5q8g, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::UpdateOn [GOOD] Test command err: Trying to start YDB, gRPC: 14135, MsgBus: 3535 2025-06-25T14:42:40.750279Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897435128149851:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.750353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bcb/r3tmp/tmpVOwcdr/pdisk_1.dat 2025-06-25T14:42:41.648564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.648667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.650356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.676422Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897435128149832:2080] 1750862560749588 != 1750862560749591 2025-06-25T14:42:41.698835Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14135, node 1 2025-06-25T14:42:41.769114Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:42.724840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.724858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.724863Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.724964Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3535 TClient is connected to server localhost:3535 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.994456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.442139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:45.530270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.597738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:45.750255Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897435128149851:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.750326Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828597Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897456602987968:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828729Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.941065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.002798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.024756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.051361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.083837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.107829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.176522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.248190Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897469487890538:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.248237Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897469487890543:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.248240Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.250387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.256602Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897469487890545:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.311778Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897469487890596:3440] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.104283Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897473782858171:2490], status: GENERIC_ERROR, issues:
:2:22: Error: BATCH UPDATE is unsupported with ON 2025-06-25T14:42:49.104540Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NzY4YTU3NmQtYWUzNmJiNmYtN2E4ODhiZmQtMWM1ZjIxZjg=, ActorId: [1:7519897473782858162:2484], ActorState: ExecuteState, TraceId: 01jykrn7m2cjzc7sxemj69mrde, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Returning [GOOD] Test command err: Trying to start YDB, gRPC: 6483, MsgBus: 17653 2025-06-25T14:42:40.733348Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897435761649714:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bcc/r3tmp/tmpPt5ecZ/pdisk_1.dat 2025-06-25T14:42:41.537797Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.556949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.557059Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.559241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.730999Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 6483, node 1 2025-06-25T14:42:42.757082Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.757112Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.757118Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.757302Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17653 TClient is connected to server localhost:17653 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:42:45.000659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.398940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.503108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.546695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.733805Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897435761649714:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.733880Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828337Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897457236487646:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828573Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.962837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.981931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.005657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.028985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.061629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.093130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.146583Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897470121390204:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.146642Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897470121390209:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.146689Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.152414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.160655Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897470121390211:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.244163Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897470121390262:3440] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.104257Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897474416357837:2490], status: GENERIC_ERROR, issues:
:2:22: Error: BATCH UPDATE is unsupported with RETURNING 2025-06-25T14:42:49.105309Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=N2RlNTNkMDktNDUzMDAzOGYtMTkyNmJmZDUtYjZkNzk1ODk=, ActorId: [1:7519897474416357828:2484], ActorState: ExecuteState, TraceId: 01jykrn7m0aabhs2hxn6h86kq3, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::UnknownColumn [GOOD] Test command err: Trying to start YDB, gRPC: 26120, MsgBus: 32247 2025-06-25T14:42:40.737980Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897435485634878:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.738717Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c5e/r3tmp/tmp51MuLJ/pdisk_1.dat 2025-06-25T14:42:41.509928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.510035Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.512343Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.590998Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.719403Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.102027s 2025-06-25T14:42:41.719473Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.102111s TServer::EnableGrpc on GrpcPort 26120, node 1 2025-06-25T14:42:41.747500Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:42.721044Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.721080Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.721086Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.721221Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32247 TClient is connected to server localhost:32247 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.390012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.465362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:42:45.547934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:45.739003Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897435485634878:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.739076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828375Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897456960472808:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828464Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.964231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.984923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.009453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.042324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.076427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.103317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.155005Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897469845375365:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.155077Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.155181Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897469845375370:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.158153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.166828Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897469845375372:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.223298Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897469845375423:3438] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.135740Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897474140342997:2490], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:31: Error: At lambda, At function: Coalesce, At function: And
:3:37: Error: At function: ==
:3:23: Error: At function: Member
:3:23: Error: Member not found: UnknownColumn 2025-06-25T14:42:49.137964Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NGU0MTdhOTAtZTdhMWY5NTAtMzRmYjQ1OWUtYmIxODg2MWM=, ActorId: [1:7519897474140342988:2484], ActorState: ExecuteState, TraceId: 01jykrn7m1fzmqq1syybtd67wc, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::AuthEffectivePermissions [GOOD] Test command err: 2025-06-25T14:40:38.922988Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896912069728634:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:38.923079Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012fc/r3tmp/tmp5tn82w/pdisk_1.dat 2025-06-25T14:40:39.156961Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:39.165778Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 28632, node 1 2025-06-25T14:40:39.169843Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:40:39.170238Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:40:39.194116Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:39.194133Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:39.194144Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:39.194261Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:39.251016Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:39.251123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:39.253588Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11952 TClient is connected to server localhost:11952 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:39.522150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:39.929107Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:40.889692Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896920659664284:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:40.889701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896920659664292:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:40.889800Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:40.892128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:40.906642Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896920659664298:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:40:40.978322Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896920659664379:2745] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:41.155836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:41.943090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:40:42.215765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:42.534166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:42.839972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:40:43.129656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:40:43.380250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:43.426679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:40:43.923220Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896912069728634:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:43.923277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:40:45.911673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715705:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:40:48.317861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715734:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:40:48.393208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715735:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:48.750576Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519896955019406099:3012], owner: [1:7519896955019406096:3010], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 } 2025-06-25T14:40:48.750649Z node 1 :SYSTEM_VIEWS INFO: show_create.cpp:107: Scan prepared, actor: [1:7519896955019406099:3012] 2025-06-25T14:40:48.800300Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519896955019406099:3012], row count: 1, finished: 1 2025-06-25T14:40:48.800407Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519896955019406099:3012], owner: [1:7519896955019406096:3010], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 } 2025-06-25T14:40:51.428783Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519896968427325116:2077];sen ... 
List RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:46.280061Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools/default TableId: [72057594046644480:8:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindResourcePool DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:42:46.280124Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897463265675441:2346], row count: 5, finished: 0 2025-06-25T14:42:46.280435Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:46.281847Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-25T14:42:46.281899Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897463265675441:2346], row count: 1, finished: 0 2025-06-25T14:42:46.281959Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:46.282270Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [72057594046644480:4:1] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:42:46.282293Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897463265675441:2346], row count: 1, finished: 0 2025-06-25T14:42:46.282492Z node 41 :SYSTEM_VIEWS 
INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7519897463265675441:2346], owner: [41:7519897463265675438:2344], scan id: 0, sys view info: Type: EAuthEffectivePermissions SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:42:46.283432Z node 41 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [41:7519897433200902600:2144], database# , query hash# 11342553055430868283, cpu time# 114510 2025-06-25T14:42:46.283930Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862566272, txId: 281474976715676] shutting down 2025-06-25T14:42:46.385583Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jykrn4wk63bw3w91ba0jxsfn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=ZWE4Y2E0MzktMmYxMGZmZmQtYjU3ODVkOWUtZjdjOWYzOWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:42:46.388326Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [41:7519897463265675494:2355], owner: [41:7519897463265675491:2353], scan id: 0, sys view info: Type: EAuthEffectivePermissions SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 2025-06-25T14:42:46.389120Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [41:7519897463265675494:2355], schemeshard id: 72075186224037888, hive id: 72057594037968897, database: /Root/Tenant1, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 2], database node count: 2 2025-06-25T14:42:46.389143Z node 41 :SYSTEM_VIEWS DEBUG: auth_scan_base.h:100: ProceedToScan, tenant name: /Root/Tenant1 tenant owner: root@builtin subject sid: empty require admin access: 0 is admin: 1 2025-06-25T14:42:46.389206Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:46.389387Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1 TableId: [72075186224037888:1:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037889 Coordinators: 72075186224037890 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037891 Mediators: 72075186224037892 SchemeShard: 72075186224037888 SysViewProcessor: 72075186224037893 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 }] Groups: [] } Children [Dir2,Table1] }] } 2025-06-25T14:42:46.389428Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897463265675494:2355], row count: 1, finished: 0 2025-06-25T14:42:46.389519Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1/Dir2 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false 
ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:46.389639Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1/Dir2 TableId: [72075186224037888:3:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037889 Coordinators: 72075186224037890 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037891 Mediators: 72075186224037892 SchemeShard: 72075186224037888 SysViewProcessor: 72075186224037893 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-25T14:42:46.389671Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897463265675494:2355], row count: 2, finished: 0 2025-06-25T14:42:46.390088Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:210: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1/Table1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:42:46.390371Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:171: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Tenant1/Table1 TableId: [72075186224037888:2:1] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037889 Coordinators: 72075186224037890 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037891 Mediators: 72075186224037892 SchemeShard: 72075186224037888 SysViewProcessor: 72075186224037893 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:42:46.390396Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519897463265675494:2355], row count: 1, finished: 0 2025-06-25T14:42:46.390933Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7519897463265675494:2355], owner: [41:7519897463265675491:2353], scan id: 0, sys view info: Type: EAuthEffectivePermissions SourceObject { OwnerId: 72075186224037888 LocalId: 1 } 2025-06-25T14:42:46.391829Z node 41 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [41:7519897433200902600:2144], database# , query hash# 17325808444334437222, cpu time# 91841 2025-06-25T14:42:46.392241Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862566384, txId: 281474976715678] shutting down 2025-06-25T14:42:46.396466Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 44 2025-06-25T14:42:46.397152Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(44, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:46.397250Z node 44 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:42:46.397334Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 42 2025-06-25T14:42:46.397518Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:46.397750Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 45 2025-06-25T14:42:46.397744Z node 43 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:42:46.398457Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(45, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:46.399568Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 43 2025-06-25T14:42:46.400041Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(43, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:42:46.401275Z node 41 :HIVE WARN: hive_impl.cpp:944: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[43:7519897437859780676:2101], Type=268959746 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::MultiStatement [GOOD] Test command err: Trying to start YDB, gRPC: 12091, MsgBus: 21827 2025-06-25T14:42:40.733311Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897437453840094:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bdb/r3tmp/tmp6GDPpC/pdisk_1.dat 2025-06-25T14:42:41.523166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.523275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.586296Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.596365Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.729762Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 12091, node 1 2025-06-25T14:42:42.718783Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.718801Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.718825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.718979Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21827 TClient is connected to server localhost:21827 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.393937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.547146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.624945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.734033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897437453840094:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.734220Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.829304Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897458928678043:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.829380Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.964100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.986673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.015566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.038996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.066694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.133271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.189246Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471813580606:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.189322Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.189335Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471813580611:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.192349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.200828Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897471813580613:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.255508Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897471813580664:3437] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.164019Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476108548240:2490], status: GENERIC_ERROR, issues:
:5:32: Error: BATCH can't be used with multiple writes or reads. 2025-06-25T14:42:49.164244Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjU3ODFjMC01YTI2Zjc5NS1kY2M3ZDJiYS1kOTdiZjc5YQ==, ActorId: [1:7519897476108548231:2484], ActorState: ExecuteState, TraceId: 01jykrn7ky1eczkd3g1d5qrjv3, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:42:49.181173Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476108548244:2492], status: GENERIC_ERROR, issues:
:4:17: Error: BATCH can't be used with multiple writes or reads. 2025-06-25T14:42:49.182817Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjU3ODFjMC01YTI2Zjc5NS1kY2M3ZDJiYS1kOTdiZjc5YQ==, ActorId: [1:7519897476108548231:2484], ActorState: ExecuteState, TraceId: 01jykrn7ph26y1batwcw05tr35, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:42:49.199342Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476108548248:2494], status: GENERIC_ERROR, issues:
:4:17: Error: BATCH can't be used with multiple writes or reads. 2025-06-25T14:42:49.200971Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjU3ODFjMC01YTI2Zjc5NS1kY2M3ZDJiYS1kOTdiZjc5YQ==, ActorId: [1:7519897476108548231:2484], ActorState: ExecuteState, TraceId: 01jykrn7q41dwtnbq5g6abmce7, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:42:49.214348Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476108548252:2496], status: GENERIC_ERROR, issues:
:4:29: Error: BATCH can't be used with multiple writes or reads. 2025-06-25T14:42:49.214546Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjU3ODFjMC01YTI2Zjc5NS1kY2M3ZDJiYS1kOTdiZjc5YQ==, ActorId: [1:7519897476108548231:2484], ActorState: ExecuteState, TraceId: 01jykrn7qn8rjy345enwjdgm4g, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:42:49.229146Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476108548256:2498], status: GENERIC_ERROR, issues:
:4:29: Error: BATCH can't be used with multiple writes or reads. 2025-06-25T14:42:49.229446Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjU3ODFjMC01YTI2Zjc5NS1kY2M3ZDJiYS1kOTdiZjc5YQ==, ActorId: [1:7519897476108548231:2484], ActorState: ExecuteState, TraceId: 01jykrn7r249b47b1vj2fhgjce, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::MultiStatement [GOOD] Test command err: Trying to start YDB, gRPC: 25864, MsgBus: 23046 2025-06-25T14:42:40.733224Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897439182220044:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c05/r3tmp/tmpleTTvR/pdisk_1.dat 2025-06-25T14:42:41.526357Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.584926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.585024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.603742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.732412Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 25864, node 1 2025-06-25T14:42:42.716805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.716830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.716846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.716985Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23046 TClient is connected to server localhost:23046 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:42:44.994451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:45.400967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:45.481985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.518108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.733570Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897439182220044:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.733641Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.835889Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897460657057986:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.835985Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.963147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.984505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.004726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.027588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.061227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.128493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.209109Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897473541960553:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.209159Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.209383Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897473541960558:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.212098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.219379Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897473541960560:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.288536Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897473541960611:3440] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.163990Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897477836928186:2490], status: GENERIC_ERROR, issues:
:4:32: Error: BATCH can't be used with multiple writes or reads. 2025-06-25T14:42:49.164204Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZDQ4ZThjNjktZTczN2I4ZTItYjM5NDgxZS1lMTNlNWRjMg==, ActorId: [1:7519897477836928177:2484], ActorState: ExecuteState, TraceId: 01jykrn7kz0fdf2j9sg6wdax94, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:42:49.185401Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897477836928190:2492], status: GENERIC_ERROR, issues:
:4:17: Error: BATCH can't be used with multiple writes or reads. 2025-06-25T14:42:49.185540Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZDQ4ZThjNjktZTczN2I4ZTItYjM5NDgxZS1lMTNlNWRjMg==, ActorId: [1:7519897477836928177:2484], ActorState: ExecuteState, TraceId: 01jykrn7pma3gkqh3xgw956w69, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:42:49.199588Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897477836928194:2494], status: GENERIC_ERROR, issues:
:4:17: Error: BATCH can't be used with multiple writes or reads. 2025-06-25T14:42:49.199798Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZDQ4ZThjNjktZTczN2I4ZTItYjM5NDgxZS1lMTNlNWRjMg==, ActorId: [1:7519897477836928177:2484], ActorState: ExecuteState, TraceId: 01jykrn7q5f78zrstazknsk3rr, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:42:49.219380Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897477836928198:2496], status: GENERIC_ERROR, issues:
:4:29: Error: BATCH can't be used with multiple writes or reads. 2025-06-25T14:42:49.219540Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZDQ4ZThjNjktZTczN2I4ZTItYjM5NDgxZS1lMTNlNWRjMg==, ActorId: [1:7519897477836928177:2484], ActorState: ExecuteState, TraceId: 01jykrn7qkehvfaryd2kxafpve, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:42:49.229792Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897477836928202:2498], status: GENERIC_ERROR, issues:
:3:29: Error: BATCH can't be used with multiple writes or reads. 2025-06-25T14:42:49.229968Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZDQ4ZThjNjktZTczN2I4ZTItYjM5NDgxZS1lMTNlNWRjMg==, ActorId: [1:7519897477836928177:2484], ActorState: ExecuteState, TraceId: 01jykrn7r63m2t6nreeptg63n9, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::TableNotExists [GOOD] Test command err: Trying to start YDB, gRPC: 3781, MsgBus: 3549 2025-06-25T14:42:40.733828Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897437650543825:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733872Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bfd/r3tmp/tmp28R7QE/pdisk_1.dat 2025-06-25T14:42:41.590707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.590838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.612492Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.685385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.731048Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 3781, node 1 2025-06-25T14:42:42.719694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.719722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.719729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.719846Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3549 TClient is connected to server localhost:3549 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
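The repeated ':N:M: Error: BATCH can't be used with multiple writes or reads.' messages above belong to KqpBatchDelete::MultiStatement, which deliberately pairs a BATCH statement with another read or write in a single query. A hedged illustration of query text that would hit this check (identifiers invented for the sketch):

    BATCH DELETE FROM TestTable WHERE Key > 100;
    SELECT COUNT(*) FROM TestTable;  -- a second statement alongside BATCH triggers the GENERIC_ERROR above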
2025-06-25T14:42:44.993707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.424746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:45.555106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:45.634969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.756490Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897437650543825:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.757085Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.836792Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897459125381786:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.836856Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.004653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.026369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.047285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.076600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.103144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.134019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.178635Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472010284349:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.178704Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.178761Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472010284354:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.181438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.189309Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897472010284356:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.286915Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897472010284407:3439] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.176583Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476305251983:2490], status: SCHEME_ERROR, issues:
: Error: Pre type annotation, code: 1020
:3:34: Error: Cannot find table 'db.[/Root/TestBatchNotExists]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:42:49.176820Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YjczMjllMC0xYTYyOWJhZC1jYTNmOGE0LWJmNDcwYjQ1, ActorId: [1:7519897476305251974:2484], ActorState: ExecuteState, TraceId: 01jykrn7nv4gyssykg5dgjehvb, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:42:49.210342Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476305251996:2493], status: SCHEME_ERROR, issues:
: Error: Pre type annotation, code: 1020
:4:41: Error: Cannot find table 'db.[/Root/TestBatchNotExists]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:42:49.210495Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YjczMjllMC0xYTYyOWJhZC1jYTNmOGE0LWJmNDcwYjQ1, ActorId: [1:7519897476305251974:2484], ActorState: ExecuteState, TraceId: 01jykrn7px2z7ctjk03capcw6a, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::TableNotExists [GOOD] Test command err: Trying to start YDB, gRPC: 17986, MsgBus: 11896 2025-06-25T14:42:42.585730Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897446721749093:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:42.588504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bc0/r3tmp/tmp7zFHln/pdisk_1.dat 2025-06-25T14:42:42.907367Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:42.907638Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897446721749060:2080] 1750862562584228 != 1750862562584231 TServer::EnableGrpc on GrpcPort 17986, node 1 2025-06-25T14:42:42.947684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.947712Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.947720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.947840Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:42:42.998650Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:42.998761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:43.000618Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:43.600440Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11896 TClient is connected to server localhost:11896 WaitRootIsUp 'Root'... 
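The SCHEME_ERROR above (code 2003, 'Cannot find table') is the expected outcome of KqpBatchUpdate::TableNotExists: a BATCH UPDATE is aimed at a path that was never created. An illustrative query in that spirit, using the /Root/TestBatchNotExists path from the log (the SET and WHERE clauses are invented):

    BATCH UPDATE `/Root/TestBatchNotExists` SET Value = "missing" WHERE Key = 1;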
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.999134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.406457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.528956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.606610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.828347Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897459606652589:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828489Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.585205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897446721749093:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:47.592530Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:47.941066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.006349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.031903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.053256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.076717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.101914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.175561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.218927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519897472491555153:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.218983Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.219047Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472491555158:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.222010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.229626Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897472491555160:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.300782Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897472491555211:3435] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.116098Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476786522785:2485], status: SCHEME_ERROR, issues:
: Error: Pre type annotation, code: 1020
:2:35: Error: Cannot find table 'db.[/Root/TestBatchNotExists]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:42:49.116328Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjdlYjZmZTQtMmNkOTM0ZmQtOWNkNTIwNzYtMzhjNGNkZjA=, ActorId: [1:7519897476786522776:2479], ActorState: ExecuteState, TraceId: 01jykrn7ky2ar8dd58r7j402jr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:42:49.160708Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476786522798:2488], status: SCHEME_ERROR, issues:
: Error: Pre type annotation, code: 1020
:3:41: Error: Cannot find table 'db.[/Root/TestBatchNotExists]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:42:49.160919Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjdlYjZmZTQtMmNkOTM0ZmQtOWNkNTIwNzYtMzhjNGNkZjA=, ActorId: [1:7519897476786522776:2479], ActorState: ExecuteState, TraceId: 01jykrn7n31y9mvn8ng24mek7e, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::NotIdempotent [GOOD] Test command err: Trying to start YDB, gRPC: 17990, MsgBus: 26025 2025-06-25T14:42:43.780573Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897450729049211:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:43.780695Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bbd/r3tmp/tmp93wpd6/pdisk_1.dat 2025-06-25T14:42:43.983112Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:43.983378Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897450729049190:2080] 1750862563779596 != 1750862563779599 TServer::EnableGrpc on GrpcPort 17990, node 1 2025-06-25T14:42:44.025984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:44.026011Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:44.026019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:44.026146Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:42:44.121199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:44.121298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:44.122975Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26025 TClient is connected to server localhost:26025 WaitRootIsUp 'Root'... 
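KqpBatchDelete::TableNotExists above exercises the same 'Cannot find table' path with the delete form; a matching sketch (WHERE clause invented):

    BATCH DELETE FROM `/Root/TestBatchNotExists` WHERE Key = 1;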
TClient::Ls request: Root 2025-06-25T14:42:44.794171Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.427278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:45.639130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:45.731664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:46.137171Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897463613952707:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:46.137280Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.967572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.018879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.048801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.075818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.105536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.141650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.194293Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472203887968:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.194353Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.194387Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472203887973:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.197273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.205208Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897472203887975:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:48.261705Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897472203888026:3430] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:48.780496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897450729049211:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:48.780540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:49.175715Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476498855602:2482], status: GENERIC_ERROR, issues:
: Error: Table intent determination, code: 1040
:3:43: Error: Batch update is only supported for idempotent updates. 2025-06-25T14:42:49.175903Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTVlZmY3ZWEtZThkOTExYWYtNTk1ODA4MWUtNDc3MDNhMjk=, ActorId: [1:7519897476498855593:2476], ActorState: ExecuteState, TraceId: 01jykrn7nr5hxcweqts3mz1w5h, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:42:49.194113Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476498855606:2484], status: GENERIC_ERROR, issues:
: Error: Table intent determination, code: 1040
:3:43: Error: Batch update is only supported for idempotent updates. 2025-06-25T14:42:49.194296Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTVlZmY3ZWEtZThkOTExYWYtNTk1ODA4MWUtNDc3MDNhMjk=, ActorId: [1:7519897476498855593:2476], ActorState: ExecuteState, TraceId: 01jykrn7pv4150x12cwtvz8a1f, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:42:49.209022Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897476498855610:2486], status: GENERIC_ERROR, issues:
: Error: Table intent determination, code: 1040
:3:51: Error: Batch update is only supported for idempotent updates. 2025-06-25T14:42:49.209200Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTVlZmY3ZWEtZThkOTExYWYtNTk1ODA4MWUtNDc3MDNhMjk=, ActorId: [1:7519897476498855593:2476], ActorState: ExecuteState, TraceId: 01jykrn7qed8sdhdheczm5v3ee, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::HasTxControl [GOOD] Test command err: Trying to start YDB, gRPC: 18695, MsgBus: 5253 2025-06-25T14:42:40.733329Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897435789111190:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733382Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bce/r3tmp/tmpPNXwsA/pdisk_1.dat 2025-06-25T14:42:41.528555Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897435789110998:2080] 1750862560712420 != 1750862560712423 2025-06-25T14:42:41.535444Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.563981Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.564082Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.565663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.722099Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.104188s 2025-06-25T14:42:41.722167Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.104266s 2025-06-25T14:42:41.730737Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 18695, node 1 2025-06-25T14:42:42.720820Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.720846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.720851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.725896Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5253 TClient is connected to server localhost:5253 WaitRootIsUp 'Root'... 
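The 'Batch update is only supported for idempotent updates.' errors above (KqpBatchUpdate::NotIdempotent, code 1040) are raised when the SET clause reads the current row value, so re-applying a batch chunk would change the result. A rough contrast between a rejected and an accepted form (identifiers invented):

    BATCH UPDATE TestTable SET Amount = Amount + 1 WHERE Key > 100;  -- rejected: result depends on the previous value
    BATCH UPDATE TestTable SET Amount = 42 WHERE Key > 100;          -- accepted: same effect however many times it is applied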
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.405906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.511940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.569370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.736037Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897435789111190:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.736105Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897457263949151:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828593Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.967058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.993704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.016008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.039611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.067903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.101070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.184136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897470148851714:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.184197Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.184322Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897470148851719:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.187354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.199183Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897470148851721:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.278802Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897470148851772:3441] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.470074Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MzA1ZWY2YWEtYmQ2ZjU4NDItODE0M2Q1MTEtMzZiMjQzYTE=, ActorId: [1:7519897474443819339:2484], ActorState: ExecuteState, TraceId: 01jykrn7ps7pzrg53cdfnn1zpp, Create QueryResponse for error on request, msg: BATCH operation can be executed only in NoTx mode. >> TxUsage::WriteToTopic_Demo_26_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::HasTxControl [GOOD] Test command err: Trying to start YDB, gRPC: 5655, MsgBus: 9753 2025-06-25T14:42:40.734524Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897438164755293:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.735376Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bd1/r3tmp/tmpT1PQ2M/pdisk_1.dat 2025-06-25T14:42:41.605515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.605613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.605742Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.665335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.731689Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 5655, node 1 2025-06-25T14:42:42.719216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.719242Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.719250Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.719363Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9753 TClient is connected to server localhost:9753 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.459479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.594748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.665224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.736158Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897438164755293:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.742767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828491Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897459639593237:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828629Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.964724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.989923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.014053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.038356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.075333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.109517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.192649Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472524495799:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.192719Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.192818Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472524495804:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.195699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.203389Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897472524495806:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:48.291346Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897472524495857:3437] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.563842Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=OTkwYzc4NzQtZWI5MmM0OGMtZDE3MDcyY2ItODkwNDk5MTY=, ActorId: [1:7519897476819463424:2484], ActorState: ExecuteState, TraceId: 01jykrn7nhcepyaa6bzrr0758g, Create QueryResponse for error on request, msg: BATCH operation can be executed only in NoTx mode. ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoServerlessTwoSharedDbs [GOOD] Test command err: 2025-06-25T14:38:47.591041Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:428:2310], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:47.591456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:38:47.591570Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c7d/r3tmp/tmpHvur9F/pdisk_1.dat 2025-06-25T14:38:47.936505Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63012, node 1 2025-06-25T14:38:48.158553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:48.158621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:48.158654Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:48.159161Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:48.161849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:48.263302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:48.263456Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:48.278302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3321 2025-06-25T14:38:48.887279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:52.010733Z node 3 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 3 2025-06-25T14:38:52.039932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:52.040055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:52.099386Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:38:52.101246Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:52.288245Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:52.323335Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.323838Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.324253Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.324384Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.324530Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.324574Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.324661Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.324718Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.324759Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:52.522616Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:52.522747Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:52.537407Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:52.687109Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:52.720551Z node 3 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:52.720638Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:52.757477Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:52.757634Z node 3 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:52.757883Z node 3 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:52.757939Z node 3 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:52.758010Z node 3 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:52.758076Z node 3 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:52.758131Z node 3 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:52.758184Z node 3 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:52.758551Z node 3 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:52.781945Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:52.782044Z node 3 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [3:1875:2563], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:52.789617Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:1887:2572] 2025-06-25T14:38:52.792285Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:1903:2582] 2025-06-25T14:38:52.792963Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [3:1903:2582], schemeshard id = 72075186224037897 2025-06-25T14:38:52.800097Z node 3 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared1 2025-06-25T14:38:52.819471Z node 3 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:52.819524Z node 3 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:52.819598Z node 3 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared1/.metadata/_statistics 2025-06-25T14:38:52.832066Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:52.837751Z node 3 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:52.837854Z node 3 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:53.024060Z node 3 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:53.167408Z node 3 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:53.272443Z node 3 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T14:38:53.899157Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:54.019705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:57.060838Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:57.099466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:57.099559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:57.149638Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:57.151630Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:57.339007Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:57.339486Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:57.340037Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:57.340201Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:57.340325Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:57.340598Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:57.340700Z node 2 :HIVE WARN: tx__create ... 
txid# 281474976730659, issues: { message: "Check failed: path: \'/Root/Shared2/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224038898, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:43.167293Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:12597:5285]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:42:43.167703Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:42:43.167805Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:12599:5287] 2025-06-25T14:42:43.167880Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:12599:5287] 2025-06-25T14:42:43.168545Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224038895] EvServerConnected, pipe server id = [2:12600:5288] 2025-06-25T14:42:43.168757Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224038895] EvConnectNode, pipe server id = [2:12600:5288], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T14:42:43.168827Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224038895] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T14:42:43.169067Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:12599:5287], server id = [2:12600:5288], tablet id = 72075186224038895, status = OK 2025-06-25T14:42:43.169184Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:42:43.169246Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:12597:5285], StatRequests.size() = 1 2025-06-25T14:42:43.281925Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZmQzMjcxNDItNWJiYjBjMGQtYmViMTY2OGEtYjlkNjlkOTc=, TxId: 2025-06-25T14:42:43.281984Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZmQzMjcxNDItNWJiYjBjMGQtYmViMTY2OGEtYjlkNjlkOTc=, TxId: 2025-06-25T14:42:43.282546Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224038895] TTxFinishTraversal::Execute 2025-06-25T14:42:43.297948Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224038895] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224038898, LocalPathId: 3] 2025-06-25T14:42:43.297996Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224038895] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:42:43.357360Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224038895] EvFastPropagateCheck 2025-06-25T14:42:43.357432Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224038895] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T14:42:43.458885Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:12599:5287], schemeshard count = 1 2025-06-25T14:42:44.033413Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-25T14:42:44.033476Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 197.000000s, at schemeshard: 72075186224037899 2025-06-25T14:42:44.033674Z node 3 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 26 2025-06-25T14:42:44.049170Z node 3 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T14:42:44.319541Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:42:44.330607Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:42:44.330661Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:42:44.330697Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is data table. 2025-06-25T14:42:44.330721Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T14:42:44.330897Z node 3 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared1 2025-06-25T14:42:44.332983Z node 3 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:42:44.342729Z node 3 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 3, interval end# 1970-01-01T00:02:04.000000Z, event interval end# 2025-06-25T14:42:42.000000Z 2025-06-25T14:42:44.343428Z node 3 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=MmRiZWI4ZDUtMWY0MDU5MzEtZmE1ODlkNDItZTc5NjAwODY=, TxId: 2025-06-25T14:42:44.343470Z node 3 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=MmRiZWI4ZDUtMWY0MDU5MzEtZmE1ODlkNDItZTc5NjAwODY=, TxId: 2025-06-25T14:42:44.343947Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:42:44.357993Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T14:42:44.358033Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T14:42:44.514131Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [3:12697:5611]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:42:44.514410Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ] 2025-06-25T14:42:44.514448Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [3:12697:5611], StatRequests.size() = 1 2025-06-25T14:42:46.484051Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [3:12770:5639]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:42:46.484349Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-25T14:42:46.484389Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [3:12770:5639], StatRequests.size() = 1 2025-06-25T14:42:46.956738Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224038900 2025-06-25T14:42:46.956802Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 220.000000s, at schemeshard: 72075186224038900 2025-06-25T14:42:46.957049Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224038895] TTxSchemeShardStats::Execute: schemeshard id# 72075186224038900, stats size# 26 2025-06-25T14:42:46.970991Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224038895] TTxSchemeShardStats::Complete 2025-06-25T14:42:47.226565Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224038895] ScheduleNextTraversal 2025-06-25T14:42:47.226635Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224038895] ScheduleNextTraversal. No force traversals. 2025-06-25T14:42:47.226674Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224038895] IsColumnTable. Path [OwnerId: 72075186224038900, LocalPathId: 2] is data table. 2025-06-25T14:42:47.226704Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224038895] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224038900, LocalPathId: 2] 2025-06-25T14:42:47.227046Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared2 2025-06-25T14:42:47.229057Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T14:42:47.239755Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTI0OTBhMjYtY2ExYjUzMjItMjNiOGNhMTEtNTMwMDY1ZTE=, TxId: 2025-06-25T14:42:47.239798Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NTI0OTBhMjYtY2ExYjUzMjItMjNiOGNhMTEtNTMwMDY1ZTE=, TxId: 2025-06-25T14:42:47.240295Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224038895] TTxFinishTraversal::Execute 2025-06-25T14:42:47.255166Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224038895] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224038900, LocalPathId: 2] 2025-06-25T14:42:47.255206Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224038895] TTxFinishTraversal::Complete. 
No ActorId to send reply. 2025-06-25T14:42:48.235458Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 3 2025-06-25T14:42:48.236089Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:42:48.236620Z node 3 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 3 2025-06-25T14:42:48.248035Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:42:48.248082Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:42:48.422997Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [3:12867:5658]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:42:48.423365Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-25T14:42:48.423409Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [3:12867:5658], StatRequests.size() = 1 2025-06-25T14:42:48.424341Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:12869:5376]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:42:48.428509Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T14:42:48.428780Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224038895] EvRequestStats, node id = 2, schemeshard count = 1, urgent = 0 2025-06-25T14:42:48.428819Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224038895] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T14:42:48.429045Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:42:48.429111Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:12869:5376], StatRequests.size() = 1 >> TxUsage::WriteToTopic_Demo_26_Query >> IncrementalBackup::BackupRestore [GOOD] >> IncrementalBackup::ComplexRestoreBackupCollection+WithIncremental >> TxUsage::WriteToTopic_Demo_22_RestartNo_Table [GOOD] >> KqpBatchDelete::TableWithIndex |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::ManyPartitions_2 >> KqpBatchDelete::SimpleOnePartition >> KqpBatchDelete::Large_2 >> KqpBatchUpdate::Large_1 >> KqpBatchUpdate::Large_2 >> TxUsage::WriteToTopic_Demo_35_Query [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartNo_Query |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> TxUsage::WriteToTopic_Demo_36_Table >> TxUsage::WriteToTopic_Demo_6_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_5_Query [GOOD] >> KqpBatchUpdate::TableWithIndex [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_1_Table >> TxUsage::WriteToTopic_Demo_7_Table >> KqpBatchDelete::ColumnTable [GOOD] >> KqpBatchUpdate::ColumnTable [GOOD] >> TxUsage::WriteToTopic_Demo_43_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::TableWithIndex [GOOD] Test command err: Trying to start YDB, gRPC: 65270, MsgBus: 12589 2025-06-25T14:42:40.733189Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897436740871128:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733246Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bdd/r3tmp/tmpwAc8Q0/pdisk_1.dat 2025-06-25T14:42:41.568125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.568246Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.572715Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.574901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.729835Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 65270, node 1 2025-06-25T14:42:42.716911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.716938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.716948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.717078Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12589 TClient is connected to server localhost:12589 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:42:45.123414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.389175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.470910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.539912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.733698Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897436740871128:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.733758Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828461Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897458215709091:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.835106Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.941038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.965852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.987741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.013361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.036389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.063797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.128673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.205974Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471100611653:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.206031Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.206092Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471100611658:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.208856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.216338Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897471100611660:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.317539Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897471100611711:3441] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.122204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:49.169177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:49.238223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:50.814614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::ColumnTable [GOOD] Test command err: Trying to start YDB, gRPC: 2794, MsgBus: 6849 2025-06-25T14:42:40.733072Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897437173630757:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733111Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: 
SetPath # /home/runner/.ya/build/build_root/yft8/000c18/r3tmp/tmpRgRP16/pdisk_1.dat 2025-06-25T14:42:41.568493Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.568606Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.590957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.628376Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.730910Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 2794, node 1 2025-06-25T14:42:42.720939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.720962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.720968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.721095Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6849 TClient is connected to server localhost:6849 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.408801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.551095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.634195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.739576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897437173630757:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.740252Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828409Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897458648468687:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828530Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.963777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.985105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.007501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.031674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.063112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.092078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.148212Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471533371244:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.148284Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.148524Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471533371249:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.152416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.160827Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897471533371251:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.239506Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897471533371302:3435] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.156631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:42:49.230839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7519897475828338931:2493];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:42:49.231066Z node 1 :TX_COLUMNSHARD WARN: ... events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.788418Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710678 2025-06-25T14:42:53.788487Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897475828339433:2507];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.788572Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897475828339718:2551];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.788625Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897475828339718:2551];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.789942Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2725: SelfId: [1:7519897484418276322:2484], SessionActorId: [1:7519897484418276310:2484], Got BAD REQUEST for tables. ShardID=72075186224037931, Sink=[1:7519897484418276322:2484].{
: Error: not initialized lock info in commit message, code: 2017 } 2025-06-25T14:42:53.790003Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519897484418276322:2484], SessionActorId: [1:7519897484418276310:2484], statusCode=BAD_REQUEST. Issue=
: Error: Bad request. Table: `/Root/TestOlap`., code: 2017
: Error: not initialized lock info in commit message, code: 2017 . sessionActorId=[1:7519897484418276310:2484]. isRollback=0 2025-06-25T14:42:53.792106Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2725: SelfId: [1:7519897484418276320:2484], SessionActorId: [1:7519897484418276310:2484], Got BAD REQUEST for tables. ShardID=72075186224037931, Sink=[1:7519897484418276320:2484].{
: Error: not initialized lock info in commit message, code: 2017 } 2025-06-25T14:42:53.792164Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519897484418276320:2484], SessionActorId: [1:7519897484418276310:2484], statusCode=BAD_REQUEST. Issue=
: Error: Bad request. Table: `/Root/TestOlap`., code: 2017
: Error: not initialized lock info in commit message, code: 2017 . sessionActorId=[1:7519897484418276310:2484]. isRollback=0 2025-06-25T14:42:53.792250Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2725: SelfId: [1:7519897484418276324:2484], SessionActorId: [1:7519897484418276310:2484], Got BAD REQUEST for tables. ShardID=72075186224037931, Sink=[1:7519897484418276324:2484].{
: Error: not initialized lock info in commit message, code: 2017 } 2025-06-25T14:42:53.792291Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519897484418276324:2484], SessionActorId: [1:7519897484418276310:2484], statusCode=BAD_REQUEST. Issue=
: Error: Bad request. Table: `/Root/TestOlap`., code: 2017
: Error: not initialized lock info in commit message, code: 2017 . sessionActorId=[1:7519897484418276310:2484]. isRollback=0 2025-06-25T14:42:53.792555Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710681 2025-06-25T14:42:53.792590Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897475828339718:2551];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.792651Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897475828339433:2507];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.792652Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897475828339718:2551];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.792697Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897475828339718:2551];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.792721Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710680 2025-06-25T14:42:53.792763Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897475828339433:2507];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.792819Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710682 2025-06-25T14:42:53.792896Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897475828339433:2507];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.794727Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2725: SelfId: [1:7519897484418276326:2484], SessionActorId: [1:7519897484418276310:2484], Got BAD REQUEST for tables. ShardID=72075186224037931, Sink=[1:7519897484418276326:2484].{
: Error: not initialized lock info in commit message, code: 2017 } 2025-06-25T14:42:53.794802Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519897484418276326:2484], SessionActorId: [1:7519897484418276310:2484], statusCode=BAD_REQUEST. Issue=
: Error: Bad request. Table: `/Root/TestOlap`., code: 2017
: Error: not initialized lock info in commit message, code: 2017 . sessionActorId=[1:7519897484418276310:2484]. isRollback=0 2025-06-25T14:42:53.795016Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710683 2025-06-25T14:42:53.795105Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897475828339433:2507];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.795180Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897475828339718:2551];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.796902Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2725: SelfId: [1:7519897484418276328:2484], SessionActorId: [1:7519897484418276310:2484], Got BAD REQUEST for tables. ShardID=72075186224037931, Sink=[1:7519897484418276328:2484].{
: Error: not initialized lock info in commit message, code: 2017 } 2025-06-25T14:42:53.796964Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519897484418276328:2484], SessionActorId: [1:7519897484418276310:2484], statusCode=BAD_REQUEST. Issue=
: Error: Bad request. Table: `/Root/TestOlap`., code: 2017
: Error: not initialized lock info in commit message, code: 2017 . sessionActorId=[1:7519897484418276310:2484]. isRollback=0 2025-06-25T14:42:53.797083Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2725: SelfId: [1:7519897484418276330:2484], SessionActorId: [1:7519897484418276310:2484], Got BAD REQUEST for tables. ShardID=72075186224037931, Sink=[1:7519897484418276330:2484].{
: Error: not initialized lock info in commit message, code: 2017 } 2025-06-25T14:42:53.797122Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519897484418276330:2484], SessionActorId: [1:7519897484418276310:2484], statusCode=BAD_REQUEST. Issue=
: Error: Bad request. Table: `/Root/TestOlap`., code: 2017
: Error: not initialized lock info in commit message, code: 2017 . sessionActorId=[1:7519897484418276310:2484]. isRollback=0 2025-06-25T14:42:53.797218Z node 1 :KQP_EXECUTER ERROR: kqp_partitioned_executer.cpp:886: [PARTITIONED] ActorId: [1:7519897484418276310:2484], ActorState: AbortState, INTERNAL_ERROR: 2025-06-25T14:42:53.797251Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710684 2025-06-25T14:42:53.797319Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897475828339433:2507];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.797401Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897475828339718:2551];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.799256Z node 1 :KQP_SESSION ERROR: kqp_session_actor.cpp:2561: SessionId: ydb://session/3?node_id=1&id=N2IzYzc2YTctZTg5YzAyZDQtZjRjNTY0MjctM2FmMWQzMTc=, ActorId: [1:7519897475828338868:2484], ActorState: CleanupState, TraceId: 01jykrn9ym3rwhtnffrnqjg3d1, Failed to cleanup: 2025-06-25T14:42:53.800031Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037983 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976710685 2025-06-25T14:42:53.800046Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897475828339718:2551];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:53.800114Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897475828339433:2507];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; >> IncrementalBackup::SimpleBackupBackupCollection+WithIncremental [GOOD] >> IncrementalBackup::SimpleBackupBackupCollection-WithIncremental >> TxUsage::WriteToTopic_Demo_43_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::ColumnTable [GOOD] Test command err: Trying to start YDB, gRPC: 2981, MsgBus: 4642 2025-06-25T14:42:40.733308Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897437453258646:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c44/r3tmp/tmpoFaKN0/pdisk_1.dat 2025-06-25T14:42:41.583212Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not 
loaded 2025-06-25T14:42:41.586517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.586690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.589505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.717634Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.100012s 2025-06-25T14:42:41.717702Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.100092s 2025-06-25T14:42:41.730945Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 2981, node 1 2025-06-25T14:42:42.718422Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.718442Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.718467Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.718567Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4642 TClient is connected to server localhost:4642 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.994086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.415589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.515225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.592094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.737484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897437453258646:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.760984Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.829501Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897458928096590:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.829646Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.963776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.984755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.007691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.030618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.056069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.121262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.215903Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471812999158:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.215966Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.216026Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471812999163:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.219223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.225459Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897471812999165:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:48.282402Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897471812999216:3437] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.200513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/creat ... :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519897488992871548:2483], SessionActorId: [1:7519897484697904232:2483], statusCode=BAD_REQUEST. Issue=
: Error: Bad request. Table: `/Root/TestOlap`., code: 2017
: Error: not initialized lock info in commit message, code: 2017 . sessionActorId=[1:7519897484697904232:2483]. isRollback=0 2025-06-25T14:42:54.212115Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897476107967504:2531];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212160Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037931 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715679 2025-06-25T14:42:54.212198Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897476107967504:2531];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212249Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037931 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715680 2025-06-25T14:42:54.212257Z node 1 :KQP_EXECUTER ERROR: kqp_partitioned_executer.cpp:886: [PARTITIONED] ActorId: [1:7519897484697904232:2483], ActorState: ExecuteState, INTERNAL_ERROR: {
: Error: ExecuteState, from BufferWriteActor by PartitionedExecuterActor } 2025-06-25T14:42:54.212285Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897476107967504:2531];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212353Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037931 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715684 2025-06-25T14:42:54.212391Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897476107967504:2531];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212438Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037931 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715685 2025-06-25T14:42:54.212473Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519897476107967504:2531];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037931;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212549Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519897488992871541:2483] TxId: 281474976715681. Ctx: { TraceId: 01jykrna2g7brgvh68p55fqg52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU2NzE5MzMtYjA5ODdmYWYtNzEwOTllOTAtYWY1NzBiYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: got error from BufferWriteActor } 2025-06-25T14:42:54.212555Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897476107967317:2503];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212601Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897476107967317:2503];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212644Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897476107967317:2503];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212685Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897476107967317:2503];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212726Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897476107967317:2503];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212751Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519897488992871549:2483] TxId: 281474976715685. Ctx: { TraceId: 01jykrna2g7brgvh68p55fqg52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU2NzE5MzMtYjA5ODdmYWYtNzEwOTllOTAtYWY1NzBiYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-25T14:42:54.212767Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897476107967317:2503];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212817Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897476107967317:2503];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212856Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897476107967317:2503];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212857Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519897488992871547:2483] TxId: 281474976715684. Ctx: { TraceId: 01jykrna2g7brgvh68p55fqg52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU2NzE5MzMtYjA5ODdmYWYtNzEwOTllOTAtYWY1NzBiYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-25T14:42:54.212897Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897476107967317:2503];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212938Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[1:7519897476107967317:2503];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:42:54.212974Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519897488992871545:2483] TxId: 281474976715683. Ctx: { TraceId: 01jykrna2g7brgvh68p55fqg52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU2NzE5MzMtYjA5ODdmYWYtNzEwOTllOTAtYWY1NzBiYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-25T14:42:54.213084Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519897488992871533:2483] TxId: 281474976715677. Ctx: { TraceId: 01jykrna2g7brgvh68p55fqg52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU2NzE5MzMtYjA5ODdmYWYtNzEwOTllOTAtYWY1NzBiYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-25T14:42:54.213184Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519897488992871543:2483] TxId: 281474976715682. Ctx: { TraceId: 01jykrna2g7brgvh68p55fqg52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU2NzE5MzMtYjA5ODdmYWYtNzEwOTllOTAtYWY1NzBiYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-25T14:42:54.213279Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519897488992871537:2483] TxId: 281474976715679. Ctx: { TraceId: 01jykrna2g7brgvh68p55fqg52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU2NzE5MzMtYjA5ODdmYWYtNzEwOTllOTAtYWY1NzBiYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-25T14:42:54.213374Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519897488992871531:2483] TxId: 281474976715676. Ctx: { TraceId: 01jykrna2g7brgvh68p55fqg52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU2NzE5MzMtYjA5ODdmYWYtNzEwOTllOTAtYWY1NzBiYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-25T14:42:54.213474Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519897488992871535:2483] TxId: 281474976715678. Ctx: { TraceId: 01jykrna2g7brgvh68p55fqg52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU2NzE5MzMtYjA5ODdmYWYtNzEwOTllOTAtYWY1NzBiYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-25T14:42:54.213570Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519897488992871539:2483] TxId: 281474976715680. Ctx: { TraceId: 01jykrna2g7brgvh68p55fqg52, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU2NzE5MzMtYjA5ODdmYWYtNzEwOTllOTAtYWY1NzBiYjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Aborted by PartitionedExecuterActor, reason: runtime error } 2025-06-25T14:42:54.258974Z node 1 :KQP_EXECUTER ERROR: kqp_partitioned_executer.cpp:886: [PARTITIONED] ActorId: [1:7519897484697904232:2483], ActorState: AbortState, INTERNAL_ERROR: 2025-06-25T14:42:54.260813Z node 1 :KQP_SESSION ERROR: kqp_session_actor.cpp:2561: SessionId: ydb://session/3?node_id=1&id=YjU2NzE5MzMtYjA5ODdmYWYtNzEwOTllOTAtYWY1NzBiYjk=, ActorId: [1:7519897476107966781:2483], ActorState: CleanupState, TraceId: 01jykrna2g7brgvh68p55fqg52, Failed to cleanup: |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.2%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Query [GOOD] |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Table [GOOD] |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Table |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> KqpWorkload::KV [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> SystemView::PartitionStatsTtlFields [GOOD] >> SystemView::PartitionStatsLocksFields |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} 
ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/perf/unittest >> KqpWorkload::KV [GOOD] Test command err: Trying to start YDB, gRPC: 10711, MsgBus: 17278 2025-06-25T14:41:38.414614Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897171310240586:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:38.431862Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b3c/r3tmp/tmp7LzZoT/pdisk_1.dat 2025-06-25T14:41:39.094169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:39.094256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:39.110687Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:39.125056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10711, node 1 2025-06-25T14:41:39.404940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:41:39.404966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:41:39.404973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:41:39.405107Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:39.436935Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17278 TClient is connected to server localhost:17278 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:41:40.681802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:40.698041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:41:42.586915Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897188490110314:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.587007Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:42.891710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:43.455339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897171310240586:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:43.458140Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:43.618692Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897192785079254:2421], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.618828Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.625199Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897192785079259:2424], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:43.631575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:43.654330Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897192785079261:2425], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:41:43.726515Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897192785079312:3366] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:54.028422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:41:54.028456Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded took: 0.133018s took: 0.136614s took: 0.140567s took: 0.141388s took: 0.141097s took: 0.144448s took: 0.126730s took: 0.166070s took: 0.165948s took: 0.166974s took: 0.120217s took: 0.123599s took: 0.130808s took: 0.132998s took: 0.134570s took: 0.134766s took: 0.138292s took: 0.138380s took: 0.138400s took: 0.134024s took: 0.204191s took: 0.207052s took: 0.208833s took: 0.210146s took: 0.210737s took: 0.216028s took: 0.217535s took: 0.217995s took: 0.219451s took: 0.219831s took: 0.052654s took: 0.052690s took: 0.052703s took: 0.052843s took: 0.053637s took: 0.054002s took: 0.054354s took: 0.054762s took: 0.055055s took: 0.056206s took: 0.161633s took: 0.169590s took: 0.170289s took: 0.174331s took: 0.173910s took: 0.160401s took: 0.175131s took: 0.176328s took: 0.176705s took: 0.174847s 2025-06-25T14:42:58.738392Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037907 not found 2025-06-25T14:42:58.738429Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037922 not found 2025-06-25T14:42:58.739398Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037905 not found 2025-06-25T14:42:58.748127Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037915 not found 2025-06-25T14:42:58.748164Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037926 not found 2025-06-25T14:42:58.748179Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037900 not found 2025-06-25T14:42:58.748193Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found 2025-06-25T14:42:58.748210Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037917 not found 2025-06-25T14:42:58.748223Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037914 not found 2025-06-25T14:42:58.748236Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found 2025-06-25T14:42:58.748250Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037908 not found 2025-06-25T14:42:58.749478Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 
2025-06-25T14:42:58.749628Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037913 not found 2025-06-25T14:42:58.759629Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037912 not found 2025-06-25T14:42:58.759672Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037904 not found 2025-06-25T14:42:58.759686Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found 2025-06-25T14:42:58.759701Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-25T14:42:58.759714Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037927 not found 2025-06-25T14:42:58.759728Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-25T14:42:58.759741Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037896 not found 2025-06-25T14:42:58.759755Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037919 not found 2025-06-25T14:42:58.759775Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037923 not found 2025-06-25T14:42:58.759790Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037901 not found 2025-06-25T14:42:58.759804Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037906 not found 2025-06-25T14:42:58.759818Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037911 not found 2025-06-25T14:42:58.759838Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037899 not found 2025-06-25T14:42:58.759856Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037909 not found 2025-06-25T14:42:58.759887Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037895 not found 2025-06-25T14:42:58.759908Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037902 not found 2025-06-25T14:42:58.759922Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037918 not found 2025-06-25T14:42:58.759936Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037903 not found 2025-06-25T14:42:58.759950Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:42:58.759964Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 
Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037921 not found 2025-06-25T14:42:58.759978Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037924 not found 2025-06-25T14:42:58.759990Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037925 not found 2025-06-25T14:42:58.760004Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037910 not found 2025-06-25T14:42:58.760071Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037916 not found 2025-06-25T14:42:58.760087Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037920 not found 2025-06-25T14:42:58.761370Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037894 not found 2025-06-25T14:42:58.763868Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TConsoleConfigSubscriptionTests::TestConfigSubscriptionsCleanup [GOOD] >> TConsoleConfigTests::TestAddConfigItem >> IncrementalBackup::ComplexRestoreBackupCollection+WithIncremental [FAIL] >> IncrementalBackup::ComplexRestoreBackupCollection-WithIncremental |84.4%| [TA] $(B)/ydb/core/kqp/ut/perf/test-results/unittest/{meta.json ... results_accumulator.log} |84.4%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/perf/test-results/unittest/{meta.json ... results_accumulator.log} >> IncrementalBackup::SimpleBackupBackupCollection-WithIncremental [GOOD] |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> KqpBatchDelete::TableWithIndex [GOOD] >> ExternalIndex::Simple |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::SimpleBackupBackupCollection-WithIncremental [GOOD] Test command err: 2025-06-25T14:42:43.469397Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:42:43.469603Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:42:43.469677Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000949/r3tmp/tmpedArQP/pdisk_1.dat 2025-06-25T14:42:44.888347Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.130065s 2025-06-25T14:42:44.888480Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.130231s 2025-06-25T14:42:44.899637Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:42:44.918076Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:557:2482], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:42:44.918166Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:42:44.918209Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:42:44.918422Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:554:2480], Recipient [1:373:2367]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-25T14:42:44.918457Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:42:45.029090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-25T14:42:45.037377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:42:45.045586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:42:45.045684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:42:45.064538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:42:45.064705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:42:45.064859Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:42:45.074025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:42:45.082736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:42:45.082812Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:42:45.082843Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-25T14:42:45.083077Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:42:45.083108Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:42:45.083181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:42:45.083292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:42:45.083339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:42:45.083365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:42:45.094473Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:42:45.095099Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:42:45.095144Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-25T14:42:45.095283Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:42:45.095318Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:42:45.095380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:42:45.095447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:42:45.095501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:42:45.095592Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects 
ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:42:45.095978Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:42:45.096006Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-25T14:42:45.096099Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:42:45.096127Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:42:45.096165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:42:45.096207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:42:45.096273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-25T14:42:45.096356Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:42:45.096404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:42:45.106488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:42:45.106940Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:42:45.106979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:42:45.115237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:42:45.116362Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877760, Sender [1:562:2487], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:564:2488] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T14:42:45.116414Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5050: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:42:45.116451Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5787: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-25T14:42:45.116530Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269091328, Sender [1:369:2363], Recipient [1:373:2367]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-25T14:42:45.116766Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:566:2490], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:42:45.116795Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:42:45.116819Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:42:45.116926Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124996, Sender [1:554:2480], Recipient [1:373:2367]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-25T14:42:45.116948Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-25T14:42:45.116997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-25T14:42:45.117022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-25T14:42:45. ... { RawX1: 627 RawX2: 12884904419 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-25T14:43:00.713543Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 281474976715662, tablet: 72075186224037888, partId: 1 2025-06-25T14:43:00.713647Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976715662:1, at schemeshard: 72057594046644480, message: Source { RawX1: 627 RawX2: 12884904419 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-25T14:43:00.713685Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 281474976715662:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-25T14:43:00.713735Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976715662:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 627 RawX2: 12884904419 } Origin: 72075186224037888 State: 2 TxId: 281474976715662 Step: 0 Generation: 1 2025-06-25T14:43:00.713769Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715662:1, shardIdx: 72057594046644480:1, shard: 72075186224037888, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-25T14:43:00.713807Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-25T14:43:00.713843Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715662:1, datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-06-25T14:43:00.713874Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715662:1, datashard: 72075186224037888, at schemeshard: 72057594046644480 2025-06-25T14:43:00.713899Z node 3 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 281474976715662:1 129 -> 240 2025-06-25T14:43:00.714000Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:43:00.714319Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-25T14:43:00.714342Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:43:00.714367Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715662:1 2025-06-25T14:43:00.714439Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:892:2696] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-25T14:43:00.714481Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:627:2531] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-25T14:43:00.714550Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-06-25T14:43:00.714610Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:43:00.714745Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037889 state Ready 2025-06-25T14:43:00.714771Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-25T14:43:00.714864Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:373:2367], Recipient [3:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:43:00.714886Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:43:00.714919Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-25T14:43:00.714958Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 281474976715662:1ProgressState, operation type TxCopyTable 2025-06-25T14:43:00.714992Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:43:00.715023Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1061: Set barrier, OperationId: 281474976715662:1, name: CopyTableBarrier, done: 1, blocked: 1, parts count: 2 2025-06-25T14:43:00.715062Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 281474976715662, done: 1, blocked: 1 2025-06-25T14:43:00.715138Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 281474976715662:1 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 281474976715662 Name: 
CopyTableBarrier }, at tablet# 72057594046644480 2025-06-25T14:43:00.715174Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715662:1 240 -> 240 2025-06-25T14:43:00.715523Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:43:00.715545Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715662:1 2025-06-25T14:43:00.715609Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:373:2367], Recipient [3:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:43:00.715635Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:43:00.715671Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-25T14:43:00.715703Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715662:1 ProgressState 2025-06-25T14:43:00.715798Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:43:00.715821Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715662:1 progress is 2/2 2025-06-25T14:43:00.715855Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 2/2 2025-06-25T14:43:00.715889Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715662:1 progress is 2/2 2025-06-25T14:43:00.715924Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 2/2 2025-06-25T14:43:00.715955Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715662, ready parts: 2/2, is published: true 2025-06-25T14:43:00.716009Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:862:2676] message: TxId: 281474976715662 2025-06-25T14:43:00.716051Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715662 ready parts: 2/2 2025-06-25T14:43:00.716091Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715662:0 2025-06-25T14:43:00.716117Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715662:0 2025-06-25T14:43:00.716181Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 2 2025-06-25T14:43:00.716206Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715662:1 2025-06-25T14:43:00.716220Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715662:1 2025-06-25T14:43:00.716287Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove 
txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 11] was 3 2025-06-25T14:43:00.716353Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-25T14:43:00.716786Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:43:00.716863Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:862:2676] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715662 at schemeshard: 72057594046644480 2025-06-25T14:43:00.717160Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:877:2684], Recipient [3:373:2367]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:43:00.717192Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:43:00.717217Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:43:00.855130Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [3:976:2761], serverId# [3:977:2762], sessionId# [0:0:0] 2025-06-25T14:43:00.855326Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrnjzkapnp3wnas174ntyw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Nzg2NTUzNzktY2I4NmFjOTItOTRkZThkZDktYTYyMjQyOWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } 2025-06-25T14:43:01.015628Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykrnk40bsdrbpewpcsntnd1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjBmYTU2YTgtNDlmZWM3MDgtMmQ3MWY1Ni02Y2YwZWRk, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TConsoleConfigTests::TestAddConfigItem [GOOD] >> TConsoleConfigTests::TestAutoKind ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::TableWithIndex [GOOD] Test command err: Trying to start YDB, gRPC: 29957, MsgBus: 32647 2025-06-25T14:42:52.319778Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897488039002466:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:52.319840Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bbb/r3tmp/tmp0XZOFK/pdisk_1.dat 2025-06-25T14:42:52.595332Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29957, node 1 2025-06-25T14:42:52.659453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:52.659574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:52.662265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:52.674848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:52.674871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:52.674877Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:52.674991Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32647 TClient is connected to server localhost:32647 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:42:53.186786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:53.201142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:42:53.214252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:53.326134Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:53.355192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:53.503105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:53.583762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:55.145776Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897500923905968:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.145916Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.469441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.493718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.516384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.541558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.566832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.594876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.632114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.712004Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897500923906628:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.712090Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.712235Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897500923906633:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.715611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:55.725486Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897500923906635:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:55.809357Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897500923906686:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:56.806918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:56.853628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:56.923827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:57.320146Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897488039002466:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:57.320207Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:58.568263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TxUsage::WriteToTopic_Demo_36_Table [GOOD] >> TConsoleConfigTests::TestAutoKind [GOOD] >> TConsoleConfigTests::TestAllowedScopes |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> TxUsage::WriteToTopic_Demo_36_Query >> TxUsage::WriteToTopic_Demo_22_RestartNo_Query [GOOD] >> TConsoleConfigTests::TestAllowedScopes [GOOD] >> TConsoleConfigTests::TestAffectedConfigs >> BsControllerConfig::PDiskCreate >> BsControllerConfig::AddDriveSerial >> BsControllerConfig::OverlayMapCrossReferences >> BsControllerConfig::MergeIntersectingBoxes >> BsControllerConfig::Basic >> BsControllerConfig::ReassignGroupDisk >> BsControllerConfig::OverlayMap >> TxUsage::WriteToTopic_Demo_26_Query [GOOD] >> BsControllerConfig::SelectAllGroups >> BsControllerConfig::ExtendByCreatingSeparateBox >> BsControllerConfig::ManyPDisksRestarts >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Table >> 
TBlobStorageProxyTest::TestGetMultipart >> TBlobStorageProxyTest::TestEmptyDiscover >> BsControllerConfig::OverlayMap [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Block >> TBlobStorageProxyTest::TestCollectGarbagePersistence >> TxUsage::WriteToTopic_Demo_27_Table >> TConsoleConfigTests::TestAffectedConfigs [GOOD] |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::OverlayMap [GOOD] >> LocalPartition::Restarts [GOOD] >> LocalPartition::WithoutPartitionWithRestart >> KqpBatchDelete::Large_1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/console/ut/unittest >> TConsoleConfigTests::TestAffectedConfigs [GOOD] Test command err: 2025-06-25T14:35:41.185094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:41.185151Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:41.263349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:43.416827Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:43.416896Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:43.461752Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:45.335672Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:45.335735Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:45.406493Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:47.252665Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:47.252735Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:47.301977Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:49.291453Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:49.291526Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-25T14:35:49.373582Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:51.106671Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:51.106740Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:51.153446Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:52.738708Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:52.738777Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:52.781511Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:54.630565Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:54.630631Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:54.685005Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:57.054578Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:57.054662Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:57.154221Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:35:59.525628Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:35:59.525709Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:35:59.569930Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:02.208978Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:02.209058Z node 11 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:02.281130Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:04.495287Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:04.495375Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:04.591303Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:07.286887Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:07.286970Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:07.457923Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:09.874778Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:09.874867Z node 14 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:09.957823Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:12.221373Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:12.221480Z node 15 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:12.269375Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:14.830787Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:14.830879Z node 16 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were 
not loaded 2025-06-25T14:36:14.901945Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:17.470557Z node 17 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:17.470643Z node 17 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:17.520625Z node 17 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:18.255053Z node 17 :CMS_CONFIGS ERROR: console_configs_provider.cpp:1235: Unexpected config sender died for subscription id=1 2025-06-25T14:36:19.107394Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:19.107479Z node 18 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:19.167471Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:20.223322Z node 18 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:36:20.223430Z node 18 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:20.307862Z node 18 :CMS_CONFIGS ERROR: console_configs_provider.cpp:1201: Couldn't deliver config notification for subscription id=1 tabletid=8651011 serviceid=[0:0:0] nodeid=1 host=host1 tenant=tenant1 nodetype=type1 kinds=2 lastprovidedconfig= 2025-06-25T14:36:21.226337Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:21.226441Z node 19 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:21.281609Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:22.264153Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:36:22.264265Z node 19 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:22.375603Z node 19 :CMS_CONFIGS ERROR: console_configs_provider.cpp:1201: Couldn't deliver config notification for subscription id=1 tabletid=0 serviceid=[19:8246204620103118691:7960687] nodeid=1 host=host1 tenant=tenant1 nodetype=type1 kinds=2 lastprovidedconfig= 2025-06-25T14:36:23.149680Z node 20 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:23.149773Z node 20 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:23.245511Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:27.446830Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:27.446927Z node 21 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:27.493792Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:32.240679Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:32.240767Z node 22 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:32.290543Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:34.145087Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:34.145185Z node 23 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:34.193824Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:36.198236Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:36:36.198330Z node 24 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:36.248905Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:36:43.640832Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:36:43.640953Z node 24 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:52.120404Z node 24 :CMS_CONFIGS ERROR: console_configs_provider.cpp:1201: Couldn't deliver config notification for subscription id=1 tabletid=0 
serviceid=[100:28538277257700723:0] nodeid=100 host=host100 tenant=tenant-100 nodetype=type100 kinds=2 lastprovidedconfig= 2025-06-25T14:40:53.131196Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:40:53.131293Z node 25 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:53.211385Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:41:00.414024Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:41:00.414136Z node 25 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:01.724675Z node 26 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:43:01.724775Z node 26 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:01.771672Z node 26 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:43:03.116542Z node 27 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:43:03.116635Z node 27 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:03.160464Z node 27 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:43:04.261463Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:43:04.261555Z node 28 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:04.326902Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:43:05.451277Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:43:05.451378Z node 29 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:05.495405Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) >> BsControllerConfig::SelectAllGroups [GOOD] >> TBlobStorageProxyTest::TestEmptyDiscover [GOOD] >> TBlobStorageProxyTest::TestEmptyDiscoverMaxi >> TBlobStorageProxyTest::TestDoubleFailure |84.5%| [TA] $(B)/ydb/core/cms/console/ut/test-results/unittest/{meta.json ... results_accumulator.log} |84.5%| [TA] {RESULT} $(B)/ydb/core/cms/console/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::SelectAllGroups [GOOD] Test command err: 2025-06-25T14:43:05.606125Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:05.625145Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:05.629135Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:05.633013Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:43:05.634240Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:05.634371Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:05.634422Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:05.634733Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:05.646236Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:05.646329Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T14:43:05.647964Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:05.648103Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:05.648184Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:05.648252Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:05.818691Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.117189s 2025-06-25T14:43:05.818824Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.117342s ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::Large_1 [GOOD] Test command err: Trying to start YDB, gRPC: 27808, MsgBus: 21945 2025-06-25T14:42:40.733255Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897435234341625:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733334Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000beb/r3tmp/tmpG2Tk38/pdisk_1.dat 2025-06-25T14:42:41.501024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.501120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.502721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.607046Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.717582Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.100806s 2025-06-25T14:42:41.717649Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.100887s 2025-06-25T14:42:41.731898Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 27808, node 1 2025-06-25T14:42:42.716900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.716922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.716932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.717076Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21945 TClient is connected to server localhost:21945 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:42:45.123558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.411083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:45.499887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:45.575300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.733574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897435234341625:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.733634Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828480Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897456709179564:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828592Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.941081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.963893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.985818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.015034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.039402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.069925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.095291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.178919Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897469594082126:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.178999Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.179221Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897469594082131:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.182234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.191206Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897469594082133:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.259099Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897469594082184:3435] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.085850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation ... ROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519897522907169457:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:00.443586Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000beb/r3tmp/tmpuj8IaC/pdisk_1.dat 2025-06-25T14:43:00.538519Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:00.539925Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519897522907169436:2080] 1750862580442995 != 1750862580442998 TServer::EnableGrpc on GrpcPort 20566, node 3 2025-06-25T14:43:00.577370Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:00.577463Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:00.579193Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:00.595226Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:00.595261Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:00.595270Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:00.595435Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17349 TClient is connected to server localhost:17349 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:01.055106Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:01.060842Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:43:01.075480Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:01.141845Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:01.304021Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:01.377101Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:43:01.529546Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:03.753105Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519897535792072941:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:03.753232Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:03.797447Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:03.823043Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:03.850858Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:03.874341Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:03.898272Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:03.924701Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:03.954104Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:04.035218Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519897540087040893:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:04.035318Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:04.035341Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519897540087040898:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:04.038823Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:04.047108Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519897540087040900:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:43:04.124954Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519897540087040951:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:04.998750Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:05.443579Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519897522907169457:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:05.443650Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> IncrementalBackup::ComplexRestoreBackupCollection-WithIncremental [GOOD] >> TBlobStorageProxyTest::TestInFlightPuts >> BsControllerConfig::OverlayMapCrossReferences [GOOD] >> TBlobStorageProxyTest::TestGetMultipart [GOOD] >> TBlobStorageProxyTest::TestGetFail >> BsControllerConfig::Basic [GOOD] >> BsControllerConfig::DeleteStoragePool >> TBlobStorageProxyTest::TestVPutVGetPersistence >> TBlobStorageProxyTest::TestProxyPutInvalidSize >> KqpBatchDelete::Large_2 [GOOD] >> BsControllerConfig::AddDriveSerial [GOOD] >> BsControllerConfig::AddDriveSerialMassive |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::OverlayMapCrossReferences [GOOD] >> BsControllerConfig::PDiskCreate [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Block [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Stripe ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::PDiskCreate [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:203:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:203:2077] Leader for TabletID 72057594037932033 is [1:233:2079] sender: [1:237:2066] recipient: [1:203:2077] 2025-06-25T14:43:05.605819Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:05.624559Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:05.629103Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:05.633040Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:43:05.634378Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:05.634588Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 
2025-06-25T14:43:05.634630Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:05.634817Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:05.645913Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:05.646093Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T14:43:05.647980Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:05.648145Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:05.648217Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:05.648278Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:233:2079] sender: [1:258:2066] recipient: [1:20:2067] 2025-06-25T14:43:05.661764Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:05.661916Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:05.673566Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:05.673724Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:05.673804Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:05.673869Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:05.673972Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:05.674019Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:05.674102Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:05.674163Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:05.684845Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:05.684999Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:05.696741Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:05.696864Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T14:43:05.700684Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T14:43:05.700743Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T14:43:05.700915Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T14:43:05.700955Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-25T14:43:05.716385Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } } } Command { QueryBaseConfig { } } } 2025-06-25T14:43:05.716934Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2025-06-25T14:43:05.716986Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2025-06-25T14:43:05.717017Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2025-06-25T14:43:05.717050Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2025-06-25T14:43:05.717074Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2025-06-25T14:43:05.717096Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2025-06-25T14:43:05.717116Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2025-06-25T14:43:05.717139Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2025-06-25T14:43:05.717160Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2025-06-25T14:43:05.717185Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2025-06-25T14:43:05.717206Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2025-06-25T14:43:05.717241Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1002 
Path# /dev/disk3 2025-06-25T14:43:05.717282Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2025-06-25T14:43:05.717311Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2025-06-25T14:43:05.717332Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2025-06-25T14:43:05.717363Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2025-06-25T14:43:05.717389Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2025-06-25T14:43:05.717412Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2025-06-25T14:43:05.717433Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2025-06-25T14:43:05.717466Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2025-06-25T14:43:05.717492Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1002 Path# /dev/disk3 2025-06-25T14:43:05.717513Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1000 Path# /dev/disk1 2025-06-25T14:43:05.717533Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1001 Path# /dev/disk2 2025-06-25T14:43:05.717567Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1002 Path# /dev/disk3 2025-06-25T14:43:05.717589Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1000 Path# /dev/disk1 2025-06-25T14:43:05.717610Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1001 Path# /dev/disk2 2025-06-25T14:43:05.717631Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1002 Path# /dev/disk3 2025-06-25T14:43:05.717665Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1000 Path# /dev/disk1 2025-06-25T14:43:05.717690Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1001 Path# /dev/disk2 2025-06-25T14:43:05.717712Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1002 Path# /dev/disk3 Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:206:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:206:2077] Leader for TabletID 72057594037932033 is [11:236:2079] sender: [11:237:2066] recipient: [11:206:2077] 2025-06-25T14:43:07.897112Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:07.897985Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:07.898169Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:07.899009Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:43:07.899395Z node 11 :BS_CONTROLLER DEBUG: 
{BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:07.899480Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:07.899497Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:07.899625Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:07.906680Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:07.906805Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T14:43:07.906883Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:07.907002Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:07.907062Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:07.907110Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:236:2079] sender: [11:258:2066] recipient: [11:20:2067] 2025-06-25T14:43:07.918479Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:07.918659Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:07.929459Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:07.929592Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:07.929647Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:07.929705Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:07.929819Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:07.929906Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:07.929945Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:07.929986Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:07.940747Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from 
queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:07.940898Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:07.952132Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:07.952297Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T14:43:07.953492Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T14:43:07.953542Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T14:43:07.953711Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T14:43:07.953761Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-25T14:43:07.954496Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 2 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 2 } } } Command { QueryBaseConfig { } } } 2025-06-25T14:43:07.954936Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1000 Path# /dev/disk1 2025-06-25T14:43:07.954976Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1001 Path# /dev/disk2 2025-06-25T14:43:07.955006Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1002 Path# /dev/disk3 2025-06-25T14:43:07.955034Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1000 Path# /dev/disk1 2025-06-25T14:43:07.955073Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1001 Path# /dev/disk2 2025-06-25T14:43:07.955097Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1002 Path# /dev/disk3 2025-06-25T14:43:07.955119Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1000 Path# /dev/disk1 2025-06-25T14:43:07.955141Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1001 Path# /dev/disk2 2025-06-25T14:43:07.955164Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1002 Path# /dev/disk3 2025-06-25T14:43:07.955200Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1000 Path# /dev/disk1 2025-06-25T14:43:07.955223Z node 11 
:BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1001 Path# /dev/disk2 2025-06-25T14:43:07.955249Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1002 Path# /dev/disk3 2025-06-25T14:43:07.955273Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1000 Path# /dev/disk1 2025-06-25T14:43:07.955304Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1001 Path# /dev/disk2 2025-06-25T14:43:07.955332Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1002 Path# /dev/disk3 2025-06-25T14:43:07.955422Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1000 Path# /dev/disk1 2025-06-25T14:43:07.955452Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1001 Path# /dev/disk2 2025-06-25T14:43:07.955474Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1002 Path# /dev/disk3 2025-06-25T14:43:07.955496Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1000 Path# /dev/disk1 2025-06-25T14:43:07.955533Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1001 Path# /dev/disk2 2025-06-25T14:43:07.955575Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1002 Path# /dev/disk3 2025-06-25T14:43:07.955597Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1000 Path# /dev/disk1 2025-06-25T14:43:07.955620Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1001 Path# /dev/disk2 2025-06-25T14:43:07.955643Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1002 Path# /dev/disk3 2025-06-25T14:43:07.955693Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1000 Path# /dev/disk1 2025-06-25T14:43:07.955717Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1001 Path# /dev/disk2 2025-06-25T14:43:07.955747Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1002 Path# /dev/disk3 2025-06-25T14:43:07.955768Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1000 Path# /dev/disk1 2025-06-25T14:43:07.955790Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1001 Path# /dev/disk2 2025-06-25T14:43:07.955811Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1002 Path# /dev/disk3 >> TxUsage::WriteToTopic_Demo_7_Table [GOOD] >> BsControllerConfig::ReassignGroupDisk [GOOD] >> TxUsage::WriteToTopic_Demo_11_Query [GOOD] >> TxUsage::WriteToTopic_Demo_43_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::Large_2 [GOOD] Test command err: Trying to start YDB, gRPC: 19170, MsgBus: 10576 2025-06-25T14:42:52.512724Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897490416465053:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:52.512773Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ba9/r3tmp/tmpxUyDiZ/pdisk_1.dat 2025-06-25T14:42:52.799768Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19170, node 1 2025-06-25T14:42:52.882061Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:52.882085Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:52.882095Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:52.882196Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:42:52.894865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:52.894954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:52.896635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10576 TClient is connected to server localhost:10576 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:53.419172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:53.433245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:42:53.443986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:53.525567Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:53.563295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:53.723200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:53.791252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:55.278390Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897503301368565:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.278512Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.622472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.655008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.681064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.708114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.776591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.804255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.869465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.918229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897503301369226:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.918296Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.918637Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897503301369231:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.922261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:55.932133Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897503301369233:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:55.997963Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897503301369284:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:56.776430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:57.513050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897490416465053:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:57.513122Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21781, MsgBus: 11617 2025-06-25T14:43:01.526173Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897525982591411:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:01.526222Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ba9/r3tmp/tmpyC498q/pdisk_1.dat 2025-06-25T14:43:01.667732Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:01.681779Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:01.681857Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:01.684827Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21781, node 2 2025-06-25T14:43:01.741762Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:01.741785Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:01.741793Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:01.741924Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11617 TClient is connected to server localhost:11617 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:02.264380Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:02.276048Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:02.338892Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:02.502488Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:02.546513Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:02.567239Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:04.513886Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897538867494911:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:04.513945Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:04.572506Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:04.600059Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:04.626339Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:04.654258Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:04.694536Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:04.720855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:04.787134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:04.831040Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897538867495573:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:04.831115Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:04.831196Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897538867495578:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:04.833670Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:04.840846Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897538867495580:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:43:04.916603Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897538867495631:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:05.727271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:06.528395Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897525982591411:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:06.528465Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TBlobStorageProxyTest::TestGetFail [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::ReassignGroupDisk [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:291:2068] recipient: [1:257:2079] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:291:2068] recipient: [1:257:2079] Leader for TabletID 72057594037932033 is [1:304:2081] sender: [1:305:2068] recipient: [1:257:2079] 2025-06-25T14:43:05.607192Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:05.624538Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:05.629147Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:05.689970Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:43:05.690583Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:05.690792Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:05.690832Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:05.691075Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:05.701654Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:05.701766Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T14:43:05.701938Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:05.702033Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:05.702180Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:05.702244Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:304:2081] sender: [1:326:2068] recipient: [1:22:2069] 2025-06-25T14:43:05.713760Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:05.713912Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:05.724700Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:05.724835Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:05.724904Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:05.724979Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:05.725098Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:05.725148Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:05.725191Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:05.725252Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:05.735982Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:05.736152Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:05.746932Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:05.747087Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T14:43:05.748327Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T14:43:05.748377Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T14:43:05.748576Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T14:43:05.748628Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue 
processed 2025-06-25T14:43:05.762891Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk" } } } Command { DefineBox { BoxId: 1 Name: "box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 8 PDiskFilter { Property { Type: ROT } } } } } 2025-06-25T14:43:05.763504Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk 2025-06-25T14:43:05.763670Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk 2025-06-25T14:43:05.763711Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk 2025-06-25T14:43:05.763737Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk 2025-06-25T14:43:05.763763Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk 2025-06-25T14:43:05.763785Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk 2025-06-25T14:43:05.763806Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk 2025-06-25T14:43:05.763830Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1000 Path# /dev/disk 2025-06-25T14:43:05.763866Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1000 Path# /dev/disk 2025-06-25T14:43:05.763897Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1000 Path# /dev/disk 2025-06-25T14:43:05.763942Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1000 Path# /dev/disk 2025-06-25T14:43:05.763972Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1000 Path# /dev/disk 2025-06-25T14:43:05.795330Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 Response# Status { Success: true } Status { Success: true } Status { Success: true } Success: true ConfigTxSeqNo: 1 2025-06-25T14:43:05.797538Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { UpdateDriveStatus { HostKey { NodeId: 1 } Path: "/dev/disk" Status: INACTIVE } } } Response# Status { Success: true } Success: true ConfigTxSeqNo: 2 Leader for TabletID 72057594037932033 is [0:0:0] sender: [13:291:2068] recipient: [13:257:2079] IGNORE Leader for TabletID 
72057594037932033 is [0:0:0] sender: [13:291:2068] recipient: [13:257:2079] Leader for TabletID 72057594037932033 is [13:302:2081] sender: [13:304:2068] recipient: [13:257:2079] 2025-06-25T14:43:08.068545Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:08.069694Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:08.069887Z node 13 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:08.071042Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:43:08.071654Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:08.071852Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:08.071889Z node 13 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:08.072043Z node 13 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:08.079981Z node 13 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:08.080082Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T14:43:08.080162Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:08.080241Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:08.080303Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:08.080393Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [13:302:2081] sender: [13:326:2068] recipient: [13:22:2069] 2025-06-25T14:43:08.091726Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:08.091947Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:08.102798Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:08.102944Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:08.103014Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:08.103147Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:08.103276Z node 13 :BS_CONTROLLER DEBUG: 
{BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:08.103339Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:08.103392Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:08.103462Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:08.114204Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:08.114344Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:08.125088Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:08.125284Z node 13 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T14:43:08.126228Z node 13 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T14:43:08.126261Z node 13 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T14:43:08.126426Z node 13 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T14:43:08.126466Z node 13 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-25T14:43:08.127067Z node 13 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 2 Drive { Path: "/dev/disk" } } } Command { DefineBox { BoxId: 1 Name: "box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 2 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 8 PDiskFilter { Property { Type: ROT } } } } } 2025-06-25T14:43:08.127413Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1000 Path# /dev/disk 2025-06-25T14:43:08.127456Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1000 Path# /dev/disk 2025-06-25T14:43:08.127474Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1000 Path# /dev/disk 2025-06-25T14:43:08.127496Z node 13 :BS_CONTROLLER 
NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1000 Path# /dev/disk 2025-06-25T14:43:08.127513Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1000 Path# /dev/disk 2025-06-25T14:43:08.127529Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1000 Path# /dev/disk 2025-06-25T14:43:08.127542Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1000 Path# /dev/disk 2025-06-25T14:43:08.127555Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1000 Path# /dev/disk 2025-06-25T14:43:08.127569Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 21:1000 Path# /dev/disk 2025-06-25T14:43:08.127582Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 22:1000 Path# /dev/disk 2025-06-25T14:43:08.127593Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 23:1000 Path# /dev/disk 2025-06-25T14:43:08.127624Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 24:1000 Path# /dev/disk 2025-06-25T14:43:08.145901Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 Response# Status { Success: true } Status { Success: true } Status { Success: true } Success: true ConfigTxSeqNo: 1 2025-06-25T14:43:08.147923Z node 13 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { UpdateDriveStatus { HostKey { NodeId: 1 } Path: "/dev/disk" Status: INACTIVE } } } Response# Status { ErrorDescription: "Host not found NodeId# 1 HostKey# NodeId: 1\n incorrect" FailReason: kHostNotFound FailParam { NodeId: 1 } } ErrorDescription: "Host not found NodeId# 1 HostKey# NodeId: 1\n incorrect" >> TBlobStorageProxyTest::TestDoubleGroups >> TBlobStorageProxyTest::TestProxyPutInvalidSize [GOOD] >> TBlobStorageProxyTest::TestProxyLongTailDiscoverSingleFailure >> TBlobStorageProxyTest::TestDoubleFailure [GOOD] >> TBlobStorageProxyTest::TestDoubleFailureMirror3Plus2 >> TBlobStorageProxyTest::TestInFlightPuts [GOOD] >> TBlobStorageProxyTest::TestHugeCollectGarbage >> TxUsage::WriteToTopic_Demo_12_Table >> TxUsage::Sinks_Oltp_WriteToTopics_1_Table [GOOD] >> TxUsage::WriteToTopic_Demo_7_Query |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestGetFail [GOOD] >> TBlobStorageProxyTest::TestPartialGetBlock >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Query >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query >> TBlobStorageProxyTest::TestPersistence >> TBlobStorageProxyTest::TestPutGetStatusErasureMirror3 >> TBlobStorageProxyTest::TestVPutVGetPersistence [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus1Block >> TBlobStorageProxyTest::TestEmptyDiscoverMaxi [GOOD] |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestVPutVGetPersistence [GOOD] >> TxUsage::WriteToTopic_Demo_44_Table >> TBlobStorageProxyTest::TestPartialGetBlock [GOOD] >> TBlobStorageProxyTest::TestPartialGetMirror >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Stripe [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query [GOOD] >> TBlobStorageProxyTest::TestHugeCollectGarbage [GOOD] >> BsControllerConfig::AddDriveSerialMassive [GOOD] 
|84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestEmptyDiscoverMaxi [GOOD] >> SystemView::PartitionStatsLocksFields [GOOD] >> SystemView::QueryStatsAllTables >> TBlobStorageProxyTest::TestDoubleFailureMirror3Plus2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::AddDriveSerialMassive [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:203:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:203:2077] Leader for TabletID 72057594037932033 is [1:233:2079] sender: [1:237:2066] recipient: [1:203:2077] 2025-06-25T14:43:05.607862Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:05.624537Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:05.630015Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:05.632162Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:43:05.634378Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:05.634580Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:05.634608Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:05.634804Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:05.646350Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:05.646498Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T14:43:05.647969Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:05.648125Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:05.648229Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:05.648330Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:233:2079] sender: [1:257:2066] recipient: [1:20:2067] 2025-06-25T14:43:05.663350Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:05.663496Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:05.674250Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:05.674377Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue 
Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:05.674441Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:05.674523Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:05.674637Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:05.674695Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:05.674734Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:05.674800Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:05.685467Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:05.685622Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:05.696455Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:05.696645Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T14:43:05.702028Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T14:43:05.702082Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T14:43:05.702231Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T14:43:05.702265Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-25T14:43:05.715184Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-06-25T14:43:05.725506Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-06-25T14:43:05.726231Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:206:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:206:2077] Leader for TabletID 72057594037932033 is [11:236:2079] sender: [11:237:2066] recipient: [11:206:2077] 2025-06-25T14:43:07.505634Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:07.506726Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} 
StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:07.506976Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:07.508123Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:43:07.508648Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:07.508809Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:07.508836Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:07.509037Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:07.517975Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:07.518134Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T14:43:07.518245Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:07.518352Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:07.518456Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:07.518541Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:236:2079] sender: [11:257:2066] recipient: [11:20:2067] 2025-06-25T14:43:07.530991Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:07.531140Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:07.541915Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:07.542040Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:07.542122Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:07.542208Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:07.542347Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:07.542410Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:07.542467Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} 
Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:07.542553Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:07.553278Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:07.553411Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:07.564140Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:07.564278Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T14:43:07.565488Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T14:43:07.565532Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T14:43:07.565706Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T14:43:07.565800Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-25T14:43:07.566431Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-06-25T14:43:07.567505Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# ... 
ommand { AddDriveSerial { Serial: "SN_5" BoxId: 1 } } } 2025-06-25T14:43:09.692913Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_6" BoxId: 1 } } } 2025-06-25T14:43:09.693487Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_7" BoxId: 1 } } } 2025-06-25T14:43:09.694115Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_8" BoxId: 1 } } } 2025-06-25T14:43:09.694806Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_9" BoxId: 1 } } } 2025-06-25T14:43:09.695501Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_0" } } } 2025-06-25T14:43:09.696213Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_1" } } } 2025-06-25T14:43:09.697305Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_2" } } } 2025-06-25T14:43:09.697980Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_3" } } } 2025-06-25T14:43:09.698585Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_4" } } } 2025-06-25T14:43:09.699225Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_5" } } } 2025-06-25T14:43:09.699881Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_6" } } } 2025-06-25T14:43:09.701173Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_7" } } } 2025-06-25T14:43:09.701853Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_8" } } } 2025-06-25T14:43:09.702582Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_9" } } } Leader for TabletID 72057594037932033 is [0:0:0] sender: [31:225:2066] recipient: [31:201:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [31:225:2066] recipient: [31:201:2077] Leader for TabletID 72057594037932033 is [31:233:2079] sender: [31:234:2066] recipient: [31:201:2077] 2025-06-25T14:43:11.722270Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:11.723515Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:11.723739Z node 31 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:11.725157Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 
2025-06-25T14:43:11.725794Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:11.725956Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:11.725979Z node 31 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:11.726180Z node 31 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:11.734149Z node 31 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:11.734247Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T14:43:11.734357Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:11.734448Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:11.734534Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:11.734608Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [31:233:2079] sender: [31:257:2066] recipient: [31:20:2067] 2025-06-25T14:43:11.745769Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:11.745933Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:11.756692Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:11.756812Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:11.756903Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:11.756981Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:11.757063Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:11.757100Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:11.757118Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:11.757144Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:11.767961Z node 31 
:BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:11.768089Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:11.780805Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:11.780947Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T14:43:11.782047Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T14:43:11.782106Z node 31 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T14:43:11.782262Z node 31 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T14:43:11.782298Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-25T14:43:11.782806Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_0" BoxId: 1 } } } 2025-06-25T14:43:11.783912Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_1" BoxId: 1 } } } 2025-06-25T14:43:11.784509Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_2" BoxId: 1 } } } 2025-06-25T14:43:11.785013Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_3" BoxId: 1 } } } 2025-06-25T14:43:11.785500Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_4" BoxId: 1 } } } 2025-06-25T14:43:11.786073Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_5" BoxId: 1 } } } 2025-06-25T14:43:11.786625Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_6" BoxId: 1 } } } 2025-06-25T14:43:11.787255Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_7" BoxId: 1 } } } 2025-06-25T14:43:11.787833Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_8" BoxId: 1 } } } 2025-06-25T14:43:11.788390Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_9" BoxId: 1 } } } 2025-06-25T14:43:11.789020Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_0" } } } 2025-06-25T14:43:11.789748Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_1" } } } 2025-06-25T14:43:11.790388Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest 
Request# {Command { RemoveDriveSerial { Serial: "SN_2" } } } 2025-06-25T14:43:11.790956Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_3" } } } 2025-06-25T14:43:11.791623Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_4" } } } 2025-06-25T14:43:11.792427Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_5" } } } 2025-06-25T14:43:11.792893Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_6" } } } 2025-06-25T14:43:11.793428Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_7" } } } 2025-06-25T14:43:11.793993Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_8" } } } 2025-06-25T14:43:11.794674Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_9" } } } >> TxUsage::WriteToTopic_Demo_36_Query [GOOD] |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestHugeCollectGarbage [GOOD] |84.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Stripe [GOOD] >> KqpBatchDelete::Large_3 [GOOD] >> TBlobStorageProxyTest::TestProxyPutSingleTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestDoubleFailureMirror3Plus2 [GOOD] Test command err: 2025-06-25T14:43:08.557605Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00148e/r3tmp/tmpIl39vK//vdisk_bad_0/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 1 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1 2025-06-25T14:43:08.559036Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00148e/r3tmp/tmpIl39vK//vdisk_bad_1/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 2 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 2 2025-06-25T14:43:08.564665Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 2 VDISK[0:_:0:1:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 
SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:08.565056Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:11.758988Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00148e/r3tmp/tmpPosmxa//vdisk_bad_0/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 1 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1 2025-06-25T14:43:11.759516Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00148e/r3tmp/tmpPosmxa//vdisk_bad_1/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 2 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 2 2025-06-25T14:43:11.774062Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 
SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:11.774172Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 2 VDISK[0:_:0:1:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR >> TxUsage::WriteToTopic_Demo_37_Table >> TBlobStorageProxyTest::TestPartialGetMirror [GOOD] >> TBlobStorageProxyTest::TestSingleFailureMirror >> TBlobStorageProxyTest::TestProxyRestoreOnGetBlock >> TBlobStorageProxyTest::TestPersistence [GOOD] >> TBlobStorageProxyTest::TestPartialGetStripe >> TBlobStorageProxyTest::TestNormal >> TBlobStorageProxyTest::TestCollectGarbagePersistence [GOOD] >> TBlobStorageProxyTest::TestCollectGarbageAfterLargeData >> TBlobStorageProxyTest::TestPutGetStatusErasure4Plus2Block |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPartialGetMirror [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::Large_3 [GOOD] Test command err: Trying to start YDB, gRPC: 61491, MsgBus: 5328 2025-06-25T14:42:40.733058Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897435359808084:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733103Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c4c/r3tmp/tmpHN7x9i/pdisk_1.dat 2025-06-25T14:42:41.539470Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.593469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.593572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.634039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.730718Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 61491, node 1 2025-06-25T14:42:42.721740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.721762Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.721771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.721949Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5328 TClient is connected to server localhost:5328 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.995596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.411641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.579454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.646616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.785524Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897435359808084:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.793539Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.851510Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897456834646009:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.851621Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.941027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.966964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.989595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.013171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.044738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.111854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.139974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.191400Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897469719548566:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.191475Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.191522Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897469719548571:2442], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.194479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.201627Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897469719548573:2443], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.289348Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897469719548624:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.092000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:56.472804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:42:56.472830Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Trying to start YDB, gRPC: 23444, MsgBus: 13551 2025-06-25T14:42:59.263091Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897519727655609:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:59.263164Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c4c/r3tmp/tmpjrCCjA/pdisk_1.dat 2025-06-25T14:42:59.374033Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:59.374290Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897519727655590:2080] 1750862579262645 != 1750862579262648 TServer::EnableGrpc on GrpcPort 23444, node 2 2025-06-25T14:42:59.404932Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:59.405062Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:59.411154Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:59.447829Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:59.447856Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:59.447865Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:59.448020Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13551 TClient is connected to server localhost:13551 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:59.957276Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:59.969944Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:00.012815Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:00.143395Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:00.200608Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:00.327028Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:02.283703Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897532612559099:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:02.283797Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:02.335537Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:02.362035Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:02.385828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:02.410884Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:02.440417Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:02.506895Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:02.536212Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:02.595156Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897532612559760:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:02.595245Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:02.595315Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897532612559765:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:02.599458Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:02.611162Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897532612559767:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:43:02.675734Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897532612559818:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:03.578307Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:04.263419Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897519727655609:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:04.263493Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TBlobStorageProxyTest::TestDoubleGroups [GOOD] >> TBlobStorageProxyTest::TestDoubleFailureStripe4Plus2 >> TBlobStorageProxyTest::TestPutGetStatusErasureMirror3 [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasureMirror3Plus2 >> TBlobStorageProxyTest::TestProxyPutSingleTimeout [GOOD] >> TBlobStorageProxyTest::TestProxyRestoreOnDiscoverBlock >> TBlobStorageProxyTest::TestProxyGetSingleTimeout >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus1Block [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus1Stripe >> TxUsage::WriteToTopic_Demo_19_RestartNo_Table >> KqpBatchUpdate::Large_2 [GOOD] >> TBlobStorageProxyTest::TestDoubleEmptyGet >> TBlobStorageProxyTest::TestBlock >> TBlobStorageProxyTest::TestPartialGetStripe [GOOD] >> TBlobStorageProxyTest::TestCollectGarbageAfterLargeData [GOOD] |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPartialGetStripe [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Large_2 [GOOD] Test command err: Trying to start YDB, gRPC: 4283, MsgBus: 2662 2025-06-25T14:42:52.649761Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897489733831604:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:52.649826Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b90/r3tmp/tmpG91HUG/pdisk_1.dat 2025-06-25T14:42:52.960827Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4283, node 1 2025-06-25T14:42:53.022447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:53.024286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:53.025847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:53.040030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:53.040062Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:53.040069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:53.040203Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2662 TClient is connected to server localhost:2662 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:53.577389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:42:53.601181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:53.655263Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:53.791610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:53.927726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:53.981219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:55.499280Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897502618735117:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.499369Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.859087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.888534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.915707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.943718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.972665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:56.045316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:56.076622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:56.163828Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897506913703078:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:56.163909Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:56.163978Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897506913703083:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:56.167530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:56.177416Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897506913703085:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:56.278100Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897506913703136:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:57.215219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:57.649888Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897489733831604:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:57.649944Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 26757, MsgBus: 20788 2025-06-25T14:43:05.827146Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897542365395695:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:05.827271Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b90/r3tmp/tmpKqIR8R/pdisk_1.dat 2025-06-25T14:43:05.947480Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:05.948186Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519897542365395676:2080] 1750862585826171 != 1750862585826174 TServer::EnableGrpc on GrpcPort 26757, node 2 2025-06-25T14:43:05.975596Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:05.975713Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:05.982580Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:06.008528Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:06.008547Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:06.008553Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:06.008664Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20788 TClient is connected to server localhost:20788 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:43:06.463129Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:43:06.480681Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:06.563183Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:43:06.729087Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:06.796198Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:06.840768Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:08.955663Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897555250299221:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:08.955747Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:09.006393Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:09.034152Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:09.100830Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:09.167872Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:09.198659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:09.234797Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:09.303084Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:09.383946Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897559545267187:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:09.384077Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:09.384080Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897559545267192:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:09.387480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:09.397702Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897559545267194:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:43:09.460057Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897559545267245:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:10.425759Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:10.827100Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897542365395695:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:10.827185Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TBlobStorageProxyTest::TestSingleFailureMirror [GOOD] >> TBlobStorageProxyTest::TestVBlockVPutVGet |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestCollectGarbageAfterLargeData [GOOD] >> TBlobStorageProxyTest::TestNormal [GOOD] >> TBlobStorageProxyTest::TestNormalMirror >> TBlobStorageProxyTest::TestDoubleFailureStripe4Plus2 [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure4Plus2Block [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure4Plus2Stripe >> TBlobStorageProxyTest::TestProxyGetSingleTimeout [GOOD] >> TBlobStorageProxyTest::TestProxyDiscoverSingleTimeout >> TBlobStorageProxyTest::TestBlock [GOOD] >> TBlobStorageProxyTest::TestBatchedPutRequestDoesNotContainAHugeBlob >> TBlobStorageProxyTest::TestVBlockVPutVGet [GOOD] >> TBlobStorageProxyTest::TestProxySimpleDiscoverNone >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus1Stripe [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Table [GOOD] >> TBlobStorageProxyTest::TestVPutVCollectVGetRace >> LocalPartition::WithoutPartitionWithRestart [GOOD] >> LocalPartition::WithoutPartitionUnknownEndpoint >> TBlobStorageProxyTest::TestPutGetStatusErasureMirror3Plus2 [GOOD] >> TBlobStorageProxyTest::TestProxySimpleDiscover ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestDoubleFailureStripe4Plus2 [GOOD] Test command err: 2025-06-25T14:43:16.664368Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001479/r3tmp/tmpIeQuBo//vdisk_bad_0/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 1 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1 2025-06-25T14:43:16.665878Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001479/r3tmp/tmpIeQuBo//vdisk_bad_1/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 2 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 2 2025-06-25T14:43:16.701035Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 2 VDISK[0:_:0:1:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 
SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:16.701391Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestVBlockVPutVGet [GOOD] Test command err: 2025-06-25T14:43:15.690089Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001416/r3tmp/tmp7J4LNi//vdisk_bad_0/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 1 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1 2025-06-25T14:43:15.694829Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1 Can not be initialized! Format is incomplete. 
Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus1Stripe [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Query >> TBlobStorageProxyTest::TestBatchedPutRequestDoesNotContainAHugeBlob [GOOD] |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPutGetStatusErasureMirror3Plus2 [GOOD] >> TBlobStorageProxyTest::TestProxySimpleDiscoverNone [GOOD] >> TBlobStorageProxyTest::TestPutGetMany >> TBlobStorageProxyTest::TestVPutVCollectVGetRace [GOOD] >> TBlobStorageProxyTest::TestVGetNoData >> LabeledDbCounters::OneTabletRemoveCounters [GOOD] >> LabeledDbCounters::OneTabletRestart >> TxUsage::WriteToTopic_Demo_12_Table [GOOD] |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestBatchedPutRequestDoesNotContainAHugeBlob [GOOD] >> TBlobStorageProxyTest::TestDoubleEmptyGet [GOOD] >> TBlobStorageProxyTest::TestCompactedGetMultipart [GOOD] >> TBlobStorageProxyTest::TestBlockPersistence >> TBlobStorageProxyTest::TestGetAndRangeGetManyBlobs >> TBlobStorageProxyTest::TestProxyRestoreOnGetStripe >> TBlobStorageProxyTest::TestProxySimpleDiscover [GOOD] >> TBlobStorageProxyTest::TestProxySimpleDiscoverMaxi >> TxUsage::WriteToTopic_Demo_12_Query >> TBlobStorageProxyTest::TestNormalMirror [GOOD] |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestCompactedGetMultipart [GOOD] >> TBlobStorageProxyTest::TestVPutVGet >> KqpBatchUpdate::Large_1 [GOOD] >> TBlobStorageProxyTest::TestProxyDiscoverSingleTimeout [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Query [GOOD] |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestNormalMirror [GOOD] >> TBlobStorageProxyTest::TestQuadrupleGroups >> TBlobStorageProxyTest::TestVGetNoData [GOOD] |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxyDiscoverSingleTimeout [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::ComplexRestoreBackupCollection-WithIncremental [GOOD] Test command err: 2025-06-25T14:42:43.469397Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:42:43.469560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:42:43.469632Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000962/r3tmp/tmpzhjnNl/pdisk_1.dat 2025-06-25T14:42:44.887033Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.128779s 2025-06-25T14:42:44.887161Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.128944s 2025-06-25T14:42:44.898678Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:42:44.918063Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:557:2482], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:42:44.918172Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:42:44.918216Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:42:44.918390Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:554:2480], Recipient [1:373:2367]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-25T14:42:44.918421Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:42:45.034733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-25T14:42:45.037385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:42:45.045583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:42:45.045685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:42:45.064524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:42:45.064705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:42:45.064862Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:42:45.073997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:42:45.082749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:42:45.082820Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:42:45.082852Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-25T14:42:45.083083Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:42:45.083118Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:42:45.083180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:42:45.083243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:42:45.083331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:42:45.083359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:42:45.094473Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:42:45.095089Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:42:45.095141Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-25T14:42:45.095295Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:42:45.095339Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:42:45.095393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:42:45.095443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:42:45.095488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:42:45.095565Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects 
ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:42:45.095931Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:42:45.095952Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 1:0 2025-06-25T14:42:45.096052Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [1:373:2367], Recipient [1:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:42:45.096080Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:42:45.096114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:42:45.096154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:42:45.096194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-25T14:42:45.096247Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:42:45.096343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:42:45.106569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:42:45.107021Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:42:45.107063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:42:45.115229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:42:45.116274Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877760, Sender [1:562:2487], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:564:2488] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T14:42:45.116342Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5050: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:42:45.116379Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5787: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-25T14:42:45.116471Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269091328, Sender [1:369:2363], Recipient [1:373:2367]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-25T14:42:45.116751Z node 1 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:566:2490], Recipient [1:373:2367]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:42:45.116782Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:42:45.116807Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:42:45.116921Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124996, Sender [1:554:2480], Recipient [1:373:2367]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-25T14:42:45.116942Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4968: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-25T14:42:45.116996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-25T14:42:45.117024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-25T14:42:45. ... schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 6/7 2025-06-25T14:43:07.509677Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:6 progress is 6/7 2025-06-25T14:43:07.509699Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 6/7 2025-06-25T14:43:07.509725Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 6/7, is published: true 2025-06-25T14:43:07.509942Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:373:2367], Recipient [3:373:2367]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:43:07.509970Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:43:07.510007Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715668:4, at schemeshard: 72057594046644480 2025-06-25T14:43:07.510032Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715668:4 ProgressState 2025-06-25T14:43:07.510094Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:43:07.510116Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:4 progress is 7/7 2025-06-25T14:43:07.510136Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 7/7 2025-06-25T14:43:07.510174Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715668:4 progress is 7/7 2025-06-25T14:43:07.510196Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 7/7 2025-06-25T14:43:07.510218Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 7/7, is 
published: true 2025-06-25T14:43:07.510290Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:1196:2903] message: TxId: 281474976715668 2025-06-25T14:43:07.510361Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 7/7 2025-06-25T14:43:07.510431Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:0 2025-06-25T14:43:07.510479Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715668:0 2025-06-25T14:43:07.510558Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 2 2025-06-25T14:43:07.510598Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:1 2025-06-25T14:43:07.510619Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715668:1 2025-06-25T14:43:07.510649Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 2 2025-06-25T14:43:07.510670Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:2 2025-06-25T14:43:07.510690Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715668:2 2025-06-25T14:43:07.510716Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-25T14:43:07.510739Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:3 2025-06-25T14:43:07.510760Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715668:3 2025-06-25T14:43:07.510847Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 3 2025-06-25T14:43:07.510883Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 3 2025-06-25T14:43:07.510927Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:4 2025-06-25T14:43:07.510946Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715668:4 2025-06-25T14:43:07.510997Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 21] was 3 2025-06-25T14:43:07.511022Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 3 2025-06-25T14:43:07.511048Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:5 
2025-06-25T14:43:07.511070Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715668:5 2025-06-25T14:43:07.511116Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 3 2025-06-25T14:43:07.511140Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 3 2025-06-25T14:43:07.511167Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715668:6 2025-06-25T14:43:07.511185Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715668:6 2025-06-25T14:43:07.511245Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 23] was 3 2025-06-25T14:43:07.511269Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 16] was 3 2025-06-25T14:43:07.511767Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:43:07.511911Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:43:07.511998Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:43:07.512113Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:43:07.512198Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:1196:2903] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715668 at schemeshard: 72057594046644480 2025-06-25T14:43:07.512813Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:1203:2909], Recipient [3:373:2367]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:43:07.512851Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:43:07.512879Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:43:07.707762Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037893, clientId# [3:1487:3134], serverId# [3:1488:3135], sessionId# [0:0:0] 2025-06-25T14:43:07.707977Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jykrnsm0es3rtaer28qkchy9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=Y2I1N2NmMzktNTNmYzMwM2UtYmE4NTUzYjQtYTMxNDhkY2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 2 } items { uint32_value: 20 } }, { items { uint32_value: 3 } items { uint32_value: 30 } }, { items { uint32_value: 4 } items { uint32_value: 40 } }, { items { uint32_value: 5 } items { uint32_value: 50 } } 2025-06-25T14:43:07.870527Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037895, clientId# [3:1516:3151], serverId# [3:1517:3152], sessionId# [0:0:0] 2025-06-25T14:43:07.870746Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jykrnst7435jcvjwkgbd9ftb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTEyZDUzMTYtYjI1MTFiMmItMjA4NjUzLWQzNTYwMjM0, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 11 } items { uint32_value: 101 } }, { items { uint32_value: 21 } items { uint32_value: 201 } }, { items { uint32_value: 31 } items { uint32_value: 301 } }, { items { uint32_value: 41 } items { uint32_value: 401 } }, { items { uint32_value: 51 } items { uint32_value: 501 } } 2025-06-25T14:43:08.029059Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037892, clientId# [3:1545:3168], serverId# [3:1546:3169], sessionId# [0:0:0] 2025-06-25T14:43:08.029305Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jykrnsza2jdaxv1wwcvska5t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NjA2Y2YzZmQtMzI0YTYzZWYtNzAwZTIwOGEtYjQyZWQ0MzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 12 } items { uint32_value: 102 } }, { items { uint32_value: 22 } items { uint32_value: 202 } }, { items { uint32_value: 32 } items { uint32_value: 302 } }, { items { uint32_value: 42 } items { uint32_value: 402 } }, { items { uint32_value: 52 } items { uint32_value: 502 } } 2025-06-25T14:43:08.167391Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [3:1574:3185], serverId# [3:1575:3186], sessionId# [0:0:0] 2025-06-25T14:43:08.167601Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jykrnt47e6x2ykfj6svvtecs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2IzZTY4NTctOTE5N2YzNGItNTU3MzQyMDMtYWY5NGJmZjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 13 } items { uint32_value: 103 } }, { items { uint32_value: 23 } items { uint32_value: 203 } }, { items { uint32_value: 33 } items { uint32_value: 303 } }, { items { uint32_value: 43 } items { uint32_value: 403 } }, { items { uint32_value: 53 } items { uint32_value: 503 } } >> TBlobStorageProxyTest::TestPutGetStatusErasure4Plus2Stripe [GOOD] |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestVGetNoData [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Large_1 [GOOD] Test command err: Trying to start YDB, gRPC: 23971, MsgBus: 63255 2025-06-25T14:42:52.576055Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897490817820638:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:52.580740Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b9a/r3tmp/tmprdfSBo/pdisk_1.dat 2025-06-25T14:42:52.829881Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897490817820617:2080] 1750862572573955 != 1750862572573958 2025-06-25T14:42:52.834941Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23971, node 1 2025-06-25T14:42:52.893839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:52.893867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:52.893874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:52.893963Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:42:52.948747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:52.948852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:52.951746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63255 TClient is connected to server localhost:63255 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:53.392835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:53.405244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:42:53.422557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:53.551255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:53.593217Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:53.681245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:53.761625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:55.075442Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897503702724151:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.075574Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.335882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.359763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.380932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.401496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.424703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.449408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.474989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.519600Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897503702724807:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.519687Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.519701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897503702724812:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.523035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:55.533679Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897503702724814:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:55.605215Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897503702724865:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:56.571059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:57.576287Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897490817820638:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:57.576371Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;actio ... ;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4518, MsgBus: 6360 2025-06-25T14:43:13.810380Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519897577300347234:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:13.810451Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b9a/r3tmp/tmpDFVRdX/pdisk_1.dat 2025-06-25T14:43:14.005197Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:14.006228Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519897577300347215:2080] 1750862593810018 != 1750862593810021 2025-06-25T14:43:14.023569Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:14.023680Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:14.025675Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4518, node 3 2025-06-25T14:43:14.065847Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:14.065871Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:14.065880Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:14.066034Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6360 TClient is connected to server localhost:6360 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:14.655763Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:14.672917Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:14.749896Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:14.868112Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:14.913475Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:14.974328Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:17.095859Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519897594480218024:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:17.095947Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:17.156095Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:17.191254Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:17.258437Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:17.284289Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:17.309647Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:17.379066Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:17.422778Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:17.507380Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519897594480218690:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:17.507459Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:17.507511Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519897594480218695:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:17.511163Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:17.521367Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519897594480218697:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:43:17.590672Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519897594480218748:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:18.690497Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:18.810564Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519897577300347234:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:18.810641Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |84.6%| [TA] $(B)/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/{meta.json ... results_accumulator.log} |84.6%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_backup/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpBatchUpdate::SimpleOnePartition [GOOD] >> TBlobStorageProxyTest::TestPutGetMany [GOOD] >> TBlobStorageProxyTest::TestVPutVGet [GOOD] >> TBlobStorageProxyTest::TestVPutVGetLimit |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPutGetStatusErasure4Plus2Stripe [GOOD] >> BsControllerConfig::MergeIntersectingBoxes [GOOD] >> BsControllerConfig::MoveGroups |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPutGetMany [GOOD] >> TBlobStorageProxyTest::TestProxyLongTailDiscoverSingleFailure [GOOD] |84.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> ResultFormatter::EmptyDict [GOOD] >> ResultFormatter::Dict [GOOD] >> ResultFormatter::Decimal [GOOD] >> BsControllerConfig::ExtendByCreatingSeparateBox [GOOD] >> BsControllerConfig::ExtendBoxAndStoragePool >> TBlobStorageProxyTest::TestProxyRestoreOnDiscoverBlock [GOOD] >> TBlobStorageProxyTest::TestBlockPersistence [GOOD] >> TBlobStorageProxyTest::TestCollectGarbage >> TBlobStorageProxyTest::TestProxyRestoreOnGetBlock [GOOD] >> TBlobStorageProxyTest::TestProxyRestoreOnGetMirror ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::SimpleOnePartition [GOOD] Test command err: Trying to start YDB, gRPC: 64864, MsgBus: 11086 2025-06-25T14:42:40.733242Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897437286792989:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000be3/r3tmp/tmpf4wxsa/pdisk_1.dat 
2025-06-25T14:42:41.585543Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.585645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.608178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.680889Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.730964Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 64864, node 1 2025-06-25T14:42:42.725356Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.725384Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.725390Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.725566Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11086 TClient is connected to server localhost:11086 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.417971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.497650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.537776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.733773Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897437286792989:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.733845Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828398Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897458761630923:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828672Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.941000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.964767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.987367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.011654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.035342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.104806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.134216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.182065Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471646533487:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.182133Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.182236Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471646533492:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.184780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.216880Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897471646533494:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.293423Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897471646533545:3437] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 8376, MsgBus: 2328 2025-06-25T14:42:53.989653Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897492623034622:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:53.989744Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000be3/r3tmp/tmpucVgw6/pdisk_1.dat 2025-06-25T14:42:54.091631Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:5 ... d: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:08.263801Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519897537373357672:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:08.263862Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 20480, MsgBus: 15642 2025-06-25T14:43:13.002398Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897577234795127:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:13.002455Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000be3/r3tmp/tmppDQKas/pdisk_1.dat 2025-06-25T14:43:13.155848Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:13.172517Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:13.172655Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:13.173972Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20480, node 4 2025-06-25T14:43:13.212736Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:13.212760Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:13.212767Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:13.212891Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15642 TClient is connected to server localhost:15642 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:13.596159Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:13.601315Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:43:13.610326Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:13.686508Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:13.835568Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:13.927364Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:43:14.069071Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:16.515100Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897590119698617:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:16.515207Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:16.571022Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:16.605452Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:16.637134Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:16.675356Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:16.707602Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:16.745547Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:16.819609Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:16.879829Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897590119699277:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:16.879896Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:16.880248Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897590119699282:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:16.885043Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:16.897475Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519897590119699284:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:43:16.979938Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897590119699335:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:18.002523Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519897577234795127:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:18.002581Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxyLongTailDiscoverSingleFailure [GOOD] Test command err: 2025-06-25T14:43:15.268759Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00147c/r3tmp/tmptZlpeJ//vdisk_bad_0/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 1 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1 2025-06-25T14:43:15.307193Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 
LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:16.802553Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00147c/r3tmp/tmptZlpeJ//vdisk_bad_1/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 2 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 2 2025-06-25T14:43:16.810201Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 2 VDISK[0:_:0:1:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 
LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:18.381244Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00147c/r3tmp/tmptZlpeJ//vdisk_bad_2/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 3 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 3 2025-06-25T14:43:18.384291Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 3 VDISK[0:_:0:2:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 3 Can not be initialized! Format is incomplete. 
Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:20.083025Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 4 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00147c/r3tmp/tmptZlpeJ//vdisk_bad_3/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 4 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 4 2025-06-25T14:43:20.094943Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 4 VDISK[0:_:0:3:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 
HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 4 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:21.623298Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 5 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00147c/r3tmp/tmptZlpeJ//vdisk_bad_4/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 5 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 5 2025-06-25T14:43:21.628503Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 5 VDISK[0:_:0:4:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 
GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 5 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:23.311218Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 6 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00147c/r3tmp/tmptZlpeJ//vdisk_bad_5/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 6 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 6 2025-06-25T14:43:23.315208Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 6 VDISK[0:_:0:5:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 
BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 6 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Decimal [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxyRestoreOnDiscoverBlock [GOOD] Test command err: 2025-06-25T14:43:16.739461Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00141d/r3tmp/tmpzajJhs//vdisk_bad_1/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 2 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 2 2025-06-25T14:43:16.784586Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 2 VDISK[0:_:0:1:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 
0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 2 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:19.554884Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00141d/r3tmp/tmpzajJhs//vdisk_bad_2/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 3 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 3 2025-06-25T14:43:19.560770Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 3 VDISK[0:_:0:2:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 3 Can not be initialized! Format is incomplete. 
Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:20.978072Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00141d/r3tmp/tmpzajJhs//vdisk_bad_2/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 3 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 3 2025-06-25T14:43:20.992703Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 3 VDISK[0:_:0:2:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 
HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:22.418692Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00141d/r3tmp/tmpzajJhs//vdisk_bad_2/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 3 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 3 2025-06-25T14:43:22.426934Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 3 VDISK[0:_:0:2:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 
GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR 2025-06-25T14:43:23.713522Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00141d/r3tmp/tmpzajJhs//vdisk_bad_2/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 3 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 3 2025-06-25T14:43:23.729397Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 3 VDISK[0:_:0:2:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 
BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 3 Can not be initialized! Format is incomplete. Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR >> TBlobStorageProxyTest::TestVPutVGetLimit [GOOD] >> TxUsage::WriteToTopic_Demo_37_Table [GOOD] >> TxUsage::WriteToTopic_Demo_7_Query [GOOD] |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestVPutVGetLimit [GOOD] >> TBlobStorageProxyTest::TestCollectGarbage [GOOD] >> TxUsage::WriteToTopic_Demo_37_Query >> KqpBatchDelete::SimpleOnePartition [GOOD] |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestCollectGarbage [GOOD] >> BsControllerConfig::DeleteStoragePool [GOOD] >> TxUsage::WriteToTopic_Demo_27_Table [GOOD] >> TBlobStorageProxyTest::TestProxySimpleDiscoverMaxi [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::SimpleOnePartition [GOOD] Test command err: Trying to start YDB, gRPC: 13128, MsgBus: 13054 2025-06-25T14:42:52.490647Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897486990786838:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:52.490779Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bb3/r3tmp/tmpcEa7uK/pdisk_1.dat 2025-06-25T14:42:52.809836Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897486990786818:2080] 1750862572489471 != 1750862572489474 2025-06-25T14:42:52.817056Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13128, node 1 2025-06-25T14:42:52.890294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:52.890476Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:52.890483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:52.890494Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:52.890501Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty 
maybe) 2025-06-25T14:42:52.890657Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:42:52.891890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13054 TClient is connected to server localhost:13054 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:53.397705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:42:53.422769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:53.511534Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:53.577944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:53.734842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:53.794421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:55.323663Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897499875690347:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.323803Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.620305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.673329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.701954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.727487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.753395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.785445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.815060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.898556Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897499875691008:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.898638Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.898662Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897499875691013:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.902042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:55.910939Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897499875691015:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:56.006434Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897504170658362:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:57.490584Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897486990786838:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:57.490643Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 1368, MsgBus: 23984 2025-06-25T14:43:00.828727Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897521263222583:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:00.828794Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPat ... s.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519897559501982670:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:13.808105Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21059, MsgBus: 11380 2025-06-25T14:43:17.737577Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897595189503258:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:17.737614Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bb3/r3tmp/tmpykxZkg/pdisk_1.dat 2025-06-25T14:43:17.843201Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519897595189503236:2080] 1750862597737021 != 1750862597737024 2025-06-25T14:43:17.852699Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21059, node 4 2025-06-25T14:43:17.876484Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:17.876559Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:17.877845Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:17.888834Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:17.888857Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:17.888864Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:43:17.888985Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11380 TClient is connected to server localhost:11380 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:43:18.308927Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:43:18.313983Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:43:18.319506Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:18.379026Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:43:18.548329Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:18.630335Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:43:18.766657Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:21.048268Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897612369374055:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:21.048366Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:21.112442Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:21.148196Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:21.181987Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:21.218868Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:21.253450Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:21.291274Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:21.368117Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:21.431719Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897612369374717:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:21.431815Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:21.431901Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897612369374722:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:21.436779Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:21.448160Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519897612369374724:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:43:21.549142Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897612369374775:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:22.740420Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519897595189503258:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:22.740497Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TxUsage::WriteToTopic_Demo_19_RestartNo_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::DeleteStoragePool [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:203:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:225:2066] recipient: [1:203:2077] Leader for TabletID 72057594037932033 is [1:233:2079] sender: [1:237:2066] recipient: [1:203:2077] 2025-06-25T14:43:05.605790Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:05.624541Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:05.629104Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:05.632178Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:43:05.634104Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:05.634347Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:05.634389Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:05.634691Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:05.646282Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:05.646433Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T14:43:05.647972Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:05.648127Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:05.648221Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:05.648298Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:233:2079] sender: [1:257:2066] recipient: [1:20:2067] 2025-06-25T14:43:05.661568Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:05.661714Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:05.673486Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:05.673594Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:05.673681Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:05.673747Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:05.673825Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:05.673903Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:05.673940Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:05.673993Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:05.684721Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:05.684882Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:05.695471Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:05.695596Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T14:43:05.700687Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T14:43:05.700740Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T14:43:05.700918Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T14:43:05.700967Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-25T14:43:05.713382Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {} Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:206:2077] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:225:2066] recipient: [11:206:2077] Leader for TabletID 72057594037932033 is [11:236:2079] sender: [11:237:2066] recipient: [11:206:2077] 2025-06-25T14:43:07.261170Z node 11 
:BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:07.262213Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:07.262436Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:07.263474Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:43:07.263912Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:07.264057Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:07.264087Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:07.264262Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:07.272168Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:07.272296Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T14:43:07.272411Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:07.272518Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:07.272636Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:07.272723Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:236:2079] sender: [11:257:2066] recipient: [11:20:2067] 2025-06-25T14:43:07.283871Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:07.283993Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:07.294690Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:07.294822Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:07.294927Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:07.295005Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:07.295093Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:07.295135Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} 
Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:07.295172Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:07.295222Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:07.305989Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:07.306123Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:07.316901Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:07.317035Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T14:43:07.318203Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T14:43:07.318245Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T14:43:07.318428Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T14:43:07.318477Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-25T14:43:07.318982Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {} Leader for TabletID 72057594037932033 is [0:0:0] sender: [21:3065:2106] recipient: [21:2964:2117] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [21:3065:2106] recipient: [21:2964:2117] Leader for TabletID 72057594037932033 is [21:3112:2119] sender: [21:3115:2106] recipient: [21:2964:2117] 2025-06-25T14:43:09.858084Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:09.858928Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:09.859085Z n ... 
1 Path# /dev/disk2 2025-06-25T14:43:19.490890Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 96:1002 Path# /dev/disk3 2025-06-25T14:43:19.490916Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 97:1000 Path# /dev/disk1 2025-06-25T14:43:19.490944Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 97:1001 Path# /dev/disk2 2025-06-25T14:43:19.490974Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 97:1002 Path# /dev/disk3 2025-06-25T14:43:19.491000Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 98:1000 Path# /dev/disk1 2025-06-25T14:43:19.491042Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 98:1001 Path# /dev/disk2 2025-06-25T14:43:19.491070Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 98:1002 Path# /dev/disk3 2025-06-25T14:43:19.491097Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 99:1000 Path# /dev/disk1 2025-06-25T14:43:19.491125Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 99:1001 Path# /dev/disk2 2025-06-25T14:43:19.491153Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 99:1002 Path# /dev/disk3 2025-06-25T14:43:19.491181Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 100:1000 Path# /dev/disk1 2025-06-25T14:43:19.491208Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 100:1001 Path# /dev/disk2 2025-06-25T14:43:19.491233Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 100:1002 Path# /dev/disk3 2025-06-25T14:43:19.491260Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 101:1000 Path# /dev/disk1 2025-06-25T14:43:19.491286Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 101:1001 Path# /dev/disk2 2025-06-25T14:43:19.491314Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 101:1002 Path# /dev/disk3 2025-06-25T14:43:19.491339Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 102:1000 Path# /dev/disk1 2025-06-25T14:43:19.491365Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 102:1001 Path# /dev/disk2 2025-06-25T14:43:19.491390Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 102:1002 Path# /dev/disk3 2025-06-25T14:43:19.491416Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 103:1000 Path# /dev/disk1 2025-06-25T14:43:19.491442Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 103:1001 Path# /dev/disk2 2025-06-25T14:43:19.491466Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 103:1002 Path# /dev/disk3 2025-06-25T14:43:19.491492Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 104:1000 Path# /dev/disk1 2025-06-25T14:43:19.491520Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 104:1001 Path# /dev/disk2 
2025-06-25T14:43:19.491545Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 104:1002 Path# /dev/disk3 2025-06-25T14:43:19.491572Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 105:1000 Path# /dev/disk1 2025-06-25T14:43:19.491598Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 105:1001 Path# /dev/disk2 2025-06-25T14:43:19.491625Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 105:1002 Path# /dev/disk3 2025-06-25T14:43:19.491649Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 106:1000 Path# /dev/disk1 2025-06-25T14:43:19.491674Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 106:1001 Path# /dev/disk2 2025-06-25T14:43:19.491702Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 106:1002 Path# /dev/disk3 2025-06-25T14:43:19.491728Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 107:1000 Path# /dev/disk1 2025-06-25T14:43:19.491754Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 107:1001 Path# /dev/disk2 2025-06-25T14:43:19.491791Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 107:1002 Path# /dev/disk3 2025-06-25T14:43:19.491818Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 108:1000 Path# /dev/disk1 2025-06-25T14:43:19.491849Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 108:1001 Path# /dev/disk2 2025-06-25T14:43:19.491875Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 108:1002 Path# /dev/disk3 2025-06-25T14:43:19.491900Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 109:1000 Path# /dev/disk1 2025-06-25T14:43:19.491926Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 109:1001 Path# /dev/disk2 2025-06-25T14:43:19.491952Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 109:1002 Path# /dev/disk3 2025-06-25T14:43:19.491975Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 110:1000 Path# /dev/disk1 2025-06-25T14:43:19.491999Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 110:1001 Path# /dev/disk2 2025-06-25T14:43:19.492025Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 110:1002 Path# /dev/disk3 2025-06-25T14:43:19.492052Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 111:1000 Path# /dev/disk1 2025-06-25T14:43:19.492076Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 111:1001 Path# /dev/disk2 2025-06-25T14:43:19.492102Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 111:1002 Path# /dev/disk3 2025-06-25T14:43:19.492129Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 112:1000 Path# /dev/disk1 2025-06-25T14:43:19.492155Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 112:1001 Path# /dev/disk2 
2025-06-25T14:43:19.492181Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 112:1002 Path# /dev/disk3 2025-06-25T14:43:19.492206Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 113:1000 Path# /dev/disk1 2025-06-25T14:43:19.492230Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 113:1001 Path# /dev/disk2 2025-06-25T14:43:19.492257Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 113:1002 Path# /dev/disk3 2025-06-25T14:43:19.492281Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 114:1000 Path# /dev/disk1 2025-06-25T14:43:19.492328Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 114:1001 Path# /dev/disk2 2025-06-25T14:43:19.492358Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 114:1002 Path# /dev/disk3 2025-06-25T14:43:19.492385Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 115:1000 Path# /dev/disk1 2025-06-25T14:43:19.492411Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 115:1001 Path# /dev/disk2 2025-06-25T14:43:19.492438Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 115:1002 Path# /dev/disk3 2025-06-25T14:43:19.492465Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 116:1000 Path# /dev/disk1 2025-06-25T14:43:19.492491Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 116:1001 Path# /dev/disk2 2025-06-25T14:43:19.492518Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 116:1002 Path# /dev/disk3 2025-06-25T14:43:19.492544Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 117:1000 Path# /dev/disk1 2025-06-25T14:43:19.492595Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 117:1001 Path# /dev/disk2 2025-06-25T14:43:19.492634Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 117:1002 Path# /dev/disk3 2025-06-25T14:43:19.492660Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 118:1000 Path# /dev/disk1 2025-06-25T14:43:19.492687Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 118:1001 Path# /dev/disk2 2025-06-25T14:43:19.492713Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 118:1002 Path# /dev/disk3 2025-06-25T14:43:19.492741Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 119:1000 Path# /dev/disk1 2025-06-25T14:43:19.492768Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 119:1001 Path# /dev/disk2 2025-06-25T14:43:19.492794Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 119:1002 Path# /dev/disk3 2025-06-25T14:43:19.492819Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 120:1000 Path# /dev/disk1 2025-06-25T14:43:19.492847Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 120:1001 Path# /dev/disk2 
2025-06-25T14:43:19.492874Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 120:1002 Path# /dev/disk3 2025-06-25T14:43:19.514083Z node 71 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool 1" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 50 PDiskFilter { Property { Type: ROT } } } } } 2025-06-25T14:43:19.609042Z node 71 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 71 Type# 268639257 2025-06-25T14:43:19.616811Z node 71 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 StoragePoolId: 2 Name: "storage pool 2" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 50 PDiskFilter { Property { Type: SSD } } } } Command { DeleteStoragePool { BoxId: 1 StoragePoolId: 2 ItemConfigGeneration: 1 } } } 2025-06-25T14:43:19.688862Z node 71 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DeleteStoragePool { BoxId: 1 StoragePoolId: 1 ItemConfigGeneration: 1 } } Command { QueryBaseConfig { } } } 2025-06-25T14:43:19.721695Z node 71 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 71 Type# 268639257 >> TxUsage::WriteToTopic_Demo_8_Table |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxySimpleDiscoverMaxi [GOOD] >> TxUsage::WriteToTopic_Demo_27_Query >> TxUsage::WriteToTopic_Demo_19_RestartNo_Query >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_2_Table >> TBlobStorageProxyTest::TestQuadrupleGroups [GOOD] >> TBlobStorageProxyTest::TestSingleFailure >> TExtSubDomainTest::GenericCases >> TExtSubDomainTest::DeclareAndDefineWithNodes-AlterDatabaseCreateHiveFirst-false >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-false >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-false >> TExtSubDomainTest::DeclareAndLs >> TBlobStorageProxyTest::TestProxyRestoreOnGetStripe [GOOD] >> TBlobStorageProxyTest::TestProxyRestoreOnGetMirror3Plus2 >> TxUsage::WriteToTopic_Demo_12_Query [GOOD] >> KqpBatchUpdate::Large_3 [GOOD] >> TxUsage::WriteToTopic_Demo_13_Table >> TBlobStorageProxyTest::TestProxyRestoreOnGetMirror [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::Large_3 [GOOD] Test command err: Trying to start YDB, gRPC: 20771, MsgBus: 21088 2025-06-25T14:42:40.733994Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897438267218179:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.734053Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c10/r3tmp/tmpjrpqBs/pdisk_1.dat 2025-06-25T14:42:41.545437Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.548027Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.548131Z node 
1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.585088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.730822Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 20771, node 1 2025-06-25T14:42:42.722144Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.722167Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.722174Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.722406Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21088 TClient is connected to server localhost:21088 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.407403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.519407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.598639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.748400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897438267218179:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.752595Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828579Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897459742056108:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828680Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.941127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.965773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.987861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.010051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.034173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.071282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.099118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.160058Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472626958666:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.160129Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.160224Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472626958671:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.163349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.172135Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897472626958673:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.260270Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897472626958724:3438] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.089299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:56.516838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:42:56.516874Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Trying to start YDB, gRPC: 16487, MsgBus: 28385 2025-06-25T14:43:07.199113 ... e=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c10/r3tmp/tmpiNWmJS/pdisk_1.dat 2025-06-25T14:43:07.330281Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16487, node 2 2025-06-25T14:43:07.331362Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:07.331429Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:07.335548Z node 2 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:43:07.335740Z node 2 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:43:07.335855Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:07.362202Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:07.362226Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:07.362233Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:07.362371Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28385 TClient is connected to server localhost:28385 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:07.842324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:07.851047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:43:07.865825Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:07.945238Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:08.103960Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:08.181359Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:43:08.303316Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:10.529978Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897566183101425:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:10.530061Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:10.602860Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:10.636480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:10.680360Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:10.710903Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:10.742772Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:10.776538Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:10.818834Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:10.904362Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897566183102083:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:10.904467Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:10.904549Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519897566183102088:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:10.908416Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:10.918893Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519897566183102090:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:43:11.004663Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519897570478069437:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:12.066602Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:43:12.199745Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519897553298197919:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:12.199824Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:43:22.329664Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:43:22.329686Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxyRestoreOnGetMirror [GOOD] >> TExtSubDomainTest::DeclareAndLs [GOOD] |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest >> TBlobStorageProxyTest::TestSingleFailure [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Query [GOOD] >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-false [GOOD] >> LocalPartition::WithoutPartitionUnknownEndpoint [GOOD] >> LocalPartition::WithoutPartitionPartitionRelocation >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndLs [GOOD] Test command err: 2025-06-25T14:43:31.896173Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897657759879080:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:31.896416Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a25/r3tmp/tmpiEY8p0/pdisk_1.dat 2025-06-25T14:43:32.353518Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:32.396228Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:32.396486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:32.398861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Connecting -> Connected TClient is connected to server localhost:21933 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:43:32.604472Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519897657759879202:2105] Handle TEvNavigate describe path dc-1 2025-06-25T14:43:32.641005Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519897662054846776:2258] HANDLE EvNavigateScheme dc-1 2025-06-25T14:43:32.642781Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519897662054846526:2119], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:32.642868Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519897662054846526:2119], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:43:32.643141Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519897662054846777:2259][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:43:32.645078Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897657759878936:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897662054846781:2259] 2025-06-25T14:43:32.645136Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897657759878936:2049] Subscribe: subscriber# [1:7519897662054846781:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.645173Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897657759878939:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897662054846782:2259] 2025-06-25T14:43:32.645180Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897657759878942:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897662054846783:2259] 2025-06-25T14:43:32.645195Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897657759878942:2055] Subscribe: subscriber# [1:7519897662054846783:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.645207Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897657759878939:2052] Subscribe: subscriber# [1:7519897662054846782:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.645256Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897662054846781:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897657759878936:2049] 2025-06-25T14:43:32.645285Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897662054846783:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897657759878942:2055] 2025-06-25T14:43:32.645300Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897662054846782:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897657759878939:2052] 
2025-06-25T14:43:32.645308Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897657759878936:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897662054846781:2259] 2025-06-25T14:43:32.645352Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897657759878942:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897662054846783:2259] 2025-06-25T14:43:32.645356Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897662054846777:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897662054846778:2259] 2025-06-25T14:43:32.645368Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897657759878939:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897662054846782:2259] 2025-06-25T14:43:32.645402Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897662054846777:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897662054846780:2259] 2025-06-25T14:43:32.645447Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519897662054846777:2259][/dc-1] Set up state: owner# [1:7519897662054846526:2119], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:32.646593Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897662054846777:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897662054846779:2259] 2025-06-25T14:43:32.646658Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519897662054846777:2259][/dc-1] Path was already updated: owner# [1:7519897662054846526:2119], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:32.646703Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897662054846781:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897662054846778:2259], cookie# 1 2025-06-25T14:43:32.646718Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897662054846782:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897662054846779:2259], cookie# 1 2025-06-25T14:43:32.646799Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897662054846783:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897662054846780:2259], cookie# 1 2025-06-25T14:43:32.646977Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897657759878936:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897662054846781:2259], cookie# 1 2025-06-25T14:43:32.647015Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897657759878939:2052] Handle 
NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897662054846782:2259], cookie# 1 2025-06-25T14:43:32.647029Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897657759878942:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897662054846783:2259], cookie# 1 2025-06-25T14:43:32.647053Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897662054846781:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897657759878936:2049], cookie# 1 2025-06-25T14:43:32.647065Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897662054846782:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897657759878939:2052], cookie# 1 2025-06-25T14:43:32.647088Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897662054846783:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897657759878942:2055], cookie# 1 2025-06-25T14:43:32.647122Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897662054846777:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897662054846778:2259], cookie# 1 2025-06-25T14:43:32.647142Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897662054846777:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:43:32.647155Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897662054846777:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897662054846779:2259], cookie# 1 2025-06-25T14:43:32.647168Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897662054846777:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:43:32.647179Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897662054846777:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897662054846780:2259], cookie# 1 2025-06-25T14:43:32.647196Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519897662054846777:2259][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:43:32.686802Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519897662054846526:2119], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { 
SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... : [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-25T14:43:32.938881Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519897662054846846:2301], recipient# [1:7519897662054846838:2299], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [72057594046644480:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: RedirectLookupError Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:43:32.938920Z node 1 :TX_PROXY INFO: describe.cpp:356: Actor# [1:7519897662054846838:2299] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 1 TClient::Ls response: Status: 128 StatusCode: ERROR Issues { message: "Default error" severity: 1 } SchemeStatus: 13 ErrorReason: "Could not resolve redirected path" TClient::Ls request: /dc-1 2025-06-25T14:43:32.948914Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519897657759879202:2105] Handle TEvNavigate describe path /dc-1 2025-06-25T14:43:32.964046Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519897662054846848:2303] HANDLE EvNavigateScheme /dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862612919 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750862612954 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } Children { Name: ".sys" PathId: 18446744073709551615 ... 
(TRUNCATED) 2025-06-25T14:43:32.964166Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519897662054846526:2119], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:32.964265Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519897662054846777:2259][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519897662054846526:2119], cookie# 4 2025-06-25T14:43:32.964343Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897662054846781:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897662054846778:2259], cookie# 4 2025-06-25T14:43:32.964361Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897662054846782:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897662054846779:2259], cookie# 4 2025-06-25T14:43:32.964374Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897662054846783:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897662054846780:2259], cookie# 4 2025-06-25T14:43:32.964392Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897657759878939:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897662054846782:2259], cookie# 4 2025-06-25T14:43:32.964406Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897657759878936:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897662054846781:2259], cookie# 4 2025-06-25T14:43:32.964412Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897657759878942:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897662054846783:2259], cookie# 4 2025-06-25T14:43:32.964464Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897662054846782:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519897657759878939:2052], cookie# 4 2025-06-25T14:43:32.964480Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897662054846781:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519897657759878936:2049], cookie# 4 2025-06-25T14:43:32.964495Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897662054846783:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519897657759878942:2055], cookie# 4 2025-06-25T14:43:32.964516Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897662054846777:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519897662054846779:2259], cookie# 4 2025-06-25T14:43:32.964532Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897662054846777:2259][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:43:32.964544Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897662054846777:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 
Partial: 0 }: sender# [1:7519897662054846778:2259], cookie# 4 2025-06-25T14:43:32.964554Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897662054846777:2259][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:43:32.964565Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897662054846777:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519897662054846780:2259], cookie# 4 2025-06-25T14:43:32.964581Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519897662054846777:2259][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:43:32.964624Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519897662054846526:2119], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:43:32.964670Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519897662054846526:2119], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519897662054846777:2259] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750862612919 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:43:32.964729Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519897662054846526:2119], cacheItem# { Subscriber: { Subscriber: [1:7519897662054846777:2259] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750862612919 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-25T14:43:32.964882Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519897662054846849:2304], recipient# [1:7519897662054846848:2303], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:43:32.964914Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519897662054846848:2303] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:43:32.964961Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519897662054846848:2303] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { 
ShowPrivateTable: true } 2025-06-25T14:43:32.965427Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519897662054846848:2303] Handle TEvDescribeSchemeResult Forward to# [1:7519897662054846847:2302] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 63 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862612919 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestSingleFailure [GOOD] Test command err: 2025-06-25T14:43:32.238547Z :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:427} PDiskId# 1 Can not be initialized! Format is incomplete. Magic sector is not present on disk. 
Maybe wrong PDiskKey Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/0013b7/r3tmp/tmp24JAzc//vdisk_bad_0/pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 123 PDiskId# 1 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 1 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1 2025-06-25T14:43:32.291063Z :BS_LOCALRECOVERY CRIT: localrecovery_public.cpp:103: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "PDisk is in StateError, reason# PDiskId# 1 Can not be initialized! Format is incomplete. 
Magic sector is not present on disk. Maybe wrong PDiskKey" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndDefineWithoutNodes-AlterDatabaseCreateHiveFirst-false [GOOD] Test command err: 2025-06-25T14:43:31.967401Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897655740037390:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:31.967487Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a36/r3tmp/tmpdngaTN/pdisk_1.dat 2025-06-25T14:43:32.456345Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:32.456438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:32.470437Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:32.477991Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:32.479002Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897655740037371:2080] 1750862611966582 != 1750862611966585 TClient is connected to server localhost:14953 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:43:32.779500Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519897655740037584:2101] Handle TEvNavigate describe path dc-1 2025-06-25T14:43:32.800519Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519897660035005379:2427] HANDLE EvNavigateScheme dc-1 2025-06-25T14:43:32.800677Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519897660035004932:2131], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:32.800717Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519897660035004932:2131], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:43:32.800904Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519897660035005380:2428][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:43:32.802477Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897655740037340:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897660035005384:2428] 2025-06-25T14:43:32.802535Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897655740037340:2049] Subscribe: subscriber# [1:7519897660035005384:2428], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.802595Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897655740037343:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897660035005385:2428] 2025-06-25T14:43:32.802617Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897655740037343:2052] Subscribe: subscriber# [1:7519897660035005385:2428], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.802635Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897655740037346:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897660035005386:2428] 2025-06-25T14:43:32.802647Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897655740037346:2055] Subscribe: subscriber# [1:7519897660035005386:2428], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.802683Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897660035005384:2428][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897655740037340:2049] 2025-06-25T14:43:32.802716Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897660035005385:2428][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897655740037343:2052] 2025-06-25T14:43:32.802741Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897660035005386:2428][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897655740037346:2055] 2025-06-25T14:43:32.802792Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:7519897660035005380:2428][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897660035005381:2428] 2025-06-25T14:43:32.802844Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897660035005380:2428][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897660035005382:2428] 2025-06-25T14:43:32.802902Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519897660035005380:2428][/dc-1] Set up state: owner# [1:7519897660035004932:2131], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:32.803026Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897660035005380:2428][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897660035005383:2428] 2025-06-25T14:43:32.803070Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519897660035005380:2428][/dc-1] Path was already updated: owner# [1:7519897660035004932:2131], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:32.803098Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897660035005384:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660035005381:2428], cookie# 1 2025-06-25T14:43:32.803121Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897660035005385:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660035005382:2428], cookie# 1 2025-06-25T14:43:32.803134Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897660035005386:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660035005383:2428], cookie# 1 2025-06-25T14:43:32.803156Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897655740037340:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897660035005384:2428] 2025-06-25T14:43:32.803192Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897655740037340:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660035005384:2428], cookie# 1 2025-06-25T14:43:32.803212Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897655740037343:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897660035005385:2428] 2025-06-25T14:43:32.803223Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897655740037343:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660035005385:2428], cookie# 1 2025-06-25T14:43:32.803236Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897655740037346:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897660035005386:2428] 
2025-06-25T14:43:32.803246Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897655740037346:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660035005386:2428], cookie# 1 2025-06-25T14:43:32.804375Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897660035005384:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897655740037340:2049], cookie# 1 2025-06-25T14:43:32.804402Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897660035005385:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897655740037343:2052], cookie# 1 2025-06-25T14:43:32.804419Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897660035005386:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897655740037346:2055], cookie# 1 2025-06-25T14:43:32.804468Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897660035005380:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897660035005381:2428], cookie# 1 2025-06-25T14:43:32.804496Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897660035005380:2428][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:43:32.804510Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897660035005380:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897660035005382:2428], cookie# 1 2025-06-25T14:43:32.804521Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897660035005380:2428][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:43:32.804531Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897660035005380:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897660035005383:2428], cookie# 1 2025-06-25T14:43:32.804552Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519897660035005380:2428][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:43:32.857755Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519897660035004932:2131], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 Shards ... 
ath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 2 IsSync: true Partial: 0 } 2025-06-25T14:43:33.650220Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519897664329973009:2655], recipient# [1:7519897664329973008:2654], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [72057594046644480:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: RedirectLookupError Kind: KindExtSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:43:33.650248Z node 1 :TX_PROXY INFO: describe.cpp:356: Actor# [1:7519897664329973008:2654] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 1 TClient::Ls response: Status: 128 StatusCode: ERROR Issues { message: "Default error" severity: 1 } SchemeStatus: 13 ErrorReason: "Could not resolve redirected path" TClient::Ls request: /dc-1 2025-06-25T14:43:33.657198Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519897655740037584:2101] Handle TEvNavigate describe path /dc-1 2025-06-25T14:43:33.673836Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519897664329973011:2657] HANDLE EvNavigateScheme /dc-1 2025-06-25T14:43:33.673967Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519897660035004932:2131], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:33.674053Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][1:7519897660035005380:2428][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7519897660035004932:2131], cookie# 4 2025-06-25T14:43:33.674099Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897660035005384:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660035005381:2428], cookie# 4 2025-06-25T14:43:33.674131Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897660035005385:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660035005382:2428], cookie# 4 2025-06-25T14:43:33.674144Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897660035005386:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660035005383:2428], cookie# 4 2025-06-25T14:43:33.674163Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897655740037340:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660035005384:2428], cookie# 4 2025-06-25T14:43:33.674186Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897655740037343:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# 
[1:7519897660035005385:2428], cookie# 4 2025-06-25T14:43:33.674209Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897655740037346:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660035005386:2428], cookie# 4 2025-06-25T14:43:33.674243Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897660035005384:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519897655740037340:2049], cookie# 4 2025-06-25T14:43:33.674254Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897660035005385:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519897655740037343:2052], cookie# 4 2025-06-25T14:43:33.674264Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897660035005386:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519897655740037346:2055], cookie# 4 2025-06-25T14:43:33.674283Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897660035005380:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519897660035005381:2428], cookie# 4 2025-06-25T14:43:33.674303Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897660035005380:2428][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:43:33.674318Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897660035005380:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519897660035005382:2428], cookie# 4 2025-06-25T14:43:33.674328Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897660035005380:2428][/dc-1] Sync is in progress: cookie# 4, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:43:33.674339Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897660035005380:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [1:7519897660035005383:2428], cookie# 4 2025-06-25T14:43:33.674353Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519897660035005380:2428][/dc-1] Sync is done in the ring group: cookie# 4, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:43:33.674578Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519897660035004932:2131], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:43:33.674632Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519897660035004932:2131], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7519897660035005380:2428] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750862613087 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:43:33.674682Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519897660035004932:2131], cacheItem# { Subscriber: { Subscriber: [1:7519897660035005380:2428] DomainOwnerId: 
72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750862613087 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-25T14:43:33.674799Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519897664329973012:2658], recipient# [1:7519897664329973011:2657], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:43:33.674847Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519897664329973011:2657] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:43:33.674906Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519897664329973011:2657] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:43:33.675509Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519897664329973011:2657] Handle TEvDescribeSchemeResult Forward to# [1:7519897664329973010:2656] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 63 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862613087 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "" Kind: "storage-pool-number-1" } StoragePools { Name: "" Kind: "storage-pool-number-2" } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 
MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862613087 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750862613115 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046... (TRUNCATED) >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Query |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest >> TCheckpointStorageTest::ShouldCreateCheckpoint >> TStorageServiceTest::ShouldNotRegisterPrevGeneration >> TStorageServiceTest::ShouldNotCreateCheckpointAfterGenerationChanged >> TCheckpointStorageTest::ShouldRegisterCoordinator >> TStateStorageTest::ShouldSaveGetOldSmallState2Tasks >> TStateStorageTest::ShouldIssueErrorOnWrongGetStateParams >> TStateStorageTest::ShouldSaveGetOldSmallState >> TStorageServiceTest::ShouldRegister >> TCheckpointStorageTest::ShouldUpdateCheckpointStatusForCheckpointsWithTheSameGenAndNo >> TExtSubDomainTest::GenericCases [GOOD] >> TExtSubDomainTest::DeclareAndDefineWithNodes-AlterDatabaseCreateHiveFirst-false [GOOD] >> TExtSubDomainTest::DeclareAndDefineWithNodes-AlterDatabaseCreateHiveFirst-true |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest >> TStateStorageTest::ShouldIssueErrorOnWrongGetStateParams [GOOD] >> TStateStorageTest::ShouldIssueErrorOnNonExistentState >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Table >> ResultFormatter::Primitive [GOOD] >> ResultFormatter::Struct [GOOD] |84.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_topic_reader/unittest >> TCheckpointStorageTest::ShouldRegisterCoordinator [GOOD] >> TCheckpointStorageTest::ShouldGetCoordinators ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::GenericCases [GOOD] Test command err: 2025-06-25T14:43:32.066696Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897658583397301:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:32.066772Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a40/r3tmp/tmpc7qalB/pdisk_1.dat 
2025-06-25T14:43:32.502057Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:32.512821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:32.513386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:32.524620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9832 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:43:32.701463Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519897658583397508:2118] Handle TEvNavigate describe path dc-1 2025-06-25T14:43:32.732372Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519897658583397982:2443] HANDLE EvNavigateScheme dc-1 2025-06-25T14:43:32.732519Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519897658583397573:2145], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:32.732549Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519897658583397573:2145], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:43:32.732726Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519897658583397983:2444][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:43:32.739041Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897654288429928:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897658583397987:2444] 2025-06-25T14:43:32.739113Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897654288429928:2050] Subscribe: subscriber# [1:7519897658583397987:2444], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.739186Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897654288429934:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897658583397989:2444] 2025-06-25T14:43:32.739209Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897654288429934:2056] Subscribe: subscriber# [1:7519897658583397989:2444], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.739271Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897658583397987:2444][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897654288429928:2050] 2025-06-25T14:43:32.739300Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897658583397989:2444][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897654288429934:2056] 2025-06-25T14:43:32.739338Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897658583397983:2444][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# 
[1:7519897658583397984:2444] 2025-06-25T14:43:32.739393Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897658583397983:2444][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897658583397986:2444] 2025-06-25T14:43:32.739445Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519897658583397983:2444][/dc-1] Set up state: owner# [1:7519897658583397573:2145], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:32.739564Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897658583397987:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897658583397984:2444], cookie# 1 2025-06-25T14:43:32.739579Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897658583397988:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897658583397985:2444], cookie# 1 2025-06-25T14:43:32.739590Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897658583397989:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897658583397986:2444], cookie# 1 2025-06-25T14:43:32.739613Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897654288429928:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897658583397987:2444] 2025-06-25T14:43:32.739632Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897654288429928:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897658583397987:2444], cookie# 1 2025-06-25T14:43:32.739653Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897654288429934:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897658583397989:2444] 2025-06-25T14:43:32.739664Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897654288429934:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897658583397989:2444], cookie# 1 2025-06-25T14:43:32.740382Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897654288429931:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897658583397988:2444] 2025-06-25T14:43:32.740420Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897654288429931:2053] Subscribe: subscriber# [1:7519897658583397988:2444], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.740460Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897654288429931:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897658583397988:2444], cookie# 1 2025-06-25T14:43:32.740496Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897658583397987:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897654288429928:2050], cookie# 1 2025-06-25T14:43:32.740509Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897658583397989:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897654288429934:2056], cookie# 1 2025-06-25T14:43:32.740531Z node 1 
:SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897658583397988:2444][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897654288429931:2053] 2025-06-25T14:43:32.740547Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897658583397988:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897654288429931:2053], cookie# 1 2025-06-25T14:43:32.740596Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897658583397983:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897658583397984:2444], cookie# 1 2025-06-25T14:43:32.740619Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897658583397983:2444][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:43:32.740640Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897658583397983:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897658583397986:2444], cookie# 1 2025-06-25T14:43:32.740650Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897658583397983:2444][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:43:32.740669Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897658583397983:2444][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897658583397985:2444] 2025-06-25T14:43:32.740708Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519897658583397983:2444][/dc-1] Path was already updated: owner# [1:7519897658583397573:2145], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:32.740725Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897658583397983:2444][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897658583397985:2444], cookie# 1 2025-06-25T14:43:32.740742Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519897658583397983:2444][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:43:32.740841Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897654288429931:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897658583397988:2444] 2025-06-25T14:43:32.778715Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519897658583397573:2145], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: 
EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataS ... 4:43:35.226530Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897671468300677:3010][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [1:7519897654288429934:2056] 2025-06-25T14:43:35.226566Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897671468300659:3010][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [1:7519897671468300672:3010] 2025-06-25T14:43:35.226569Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519897658583397573:2145], cacheItem# { Subscriber: { Subscriber: [1:7519897671468300657:3008] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:35.226597Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519897658583397573:2145], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-25T14:43:35.226599Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897671468300659:3010][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [1:7519897671468300673:3010] 2025-06-25T14:43:35.226624Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519897671468300659:3010][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Set up state: owner# [1:7519897658583397573:2145], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:35.226634Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519897658583397573:2145], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: 
[1:7519897671468300658:3009] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:43:35.226643Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897671468300659:3010][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [1:7519897671468300674:3010] 2025-06-25T14:43:35.226668Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519897671468300659:3010][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Ignore empty state: owner# [1:7519897658583397573:2145], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:35.226672Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519897658583397573:2145], cacheItem# { Subscriber: { Subscriber: [1:7519897671468300658:3009] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:35.226685Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897654288429928:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519897671468300675:3010] 2025-06-25T14:43:35.226700Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897654288429931:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519897671468300676:3010] 2025-06-25T14:43:35.226705Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519897658583397573:2145], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 } 2025-06-25T14:43:35.226715Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897654288429934:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519897671468300677:3010] 2025-06-25T14:43:35.226740Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519897658583397573:2145], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519897671468300659:3010] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:43:35.226781Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519897658583397573:2145], cacheItem# { Subscriber: { Subscriber: [1:7519897671468300659:3010] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: 
StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:35.226822Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519897671468300678:3011], recipient# [1:7519897671468300655:2298], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:35.226858Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519897671468300679:3012], recipient# [1:7519897671468300656:2299], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:36.150726Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519897658583397573:2145], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:36.150893Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519897658583397573:2145], cacheItem# { Subscriber: { Subscriber: [1:7519897662878365389:2532] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:36.150984Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519897675763267989:3016], recipient# [1:7519897675763267988:2300], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: 
KindUnknown DomainInfo }] } 2025-06-25T14:43:36.227397Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519897658583397573:2145], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:36.227512Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519897658583397573:2145], cacheItem# { Subscriber: { Subscriber: [1:7519897671468300659:3010] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:36.227601Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519897675763267997:3017], recipient# [1:7519897675763267996:2301], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TxUsage::WriteToTopic_Demo_37_Query [GOOD] |84.7%| [TA] $(B)/ydb/core/tx/replication/service/ut_topic_reader/test-results/unittest/{meta.json ... results_accumulator.log} |84.8%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_topic_reader/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TCheckpointStorageTest::ShouldCreateCheckpoint [GOOD] >> TCheckpointStorageTest::ShouldCreateGetCheckpoints |84.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Struct [GOOD] >> TStorageServiceTest::ShouldNotRegisterPrevGeneration [GOOD] >> TStorageServiceTest::ShouldNotCreateCheckpointWhenUnregistered >> TStorageServiceTest::ShouldRegister [GOOD] >> TStorageServiceTest::ShouldRegisterNextGeneration >> TStateStorageTest::ShouldSaveGetOldSmallState2Tasks [GOOD] >> TStorageServiceTest::ShouldCreateCheckpoint >> TCheckpointStorageTest::ShouldGetCoordinators [GOOD] >> TCheckpointStorageTest::ShouldMarkCheckpointsGc >> TStateStorageTest::ShouldSaveGetOldSmallState [GOOD] >> TStateStorageTest::ShouldSaveGetOldBigState >> TxUsage::WriteToTopic_Demo_38_Table >> TCheckpointStorageTest::ShouldUpdateCheckpointStatusForCheckpointsWithTheSameGenAndNo [GOOD] >> TGcTest::ShouldRemovePreviousCheckpoints >> TStateStorageTest::ShouldSaveGetOldBigState [GOOD] >> TStateStorageTest::ShouldSaveGetIncrementSmallState >> TStorageServiceTest::ShouldRegisterNextGeneration [GOOD] >> TStorageServiceTest::ShouldPendingAndCompleteCheckpoint >> TStateStorageTest::ShouldIssueErrorOnNonExistentState [GOOD] >> TStateStorageTest::ShouldLoadLastSnapshot >> TStorageServiceTest::ShouldNotCreateCheckpointAfterGenerationChanged [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutCreation >> TStorageServiceTest::ShouldNotCreateCheckpointWhenUnregistered [GOOD] >> TStorageServiceTest::ShouldNotCreateCheckpointTwice >> TStorageServiceTest::ShouldCreateCheckpoint [GOOD] >> TStorageServiceTest::ShouldGetCheckpoints >> TStateStorageTest::ShouldSaveGetIncrementSmallState [GOOD] >> TStateStorageTest::ShouldSaveGetIncrementBigState >> TBlobStorageProxyTest::TestProxyRestoreOnGetMirror3Plus2 [GOOD] >> TCheckpointStorageTest::ShouldCreateGetCheckpoints [GOOD] >> TCheckpointStorageTest::ShouldGetCheckpointsEmpty >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutCreation [GOOD] >> TStorageServiceTest::ShouldNotAbortCheckpointWithoutCreation >> TStateStorageTest::ShouldLoadLastSnapshot [GOOD] >> TStateStorageTest::ShouldNotGetNonExistendSnaphotState >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-false [GOOD] >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-true >> TYqlDateTimeTests::SimpleUpsertSelect >> TCheckpointStorageTest::ShouldMarkCheckpointsGc [GOOD] >> TCheckpointStorageTest::ShouldNotDeleteUnmarkedCheckpoints >> TTableProfileTests::ExplicitPartitionsSimple >> YdbYqlClient::TestColumnOrder >> GrpcConnectionStringParserTest::NoDatabaseFlag >> TGRpcYdbTest::ExecuteQueryImplicitSession >> YdbYqlClient::BuildInfo >> YdbYqlClient::TestYqlWrongTable >> TGRpcNewCoordinationClient::SessionSemaphoreInfiniteTimeout >> TStateStorageTest::ShouldSaveGetIncrementBigState [GOOD] >> TStateStorageTest::ShouldNotGetNonExistendState |84.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxyRestoreOnGetMirror3Plus2 [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartNo_Query [GOOD] >> TExtSubDomainTest::DeclareAndDefineWithNodes-AlterDatabaseCreateHiveFirst-true [GOOD] >> TCheckpointStorageTest::ShouldGetCheckpointsEmpty [GOOD] >> TCheckpointStorageTest::ShouldDeleteGraph >> TStorageServiceTest::ShouldNotCreateCheckpointTwice [GOOD] >> 
TStorageServiceTest::ShouldNotPendingCheckpointWithoutCreation >> TStorageServiceTest::ShouldNotAbortCheckpointWithoutCreation [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutPending >> TStateStorageTest::ShouldNotGetNonExistendSnaphotState [GOOD] >> TStateStorageTest::ShouldLoadIncrementSnapshot >> TStateStorageTest::ShouldNotGetNonExistendState [GOOD] >> TStorageServiceTest::ShouldPendingAndCompleteCheckpoint [GOOD] >> TStorageServiceTest::ShouldSaveState >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::DeclareAndDefineWithNodes-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2025-06-25T14:43:32.187071Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897660543777895:2152];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:32.187386Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a29/r3tmp/tmpG1b73q/pdisk_1.dat 2025-06-25T14:43:32.680812Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:32.680896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:32.692434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:32.719958Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:11450 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:43:32.928602Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519897660543777996:2118] Handle TEvNavigate describe path dc-1 2025-06-25T14:43:32.958659Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519897660543778464:2436] HANDLE EvNavigateScheme dc-1 2025-06-25T14:43:32.958781Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519897660543778019:2131], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:32.958815Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519897660543778019:2131], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:43:32.959065Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519897660543778465:2437][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:43:32.960892Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897660543777711:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897660543778470:2437] 2025-06-25T14:43:32.960890Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897660543777708:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897660543778469:2437] 2025-06-25T14:43:32.960935Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897660543777708:2050] Subscribe: subscriber# [1:7519897660543778469:2437], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.960960Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897660543777711:2053] Subscribe: subscriber# [1:7519897660543778470:2437], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.960990Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897660543777714:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897660543778471:2437] 2025-06-25T14:43:32.961012Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897660543777714:2056] Subscribe: subscriber# [1:7519897660543778471:2437], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.961036Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897660543778469:2437][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897660543777708:2050] 2025-06-25T14:43:32.961065Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897660543778470:2437][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897660543777711:2053] 2025-06-25T14:43:32.961090Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897660543777708:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897660543778469:2437] 2025-06-25T14:43:32.961090Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897660543778471:2437][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: 
[OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897660543777714:2056] 2025-06-25T14:43:32.961113Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897660543777711:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897660543778470:2437] 2025-06-25T14:43:32.961146Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897660543777714:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897660543778471:2437] 2025-06-25T14:43:32.961172Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897660543778465:2437][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897660543778466:2437] 2025-06-25T14:43:32.961218Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897660543778465:2437][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897660543778467:2437] 2025-06-25T14:43:32.961309Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519897660543778465:2437][/dc-1] Set up state: owner# [1:7519897660543778019:2131], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:32.961444Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897660543778465:2437][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897660543778468:2437] 2025-06-25T14:43:32.961499Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519897660543778465:2437][/dc-1] Path was already updated: owner# [1:7519897660543778019:2131], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:32.961553Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897660543778469:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660543778466:2437], cookie# 1 2025-06-25T14:43:32.961568Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897660543778470:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660543778467:2437], cookie# 1 2025-06-25T14:43:32.961580Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897660543778471:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660543778468:2437], cookie# 1 2025-06-25T14:43:32.961617Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897660543777708:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660543778469:2437], cookie# 1 2025-06-25T14:43:32.961641Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897660543777711:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660543778470:2437], cookie# 1 
2025-06-25T14:43:32.961656Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897660543777714:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660543778471:2437], cookie# 1 2025-06-25T14:43:32.962102Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897660543778469:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897660543777708:2050], cookie# 1 2025-06-25T14:43:32.962147Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897660543778470:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897660543777711:2053], cookie# 1 2025-06-25T14:43:32.962161Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897660543778471:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897660543777714:2056], cookie# 1 2025-06-25T14:43:32.962210Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897660543778465:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897660543778466:2437], cookie# 1 2025-06-25T14:43:32.962238Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897660543778465:2437][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:43:32.962254Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897660543778465:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897660543778467:2437], cookie# 1 2025-06-25T14:43:32.962266Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897660543778465:2437][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:43:32.962281Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897660543778465:2437][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897660543778468:2437], cookie# 1 2025-06-25T14:43:32.962303Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519897660543778465:2437][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:43:33.067199Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519897660543778019:2131], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 
200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... ] Subscribe: subscriber# [3:7519897694284882352:2782], path# /dc-1/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:40.190993Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7519897681399979205:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [3:7519897694284882350:2782] 2025-06-25T14:43:40.191009Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7519897681399979205:2050] Upsert description: path# /dc-1/.metadata/workload_manager/running_requests 2025-06-25T14:43:40.191027Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519897681399979503:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-25T14:43:40.191031Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7519897681399979205:2050] Subscribe: subscriber# [3:7519897694284882350:2782], path# /dc-1/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:40.191062Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519897694284882351:2782][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519897681399979208:2053] 2025-06-25T14:43:40.191087Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519897694284882352:2782][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519897681399979211:2056] 2025-06-25T14:43:40.191112Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519897694284882350:2782][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519897681399979205:2050] 2025-06-25T14:43:40.191138Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519897694284882339:2782][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519897694284882348:2782] 2025-06-25T14:43:40.191138Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519897681399979503:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519897694284882338:2781] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:43:40.191164Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519897694284882339:2782][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519897694284882349:2782] 
2025-06-25T14:43:40.191192Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:7519897694284882339:2782][/dc-1/.metadata/workload_manager/running_requests] Set up state: owner# [3:7519897681399979503:2129], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:40.191236Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519897681399979503:2129], cacheItem# { Subscriber: { Subscriber: [3:7519897694284882338:2781] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:40.191246Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519897694284882339:2782][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [3:7519897694284882347:2782] 2025-06-25T14:43:40.191279Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:7519897694284882339:2782][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [3:7519897681399979503:2129], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:40.191297Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519897681399979503:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-25T14:43:40.191304Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519897681399979208:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519897694284882351:2782] 2025-06-25T14:43:40.191328Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519897681399979211:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519897694284882352:2782] 2025-06-25T14:43:40.191347Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519897681399979503:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519897694284882339:2782] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:43:40.191370Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7519897681399979205:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519897694284882350:2782] 2025-06-25T14:43:40.191397Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519897681399979503:2129], cacheItem# { Subscriber: { Subscriber: [3:7519897694284882339:2782] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 
Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:40.191462Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519897694284882353:2784], recipient# [3:7519897694284882337:2281], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:40.534679Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519897681399979503:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:40.534825Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519897681399979503:2129], cacheItem# { Subscriber: { Subscriber: [3:7519897685694947678:2770] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:40.534934Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519897694284882355:2785], recipient# [3:7519897694284882354:2282], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:41.199787Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519897681399979503:2129], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:41.199905Z node 3 
:TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519897681399979503:2129], cacheItem# { Subscriber: { Subscriber: [3:7519897694284882330:2780] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:41.199989Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519897698579849671:2789], recipient# [3:7519897698579849670:2283], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TStorageServiceTest::ShouldGetCheckpoints [GOOD] >> TStorageServiceTest::ShouldAbortCheckpoint >> YdbYqlClient::TestReadTableMultiShardWholeTable >> TxUsage::WriteToTopic_Demo_13_Table [GOOD] >> TxUsage::WriteToTopic_Demo_8_Table [GOOD] >> TStorageServiceTest::ShouldNotPendingCheckpointWithoutCreation [GOOD] >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged >> BsControllerConfig::MoveGroups [GOOD] >> TxUsage::WriteToTopic_Demo_13_Query >> TStateStorageTest::ShouldLoadIncrementSnapshot [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::MoveGroups [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3065:2106] recipient: [1:2964:2117] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3065:2106] recipient: [1:2964:2117] Leader for TabletID 72057594037932033 is [1:3112:2119] sender: [1:3114:2106] recipient: [1:2964:2117] 2025-06-25T14:43:05.908935Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:05.912527Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:05.912814Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:05.914457Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:43:05.914913Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:05.915064Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:05.915084Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:05.915299Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:05.923159Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:05.923292Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} 
Execute tx 2025-06-25T14:43:05.923480Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:05.923607Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:05.923721Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:05.923786Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:3112:2119] sender: [1:3138:2106] recipient: [1:60:2107] 2025-06-25T14:43:05.935391Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:05.935497Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:05.946002Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:05.946086Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:05.946128Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:05.946190Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:05.946287Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:05.946336Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:05.946377Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:05.946431Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:05.957085Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:05.957198Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:05.967871Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:05.968016Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T14:43:05.969150Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T14:43:05.969192Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T14:43:05.969353Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T14:43:05.969415Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-25T14:43:05.982692Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 1 } Host { Key { Fqdn: 
"::1" IcPort: 12048 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 150 PDiskFilter { Property { Type: ROT } } } } } 2025-06-25T14:43:05.984098Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2025-06-25T14:43:05.984151Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2025-06-25T14:43:05.984174Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2025-06-25T14:43:05.984194Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2025-06-25T14:43:05.984216Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2025-06-25T14:43:05.984238Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2025-06-25T14:43:05.984260Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2025-06-25T14:43:05.984295Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2025-06-25T14:43:05.984335Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2025-06-25T14:43:05.984378Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2025-06-25T14:43:05.984414Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2025-06-25T14:43:05.984440Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2025-06-25T14:43:05.984461Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2025-06-25T14:43:05.984481Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2025-06-25T14:43:05.984501Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2025-06-25T14:43:05.984532Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2025-06-25T14:43:05.984556Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2025-06-25T14:43:05.984579Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2025-06-25T14:43:05.984601Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2025-06-25T14:43:05.984632Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2025-06-25T14:43:05.984660Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:3 ... 
Id# 178:1001 Path# /dev/disk2 2025-06-25T14:43:34.322753Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 178:1002 Path# /dev/disk3 2025-06-25T14:43:34.322782Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 179:1000 Path# /dev/disk1 2025-06-25T14:43:34.322819Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 179:1001 Path# /dev/disk2 2025-06-25T14:43:34.322847Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 179:1002 Path# /dev/disk3 2025-06-25T14:43:34.322873Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 180:1000 Path# /dev/disk1 2025-06-25T14:43:34.322903Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 180:1001 Path# /dev/disk2 2025-06-25T14:43:34.322931Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 180:1002 Path# /dev/disk3 2025-06-25T14:43:34.322957Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 181:1000 Path# /dev/disk1 2025-06-25T14:43:34.322984Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 181:1001 Path# /dev/disk2 2025-06-25T14:43:34.323012Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 181:1002 Path# /dev/disk3 2025-06-25T14:43:34.323039Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 182:1000 Path# /dev/disk1 2025-06-25T14:43:34.323067Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 182:1001 Path# /dev/disk2 2025-06-25T14:43:34.323094Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 182:1002 Path# /dev/disk3 2025-06-25T14:43:34.323120Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 183:1000 Path# /dev/disk1 2025-06-25T14:43:34.323147Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 183:1001 Path# /dev/disk2 2025-06-25T14:43:34.323175Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 183:1002 Path# /dev/disk3 2025-06-25T14:43:34.323201Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 184:1000 Path# /dev/disk1 2025-06-25T14:43:34.323228Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 184:1001 Path# /dev/disk2 2025-06-25T14:43:34.323256Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 184:1002 Path# /dev/disk3 2025-06-25T14:43:34.323280Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 185:1000 Path# /dev/disk1 2025-06-25T14:43:34.323304Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 185:1001 Path# /dev/disk2 2025-06-25T14:43:34.323332Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 185:1002 Path# /dev/disk3 2025-06-25T14:43:34.323358Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 186:1000 Path# /dev/disk1 2025-06-25T14:43:34.323384Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk 
PDiskId# 186:1001 Path# /dev/disk2 2025-06-25T14:43:34.323410Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 186:1002 Path# /dev/disk3 2025-06-25T14:43:34.323445Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 187:1000 Path# /dev/disk1 2025-06-25T14:43:34.323480Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 187:1001 Path# /dev/disk2 2025-06-25T14:43:34.323507Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 187:1002 Path# /dev/disk3 2025-06-25T14:43:34.323535Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 188:1000 Path# /dev/disk1 2025-06-25T14:43:34.323562Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 188:1001 Path# /dev/disk2 2025-06-25T14:43:34.323588Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 188:1002 Path# /dev/disk3 2025-06-25T14:43:34.323615Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 189:1000 Path# /dev/disk1 2025-06-25T14:43:34.323640Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 189:1001 Path# /dev/disk2 2025-06-25T14:43:34.323667Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 189:1002 Path# /dev/disk3 2025-06-25T14:43:34.323693Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 190:1000 Path# /dev/disk1 2025-06-25T14:43:34.323719Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 190:1001 Path# /dev/disk2 2025-06-25T14:43:34.323746Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 190:1002 Path# /dev/disk3 2025-06-25T14:43:34.323774Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 191:1000 Path# /dev/disk1 2025-06-25T14:43:34.323803Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 191:1001 Path# /dev/disk2 2025-06-25T14:43:34.323828Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 191:1002 Path# /dev/disk3 2025-06-25T14:43:34.323851Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 192:1000 Path# /dev/disk1 2025-06-25T14:43:34.323877Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 192:1001 Path# /dev/disk2 2025-06-25T14:43:34.323902Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 192:1002 Path# /dev/disk3 2025-06-25T14:43:34.323929Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 193:1000 Path# /dev/disk1 2025-06-25T14:43:34.323953Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 193:1001 Path# /dev/disk2 2025-06-25T14:43:34.323980Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 193:1002 Path# /dev/disk3 2025-06-25T14:43:34.324004Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 194:1000 Path# /dev/disk1 2025-06-25T14:43:34.324029Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk 
PDiskId# 194:1001 Path# /dev/disk2 2025-06-25T14:43:34.324056Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 194:1002 Path# /dev/disk3 2025-06-25T14:43:34.324086Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 195:1000 Path# /dev/disk1 2025-06-25T14:43:34.324112Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 195:1001 Path# /dev/disk2 2025-06-25T14:43:34.324137Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 195:1002 Path# /dev/disk3 2025-06-25T14:43:34.324163Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 196:1000 Path# /dev/disk1 2025-06-25T14:43:34.324190Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 196:1001 Path# /dev/disk2 2025-06-25T14:43:34.324216Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 196:1002 Path# /dev/disk3 2025-06-25T14:43:34.324243Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 197:1000 Path# /dev/disk1 2025-06-25T14:43:34.324268Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 197:1001 Path# /dev/disk2 2025-06-25T14:43:34.324295Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 197:1002 Path# /dev/disk3 2025-06-25T14:43:34.324548Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 198:1000 Path# /dev/disk1 2025-06-25T14:43:34.324585Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 198:1001 Path# /dev/disk2 2025-06-25T14:43:34.324612Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 198:1002 Path# /dev/disk3 2025-06-25T14:43:34.324641Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 199:1000 Path# /dev/disk1 2025-06-25T14:43:34.324666Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 199:1001 Path# /dev/disk2 2025-06-25T14:43:34.324692Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 199:1002 Path# /dev/disk3 2025-06-25T14:43:34.324719Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 200:1000 Path# /dev/disk1 2025-06-25T14:43:34.324746Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 200:1001 Path# /dev/disk2 2025-06-25T14:43:34.324772Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 200:1002 Path# /dev/disk3 2025-06-25T14:43:34.605382Z node 151 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.287181s 2025-06-25T14:43:34.605588Z node 151 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.287411s 2025-06-25T14:43:34.618472Z node 151 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 151 Type# 268639257 2025-06-25T14:43:34.641792Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } 2025-06-25T14:43:34.746149Z node 151 :BS_CONTROLLER DEBUG: 
{BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 1 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 1 ExplicitGroupId: 2147483748 } } } 2025-06-25T14:43:34.762807Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } 2025-06-25T14:43:34.855235Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 2 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 2 ExplicitGroupId: 2147483749 } } } 2025-06-25T14:43:34.871424Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } 2025-06-25T14:43:34.945324Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 3 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 3 } } } 2025-06-25T14:43:34.963577Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } } >> TStorageServiceTest::ShouldSaveState [GOOD] >> TStorageServiceTest::ShouldUseGc >> TTableProfileTests::UseDefaultProfile >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutPending [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged >> TGRpcYdbTest::ExecuteQueryImplicitSession [GOOD] >> TGRpcYdbTest::ExecuteQueryWithUuid >> YdbYqlClient::BuildInfo [GOOD] >> YdbYqlClient::AlterTableAddIndexAsyncOp >> TGRpcNewCoordinationClient::SessionSemaphoreInfiniteTimeout [GOOD] >> TGRpcNewCoordinationClient::SessionReconnectReattach >> TCheckpointStorageTest::ShouldDeleteGraph [GOOD] >> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints >> TGcTest::ShouldRemovePreviousCheckpoints [GOOD] >> TGcTest::ShouldIgnoreIncrementCheckpoint >> TCheckpointStorageTest::ShouldNotDeleteUnmarkedCheckpoints [GOOD] >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId >> GrpcConnectionStringParserTest::NoDatabaseFlag [GOOD] >> GrpcConnectionStringParserTest::IncorrectConnectionString |84.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldNotGetNonExistendState [GOOD] >> YdbYqlClient::TestYqlWrongTable [GOOD] >> YdbYqlClient::TraceId >> GrpcConnectionStringParserTest::IncorrectConnectionString [GOOD] >> GrpcConnectionStringParserTest::CommonClientSettingsFromConnectionString >> TxUsage::WriteToTopic_Demo_8_Query >> TGRpcClientLowTest::SimpleRequest >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged [GOOD] >> YdbYqlClient::TestColumnOrder [GOOD] >> YdbYqlClient::TestDecimal >> TStorageServiceTest::ShouldAbortCheckpoint [GOOD] >> TStorageServiceTest::ShouldGetState >> BsControllerConfig::ExtendBoxAndStoragePool [GOOD] >> TYqlDateTimeTests::SimpleUpsertSelect [GOOD] >> TYqlDateTimeTests::DatetimeKey >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Query [GOOD] >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId [GOOD] >> TTableProfileTests::ExplicitPartitionsSimple [GOOD] >> TTableProfileTests::ExplicitPartitionsUnordered ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::ExtendBoxAndStoragePool 
[GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3065:2106] recipient: [1:2964:2117] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3065:2106] recipient: [1:2964:2117] Leader for TabletID 72057594037932033 is [1:3112:2119] sender: [1:3114:2106] recipient: [1:2964:2117] 2025-06-25T14:43:06.051672Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:06.058203Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:06.058586Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:06.060705Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:43:06.061326Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:06.061514Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:06.061548Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:06.061887Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:06.071291Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:06.071456Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T14:43:06.071623Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:06.071764Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:06.071865Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:06.071936Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:3112:2119] sender: [1:3138:2106] recipient: [1:60:2107] 2025-06-25T14:43:06.083812Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:06.083968Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:06.094721Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:06.094857Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:06.094946Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:06.095028Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:06.095151Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:06.095224Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:06.095277Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:06.095331Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:06.106063Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:06.106207Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:06.116978Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:06.117126Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T14:43:06.118483Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T14:43:06.118531Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T14:43:06.118758Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T14:43:06.118819Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-25T14:43:06.133856Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 
12020 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 60 PDiskFilter { Property { Type: ROT } } } } Command { QueryBaseConfig { } } } 2025-06-25T14:43:06.134730Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2025-06-25T14:43:06.134785Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2025-06-25T14:43:06.134814Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2025-06-25T14:43:06.134840Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2025-06-25T14:43:06.134866Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2025-06-25T14:43:06.134892Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2025-06-25T14:43:06.134946Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2025-06-25T14:43:06.135002Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2025-06-25T14:43:06.135035Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2025-06-25T14:43:06.135059Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2025-06-25T14:43:06.135082Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2025-06-25T14:43:06.135110Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2025-06-25T14:43:06.135135Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2025-06-25T14:43:06.135161Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2025-06-25T14:43:06.135185Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2025-06-25T14:43:06.135237Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2025-06-25T14:43:06.135267Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2025-06-25T14:43:06.135292Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2025-06-25T14:43:06.135326Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2025-06-25T14:43:06.135358Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2025-06-25T14:43:06.135385Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1002 Path# /dev/disk3 2025-06-25T14:43:06.135409Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1000 Path# /dev/disk1 2025-06-25T14:43:06.135444Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1001 Path# 
/dev/disk2 2025-06-25T14:43:06.135486Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1002 Path# /dev/disk3 2025-06-25T14:43:06.135523Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1000 Path# /dev/disk1 2025-06-25T14:43:06.135556Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1001 Path# /dev/disk2 2025-06-25T14:43:06.135579Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1002 Path# /dev/disk3 2025-06-25T14:43:06.135614Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1000 Path# /dev/disk1 2025-06-25T14:43:06.135644Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1001 Path# /dev/disk2 2025-06-25T14:43:06.135694Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1002 Path# /dev/disk3 2025-06-25T14:43:06.135721Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1000 Path# /dev/disk1 2025-06-25T14:43:06.135745Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1001 Path# /dev/disk2 2025-06-25T14:43:06.135778Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Cr ... S_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 204:1002 Path# /dev/disk3 2025-06-25T14:43:36.564643Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 205:1000 Path# /dev/disk1 2025-06-25T14:43:36.564684Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 205:1001 Path# /dev/disk2 2025-06-25T14:43:36.564721Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 205:1002 Path# /dev/disk3 2025-06-25T14:43:36.564752Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 206:1000 Path# /dev/disk1 2025-06-25T14:43:36.564779Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 206:1001 Path# /dev/disk2 2025-06-25T14:43:36.564814Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 206:1002 Path# /dev/disk3 2025-06-25T14:43:36.564846Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 207:1000 Path# /dev/disk1 2025-06-25T14:43:36.564872Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 207:1001 Path# /dev/disk2 2025-06-25T14:43:36.564900Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 207:1002 Path# /dev/disk3 2025-06-25T14:43:36.564940Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 208:1000 Path# /dev/disk1 2025-06-25T14:43:36.564972Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 208:1001 Path# /dev/disk2 2025-06-25T14:43:36.565021Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 208:1002 Path# /dev/disk3 2025-06-25T14:43:36.565075Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 209:1000 Path# /dev/disk1 2025-06-25T14:43:36.565129Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new 
pdisk PDiskId# 209:1001 Path# /dev/disk2 2025-06-25T14:43:36.565181Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 209:1002 Path# /dev/disk3 2025-06-25T14:43:36.565240Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 210:1000 Path# /dev/disk1 2025-06-25T14:43:36.565283Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 210:1001 Path# /dev/disk2 2025-06-25T14:43:36.565327Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 210:1002 Path# /dev/disk3 2025-06-25T14:43:36.893492Z node 161 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.335320s 2025-06-25T14:43:36.893700Z node 161 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.335548s 2025-06-25T14:43:36.908164Z node 161 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 161 Type# 268639257 2025-06-25T14:43:36.943162Z node 161 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 4 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 4 } Host { Key { Fqdn: "::1" 
IcPort: 12036 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12051 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12052 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12053 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12054 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12055 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12056 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12057 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12058 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12059 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12060 } HostConfigId: 4 } ItemConfigGeneration: 1 } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 180 PDiskFilter { Property { Type: ROT } } ItemConfigGeneration: 1 } } Command { QueryBaseConfig { } } } 2025-06-25T14:43:36.945352Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 211:1000 Path# /dev/disk1 2025-06-25T14:43:36.945418Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 211:1001 Path# /dev/disk2 2025-06-25T14:43:36.945445Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 211:1002 Path# /dev/disk3 2025-06-25T14:43:36.945471Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 212:1000 Path# /dev/disk1 2025-06-25T14:43:36.945499Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 212:1001 Path# /dev/disk2 2025-06-25T14:43:36.945542Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 212:1002 Path# /dev/disk3 2025-06-25T14:43:36.945569Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 213:1000 Path# /dev/disk1 2025-06-25T14:43:36.945594Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 213:1001 Path# /dev/disk2 2025-06-25T14:43:36.945618Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 213:1002 Path# /dev/disk3 2025-06-25T14:43:36.945647Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 214:1000 Path# /dev/disk1 2025-06-25T14:43:36.945687Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 214:1001 Path# /dev/disk2 2025-06-25T14:43:36.945715Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 214:1002 Path# /dev/disk3 2025-06-25T14:43:36.945741Z node 161 :BS_CONTROLLER 
NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 215:1000 Path# /dev/disk1 2025-06-25T14:43:36.945764Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 215:1001 Path# /dev/disk2 2025-06-25T14:43:36.945788Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 215:1002 Path# /dev/disk3 2025-06-25T14:43:36.945813Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 216:1000 Path# /dev/disk1 2025-06-25T14:43:36.945842Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 216:1001 Path# /dev/disk2 2025-06-25T14:43:36.945868Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 216:1002 Path# /dev/disk3 2025-06-25T14:43:36.945895Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 217:1000 Path# /dev/disk1 2025-06-25T14:43:36.945921Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 217:1001 Path# /dev/disk2 2025-06-25T14:43:36.945946Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 217:1002 Path# /dev/disk3 2025-06-25T14:43:36.945969Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 218:1000 Path# /dev/disk1 2025-06-25T14:43:36.945993Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 218:1001 Path# /dev/disk2 2025-06-25T14:43:36.946015Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 218:1002 Path# /dev/disk3 2025-06-25T14:43:36.946044Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 219:1000 Path# /dev/disk1 2025-06-25T14:43:36.946070Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 219:1001 Path# /dev/disk2 2025-06-25T14:43:36.946097Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 219:1002 Path# /dev/disk3 2025-06-25T14:43:36.946122Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 220:1000 Path# /dev/disk1 2025-06-25T14:43:36.946150Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 220:1001 Path# /dev/disk2 2025-06-25T14:43:36.946176Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 220:1002 Path# /dev/disk3 2025-06-25T14:43:37.024057Z node 161 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 161 Type# 268639257 >> TGRpcYdbTest::CreateTableBadRequest >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Table |84.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldLoadIncrementSnapshot [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged [GOOD] >> YdbYqlClient::TestReadTableMultiShardWholeTable [GOOD] >> YdbYqlClient::TestReadTableMultiShardWholeTableUseSnapshot >> YdbLogStore::LogStore >> TGRpcNewCoordinationClient::SessionReconnectReattach [GOOD] >> TGRpcNewCoordinationClientAuth::OwnersAndPermissions >> YdbYqlClient::SecurityTokenAuth ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged [GOOD] 
Test command err: 2025-06-25T14:43:37.750429Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7519897679811940221:2048] with connection to localhost:27824:local 2025-06-25T14:43:37.752757Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:38.582505Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:38.582536Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:38.582836Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.16] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:38.673805Z node 1 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:197: [graph_graphich.16] Failed to register graph:
: Warning: Table: local/TStorageServiceTestShouldNotRegisterPrevGeneration/coordinators_sync, pk: graph_graphich, current generation: 17, expected/new generation: 16, operation: RegisterCheck, code: 400130 2025-06-25T14:43:38.673834Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.16] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:39.400509Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7519897689928330045:2048] with connection to localhost:27824:local 2025-06-25T14:43:39.401408Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:40.047192Z node 2 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:1] Failed to create checkpoint:
: Warning: Table: local/TStorageServiceTestShouldNotCreateCheckpointWhenUnregistered/coordinators_sync, pk: graph_graphich, current generation: 0, expected/new generation: 17, operation: Check, code: 400130 2025-06-25T14:43:40.047218Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:40.912573Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7519897693884724783:2048] with connection to localhost:27824:local 2025-06-25T14:43:40.912669Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:41.177898Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:41.177929Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:41.178285Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:41.932610Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-25T14:43:41.932687Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:41.933079Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:42.256573Z node 3 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:1] Failed to create checkpoint:
: Error: Constraint violated. Table: `local/TStorageServiceTestShouldNotCreateCheckpointTwice/checkpoints_metadata`., code: 2012
: Error: Conflict with existing key., code: 2012 2025-06-25T14:43:42.256601Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:43.470389Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7519897702118727463:2048] with connection to localhost:27824:local 2025-06-25T14:43:43.470493Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:43.844544Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:43.844575Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:43.844880Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-25T14:43:44.047759Z node 4 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:274: [graph_graphich.17] [17:1] Failed to set 'PendingCommit' status:
: Warning: Failed to select checkpoint '17:1', code: 400080 2025-06-25T14:43:44.047792Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T14:43:45.208293Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7519897712530953688:2048] with connection to localhost:27824:local 2025-06-25T14:43:45.213364Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:45.472394Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:45.472418Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:45.472903Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:46.444562Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-25T14:43:46.444591Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:46.444879Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:46.647701Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-25T14:43:46.647747Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:46.648061Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-25T14:43:46.779516Z node 5 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:274: [graph_graphich.17] [17:1] Failed to set 'PendingCommit' status:
: Warning: Table: local/TStorageServiceTestShouldNotPendingCheckpointGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-06-25T14:43:46.779546Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse >> TStorageServiceTest::ShouldGetState [GOOD] >> TStorageServiceTest::ShouldUseGc [GOOD] >> TxUsage::WriteToTopic_Demo_27_Query [GOOD] >> TGRpcYdbTest::ExecuteQueryWithUuid [GOOD] >> TGRpcYdbTest::ExecuteQueryWithParametersBadRequest >> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints [GOOD] >> GrpcConnectionStringParserTest::CommonClientSettingsFromConnectionString [GOOD] >> LocalityOperation::LocksFromAnotherTenants+UseSink >> TGRpcClientLowTest::SimpleRequest [GOOD] >> TGRpcClientLowTest::SimpleRequestDummyService >> TTableProfileTests::UseDefaultProfile [GOOD] >> TTableProfileTests::UseTableProfilePreset >> YdbYqlClient::TraceId [GOOD] >> YdbYqlClient::Utf8DatabasePassViaHeader >> LocalPartition::WithoutPartitionPartitionRelocation [GOOD] >> LocalPartition::DirectWriteWithoutDescribeResourcesPermission >> YdbYqlClient::AlterTableAddIndexAsyncOp [GOOD] >> YdbYqlClient::AlterTableAddIndexWithDataColumn >> TxUsage::WriteToTopic_Demo_28_Table >> YdbYqlClient::TestDecimal [GOOD] >> YdbYqlClient::TestBusySession |84.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_2_Table [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Table [GOOD] >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-true [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidRobouserLogin >> TGRpcYdbTest::CreateTableBadRequest [GOOD] >> TGRpcYdbTest::CreateTableBadRequest2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged [GOOD] Test command err: 2025-06-25T14:43:37.951348Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7519897682346806593:2048] with connection to localhost:20741:local 2025-06-25T14:43:37.951471Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:38.814990Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:38.815021Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:38.815379Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:39.568605Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-25T14:43:39.568648Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:39.568986Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:39.674687Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-25T14:43:39.674717Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: 
[graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:39.674917Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-25T14:43:39.735866Z node 1 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:2] Failed to create checkpoint:
: Warning: Table: local/TStorageServiceTestShouldNotCreateCheckpointAfterGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-06-25T14:43:39.735899Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-25T14:43:40.684521Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7519897694804002050:2048] with connection to localhost:20741:local 2025-06-25T14:43:40.684603Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:40.870954Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:40.871004Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:40.871267Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-25T14:43:41.093469Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Failed to select checkpoint '17:1', code: 400080 2025-06-25T14:43:41.093501Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-06-25T14:43:41.796422Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7519897697987313849:2048] with connection to localhost:20741:local 2025-06-25T14:43:41.796526Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:42.001914Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:42.001944Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:42.004885Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:1] Got TEvAbortCheckpointRequest 2025-06-25T14:43:42.184396Z node 3 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:331: [graph_graphich.17] [17:1] Failed to abort checkpoint:
: Warning: Failed to select checkpoint '17:1', code: 400080 2025-06-25T14:43:42.184447Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:1] Send TEvAbortCheckpointResponse 2025-06-25T14:43:43.631999Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7519897703303978003:2048] with connection to localhost:20741:local 2025-06-25T14:43:43.632284Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:43.901007Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:43.901038Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:43.901788Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:44.774636Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-25T14:43:44.774677Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:44.774994Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-25T14:43:45.146272Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Selected checkpoint '17:1' with status Pending, while expected PendingCommit, code: 400080 2025-06-25T14:43:45.146308Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-06-25T14:43:46.272420Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7519897715411501419:2048] with connection to localhost:20741:local 2025-06-25T14:43:46.272523Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:46.501226Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:46.501253Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:46.501791Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:47.604600Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-25T14:43:47.604632Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:47.606164Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-25T14:43:48.259041Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-25T14:43:48.259078Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T14:43:48.267226Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:48.366727Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-25T14:43:48.366754Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:48.372410Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-25T14:43:48.548472Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Table: local/TStorageServiceTestShouldNotPendingCheckpointGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-06-25T14:43:48.548506Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse >> TYqlDateTimeTests::DatetimeKey [GOOD] >> TYqlDateTimeTests::TimestampKey >> TxUsage::Sinks_Oltp_WriteToTopics_2_Query >> TGcTest::ShouldIgnoreIncrementCheckpoint [GOOD] >> TStateStorageTest::ShouldCountStates >> TTableProfileTests::ExplicitPartitionsUnordered [GOOD] >> TTableProfileTests::ExplicitPartitionsComplex >> TxUsage::WriteToTopic_Demo_38_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_ext_tenant/unittest >> TExtSubDomainTest::CreateTableInsideAndAlterDomainAndTable-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: 2025-06-25T14:43:32.096163Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897660201174828:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:32.096495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a35/r3tmp/tmpuMnsu5/pdisk_1.dat 2025-06-25T14:43:32.633566Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:32.633683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:32.650466Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:32.658746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26766 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:43:32.873181Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519897660201174940:2118] Handle TEvNavigate describe path dc-1 2025-06-25T14:43:32.892547Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519897660201175422:2449] HANDLE EvNavigateScheme dc-1 2025-06-25T14:43:32.892673Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519897660201174993:2140], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:32.892703Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519897660201174993:2140], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:43:32.892899Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519897660201175423:2450][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:43:32.894680Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897655906207360:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897660201175427:2450] 2025-06-25T14:43:32.894739Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897655906207360:2050] Subscribe: subscriber# [1:7519897660201175427:2450], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.894802Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897655906207363:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897660201175428:2450] 2025-06-25T14:43:32.894830Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897655906207363:2053] Subscribe: subscriber# [1:7519897660201175428:2450], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.894851Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519897655906207366:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519897660201175429:2450] 2025-06-25T14:43:32.894864Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519897655906207366:2056] Subscribe: subscriber# [1:7519897660201175429:2450], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:43:32.894916Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897660201175427:2450][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897655906207360:2050] 2025-06-25T14:43:32.894938Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897660201175428:2450][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897655906207363:2053] 2025-06-25T14:43:32.894955Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519897660201175429:2450][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897655906207366:2056] 2025-06-25T14:43:32.895008Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:7519897660201175423:2450][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897660201175424:2450] 2025-06-25T14:43:32.895079Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897660201175423:2450][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897660201175425:2450] 2025-06-25T14:43:32.895125Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519897660201175423:2450][/dc-1] Set up state: owner# [1:7519897660201174993:2140], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:32.895231Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519897660201175423:2450][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519897660201175426:2450] 2025-06-25T14:43:32.895272Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519897660201175423:2450][/dc-1] Path was already updated: owner# [1:7519897660201174993:2140], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:43:32.895304Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897660201175427:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660201175424:2450], cookie# 1 2025-06-25T14:43:32.895318Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897660201175428:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660201175425:2450], cookie# 1 2025-06-25T14:43:32.895348Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519897660201175429:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660201175426:2450], cookie# 1 2025-06-25T14:43:32.895373Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897655906207360:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897660201175427:2450] 2025-06-25T14:43:32.895397Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897655906207360:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660201175427:2450], cookie# 1 2025-06-25T14:43:32.895415Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897655906207363:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897660201175428:2450] 2025-06-25T14:43:32.895426Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897655906207363:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660201175428:2450], cookie# 1 2025-06-25T14:43:32.895440Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519897655906207366:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519897660201175429:2450] 
2025-06-25T14:43:32.895451Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519897655906207366:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519897660201175429:2450], cookie# 1 2025-06-25T14:43:32.896376Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897660201175427:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897655906207360:2050], cookie# 1 2025-06-25T14:43:32.896395Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897660201175428:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897655906207363:2053], cookie# 1 2025-06-25T14:43:32.896466Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519897660201175429:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897655906207366:2056], cookie# 1 2025-06-25T14:43:32.896507Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897660201175423:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897660201175424:2450], cookie# 1 2025-06-25T14:43:32.896545Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897660201175423:2450][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:43:32.896562Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897660201175423:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897660201175425:2450], cookie# 1 2025-06-25T14:43:32.896573Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519897660201175423:2450][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:43:32.896586Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519897660201175423:2450][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519897660201175426:2450], cookie# 1 2025-06-25T14:43:32.896605Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519897660201175423:2450][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:43:32.946977Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519897660201174993:2140], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 
200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... 480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:49.744869Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519897732592752066:4037], recipient# [3:7519897732592752065:2310], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:49.780652Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519897698233011233:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:49.780791Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519897698233011233:2130], cacheItem# { Subscriber: { Subscriber: [3:7519897702527979317:2687] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:49.780890Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519897732592752068:4038], recipient# [3:7519897732592752067:2311], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:50.558137Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519897698233011233:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:50.558270Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: 
cache.cpp:1842: FillEntry for TNavigate: self# [3:7519897698233011233:2130], cacheItem# { Subscriber: { Subscriber: [3:7519897715412881834:3185] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:50.558357Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519897736887719384:4042], recipient# [3:7519897736887719383:2312], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:50.748960Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519897698233011233:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:50.749108Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519897698233011233:2130], cacheItem# { Subscriber: { Subscriber: [3:7519897702527979317:2687] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:50.749191Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519897736887719386:4043], recipient# [3:7519897736887719385:2313], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:50.784833Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519897698233011233:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:50.784990Z node 3 
:TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519897698233011233:2130], cacheItem# { Subscriber: { Subscriber: [3:7519897702527979317:2687] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:50.785087Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519897736887719388:4044], recipient# [3:7519897736887719387:2314], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:51.559955Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519897698233011233:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:51.560056Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519897698233011233:2130], cacheItem# { Subscriber: { Subscriber: [3:7519897715412881834:3185] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:51.560147Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519897741182686704:4048], recipient# [3:7519897741182686703:2315], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:43:51.752043Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519897698233011233:2130], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown 
DomainInfo }] } 2025-06-25T14:43:51.752169Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519897698233011233:2130], cacheItem# { Subscriber: { Subscriber: [3:7519897702527979317:2687] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:43:51.752246Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519897741182686707:4049], recipient# [3:7519897741182686706:2316], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> ClientStatsCollector::PrepareQuery >> YdbYqlClient::TestReadTableMultiShardWholeTableUseSnapshot [GOOD] >> YdbYqlClient::TestReadTableMultiShardWithDescribe ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldGetState [GOOD] Test command err: 2025-06-25T14:43:39.582567Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7519897688798942696:2048] with connection to localhost:26541:local 2025-06-25T14:43:39.582665Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:39.767404Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:39.767428Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:39.768351Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:40.478000Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-25T14:43:40.478048Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:41.189003Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7519897696434150828:2048] with connection to localhost:26541:local 2025-06-25T14:43:41.189103Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:41.449329Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:41.449362Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:41.449968Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:42.225531Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 
2025-06-25T14:43:42.225561Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:42.226023Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-25T14:43:42.458934Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-25T14:43:42.458972Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-25T14:43:42.459354Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:3] Got TEvCreateCheckpointRequest 2025-06-25T14:43:42.824667Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:3] Checkpoint created 2025-06-25T14:43:42.824697Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:3] Send TEvCreateCheckpointResponse 2025-06-25T14:43:42.825644Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:43.098070Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:44.150983Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7519897705654519677:2048] with connection to localhost:26541:local 2025-06-25T14:43:44.151060Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:44.432277Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:44.432329Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:44.432562Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:45.373046Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-25T14:43:45.373077Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:45.373581Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-25T14:43:45.813364Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-25T14:43:45.813398Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T14:43:45.813678Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-25T14:43:46.025750Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-25T14:43:46.025781Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-25T14:43:46.026097Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-25T14:43:46.237600Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status 
updated to 'PendingCommit' 2025-06-25T14:43:46.237627Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T14:43:46.237926Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-06-25T14:43:46.502596Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-06-25T14:43:46.502635Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-06-25T14:43:46.507849Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:1] Got TEvAbortCheckpointRequest 2025-06-25T14:43:46.858097Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:333: [graph_graphich.17] [17:1] Checkpoint aborted 2025-06-25T14:43:46.858125Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:1] Send TEvAbortCheckpointResponse 2025-06-25T14:43:46.859035Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:2] Got TEvAbortCheckpointRequest 2025-06-25T14:43:47.164041Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:333: [graph_graphich.17] [17:2] Checkpoint aborted 2025-06-25T14:43:47.164078Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:2] Send TEvAbortCheckpointResponse 2025-06-25T14:43:47.164442Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:47.395406Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:48.453304Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7519897727045468223:2048] with connection to localhost:26541:local 2025-06-25T14:43:48.453396Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:48.716568Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:48.716599Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:48.716954Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:49.564779Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-25T14:43:49.564814Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:49.565145Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:365: [graph_graphich] [17:1] Got TEvSaveTaskState: task 1317 2025-06-25T14:43:49.751007Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:389: [graph_graphich] [17:1] TEvSaveTaskState Apply: task: 1317 2025-06-25T14:43:49.751053Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:404: [graph_graphich] [17:1] Send TEvSaveTaskStateResult: task: 1317 2025-06-25T14:43:49.751759Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:413: [graph_graphich] [17:1] Got TEvGetTaskState: tasks {1317} 2025-06-25T14:43:49.751796Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:532: [graph_graphich] 
[17:1] GetState, tasks: 1317 2025-06-25T14:43:50.365617Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:667: [graph_graphich] [17:1] ListOfStates results: 2025-06-25T14:43:50.365711Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:688: [graph_graphich] [17:1] taskId 1317 checkpoint id: 17:1, rows count: 1 2025-06-25T14:43:50.365753Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:920: [graph_graphich] [17:1] SkipStatesInFuture, skip 0 checkpoints 2025-06-25T14:43:50.367361Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:812: [graph_graphich] [17:1] SelectState: task_id 1317, seq_no 1, blob_seq_num 0 2025-06-25T14:43:50.811420Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:423: [graph_graphich] [17:1] DeserializeState, task id 1317, blob size 49 2025-06-25T14:43:50.811482Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:979: [graph_graphich] [17:1] ApplyIncrements 2025-06-25T14:43:50.824363Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:432: [graph_graphich] [{ Id: 1 Generation: 17 }] Send TEvGetTaskStateResult: tasks: {1317} >> TGRpcNewCoordinationClientAuth::OwnersAndPermissions [GOOD] >> TGRpcYdbTest::AlterTableAddIndexBadRequest >> YdbYqlClient::TestReadTableOneBatch >> TxUsage::WriteToTopic_Demo_38_Query |84.8%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/test-results/unittest/{meta.json ... results_accumulator.log} |84.8%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldUseGc [GOOD] Test command err: 2025-06-25T14:43:37.951589Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7519897681120863399:2048] with connection to localhost:10280:local 2025-06-25T14:43:37.951682Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:38.708201Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:38.708230Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:39.384420Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7519897688745907748:2048] with connection to localhost:10280:local 2025-06-25T14:43:39.385291Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:39.600593Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:39.600623Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:39.601117Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:39.678589Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-25T14:43:39.678618Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:39.678883Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:39.726099Z node 2 
:STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:197: [graph_graphich.17] Failed to register graph:
: Warning: Table: local/TStorageServiceTestShouldRegisterNextGeneration/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: RegisterCheck, code: 400130 2025-06-25T14:43:39.726117Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:40.453635Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7519897696113841110:2048] with connection to localhost:10280:local 2025-06-25T14:43:40.453787Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:40.638441Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:40.638473Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:40.638833Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:41.435362Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-25T14:43:41.435396Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:41.435661Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-25T14:43:41.783940Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-25T14:43:41.783975Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T14:43:41.784357Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-25T14:43:41.988273Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-25T14:43:41.988332Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-25T14:43:41.988811Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-25T14:43:42.128837Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-06-25T14:43:42.128861Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T14:43:42.129200Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-06-25T14:43:42.332515Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-06-25T14:43:42.332545Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-06-25T14:43:42.344485Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:42.662180Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 
2025-06-25T14:43:43.631799Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7519897709299299201:2048] with connection to localhost:10280:local 2025-06-25T14:43:43.631892Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:43.880115Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:43.880149Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:43.904615Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:44.661028Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-25T14:43:44.661061Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:44.662356Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:365: [graph_graphich] [17:1] Got TEvSaveTaskState: task 1317 2025-06-25T14:43:44.884598Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:389: [graph_graphich] [17:1] TEvSaveTaskState Apply: task: 1317 2025-06-25T14:43:44.888415Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:404: [graph_graphich] [17:1] Send TEvSaveTaskStateResult: task: 1317 2025-06-25T14:43:46.020476Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7519897718019125686:2048] with connection to localhost:10280:local 2025-06-25T14:43:46.020533Z node 5 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [5:7519897722314093091:2132] 2025-06-25T14:43:46.020584Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-25T14:43:46.278780Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-25T14:43:46.278808Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-25T14:43:46.317130Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-25T14:43:47.252548Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-25T14:43:47.252582Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-25T14:43:47.252892Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-25T14:43:47.616586Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-25T14:43:47.616632Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T14:43:47.616999Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-25T14:43:47.936557Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:1] Status updated to 'Completed' 2025-06-25T14:43:47.936596Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: 
[graph_graphich.17] [17:1] Send TEvNewCheckpointSucceeded 2025-06-25T14:43:47.936617Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-06-25T14:43:47.936748Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:1 for graph 'graph_graphich' 2025-06-25T14:43:47.937063Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-25T14:43:48.132427Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-25T14:43:48.132459Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-25T14:43:48.136983Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-25T14:43:48.188566Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-06-25T14:43:48.188607Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T14:43:48.192770Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-06-25T14:43:48.248582Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-06-25T14:43:48.248629Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: [graph_graphich.17] [17:2] Send TEvNewCheckpointSucceeded 2025-06-25T14:43:48.248656Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-06-25T14:43:48.248795Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:2 for graph 'graph_graphich' 2025-06-25T14:43:48.251619Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:3] Got TEvCreateCheckpointRequest 2025-06-25T14:43:48.345457Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:3] Checkpoint created 2025-06-25T14:43:48.345495Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:3] Send TEvCreateCheckpointResponse 2025-06-25T14:43:48.345852Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:3] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-25T14:43:48.395835Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:3] Status updated to 'PendingCommit' 2025-06-25T14:43:48.395873Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:3] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T14:43:48.396711Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:3] Got TEvCompleteCheckpointRequest 2025-06-25T14:43:48.460628Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:3] Status updated to 'Completed' 2025-06-25T14:43:48.460667Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: [graph_graphich.17] [17:3] Send TEvNewCheckpointSucceeded 2025-06-25T14:43:48.460694Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:3] Send TEvCompleteCheckpointResponse 2025-06-25T14:43:48.460797Z node 5 
:STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:3 for graph 'graph_graphich' 2025-06-25T14:43:48.461083Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:48.608113Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:1 2025-06-25T14:43:48.612883Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:3 2025-06-25T14:43:48.622074Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:2 2025-06-25T14:43:48.676144Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:48.776817Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:48.801901Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:48.908412Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:48.927592Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:49.036610Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:49.059457Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:49.168413Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:49.183971Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:49.285417Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:49.316818Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:49.420485Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:49.435651Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:49.536505Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:49.556808Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:49.657787Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:49.676876Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:49.780576Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:49.804726Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:49.905445Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got 
TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:49.925291Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:50.025909Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:50.042853Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:50.148596Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:50.167074Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:50.268424Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:50.300853Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:50.404538Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:50.457706Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-25T14:43:50.558622Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-25T14:43:50.655401Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse |84.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints [GOOD] >> TxUsage::WriteToTopic_Demo_13_Query [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query >> TStateStorageTest::ShouldCountStates [GOOD] >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint >> YdbYqlClient::SecurityTokenAuth [GOOD] >> YdbYqlClient::RetryOperationTemplate >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientWithCorrectCerts_AccessDenied >> TGRpcClientLowTest::SimpleRequestDummyService [GOOD] >> TGRpcClientLowTest::MultipleSimpleRequests >> TxUsage::WriteToTopic_Demo_14_Table >> TGRpcYdbTest::ExecuteQueryWithParametersBadRequest [GOOD] >> TGRpcYdbTest::ExecuteQueryWithParametersExplicitSession >> YdbYqlClient::Utf8DatabasePassViaHeader [GOOD] >> YdbYqlClient::TestYqlTypesFromPreparedQuery >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts_EmptyAllowedSids >> YdbYqlClient::TestBusySession [GOOD] >> YdbYqlClient::TestConstraintViolation >> YdbMonitoring::SelfCheck >> TGRpcYdbTest::RemoveNotExistedDirectory >> TGRpcYdbTest::CreateTableBadRequest2 [GOOD] >> TGRpcYdbTest::CreateTableBadRequest3 >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Table [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidRobouserLogin [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidRobouserPassword >> YdbYqlClient::AlterTableAddIndexWithDataColumn [GOOD] >> YdbYqlClient::CheckDefaultTableSettings1 >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Query >> TTableProfileTests::UseTableProfilePreset [GOOD] >> TTableProfileTests::UseTableProfilePresetViaSdk >> TYqlDateTimeTests::TimestampKey [GOOD] >> TYqlDateTimeTests::IntervalKey >> ClientStatsCollector::PrepareQuery [GOOD] >> 
ClientStatsCollector::CounterCacheMiss >> LocalityOperation::LocksFromAnotherTenants+UseSink [GOOD] >> LocalityOperation::LocksFromAnotherTenants-UseSink >> TGRpcYdbTest::AlterTableAddIndexBadRequest [GOOD] >> TGRpcYdbTest::BeginTxRequestError >> TTableProfileTests::ExplicitPartitionsComplex [GOOD] >> TTableProfileTests::ExplicitPartitionsWrongKeyFormat >> YdbYqlClient::TestReadTableMultiShardWithDescribe [GOOD] >> YdbYqlClient::TestReadTableMultiShardWithDescribeAndRowLimit >> BsControllerConfig::ManyPDisksRestarts [GOOD] >> BsControllerConfig::MergeBoxes >> YdbYqlClient::TestReadTableOneBatch [GOOD] >> YdbYqlClient::TestReadTableNotNullBorder >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientWithCorrectCerts_AccessDenied [GOOD] >> TRegisterNodeOverLegacyService::ServerWithoutCertVerification_ClientProvidesCorrectCerts ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint [GOOD] Test command err: 2025-06-25T14:43:41.920073Z node 1 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [1:38:2085] Count graph descriptions query: --!syntax_v1 PRAGMA TablePathPrefix("local/TGcTestShouldRemovePreviousCheckpoints"); SELECT * FROM checkpoints_graphs_description; 2025-06-25T14:43:42.239662Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 11:3 for graph 'graph' 2025-06-25T14:43:42.867068Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph' up to 11:3 Count graph descriptions query: --!syntax_v1 PRAGMA TablePathPrefix("local/TGcTestShouldRemovePreviousCheckpoints"); SELECT * FROM checkpoints_graphs_description; 2025-06-25T14:43:49.864140Z node 2 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [2:38:2085] Count graph descriptions query: --!syntax_v1 PRAGMA TablePathPrefix("local/ShouldIgnoreIncrementCheckpoint"); SELECT * FROM checkpoints_graphs_description; 2025-06-25T14:43:50.360628Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 11:3 for graph 'graph' 2025-06-25T14:43:50.360698Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:96: GC skip increment checkpoint for graph 'graph' >> YdbYqlClient::RetryOperationTemplate [GOOD] >> YdbYqlClient::RetryOperationSync >> YdbMonitoring::SelfCheck [GOOD] >> YdbMonitoring::SelfCheckWithNodesDying >> TGRpcLdapAuthentication::LdapAuthWithInvalidRobouserPassword [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidLogin >> TGRpcYdbTest::ExecuteQueryWithParametersExplicitSession [GOOD] >> TGRpcYdbTest::ExplainQuery >> SystemView::ShowCreateTableTtlSettings [GOOD] >> SystemView::ShowCreateTableTemporary >> TGRpcYdbTest::RemoveNotExistedDirectory [GOOD] >> TGRpcYdbTest::SdkUuid >> TxUsage::WriteToTopic_Demo_8_Query [GOOD] >> TGRpcYdbTest::CreateTableBadRequest3 [GOOD] >> TGRpcYdbTest::CreateAlterCopyAndDropTable >> YdbYqlClient::TestYqlTypesFromPreparedQuery [GOOD] >> YdbYqlClient::TestConstraintViolation [GOOD] >> YdbYqlClient::CheckDefaultTableSettings1 [GOOD] >> YdbYqlClient::CheckDefaultTableSettings2 >> YdbTableBulkUpsert::ValidRetry >> TGRpcClientLowTest::MultipleSimpleRequests [GOOD] >> TGRpcLdapAuthentication::LdapAuthServerIsUnavailable ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestYqlTypesFromPreparedQuery [GOOD] Test command err: 2025-06-25T14:43:42.104019Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897704004177717:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:42.104247Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018be/r3tmp/tmpUsZcfv/pdisk_1.dat 2025-06-25T14:43:42.596971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:42.597054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:42.602327Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:42.612639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65438, node 1 2025-06-25T14:43:42.671523Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:43:42.726377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:42.726400Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:42.726423Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:42.726550Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:43.090908Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32096 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:43.334109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:43:45.283246Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897716889080445:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:45.283314Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:45.619190Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897716889080466:2633] txid# 281474976710658, issues: { message: "Column Key has wrong key type Json" severity: 1 } 2025-06-25T14:43:45.648455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897716889080476:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:45.648541Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:45.663806Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897716889080483:2643] txid# 281474976710659, issues: { message: "Column Key has wrong key type Yson" severity: 1 } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018be/r3tmp/tmpEhAYvx/pdisk_1.dat TServer::EnableGrpc on GrpcPort 62875, node 4 TClient is connected to server localhost:19378 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018be/r3tmp/tmpYWzRRC/pdisk_1.dat TServer::EnableGrpc on GrpcPort 16685, node 7 TClient is connected to server localhost:12885 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:43:57.173855Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897767523122531:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:57.173938Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018be/r3tmp/tmpdMQbnY/pdisk_1.dat 2025-06-25T14:43:57.521565Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:57.549169Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:57.549267Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:57.553300Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2774, node 10 2025-06-25T14:43:57.676869Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:57.676895Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:57.676901Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:57.677062Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18367 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:58.026976Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:58.213008Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:00.727216Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897780408025445:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:00.727305Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897780408025437:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:00.727377Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:00.730712Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:00.762629Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519897780408025451:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:44:00.857436Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519897780408025529:2675] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestConstraintViolation [GOOD] Test command err: 2025-06-25T14:43:42.082396Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897701420744831:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:42.082441Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001879/r3tmp/tmpNfFeKy/pdisk_1.dat 2025-06-25T14:43:42.475783Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:42.521511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:42.521582Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:42.525153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13290, node 1 2025-06-25T14:43:42.728979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:42.744491Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:42.744533Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:42.744699Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:43.108429Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63063 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:43:43.259990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:44.935357Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897710010680393:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:44.935461Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:45.617641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:45.757893Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897714305647872:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:45.757947Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897714305647877:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:45.757951Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:45.761179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:45.789169Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897714305647879:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:43:45.876175Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897714305647955:2786] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:46.041427Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrpyyx2xdmxthzbf870k5v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTc1MTdkYjYtMTU0YWE0ODgtNWY3ZjBlNTMtZTc2NjY2ZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:43:46.256167Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrpz8way6hd0de8v9a8d3s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTc1MTdkYjYtMTU0YWE0ODgtNWY3ZjBlNTMtZTc2NjY2ZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:43:47.607242Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897722819792815:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:47.609624Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001879/r3tmp/tmpgHGBQW/pdisk_1.dat 2025-06-25T14:43:47.866311Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:47.902718Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 14891, node 4 2025-06-25T14:43:47.948998Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:47.949023Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:47.949030Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:47.949169Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:47.953386Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:47.953483Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:47.960279Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24427 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:48.198450Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:48.608470Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:50.616773Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897735704695619:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:50.616833Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897735704695612:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:50.616963Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:50.620506Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 28 ... y1mMmJiYjVlMS1hMmZlNmM5, ActorId: [7:7519897759650231261:2296], ActorState: ExecuteState, TraceId: 01jykrq8p34hkxg3xmg0dyz52a, Reply query error, msg: Pending previous query completion proxyRequestId: 9 2025-06-25T14:43:55.745904Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=7&id=NWMwMTQ1MzMtYjcxMTQzYy1mMmJiYjVlMS1hMmZlNmM5, ActorId: [7:7519897759650231261:2296], ActorState: ExecuteState, TraceId: 01jykrq8p34hkxg3xmg0dyz52a, Reply query error, msg: Pending previous query completion proxyRequestId: 10 2025-06-25T14:43:55.747177Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=7&id=NWMwMTQ1MzMtYjcxMTQzYy1mMmJiYjVlMS1hMmZlNmM5, ActorId: [7:7519897759650231261:2296], ActorState: ExecuteState, TraceId: 01jykrq8p34hkxg3xmg0dyz52a, Reply query error, msg: Pending previous query completion proxyRequestId: 11 2025-06-25T14:43:55.747614Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=7&id=NWMwMTQ1MzMtYjcxMTQzYy1mMmJiYjVlMS1hMmZlNmM5, ActorId: [7:7519897759650231261:2296], ActorState: ExecuteState, TraceId: 01jykrq8p34hkxg3xmg0dyz52a, Reply query error, msg: Pending previous query completion proxyRequestId: 12 2025-06-25T14:43:55.748277Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519897759650231285:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:43:55.845169Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519897759650231389:2678] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:57.434069Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897768814199175:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:57.434269Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001879/r3tmp/tmp6c1maK/pdisk_1.dat 2025-06-25T14:43:57.786873Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:57.808909Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:57.808995Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:57.813093Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15136, node 10 2025-06-25T14:43:57.942434Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:57.942477Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:57.942486Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:57.942630Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27790 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:43:58.219727Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:58.471478Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:00.797454Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897781699102077:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:00.797541Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:00.817991Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:00.935952Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897781699102259:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:00.936061Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:00.936349Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897781699102264:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:00.941056Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:00.960723Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519897781699102266:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:44:01.055767Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519897785994069635:2797] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:01.124985Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrqds751g7h0f09azbpds4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZTNjOGZlZmUtYTY3YzAxM2UtMjhjNWFkMWYtYTg1ZTY3MTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:01.221414Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jykrqe0255gqzr4aqqf542fq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZTNjOGZlZmUtYTY3YzAxM2UtMjhjNWFkMWYtYTg1ZTY3MTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:01.227236Z node 10 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-25T14:44:01.237423Z node 10 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:44:01.237637Z node 10 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 3 at tablet 72075186224037888 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:44:01.237894Z node 10 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [10:7519897785994069694:2296], Table: `Root/Test` ([72057594046644480:2:1]), SessionActorId: [10:7519897781699102074:2296]Got CONSTRAINT VIOLATION for table `Root/Test`. ShardID=72075186224037888, Sink=[10:7519897785994069694:2296].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:44:01.238578Z node 10 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [10:7519897785994069687:2296], SessionActorId: [10:7519897781699102074:2296], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `Root/Test`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[10:7519897781699102074:2296]. isRollback=0 2025-06-25T14:44:01.238839Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=10&id=ZTNjOGZlZmUtYTY3YzAxM2UtMjhjNWFkMWYtYTg1ZTY3MTE=, ActorId: [10:7519897781699102074:2296], ActorState: ExecuteState, TraceId: 01jykrqe0255gqzr4aqqf542fq, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [10:7519897785994069688:2296] from: [10:7519897785994069687:2296] 2025-06-25T14:44:01.238954Z node 10 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [10:7519897785994069688:2296] TxId: 281474976710662. Ctx: { TraceId: 01jykrqe0255gqzr4aqqf542fq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZTNjOGZlZmUtYTY3YzAxM2UtMjhjNWFkMWYtYTg1ZTY3MTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `Root/Test`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:44:01.239164Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=10&id=ZTNjOGZlZmUtYTY3YzAxM2UtMjhjNWFkMWYtYTg1ZTY3MTE=, ActorId: [10:7519897781699102074:2296], ActorState: ExecuteState, TraceId: 01jykrqe0255gqzr4aqqf542fq, Create QueryResponse for error on request, msg: >> TxUsage::WriteToTopic_Demo_28_Table [GOOD] >> ClientStatsCollector::CounterCacheMiss [GOOD] >> ClientStatsCollector::CounterRetryOperation >> LocalPartition::DirectWriteWithoutDescribeResourcesPermission [GOOD] >> LocalPartition::WithoutPartitionWithSplit >> TxUsage::WriteToTopic_Demo_28_Query >> TYqlDateTimeTests::IntervalKey [GOOD] >> TYqlDateTimeTests::SimpleOperations >> TxUsage::WriteToTopic_Demo_9_Table >> TGRpcYdbTest::BeginTxRequestError [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Table [GOOD] >> TGRpcNewCoordinationClient::CreateDropDescribe >> YdbYqlClient::TestReadTableNotNullBorder [GOOD] >> YdbYqlClient::TestReadTableNotNullBorder2 >> YdbYqlClient::CreateTableWithPartitionAtKeys >> TRegisterNodeOverLegacyService::ServerWithoutCertVerification_ClientProvidesCorrectCerts [GOOD] >> TRegisterNodeOverLegacyService::ServerWithoutCertVerification_ClientProvidesEmptyClientCerts >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts_EmptyAllowedSids [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts_AllowOnlyDefaultGroup >> YdbYqlClient::TestReadTableMultiShardWithDescribeAndRowLimit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::BeginTxRequestError [GOOD] Test command err: 2025-06-25T14:43:42.044390Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897701889203630:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:42.044459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001885/r3tmp/tmpBmTw3I/pdisk_1.dat 2025-06-25T14:43:42.503968Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:42.517805Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:42.517926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:42.532384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19283, node 1 2025-06-25T14:43:42.726942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:42.726965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:42.726971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:42.727058Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:43.066237Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17010 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:43.292652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:43.385426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:43:46.477003Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897721800732131:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:46.477074Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001885/r3tmp/tmpz9OQxc/pdisk_1.dat 2025-06-25T14:43:46.781937Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:46.799640Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:46.799723Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:46.806251Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:46.818638Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 25476, node 4 2025-06-25T14:43:46.854823Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:46.854847Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:46.854854Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to 
initialize from file: (empty maybe) 2025-06-25T14:43:46.854948Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61814 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:47.096482Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:47.193951Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:43:47.508683Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:50.890378Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897738113138047:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:50.890628Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001885/r3tmp/tmpUh6DO1/pdisk_1.dat 2025-06-25T14:43:51.118806Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:51.148858Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 4804, node 7 2025-06-25T14:43:51.192608Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:51.192633Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:51.192641Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:51.192790Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2025-06-25T14:43:51.216487Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:51.216589Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:51.238916Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11750 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:51.437461Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:51.628167Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:43:51.713955Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first calle ... 
eshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:43:52.513373Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:43:52.552537Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:43:52.591828Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:43:55.332854Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897759803694428:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:55.332922Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001885/r3tmp/tmp2Lp6ad/pdisk_1.dat 2025-06-25T14:43:55.633709Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:55.649641Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:55.649717Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:55.664037Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:55.676718Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 26560, node 10 2025-06-25T14:43:55.804524Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:55.804548Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:55.804556Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:55.804708Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9327 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:56.074304Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:56.164347Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:56.413108Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:00.251563Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897778833417557:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:00.251905Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001885/r3tmp/tmpEJml42/pdisk_1.dat 2025-06-25T14:44:00.384735Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:00.390574Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:00.390656Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:00.402209Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10152, node 13 2025-06-25T14:44:00.480018Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:00.480053Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:00.480061Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:00.480222Z node 13 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8509 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:00.762077Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:01.261572Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:03.562151Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897791718320422:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:03.562267Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:03.564452Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897791718320434:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:03.569900Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:03.596240Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519897791718320436:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:44:03.669715Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519897791718320514:2677] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:03.671099Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=13&id=ODNlMzZjMDctNWJkZDRmMTktYmQ0MWU4OWYtYWI5ZjcxMDA=, ActorId: [13:7519897791718320404:2293], ActorState: ExecuteState, TraceId: 01jykrqgb422bjtenapc996t7z, ReplyQueryCompileError, status NOT_FOUND remove tx with tx_id: 2025-06-25T14:44:03.675542Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=13&id=ODNlMzZjMDctNWJkZDRmMTktYmQ0MWU4OWYtYWI5ZjcxMDA=, ActorId: [13:7519897791718320404:2293], ActorState: ExecuteState, TraceId: 01jykrqges5wmtjrtdp53n2qh9, ReplyQueryCompileError, status NOT_FOUND remove tx with tx_id: 2025-06-25T14:44:03.677654Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=13&id=ODNlMzZjMDctNWJkZDRmMTktYmQ0MWU4OWYtYWI5ZjcxMDA=, ActorId: [13:7519897791718320404:2293], ActorState: ExecuteState, TraceId: 01jykrqgex6v5kngsv2etd6287, ReplyQueryCompileError, status NOT_FOUND remove tx with tx_id: >> TGRpcLdapAuthentication::LdapAuthWithInvalidLogin [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidPassword >> TGRpcYdbTest::CreateAlterCopyAndDropTable [GOOD] >> TGRpcYdbTest::CreateDeleteYqlSession >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query >> TxUsage::WriteToTopic_Demo_14_Table [GOOD] >> TTableProfileTests::UseTableProfilePresetViaSdk [GOOD] >> TTableProfileTests::WrongTableProfile ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadTableMultiShardWithDescribeAndRowLimit [GOOD] Test command err: 2025-06-25T14:43:44.136657Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897712885005483:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:44.137766Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186e/r3tmp/tmpAJaTXv/pdisk_1.dat 2025-06-25T14:43:44.556530Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:44.571448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:44.571535Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:44.579451Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3266, node 1 2025-06-25T14:43:44.776871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:44.776892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:44.776899Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:44.776993Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5670 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:43:45.150123Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:45.239254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:45.351205Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:592: Got grpc request# ListEndpointsRequest, traceId# 01jykrpyj2acjmkawn1myc8m65, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:48666, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.973155s 2025-06-25T14:43:45.416319Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateSessionRequest, traceId# 01jykrpyk9a283xg1dz5ar7b00, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:48678, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:43:47.209879Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateTableRequest, traceId# 01jykrq0c9abegxfpwxpvp1kwn, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:45368, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:43:47.210450Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519897712885005700:2143] Handle TEvProposeTransaction 2025-06-25T14:43:47.210471Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519897712885005700:2143] TxId# 281474976710658 ProcessProposeTransaction 2025-06-25T14:43:47.210514Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519897712885005700:2143] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519897725769908366:2607] 2025-06-25T14:43:47.279520Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519897725769908366:2607] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Test" Columns { Name: "Key" Type: 
"Uint32" NotNull: false } Columns { Name: "Fk" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" KeyColumnNames: "Fk" UniformPartitionsCount: 16 PartitionConfig { } Temporary: false } CreateIndexedTable { } } } DatabaseName: "" RequestType: "" PeerName: "ipv6:[::1]:45368" 2025-06-25T14:43:47.279747Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519897725769908366:2607] txid# 281474976710658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:43:47.280101Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519897725769908366:2607] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:43:47.280156Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519897725769908366:2607] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:43:47.280332Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519897725769908366:2607] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:43:47.280447Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519897725769908366:2607] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:43:47.280487Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519897725769908366:2607] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-25T14:43:47.280605Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519897725769908366:2607] txid# 281474976710658 HANDLE EvClientConnected 2025-06-25T14:43:47.282447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:47.285691Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519897725769908366:2607] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-06-25T14:43:47.285745Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519897725769908366:2607] txid# 281474976710658 SEND to# [1:7519897725769908365:2297] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-06-25T14:43:47.287028Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:43:47.287102Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:43:47.287113Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:43:47.287141Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:43:47.331429Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908419:2658], Recipient [1:7519897725769908586:2304]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.331440Z node 1 
:TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908424:2663], Recipient [1:7519897725769908597:2309]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.332494Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908421:2660], Recipient [1:7519897725769908600:2311]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.332687Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908427:2666], Recipient [1:7519897725769908610:2314]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.334974Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908430:2669], Recipient [1:7519897725769908612:2315]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.335437Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908420:2659], Recipient [1:7519897725769908587:2305]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.335480Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908432:2671], Recipient [1:7519897725769908585:2303]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.336013Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908428:2667], Recipient [1:7519897725769908581:2300]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.336450Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908418:2657], Recipient [1:7519897725769908596:2308]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.337044Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908422:2661], Recipient [1:7519897725769908607:2312]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.337444Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908423:2662], Recipient [1:7519897725769908583:2301]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.337788Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908431:2670], Recipient [1:7519897725769908588:2306]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.338063Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908417:2656], Recipient [1:7519897725769908584:2302]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.338339Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908426:2665], Recipient [1:7519897725769908609:2313]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.338649Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908429:2668], Recipient [1:7519897725769908595:2307]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:47.338925Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897725769908425 ... 
075186224037897 has no attached operations 2025-06-25T14:44:04.942274Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037897 2025-06-25T14:44:04.942301Z node 10 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037897 2025-06-25T14:44:04.943611Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [10:7519897799521244544:2140], Recipient [10:7519897795226276250:2305]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-25T14:44:04.943639Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-25T14:44:04.943677Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [10:7519897799521244526:2363] Adding quota request to queue ShardId: 0, TxId: 281474976715680 2025-06-25T14:44:04.943705Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [10:7519897799521244526:2363] Assign stream quota to Shard 0, Quota 5, TxId 281474976715680 Reserved: 5 of 25, Queued: 0 2025-06-25T14:44:04.943841Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037897, TxId: 281474976715681, MessageQuota: 5 2025-06-25T14:44:04.944034Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037897, TxId: 281474976715681, Size: 54, Rows: 0, PendingAcks: 1, MessageQuota: 4 2025-06-25T14:44:04.944247Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [10:7519897799521244526:2363] got stream part, size: 75, RU required: 128 rate limiter absent 2025-06-25T14:44:04.944347Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549569, Sender [10:7519897799521244527:2363], Recipient [10:7519897795226276250:2305]: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715681 2025-06-25T14:44:04.944363Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3174: StateWork, processing event TEvDataShard::TEvCancelTransactionProposal 2025-06-25T14:44:04.944376Z node 10 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:73: Got TEvDataShard::TEvCancelTransactionProposal 72075186224037897 txId 281474976715681 2025-06-25T14:44:04.944403Z node 10 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:44: Start TTxCancelTransactionProposal at tablet 72075186224037897 txId 281474976715681 2025-06-25T14:44:04.944468Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287431, Sender [10:7519897799521244527:2363], Recipient [10:7519897795226276250:2305]: NKikimrTx.TEvInterruptTransaction TxId: 281474976715681 2025-06-25T14:44:04.944481Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvTxProcessing::TEvInterruptTransaction 2025-06-25T14:44:04.944545Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519897799521244527:2363], Recipient [10:7519897795226276250:2305]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750862644979 TxId: 281474976715680 2025-06-25T14:44:04.944654Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [10:7519897799521244526:2363] Starting inactivity timer for 600.000000s with tag 3 2025-06-25T14:44:04.944692Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [10:7519897795226276250:2305], Recipient [10:7519897795226276250:2305]: 
NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:44:04.944706Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:44:04.944725Z node 10 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037897 2025-06-25T14:44:04.944739Z node 10 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037897 active 1 active planned 0 immediate 1 planned 0 2025-06-25T14:44:04.944755Z node 10 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715681] at 72075186224037897 for ReadTableScan 2025-06-25T14:44:04.944771Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715681] at 72075186224037897 on unit ReadTableScan 2025-06-25T14:44:04.944807Z node 10 :TX_DATASHARD NOTICE: read_table_scan_unit.cpp:240: Interrupted operation [0:281474976715681] at 72075186224037897 while waiting for scan finish 2025-06-25T14:44:04.944822Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715681] at 72075186224037897 is Executed 2025-06-25T14:44:04.944844Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715681] at 72075186224037897 executing on unit ReadTableScan 2025-06-25T14:44:04.944855Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715681] at 72075186224037897 to execution unit FinishPropose 2025-06-25T14:44:04.944863Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715681] at 72075186224037897 on unit FinishPropose 2025-06-25T14:44:04.944886Z node 10 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. 
txid 281474976715681 at tablet 72075186224037897 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976715681] at 72075186224037897 while waiting for scan finish) | 2025-06-25T14:44:04.944912Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715681] at 72075186224037897 is DelayCompleteNoMoreRestarts 2025-06-25T14:44:04.944926Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715681] at 72075186224037897 executing on unit FinishPropose 2025-06-25T14:44:04.944935Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715681] at 72075186224037897 to execution unit CompletedOperations 2025-06-25T14:44:04.944944Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715681] at 72075186224037897 on unit CompletedOperations 2025-06-25T14:44:04.944971Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715681] at 72075186224037897 is Executed 2025-06-25T14:44:04.944981Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715681] at 72075186224037897 executing on unit CompletedOperations 2025-06-25T14:44:04.944991Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715681] at 72075186224037897 has finished 2025-06-25T14:44:04.945002Z node 10 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037897 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:44:04.945011Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037897 2025-06-25T14:44:04.945020Z node 10 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037897 has no attached operations 2025-06-25T14:44:04.945030Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037897 2025-06-25T14:44:04.946343Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037897, TxId: 281474976715681, PendingAcks: 0 2025-06-25T14:44:04.946383Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037897, TxId: 281474976715681, MessageQuota: 4 2025-06-25T14:44:04.946577Z node 10 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037897 2025-06-25T14:44:04.946595Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715681] at 72075186224037897 on unit FinishPropose 2025-06-25T14:44:04.946611Z node 10 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715681 at tablet 72075186224037897 send to client, exec latency: 3 ms, propose latency: 5 ms, status: ERROR 2025-06-25T14:44:04.946631Z node 10 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715681 at tablet 72075186224037897 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976715681] at 72075186224037897 while waiting for scan finish) | 2025-06-25T14:44:04.946675Z node 10 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037897 2025-06-25T14:44:04.949937Z node 10 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [10:7519897799521244526:2363] Finish grpc stream, status: 400000 2025-06-25T14:44:04.959688Z node 10 :GRPC_SERVER DEBUG: 
grpc_server.cpp:283: [0x51a00012de80] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.959978Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000082280] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.960011Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000053480] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.960156Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000133e80] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.960213Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00012e480] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.960321Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00012a280] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.960407Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000000680] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.960490Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000bca80] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.960573Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d0880] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.960657Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000107480] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.960728Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000108c80] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.960819Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00017e880] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.960879Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000175e80] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.960989Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000053a80] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.961048Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000190280] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.961144Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00017d680] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-25T14:44:04.961199Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000183680] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 >> TGRpcYdbTest::SdkUuid [GOOD] >> TGRpcYdbTest::SdkUuidViaParams >> YdbYqlClient::CheckDefaultTableSettings2 [GOOD] >> TTableProfileTests::ExplicitPartitionsWrongKeyFormat [GOOD] >> TTableProfileTests::ExplicitPartitionsWrongKeyType >> LocalityOperation::LocksFromAnotherTenants-UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_14_Query >> TGRpcLdapAuthentication::LdapAuthServerIsUnavailable [GOOD] >> TGRpcLdapAuthentication::DisableBuiltinAuthMechanism >> TGRpcClientLowTest::GrpcRequestProxy >> TGRpcYdbTest::ExplainQuery [GOOD] >> YdbTableBulkUpsert::ValidRetry [GOOD] >> YdbTableBulkUpsert::Types ------- [TM] {asan, default-linux-x86_64, release} 
ydb/services/ydb/ut/unittest >> YdbYqlClient::CheckDefaultTableSettings2 [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001875/r3tmp/tmpK0UGaR/pdisk_1.dat TServer::EnableGrpc on GrpcPort 21497, node 1 TClient is connected to server localhost:18135 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:43:46.411048Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897719892325705:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:46.411120Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001875/r3tmp/tmpw1IAew/pdisk_1.dat 2025-06-25T14:43:46.586601Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:46.602089Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:46.602156Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:46.610293Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62963, node 4 2025-06-25T14:43:46.730006Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:46.730025Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:46.730031Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:46.730137Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6385 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:46.951345Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:47.432445Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:49.481523Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897732777228574:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:49.481633Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:49.781067Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:49.908491Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897732777228754:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:49.908581Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:49.908813Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897732777228759:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:49.912926Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:49.947230Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519897732777228761:2315], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:43:50.008509Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897737072196132:2796] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:50.153770Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrq30k57vc9hc5g6q32951, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NjYyZTk4ZjEtM2I5YjRjNTMtY2VjYmQwNzUtODNjYTU3YzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:43:50.249315Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:50.407554Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:43:50.492220Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:43:52.190969Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897746078692936:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:52.191019Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001875/r3tmp/tmprU4E6W/pdisk_1.dat 2025-06-25T14:43:52.479084Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:52.517447Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:52.517528Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 26944, node 7 2025-06-25T14:43:52.609028Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:52.750300Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:52.750325Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:52.750334Z 
node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:52.750480Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5007 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 Sch ... seId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:55.752076Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:55.845180Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519897758963595980:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:55.845260Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:55.845737Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519897758963595985:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:55.849036Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:55.881882Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519897758963595987:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:43:55.977209Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519897758963596060:2796] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:56.051525Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrq8t4756a0h5hcx9k7pgr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MmQ5YjFlZTYtMTlmYWRlOTQtOGU4NjJiZmItNTg2ZGNlYzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:43:56.114759Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:56.185447Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:43:56.364224Z node 7 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:43:58.278557Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897772930864072:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:58.284723Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001875/r3tmp/tmpFUB91n/pdisk_1.dat 2025-06-25T14:43:58.504804Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:58.531297Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:58.531370Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:58.534434Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:58.545089Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 28150, node 10 2025-06-25T14:43:58.596128Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:58.596149Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:58.596157Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:58.596256Z node 10 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62434 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:58.950761Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:59.317125Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:01.144509Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:02.947189Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897787305126579:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:02.947322Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001875/r3tmp/tmpbkBC2o/pdisk_1.dat 2025-06-25T14:44:03.261084Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:03.284970Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:03.285061Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:03.290997Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:03.301421Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 9273, node 13 2025-06-25T14:44:03.396541Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-25T14:44:03.396570Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:03.396578Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:03.396719Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15862 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:03.697014Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:04.003117Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:06.313186Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> YdbYqlClient::TestDecimal1 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> LocalityOperation::LocksFromAnotherTenants-UseSink [GOOD] Test command err: 2025-06-25T14:43:42.116249Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897701935871249:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:42.126769Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001880/r3tmp/tmp9eiPa7/pdisk_1.dat 2025-06-25T14:43:42.582729Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:42.590958Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:42.591073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:42.602734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12878, node 1 2025-06-25T14:43:42.869901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:42.869923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:42.869931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:42.870080Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:43.125012Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17353 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:43.259216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:47.075159Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897723656086038:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:47.076096Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001880/r3tmp/tmpauOPqI/pdisk_1.dat 2025-06-25T14:43:47.311563Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:47.337330Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:47.337425Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:47.342667Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14198, node 4 2025-06-25T14:43:47.540974Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:47.541003Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:47.541018Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:47.541164Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7206 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:47.819837Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:48.068469Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:51.941054Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897743069456288:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:51.941163Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001880/r3tmp/tmp1SXO7b/pdisk_1.dat 2025-06-25T14:43:52.201567Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:52.246974Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 11360, node 7 2025-06-25T14:43:52.280857Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:52.280982Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:52.358917Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:52.389049Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:52.389071Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:52.389079Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:52.389237Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13549 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:52.729225Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:52.746191Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:43:52.964467Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13549 2025-06-25T14:43:53.103292Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-25T14:43:53.126977Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:53.642703Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519897752362475721:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:53.646921Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_0/.metadata/initializati ... 
12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_0/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:01.634562Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:01.634682Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:01.636671Z node 10 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-06-25T14:44:01.638116Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:01.722715Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-25T14:44:01.751086Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:02.265728Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7519897788051784993:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:02.265840Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:02.369216Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:02.369313Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:02.372963Z node 10 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 11 Cookie 11 2025-06-25T14:44:02.387295Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:02.624970Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:03.272636Z node 11 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:04.043562Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:04.263321Z node 10 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:04.373693Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897798543498015:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:04.373797Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:04.374133Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897798543498027:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:04.377961Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:04.408462Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519897798543498029:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715664 completed, doublechecking } 2025-06-25T14:44:04.502851Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519897798543498100:3386] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:04.607220Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jykrqh4mf1v71egdh2t0yfmz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZmM3NDcwZmYtOWM0OTI3NjYtMWFlZDM3YzAtN2FlODUzYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:04.796857Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykrqhdc76rwstkmkzkb7ag3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZmM3NDcwZmYtOWM0OTI3NjYtMWFlZDM3YzAtN2FlODUzYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:04.979639Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. Ctx: { TraceId: 01jykrqhk14bx085djhx3zwt9b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZmM3NDcwZmYtOWM0OTI3NjYtMWFlZDM3YzAtN2FlODUzYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:05.132412Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519897781363626817:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:05.132495Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:05.810743Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jykrqhk14bx085djhx3zwt9b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZmM3NDcwZmYtOWM0OTI3NjYtMWFlZDM3YzAtN2FlODUzYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:05.814474Z node 10 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:818: ActorId: [10:7519897802838465556:2320] TxId: 281474976715669. Ctx: { TraceId: 01jykrqhk14bx085djhx3zwt9b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZmM3NDcwZmYtOWM0OTI3NjYtMWFlZDM3YzAtN2FlODUzYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Handle TEvProposeTransactionResult: unable to select coordinator. Tx canceled, actorId: [10:7519897802838465556:2320], previously selected coordinator: 72075186224037888, coordinator selected at propose result: 72075186224037890 2025-06-25T14:44:05.814999Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=10&id=ZmM3NDcwZmYtOWM0OTI3NjYtMWFlZDM3YzAtN2FlODUzYjI=, ActorId: [10:7519897798543497826:2320], ActorState: ExecuteState, TraceId: 01jykrqhk14bx085djhx3zwt9b, Create QueryResponse for error on request, msg: 2025-06-25T14:44:05.816113Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. 
Ctx: { TraceId: 01jykrqhk14bx085djhx3zwt9b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZmM3NDcwZmYtOWM0OTI3NjYtMWFlZDM3YzAtN2FlODUzYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:05.825408Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 11 2025-06-25T14:44:05.826079Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:44:05.826304Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-25T14:44:05.826671Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:44:06.492439Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7519897805231654509:2281], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:44:06.493825Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:44:06.588904Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7519897805231654509:2281], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:44:06.619393Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519897786623602504:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:06.620892Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_0/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:06.764348Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7519897805231654509:2281], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:44:07.023408Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:7519897805231654509:2281], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:44:07.265134Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[11:7519897788051784993:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:07.265223Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_tenant_1/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::ExplainQuery [GOOD] Test command err: 2025-06-25T14:43:41.987533Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897699347793253:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:41.987651Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00188c/r3tmp/tmpgY3ljN/pdisk_1.dat 2025-06-25T14:43:42.445623Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:42.488555Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:42.488649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:42.496724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8852, node 1 2025-06-25T14:43:42.732578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:42.732608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:42.732614Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:42.732752Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:43.018440Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15953 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:43:43.246050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:46.204705Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897719008836982:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:46.211634Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00188c/r3tmp/tmpFIxq0w/pdisk_1.dat 2025-06-25T14:43:46.533340Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3495, node 4 2025-06-25T14:43:46.586074Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-25T14:43:46.617264Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:46.617379Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:46.691850Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:46.745291Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:46.745315Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:46.745321Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:46.745447Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27803 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:43:46.978750Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:47.257776Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:49.542466Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897731893739852:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:49.542466Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897731893739863:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:49.542570Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:49.546744Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:49.576469Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519897731893739866:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:43:49.658406Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897731893739946:2684] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:51.680538Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897740970318052:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:51.680678Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00188c/r3tmp/tmpUZsqXE/pdisk_1.dat 2025-06-25T14:43:51.940337Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23002, node 7 2025-06-25T14:43:51.992858Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 2025-06-25T14:43:52.022375Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:52.022482Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:52.088497Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:52.222714Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:52.222735Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:52.222745Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:52.222887Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9437 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParam ... Unsupported protobuf type:
: Error: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1052: ydb/library/mkql_proto/mkql_proto.cpp:1435: Unknown protobuf type: 2025-06-25T14:43:56.817479Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897763121414287:2245];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:56.817545Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00188c/r3tmp/tmpI2VqmR/pdisk_1.dat 2025-06-25T14:43:57.079534Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:57.152981Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:57.153060Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8161, node 10 2025-06-25T14:43:57.286369Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:57.286390Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:57.286400Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:57.286542Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:57.327293Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62254 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:57.794805Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:43:57.820496Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:00.474876Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897780301284270:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:00.474975Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:00.475217Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897780301284282:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:00.479621Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:00.504512Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519897780301284284:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:44:00.591517Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519897780301284359:2673] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:02.294099Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897788168781275:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:02.294181Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00188c/r3tmp/tmpwPTWvH/pdisk_1.dat 2025-06-25T14:44:02.562855Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4947, node 13 2025-06-25T14:44:02.685735Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:02.686186Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:02.841849Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:02.854029Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:02.854059Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:02.854070Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:02.854241Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6762 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:03.153122Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:03.303747Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:03.373668Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:06.050375Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897805348651637:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:06.050468Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:06.050861Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897805348651649:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:06.054963Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:06.094623Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519897805348651651:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:44:06.165672Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519897805348651722:2795] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:06.258077Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrqjs0894rejckbzfgtmea, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTY3MDJlYWYtMTAyZjgzYTQtNzc0MmY2MzctNDRjM2RkOTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TGRpcNewCoordinationClient::CreateDropDescribe [GOOD] >> TGRpcNewCoordinationClient::CreateAlter >> TxUsage::WriteToTopic_Demo_38_Query [GOOD] >> TRegisterNodeOverLegacyService::ServerWithoutCertVerification_ClientProvidesEmptyClientCerts [GOOD] >> TTableProfileTests::DescribeTableWithPartitioningPolicy >> YdbYqlClient::CreateTableWithPartitionAtKeys [GOOD] >> YdbYqlClient::CreateAndAltertTableWithPartitioningBySize >> TGRpcYdbTest::MakeListRemoveDirectory >> TxUsage::WriteToTopic_Demo_39_Table >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidPassword [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithEmptyPassword >> YdbYqlClient::TestReadTableNotNullBorder2 [GOOD] >> YdbYqlClient::TestReadTableSnapshot >> YdbYqlClient::SecurityTokenAuthMultiTenantSDK >> YdbYqlClient::RetryOperationSync [GOOD] >> YdbYqlClient::RetryOperationLimitedDuration >> TGRpcYdbTest::ExecuteQueryBadRequest >> ClientStatsCollector::CounterRetryOperation [GOOD] >> ClientStatsCollector::ExternalMetricRegistryByRawPtr >> TYqlDateTimeTests::SimpleOperations [GOOD] >> TGRpcYdbTest::CreateDeleteYqlSession [GOOD] >> TGRpcYdbTest::SdkUuidViaParams [GOOD] >> TGRpcYdbTest::ReadTable >> TGRpcClientLowTest::GrpcRequestProxy [GOOD] >> TGRpcClientLowTest::GrpcRequestProxyWithoutToken ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::CreateDeleteYqlSession [GOOD] Test command err: 2025-06-25T14:43:49.062752Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897732602278759:2216];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:49.063735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186a/r3tmp/tmpljxg59/pdisk_1.dat 2025-06-25T14:43:49.467994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:49.468071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:49.475931Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:49.478074Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:49.553700Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 
11491, node 1 2025-06-25T14:43:49.608705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:49.608729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:49.608740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:49.608860Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28769 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:50.003869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:43:50.069299Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:53.415830Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897752349772093:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:53.416439Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186a/r3tmp/tmpLgtPwM/pdisk_1.dat 2025-06-25T14:43:53.609262Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:53.625383Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:53.625453Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:53.631515Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:53.648780Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 16154, node 4 2025-06-25T14:43:53.696952Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:53.696974Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:53.696980Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:53.697086Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3031 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:53.989809Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:43:54.086914Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897756644740236:2590] txid# 281474976715658, issues: { message: "Unknown column \'BlaBla\' specified in key column list" severity: 1 } 2025-06-25T14:43:57.900769Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897767475115385:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:57.900812Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186a/r3tmp/tmp8ULbq1/pdisk_1.dat 2025-06-25T14:43:58.060203Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:58.077500Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:58.077573Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:58.083306Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:58.094054Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 14615, node 7 2025-06-25T14:43:58.140577Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:58.140597Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:58.140603Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:58.140735Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27705 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:58.461646Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:02.436699Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897790136488390:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:02.436763Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186a/r3tmp/tmp03kpH2/pdisk_1.dat 2025-06-25T14:44:02.674923Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15634, node 10 2025-06-25T14:44:02.741604Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 2025-06-25T14:44:02.793106Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:02.793196Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:02.805764Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:02.843604Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:02.843628Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:02.843636Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:02.843765Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19557 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:03.113625Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:03.171147Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:03.451947Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:03.471884Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:44:03.831673Z node 10 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 10, TabletId: 72075186224037888 not found 2025-06-25T14:44:07.228343Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897812665935877:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:07.228415Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186a/r3tmp/tmp4A4zBa/pdisk_1.dat 2025-06-25T14:44:07.369751Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:07.393576Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:07.393663Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:07.401794Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19271, node 13 2025-06-25T14:44:07.498524Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:07.498549Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:07.498558Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:07.498711Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7137 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:07.805897Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:08.234502Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TYqlDateTimeTests::SimpleOperations [GOOD] Test command err: 2025-06-25T14:43:41.985955Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897699252687813:2150];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:41.986260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018c2/r3tmp/tmpzHpNIK/pdisk_1.dat 2025-06-25T14:43:42.449093Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:42.449165Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:42.480544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:42.515218Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12582, node 1 2025-06-25T14:43:42.729840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:42.729864Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:42.729871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:42.729968Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:42.989720Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server 
localhost:27337 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:43.254317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:45.009865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:45.152242Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897716432558070:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:45.152242Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897716432558077:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:45.152336Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:45.158041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:45.180755Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897716432558084:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:43:45.247121Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897716432558154:2793] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:45.879337Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrpybxf9anstpchtgq9752, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjJkYzczNWItNTcxMDViOWYtMTE0YTk3YTQtMTZkYWJjYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:43:46.253404Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jykrpz5fdx0tfwtcyjnfqgbm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjJkYzczNWItNTcxMDViOWYtMTE0YTk3YTQtMTZkYWJjYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:43:46.361452Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jykrpzf0arhahqcjfn5pa53y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjJkYzczNWItNTcxMDViOWYtMTE0YTk3YTQtMTZkYWJjYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:43:46.464402Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jykrpzj04ksjyq8ecqwscjf2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjJkYzczNWItNTcxMDViOWYtMTE0YTk3YTQtMTZkYWJjYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:43:46.575430Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jykrpzn89bh5cxwp9mnr846b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjJkYzczNWItNTcxMDViOWYtMTE0YTk3YTQtMTZkYWJjYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:43:48.150406Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897729380091281:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:48.150543Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018c2/r3tmp/tmp8Rb7Ns/pdisk_1.dat TServer::EnableGrpc on GrpcPort 2194, node 4 2025-06-25T14:43:48.393262Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-25T14:43:48.436425Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:48.463272Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:48.463351Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:48.469359Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:48.473071Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:48.473096Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:48.473105Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:48.473249Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11457 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:48.734538Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:43:49.171974Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:51.078460Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:51.145653Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897742264994318:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you ... ase: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZGZkNDBkZTYtZjBmMTNiZGUtYjliNDlmNDgtZjRjNTg0NDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:03.308579Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykrqfyrf71wg0t0vndknv1f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZGZkNDBkZTYtZjBmMTNiZGUtYjliNDlmNDgtZjRjNTg0NDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:05.128727Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897801936976122:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:05.128783Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018c2/r3tmp/tmpKWchG1/pdisk_1.dat 2025-06-25T14:44:05.335161Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13418, node 13 2025-06-25T14:44:05.439239Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:05.439261Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:05.439269Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:05.439407Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:05.458623Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:05.458710Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:05.462581Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10048 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:05.744992Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:06.147243Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:08.192650Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:08.301179Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:08.359525Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897814821879265:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:08.359621Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:08.359880Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897814821879277:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:08.364062Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:08.388851Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519897814821879279:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T14:44:08.470473Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519897814821879372:2884] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:08.568687Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrqn150q5r7ceghszhh6en, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:08.689499Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrqn8wbz75mt7h8ykxmyf3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:09.203243Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykrqnc461t6tcnz75da97j2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:09.209886Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykrqnc461t6tcnz75da97j2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:09.649936Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jykrqnw9cxc6pkbt9603qjqc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:09.656343Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykrqnw9cxc6pkbt9603qjqc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:09.777130Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. Ctx: { TraceId: 01jykrqpa6b6hkhgpzdbs70zve, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:09.881334Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jykrqpdr6mrm14v9r1wg4xzx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:09.978818Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. 
Ctx: { TraceId: 01jykrqph11bayv2r3brpkq3yf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:10.094676Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jykrqpkzdky2zwbvq4tcdz0p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:10.131646Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519897801936976122:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:10.131735Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:10.212059Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jykrqpqp7dms02gb1bfe5hj7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:10.487413Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jykrqpv80nqcax1nzmkn1vdm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:10.491340Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715674. Ctx: { TraceId: 01jykrqpv80nqcax1nzmkn1vdm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=M2VmMzNmZDEtYWVkYzAwNzgtMzUwOTJiZTYtMmVlYjU0MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> TGRpcLdapAuthentication::DisableBuiltinAuthMechanism [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Query [GOOD] >> YdbTableBulkUpsert::Types [GOOD] >> YdbTableBulkUpsert::Uint8 >> TGRpcNewCoordinationClient::CreateAlter [GOOD] >> TGRpcNewCoordinationClient::NodeNotFound >> TxUsage::WriteToTopic_Demo_23_RestartNo_Table >> YdbYqlClient::TestDecimal1 [GOOD] >> YdbYqlClient::TestDecimal35 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcLdapAuthentication::DisableBuiltinAuthMechanism [GOOD] Test command err: 2025-06-25T14:43:47.448675Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897722879976560:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:47.448740Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186c/r3tmp/tmpTE55ei/pdisk_1.dat 2025-06-25T14:43:47.930354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:47.930448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:47.933546Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:47.940296Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3307, node 1 2025-06-25T14:43:48.083098Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:48.083120Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:48.083127Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:48.083235Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65034 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:43:48.418052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:48.494493Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:51.924021Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897741901145184:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:51.924331Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186c/r3tmp/tmpGBeqoR/pdisk_1.dat 2025-06-25T14:43:52.206292Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:52.237600Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:52.237697Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:52.243690Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16295, node 4 2025-06-25T14:43:52.469211Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:52.469237Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:52.469244Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:52.469390Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16702 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:43:52.720837Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:56.620408Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897764977347733:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:56.632448Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186c/r3tmp/tmpbp63eQ/pdisk_1.dat 2025-06-25T14:43:56.929348Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:56.962399Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 13101, node 7 2025-06-25T14:43:57.016777Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:57.020292Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:57.046707Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:57.092339Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:57.092370Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:57.092377Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:57.092490Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26114 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:43:57.384916Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:57.690910Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:01.617375Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519897764977347733:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:01.617452Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:03.562442Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897793002515491:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:03.562814Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186c/r3tmp/tmp9KQRSZ/pdisk_1.dat 2025-06-25T14:44:03.834464Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25511, node 10 2025-06-25T14:44:03.896446Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:03.896528Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:04.001088Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:04.014456Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:04.014479Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:04.014487Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:04.014628Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64396 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:04.329912Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:04.613157Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:08.492642Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897817169804899:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:08.492739Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186c/r3tmp/tmpu2WG9K/pdisk_1.dat 2025-06-25T14:44:08.757525Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12031, node 13 2025-06-25T14:44:08.804887Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 2025-06-25T14:44:08.887187Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:08.887283Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:08.920239Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:08.920265Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:08.920273Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:08.920421Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:08.922422Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2870 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:09.212591Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... >> TGRpcYdbTest::MakeListRemoveDirectory [GOOD] >> TGRpcYdbTest::GetOperationBadRequest >> TTableProfileTests::WrongTableProfile [GOOD] >> TYqlDateTimeTests::DateKey >> TTableProfileTests::ExplicitPartitionsWrongKeyType [GOOD] >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyHosts >> YdbYqlClient::TestReadTableMultiShard >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts_AllowOnlyDefaultGroup [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithIssuerVerification_ClientWithSameIssuer >> YdbYqlClient::CreateAndAltertTableWithPartitioningBySize [GOOD] >> YdbYqlClient::CreateAndAltertTableWithPartitioningByLoad >> TGRpcLdapAuthentication::LdapAuthWithEmptyPassword [GOOD] >> TGRpcYdbTest::ExecuteQueryBadRequest [GOOD] >> TGRpcYdbTest::ExecuteQueryExplicitSession ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TTableProfileTests::ExplicitPartitionsWrongKeyType [GOOD] Test command err: 2025-06-25T14:43:42.071419Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897703873018823:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:42.071467Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018a1/r3tmp/tmp7Zi3JZ/pdisk_1.dat 2025-06-25T14:43:42.554361Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:42.554458Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:42.559039Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:42.574599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9383, node 1 2025-06-25T14:43:42.759098Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:42.759121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:42.759128Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:42.759228Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:43.104238Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3220 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:43.250140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:3220 2025-06-25T14:43:43.600119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-25T14:43:43.625367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:43:44.143079Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519897711097218932:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:44.143968Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:43:44.209366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:44.209428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:44.213298Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:43:44.216793Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3220 2025-06-25T14:43:44.506688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient is connected to server localhost:3220 TClient::Ls request: /Root/ydb_ut_tenant/table-1 2025-06-25T14:43:45.189120Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1750862624790 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-06-25T14:43:45.233354Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-25T14:43:45.234079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:43:48.570983Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897730933740371:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:48.571303Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018a1/r3tmp/tmpSRioc3/pdisk_1.dat 2025-06-25T14:43:48.793782Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:48.837235Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 20467, node 4 2025-06-25T14:43:48.885259Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:48.885358Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:48.903711Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:48.961801Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:48.961819Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:48.961825Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:48.961959Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12060 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:49.218996Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
TClient is connected to server localhost:12060 2025-06-25T14:43:49.513980Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-25T14:43:49.542093Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:49.581559Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:5 ... configuration TClient is connected to server localhost:5190 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:01.490990Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:01.789093Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5190 2025-06-25T14:44:01.867333Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 
2025-06-25T14:44:01.892498Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:02.402581Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519897789384075303:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:02.402654Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:02.439408Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:02.439504Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:02.445139Z node 10 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-06-25T14:44:02.445907Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5190 2025-06-25T14:44:03.041382Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-25T14:44:03.041852Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:44:03.404953Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:44:03.407037Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:04.412696Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:44:05.413005Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:44:08.088580Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897815684081951:2154];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0018a1/r3tmp/tmpGKWeD2/pdisk_1.dat 2025-06-25T14:44:08.179886Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:08.325649Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6270, node 13 2025-06-25T14:44:08.470918Z node 13 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:08.471034Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:08.533133Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:08.533160Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:08.533169Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:08.533330Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:08.537018Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20412 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:08.955879Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:09.089740Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20412 2025-06-25T14:44:09.402119Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-25T14:44:09.430179Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:09.935812Z node 15 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[15:7519897818419971090:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:09.935889Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:09.982087Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:09.982199Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:09.985167Z node 13 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 15 Cookie 15 2025-06-25T14:44:09.986192Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20412 2025-06-25T14:44:10.310633Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519897824274017761:2906] txid# 281474976710660, issues: { message: "Error at split boundary 0: Value of type Uint64 expected in tuple at position 1" severity: 1 } 2025-06-25T14:44:10.316526Z node 13 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15 2025-06-25T14:44:10.317155Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:44:10.935941Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:44:10.948290Z node 15 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:11.939098Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:44:12.940972Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; >> YdbYqlClient::TestYqlIssues >> YdbYqlClient::SecurityTokenAuthMultiTenantSDK [GOOD] >> YdbYqlClient::SecurityTokenAuthMultiTenantSDKAsync >> YdbYqlClient::TestReadTableSnapshot [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Table >> TTableProfileTests::DescribeTableWithPartitioningPolicy [GOOD] >> TTableProfileTests::DescribeTableOptions >> ClientStatsCollector::ExternalMetricRegistryByRawPtr [GOOD] >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr >> TGRpcClientLowTest::GrpcRequestProxyWithoutToken [GOOD] >> TGRpcClientLowTest::GrpcRequestProxyCheckTokenWhenItIsSpecified_Ignore ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcLdapAuthentication::LdapAuthWithEmptyPassword [GOOD] Test command err: 2025-06-25T14:43:53.301958Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897749666267826:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:53.304244Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001865/r3tmp/tmpMrDMBe/pdisk_1.dat 2025-06-25T14:43:53.764397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:53.764524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:53.767844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:53.798125Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62024, node 1 2025-06-25T14:43:53.964492Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:53.964520Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:53.964527Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:53.964655Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3903 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:43:54.324660Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:54.351521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:43:57.925927Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897769842119090:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:57.926021Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001865/r3tmp/tmpGUd83H/pdisk_1.dat 2025-06-25T14:43:58.184039Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:58.190917Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:58.190997Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:58.196860Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26326, node 4 2025-06-25T14:43:58.328159Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:58.328179Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:58.328202Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:58.328372Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6181 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:58.601877Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:43:58.620696Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:43:58.940427Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001865/r3tmp/tmpBnKYOu/pdisk_1.dat 2025-06-25T14:44:02.247807Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897788358292889:2160];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:02.286119Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:02.377398Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:02.400559Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:02.400634Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 62584, node 7 2025-06-25T14:44:02.410225Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:02.492619Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:02.492643Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:02.492649Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:02.492786Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26253 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:02.757805Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:02.770850Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:44:06.874122Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897808099729645:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:06.874231Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001865/r3tmp/tmpSQfq8A/pdisk_1.dat 2025-06-25T14:44:07.005545Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:07.024908Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:07.024994Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:07.032573Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7896, node 10 2025-06-25T14:44:07.089043Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:07.089068Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:07.089076Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:07.089210Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63109 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:07.375721Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:11.051928Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897829052408313:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:11.052018Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001865/r3tmp/tmpJmI2eT/pdisk_1.dat 2025-06-25T14:44:11.339682Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26378, node 13 2025-06-25T14:44:11.418186Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:11.418313Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:11.454842Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:11.471360Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:11.471379Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:11.471384Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:11.471532Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11568 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:11.774412Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:12.082114Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TxUsage::Sinks_Oltp_WriteToTopics_2_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadTableSnapshot [GOOD] Test command err: 2025-06-25T14:43:55.457493Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897758428439448:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:55.457534Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001861/r3tmp/tmpLRwpTd/pdisk_1.dat 2025-06-25T14:43:56.009131Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:56.054286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:56.054364Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:56.057358Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 12218, node 1 2025-06-25T14:43:56.061559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:56.107987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:56.108034Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:56.108041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:56.108161Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63107 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:43:56.563863Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:43:56.584736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:56.620512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:43:58.652962Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897771313342342:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:58.653055Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:58.909078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:59.072633Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897775608309803:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:59.072712Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:59.073052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897775608309808:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:59.076734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:59.100878Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897775608309810:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:43:59.195229Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897775608309886:2809] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:59.304631Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrqbyz9mz992kpvy5808c9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzM2ZTIwZWYtZGRlNDZkODItZTc4MjliNzctM2Y5MjBkOTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:43:59.485366Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jykrqc6y3j09e9r832996v8x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzM2ZTIwZWYtZGRlNDZkODItZTc4MjliNzctM2Y5MjBkOTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:43:59.498801Z node 1 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [1:7519897775608309965:2329] TxId# 281474976710663] RESPONSE Status# ResolveError shard: 0 table: Root/Test 2025-06-25T14:43:59.507071Z node 1 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [1:7519897775608309972:2330] TxId# 281474976710664] RESPONSE Status# ResolveError shard: 0 table: Root/Test 2025-06-25T14:44:00.991537Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897781933008657:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:00.991598Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001861/r3tmp/tmpfI8md9/pdisk_1.dat 2025-06-25T14:44:01.150818Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:01.166017Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:01.166074Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:01.170573Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28368, node 4 2025-06-25T14:44:01.260879Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:01.260907Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:01.260913Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:01.261042Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22595 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:01.502530Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:01.998946Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:03.768261Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897794817911509:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:03.768366Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or ... :09.376051Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519897819148990284:2796] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:09.435770Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrqnws5tmyvg05wc264s8m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZGQ3Y2NlNzQtNWFmM2JjZjEtMTRiODJmZDQtZGZiZWIyMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:09.565593Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrqp3adzs6028kasna8tqc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZGQ3Y2NlNzQtNWFmM2JjZjEtMTRiODJmZDQtZGZiZWIyMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:09.607450Z node 7 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:44:11.353421Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897829049276878:2191];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:11.353736Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001861/r3tmp/tmpVDbVR8/pdisk_1.dat 2025-06-25T14:44:11.555173Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14658, node 10 2025-06-25T14:44:11.700393Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:11.700583Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:11.739074Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:11.756894Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:11.756917Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:11.756925Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:11.757066Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21404 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:12.025262Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:12.101981Z node 10 :GRPC_SERVER INFO: grpc_request_proxy.cpp:592: Got grpc request# ListEndpointsRequest, traceId# 01jykrqrp59njp8t167zy095fx, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:42982, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.998367s 2025-06-25T14:44:12.111223Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateSessionRequest, traceId# 01jykrqrpeexa14ah838e663s2, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:42990, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:12.352978Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:14.580678Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ExecuteSchemeQueryRequest, traceId# 01jykrqv3m7q1t17jw22gqj4zx, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:42994, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:14.585449Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897841934179657:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:14.585558Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:14.603547Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:14.613126Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:44:14.613257Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:14.613329Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:44:14.613386Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:14.685919Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:44:14.685930Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:44:14.686030Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:14.686033Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:14.709164Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jykrqv7n8xc4954hzahe8ct5, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:43010, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:14.719914Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [10:7519897841934179829:2307] Adding quota request to queue ShardId: 0, TxId: 281474976715659 2025-06-25T14:44:14.719967Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [10:7519897841934179829:2307] Assign stream quota to Shard 0, Quota 5, TxId 281474976715659 Reserved: 5 of 25, Queued: 0 2025-06-25T14:44:14.720731Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [10:7519897841934179829:2307] got stream part, size: 35, RU required: 128 rate limiter absent 2025-06-25T14:44:14.721091Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [10:7519897841934179829:2307] Starting inactivity timer for 600.000000s with tag 3 2025-06-25T14:44:14.721150Z node 10 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [10:7519897841934179829:2307] Finish grpc stream, status: 400000 2025-06-25T14:44:14.733125Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000089480] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.733144Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f0680] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.733317Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00008f480] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.733426Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f4280] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.733468Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000e0480] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 
2025-06-25T14:44:14.733616Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000dec80] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.733658Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000e0a80] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.733800Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000e1680] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.733859Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000e2280] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.733979Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00008e880] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.734039Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f4e80] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.734149Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000e3a80] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.734204Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000dfe80] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.734336Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000de680] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.734393Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000de080] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.734520Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000e2e80] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-25T14:44:14.734575Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000053480] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 >> TxUsage::Sinks_Oltp_WriteToTopics_3_Table >> TxUsage::WriteToTopic_Demo_28_Query [GOOD] >> TGRpcYdbTest::ReadTable [GOOD] >> TGRpcYdbTest::ReadTablePg >> TGRpcNewCoordinationClient::NodeNotFound [GOOD] >> TGRpcNewCoordinationClient::MultipleSessionsSemaphores >> TGRpcNewCoordinationClient::SessionMethods >> YdbOlapStore::ManyTables >> TGRpcYdbTest::GetOperationBadRequest [GOOD] >> TGRpcYdbTest::OperationTimeout >> TxUsage::WriteToTopic_Demo_29_Table >> TxUsage::WriteToTopic_Demo_14_Query [GOOD] >> YdbYqlClient::TestDecimal35 [GOOD] >> YdbYqlClient::TestDecimalFullStack >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyHosts [GOOD] >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBaseDn >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query [FAIL] >> YdbTableBulkUpsertOlap::UpsertCsvBug >> TxUsage::WriteToTopic_Demo_15_Table >> TxUsage::WriteToTopic_Demo_9_Table [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Table >> YdbYqlClient::CreateAndAltertTableWithPartitioningByLoad [GOOD] >> YdbYqlClient::CreateAndAltertTableWithReadReplicasSettings >> YdbYqlClient::TestReadTableMultiShard [GOOD] >> YdbYqlClient::TestReadTableMultiShardUseSnapshot >> BsControllerConfig::MergeBoxes [GOOD] >> YdbYqlClient::SecurityTokenAuthMultiTenantSDKAsync [GOOD] >> YdbYqlClient::SimpleColumnFamilies >> TGRpcClientLowTest::GrpcRequestProxyCheckTokenWhenItIsSpecified_Ignore [GOOD] >> TGRpcClientLowTest::GrpcRequestProxyCheckTokenWhenItIsSpecified_Check >> 
TGRpcYdbTest::ExecuteQueryExplicitSession [GOOD] >> TGRpcYdbTest::ExecuteDmlQuery >> YdbYqlClient::TestYqlIssues [GOOD] >> YdbYqlClient::TestYqlSessionClosed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::MergeBoxes [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:11115:2156] recipient: [1:10913:2167] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:11115:2156] recipient: [1:10913:2167] Leader for TabletID 72057594037932033 is [1:11212:2169] sender: [1:11215:2156] recipient: [1:10913:2167] 2025-06-25T14:43:06.362473Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T14:43:06.366573Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T14:43:06.366957Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T14:43:06.369013Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:43:06.369732Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T14:43:06.369885Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:06.369905Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T14:43:06.370318Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T14:43:06.377287Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T14:43:06.377420Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T14:43:06.377545Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T14:43:06.377636Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:06.377690Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T14:43:06.377756Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:11212:2169] sender: [1:11238:2156] recipient: [1:110:2157] 2025-06-25T14:43:06.390232Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T14:43:06.390413Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:06.401230Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T14:43:06.401361Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:06.401434Z node 1 
:BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T14:43:06.401528Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:06.401642Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T14:43:06.401709Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:06.401747Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T14:43:06.401807Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:06.412590Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T14:43:06.412722Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:06.423354Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T14:43:06.423474Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T14:43:06.424257Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T14:43:06.424288Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T14:43:06.424440Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T14:43:06.424510Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-25T14:43:06.436688Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk0" } Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" } Drive { Path: "/dev/disk3" } Drive { Path: "/dev/disk4" } Drive { Path: "/dev/disk5" } Drive { Path: "/dev/disk6" } Drive { Path: "/dev/disk7" } Drive { Path: "/dev/disk8" Type: SSD } Drive { Path: "/dev/disk9" Type: SSD } Drive { Path: "/dev/disk10" Type: SSD } Drive { Path: "/dev/disk11" Type: SSD } Drive { Path: "/dev/disk12" Type: SSD } Drive { Path: "/dev/disk13" Type: SSD } Drive { Path: "/dev/disk14" Type: SSD } Drive { Path: "/dev/disk15" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 
} HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12051 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12052 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12053 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12054 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12055 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12056 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12057 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12058 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12059 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12060 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12061 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12062 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12063 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12064 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12065 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12066 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12067 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12068 } 
HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12069 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12070 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12071 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12072 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12073 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12074 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12075 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12076 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12077 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12078 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12079 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12080 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12081 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12082 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12083 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12084 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12085 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12086 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12087 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12088 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12089 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12090 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12091 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12092 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12093 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12094 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12095 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12096 } HostConfigId: 1 } Host { Ke ... pp:340} Create new pdisk PDiskId# 276:1000 Path# /dev/disk1 2025-06-25T14:44:10.846549Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 276:1001 Path# /dev/disk2 2025-06-25T14:44:10.846572Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 276:1002 Path# /dev/disk3 2025-06-25T14:44:10.846594Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 277:1000 Path# /dev/disk1 2025-06-25T14:44:10.846615Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 277:1001 Path# /dev/disk2 2025-06-25T14:44:10.846638Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 277:1002 Path# /dev/disk3 2025-06-25T14:44:10.846660Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 278:1000 Path# /dev/disk1 2025-06-25T14:44:10.846683Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 278:1001 Path# /dev/disk2 2025-06-25T14:44:10.846710Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 278:1002 Path# /dev/disk3 2025-06-25T14:44:10.846731Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 279:1000 Path# /dev/disk1 2025-06-25T14:44:10.846754Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 279:1001 Path# /dev/disk2 2025-06-25T14:44:10.846777Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 279:1002 Path# /dev/disk3 2025-06-25T14:44:10.846799Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 280:1000 Path# /dev/disk1 2025-06-25T14:44:10.846820Z node 
251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 280:1001 Path# /dev/disk2 2025-06-25T14:44:10.846843Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 280:1002 Path# /dev/disk3 2025-06-25T14:44:10.846864Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 281:1000 Path# /dev/disk1 2025-06-25T14:44:10.846887Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 281:1001 Path# /dev/disk2 2025-06-25T14:44:10.846912Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 281:1002 Path# /dev/disk3 2025-06-25T14:44:10.846934Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 282:1000 Path# /dev/disk1 2025-06-25T14:44:10.846956Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 282:1001 Path# /dev/disk2 2025-06-25T14:44:10.846979Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 282:1002 Path# /dev/disk3 2025-06-25T14:44:10.847000Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 283:1000 Path# /dev/disk1 2025-06-25T14:44:10.847022Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 283:1001 Path# /dev/disk2 2025-06-25T14:44:10.847047Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 283:1002 Path# /dev/disk3 2025-06-25T14:44:10.847081Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 284:1000 Path# /dev/disk1 2025-06-25T14:44:10.847104Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 284:1001 Path# /dev/disk2 2025-06-25T14:44:10.847138Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 284:1002 Path# /dev/disk3 2025-06-25T14:44:10.847184Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 285:1000 Path# /dev/disk1 2025-06-25T14:44:10.847215Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 285:1001 Path# /dev/disk2 2025-06-25T14:44:10.847240Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 285:1002 Path# /dev/disk3 2025-06-25T14:44:10.847264Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 286:1000 Path# /dev/disk1 2025-06-25T14:44:10.847287Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 286:1001 Path# /dev/disk2 2025-06-25T14:44:10.847308Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 286:1002 Path# /dev/disk3 2025-06-25T14:44:10.847332Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 287:1000 Path# /dev/disk1 2025-06-25T14:44:10.847354Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 287:1001 Path# /dev/disk2 2025-06-25T14:44:10.847379Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 287:1002 Path# /dev/disk3 2025-06-25T14:44:10.847402Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 288:1000 Path# /dev/disk1 2025-06-25T14:44:10.847430Z node 
251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 288:1001 Path# /dev/disk2 2025-06-25T14:44:10.847454Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 288:1002 Path# /dev/disk3 2025-06-25T14:44:10.847477Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 289:1000 Path# /dev/disk1 2025-06-25T14:44:10.847499Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 289:1001 Path# /dev/disk2 2025-06-25T14:44:10.847521Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 289:1002 Path# /dev/disk3 2025-06-25T14:44:10.847542Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 290:1000 Path# /dev/disk1 2025-06-25T14:44:10.847565Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 290:1001 Path# /dev/disk2 2025-06-25T14:44:10.847589Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 290:1002 Path# /dev/disk3 2025-06-25T14:44:10.847612Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 291:1000 Path# /dev/disk1 2025-06-25T14:44:10.847634Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 291:1001 Path# /dev/disk2 2025-06-25T14:44:10.847659Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 291:1002 Path# /dev/disk3 2025-06-25T14:44:10.847683Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 292:1000 Path# /dev/disk1 2025-06-25T14:44:10.847705Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 292:1001 Path# /dev/disk2 2025-06-25T14:44:10.847728Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 292:1002 Path# /dev/disk3 2025-06-25T14:44:10.847758Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 293:1000 Path# /dev/disk1 2025-06-25T14:44:10.847783Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 293:1001 Path# /dev/disk2 2025-06-25T14:44:10.847807Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 293:1002 Path# /dev/disk3 2025-06-25T14:44:10.847830Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 294:1000 Path# /dev/disk1 2025-06-25T14:44:10.847856Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 294:1001 Path# /dev/disk2 2025-06-25T14:44:10.847878Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 294:1002 Path# /dev/disk3 2025-06-25T14:44:10.847900Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 295:1000 Path# /dev/disk1 2025-06-25T14:44:10.847924Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 295:1001 Path# /dev/disk2 2025-06-25T14:44:10.847949Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 295:1002 Path# /dev/disk3 2025-06-25T14:44:10.847973Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 296:1000 Path# /dev/disk1 2025-06-25T14:44:10.847997Z node 
251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 296:1001 Path# /dev/disk2 2025-06-25T14:44:10.848021Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 296:1002 Path# /dev/disk3 2025-06-25T14:44:10.848057Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 297:1000 Path# /dev/disk1 2025-06-25T14:44:10.848095Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 297:1001 Path# /dev/disk2 2025-06-25T14:44:10.848118Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 297:1002 Path# /dev/disk3 2025-06-25T14:44:10.848141Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 298:1000 Path# /dev/disk1 2025-06-25T14:44:10.848169Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 298:1001 Path# /dev/disk2 2025-06-25T14:44:10.848192Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 298:1002 Path# /dev/disk3 2025-06-25T14:44:10.848215Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 299:1000 Path# /dev/disk1 2025-06-25T14:44:10.848239Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 299:1001 Path# /dev/disk2 2025-06-25T14:44:10.848358Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 299:1002 Path# /dev/disk3 2025-06-25T14:44:10.848384Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 300:1000 Path# /dev/disk1 2025-06-25T14:44:10.848409Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 300:1001 Path# /dev/disk2 2025-06-25T14:44:10.848431Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 300:1002 Path# /dev/disk3 2025-06-25T14:44:11.030554Z node 251 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.185871s 2025-06-25T14:44:11.030724Z node 251 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.186063s 2025-06-25T14:44:11.042021Z node 251 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 251 Type# 268639257 2025-06-25T14:44:11.058581Z node 251 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { MergeBoxes { OriginBoxId: 2 OriginBoxGeneration: 1 TargetBoxId: 1 TargetBoxGeneration: 1 StoragePoolIdMap { OriginStoragePoolId: 1 TargetStoragePoolId: 2 } } } } 2025-06-25T14:44:11.082356Z node 251 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:400} Execute TEvControllerConfigRequest Request# {Command { ReadBox { BoxId: 1 } } Command { QueryBaseConfig { } } } >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr [GOOD] |84.9%| [TA] $(B)/ydb/core/mind/bscontroller/ut_bscontroller/test-results/unittest/{meta.json ... results_accumulator.log} |84.9%| [TA] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TGRpcNewCoordinationClient::MultipleSessionsSemaphores [GOOD] >> TGRpcNewCoordinationClient::SessionAcquireAcceptedCallback >> TGRpcNewCoordinationClient::SessionMethods [GOOD] >> TGRpcNewCoordinationClient::SessionDescribeWatchData >> TxUsage::WriteToTopic_Demo_9_Query >> TYqlDateTimeTests::DateKey [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr [GOOD] Test command err: 2025-06-25T14:43:54.820026Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897756482691802:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:54.820060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001864/r3tmp/tmpFW0UAz/pdisk_1.dat 2025-06-25T14:43:55.226949Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:55.290021Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:55.290101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:55.290463Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:43:55.292294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14879, node 1 2025-06-25T14:43:55.378734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:55.378756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:55.378763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:55.378850Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13804 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:43:55.829410Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:43:55.842498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:57.753940Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897769367594658:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:57.754044Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:57.760418Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897769367594670:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:57.767845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:57.812515Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897769367594672:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:43:57.910336Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897769367594741:2661] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:58.332841Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710660. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=1&id=MzMzZTEzM2YtNWJjZjhmZjgtNGIxNTM3OGEtN2RkZGFkZjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Database not set, use /Root 2025-06-25T14:43:59.764214Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897774999537036:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:59.764381Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001864/r3tmp/tmpuvWG0M/pdisk_1.dat 2025-06-25T14:43:59.933464Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:59.950634Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:59.950748Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:59.957988Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28082, node 4 2025-06-25T14:44:00.042849Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:00.042891Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:00.042908Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:00.043035Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4996 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:00.297805Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:00.780451Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:02.598700Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897787884439898:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:02.598758Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897787884439890:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:02.598851Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:02.602211Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:02.636491Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519897787884439904:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:44:02.699657Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897787884439981:2674] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:04.768782Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897798454227367:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:04.770500Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001864/r3tmp/tmp5v ... :7519897819929065500:2322] from: [7:7519897819929065467:2322] 2025-06-25T14:44:09.916774Z node 7 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [7:7519897819929065500:2322] TxId: 281474976710678. Ctx: { TraceId: 01jykrqphs56jvpx60gs9md1y7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YjJkNTQ2ODItYzU4MmUzN2UtMjc1ZDMzNjktMzNjOGM0OWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `Root/names`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T14:44:09.916922Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=7&id=YjJkNTQ2ODItYzU4MmUzN2UtMjc1ZDMzNjktMzNjOGM0OWE=, ActorId: [7:7519897815634097871:2322], ActorState: ExecuteState, TraceId: 01jykrqphs56jvpx60gs9md1y7, Create QueryResponse for error on request, msg: 2025-06-25T14:44:11.728181Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897828031602626:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:11.728257Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001864/r3tmp/tmpXZW58J/pdisk_1.dat 2025-06-25T14:44:11.968523Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32500, node 10 2025-06-25T14:44:12.064037Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:12.064165Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:12.089757Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:12.095585Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:12.095619Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:12.095627Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:12.095768Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29303 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:12.401805Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:12.776476Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:15.125593Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897845211472855:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:15.125624Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897845211472847:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:15.125699Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:15.129858Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:15.151394Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519897845211472861:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:44:15.238379Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519897845211472936:2682] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:16.697694Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897848745009385:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:16.697739Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001864/r3tmp/tmp5kYnFS/pdisk_1.dat 2025-06-25T14:44:16.887076Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28844, node 13 2025-06-25T14:44:17.024656Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:17.024785Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:17.036015Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:17.043161Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:17.043188Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:17.043196Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:17.043332Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29937 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:17.282220Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:17.739984Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:19.848589Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897861629912276:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.848704Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.849066Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897861629912288:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.853234Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:19.877027Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519897861629912290:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:44:19.973041Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519897861629912369:2676] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBaseDn [GOOD] >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBindDn >> TGRpcYdbTest::OperationTimeout [GOOD] >> TGRpcYdbTest::OperationCancelAfter >> YdbYqlClient::ConnectDbAclIsStrictlyChecked >> TTableProfileTests::DescribeTableOptions [GOOD] >> TGRpcYdbTest::ReadTablePg [GOOD] >> TxUsage::WriteToTopic_Demo_39_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TYqlDateTimeTests::DateKey [GOOD] Test command err: 2025-06-25T14:43:45.656808Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897716351096280:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:45.656846Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186d/r3tmp/tmpY07sO7/pdisk_1.dat 2025-06-25T14:43:46.086327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:46.086438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:46.093980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:46.101002Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25362, node 1 2025-06-25T14:43:46.232973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:46.232998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:46.233005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:46.233127Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2664 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:46.581276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:46.667721Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2664 2025-06-25T14:43:46.843534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-25T14:43:46.864242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:43:47.372907Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519897725689090388:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:47.373187Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:43:47.401411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:47.401477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:47.434886Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:43:47.436434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2664 2025-06-25T14:43:47.951104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:48.384467Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2664 TClient::Ls request: /Root/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1750862628081 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-06-25T14:43:48.716533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient is connected to server localhost:2664 TClient::Ls request: /Root/table-2 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-2" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710661 CreateStep: 1750862628802 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-06-25T14:43:49.015251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient is connected to server localhost:2664 TClient::Ls request: /Root/table-3 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-3" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710662 CreateStep: 1750862629096 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-06-25T14:43:49.327359Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-25T14:43:49.327925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:43:52.045767Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897748240679088:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:52.045835Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186d/r3tmp/tmpTLUXHi/pdisk_1.dat 2025-06-25T14:43:52.282352Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:52.345482Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 4356, node 4 2025-06-25T14:43:52.425402Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:52.425520Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:52.461650Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:52.581803Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:4 ... 2057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:08.682131Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:08.796495Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22952 2025-06-25T14:44:09.120984Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 
2025-06-25T14:44:09.153824Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:09.660761Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519897818492228390:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:09.660877Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:09.703832Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:09.703965Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:09.707495Z node 10 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-06-25T14:44:09.708515Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22952 2025-06-25T14:44:10.229107Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-25T14:44:10.229698Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:44:10.662122Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:44:10.683269Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:11.662577Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:44:12.663079Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:44:15.143322Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897845605169511:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:15.143417Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00186d/r3tmp/tmpixKrz3/pdisk_1.dat 2025-06-25T14:44:15.314126Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:15.337442Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-25T14:44:15.337559Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:15.346802Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29783, node 13 2025-06-25T14:44:15.448839Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:15.448866Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:15.448878Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:15.449053Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16427 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:44:15.881385Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:44:16.154500Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:19.749898Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:19.857470Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897862785039849:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.857562Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897862785039857:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.857647Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.863111Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:19.911928Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519897862785039863:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:19.986049Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519897862785039934:2789] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:20.144029Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519897845605169511:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:20.144121Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:20.170311Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrr08f48ygd96j50jfa0ct, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=MTVmNjkzNmEtNjc5YWZmNWEtOTc5NWMwNzMtMmUxMGFmNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:20.501363Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrr0kc7qgcp9n7geft7dc7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=MTVmNjkzNmEtNjc5YWZmNWEtOTc5NWMwNzMtMmUxMGFmNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:20.771821Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrr0x0ebczxy8c67r06c7a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=MTVmNjkzNmEtNjc5YWZmNWEtOTc5NWMwNzMtMmUxMGFmNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:20.994080Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykrr15ef9x9tvd64byz0crx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=MTVmNjkzNmEtNjc5YWZmNWEtOTc5NWMwNzMtMmUxMGFmNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> TxUsage::WriteToTopic_Demo_39_Query >> YdbImport::EmptyData ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::ReadTablePg [GOOD] Test command err: 2025-06-25T14:43:57.867369Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897767475094253:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:57.867425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185b/r3tmp/tmptiCz5D/pdisk_1.dat 2025-06-25T14:43:58.398813Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:58.398933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:58.405401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:58.407391Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:58.458862Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 1181, node 1 2025-06-25T14:43:58.512780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:58.512808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:58.512820Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:58.512956Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25222 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:43:58.868495Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:43:58.902578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:59.007874Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897776065029571:2592] txid# 281474976715658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-25T14:44:02.343665Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897788815627238:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:02.343713Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185b/r3tmp/tmpwjyBea/pdisk_1.dat 2025-06-25T14:44:02.718377Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:02.740902Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:02.741001Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:02.747970Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30381, node 4 2025-06-25T14:44:02.921452Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:02.921595Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:02.921604Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:02.921739Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15085 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:03.213494Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:03.376723Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:05.695507Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897801700530132:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:05.695507Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897801700530123:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:05.695583Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:05.699063Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:05.746444Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519897801700530137:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:44:05.831365Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897801700530217:2676] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:07.866372Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897812865614705:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:07.866415Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185b/r3tmp/tmpKOIrX7/pdisk_1.dat 2025-06-25T14:44:08.057573Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:08.074258Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:08.074389Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:08.089347Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63721, node 7 2025-06-25T14:44:08.205059Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:08.205085Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:08.205093Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:08.205241Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11397 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Deprica ... 
-06-25T14:44:18.810571Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:18.904811Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:44:18.904916Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:18.904926Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:44:18.904970Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:18.924872Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateSessionRequest, traceId# 01jykrqzbc32ba7xxy9p6f4hby, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:59184, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:18.981854Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:21.641325Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ExecuteDataQueryRequest, traceId# 01jykrr209f188g4r2byhje9a9, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:59184, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:21.643354Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897869002839883:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:21.643415Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897869002839872:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:21.643708Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:21.647664Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:21.662544Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:44:21.662675Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:21.662686Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:44:21.662730Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:21.675648Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:44:21.675770Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:21.679075Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:44:21.679185Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:21.683202Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519897869002839886:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:44:21.783403Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519897869002839965:2783] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:21.925881Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrr209f188g4r2byhje9a9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTM0MjYwZWEtZjAxMGZkYWYtNmVkYzdhNjAtYTc2ZGM3NDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:21.949731Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jykrr29xe9de31sapz5e038g, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:59184, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:21.950048Z node 13 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [13:7519897869002840009:2312] Finish grpc stream, status: 400010 2025-06-25T14:44:21.954883Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jykrr2a267rnv656qg1r97zq, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:59184, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:21.966235Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [13:7519897869002840013:2313] Adding quota request to queue ShardId: 0, TxId: 281474976710662 2025-06-25T14:44:21.966290Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [13:7519897869002840013:2313] Assign stream quota to Shard 0, Quota 5, TxId 281474976710662 Reserved: 5 of 25, Queued: 0 2025-06-25T14:44:21.968634Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [13:7519897869002840013:2313] got stream part, size: 246, RU required: 128 rate limiter absent 2025-06-25T14:44:21.968922Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [13:7519897869002840013:2313] Starting inactivity timer for 600.000000s with tag 3 2025-06-25T14:44:21.970949Z node 13 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [13:7519897869002840013:2313] Finish grpc stream, status: 400000 2025-06-25T14:44:21.974977Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jykrr2ap2j5wfa827kqzpze4, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:59184, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:21.997571Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [13:7519897869002840042:2316] Adding quota request to queue ShardId: 0, TxId: 281474976710664 2025-06-25T14:44:21.997612Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [13:7519897869002840042:2316] Assign stream quota to Shard 0, Quota 5, TxId 281474976710664 Reserved: 5 of 25, Queued: 0 2025-06-25T14:44:22.004798Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [13:7519897869002840042:2316] got stream part, size: 84, RU required: 128 rate limiter absent 2025-06-25T14:44:22.005075Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [13:7519897869002840042:2316] Starting inactivity timer for 600.000000s with tag 3 2025-06-25T14:44:22.062595Z node 13 :READ_TABLE_API 
NOTICE: rpc_read_table.cpp:531: [13:7519897869002840042:2316] Finish grpc stream, status: 400000 2025-06-25T14:44:22.064386Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jykrr2dgb6z0k21k1ev0f86e, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:59184, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:22.073083Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [13:7519897873297807360:2318] Adding quota request to queue ShardId: 0, TxId: 281474976710666 2025-06-25T14:44:22.073132Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [13:7519897873297807360:2318] Assign stream quota to Shard 0, Quota 5, TxId 281474976710666 Reserved: 5 of 25, Queued: 0 2025-06-25T14:44:22.074133Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [13:7519897873297807360:2318] got stream part, size: 210, RU required: 128 rate limiter absent 2025-06-25T14:44:22.074460Z node 13 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [13:7519897873297807360:2318] Starting inactivity timer for 600.000000s with tag 3 2025-06-25T14:44:22.116024Z node 13 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [13:7519897873297807360:2318] Finish grpc stream, status: 400000 2025-06-25T14:44:22.118484Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000c4880] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.118485Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b4c80] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.118717Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b4680] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.118721Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000bb880] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.118901Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000054080] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.118912Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000054680] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.119061Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d4a80] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.119077Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000cf680] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.119218Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000cea80] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.119232Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00003d280] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.119394Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00010b080] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.119408Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000cd280] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.119549Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000076880] received request Name# RegisterNode ok# false data# peer# current inflight# 0 
2025-06-25T14:44:22.119570Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000077480] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.119713Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000054c80] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.119746Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000cde80] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-25T14:44:22.119870Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000b4080] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TTableProfileTests::DescribeTableOptions [GOOD] Test command err: 2025-06-25T14:43:56.937845Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897764611195279:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:56.938263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185e/r3tmp/tmpbBVczf/pdisk_1.dat 2025-06-25T14:43:57.477022Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:57.504386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:57.504483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:57.528609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:57.565275Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 7201, node 1 2025-06-25T14:43:57.678335Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:57.679475Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:57.679493Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:57.692398Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:57.928505Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11571 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:58.134010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Trying to register node Register node result Status { Code: UNAUTHORIZED Reason: "Cannot authorize node. Access denied" } 2025-06-25T14:44:01.425416Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897786465690390:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:01.430868Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185e/r3tmp/tmpVDSYZN/pdisk_1.dat 2025-06-25T14:44:01.643706Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:01.660255Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:01.660487Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:01.666432Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8317, node 4 2025-06-25T14:44:01.895131Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:01.895164Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:01.895173Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:01.895357Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63577 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:02.170954Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Trying to register node Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root" Expire: 1750869841622966 Nodes { NodeId: 1024 Host: "localhost" Port: 5489 ResolveHost: "localhost" Address: "localhost" Location { DataCenter: "DataCenter" Rack: "Rack" Unit: "Body" } Expire: 1750869841622966 } Nodes { NodeId: 4 Host: "::1" Port: 12001 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 49 RoomNum: 1 RackNum: 1 BodyNum: 1 DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Nodes { NodeId: 5 Host: "::1" Port: 12002 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 50 RoomNum: 2 RackNum: 2 BodyNum: 2 DataCenter: "2" Module: "2" Rack: "2" Unit: "2" } } Nodes { NodeId: 6 Host: "::1" Port: 12003 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 51 RoomNum: 3 RackNum: 3 BodyNum: 3 DataCenter: "3" Module: "3" Rack: "3" Unit: "3" } } 2025-06-25T14:44:02.464429Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:05.983227Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897803266400701:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:05.983328Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185e/r3tmp/tmpDetr82/pdisk_1.dat 2025-06-25T14:44:06.173719Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4627, node 7 2025-06-25T14:44:06.300224Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:06.300264Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:06.300276Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:06.300416Z node 7 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:06.302905Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:06.302990Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:06.305933Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15944 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:06.565559Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Trying to register node Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root" Expire: 1750869846162777 Nodes { NodeId: 1024 Host: "localhost" Port: 16438 ResolveHost: "localhost" Address: "localhost" Location { DataCenter: "DataCenter" Rack: "Rack" Unit: "Body" } Expire: 1750869846162777 } Nodes { NodeId: 7 Host: "::1" Port: 12001 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 49 RoomNum: 1 RackNum: 1 BodyNum: 1 DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Nodes { NodeId: 8 Host: "::1" Port: 12002 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 50 RoomNum: 2 RackNum: 2 BodyNum: 2 DataCenter: "2" Module: "2" Rack: "2" Unit: "2" ... /ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:63039 2025-06-25T14:44:11.518643Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 
2025-06-25T14:44:11.554367Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:11.600827Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:12.071521Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519897831412081181:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:12.071598Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:12.135108Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:12.135199Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:12.165476Z node 10 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-06-25T14:44:12.180413Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63039 2025-06-25T14:44:13.136298Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:14.230352Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient is connected to server localhost:63039 TClient::Ls request: /Root/ydb_ut_tenant/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715660 CreateStep: 1750862654460 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "Data" Type: "String" TypeId: 4097 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "KeyHash" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name... 
(TRUNCATED) 2025-06-25T14:44:14.874465Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-25T14:44:14.876387Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:44:15.575102Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:44:15.595203Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7519897844296984239:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:44:15.683953Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:7519897844296984239:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:44:16.787613Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897848214246644:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:16.788870Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185e/r3tmp/tmpQtjBnn/pdisk_1.dat 2025-06-25T14:44:17.075075Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:17.127811Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 7564, node 13 2025-06-25T14:44:17.137095Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:17.137208Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:17.141372Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:17.179671Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:17.179695Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:17.179705Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:17.179870Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24676 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:17.573178Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:17.847876Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24676 2025-06-25T14:44:17.965141Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-25T14:44:17.990730Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:18.509756Z node 15 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[15:7519897856680810071:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:18.509858Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:18.624290Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:18.624390Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:18.633200Z node 13 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 15 Cookie 15 2025-06-25T14:44:18.640591Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24676 2025-06-25T14:44:19.017271Z node 13 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15 2025-06-25T14:44:19.017715Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:44:19.513971Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:44:19.514097Z node 15 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:20.516798Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:44:21.524516Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=incorrect path status: LookupError; >> YdbTableBulkUpsertOlap::UpsertCsvBug [GOOD] >> YdbTableBulkUpsertOlap::UpsertCSV >> 
YdbYqlClient::CreateAndAltertTableWithReadReplicasSettings [GOOD] >> YdbYqlClient::CreateTableWithMESettings >> YdbMonitoring::SelfCheckWithNodesDying [GOOD] >> YdbOlapStore::BulkUpsert >> YdbYqlClient::TestReadTableMultiShardUseSnapshot [GOOD] >> YdbYqlClient::TestReadTableMultiShardOneRow >> YdbYqlClient::TestDecimalFullStack [GOOD] >> YdbYqlClient::TestDescribeDirectory >> TGRpcClientLowTest::GrpcRequestProxyCheckTokenWhenItIsSpecified_Check [GOOD] >> TGRpcClientLowTest::ChangeAcl >> YdbTableBulkUpsert::Uint8 [GOOD] >> YdbTableBulkUpsert::Timeout >> TRegisterNodeOverDiscoveryService::ServerWithIssuerVerification_ClientWithSameIssuer [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithOutCertVerification_ClientProvidesExpiredCert >> YdbYqlClient::TestYqlSessionClosed [GOOD] >> YdbYqlClient::TestYqlLongSessionPrepareError >> YdbQueryService::TestCreateAndAttachSession >> TGRpcYdbTest::ExecuteDmlQuery [GOOD] >> TGRpcYdbTest::ExecutePreparedQuery >> TGRpcLdapAuthentication::LdapAuthWithValidCredentials >> LocalPartition::WithoutPartitionWithSplit [GOOD] >> TxUsage::SessionAbort_Table >> TGRpcNewCoordinationClient::SessionDescribeWatchData [GOOD] >> TGRpcNewCoordinationClient::SessionDescribeWatchOwners >> TxUsage::WriteToTopic_Demo_23_RestartNo_Table [GOOD] >> YdbTableBulkUpsertOlap::UpsertArrowBatch >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBindDn [GOOD] >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBindPassword >> TGRpcNewCoordinationClient::SessionAcquireAcceptedCallback [GOOD] >> YdbYqlClient::ConnectDbAclIsStrictlyChecked [GOOD] >> YdbYqlClient::ConnectDbAclIsOffWhenYdbRequestsWithoutDatabase >> TGRpcYdbTest::OperationCancelAfter [GOOD] >> TGRpcYdbTest::KeepAlive ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcNewCoordinationClient::SessionAcquireAcceptedCallback [GOOD] Test command err: 2025-06-25T14:44:05.928928Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897804230264065:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:05.936594Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184b/r3tmp/tmpzJWsvA/pdisk_1.dat 2025-06-25T14:44:06.308671Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:06.351662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:06.351762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 14693, node 1 2025-06-25T14:44:06.365653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:06.413317Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:06.413339Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:06.413346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:06.414113Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:26477 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:06.748503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:06.830526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:44:06.919371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:44:06.944158Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:07.026556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropKesus, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_kesus.cpp:186) 2025-06-25T14:44:07.038221Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:44:07.045210Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-25T14:44:09.843826Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897817571368461:2152];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184b/r3tmp/tmp7eWgm4/pdisk_1.dat 2025-06-25T14:44:09.892522Z node 4 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:09.978044Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:09.991150Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:09.991220Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:10.002138Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15517, node 4 2025-06-25T14:44:10.184760Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:10.184778Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:10.184785Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:10.184901Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62564 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:10.487993Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:10.556302Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:44:10.685464Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterKesus, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp:19) 2025-06-25T14:44:10.774986Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterKesus, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp:19) 2025-06-25T14:44:10.839462Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:10.841305Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterKesus, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_kesus.cpp:19) 2025-06-25T14:44:13.807441Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897836168621747:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:13.807502Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184b/r3tmp/tmpT8yVTZ/pdisk_1.dat 2025-06-25T14:44:13.964493Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:13.981297Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:13.981372Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:13.984571Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27176, node 7 2025-06-25T14:44:14.044469Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:14.044497Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:14.044506Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:14.044664Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23208 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:14.286246Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:18.060694Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897856480461922:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:18.060751Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184b/r3tmp/tmpN89noV/pdisk_1.dat 2025-06-25T14:44:18.283209Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:18.325030Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 24445, node 10 2025-06-25T14:44:18.380910Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:18.380933Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:18.380942Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:18.381088Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:18.381124Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:18.381205Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:18.383534Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20682 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:18.657835Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:18.736352Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:44:19.124010Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:22.501421Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897875181652807:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:22.501565Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184b/r3tmp/tmp8X30UH/pdisk_1.dat 2025-06-25T14:44:22.664805Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:22.689221Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:22.689311Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:22.693732Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16477, node 13 2025-06-25T14:44:22.823833Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:22.823856Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:22.823864Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:22.823992Z node 13 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22686 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:23.133288Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:23.230675Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:44:23.515050Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TxUsage::WriteToTopic_Demo_29_Table [GOOD] >> YdbImport::EmptyData [GOOD] >> YdbImport::ImportFromS3ToExistingTable >> SystemView::QueryStatsAllTables [GOOD] >> SystemView::QueryStatsRetries >> YdbQueryService::TestCreateAndAttachSession [GOOD] >> YdbYqlClient::TestReadTableMultiShardOneRow [GOOD] >> YdbQueryService::TestAttachTwice >> YdbYqlClient::TestReadTableBatchLimits >> TTableProfileTests::OverwriteCompactionPolicy >> TGRpcClientLowTest::ChangeAcl [GOOD] >> YdbYqlClient::TestDescribeDirectory [GOOD] >> YdbYqlClient::CreateTableWithMESettings [GOOD] >> TxUsage::WriteToTopic_Demo_44_Table [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Table [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithValidCredentials [GOOD] >> TGRpcLdapAuthentication::LdapAuthWithInvalidSearchFilter >> TGRpcNewCoordinationClient::SessionDescribeWatchOwners [GOOD] >> TGRpcNewCoordinationClient::SessionDescribeWatchReplace >> TBlobStorageProxyTest::TestGetAndRangeGetManyBlobs [GOOD] >> TBlobStorageProxyTest::TestEmptyRange >> YdbYqlClient::TestYqlLongSessionPrepareError [GOOD] >> YdbYqlClient::TestYqlLongSessionMultipleErrors >> YdbTableBulkUpsert::Timeout [GOOD] >> YdbTableBulkUpsert::ZeroRows >> TxUsage::WriteToTopic_Demo_44_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcClientLowTest::ChangeAcl [GOOD] Test command err: 2025-06-25T14:44:08.537857Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897815425346568:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:08.538256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001849/r3tmp/tmpJZ93P6/pdisk_1.dat 2025-06-25T14:44:08.975080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:08.975157Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:08.977479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:08.991663Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27649, node 1 2025-06-25T14:44:09.071176Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:09.071196Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:09.071206Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:09.071330Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30185 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:09.331405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:09.538550Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TestRequest(database="/Root", token="root@builtin") => {SUCCESS, 0} 2025-06-25T14:44:11.229627Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:11.273914Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:12.575750Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897831990009271:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:12.575801Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001849/r3tmp/tmpsOsP6e/pdisk_1.dat 2025-06-25T14:44:12.703915Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:12.710872Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:12.710932Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:12.718540Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7978, node 4 2025-06-25T14:44:12.837352Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:12.837377Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:12.837390Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:12.837532Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28388 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:13.129838Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TestRequest(database="/Root", token="") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:13.227919Z node 4 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:13.239570Z node 4 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:16.751094Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897847965432418:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:16.751131Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001849/r3tmp/tmpRJWBWY/pdisk_1.dat 2025-06-25T14:44:16.910929Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:16.926704Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:16.926780Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:16.931649Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28764, node 7 2025-06-25T14:44:17.028504Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:17.028523Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:17.028532Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:17.028669Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29811 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:17.222885Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:17.760528Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TestRequest(database="/Root", token="") => {SUCCESS, 0} TestRequest(database="/blabla", token="") => {SUCCESS, 0} TestRequest(database="blabla", token="") => {SUCCESS, 0} TestRequest(database="/Root", token="root@builtin") => {SUCCESS, 0} TestRequest(database="/blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:19.522874Z node 7 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:19.537483Z node 7 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/Root", token="invalid token") => {UNAUTHORIZED, 0} 2025-06-25T14:44:19.548726Z node 7 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (717F937C): Unknown token TestRequest(database="/blabla", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:19.573508Z node 7 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 2025-06-25T14:44:19.589063Z node 7 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:21.025302Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897865930523202:2242];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001849/r3tmp/tmpjLcdDw/pdisk_1.dat 2025-06-25T14:44:21.070115Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:21.177502Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:21.196394Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:21.196478Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:21.203562Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:21.215087Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 16526, node 10 2025-06-25T14:44:21.252984Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:21.253000Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:21.253007Z node 10 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:21.253104Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20318 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:21.438564Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:21.988438Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TestRequest(database="/Root", token="") => {SUCCESS, 0} TestRequest(database="/blabla", token="") => {SUCCESS, 0} TestRequest(database="blabla", token="") => {SUCCESS, 0} TestRequest(database="/Root", token="root@builtin") => {SUCCESS, 0} TestRequest(database="/blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:23.925796Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 2025-06-25T14:44:23.946164Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="root@builtin") => {STATUS_CODE_UNSPECIFIED, 16} TestRequest(database="/Root", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:23.972969Z node 10 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (717F937C): Unknown token 2025-06-25T14:44:23.987634Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="/blabla", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:24.011458Z node 10 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /blabla Strong=1 TestRequest(database="blabla", token="invalid token") => {STATUS_CODE_UNSPECIFIED, 16} 2025-06-25T14:44:25.719114Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897889590466456:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:25.719174Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/001849/r3tmp/tmpajUI3b/pdisk_1.dat 2025-06-25T14:44:25.947408Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22369, node 13 2025-06-25T14:44:26.081471Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:26.081567Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:26.089410Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:26.098446Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:26.098466Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:26.098477Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:26.098615Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13431 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:26.393732Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
TClient is connected to server localhost:13431 2025-06-25T14:44:26.741372Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:26.806564Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CreateTableWithMESettings [GOOD] Test command err: 2025-06-25T14:44:06.051688Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897808597251187:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:06.051742Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184a/r3tmp/tmpF2ORQZ/pdisk_1.dat 2025-06-25T14:44:06.407939Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:06.443154Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 7308, node 1 2025-06-25T14:44:06.458188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:06.458293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:06.475335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:06.503967Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:06.503996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:06.504004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:06.504117Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17071 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:06.826267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:07.069908Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:08.760183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:10.758719Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897824239169246:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:10.758838Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184a/r3tmp/tmpx420nF/pdisk_1.dat 2025-06-25T14:44:10.984168Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:10.984254Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:10.991621Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:11.015538Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17257, node 4 2025-06-25T14:44:11.095496Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:11.095521Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:11.095532Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:11.095687Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6552 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:11.367157Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:11.765569Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:13.845962Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:13.973910Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:44:14.036810Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:44:14.091486Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:44:15.675768Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897844328085472:2175];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:15.682953Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184a/r3tmp/tmpiQLGFe/pdisk_1.dat 2025-06-25T14:44:15.836494Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:15.856789Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:15.856879Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:15.864625Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24938, node 7 2025-06-25T14:44:15.946715Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:15.946738Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:15.946746Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:15.946868Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11535 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:16.202811Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:16.683057Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:18.568401Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:18.703995Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:44:18.768535Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:44:20.314087Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897868755864919:2154];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184a/r3tmp/tmpq9PeFS/pdisk_1.dat 2025-06-25T14:44:20.398792Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:20.487315Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:20.506317Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:20.506410Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:20.515737Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30592, node 10 2025-06-25T14:44:20.725561Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:20.725584Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:20.725592Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:20.725744Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22192 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:21.045012Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:21.061172Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:44:21.316424Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:23.680432Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:23.848877Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:44:23.861640Z node 10 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 10, TabletId: 72075186224037888 not found 2025-06-25T14:44:23.861668Z node 10 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 10, TabletId: 72075186224037888 not found 2025-06-25T14:44:25.291912Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897890049668644:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:25.292127Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184a/r3tmp/tmpk8PMfd/pdisk_1.dat 2025-06-25T14:44:25.430462Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-25T14:44:25.446424Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:25.446543Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:25.455102Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32289, node 13 2025-06-25T14:44:25.623185Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:25.623213Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:25.623222Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:25.623385Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26382 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:25.900047Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:26.300758Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:28.764260Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestDescribeDirectory [GOOD] Test command err: 2025-06-25T14:44:09.685368Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897820285271077:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:09.685418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001842/r3tmp/tmpIyoQ2f/pdisk_1.dat 2025-06-25T14:44:10.042862Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16327, node 1 2025-06-25T14:44:10.187430Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:10.187513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:10.202780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:10.243649Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:10.243673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:10.243680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:10.243792Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12678 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:10.552015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:10.696010Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:12.515507Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897833170173940:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:12.515518Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897833170173948:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:12.515648Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:12.519514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:12.540255Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897833170173954:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:44:12.610891Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897833170174021:2670] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:14.570325Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897841320520044:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:14.570414Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001842/r3tmp/tmpoVWgod/pdisk_1.dat 2025-06-25T14:44:14.762020Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:14.779836Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:14.779927Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:14.786562Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1340, node 4 2025-06-25T14:44:14.861662Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:14.861698Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:14.861711Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:14.861848Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26733 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:15.137961Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:15.600554Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:17.311821Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897854205422920:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:17.311892Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897854205422912:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:17.311959Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:17.319279Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:17.350909Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519897854205422926:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:44:17.413731Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897854205422997:2680] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:18.925756Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897857983619989:2073];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001842/r3tmp/tmpnmTljn/pdisk_1.dat 2025-06-25T14:44:18.926192Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:19.151901Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23865, node 7 2025-06-25T14:44:19.262962Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:19.263055Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> C ... 2Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:19.276928Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:19.277068Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11151 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:19.569735Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:19.952432Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:22.357114Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:22.503255Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519897875163490344:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:22.503261Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519897875163490352:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:22.503325Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:22.506685Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:22.530060Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519897875163490358:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:22.604832Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519897875163490433:2791] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:22.714154Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrr2v57fmzyhxf20qnyt3h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YjZjMmNkOTAtNTM1ZDM0N2EtYzdhZWQ4NzktZDEzYTFjMDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:22.800963Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrr32e2j9pnjdms4wcrcf7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YjZjMmNkOTAtNTM1ZDM0N2EtYzdhZWQ4NzktZDEzYTFjMDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:22.908100Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrr3524rjrws79s7pa5v0n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YjZjMmNkOTAtNTM1ZDM0N2EtYzdhZWQ4NzktZDEzYTFjMDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:23.049375Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykrr3ae9n5xvs8we3tjt7s2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YjZjMmNkOTAtNTM1ZDM0N2EtYzdhZWQ4NzktZDEzYTFjMDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:23.133453Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykrr3ctd1k4qtqe2yw01m8h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YjZjMmNkOTAtNTM1ZDM0N2EtYzdhZWQ4NzktZDEzYTFjMDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:23.914650Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519897857983619989:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:23.914729Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:24.041889Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jykrr3gn11pgqak7b052rfrd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YjZjMmNkOTAtNTM1ZDM0N2EtYzdhZWQ4NzktZDEzYTFjMDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:24.048254Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykrr3gn11pgqak7b052rfrd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YjZjMmNkOTAtNTM1ZDM0N2EtYzdhZWQ4NzktZDEzYTFjMDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:44:25.536906Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897887344494126:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:25.536961Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001842/r3tmp/tmpbO1fnd/pdisk_1.dat 2025-06-25T14:44:25.772736Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:25.776255Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:25.776350Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:25.795290Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15812, node 10 2025-06-25T14:44:25.897808Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:25.897829Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:25.897834Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:25.897961Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16870 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:26.189627Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:26.573121Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:28.811818Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897900229397017:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:28.811913Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:28.842694Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TGRpcLdapAuthentication::LdapAuthSettingsWithEmptyBindPassword [GOOD] >> TGRpcLdapAuthentication::LdapAuthSetIncorrectDomain >> TGRpcYdbTest::ExecutePreparedQuery [GOOD] >> TGRpcYdbTest::ExecuteQueryCache >> TxUsage::WriteToTopic_Demo_15_Table [GOOD] >> YdbYqlClient::ConnectDbAclIsOffWhenYdbRequestsWithoutDatabase [GOOD] >> YdbYqlClient::CopyTables >> TBlobStorageProxyTest::TestEmptyRange [GOOD] >> YdbTableBulkUpsertOlap::UpsertArrowBatch [GOOD] >> YdbTableBulkUpsertOlap::UpsertArrowDupField >> TGRpcYdbTest::KeepAlive [GOOD] >> TxUsage::WriteToTopic_Demo_15_Query >> YdbS3Internal::TestS3Listing >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts >> YdbYqlClient::TestTzTypesFullStack |84.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestEmptyRange [GOOD] >> YdbImport::ImportFromS3ToExistingTable [GOOD] >> TYqlDecimalTests::SimpleUpsertSelect >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::KeepAlive [GOOD] Test command err: 2025-06-25T14:44:10.931035Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897824783249853:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:10.932679Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183e/r3tmp/tmptCMKtw/pdisk_1.dat 2025-06-25T14:44:11.378662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:11.378744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:11.380564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:11.398734Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25854, node 1 2025-06-25T14:44:11.496036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:11.496052Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:11.496057Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:11.496129Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14226 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:11.881212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:11.928277Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:12.022428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) 2025-06-25T14:44:14.770255Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897840156267138:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:14.770315Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183e/r3tmp/tmptA62px/pdisk_1.dat 2025-06-25T14:44:14.911403Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:14.934259Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:14.934337Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:14.938904Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14325, node 4 2025-06-25T14:44:15.032518Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:15.032549Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:15.032557Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:15.032690Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got 
bad distributable configuration TClient is connected to server localhost:8377 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:15.232879Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:18.853130Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897857846306174:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:18.858461Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183e/r3tmp/tmppbSBgh/pdisk_1.dat 2025-06-25T14:44:19.202053Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31559, node 7 2025-06-25T14:44:19.350575Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:19.350730Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:19.379053Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:19.379705Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:19.379715Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:19.379722Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:19.379844Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5832 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:19.609356Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting...
: Error: Operation timeout. 2025-06-25T14:44:23.434438Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897880742791617:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:23.444713Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183e/r3tmp/tmpMvDeJh/pdisk_1.dat 2025-06-25T14:44:23.722013Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:23.758434Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 3979, node 10 2025-06-25T14:44:23.816770Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:23.816909Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:23.902496Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:23.904690Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:23.904717Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:23.904726Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:23.904840Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12061 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:24.185868Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting...
: Error: Operation cancelled. 2025-06-25T14:44:28.042866Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897899071949511:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:28.042925Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183e/r3tmp/tmpqFjw4X/pdisk_1.dat 2025-06-25T14:44:28.225756Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:28.255213Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:28.255299Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:28.260647Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15076, node 13 2025-06-25T14:44:28.457134Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:28.457162Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:28.457171Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:28.457314Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61137 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:28.763789Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:29.063278Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> YdbYqlClient::RetryOperationLimitedDuration [GOOD] >> YdbQueryService::TestAttachTwice [GOOD] >> YdbQueryService::TestForbidExecuteWithoutAttach >> TxUsage::WriteToTopic_Demo_23_RestartNo_Query >> YdbYqlClient::ConnectDbAclIsOffWhenTokenIsOptionalAndNull >> TGRpcNewCoordinationClient::SessionDescribeWatchReplace [GOOD] >> TGRpcNewCoordinationClient::SessionCreateUpdateDeleteSemaphore >> TGRpcLdapAuthentication::LdapAuthWithInvalidSearchFilter [GOOD] >> TGRpcNewClient::SimpleYqlQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::RetryOperationLimitedDuration [GOOD] Test command err: 2025-06-25T14:43:51.110519Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897740406352140:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:51.110646Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001866/r3tmp/tmpDoSVOA/pdisk_1.dat 2025-06-25T14:43:51.557586Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:51.581181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:51.581278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:51.620875Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6847, node 1 2025-06-25T14:43:51.872109Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:51.872142Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:51.872150Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:51.872257Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:52.135529Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5455 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:52.347678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:54.317032Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897753291254940:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:54.317161Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:54.571224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:43:54.769520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:43:54.957251Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897753291255151:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:54.957304Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:54.957442Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897753291255156:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:43:54.960219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:43:54.978082Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897753291255158:2321], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:43:55.080774Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897757586222526:2816] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:43:55.151953Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7519897757586222550:2826], for# test_user@builtin, access# DescribeSchema 2025-06-25T14:43:55.151985Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7519897757586222550:2826], for# test_user@builtin, access# DescribeSchema 2025-06-25T14:43:55.159725Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897757586222544:2325], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:21: Error: At function: KiReadTable!
:2:21: Error: Cannot find table 'db.[Root/Test]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:43:55.161350Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NDg3OWEyNWEtY2U5ZGYxYWUtMjcyNjZmYTEtNGFmZDJmZjc=, ActorId: [1:7519897753291255144:2315], ActorState: ExecuteState, TraceId: 01jykrq7ycfeyafpwtgffqfbee, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:43:56.553253Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897765524519910:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:56.553327Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001866/r3tmp/tmpUKoB6v/pdisk_1.dat 2025-06-25T14:43:56.745833Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:56.758924Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:56.759000Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:56.763846Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11204, node 4 2025-06-25T14:43:56.908682Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:56.908714Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:56.908722Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:56.908880Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1316 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:43:57.181835Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:57.200589Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:43:57.570421Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:59.524036Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897778409422773:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: { Disconnected 2025-06-25T14:44:01.742355Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:01.765886Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1592, node 7 2025-06-25T14:44:01.839585Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:01.839610Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:01.839617Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:01.839736Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61038 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:02.163109Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:02.561459Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 2025-06-25T14:44:06.550033Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519897785542662361:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:06.550120Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 2025-06-25T14:44:06.804611Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519897807017499900:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:06.804672Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519897807017499905:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:06.804717Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:06.808660Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:06.836461Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519897807017499914:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:44:06.896903Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519897807017500004:2686] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 2025-06-25T14:44:11.623027Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897828672871848:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:11.623406Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001866/r3tmp/tmpHcnfAC/pdisk_1.dat 2025-06-25T14:44:11.791806Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:11.824530Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:11.824618Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:11.832119Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3482, node 10 2025-06-25T14:44:11.972736Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:11.972760Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:11.972769Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:11.972915Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24841 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:12.290371Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:12.638066Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 2 of 3 2025-06-25T14:44:16.617490Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519897828672871848:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:16.617577Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 3 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 2025-06-25T14:44:26.752498Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:44:26.752528Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 2 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 3 of 3 Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 3 >> YdbOlapStore::LogLast50ByResource >> SystemView::ShowCreateTableTemporary [GOOD] >> SystemView::ShowCreateTableSequences >> TTableProfileTests::OverwriteCompactionPolicy [GOOD] >> TTableProfileTests::OverwriteExecutionPolicy >> YdbTableBulkUpsert::ZeroRows [GOOD] >> YdbTableBulkUpsertOlap::UpsertArrowDupField [GOOD] >> YdbTableBulkUpsertOlap::ParquetImportBug >> TGRpcLdapAuthentication::LdapAuthSetIncorrectDomain [GOOD] >> YdbYqlClient::TestYqlLongSessionMultipleErrors [GOOD] >> TGRpcYdbTest::ExecuteQueryCache [GOOD] >> 
YdbYqlClient::TestReadTableBatchLimits [GOOD] >> TxUsage::WriteToTopic_Demo_39_Query [GOOD] >> YdbYqlClient::TestDoubleKey ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::ZeroRows [GOOD] Test command err: 2025-06-25T14:44:03.477860Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897791527498832:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:03.494194Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001850/r3tmp/tmpTm8h5U/pdisk_1.dat 2025-06-25T14:44:03.923794Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:03.950505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:03.950590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:03.954426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14806, node 1 2025-06-25T14:44:04.193003Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:04.193024Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:04.193031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:04.193159Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29531 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:44:04.489537Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:04.610696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:06.584636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) CLIENT_DEADLINE_EXCEEDED 2025-06-25T14:44:07.057958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897808707370929:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:07.058021Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897808707370919:2398], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:07.058148Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:07.062398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:07.079702Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897808707370933:2402], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:07.147597Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897808707371060:4183] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:07.564492Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrqkrg2gv89d7mhgqs8f1w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGFiODBiNWUtYTkyZjhhYjEtOTM1MWVhY2QtNTM4N2U3M2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:09.038039Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897819609133255:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:09.038083Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001850/r3tmp/tmpN28l1k/pdisk_1.dat 2025-06-25T14:44:09.193562Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:09.205740Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:09.205813Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:09.212069Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:09.227595Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 4315, node 4 2025-06-25T14:44:09.279913Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:09.279939Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:09.279947Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:09.280077Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24060 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:09.538208Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:10.064417Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:11.870897Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:13.590908Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897835342502096:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:13.590987Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001850/r3tmp/tmpjGEpbA/pdisk_1.dat 2025-06-25T14:44:13.749795Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:13.764191Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:13.764254Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:13.770065Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20672, node 7 2025-06-25T14:44:13.848016Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:13.848040Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:13.848048Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:13.848205Z node 7 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is c ... 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:14.132790Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:14.605696Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:16.457331Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:18.592604Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519897835342502096:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:18.592674Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:25.746308Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897889033418531:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:25.746397Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001850/r3tmp/tmp6CSUVA/pdisk_1.dat 2025-06-25T14:44:25.958976Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:25.975493Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:25.975584Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:25.991070Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> 
Connected TServer::EnableGrpc on GrpcPort 16371, node 10 2025-06-25T14:44:26.116989Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:26.117018Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:26.117031Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:26.117204Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23251 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:26.422861Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:26.757555Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:29.130711Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 1 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 2 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 4 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 8 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 16 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 32 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 64 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 128 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 256 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 512 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 1024 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 2048 usec
: Error: Bulk upsert to table '/Root/ui32' longTx ydb://long-tx/read-only timed out, duration: 0 sec 4096 usec
: Error: Bulk upsert to table '/Root/ui32' Deadline exceeded 8192 usec
: Error: Bulk upsert to table '/Root/ui32' Deadline exceeded 16384 usec 2025-06-25T14:44:31.644767Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897912309613326:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:31.644836Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001850/r3tmp/tmpDozM5k/pdisk_1.dat 2025-06-25T14:44:31.823852Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:31.846401Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:31.846499Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:31.851534Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3748, node 13 2025-06-25T14:44:31.864430Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 2025-06-25T14:44:31.906208Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:31.906234Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:31.906243Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:31.906380Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19309 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:32.194908Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:32.645240Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:34.818257Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcLdapAuthentication::LdapAuthSetIncorrectDomain [GOOD] Test command err: 2025-06-25T14:44:15.200567Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897846539184908:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:15.204036Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001832/r3tmp/tmpgY2jpW/pdisk_1.dat 2025-06-25T14:44:15.569044Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:15.577230Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:15.577287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8012, node 1 2025-06-25T14:44:15.598527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:15.702466Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:15.702487Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:15.702492Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:15.702601Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13218 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:15.998953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:16.201375Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:18.923231Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897857482509876:2195];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001832/r3tmp/tmpOUkMXx/pdisk_1.dat 2025-06-25T14:44:18.969327Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:19.037884Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:19.056103Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:19.056169Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:19.066066Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8583, node 4 2025-06-25T14:44:19.134029Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:19.134051Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:19.134058Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:19.134211Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18465 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:19.367454Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:23.046111Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897878867117525:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:23.046167Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001832/r3tmp/tmpTajvNV/pdisk_1.dat 2025-06-25T14:44:23.188001Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:23.201614Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:23.201673Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:23.207785Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21564, node 7 2025-06-25T14:44:23.304799Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:23.304819Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:23.304827Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:23.304954Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17834 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:23.552181Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:27.316634Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897897670108496:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:27.316700Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001832/r3tmp/tmpxJQWkL/pdisk_1.dat 2025-06-25T14:44:27.531041Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:27.587493Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 15243, node 10 2025-06-25T14:44:27.635630Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:27.635733Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:27.660129Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:27.726098Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:27.726120Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:27.726129Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:27.726291Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20983 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:28.005724Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:32.002937Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897917699303552:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:32.002989Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001832/r3tmp/tmpHirxoJ/pdisk_1.dat 2025-06-25T14:44:32.209140Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:32.226103Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:32.226197Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:32.234207Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26542, node 13 2025-06-25T14:44:32.337175Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:32.337206Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:32.337213Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:32.337340Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61152 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:32.660284Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:33.045379Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestYqlLongSessionMultipleErrors [GOOD] Test command err: 2025-06-25T14:44:16.385504Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897847417478627:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:16.386582Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001830/r3tmp/tmpofRBSj/pdisk_1.dat 2025-06-25T14:44:16.676862Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:16.718236Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 25072, node 1 2025-06-25T14:44:16.729865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:16.731060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:16.734065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:16.763990Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:16.764012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:16.764019Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:16.764108Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21274 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:17.062586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:17.392419Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:19.150303Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897860302381464:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.150405Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.470804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:19.611242Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897860302381634:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.611321Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.611804Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897860302381639:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.615086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:19.645086Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897860302381641:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:19.723624Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897860302381719:2795] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:19.776913Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897860302381737:2316], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:25: Error: At function: KiWriteTable!
:2:43: Error: Failed to convert type: Struct<'Key':String,'Value':String> to Struct<'Key':Uint32?,'Value':String?>
:2:43: Error: Failed to convert 'Key': String to Optional
:2:43: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T14:44:19.777158Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MWM4OGYzNDQtOTBjNzU5NS1lODU1ZGY5LWEzNmZjZDEy, ActorId: [1:7519897860302381461:2295], ActorState: ExecuteState, TraceId: 01jykrr00t4rw0w8b8bvd6hvs4, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:44:21.312917Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897872795945608:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:21.313011Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001830/r3tmp/tmp7qQ9oz/pdisk_1.dat 2025-06-25T14:44:21.573472Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:21.614107Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 12962, node 4 2025-06-25T14:44:21.641229Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:21.641322Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:21.686793Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:21.731929Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:21.731953Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:21.731965Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:21.732140Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4652 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:22.034519Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:22.327079Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:24.429520Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897885680848491:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:24.429683Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:26.108781Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897892860210053:2074];send_to=[0:730719953665814 ... 05745113102:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:29.626008Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:29.626424Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519897905745113107:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:29.629954Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:29.653088Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519897905745113109:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:29.713910Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519897905745113184:2792] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:29.771451Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrr9srd85m0peq8jj1jjvt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NjlmNmU4ZDUtMzExYzA0OTAtNjE4YTgxYTEtYTJiNWEyYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:29.836068Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519897905745113229:2323], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[Root/BadTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:44:29.837741Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=NjlmNmU4ZDUtMzExYzA0OTAtNjE4YTgxYTEtYTJiNWEyYmQ=, ActorId: [7:7519897905745112926:2296], ActorState: ExecuteState, TraceId: 01jykrr9z60dd9835ykeny5vn1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:44:29.897070Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrra0n7x89abhdfnmngwxw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NjlmNmU4ZDUtMzExYzA0OTAtNjE4YTgxYTEtYTJiNWEyYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:30.035854Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrra2n62mmzqhrn2a3ybcj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NjlmNmU4ZDUtMzExYzA0OTAtNjE4YTgxYTEtYTJiNWEyYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:31.604356Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897914174827720:2095];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:31.606817Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001830/r3tmp/tmpAMiZDA/pdisk_1.dat 2025-06-25T14:44:31.836146Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:31.875008Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:31.875091Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:31.881902Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16488, node 10 2025-06-25T14:44:32.064846Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:32.064877Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:32.064887Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:32.065021Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14095 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:32.397931Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:32.609522Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:35.217463Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897931354697888:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:35.217562Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:35.237691Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:35.338897Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897931354698046:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:35.338983Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:35.338988Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897931354698051:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:35.342694Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:35.369836Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519897931354698053:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:35.446211Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519897931354698124:2789] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:35.462056Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7519897931354698135:2316], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:25: Error: At function: KiWriteTable!
:2:25: Error: Cannot find table 'db.[Root/BadTable1]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:44:35.462304Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=10&id=ODZlMGE2NjktNDY2MWMzYmUtN2IwYjEzYTUtODI2OWIyNmU=, ActorId: [10:7519897931354697862:2296], ActorState: ExecuteState, TraceId: 01jykrrfca3p40da4b143zdrrt, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:44:35.497680Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7519897931354698160:2322], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:25: Error: At function: KiWriteTable!
:2:25: Error: Cannot find table 'db.[Root/BadTable2]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:44:35.498119Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=10&id=ODZlMGE2NjktNDY2MWMzYmUtN2IwYjEzYTUtODI2OWIyNmU=, ActorId: [10:7519897931354697862:2296], ActorState: ExecuteState, TraceId: 01jykrrfgf8mw79d801h6n8th7, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::ExecuteQueryCache [GOOD] Test command err: 2025-06-25T14:44:11.788824Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897828184107676:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:11.788871Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001839/r3tmp/tmpEYQGhA/pdisk_1.dat 2025-06-25T14:44:12.266067Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:12.279333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:12.279407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:12.282574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:12.315446Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 16515, node 1 2025-06-25T14:44:12.355293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:12.355316Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:12.355325Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:12.355462Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21059 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:12.662762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:12.824881Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:16.047047Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897851219047528:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:16.047168Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001839/r3tmp/tmpWuN9Rz/pdisk_1.dat 2025-06-25T14:44:16.203802Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:16.226121Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:16.226220Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:16.232628Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13522, node 4 2025-06-25T14:44:16.373089Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:16.373110Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:16.373124Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:16.373265Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5143 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:16.637825Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:16.656473Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:44:17.056780Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:19.046574Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897864103950405:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.046574Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897864103950397:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.046667Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.050480Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:19.076781Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519897864103950411:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:44:19.173270Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897864103950485:2671] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:21.350739Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897871005327727:2244];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001839/r3tmp/tmpSDi9xM/pdisk_1.dat 2025-06-25T14:44:21.400614Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:21.531341Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:21.572773Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 2507, node 7 2025-06-25T14:44:21.597437Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:21.597528Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:21.632259Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:21.663889Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:21.663907Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:21.663919Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:21.664075Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28580 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true ... calPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:25.014520Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrr57waqzfyvbvxvqxd0v8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGRhYjY4N2ItODVkNWEyMmYtODBhZjVjMzctNjM0Mzc1NzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:44:25.202485Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jykrr5fg1hyszs4v19gk9hsn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGRhYjY4N2ItODVkNWEyMmYtODBhZjVjMzctNjM0Mzc1NzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:26.937725Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897892277251280:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:26.937797Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001839/r3tmp/tmpI163It/pdisk_1.dat 2025-06-25T14:44:27.164960Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:27.199122Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 16565, node 10 2025-06-25T14:44:27.274673Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:27.274772Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:27.289193Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:27.289216Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:27.289224Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:27.289357Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:27.320900Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10938 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:27.658714Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:27.996660Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:30.547110Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897909457121463:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:30.547155Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897909457121471:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:30.547197Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:30.550169Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:30.574869Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519897909457121477:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:44:30.637820Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519897909457121553:2693] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:32.124689Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897916833020416:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:32.124865Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001839/r3tmp/tmprhLEef/pdisk_1.dat 2025-06-25T14:44:32.349515Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11023, node 13 2025-06-25T14:44:32.467827Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:32.467919Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:32.471718Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:32.493518Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:32.493536Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:32.493543Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:32.493640Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21683 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:32.783816Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:33.132531Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:35.518291Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897929717923296:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:35.518302Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897929717923289:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:35.518387Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:35.521689Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:35.543064Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519897929717923303:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:44:35.646123Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519897929717923374:2678] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> YdbLogStore::LogStore [GOOD] >> YdbLogStore::LogStoreNegative >> YdbYqlClient::TestTzTypesFullStack [GOOD] >> YdbYqlClient::TestVariant ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadTableBatchLimits [GOOD] Test command err: 2025-06-25T14:44:15.330892Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897843854384219:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:15.330955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001831/r3tmp/tmp3LFWFX/pdisk_1.dat 2025-06-25T14:44:15.684255Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:15.703775Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 23591, node 1 2025-06-25T14:44:15.727306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:15.727406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:15.750888Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:15.750910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:15.750943Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:15.750968Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:15.751066Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8694 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:16.053289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:16.076186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:44:16.127625Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:592: Got grpc request# ListEndpointsRequest, traceId# 01jykrqwkx74y9cdad3wps9gwh, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:39604, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.986722s 2025-06-25T14:44:16.153495Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateSessionRequest, traceId# 01jykrqwmbenm9wa8j5xn5ycsk, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:39608, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:16.348425Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:17.952149Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateTableRequest, traceId# 01jykrqycz1qfagmt01z4xc9bh, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:39622, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:17.952748Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519897843854384400:2140] Handle TEvProposeTransaction 2025-06-25T14:44:17.952776Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519897843854384400:2140] TxId# 281474976715658 ProcessProposeTransaction 2025-06-25T14:44:17.952825Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519897843854384400:2140] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519897852444319776:2611] 2025-06-25T14:44:18.030214Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519897852444319776:2611] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Test" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Fk" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" KeyColumnNames: "Fk" UniformPartitionsCount: 16 PartitionConfig { } Temporary: false } CreateIndexedTable { } } } DatabaseName: "" RequestType: "" PeerName: "ipv6:[::1]:39622" 2025-06-25T14:44:18.030264Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519897852444319776:2611] txid# 281474976715658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:44:18.030600Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519897852444319776:2611] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:44:18.030664Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519897852444319776:2611] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:44:18.030824Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# 
[1:7519897852444319776:2611] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:44:18.030937Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519897852444319776:2611] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:44:18.030982Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519897852444319776:2611] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:44:18.031089Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519897852444319776:2611] txid# 281474976715658 HANDLE EvClientConnected 2025-06-25T14:44:18.032695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:18.037121Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519897852444319776:2611] txid# 281474976715658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715658} 2025-06-25T14:44:18.037173Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519897852444319776:2611] txid# 281474976715658 SEND to# [1:7519897852444319775:2296] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 53} 2025-06-25T14:44:18.037925Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:44:18.037984Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:18.037990Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:44:18.038013Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:44:18.078964Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287146:2683], Recipient [1:7519897856739287296:2309]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.079672Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287131:2668], Recipient [1:7519897856739287279:2300]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.080091Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287142:2679], Recipient [1:7519897856739287287:2304]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.080161Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287135:2672], Recipient [1:7519897856739287304:2313]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.081132Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287136:2673], Recipient [1:7519897856739287281:2301]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.081581Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender 
[1:7519897856739287140:2677], Recipient [1:7519897856739287289:2305]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.081913Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287139:2676], Recipient [1:7519897856739287282:2302]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.082322Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287133:2670], Recipient [1:7519897856739287283:2303]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.082702Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287143:2680], Recipient [1:7519897856739287291:2307]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.083076Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287134:2671], Recipient [1:7519897856739287290:2306]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.083645Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287141:2678], Recipient [1:7519897856739287277:2299]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.083949Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287145:2682], Recipient [1:7519897856739287309:2314]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.084089Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287132:2669], Recipient [1:7519897856739287295:2308]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.084578Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:7519897856739287138:2675], Recipient [1:7519897856739287302:2312]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:44:18.084605Z node 1 :TX_ ... 
678 2025-06-25T14:44:36.172733Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519897937055358448:2362], Recipient [10:7519897924170454812:2308]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750862676185 TxId: 281474976715678 2025-06-25T14:44:36.172837Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519897937055358448:2362], Recipient [10:7519897924170454805:2307]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750862676185 TxId: 281474976715678 2025-06-25T14:44:36.172861Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519897937055358448:2362], Recipient [10:7519897924170454792:2302]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750862676185 TxId: 281474976715678 2025-06-25T14:44:36.172947Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519897937055358448:2362], Recipient [10:7519897924170454801:2305]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750862676185 TxId: 281474976715678 2025-06-25T14:44:36.172968Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519897937055358448:2362], Recipient [10:7519897924170454798:2304]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750862676185 TxId: 281474976715678 2025-06-25T14:44:36.173057Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519897937055358448:2362], Recipient [10:7519897924170454796:2303]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750862676185 TxId: 281474976715678 2025-06-25T14:44:36.173070Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519897937055358448:2362], Recipient [10:7519897924170454791:2301]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750862676185 TxId: 281474976715678 2025-06-25T14:44:36.173160Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [10:7519897937055358448:2362], Recipient [10:7519897924170454819:2309]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1750862676185 TxId: 281474976715678 ---- batch start ---- [[0u];[0u];["A"]] ---- batch end ---- ---- batch start ---- [[1u];[2u];["A"]] ---- batch end ---- ---- batch start ---- [[2u];[4u];["A"]] ---- batch end ---- ---- batch start ---- [[3u];[6u];["A"]] ---- batch end ---- ---- batch start ---- [[4u];[8u];["A"]] ---- batch end ---- ---- batch start ---- [[5u];[10u];["A"]] ---- batch end ---- ---- batch start ---- [[6u];[12u];["A"]] ---- batch end ---- ---- batch start ---- [[7u];[14u];["A"]] ---- batch end ---- ---- batch start ---- [[8u];[16u];["A"]] ---- batch end ---- ---- batch start ---- [[9u];[18u];["A"]] ---- batch end ---- ---- batch start ---- [[10u];[20u];["A"]] ---- batch end ---- ---- batch start ---- [[11u];[22u];["A"]] ---- batch end ---- ---- batch start ---- [[12u];[24u];["A"]] ---- batch end ---- ---- batch start ---- [[13u];[26u];["A"]] ---- batch end ---- ---- batch start ---- [[14u];[28u];["A"]] ---- batch end ---- 
---- batch start ---- [[15u];[30u];["A"]] ---- batch end ---- ---- batch start ---- [[16u];[32u];["A"]] ---- batch end ---- ---- batch start ---- [[17u];[34u];["A"]] ---- batch end ---- ---- batch start ---- [[18u];[36u];["A"]] ---- batch end ---- ---- batch start ---- [[19u];[38u];["A"]] ---- batch end ---- ---- batch start ---- [[20u];[40u];["A"]] ---- batch end ---- ---- batch start ---- [[21u];[42u];["A"]] ---- batch end ---- ---- batch start ---- [[22u];[44u];["A"]] ---- batch end ---- ---- batch start ---- [[23u];[46u];["A"]] ---- batch end ---- ---- batch start ---- [[24u];[48u];["A"]] ---- batch end ---- ---- batch start ---- [[25u];[50u];["A"]] ---- batch end ---- ---- batch start ---- [[26u];[52u];["A"]] ---- batch end ---- ---- batch start ---- [[27u];[54u];["A"]] ---- batch end ---- ---- batch start ---- [[28u];[56u];["A"]] ---- batch end ---- ---- batch start ---- [[29u];[58u];["A"]] ---- batch end ---- ---- batch start ---- [[30u];[60u];["A"]] ---- batch end ---- ---- batch start ---- [[31u];[62u];["A"]] ---- batch end ---- ---- batch start ---- [[32u];[64u];["A"]] ---- batch end ---- ---- batch start ---- [[33u];[66u];["A"]] ---- batch end ---- ---- batch start ---- [[34u];[68u];["A"]] ---- batch end ---- ---- batch start ---- [[35u];[70u];["A"]] ---- batch end ---- ---- batch start ---- [[36u];[72u];["A"]] ---- batch end ---- ---- batch start ---- [[37u];[74u];["A"]] ---- batch end ---- ---- batch start ---- [[38u];[76u];["A"]] ---- batch end ---- ---- batch start ---- [[39u];[78u];["A"]] ---- batch end ---- ---- batch start ---- [[40u];[80u];["A"]] ---- batch end ---- ---- batch start ---- [[41u];[82u];["A"]] ---- batch end ---- ---- batch start ---- [[42u];[84u];["A"]] ---- batch end ---- ---- batch start ---- [[43u];[86u];["A"]] ---- batch end ---- ---- batch start ---- [[44u];[88u];["A"]] ---- batch end ---- ---- batch start ---- [[45u];[90u];["A"]] ---- batch end ---- ---- batch start ---- [[46u];[92u];["A"]] ---- batch end ---- ---- batch start ---- [[47u];[94u];["A"]] ---- batch end ---- ---- batch start ---- [[48u];[96u];["A"]] ---- batch end ---- ---- batch start ---- [[49u];[98u];["A"]] ---- batch end ---- ---- batch start ---- [[50u];[100u];["A"]] ---- batch end ---- ---- batch start ---- [[51u];[102u];["A"]] ---- batch end ---- ---- batch start ---- [[52u];[104u];["A"]] ---- batch end ---- ---- batch start ---- [[53u];[106u];["A"]] ---- batch end ---- ---- batch start ---- [[54u];[108u];["A"]] ---- batch end ---- ---- batch start ---- [[55u];[110u];["A"]] ---- batch end ---- ---- batch start ---- [[56u];[112u];["A"]] ---- batch end ---- ---- batch start ---- [[57u];[114u];["A"]] ---- batch end ---- ---- batch start ---- [[58u];[116u];["A"]] ---- batch end ---- ---- batch start ---- [[59u];[118u];["A"]] ---- batch end ---- ---- batch start ---- [[60u];[120u];["A"]] ---- batch end ---- ---- batch start ---- [[61u];[122u];["A"]] ---- batch end ---- ---- batch start ---- [[62u];[124u];["A"]] ---- batch end ---- ---- batch start ---- [[63u];[126u];["A"]] ---- batch end ---- ---- batch start ---- [[64u];[128u];["A"]] ---- batch end ---- ---- batch start ---- [[65u];[130u];["A"]] ---- batch end ---- ---- batch start ---- [[66u];[132u];["A"]] ---- batch end ---- ---- batch start ---- [[67u];[134u];["A"]] ---- batch end ---- ---- batch start ---- [[68u];[136u];["A"]] ---- batch end ---- ---- batch start ---- [[69u];[138u];["A"]] ---- batch end ---- ---- batch start ---- [[70u];[140u];["A"]] ---- batch end ---- ---- batch start ---- [[71u];[142u];["A"]] ---- batch end 
---- ---- batch start ---- [[72u];[144u];["A"]] ---- batch end ---- ---- batch start ---- [[73u];[146u];["A"]] ---- batch end ---- ---- batch start ---- [[74u];[148u];["A"]] ---- batch end ---- ---- batch start ---- [[75u];[150u];["A"]] ---- batch end ---- ---- batch start ---- [[76u];[152u];["A"]] ---- batch end ---- ---- batch start ---- [[77u];[154u];["A"]] ---- batch end ---- ---- batch start ---- [[78u];[156u];["A"]] ---- batch end ---- ---- batch start ---- [[79u];[158u];["A"]] ---- batch end ---- ---- batch start ---- [[80u];[160u];["A"]] ---- batch end ---- ---- batch start ---- [[81u];[162u];["A"]] ---- batch end ---- ---- batch start ---- [[82u];[164u];["A"]] ---- batch end ---- ---- batch start ---- [[83u];[166u];["A"]] ---- batch end ---- ---- batch start ---- [[84u];[168u];["A"]] ---- batch end ---- ---- batch start ---- [[85u];[170u];["A"]] ---- batch end ---- ---- batch start ---- [[86u];[172u];["A"]] ---- batch end ---- ---- batch start ---- [[87u];[174u];["A"]] ---- batch end ---- ---- batch start ---- [[88u];[176u];["A"]] ---- batch end ---- ---- batch start ---- [[89u];[178u];["A"]] ---- batch end ---- ---- batch start ---- [[90u];[180u];["A"]] ---- batch end ---- ---- batch start ---- [[91u];[182u];["A"]] ---- batch end ---- ---- batch start ---- [[92u];[184u];["A"]] ---- batch end ---- ---- batch start ---- [[93u];[186u];["A"]] ---- batch end ---- ---- batch start ---- [[94u];[188u];["A"]] ---- batch end ---- ---- batch start ---- [[95u];[190u];["A"]] ---- batch end ---- ---- batch start ---- [[96u];[192u];["A"]] ---- batch end ---- ---- batch start ---- [[97u];[194u];["A"]] ---- batch end ---- ---- batch start ---- [[98u];[196u];["A"]] ---- batch end ---- ---- batch start ---- [[99u];[198u];["A"]] ---- batch end ---- 2025-06-25T14:44:36.183464Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000be280] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.183463Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000198c80] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.183673Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000be880] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.183711Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0001ba880] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.183890Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00004f880] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.183906Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000198080] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.184062Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000cc080] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.184067Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000034880] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.184253Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000040880] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.184283Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000151880] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.184438Z node 10 :GRPC_SERVER 
DEBUG: grpc_server.cpp:283: [0x51a000181e80] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.184477Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000109880] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.184652Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00016d480] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.184729Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00007e680] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.184807Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00016e080] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.184876Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000038480] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-25T14:44:36.184944Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000183680] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 >> TxUsage::WriteToTopic_Demo_40_Table >> TxUsage::WriteToTopic_Demo_29_Query >> YdbYqlClient::CopyTables [GOOD] >> YdbYqlClient::CreateAndAltertTableWithCompactionPolicy >> YdbTableBulkUpsertOlap::UpsertCSV [GOOD] >> YdbTableBulkUpsertOlap::UpsertCSV_DataShard >> YdbS3Internal::TestS3Listing [GOOD] >> YdbS3Internal::TestAccessCheck >> TxUsage::SessionAbort_Table [GOOD] >> SystemView::QueryStatsRetries [GOOD] >> TxUsage::WriteToTopic_Demo_9_Query [GOOD] >> YdbQueryService::TestForbidExecuteWithoutAttach [GOOD] >> YdbQueryService::TestCreateDropAttachSession >> YdbYqlClient::ConnectDbAclIsOffWhenTokenIsOptionalAndNull [GOOD] >> YdbYqlClient::ColumnFamiliesWithStorageAndIndex >> TGRpcNewClient::TestAuth >> YdbTableBulkUpsert::Simple >> YdbImport::Simple >> TxUsage::SessionAbort_Query >> TYqlDecimalTests::SimpleUpsertSelect [GOOD] >> TYqlDecimalTests::NegativeValues >> YdbTableBulkUpsert::Nulls >> TGRpcNewCoordinationClient::SessionCreateUpdateDeleteSemaphore [GOOD] >> TxUsage::WriteToTopic_Demo_50_Table >> TAuthenticationWithSqlExecution::CreateAlterUserWithHash >> TGRpcNewClient::SimpleYqlQuery [GOOD] >> TGRpcNewClient::CreateAlterUpsertDrop ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::QueryStatsRetries [GOOD] Test command err: 2025-06-25T14:40:38.978023Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896912418880678:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:38.978126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012e7/r3tmp/tmpGEHZ1K/pdisk_1.dat 2025-06-25T14:40:39.199069Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:39.231826Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 3566, node 1 2025-06-25T14:40:39.257303Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:39.257321Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:39.257334Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:39.257425Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:39.304049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:39.304164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:39.306657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20825 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:39.486102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:39.519440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 
2025-06-25T14:40:39.531828Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519896917559392683:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:39.531873Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:39.534733Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519896918225019323:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:39.534766Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:39.547468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:39.554267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:39.554349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:39.555678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:39.555719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:39.565971Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-25T14:40:39.566161Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-25T14:40:39.589293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:39.611198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:39.735349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 
2025-06-25T14:40:39.747727Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519896918173993999:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:39.747773Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:39.751470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:39.751557Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:39.752750Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896917559196173:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:39.752851Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:39.754052Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:40:39.758013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:40:39.759351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-25T14:40:39.763247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:39.763348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:39.776368Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:40:39.776715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:39.985116Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:40.592388Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:40.592425Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:40.752597Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:40.757784Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:41.345024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:41.469041Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896925303783857:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:41.469043Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896925303783866:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:41.469130Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:41.472484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715663:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 20 ... d: [71:7519897890892976415:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:26.490441Z node 71 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:26.497120Z node 71 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:26.547563Z node 71 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [71:7519897890892976429:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:44:26.654345Z node 71 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [71:7519897890892976505:2711] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:26.894782Z node 71 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrr6qq0d715dw8dcgc0wm7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=71&id=ZDY1NGY3MDMtNDhhMzI4OWItZDM2ZGI5OWUtNTJlYWQwNWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:27.099946Z node 71 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jykrr759f5cvrt60843fewq2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=71&id=OTg0ZWVhY2EtMzlhZjM3ZC00OTJlYTQwMy0xM2NiZjk0Nw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:27.102482Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [71:7519897895187943890:2330], owner: [71:7519897895187943886:2328], scan id: 0, sys view info: Type: ETopQueriesByRequestUnitsOneHour SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:44:27.103614Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [71:7519897895187943890:2330], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:44:27.104261Z node 71 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [71:7519897895187943890:2330], row count: 1, finished: 1 2025-06-25T14:44:27.104383Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [71:7519897895187943890:2330], owner: [71:7519897895187943886:2328], scan id: 0, sys view info: Type: ETopQueriesByRequestUnitsOneHour SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:44:27.107629Z node 71 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862667092, txId: 281474976710662] shutting down 2025-06-25T14:44:29.909374Z node 76 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[76:7519897904201811527:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:29.909495Z node 76 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012e7/r3tmp/tmpJOZmFD/pdisk_1.dat 2025-06-25T14:44:30.162322Z node 76 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:30.173858Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:30.174026Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:30.184934Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: 
Connecting -> Connected TServer::EnableGrpc on GrpcPort 2746, node 76 2025-06-25T14:44:30.277444Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:30.277476Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:30.277495Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:30.277732Z node 76 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6221 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:30.926550Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:44:30.928922Z node 76 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:44:30.975798Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:44:34.910646Z node 76 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[76:7519897904201811527:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:34.910751Z node 76 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:36.376388Z node 76 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [76:7519897934266583584:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:36.376389Z node 76 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [76:7519897934266583592:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:36.376538Z node 76 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:36.382052Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:36.416562Z node 76 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [76:7519897934266583598:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:36.498472Z node 76 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [76:7519897934266583670:2707] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:36.719634Z node 76 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrrgcp0ebgkafxxpkcde4b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=76&id=NWFlNWQyYWUtZTIwNzc0ZDUtMzA1YzVkNTUtNDM3NDUxOWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:36.968683Z node 76 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrrgrd6xgzypzfx3mtysc1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=76&id=NDM2ZTgyMzQtMTdkM2VkN2YtNTdjMzg0Y2YtZjhjMjNiNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:36.971542Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [76:7519897934266583759:2330], owner: [76:7519897934266583755:2328], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:44:36.972398Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [76:7519897934266583759:2330], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:44:36.973269Z node 76 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [76:7519897934266583759:2330], row count: 1, finished: 1 2025-06-25T14:44:36.973409Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [76:7519897934266583759:2330], owner: [76:7519897934266583755:2328], scan id: 0, sys view info: Type: ETopQueriesByReadBytesOneMinute SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:44:36.976915Z node 76 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862676962, txId: 281474976715662] shutting down >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcNewCoordinationClient::SessionCreateUpdateDeleteSemaphore [GOOD] Test command err: 2025-06-25T14:44:18.215208Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897856140926687:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:18.215326Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001826/r3tmp/tmpc3b0E4/pdisk_1.dat 2025-06-25T14:44:18.579452Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10139, node 1 2025-06-25T14:44:18.633108Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 
2025-06-25T14:44:18.634214Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:18.634295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:18.647344Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:18.791323Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:18.791351Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:18.791364Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:18.791464Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3601 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:19.112023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:19.240718Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:19.292977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:44:22.496453Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897873128193605:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:22.496526Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001826/r3tmp/tmpURyNXW/pdisk_1.dat 2025-06-25T14:44:22.661989Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:22.681757Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:22.681856Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:22.686992Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26107, node 4 2025-06-25T14:44:22.793294Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:22.793319Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:22.793326Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:22.793467Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24451 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:23.098865Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:23.112123Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:44:23.189987Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:44:23.561735Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:26.895798Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897891591910691:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:26.895891Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001826/r3tmp/tmpySnD3A/pdisk_1.dat 2025-06-25T14:44:27.052289Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:27.073462Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:27.073546Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:27.077385Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26275, node 7 2025-06-25T14:44:27.243515Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:27.243530Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:27.243535Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:27.243651Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2192 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:27.506690Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:27.665302Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:44:27.903059Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:31.308297Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897915130178011:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:31.308399Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001826/r3tmp/tmp1Qllga/pdisk_1.dat 2025-06-25T14:44:31.418488Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:31.443621Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:31.443710Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:31.450315Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26050, node 10 2025-06-25T14:44:31.512921Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:31.512940Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:31.512945Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:31.513045Z node 10 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28299 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:31.777730Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:31.792987Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:44:31.894943Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:44:35.764392Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897931931528889:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:35.764501Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001826/r3tmp/tmpc1uQW6/pdisk_1.dat 2025-06-25T14:44:35.934927Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:35.955417Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:35.955499Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:35.965740Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:35.984806Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 21064, node 13 2025-06-25T14:44:36.025560Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use 
file: (empty maybe) 2025-06-25T14:44:36.025582Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:36.025590Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:36.025720Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18841 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:36.295382Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:36.375551Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) >> YdbTableBulkUpsertOlap::ParquetImportBug [GOOD] >> YdbTableBulkUpsertOlap::ParquetImportBug_Datashard >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientWithCorrectCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesEmptyClientCerts >> YdbYqlClient::TestDoubleKey [GOOD] >> YdbYqlClient::TestMultipleModifications >> TTableProfileTests::OverwriteExecutionPolicy [GOOD] >> TTableProfileTests::OverwritePartitioningPolicy >> TGRpcYdbTest::DropTableBadRequest >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Query >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientProvidesCorrectCerts >> YdbYqlClient::TestVariant [GOOD] >> YdbYqlClient::TestTransactionQueryError ------- [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/ut/unittest >> Viewer::JsonStorageListingV1PDiskIdFilter 2025-06-25 14:44:39,750 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 14:44:40,109 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination:
 pid    rss    ref    pdirt
 196784 47.4M  46.5M  23.9M  test_tool run_ut @/home/runner/.ya/build/build_root/yft8/0007bb/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/chunk4/testing_out_stuff/test_tool.args
 198777 3.9G   3.8G   3.9G   └─ ydb-core-viewer-ut --trace-path-append /home/runner/.ya/build/build_root/yft8/0007bb/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/chunk4/ytest.report.trace
Test command err: 2025-06-25T14:34:58.674517Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:121:2167], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:34:58.674894Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:34:58.675137Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:34:59.041031Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 11240, node 1 TClient is connected to server localhost:14294 2025-06-25T14:36:07.772955Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:2812:2396], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:07.774638Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:07.775512Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:36:07.775713Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:2255:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:07.775896Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:2258:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:07.776092Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:2264:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:07.830185Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:07.830312Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:07.830638Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:07.830685Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:36:07.831057Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:36:07.831120Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:36:07.831931Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:2270:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:07.832999Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:2261:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:07.834097Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:07.834911Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:07.835021Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:36:07.835470Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:36:07.836418Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:2267:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:07.837650Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:07.837973Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:36:07.840164Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:2806:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:07.841827Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:1276:2184], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:36:07.842054Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:36:07.842579Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:36:07.843035Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:36:07.843080Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:36:08.561914Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:36:08.804223Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:36:08.878920Z node 2 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-25T14:36:10.648800Z node 2 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 9801, node 2 TClient is connected to server localhost:61974 2025-06-25T14:36:12.107907Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:36:12.107992Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:36:12.108053Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:36:12.117757Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:02.448738Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:2631:2396], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:02.451412Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:02.452005Z node 18 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [18:2798:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:02.453285Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:2592:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:02.453460Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:38:02.454363Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [16:2792:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:02.454490Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:02.454568Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:38:02.454721Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [13:2788:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:02.455606Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:02.455952Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:02.456368Z node 12 :METADATA_PROVIDER ERROR: log. ... OR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:40:04.791177Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:40:04.791340Z node 24 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [24:2269:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:40:04.792135Z node 24 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:40:04.792709Z node 24 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:40:05.230249Z node 20 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:05.430203Z node 20 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:40:05.466823Z node 20 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-25T14:40:06.287261Z node 20 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 18543, node 20 TClient is connected to server localhost:5879 2025-06-25T14:40:06.862347Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:06.862438Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:06.862506Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:06.863337Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:42:24.863899Z node 29 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [29:2810:2396], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:42:24.864910Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:42:24.865627Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:42:24.867008Z node 30 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [30:2804:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:42:24.867694Z node 31 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [31:2813:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:42:24.867850Z node 32 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [32:2817:2342], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:42:24.868089Z node 36 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [36:1473:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:42:24.868218Z node 37 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [37:1476:2339], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:42:24.868850Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:42:24.869155Z node 34 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [34:2823:2342], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:42:24.869891Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:42:24.869942Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:42:24.870016Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:42:24.870190Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:42:24.870239Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:42:24.870539Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:42:24.870576Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:42:24.870869Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:42:24.870955Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:42:24.871020Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:42:24.871354Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:42:24.871780Z node 35 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [35:2826:2340], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:42:24.872325Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:42:24.873033Z node 33 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [33:2820:2342], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:42:24.873127Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:42:24.874128Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:42:24.874178Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-25T14:42:25.366077Z node 29 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:25.573724Z node 29 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T14:42:25.603398Z node 29 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:436} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-25T14:42:26.254102Z node 29 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:374} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 31385, node 29 TClient is connected to server localhost:10105 2025-06-25T14:42:26.833405Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:26.833497Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:26.833570Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:26.833971Z node 29 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/0007bb/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/chunk4/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/0007bb/ydb/core/viewer/ut/test-results/unittest/testing_out_stuff/chunk4/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) >> YdbQueryService::TestCreateDropAttachSession [GOOD] >> 
YdbQueryService::TestCreateAttachAndDropAttachedSession >> TGRpcNewClient::TestAuth [GOOD] >> TGRpcNewClient::YqlQueryWithParams |85.0%| [TA] $(B)/ydb/core/viewer/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.0%| [TA] {RESULT} $(B)/ydb/core/viewer/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> YdbLogStore::LogStoreNegative [GOOD] >> YdbLogStore::Dirs >> YdbYqlClient::CreateAndAltertTableWithCompactionPolicy [GOOD] >> YdbYqlClient::CreateAndAltertTableWithKeyBloomFilter >> YdbS3Internal::TestAccessCheck [GOOD] >> YdbS3Internal::BadRequests >> YdbTableBulkUpsertOlap::UpsertCSV_DataShard [GOOD] >> YdbTableBulkUpsertOlap::UpsertMixed >> TGRpcNewClient::CreateAlterUpsertDrop [GOOD] >> TGRpcNewClient::InMemoryTables >> YdbTableBulkUpsert::Simple [GOOD] >> YdbTableBulkUpsert::SyncIndexShouldSucceed >> YdbYqlClient::ColumnFamiliesWithStorageAndIndex [GOOD] >> YdbYqlClient::ColumnFamiliesDescriptionWithStorageAndIndex >> YdbImport::Simple [GOOD] >> YdbIndexTable::AlterIndexImplBySuperUser >> TxUsage::WriteToTopic_Demo_15_Query [GOOD] >> TYqlDecimalTests::NegativeValues [GOOD] >> TYqlDecimalTests::DecimalKey >> TAuthenticationWithSqlExecution::CreateAlterUserWithHash [GOOD] >> TDatabaseQuotas::DisableWritesToDatabase >> TGRpcAuthentication::ValidCredentials >> TxUsage::WriteToTopic_Demo_16_Table >> TGRpcYdbTest::DropTableBadRequest [GOOD] >> TGRpcYdbTest::CreateTableWithIndex >> YdbTableBulkUpsertOlap::ParquetImportBug_Datashard [GOOD] >> YdbTableBulkUpsertOlap::UpsertArrowBatch_DataShard >> YdbYqlClient::TestMultipleModifications [GOOD] >> YdbYqlClient::TestDescribeTableWithShardStats >> TxUsage::WriteToTopic_Demo_23_RestartNo_Query [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query [GOOD] >> YdbYqlClient::TestTransactionQueryError [GOOD] >> YdbYqlClient::TestReadWrongTable >> YdbQueryService::TestCreateAttachAndDropAttachedSession [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Table >> TGRpcNewClient::YqlQueryWithParams [GOOD] >> TGRpcNewClient::YqlExplainDataQuery >> TxUsage::WriteToTopic_Demo_20_RestartNo_Query >> YdbS3Internal::BadRequests [GOOD] >> YdbScripting::BasicV0 >> YdbTableBulkUpsert::SyncIndexShouldSucceed [GOOD] >> YdbTableBulkUpsert::Overload >> YdbYqlClient::CreateAndAltertTableWithKeyBloomFilter [GOOD] >> TxUsage::WriteToTopic_Demo_29_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbQueryService::TestCreateAttachAndDropAttachedSession [GOOD] Test command err: 2025-06-25T14:44:26.207904Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897892164424990:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:26.207964Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001819/r3tmp/tmpJNPhcd/pdisk_1.dat 2025-06-25T14:44:26.573114Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:26.620839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:26.620916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:26.629101Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9041, node 1 2025-06-25T14:44:26.713179Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:26.713201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:26.713206Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:26.713319Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7958 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:27.079188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:27.213056Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:28.985676Z node 1 :KQP_PROXY WARN: kqp_proxy_service.cpp:1578: Failed to parse session id: unknownSesson 2025-06-25T14:44:30.385164Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897910552437096:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:30.385221Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001819/r3tmp/tmpVOqPIF/pdisk_1.dat 2025-06-25T14:44:30.575558Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7007, node 4 2025-06-25T14:44:30.697067Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:30.697088Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:30.697098Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:30.697260Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:30.702057Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:30.702157Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:30.707352Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7680 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:44:30.940929Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:44:31.407692Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:34.961098Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897927672930701:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:34.961162Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001819/r3tmp/tmpF2Tt4k/pdisk_1.dat 2025-06-25T14:44:35.082733Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:35.097738Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:35.097817Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:35.103638Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24849, node 7 2025-06-25T14:44:35.153372Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:35.153393Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:35.153402Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:35.153536Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23833 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:35.396628Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:35.984561Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:39.867800Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897949794944482:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:39.867894Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001819/r3tmp/tmp9a1epr/pdisk_1.dat 2025-06-25T14:44:40.231545Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28460, node 10 2025-06-25T14:44:40.285009Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:40.285111Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:40.299829Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:40.366840Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:40.366866Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:40.366875Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:40.367005Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23508 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:40.645834Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:40.943670Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:44.818621Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897969975802540:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:44.818689Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001819/r3tmp/tmposbGqb/pdisk_1.dat 2025-06-25T14:44:44.955913Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:44.972978Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:44.973064Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:44.980095Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20147, node 13 2025-06-25T14:44:45.081160Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:45.081184Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:45.081194Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:45.081328Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11026 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:45.333572Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:45.348946Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:44:45.845913Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:47.967853Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:44:47.970599Z node 13 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-25T14:44:47.979881Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: TraceId: "01jykrrs7b73t8ht95g0nse4nh", Request has 18444993211021.571763s seconds to be completed 2025-06-25T14:44:47.984377Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=13&id=NDQ4Y2IyZWItNmQ3ZWE4OWItM2NlYjdjY2MtMjA5YmQwZmI=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NDQ4Y2IyZWItNmQ3ZWE4OWItM2NlYjdjY2MtMjA5YmQwZmI= 2025-06-25T14:44:47.984458Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jykrrs7b73t8ht95g0nse4nh", Created new session, sessionId: ydb://session/3?node_id=13&id=NDQ4Y2IyZWItNmQ3ZWE4OWItM2NlYjdjY2MtMjA5YmQwZmI=, workerId: [13:7519897982860705433:2293], database: , longSession: 1, local sessions count: 1 2025-06-25T14:44:47.984501Z node 13 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-25T14:44:47.984708Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 01jykrrs7b73t8ht95g0nse4nh 2025-06-25T14:44:47.984761Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:44:47.984787Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-25T14:44:47.984807Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:44:47.984852Z node 13 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-25T14:44:47.984932Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:44:47.985015Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:44:47.985670Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=13&id=NDQ4Y2IyZWItNmQ3ZWE4OWItM2NlYjdjY2MtMjA5YmQwZmI=, ActorId: [13:7519897982860705433:2293], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:44:47.990754Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:44:47.990817Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:44:47.990840Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:44:48.002707Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:856: Received ping session request, has local session: ydb://session/3?node_id=13&id=NDQ4Y2IyZWItNmQ3ZWE4OWItM2NlYjdjY2MtMjA5YmQwZmI=, rpc ctrl: [13:7519897987155672731:2294], sameNode: 1, trace_id: 2025-06-25T14:44:48.002745Z node 13 :KQP_PROXY TRACE: kqp_proxy_service.cpp:878: Attach local session: [13:7519897982860705433:2293] to rpc: [13:7519897987155672731:2294] on same node 2025-06-25T14:44:48.014398Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2370: SessionId: ydb://session/3?node_id=13&id=NDQ4Y2IyZWItNmQ3ZWE4OWItM2NlYjdjY2MtMjA5YmQwZmI=, ActorId: [13:7519897982860705433:2293], ActorState: ReadyState, Session closed due to explicit close event 2025-06-25T14:44:48.014461Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=13&id=NDQ4Y2IyZWItNmQ3ZWE4OWItM2NlYjdjY2MtMjA5YmQwZmI=, ActorId: [13:7519897982860705433:2293], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:44:48.014503Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=13&id=NDQ4Y2IyZWItNmQ3ZWE4OWItM2NlYjdjY2MtMjA5YmQwZmI=, ActorId: [13:7519897982860705433:2293], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-25T14:44:48.014530Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=13&id=NDQ4Y2IyZWItNmQ3ZWE4OWItM2NlYjdjY2MtMjA5YmQwZmI=, ActorId: [13:7519897982860705433:2293], ActorState: unknown state, Cleanup temp tables: 0 2025-06-25T14:44:48.014643Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=13&id=NDQ4Y2IyZWItNmQ3ZWE4OWItM2NlYjdjY2MtMjA5YmQwZmI=, ActorId: [13:7519897982860705433:2293], ActorState: unknown state, Session actor destroyed 2025-06-25T14:44:48.014732Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=13&id=NDQ4Y2IyZWItNmQ3ZWE4OWItM2NlYjdjY2MtMjA5YmQwZmI=, workerId: [13:7519897982860705433:2293], local 
sessions count: 0 2025-06-25T14:44:48.022523Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:902: Received ping session request, request_id: 3, sender: [13:7519897987155672734:2296], trace_id: 2025-06-25T14:44:48.022672Z node 13 :KQP_PROXY NOTICE: kqp_proxy_service.cpp:1585: Session not found: ydb://session/3?node_id=13&id=NDQ4Y2IyZWItNmQ3ZWE4OWItM2NlYjdjY2MtMjA5YmQwZmI= 2025-06-25T14:44:48.022764Z node 13 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 3, sender: [13:7519897987155672734:2296], selfId: [13:7519897969975802654:2149], source: [13:7519897969975802654:2149] >> TTableProfileTests::OverwritePartitioningPolicy [GOOD] >> TTableProfileTests::OverwriteStoragePolicy >> YdbLogStore::Dirs [GOOD] >> YdbLogStore::LogTable >> YdbIndexTable::AlterIndexImplBySuperUser [GOOD] >> YdbIndexTable::CreateTableAddIndex >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesEmptyClientCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesServerCerts >> TGRpcNewClient::InMemoryTables [GOOD] >> TxUsage::WriteToTopic_Demo_50_Table [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithOutCertVerification_ClientProvidesExpiredCert [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientDoesNotProvideClientCerts >> YdbTableBulkUpsertOlap::UpsertMixed [GOOD] >> YdbYqlClient::AlterTableAddIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CreateAndAltertTableWithKeyBloomFilter [GOOD] Test command err: 2025-06-25T14:44:23.410351Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897878762248920:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:23.410497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001822/r3tmp/tmpHUVz2W/pdisk_1.dat 2025-06-25T14:44:23.838247Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:23.851742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:23.851840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:23.879848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28312, node 1 2025-06-25T14:44:24.113337Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:24.113367Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:24.113381Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:24.113602Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19446 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:24.417082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:44:24.420116Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:44:24.479005Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:542: Skip check permission connect db, AllowYdbRequestsWithoutDatabase is off, there is no db provided from user, database: /Root, user: root@builtin, from ip: ipv6:[::1]:36014 Call 2025-06-25T14:44:24.498538Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:542: Skip check permission connect db, AllowYdbRequestsWithoutDatabase is off, there is no db provided from user, database: /Root, user: root@builtin, from ip: ipv6:[::1]:36026 2025-06-25T14:44:26.468495Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:542: Skip check permission connect db, AllowYdbRequestsWithoutDatabase is off, there is no db provided from user, database: /Root, user: root@builtin, from ip: ipv6:[::1]:34854 Call Call 2025-06-25T14:44:26.545293Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:578: Skip check permission connect db, user is a admin, database: /Root, user: root@builtin, from ip: ipv6:[::1]:34890 2025-06-25T14:44:26.555200Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:578: Skip check permission connect db, user is a admin, database: /Root, user: root@builtin, from ip: ipv6:[::1]:34904 2025-06-25T14:44:26.556530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:44:28.130021Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897899700012135:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:28.130161Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot 
detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001822/r3tmp/tmpNxqVbn/pdisk_1.dat 2025-06-25T14:44:28.299296Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:28.313482Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:28.313563Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:28.319032Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26941, node 4 2025-06-25T14:44:28.394142Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:28.394159Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:28.394166Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:28.394318Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62270 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:28.685998Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:29.151913Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:32.526654Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897917645525397:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:32.526691Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001822/r3tmp/tmpEaXffA/pdisk_1.dat 2025-06-25T14:44:32.707246Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3625, node 7 2025-06-25T14:44:32.875767Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:32.875846Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:32.898143Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:32.912586Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:32.912610Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:32.912618Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:32.912766Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6500 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:33.185644Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:33.533499Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_ ... 
1750862677340, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:44:37.299651Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 281474976715688, done: 0, blocked: 1 2025-06-25T14:44:37.304167Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715688:0 2025-06-25T14:44:37.318938Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037893 not found 2025-06-25T14:44:37.319590Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:44:39.041169Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897950312687750:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:39.041205Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001822/r3tmp/tmpwQg6ti/pdisk_1.dat 2025-06-25T14:44:39.238404Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:39.262278Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:39.262365Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:39.270555Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:39.285078Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 5487, node 10 2025-06-25T14:44:39.421279Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:39.421299Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:39.421307Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:39.421436Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17546 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:39.866025Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:40.064435Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17546 2025-06-25T14:44:42.582426Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient is connected to server localhost:17546 TClient::Ls request: Root/Test TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Test" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750862682709 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Test" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... 
(TRUNCATED) 2025-06-25T14:44:42.973952Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) TClient is connected to server localhost:17546 TClient::Ls request: Root/Test TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Test" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750862682709 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Test" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) 2025-06-25T14:44:45.144732Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897974707738918:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:45.144789Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001822/r3tmp/tmpaQbF3k/pdisk_1.dat 2025-06-25T14:44:45.382916Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:45.441236Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 2072, node 13 2025-06-25T14:44:45.491212Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:45.491309Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:45.526297Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:45.541385Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:45.541408Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:45.541417Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:45.541560Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8770 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:45.832384Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:46.166407Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:48.592741Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:48.734103Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) >> TGRpcAuthentication::ValidCredentials [GOOD] >> TxUsage::WriteToTopic_Demo_30_Table >> TGRpcAuthentication::NoConnectRights ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcNewClient::InMemoryTables [GOOD] Test command err: 2025-06-25T14:44:27.153087Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897897117549953:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:27.155399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00180f/r3tmp/tmpo8JPR0/pdisk_1.dat 2025-06-25T14:44:27.593129Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:27.631941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:27.632037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-25T14:44:27.632662Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 24253, node 1 2025-06-25T14:44:27.685297Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:27.735055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:27.735076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:27.735096Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:27.735201Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17485 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:28.115968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:28.172532Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:31.298878Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897912915358419:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:31.298975Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00180f/r3tmp/tmpYkUc46/pdisk_1.dat 2025-06-25T14:44:31.438814Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:31.458884Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:31.458944Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:31.463226Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10496, node 4 2025-06-25T14:44:31.551092Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:31.551119Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:31.551127Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:31.551275Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10388 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:31.838117Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:35.620669Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897929362743370:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:35.620725Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00180f/r3tmp/tmpO10pwO/pdisk_1.dat 2025-06-25T14:44:35.723280Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:35.741775Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:35.741853Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:35.745434Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27756, node 7 2025-06-25T14:44:35.802710Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:35.802730Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:35.802737Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:35.802875Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63368 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:36.031531Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:36.629756Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:38.682492Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:38.861276Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519897942247646395:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:38.861354Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:38.861430Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519897942247646407:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:38.872542Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /ho ... s: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:41.040862Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897956499968756:2088];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:41.046228Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00180f/r3tmp/tmp4qCMzm/pdisk_1.dat 2025-06-25T14:44:41.178623Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:41.192783Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:41.192862Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:41.203925Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:41.219792Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 TServer::EnableGrpc on GrpcPort 18328, node 10 2025-06-25T14:44:41.268837Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:41.268859Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:41.268867Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:41.269002Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32668 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:41.607839Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:42.057077Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:43.902075Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:44.008245Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:44:44.040244Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897969384871867:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:44.040319Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519897969384871859:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:44.040423Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:44.044108Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:44.066548Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519897969384871873:2320], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-25T14:44:44.150939Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519897969384871946:2882] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:44.235957Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrrqw6evtg1zyqg3t6853p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=NjU1ZGQ0ZTAtZDJkZGE1YzktNTM2NWZiOTgtNzM0OTFlMGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:44.365504Z node 10 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 10, TabletId: 72075186224037888 not found 2025-06-25T14:44:45.820871Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897975423686239:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:45.829087Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00180f/r3tmp/tmp0GMTlM/pdisk_1.dat 2025-06-25T14:44:45.965891Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:45.993579Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:45.993660Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:46.013645Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19557, node 13 2025-06-25T14:44:46.104953Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:46.104975Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:46.104984Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:46.105126Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3289 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:46.450249Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:46.824451Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:48.902721Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:49.029061Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:44:49.109912Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) >> YdbYqlClient::ColumnFamiliesDescriptionWithStorageAndIndex [GOOD] >> YdbYqlClient::ColumnFamiliesExternalBlobsWithoutDefaultProfile >> TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Table >> YdbOlapStore::LogLast50 >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientProvidesCorrectCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientProvidesEmptyClientCerts >> TxUsage::SessionAbort_Query [GOOD] >> YdbTableBulkUpsertOlap::UpsertArrowBatch_DataShard [GOOD] >> TYqlDecimalTests::DecimalKey [GOOD] >> YdbYqlClient::TestDescribeTableWithShardStats [GOOD] >> YdbYqlClient::TestExplicitPartitioning >> TGRpcYdbTest::CreateTableWithIndex [GOOD] >> TGRpcYdbTest::CreateYqlSession >> YdbYqlClient::RetryOperationAsync >> 
TxUsage::Offsets_Cannot_Be_Promoted_When_Reading_In_A_Transaction_Table >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvideIncorrectCerts >> YdbYqlClient::TestReadWrongTable [GOOD] >> KqpBatchDelete::ManyPartitions_2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsertOlap::UpsertArrowBatch_DataShard [GOOD] Test command err: 2025-06-25T14:44:27.087289Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897897011411409:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:27.088803Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00180e/r3tmp/tmprIjEx3/pdisk_1.dat 2025-06-25T14:44:27.539822Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:27.558593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:27.558669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8244, node 1 2025-06-25T14:44:27.576196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:27.791810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:27.791850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:27.791861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:27.792074Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3653 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:44:28.092850Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:28.115785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:3653 2025-06-25T14:44:28.349813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) waiting... 2025-06-25T14:44:28.452786Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:44:28.490116Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:44:28.490313Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037889 2025-06-25T14:44:28.498222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:44:28.498473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:44:28.498905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:44:28.499246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:44:28.499368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:44:28.499471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:44:28.499584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:44:28.499706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:44:28.499812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:44:28.499918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:44:28.500040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897901306379701:2283];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:44:28.503641Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:44:28.536764Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:44:28.536907Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037888 2025-06-25T14:44:28.542406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:44:28.542470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:44:28.542686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:44:28.542798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:44:28.542933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:44:28.543039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:44:28.543232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:44:28.543350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:44:28.543457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:44:28.543578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:44:28.543676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519897901306379674:2280];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:44:28.546296Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519897901306379675:2281];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:44:28.590028Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519897901306379675:2281];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:44:28.590150Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037890 2025-06-25T14:44:28.595536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519897901306379675:2281];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; ... 
eat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:44:50.802512Z node 13 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1750862690808} 2025-06-25T14:44:50.802586Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:44:50.802641Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:44:50.802669Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:44:50.802698Z node 13 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:44:50.802753Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750862690808 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [13:7519897983990894906:2218], exec latency: 0 ms, propose latency: 10 ms 2025-06-25T14:44:50.802796Z node 13 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-25T14:44:50.802833Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:44:50.803787Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1750862690815 2025-06-25T14:44:50.806136Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-25T14:44:50.806234Z node 13 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8984;columns=10; 2025-06-25T14:44:50.829206Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [13:7519897996875797598:2739], serverId# [13:7519897996875797599:2740], sessionId# [0:0:0] 2025-06-25T14:44:50.829329Z node 13 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(36) Execute: at tablet# 72075186224037888 2025-06-25T14:44:50.832328Z node 13 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(36) Complete: at tablet# 72075186224037888 2025-06-25T14:44:50.832371Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 SUCCESS Upsert done: 0.025574s 2025-06-25T14:44:50.849796Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897996875797607:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:50.849878Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:50.850136Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897996875797619:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:50.853528Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:50.864030Z node 13 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:44:50.872259Z node 13 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:44:50.877095Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519897996875797621:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:50.949882Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519897996875797695:2803] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:51.054541Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:44:51.054642Z node 13 :TX_DATASHARD DEBUG: check_snapshot_tx_unit.cpp:153: Prepared Snapshot transaction txId 281474976715661 at tablet 72075186224037888 2025-06-25T14:44:51.059040Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:44:51.061380Z node 13 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715661 at step 1750862691109 at tablet 72075186224037888 { Transactions { TxId: 281474976715661 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750862691109 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:44:51.061400Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:44:51.061519Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:44:51.061533Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:44:51.061551Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1750862691109:281474976715661] in PlanQueue unit at 72075186224037888 2025-06-25T14:44:51.061673Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1750862691109:281474976715661 keys extracted: 0 2025-06-25T14:44:51.061944Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:44:51.065190Z node 13 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1750862691109} 2025-06-25T14:44:51.065228Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:44:51.065261Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750862691109 : 281474976715661] from 72075186224037888 at tablet 72075186224037888 send result to client [13:7519898001170765034:2818], exec latency: 0 ms, propose latency: 3 ms 2025-06-25T14:44:51.065277Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:44:51.066917Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrrygw1sjymms5z4n924m5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGZmYjY2ZGUtOWFlMmYyYmEtMmZkYWU2ZTktOTY5OGI3OTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:44:51.070316Z node 13 :TX_DATASHARD INFO: datashard__kqp_scan.cpp:214: Start scan, at: [13:7519898001170765062:2128], tablet: [13:7519897996875797498:2300], scanId: 4, table: /Root/LogsX, gen: 1, deadline: 2025-06-25T14:54:51.069649Z 2025-06-25T14:44:51.070410Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:109: Got ScanDataAck, at: [13:7519898001170765062:2128], scanId: 4, table: /Root/LogsX, gen: 1, tablet: [13:7519897996875797498:2300], freeSpace: 8388608;limits:(bytes=0;chunks=0); 2025-06-25T14:44:51.070434Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:124: Wakeup driver at: [13:7519898001170765062:2128] 2025-06-25T14:44:51.072080Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:311: Range 0 of 1 exhausted: try next one. table: /Root/LogsX range: [(Utf8 : NULL, Timestamp : NULL) ; ()) next range: 2025-06-25T14:44:51.072096Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:226: TableRanges is over, at: [13:7519898001170765062:2128], scanId: 4, table: /Root/LogsX 2025-06-25T14:44:51.072125Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:340: Finish scan, at: [13:7519898001170765062:2128], scanId: 4, table: /Root/LogsX, reason: Done, abortEvent: 2025-06-25T14:44:51.072156Z node 13 :TX_DATASHARD DEBUG: datashard__kqp_scan.cpp:465: Send ScanData, from: [13:7519898001170765062:2128], to: [13:7519898001170765057:2319], scanId: 4, table: /Root/LogsX, bytes: 11000, rows: 100, page faults: 0, finished: 1, pageFault: 0 2025-06-25T14:44:51.072475Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-25T14:44:51.072542Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:44:51.072556Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:44:51.072572Z node 13 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:44:51.072607Z node 13 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:44:51.082179Z node 13 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862691109, txId: 281474976715661] shutting down 2025-06-25T14:44:51.593804Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrryrz1933q3vnrx29vxdp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YWYzNjU3NDEtYjQxMWJkMWItZmNmNDg1NjAtYTUzZTIzNjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root SUCCESS count returned 100 rows Negative (wrong format): BAD_REQUEST Negative (wrong data): SCHEME_ERROR FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8016;columns=9; 2025-06-25T14:44:51.707176Z node 13 :ARROW_HELPER ERROR: log.cpp:784: fline=arrow_helpers.cpp:142;event=cannot_parse;message=Invalid: Ran out of field metadata, likely malformed;schema_columns_count=10;schema_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; Negative (less columns): BAD_REQUEST FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8984;columns=10; 2025-06-25T14:44:51.721437Z node 13 :ARROW_HELPER ERROR: log.cpp:784: fline=arrow_helpers.cpp:142;event=cannot_parse;message=Serialization error: batch is not valid: Invalid: Offsets buffer size (bytes): 400 isn't large enough for length: 100;schema_columns_count=10;schema_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; Negative (reordered columns): BAD_REQUEST ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TYqlDecimalTests::DecimalKey [GOOD] Test command err: 2025-06-25T14:44:24.832326Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897882807034486:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:24.832383Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00181a/r3tmp/tmpBGIoed/pdisk_1.dat 2025-06-25T14:44:25.211309Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:25.254213Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 9963, node 1 2025-06-25T14:44:25.279589Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:25.279841Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:25.330326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:25.363720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:25.363746Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:25.363755Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:25.363889Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29263 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:25.666011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:25.859929Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:27.787145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:29.703699Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897904180977188:2085];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:29.779474Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00181a/r3tmp/tmpXac4Vs/pdisk_1.dat 2025-06-25T14:44:29.913248Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:29.932387Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:29.932478Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:29.938491Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:29.948521Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 TServer::EnableGrpc on GrpcPort 63680, node 4 2025-06-25T14:44:29.990821Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:29.990846Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:29.990854Z node 4 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:29.991014Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27882 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:30.245415Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:30.704435Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:32.615627Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:34.440939Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897927734509132:2183];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:34.449535Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00181a/r3tmp/tmp7WRtoi/pdisk_1.dat 2025-06-25T14:44:34.644088Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:34.658825Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:34.658914Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:34.665575Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9170, node 7 2025-06-25T14:44:34.678970Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 2025-06-25T14:44:34.749408Z node 7 :NET_CLASSIFIER WARN: 
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:34.749428Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:34.749436Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:34.749592Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26695 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:35.021032Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:35.452823Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:37.641671Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table ... it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:44.235517Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrrqwx3r50zj68rhqmymnp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZWJkNWFhNDctMWU1YTNjNmItZjYyMTI1NWYtNzczMzUyNWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:44.340142Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jykrrr2m1atg3gmgp7j97k1v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZWJkNWFhNDctMWU1YTNjNmItZjYyMTI1NWYtNzczMzUyNWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:44.477922Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. 
Ctx: { TraceId: 01jykrrr5se3jphpr405b0h3cv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZWJkNWFhNDctMWU1YTNjNmItZjYyMTI1NWYtNzczMzUyNWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:44.576663Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jykrrra39m21vxecfvr5gqnv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZWJkNWFhNDctMWU1YTNjNmItZjYyMTI1NWYtNzczMzUyNWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:44.752711Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jykrrrd5brc48snfjm8msje8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=ZWJkNWFhNDctMWU1YTNjNmItZjYyMTI1NWYtNzczMzUyNWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:46.691318Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897978513811421:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:46.691417Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00181a/r3tmp/tmphB0Q5v/pdisk_1.dat 2025-06-25T14:44:47.000862Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:47.020032Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:47.020124Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:47.026034Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:47.035631Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 28962, node 13 2025-06-25T14:44:47.079633Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:47.079665Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:47.079675Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:47.079834Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24214 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:47.457010Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:47.716026Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:49.959743Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:50.054493Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897995693681758:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:50.054600Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:50.054941Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519897995693681770:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:50.059579Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:50.085351Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519897995693681772:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:50.167583Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519897995693681860:2792] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:50.269638Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrrxr4fcedejveax40rg13, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YWUyZjc1NzUtM2E5ZWU5NzAtODViZjFlNTgtM2U3YjhlMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:50.397410Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrrxza04pb7qmhhk2e7yv3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YWUyZjc1NzUtM2E5ZWU5NzAtODViZjFlNTgtM2U3YjhlMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:50.522924Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrry388ck987aq1enk2svd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YWUyZjc1NzUtM2E5ZWU5NzAtODViZjFlNTgtM2U3YjhlMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:50.659451Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykrry75862p922z916m578t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YWUyZjc1NzUtM2E5ZWU5NzAtODViZjFlNTgtM2U3YjhlMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:50.797229Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykrrybd64xe3zq590enehb2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YWUyZjc1NzUtM2E5ZWU5NzAtODViZjFlNTgtM2U3YjhlMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:50.917371Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jykrryfj0a6netqw4nsjcev3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YWUyZjc1NzUtM2E5ZWU5NzAtODViZjFlNTgtM2U3YjhlMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:51.037371Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykrrykab7cjw5npvp7q83w9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YWUyZjc1NzUtM2E5ZWU5NzAtODViZjFlNTgtM2U3YjhlMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:51.257598Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. Ctx: { TraceId: 01jykrryq294k3rq9t3qctnmh3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YWUyZjc1NzUtM2E5ZWU5NzAtODViZjFlNTgtM2U3YjhlMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:51.493329Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. 
Ctx: { TraceId: 01jykrryxz4wk3a6sn8mwbrzsq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YWUyZjc1NzUtM2E5ZWU5NzAtODViZjFlNTgtM2U3YjhlMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:51.692405Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519897978513811421:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:51.692480Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:51.790451Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jykrrz5df87hxbbqnpzyc5ag, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YWUyZjc1NzUtM2E5ZWU5NzAtODViZjFlNTgtM2U3YjhlMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TGRpcNewClient::YqlExplainDataQuery [GOOD] >> TGRpcNewCoordinationClient::CheckUnauthorized ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadWrongTable [GOOD] Test command err: 2025-06-25T14:44:33.842227Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897920917104115:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:33.842312Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ff/r3tmp/tmp8tLhee/pdisk_1.dat 2025-06-25T14:44:34.232718Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:34.300985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:34.301051Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:34.309564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23088, node 1 2025-06-25T14:44:34.364729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:34.364748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:34.364754Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:34.364828Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4999 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:34.619724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:34.853946Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:36.593965Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897933802006958:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:36.594352Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897933802006949:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:36.594430Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:36.598572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:36.620391Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897933802006963:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:44:36.696116Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897933802007037:2666] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ff/r3tmp/tmpSFQPKm/pdisk_1.dat 2025-06-25T14:44:39.000423Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897943819930535:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:39.034829Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:39.309601Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:39.350717Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:39.350800Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:39.365828Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61923, node 4 2025-06-25T14:44:39.652622Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:39.652662Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:39.652672Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:39.652830Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6730 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:44:39.996452Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:40.031106Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:42.381305Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897960999800724:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:42.381322Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897960999800729:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:42.381363Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:42.386286Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:42.407244Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519897960999800738:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:44:42.488121Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897960999800809:2673] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:44.232372Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897967955906571:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:44.232446Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ff/r3tmp/tmpkthlKp/pdisk_1.dat 2025-06-25T14:44:44.394964Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:44.413723Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:44.413785Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:44.418815Z node 7 :H ... e.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:47.223994Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:47.397189Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519897980840809623:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:47.397274Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:47.398368Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519897980840809628:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:47.402212Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:47.425844Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519897980840809630:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:47.520343Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519897980840809715:2793] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:47.582107Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrrv539ewjtn8j39fc0fhp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YWI5NGI1ODQtM2UxNDFhNzYtOTk5MDU0M2QtNjgxYjJkZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:47.680614Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrrvc57z79d5512ytzxhj0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NmM3YzczMDgtN2FlZWJiYWUtMjI2ZjI3YzgtMjM5ZDQzNjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:47.767504Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=7&id=YWI5NGI1ODQtM2UxNDFhNzYtOTk5MDU0M2QtNjgxYjJkZTE=, ActorId: [7:7519897980840809449:2295], ActorState: ExecuteState, TraceId: 01jykrrvem1s6sgp3qt4gryby2, Create QueryResponse for error on request, msg: 2025-06-25T14:44:49.427516Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519897991138191918:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:49.427585Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ff/r3tmp/tmpRLVKkw/pdisk_1.dat 2025-06-25T14:44:49.598323Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:49.614760Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:49.614846Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:49.619846Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14360, node 10 2025-06-25T14:44:49.712892Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:49.712915Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:49.712926Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:49.713046Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27379 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:50.102251Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:50.153583Z node 10 :GRPC_SERVER INFO: grpc_request_proxy.cpp:592: Got grpc request# ListEndpointsRequest, traceId# 01jykrrxv9bkwvbz0h78v6rg96, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:40582, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.998789s 2025-06-25T14:44:50.161155Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateSessionRequest, traceId# 01jykrrxvgf8dyxt61jqwjewh4, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:40590, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:50.435981Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:52.665504Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jykrs09sbh21ptq8wv79mat0, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:40600, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:44:52.667035Z node 10 :TX_PROXY ERROR: read_table_impl.cpp:567: [ReadTable [10:7519898004023094775:2297] TxId# 281474976715658] Navigate request failed for table 'Root/NoTable' 2025-06-25T14:44:52.667151Z node 10 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [10:7519898004023094775:2297] TxId# 281474976715658] RESPONSE Status# ResolveError shard: 0 table: Root/NoTable 2025-06-25T14:44:52.667586Z node 10 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [10:7519898004023094774:2297] Finish grpc stream, status: 400070
: Error: Failed to resolve table Root/NoTable, code: 200400
: Error: Got ResolveError response from TxProxy
: Error: Failed to resolve table Root/NoTable 2025-06-25T14:44:52.687149Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000ab080] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.687412Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000c4880] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.687638Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000117080] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.687826Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f3680] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.687997Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000036080] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.688187Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000033680] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.688408Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000080480] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.688644Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000035a80] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.688817Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00016c280] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.688991Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000c3c80] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.689159Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000c3080] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.689356Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00016aa80] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.689477Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000043880] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.689521Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00015de80] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.689703Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000140a80] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.689720Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00016b680] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-25T14:44:52.689872Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000c4280] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 >> YdbIndexTable::CreateTableAddIndex [GOOD] >> YdbIndexTable::AlterTableAddIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::ManyPartitions_2 [GOOD] Test command err: Trying to start YDB, gRPC: 23893, MsgBus: 25859 2025-06-25T14:42:52.442323Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897489874929746:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:52.442458Z node 1 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bb7/r3tmp/tmp6s7Ehk/pdisk_1.dat 2025-06-25T14:42:52.809077Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23893, node 1 2025-06-25T14:42:52.877291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:52.877315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:52.877321Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:52.877442Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:42:52.879208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:52.879322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:52.881142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25859 TClient is connected to server localhost:25859 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:53.381374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:53.396773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:53.467297Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:53.551589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:53.713790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:53.788997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:55.296338Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897502759833250:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.296472Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.579000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.606627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.634170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.665033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.691001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.723343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.789899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:55.838677Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897502759833912:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.838739Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.838750Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897502759833917:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:55.842068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:55.851522Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897502759833919:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:42:55.914890Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897502759833970:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:56.741061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:57.442625Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897489874929746:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:57.442683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 3646, MsgBus: 62534 2025-06-25T14:43:01.504816Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897529131648228:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:01.504877Z node ... pp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519897960861671225:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:42.091904Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bb7/r3tmp/tmpsi5wFv/pdisk_1.dat 2025-06-25T14:44:42.303452Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:42.317655Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:42.317757Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:42.320699Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519897960861671204:2080] 1750862682090743 != 1750862682090746 2025-06-25T14:44:42.321356Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63032, node 12 2025-06-25T14:44:42.397002Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:42.397033Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:42.397047Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:42.397222Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32719 2025-06-25T14:44:43.135963Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32719 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:43.267364Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:43.277903Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:44:43.295483Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:44:43.430452Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:44:43.728327Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:44:43.837667Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:44:47.092093Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519897960861671225:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:47.092185Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:47.558014Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519897982336509324:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:47.558150Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:47.666083Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:47.722304Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:47.800185Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:47.858753Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:47.905521Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:47.969944Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:48.047420Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:48.180507Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519897986631477287:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:48.180681Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:48.181163Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519897986631477292:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:48.188227Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:48.216860Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519897986631477294:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:44:48.281060Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519897986631477345:3435] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:49.744719Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> YdbScripting::BasicV0 [GOOD] >> YdbScripting::BasicV1 >> YdbOlapStore::LogLast50ByResource [GOOD] >> YdbOlapStore::LogNonExistingRequest >> TGRpcAuthentication::NoConnectRights [GOOD] >> TGRpcAuthentication::NoDescribeRights >> TxUsage::WriteToTopic_Demo_40_Table [GOOD] >> YdbYqlClient::AlterTableAddIndex [GOOD] >> YdbTableBulkUpsert::Overload [GOOD] >> YdbTableBulkUpsert::RetryOperationSync >> YdbOlapStore::ManyTables [GOOD] >> YdbOlapStore::LogPagingBetween >> TGRpcYdbTest::CreateYqlSession [GOOD] >> TGRpcYdbTest::CreateYqlSessionExecuteQuery >> YdbYqlClient::ColumnFamiliesExternalBlobsWithoutDefaultProfile [GOOD] >> YdbYqlClient::CheckDefaultTableSettings3 >> TxUsage::Sinks_Oltp_WriteToTopics_3_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::AlterTableAddIndex [GOOD] Test command err: 2025-06-25T14:44:19.620912Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897863175662972:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:19.620985Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001824/r3tmp/tmp8hrHig/pdisk_1.dat 2025-06-25T14:44:20.100494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:20.100602Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:20.127745Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:20.134890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:20.158312Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 27523, node 1 2025-06-25T14:44:20.201090Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:20.201115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:20.201124Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:20.201231Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:9204 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:20.607865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:20.648428Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:22.558454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) SUCCESS 2025-06-25T14:44:22.741697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897876060566036:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:22.741707Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897876060566045:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:22.741794Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:22.745286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:22.767916Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897876060566050:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:22.840296Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897876060566124:2804] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:23.164285Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrr32j990xk9ctr3ttngpd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTc3MDBlOTAtZTYzN2M5NmItOTY1Y2RjN2YtM2ZiZTNkMWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:23.180236Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862663200, txId: 281474976715661] shutting down 2025-06-25T14:44:23.236769Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:44:23.241814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) BAD_REQUEST 2025-06-25T14:44:23.410341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:23.428345Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found SUCCESS 2025-06-25T14:44:23.600161Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-25T14:44:25.096765Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897888876657327:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:25.097562Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001824/r3tmp/tmpOdFSZw/pdisk_1.dat 2025-06-25T14:44:25.266679Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:25.279470Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:25.279928Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:25.291285Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2377, node 4 2025-06-25T14:44:25.373585Z 
node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:25.373611Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:25.373621Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:25.373762Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10220 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:25.625791Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:10220 2025-06-25T14:44:25.873464Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) waiting... 2025-06-25T14:44:25.952166Z node 4 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[4:7519897888876658327:2279];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:44:25.980101Z node 4 : ... 
"o":"6","t":"AssembleOriginalData"},"w":29,"id":12},"26":{"p":{"p":{"data":[{"name":"timestamp","id":1},{"name":"resource_type","id":2},{"name":"resource_id","id":3},{"name":"uid","id":4},{"name":"level","id":5},{"name":"message","id":6},{"name":"json_payload","id":7},{"name":"ingested_at","id":8},{"name":"saved_at","id":9},{"name":"request_id","id":10},{"name":"flt","id":11},{"name":"dbl","id":12}]},"o":"0","t":"ReserveMemory"},"w":0,"id":26}}}; 2025-06-25T14:44:49.608764Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-06-25T14:44:49.608807Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-06-25T14:44:49.608924Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-06-25T14:44:49.608978Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-06-25T14:44:49.610657Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-06-25T14:44:49.610706Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-06-25T14:44:49.610995Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-06-25T14:44:49.612167Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=; 2025-06-25T14:44:49.624613Z node 10 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 1 at tablet 72075186224037888 2025-06-25T14:44:49.627139Z node 10 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 1 at tablet 72075186224037889 2025-06-25T14:44:49.643683Z node 10 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862689198, txId: 18446744073709551615] shutting down 2025-06-25T14:44:49.801227Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;parent=[10:7519897977957729477:2282];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:44:49.803244Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;parent=[10:7519897977957729471:2280];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:44:49.803292Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;parent=[10:7519897977957729473:2281];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:44:49.803317Z node 10 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;parent=[10:7519897977957729500:2283];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:44:51.480692Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519897999181596699:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:51.480749Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001824/r3tmp/tmpzAklla/pdisk_1.dat 2025-06-25T14:44:51.861268Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:51.861362Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:51.866834Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:51.886512Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23590, node 13 2025-06-25T14:44:52.137069Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:52.137095Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:52.137105Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:52.137283Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2050 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:44:52.580327Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:52.616248Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:55.719331Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519898016361466926:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:55.719434Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:55.759654Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:55.881968Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519898016361467091:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:55.882094Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:55.882525Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519898016361467096:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:55.886389Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:55.912530Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519898016361467098:2316], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:44:56.009561Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519898020656434475:2808] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:56.097427Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrs3e8f6abf0mpa0nbp4af, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=N2ZhMTg0NjQtZTA4YTViZDktNWUwYWQyN2QtYjIxM2M5Mjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:56.187548Z node 13 :TX_PROXY WARN: rpc_alter_table.cpp:329: [AlterTableAddIndex [13:7519898020656434540:2331] TxId# 281474976710663] Access check failed 2025-06-25T14:44:56.221331Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:56.306723Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:44:56.444800Z node 13 :TX_PROXY ERROR: rpc_alter_table.cpp:274: [AlterTableAddIndex [13:7519898020656434914:2345] TxId# 281474976710665] Unable to navigate: Root/WrongPath status: PathErrorUnknown 2025-06-25T14:44:56.480832Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519897999181596699:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:56.480902Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:56.585380Z node 13 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 13, TabletId: 72075186224037889 not found >> IndexBuildTest::Lock >> VectorIndexBuildTest::SimpleDuplicates >> IndexBuildTest::CancellationNotEnoughRetries >> TxUsage::WriteToTopic_Demo_40_Query >> IndexBuildTest::ShadowDataNotAllowedByDefault >> TxUsage::Sinks_Oltp_WriteToTopics_3_Query >> TGRpcNewCoordinationClient::CheckUnauthorized [GOOD] >> TGRpcNewCoordinationClient::BasicMethods >> YdbIndexTable::AlterTableAddIndex [GOOD] >> YdbLogStore::AlterLogStore >> TTableProfileTests::OverwriteStoragePolicy [GOOD] >> TTableProfileTests::OverwriteCachingPolicy >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesServerCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesCorruptedPrivatekey >> IndexBuildTest::ShadowDataNotAllowedByDefault [GOOD] >> IndexBuildTest::ShadowDataEdgeCases >> VectorIndexBuildTest::RecreatedColumns >> YdbScripting::BasicV1 [GOOD] >> 
IndexBuildTest::Lock [GOOD] >> IndexBuildTest::MergeIndexTableShardsOnlyWhenReady >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientProvidesEmptyClientCerts [GOOD] >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientWithCorrectCerts >> TGRpcAuthentication::NoDescribeRights [GOOD] >> TGRpcClientLowTest::BiStreamPing >> TxUsage::WriteToTopic_Demo_16_Table [GOOD] >> TxUsage::WriteToTopic_Demo_30_Table [GOOD] >> IndexBuildTest::ShadowDataEdgeCases [GOOD] >> IndexBuildTest::WithFollowers ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbScripting::BasicV1 [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001803/r3tmp/tmpqfrfbC/pdisk_1.dat 2025-06-25T14:44:33.886256Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897922146254888:2251];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:33.894883Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:34.239548Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:34.250835Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:34.250930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:34.255070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27431, node 1 2025-06-25T14:44:34.363918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:34.363938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:34.363947Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:34.364064Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29957 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:34.679243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:34.887249Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:36.626397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:37.119469Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897939326126734:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:37.119536Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897939326126726:2397], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:37.119645Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:37.122167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:37.136971Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897939326126740:2401], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:44:37.233418Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897939326126843:4116] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:37.663222Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrrh3x70zaq318hr1g3man, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTVhM2JjMTAtMzcxMzZjYmQtM2RhMjI2Y2QtMTcyYWFmZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS 2025-06-25T14:44:39.285897Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897947439634160:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:39.285945Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001803/r3tmp/tmpOYukJv/pdisk_1.dat 2025-06-25T14:44:39.529919Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1257, node 4 2025-06-25T14:44:39.567392Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-25T14:44:39.604077Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:39.604111Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:39.604120Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:39.604244Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:39.661155Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:39.661239Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:39.680201Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5590 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:39.925531Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:40.335882Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:42.308803Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:42.677466Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897960324538948:2397], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:42.677466Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897960324538953:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:42.677531Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:42.681150Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:42.698910Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519897960324538962:2401], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:44:42.764295Z node 4 :TX_PR ... ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:51.565899Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:53.713312Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519898010050450117:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:53.713428Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:53.861169Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:54.025002Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519898014345417613:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:54.025102Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:54.025202Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519898014345417618:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:54.028777Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:54.058432Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519898014345417620:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:54.165860Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519898014345417689:2787] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:54.241708Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrrz2a2h10hpz3k9rrzc2t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MjZlM2FlNDUtYzNiZDlkN2YtNWQzYzVjNmQtNTk2ZDgwMDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:54.375558Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrs1vs7btsk0d15q8m4a67, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=MjAwOTg0MjEtYTg2NzVkNzMtZWE4NWRmNzktZWQ1NTg1MDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:54.387308Z node 10 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862694406, txId: 281474976715662] shutting down 2025-06-25T14:44:56.264661Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519898019779994384:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:56.264724Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001803/r3tmp/tmpHR8fSy/pdisk_1.dat 2025-06-25T14:44:56.497805Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15641, node 13 2025-06-25T14:44:56.591399Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:56.591537Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:56.594476Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:56.603547Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:56.603569Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:56.603576Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:56.603725Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21286 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:44:56.781993Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:44:57.315111Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:59.621840Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519898032664897304:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:59.621928Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:59.711914Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:59.799543Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519898032664897466:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:59.799644Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:59.799960Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519898032664897471:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:59.804492Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:59.822640Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519898032664897473:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:59.877426Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519898032664897550:2801] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:59.926139Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrs4cf0ewqct3h8rcr4t8t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=Mjc0ZGNiLTM5NDA5MzRjLWJiM2Y4NjdkLTY5YTRiMTQw, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:45:00.028923Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrs7d05ccwbp6wbtzt8g1h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTY5NTM3NjgtNGRjY2EwOTMtMzA5Mzg0ZTItMWI3MzU2Nzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:45:00.032983Z node 13 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862700069, txId: 281474976715662] shutting down >> TxUsage::WriteToTopic_Demo_16_Query >> TxUsage::WriteToTopic_Demo_30_Query >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientDoesNotProvideClientCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvideIncorrectCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientDoesNotProvideAnyCerts >> YdbTableBulkUpsert::RetryOperationSync [GOOD] >> YdbTableBulkUpsert::RetryOperation >> IndexBuildTest::WithFollowers [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartNo_Query [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Query [GOOD] >> TGRpcYdbTest::CreateYqlSessionExecuteQuery [GOOD] >> TGRpcYdbTest::DeleteFromAfterCreate >> IndexBuildTest::BaseCase ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TRegisterNodeOverDiscoveryService::ServerWithoutCertVerification_ClientDoesNotProvideClientCerts [GOOD] Test command err: 2025-06-25T14:43:57.315832Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897767032630656:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:57.315894Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185c/r3tmp/tmpXmI5ti/pdisk_1.dat 2025-06-25T14:43:57.902078Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:57.902174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:57.916340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:57.967273Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5130, node 1 2025-06-25T14:43:58.112647Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:58.112671Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:58.112680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:58.112798Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:58.357948Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:61938 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:58.515093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:43:58.633050Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket D163B3200CB9F736CCA4F66B42D3AF181946786EBBFA24E280339515EC227BDA (ipv6:[::1]:36406) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-25T14:43:58.729720Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:36428) has now valid token of root@builtin 2025-06-25T14:43:58.824945Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:43:58.825021Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:43:58.825032Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:43:58.825121Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:44:01.818327Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897786961554538:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:01.818427Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185c/r3tmp/tmpQiWoR4/pdisk_1.dat 2025-06-25T14:44:02.029789Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:02.048285Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:02.048387Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:02.056936Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9014, node 4 2025-06-25T14:44:02.136074Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:02.136091Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:02.136097Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:02.136214Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21389 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:02.437895Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:02.523912Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket D163B3200CB9F736CCA4F66B42D3AF181946786EBBFA24E280339515EC227BDA (ipv6:[::1]:51366) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-25T14:44:02.633546Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:51386) has now valid token of root@builtin 2025-06-25T14:44:02.750819Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:44:02.750847Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:44:02.750855Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:44:02.750885Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:44:02.852479Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:06.464642Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897805916211365:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:06.472181Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185c/r3tmp/tmppSODuQ/pdisk_1.dat 2025-06-25T14:44:06.674539Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:06.682653Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:06.682764Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:06.687208Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19859, node 7 2025-06-25T14:44:06.757815Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:06.757835Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:06.757842Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:06.757986Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14626 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:07.015430Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called ... de(22, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1433, node 22 2025-06-25T14:44:39.065100Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:39.065127Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:39.065137Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:39.065303Z node 22 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25387 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:39.510166Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:39.724895Z node 22 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:43.715896Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[22:7519897943686171288:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:43.715980Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:49.698118Z node 22 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:50944) has now valid token of root@builtin 2025-06-25T14:44:49.770088Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:44:49.770116Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:44:49.770141Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:44:49.770186Z node 22 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:44:51.394628Z node 25 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[25:7519897999833234973:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:51.396650Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185c/r3tmp/tmpI6fozT/pdisk_1.dat 2025-06-25T14:44:51.743498Z node 25 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:51.782409Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:51.782510Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:51.787904Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28854, node 25 2025-06-25T14:44:51.957078Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:51.957100Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:51.957110Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:51.957269Z node 25 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5391 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:44:52.492645Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:44:52.496590Z node 25 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:52.656598Z node 25 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:40264) has now valid token of root@builtin 2025-06-25T14:44:52.742893Z node 25 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:44:52.742932Z node 25 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:44:52.742945Z node 25 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:44:52.742984Z node 25 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:44:57.542680Z node 28 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[28:7519898023402063285:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:57.543180Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185c/r3tmp/tmpFprEsR/pdisk_1.dat 2025-06-25T14:44:57.719924Z node 28 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:57.753438Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:57.753545Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:57.758186Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18154, node 28 2025-06-25T14:44:57.838869Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: (empty maybe) 2025-06-25T14:44:57.838902Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:57.838914Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:57.839108Z node 28 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22347 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:58.219892Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:58.354473Z node 28 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:38550) has now valid token of root@builtin 2025-06-25T14:44:58.413861Z node 28 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:44:58.413894Z node 28 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:44:58.413907Z node 28 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:44:58.413949Z node 28 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:44:58.562777Z node 28 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> YdbYqlClient::CheckDefaultTableSettings3 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::WithFollowers [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:45:00.205446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:45:00.205525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:00.205551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:45:00.205589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:45:00.206342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:45:00.206384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:45:00.206436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:00.206914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:45:00.207616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:45:00.209516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:45:00.285033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 
2025-06-25T14:45:00.285082Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:00.305786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:45:00.306144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:45:00.306299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:45:00.312656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:45:00.312932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:45:00.314369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:00.315858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:45:00.321812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:00.322931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:00.335559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:00.335644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:00.335902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:00.335947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:00.336004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:00.336091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.342793Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:45:00.496914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:45:00.497121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.497342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId 
[OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:45:00.497397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:45:00.497625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:45:00.497713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:00.499847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:00.500048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:45:00.500215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.500297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:45:00.500355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:45:00.500401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:45:00.502428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.502490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:45:00.502536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:45:00.504019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.504077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.504129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:00.504172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:45:00.514578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } 
CoordinatorID: 72057594046316545 2025-06-25T14:45:00.516282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:45:00.516490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:45:00.517382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:00.517510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:00.517575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:00.517860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:45:00.517909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:00.518082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:45:00.518164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:45:00.523182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:00.523241Z node 1 :FLAT_TX_SCHEMESHARD ... 
oard.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:45:03.287074Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:45:03.288393Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:45:03.288465Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:45:03.288495Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:45:03.288519Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-25T14:45:03.288553Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:45:03.289447Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:45:03.289551Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:45:03.289585Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:45:03.290183Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:2, at schemeshard: 72057594046678944 2025-06-25T14:45:03.290243Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:2 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:45:03.290490Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:45:03.290612Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:2 progress is 3/3 2025-06-25T14:45:03.290639Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-25T14:45:03.290669Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:2 progress is 3/3 2025-06-25T14:45:03.290691Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-25T14:45:03.290717Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation 
IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: false 2025-06-25T14:45:03.290743Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-25T14:45:03.290782Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:45:03.290824Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:45:03.290911Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:45:03.290948Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:1 2025-06-25T14:45:03.290966Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:1 2025-06-25T14:45:03.290992Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:45:03.291013Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:2 2025-06-25T14:45:03.291031Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:2 2025-06-25T14:45:03.291067Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:45:03.291092Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 1, subscribers: 1 2025-06-25T14:45:03.291132Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-25T14:45:03.291522Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:45:03.291579Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:45:03.291603Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:45:03.291635Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-25T14:45:03.291669Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:45:03.291733Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 1 2025-06-25T14:45:03.291768Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [3:340:2317] 2025-06-25T14:45:03.292857Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:45:03.295832Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:45:03.296112Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:45:03.296160Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:45:03.297165Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:45:03.297266Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:45:03.297303Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [3:707:2662] TestWaitNotification: OK eventTxId 104 2025-06-25T14:45:03.297849Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/WithFollowers" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:45:03.298102Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/WithFollowers" took 281us result status StatusSuccess 2025-06-25T14:45:03.298512Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/WithFollowers" PathDescription { Self { Name: "WithFollowers" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "WithFollowers" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "valueFloat" Type: "Float" TypeId: 33 Id: 4 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TDatabaseQuotas::DisableWritesToDatabase [GOOD] >> TGRpcAuthentication::InvalidPassword >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Table >> TGRpcNewCoordinationClient::BasicMethods [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Table [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CheckDefaultTableSettings3 [GOOD] Test command err: 2025-06-25T14:44:35.717825Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897929690393124:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:35.717903Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f8/r3tmp/tmp9q7HNo/pdisk_1.dat 2025-06-25T14:44:36.012176Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:36.056538Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 25138, node 1 2025-06-25T14:44:36.085061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:36.085139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:36.094759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:36.096698Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:36.096727Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:36.096736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:44:36.096878Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26270 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:36.360990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:36.726823Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:40.025428Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897952824466404:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:40.025477Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f8/r3tmp/tmp6oqwG4/pdisk_1.dat 2025-06-25T14:44:40.287940Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13614, node 4 2025-06-25T14:44:40.426494Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:40.426577Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:40.451046Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:40.583938Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:40.583971Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:40.583980Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:40.584125Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21188 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:40.841210Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:41.077882Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21188 2025-06-25T14:44:41.198303Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /Root/ydb_ut_tenant, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-25T14:44:41.198772Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:44:41.198803Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 
2025-06-25T14:44:41.202164Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: , status: StatusAccepted, operation: CREATE DATABASE, path: /Root/ydb_ut_tenant 2025-06-25T14:44:41.216639Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750862681260, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:44:41.219013Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715658:0 2025-06-25T14:44:41.219063Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976715658, publications: 2, subscribers: 1 2025-06-25T14:44:41.221917Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715658, subscribers: 1 2025-06-25T14:44:41.226900Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: /Root/ydb_ut_tenant, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-25T14:44:41.227562Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715659:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:44:41.227604Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:41.229448Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715659, database: /Root, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: /Root/ydb_ut_tenant 2025-06-25T14:44:41.745611Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519897955291122579:2215];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:41.759815Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:41.760150Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:41.761204Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ObjectStorage, PostgreSQL 2025-06-25T14:44:41.761235Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:44:41.774974Z node 4 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-06-25T14:44:41.781984Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:41.853505Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:42.152792Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750862682198, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:44:42.154882Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715659:0 2025-06-25T14:44:42.155118Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: ... n: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:53.336320Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:53.601762Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19563 2025-06-25T14:44:53.754669Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /Root/ydb_ut_tenant, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-25T14:44:53.755040Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:44:53.755075Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-25T14:44:53.758070Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: , status: StatusAccepted, operation: CREATE DATABASE, path: /Root/ydb_ut_tenant 2025-06-25T14:44:53.774320Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750862693818, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:44:53.777314Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715658:0 2025-06-25T14:44:53.777368Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976715658, publications: 2, subscribers: 1 2025-06-25T14:44:53.779569Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715658, subscribers: 1 2025-06-25T14:44:53.785995Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: /Root/ydb_ut_tenant, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-25T14:44:53.786426Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715659:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:44:53.786452Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:44:53.788118Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715659, database: /Root, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: /Root/ydb_ut_tenant waiting... 
2025-06-25T14:44:54.297860Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519898011833325651:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:54.297959Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:54.362885Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:54.363007Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:54.365700Z node 10 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-06-25T14:44:54.366351Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:54.739635Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750862694784, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:44:54.742003Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715659:0 2025-06-25T14:44:54.742175Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976715659, publications: 1, subscribers: 1 2025-06-25T14:44:54.743668Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715659, subscribers: 1 2025-06-25T14:44:55.306014Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:56.459884Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /Root/ydb_ut_tenant/Table-1, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-25T14:44:56.461169Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715660:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:44:56.461203Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:56.463061Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715660, database: /Root/ydb_ut_tenant, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/ydb_ut_tenant/Table-1 2025-06-25T14:44:56.546621Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750862696590, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:44:56.563577Z node 10 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715660:0 
2025-06-25T14:44:56.619047Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-25T14:44:56.619493Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:44:58.832668Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519898028740118827:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:58.832727Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f8/r3tmp/tmpqIg6d7/pdisk_1.dat 2025-06-25T14:44:59.032864Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:59.037262Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:59.037359Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:59.044293Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5760, node 13 2025-06-25T14:44:59.185293Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:59.185322Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:59.185327Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:59.185462Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21511 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:59.522683Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:59.838046Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:02.403471Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> YdbLogStore::AlterLogStore [GOOD] >> TxUsage::Offsets_Cannot_Be_Promoted_When_Reading_In_A_Transaction_Table [GOOD] >> IndexBuildTest::RejectsCreate >> VectorIndexBuildTest::TTxReply_DoExecute_Throws ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcNewCoordinationClient::BasicMethods [GOOD] Test command err: 2025-06-25T14:44:40.184646Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897950439482434:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:40.184721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017eb/r3tmp/tmp64zJcX/pdisk_1.dat 2025-06-25T14:44:40.683509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:40.683607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:40.686626Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:40.698455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:40.748989Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 65501, node 1 2025-06-25T14:44:40.881460Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:40.881479Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:40.881489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:40.881601Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:41.220516Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9107 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:41.335886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:45.014832Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897974338032121:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:45.014916Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017eb/r3tmp/tmpMPO2jh/pdisk_1.dat 2025-06-25T14:44:45.228790Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6140, node 4 2025-06-25T14:44:45.267437Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-25T14:44:45.317861Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:45.317885Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:45.317899Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:45.318043Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:45.348906Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:45.348981Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:45.354911Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28613 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:45.576882Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:46.050003Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:48.020199Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897987222934991:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:48.020728Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897987222934983:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:48.020818Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:48.024891Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:48.053001Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519897987222934997:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:44:48.117575Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897987222935074:2682] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:50.107415Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897997301592998:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:50.109365Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017eb/r3tmp/tmpa6soTt/pdisk_1.dat 2025-06-25T14:44:50.289349Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:50.305586Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:50.305670Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:50.322217Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29765, node 7 2025-06-25T14:44:50.414734Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:50.414760Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:50.414767Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:50.414935Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31496 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:50.727568Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:51.128475Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:53.465090Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519898010186495917:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:53.465211Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:53.499916Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:53.680379Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519898010186496075:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:53.680470Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:53.680689Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519898010186496080:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:53.684076Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:53.710107Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519898010186496082:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:53.789384Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519898010186496152:2792] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017eb/r3tmp/tmpjlx1XG/pdisk_1.dat 2025-06-25T14:44:55.589364Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519898016753320789:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:55.673480Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:55.755149Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:55.774183Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:55.774295Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:55.779496Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5372, node 10 2025-06-25T14:44:56.004751Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:56.004776Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:56.004784Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:56.004919Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9035 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:56.331087Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:56.489241Z node 10 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [10:7519898021048288901:2599] txid# 281474976715658, Access denied for bad@builtin on path /Root, with access CreateTable 2025-06-25T14:44:56.489411Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519898021048288901:2599] txid# 281474976715658, issues: { message: "Access denied for bad@builtin on path /Root" issue_code: 200000 severity: 1 } 2025-06-25T14:44:56.589535Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:00.449043Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519898039976645857:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:00.449207Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017eb/r3tmp/tmpu4Fcdq/pdisk_1.dat 2025-06-25T14:45:00.645469Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5208, node 13 2025-06-25T14:45:00.760516Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:00.760546Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:00.760562Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:00.760751Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:45:00.774257Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:00.774358Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:00.777723Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63609 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:01.113410Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:01.193580Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateKesus, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_kesus.cpp:30) 2025-06-25T14:45:01.465275Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientWithCorrectCerts [GOOD] >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientProvidesEmptyClientCerts >> TGRpcClientLowTest::BiStreamPing [GOOD] >> TGRpcClientLowTest::BiStreamCancelled ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbLogStore::AlterLogStore [GOOD] Test command err: 2025-06-25T14:44:40.425786Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897952318929218:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:40.425964Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017df/r3tmp/tmpfczaby/pdisk_1.dat 2025-06-25T14:44:41.030804Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10362, node 1 2025-06-25T14:44:41.235867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:41.235953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:41.239070Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:41.239091Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:41.239097Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:41.239192Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:41.242070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:41.463142Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5418 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:41.651279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:43.686489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) SUCCESS 3 rows in 0.014932s 2025-06-25T14:44:43.836017Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897965203832270:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:43.836019Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897965203832281:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:43.836106Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:43.839339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:43.861039Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897965203832284:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:44:43.961519Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897965203832358:2794] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:44.493606Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrrqnt6sw5m6hq9a3qypvs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTczYWZjZWMtMWVkNjE4Y2YtYTRiNjM3ZWQtYWM4ZTBmMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 3 rows 2025-06-25T14:44:45.943093Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897973385890540:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:45.943139Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017df/r3tmp/tmpVPpEwV/pdisk_1.dat 2025-06-25T14:44:46.071369Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:46.093997Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:46.094065Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:46.099068Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24036, node 4 2025-06-25T14:44:46.183491Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:46.183511Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:46.183518Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:46.183642Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12829 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:46.398462Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:12829 2025-06-25T14:44:46.968845Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:48.822972Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:48.965282Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:506: TAlterTable Propose, path: Root/Foo/TimestampIndex/indexImplTable, pathId: , opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-25T14:44:48.965437Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715659:1, propose status:StatusNameConflict, reason: Check failed: path: '/Root/Foo/TimestampIndex/indexImplTable', error: path is not a common path (id: [OwnerId: 72057594046644480, LocalPathId: 4], type: EPathTypeTable, state: EPathStateNoChanges), at schemeshard: 72057594046644480 2025-06-25T14:44:48.967296Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715659, database: /Root, subject: , status: StatusNameConflict, reason: Check failed: path: '/Root/Foo/TimestampIndex/indexImplTable', error: path is not a common path (id: [OwnerId: 72057594046644480, LocalPathId: 4], type: EPathTypeTable, state: EPathStateNoChanges), operation: ALTER TABLE, path: Root/Foo/TimestampIndex/indexImplTable 2025-06-25T14:44:48.967487Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897986270793823:2941] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/Foo/TimestampIndex/indexImplTable\', error: path is not a common path (id: [OwnerId: 72057594046644480, LocalPathId: 4], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } Error 128: Administrative access denied TClient::Ls request: /Root/Foo/TimestampIndex/indexImplTable TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 7205759404664 ... 
log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897997143379315:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:50.788667Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017df/r3tmp/tmpGhsL6U/pdisk_1.dat 2025-06-25T14:44:51.167686Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:51.204814Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 3109, node 7 2025-06-25T14:44:51.250707Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:51.250792Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:51.273303Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:51.347009Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:51.347029Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:51.347034Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:51.347134Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26797 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:51.615942Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:51.706690Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:51.935856Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:56.024553Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519898022236177034:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:56.024635Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017df/r3tmp/tmpQNbR2C/pdisk_1.dat 2025-06-25T14:44:56.275641Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:56.304232Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:56.304424Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:56.309292Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28707, node 10 2025-06-25T14:44:56.405693Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:56.405717Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:56.405726Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:56.405889Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23815 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:56.678705Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:56.766060Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:56.907692Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:57.089146Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:57.095750Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:45:00.767551Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519898037705264754:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:00.767612Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017df/r3tmp/tmpzTwEEO/pdisk_1.dat 2025-06-25T14:45:00.982115Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:00.982188Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:00.987182Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:00.993902Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26510, node 13 2025-06-25T14:45:01.082211Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:01.082232Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:01.082240Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:01.082394Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: 
got bad distributable configuration TClient is connected to server localhost:25440 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:01.371183Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... >> TxUsage::Offsets_Cannot_Be_Promoted_When_Reading_In_A_Transaction_Query >> VectorIndexBuildTest::CreateAndDrop >> VectorIndexBuildTest::RecreatedColumns [GOOD] >> VectorIndexBuildTest::PrefixedDuplicates >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Query >> YdbYqlClient::RetryOperationAsync [GOOD] >> YdbYqlClient::QueryLimits >> VectorIndexBuildTest::TTxReply_DoExecute_Throws [GOOD] >> VectorIndexBuildTest::TTxProgress_Throws >> IndexBuildTest::RejectsCreate [GOOD] >> IndexBuildTest::RejectsDropIndex >> VectorIndexBuildTest::ServerLessDB-smallScanBuffer-false >> TTableProfileTests::OverwriteCachingPolicy [GOOD] >> VectorIndexBuildTest::SimpleDuplicates [GOOD] >> VectorIndexBuildTest::TTxInit_Throws >> TGRpcYdbTest::DeleteFromAfterCreate [GOOD] >> YdbTableBulkUpsert::RetryOperation [GOOD] >> TGRpcAuthentication::InvalidPassword [GOOD] >> TGRpcAuthentication::DisableLoginAuthentication >> IndexBuildTest::RejectsDropIndex [GOOD] >> IndexBuildTest::RejectsCancel >> YdbOlapStore::LogLast50 [GOOD] >> YdbOlapStore::LogGrepNonExisting >> VectorIndexBuildTest::TTxProgress_Throws [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TTableProfileTests::OverwriteCachingPolicy [GOOD] Test command err: 2025-06-25T14:44:30.570725Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897908430082008:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:30.570784Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00180c/r3tmp/tmpNVSJ4d/pdisk_1.dat 2025-06-25T14:44:30.854251Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:30.893830Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 24509, 
node 1 2025-06-25T14:44:30.935892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:30.936033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:30.961357Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:30.961377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:30.961384Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:30.961492Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:30.963498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15000 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:31.256726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:15000 2025-06-25T14:44:31.569148Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:31.572185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-25T14:44:31.596761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:32.224106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:32.224173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:32.253479Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:44:32.254918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15000 2025-06-25T14:44:32.642125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:33.135454Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15000 TClient::Ls request: /Root/ydb_ut_tenant/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1750862673000 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-06-25T14:44:33.326539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient is connected to server localhost:15000 TClient::Ls request: /Root/ydb_ut_tenant/table-2 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-2" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710661 CreateStep: 1750862673630 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-06-25T14:44:33.955881Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-25T14:44:33.958596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:44:36.591842Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897933422820266:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:36.591894Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00180c/r3tmp/tmp2RU98g/pdisk_1.dat 2025-06-25T14:44:36.814488Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:36.841347Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:36.841451Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:36.845936Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26174, node 4 2025-06-25T14:44:36.857206Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-25T14:44:36.893274Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:36.893291Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:36.893297Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:36.893383Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2215 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:37.215620Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ... 
hVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-4" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-06-25T14:44:57.626385Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:58.162410Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519898008136254111:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:58.162502Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:23676 TClient::Ls request: /Root/ydb_ut_tenant/table-5 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-5" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715664 CreateStep: 1750862697940 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-5" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-06-25T14:44:58.460877Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-25T14:44:58.480753Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:45:00.883744Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519898037633079246:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:00.883843Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00180c/r3tmp/tmpoR2VOk/pdisk_1.dat 2025-06-25T14:45:01.078803Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:01.109489Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:01.109632Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:01.117500Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4031, node 13 2025-06-25T14:45:01.215608Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:01.215635Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:01.215647Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:01.215803Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22464 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:01.672003Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:45:01.890854Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22464 2025-06-25T14:45:02.171105Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) waiting... 2025-06-25T14:45:02.212858Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:02.720374Z node 15 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[15:7519898048022582779:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:02.725040Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:02.725190Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:02.720457Z node 15 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:45:02.734375Z node 13 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 15 Cookie 15 2025-06-25T14:45:02.744333Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22464 2025-06-25T14:45:03.260205Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:03.733003Z node 15 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22464 TClient::Ls request: /Root/ydb_ut_tenant/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715660 CreateStep: 1750862703610 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false 
IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-06-25T14:45:04.319167Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient is connected to server localhost:22464 TClient::Ls request: /Root/ydb_ut_tenant/table-2 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-2" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715661 CreateStep: 1750862704710 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-06-25T14:45:05.315571Z node 13 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15 2025-06-25T14:45:05.319440Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::DeleteFromAfterCreate [GOOD] Test command err: 2025-06-25T14:44:43.372697Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897967341438790:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:43.372738Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ce/r3tmp/tmpDySLSs/pdisk_1.dat 2025-06-25T14:44:43.823723Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:43.866268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:43.866362Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:43.873284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32550, node 1 2025-06-25T14:44:43.976856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:43.976882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:43.976891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:44:43.976996Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24270 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:44.307613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:44.382436Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897971636407011:2605] txid# 281474976710658, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-25T14:44:44.401664Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:47.399688Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897980869542229:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:47.399860Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ce/r3tmp/tmpSkQOp5/pdisk_1.dat 2025-06-25T14:44:47.623965Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:47.649600Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:47.649660Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8461, node 4 2025-06-25T14:44:47.665528Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:47.862651Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:47.862694Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:47.862704Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:47.862846Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: 
got bad distributable configuration TClient is connected to server localhost:62571 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:48.118162Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:48.224081Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:48.439831Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:48.448496Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:50.450994Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897993754445551:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:50.451011Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897993754445543:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:50.451092Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:50.454716Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:50.481278Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519897993754445557:2317], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T14:44:50.581601Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897993754445632:3051] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:51.513565Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrry4g7fm9hmjp6adz3pjj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NTY5YmMyYTEtMzVkMmJmZDgtYjVjMWNkZjEtNDIwNDk1YWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:51.528976Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrry4g7fm9hmjp6adz3pjj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NTY5YmMyYTEtMzVkMmJmZDgtYjVjMWNkZjEtNDIwNDk1YWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:51.758178Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykrrz75102tvj7rhx9p638f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YWI0MjhjOGEtYzU5MTE5YTYtZjEyYzAxZmUtYmQ4NDM5NGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:53.631000Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519898006504358850:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:53.636380Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ce/r3tmp/tmpZDxurA/pdisk_1.dat 2025-06-25T14:44:53.932004Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:53.970870Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:53.970970Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> ... Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:58.932246Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6613 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:59.217646Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:59.658003Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:02.113143Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519898048817575949:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:02.113158Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519898048817575956:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:02.113232Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:02.116773Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:02.151293Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519898048817575963:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:45:02.236928Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519898048817576038:2677] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:02.366511Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7519898048817576067:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[Root/NotFound]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:45:02.366808Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=10&id=NTc0YmFmZWUtNmNiZmJiZjUtMzU5MjNiY2QtZDUxM2Y2ZWQ=, ActorId: [10:7519898048817575933:2293], ActorState: ExecuteState, TraceId: 01jykrs9r596q1ag6kt0p3mxzf, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:45:04.037901Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519898054120786081:2203];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ce/r3tmp/tmpcEGyDx/pdisk_1.dat 2025-06-25T14:45:04.150151Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:45:04.221709Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:04.247292Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:04.247397Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:04.253851Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17533, node 13 2025-06-25T14:45:04.385044Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:04.385067Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:04.385077Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:04.385242Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9034 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
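Editor's note: the SCHEME_ERROR above (KiReadTable over `Root/NotFound`) is the expected outcome of querying a table that was never created. A minimal sketch of how a client would observe the same status, assuming the public YDB C++ SDK (`NYdb::NTable`); the include path, endpoint, database and table path are illustrative and not taken from this run:

```cpp
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>  // include path depends on SDK packaging

#include <iostream>

int main() {
    // Illustrative connection parameters, not the ones used by the test above.
    NYdb::TDriver driver(NYdb::TDriverConfig()
                             .SetEndpoint("grpc://localhost:2136")
                             .SetDatabase("/Root"));
    NYdb::NTable::TTableClient client(driver);

    // Error handling for session creation omitted in this sketch.
    auto session = client.CreateSession().GetValueSync().GetSession();

    // Reading a path that does not exist fails at compile time with SCHEME_ERROR,
    // matching the "Cannot find table 'db.[Root/NotFound]'" issue in the log.
    auto result = session.ExecuteDataQuery(
        "SELECT * FROM `/Root/NotFound`;",
        NYdb::NTable::TTxControl::BeginTx(NYdb::NTable::TTxSettings::SerializableRW()).CommitTx())
        .GetValueSync();

    if (result.GetStatus() == NYdb::EStatus::SCHEME_ERROR) {
        std::cerr << result.GetIssues().ToString() << std::endl;
    }

    driver.Stop(true);
    return 0;
}
```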
2025-06-25T14:45:04.758709Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:04.900802Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:04.992024Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:05.032730Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:07.533480Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519898067005689074:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:07.533487Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519898067005689066:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:07.533541Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:07.536948Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:07.559806Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519898067005689080:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T14:45:07.663874Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519898067005689153:2893] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:07.766925Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykrseta86r6fdkm2mbrwcpc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YmE1NDkxZDQtNjZhYzA3ODktYTdjYzYxODUtOWFlODQzOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:45:07.872709Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykrsf1x9vkc95pv4np6fg3x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YmE1NDkxZDQtNjZhYzA3ODktYTdjYzYxODUtOWFlODQzOGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::RetryOperation [GOOD] Test command err: 2025-06-25T14:44:40.304394Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897951885491396:2147];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ef/r3tmp/tmp8IhBF9/pdisk_1.dat 2025-06-25T14:44:40.535896Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:40.708796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:40.708894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:40.724491Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:40.744580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:40.777139Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 5642, node 1 2025-06-25T14:44:41.014955Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:41.014979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:41.014990Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:41.015107Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8408 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:44:41.304551Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:41.351092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:43.178662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) SUCCESS 3 rows in 0.017431s 2025-06-25T14:44:43.725015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897964770396129:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:43.725097Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897964770396121:2398], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:43.725227Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:43.729002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:43.762708Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897964770396135:2402], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:43.821564Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897964770396236:4210] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:44.488946Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrrqjb2yx0cgsv9ag65mcp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzVjNmJlOWQtODAzMDM3MzUtYzk5OTA0OGUtNGFlM2M2MWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 3 rows 2025-06-25T14:44:45.916633Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897975322933723:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:45.921112Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ef/r3tmp/tmpB1HApc/pdisk_1.dat 2025-06-25T14:44:46.122665Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:46.140876Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:46.140953Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:46.150194Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12970, node 4 2025-06-25T14:44:46.173044Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-25T14:44:46.348853Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:46.348876Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:46.348882Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:46.349001Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14906 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:46.618723Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:46.965310Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:48.876692Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/ui8' Only async-indexed tables are supported by BulkUpsert
: Error: Bulk upsert to table '/Root/ui8/Value_index/indexImplTable' unknown table 2025-06-25T14:44:50.544460Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897993469485869:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:50.578535Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ef/r3tmp/tmpscXAJS/pdisk_1.dat 2025-06-25T14:44:50.802888Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:50.822097Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:50.822173Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:50.829254Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:50.844893Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 4973, node 7 2025-06-25T14:44:50.980890Z node 7 :NET_CLASSIF ... ;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ef/r3tmp/tmpBc2Bia/pdisk_1.dat 2025-06-25T14:44:58.605727Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:58.624281Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:58.624747Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:58.629214Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7403, node 10 2025-06-25T14:44:58.704502Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:58.704527Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:58.704540Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:58.704707Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4709 WaitRootIsUp 'Root'... 
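Editor's note: the two BulkUpsert errors above are the negative cases of YdbTableBulkUpsert::RetryOperation: bulk upsert rejects a table that has a synchronous secondary index ("Only async-indexed tables are supported") and rejects direct writes into an index implementation table ("unknown table"). A minimal sketch of the rejected call, assuming the public YDB C++ SDK; the column names, key type and values below are illustrative, not the test's actual schema:

```cpp
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>  // include path depends on SDK packaging

#include <iostream>

// Attempts a bulk upsert of one row into `table` and prints the outcome.
// Against a table with a *synchronous* secondary index this is expected to fail,
// as in the "Only async-indexed tables are supported by BulkUpsert" error above.
void TryBulkUpsert(NYdb::NTable::TTableClient& client, const TString& table) {
    NYdb::TValueBuilder rows;
    rows.BeginList()
        .AddListItem().BeginStruct()
            .AddMember("Key").Uint64(1)       // illustrative columns, not the test schema
            .AddMember("Value").Utf8("one")
        .EndStruct()
    .EndList();

    auto result = client.BulkUpsert(table, rows.Build()).GetValueSync();
    std::cerr << table << ": " << (result.IsSuccess() ? "SUCCESS" : "FAILED")
              << " " << result.GetIssues().ToString() << std::endl;
}

int main() {
    NYdb::TDriver driver(NYdb::TDriverConfig()
                             .SetEndpoint("grpc://localhost:2136")  // illustrative endpoint
                             .SetDatabase("/Root"));
    NYdb::NTable::TTableClient client(driver);

    TryBulkUpsert(client, "/Root/ui8");                             // rejected: sync-indexed table
    TryBulkUpsert(client, "/Root/ui8/Value_index/indexImplTable");  // rejected: index impl table

    driver.Stop(true);
    return 0;
}
```

Called with a plain or async-indexed table, the same helper would report SUCCESS; the retry-injection sequence that follows in the log then exercises that successful path under injected transient errors.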
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:58.988796Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:59.466978Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:01.622676Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Injecting ABORTED 10 times Result: ABORTED Injecting ABORTED 6 times Result: ABORTED Injecting ABORTED 5 times Result: SUCCESS Injecting ABORTED 3 times Result: SUCCESS Injecting ABORTED 0 times Result: SUCCESS Injecting OVERLOADED 10 times Result: OVERLOADED Injecting OVERLOADED 6 times Result: OVERLOADED Injecting OVERLOADED 5 times Result: SUCCESS Injecting OVERLOADED 3 times Result: SUCCESS Injecting OVERLOADED 0 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 10 times Result: CLIENT_RESOURCE_EXHAUSTED Injecting CLIENT_RESOURCE_EXHAUSTED 6 times Result: CLIENT_RESOURCE_EXHAUSTED Injecting CLIENT_RESOURCE_EXHAUSTED 5 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 3 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 0 times Result: SUCCESS Injecting UNAVAILABLE 10 times Result: UNAVAILABLE Injecting UNAVAILABLE 6 times Result: UNAVAILABLE Injecting UNAVAILABLE 5 times Result: SUCCESS Injecting UNAVAILABLE 3 times Result: SUCCESS Injecting UNAVAILABLE 0 times Result: SUCCESS Injecting BAD_SESSION 10 times Result: BAD_SESSION Injecting BAD_SESSION 6 times Result: BAD_SESSION Injecting BAD_SESSION 5 times Result: SUCCESS Injecting BAD_SESSION 3 times Result: SUCCESS Injecting BAD_SESSION 0 times Result: SUCCESS Injecting SESSION_BUSY 10 times Result: SESSION_BUSY Injecting SESSION_BUSY 6 times Result: SESSION_BUSY Injecting SESSION_BUSY 5 times Result: SUCCESS Injecting SESSION_BUSY 3 times Result: SUCCESS Injecting SESSION_BUSY 0 times Result: SUCCESS Injecting NOT_FOUND 10 times Result: NOT_FOUND Injecting NOT_FOUND 6 times Result: NOT_FOUND Injecting NOT_FOUND 5 
times Result: SUCCESS Injecting NOT_FOUND 3 times Result: SUCCESS Injecting NOT_FOUND 0 times Result: SUCCESS Injecting UNDETERMINED 10 times Result: UNDETERMINED Injecting UNDETERMINED 6 times Result: UNDETERMINED Injecting UNDETERMINED 5 times Result: SUCCESS Injecting UNDETERMINED 3 times Result: SUCCESS Injecting UNDETERMINED 0 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 10 times Result: TRANSPORT_UNAVAILABLE Injecting TRANSPORT_UNAVAILABLE 6 times Result: TRANSPORT_UNAVAILABLE Injecting TRANSPORT_UNAVAILABLE 5 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 3 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 0 times Result: SUCCESS 2025-06-25T14:45:03.901910Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519898051702104075:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:03.901958Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ef/r3tmp/tmpG9JckD/pdisk_1.dat 2025-06-25T14:45:04.095288Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:04.115562Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:04.115654Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:04.121827Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13503, node 13 2025-06-25T14:45:04.218068Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:04.218092Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:04.218101Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:04.218268Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8557 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:45:04.534559Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:04.915961Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:07.091948Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Injecting ABORTED 10 times Result: ABORTED Injecting ABORTED 6 times Result: ABORTED Injecting ABORTED 5 times Result: SUCCESS Injecting ABORTED 3 times Result: SUCCESS Injecting ABORTED 0 times Result: SUCCESS Injecting OVERLOADED 10 times Result: OVERLOADED Injecting OVERLOADED 6 times Result: OVERLOADED Injecting OVERLOADED 5 times Result: SUCCESS Injecting OVERLOADED 3 times Result: SUCCESS Injecting OVERLOADED 0 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 10 times Result: CLIENT_RESOURCE_EXHAUSTED Injecting CLIENT_RESOURCE_EXHAUSTED 6 times Result: CLIENT_RESOURCE_EXHAUSTED Injecting CLIENT_RESOURCE_EXHAUSTED 5 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 3 times Result: SUCCESS Injecting CLIENT_RESOURCE_EXHAUSTED 0 times Result: SUCCESS Injecting UNAVAILABLE 10 times Result: UNAVAILABLE Injecting UNAVAILABLE 6 times Result: UNAVAILABLE Injecting UNAVAILABLE 5 times Result: SUCCESS Injecting UNAVAILABLE 3 times Result: SUCCESS Injecting UNAVAILABLE 0 times Result: SUCCESS Injecting BAD_SESSION 10 times Result: BAD_SESSION Injecting BAD_SESSION 6 times Result: BAD_SESSION Injecting BAD_SESSION 5 times Result: SUCCESS Injecting BAD_SESSION 3 times Result: SUCCESS Injecting BAD_SESSION 0 times Result: SUCCESS Injecting SESSION_BUSY 10 times Result: SESSION_BUSY Injecting SESSION_BUSY 6 times Result: SESSION_BUSY Injecting SESSION_BUSY 5 times Result: SUCCESS Injecting SESSION_BUSY 3 times Result: SUCCESS Injecting SESSION_BUSY 0 times Result: SUCCESS Injecting NOT_FOUND 10 times Result: NOT_FOUND Injecting NOT_FOUND 6 times Result: NOT_FOUND Injecting NOT_FOUND 5 times Result: SUCCESS Injecting NOT_FOUND 3 times Result: SUCCESS Injecting NOT_FOUND 0 times Result: SUCCESS Injecting UNDETERMINED 10 times Result: UNDETERMINED Injecting UNDETERMINED 6 times Result: UNDETERMINED Injecting UNDETERMINED 5 times Result: SUCCESS Injecting UNDETERMINED 3 times Result: SUCCESS Injecting UNDETERMINED 0 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 10 times Result: TRANSPORT_UNAVAILABLE Injecting TRANSPORT_UNAVAILABLE 6 times Result: TRANSPORT_UNAVAILABLE Injecting TRANSPORT_UNAVAILABLE 5 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 3 times Result: SUCCESS Injecting TRANSPORT_UNAVAILABLE 0 times Result: SUCCESS >> BasicStatistics::NotFullStatisticsDatashard [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::TTxProgress_Throws [GOOD] Test command err: Leader for TabletID 
72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:45:06.351133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:45:06.351229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:06.351270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:45:06.351309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:45:06.351353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:45:06.351385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:45:06.351440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:06.351536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:45:06.352275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:45:06.352657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:45:06.428785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:06.428849Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:06.445730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:45:06.446152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:45:06.446316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:45:06.453807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:45:06.454146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:45:06.454707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:06.454944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at 
schemeshard: 72057594046678944 2025-06-25T14:45:06.458067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:06.458232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:06.459041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:06.459084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:06.459174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:06.459209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:06.459239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:06.459309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:45:06.465712Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:45:06.608260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:45:06.608457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:06.608610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:45:06.608645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:45:06.608808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:45:06.608875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:06.610642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:06.610785Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:45:06.610927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:06.610983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:45:06.611049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:45:06.611076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:45:06.612491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:06.612545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:45:06.612582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:45:06.614166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:06.614207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:06.614262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:06.614304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:45:06.618034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:45:06.619774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:45:06.619975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:45:06.620897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:06.621030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 
72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:06.621085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:06.621393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:45:06.621451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:06.621712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:45:06.621820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:45:06.623931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:06.623985Z node 1 :FLAT_TX_SCHEMESHARD ... it to activation from: 2025-06-25T14:45:09.884236Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186233409549 Initiating switch from PreOffline to Offline state 2025-06-25T14:45:09.886991Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186233409549 Reporting state Offline to schemeshard 72057594046678944 2025-06-25T14:45:09.887157Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [2:623:2569], Recipient [2:632:2576]: NKikimr::TEvTablet::TEvFollowerGcApplied Leader for TabletID 72057594046678944 is [2:1074:2989] sender: [2:1136:2058] recipient: [2:15:2062] 2025-06-25T14:45:09.887651Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [2:1135:3039], Recipient [2:632:2576]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046678944 Status: OK ServerId: [2:1137:3040] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-25T14:45:09.887690Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:45:09.887782Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 632 RawX2: 8589937168 } TabletId: 72075186233409549 State: 4 2025-06-25T14:45:09.887857Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409549, state: Offline, at schemeshard: 72057594046678944 2025-06-25T14:45:09.889147Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:45:09.889307Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [2:1074:2989], Recipient [2:632:2576]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-25T14:45:09.889333Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-25T14:45:09.889359Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult 
datashard 72075186233409549 state Offline 2025-06-25T14:45:09.889517Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:1135:3039], Recipient [2:632:2576]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [2:1135:3039] ServerId: [2:1137:3040] } 2025-06-25T14:45:09.889548Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:45:09.889791Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-25T14:45:09.889950Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:45:09.890194Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 Forgetting tablet 72075186233409549 2025-06-25T14:45:09.890538Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [2:623:2569], Recipient [2:632:2576]: NKikimr::TEvTablet::TEvTabletDead 2025-06-25T14:45:09.890774Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409549 2025-06-25T14:45:09.890885Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409549 2025-06-25T14:45:09.892158Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:45:09.892211Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 6], at schemeshard: 72057594046678944 2025-06-25T14:45:09.892284Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T14:45:09.894358Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:45:09.894408Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-25T14:45:09.894555Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:45:09.905921Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186233409550 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:45:09.906006Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186233409550 Initiating switch from PreOffline to Offline state 2025-06-25T14:45:09.908126Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186233409550 Reporting state Offline to schemeshard 72057594046678944 2025-06-25T14:45:09.908239Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [2:738:2671], Recipient [2:747:2678]: NKikimr::TEvTablet::TEvFollowerGcApplied 
2025-06-25T14:45:09.908436Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [2:1152:3055], Recipient [2:747:2678]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046678944 Status: OK ServerId: [2:1153:3056] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-25T14:45:09.908463Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:45:09.908532Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 747 RawX2: 8589937270 } TabletId: 72075186233409550 State: 4 2025-06-25T14:45:09.908574Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409550, state: Offline, at schemeshard: 72057594046678944 2025-06-25T14:45:09.909621Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:5 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:45:09.909727Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [2:1074:2989], Recipient [2:747:2678]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-25T14:45:09.909749Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-25T14:45:09.909768Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409550 state Offline 2025-06-25T14:45:09.909848Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:1152:3055], Recipient [2:747:2678]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [2:1152:3055] ServerId: [2:1153:3056] } 2025-06-25T14:45:09.909866Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:45:09.910000Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5 TabletID: 72075186233409550 2025-06-25T14:45:09.910154Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [2:738:2671], Recipient [2:747:2678]: NKikimr::TEvTablet::TEvTabletDead 2025-06-25T14:45:09.910296Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409550 2025-06-25T14:45:09.910369Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409550 Forgetting tablet 72075186233409550 2025-06-25T14:45:09.911383Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-25T14:45:09.911556Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 1 2025-06-25T14:45:09.911807Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:45:09.911830Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# 
[OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-25T14:45:09.911884Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:45:09.913091Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-25T14:45:09.913141Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-25T14:45:09.913210Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:45:09.965782Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-25T14:45:09.966067Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 Issues { message: "Unhandled exception ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp:1138: Unreachable" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:4 Status: INVALID UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 0 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } State: STATE_UNSPECIFIED Settings { source_path: "/MyRoot/vectors" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 Issues { message: "Unhandled exception ydb/core/tx/schemeshard/schemeshard_build_index__progress.cpp:1138: Unreachable" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:4 Status: INVALID UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 0 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } State: STATE_UNSPECIFIED Settings { source_path: "/MyRoot/vectors" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } } >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesCorruptedPrivatekey [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesExpiredCert >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientProvidesEmptyClientCerts [GOOD] >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientDoesNotProvideCorrectCerts >> KqpBatchDelete::ManyPartitions_3 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::NotFullStatisticsDatashard [GOOD] Test command err: 2025-06-25T14:38:46.530349Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:46.530658Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:46.530751Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c89/r3tmp/tmpG5Fk6L/pdisk_1.dat 2025-06-25T14:38:46.836470Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27188, node 1 2025-06-25T14:38:47.055350Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:47.055390Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:47.055417Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:47.055825Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:47.057857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:47.162817Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:47.162963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:47.177507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10584 2025-06-25T14:38:47.716014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:50.916378Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:50.953968Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:50.954068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:51.005629Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:51.008020Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:51.216248Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:51.256411Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:51.257048Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:51.257539Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:51.257683Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:51.257958Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:51.258037Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:51.258113Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:51.258219Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:51.258296Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:51.467068Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:51.467177Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:51.480769Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:51.618233Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:51.658800Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:51.658911Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:51.690658Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:51.692444Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:51.692646Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:51.692818Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:51.692863Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:51.692910Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:51.692952Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:51.692998Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:51.694657Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:51.729225Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:51.729322Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:51.736210Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1805:2572] 2025-06-25T14:38:51.746554Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1825:2582] 2025-06-25T14:38:51.747021Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1825:2582], schemeshard id = 72075186224037897 2025-06-25T14:38:51.757210Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:38:51.778077Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:51.778140Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:51.778236Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:38:51.792833Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:51.800425Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:51.800566Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:51.982675Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:52.154535Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:52.222758Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:38:52.851465Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:53.067118Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2148:3024], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:53.067252Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:53.083205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:53.421073Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2440:3070], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:53.421166Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:53.421959Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2444:3074]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:38:53.422097Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:38:53.422148Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2446:3076] 2025-06-25T14:38:53.422195Z no ... 80 2025-06-25T14:44:17.343690Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-25T14:44:17.518139Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:17.518211Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:18.844828Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:44:18.845179Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:44:18.845439Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:44:20.299194Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:20.299249Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:22.852882Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:44:22.867505Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:22.867582Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:25.447865Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:44:25.448029Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:44:25.448330Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:44:25.460807Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:25.460867Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:27.946913Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:27.946978Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:29.077193Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:44:30.493664Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:30.493742Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-25T14:44:31.839661Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:44:31.840048Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:44:31.840344Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:44:33.176531Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:33.176597Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:35.653632Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:44:35.664406Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:35.664481Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:38.325785Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:44:38.326181Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:44:38.326462Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:44:38.338036Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:38.338111Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:40.993027Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:40.993103Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:42.169328Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:44:43.476843Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:43.476924Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:44.648856Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:44:44.648977Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:44:44.649205Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:44:45.879132Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:45.879193Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:48.220725Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:44:48.231616Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:48.231683Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-25T14:44:50.548193Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:44:50.548617Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:44:50.548921Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:44:50.572915Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:50.572993Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:53.280696Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:53.280760Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:54.596828Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:44:56.014922Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-25T14:44:56.014998Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7961: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T14:44:56.015033Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7992: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T14:44:56.015068Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-25T14:44:56.226186Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:56.226259Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:44:57.611740Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:44:57.612165Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:44:57.612442Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:44:58.879636Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:44:58.879716Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:01.194922Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:45:01.205654Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:01.205719Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-25T14:45:03.483643Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:45:03.483827Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:45:03.484166Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T14:45:03.495342Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:03.495415Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:03.517792Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-25T14:45:03.517891Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 233.000000s, at schemeshard: 72075186224037897 2025-06-25T14:45:03.518134Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 49 ... waiting for TEvSchemeShardStats 2 (done) ... waiting for TEvPropagateStatistics 2025-06-25T14:45:03.536945Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T14:45:05.813810Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:05.813916Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:07.001256Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:45:08.431457Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:08.431519Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:09.716095Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T14:45:09.716515Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 ... 
waiting for TEvPropagateStatistics (done) 2025-06-25T14:45:09.716932Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:12601:7180]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:45:09.717551Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:45:09.720294Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T14:45:09.724497Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [2:12601:7180], StatRequests.size() = 1 >> TGRpcClientLowTest::BiStreamCancelled [GOOD] >> VectorIndexBuildTest::TTxInit_Throws [GOOD] |85.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientDoesNotProvideAnyCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesCorruptedCert >> TGRpcStreamingTest::ClientNeverWrites >> TGRpcStreamingTest::WritesDoneFromClient >> TGRpcStreamingTest::SimpleEcho >> TGRpcStreamingTest::WriteAndFinishWorks >> SystemView::ShowCreateTableSequences [GOOD] >> SystemView::ShowCreateTablePartitionPolicyIndexTable >> YdbYqlClient::QueryLimits [GOOD] >> YdbYqlClient::QueryStats ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::TTxInit_Throws [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:45:00.205444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:45:00.205526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:00.205555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:45:00.205602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:45:00.206328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:45:00.206359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:45:00.206426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:00.206913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:45:00.207608Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:45:00.209514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:45:00.282892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:00.282962Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:00.303773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:45:00.304153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:45:00.304294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:45:00.309685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:45:00.310928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:45:00.314382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:00.315875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:45:00.321677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:00.322938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:00.335614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:00.335728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:00.335896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:00.335958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:00.336018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:00.336123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.343363Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:45:00.471628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T14:45:00.472785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.473752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:45:00.473795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:45:00.474905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:45:00.474979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:00.477471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:00.478234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:45:00.478401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.478514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:45:00.478557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:45:00.478595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:45:00.480298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.480364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:45:00.480408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:45:00.481619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.481654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.481709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:00.481750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:45:00.491051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:45:00.493180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:45:00.493381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:45:00.495445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:00.495590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:00.495651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:00.496841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:45:00.496926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:00.497141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:45:00.497230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:45:00.499630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:00.499676Z node 1 :FLAT_TX_SCHEMESHARD ... 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:45:11.981415Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186233409552 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:45:11.981568Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186233409552 Initiating switch from PreOffline to Offline state 2025-06-25T14:45:11.984667Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186233409552 Reporting state Offline to schemeshard 72057594046678944 2025-06-25T14:45:11.984851Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [2:1229:3136], Recipient [2:1238:3143]: NKikimr::TEvTablet::TEvFollowerGcApplied Leader for TabletID 72057594046678944 is [2:2762:4639] sender: [2:2821:2058] recipient: [2:15:2062] 2025-06-25T14:45:11.985449Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [2:2820:4686], Recipient [2:1238:3143]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046678944 Status: OK ServerId: [2:2822:4687] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-25T14:45:11.985489Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:45:11.985601Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 1238 RawX2: 8589937735 } TabletId: 72075186233409552 State: 4 2025-06-25T14:45:11.985693Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409552, state: Offline, at schemeshard: 72057594046678944 2025-06-25T14:45:11.987268Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:7 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:45:11.987445Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [2:2762:4639], Recipient [2:1238:3143]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-25T14:45:11.987481Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-25T14:45:11.987519Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409552 state Offline 2025-06-25T14:45:11.987711Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:2820:4686], Recipient [2:1238:3143]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [2:2820:4686] ServerId: [2:2822:4687] } 2025-06-25T14:45:11.987744Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:45:11.987981Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 7 TxId_Deprecated: 7 TabletID: 72075186233409552 2025-06-25T14:45:11.988266Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [2:1229:3136], Recipient [2:1238:3143]: NKikimr::TEvTablet::TEvTabletDead 2025-06-25T14:45:11.988538Z node 2 :TX_DATASHARD 
INFO: datashard.cpp:257: OnTabletDead: 72075186233409552 2025-06-25T14:45:11.988661Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409552 2025-06-25T14:45:11.990074Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 7 ShardOwnerId: 72057594046678944 ShardLocalIdx: 7, at schemeshard: 72057594046678944 2025-06-25T14:45:11.990389Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 1 Forgetting tablet 72075186233409552 2025-06-25T14:45:11.990753Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:45:11.990798Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 9], at schemeshard: 72057594046678944 2025-06-25T14:45:11.990875Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T14:45:11.993559Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-25T14:45:11.993647Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-06-25T14:45:11.994087Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:45:12.006069Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186233409553 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:45:12.006191Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186233409553 Initiating switch from PreOffline to Offline state 2025-06-25T14:45:12.008994Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186233409553 Reporting state Offline to schemeshard 72057594046678944 2025-06-25T14:45:12.009140Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [2:1640:3535], Recipient [2:1649:3542]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-25T14:45:12.009378Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [2:2839:4704], Recipient [2:1649:3542]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046678944 Status: OK ServerId: [2:2840:4705] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-25T14:45:12.009424Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:45:12.009536Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 1649 RawX2: 8589938134 } TabletId: 72075186233409553 State: 4 2025-06-25T14:45:12.009608Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409553, state: Offline, at 
schemeshard: 72057594046678944 2025-06-25T14:45:12.011165Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:8 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:45:12.011309Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [2:2762:4639], Recipient [2:1649:3542]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-25T14:45:12.011342Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-25T14:45:12.011364Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409553 state Offline 2025-06-25T14:45:12.011486Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:2839:4704], Recipient [2:1649:3542]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [2:2839:4704] ServerId: [2:2840:4705] } 2025-06-25T14:45:12.011518Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:45:12.011691Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 8 TxId_Deprecated: 8 TabletID: 72075186233409553 2025-06-25T14:45:12.011806Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 8 ShardOwnerId: 72057594046678944 ShardLocalIdx: 8, at schemeshard: 72057594046678944 2025-06-25T14:45:12.011993Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 1 Forgetting tablet 72075186233409553 2025-06-25T14:45:12.012237Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [2:1640:3535], Recipient [2:1649:3542]: NKikimr::TEvTablet::TEvTabletDead 2025-06-25T14:45:12.012416Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409553 2025-06-25T14:45:12.012495Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409553 2025-06-25T14:45:12.013609Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:45:12.013652Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 10], at schemeshard: 72057594046678944 2025-06-25T14:45:12.013713Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:45:12.015965Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:8 2025-06-25T14:45:12.016017Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:8 tabletId 72075186233409553 2025-06-25T14:45:12.016440Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:45:12.038271Z node 2 :BUILD_INDEX DEBUG: 
schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-25T14:45:12.038567Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 Issues { message: "Init IndexBuild unhandled exception ydb/core/tx/schemeshard/schemeshard_info_types.h:3656: Condition violated: `creationConfig.ParseFromString(row.template GetValue())\'" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/vectors" index { name: "by_embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 100 } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 Issues { message: "Init IndexBuild unhandled exception ydb/core/tx/schemeshard/schemeshard_info_types.h:3656: Condition violated: `creationConfig.ParseFromString(row.template GetValue())\'" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/vectors" index { name: "by_embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 100 } ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TGRpcClientLowTest::BiStreamCancelled [GOOD] Test command err: 2025-06-25T14:44:46.756811Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897980329298523:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:46.756866Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b1/r3tmp/tmpN78Pyx/pdisk_1.dat 2025-06-25T14:44:47.253115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:47.253200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:47.265836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:47.299119Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:47.313327Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 31246, node 1 2025-06-25T14:44:47.350365Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:47.350389Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:47.350398Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:47.350500Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30792 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:47.704768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:47.804710Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30792 TClient is connected to server localhost:30792 2025-06-25T14:44:48.162795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:44:49.727449Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897993214201414:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:49.727459Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897993214201424:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:49.727515Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:49.730707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:49.747291Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897993214201429:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:44:49.826029Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897993214201505:2690] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } TClient is connected to server localhost:30792 TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750862687777 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "\n\016\010\001\020\200\204\002\032\004user \003" EffectiveACL: "\n\016\010\001\020\200\204\002\032\004user \003" PathVersion: 10 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 10 ACLVersion: 1 EffectiveACLVersion: 1 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: true } Children { Name: ".metadata" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1750862689786 ParentPathId: 1 PathState: EPathStateCreate Owner: "met... (TRUNCATED) 2025-06-25T14:44:52.060685Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519898002176719385:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:52.060741Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b1/r3tmp/tmpiO6jNq/pdisk_1.dat 2025-06-25T14:44:52.477383Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:52.495518Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:52.495611Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:52.502908Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62833, node 4 2025-06-25T14:44:52.677013Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:52.677031Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:52.677044Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:52.677171Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25951 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:52.993760Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:53.186094Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25951 2025-06-25T14:44:57.000479Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519898022032325141:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:57.000554Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b1/r3tmp/tmpoQdvz7/pdisk_1.dat 2025-06-25T14:44:57.174859Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:57.189883Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:57.189969Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:57.195469Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11450, node 7 2025-06-25T14:44:57.336066Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable ... ile: (empty maybe) 2025-06-25T14:44:57.336233Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6180 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:57.628533Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:6180 2025-06-25T14:44:58.013507Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6180 2025-06-25T14:44:58.159013Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:45:00.120323Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519898039212195341:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:00.120326Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519898039212195349:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:00.120400Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:00.123723Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:00.144578Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519898039212195355:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T14:45:00.240249Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519898039212195428:2698] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } TClient is connected to server localhost:6180 TClient::Ls request: Root 2025-06-25T14:45:00.587463Z node 7 :TX_PROXY ERROR: describe.cpp:395: Access denied for user with access DescribeSchema to path Root TClient::Ls response: Status: 128 StatusCode: ERROR Issues { message: "Default error" severity: 1 } SchemeStatus: 12 ErrorReason: "Access denied" 2025-06-25T14:45:02.029056Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519898047785894948:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:02.037331Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b1/r3tmp/tmpGbB8zy/pdisk_1.dat 2025-06-25T14:45:02.180379Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:02.201748Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:02.201843Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:02.208885Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63760, node 10 2025-06-25T14:45:02.284295Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:02.284338Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:02.284350Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:02.284546Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14026 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:45:02.593608Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:02.666771Z node 10 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket some****oken (BB86510A): Could not find correct token validator 2025-06-25T14:45:02.666900Z node 10 :GRPC_SERVER ERROR: ydb_dummy.cpp:94: Received TEvRefreshTokenResponse, Authenticated = 0 2025-06-25T14:45:07.070863Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519898066498075550:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:07.071267Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b1/r3tmp/tmpl4xa9p/pdisk_1.dat 2025-06-25T14:45:07.340304Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:07.360336Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:07.360455Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:07.372376Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:07.397581Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 30344, node 13 2025-06-25T14:45:07.469149Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:07.469168Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:07.469174Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:07.469300Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23806 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:45:07.890497Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:08.081486Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TxUsage::WriteToTopic_Demo_30_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::ManyPartitions_3 [GOOD] Test command err: Trying to start YDB, gRPC: 17116, MsgBus: 64423 2025-06-25T14:42:40.738641Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897439259625521:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.738847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c32/r3tmp/tmpDaEhPO/pdisk_1.dat 2025-06-25T14:42:41.588884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.588986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.640555Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.675406Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17116, node 1 2025-06-25T14:42:41.749377Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:42.720967Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.720992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.720997Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.721181Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64423 TClient is connected to server localhost:64423 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.406533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.537181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.616193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.742415Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897439259625521:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.742554Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828293Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897460734463432:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828439Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.963918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.989899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.013978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.038957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.062182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.091565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.148583Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897473619365988:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.148670Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.148874Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897473619365993:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.152575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.165650Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897473619365995:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.223452Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897473619366046:3432] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.094487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:56.548613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:42:56.548650Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Trying to start YDB, gRPC: 23362, MsgBus: 21588 2025-06-25T14:42:57.782359 ... pe: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 19359, MsgBus: 5311 2025-06-25T14:44:58.943608Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519898030441772653:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:58.943679Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c32/r3tmp/tmpxGzCmB/pdisk_1.dat 2025-06-25T14:44:59.095655Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:59.109203Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:59.109300Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:59.112796Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19359, node 12 2025-06-25T14:44:59.161497Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:59.161518Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:59.161530Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:59.161685Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5311 TClient is connected to server localhost:5311 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:59.942472Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:59.952421Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:59.963037Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:00.051169Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:00.276279Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:00.371229Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:03.943949Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519898030441772653:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:03.944052Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:04.329416Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519898056211578039:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:04.329620Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:04.391923Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:04.442350Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:04.525045Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:04.572273Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:04.658229Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:04.714624Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:04.808080Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:04.920719Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519898056211578706:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:04.920854Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:04.921997Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519898056211578711:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:04.927844Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:04.946779Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519898056211578713:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:05.040667Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519898060506546066:3435] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:06.745448Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... |85.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest |85.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> VectorIndexBuildTest::CreateAndDrop [GOOD] >> VectorIndexBuildTest::CommonDB >> TGRpcAuthentication::DisableLoginAuthentication [GOOD] >> ReadRows::KillTabletDuringRead >> TxUsage::WriteToTopic_Demo_31_Table >> YdbTableBulkUpsert::Nulls [GOOD] >> YdbTableBulkUpsert::NotNulls >> HttpRequest::Probe [GOOD] |85.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> IndexBuildTest::RejectsCancel [GOOD] >> TGRpcStreamingTest::ClientDisconnects >> TGRpcStreamingTest::WritesDoneFromClient [GOOD] >> TGRpcStreamingTest::WriteAndFinishWorks [GOOD] >> TGRpcStreamingTest::ClientNeverWrites [GOOD] >> TGRpcStreamingTest::SimpleEcho [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::RejectsCancel [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:45:06.154133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:45:06.154219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:06.154275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:45:06.154309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:45:06.154348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:45:06.154375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:45:06.154440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, 
InflightLimit# 10 2025-06-25T14:45:06.154514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:45:06.155191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:45:06.155566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:45:06.231110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:06.231169Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:06.246934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:45:06.247321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:45:06.247468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:45:06.253100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:45:06.253402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:45:06.253955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:06.254210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:45:06.257663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:06.257834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:06.258950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:06.259002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:06.259161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:06.259214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:06.259252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:06.259342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:45:06.266104Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 
2025-06-25T14:45:06.372903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:45:06.373087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:06.373223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:45:06.373262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:45:06.373447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:45:06.373529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:06.375666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:06.375829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:45:06.376020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:06.376088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:45:06.376130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:45:06.376162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:45:06.377968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:06.378041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:45:06.378113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:45:06.379445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:06.379472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:06.379517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:06.379547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:45:06.382341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:45:06.383747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:45:06.383904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:45:06.384679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:06.384791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:06.384838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:06.385099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:45:06.385142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:06.385294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:45:06.385373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:45:06.387106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:06.387147Z node 1 :FLAT_TX_SCHEMESHARD ... 
max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { seconds: 30 } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 Issues { message: "TShardStatus { ShardIdx: 72057594046678944:2 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:3 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:4 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:5 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:6 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:7 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:8 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:9 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:10 Status: DONE UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n SeqNoRound: 1 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } State: STATE_DONE Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { seconds: 30 } } 2025-06-25T14:45:14.455096Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:45:14.455288Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 204us result status StatusSuccess 2025-06-25T14:45:14.455598Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "index1" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 
10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:14.457442Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:45:14.457713Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/index1" took 285us result status StatusSuccess 2025-06-25T14:45:14.458471Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/index1" PathDescription { Self { Name: "index1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "index1" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 
InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> IndexBuildTest::BaseCase [GOOD] >> IndexBuildTest::CancellationNoTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> HttpRequest::Probe [GOOD] Test command err: 2025-06-25T14:38:42.549485Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:42.549804Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:42.549912Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001cb0/r3tmp/tmpcL7blj/pdisk_1.dat 2025-06-25T14:38:42.848243Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30167, node 1 2025-06-25T14:38:43.068331Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:43.068390Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:43.068446Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:43.069026Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:43.075061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:43.171467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:43.171597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:43.186218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7287 2025-06-25T14:38:43.703293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:46.545155Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:46.577992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:46.578108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:46.615895Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:46.618142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:46.833012Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:46.868070Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.868614Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.869223Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.869367Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.869571Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.869666Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.869731Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.869798Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.869846Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:47.057708Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:47.057841Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:47.070834Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:47.226347Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:47.269802Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:47.269912Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:47.330228Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:47.330442Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:47.330675Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:47.330736Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:47.330790Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:47.330842Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:47.330898Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:47.330966Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:47.331501Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:47.358412Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:47.358533Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:47.371062Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:38:47.377657Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T14:38:47.377837Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T14:38:47.385557Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:38:47.417978Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:47.418055Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:47.418162Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:38:47.431057Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:47.438626Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:47.438776Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:47.622797Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:47.743569Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:47.811865Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:38:48.332245Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:48.690514Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:48.690681Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:48.717968Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:38:49.051484Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2293:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:38:49.051662Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2293:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:38:49.051941Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2293:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:38:49.052063Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2293:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:38:49.052182Z node 2 :TX_COLUMNSHARD WARN: ... BUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T14:45:13.110150Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T14:45:13.110414Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T14:45:13.111236Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:17338:11224], server id = [2:17343:11229], tablet id = 72075186224037899, status = OK 2025-06-25T14:45:13.111361Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:17338:11224], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:45:13.111622Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:17339:11225], server id = [2:17344:11230], tablet id = 72075186224037900, status = OK 2025-06-25T14:45:13.111679Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:17339:11225], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:45:13.113431Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:17340:11226], server id = [2:17346:11232], tablet id = 72075186224037901, status = OK 2025-06-25T14:45:13.113503Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:17340:11226], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:45:13.113809Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:17341:11227], server id = [2:17345:11231], tablet id = 72075186224037902, status = OK 2025-06-25T14:45:13.113863Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:17341:11227], path = { OwnerId: 
72075186224037897 LocalId: 4 } 2025-06-25T14:45:13.114666Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:17342:11228], server id = [2:17348:11234], tablet id = 72075186224037903, status = OK 2025-06-25T14:45:13.114722Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:17342:11228], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:45:13.115117Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T14:45:13.115995Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T14:45:13.116793Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:17338:11224], server id = [2:17343:11229], tablet id = 72075186224037899 2025-06-25T14:45:13.116843Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:45:13.117112Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:17339:11225], server id = [2:17344:11230], tablet id = 72075186224037900 2025-06-25T14:45:13.117141Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:45:13.117611Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T14:45:13.118071Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T14:45:13.118299Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:17340:11226], server id = [2:17346:11232], tablet id = 72075186224037901 2025-06-25T14:45:13.118327Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:45:13.118584Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:17351:11237], server id = [2:17355:11241], tablet id = 72075186224037904, status = OK 2025-06-25T14:45:13.118650Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:17351:11237], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:45:13.118880Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:17353:11239], server id = [2:17356:11242], tablet id = 72075186224037905, status = OK 2025-06-25T14:45:13.118925Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:17353:11239], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:45:13.119422Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037903 2025-06-25T14:45:13.120020Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:17341:11227], server id = [2:17345:11231], tablet id = 72075186224037902 2025-06-25T14:45:13.120050Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:45:13.120352Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:17357:11243], server id = [2:17359:11245], tablet id = 72075186224037906, status = OK 2025-06-25T14:45:13.120411Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:17357:11243], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:45:13.120884Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: 
EvClientConnected, node id = 2, client id = [2:17358:11244], server id = [2:17360:11246], tablet id = 72075186224037907, status = OK 2025-06-25T14:45:13.120938Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:17358:11244], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:45:13.121633Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:17342:11228], server id = [2:17348:11234], tablet id = 72075186224037903 2025-06-25T14:45:13.121664Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:45:13.121839Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037904 2025-06-25T14:45:13.122398Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:17362:11248], server id = [2:17364:11250], tablet id = 72075186224037908, status = OK 2025-06-25T14:45:13.122458Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:17362:11248], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T14:45:13.122885Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-25T14:45:13.123500Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:17351:11237], server id = [2:17355:11241], tablet id = 72075186224037904 2025-06-25T14:45:13.123533Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:45:13.123851Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037906 2025-06-25T14:45:13.124175Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:17353:11239], server id = [2:17356:11242], tablet id = 72075186224037905 2025-06-25T14:45:13.124202Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:45:13.124664Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037907 2025-06-25T14:45:13.124991Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:17357:11243], server id = [2:17359:11245], tablet id = 72075186224037906 2025-06-25T14:45:13.125019Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:45:13.125106Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037908 2025-06-25T14:45:13.125162Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T14:45:13.125348Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T14:45:13.125572Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T14:45:13.125987Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T14:45:13.129843Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:17358:11244], server id = [2:17360:11246], tablet id = 72075186224037907 2025-06-25T14:45:13.129881Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:45:13.130580Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:17362:11248], server id = [2:17364:11250], tablet id = 72075186224037908 2025-06-25T14:45:13.130620Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T14:45:13.131039Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List<Uint32>; DECLARE $data AS List<String>; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T14:45:13.173738Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YjNhMDE2MWEtYzg1OTkxM2EtYmUwNzJlNjktNWQ4YmRiOTc=, TxId: 2025-06-25T14:45:13.173845Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YjNhMDE2MWEtYzg1OTkxM2EtYmUwNzJlNjktNWQ4YmRiOTc=, TxId: 2025-06-25T14:45:13.174628Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T14:45:13.189185Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T14:45:13.189257Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete.
Send TEvAnalyzeResponse, OperationId=̟m=3 , ActorId=[1:5776:3841] 2025-06-25T14:45:13.191890Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:17404:9492]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T14:45:13.192106Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:45:13.192154Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T14:45:13.218837Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T14:45:13.218903Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T14:45:13.218955Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 2 ] 2025-06-25T14:45:13.228092Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 Answer: '/Root/Database/Table1[Value]=4' >> TGRpcStreamingTest::ReadFinish ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::WritesDoneFromClient [GOOD] Test command err: 2025-06-25T14:45:12.620466Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898089791274696:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:12.620536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017c7/r3tmp/tmpm8H0aH/pdisk_1.dat 2025-06-25T14:45:12.984608Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:13.020945Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:13.021080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:13.024401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:13.078130Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f00002a080] stream accepted Name# Session ok# true peer# ipv6:[::1]:50184 2025-06-25T14:45:13.078477Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f00002a080] facade attach Name# Session actor# [1:7519898094086242473:2258] peer# ipv6:[::1]:50184 2025-06-25T14:45:13.078496Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x51f00002a080] facade read Name# Session peer# ipv6:[::1]:50184 2025-06-25T14:45:13.078639Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x51f00002a080] read finished Name# Session ok# false data# peer# ipv6:[::1]:50184 2025-06-25T14:45:13.078662Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:302: Received TEvReadFinished, success = 0 2025-06-25T14:45:13.078690Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x51f00002a080] facade finish Name# Session peer# ipv6:[::1]:50184 grpc status# (9) message# Everything is A-OK 
2025-06-25T14:45:13.079075Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f00002a080] stream done notification Name# Session ok# true peer# ipv6:[::1]:50184 2025-06-25T14:45:13.079105Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f00002a080] stream finished Name# Session ok# true peer# ipv6:[::1]:50184 grpc status# (9) message# Everything is A-OK 2025-06-25T14:45:13.079117Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f00002a080] deregistering request Name# Session peer# ipv6:[::1]:50184 (finish done) 2025-06-25T14:45:13.079193Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:312: Received TEvNotifiedWhenDone ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::ClientNeverWrites [GOOD] Test command err: 2025-06-25T14:45:12.621061Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898091890185628:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:12.621166Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b2/r3tmp/tmp7PKhlz/pdisk_1.dat 2025-06-25T14:45:12.981600Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:13.039902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:13.039989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:13.044633Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:13.078111Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f00002ae80] stream accepted Name# Session ok# true peer# ipv6:[::1]:55272 2025-06-25T14:45:13.078854Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f00002ae80] facade attach Name# Session actor# [1:7519898096185153406:2258] peer# ipv6:[::1]:55272 2025-06-25T14:45:13.078883Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x51f00002ae80] facade read Name# Session peer# ipv6:[::1]:55272 2025-06-25T14:45:13.078937Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:401: [0x51f00002ae80] facade write Name# Session data# peer# ipv6:[::1]:55272 2025-06-25T14:45:13.079185Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x51f00002ae80] facade finish Name# Session peer# ipv6:[::1]:55272 grpc status# (0) message# 2025-06-25T14:45:13.079208Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x51f00002ae80] write finished Name# Session ok# true peer# ipv6:[::1]:55272 2025-06-25T14:45:13.079229Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:187: Received TEvWriteFinished, success = 1 2025-06-25T14:45:13.079418Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x51f00002ae80] read finished Name# Session ok# false data# peer# ipv6:[::1]:55272 2025-06-25T14:45:13.079444Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f00002ae80] stream finished Name# Session ok# true peer# ipv6:[::1]:55272 grpc status# (0) message# 2025-06-25T14:45:13.079484Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f00002ae80] deregistering request Name# Session peer# ipv6:[::1]:55272 (finish done) 2025-06-25T14:45:13.079615Z node 1 :GRPC_SERVER DEBUG: 
grpc_streaming.h:268: [0x51f00002ae80] stream done notification Name# Session ok# true peer# ipv6:[::1]:55272 2025-06-25T14:45:13.080469Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:181: Received TEvReadFinished, success = 0 2025-06-25T14:45:13.080483Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:194: Received TEvNotifiedWhenDone ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::WriteAndFinishWorks [GOOD] Test command err: 2025-06-25T14:45:12.620368Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898091891295123:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:12.620455Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ab/r3tmp/tmppupTpE/pdisk_1.dat 2025-06-25T14:45:13.015950Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:13.022183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:13.022309Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:13.029126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:13.080868Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f000029280] stream accepted Name# Session ok# true peer# ipv6:[::1]:38344 2025-06-25T14:45:13.081921Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f000029280] facade attach Name# Session actor# [1:7519898096186262898:2258] peer# ipv6:[::1]:38344 2025-06-25T14:45:13.081951Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:401: [0x51f000029280] facade write Name# Session data# peer# ipv6:[::1]:38344 2025-06-25T14:45:13.082150Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:396: [0x51f000029280] facade write Name# Session data# peer# ipv6:[::1]:38344 grpc status# (0) message# 2025-06-25T14:45:13.082559Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x51f000029280] write finished Name# Session ok# true peer# ipv6:[::1]:38344 2025-06-25T14:45:13.082895Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f000029280] stream done notification Name# Session ok# true peer# ipv6:[::1]:38344 2025-06-25T14:45:13.083038Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x51f000029280] write finished Name# Session ok# true peer# ipv6:[::1]:38344 2025-06-25T14:45:13.083088Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:347: Received TEvWriteFinished, success = 1 2025-06-25T14:45:13.083100Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:347: Received TEvWriteFinished, success = 1 2025-06-25T14:45:13.083119Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f000029280] stream finished Name# Session ok# true peer# ipv6:[::1]:38344 grpc status# (0) message# 2025-06-25T14:45:13.083182Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f000029280] deregistering request Name# Session peer# ipv6:[::1]:38344 (finish done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::SimpleEcho [GOOD] Test command err: 2025-06-25T14:45:12.620575Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898088049148523:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:12.620631Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017c0/r3tmp/tmpGtutfC/pdisk_1.dat 2025-06-25T14:45:12.981675Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:13.032117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:13.032203Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:13.033943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:13.099242Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f00002a080] stream accepted Name# Session ok# true peer# ipv6:[::1]:57678 2025-06-25T14:45:13.100848Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f00002a080] facade attach Name# Session actor# [1:7519898092344116298:2258] peer# ipv6:[::1]:57678 2025-06-25T14:45:13.100891Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x51f00002a080] facade read Name# Session peer# ipv6:[::1]:57678 2025-06-25T14:45:13.101068Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x51f00002a080] read finished Name# Session ok# true data# peer# ipv6:[::1]:57678 2025-06-25T14:45:13.104251Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:142: Received TEvReadFinished, success = 1 2025-06-25T14:45:13.104295Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:401: [0x51f00002a080] facade write Name# Session data# peer# ipv6:[::1]:57678 2025-06-25T14:45:13.104664Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x51f00002a080] facade finish Name# Session peer# ipv6:[::1]:57678 grpc status# (0) message# 2025-06-25T14:45:13.105261Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x51f00002a080] write finished Name# Session ok# true peer# ipv6:[::1]:57678 2025-06-25T14:45:13.105516Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f00002a080] stream done notification Name# Session ok# true peer# ipv6:[::1]:57678 2025-06-25T14:45:13.105591Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f00002a080] stream finished Name# Session ok# true peer# ipv6:[::1]:57678 grpc status# (0) message# 2025-06-25T14:45:13.105641Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f00002a080] deregistering request Name# Session peer# ipv6:[::1]:57678 (finish done) >> IndexBuildTest::CancellationNoTable [GOOD] >> IndexBuildTest::CancelBuild >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientDoesNotProvideCorrectCerts [GOOD] >> VectorIndexBuildTest::ServerLessDB-smallScanBuffer-false [GOOD] >> VectorIndexBuildTest::ServerLessDB-smallScanBuffer-true |85.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |85.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |85.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest >> TxUsage::WriteToTopic_Demo_40_Query [GOOD] |85.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |85.1%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/kqp/executer_actor/ut/unittest >> TxUsage::WriteToTopic_Demo_16_Query [GOOD] |85.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest >> YdbOlapStore::LogNonExistingRequest [GOOD] >> YdbOlapStore::LogNonExistingUserId ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TRegisterNodeOverLegacyService::ServerWithCertVerification_ClientDoesNotProvideCorrectCerts [GOOD] Test command err: 2025-06-25T14:44:44.252633Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897969997535165:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:44.252760Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017c2/r3tmp/tmp4gyEFD/pdisk_1.dat 2025-06-25T14:44:44.645018Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:44.676696Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:44.676772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 14171, node 1 2025-06-25T14:44:44.735115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:44.789616Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:44.789649Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:44.789658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:44.789793Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8072 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:45.116677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:45.266041Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:45.330631Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:60898) has now valid token of root@builtin 2025-06-25T14:44:45.428947Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:44:45.428972Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:44:45.428980Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:44:45.429011Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:44:48.607787Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897987197331330:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:48.608899Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017c2/r3tmp/tmphL940w/pdisk_1.dat 2025-06-25T14:44:48.759309Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:48.778673Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:48.778740Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:48.785421Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24577, node 4 2025-06-25T14:44:48.888851Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:48.888893Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:48.888901Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:48.889018Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16130 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:49.117486Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:49.256133Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:39552) has now valid token of root@builtin 2025-06-25T14:44:49.302303Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:44:49.302330Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:44:49.302338Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:44:49.302370Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:44:52.823924Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519898001993945020:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:52.823978Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017c2/r3tmp/tmp8ZtjWM/pdisk_1.dat 2025-06-25T14:44:53.052219Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27833, node 7 2025-06-25T14:44:53.173255Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:53.173347Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:53.176263Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:53.192898Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:53.192927Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T14:44:53.192933Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:53.193068Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28702 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:53.493919Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:53.692569Z node 7 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:40598) has now valid token of root@builtin 2025-06-25T14:44:53.800613Z node 7 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:44:53.800636Z node 7 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:44:53.800642Z node 7 :TICKET_P ... 
_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:02.059489Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017c2/r3tmp/tmpqbsWwl/pdisk_1.dat 2025-06-25T14:45:02.240805Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:02.265078Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:02.265170Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:02.270172Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:02.279774Z node 13 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 13 Type# 268639257 TServer::EnableGrpc on GrpcPort 9819, node 13 2025-06-25T14:45:02.328384Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:02.328407Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:02.328416Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:02.328579Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30127 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:02.619646Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
Trying to register node Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root" Expire: 1750869902233375 Nodes { NodeId: 1024 Host: "localhost" Port: 8516 ResolveHost: "localhost" Address: "localhost" Location { DataCenter: "DataCenter" Rack: "Rack" Unit: "Body" } Expire: 1750869902233375 } Nodes { NodeId: 13 Host: "::1" Port: 12001 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 49 RoomNum: 1 RackNum: 1 BodyNum: 1 DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Nodes { NodeId: 14 Host: "::1" Port: 12002 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 50 RoomNum: 2 RackNum: 2 BodyNum: 2 DataCenter: "2" Module: "2" Rack: "2" Unit: "2" } } Nodes { NodeId: 15 Host: "::1" Port: 12003 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 51 RoomNum: 3 RackNum: 3 BodyNum: 3 DataCenter: "3" Module: "3" Rack: "3" Unit: "3" } } 2025-06-25T14:45:06.682713Z node 16 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[16:7519898062893039641:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:06.682790Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017c2/r3tmp/tmpkGb5o0/pdisk_1.dat 2025-06-25T14:45:06.896685Z node 16 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:06.919303Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:06.919419Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:06.927499Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64684, node 16 2025-06-25T14:45:07.062141Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:07.062173Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:07.062182Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:07.062376Z node 16 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24714 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:07.394034Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Trying to register node Register node result Status { Code: OK } NodeId: 1024 DomainPath: "Root" Expire: 1750869906887807 Nodes { NodeId: 1024 Host: "localhost" Port: 27429 ResolveHost: "localhost" Address: "localhost" Location { DataCenter: "DataCenter" Rack: "Rack" Unit: "Body" } Expire: 1750869906887807 } Nodes { NodeId: 16 Host: "::1" Port: 12001 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 49 RoomNum: 1 RackNum: 1 BodyNum: 1 DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } } Nodes { NodeId: 17 Host: "::1" Port: 12002 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 50 RoomNum: 2 RackNum: 2 BodyNum: 2 DataCenter: "2" Module: "2" Rack: "2" Unit: "2" } } Nodes { NodeId: 18 Host: "::1" Port: 12003 ResolveHost: "::1" Address: "::1" Location { DataCenterNum: 51 RoomNum: 3 RackNum: 3 BodyNum: 3 DataCenter: "3" Module: "3" Rack: "3" Unit: "3" } } 2025-06-25T14:45:11.647330Z node 19 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[19:7519898087015249895:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:11.647460Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017c2/r3tmp/tmpTXv6dZ/pdisk_1.dat 2025-06-25T14:45:11.808414Z node 19 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:11.833251Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:11.833356Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:11.838256Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17537, node 19 2025-06-25T14:45:11.907235Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:11.907257Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:11.907262Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:11.907390Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5922 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:12.312620Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Trying to register node 2025-06-25T14:45:12.467688Z node 19 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket E3A621F0A9EEB0CE700DED537E9A7EB1C6019E000B8B6D14D6190F8F785E2002: Cannot create token from certificate. Client certificate failed verification Register node result Status { Code: ERROR Reason: "Cannot create token from certificate. Client certificate failed verification" } |85.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest >> TGRpcStreamingTest::ClientDisconnects [GOOD] |85.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest |85.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest >> TxUsage::WriteToTopic_Demo_17_Table >> VectorIndexBuildTest::PrefixedDuplicates [GOOD] >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::ClientDisconnects [GOOD] Test command err: 2025-06-25T14:45:14.978940Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898099294425619:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:14.979126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001791/r3tmp/tmpkviUy1/pdisk_1.dat 2025-06-25T14:45:15.307609Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:15.384185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:15.384301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:15.387298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:15.390647Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: 
[0x51f000029280] stream accepted Name# Session ok# true peer# ipv6:[::1]:34708 2025-06-25T14:45:15.390979Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f000029280] facade attach Name# Session actor# [1:7519898103589393395:2258] peer# ipv6:[::1]:34708 2025-06-25T14:45:15.391897Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f000029280] stream done notification Name# Session ok# true peer# ipv6:[::1]:34708 2025-06-25T14:45:15.391955Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:230: Received TEvNotifiedWhenDone 2025-06-25T14:45:15.392285Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f000029280] stream finished Name# Session ok# false peer# unknown grpc status# (1) message# Request abandoned 2025-06-25T14:45:15.392330Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f000029280] deregistering request Name# Session peer# unknown (finish done) >> TGRpcStreamingTest::ReadFinish [GOOD] >> KqpBatchUpdate::ManyPartitions_2 [GOOD] >> TxUsage::Offsets_Cannot_Be_Promoted_When_Reading_In_A_Transaction_Query [GOOD] >> VectorIndexBuildTest::CommonDB [GOOD] >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-false >> ReadRows::KillTabletDuringRead [GOOD] >> YdbTableBulkUpsert::NotNulls [GOOD] >> YdbTableBulkUpsert::Errors ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_40_Query [GOOD] Test command err: 2025-06-25T14:40:54.537677Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896979921259559:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:54.537836Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:54.772218Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019d0/r3tmp/tmpo2XqUG/pdisk_1.dat 2025-06-25T14:40:54.989511Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:55.037326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:55.037425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:55.066082Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32683, node 1 2025-06-25T14:40:55.212966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0019d0/r3tmp/yandexhQe5av.tmp 2025-06-25T14:40:55.212994Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0019d0/r3tmp/yandexhQe5av.tmp 2025-06-25T14:40:55.216416Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0019d0/r3tmp/yandexhQe5av.tmp 2025-06-25T14:40:55.216533Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:55.418894Z INFO: TTestServer started on Port 14553 GrpcPort 32683 2025-06-25T14:40:55.544598Z node 1 :TX_CONVEYOR ERROR: 
log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14553 PQClient connected to localhost:32683 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:55.795249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:55.816925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:40:55.835749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:40:55.846409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:40:56.062709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:40:58.179086Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896997101129398:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.179340Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.180706Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896997101129434:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.196037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:58.236057Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896997101129436:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:40:58.313934Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896997101129500:2444] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:59.124904Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519896997101129515:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:40:59.133071Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YzVmNjY4ZmYtYzk1OGQ3NjItN2Q5ZWY5MWUtZWY1NmU3ZmU=, ActorId: [1:7519896997101129395:2297], ActorState: ExecuteState, TraceId: 01jykrhv9p75np4x9hc9ksf2kj, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:40:59.139736Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:40:59.211405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:59.249124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:59.347848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:40:59.535092Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896979921259559:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:59.535152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Subcribe to ClusterTracker from [1:7519897001396097089:2624] === CheckClustersList. 
Ok 2025-06-25T14:41:06.520465Z :WriteToTopic_Demo_3_Table INFO: TTopicSdkTestSetup started 2025-06-25T14:41:06.546175Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:41:06.597243Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519897031460868383:2719] connected; active server actors: 1 2025-06-25T14:41:06.597502Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-25T14:41:06.598313Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-25T14:41:06.598417Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-25T14:41:06.599514Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-25T14:41:06.612612Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:41:06.613621Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-25T14:41:06.613868Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. Fro ... UG: partition_actor.cpp:192: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) committing to position 100 prev 96 end 100 by cookie 9 2025-06-25T14:45:14.965771Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-25T14:45:14.965813Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037894] got client message batch for topic 'topic_A' partition 0 2025-06-25T14:45:14.965945Z node 19 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 100 (startOffset 96) session test-consumer_19_1_7109507684128990330_v1 2025-06-25T14:45:14.966092Z node 19 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:45:14.967037Z node 19 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 100 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:45:14.967089Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 9 2025-06-25T14:45:14.967102Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:45:14.967148Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=4000885, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:45:14.967213Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 9 } 2025-06-25T14:45:14.967263Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 100 endOffset 100 with cookie 9 2025-06-25T14:45:14.967305Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 replying for commits: assignId# 1, from# 9, to# 9, offset# 100 2025-06-25T14:45:14.968381Z :DEBUG: [/Root] [/Root] [18317aa6-6c16f7cb-8f4c1fcc-95e81f69] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 100 } } 2025-06-25T14:45:15.309531Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:45:15.412478Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=4000885, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:45:16.125288Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 checking auth because of timeout 2025-06-25T14:45:16.125386Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 auth for : test-consumer 2025-06-25T14:45:16.125969Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 Handle describe topics response 2025-06-25T14:45:16.126117Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 auth is DEAD 2025-06-25T14:45:16.126147Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 auth ok: topics# 1, initDone# 1 2025-06-25T14:45:16.414858Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 checking auth because of timeout 2025-06-25T14:45:16.414975Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 auth for : test-consumer 2025-06-25T14:45:16.415861Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 Handle describe topics response 2025-06-25T14:45:16.416002Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 auth is DEAD 2025-06-25T14:45:16.416028Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 auth ok: topics# 1, initDone# 1 
2025-06-25T14:45:16.997269Z :INFO: [/Root] [/Root] [18317aa6-6c16f7cb-8f4c1fcc-95e81f69] Closing read session. Close timeout: 0.000000s 2025-06-25T14:45:16.997352Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:99:100 2025-06-25T14:45:16.997406Z :INFO: [/Root] [/Root] [18317aa6-6c16f7cb-8f4c1fcc-95e81f69] Counters: { Errors: 0 CurrentSessionLifetimeMs: 4091 BytesRead: 100000000 MessagesRead: 100 BytesReadCompressed: 100000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:45:16.997523Z :NOTICE: [/Root] [/Root] [18317aa6-6c16f7cb-8f4c1fcc-95e81f69] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:45:16.997570Z :DEBUG: [/Root] [/Root] [18317aa6-6c16f7cb-8f4c1fcc-95e81f69] [] Abort session to cluster 2025-06-25T14:45:16.997999Z :DEBUG: [/Root] 0x000051E000058190 TDirectReadSessionManager ServerSessionId=test-consumer_19_1_7109507684128990330_v1 Close 2025-06-25T14:45:16.998361Z :DEBUG: [/Root] 0x000051E000058190 TDirectReadSessionManager ServerSessionId=test-consumer_19_1_7109507684128990330_v1 Close 2025-06-25T14:45:16.998491Z :NOTICE: [/Root] [/Root] [18317aa6-6c16f7cb-8f4c1fcc-95e81f69] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:45:17.001193Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|a3123892-4a3835ec-c2ec91f8-b9579587_0] PartitionId [0] Generation [1] Write session: close. Timeout 0.000000s 2025-06-25T14:45:17.001255Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|a3123892-4a3835ec-c2ec91f8-b9579587_0] PartitionId [0] Generation [1] Write session will now close 2025-06-25T14:45:17.001381Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|a3123892-4a3835ec-c2ec91f8-b9579587_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-25T14:45:17.001786Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|a3123892-4a3835ec-c2ec91f8-b9579587_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-25T14:45:17.001853Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|a3123892-4a3835ec-c2ec91f8-b9579587_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-25T14:45:17.002742Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 grpc read done: success# 0, data# { } 2025-06-25T14:45:17.002783Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 grpc read failed 2025-06-25T14:45:17.002802Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [19:7519898094874584533:2516]: session cookie 2 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 grpc read done: success# 0, data# { } 2025-06-25T14:45:17.002820Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 grpc closed 2025-06-25T14:45:17.002830Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [19:7519898094874584533:2516]: session cookie 2 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1grpc read failed 2025-06-25T14:45:17.002854Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 is DEAD 2025-06-25T14:45:17.002879Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [19:7519898094874584533:2516]: session cookie 2 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 grpc closed 2025-06-25T14:45:17.002904Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [19:7519898094874584533:2516]: session cookie 2 consumer test-consumer session test-consumer_19_1_7109507684128990330_v1 proxy is DEAD 2025-06-25T14:45:17.003706Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message_group_id|a3123892-4a3835ec-c2ec91f8-b9579587_0 grpc read done: success: 0 data: 2025-06-25T14:45:17.003723Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message_group_id|a3123892-4a3835ec-c2ec91f8-b9579587_0 grpc read failed 2025-06-25T14:45:17.003755Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message_group_id|a3123892-4a3835ec-c2ec91f8-b9579587_0 grpc closed 2025-06-25T14:45:17.003775Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message_group_id|a3123892-4a3835ec-c2ec91f8-b9579587_0 is DEAD 2025-06-25T14:45:17.004546Z node 19 
:PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:45:17.004582Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:45:17.005168Z node 19 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [19:7519898094874584503:2507] disconnected; active server actors: 1 2025-06-25T14:45:17.005192Z node 19 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [19:7519898094874584503:2507] client test-consumer disconnected session test-consumer_19_1_7109507684128990330_v1 2025-06-25T14:45:17.005312Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [19:7519898081989682293:2461] destroyed 2025-06-25T14:45:17.005369Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [19:7519898081989682296:2461] destroyed 2025-06-25T14:45:17.005390Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_19_1_7109507684128990330_v1 2025-06-25T14:45:17.005412Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [19:7519898094874584517:2513] destroyed 2025-06-25T14:45:17.005469Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:45:17.005551Z node 19 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_19_1_7109507684128990330_v1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::ReadFinish [GOOD] Test command err: 2025-06-25T14:45:16.028418Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898106236961013:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:16.028739Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001782/r3tmp/tmpxMJajZ/pdisk_1.dat 2025-06-25T14:45:16.339065Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:16.339971Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898106236960995:2080] 1750862716027594 != 1750862716027597 2025-06-25T14:45:16.383998Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x51f000029280] stream accepted Name# Session ok# true peer# ipv6:[::1]:50212 2025-06-25T14:45:16.384291Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x51f000029280] facade attach Name# Session actor# [1:7519898106236961499:2254] peer# ipv6:[::1]:50212 2025-06-25T14:45:16.384338Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x51f000029280] facade read Name# Session peer# ipv6:[::1]:50212 2025-06-25T14:45:16.384416Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x51f000029280] facade finish Name# Session peer# ipv6:[::1]:50212 grpc status# (0) message# 2025-06-25T14:45:16.384826Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x51f000029280] stream done notification Name# Session ok# true peer# 
ipv6:[::1]:50212 2025-06-25T14:45:16.384844Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x51f000029280] read finished Name# Session ok# false data# peer# ipv6:[::1]:50212 2025-06-25T14:45:16.384874Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x51f000029280] stream finished Name# Session ok# true peer# ipv6:[::1]:50212 grpc status# (0) message# 2025-06-25T14:45:16.384898Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:265: Received TEvReadFinished, success = 0 2025-06-25T14:45:16.384918Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x51f000029280] deregistering request Name# Session peer# ipv6:[::1]:50212 (finish done) 2025-06-25T14:45:16.406414Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:16.406504Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:16.409015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Table [GOOD] >> TxUsage::ReadRuleGeneration |85.2%| [TA] $(B)/ydb/core/grpc_streaming/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.2%| [TA] {RESULT} $(B)/ydb/core/grpc_streaming/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::ManyPartitions_2 [GOOD] Test command err: Trying to start YDB, gRPC: 22664, MsgBus: 19153 2025-06-25T14:42:40.738598Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897437703722232:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.738920Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000be0/r3tmp/tmp4tPgcK/pdisk_1.dat 2025-06-25T14:42:41.610327Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.678991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.679108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.690416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22664, node 1 2025-06-25T14:42:41.747693Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:42.718471Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.718503Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.718522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.718645Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19153 TClient is connected to server localhost:19153 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.995935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.406050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.573068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.671855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.772517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897437703722232:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.777947Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.855014Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897459178560175:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.855139Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.962937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.983645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.002848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.025946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.053251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.119737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.167944Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472063462734:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.168033Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.168165Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472063462739:2442], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.171898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.179580Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897472063462741:2443], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.256270Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897472063462792:3436] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.086684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:56.525827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:42:56.525856Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Trying to start YDB, gRPC: 63555, MsgBus: 19403 2025-06-25T14:42:58.352091 ... eshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 13751, MsgBus: 9726 2025-06-25T14:45:05.647261Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519898059771339064:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:05.647350Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000be0/r3tmp/tmp3TI60U/pdisk_1.dat 2025-06-25T14:45:05.805569Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:05.806902Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519898059771339044:2080] 1750862705646220 != 1750862705646223 2025-06-25T14:45:05.822175Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:05.822307Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:05.824739Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13751, node 12 2025-06-25T14:45:05.885025Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:05.885055Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:05.885070Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:05.885251Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9726 2025-06-25T14:45:06.663125Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9726 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:06.744462Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:06.763648Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:06.854544Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:07.117990Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:07.295450Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:10.647413Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519898059771339064:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:10.647532Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:10.771470Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519898081246177177:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:10.771610Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:10.855911Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:10.896450Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:10.938906Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:10.981289Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:11.025974Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:11.107405Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:11.151209Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:11.228507Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519898085541145134:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:11.228620Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519898085541145139:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:11.228622Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:11.233360Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:11.247326Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519898085541145141:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:11.309781Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519898085541145192:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:12.987575Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> IndexBuildTest::CancelBuild [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> ReadRows::KillTabletDuringRead [GOOD] Test command err: 2025-06-25T14:44:41.012135Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897956831987453:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:41.012887Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d4/r3tmp/tmpSVoMv0/pdisk_1.dat 2025-06-25T14:44:41.429445Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:41.459751Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:41.459823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:41.465438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64926, node 1 2025-06-25T14:44:41.533484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:41.533507Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:41.533518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:41.533620Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27576 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:41.847378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:41.877134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:44:42.020300Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27576 TClient is connected to server localhost:27576 2025-06-25T14:44:42.250856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:44:43.981895Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897965421923041:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:43.981903Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897965421923049:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:43.981983Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:43.986048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:44.023921Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897965421923055:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:44:44.118173Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897969716890424:2696] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } TClient is connected to server localhost:27576 2025-06-25T14:44:44.707052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:44:49.228775Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:421:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:44:49.229075Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:44:49.229184Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d4/r3tmp/tmpIDHVNk/pdisk_1.dat 2025-06-25T14:44:49.721594Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:44:49.823439Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:49.823595Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:49.839175Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:50.210712Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [4:967:2765], Recipient [4:518:2429]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:44:50.210795Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:44:50.210853Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:44:50.210969Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [4:964:2763], Recipient [4:518:2429]: {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:44:50.210997Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:44:50.309542Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateSubDomain SubDomain { Name: "tenant" } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:44:50.309786Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /Root/tenant, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:44:50.309891Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: tenant, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-25T14:44:50.310034Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 
2025-06-25T14:44:50.310081Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976715657:0 type: TxCreateSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-25T14:44:50.310250Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:44:50.310375Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:44:50.310438Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) 2025-06-25T14:44:50.310512Z node 4 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:44:50.310569Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:44:50.310624Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025 ... 131:7762515]; 2025-06-25T14:45:05.156411Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d4/r3tmp/tmpgibwF8/pdisk_1.dat 2025-06-25T14:45:05.269864Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:05.287515Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:05.287613Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:05.293350Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19853, node 6 2025-06-25T14:45:05.358619Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:05.358644Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:05.358652Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:05.358776Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18348 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:05.630801Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:18348 2025-06-25T14:45:06.175244Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:09.519326Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519898077005779430:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:09.519892Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d4/r3tmp/tmpPB1I8t/pdisk_1.dat 2025-06-25T14:45:09.707463Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:09.728411Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:09.728520Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:09.739276Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19859, node 9 2025-06-25T14:45:09.811186Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:09.811210Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:09.811218Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:09.811371Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27027 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:10.086566Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:17.482883Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:274:2317], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:45:17.483299Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:45:17.483476Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d4/r3tmp/tmpkrBCr9/pdisk_1.dat 2025-06-25T14:45:17.769727Z node 12 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 12 Type# 268639257 2025-06-25T14:45:17.771425Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:17.805267Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:17.810300Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:33:2080] 1750862713964534 != 1750862713964537 2025-06-25T14:45:17.857210Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:17.857382Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:17.869096Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:17.951489Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:18.277827Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:18.277963Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:704:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:18.278069Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:18.284353Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:18.343473Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:18.466312Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:45:18.502294Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:18.602219Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykrssa3a1k1vcrtankvc1qm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=NGJjNTBkNzYtMjU0NWFjNjMtZDM2Nzk2YTAtNjBkZmM4NjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Stoping tablet id: 720751862240378882025-06-25T14:45:18.665750Z node 12 :RPC_REQUEST WARN: rpc_read_rows.cpp:757: TReadRowsRPC CancelReads, shardIds# [72075186224037888, ] 2025-06-25T14:45:18.665845Z node 12 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: Failed to connect to shard 72075186224037888 >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::CancelBuild [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:45:04.554887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:45:04.554977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:04.555013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:45:04.555050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:45:04.555094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:45:04.555120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:45:04.555172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:04.555245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:45:04.555974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:45:04.556299Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:45:04.631515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:04.631581Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:04.657818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:45:04.658216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:45:04.658368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:45:04.664134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:45:04.664504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:45:04.665134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:04.665433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:45:04.673768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:04.673951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:04.675198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:04.675262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:04.675389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:04.675434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:04.675475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:04.675595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:45:04.690633Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:45:04.812078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:45:04.812333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: 
//MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:04.812542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:45:04.812588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:45:04.812815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:45:04.812883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:04.817716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:04.817911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:45:04.818111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:04.818184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:45:04.818236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:45:04.818277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:45:04.820252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:04.820362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:45:04.820413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:45:04.822192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:04.822235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:04.822299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:04.822358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:45:04.825933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose 
send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:45:04.827921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:45:04.828166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:45:04.829168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:04.829331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:04.829396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:04.829719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:45:04.829785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:04.829970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:45:04.830081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:45:04.832334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:04.832386Z node 1 :FLAT_TX_SCHEMESHARD ... 
done id#281474976710760:0 progress is 1/1 2025-06-25T14:45:19.854434Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:45:19.854475Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-25T14:45:19.854537Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:129:2153] message: TxId: 281474976710760 2025-06-25T14:45:19.854582Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:45:19.854617Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-25T14:45:19.854642Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710760:0 2025-06-25T14:45:19.854700Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 13 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-25T14:45:19.856979Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-25T14:45:19.857051Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710760 2025-06-25T14:45:19.857116Z node 3 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710760 2025-06-25T14:45:19.857196Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Cancellation_Unlocking, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [3:1175:3025], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710760 2025-06-25T14:45:19.858960Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking 2025-06-25T14:45:19.859045Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, 
IndexName: index1, IndexColumn: index, State: Cancellation_Unlocking, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [3:1175:3025], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:45:19.859099Z node 3 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Cancellation_Unlocking to Cancelled 2025-06-25T14:45:19.860561Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled 2025-06-25T14:45:19.860645Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Cancelled, IsBroken: 0, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [3:1175:3025], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:45:19.860693Z node 3 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:333: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-25T14:45:19.860859Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:45:19.860914Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [3:1269:3108] TestWaitNotification: OK eventTxId 102 2025-06-25T14:45:19.863123Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-25T14:45:19.863372Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings 
{ source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } 2025-06-25T14:45:19.865464Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:45:19.865698Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 255us result status StatusSuccess 2025-06-25T14:45:19.866141Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:19.868123Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme 
DoExecute, record: Path: "/MyRoot/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:45:19.868355Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/index1" took 234us result status StatusPathDoesNotExist 2025-06-25T14:45:19.868522Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/index1\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeTableIndex, state: EPathStateNotExist), drop stepId: 5000005, drop txId: 281474976710759" Path: "/MyRoot/Table/index1" PathId: 3 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> YdbOlapStore::LogPagingBetween [GOOD] >> YdbOlapStore::LogWithUnionAllAscending |85.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest |85.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-true [GOOD] >> TCacheTest::List >> TCacheTest::RacyRecreateAndSync >> TCacheTest::CheckSystemViewAccess >> TCacheTest::Attributes >> TCacheTest::MigrationCommon >> TPQTestSlow::TestWriteVeryBigMessage >> TCacheTest::MigrationLostMessage >> SlowTopicAutopartitioning::CDC_Write >> TCacheTest::Recreate >> TCacheTest::WatchRoot >> TCacheTest::SystemView >> TPQTestSlow::TestOnDiskStoredSourceIds >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesCorruptedCert [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientDoesNotProvideClientCerts >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Query [GOOD] >> TCacheTest::Recreate [GOOD] >> TCacheTest::SysLocks >> TCacheTest::CheckSystemViewAccess [GOOD] >> TCacheTest::CookiesArePreserved >> TCacheTest::SystemView [GOOD] >> TCacheTest::TableSchemaVersion >> TCacheTest::Attributes [GOOD] >> TCacheTest::CheckAccess >> TCacheTest::List [GOOD] >> TCacheTest::MigrationCommit >> TCacheTest::RacyRecreateAndSync [GOOD] >> TCacheTest::RacyCreateAndSync >> TCacheTest::WatchRoot [GOOD] >> TCacheTestWithDrops::LookupErrorUponEviction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:45:01.439548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 
172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:45:01.439630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:01.439663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:45:01.439695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:45:01.439733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:45:01.439759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:45:01.439813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:01.439882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:45:01.440698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:45:01.440990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:45:01.506929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:01.506985Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:01.522803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:45:01.523174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:45:01.523331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:45:01.528794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:45:01.529102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:45:01.529731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:01.529993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:45:01.533245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:01.533425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:01.534629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-25T14:45:01.534694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:01.534858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:01.534910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:01.534951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:01.535054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:45:01.541958Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:45:01.655809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:45:01.656010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:01.656188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:45:01.656235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:45:01.656459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:45:01.656537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:01.658569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:01.658737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:45:01.658933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:01.658997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at 
tablet# 72057594046678944 2025-06-25T14:45:01.659050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:45:01.659083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:45:01.660813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:01.660861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:45:01.660902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:45:01.662380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:01.662424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:01.662471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:01.662514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:45:01.671843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:45:01.673940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:45:01.674118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:45:01.675040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:01.675168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:01.675222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:01.675490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:45:01.675540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: 
NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:01.675713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:45:01.675808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:45:01.677861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:01.677923Z node 1 :FLAT_TX_SCHEMESHARD ... mplete, at schemeshard: 72057594046678944 2025-06-25T14:45:21.330754Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:21.330805Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:21.330858Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:21.334002Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [3:2648:4392] sender: [3:2706:2058] recipient: [3:15:2062] 2025-06-25T14:45:21.376509Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/by_embedding" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:45:21.376803Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/by_embedding" took 339us result status StatusSuccess 2025-06-25T14:45:21.378152Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/by_embedding" PathDescription { Self { Name: "by_embedding" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Children { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 
281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Children { Name: "indexImplPrefixTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 10 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "by_embedding" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "prefix" KeyColumnNames: "embedding" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataColumnNames: "covered" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 
KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 
ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TCacheTest::CheckAccess [GOOD] >> TCacheTest::SysLocks [GOOD] >> TCacheTest::CookiesArePreserved [GOOD] >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-false [GOOD] >> TCacheTest::TableSchemaVersion [GOOD] >> TCacheTest::Navigate >> TCacheTest::RacyCreateAndSync [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::SysLocks [GOOD] Test command err: 2025-06-25T14:45:22.216266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.216336Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.370578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 
TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:45:22.392763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-25T14:45:22.395408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:45:22.427390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 FAKE_COORDINATOR: Erasing txId 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:45:22.437027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 103 2025-06-25T14:45:22.637232Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.637301Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.684855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::CookiesArePreserved [GOOD] Test command err: 2025-06-25T14:45:22.203111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.203151Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: 
Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.376253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2025-06-25T14:45:22.386964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:45:22.392785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-25T14:45:22.394698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TestModificationResult got TxId: 102, wait until txId: 102 2025-06-25T14:45:22.399891Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:215:2198], for# user1@builtin, access# DescribeSchema 2025-06-25T14:45:22.400452Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:221:2204], for# user1@builtin, access# 2025-06-25T14:45:22.681453Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.681513Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.737089Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 
TestModificationResults wait txId: 101 2025-06-25T14:45:22.743143Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:45:22.749979Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::CheckAccess [GOOD] Test command err: 2025-06-25T14:45:22.204941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.204990Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.380245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:45:22.397741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 2025-06-25T14:45:22.644689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.644734Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.699069Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at 
schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-25T14:45:22.710450Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TestModificationResult got TxId: 102, wait until txId: 102 2025-06-25T14:45:22.714058Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [2:200:2189], for# user1@builtin, access# DescribeSchema 2025-06-25T14:45:22.714503Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [2:204:2193], for# user1@builtin, access# DescribeSchema >> TCacheTest::MigrationCommon [GOOD] >> TCacheTest::MigrationDeletedPathNavigate >> TCacheTest::MigrationLostMessage [GOOD] >> TCacheTest::MigrationUndo ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::TableSchemaVersion [GOOD] Test command err: 2025-06-25T14:45:22.212075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.212130Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.383000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 2025-06-25T14:45:22.639957Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.640004Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.692682Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2025-06-25T14:45:22.729302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-25T14:45:22.881077Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestWaitNotification: OK eventTxId 102 >> TCacheTest::MigrationCommit [GOOD] >> TCacheTest::Navigate [GOOD] >> TCacheTest::PathBelongsToDomain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::RacyCreateAndSync [GOOD] Test command err: 2025-06-25T14:45:22.218484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.218541Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.394063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 
1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:45:22.410546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-25T14:45:22.412984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:45:22.454224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 FAKE_COORDINATOR: Erasing txId 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:45:22.474740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 103 2025-06-25T14:45:22.844146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.844218Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.893406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 
2025-06-25T14:45:22.904940Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::DescriptionIsPersisted-prefixed-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:45:07.612731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:45:07.612813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:07.612846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:45:07.612877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:45:07.612912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:45:07.612936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:45:07.612979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:07.613051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:45:07.613742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:45:07.614033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:45:07.688113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:07.688171Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:07.703915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:45:07.704256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:45:07.704410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:45:07.709868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:45:07.710187Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:45:07.710753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:07.710997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:45:07.714177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:07.714335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:07.715404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:07.715453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:07.715569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:07.715608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:07.715642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:07.715712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:45:07.722819Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:45:07.841036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:45:07.841248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:07.841479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:45:07.841520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:45:07.841727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:45:07.841795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, 
first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:07.843591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:07.843757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:45:07.843913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:07.843972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:45:07.844008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:45:07.844039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:45:07.846413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:07.846466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:45:07.846501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:45:07.847980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:07.848037Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:07.848121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:07.848171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:45:07.851436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:45:07.857662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:45:07.857846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:45:07.858776Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:07.858904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:07.858968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:07.859222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:45:07.859271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:07.859444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:45:07.859525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:45:07.862080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:07.862136Z node 1 :FLAT_TX_SCHEMESHARD ... ld , records: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:22.470938Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.471089Z node 3 :BUILD_INDEX DEBUG: schemeshard_info_types.h:3693: AddShardStatus id# 102 shard 72057594046678944:11 2025-06-25T14:45:22.471183Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.471246Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.471318Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 4, at schemeshard: 72057594046678944 2025-06-25T14:45:22.476913Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:22.477457Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-25T14:45:22.477541Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: by_embedding, IndexColumn: embedding, DataColumns: covered, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [0:0:0], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 
281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976720769, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976720770, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:45:22.477583Z node 3 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:333: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 0 2025-06-25T14:45:22.481318Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:22.481375Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:22.481640Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:22.481686Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:22.481734Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:22.481813Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [3:3212:5005] sender: [3:3273:2058] recipient: [3:15:2062] 2025-06-25T14:45:22.529685Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/by_embedding" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:45:22.529968Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/by_embedding" took 323us result status StatusSuccess 2025-06-25T14:45:22.531120Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/by_embedding" PathDescription { Self { Name: "by_embedding" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 
TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Children { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "by_embedding" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "embedding" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataColumnNames: "covered" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 
83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TCacheTest::PathBelongsToDomain [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::MigrationCommit [GOOD] Test command err: 2025-06-25T14:45:22.200644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.200697Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
TestModificationResults wait txId: 1 2025-06-25T14:45:22.392961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 FAKE_COORDINATOR: Erasing txId 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 101 TestWaitNotification wait txId: 102 TestWaitNotification wait txId: 103 2025-06-25T14:45:22.419099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:45:22.419214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:45:22.419565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-25T14:45:22.632356Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.632416Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.689196Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 Leader for TabletID 72057594046678944 is [2:73:2111] sender: 
[2:178:2067] recipient: [2:48:2095] Leader for TabletID 72057594046678944 is [2:73:2111] sender: [2:181:2067] recipient: [2:24:2071] Leader for TabletID 72057594046678944 is [2:73:2111] sender: [2:182:2067] recipient: [2:180:2173] Leader for TabletID 72057594046678944 is [2:183:2174] sender: [2:184:2067] recipient: [2:180:2173] 2025-06-25T14:45:22.732076Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.732147Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 101 Leader for TabletID 72057594046678944 is [2:183:2174] sender: [2:214:2067] recipient: [2:24:2071] 2025-06-25T14:45:22.761335Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-25T14:45:22.768209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:249:2067] recipient: [2:241:2215] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:249:2067] recipient: [2:241:2215] Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:252:2067] recipient: [2:244:2217] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:252:2067] recipient: [2:244:2217] Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:254:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:254:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409546 is [2:253:2221] sender: [2:255:2067] recipient: [2:241:2215] Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:256:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:256:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409547 is [2:258:2223] sender: [2:261:2067] recipient: [2:244:2217] TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 TestWaitNotification wait txId: 102 2025-06-25T14:45:22.797158Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 Leader for TabletID 72075186233409546 is [2:253:2221] sender: [2:293:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409547 is [2:258:2223] sender: [2:294:2067] recipient: [2:24:2071] FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 
72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 2025-06-25T14:45:22.879309Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 104:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 TestWaitNotification wait txId: 104 Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:344:2067] recipient: [2:340:2287] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:344:2067] recipient: [2:340:2287] Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:345:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:345:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409548 is [2:347:2291] sender: [2:348:2067] recipient: [2:340:2287] Leader for TabletID 72075186233409548 is [2:347:2291] sender: [2:349:2067] recipient: [2:24:2071] TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-25T14:45:23.057326Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomain, opId: 105:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:1232) Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:424:2067] recipient: [2:420:2336] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:424:2067] recipient: [2:420:2336] Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:426:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:426:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409549 is [2:427:2339] sender: [2:428:2067] recipient: [2:420:2336] 2025-06-25T14:45:23.099501Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:23.099569Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 Leader for TabletID 72075186233409549 is [2:427:2339] sender: [2:455:2067] recipient: [2:24:2071] TestWaitNotification: OK eventTxId 105 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } TestModificationResults wait txId: 106 2025-06-25T14:45:23.152233Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5475: Mark as 
Migrated path id [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:45:23.152286Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5475: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-25T14:45:23.152630Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__operation_upgrade_subdomain.cpp:1464: TWait ProgressState, dependent transaction: 106, parent transaction: 105, at schemeshard: 72057594046678944 2025-06-25T14:45:23.152792Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomainDecision, opId: 106:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:571) TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-25T14:45:23.170311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5936: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 2025-06-25T14:45:23.170459Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5936: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } Leader for TabletID 72057594046678944 is [2:183:2174] sender: [2:516:2067] recipient: [2:48:2095] Leader for TabletID 72057594046678944 is [2:183:2174] sender: [2:518:2067] recipient: [2:24:2071] Leader for TabletID 72057594046678944 is [2:183:2174] sender: [2:520:2067] recipient: [2:519:2410] Leader for TabletID 72057594046678944 is [2:521:2411] sender: [2:522:2067] recipient: [2:519:2410] 2025-06-25T14:45:23.219480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:23.219544Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Leader for TabletID 72057594046678944 is [2:521:2411] sender: [2:549:2067] recipient: [2:24:2071] { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } >> YdbYqlClient::QueryStats [GOOD] >> YdbYqlClient::RenameTables >> TCacheTest::MigrationUndo [GOOD] >> YdbTableBulkUpsert::Errors [GOOD] >> YdbTableBulkUpsert::Limits ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::PathBelongsToDomain [GOOD] Test command err: 2025-06-25T14:45:23.199157Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:23.199265Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:23.309528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:45:23.330214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 2025-06-25T14:45:23.670131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:23.670182Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:23.716168Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 2025-06-25T14:45:23.720701Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:45:23.725952Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 FAKE_COORDINATOR: Add transaction: 102 at 
step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 2025-06-25T14:45:23.734348Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:287: Path does not belong to the specified domain: self# [2:229:2206], domain# [OwnerId: 72057594046678944, LocalPathId: 1], path's domain# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:45:23.734616Z node 2 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:287: Path does not belong to the specified domain: self# [2:231:2208], domain# [OwnerId: 72057594046678944, LocalPathId: 1], path's domain# [OwnerId: 72057594046678944, LocalPathId: 2] >> YdbYqlClient::TestExplicitPartitioning [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::MigrationUndo [GOOD] Test command err: 2025-06-25T14:45:22.213859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.213904Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.370672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 Leader for TabletID 72057594046678944 is [1:73:2111] sender: [1:178:2067] recipient: [1:48:2095] Leader for TabletID 72057594046678944 is [1:73:2111] sender: [1:181:2067] recipient: [1:24:2071] Leader for TabletID 72057594046678944 is [1:73:2111] sender: [1:182:2067] recipient: [1:180:2173] Leader for TabletID 72057594046678944 is [1:183:2174] sender: [1:184:2067] recipient: [1:180:2173] 2025-06-25T14:45:22.419211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.419261Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 101 Leader for TabletID 72057594046678944 is [1:183:2174] sender: [1:214:2067] recipient: [1:24:2071] 2025-06-25T14:45:22.450258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-25T14:45:22.458930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: 
Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) Leader for TabletID 72075186233409546 is [0:0:0] sender: [1:250:2067] recipient: [1:241:2215] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [1:250:2067] recipient: [1:241:2215] Leader for TabletID 72075186233409547 is [0:0:0] sender: [1:251:2067] recipient: [1:244:2217] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [1:251:2067] recipient: [1:244:2217] Leader for TabletID 72075186233409546 is [0:0:0] sender: [1:255:2067] recipient: [1:24:2071] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [1:255:2067] recipient: [1:24:2071] Leader for TabletID 72075186233409547 is [0:0:0] sender: [1:257:2067] recipient: [1:24:2071] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [1:257:2067] recipient: [1:24:2071] Leader for TabletID 72075186233409546 is [1:253:2221] sender: [1:258:2067] recipient: [1:241:2215] Leader for TabletID 72075186233409547 is [1:256:2223] sender: [1:259:2067] recipient: [1:244:2217] TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 TestWaitNotification wait txId: 102 2025-06-25T14:45:22.492790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 Leader for TabletID 72075186233409546 is [1:253:2221] sender: [1:293:2067] recipient: [1:24:2071] Leader for TabletID 72075186233409547 is [1:256:2223] sender: [1:294:2067] recipient: [1:24:2071] FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 2025-06-25T14:45:22.585627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 104:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 TestWaitNotification wait txId: 104 Leader for TabletID 72075186233409548 is [0:0:0] sender: [1:344:2067] recipient: [1:340:2287] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [1:344:2067] recipient: [1:340:2287] Leader for TabletID 72075186233409548 is [0:0:0] sender: [1:345:2067] recipient: [1:24:2071] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [1:345:2067] recipient: [1:24:2071] Leader for TabletID 72075186233409548 is [1:347:2291] sender: [1:348:2067] recipient: [1:340:2287] Leader for TabletID 72075186233409548 is [1:347:2291] sender: [1:349:2067] recipient: [1:24:2071] TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-25T14:45:22.782779Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomain, opId: 105:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:1232) Leader for TabletID 72075186233409549 is [0:0:0] sender: [1:423:2067] recipient: [1:419:2335] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [1:423:2067] recipient: [1:419:2335] Leader for TabletID 72075186233409549 is [0:0:0] sender: [1:424:2067] recipient: [1:24:2071] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [1:424:2067] recipient: [1:24:2071] Leader for TabletID 72075186233409549 is [1:426:2339] sender: [1:427:2067] recipient: [1:419:2335] Leader for TabletID 72075186233409549 is [1:426:2339] sender: [1:428:2067] recipient: [1:24:2071] 2025-06-25T14:45:22.824677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.824733Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 TestWaitNotification: OK eventTxId 105 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } TestModificationResults wait txId: 106 2025-06-25T14:45:22.848123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5475: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:45:22.848176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5475: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-25T14:45:22.848581Z node 1 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__operation_upgrade_subdomain.cpp:1464: TWait ProgressState, dependent transaction: 106, parent transaction: 105, at schemeshard: 72057594046678944 2025-06-25T14:45:22.848681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomainDecision, opId: 106:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:571) TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-25T14:45:22.869698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5936: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 2025-06-25T14:45:22.870084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5936: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 
72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } TestModificationResults wait txId: 107 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 skipDeleteNotification path: /Root/USER_0/DirA/Table1 pathId: [OwnerId: 72057594046678944, LocalPathId: 4] Strong: 1 TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-06-25T14:45:22.923643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 108:0, at schemeshard: 72075186233409549, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 skipDeleteNotification path: /Root/USER_0/DirA pathId: [OwnerId: 72057594046678944, LocalPathId: 3] Strong: 1 TestWaitNotification: OK eventTxId 108 TestModificationResults wait txId: 109 skipDeleteNotification path: /Root/USER_0/DirA pathId: [OwnerId: 72057594046678944, LocalPathId: 3] Strong: 1 TestModificationResult got TxId: 109, wait until txId: 109 TestWaitNotification wait txId: 109 TestWaitNotification: OK eventTxId 109 TestModificationResults wait txId: 110 2025-06-25T14:45:23.002106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 110:0, at schemeshard: 72075186233409549, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Leader for TabletID 72075186233409550 is [0:0:0] sender: [1:626:2067] recipient: [1:622:2504] IGNORE Leader for TabletID 72075186233409550 is [0:0:0] sender: [1:626:2067] recipient: [1:622:2504] Leader for TabletID 72075186233409550 is [0:0:0] sender: [1:627:2067] recipient: [1:24:2071] IGNORE ... 
emeshard: 72057594046678944 2025-06-25T14:45:24.087627Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.087728Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.088036Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.088140Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.088358Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.088499Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.088589Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.088771Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.088849Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.089029Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.089244Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.089306Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.089398Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.089465Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.089505Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.089748Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:45:24.090892Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:45:24.091026Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:24.091833Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435083, Sender [2:520:2404], Recipient [2:520:2404]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-25T14:45:24.091867Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5018: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-25T14:45:24.092411Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-25T14:45:24.092451Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:24.092501Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:24.092546Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:24.092575Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:24.092599Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:45:24.092697Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274399233, Sender [2:536:2404], Recipient [2:520:2404]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-25T14:45:24.092723Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5109: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-25T14:45:24.092773Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:45:24.103968Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:163:2159], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:45:24.104147Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:163:2159], cacheItem# { Subscriber: { Subscriber: [2:386:2321] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 5000002 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] DomainId: [OwnerId: 72057594046678944, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:45:24.104400Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:548:2421], recipient# [2:547:2420], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/USER_0 TableId: [72057594046678944:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } { Path: Root/USER_0 TableId: [72057594046678944:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false 
SyncVersion: false Status: Ok Kind: KindSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:45:24.104783Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:163:2159], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/USER_0/DirA TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:45:24.104962Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:163:2159], cacheItem# { Subscriber: { Subscriber: [2:395:2324] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 200 PathId: [OwnerId: 72057594046678944, LocalPathId: 3] DomainId: [OwnerId: 72057594046678944, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/USER_0/DirA TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:45:24.105134Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:550:2423], recipient# [2:549:2422], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:45:24.105572Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:163:2159], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/USER_0/DirA/Table1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:45:24.105660Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:163:2159], cacheItem# { Subscriber: { Subscriber: [2:404:2327] DomainOwnerId: 72057594046678944 
Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 250 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] DomainId: [OwnerId: 72057594046678944, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: Root/USER_0/DirA/Table1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:45:24.105841Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:552:2425], recipient# [2:551:2424], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/USER_0/DirA/Table1 TableId: [72057594046678944:4:1] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } >> TxUsage::WriteToTopic_Demo_31_Table [GOOD] >> KqpJoinOrder::FourWayJoinLeftFirst+ColumnStore >> KqpIndexLookupJoin::SimpleLeftOnlyJoin-StreamLookup >> KqpJoinOrder::TPCH9_100 >> KqpJoinOrder::UdfConstantFold+ColumnStore >> KqpJoinOrder::TestJoinOrderHintsSimple-ColumnStore >> KqpJoinOrder::SortingsWithLookupJoinByPrefix+RemoveLimitOperator >> KqpIndexLookupJoin::CheckCastUint64ToInt64+StreamLookupJoin-NotNull >> KqpJoinOrder::CanonizedJoinOrderTPCH1 >> KqpIndexLookupJoin::LeftSemiJoinWithLeftFilter+StreamLookup >> KqpJoin::RightSemiJoin_FullScan >> KqpIndexLookupJoin::CheckCastUint32ToUint16-StreamLookupJoin-NotNull >> TxUsage::WriteToTopic_Demo_31_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestExplicitPartitioning [GOOD] Test command err: 2025-06-25T14:44:37.906092Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897938922260849:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:37.907469Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f0/r3tmp/tmp2MXVaW/pdisk_1.dat 2025-06-25T14:44:38.302239Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:38.317408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:38.317483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:38.320718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12146, node 1 2025-06-25T14:44:38.396873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:38.396895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T14:44:38.396902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:38.397021Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9253 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:38.688280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:38.913820Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:40.909985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897951807163707:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:40.910088Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:41.165437Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897956102131029:2618] txid# 281474976710658, issues: { message: "Column Key has wrong key type Double" severity: 1 } 2025-06-25T14:44:42.799918Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897962220387150:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:42.800113Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f0/r3tmp/tmp2n1dKb/pdisk_1.dat 2025-06-25T14:44:43.222344Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:43.236130Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:43.236215Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:43.243502Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22938, node 4 2025-06-25T14:44:43.356891Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:43.356930Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:43.356956Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:43.357097Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3084 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:43.662488Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:43.872434Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:46.156772Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897979400257372:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:46.156872Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:46.180854Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897979400257385:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:46.181185Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:46.205822Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897979400257412:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:46.206213Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:46.238369Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:46.251746Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897979400257472:2656] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:44:46.251977Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897979400257466:2650] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:44:46.255680Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897979400257468:2652] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:44:46.255813Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897979400257469:2653] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:44:46.255887Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897979400257467:2651] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:44:46.259037Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897979400257470:2654] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/Test\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeTable, state: EP ... Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19708 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:48.818574Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:49.312678Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:51.570163Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:51.695940Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519898001016824893:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:51.695996Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519898001016824901:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:51.696035Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:51.698719Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:51.715718Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519898001016824907:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:44:51.780762Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519898001016824980:2839] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:51.841601Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrrzbe5669gvapxas5m8nf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=ZmEyNzY5YzctMzgyODc1ZC0zN2M0MTZiYy02NGNlOGRjNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:44:53.573297Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519898008236832581:2190];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f0/r3tmp/tmphTwCsM/pdisk_1.dat 2025-06-25T14:44:53.612244Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:53.743935Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:53.763928Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:53.764012Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:53.772264Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26182, node 10 2025-06-25T14:44:53.888843Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:53.888861Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:53.888868Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:53.889000Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4879 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:54.273851Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:54.568809Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:56.724736Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:58.568431Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519898008236832581:2190];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:58.568515Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:08.709705Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:45:08.709734Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:23.075853Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519898137085853585:2502], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:23.075854Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519898137085853577:2499], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:23.075915Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:23.079223Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:23.096901Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519898137085853591:2503], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:45:23.197842Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519898137085853666:3178] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:23.301540Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrsy024zsvq82v111zfd5m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=NTgwY2M4YWMtZTY1MTdlZmEtZjY5YWNlN2UtNmYyNzlhZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:45:23.711787Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jykrsy7mffarh3ezvtg0ytzg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=NTgwY2M4YWMtZTY1MTdlZmEtZjY5YWNlN2UtNmYyNzlhZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> KqpIndexLookupJoin::CheckCastInt64ToUint64-StreamLookupJoin-NotNull >> KqpIndexLookupJoin::LeftJoinRightNullFilter+StreamLookup >> KqpJoinOrder::CanonizedJoinOrderTPCH7 >> VectorIndexBuildTest::ServerLessDB-smallScanBuffer-true [GOOD] >> VectorIndexBuildTest::Shard_Build_Error >> TxUsage::WriteToTopic_Demo_17_Table [GOOD] >> TxUsage::WriteToTopic_Demo_17_Query >> YdbYqlClient::RenameTables [GOOD] >> VectorIndexBuildTest::Shard_Build_Error [GOOD] >> YdbOlapStore::LogGrepNonExisting [GOOD] >> YdbOlapStore::LogGrepExisting >> IndexBuildTest::MergeIndexTableShardsOnlyWhenReady [GOOD] >> IndexBuildTest::IndexPartitioningIsPersisted >> YdbTableBulkUpsert::Limits [GOOD] >> YdbTableBulkUpsert::DecimalPK ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::Shard_Build_Error [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:45:09.032899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:45:09.032989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:09.033025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:45:09.033067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:45:09.033111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:45:09.033139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:45:09.033187Z node 
1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:09.033291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:45:09.034085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:45:09.034426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:45:09.104401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:09.104458Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:09.122405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:45:09.122698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:45:09.122825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:45:09.128130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:45:09.128470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:45:09.129111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:09.129360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:45:09.132427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:09.132580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:09.133654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:09.133712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:09.133854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:09.133901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:09.133943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:09.134041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:45:09.141913Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: 
[72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:45:09.249595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:45:09.249809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:09.250003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:45:09.250051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:45:09.250282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:45:09.250371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:09.252583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:09.252761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:45:09.252966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:09.253025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:45:09.253063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:45:09.253096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:45:09.258223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:09.258291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:45:09.258335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:45:09.261824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-25T14:45:09.261888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:09.261945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:09.261991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:45:09.265204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:45:09.269378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:45:09.269607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:45:09.270540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:09.270683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:09.270743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:09.271033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:45:09.271084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:09.271276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:45:09.271363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:45:09.273363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:09.273423Z node 1 :FLAT_TX_SCHEMESHARD ... 
shard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-25T14:45:29.603601Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409550 state Offline 2025-06-25T14:45:29.603798Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:45:29.603878Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:1148:3053], Recipient [3:763:2694]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [3:1148:3053] ServerId: [3:1151:3056] } 2025-06-25T14:45:29.603907Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:45:29.604051Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [3:1066:2983], Recipient [3:464:2428]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-25T14:45:29.604079Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-25T14:45:29.604103Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409547 state Offline 2025-06-25T14:45:29.604203Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:45:29.604279Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:1149:3054], Recipient [3:464:2428]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [3:1149:3054] ServerId: [3:1152:3057] } 2025-06-25T14:45:29.604323Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:45:29.604378Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552133, Sender [3:1066:2983], Recipient [3:466:2429]: NKikimrTxDataShard.TEvStateChangedResult TabletId: 72057594046678944 State: 4 2025-06-25T14:45:29.604400Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3137: StateWork, processing event TEvDataShard::TEvStateChangedResult 2025-06-25T14:45:29.604422Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186233409548 state Offline 2025-06-25T14:45:29.604565Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:1150:3055], Recipient [3:466:2429]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046678944 ClientId: [3:1150:3055] ServerId: [3:1153:3058] } 2025-06-25T14:45:29.604592Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:45:29.604698Z node 3 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5 TabletID: 72075186233409550 2025-06-25T14:45:29.604953Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [3:752:2685], Recipient [3:763:2694]: NKikimr::TEvTablet::TEvTabletDead 2025-06-25T14:45:29.605199Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409550 2025-06-25T14:45:29.605298Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 
72075186233409550 Forgetting tablet 72075186233409550 2025-06-25T14:45:29.606720Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-25T14:45:29.606984Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 1 2025-06-25T14:45:29.607182Z node 3 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409547 2025-06-25T14:45:29.607637Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:45:29.607673Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-25T14:45:29.607730Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 Forgetting tablet 72075186233409547 2025-06-25T14:45:29.607963Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [3:447:2415], Recipient [3:464:2428]: NKikimr::TEvTablet::TEvTabletDead 2025-06-25T14:45:29.608193Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409547 2025-06-25T14:45:29.608278Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409547 2025-06-25T14:45:29.609774Z node 3 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409548 2025-06-25T14:45:29.610024Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:45:29.610207Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-25T14:45:29.610500Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:45:29.610646Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 Forgetting tablet 72075186233409548 2025-06-25T14:45:29.610888Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268829696, Sender [3:449:2416], Recipient [3:466:2429]: NKikimr::TEvTablet::TEvTabletDead 2025-06-25T14:45:29.611097Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186233409548 2025-06-25T14:45:29.611174Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186233409548 2025-06-25T14:45:29.614718Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-25T14:45:29.614763Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-25T14:45:29.616553Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 2 candidates, at schemeshard: 72057594046678944 2025-06-25T14:45:29.616662Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 2 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:45:29.616713Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-25T14:45:29.616804Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:45:29.616852Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-25T14:45:29.616885Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:45:29.616916Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:45:29.616947Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:45:29.617087Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:45:29.617118Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409547 2025-06-25T14:45:29.617226Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:45:29.617257Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409548 2025-06-25T14:45:29.618479Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 3 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:45:29.672502Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-25T14:45:29.672869Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 Issues { message: "One of the shards report BUILD_ERROR
: Error: Shard or requested range is empty\n
: Error: Datashard test fail\n at Filling stage, process has to be canceled, shardId: 72075186233409549, shardIdx: 72057594046678944:4" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:4 Status: BUILD_ERROR UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n
: Error: Datashard test fail\n SeqNoRound: 0 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } State: STATE_REJECTED Settings { source_path: "/MyRoot/vectors" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 0 } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 Issues { message: "One of the shards report BUILD_ERROR
: Error: Shard or requested range is empty\n
: Error: Datashard test fail\n at Filling stage, process has to be canceled, shardId: 72075186233409549, shardIdx: 72057594046678944:4" severity: 1 } Issues { message: "TShardStatus { ShardIdx: 72057594046678944:4 Status: BUILD_ERROR UploadStatus: STATUS_CODE_UNSPECIFIED DebugMessage:
: Error: Shard or requested range is empty\n
: Error: Datashard test fail\n SeqNoRound: 0 Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 } }" severity: 1 } State: STATE_REJECTED Settings { source_path: "/MyRoot/vectors" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 0 } >> KqpIndexLookupJoin::CheckCastUint64ToInt64+StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUint64ToInt64+StreamLookupJoin+NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::RenameTables [GOOD] Test command err: 2025-06-25T14:44:54.073680Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898011735509130:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:54.080700Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001798/r3tmp/tmp1ehboC/pdisk_1.dat 2025-06-25T14:44:54.798135Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:54.812097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:54.812186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:54.819096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30149, node 1 2025-06-25T14:44:55.080929Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:55.114411Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:55.114431Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:55.114441Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:55.114546Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27271 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:55.455383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 2025-06-25T14:44:59.074144Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898011735509130:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:59.074221Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 2025-06-25T14:45:02.594478Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898046095248505:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:02.594476Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898046095248515:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:02.594560Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:02.597462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:02.614635Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898046095248519:2319], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:45:02.675803Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898046095248588:2708] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Previous query attempt was finished with unsuccessful status OVERLOADED: Sending retry attempt 1 of 5 Previous query attempt was finished with unsuccessful status CLIENT_RESOURCE_EXHAUSTED: Sending retry attempt 2 of 5 Previous query attempt was finished with unsuccessful status UNAVAILABLE: Sending retry attempt 3 of 5 Previous query attempt was finished with unsuccessful status BAD_SESSION: Sending retry attempt 4 of 5 Previous query attempt was finished with unsuccessful status SESSION_BUSY: Sending retry attempt 5 of 5 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status NOT_FOUND: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status UNDETERMINED: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 Previous query attempt was finished with unsuccessful status TRANSPORT_UNAVAILABLE: Sending retry attempt 1 of 1 2025-06-25T14:45:08.621481Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519898071295067598:2154];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001798/r3tmp/tmp2VztQv/pdisk_1.dat 2025-06-25T14:45:08.679664Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:45:08.749770Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:08.770416Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:08.770488Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:08.775385Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5646, node 4 2025-06-25T14:45:08.818546Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:08.818593Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:08.818602Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:08.818760Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3062 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:09.040737Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:09.626497Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:11.487096Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519898084179970402:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:11.487171Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:11.526491Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first c ... 5: IgniteOperation, opId: 281474976715672:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:45:28.333255Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715672, database: /Root, subject: , status: StatusAccepted, operation: DROP TABLE, path: Root/Table-1 2025-06-25T14:45:28.333328Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [13:7519898158382499087:3581] txid# 281474976715672 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715672} 2025-06-25T14:45:28.333367Z node 13 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [13:7519898158382499087:3581] txid# 281474976715672 SEND to# [13:7519898158382499086:2353] Source {TEvProposeTransactionStatus txid# 281474976715672 Status# 53} 2025-06-25T14:45:28.334709Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:45:28.334828Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:45:28.334843Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:45:28.334899Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:45:28.344868Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750862728391, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:45:28.353222Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 281474976715672, done: 0, blocked: 1 2025-06-25T14:45:28.361638Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-25T14:45:28.361731Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976715672, publications: 2, subscribers: 1 2025-06-25T14:45:28.363036Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:45:28.363154Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:45:28.363162Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:45:28.363225Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:45:28.364457Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715672, subscribers: 1 2025-06-25T14:45:28.378420Z node 13 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 13, TabletId: 72075186224037890 not found 2025-06-25T14:45:28.380067Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at 
schemeshard: 72057594046644480 2025-06-25T14:45:28.384038Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# DropTableRequest, traceId# 01jykrt35z117pac5wy5qpvts3, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:47242, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T14:45:28.384196Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [13:7519898141202627873:2140] Handle TEvProposeTransaction 2025-06-25T14:45:28.384213Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [13:7519898141202627873:2140] TxId# 281474976715673 ProcessProposeTransaction 2025-06-25T14:45:28.384244Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [13:7519898141202627873:2140] Cookie# 0 userReqId# "" txid# 281474976715673 SEND to# [13:7519898158382499180:3668] 2025-06-25T14:45:28.386384Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [13:7519898158382499180:3668] txid# 281474976715673 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "Root" OperationType: ESchemeOpDropTable Drop { Name: "Table-2" } } } DatabaseName: "" RequestType: "" PeerName: "ipv6:[::1]:47242" 2025-06-25T14:45:28.386421Z node 13 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [13:7519898158382499180:3668] txid# 281474976715673 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:45:28.386471Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [13:7519898158382499180:3668] txid# 281474976715673 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:45:28.386851Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [13:7519898158382499180:3668] txid# 281474976715673 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:45:28.386956Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [13:7519898158382499180:3668] HANDLE EvNavigateKeySetResult, txid# 281474976715673 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:45:28.387004Z node 13 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [13:7519898158382499180:3668] txid# 281474976715673 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-25T14:45:28.387171Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [13:7519898158382499180:3668] txid# 281474976715673 HANDLE EvClientConnected 2025-06-25T14:45:28.387351Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_drop_table.cpp:492: TDropTable Propose, path: Root/Table-2, pathId: 0, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-25T14:45:28.387494Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715673:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:45:28.389462Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715673, database: /Root, subject: , status: StatusAccepted, operation: DROP TABLE, path: Root/Table-2 2025-06-25T14:45:28.389528Z node 13 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [13:7519898158382499180:3668] txid# 281474976715673 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715673} 2025-06-25T14:45:28.389571Z node 13 :TX_PROXY DEBUG: 
schemereq.cpp:556: Actor# [13:7519898158382499180:3668] txid# 281474976715673 SEND to# [13:7519898158382499179:2357] Source {TEvProposeTransactionStatus txid# 281474976715673 Status# 53} 2025-06-25T14:45:28.390837Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:45:28.390950Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:45:28.391035Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:45:28.391096Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:45:28.399371Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750862728447, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:45:28.404372Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 281474976715673, done: 0, blocked: 1 2025-06-25T14:45:28.407590Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:45:28.407727Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:45:28.407746Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T14:45:28.407805Z node 13 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T14:45:28.409123Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715673:0 2025-06-25T14:45:28.416323Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000166280] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.416329Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d3280] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.416562Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000df880] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.416586Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d3880] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.416747Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000168c80] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.416772Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000094880] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.416921Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00013a480] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.416953Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000131480] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.417081Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000180080] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.417134Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00005ca80] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.417261Z node 13 :GRPC_SERVER DEBUG: 
grpc_server.cpp:283: [0x51a00005ee80] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.417314Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00005e880] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.417411Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00005dc80] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.417480Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00005d080] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.417588Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00005c480] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.417662Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d9e80] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.417755Z node 13 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00005d680] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 2025-06-25T14:45:28.419130Z node 13 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 13, TabletId: 72075186224037889 not found 2025-06-25T14:45:28.420658Z node 13 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 >> KqpIndexLookupJoin::LeftSemiJoinWithLeftFilter+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftSemiJoinWithDuplicatesInRightTable-StreamLookupJoin >> KqpIndexLookupJoin::SimpleLeftOnlyJoin-StreamLookup [GOOD] >> KqpIndexLookupJoin::SimpleLeftSemiJoin+StreamLookup >> KqpIndexLookupJoin::CheckCastUint32ToUint16-StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUint32ToUint16-StreamLookupJoin+NotNull >> KqpIndexLookupJoin::CheckCastInt64ToUint64-StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastInt64ToUint64-StreamLookupJoin+NotNull >> IndexBuildTest::IndexPartitioningIsPersisted [GOOD] >> KqpJoin::RightSemiJoin_FullScan [GOOD] >> KqpJoin::RightSemiJoin_ComplexSecondaryIndexPrefix >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientDoesNotProvideClientCerts [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_AuthNotRequired >> KqpJoinOrder::ShuffleEliminationReuseShuffleTwoJoins >> KqpJoinOrder::SortingsWithLookupJoin1+RemoveLimitOperator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::IndexPartitioningIsPersisted [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:45:00.205470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:45:00.205580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 
1.000000s, InflightLimit# 10 2025-06-25T14:45:00.205619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:45:00.205655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:45:00.206347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:45:00.206391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:45:00.206474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:00.206924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:45:00.207651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:45:00.209531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:45:00.284076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:00.284132Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:00.302310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:45:00.302757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:45:00.303014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:45:00.310492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:45:00.310927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:45:00.314498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:00.315892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:45:00.321831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:00.323029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:00.335561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:00.335649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:00.335917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:00.335976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:00.336029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:00.336110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.342941Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:45:00.477857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:45:00.478084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.478295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:45:00.478338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:45:00.478560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:45:00.478654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:00.481280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:00.481434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:45:00.481654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.481727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:45:00.481763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:45:00.481796Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:45:00.485354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.485406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:45:00.485462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:45:00.487627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.487675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.487727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:00.487766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:45:00.491106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:45:00.494241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:45:00.494438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:45:00.495279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:00.495421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:00.495477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:00.496822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:45:00.496884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:00.497116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path 
for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:45:00.497233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:45:00.499263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:00.499312Z node 1 :FLAT_TX_SCHEMESHARD ... ntSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:32.197307Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:45:32.197567Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index" took 288us result status StatusSuccess 2025-06-25T14:45:32.198283Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index" PathDescription { Self { Name: "Index" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 
MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "Index" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:32.198873Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:45:32.199208Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable" took 345us result status StatusSuccess 2025-06-25T14:45:32.200058Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "alice" } } Tuple { } } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "bob" } } Tuple { } } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\005\000\000\000alice\000\000\000\200" IsPoint: false IsInclusive: 
false DatashardId: 72075186233409547 } TablePartitions { EndOfRangeKeyPrefix: "\002\000\003\000\000\000bob\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 3 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpIndexLookupJoin::LeftJoinRightNullFilter+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftJoinRightNullFilter-StreamLookup >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Query [GOOD] >> OlapEstimationRowsCorrectness::TPCDS96 >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Table >> TxUsage::ReadRuleGeneration [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesExpiredCert [GOOD] >> TxUsage::WriteToTopic_Demo_31_Query [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Table >> KqpIndexLookupJoin::CheckCastUint64ToInt64+StreamLookupJoin+NotNull [GOOD] >> YdbTableBulkUpsert::DecimalPK [GOOD] >> TxUsage::WriteToTopic_Demo_32_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_ClientProvidesExpiredCert [GOOD] Test command err: 2025-06-25T14:44:33.949286Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897920687471090:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:33.956620Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001801/r3tmp/tmpa98fCO/pdisk_1.dat 2025-06-25T14:44:34.285716Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2546, node 1 
2025-06-25T14:44:34.353116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:34.353205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:34.394739Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:34.412106Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:34.412126Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:34.412133Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:34.414889Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63443 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:34.676216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:34.755296Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 0CE16983231F51159CCED0D55DBE013F3A6824F87814ACCA486AC09E08717E2D (ipv6:[::1]:57202) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-25T14:44:34.828609Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:57224) has now valid token of root@builtin 2025-06-25T14:44:34.885817Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:44:34.885848Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:44:34.885857Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:44:34.885898Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:44:34.950460Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:37.851369Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897941774414544:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:37.851771Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001801/r3tmp/tmpBy3PkE/pdisk_1.dat 2025-06-25T14:44:38.057388Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18777, node 4 2025-06-25T14:44:38.175933Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:38.176014Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:38.229210Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:38.292149Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:38.292190Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:38.292203Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:38.292373Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24862 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:38.539812Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:38.646450Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 0CE16983231F51159CCED0D55DBE013F3A6824F87814ACCA486AC09E08717E2D (ipv6:[::1]:55998) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-25T14:44:38.727904Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:56022) has now valid token of root@builtin 2025-06-25T14:44:38.861218Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:44:38.861246Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:44:38.861255Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:44:38.861304Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:44:38.866996Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:42.752572Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897960842019601:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:42.752619Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001801/r3tmp/tmpTxTHo2/pdisk_1.dat 2025-06-25T14:44:42.895124Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:42.897435Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:42.897499Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:42.904485Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21852, node 7 2025-06-25T14:44:42.993534Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:42.993552Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:42.993558Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:42.993681Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23526 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:43.225480Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called ... : HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:12.034491Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:12.041175Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23136, node 25 2025-06-25T14:45:12.109615Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:12.109647Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:12.109657Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:12.109856Z node 25 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13281 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:12.439168Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:12.850798Z node 25 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:16.842807Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[25:7519898084359182619:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:16.842903Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; E0625 14:45:22.521353745 411311 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:22.546228105 407612 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:22.578373678 407612 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:22.597298542 407612 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:22.630397470 407612 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:22.651731612 411344 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:22.685009426 407612 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:22.703435178 407711 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. 
E0625 14:45:22.760258355 407612 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:22.781977431 411455 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:22.818643483 407611 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:22.842779902 407611 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. 2025-06-25T14:45:24.336362Z node 28 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[28:7519898141693233905:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:24.336433Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001801/r3tmp/tmppWNTsi/pdisk_1.dat 2025-06-25T14:45:24.514060Z node 28 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:24.547252Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:24.547377Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:24.557400Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10421, node 28 2025-06-25T14:45:24.654659Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:24.654690Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:24.654700Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:24.654921Z node 28 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26068 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:45:25.105287Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:25.348465Z node 28 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:29.336650Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[28:7519898141693233905:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:29.336746Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; E0625 14:45:35.199665003 411894 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:35.226616357 412107 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:35.281407136 412107 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:35.300707837 411894 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:35.353903206 412107 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:35.377007578 412107 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:35.406200538 416554 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:35.423805843 411894 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:35.456422407 411895 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:35.482428926 412107 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:35.521045417 411894 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. E0625 14:45:35.540196530 412108 ssl_transport_security.cc:1431] Handshake failed with fatal error SSL_ERROR_SSL: error:1417C086:SSL routines:tls_process_client_certificate:certificate verify failed. 
>> KqpIndexLookupJoin::LeftSemiJoinWithDuplicatesInRightTable-StreamLookupJoin [GOOD] >> KqpIndexLookupJoin::CheckCastUint32ToUint16-StreamLookupJoin+NotNull [GOOD] >> KqpIndexLookupJoin::SimpleLeftSemiJoin+StreamLookup [GOOD] >> KqpIndexLookupJoin::CheckCastInt64ToUint64-StreamLookupJoin+NotNull [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUint64ToInt64+StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 18679, MsgBus: 61513 2025-06-25T14:45:25.312488Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898144879496355:2160];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:25.321023Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ea8/r3tmp/tmpcirUzH/pdisk_1.dat 2025-06-25T14:45:25.745131Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:25.747965Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898144879496225:2080] 1750862725292534 != 1750862725292537 TServer::EnableGrpc on GrpcPort 18679, node 1 2025-06-25T14:45:25.808881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:25.809081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:25.819242Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:25.888827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:25.888854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:25.888859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:25.892452Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61513 2025-06-25T14:45:26.312754Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:61513 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:26.576665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:26.598669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:26.617385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:26.799912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:26.960404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:27.042266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:28.248911Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898157764399758:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.249041Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.571834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.596584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.620976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.646498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.675133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.705287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.746846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.794675Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898157764400417:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.794734Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.794739Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898157764400422:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.798152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:28.806626Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898157764400424:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:45:28.904874Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898157764400475:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:29.878098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.931897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/yd ... ; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ea8/r3tmp/tmpXgolnI/pdisk_1.dat 2025-06-25T14:45:31.433485Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:31.434217Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:31.434283Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:31.434459Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519898171679621208:2080] 1750862731271662 != 1750862731271665 2025-06-25T14:45:31.444713Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5861, node 2 2025-06-25T14:45:31.492604Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:31.492632Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:31.492643Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:31.492743Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62222 TClient is connected to server localhost:62222 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:31.987078Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:31.997365Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:32.010420Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:32.102375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:32.235772Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:32.304669Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:32.331246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:34.460602Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898184564524727:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:34.460700Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:34.521581Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.560868Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.589327Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.627131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.660466Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.692785Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.729311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.791172Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898184564525382:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:34.791246Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:34.791507Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898184564525387:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:34.795044Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:34.808288Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898184564525389:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:34.882442Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898184564525440:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:35.924293Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.986951Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.284892Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898171679621248:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:36.284958Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::DecimalPK [GOOD] Test command err: 2025-06-25T14:44:40.648680Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897952914141681:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:40.648721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d8/r3tmp/tmpqlD9fY/pdisk_1.dat 2025-06-25T14:44:41.283143Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30601, node 1 2025-06-25T14:44:41.377645Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:44:41.432072Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:41.432092Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:41.432101Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:41.432232Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:41.515172Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:41.515255Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-25T14:44:41.535102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22824 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:44:41.780624Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:41.860459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:43.650367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) SUCCESS 2025-06-25T14:44:43.852402Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897965799044751:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:43.852468Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897965799044743:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:43.852621Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:43.855759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:44:43.880878Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897965799044757:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:44:43.941527Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897965799044827:2799] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:44:44.681097Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrrqpaay75qw6jhy3g7z9z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Njg0Y2EzZS00YmJlMWFmMS00NDI5Y2VmNy1iYjA5OTI5OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-06-25T14:44:45.065145Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jykrrrhff5hnys0pszxh3870, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Njg0Y2EzZS00YmJlMWFmMS00NDI5Y2VmNy1iYjA5OTI5OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 1 rows 2025-06-25T14:44:45.140676Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:44:45.161526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) SUCCESS 2025-06-25T14:44:45.540850Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jykrrs1d0vwnbt7b5w0jxkr3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjgyOGU4NC03NWM0Mjk5NC1jYjZiMjYxZC1lYWQ5MDkxOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-06-25T14:44:45.652386Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897952914141681:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:45.652432Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:45.890400Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jykrrsbkatb606xwn9q5s0g9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjgyOGU4NC03NWM0Mjk5NC1jYjZiMjYxZC1lYWQ5MDkxOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root SUCCESS count returned 1 rows 2025-06-25T14:44:45.947041Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-25T14:44:45.979620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) SUCCESS 2025-06-25T14:44:46.404519Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710669. Ctx: { TraceId: 01jykrrsthf9srh3mb0pfcfqc7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWJlYjU5N2EtNGZiYTY4M2ItMmRmZjZjYzEtZTIzM2YyYWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-06-25T14:44:46.786125Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710670. Ctx: { TraceId: 01jykrrt6g4w84h1637b9qyg3s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWJlYjU5N2EtNGZiYTY4M2ItMmRmZjZjYzEtZTIzM2YyYWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 1 rows 2025-06-25T14:44:46.839898Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-25T14:44:46.863698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) SUCCESS 2025-06-25T14:44:47.308400Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { TraceId: 01jykrrtr49hp0cp76psn1px1t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjY4MzRmZWEtNjkyZTlmYi02OGE1Y2FhMy03ZGJiN2Q2, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 0 rows 2025-06-25T14:44:47.651116Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710674. Ctx: { TraceId: 01jykrrv2s36r7d6qqfnx0svfk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjY4MzRmZWEtNjkyZTlmYi02OGE1Y2FhMy03ZGJiN2Q2, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 1 rows 2025-06-25T14:44:47.708967Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-25T14:44:47.759272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) SUCCESS 2025-06-25T14:44:48.244877Z node 1 :KQP_EXE ... 
hard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:20.530797Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:22.554205Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Traces' unknown table
: Error: Bulk upsert to table '/Root/Logs' Missing key columns: Timestamp
: Error: Bulk upsert to table '/Root/Logs' Missing key columns: Shard
: Error: Bulk upsert to table '/Root/Logs' Type mismatch, got type Uint64 for column App, but expected Utf8
: Error: Bulk upsert to table '/Root/Logs' Type mismatch, got type Uint64 for column Message, but expected Utf8
: Error: Bulk upsert to table '/Root/Logs' Unknown column: HttpCode 2025-06-25T14:45:24.753557Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519898139405164303:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:24.753615Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d8/r3tmp/tmps9dU3K/pdisk_1.dat 2025-06-25T14:45:24.911347Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:24.928263Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:24.928402Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:24.933845Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7413, node 10 2025-06-25T14:45:24.987804Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:24.987833Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:24.987842Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:24.987977Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27872 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:25.265634Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:45:25.780595Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:28.297173Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/Limits' Row key size of 1100002 bytes is larger than the allowed threshold 1049600
: Error: Bulk upsert to table '/Root/Limits' Row key size of 1100002 bytes is larger than the allowed threshold 1049600
: Error: Bulk upsert to table '/Root/Limits' Row key size of 1100000 bytes is larger than the allowed threshold 1049600
: Error: Bulk upsert to table '/Root/Limits' Row cell size of 17000022 bytes is larger than the allowed threshold 16777216 2025-06-25T14:45:31.142322Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519898173579953680:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:31.142372Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017d8/r3tmp/tmp0bmDpG/pdisk_1.dat 2025-06-25T14:45:31.408449Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21200, node 13 2025-06-25T14:45:31.486567Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:31.486723Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:31.511328Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:31.536982Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:31.537009Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:31.537021Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:31.537219Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65330 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:31.865122Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:45:32.152473Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:35.152483Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.321818Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519898190759824027:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.321824Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519898190759824019:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.321898Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.327061Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:35.352514Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519898190759824033:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:45:35.410108Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519898190759824106:2803] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:35.657962Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykrt9yp7npmh8tp0v3246p0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ODU5NmQ3MDItOWI5YmUyMTYtZDQ4YzYxZi04NzUzYzI5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Table [GOOD] >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_AuthNotRequired [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftSemiJoinWithDuplicatesInRightTable-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 10395, MsgBus: 1551 2025-06-25T14:45:25.291408Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898146336167787:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:25.291506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ead/r3tmp/tmptzU8hT/pdisk_1.dat 2025-06-25T14:45:25.688454Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898146336167763:2080] 1750862725286470 != 1750862725286473 2025-06-25T14:45:25.706553Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:25.733374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:25.744492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:25.756861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10395, node 1 2025-06-25T14:45:25.888787Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:25.888815Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:25.888821Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:25.888932Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1551 2025-06-25T14:45:26.320936Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1551 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:26.576124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:26.616773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:26.814514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:26.954359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:45:27.029068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.313003Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898159221071296:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.313127Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.571768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.595183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.622199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.655814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.678918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.706743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.737519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.794211Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898159221071951:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.794270Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.794383Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898159221071956:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.798248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:28.811004Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898159221071958:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:45:28.872164Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898159221072009:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:29.894081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.926059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.962582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ... a/build/build_root/yft8/000ead/r3tmp/tmpfZcuGS/pdisk_1.dat 2025-06-25T14:45:31.743959Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:45:31.813431Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:31.814596Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519898172303409882:2080] 1750862731700636 != 1750862731700639 TServer::EnableGrpc on GrpcPort 11897, node 2 2025-06-25T14:45:31.892689Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:31.892762Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:31.901030Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:31.952749Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:31.952767Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:31.952775Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:31.952867Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4924 TClient is connected to server localhost:4924 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:45:32.382635Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:32.405494Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:32.478594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:32.630799Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:32.711498Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:32.717040Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:34.685297Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898185188313411:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:34.685374Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:34.741698Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.780407Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.813593Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.881744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.909590Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.946041Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.014863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.076098Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898189483281373:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.076162Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.076199Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898189483281378:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.079010Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:35.089375Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898189483281380:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:35.147228Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898189483281431:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:36.218715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.263637Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.707915Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898172303410113:2243];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:36.717856Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUint32ToUint16-StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 3829, MsgBus: 13801 2025-06-25T14:45:25.739239Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898143887389468:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:25.740627Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e94/r3tmp/tmpudYaes/pdisk_1.dat 2025-06-25T14:45:26.118616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:26.118735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:26.123433Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:26.126519Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:26.158202Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898143887389368:2080] 1750862725727949 != 1750862725727952 TServer::EnableGrpc on GrpcPort 3829, node 1 2025-06-25T14:45:26.212546Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:26.212574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from 
file: (empty maybe) 2025-06-25T14:45:26.212581Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:26.212687Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13801 2025-06-25T14:45:26.749451Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13801 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:26.935355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:26.971092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:26.997360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:27.150548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:27.302673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:27.360252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:28.869848Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898156772292892:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.869951Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.092815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.114050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.135109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.157424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.179752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.248340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.293328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.372512Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898161067260854:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.372586Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.372661Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898161067260859:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.376170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:29.386980Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898161067260861:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:29.487987Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898161067260912:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:30.426516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:30.474104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ ... me/runner/.ya/build/build_root/yft8/000e94/r3tmp/tmpZJS9Fa/pdisk_1.dat 2025-06-25T14:45:32.011785Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:45:32.109210Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:32.122095Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:32.122177Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:32.149438Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14864, node 2 2025-06-25T14:45:32.208844Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:32.208865Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:32.208873Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:32.208980Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22444 TClient is connected to server localhost:22444 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:45:32.685944Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:32.692781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:45:32.701208Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:32.764638Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:32.950771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:32.958266Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:33.030788Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:35.089376Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898187511394277:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.089444Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.146953Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.185154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.228662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.270384Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.313431Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.360037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.433303Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.532484Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898187511394938:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.532537Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.532678Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898187511394943:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.535114Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:35.553746Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898187511394945:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:45:35.617225Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898187511394996:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:36.678355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.742108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.946724Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898170331523614:2164];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:36.946792Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::SimpleLeftSemiJoin+StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 11087, MsgBus: 14298 2025-06-25T14:45:25.290435Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898146058233412:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:25.290493Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000eaa/r3tmp/tmpFoqAVy/pdisk_1.dat 2025-06-25T14:45:25.744479Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898146058233386:2080] 1750862725288880 != 1750862725288883 2025-06-25T14:45:25.759697Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:25.790577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:25.790687Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:25.792344Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11087, node 1 2025-06-25T14:45:25.889952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:25.889985Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T14:45:25.889992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:25.892351Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14298 2025-06-25T14:45:26.304675Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14298 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:26.576708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:26.599086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:26.615191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:26.792142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:26.947358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:27.027019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:28.264763Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898158943136921:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.264864Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.581914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.605999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.629196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.653020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.676755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.708282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.776018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.855576Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898158943137583:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.855655Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.855679Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898158943137588:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.859200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:28.868092Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898158943137590:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:45:28.966134Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898158943137641:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:29.888658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.917741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/yd ... lf { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:32.411747Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:32.418800Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:32.422108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:32.499436Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
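[Editor's note, not part of the captured log] The warning sequence above repeats for every test that bootstraps a fresh server: the KQP workload service reports that the default resource pool is missing (NOT_FOUND), schedules a retry while the pool is created under /Root/.metadata/workload_manager/pools/default, and the follow-up TX_PROXY "path exist, request accepts it" message indicates the creation merely raced with an already-existing path. When triaging a log of this size it can help to count these recurring signatures and keep only the remaining WARN/ERROR entries for review. The following is a minimal, illustrative Python sketch (not part of the YDB test suite); the signature strings are copied verbatim from the entries above, and treating them as benign is an assumption of the sketch, not something the log itself states.

#!/usr/bin/env python3
# benign_warning_triage.py -- illustrative helper only, not part of the YDB test suite.
# Reads a test log from stdin, counts entries matching the recurring "default
# resource pool bootstrap" signatures seen above, and keeps every other
# WARN/ERROR entry for manual review. Treating these signatures as benign is
# an assumption of this sketch, not something the log asserts.
import sys
from collections import Counter

BENIGN_SIGNATURES = {
    "resource_pool_not_found":
        "Resource pool default not found or you don't have access permissions",
    "resource_pool_doublecheck":
        "completed, doublechecking",
    "resource_pool_path_exists":
        "error: path exist, request accepts it",
    "schemeshard_undo_unsafe":
        "propose itself is undo unsafe",
    "tx_conveyor_wakeup":
        "unexpected event for task executor",
}

def triage(lines):
    """Return (benign_counts, suspicious_lines) for an iterable of log lines."""
    benign = Counter()
    suspicious = []
    for line in lines:
        matched = False
        for name, signature in BENIGN_SIGNATURES.items():
            if signature in line:
                benign[name] += 1
                matched = True
        # Anything carrying a severity marker but no known-benign signature
        # is kept for manual review.
        if not matched and (" ERROR: " in line or " WARN: " in line):
            suspicious.append(line.rstrip())
    return benign, suspicious

if __name__ == "__main__":
    counts, leftovers = triage(sys.stdin)
    for name, count in counts.most_common():
        print(f"{count:6d}  {name}")
    print(f"{len(leftovers):6d}  WARN/ERROR entries left for manual review")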
2025-06-25T14:45:32.704804Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:32.746678Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:32.843037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:34.821517Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898182502655266:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:34.821588Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:34.880835Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.913658Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:34.954985Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.004195Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.057238Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.133892Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.170771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.235313Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898186797623224:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.235391Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.235746Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898186797623229:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.239614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:35.253580Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898186797623231:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:35.350694Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898186797623282:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:36.409955Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.444156Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.478587Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.510492Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.540542Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.609941Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.699545Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898169617751779:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:36.699601Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpJoin::JoinWithDuplicates ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastInt64ToUint64-StreamLookupJoin+NotNull [GOOD] 
Test command err: Trying to start YDB, gRPC: 25909, MsgBus: 8495 2025-06-25T14:45:26.447186Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898149834762684:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:26.464627Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e92/r3tmp/tmpBc5Rkg/pdisk_1.dat 2025-06-25T14:45:26.857864Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:26.860490Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898149834762660:2080] 1750862726435301 != 1750862726435304 TServer::EnableGrpc on GrpcPort 25909, node 1 2025-06-25T14:45:26.882202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:26.882295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:26.916519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:26.944808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:26.944842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:26.944851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:26.944970Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8495 TClient is connected to server localhost:8495 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:27.440086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:45:27.467840Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:27.468345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:27.588443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:27.722845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:27.791567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:29.059539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898162719666178:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.059622Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.310298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.333381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.356200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.381026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.406605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.435072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.468456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.532905Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898162719666833:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.532976Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.533188Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898162719666838:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.537094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:29.547313Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898162719666840:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:45:29.617132Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898162719666891:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:30.484408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:30.546072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:31.447612Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExist ... de 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e92/r3tmp/tmpvoF9gS/pdisk_1.dat 2025-06-25T14:45:32.323676Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:32.335969Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:32.336065Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:32.338549Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12830, node 2 2025-06-25T14:45:32.388813Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:32.388833Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:32.388840Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:32.388939Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64035 TClient is connected to server localhost:64035 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:32.910621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:32.916199Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:32.923287Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:33.013414Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:33.171633Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:33.182314Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:33.259561Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:35.334352Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898187099371378:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.334436Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.378175Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.451230Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.516196Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.543976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.570226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.638541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.675693Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.748479Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898187099372045:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.748605Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.748937Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898187099372050:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:35.752124Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:35.759886Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898187099372052:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:35.851758Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898187099372103:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:36.918000Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.031478Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.144403Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898174214467861:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:37.156169Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> TRegisterNodeOverDiscoveryService::ServerWithCertVerification_AuthNotRequired [GOOD] Test command err: 2025-06-25T14:44:54.811243Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898012115642454:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:54.811380Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001797/r3tmp/tmpMDmT0Z/pdisk_1.dat 2025-06-25T14:44:55.295081Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:55.299931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:55.300024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:55.305700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:55.333108Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 31729, node 1 2025-06-25T14:44:55.385366Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:55.385395Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:55.385403Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: 
failed to initialize from file: (empty maybe) 2025-06-25T14:44:55.385532Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7301 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:55.784578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:55.815254Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:55.909170Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket CDE00E30EBF77CC38D9D48280E08C92D42DC5AC624513352BDE11567F637F5DC (ipv6:[::1]:45418) has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-25T14:44:55.909775Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket CDE00E30EBF77CC38D9D48280E08C92D42DC5AC624513352BDE11567F637F5DC: Cannot create token from certificate. 
Client certificate failed verification 2025-06-25T14:44:55.989588Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:45424) has now valid token of root@builtin 2025-06-25T14:44:56.065606Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:44:56.065645Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:44:56.065653Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:44:56.065680Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:44:59.084861Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519898034598994048:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:59.085445Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001797/r3tmp/tmpGXUxY3/pdisk_1.dat 2025-06-25T14:44:59.303443Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:59.322209Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:59.322298Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:59.326733Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14584, node 4 2025-06-25T14:44:59.408689Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:59.408714Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:59.408722Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:59.408874Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1098 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
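[Editor's note, not part of the captured log] The TRegisterNodeOverDiscoveryService output interleaves certificate checks across several nodes: some tickets end with "has now valid token of root@builtin" (or of the certificate subject), while others end with the permanent "Cannot create token from certificate. Client certificate failed verification" error, which is the behaviour this test exercises. When the interleaving makes the verdicts hard to follow, one way to read the log is to collect the last TICKET_PARSER verdict seen per ticket. The sketch below is illustrative only and assumes the two message shapes visible above are the only ones of interest.

#!/usr/bin/env python3
# ticket_verdicts.py -- illustrative only; not part of ydb/services/ydb/ut.
# Collects the last TICKET_PARSER verdict observed for each ticket in a log
# read from stdin. Only the two message shapes quoted above are recognised;
# everything else is ignored, which is an assumption of this sketch.
import re
import sys

# "Ticket <id> (...) has now valid token of <subject>"
VALID = re.compile(r"Ticket (\S+).*?has now valid token of (\S+)")
# "Ticket <id> (...) has now permanent error message '<reason>'"
ERROR = re.compile(r"Ticket (\S+).*?has now permanent error message '([^']*)'")

def last_verdicts(lines):
    """Map ticket id -> last observed verdict string."""
    verdicts = {}
    for line in lines:
        if "TICKET_PARSER" not in line:
            continue
        m = VALID.search(line)
        if m:
            verdicts[m.group(1)] = f"valid token of {m.group(2)}"
            continue
        m = ERROR.search(line)
        if m:
            verdicts[m.group(1)] = f"permanent error: {m.group(2)}"
    return verdicts

if __name__ == "__main__":
    for ticket, verdict in last_verdicts(sys.stdin).items():
        print(f"{ticket}  {verdict}")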
2025-06-25T14:44:59.697834Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:59.791664Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket CDE00E30EBF77CC38D9D48280E08C92D42DC5AC624513352BDE11567F637F5DC (ipv6:[::1]:40140) has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-25T14:44:59.792126Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket CDE00E30EBF77CC38D9D48280E08C92D42DC5AC624513352BDE11567F637F5DC: Cannot create token from certificate. Client certificate failed verification 2025-06-25T14:44:59.856841Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:40152) has now valid token of root@builtin 2025-06-25T14:44:59.925698Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:44:59.925724Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:44:59.925732Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:44:59.925765Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:45:00.134165Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:03.444670Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519898050242852776:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:03.444720Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001797/r3tmp/tmpLtz5Bl/pdisk_1.dat 2025-06-25T14:45:03.589219Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:03.589293Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:03.589789Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:03.611428Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13220, node 7 2025-06-25T14:45:03.755797Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:03.755849Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:03.755858Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:03.755997Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is 
connected to server localhost:14113 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name ... 7968897 Node(19, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:22.601432Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:22.610237Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3969, node 19 2025-06-25T14:45:22.730367Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:22.730387Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:22.730394Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:22.730539Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22121 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:23.059302Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:45:23.190704Z node 19 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:47298) has now valid token of root@builtin 2025-06-25T14:45:23.238580Z node 19 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:45:23.238606Z node 19 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:45:23.238615Z node 19 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:45:23.238649Z node 19 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:45:27.660685Z node 22 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[22:7519898154511213505:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:27.662915Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001797/r3tmp/tmpw2pcI7/pdisk_1.dat 2025-06-25T14:45:27.865813Z node 22 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:27.891838Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:27.891936Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:27.897115Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7521, node 22 2025-06-25T14:45:28.035864Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:28.035900Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:28.035910Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:28.036060Z node 22 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6255 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:45:28.403074Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:28.531832Z node 22 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (B6C6F477) (ipv6:[::1]:36466) has now valid token of root@builtin 2025-06-25T14:45:28.594782Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db /Root, token db , DomainLoginOnly 1 2025-06-25T14:45:28.594820Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-25T14:45:28.594834Z node 22 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-25T14:45:28.594893Z node 22 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (0C093832): Could not find correct token validator 2025-06-25T14:45:28.688188Z node 22 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:33.478911Z node 25 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[25:7519898181277977079:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:33.478997Z node 25 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001797/r3tmp/tmpJOsqLR/pdisk_1.dat 2025-06-25T14:45:33.685710Z node 25 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:33.758587Z node 25 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 25 Type# 268639257 TServer::EnableGrpc on GrpcPort 11376, node 25 2025-06-25T14:45:33.819505Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:33.819603Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:33.845365Z node 25 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(25, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:33.856367Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:33.856393Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:33.856404Z node 25 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:33.856564Z node 25 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3791 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:34.290682Z node 25 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:34.432846Z node 25 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 713C035BF5C74B21E2A498C90C8F508D1DD6EE36C93B5272AF5095B3CEC8C076 (ipv6:[::1]:47816) has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-25T14:45:34.497216Z node 25 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:34.596758Z node 25 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (717F937C): Unknown token 2025-06-25T14:45:34.661203Z node 25 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 5A8CDD20A8744DF27C2E35D0E3402C554387C3FC958DE9D48E643698EA56F618 (ipv6:[::1]:47868) has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-25T14:45:34.661860Z node 25 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 5A8CDD20A8744DF27C2E35D0E3402C554387C3FC958DE9D48E643698EA56F618: Cannot create token from certificate. 
Client certificate failed verification >> KqpJoinOrder::TPCDSEveryQueryWorks-ColumnStore >> TxUsage::WriteToTopic_Demo_17_Query [GOOD] >> KqpJoin::RightSemiJoin_SimpleKey >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Table >> OlapEstimationRowsCorrectness::TPCH2 >> KqpJoinOrder::FiveWayJoinWithComplexPreds2+ColumnStore >> OlapEstimationRowsCorrectness::TPCH21 >> TxUsage::Sinks_Oltp_WriteToTopics_3_Query [GOOD] >> TxUsage::Transactions_Conflict_On_SeqNo_Table >> KqpJoin::RightSemiJoin_ComplexSecondaryIndexPrefix [GOOD] >> KqpIndexLookupJoin::LeftJoinRightNullFilter-StreamLookup [GOOD] >> KqpIndexLookupJoin::InnerJoinOnlyLeftColumn+StreamLookup >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Query >> TxUsage::Sinks_Oltp_WriteToTopics_4_Table >> KqpJoinOrder::FiveWayJoinWithConstantFold-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::RightSemiJoin_ComplexSecondaryIndexPrefix [GOOD] Test command err: Trying to start YDB, gRPC: 29202, MsgBus: 18822 2025-06-25T14:45:25.499604Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898146879755514:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:25.508813Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e98/r3tmp/tmpx5H4LS/pdisk_1.dat 2025-06-25T14:45:25.911363Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:25.912476Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898146879755482:2080] 1750862725497236 != 1750862725497239 TServer::EnableGrpc on GrpcPort 29202, node 1 2025-06-25T14:45:25.949871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:25.958259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:25.975741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:25.988123Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:25.988160Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:25.988169Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:25.988291Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18822 TClient is connected to server localhost:18822 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:45:26.531169Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:26.619149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:26.648138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:26.813405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:45:26.967805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:27.056501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:28.537291Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898159764659004:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.537626Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.841238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.909219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.934613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.964228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.993239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.020136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.049273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.099205Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898164059626961:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.099312Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.099518Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898164059626966:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.102865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:29.112514Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898164059626968:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:29.214504Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898164059627019:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:30.211270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:30.245244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:30.275712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part propo ... t_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:33.072295Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7330 TClient is connected to server localhost:7330 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:33.647743Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:45:33.660943Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:33.674556Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:33.744783Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:33.772603Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:33.923899Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:34.006321Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:36.137120Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898195148365347:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:36.137264Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:36.183959Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.220123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.258173Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.290593Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.337540Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.426202Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.501224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.613377Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898195148366012:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:36.613464Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:36.613715Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898195148366017:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:36.619045Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:36.633073Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898195148366019:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:36.721947Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898195148366070:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:37.731235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.756231Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898177968494536:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:37.756349Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:37.817935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.898330Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.943469Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.981814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftJoinRightNullFilter-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 15887, MsgBus: 26475 2025-06-25T14:45:26.799449Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898149322375285:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:26.799642Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e91/r3tmp/tmpYyCMsa/pdisk_1.dat 2025-06-25T14:45:27.117857Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15887, node 1 2025-06-25T14:45:27.201734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:27.201833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:27.207056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:27.207078Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:27.207105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:27.207229Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:45:27.207275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26475 TClient is connected to server localhost:26475 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:27.711559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:27.724029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:27.743489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:27.826554Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:27.889654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:28.043863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:45:28.117674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.689631Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898162207278783:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.689738Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:29.949955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.982991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:30.052045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:30.091272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:30.130065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:30.203222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:30.231808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:30.311318Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898166502246747:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:30.311388Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:30.311539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898166502246752:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:30.314929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:30.326983Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898166502246754:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:30.398665Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898166502246805:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:31.441249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:31.487637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:31.515782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but pr ... lf { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:34.855825Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:34.861441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:34.875283Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:34.988569Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:35.112216Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:45:35.155335Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:35.217249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:37.181258Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898198300727991:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:37.181331Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:37.239983Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.273057Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.306933Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.342352Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.378315Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.438992Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.475145Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.538490Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898198300728649:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:37.538572Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:37.538823Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898198300728654:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:37.542057Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:37.553116Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898198300728656:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:37.615171Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898198300728707:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:38.616132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:38.649968Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:38.684984Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:38.714229Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:38.755047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:38.793144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:39.080513Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898185415824489:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:39.080577Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> YdbOlapStore::LogNonExistingUserId [GOOD] >> YdbOlapStore::LogPagingBefore >> KqpJoinOrder::SortingsWithLookupJoin2-RemoveLimitOperator >> KqpJoin::ExclusionJoin >> 
KqpJoin::JoinWithDuplicates [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_NestedJoin >> KqpIndexLookupJoin::InnerJoinOnlyLeftColumn+StreamLookup [GOOD] >> KqpIndexLookupJoin::InnerJoinOnlyLeftColumn-StreamLookup >> TxUsage::WriteToTopic_Demo_44_Query [GOOD] >> TxUsage::WriteToTopic_Demo_32_Query [GOOD] >> KqpJoin::RightSemiJoin_SimpleKey [GOOD] >> KqpJoin::RightTableIndexPredicate >> TxUsage::WriteToTopic_Demo_45_Table >> KqpJoin::ExclusionJoin [GOOD] >> KqpJoin::CrossJoinCount >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Table [GOOD] >> SystemView::ShowCreateTablePartitionPolicyIndexTable [GOOD] >> SystemView::StoragePoolsFields >> TCacheTest::MigrationDeletedPathNavigate [GOOD] >> YdbLogStore::LogTable [GOOD] >> YdbLogStore::AlterLogTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTest::MigrationDeletedPathNavigate [GOOD] Test command err: 2025-06-25T14:45:22.212918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.212964Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.375984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 2025-06-25T14:45:22.386615Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 65543, Sender [1:177:2172], Recipient [1:73:2111]: NActors::TEvents::TEvPoison 2025-06-25T14:45:22.387208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [1:73:2111] sender: [1:178:2067] recipient: [1:48:2095] Leader for TabletID 72057594046678944 is [1:73:2111] sender: [1:181:2067] recipient: [1:24:2071] Leader for TabletID 72057594046678944 is [1:73:2111] sender: [1:182:2067] recipient: [1:180:2173] Leader for TabletID 72057594046678944 is [1:183:2174] sender: [1:184:2067] recipient: [1:180:2173] 2025-06-25T14:45:22.393464Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateInit, received event# 268828672, Sender [1:180:2173], Recipient [1:183:2174]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:45:22.405654Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateInit, received event# 268828673, Sender [1:180:2173], Recipient [1:183:2174]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:45:22.405794Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateInit, received event# 268828684, Sender [1:180:2173], Recipient [1:183:2174]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:45:22.410200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, 
MaxRate# 1 2025-06-25T14:45:22.410282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:22.410365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:45:22.410398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:45:22.410428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:45:22.410453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:45:22.410506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:22.410570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:45:22.411249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:45:22.411508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:45:22.432070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:45:22.437139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:45:22.437323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:45:22.437565Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateInit, received event# 65542, Sender [1:7238242728502259555:7369577], Recipient [1:183:2174]: TSystem::Undelivered 2025-06-25T14:45:22.437604Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4896: StateInit, processing event TEvents::TEvUndelivered 2025-06-25T14:45:22.437641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.437670Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:22.437889Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__root_data_erasure_manager.cpp:92: [RootDataErasureManager] Clear operation queue and active pipes 2025-06-25T14:45:22.437936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:45:22.438575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:22.438710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.438779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.439212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.439290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:45:22.439509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.439620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.439687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.439772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.439861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.440010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.440261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.440387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.440720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.440808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.440964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.441066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.441140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.441318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.441423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.441528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.441744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.441815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.441928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.441974Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.442021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:45:22.442257Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:45:22.443371Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:45:22.443494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:22.444114Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435083, Sender [1:183:2174], Recipient [1:183:2174]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-25T14:45:22.444155Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5018: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-25T14:45:22.444769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:22.444820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:22.444891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:45:22.444929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:22.444967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:22.445005Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:45:22.445343Z no ... 
essor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:45:23.020908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:45:23.020955Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 { Path: Root/USER_0/DirA TableId: [72057594046678944:3:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 2] Params { Version: 3 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 SchemeShard: 72075186233409549 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-25T14:45:23.442251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:23.442317Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:23.491190Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 Leader for TabletID 72057594046678944 is [2:73:2111] sender: [2:178:2067] recipient: [2:48:2095] Leader for TabletID 72057594046678944 is [2:73:2111] sender: [2:181:2067] recipient: [2:24:2071] Leader for TabletID 72057594046678944 is [2:73:2111] sender: [2:182:2067] recipient: [2:180:2173] Leader for TabletID 72057594046678944 is [2:183:2174] sender: [2:184:2067] recipient: [2:180:2173] 2025-06-25T14:45:23.531417Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:23.531468Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 101 Leader for TabletID 72057594046678944 is [2:183:2174] sender: [2:214:2067] recipient: [2:24:2071] 2025-06-25T14:45:23.558959Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_subdomain.cpp:259) FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: 
Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-25T14:45:23.565003Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:249:2067] recipient: [2:241:2215] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:249:2067] recipient: [2:241:2215] Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:252:2067] recipient: [2:244:2217] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:252:2067] recipient: [2:244:2217] Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:254:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409546 is [0:0:0] sender: [2:254:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409546 is [2:253:2221] sender: [2:255:2067] recipient: [2:241:2215] Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:256:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409547 is [0:0:0] sender: [2:256:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409547 is [2:258:2223] sender: [2:261:2067] recipient: [2:244:2217] TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 TestWaitNotification wait txId: 102 2025-06-25T14:45:23.580151Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 Leader for TabletID 72075186233409546 is [2:253:2221] sender: [2:293:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409547 is [2:258:2223] sender: [2:294:2067] recipient: [2:24:2071] FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestModificationResults wait txId: 104 2025-06-25T14:45:23.622904Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 104:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 TestWaitNotification wait txId: 104 Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:344:2067] recipient: [2:340:2287] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:344:2067] recipient: [2:340:2287] Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:345:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409548 is [0:0:0] sender: [2:345:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409548 is [2:347:2291] sender: [2:348:2067] 
recipient: [2:340:2287] Leader for TabletID 72075186233409548 is [2:347:2291] sender: [2:349:2067] recipient: [2:24:2071] TestWaitNotification: OK eventTxId 103 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-25T14:45:23.747010Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomain, opId: 105:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:1232) Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:424:2067] recipient: [2:420:2336] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:424:2067] recipient: [2:420:2336] Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:426:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409549 is [0:0:0] sender: [2:426:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409549 is [2:427:2339] sender: [2:428:2067] recipient: [2:420:2336] 2025-06-25T14:45:23.782307Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:23.782357Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 Leader for TabletID 72075186233409549 is [2:427:2339] sender: [2:455:2067] recipient: [2:24:2071] TestWaitNotification: OK eventTxId 105 TestModificationResults wait txId: 106 2025-06-25T14:45:23.829100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5475: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:45:23.829170Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5475: Mark as Migrated path id [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-25T14:45:23.829491Z node 2 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__operation_upgrade_subdomain.cpp:1464: TWait ProgressState, dependent transaction: 106, parent transaction: 105, at schemeshard: 72057594046678944 2025-06-25T14:45:23.829637Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpUpgradeSubDomainDecision, opId: 106:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_upgrade_subdomain.cpp:571) TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-25T14:45:23.846242Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5936: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 2025-06-25T14:45:23.846607Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5936: Got TEvUpdateAck for unknown txId 105, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 TestModificationResults wait txId: 107 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-06-25T14:45:23.886382Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 108:0, at schemeshard: 72075186233409549, first GetDB called at: (GetDB 
first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:559:2067] recipient: [2:556:2443] IGNORE Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:559:2067] recipient: [2:556:2443] Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:562:2067] recipient: [2:24:2071] IGNORE Leader for TabletID 72075186233409550 is [0:0:0] sender: [2:562:2067] recipient: [2:24:2071] Leader for TabletID 72075186233409550 is [2:563:2447] sender: [2:564:2067] recipient: [2:556:2443] Leader for TabletID 72075186233409550 is [2:563:2447] sender: [2:565:2067] recipient: [2:24:2071] TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 Forgetting tablet 72075186233409548 TestWaitNotification: OK eventTxId 108 2025-06-25T14:45:26.328653Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:45:26.328722Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:26.404608Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:45:26.404678Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded >> KqpIndexLookupJoin::InnerJoinOnlyLeftColumn-StreamLookup [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH13 >> KqpJoin::LeftJoinPushdownPredicate_NestedJoin [GOOD] >> TCacheTestWithDrops::LookupErrorUponEviction [GOOD] >> KqpJoin::RightTableIndexPredicate [GOOD] >> IndexBuildTest::CancellationNotEnoughRetries [GOOD] >> IndexBuildTest::CheckLimitWithDroppedIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_cache/unittest >> TCacheTestWithDrops::LookupErrorUponEviction [GOOD] Test command err: 2025-06-25T14:45:22.209440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.209489Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.379336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 TestModificationResults wait txId: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:45:22.398863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-25T14:45:22.400434Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Erasing txId 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:45:22.438900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-25T14:45:22.843665Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:22.843725Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 1 2025-06-25T14:45:22.896572Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 FAKE_COORDINATOR: Erasing txId 1 TestModificationResult got TxId: 1, wait until txId: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::InnerJoinOnlyLeftColumn-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 3440, MsgBus: 13061 2025-06-25T14:45:41.975320Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898214494308891:2169];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:41.975561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e7b/r3tmp/tmpqMWPpK/pdisk_1.dat 2025-06-25T14:45:42.400234Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:42.406784Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898214494308755:2080] 1750862741936679 != 1750862741936682 2025-06-25T14:45:42.428685Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:42.428759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:42.433618Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3440, node 1 2025-06-25T14:45:42.632129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use 
file: (empty maybe) 2025-06-25T14:45:42.632152Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:42.632164Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:42.632282Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13061 2025-06-25T14:45:43.006672Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13061 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:43.267203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:43.288854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:43.303968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:43.463291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:43.667964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:43.791509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:45.393553Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898231674179567:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:45.393689Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:45.739287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.791123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.832814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.907176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.961449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.014279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.049095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.120050Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898235969147523:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:46.120158Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:46.120407Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898235969147528:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:46.127380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:46.144457Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898235969147530:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:45:46.213406Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898235969147581:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:46.973018Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898214494308891:2169];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:46.973082Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:47.245482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... lf { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:49.689593Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:49.704926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:49.723096Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:49.805130Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:49.941139Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:49.949955Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:50.018595Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:52.011868Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898260540526216:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:52.011936Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:52.089471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.119512Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.149687Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.218008Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.308978Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.365200Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.405383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.497578Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898260540526879:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:52.497633Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:52.498022Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898260540526884:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:52.501459Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:52.513966Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898260540526886:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:52.579010Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898260540526937:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:53.706479Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:53.755339Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:53.799373Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:53.841631Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:53.930494Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898243360655402:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:53.930916Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:53.934281Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:54.017353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::LeftJoinPushdownPredicate_NestedJoin [GOOD] Test command err: Trying to start YDB, gRPC: 7931, 
MsgBus: 10939 2025-06-25T14:45:39.979246Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898208024321817:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:39.979701Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e87/r3tmp/tmpx0dQhI/pdisk_1.dat 2025-06-25T14:45:40.517226Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898208024321596:2080] 1750862739917069 != 1750862739917072 2025-06-25T14:45:40.526806Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:40.527593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:40.527663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:40.547157Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7931, node 1 2025-06-25T14:45:40.666343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:40.666368Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:40.666391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:40.666504Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:45:40.912597Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10939 TClient is connected to server localhost:10939 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:41.526365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:45:41.542631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:41.564941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:41.770830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:41.988487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:42.068014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:43.590058Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898225204192421:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:43.590147Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:43.908286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:43.959937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:43.989886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:44.078136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:44.133388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:44.220864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:44.265215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:44.332681Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898229499160386:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.332760Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898229499160391:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.332783Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.340055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:44.357875Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898229499160393:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:45:44.446038Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898229499160444:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:44.972410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898208024321817:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:44.972513Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:45.550105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:47.808726Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13720, node 2 2025-06-25T14:45:47.859302Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:47.859326Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:47.859334Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:47.859455Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12339 TClient is connected to server localhost:12339 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:48.431607Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:45:48.444896Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:48.469090Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:48.545088Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:48.668852Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:48.736357Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:48.828334Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:50.804430Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898251159098340:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:50.804522Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:50.871242Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:50.915039Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:50.960701Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:51.004605Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:51.089662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:51.133207Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:51.203051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:51.332145Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898255454066305:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:51.332233Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:51.332501Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898255454066310:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:51.336972Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:51.350729Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:45:51.351895Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898255454066312:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:51.421460Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898255454066363:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:52.544497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.586603Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.637383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.639373Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898238274195069:2245];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:52.639410Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |85.3%| [TA] $(B)/ydb/core/tx/scheme_board/ut_cache/test-results/unittest/{meta.json ... results_accumulator.log} |85.3%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_cache/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::RightTableIndexPredicate [GOOD] Test command err: Trying to start YDB, gRPC: 27995, MsgBus: 19940 2025-06-25T14:45:40.800797Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898209225722012:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:40.830109Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e84/r3tmp/tmpozbVAi/pdisk_1.dat 2025-06-25T14:45:41.316565Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898209225721979:2080] 1750862740780249 != 1750862740780252 2025-06-25T14:45:41.332915Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:41.354969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:41.355038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:41.357072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27995, node 1 2025-06-25T14:45:41.556734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:41.556753Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:41.556759Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:41.556861Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19940 2025-06-25T14:45:41.892463Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19940 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:45:42.321757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:42.338217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:42.501003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:42.687706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:42.763157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:44.389174Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898226405592816:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.389286Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.650729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:44.690775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:44.719058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:44.746697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:44.819798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:44.862732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:44.895841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:44.999859Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898226405593475:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.999946Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:45.000236Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898226405593480:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:45.004698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:45.021452Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898230700560778:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:45:45.115327Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898230700560829:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:45.803692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898209225722012:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:45.803754Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:46.142065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.175263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation pa ... :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519898248309674769:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:49.513587Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e84/r3tmp/tmpGWYpHD/pdisk_1.dat 2025-06-25T14:45:49.692713Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519898248309674746:2080] 1750862749473118 != 1750862749473121 2025-06-25T14:45:49.705067Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:49.707303Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:49.707362Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:49.713868Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25104, node 2 2025-06-25T14:45:49.812787Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:49.812808Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:49.812815Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:49.812921Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20274 TClient is connected to server localhost:20274 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:50.433528Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:50.440525Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:50.459080Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:50.548813Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:50.551530Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:50.701587Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:50.768507Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:52.720505Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898261194578248:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:52.720591Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:52.809916Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.865435Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.907001Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.945213Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:52.981952Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:53.046203Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:53.091268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:53.177525Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898265489546201:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:53.177627Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:53.177918Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898265489546206:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:53.182302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:53.207701Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898265489546208:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:53.293961Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898265489546259:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:54.356962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:54.520459Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898248309674769:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:54.520501Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> YdbOlapStore::BulkUpsert [GOOD] >> YdbOlapStore::DuplicateRows >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Query [GOOD] >> YdbOlapStore::LogGrepExisting [GOOD] >> YdbOlapStore::LogExistingRequest >> KqpJoinOrder::SortingsWithLookupJoin4+RemoveLimitOperator >> KqpJoinOrder::FiveWayJoinStatsOverride-ColumnStore >> IndexBuildTest::CheckLimitWithDroppedIndex [GOOD] >> IndexBuildTest::DropIndex >> KqpJoinOrder::TPCDS34-ColumnStore >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Table [GOOD] >> KqpJoin::CrossJoinCount [GOOD] >> KqpIndexLookupJoin::LeftJoinCustomColumnOrder+StreamLookup >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_32_Query [GOOD] Test command err: 2025-06-25T14:40:54.538753Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896981622732641:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:54.539245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019c0/r3tmp/tmpTQrOGr/pdisk_1.dat 2025-06-25T14:40:54.813016Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:40:55.041481Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:55.050876Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896981622732449:2080] 1750862454516508 != 1750862454516511 2025-06-25T14:40:55.071054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:55.071142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:55.094910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
TServer::EnableGrpc on GrpcPort 5264, node 1 2025-06-25T14:40:55.215234Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0019c0/r3tmp/yandexcqsoJG.tmp 2025-06-25T14:40:55.215259Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0019c0/r3tmp/yandexcqsoJG.tmp 2025-06-25T14:40:55.215508Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0019c0/r3tmp/yandexcqsoJG.tmp 2025-06-25T14:40:55.215655Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:55.418817Z INFO: TTestServer started on Port 31464 GrpcPort 5264 2025-06-25T14:40:55.523027Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31464 PQClient connected to localhost:5264 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:55.748228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:40:55.787665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:40:55.814235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:40:56.008950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:40:56.025226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-25T14:40:58.203470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896998802602431:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.203781Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.204277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896998802602443:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.210353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:58.226519Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896998802602445:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:40:58.311047Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896998802602509:2443] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:59.143465Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519896998802602517:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:40:59.149614Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MTNjYzQwZDItYTBhY2YwYS1iNDczNzMzYi1lOWExMzQ0Yg==, ActorId: [1:7519896998802602420:2298], ActorState: ExecuteState, TraceId: 01jykrhvah35n5txjfz7thsar4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:40:59.151785Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:40:59.211636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:59.285366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:59.384161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:40:59.532391Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896981622732641:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:59.532462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Subcribe to ClusterTracker from [1:7519897003097570092:2621] === CheckClustersList. 
Ok 2025-06-25T14:41:06.308610Z :WriteToTopic_Demo_2_Table INFO: TTopicSdkTestSetup started 2025-06-25T14:41:06.354153Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:41:06.382977Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519897033162341392:2718] connected; active server actors: 1 2025-06-25T14:41:06.383239Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-25T14:41:06.383833Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-25T14:41:06.383924Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-25T14:41:06.384783Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-25T14:41:06.389121Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:41:06.389792Z node 1 :PERSQUEUE DEBU ... 968842Z node 13 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037894, Partition: {0, {13, 281474976710677}, 100001}, State: StateIdle] TPartition::ReplyWrite. Partition: {0, {13, 281474976710677}, 100001} 2025-06-25T14:45:48.968885Z node 13 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037894, Partition: {0, {13, 281474976710677}, 100001}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id', Topic: 'topic_A', Partition: {0, {13, 281474976710677}, 100001}, SeqNo: 2, partNo: 13, Offset: 0 is stored on disk 2025-06-25T14:45:48.972514Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 1 requestId: cookie: 2 2025-06-25T14:45:48.972605Z node 13 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:45:48.975022Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0] PartitionId [0] Generation [1] Write session: OnReadDone gRpcStatusCode: 0 2025-06-25T14:45:48.975190Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0] PartitionId [0] Generation [1] Write session got write response: acks { seq_no: 2 written_in_tx { } } write_statistics { persisting_time { nanos: 149000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-25T14:45:48.975243Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0] PartitionId [0] Generation [1] OnAck: seqNo=2, txId={ydb://session/3?node_id=13&id=YWE5YTI1OWUtNjFlZDRmYjktOTNhMWJhNjYtYjdmNTA3YTY=, 01jykrtq1f2jp1xavc3g6tqspe}, WriteCount=1, AckCount=1 2025-06-25T14:45:48.977254Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0] PartitionId [0] Generation [1] Write session: acknoledged message 2 2025-06-25T14:45:48.983895Z node 13 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: {0, {13, 281474976710677}, 100001}, State: StateIdle] need more data for 
compaction. cumulativeSize=7001240, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:45:48.985032Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72075186224037894] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 7519898246154738893 RawX2: 4503655461947796 } TxId: 281474976710678 Data { Operations { PartitionId: 0 Path: "/Root/topic_A" SupportivePartition: 100001 } Op: Commit SendingShards: 72075186224037894 ReceivingShards: 72075186224037894 Immediate: true WriteId { NodeId: 13 KeyId: 281474976710677 } } 2025-06-25T14:45:48.985066Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3295: [PQ: 72075186224037894] PartitionId {0, {13, 281474976710677}, 100001} for WriteId {13, 281474976710677} 2025-06-25T14:45:48.985090Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3385: [PQ: 72075186224037894] TxId 281474976710678 has WriteId {13, 281474976710677} 2025-06-25T14:45:48.985108Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3410: [PQ: 72075186224037894] immediate transaction 2025-06-25T14:45:48.985175Z node 13 :PERSQUEUE DEBUG: partition.cpp:1295: [PQ: 72075186224037894, Partition: {0, {13, 281474976710677}, 100001}, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoRequest 2025-06-25T14:45:48.985267Z node 13 :PERSQUEUE DEBUG: partition.cpp:1290: [PQ: 72075186224037894, Partition: {0, {13, 281474976710677}, 100001}, State: StateIdle] Send TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:45:48.985301Z node 13 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:45:48.985352Z node 13 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:45:48.985395Z node 13 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-25T14:45:48.985428Z node 13 :PERSQUEUE DEBUG: partition.cpp:2547: [PQ: 72075186224037894, Partition: 0, State: StateIdle] add key D0000100001_00000000000000000000_00000_0000000001_00013? 2025-06-25T14:45:48.985586Z node 13 :PERSQUEUE DEBUG: partition.cpp:2558: [PQ: 72075186224037894, Partition: 0, State: StateIdle] PartitionedBlob.GetFormedBlobs().size=1 2025-06-25T14:45:48.985644Z node 13 :PERSQUEUE DEBUG: partition_write.cpp:1117: [PQ: 72075186224037894, Partition: 0, State: StateIdle] writing blob: topic 'topic_A' partition 0 old key D0000100001_00000000000000000000_00000_0000000001_00013? new key d0000000000_00000000000000000001_00000_0000000001_00013? size 7001240 WTime 1750862748985 2025-06-25T14:45:48.985733Z node 13 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72075186224037894, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-25T14:45:48.985918Z node 13 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:45:48.985941Z node 13 :PERSQUEUE DEBUG: read.h:328: CacheProxy. Rename blob from D0000100001_00000000000000000000_00000_0000000001_00013? to d0000000000_00000000000000000001_00000_0000000001_00013? 2025-06-25T14:45:48.987491Z node 13 :PERSQUEUE DEBUG: cache_eviction.h:349: Renaming head blob in L1. 
Old partition 100001 old offset 0 old count 1 new partition 0 new offset 1 new count 1 actorID [13:7519898246154738712:2439] 2025-06-25T14:45:48.987609Z node 13 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 7000382 2025-06-25T14:45:48.987733Z node 13 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=7017776, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:45:48.988386Z node 13 :PERSQUEUE DEBUG: pq_l2_cache.cpp:179: PQ Cache (L2). Renamed. old Tablet '72075186224037894' partition 100001 offset 0 partno 0 count 1 parts 13 suffix '63', new Tablet '72075186224037894' partition 0 offset 1 partno 0 count 1 parts 13 suffix '63' 2025-06-25T14:45:48.988424Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:5236: [PQ: 72075186224037894] Handle TEvPQ::TEvTransactionCompleted WriteId {13, 281474976710677} 2025-06-25T14:45:48.988452Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:5262: [PQ: 72075186224037894] send TEvPQ::TEvDeletePartition to partition {0, {13, 281474976710677}, 100001} 2025-06-25T14:45:48.988489Z node 13 :PERSQUEUE DEBUG: partition.cpp:3863: [PQ: 72075186224037894, Partition: {0, {13, 281474976710677}, 100001}, State: StateIdle] Handle TEvPQ::TEvDeletePartition 2025-06-25T14:45:48.988703Z node 13 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:45:48.988721Z node 13 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from D0000100001(+) to D0000100002(-) 2025-06-25T14:45:48.989240Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:5134: [PQ: 72075186224037894] Handle TEvLongTxService::TEvLockStatus LockId: 281474976710677 LockNode: 13 Status: STATUS_NOT_FOUND 2025-06-25T14:45:48.989261Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:5149: [PQ: 72075186224037894] TxWriteInfo: WriteId {13, 281474976710677}, TxId 281474976710678, Status STATUS_SUBSCRIBED 2025-06-25T14:45:48.989278Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:5159: [PQ: 72075186224037894] there is already a transaction TxId 281474976710678 for WriteId {13, 281474976710677} 2025-06-25T14:45:48.989554Z node 13 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: {0, {13, 281474976710677}, 100001}, State: StateIdle] need more data for compaction. cumulativeSize=7001240, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:45:48.989589Z node 13 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037894, Partition: {0, {13, 281474976710677}, 100001}, State: StateIdle] need more data for compaction. 
cumulativeSize=7001240, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:45:48.990096Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:5196: [PQ: 72075186224037894] Handle TEvPQ::TEvDeletePartitionDone {0, {13, 281474976710677}, 100001} 2025-06-25T14:45:48.990161Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3638: [PQ: 72075186224037894] send TEvUnsubscribeLock for WriteId {13, 281474976710677} 2025-06-25T14:45:48.990196Z node 13 :PERSQUEUE WARN: pq_impl.cpp:4269: [PQ: 72075186224037894] Unknown transaction 281474976710678 2025-06-25T14:45:48.990254Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037894] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:45:48.990791Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037894] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:45:48.994499Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0] PartitionId [0] Generation [1] Write session: close. Timeout 0.000000s 2025-06-25T14:45:48.994552Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0] PartitionId [0] Generation [1] Write session will now close 2025-06-25T14:45:48.994595Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-25T14:45:48.995031Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-25T14:45:48.995087Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-25T14:45:48.997585Z node 13 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0 grpc read done: success: 0 data: 2025-06-25T14:45:48.997618Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 3 sessionId: test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0 grpc read failed 2025-06-25T14:45:48.997662Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 3 sessionId: test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0 2025-06-25T14:45:48.997680Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: test-message_group_id|ffb33d7e-33851c0c-4ad59bd7-79958cc_0 is DEAD 2025-06-25T14:45:48.998050Z node 13 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:45:48.998089Z node 13 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:45:48.998123Z node 13 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:45:48.998302Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [13:7519898246154738807:2459] destroyed 2025-06-25T14:45:48.998331Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [13:7519898246154738810:2459] destroyed 2025-06-25T14:45:48.998352Z node 13 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [13:7519898246154738867:2459] destroyed 
2025-06-25T14:45:48.998388Z node 13 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::CrossJoinCount [GOOD] Test command err: Trying to start YDB, gRPC: 28300, MsgBus: 17803 2025-06-25T14:45:45.073051Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898233001705092:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:45.082775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e74/r3tmp/tmpAsYz2J/pdisk_1.dat 2025-06-25T14:45:45.585752Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:45.590447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:45.590542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 28300, node 1 2025-06-25T14:45:45.592857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:45.748812Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:45.748834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:45.748845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:45.748971Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17803 2025-06-25T14:45:46.084457Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17803 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:45:46.326794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:46.340626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:46.351701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:46.549705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:46.738423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:46.815067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:48.540774Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898245886608556:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:48.540906Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:48.809674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:48.847459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:48.883611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:48.919746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:48.959124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.037800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.067958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.155992Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898250181576520:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:49.156070Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:49.156525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898250181576525:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:49.160090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:49.168944Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898250181576527:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:49.234573Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898250181576578:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:50.105663Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898233001705092:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:50.105727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:50.381263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:50.415544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok ... 5-06-25T14:45:52.154230Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:52.154309Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:52.158663Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7714, node 2 2025-06-25T14:45:52.288399Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:52.288422Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:52.288431Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:52.288546Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26655 TClient is connected to server localhost:26655 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:45:52.904291Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:52.913068Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:52.934312Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:52.996483Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:53.028720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:53.198256Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:53.313151Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:55.692445Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898276553511884:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:55.692543Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:55.794665Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:55.864544Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:55.938412Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:56.001863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:56.058038Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:56.145635Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:56.197778Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:56.272098Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898280848479839:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:56.272226Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:56.272569Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898280848479844:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:56.277046Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:56.293058Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898280848479846:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:45:56.389342Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898280848479897:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:56.984401Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898259373641164:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:56.984470Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:57.621588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:57.669340Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:57.713881Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TxUsage::WriteToTopic_Demo_45_Table [GOOD] >> SystemView::StoragePoolsFields [GOOD] >> IndexBuildTest::DropIndex [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_4_Table [GOOD] >> YdbLogStore::AlterLogTable [FAIL] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query >> KqpJoinOrder::TPCHRandomJoinViewJustWorks+ColumnStore >> TxUsage::WriteToTopic_Demo_45_Query >> TxUsage::Sinks_Oltp_WriteToTopics_4_Query >> KqpJoin::FullOuterJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::DropIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:45:00.205449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:45:00.205539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:00.205589Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:45:00.205634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:45:00.206334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:45:00.206371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:45:00.206437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:45:00.206912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:45:00.207686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:45:00.209516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:45:00.282201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:45:00.282269Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:00.302488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:45:00.302882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:45:00.303046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:45:00.309488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:45:00.310938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:45:00.314371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:00.315887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:45:00.321904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:00.323087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:45:00.335560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:00.335654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:45:00.335853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 
2025-06-25T14:45:00.335914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:45:00.336004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:45:00.336095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.342790Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:45:00.477076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:45:00.477267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.477471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:45:00.477514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:45:00.477725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:45:00.477795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:45:00.479936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:00.480096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:45:00.480277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.480354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:45:00.480390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:45:00.480429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 
2025-06-25T14:45:00.482587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.482644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:45:00.482674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:45:00.484230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.484277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:45:00.484347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:00.484395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:45:00.495197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:45:00.497116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:45:00.497314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:45:00.498001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:45:00.498115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:45:00.498163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:00.498376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:45:00.498413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:45:00.498546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:45:00.498616Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:45:00.500157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:45:00.500206Z node 1 :FLAT_TX_SCHEMESHARD ... 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:46:01.365914Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:46:01.365956Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 18446744073709551615 2025-06-25T14:46:01.366001Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 5 2025-06-25T14:46:01.366818Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:46:01.366889Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:46:01.366915Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:46:01.366950Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 8], version: 18446744073709551615 2025-06-25T14:46:01.366980Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 3 2025-06-25T14:46:01.377374Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:46:01.377490Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:46:01.377523Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:46:01.377559Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 15 2025-06-25T14:46:01.377595Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:46:01.378859Z 
node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:46:01.378941Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:46:01.378971Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:46:01.379518Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-25T14:46:01.379572Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 105:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:46:01.379822Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 4 2025-06-25T14:46:01.379970Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 2/3 2025-06-25T14:46:01.380011Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 2/3 2025-06-25T14:46:01.380051Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:0 progress is 2/3 2025-06-25T14:46:01.380087Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 2/3 2025-06-25T14:46:01.380133Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 105, ready parts: 2/3, is published: false 2025-06-25T14:46:01.385421Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:46:01.385523Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:46:01.385557Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:46:01.386343Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:46:01.386421Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:46:01.386449Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: 
Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:46:01.386479Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 18446744073709551615 2025-06-25T14:46:01.386513Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 4 2025-06-25T14:46:01.386609Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 105, ready parts: 2/3, is published: true 2025-06-25T14:46:01.388452Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 105:2, at schemeshard: 72057594046678944 2025-06-25T14:46:01.388497Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 105:2 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:46:01.388713Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-06-25T14:46:01.388823Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:2 progress is 3/3 2025-06-25T14:46:01.388849Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-06-25T14:46:01.388880Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#105:2 progress is 3/3 2025-06-25T14:46:01.388904Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-06-25T14:46:01.388930Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 105, ready parts: 3/3, is published: true 2025-06-25T14:46:01.388988Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:420:2375] message: TxId: 105 2025-06-25T14:46:01.389033Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-06-25T14:46:01.389080Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:0 2025-06-25T14:46:01.389115Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 105:0 2025-06-25T14:46:01.389214Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-25T14:46:01.389252Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:1 2025-06-25T14:46:01.389272Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 105:1 2025-06-25T14:46:01.389300Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 2 2025-06-25T14:46:01.389321Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 105:2 
2025-06-25T14:46:01.389342Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 105:2 2025-06-25T14:46:01.389376Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-06-25T14:46:01.396743Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:46:01.397592Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:46:01.397692Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:46:01.397737Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:46:01.397839Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:46:01.399501Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:46:01.399697Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:46:01.399741Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [3:951:2872] TestWaitNotification: OK eventTxId 105 >> KqpJoinOrder::TestJoinOrderHintsSimple-ColumnStore [GOOD] >> KqpJoinOrder::SortingsWithLookupJoinByPrefix+RemoveLimitOperator [GOOD] >> KqpJoin::JoinLeftPureExclusion |85.4%| [TA] $(B)/ydb/core/tx/schemeshard/ut_index_build/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TPQTestSlow::TestWriteVeryBigMessage [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinOrderHintsSimple-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 7554, MsgBus: 27679 2025-06-25T14:45:25.299235Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898144145930638:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:25.299776Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e9c/r3tmp/tmpefALFB/pdisk_1.dat 2025-06-25T14:45:25.721598Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898144145930472:2080] 1750862725292225 != 1750862725292228 2025-06-25T14:45:25.765728Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:25.767169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:25.767256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 7554, node 1 2025-06-25T14:45:25.773029Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:25.885752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:25.885795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:25.885802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:25.885904Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27679 2025-06-25T14:45:26.300464Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27679 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:45:26.575027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:28.060443Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898157030833004:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.060619Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.061189Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898157030833016:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.067552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:28.078243Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898157030833018:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:28.166585Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898157030833069:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:28.513932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.647820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.673794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.698328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.743431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.871655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.896636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.919427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.943723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.967061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.030616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.054048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.077856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.565985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.595490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/sc ... 
ss_local=67;result=not_found; 2025-06-25T14:45:58.304419Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.305091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.306109Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.306551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.310196Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.310775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.311493Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.312006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.316058Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.316569Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.316862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.316995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.320504Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.321046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.325195Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.325634Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.325813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.326195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.330653Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.330837Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.331204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.331284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.336197Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.336343Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.336821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.336877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.341659Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.341706Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.342309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.342622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.347350Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.347543Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.347952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.348040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.354069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.354070Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.354701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.355035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.360130Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.360129Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:45:58.452139Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrt60ddgf9gss6mgey3n0p", SessionId: ydb://session/3?node_id=1&id=ODdmNzJhZjMtYTVhZjlkNTItMzEzNWJlNzEtZDRiYzVkMWE=, Slow query, duration: 27.174211s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 
NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:45:58.836852Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:45:58.837176Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519898174210708517:2837];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-25T14:45:58.837180Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:45:58.837543Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
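[Editor's note: for readability, the query text quoted with \n escapes in the KQP_SLOW_LOG entry above (and repeated in the later SortingsWithLookupJoinByPrefix slow-query entry) is rendered below as plain YQL. This is a verbatim unescaping of the logged statement, not new content.]

CREATE TABLE t1 (
    id1 Int32 NOT NULL,
    PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
    id2 Int64 NOT NULL,
    t1_id1 Int64 NOT NULL,
    -- random_field2 Int32
    PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
    id3 Int16 NOT NULL,
    -- random_field3 Int32
    PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);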
: Warning: Execution, code: 1060
: Warning: Unapplied hint: Rows(R T # 1), code: 4534
: Warning: Execution, code: 1060
: Warning: Unapplied hint: Rows(R T # 1), code: 4534 >> KqpBatchUpdate::ManyPartitions_3 [GOOD] |85.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest >> TPQTestSlow::TestWriteVeryBigMessage [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] 2025-06-25T14:45:22.421425Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:22.421516Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927938 is [1:157:2176] sender: [1:158:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-25T14:45:22.449609Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:22.474186Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:45:22.477570Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-25T14:45:22.481272Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-25T14:45:22.484052Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-25T14:45:22.485798Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:190:2201] 2025-06-25T14:45:22.495686Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c57fb310-ad60bb30-1019099d-166e1d8b_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:22.504280Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|418f3144-8bd8f687-f09ea9a-559728fb_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:22.523197Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 
default|9a72d2fd-7eb83c7b-cdf3b9b9-98a4dace_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:244:2057] recipient: [1:103:2136] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:247:2057] recipient: [1:246:2246] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:248:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [1:249:2247] sender: [1:250:2057] recipient: [1:246:2246] 2025-06-25T14:45:22.579195Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:22.579277Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:45:22.579831Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:298:2288] 2025-06-25T14:45:22.581870Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:299:2289] 2025-06-25T14:45:22.587224Z node 1 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:45:22.587290Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [1:298:2288] 2025-06-25T14:45:22.589751Z node 1 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:45:22.589797Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [1:299:2289] 2025-06-25T14:45:22.600295Z node 1 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1048786 Leader for TabletID 72057594037927937 is [1:249:2247] sender: [1:327:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:112:2057] recipient: [2:105:2137] 2025-06-25T14:45:23.063587Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:23.063681Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:158:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:183:2057] recipient: [2:14:2061] 2025-06-25T14:45:23.079549Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:23.080365Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 2 actor [2:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-25T14:45:23.080960Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:189:2200] 2025-06-25T14:45:23.083228Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:189:2200] 2025-06-25T14:45:23.084630Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:190:2201] 2025-06-25T14:45:23.086246Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:190:2201] 2025-06-25T14:45:23.092186Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|f344550c-f21b4c5e-a34aa8a3-fbb3f96c_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:23.098424Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e0807b6-b1ad9705-c44f9737-23d30061_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:23.114511Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|49ffc400-d5c39cd2-f301e161-b1db4d_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default !Reboot 72057594037927937 (actor [2:111:2141]) on event 
NKikimr::TEvPersQueue::TEvOffsets ! Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:243:2057] recipient: [2:103:2136] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:246:2057] recipient: [2:245:2245] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:247:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:248:2246] sender: [2:249:2057] recipient: [2:245:2245] 2025-06-25T14:45:23.160197Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:23.160265Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:45:23.160918Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:297:2287] 2025-06-25T14:45:23.162517Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:298:2288] 2025-06-25T14:45:23.168720Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:45:23.168787Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:297:2287] 2025-06-25T14:45:23.171508Z node 2 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:45:23.171555Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:298:2288] 2025-06-25T14:45:23.181334Z node 2 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1048786 !Reboot 72057594037927937 (actor [2:111:2141]) rebooted! !Reboot 72057594037927937 (actor [2:111:2141]) tablet resolver refreshed! new actor is[2:248:2246] Leader for TabletID 72057594037927937 is [2:248:2246] sender: [2:352:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:248:2246] sender: [2:355:2057] recipient: [2:103:2136] Leader for TabletID 72057594037927937 is [2:248:2246] sender: [2:358:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:248:2246] sender: [2:359:2057] recipient: [2:357:2319] Leader for TabletID 72057594037927937 is [2:360:2320] sender: [2:361:2057] recipient: [2:357:2319] 2025-06-25T14:45:24.451228Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:24.451295Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:45:24.452039Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:411:2363] 2025-06-25T ... it] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 4 [52:385:2351] 2025-06-25T14:46:03.084355Z node 52 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-25T14:46:03.084413Z node 52 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 4 [52:386:2352] 2025-06-25T14:46:03.110624Z node 52 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1048786 !Reboot 72057594037927937 (actor [52:249:2247]) rebooted! !Reboot 72057594037927937 (actor [52:249:2247]) tablet resolver refreshed! new actor is[52:334:2308] Leader for TabletID 72057594037927937 is [52:334:2308] sender: [52:437:2057] recipient: [52:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:107:2057] recipient: [53:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:107:2057] recipient: [53:105:2137] Leader for TabletID 72057594037927937 is [53:111:2141] sender: [53:112:2057] recipient: [53:105:2137] 2025-06-25T14:46:05.260767Z node 53 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:46:05.260848Z node 53 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [53:153:2057] recipient: [53:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [53:153:2057] recipient: [53:151:2172] Leader for TabletID 72057594037927938 is [53:157:2176] sender: [53:158:2057] recipient: [53:151:2172] Leader for TabletID 72057594037927937 is [53:111:2141] sender: [53:183:2057] recipient: [53:14:2061] 2025-06-25T14:46:05.291363Z node 53 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:46:05.292430Z node 53 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 53 actor [53:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 53 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 53 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 53 Important: false } 2025-06-25T14:46:05.293125Z node 53 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [53:189:2200] 2025-06-25T14:46:05.295910Z node 53 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [53:189:2200] 2025-06-25T14:46:05.297887Z node 53 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [53:190:2201] 2025-06-25T14:46:05.299953Z node 53 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [53:190:2201] 2025-06-25T14:46:05.307260Z node 53 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|51ada907-4bf5a6b7-7f4b80e8-c2cd3021_0 generated for 
partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:05.313870Z node 53 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|52fb4dbc-ce7e2a20-421f5675-dc794d82_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:05.335087Z node 53 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|fe73eddc-f1cd07c9-29a0c22b-83b43751_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [53:111:2141] sender: [53:244:2057] recipient: [53:103:2136] Leader for TabletID 72057594037927937 is [53:111:2141] sender: [53:247:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [53:111:2141] sender: [53:248:2057] recipient: [53:246:2246] Leader for TabletID 72057594037927937 is [53:249:2247] sender: [53:250:2057] recipient: [53:246:2246] 2025-06-25T14:46:05.400905Z node 53 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:46:05.400973Z node 53 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:46:05.401767Z node 53 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [53:298:2288] 2025-06-25T14:46:05.404381Z node 53 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [53:299:2289] 2025-06-25T14:46:05.413744Z node 53 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:46:05.413833Z node 53 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [53:299:2289] 2025-06-25T14:46:05.414982Z node 53 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:46:05.415035Z node 53 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [53:298:2288] 2025-06-25T14:46:05.426920Z node 53 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1048786 Leader for TabletID 72057594037927937 is [53:249:2247] sender: [53:325:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:107:2057] recipient: [54:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:107:2057] recipient: [54:105:2137] Leader for TabletID 72057594037927937 is [54:111:2141] sender: [54:112:2057] recipient: [54:105:2137] 2025-06-25T14:46:06.218309Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:46:06.218393Z node 54 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [54:153:2057] recipient: [54:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [54:153:2057] recipient: [54:151:2172] Leader for TabletID 72057594037927938 is [54:157:2176] sender: [54:158:2057] recipient: [54:151:2172] Leader for TabletID 72057594037927937 is [54:111:2141] sender: [54:183:2057] recipient: [54:14:2061] 2025-06-25T14:46:06.239088Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:46:06.240056Z node 54 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 54 actor [54:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 54 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 54 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 54 Important: false } 2025-06-25T14:46:06.240751Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [54:189:2200] 2025-06-25T14:46:06.243436Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [54:189:2200] 2025-06-25T14:46:06.245470Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [54:190:2201] 2025-06-25T14:46:06.247586Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [54:190:2201] 2025-06-25T14:46:06.255399Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bf89ee85-52f1c5e5-8aa74fba-6da1ce36_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:06.263602Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|5ed74f76-3adf7e0c-2ddedd28-173a1d11_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:06.288090Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d5714921-cb7edc1b-52fa0f45-10952c2_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 
is [54:111:2141] sender: [54:244:2057] recipient: [54:103:2136] Leader for TabletID 72057594037927937 is [54:111:2141] sender: [54:246:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [54:111:2141] sender: [54:248:2057] recipient: [54:247:2246] Leader for TabletID 72057594037927937 is [54:249:2247] sender: [54:250:2057] recipient: [54:247:2246] 2025-06-25T14:46:06.363588Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:46:06.363651Z node 54 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:46:06.364587Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [54:298:2288] 2025-06-25T14:46:06.367055Z node 54 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [54:299:2289] 2025-06-25T14:46:06.375673Z node 54 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:46:06.375738Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [54:298:2288] 2025-06-25T14:46:06.379276Z node 54 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:46:06.379339Z node 54 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [54:299:2289] 2025-06-25T14:46:06.388985Z node 54 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 0 partno 0 count 1 parts 2 suffix '63' size 1048786 Leader for TabletID 72057594037927937 is [54:249:2247] sender: [54:327:2057] recipient: [54:14:2061] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoinByPrefix+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 10714, MsgBus: 11639 2025-06-25T14:45:25.299305Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898147723406084:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:25.299404Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e9d/r3tmp/tmplj4ukG/pdisk_1.dat 2025-06-25T14:45:25.720723Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:25.722785Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898147723406048:2080] 1750862725286807 != 1750862725286810 2025-06-25T14:45:25.766942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:25.767056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:25.768668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10714, node 1 2025-06-25T14:45:25.883125Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:25.888382Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:25.888409Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:25.888603Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11639 2025-06-25T14:45:26.315379Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11639 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:45:26.574953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:28.071116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898160608308593:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.073234Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898160608308582:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.073492Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.075221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:28.084267Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898160608308596:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:45:28.144728Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898160608308647:2336] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:28.523296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.634942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.666001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.694334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.749002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.882446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.907472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.938049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:28.963029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.027730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.054994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.082321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.102742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.563689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:29.587614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/ ... 
96632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.300013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.300703Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.300820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.301182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038480;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.305624Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.305624Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038480;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.306126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.306148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.310669Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.310810Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.311282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.311283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.315756Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.315763Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.316294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.316295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.320442Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.320603Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.321025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.321102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.325394Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.325396Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.325881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.325887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038542;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.330199Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.330461Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038542;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.330785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038500;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.330957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.335629Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.335629Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038500;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.336080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.336080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.340497Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.341049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.345373Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.346029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038510;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.346478Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.347006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:45:58.353771Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038510;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.363136Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:45:58.500576Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrt5tjfdvte844cr9017ax", SessionId: ydb://session/3?node_id=1&id=Yzg2MzU4MmEtOTQ0OGE4OGMtNTAwMDY3OWEtMTZjYzZkMzI=, Slow query, duration: 27.409985s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:45:58.853629Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:45:58.854185Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:45:58.854776Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519898177788183748:2782];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-25T14:45:58.855595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpIndexLookupJoin::LeftJoinCustomColumnOrder+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftJoinCustomColumnOrder-StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Query [GOOD] Test command err: 2025-06-25T14:40:54.519272Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896980261945609:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:54.519356Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019fb/r3tmp/tmpGa74ck/pdisk_1.dat 2025-06-25T14:40:54.800887Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:40:55.019340Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:55.052063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:55.052147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:55.053857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20439, node 1 2025-06-25T14:40:55.212138Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0019fb/r3tmp/yandex7Uqr8G.tmp 2025-06-25T14:40:55.212172Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0019fb/r3tmp/yandex7Uqr8G.tmp 2025-06-25T14:40:55.214076Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0019fb/r3tmp/yandex7Uqr8G.tmp 2025-06-25T14:40:55.214232Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:55.418860Z INFO: TTestServer started on Port 18477 GrpcPort 20439 2025-06-25T14:40:55.531564Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18477 PQClient connected to localhost:20439 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:55.772008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:55.803476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:40:55.811189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:40:55.824207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:40:56.087295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:40:56.109792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-25T14:40:58.174862Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896997441815567:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.174958Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.179534Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896997441815579:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.208530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:58.217834Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896997441815611:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.218034Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.232990Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896997441815581:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:40:58.346439Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896997441815637:2443] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:59.127084Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519896997441815653:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:40:59.134305Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NGUwZDJiZWQtYTY1ZDhmY2MtNTQzYTJhOGUtNGQyNTYzYTU=, ActorId: [1:7519896997441815564:2298], ActorState: ExecuteState, TraceId: 01jykrhv8z94wa4hb7ddb0afq1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:40:59.137077Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:40:59.212063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:59.282461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:59.385631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:40:59.518253Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896980261945609:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:59.518318Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Subcribe to ClusterTracker from [1:7519897001736783229:2623] === CheckClustersList. 
Ok 2025-06-25T14:41:05.030822Z :WriteToTopic_Demo_20_RestartNo_Table INFO: TTopicSdkTestSetup started 2025-06-25T14:41:05.075072Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:41:05.173829Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519897027506587189:2703] connected; active server actors: 1 2025-06-25T14:41:05.184405Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-25T14:41:05.185464Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480 ... 'topic_A' partition 0 user test-consumer readTimeStamp for offset 2 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:45:56.679707Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:45:56.679753Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:45:56.679787Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-25T14:45:56.679886Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-25T14:45:56.679928Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 2 endOffset 2 with cookie 2 2025-06-25T14:45:56.679970Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 2 2025-06-25T14:45:56.684514Z :DEBUG: [/Root] [/Root] [8a1363d3-4d5ee09c-1b4e1f80-ac53918] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 2 } } 2025-06-25T14:45:57.267226Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:45:57.267259Z node 19 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:57.385516Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:1:2 2025-06-25T14:45:57.385582Z :INFO: [/Root] [/Root] [8a1363d3-4d5ee09c-1b4e1f80-ac53918] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1000 BytesRead: 14000000 MessagesRead: 2 BytesReadCompressed: 14000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:45:57.412098Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 checking auth because of timeout 2025-06-25T14:45:57.412165Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 auth for : test-consumer 
2025-06-25T14:45:57.412711Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 Handle describe topics response 2025-06-25T14:45:57.412813Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 auth is DEAD 2025-06-25T14:45:57.412883Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 auth ok: topics# 1, initDone# 1 2025-06-25T14:45:57.452786Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 checking auth because of timeout 2025-06-25T14:45:57.452880Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 auth for : test-consumer 2025-06-25T14:45:57.453464Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 Handle describe topics response 2025-06-25T14:45:57.453558Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 auth is DEAD 2025-06-25T14:45:57.453630Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 auth ok: topics# 1, initDone# 1 2025-06-25T14:45:58.122268Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:45:58.453961Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 checking auth because of timeout 2025-06-25T14:45:58.454048Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 auth for : test-consumer 2025-06-25T14:45:58.460884Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 Handle describe topics response 2025-06-25T14:45:58.461008Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 auth is DEAD 2025-06-25T14:45:58.461090Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 auth ok: topics# 1, initDone# 1 2025-06-25T14:45:58.507577Z :INFO: [/Root] [/Root] [8a1363d3-4d5ee09c-1b4e1f80-ac53918] Closing read session. Close timeout: 0.000000s 2025-06-25T14:45:58.507634Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:1:2 2025-06-25T14:45:58.507683Z :INFO: [/Root] [/Root] [8a1363d3-4d5ee09c-1b4e1f80-ac53918] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2123 BytesRead: 14000000 MessagesRead: 2 BytesReadCompressed: 14000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:45:58.507783Z :NOTICE: [/Root] [/Root] [8a1363d3-4d5ee09c-1b4e1f80-ac53918] Aborting read session. 
Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:45:58.507831Z :DEBUG: [/Root] [/Root] [8a1363d3-4d5ee09c-1b4e1f80-ac53918] [] Abort session to cluster 2025-06-25T14:45:58.508299Z :DEBUG: [/Root] 0x000051E000204590 TDirectReadSessionManager ServerSessionId=test-consumer_19_1_17971945634023306661_v1 Close 2025-06-25T14:45:58.509902Z :DEBUG: [/Root] 0x000051E000204590 TDirectReadSessionManager ServerSessionId=test-consumer_19_1_17971945634023306661_v1 Close 2025-06-25T14:45:58.510035Z :NOTICE: [/Root] [/Root] [8a1363d3-4d5ee09c-1b4e1f80-ac53918] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:45:58.511840Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 grpc read done: success# 0, data# { } 2025-06-25T14:45:58.511871Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 grpc read failed 2025-06-25T14:45:58.511905Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 grpc closed 2025-06-25T14:45:58.511936Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 is DEAD 2025-06-25T14:45:58.512208Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [19:7519898279715677802:2518]: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 grpc read done: success# 0, data# { } 2025-06-25T14:45:58.512255Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [19:7519898279715677802:2518]: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1grpc read failed 2025-06-25T14:45:58.512303Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [19:7519898279715677802:2518]: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 grpc closed 2025-06-25T14:45:58.512340Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [19:7519898279715677802:2518]: session cookie 2 consumer test-consumer session test-consumer_19_1_17971945634023306661_v1 proxy is DEAD 2025-06-25T14:45:58.512913Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_19_1_17971945634023306661_v1 2025-06-25T14:45:58.512945Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [19:7519898279715677796:2515] destroyed 2025-06-25T14:45:58.512989Z node 19 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [19:7519898279715677793:2512] disconnected; active server actors: 1 2025-06-25T14:45:58.513011Z node 19 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [19:7519898279715677793:2512] client test-consumer disconnected session test-consumer_19_1_17971945634023306661_v1 2025-06-25T14:45:58.513116Z node 19 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_19_1_17971945634023306661_v1 2025-06-25T14:45:58.520348Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d28b38d7-cb22d786-3a1ed5cd-28cf39a7_0] PartitionId [0] Generation [2] Write session: close. 
Timeout 0.000000s 2025-06-25T14:45:58.520407Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d28b38d7-cb22d786-3a1ed5cd-28cf39a7_0] PartitionId [0] Generation [2] Write session will now close 2025-06-25T14:45:58.520458Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|d28b38d7-cb22d786-3a1ed5cd-28cf39a7_0] PartitionId [0] Generation [2] Write session: aborting 2025-06-25T14:45:58.520925Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|d28b38d7-cb22d786-3a1ed5cd-28cf39a7_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2025-06-25T14:45:58.520971Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|d28b38d7-cb22d786-3a1ed5cd-28cf39a7_0] PartitionId [0] Generation [2] Write session: destroy 2025-06-25T14:45:58.522050Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|d28b38d7-cb22d786-3a1ed5cd-28cf39a7_0 grpc read done: success: 0 data: 2025-06-25T14:45:58.522080Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|d28b38d7-cb22d786-3a1ed5cd-28cf39a7_0 grpc read failed 2025-06-25T14:45:58.522114Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|d28b38d7-cb22d786-3a1ed5cd-28cf39a7_0 grpc closed 2025-06-25T14:45:58.522133Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|d28b38d7-cb22d786-3a1ed5cd-28cf39a7_0 is DEAD 2025-06-25T14:45:58.522786Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:45:58.522938Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [19:7519898271125743147:2493] destroyed 2025-06-25T14:45:58.522982Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
>> KqpFlipJoin::LeftSemi_2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> SystemView::StoragePoolsFields [GOOD] Test command err: 2025-06-25T14:40:44.665093Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896939363417163:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:44.665212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012ad/r3tmp/tmpC2gINR/pdisk_1.dat 2025-06-25T14:40:44.924356Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:44.963486Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 1907, node 1 2025-06-25T14:40:44.992083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:44.992107Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:40:44.992114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:44.992260Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:44.996247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:44.996349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:44.999246Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6612 TClient is connected to server localhost:6612 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:45.433388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:40:45.671173Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:47.299166Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:276: Subscribed for config changes 2025-06-25T14:40:47.299206Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:331: Updated config 2025-06-25T14:40:47.355800Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896952248320142:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:47.355925Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:47.356138Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896952248320154:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:47.359484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:47.389466Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896952248320156:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:40:47.448236Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896952248320229:2749] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:47.449365Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-25T14:40:47.449487Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:413: Perform request, TraceId.SpanIdPtr: 0x000050F0000490B8 2025-06-25T14:40:47.449523Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:423: Received compile request, sender: [1:7519896952248320138:2299], queryUid: , queryText: "\n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n ", keepInCache: 1, split: 0{ TraceId: 01jykrhgqsb4gr5vv4n640bkeq, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTcwY2I3OWItODExNzBkNDItYWRmZDQ1NGEtMmQyMjBlNjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default} 2025-06-25T14:40:47.449652Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-25T14:40:47.449725Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:519: Added request to queue, sender: [1:7519896952248320138:2299], queueSize: 1 
2025-06-25T14:40:47.450305Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:880: Created compile actor, sender: [1:7519896952248320138:2299], compileActor: [1:7519896952248320240:2310] 2025-06-25T14:40:47.799439Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrhgqsb4gr5vv4n640bkeq, SessionId: CompileActor 2025-06-25 14:40:47.799 INFO ydb-core-sys_view-ut(pid=280037, tid=0x00007FBEBBBB9640) [core dq] kqp_host.cpp:1375: Good place to weld in 2025-06-25T14:40:47.801657Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrhgqsb4gr5vv4n640bkeq, SessionId: CompileActor 2025-06-25 14:40:47.800 INFO ydb-core-sys_view-ut(pid=280037, tid=0x00007FBEBBBB9640) [core dq] kqp_host.cpp:1380: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake.fake/olap-tier1") '('"source_type" '"ObjectStorage"))) (return (Write! $2 (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/tier1")) '('typeId (String '"EXTERNAL_DATA_SOURCE"))) (Void) '('('mode 'createObject) '('features $3)))) ) 2025-06-25T14:40:47.802483Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrhgqsb4gr5vv4n640bkeq, SessionId: CompileActor 2025-06-25 14:40:47.801 INFO ydb-core-sys_view-ut(pid=280037, tid=0x00007FBEBBBB9640) [KQP] kqp_host.cpp:1386: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_s ... 
p:67: TraceId: 01jykrtr7842hevassmth41m0m, SessionId: CompileActor 2025-06-25 14:45:49.966 INFO ydb-core-sys_view-ut(pid=280037, tid=0x00007FBEA885B640) [core exec] yql_execution.cpp:153: State is ExecutionComplete after apply async changes for node #42 2025-06-25T14:45:49.966919Z node 36 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrtr7842hevassmth41m0m, SessionId: CompileActor 2025-06-25 14:45:49.966 INFO ydb-core-sys_view-ut(pid=280037, tid=0x00007FBEA885B640) [core exec] yql_execution.cpp:59: Begin, root #43 2025-06-25T14:45:49.966954Z node 36 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrtr7842hevassmth41m0m, SessionId: CompileActor 2025-06-25 14:45:49.966 INFO ydb-core-sys_view-ut(pid=280037, tid=0x00007FBEA885B640) [core exec] yql_execution.cpp:72: Collect unused nodes for root #43, status: Ok 2025-06-25T14:45:49.966992Z node 36 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrtr7842hevassmth41m0m, SessionId: CompileActor 2025-06-25 14:45:49.966 TRACE ydb-core-sys_view-ut(pid=280037, tid=0x00007FBEA885B640) [core exec] yql_execution.cpp:387: {0}, callable #43 2025-06-25T14:45:49.967045Z node 36 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrtr7842hevassmth41m0m, SessionId: CompileActor 2025-06-25 14:45:49.967 INFO ydb-core-sys_view-ut(pid=280037, tid=0x00007FBEA885B640) [core exec] yql_execution.cpp:577: Node #43 finished execution 2025-06-25T14:45:49.967097Z node 36 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrtr7842hevassmth41m0m, SessionId: CompileActor 2025-06-25 14:45:49.967 INFO ydb-core-sys_view-ut(pid=280037, tid=0x00007FBEA885B640) [core exec] yql_execution.cpp:594: Node #43 created 0 trackable nodes: 2025-06-25T14:45:49.967134Z node 36 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrtr7842hevassmth41m0m, SessionId: CompileActor 2025-06-25 14:45:49.967 INFO ydb-core-sys_view-ut(pid=280037, tid=0x00007FBEA885B640) [core exec] yql_execution.cpp:87: Finish, output #43, status: Ok 2025-06-25T14:45:49.967179Z node 36 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrtr7842hevassmth41m0m, SessionId: CompileActor 2025-06-25 14:45:49.967 INFO ydb-core-sys_view-ut(pid=280037, tid=0x00007FBEA885B640) [core exec] yql_execution.cpp:93: Creating finalizing transformer, output #43 2025-06-25T14:45:49.968084Z node 36 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:632: Received response, sender: [36:7519898229893429540:3179], status: SUCCESS, compileActor: [36:7519898247073303218:3507] 2025-06-25T14:45:49.968155Z node 36 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:903: Send response, sender: [36:7519898229893429540:3179], queryUid: 2d421db3-c0b52317-6c26a7a7-88970175, status:SUCCESS 2025-06-25T14:45:50.064520Z node 36 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 36, TabletId: 72075186224037907 not found 2025-06-25T14:45:50.065491Z node 36 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 36, TabletId: 72075186224037909 not found 2025-06-25T14:45:50.072576Z node 36 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 36, TabletId: 72075186224037910 not found 2025-06-25T14:45:50.077411Z node 36 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 36, TabletId: 72075186224037908 not found 2025-06-25T14:45:50.356027Z node 36 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1186: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: metadata@system, Text: SELECT * FROM 
`//Root/.metadata/initialization/migrations`;\n, Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_DML}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-25T14:45:52.683910Z node 41 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[41:7519898260882915422:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:52.684062Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012ad/r3tmp/tmpRBKp8r/pdisk_1.dat 2025-06-25T14:45:52.919096Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(41, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:52.919243Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(41, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:52.973253Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(41, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:52.990047Z node 41 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:52.996539Z node 41 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [41:7519898260882915404:2080] 1750862752676503 != 1750862752676506 TServer::EnableGrpc on GrpcPort 14177, node 41 2025-06-25T14:45:53.217194Z node 41 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:53.217220Z node 41 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:53.217235Z node 41 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:53.217436Z node 41 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:45:53.705526Z node 41 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3320 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:45:53.902341Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:53.917471Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:57.680456Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[41:7519898260882915422:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:57.680575Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:59.677876Z node 41 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [41:7519898290947687108:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:59.678026Z node 41 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:59.678385Z node 41 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [41:7519898290947687135:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:59.691883Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:59.720539Z node 41 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [41:7519898290947687137:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:59.825478Z node 41 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [41:7519898290947687192:2344] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:00.083179Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jykrtw4za55qwn5197kw0z14, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=MTc3NTBkNmUtMThhMDQ4YmMtMzA1YjYyMWYtODMxNDU3Y2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:46:00.087897Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [41:7519898295242654530:2309], owner: [41:7519898295242654527:2307], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:46:00.097030Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [41:7519898295242654530:2309], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:46:00.119336Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [41:7519898295242654530:2309], row count: 1, finished: 1 2025-06-25T14:46:00.119531Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [41:7519898295242654530:2309], owner: [41:7519898295242654527:2307], scan id: 0, sys view info: Type: EStoragePools SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:46:00.123208Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862760080, txId: 281474976710660] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::ManyPartitions_3 [GOOD] Test command err: Trying to start YDB, gRPC: 20858, MsgBus: 15215 2025-06-25T14:42:40.733123Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897438940380025:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c55/r3tmp/tmpCzhjaN/pdisk_1.dat 2025-06-25T14:42:41.563026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.563112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.564357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.580542Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.729767Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc 
on GrpcPort 20858, node 1 2025-06-25T14:42:42.716955Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.716989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.716995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.717125Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15215 TClient is connected to server localhost:15215 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.998820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.425938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.571413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.655414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.753396Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897438940380025:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.772176Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.836116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897460415217955:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.836221Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.941165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.964137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.983219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.003766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.025987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.055207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.082645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.165170Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897473300120514:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.165240Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.165329Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897473300120519:2442], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.168610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.176779Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897473300120521:2443], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.252145Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897473300120572:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.091956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:56.478431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:42:56.478465Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Trying to start YDB, gRPC: 16638, MsgBus: 13247 2025-06-25T14:43:06.428928 ... undelivered;self_id=[12:7519898238786461419:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:47.571017Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c55/r3tmp/tmpVYBuHq/pdisk_1.dat 2025-06-25T14:45:47.866477Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:47.866609Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:47.867392Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:47.869183Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519898238786461400:2080] 1750862747567764 != 1750862747567767 2025-06-25T14:45:47.888851Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62224, node 12 2025-06-25T14:45:48.015005Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:48.015032Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:48.015043Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:48.015218Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10041 2025-06-25T14:45:48.709419Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10041 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:49.078826Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:49.108800Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:49.253687Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:49.517331Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:49.707272Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:52.572576Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519898238786461419:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:52.572671Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:54.194229Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519898268851234142:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:54.194361Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:54.347909Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:54.594393Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:54.662643Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:54.722582Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:54.784166Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:54.976767Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:55.075583Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:55.200397Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519898273146202125:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:55.200579Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:55.201040Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519898273146202130:2441], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:55.207250Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:55.228107Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519898273146202132:2442], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:45:55.297795Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519898273146202183:3437] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:57.525580Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:02.841327Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:46:02.841361Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded >> KqpBatchDelete::ManyPartitions_1 [GOOD] >> KqpJoinOrder::FiveWayJoinWithConstantFold+ColumnStore >> KqpIndexLookupJoin::LeftOnly+StreamLookup >> KqpJoin::RightSemiJoin_KeyPrefix >> KqpIndexLookupJoin::SimpleInnerJoin-StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::ManyPartitions_1 [GOOD] Test command err: Trying to start YDB, gRPC: 62911, MsgBus: 8422 2025-06-25T14:42:40.738759Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897435570915808:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.739666Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bd7/r3tmp/tmpqw0eYQ/pdisk_1.dat 2025-06-25T14:42:41.529587Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897435570915604:2080] 1750862560723121 != 1750862560723124 2025-06-25T14:42:41.532364Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.582172Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.582321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.676950Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62911, node 1 2025-06-25T14:42:41.742885Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:42.760552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.760583Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.760592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.760824Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8422 TClient is connected to server localhost:8422 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.993901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:42:45.392888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.465664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:45.540696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.738974Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897435570915808:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.745141Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828301Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897457045753728:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828435Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.941063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.007200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.080514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.107779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.139428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.175969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.202406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.278044Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897469930656293:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.278102Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.278185Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897469930656298:2442], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.281477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.290448Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897469930656300:2443], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.364946Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897469930656351:3436] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.213613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 18863, MsgBus: 20750 2025-06-25T14:42:51.526745Z node 2 :METADATA_PROVI ... ard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 15553, MsgBus: 21636 2025-06-25T14:45:53.676923Z node 20 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[20:7519898264588209123:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:53.677022Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bd7/r3tmp/tmp3aQg7f/pdisk_1.dat 2025-06-25T14:45:54.011753Z node 20 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:54.035094Z node 20 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [20:7519898264588209030:2080] 1750862753647326 != 1750862753647329 2025-06-25T14:45:54.039473Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:54.040148Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:54.045294Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15553, node 20 2025-06-25T14:45:54.225197Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:54.225229Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:54.225244Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:54.225464Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:45:54.708484Z node 20 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21636 TClient is connected to server localhost:21636 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:55.990389Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:56.015754Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:56.215599Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:56.670978Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:45:56.826900Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:45:58.680494Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[20:7519898264588209123:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:58.680609Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:01.867005Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519898298947949056:2376], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:01.867175Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:02.085724Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:02.250551Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:02.326169Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:02.392882Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:02.467891Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:02.560607Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:02.634008Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:02.779613Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519898303242917026:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:02.779853Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:02.780639Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519898303242917031:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:02.790594Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:02.824811Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [20:7519898303242917033:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:02.916633Z node 20 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [20:7519898303242917088:3437] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:05.412361Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> KqpJoinOrder::SortingsComplexOrderBy-RemoveLimitOperator >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Table [GOOD] >> KqpJoin::FullOuterJoin [GOOD] >> KqpJoin::FullOuterJoin2 >> KqpJoinOrder::SortingsWithLookupJoin1+RemoveLimitOperator [GOOD] >> KqpJoinOrder::TPCDS96+ColumnStore >> KqpJoin::JoinLeftPureExclusion [GOOD] >> KqpJoin::JoinLeftPureCross >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query >> TxUsage::WriteToTopic_Demo_45_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin1+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 26314, MsgBus: 14399 2025-06-25T14:45:33.111203Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898178140778260:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:33.118848Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e8b/r3tmp/tmpZxLeAV/pdisk_1.dat 2025-06-25T14:45:33.526403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:33.526549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:33.534579Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:33.585237Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26314, node 1 2025-06-25T14:45:33.662065Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:33.662087Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:33.662094Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:33.662194Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14399 2025-06-25T14:45:34.146034Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14399 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:34.339220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:34.361509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:36.179720Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898191025680772:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:36.179722Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898191025680761:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:36.179808Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:36.183605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:36.195442Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898191025680775:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:36.280008Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898191025680826:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:36.581324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.688613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.719708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.785546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.826833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.950411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:36.982464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.013377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.080180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.131663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.192856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.229339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.260572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.897187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:37.933250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/sc ... 
.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.122882Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.123483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.124544Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.124994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.128799Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.129388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.129514Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.130116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.134421Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.134846Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.135054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.135493Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.148215Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.148729Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.148773Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.149236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.154193Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.154568Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.154785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.155170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.160152Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.160918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.166582Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.167244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.169804Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.170446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.175175Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.175862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.180187Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.184861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.189357Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.189892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.193513Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.194210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.199826Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.203183Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.203750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.213753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.291596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:07.324704Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:07.360567Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrtecjc7gfz434379rsk2f", SessionId: ydb://session/3?node_id=1&id=MWQzZmExOTYtNDQzYWQ0Yy00ZmU0YWU3LTYwNDU0ODgw, Slow query, duration: 27.501708s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:07.580044Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:07.580551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:07.581164Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519898212500523278:2776];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-25T14:46:07.581562Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
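For readability, the query text from the KQP_SLOW_LOG entry above (the 27.5 s CREATE TABLE batch) is reproduced here with its \n escapes expanded; the statements are copied from the log itself, only the whitespace is reflowed:

CREATE TABLE t1 (
    id1 Int32 NOT NULL,
    PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
    id2 Int64 NOT NULL,
    t1_id1 Int64 NOT NULL,
    -- random_field2 Int32
    PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
    id3 Int16 NOT NULL,
    -- random_field3 Int32
    PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);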
:7:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504
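Warning 4504 above reports that YQL drops an ORDER BY inside a subquery when no LIMIT accompanies it. A minimal, hypothetical query shape that would trigger it (not the test's actual query, which is not shown in the log; table name t1 is borrowed from the CREATE TABLE batch above):

SELECT id1
FROM (
    SELECT id1 FROM t1 ORDER BY id1   -- no LIMIT here, so this ORDER BY is ignored (code 4504)
) AS sub;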
:7:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504 >> TxUsage::WriteToTopic_Demo_46_Table >> KqpFlipJoin::LeftSemi_2 [GOOD] >> KqpFlipJoin::LeftSemi_3 >> KqpIndexLookupJoin::LeftJoinCustomColumnOrder-StreamLookup [GOOD] >> KqpJoinOrder::TPCDS95-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftJoinCustomColumnOrder-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 61078, MsgBus: 4552 2025-06-25T14:46:00.213949Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898296603530429:2202];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:00.214003Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e6b/r3tmp/tmpl81ufp/pdisk_1.dat 2025-06-25T14:46:00.728882Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898296603530264:2080] 1750862760172261 != 1750862760172264 2025-06-25T14:46:00.788719Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:00.794407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:00.794495Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:00.798728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61078, node 1 2025-06-25T14:46:01.090298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:01.090320Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:01.090341Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:01.090447Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:01.212841Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4552 TClient is connected to server localhost:4552 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:01.903091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:01.915593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:01.931561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:02.077684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:02.331379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:02.404184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:03.873037Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898309488433785:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:03.873127Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:04.320630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.416945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.472628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.556954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.596587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.643326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.706564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.825974Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898313783401750:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:04.826042Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:04.826231Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898313783401755:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:04.829841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:04.883885Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898313783401757:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:04.985629Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898313783401808:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:05.220434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898296603530429:2202];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:05.220783Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:06.393552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... lf { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:09.637557Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:09.647199Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:46:09.670984Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:09.672404Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-25T14:46:09.744453Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:09.952950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:10.057978Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:12.716423Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898348052678242:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:12.716510Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:12.758973Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:12.799971Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:12.839252Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:12.875674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:12.950508Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:13.013689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:13.082552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:13.216590Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898352347646199:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:13.216670Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:13.216696Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898352347646204:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:13.220925Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:13.245723Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898352347646206:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:46:13.323288Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898352347646257:3410] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:13.616486Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898330872807613:2182];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:13.683292Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:15.173861Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.247055Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.280498Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.314629Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.395485Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.470031Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query [GOOD] >> KqpJoin::RightSemiJoin_KeyPrefix [GOOD] >> KqpJoin::RightSemiJoin_SecondaryIndex >> KqpJoin::FullOuterJoin2 [GOOD] >> 
KqpIndexLookupJoin::SimpleInnerJoin-StreamLookup [GOOD] >> KqpIndexLookupJoin::SimpleLeftJoin+StreamLookup >> KqpIndexLookupJoin::LeftOnly+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftOnly-StreamLookup >> KqpJoinOrder::TestJoinHint1-ColumnStore >> KqpJoin::JoinLeftPureCross [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::FullOuterJoin2 [GOOD] Test command err: Trying to start YDB, gRPC: 20025, MsgBus: 18871 2025-06-25T14:46:03.485556Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898307910992615:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:03.485597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e65/r3tmp/tmpBnWwuw/pdisk_1.dat 2025-06-25T14:46:04.092898Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:04.099859Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898307910992589:2080] 1750862763478170 != 1750862763478173 2025-06-25T14:46:04.132117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:04.132216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:04.143549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20025, node 1 2025-06-25T14:46:04.286740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:04.286757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:04.286763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:04.286855Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:04.507171Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18871 TClient is connected to server localhost:18871 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:46:05.207304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:05.226526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:05.245666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:05.390602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:05.567168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:05.667433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:07.450580Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898325090863398:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:07.450706Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:07.840158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:07.934553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:08.021188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:08.066624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:08.127607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:08.173897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:08.249813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:08.360459Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898329385831360:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:08.360552Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:08.360749Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898329385831365:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:08.364564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:08.411300Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898329385831367:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:08.471531Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898329385831420:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:08.494204Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898307910992615:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:08.494302Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:09.928176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... -06-25T14:46:12.262797Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:12.262884Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:12.269916Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30009, node 2 2025-06-25T14:46:12.412766Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:12.412788Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:12.412795Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:12.412893Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19888 TClient is connected to server localhost:19888 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:46:12.934580Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:12.945697Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:12.957620Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:13.109168Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:46:13.137044Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:13.290511Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:13.384388Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:15.580441Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898361981880415:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.580540Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.682932Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.752071Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.791428Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.879920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.925348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.018465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.086920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.174022Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898366276848373:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:16.174122Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:16.174305Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898366276848378:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:16.178564Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:16.200951Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898366276848380:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:16.284408Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898366276848431:3412] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:17.044499Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898349096977037:2155];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:17.044562Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:17.677108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:17.748866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:17.816521Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinLeftPureCross [GOOD] Test command err: Trying to start YDB, gRPC: 21887, MsgBus: 15252 2025-06-25T14:46:06.235784Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898322886300108:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:06.236080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e63/r3tmp/tmp5NXsS4/pdisk_1.dat 2025-06-25T14:46:06.909663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:06.909747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:06.913386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:06.946276Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:06.952519Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7519898322886299960:2080] 1750862766201236 != 1750862766201239 TServer::EnableGrpc on GrpcPort 21887, node 1 2025-06-25T14:46:07.172969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:07.172997Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:07.173005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:07.173113Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:07.231313Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15252 TClient is connected to server localhost:15252 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:08.205665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:08.280823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:08.299492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:08.477441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:08.679373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:08.772953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:10.755991Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898340066170768:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:10.756079Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:11.043935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:11.080138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:11.126624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:11.197223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:11.228899Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898322886300108:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:11.229035Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:11.231889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:11.305987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:11.359578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:11.470396Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898344361138725:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:11.470492Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:11.470823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898344361138730:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:11.474587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:11.490566Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898344361138732:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:11.553064Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898344361138783:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 14264, MsgBus: 27432 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e63/r3tmp/tmp2fjCwz/pdisk_1.dat 2025-06-25T14:46:14.459799Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:46:14.535655Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:14.535732Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:14.539047Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:14.540937Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519898355714477591:2080] 1750862774237312 != 1750862774237315 2025-06-25T14:46:14.591015Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14264, node 2 2025-06-25T14:46:14.816631Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:14.816652Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:14.816663Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:14.816779Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27432 2025-06-25T14:46:15.252647Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27432 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:46:15.701857Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:46:15.717186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:15.730057Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:15.831645Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:16.065229Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:16.160172Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:18.268512Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898372894348413:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:18.268648Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:18.341304Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:18.385704Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:18.448130Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:18.522713Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:18.567171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:18.620263Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:18.699083Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:18.809756Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898372894349075:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:18.809843Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:18.810065Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898372894349080:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:18.814271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:18.838679Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898372894349082:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:18.930487Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898372894349133:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpBatchDelete::SimplePartitions [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_4_Query [GOOD] >> KqpIndexLookupJoin::RightSemi >> YdbOlapStore::LogPagingBefore [GOOD] >> YdbOlapStore::LogPagingAfter ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query [GOOD] Test command err: 2025-06-25T14:41:10.166273Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897049855984302:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:10.166480Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:41:10.356661Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00196a/r3tmp/tmpLo6IjB/pdisk_1.dat 2025-06-25T14:41:10.546215Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897049855984284:2080] 1750862470165270 != 1750862470165273 2025-06-25T14:41:10.555694Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28479, node 1 2025-06-25T14:41:10.599608Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:10.599693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:10.606915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:10.636616Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/00196a/r3tmp/yandexwa803s.tmp 2025-06-25T14:41:10.636637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/00196a/r3tmp/yandexwa803s.tmp 2025-06-25T14:41:10.636799Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/00196a/r3tmp/yandexwa803s.tmp 2025-06-25T14:41:10.636979Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:10.685988Z INFO: TTestServer started on Port 12216 GrpcPort 28479 TClient is connected to server localhost:12216 PQClient connected to localhost:28479 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:10.980929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:11.009408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:41:11.023937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:41:11.030967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:41:11.185135Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:41:12.860567Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897058445919666:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:12.860678Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:12.860949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897058445919678:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:12.864500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:12.876763Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897058445919681:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:41:13.089743Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897058445919745:2441] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:13.124903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:13.155707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:13.220201Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897062740887049:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:41:13.220466Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YzcwODMzMTktOGVhY2E1NzItNmRiODUzMjktMjVmMzY5OTQ=, ActorId: [1:7519897058445919663:2297], ActorState: ExecuteState, TraceId: 01jykrj9mtb3rvqf5rjhzph1f7, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:41:13.221420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:13.222655Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519897062740887325:2618] 2025-06-25T14:41:15.166402Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897049855984302:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:15.166462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-25T14:41:19.353628Z :WriteToTopic_Demo_1_Table INFO: TTopicSdkTestSetup started 2025-06-25T14:41:19.365679Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:41:19.386587Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519897088510691315:2710] connected; active server actors: 1 2025-06-25T14:41:19.386839Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-25T14:41:19.387470Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-25T14:41:19.387615Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-25T14:41:19.388344Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-25T14:41:19.390372Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:41:19.391328Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-25T14:41:19.391593Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transacti ... n_actor.cpp:192: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) committing to position 12 prev 0 end 12 by cookie 2 2025-06-25T14:46:17.851869Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-25T14:46:17.851908Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037894] got client message batch for topic 'topic_A' partition 0 2025-06-25T14:46:17.852036Z node 19 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 12 (startOffset 12) session test-consumer_19_1_6679484965136411759_v1 2025-06-25T14:46:17.852220Z node 19 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-25T14:46:17.853409Z node 19 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 12 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:46:17.853483Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:46:17.853527Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037894, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:46:17.853559Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-25T14:46:17.853665Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-25T14:46:17.853704Z node 19 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 12 endOffset 12 with cookie 2 2025-06-25T14:46:17.853748Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 12 2025-06-25T14:46:17.860836Z :DEBUG: [/Root] [/Root] [59d01cb7-bcf3a8e5-9a49252a-a913b491] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 12 } } 2025-06-25T14:46:18.381518Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:11:12 2025-06-25T14:46:18.381592Z :INFO: [/Root] [/Root] [59d01cb7-bcf3a8e5-9a49252a-a913b491] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1000 BytesRead: 15000000 MessagesRead: 12 BytesReadCompressed: 15000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:46:18.411953Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 checking auth because of timeout 2025-06-25T14:46:18.412416Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 auth for : test-consumer 2025-06-25T14:46:18.413094Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 Handle describe topics response 2025-06-25T14:46:18.413213Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 auth is DEAD 2025-06-25T14:46:18.413304Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 auth ok: topics# 1, initDone# 1 2025-06-25T14:46:18.434250Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 checking auth because of timeout 
2025-06-25T14:46:18.434334Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 auth for : test-consumer 2025-06-25T14:46:18.435924Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 Handle describe topics response 2025-06-25T14:46:18.436055Z node 19 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 auth is DEAD 2025-06-25T14:46:18.436139Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 auth ok: topics# 1, initDone# 1 2025-06-25T14:46:18.877178Z node 19 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:46:19.384985Z :INFO: [/Root] [/Root] [59d01cb7-bcf3a8e5-9a49252a-a913b491] Closing read session. Close timeout: 0.000000s 2025-06-25T14:46:19.385056Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:11:12 2025-06-25T14:46:19.385173Z :INFO: [/Root] [/Root] [59d01cb7-bcf3a8e5-9a49252a-a913b491] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2004 BytesRead: 15000000 MessagesRead: 12 BytesReadCompressed: 15000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:46:19.385341Z :NOTICE: [/Root] [/Root] [59d01cb7-bcf3a8e5-9a49252a-a913b491] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:46:19.385398Z :DEBUG: [/Root] [/Root] [59d01cb7-bcf3a8e5-9a49252a-a913b491] [] Abort session to cluster 2025-06-25T14:46:19.386148Z :DEBUG: [/Root] 0x000051E000DEDD90 TDirectReadSessionManager ServerSessionId=test-consumer_19_1_6679484965136411759_v1 Close 2025-06-25T14:46:19.386671Z :DEBUG: [/Root] 0x000051E000DEDD90 TDirectReadSessionManager ServerSessionId=test-consumer_19_1_6679484965136411759_v1 Close 2025-06-25T14:46:19.386814Z :NOTICE: [/Root] [/Root] [59d01cb7-bcf3a8e5-9a49252a-a913b491] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:46:19.388402Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|287501a3-35f5c029-dfb3ddd7-26ac9c6b_0] PartitionId [0] Generation [2] Write session: close. Timeout 0.000000s 2025-06-25T14:46:19.388464Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|287501a3-35f5c029-dfb3ddd7-26ac9c6b_0] PartitionId [0] Generation [2] Write session will now close 2025-06-25T14:46:19.388509Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|287501a3-35f5c029-dfb3ddd7-26ac9c6b_0] PartitionId [0] Generation [2] Write session: aborting 2025-06-25T14:46:19.388931Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|287501a3-35f5c029-dfb3ddd7-26ac9c6b_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2025-06-25T14:46:19.388978Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|287501a3-35f5c029-dfb3ddd7-26ac9c6b_0] PartitionId [0] Generation [2] Write session: destroy 2025-06-25T14:46:19.394476Z node 19 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 grpc read done: success# 0, data# { } 2025-06-25T14:46:19.394514Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 grpc read failed 2025-06-25T14:46:19.400379Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 closed 2025-06-25T14:46:19.400484Z node 19 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 is DEAD 2025-06-25T14:46:19.400618Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [19:7519898369799450850:2524]: session cookie 2 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 grpc read done: success# 0, data# { } 2025-06-25T14:46:19.400649Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [19:7519898369799450850:2524]: session cookie 2 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1grpc read failed 2025-06-25T14:46:19.400691Z node 19 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:349: Direct read proxy [19:7519898369799450850:2524]: session cookie 2 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 Close session with reason: reads done signal, closing everything 2025-06-25T14:46:19.400707Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:367: session cookie 2 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 closed 2025-06-25T14:46:19.400744Z node 19 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [19:7519898369799450850:2524]: session cookie 2 consumer test-consumer session test-consumer_19_1_6679484965136411759_v1 proxy is DEAD 2025-06-25T14:46:19.402423Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_19_1_6679484965136411759_v1 2025-06-25T14:46:19.402467Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [19:7519898369799450844:2521] destroyed 2025-06-25T14:46:19.402518Z node 19 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [19:7519898369799450841:2518] disconnected; active server actors: 1 2025-06-25T14:46:19.402523Z node 19 :PQ_READ_PROXY DEBUG: 
caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_19_1_6679484965136411759_v1 2025-06-25T14:46:19.402543Z node 19 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [19:7519898369799450841:2518] client test-consumer disconnected session test-consumer_19_1_6679484965136411759_v1 2025-06-25T14:46:19.420483Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|287501a3-35f5c029-dfb3ddd7-26ac9c6b_0 grpc read done: success: 0 data: 2025-06-25T14:46:19.420515Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|287501a3-35f5c029-dfb3ddd7-26ac9c6b_0 grpc read failed 2025-06-25T14:46:19.420547Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|287501a3-35f5c029-dfb3ddd7-26ac9c6b_0 grpc closed 2025-06-25T14:46:19.420564Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|287501a3-35f5c029-dfb3ddd7-26ac9c6b_0 is DEAD 2025-06-25T14:46:19.421143Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:46:19.425521Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [19:7519898361209516194:2499] destroyed 2025-06-25T14:46:19.425570Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. >> LabeledDbCounters::OneTabletRestart [GOOD] >> LabeledDbCounters::TwoTablets >> TxUsage::Transactions_Conflict_On_SeqNo_Query >> KqpIndexLookupJoin::Inner+StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchDelete::SimplePartitions [GOOD] Test command err: Trying to start YDB, gRPC: 7507, MsgBus: 64339 2025-06-25T14:42:40.733314Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897438239088134:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733370Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bd8/r3tmp/tmpho8hH1/pdisk_1.dat 2025-06-25T14:42:41.596286Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.651788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.651892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.678261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.729546Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 7507, node 1 2025-06-25T14:42:42.724912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.724933Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.724938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.725057Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64339 TClient is connected to server localhost:64339 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.995082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.414667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.517027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.592434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.733395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897438239088134:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.733469Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.828472Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897459713926088:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.828588Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.941092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.964883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.986661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.011831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.045756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.110762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.148109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.197323Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472598828652:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.197407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.197479Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897472598828657:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.201118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.213634Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897472598828659:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.287296Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897472598828710:3442] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 20690, MsgBus: 63436 2025-06-25T14:42:54.397959Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897495779254246:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:54.398038Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bd8/r3tmp/tmp0Sk0sJ/pdisk_1.dat 2025-06-25T14:42:54.500237Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:5 ... k failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:01.184517Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:46:01.184546Z node 15 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Trying to start YDB, gRPC: 11554, MsgBus: 15198 2025-06-25T14:46:04.180627Z node 16 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[16:7519898312384043088:2236];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bd8/r3tmp/tmpUj9ZNh/pdisk_1.dat 2025-06-25T14:46:04.286752Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:46:04.469589Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:04.469695Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:04.475107Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:04.477156Z node 16 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11554, node 16 2025-06-25T14:46:04.581149Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:04.581172Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:04.581181Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:04.581348Z node 16 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15198 2025-06-25T14:46:05.147572Z node 16 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15198 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:46:05.307757Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:46:05.340335Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:05.439116Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:05.660793Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:05.774878Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:09.143867Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[16:7519898312384043088:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:09.143957Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:10.461680Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519898338153848309:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:10.461780Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:10.671516Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:10.722487Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:10.784822Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:10.865360Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:10.925414Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:11.002084Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:11.063613Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:11.192496Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519898342448816286:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:11.192641Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:11.196725Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519898342448816291:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:11.205704Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:11.225012Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [16:7519898342448816293:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:46:11.279982Z node 16 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [16:7519898342448816344:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:19.464695Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:46:19.464720Z node 16 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded >> KqpFlipJoin::LeftSemi_3 [GOOD] >> KqpJoinOrder::FiveWayJoinWithConstantFold-ColumnStore [GOOD] >> KqpIndexLookupJoin::LeftOnlyJoinValueColumn+StreamLookup >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::LeftSemi_3 [GOOD] Test command err: Trying to start YDB, gRPC: 4356, MsgBus: 2902 2025-06-25T14:46:09.601716Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898334079452094:2245];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:09.602354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e5d/r3tmp/tmpvIfabA/pdisk_1.dat 2025-06-25T14:46:10.071471Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:10.071571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:10.077533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:10.094198Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898334079451848:2080] 1750862769498643 != 1750862769498646 2025-06-25T14:46:10.101676Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4356, node 1 2025-06-25T14:46:10.368907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:10.368942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:10.368956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:10.369081Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:10.586548Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2902 TClient is connected to server localhost:2902 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:11.227901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:11.252787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:11.282525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:46:11.434430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:11.652088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:46:11.778540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:13.469258Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898351259322674:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:13.469377Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:13.727224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:13.751007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:13.786860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:13.857993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:13.886125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:13.930218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:13.982356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:14.048688Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898355554290632:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:14.048752Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:14.049048Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898355554290637:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:14.052516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:14.066870Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898355554290639:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:14.161090Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898355554290690:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:14.584405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898334079452094:2245];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:14.584462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:15.101223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work ... ecting -> Connected TServer::EnableGrpc on GrpcPort 11002, node 2 2025-06-25T14:46:17.700825Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:17.700844Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:17.700854Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:17.700952Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23921 TClient is connected to server localhost:23921 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:46:18.312264Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:18.407059Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:46:18.413726Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:46:18.421226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:18.485271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:18.646932Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:18.752159Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:21.185534Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898384837535365:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:21.185604Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:21.228012Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:21.266642Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:21.345755Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:21.427348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:21.488837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:21.551053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:21.617804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:21.741024Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898384837536032:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:21.741134Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:21.748496Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898384837536037:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:21.756945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:21.773536Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898384837536039:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:46:21.865943Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898384837536090:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:22.308412Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898367657664758:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:22.308489Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:23.317563Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:23.362841Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:23.443517Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:23.494482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpIndexLookupJoin::MultiJoins >> KqpIndexLookupJoin::SimpleLeftJoin+StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithConstantFold-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 9649, MsgBus: 19434 2025-06-25T14:45:42.660773Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898216700515587:2127];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:42.662027Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e77/r3tmp/tmpCu4Suh/pdisk_1.dat 2025-06-25T14:45:43.102440Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-25T14:45:43.102528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:43.104537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:43.140440Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898216700515499:2080] 1750862742627625 != 1750862742627628 2025-06-25T14:45:43.154869Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9649, node 1 2025-06-25T14:45:43.199852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:43.199865Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:43.200447Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:43.200586Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19434 TClient is connected to server localhost:19434 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:45:43.665011Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:43.767205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:45.598077Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898229585418030:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:45.598196Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:45.598596Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898229585418042:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:45.602391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:45.628720Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898229585418044:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:45.700021Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898229585418097:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:46.028854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.172779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.215531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.268947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.306439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.454869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.506096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.588141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.632889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.710263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.748037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.786348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:46.827810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:47.518003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:47.555757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/sc ... 
09029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.712967Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.713789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.714657Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.715149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.719062Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.719567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.720879Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.721427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.724881Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.725455Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.728091Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.728902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.730333Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.730834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.733809Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.734336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.735598Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.739104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.739182Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.739745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.743309Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.744958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.747261Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.747794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.749932Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.750500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.753017Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.753572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.755939Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.756745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.759644Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.760372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.762326Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.763796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.765045Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.765709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.769040Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.769704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:17.770598Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.777011Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:17.868624Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrtqq8735bf1931m8h9wfe", SessionId: ydb://session/3?node_id=1&id=M2JmNDg5NDMtYWI0ZTA5MGYtZDg5ZjRhZmUtYzFmZDNiZmE=, Slow query, duration: 28.451760s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:18.128204Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:18.128954Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519898298304911758:4310];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-25T14:46:18.131034Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:18.187229Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::LeftOnly-StreamLookup [GOOD] >> KqpIndexLookupJoin::CheckCastUtf8ToString+StreamLookupJoin-NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::SimpleLeftJoin+StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 24270, MsgBus: 63948 2025-06-25T14:46:11.735093Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898343646324120:2216];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:11.735294Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e54/r3tmp/tmpQLojnh/pdisk_1.dat 2025-06-25T14:46:12.327119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:12.327212Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:12.342573Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:12.348195Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898343646323940:2080] 1750862771708091 != 1750862771708094 2025-06-25T14:46:12.437285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24270, node 1 2025-06-25T14:46:12.628796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:12.628819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:12.628826Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:12.628940Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:12.764662Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63948 TClient is connected to server localhost:63948 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:13.391466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:13.413483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:13.432627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:13.633996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:13.794746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:13.858882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:15.446637Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898360826194749:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.446748Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.778629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.825830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.907702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.966413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.042688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.121823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.174151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.276681Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898365121162707:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:16.276774Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:16.276997Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898365121162712:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:16.281216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:16.296117Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898365121162714:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:16.366032Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898365121162767:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:16.729428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898343646324120:2216];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:16.729511Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:17.767396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... RN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:20.659412Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19378 2025-06-25T14:46:21.139353Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19378 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:21.489472Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:21.501164Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:46:21.522561Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:21.610857Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:46:21.886331Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:21.998435Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:24.080469Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898397185079085:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:24.080569Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:24.183983Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.245419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.281447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.361807Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.440395Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.523465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.595306Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.709423Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898397185079750:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:24.709486Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:24.709623Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898397185079755:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:24.713805Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:24.730346Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898397185079757:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:24.824719Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898397185079808:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:26.314747Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.357677Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.391854Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.491777Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.552268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.614237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> YdbOlapStore::LogWithUnionAllAscending [GOOD] >> YdbOlapStore::LogWithUnionAllDescending ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftOnly-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 7431, MsgBus: 65154 2025-06-25T14:46:10.888521Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898339155126664:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:10.888760Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e59/r3tmp/tmpKaC9hq/pdisk_1.dat 2025-06-25T14:46:11.550919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:11.551016Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:11.562904Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:11.619520Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:11.628453Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898339155126518:2080] 1750862770832445 != 1750862770832448 TServer::EnableGrpc on GrpcPort 7431, node 1 2025-06-25T14:46:11.880823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:11.880845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:11.880852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:11.880978Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:11.897100Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:65154 TClient is connected to server localhost:65154 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:13.081761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:46:13.114454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:13.137457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:13.334676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:13.571317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:46:13.704556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.866114Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898360629964627:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.866204Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.880660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898339155126664:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:15.922380Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:16.214820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.247834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.304054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.369408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.408987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.462092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.512003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.628544Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898364924932588:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:16.628664Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:16.629093Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898364924932593:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:16.633345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:16.662524Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898364924932595:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:16.730968Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898364924932646:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:17.900015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... 31 2025-06-25T14:46:21.448768Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16731 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:22.090144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:22.122856Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:22.254884Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:22.548588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:22.672827Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:25.301452Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898404243767800:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:25.301549Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:25.373063Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:25.431032Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898382768929842:2155];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:25.431344Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:25.435680Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:25.491532Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:25.590199Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:25.652232Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:25.717970Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:25.784808Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:25.896369Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519898404243768464:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:25.896450Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:25.896657Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898404243768469:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:25.899878Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:25.912556Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898404243768471:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:25.973611Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898404243768522:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:27.481775Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:27.543160Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:27.586851Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:27.636807Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:27.691974Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:27.734156Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoin::RightSemiJoin_SecondaryIndex [GOOD] >> KqpJoinOrder::SortingsByPKWithLookupJoin+RemoveLimitOperator >> KqpJoinOrder::ShuffleEliminationManyKeysJoinPredicate >> TxUsage::WriteToTopic_Demo_46_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::RightSemiJoin_SecondaryIndex [GOOD] Test command err: Trying to start YDB, gRPC: 9682, MsgBus: 30242 2025-06-25T14:46:10.908806Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898337382569509:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:10.930172Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e57/r3tmp/tmpENG7B5/pdisk_1.dat 2025-06-25T14:46:11.558898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:11.559015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:11.603068Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:11.637838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9682, node 1 2025-06-25T14:46:11.820549Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:11.820570Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:11.820576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:11.820665Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:11.936460Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30242 TClient is connected to server localhost:30242 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:12.654898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:46:12.709608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:12.854545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:13.081082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:46:13.159983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:14.653953Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898354562440276:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:14.654041Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:14.973156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.053693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.090390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.144906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.178591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.232024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.276605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:15.388725Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898358857408233:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.388796Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.391479Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898358857408238:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.395825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:15.410391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:46:15.410757Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898358857408240:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:15.488022Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898358857408291:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:15.909691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898337382569509:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:15.909773Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:16.627442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.697822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, ... d to server localhost:22326 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:21.685721Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:21.696747Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:21.716067Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:46:21.854880Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:22.222461Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:22.336620Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:24.707089Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898401346597782:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:24.707189Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:24.795727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.847849Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.896833Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.974132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:25.016471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:25.097088Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:25.101461Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898384166727109:2175];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:25.102773Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:25.218364Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:25.311867Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519898405641565746:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:25.311944Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:25.312378Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898405641565751:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:25.317910Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:25.333175Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898405641565753:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:25.395082Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898405641565807:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:26.695717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.755304Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.787002Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.819724Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.896208Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:27.977476Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpIndexLookupJoin::RightSemi [GOOD] >> KqpIndexLookupJoin::SimpleInnerJoin+StreamLookup >> KqpJoinOrder::SortingsWithLookupJoin2-RemoveLimitOperator [GOOD] >> TxUsage::WriteToTopic_Demo_46_Query >> KqpIndexLookupJoin::Left+StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query [GOOD] Test command err: 2025-06-25T14:41:28.167219Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897125954460257:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:28.223632Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:41:28.411720Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001943/r3tmp/tmpnmEiEs/pdisk_1.dat 2025-06-25T14:41:28.665483Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:28.669761Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897125954460227:2080] 1750862488160357 != 1750862488160360 2025-06-25T14:41:28.678189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:28.678355Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:28.687576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61851, node 1 2025-06-25T14:41:28.881008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001943/r3tmp/yandexbkuJ4O.tmp 2025-06-25T14:41:28.881036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001943/r3tmp/yandexbkuJ4O.tmp 2025-06-25T14:41:28.881195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001943/r3tmp/yandexbkuJ4O.tmp 2025-06-25T14:41:28.881367Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:29.025647Z INFO: TTestServer started on Port 24409 GrpcPort 61851 TClient is connected to server localhost:24409 2025-06-25T14:41:29.241009Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; PQClient connected to localhost:61851 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:41:29.375520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:29.391198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:41:29.409020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:41:29.421028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:41:29.531047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:41:32.332817Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897143134330211:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:32.332936Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:32.334768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897143134330223:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:32.340513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:32.366648Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897143134330225:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:41:32.710016Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897143134330289:2446] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:32.739013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:32.789167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:32.970336Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897143134330297:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:41:32.972479Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZGRlM2VlMjUtNTNmMWI1MTYtZmM4NjUxNDctODFhZTA4ZWY=, ActorId: [1:7519897143134330208:2299], ActorState: ExecuteState, TraceId: 01jykrjwn4d5fqhfbatxn2b3pq, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:41:32.974605Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:41:32.980151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:41:33.166927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897125954460257:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:33.166980Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Subcribe to ClusterTracker from [1:7519897147429297895:2628] === CheckClustersList. Ok 2025-06-25T14:41:39.963588Z :ReadWithRestarts INFO: TTopicSdkTestSetup started 2025-06-25T14:41:39.998729Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:41:40.041378Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519897177494069195:2727] connected; active server actors: 1 2025-06-25T14:41:40.041601Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-25T14:41:40.044523Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:41:40.045057Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-25T14:41:40.045188Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-25T14:41:40.045286Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-25T14:41:40.045555Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. ... tate: StateIdle] need more data for compaction. cumulativeSize=293, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:46:23.617308Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-25T14:46:23.617458Z node 20 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-25T14:46:23.617510Z node 20 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 1 endOffset 1 with cookie 2 2025-06-25T14:46:23.617562Z node 20 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 1 2025-06-25T14:46:24.422596Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:46:24.423909Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 4 sessionId: test-message_group_id|561b2f7f-f2fbc260-6d39884f-1e2afdf0_0 describe result for acl check 2025-06-25T14:46:24.560461Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:0:1 2025-06-25T14:46:24.560557Z :INFO: [/Root] [/Root] [26ba3ecb-a4155de2-6334fb1a-a526c2a9] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1004 BytesRead: 144 MessagesRead: 1 BytesReadCompressed: 144 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:46:24.572099Z node 20 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 checking auth because of timeout 2025-06-25T14:46:24.572231Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 auth for : test-consumer 2025-06-25T14:46:24.573485Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 Handle describe topics response 2025-06-25T14:46:24.573740Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 auth is DEAD 2025-06-25T14:46:24.573905Z node 20 :PQ_READ_PROXY INFO: 
read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 auth ok: topics# 1, initDone# 1 2025-06-25T14:46:24.599807Z node 20 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 checking auth because of timeout 2025-06-25T14:46:24.600213Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 auth for : test-consumer 2025-06-25T14:46:24.603289Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 Handle describe topics response 2025-06-25T14:46:24.603462Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 auth is DEAD 2025-06-25T14:46:24.603509Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 auth ok: topics# 1, initDone# 1 2025-06-25T14:46:25.607615Z node 20 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 checking auth because of timeout 2025-06-25T14:46:25.608092Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 auth for : test-consumer 2025-06-25T14:46:25.615311Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 Handle describe topics response 2025-06-25T14:46:25.615483Z node 20 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 auth is DEAD 2025-06-25T14:46:25.664841Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 auth ok: topics# 1, initDone# 1 2025-06-25T14:46:27.209223Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037894] server connected, pipe [20:7519898413239322797:4485], now have 1 active actors on pipe 2025-06-25T14:46:27.215225Z :INFO: [/Root] [/Root] [26ba3ecb-a4155de2-6334fb1a-a526c2a9] Closing read session. Close timeout: 0.000000s 2025-06-25T14:46:27.215307Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:0:1 2025-06-25T14:46:27.215378Z :INFO: [/Root] [/Root] [26ba3ecb-a4155de2-6334fb1a-a526c2a9] Counters: { Errors: 0 CurrentSessionLifetimeMs: 3659 BytesRead: 144 MessagesRead: 1 BytesReadCompressed: 144 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:46:27.215530Z :NOTICE: [/Root] [/Root] [26ba3ecb-a4155de2-6334fb1a-a526c2a9] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:46:27.215600Z :DEBUG: [/Root] [/Root] [26ba3ecb-a4155de2-6334fb1a-a526c2a9] [] Abort session to cluster 2025-06-25T14:46:27.218996Z :DEBUG: [/Root] 0x000051E000E6F990 TDirectReadSessionManager ServerSessionId=test-consumer_20_1_13605105579200603432_v1 Close 2025-06-25T14:46:27.219587Z :DEBUG: [/Root] 0x000051E000E6F990 TDirectReadSessionManager ServerSessionId=test-consumer_20_1_13605105579200603432_v1 Close 2025-06-25T14:46:27.219795Z :NOTICE: [/Root] [/Root] [26ba3ecb-a4155de2-6334fb1a-a526c2a9] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:46:27.219693Z node 20 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 grpc read done: success# 0, data# { } 2025-06-25T14:46:27.219744Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 grpc read failed 2025-06-25T14:46:27.219781Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 grpc closed 2025-06-25T14:46:27.219828Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 is DEAD 2025-06-25T14:46:27.221676Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_20_1_13605105579200603432_v1 2025-06-25T14:46:27.221733Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [20:7519898396059453143:2864] destroyed 2025-06-25T14:46:27.221785Z node 20 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [20:7519898396059453140:2861] disconnected; active server actors: 1 2025-06-25T14:46:27.221819Z node 20 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [20:7519898396059453140:2861] client test-consumer disconnected session test-consumer_20_1_13605105579200603432_v1 2025-06-25T14:46:27.221964Z node 20 :PQ_READ_PROXY DEBUG: caching_service.cpp:398: Direct read cache: close session for proxy [20:7519898396059453149:2867] 2025-06-25T14:46:27.222011Z node 20 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_20_1_13605105579200603432_v1 2025-06-25T14:46:27.222433Z node 20 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [20:7519898396059453149:2867]: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 grpc read done: success# 0, data# { } 2025-06-25T14:46:27.222473Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [20:7519898396059453149:2867]: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1grpc read failed 2025-06-25T14:46:27.222508Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [20:7519898396059453149:2867]: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 grpc closed 2025-06-25T14:46:27.222536Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [20:7519898396059453149:2867]: session cookie 2 consumer test-consumer session test-consumer_20_1_13605105579200603432_v1 proxy is DEAD 2025-06-25T14:46:27.232529Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|561b2f7f-f2fbc260-6d39884f-1e2afdf0_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-25T14:46:27.232597Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|561b2f7f-f2fbc260-6d39884f-1e2afdf0_0] PartitionId [0] Generation [1] Write session will now close 2025-06-25T14:46:27.232683Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|561b2f7f-f2fbc260-6d39884f-1e2afdf0_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-25T14:46:27.233254Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|561b2f7f-f2fbc260-6d39884f-1e2afdf0_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-25T14:46:27.233322Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|561b2f7f-f2fbc260-6d39884f-1e2afdf0_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-25T14:46:27.237103Z node 20 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message_group_id|561b2f7f-f2fbc260-6d39884f-1e2afdf0_0 grpc read done: success: 0 data: 2025-06-25T14:46:27.237145Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message_group_id|561b2f7f-f2fbc260-6d39884f-1e2afdf0_0 grpc read failed 2025-06-25T14:46:27.237198Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message_group_id|561b2f7f-f2fbc260-6d39884f-1e2afdf0_0 grpc closed 2025-06-25T14:46:27.237227Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message_group_id|561b2f7f-f2fbc260-6d39884f-1e2afdf0_0 is DEAD 2025-06-25T14:46:27.238628Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:46:27.238675Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:46:27.239115Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [20:7519898396059453054:2849] destroyed 2025-06-25T14:46:27.239146Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [20:7519898396059453057:2849] destroyed 2025-06-25T14:46:27.239194Z node 20 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
>> BasicStatistics::NotFullStatisticsColumnshard [GOOD] >> KqpJoinOrder::SortingsWithLookupJoin1-RemoveLimitOperator >> KqpIndexLookupJoin::Inner+StreamLookup [GOOD] >> KqpIndexLookupJoin::Inner-StreamLookup >> KqpIndexLookupJoin::LeftOnlyJoinValueColumn+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftOnlyJoinValueColumn-StreamLookup >> KqpJoinOrder::CanonizedJoinOrderTPCH14 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbLogStore::AlterLogTable [FAIL] Test command err: 2025-06-25T14:43:50.724612Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897737030229967:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:50.724676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001868/r3tmp/tmpDSmRVF/pdisk_1.dat 2025-06-25T14:43:51.160600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:51.160705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:51.165348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:51.189494Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11869, node 1 2025-06-25T14:43:51.252852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:51.252873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:51.252889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:51.252988Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64745 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:43:51.595329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:43:51.746316Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:43:51.863432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "LogStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "timestamp" Type: "Uint8" NotNull: true } Columns { Name: "resource_type" Type: "Utf8" NotNull: true } Columns { Name: "resource_id" Type: "Utf8" NotNull: true } Columns { Name: "uid" Type: "Utf8" NotNull: true } Columns { Name: "level" Type: "Int32" } Columns { Name: "message" Type: "Utf8" } Columns { Name: "json_payload" Type: "JsonDocument" } Columns { Name: "request_id" Type: "Utf8" } Columns { Name: "ingested_at" Type: "Timestamp" } Columns { Name: "saved_at" Type: "Timestamp" } KeyColumnNames: "timestamp" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" DefaultCompression { Codec: ColumnCodecLZ4 } } } } } TxId: 281474976710658 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:33544" , at schemeshard: 72057594046644480 2025-06-25T14:43:51.863828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /Root/LogStore, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:43:51.866790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: LogStore, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-25T14:43:51.866844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-25T14:43:51.866864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710658:0 type: TxCreateOlapStore target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-25T14:43:51.866942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:43:51.866979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:43:51.867032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-25T14:43:51.867085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-06-25T14:43:51.867339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: 
IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-06-25T14:43:51.869527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710658:0 1 -> 2 2025-06-25T14:43:51.869775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:43:51.869809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) 2025-06-25T14:43:51.869947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:43:51.869981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 6 2025-06-25T14:43:51.877188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710658, response: Status: StatusAccepted TxId: 281474976710658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-06-25T14:43:51.877412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710658, database: /Root, subject: , status: StatusAccepted, operation: CREATE COLUMN STORE, path: /Root/LogStore 2025-06-25T14:43:51.877667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:43:51.877705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-25T14:43:51.877864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T14:43:51.877933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:43:51.877951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519897741325197795:2369], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 1 2025-06-25T14:43:51.877987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519897741325197795:2369], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 2 2025-06-25T14:43:51.878030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:43:51.878062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710658:0 ProgressState, operation type: TxCreateOlapStore, at tablet# 72057594046644480 
2025-06-25T14:43:51.878849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976710658:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 1 TabletType: ColumnShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 2 BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StorageP ... 
erId: 72057594046644480, LocalPathId: 2] was 3 2025-06-25T14:45:48.767810Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-25T14:45:48.771417Z node 64 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 64, TabletId: 72075186224037889 not found 2025-06-25T14:45:48.771453Z node 64 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 64, TabletId: 72075186224037891 not found 2025-06-25T14:45:48.771468Z node 64 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 64, TabletId: 72075186224037888 not found 2025-06-25T14:45:48.771487Z node 64 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 64, TabletId: 72075186224037890 not found 2025-06-25T14:45:48.773366Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:45:48.773731Z node 64 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[64:7519898239083219690:2287];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-25T14:45:48.779271Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T14:45:48.779848Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:45:48.780077Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-25T14:45:48.780090Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-25T14:45:48.780130Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-25T14:45:48.780137Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-25T14:45:48.780172Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:45:48.780185Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-25T14:45:48.780224Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:45:48.789574Z node 64 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[64:7519898239083219645:2284];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-25T14:45:48.805208Z node 64 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[64:7519898239083219651:2286];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-25T14:45:48.809524Z node 64 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[64:7519898239083219650:2285];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-25T14:45:48.845895Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-25T14:45:48.845939Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-25T14:45:48.845998Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T14:45:48.846016Z node 64 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T14:45:48.846054Z node 64 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:45:54.308427Z node 67 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[67:7519898272048696802:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:54.463507Z node 67 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001868/r3tmp/tmpEYD1dn/pdisk_1.dat 2025-06-25T14:45:54.687807Z node 67 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:54.717196Z node 67 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(67, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:54.717303Z node 67 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(67, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:54.733281Z node 67 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(67, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12599, node 67 2025-06-25T14:45:54.945132Z node 67 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:54.945162Z node 67 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:54.945171Z node 67 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:54.945340Z node 67 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:45:55.276453Z node 67 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6943 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:55.485731Z node 67 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:55.588070Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "LogStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "resource_type" Type: "Utf8" NotNull: true } Columns { Name: "resource_id" Type: "Utf8" NotNull: true } Columns { Name: "uid" Type: "Utf8" NotNull: true } Columns { Name: "level" Type: "Int32" } Columns { Name: "message" Type: "Utf8" } Columns { Name: "json_payload" Type: "JsonDocument" } Columns { Name: "request_id" Type: "Utf8" } Columns { Name: "ingested_at" Type: "Timestamp" } Columns { Name: "saved_at" Type: "Timestamp" } KeyColumnNames: "timestamp" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" DefaultCompression { Codec: ColumnCodecLZ4 } } } } } TxId: 281474976710658 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:54780" , at schemeshard: 72057594046644480 2025-06-25T14:45:55.588856Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /Root/LogStore, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:45:55.588907Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710658:1, propose status:StatusPreconditionFailed, reason: Column stores are not supported, at schemeshard: 72057594046644480 2025-06-25T14:45:55.593256Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710658, response: Status: StatusPreconditionFailed Reason: "Column stores are not supported" TxId: 281474976710658 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:45:55.593521Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710658, database: /Root, subject: , status: StatusPreconditionFailed, reason: Column stores are not supported, operation: CREATE COLUMN STORE, path: /Root/LogStore 2025-06-25T14:45:55.593802Z node 67 :TX_PROXY ERROR: 
schemereq.cpp:553: Actor# [67:7519898276343664931:2607] txid# 281474976710658, issues: { message: "Column stores are not supported" severity: 1 } assertion failed at ydb/services/ydb/ydb_logstore_ut.cpp:435, virtual void NTestSuiteYdbLogStore::TTestCaseAlterLogTable::Execute_(NUnitTest::TTestContext &): (res.GetStatus() == EStatus::SUCCESS) failed: (PRECONDITION_FAILED != SUCCESS)
: Error: Column stores are not supported , with diff: (PRE|SUC)C(ONDITION_FAIL|)E(D|SS) TBackTrace::Capture()+28 (0x1D48269C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+592 (0x1D9416D0) NTestSuiteYdbLogStore::TTestCaseAlterLogTable::Execute_(NUnitTest::TTestContext&)+8888 (0x1CFA70E8) std::__y1::__function::__func, void ()>::operator()()+280 (0x1CFD0578) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x1D9788B6) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x1D948259) NTestSuiteYdbLogStore::TCurrentTest::Execute()+1204 (0x1CFCF744) NUnitTest::TTestFactory::Execute()+2438 (0x1D949B26) NUnitTest::RunMain(int, char**)+5213 (0x1D972E2D) ??+0 (0x7F136D3E6D90) __libc_start_main+128 (0x7F136D3E6E40) _start+41 (0x19C7C029) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin2-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 4505, MsgBus: 22830 2025-06-25T14:45:44.951087Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898225362576003:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:44.951180Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e76/r3tmp/tmpRiP98u/pdisk_1.dat 2025-06-25T14:45:45.505647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:45.505761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:45.567725Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898225362575983:2080] 1750862744936227 != 1750862744936230 2025-06-25T14:45:45.587055Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4505, node 1 2025-06-25T14:45:45.605032Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:45.800151Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:45.800191Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:45.800201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:45.800332Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:45:45.964972Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22830 TClient is connected to server localhost:22830 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:46.487380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:46.505658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:48.510824Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898242542445812:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:48.510992Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:48.512433Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898242542445824:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:48.516048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:48.526926Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898242542445826:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:48.634223Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898242542445877:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:48.943828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.059659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.094259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.124140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.153921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.287964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.319811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.364439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.388805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.421548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.455543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.491784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.569110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:49.956505Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898225362576003:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:49.956596Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:45:50.178342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subopera ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.001212Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.001737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.004694Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.005111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.009902Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.010393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.014286Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.014815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.017421Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.017817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.022175Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.022677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.027081Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.027482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-25T14:46:25.030878Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.031410Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.035206Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.035678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.039465Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.039838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.043798Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.047633Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.048018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.048351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.052168Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.052737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.059267Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.059669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.063602Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.064846Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.065263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.072852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.081349Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.081785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.085123Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.085527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.090007Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.090754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.095571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.098718Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.141466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:25.148282Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:25.248643Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrttcnejaznqsj6zprr1fg", SessionId: 
ydb://session/3?node_id=1&id=NDkxMjg4YzAtZmI3MjlkNTctMjFhNWE0M2MtZDdjMTI4Njc=, Slow query, duration: 33.098745s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:25.531310Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:25.531371Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:25.532095Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/service/ut/unittest >> BasicStatistics::NotFullStatisticsColumnshard [GOOD] Test command err: 2025-06-25T14:38:42.041292Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:38:42.041671Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:38:42.041791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001cba/r3tmp/tmprrFFpP/pdisk_1.dat 2025-06-25T14:38:42.402620Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22563, node 1 2025-06-25T14:38:42.637045Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:38:42.637104Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:38:42.637150Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:38:42.637868Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:38:42.640365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:38:42.743269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:42.743425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:42.758237Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7501 2025-06-25T14:38:43.307571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:38:45.892475Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T14:38:45.948834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:45.948971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:45.994971Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:38:45.997294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:46.251701Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:46.287380Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.287894Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.288398Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.288517Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.288683Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.288758Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.288804Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.288873Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.288943Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T14:38:46.476430Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:38:46.476530Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:38:46.489062Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:38:46.639293Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:38:46.696008Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T14:38:46.696118Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T14:38:46.739026Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T14:38:46.739255Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T14:38:46.739489Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T14:38:46.739563Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T14:38:46.739623Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T14:38:46.739680Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T14:38:46.739728Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T14:38:46.739784Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T14:38:46.740340Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T14:38:46.764021Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:46.764156Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T14:38:46.774397Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T14:38:46.782374Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T14:38:46.782681Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T14:38:46.793127Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T14:38:46.813262Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T14:38:46.813345Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T14:38:46.813438Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T14:38:46.826149Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:38:46.833923Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T14:38:46.834063Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T14:38:47.012270Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T14:38:47.167209Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T14:38:47.227181Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T14:38:47.770257Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:38:48.101698Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:48.101883Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:38:48.124952Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:38:48.299516Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:38:48.299785Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:38:48.300146Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:38:48.300289Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:38:48.300459Z node 2 :TX_COLUMNSHARD WARN: ... ard count = 1 2025-06-25T14:45:36.417299Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:45:37.373089Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:37.373174Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:38.724191Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:45:38.840910Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:38.840975Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:40.529236Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T14:45:40.529454Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:45:40.717217Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:40.717301Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:42.736007Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:42.736084Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:43.585001Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:45:44.915987Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:44.916067Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-25T14:45:46.061102Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T14:45:46.061304Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:45:47.557097Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:47.557168Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:49.945194Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:45:50.135281Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:50.135360Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:51.353126Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-25T14:45:51.353208Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7961: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T14:45:51.353245Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7992: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T14:45:51.353281Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-25T14:45:52.876943Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T14:45:52.877114Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:45:53.132900Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:53.132964Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:55.796905Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:55.796982Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:56.949114Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:45:58.471933Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:45:58.472011Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:45:59.585176Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T14:45:59.585441Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:46:01.089723Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:46:01.089802Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-25T14:46:03.309037Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:46:03.517724Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:46:03.517801Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:46:06.061085Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T14:46:06.061287Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:46:06.274854Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:46:06.274933Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:46:08.609204Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:46:08.609284Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:46:09.669112Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:46:11.341198Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:46:11.341259Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:46:12.505076Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T14:46:12.505290Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:46:14.015971Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:46:14.016062Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:46:16.405037Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:46:16.593619Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:46:16.593700Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:46:19.301067Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T14:46:19.301261Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:46:19.553156Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:46:19.553238Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:46:22.281219Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:46:22.281301Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-25T14:46:23.360936Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:46:24.803461Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:46:24.803538Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:46:25.971724Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T14:46:25.971967Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T14:46:27.556917Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:46:27.557017Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:46:30.009186Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T14:46:30.185008Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T14:46:30.185088Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T14:46:31.409683Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-25T14:46:31.409780Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7961: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T14:46:31.409817Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7992: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T14:46:31.409856Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-25T14:46:31.645960Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-25T14:46:31.646059Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 231.000000s, at schemeshard: 72075186224037897 2025-06-25T14:46:31.646345Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 53 ... waiting for TEvSchemeShardStats 2 (done) ... waiting for TEvPropagateStatistics 2025-06-25T14:46:31.670012Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T14:46:33.125132Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T14:46:33.125367Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 ... 
waiting for TEvPropagateStatistics (done) 2025-06-25T14:46:33.125718Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 4 ], ReplyToActorId[ [2:17048:10379]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T14:46:33.133848Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 4 ] 2025-06-25T14:46:33.133941Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 4, ReplyToActorId = [2:17048:10379], StatRequests.size() = 1 |85.5%| [TA] $(B)/ydb/core/statistics/service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.5%| [TA] {RESULT} $(B)/ydb/core/statistics/service/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpIndexLookupJoin::MultiJoins [GOOD] >> KqpIndexLookupJoin::LeftSemiJoinWithLeftFilter-StreamLookup >> KqpJoinOrder::CanonizedJoinOrderTPCH18 >> KqpJoinOrder::SortingsSimpleOrderByAliasIndexDesc+RemoveLimitOperator >> YdbOlapStore::LogExistingRequest [GOOD] >> YdbOlapStore::LogExistingUserId >> KqpIndexLookupJoin::CheckCastUtf8ToString+StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUtf8ToString+StreamLookupJoin+NotNull >> KqpJoin::IdxLookupPartialLeftPredicate >> TGroupMapperTest::MonteCarlo [GOOD] >> KqpIndexLookupJoin::Left+StreamLookup [GOOD] >> KqpIndexLookupJoin::Left-StreamLookup >> KqpIndexLookupJoin::SimpleInnerJoin+StreamLookup [GOOD] |85.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/bscontroller/ut/unittest >> TGroupMapperTest::MonteCarlo [GOOD] >> KqpIndexLookupJoin::Inner-StreamLookup [GOOD] |85.5%| [TA] $(B)/ydb/core/mind/bscontroller/ut/test-results/unittest/{meta.json ... results_accumulator.log} |85.5%| [TA] {RESULT} $(B)/ydb/core/mind/bscontroller/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::SimpleInnerJoin+StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 10104, MsgBus: 26749 2025-06-25T14:46:24.536196Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898398897026092:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:24.536242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e49/r3tmp/tmp1TU6CJ/pdisk_1.dat 2025-06-25T14:46:25.156549Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898398897026074:2080] 1750862784530585 != 1750862784530588 2025-06-25T14:46:25.159814Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:25.184495Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:25.184591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:25.189312Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10104, node 1 2025-06-25T14:46:25.404839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:25.404861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:25.404868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:25.404965Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:25.611623Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26749 TClient is connected to server localhost:26749 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:46:26.331675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:26.348901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:26.363109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:26.602663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:26.846955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:46:26.966913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:29.013229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898420371864198:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:29.013344Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:29.407170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:29.461558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:29.499426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:29.537104Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898398897026092:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:29.537589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:29.540648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:29.614005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:29.713163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:29.775887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:29.886847Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898420371864866:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:29.886915Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:29.887208Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898420371864871:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:29.891029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:29.906407Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898420371864873:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:29.965704Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898420371864924:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:31.230478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... RN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:34.628292Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20607 2025-06-25T14:46:35.036425Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20607 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:35.428261Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:35.468156Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:35.486750Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:35.623611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:35.882668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:36.014974Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:38.608092Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898457205648300:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:38.608202Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:38.706527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:38.773143Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:38.814916Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:38.868766Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:38.911962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:38.985720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:39.077423Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:39.188083Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898461500616261:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:39.188190Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:39.188367Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898461500616266:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:39.192733Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:39.210174Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898461500616268:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:39.283353Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898461500616319:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:40.879730Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.980303Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.033760Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.106599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.152157Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.194947Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpIndexLookupJoin::LeftOnlyJoinValueColumn-StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::Inner-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 27599, MsgBus: 17309 2025-06-25T14:46:26.110698Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898406430341857:2150];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e47/r3tmp/tmp5afyye/pdisk_1.dat 2025-06-25T14:46:26.492128Z node 
1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:46:26.754486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:26.754565Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:26.798659Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:26.804386Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898402135374425:2080] 1750862785979448 != 1750862785979451 2025-06-25T14:46:26.810305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27599, node 1 2025-06-25T14:46:27.087254Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:27.087272Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:27.087279Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:27.087379Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:27.087473Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17309 TClient is connected to server localhost:17309 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:28.086931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:46:28.104285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:46:28.115805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:28.322790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:28.516084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:28.645618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:30.728370Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898423610212534:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:30.728452Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:31.020460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:31.056694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:31.073029Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898406430341857:2150];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:31.074626Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:31.092646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:31.135696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:31.183859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:31.247411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:31.320395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:31.421399Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898427905180486:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:31.421502Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:31.421948Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898427905180491:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:31.424976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:31.436839Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898427905180493:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:46:31.498136Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898427905180545:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:33.443919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:36.544904Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2617 2025-06-25T14:46:36.980406Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2617 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:37.165981Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:37.171909Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:37.187078Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:37.294758Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:37.547955Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:37.670486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:40.244169Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898467927261694:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:40.244283Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:40.312926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.383857Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.456093Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.488930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.537378Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.630276Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.711636Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.817210Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898467927262368:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:40.817282Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:40.817479Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898467927262373:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:40.821018Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:40.844433Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898467927262375:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:40.945101Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898467927262426:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:42.369037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:42.408437Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:42.460851Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:42.501379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:42.537910Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:42.615644Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::TPCDS61-ColumnStore >> KqpJoinOrder::SortingsWithLookupJoin4+RemoveLimitOperator [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftOnlyJoinValueColumn-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 15403, MsgBus: 29203 2025-06-25T14:46:27.397059Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898413165886672:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:27.404391Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e46/r3tmp/tmpoqrg8F/pdisk_1.dat 2025-06-25T14:46:27.913909Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:27.913997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:27.923663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:27.968833Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15403, node 1 2025-06-25T14:46:28.215010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:28.215032Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:28.215041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:28.215138Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:28.440098Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29203 TClient is connected to server localhost:29203 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:29.223057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:29.250736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:29.435078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:29.635360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:29.768550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:31.722594Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898430345757440:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:31.722723Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:32.077216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:32.147507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:32.208836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:32.275214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:32.322944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:32.400452Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898413165886672:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:32.400544Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:32.422766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:32.489812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:32.577173Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898434640725394:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:32.577274Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:32.577497Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898434640725399:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:32.581630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:32.599219Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898434640725401:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:32.694486Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898434640725452:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:34.132773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:34.206770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /ho ... PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:37.895859Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:37.908576Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:46:37.926595Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:38.042453Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:46:38.203779Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:38.303794Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:41.043926Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898471668137082:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:41.044023Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:41.134244Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.197009Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.268219Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.346192Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.391394Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.450152Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.453549Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898450193299167:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:41.453597Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:41.529607Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.672663Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519898471668137741:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:41.672737Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:41.673099Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898471668137746:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:41.676729Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:41.693290Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:46:41.695685Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898471668137748:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:46:41.787208Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898471668137799:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:43.175347Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:43.217323Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:43.280888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:43.325771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:43.408297Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:43.447826Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::FiveWayJoinWithConstantFoldOpt-ColumnStore >> KqpIndexLookupJoin::CheckCastUtf8ToString+StreamLookupJoin+NotNull [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query [GOOD] >> KqpIndexLookupJoin::JoinWithComplexCondition+StreamLookupJoin >> KqpJoin::IdxLookupPartialLeftPredicate [GOOD] >> KqpJoin::IdxLookupPartialWithTempTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUtf8ToString+StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 22113, MsgBus: 17639 2025-06-25T14:46:30.398707Z node 1 
:METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898424287796986:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:30.416739Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e41/r3tmp/tmpwaA9Rh/pdisk_1.dat 2025-06-25T14:46:31.066709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:31.066790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:31.075616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:31.100393Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898424287796804:2080] 1750862790346493 != 1750862790346496 2025-06-25T14:46:31.100636Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22113, node 1 2025-06-25T14:46:31.348460Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:46:31.425492Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:31.425513Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:31.425520Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:31.425634Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17639 TClient is connected to server localhost:17639 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:32.566875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:46:32.605281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:32.615180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:32.879616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:33.104178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:33.202283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:35.304501Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898445762634927:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:35.304613Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:35.380539Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898424287796986:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:35.380598Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:35.955067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:36.044750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:36.081419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:36.118790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:36.160875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:36.227270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:36.266612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:36.368904Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898450057602891:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:36.369008Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:36.369281Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898450057602896:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:36.382496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:36.401207Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898450057602898:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:36.495937Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898450057602953:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:37.725136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... ed at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 12250, MsgBus: 29220 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e41/r3tmp/tmpPVm0De/pdisk_1.dat 2025-06-25T14:46:39.972432Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:46:40.008546Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:40.008613Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:40.016596Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:40.020504Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519898464990974482:2080] 1750862799649411 != 1750862799649414 2025-06-25T14:46:40.042720Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12250, node 2 2025-06-25T14:46:40.188755Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:40.188772Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:40.188780Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:40.188873Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29220 2025-06-25T14:46:40.716488Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29220 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:41.025305Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:41.032687Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:41.042961Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:41.130166Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:41.316258Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:41.418166Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:44.026358Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898482170845294:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:44.026458Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:44.113907Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.174518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.259648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.310489Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.355541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.403163Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.480994Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.585776Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898486465813245:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:44.585870Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:44.586263Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898486465813250:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:44.590664Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:44.612554Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898486465813252:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:44.675130Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898486465813303:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:46.593373Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:46.713447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin4+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 15055, MsgBus: 5640 2025-06-25T14:45:59.165468Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898293332307677:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:59.165517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e6f/r3tmp/tmpUvJu1D/pdisk_1.dat 2025-06-25T14:45:59.711565Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:59.711674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:59.725919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:59.817563Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:59.828130Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898293332307472:2080] 1750862759102124 != 1750862759102127 TServer::EnableGrpc on GrpcPort 15055, node 1 2025-06-25T14:45:59.991981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:59.992010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:59.992020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:59.992119Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:00.188496Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient 
is connected to server localhost:5640 TClient is connected to server localhost:5640 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:00.802036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:00.835623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:02.716224Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898306217210001:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:02.716375Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:02.720736Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898306217210013:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:02.725050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:02.736546Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898306217210015:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:02.828894Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898306217210066:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:03.087695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:03.224774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:03.272814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:03.324683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:03.389123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:03.629720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:03.680071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:03.726037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:03.808457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:03.839153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:03.872536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:03.947579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.006974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.165958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898293332307677:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:04.166031Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:04.597255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperat ... 
:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.204050Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038481;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.206284Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038437;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.206692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.212618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038483;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.221267Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038483;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.221729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038477;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.228179Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.230338Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038477;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.230880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038459;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.236798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038513;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.241293Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038513;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.241851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038461;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.246585Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038459;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.246983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.251742Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.252299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.254421Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038461;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.254851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.263551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.264302Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.264768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.268902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.273782Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.274312Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.277485Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.277889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038447;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.282903Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.283464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.286483Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038447;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.286972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.294824Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.295372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038539;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.296930Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.297406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.303785Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038539;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.306695Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.307210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.308691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:40.313009Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.319988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:40.475941Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrv90w4stsra60a84aebdz", SessionId: ydb://session/3?node_id=1&id=NWExODhiZDUtNjVmY2YxMDYtZmU4M2IxZjktNTI3NTJjZjI=, Slow query, duration: 33.342863s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:40.789322Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:40.789746Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:40.790171Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519898430771287665:5465];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-25T14:46:40.790519Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
:8:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504
:8:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504 >> KqpJoinOrder::DatetimeConstantFold-ColumnStore >> KqpIndexLookupJoin::LeftSemiJoinWithLeftFilter-StreamLookup [GOOD] >> TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Table [GOOD] >> TxUsage::WriteToTopic_Demo_46_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftSemiJoinWithLeftFilter-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 8768, MsgBus: 15652 2025-06-25T14:46:28.297638Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898414488039184:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:28.297685Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e42/r3tmp/tmpZSNl6L/pdisk_1.dat 2025-06-25T14:46:28.948442Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:28.958222Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898414488039162:2080] 1750862788276038 != 1750862788276041 2025-06-25T14:46:28.973462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:28.973564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:28.980133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8768, node 1 2025-06-25T14:46:29.186723Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:29.186744Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:29.186751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:29.186840Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:29.406968Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15652 TClient is connected to server localhost:15652 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:30.258190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:30.276083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:30.514726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:30.759994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:30.875784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:33.067423Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898435962877289:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:33.067554Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:33.300452Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898414488039184:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:33.300547Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:33.424966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:33.473981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:33.553624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:33.604909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:33.647067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:33.701762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:33.744426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:33.840692Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898435962877950:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:33.840772Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:33.841157Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898435962877955:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:33.845005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:33.868385Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898435962877957:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:46:33.924248Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898435962878008:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:35.021060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:35.082566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part ... d to server localhost:15227 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:40.981086Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:40.997074Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:41.006809Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:41.094280Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:41.262946Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:41.352263Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:44.540431Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898464927405584:2159];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:44.540489Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:44.664513Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898486402243565:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:44.664652Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:44.761090Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.825046Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.916078Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.985933Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.029464Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.120420Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.203131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.325314Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898490697211522:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:45.325419Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:45.325743Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898490697211527:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:45.330447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:45.350189Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898490697211529:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:45.431251Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898490697211580:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:47.058511Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:47.102651Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:47.139981Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:47.182130Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:47.223296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:47.262859Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TxUsage::WriteToTopic_Demo_47_Table >> KqpJoinOrder::CanonizedJoinOrderLookupBug >> KqpJoinOrder::FiveWayJoinStatsOverride-ColumnStore [GOOD] >> KqpJoinOrder::TPCDS95+ColumnStore >> KqpIndexLookupJoin::Left-StreamLookup [GOOD] >> TPQTestSlow::TestOnDiskStoredSourceIds [GOOD] >> KqpJoin::IdxLookupLeftPredicate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::Left-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 23076, MsgBus: 2412 2025-06-25T14:46:34.345562Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898443494117135:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:34.345617Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e3c/r3tmp/tmpA0RUNG/pdisk_1.dat 2025-06-25T14:46:34.974872Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:34.996549Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898443494116926:2080] 1750862794281235 != 1750862794281238 2025-06-25T14:46:35.037889Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:35.037969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:35.043651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23076, node 1 2025-06-25T14:46:35.331201Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:46:35.331479Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:35.331486Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:35.331494Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:35.331591Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2412 TClient is connected to server localhost:2412 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:36.452794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:46:36.488629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:36.499497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:36.714606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:46:37.013145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:37.190443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:39.346205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898443494117135:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:39.346275Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:39.445922Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898464968955066:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:39.446017Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:39.794531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:39.849131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:39.966344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.012668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.062431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.116558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.153732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:40.261008Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898469263923023:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:40.261122Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:40.261467Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898469263923028:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:40.265247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:40.298021Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898469263923030:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:40.368076Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898469263923083:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:41.721685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:45.077688Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:45.088873Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:46:45.101933Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:45.203093Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:45.506881Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:45.624396Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:48.305429Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898501921274181:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:48.305499Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:48.434155Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:48.522156Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:48.576291Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:48.608902Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:48.736405Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:48.764815Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898480446436246:2178];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:48.768569Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:48.818526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:48.897625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:49.011759Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519898506216242150:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:49.011843Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:49.012271Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898506216242155:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:49.016181Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:49.033003Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:46:49.033233Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898506216242157:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:46:49.111783Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898506216242209:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:50.266200Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:50.360246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:50.426188Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:50.492715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:50.566813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:50.616129Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest >> TPQTestSlow::TestOnDiskStoredSourceIds [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] 2025-06-25T14:45:22.421369Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in 
BillingMeteringConfig 2025-06-25T14:45:22.421448Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927938 is [1:157:2176] sender: [1:158:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-25T14:45:22.451595Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:22.476630Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:45:22.477570Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-25T14:45:22.481276Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-25T14:45:22.484052Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-25T14:45:22.485798Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:190:2201] 2025-06-25T14:45:22.496168Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d5de5b1e-13086887-90aef902-af843ebb_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:22.502474Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6be89e1e-551e3a57-5df21a73-66e5757d_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:22.521266Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3bdbe766-cbd6f39-40e3e502-b16179a7_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:22.527263Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|41d300ed-1746a551-3d7062b-2ac4215f_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:22.532780Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ddbadbe7-99c9c57d-23b60485-caa64299_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:22.539691Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|7f83da01-90f30fb0-5a5de571-8db494a_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] IGNORE 
Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:112:2057] recipient: [2:105:2137] 2025-06-25T14:45:22.945974Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:22.946059Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:158:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:183:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:111:2141]) on event NKikimr::TEvPersQueue::TEvUpdateConfigBuilder ! Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:185:2057] recipient: [2:103:2136] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:188:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:189:2057] recipient: [2:187:2197] Leader for TabletID 72057594037927937 is [2:190:2198] sender: [2:191:2057] recipient: [2:187:2197] 2025-06-25T14:45:22.980633Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:22.980702Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [2:111:2141]) rebooted! !Reboot 72057594037927937 (actor [2:111:2141]) tablet resolver refreshed! 
new actor is[2:190:2198] Leader for TabletID 72057594037927937 is [2:190:2198] sender: [2:270:2057] recipient: [2:14:2061] 2025-06-25T14:45:24.576230Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:24.577231Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 2 actor [2:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-25T14:45:24.578131Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:276:2260] 2025-06-25T14:45:24.580434Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:276:2260] 2025-06-25T14:45:24.582210Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:277:2261] 2025-06-25T14:45:24.583964Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:277:2261] 2025-06-25T14:45:24.592968Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bf25642f-dac8772d-5930ad68-defc0344_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:24.600949Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|eee9d2a3-6aec921c-a7c8e0e2-b1a55686_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:24.626801Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c6a0b14e-baddc18b-bd47b49-9673ad90_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:24.636465Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|85e44e4c-155bc0e7-39eea7a5-94d231e1_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:24.645023Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ae6bf79f-66ce67bb-2d6d54fb-9b3079c3_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:45:24.653642Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|36dd4c3c-1d8f1602-f91c9b7d-db0f9ae4_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:112:2057] recipient: [3:105:2137] 2025-06-25T14:45:24.968584Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:24.968662Z node 3 :PERSQUEUE 
INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927938 is [3:157:2176] sender: [3:158:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:183:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:111:2141]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:185:2057] recipient: [3:103:2136] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:188:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:189:2057] recipient: [3:187:2197] Leader for TabletID 72057594037927937 is [3:190:2198] sender: [3:191:2057] recipient: [3:187:2197] 2025-06-25T14:45:25.009478Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:25.009565Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [3:111:2141]) rebooted! !Reboot 72057594037927937 (actor [3:111:2141]) tablet resolver refreshed! new actor is[3:190:2198] Leader for TabletID 72057594037927937 is [3:190:2198] sender: [3:270:2057] recipient: [3:14:2061] 2025-06-25T14:45:26.729779Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:45:26.730686Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 3 actor [3:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-06-25T14:45:26.731496Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 7205 ... 
7927937, Partition: 0, State: StateInit] bootstrapping 0 [47:189:2200] 2025-06-25T14:46:49.897353Z node 47 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [47:189:2200] 2025-06-25T14:46:49.899449Z node 47 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [47:190:2201] 2025-06-25T14:46:49.905771Z node 47 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [47:190:2201] 2025-06-25T14:46:49.918424Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|fa49bd15-5f7d65c2-46c5f44-3735cd54_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:49.930443Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|4cf179f8-98e1c886-6818dc7c-84a4baae_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:49.972148Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e96f615d-599f41a-e4093462-d06ff68c_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:49.991302Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|31392845-ec7f7500-baf67199-fd575f43_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:50.011456Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|9da5deb7-856692d1-a2e42450-39904044_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:50.031473Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|425a7e6-db0b5f94-67bcb27a-b8888d0_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default !Reboot 72057594037927937 (actor [47:111:2141]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [47:111:2141] sender: [47:285:2057] recipient: [47:103:2136] Leader for TabletID 72057594037927937 is [47:111:2141] sender: [47:288:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [47:111:2141] sender: [47:289:2057] recipient: [47:287:2280] Leader for TabletID 72057594037927937 is [47:290:2281] sender: [47:291:2057] recipient: [47:287:2280] 2025-06-25T14:46:50.123149Z node 47 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:46:50.123221Z node 47 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:46:50.124222Z node 47 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [47:339:2322] 2025-06-25T14:46:50.127133Z node 47 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [47:340:2323] 2025-06-25T14:46:50.136999Z node 47 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-25T14:46:50.137076Z node 47 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [47:339:2322] 2025-06-25T14:46:50.139478Z node 47 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:46:50.139557Z node 47 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [47:340:2323] !Reboot 72057594037927937 (actor [47:111:2141]) rebooted! !Reboot 72057594037927937 (actor [47:111:2141]) tablet resolver refreshed! new actor is[47:290:2281] Leader for TabletID 72057594037927937 is [47:290:2281] sender: [47:390:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:107:2057] recipient: [48:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:107:2057] recipient: [48:105:2137] Leader for TabletID 72057594037927937 is [48:111:2141] sender: [48:112:2057] recipient: [48:105:2137] 2025-06-25T14:46:52.292207Z node 48 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:46:52.292280Z node 48 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [48:153:2057] recipient: [48:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [48:153:2057] recipient: [48:151:2172] Leader for TabletID 72057594037927938 is [48:157:2176] sender: [48:158:2057] recipient: [48:151:2172] Leader for TabletID 72057594037927937 is [48:111:2141] sender: [48:183:2057] recipient: [48:14:2061] 2025-06-25T14:46:52.349536Z node 48 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:46:52.350580Z node 48 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 48 actor [48:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 48 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 48 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 48 Important: false } 2025-06-25T14:46:52.351423Z node 48 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [48:189:2200] 2025-06-25T14:46:52.354082Z node 48 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [48:189:2200] 2025-06-25T14:46:52.356044Z node 48 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [48:190:2201] 2025-06-25T14:46:52.364053Z node 48 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init 
complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [48:190:2201] 2025-06-25T14:46:52.373141Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1832fb2b-5bc0d3e2-e293ce7d-1da83fb0_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:52.386474Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ec4c4a38-ea821d01-291a9986-f95748ac_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:52.427516Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e3621446-cdd85cf0-1b0783dc-bfe9a5a_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:52.444395Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|584a58e4-a632602d-7f1bb283-2f3435d6_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:52.457768Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ff6c4468-12e36001-2c72d8bf-f8779ad0_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:52.467757Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d8dca99d-69f6be17-9384aafa-1094b80c_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:107:2057] recipient: [49:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:107:2057] recipient: [49:105:2137] Leader for TabletID 72057594037927937 is [49:111:2141] sender: [49:112:2057] recipient: [49:105:2137] 2025-06-25T14:46:53.319751Z node 49 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:46:53.319839Z node 49 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [49:153:2057] recipient: [49:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [49:153:2057] recipient: [49:151:2172] Leader for TabletID 72057594037927938 is [49:157:2176] sender: [49:158:2057] recipient: [49:151:2172] Leader for TabletID 72057594037927937 is [49:111:2141] sender: [49:183:2057] recipient: [49:14:2061] 2025-06-25T14:46:53.365308Z node 49 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:46:53.366185Z node 49 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 49 actor [49:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 49 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 49 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 49 Important: false } 2025-06-25T14:46:53.366961Z node 49 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [49:189:2200] 2025-06-25T14:46:53.373781Z node 49 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] 
init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [49:189:2200] 2025-06-25T14:46:53.375805Z node 49 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [49:190:2201] 2025-06-25T14:46:53.382220Z node 49 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [49:190:2201] 2025-06-25T14:46:53.399695Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|88d719b9-17a02986-916783fa-1633d183_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:53.412744Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|801ce58d-87772ee6-ea1fac7c-f6b28c8_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:53.473887Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|75985792-3b0cee29-9063f186-567ba628_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:53.498895Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bf42cea1-3a0bd9db-c72fb413-46fc2884_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:53.514707Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|68ee02d1-2e09d132-d8869285-88ce7ad1_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T14:46:53.533632Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|43c225c1-d6eb952e-3ebacf2c-f2262010_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default >> SlowTopicAutopartitioning::CDC_Write [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinStatsOverride-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 16912, MsgBus: 2369 2025-06-25T14:45:59.645435Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898293474433885:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:59.650030Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e6e/r3tmp/tmpcQOkSg/pdisk_1.dat 2025-06-25T14:46:00.198744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:00.198849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:00.207372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:00.283709Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:00.284457Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898293474433664:2080] 1750862759563968 != 1750862759563971 TServer::EnableGrpc on GrpcPort 16912, node 1 2025-06-25T14:46:00.448844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:00.448863Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty 
maybe) 2025-06-25T14:46:00.448889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:00.448992Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:00.644508Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2369 TClient is connected to server localhost:2369 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:01.295666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:01.312849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:46:03.673564Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898310654303494:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:03.673675Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:03.675584Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898310654303506:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:03.679901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:03.701210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:46:03.701452Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898310654303508:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:46:03.801116Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898310654303561:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:04.153452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.269668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.309964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.381976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.435837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.621765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.637827Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898293474433885:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:04.637943Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:04.698860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.750204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.783804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.823284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.859944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.901253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.981394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation ... 
02146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.507376Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.511333Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.512664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.516797Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.522077Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.522543Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.526271Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.526821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.531811Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.536816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038480;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.540738Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.541259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.546275Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038480;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.546770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.550351Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.551015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.557349Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.557993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.561551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.562030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.570983Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.571483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.577355Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.577854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.578988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.579544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.593661Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.594161Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.595206Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.595672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.605444Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.605963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.609707Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.610434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.615214Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.621092Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.668383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.685784Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.705100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:43.711562Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:43.828583Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrv9zm88x1y22drb8gsaks", SessionId: ydb://session/3?node_id=1&id=ZjExYzMwN2YtODVkZjkxOS03YWI3OGQxNS03ZDYxMmJhOQ==, Slow query, duration: 35.711159s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:44.157408Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:46:44.157887Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:46:44.158521Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519898396553665743:4252];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-25T14:46:44.158876Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpJoin::RightTableValuePredicate >> KqpJoin::IdxLookupPartialWithTempTable [GOOD] >> KqpJoinOrder::TPCDS92+ColumnStore >> KqpJoinOrder::FiveWayJoinWithComplexPreds+ColumnStore >> KqpJoinOrder::FourWayJoinLeftFirst+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::IdxLookupPartialWithTempTable [GOOD] Test command err: Trying to start YDB, gRPC: 25784, MsgBus: 12426 2025-06-25T14:46:40.057709Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898467601407936:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:40.057800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e38/r3tmp/tmpWjBLC9/pdisk_1.dat 2025-06-25T14:46:40.576459Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898467601407774:2080] 1750862800018424 != 1750862800018427 2025-06-25T14:46:40.608437Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:40.626069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:40.626171Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:40.644217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25784, node 1 2025-06-25T14:46:40.908817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:40.908838Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-06-25T14:46:40.908845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:40.908943Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:41.017619Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12426 TClient is connected to server localhost:12426 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:41.791345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:41.810098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:41.820810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:42.062014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:42.274437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:42.390922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:44.841234Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898484781278605:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:44.841325Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:45.043652Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898467601407936:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:45.043712Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:45.420686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.464375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.507089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.543882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.618123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.663201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.705796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.791748Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898489076246561:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:45.791825Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:45.792065Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898489076246566:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:45.796082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:45.818513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:46:45.818774Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898489076246568:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:45.888462Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898489076246621:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:47.062993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is u ... -06-25T14:46:49.470092Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:49.470168Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:49.479022Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16855, node 2 2025-06-25T14:46:49.751306Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:49.751331Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:49.751339Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:49.751477Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30908 2025-06-25T14:46:50.307512Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30908 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:50.661311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:46:50.668186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:46:50.678824Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:50.759963Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:50.925655Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:46:51.042484Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.201291Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898524090306196:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:53.201354Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:53.258496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.289869Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.326867Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.370589Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.408169Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.463628Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.514840Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.617849Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898524090306850:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:53.617938Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:53.619356Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898524090306855:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:53.623207Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:53.636927Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898524090306857:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:46:53.715645Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898524090306908:3410] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:54.271208Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898506910435445:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:54.271309Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:55.129481Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.179408Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.240979Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::UdfConstantFold+ColumnStore [GOOD] >> KqpIndexLookupJoin::JoinWithComplexCondition+StreamLookupJoin [GOOD] >> KqpIndexLookupJoin::JoinWithComplexCondition-StreamLookupJoin >> KqpJoinOrder::SortingsComplexOrderBy-RemoveLimitOperator [GOOD] >> KqpIndexLookupJoin::CheckCastUint64ToInt64-StreamLookupJoin-NotNull >> KqpBatchUpdate::ManyPartitions_1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FourWayJoinLeftFirst+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 25992, MsgBus: 7574 2025-06-25T14:45:25.311290Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898146963620270:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:25.311352Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ea1/r3tmp/tmpBHCrWM/pdisk_1.dat 2025-06-25T14:45:25.822098Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:25.838605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:25.838702Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:25.843146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25992, node 1 2025-06-25T14:45:25.916221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:25.916254Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:25.916264Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:25.916384Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7574 2025-06-25T14:45:26.323108Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7574 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:26.614192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:26.641332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:28.320988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898159848522770:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.321109Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.321392Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898159848522781:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.324812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:28.337020Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898159848522786:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:28.436805Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898159848522837:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:28.708014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:45:28.921521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159848523053:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:28.921768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159848523053:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:28.922047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159848523053:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:28.922174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159848523053:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:28.922299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159848523053:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:28.922425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159848523053:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:28.922529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159848523053:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:28.922646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159848523053:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:28.922824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159848523053:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:28.922965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519898159848523053:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:28.923085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159848523053:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:45:28.924691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898159848523128:2321];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:28.924752Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898159848523128:2321];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:28.924967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898159848523128:2321];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:28.925085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898159848523128:2321];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:28.925211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898159848523128:2321];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:28.925331Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898159848523128:2321];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:28.925473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898159848523128:2321];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:28.925604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898159848523128:2321];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:28.926032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898159848523128:2321];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:28.926194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898159848523128:2321];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abs ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.628428Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.629081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039253;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.634852Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039253;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.635429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.640907Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.641523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.647060Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.647719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.652413Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.652954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.653082Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.653709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.661836Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.662776Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.663224Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.663960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.668819Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.669372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.674179Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.674594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.679829Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.680506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039289;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.686068Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.686574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.690692Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039289;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.695293Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.695957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.695989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039215;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.701283Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.704325Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039215;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.704969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.713805Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.843013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039196;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.843475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039198;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.853458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039196;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.854000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039186;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.861553Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039198;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.862056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039294;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.867536Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039186;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.875325Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039294;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:42.914419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:42.923911Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:43.066026Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrv7vjdaa7f7zq7kjnqp6m", SessionId: 
ydb://session/3?node_id=1&id=YThlNmRlY2ItNWEwOTM0NGItNzU1M2Y2YjUtOWJiOWE0OTI=, Slow query, duration: 37.127312s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:43.571062Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:43.571545Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:43.573242Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::UdfConstantFold+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 14887, MsgBus: 8636 2025-06-25T14:45:25.289902Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898145143461548:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:25.289968Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ea2/r3tmp/tmplBJDf9/pdisk_1.dat 2025-06-25T14:45:25.756411Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898145143461524:2080] 1750862725286932 != 1750862725286935 2025-06-25T14:45:25.757789Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:25.807342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:25.807442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 14887, node 1 2025-06-25T14:45:25.810425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:25.904142Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:25.904181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:25.904189Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:25.904319Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:8636 2025-06-25T14:45:26.304650Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8636 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:26.667737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:26.696870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:28.395974Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898158028364065:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.396052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898158028364057:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.396168Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.399761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:28.408496Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898158028364071:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:28.476193Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898158028364122:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:28.745973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:45:28.930013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898158028364360:2314];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:28.930017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898158028364369:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:28.930199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898158028364369:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:28.930425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898158028364369:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:28.930528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898158028364369:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:28.930642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898158028364369:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:28.930736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898158028364360:2314];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:28.930750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898158028364369:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:28.930831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898158028364369:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:28.930891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037903;self_id=[1:7519898158028364360:2314];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:28.930919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898158028364369:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:28.930982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898158028364360:2314];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:28.931001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898158028364369:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:28.931080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898158028364360:2314];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:28.931117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898158028364369:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:28.931190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898158028364360:2314];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:28.931215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898158028364369:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:45:28.931313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898158028364360:2314];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:28.931412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898158028364360:2314];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:28.931495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898158028364360:2314];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; ... 
64262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.067426Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.068052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.068735Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.069265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.072927Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.073662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.074081Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.074616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.078656Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.079120Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.079739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.080179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.084742Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.085188Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.085350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.085688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.089987Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.090239Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.090602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.090790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.095859Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.100978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.103403Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.103901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.105839Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.106312Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.108529Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.109068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.111069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.111657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.113903Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.114624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.116895Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.117632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.121298Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.122146Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.122888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.123364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.127785Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.128841Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.259751Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrv9abbxwq74k02t7abvr8", SessionId: ydb://session/3?node_id=1&id=NDE5MTRhNWUtNDc3MjBkMDQtYTZiNTdkMTMtZDE2OTdmNWQ=, Slow query, duration: 38.823326s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:46.618717Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:46.619248Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:46.619958Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519898334122059254:7020];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-25T14:46:46.620396Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::TPCDS34-ColumnStore [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::ManyPartitions_1 [GOOD] Test command err: Trying to start YDB, gRPC: 31508, MsgBus: 63320 2025-06-25T14:42:40.733169Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897436382135711:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.733218Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c3e/r3tmp/tmpm7NElY/pdisk_1.dat 2025-06-25T14:42:41.566536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.566638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.569135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:42:41.576644Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.718613Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.100106s 2025-06-25T14:42:41.718671Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.100172s 2025-06-25T14:42:41.732443Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TServer::EnableGrpc on GrpcPort 31508, node 1 2025-06-25T14:42:42.725053Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config 
is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.725070Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:42:42.725076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.725262Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63320 TClient is connected to server localhost:63320 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.994629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.123405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.414683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.563412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.645582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.754553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897436382135711:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.758437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.830615Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897457856973654:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.830713Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.941047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:47.964401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.027155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.053516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.080080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.107975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.139099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.193206Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897470741876212:2440], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.193284Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.193356Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897470741876217:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.197134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.207377Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897470741876219:2444], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.336547Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897470741876270:3437] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:42:49.163172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation ... =undelivered;self_id=[20:7519898463771959857:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:39.812474Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c3e/r3tmp/tmp3RYVMV/pdisk_1.dat 2025-06-25T14:46:40.376878Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:40.377024Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:40.383214Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:40.388212Z node 20 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:40.390900Z node 20 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [20:7519898463771959798:2080] 1750862799743880 != 1750862799743883 TServer::EnableGrpc on GrpcPort 8225, node 20 2025-06-25T14:46:40.681215Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:40.681251Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:40.681268Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:40.681475Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:40.824757Z node 20 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15026 TClient is connected to server localhost:15026 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:42.822124Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:42.845260Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:43.038972Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:43.609865Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:43.864230Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:44.778044Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[20:7519898463771959857:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:44.778217Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:49.617771Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519898506721634462:2378], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:49.618403Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:49.703903Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:49.826300Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:49.938669Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:50.049179Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:50.126608Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:50.219787Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:50.330195Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:50.485581Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519898511016602433:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:50.485761Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:50.486137Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519898511016602438:2446], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:50.496086Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:50.528862Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [20:7519898511016602440:2447], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:46:50.588634Z node 20 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [20:7519898511016602493:3457] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:53.543076Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:55.330668Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:46:55.330697Z node 20 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsComplexOrderBy-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 3892, MsgBus: 10877 2025-06-25T14:46:11.867068Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898343533994923:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:11.871394Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e51/r3tmp/tmpLKa2Xq/pdisk_1.dat 2025-06-25T14:46:12.512988Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:12.513182Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:12.513265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:12.519572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3892, node 1 2025-06-25T14:46:12.701814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:12.701837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:12.701844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:12.701940Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:12.882678Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10877 TClient is connected to server localhost:10877 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:13.458273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:15.527740Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898360713864688:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.527830Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.527882Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898360713864700:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.531248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:15.552535Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898360713864702:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:15.628988Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898360713864753:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:16.017373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.133134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.167731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.198654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.236449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.438527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.488632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.535027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.592181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.627680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.666120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.706807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.742837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:16.869384Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898343533994923:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:16.869438Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:17.405851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:17.464223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself ... 
05165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.706805Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.707208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.711404Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.716041Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.716657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.721283Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.721838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.724685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.726657Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.727085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.731383Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.732287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.735843Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.736305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.737170Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.737867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.746524Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.747033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.750657Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.751162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.751274Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.751818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.762014Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.762354Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.762485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.762824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.767185Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.767812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.775806Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.778701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.783393Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.785739Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.786197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.788880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.794926Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.796157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.797395Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.797969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:53.802887Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.807554Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:53.953233Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrvnav5dvckctxwtjngxbc", SessionId: ydb://session/3?node_id=1&id=MzQzMDJhYWMtY2RhOTYyYTgtYTE0ZDUzNTQtYTNjYTNlYWQ=, Slow query, duration: 34.213830s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:54.285983Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:54.286470Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:54.287156Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519898390778642343:2842];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-25T14:46:54.287556Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::IdxLookupLeftPredicate [GOOD] >> KqpJoin::HashJoinWithAsTable >> KqpJoin::IndexLoookupJoinStructJoin+StreamLookupJoin >> KqpJoinOrder::FourWayJoinLeftFirst-ColumnStore >> KqpJoin::RightSemiJoin_ComplexKey >> KqpFlipJoin::Inner_1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS34-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 15503, MsgBus: 8981 2025-06-25T14:45:59.735547Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898293132753000:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:59.735578Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e6c/r3tmp/tmpgqhHzo/pdisk_1.dat 2025-06-25T14:46:00.335359Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:00.338314Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898293132752981:2080] 1750862759733965 != 1750862759733968 2025-06-25T14:46:00.344983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:00.345072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:00.356131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15503, node 1 2025-06-25T14:46:00.496760Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:00.496779Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-06-25T14:46:00.496786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:00.496888Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8981 2025-06-25T14:46:00.796713Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8981 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:01.315797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:01.345134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:03.429648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898310312622812:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:03.429771Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:03.430036Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898310312622824:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:03.434168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:03.456740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:46:03.462168Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898310312622826:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:03.564250Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898310312622877:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:03.984419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.139843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.252475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.287717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.318125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.509308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.558809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.604041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.657518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.729262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.736491Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898293132753000:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:04.736584Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:04.790417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.870442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:04.919249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation ... 
71253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.076265Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.077043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.080344Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.080991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.082449Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.083027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.088703Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.089676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.094857Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.095310Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.095550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.095823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.105272Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.105935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.110665Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.111278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.120807Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.121393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.125160Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.125708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.130884Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.131452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.135471Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.136059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.145301Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.147027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.152857Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.153466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.159207Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.159872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.160935Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.161489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.169557Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.172459Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.173037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.182942Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.234900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.240010Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.250745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:46:41.255282Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:41.345071Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrv9rsfwt76yfaxfx2062a", SessionId: ydb://session/3?node_id=1&id=N2ViODE3YzktZGY5NmQ3ZjAtZjYzNWYzYzgtZmY0MjU5OWE=, Slow query, duration: 33.447048s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:41.848770Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:41.849261Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:41.849958Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519898434866700506:5480];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-25T14:46:41.850347Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::RightTableValuePredicate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH1 [GOOD] Test command err: Trying to start YDB, gRPC: 4742, MsgBus: 11675 2025-06-25T14:45:25.319150Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898146242629177:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:25.319290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e99/r3tmp/tmpyCWjNC/pdisk_1.dat 2025-06-25T14:45:25.747726Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:25.768291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:25.768398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 4742, node 1 2025-06-25T14:45:25.777255Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:25.892638Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:25.892678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:25.892689Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:25.892792Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11675 2025-06-25T14:45:26.324510Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11675 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:26.675478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:26.709023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:28.347762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898159127531686:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.347877Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.347939Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898159127531694:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.356223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:28.373850Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898159127531700:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:28.467723Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898159127531751:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:28.734119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:45:28.907060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159127531989:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:28.907262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159127531989:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:28.907519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159127531989:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:28.907628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159127531989:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:28.907725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159127531989:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:28.907820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159127531989:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:28.907924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159127531989:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:28.908040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159127531989:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:28.908130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159127531989:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:28.908224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519898159127531989:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:28.908300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898159127531989:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:45:28.919706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898159127532000:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:28.919842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898159127532000:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:28.920110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898159127532000:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:28.920263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898159127532000:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:28.920422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898159127532000:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:28.920567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898159127532000:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:28.920750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898159127532000:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:28.920871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898159127532000:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:28.921008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898159127532000:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:28.921187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898159127532000:2313];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=ab ... 
.282702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.284569Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.285039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039326;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.294214Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039326;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.294899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.297659Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.298962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039219;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.304199Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039219;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.304447Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.304784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.305001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.310679Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.310800Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.311256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.311256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039269;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.317284Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.317760Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039269;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.317834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.319966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.323147Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.323693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039283;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.327734Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.328843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.331288Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039283;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.332057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.337846Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.338548Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.338675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039292;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.339305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.348740Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.349412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039282;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.353295Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039292;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.355329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039236;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.360274Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039282;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.367146Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039236;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.439350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039239;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:46.445693Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039239;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:46.626807Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrv7rx1hsp3cwtc9z6q15k", SessionId: ydb://session/3?node_id=1&id=NGNkNDk5My1lNmUwM2UxMi05YmU0ZjcyYy1iNTU3NmIyMQ==, Slow query, duration: 40.770024s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:46.999059Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:46.999557Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:47.000676Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038933;self_id=[1:7519898330926259707:6941];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224039392; 2025-06-25T14:46:47.001073Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:48.096638Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jykrwg8855p6m9q40rw2pmj7, SessionId: CompileActor 2025-06-25 14:46:48.057 WARN ydb-core-kqp-ut-join(pid=412084, tid=0x00007F4772DF3640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-25T14:46:50.836722Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jykrwk0xd66vk4yk8rrx6a4h, SessionId: CompileActor 2025-06-25 14:46:50.836 WARN ydb-core-kqp-ut-join(pid=412084, tid=0x00007F4772DF3640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::RightTableValuePredicate [GOOD] Test command err: Trying to start YDB, gRPC: 10484, MsgBus: 2209 2025-06-25T14:46:56.986504Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898538722531940:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:57.014719Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e1f/r3tmp/tmpnaq7DV/pdisk_1.dat 2025-06-25T14:46:57.675531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:57.675630Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:57.678550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:57.693150Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:57.728460Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898538722531750:2080] 1750862816935192 != 1750862816935195 TServer::EnableGrpc on GrpcPort 10484, node 1 2025-06-25T14:46:57.880192Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:57.880211Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:57.880218Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:57.880345Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration 2025-06-25T14:46:57.988595Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2209 TClient is connected to server localhost:2209 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:58.807256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:58.833434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:58.851326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:59.196047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:59.376132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:59.479549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:01.254457Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898560197369888:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:01.254559Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:01.751263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:01.812960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:01.909358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:01.963734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:01.992531Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898538722531940:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:01.992792Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:02.011245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:02.177382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:02.231500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:02.322995Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898564492337849:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:02.323083Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:02.323342Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898564492337854:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:02.327665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:02.345449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:47:02.345715Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898564492337856:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:02.444691Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898564492337908:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:03.764981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::ShuffleEliminationReuseShuffleTwoJoins [GOOD] >> KqpJoinOrder::TPCH9_100 [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH6 >> KqpJoinOrder::SortingsWithLookupJoin3-RemoveLimitOperator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/slow/unittest >> SlowTopicAutopartitioning::CDC_Write [GOOD] Test command err: 2025-06-25T14:45:22.201283Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898134001960131:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:22.201336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d05/r3tmp/tmpW8pLsV/pdisk_1.dat 2025-06-25T14:45:22.372790Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:45:22.510499Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:22.566842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:22.566970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:22.568808Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5571, node 1 2025-06-25T14:45:22.765208Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001d05/r3tmp/yandexUa6S11.tmp 2025-06-25T14:45:22.765255Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001d05/r3tmp/yandexUa6S11.tmp 2025-06-25T14:45:22.770408Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001d05/r3tmp/yandexUa6S11.tmp 2025-06-25T14:45:22.770657Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:45:22.996891Z INFO: TTestServer started on Port 16069 GrpcPort 5571 TClient is connected to server localhost:16069 PQClient connected to localhost:5571 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:45:23.206045Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:23.293142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:45:23.335556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-25T14:45:24.714569Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898142591895467:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:24.714645Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898142591895480:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:24.714705Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:24.723597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:24.735478Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898142591895482:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:45:24.833180Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898142591895546:2441] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:25.128492Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519898142591895554:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:45:25.133313Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=N2E2MmRkMzktNzg5M2U4ODctNGM1OTFlNjgtYmVmOGVjMDk=, ActorId: [1:7519898142591895465:2297], ActorState: ExecuteState, TraceId: 01jykrszjv6qmf0xj49f3g2pjw, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:45:25.137310Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:45:25.193222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:25.223846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:25.309025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519898146886863138:2618] 2025-06-25T14:45:27.203879Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898134001960131:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:27.204397Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-25T14:45:31.797669Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-25T14:45:31.917229Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519898172656667126:2697], Recipient [1:7519898134001960406:2143]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:45:31.917274Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:45:31.917286Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:45:31.917327Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519898172656667122:2694], Recipient [1:7519898134001960406:2143]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-06-25T14:45:31.917343Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:45:31.962737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "origin" Columns { Name: "id" Type: "Uint64" NotNull: false } Columns { Name: "order" Type: "Uint64" NotNull: false } Columns { Name: "value" Type: "Utf8" NotNull: false } KeyColumnNames: "id" KeyColumnNames: "order" UniformPartitionsCount: 64 PartitionConfig { PartitioningPolicy { MinPartitionsCount: 64 MaxPartitionsCount: 64 } } Temporary: false } } TxId: 281474976710673 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:45:31.963107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /Root/origin, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-06-25T14:45:31.963218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /Root/origin, opId: 2 ... 
20842:3233], Recipient [1:7519898494779220931:3245], Cookie: 0 2025-06-25T14:46:53.702417Z node 1 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188536, Sender [1:7519898494779220842:3233], Recipient [1:7519898494779220931:3245]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-25T14:46:53.702430Z node 1 :PERSQUEUE TRACE: partition.h:626: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-06-25T14:46:53.702471Z node 1 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [1:7519898494779220842:3233], Partition 2, Sender [1:7519898494779220842:3233], Recipient [1:7519898494779220931:3245], Cookie: 0 2025-06-25T14:46:53.702501Z node 1 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188491, Sender [1:7519898494779220842:3233], Recipient [1:7519898494779220931:3245]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-25T14:46:53.702513Z node 1 :PERSQUEUE TRACE: partition.h:602: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-25T14:46:53.702572Z node 1 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037958, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 4 } 2025-06-25T14:46:53.702654Z node 1 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188536 (NKikimr::TEvPQ::TEvSubDomainStatus), Tablet [1:7519898494779220842:3233], Partition 3, Sender [1:7519898494779220842:3233], Recipient [1:7519898494779220932:3246], Cookie: 0 2025-06-25T14:46:53.702702Z node 1 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188536, Sender [1:7519898494779220842:3233], Recipient [1:7519898494779220932:3246]: NKikimrPQ.TEvSubDomainStatus SubDomainOutOfSpace: false 2025-06-25T14:46:53.702721Z node 1 :PERSQUEUE TRACE: partition.h:626: StateIdle, processing event TEvPQ::TEvSubDomainStatus 2025-06-25T14:46:53.702761Z node 1 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [1:7519898494779220842:3233], Partition 3, Sender [1:7519898494779220842:3233], Recipient [1:7519898494779220932:3246], Cookie: 0 2025-06-25T14:46:53.702789Z node 1 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188491, Sender [1:7519898494779220842:3233], Recipient [1:7519898494779220932:3246]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-25T14:46:53.702801Z node 1 :PERSQUEUE TRACE: partition.h:602: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-25T14:46:53.702865Z node 1 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037958, Partition: 3, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 4 } 2025-06-25T14:46:53.702959Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [1:7519898181246604681:2731], Recipient [1:7519898176951636951:2635]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:46:53.702976Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:46:53.703022Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [1:7519898181246604643:2730], Recipient [1:7519898176951636951:2635]: 
NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:46:53.703032Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:46:53.703067Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [1:7519898494779220931:3245], Recipient [1:7519898494779220842:3233]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:46:53.703078Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:46:53.703111Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5307: HandleHook, received event# 271188503, Sender [1:7519898494779220932:3246], Recipient [1:7519898494779220842:3233]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:46:53.703122Z node 1 :PERSQUEUE TRACE: pq_impl.cpp:5321: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-25T14:46:53.703233Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: partition_scale_manager.cpp:28: TPartitionScaleManager::HandleScaleStatusChange need to split partition 0 2025-06-25T14:46:53.703611Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037957][streamImpl] Send TEvPeriodicTopicStats PathId: 15 Generation: 1 StatsReportRound: 103 DataSize: 9211158 UsedReserveSize: 0 2025-06-25T14:46:53.703704Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037957][streamImpl] ProcessPendingStats. PendingUpdates size 4 2025-06-25T14:46:53.704038Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271188001, Sender [1:7519898176951636950:2634], Recipient [1:7519898134001960406:2143]: NKikimrPQ.TEvPeriodicTopicStats PathId: 15 Generation: 1 Round: 103 DataSize: 9211158 UsedReserveSize: 0 SubDomainOutOfSpace: false 2025-06-25T14:46:53.704057Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4993: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-25T14:46:53.704074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046644480, LocalPathId: 15] DataSize 9211158 UsedReserveSize 0 2025-06-25T14:46:53.704096Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.099995s, queue# 1 2025-06-25T14:46:53.751261Z node 1 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [1:7519898176951636951:2635], Partition 0, Sender [0:0:0], Recipient [1:7519898181246604681:2731], Cookie: 0 2025-06-25T14:46:53.751329Z node 1 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [1:7519898181246604681:2731]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:46:53.751351Z node 1 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:46:53.751403Z node 1 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037956, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:46:53.751464Z node 1 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037956, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:46:53.751483Z node 1 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037956, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 
2025-06-25T14:46:53.751509Z node 1 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037956, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:46:53.756487Z node 1 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [1:7519898176951636951:2635], Partition 1, Sender [0:0:0], Recipient [1:7519898181246604643:2730], Cookie: 0 2025-06-25T14:46:53.756564Z node 1 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [1:7519898181246604643:2730]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:46:53.756585Z node 1 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:46:53.756631Z node 1 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037956, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:46:53.756696Z node 1 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037956, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:46:53.756717Z node 1 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037956, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:46:53.756741Z node 1 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037956, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:46:53.764632Z node 1 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [1:7519898494779220842:3233], Partition 2, Sender [0:0:0], Recipient [1:7519898494779220931:3245], Cookie: 0 2025-06-25T14:46:53.764688Z node 1 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [1:7519898494779220931:3245]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:46:53.764708Z node 1 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:46:53.764742Z node 1 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037958, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:46:53.764811Z node 1 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037958, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:46:53.764830Z node 1 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037958, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:46:53.764852Z node 1 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037958, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-25T14:46:53.764908Z node 1 :PERSQUEUE TRACE: partition.h:582: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [1:7519898494779220842:3233], Partition 3, Sender [0:0:0], Recipient [1:7519898494779220932:3246], Cookie: 0 2025-06-25T14:46:53.764931Z node 1 :PERSQUEUE TRACE: partition.h:584: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [1:7519898494779220932:3246]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:46:53.764943Z node 1 :PERSQUEUE TRACE: partition.h:610: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-25T14:46:53.764961Z node 1 :PERSQUEUE TRACE: partition.cpp:410: [PQ: 72075186224037958, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-25T14:46:53.764985Z node 1 :PERSQUEUE TRACE: partition.cpp:419: [PQ: 72075186224037958, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-25T14:46:53.764997Z node 1 :PERSQUEUE TRACE: partition_write.cpp:162: [PQ: 72075186224037958, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-25T14:46:53.765010Z node 1 :PERSQUEUE TRACE: partition_write.cpp:299: [PQ: 72075186224037958, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-25T14:46:53.785690Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519898134001960406:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:46:53.785727Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:46:53.785770Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519898134001960406:2143], Recipient [1:7519898134001960406:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:46:53.785785Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpJoinOrder::SortingsComplexOrderBy+RemoveLimitOperator >> KqpIndexLookupJoin::JoinWithComplexCondition-StreamLookupJoin [GOOD] >> KqpIndexLookupJoin::CheckCastUint64ToInt64-StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUint64ToInt64-StreamLookupJoin+NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationReuseShuffleTwoJoins [GOOD] Test command err: Trying to start YDB, gRPC: 6631, MsgBus: 4317 2025-06-25T14:45:33.011662Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898177083202399:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:33.011713Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e8d/r3tmp/tmpe7CYNS/pdisk_1.dat 2025-06-25T14:45:33.506746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:33.506825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:33.515336Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:33.573910Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6631, node 1 2025-06-25T14:45:33.751340Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:33.751369Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:33.751377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:33.751478Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4317 2025-06-25T14:45:33.998329Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4317 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:34.412909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:34.427999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:36.160966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898194263072197:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:36.161083Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:36.162252Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898194263072209:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:36.165698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:36.175673Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898194263072211:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:45:36.253208Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898194263072262:2338] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:36.507591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:45:36.723839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898194263072488:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:36.723838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898194263072490:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:36.724013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898194263072490:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:36.724264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898194263072490:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:36.724383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898194263072490:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:36.724496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898194263072490:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:36.724628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898194263072490:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:36.724717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898194263072490:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:36.724821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898194263072490:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:36.724921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7519898194263072490:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:36.725025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898194263072490:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:36.725139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898194263072490:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:45:36.728408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898194263072488:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:36.728623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898194263072488:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:36.728729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898194263072488:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:36.728838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898194263072488:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:36.728965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898194263072488:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:36.729089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898194263072488:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:36.729183Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898194263072488:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:36.729275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898194263072488:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:36.729384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898194263072488:2309];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstr ... 
15545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.416706Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.417187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.420282Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.421017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.426321Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.426966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.438187Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.438668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.445877Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.446354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.451276Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.451841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.451997Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.452395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.457389Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.457938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.464778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.465244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.467525Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.469371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.470274Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.470823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.474566Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.475127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.475909Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.480009Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.480641Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.480768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.485502Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.486053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.489236Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.489653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.490817Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.491482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.495058Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.495893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.497660Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.500152Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.503385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:53.508021Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:53.709956Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrvhr0a7ewje82r5p4xphc", SessionId: ydb://session/3?node_id=1&id=MjI1ZWE5M2EtYTc1NzBjNzgtYzY3OTdlNDktYzY4MGNlNjU=, Slow query, duration: 37.644655s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:54.085131Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:46:54.085131Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:46:54.085462Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519898374651734541:6950];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-25T14:46:54.085854Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCH9_100 [GOOD] Test command err: Trying to start YDB, gRPC: 15306, MsgBus: 13996 2025-06-25T14:45:25.305129Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898147951551197:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:25.305197Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ea4/r3tmp/tmpsAWLXg/pdisk_1.dat 2025-06-25T14:45:25.719477Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:25.756951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:25.757062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:25.758889Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15306, node 1 2025-06-25T14:45:25.888808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:25.888839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:25.888848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:25.888963Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13996 2025-06-25T14:45:26.320484Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13996 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:26.652966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:28.196246Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898160836453701:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.196258Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898160836453693:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.196367Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:28.199877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:28.209766Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898160836453707:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:28.284173Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898160836453758:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:28.598475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:45:28.785675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898160836453983:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:28.785678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898160836454016:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:28.785988Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898160836454016:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:28.786276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898160836453983:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:28.786278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898160836454016:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:28.786418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898160836454016:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:28.786471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898160836453983:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:28.786561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898160836454016:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:28.786627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898160836453983:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:28.786697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037894;self_id=[1:7519898160836454016:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:28.786747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898160836453983:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:28.786797Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898160836454016:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:28.786837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898160836453983:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:28.786923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898160836454016:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:28.786937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898160836453983:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:28.787034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898160836454016:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:28.787039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898160836453983:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:28.787120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898160836453983:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:28.787142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898160836454016:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:28.787201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898160836453983:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:28.787265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898160836454016:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:45:28.787324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;s ... 
43762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.048204Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.048355Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.048885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039231;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.049008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.053773Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039231;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.054111Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.054437Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.058745Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.059242Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.064339Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.064911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.070115Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.070635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039219;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.074811Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039219;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.076726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.082288Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.082836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.086914Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.087463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.092040Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.093702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.098109Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.098985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.103608Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.104140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.107893Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.108806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.112602Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.113179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.117400Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.118182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.122696Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.123250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.127147Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.127575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.131673Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.134111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.139194Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.146057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:44.151709Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:44.309373Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrv7yrdt2pdecn4q2dyz43", SessionId: ydb://session/3?node_id=1&id=NmNlZTBlNmEtOTc4NGY4NDItMWFlMTQ5ZGItMTcxNzJiNzg=, Slow query, duration: 38.268366s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:44.607538Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:44.608003Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:44.608619Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519898332635180903:6840];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-25T14:46:44.609077Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::HashJoinWithAsTable [GOOD] >> KqpJoinOrder::FiveWayJoinWithConstantFoldOpt+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::JoinWithComplexCondition-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 29728, MsgBus: 1091 2025-06-25T14:46:49.508219Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898506664344285:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:49.517806Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e2f/r3tmp/tmpcv7iId/pdisk_1.dat 2025-06-25T14:46:50.233390Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:50.233477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:50.248791Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898506664344127:2080] 1750862809401449 != 1750862809401452 2025-06-25T14:46:50.253545Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:50.254432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29728, node 1 2025-06-25T14:46:50.408548Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:46:50.516839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:50.516856Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:50.516866Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:50.516963Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1091 TClient is connected to server localhost:1091 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:51.640555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:51.676058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:51.861744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:52.060012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:52.181347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:54.277158Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898528139182242:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:54.277256Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:54.481680Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898506664344285:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:54.481803Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:54.743466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:54.776084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:54.829494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:54.870600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:54.908997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:54.981491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.065711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.145870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898532434150196:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:55.145958Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:55.146139Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898532434150201:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:55.149987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:55.163599Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898532434150203:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:55.253769Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898532434150256:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:56.429202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:56.488496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part ... r subscription [2:7519898547800369324:2080] 1750862819370810 != 1750862819370813 2025-06-25T14:46:59.707780Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:59.711708Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:59.714782Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18300, node 2 2025-06-25T14:46:59.837718Z node 2 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.104049s 2025-06-25T14:46:59.837798Z node 2 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.104137s 2025-06-25T14:46:59.972810Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:59.972831Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:59.972840Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:59.972942Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10938 2025-06-25T14:47:00.432448Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10938 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:00.774635Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:00.781348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:47:00.799144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:00.925477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:01.102293Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:01.191036Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:03.792421Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898564980240138:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:03.792505Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:03.858620Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:03.948178Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:03.992526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:04.056527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:04.095278Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:04.142419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:04.226569Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:04.326497Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898569275208091:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:04.326576Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:04.326811Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898569275208096:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:04.330526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:04.344237Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898569275208098:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:47:04.429985Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898547800369539:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:04.430082Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:04.447310Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898569275208149:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:05.730328Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:05.844345Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TxUsage::WriteToTopic_Demo_47_Table [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH7 [GOOD] >> KqpJoin::IndexLoookupJoinStructJoin+StreamLookupJoin [GOOD] >> KqpJoin::IdxLookupSelf ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::HashJoinWithAsTable [GOOD] Test command err: Trying to start YDB, gRPC: 31328, MsgBus: 19918 2025-06-25T14:46:54.265367Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898526656476573:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:54.265494Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e22/r3tmp/tmpwseHK2/pdisk_1.dat 2025-06-25T14:46:54.911257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:54.911333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:54.914070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:54.953299Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31328, node 1 2025-06-25T14:46:55.245828Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:55.245854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:55.246148Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:55.246263Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:55.267773Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19918 TClient is connected to server localhost:19918 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:56.301938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:56.342500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:56.355604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:56.523060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:56.836139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:46:56.932562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:46:58.909898Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898543836347213:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:58.910000Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:59.245110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:59.265989Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898526656476573:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:59.266052Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:59.276775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:59.312600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:59.349245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:59.383353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:59.419451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:59.467408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:59.569055Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898548131315168:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:59.569108Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:59.569372Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898548131315173:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:59.572939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:59.590412Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898548131315175:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:59.684071Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898548131315226:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:00.894213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:00.960381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok ... 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519898566160724445:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:03.322159Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e22/r3tmp/tmpCZ1rbH/pdisk_1.dat 2025-06-25T14:47:03.535686Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:03.535794Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:03.538096Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:03.547449Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:03.548794Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519898566160724242:2080] 1750862823189558 != 1750862823189561 TServer::EnableGrpc on GrpcPort 23962, node 2 2025-06-25T14:47:03.764873Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:03.764896Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:03.764905Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:03.765007Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4061 2025-06-25T14:47:04.240495Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4061 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:04.889599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:04.895987Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:04.911225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:05.055131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:05.364144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:05.522912Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:08.236442Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898566160724445:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:08.236506Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:08.380475Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898587635562386:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:08.380591Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:08.438296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:08.497329Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:08.586420Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:08.641553Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:08.678228Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:08.730596Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:08.795545Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:08.908004Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898587635563049:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:08.908082Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:08.908405Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898587635563054:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:08.913364Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:08.929354Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898587635563056:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:08.999191Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898587635563107:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:10.249405Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::TPCDS87-ColumnStore >> OlapEstimationRowsCorrectness::TPCH9 >> KqpFlipJoin::Inner_1 [GOOD] >> KqpFlipJoin::Inner_2 >> TxUsage::WriteToTopic_Demo_47_Query >> KqpJoinOrder::TestJoinHint1-ColumnStore [GOOD] >> KqpJoinOrder::TPCHEveryQueryWorks+ColumnStore >> YdbOlapStore::LogPagingAfter [GOOD] >> KqpJoin::RightSemiJoin_ComplexKey [GOOD] >> KqpJoin::RightSemiJoin_ComplexSecondaryIndex >> KqpJoinOrder::TPCDS87+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH7 [GOOD] Test command err: Trying to start YDB, gRPC: 26799, MsgBus: 23048 2025-06-25T14:45:27.994939Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898156457683414:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:27.994984Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e8f/r3tmp/tmpoBxy76/pdisk_1.dat 2025-06-25T14:45:28.378435Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26799, node 1 2025-06-25T14:45:28.413954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:28.414055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:28.414713Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:28.414734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:28.414745Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:28.414876Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:45:28.415653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23048 TClient is connected to server localhost:23048 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:28.903365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:29.022665Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:45:30.707564Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898169342585903:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:30.707631Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:30.707698Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898169342585915:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:30.711450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:30.720367Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898169342585917:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:30.780989Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898169342585968:2331] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:31.132668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:45:31.346259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898173637553486:2317];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:31.346507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898173637553486:2317];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:31.346755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898173637553486:2317];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:31.346870Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898173637553486:2317];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:31.346967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898173637553486:2317];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:31.347056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898173637553486:2317];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:31.347188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898173637553486:2317];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:31.347293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898173637553486:2317];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:31.347386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898173637553486:2317];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:31.347484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519898173637553486:2317];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:31.347582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898173637553486:2317];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:45:31.353054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898173637553487:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:31.353123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898173637553487:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:31.353298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898173637553487:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:31.353403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898173637553487:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:31.353515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898173637553487:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:31.353614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898173637553487:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:31.353726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898173637553487:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:31.353829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898173637553487:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:31.353957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898173637553487:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:31.354060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898173637553487:2318];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:31.354185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;s ... 
95052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.099425Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.100070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.104595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.105140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.110307Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.111092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.113209Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.113723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.115741Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.116283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.118651Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.119207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039251;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.121773Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.122320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.130121Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039251;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.130689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.134753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.135248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.140264Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.141546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.144739Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.145217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.146899Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.147439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.149886Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.150447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.151870Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.152580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.154595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.155099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.157366Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.157888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.159172Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.159647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.162255Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.163120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.168389Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.168957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039259;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:50.170016Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.178509Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039259;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:46:50.289675Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrvb4wbfey8053yvt30hq2", SessionId: ydb://session/3?node_id=1&id=YTZlYjc4ODgtYWU1Yjk3MmYtMWVhZjAzYWYtZmZkZDllZTI=, Slow query, duration: 40.980564s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:46:50.734976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:50.735460Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:46:50.736082Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519898427040674114:9005];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-25T14:46:50.736484Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinHint1-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 28083, MsgBus: 25847 2025-06-25T14:46:21.642580Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898385859696706:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:21.642610Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e4c/r3tmp/tmp2OtWdT/pdisk_1.dat 2025-06-25T14:46:22.379044Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898385859696688:2080] 1750862781641159 != 1750862781641162 2025-06-25T14:46:22.490452Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:22.511887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:22.511975Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:22.513665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28083, node 1 2025-06-25T14:46:22.865424Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:46:22.866100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:22.866107Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:22.866125Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:22.866251Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25847 TClient is connected to server localhost:25847 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:23.660621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:23.677877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:25.661007Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898403039566520:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:25.661182Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:25.667478Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898403039566532:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:25.672215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:25.694344Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898403039566534:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:25.791259Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898403039566585:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:26.130404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.302927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.355429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.395036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.441106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.646966Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898385859696706:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:26.647015Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:26.731379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.798457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.874871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.922462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:26.988618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:27.036760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:27.074138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:27.121760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:27.783616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
34305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.736394Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.736875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.742128Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.742664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.745427Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.746230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.750431Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.754884Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.755317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.759494Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.759982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.768842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.769340Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.769872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.777235Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.777805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.784939Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.786378Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.786908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.792886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.796224Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.796817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.810875Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.811338Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.812941Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.813372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.819080Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.821932Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.822516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.827464Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.828101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.829398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.834070Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.835685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.840070Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.844180Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.845235Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.861693Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.991647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038456;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:08.998628Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038456;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:09.052635Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrvzc4d4j42n8nd5s29ajr", SessionId: ydb://session/3?node_id=1&id=YTU4MTM5OTYtNWZlNGU5NGQtODk1YjAxOWItODk5NTI3YzI=, Slow query, duration: 39.031209s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:09.343307Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:09.344075Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519898544773512499:5333];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-25T14:47:09.344556Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:09.345499Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::SortingsSimpleOrderByAliasIndexDesc-RemoveLimitOperator >> KqpIndexLookupJoin::CheckCastUint64ToInt64-StreamLookupJoin+NotNull [GOOD] >> KqpJoin::LeftJoinWithNull+StreamLookupJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogPagingAfter [GOOD] Test command err: 2025-06-25T14:44:36.362198Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897936155384394:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:36.362301Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f4/r3tmp/tmpacH9Q2/pdisk_1.dat 2025-06-25T14:44:36.704643Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:36.713765Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:36.713851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 7083, node 1 2025-06-25T14:44:36.717645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:36.808057Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:36.808078Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:36.808084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:36.808191Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to 
server localhost:9518 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:37.142731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:37.370715Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9518 2025-06-25T14:44:37.388561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) waiting... 
2025-06-25T14:44:37.517332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897940450352713:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:44:37.517571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897940450352713:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:44:37.517825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897940450352713:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:44:37.517937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897940450352713:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:44:37.518063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897940450352713:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:44:37.518163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897940450352713:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:44:37.518248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897940450352713:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:44:37.518351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897940450352713:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:44:37.518451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897940450352713:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:44:37.518538Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897940450352713:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:44:37.518645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519897940450352713:2285];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:44:37.584818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519897940450352714:2286];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:44:37.584880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7519897940450352714:2286];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:44:37.585244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519897940450352714:2286];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:44:37.585366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519897940450352714:2286];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:44:37.585472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519897940450352714:2286];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:44:37.585566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519897940450352714:2286];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:44:37.585645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519897940450352714:2286];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:44:37.585744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519897940450352714:2286];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:44:37.585831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519897940450352714:2286];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:44:37.585948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519897940450352714:2286];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:44:37.586033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519897940450352714:2286];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:44:37.637183Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519897940450352712:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:44:37.637257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519897940450352712:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:44:37.637495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7519897940450352712:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:44:37.637592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519897940450352712:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:44:37.637707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519897940450352712:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:44:37.637827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519897940450352712:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:44:37.637943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519897940450352712:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description= ... un-kqfvx6aroe" NodeId: 28 StartTimeMs: 1750862832928 CreateTimeMs: 1750862832835 UpdateTimeMs: 1750862832959 } MaxMemoryUsage: 1048576 } 2025-06-25T14:47:13.002648Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976710670. Ctx: { TraceId: 01jykrx7r9dq88ynze0enmv4wd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7519898604351673055:3157] 2025-06-25T14:47:13.002680Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [28:7519898604351672983:3099] TxId: 281474976710670. Ctx: { TraceId: 01jykrx7r9dq88ynze0enmv4wd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [28:7519898604351673075:3173], CA [28:7519898604351673076:3174], 2025-06-25T14:47:13.002939Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:357: ActorId: [28:7519898604351672983:3099] TxId: 281474976710670. Ctx: { TraceId: 01jykrx7r9dq88ynze0enmv4wd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [28:7519898600056705629:3099], seqNo: 1, nRows: 0 2025-06-25T14:47:13.003089Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [28:7519898604351672983:3099] TxId: 281474976710670. Ctx: { TraceId: 01jykrx7r9dq88ynze0enmv4wd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, got execution state from compute actor: [28:7519898604351673076:3174], task: 66, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 1214 Tasks { TaskId: 66 StageId: 2 CpuTimeUs: 229 FinishTimeMs: 1750862832960 ComputeCpuTimeUs: 45 BuildCpuTimeUs: 184 HostName: "ghrun-kqfvx6aroe" NodeId: 28 CreateTimeMs: 1750862832836 CurrentWaitOutputTimeUs: 56 UpdateTimeMs: 1750862832960 } MaxMemoryUsage: 1048576 } 2025-06-25T14:47:13.003136Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [28:7519898604351672983:3099] TxId: 281474976710670. Ctx: { TraceId: 01jykrx7r9dq88ynze0enmv4wd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [28:7519898604351673075:3173], CA [28:7519898604351673076:3174], 2025-06-25T14:47:13.003234Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [28:7519898604351672983:3099] TxId: 281474976710670. Ctx: { TraceId: 01jykrx7r9dq88ynze0enmv4wd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7519898604351673075:3173], task: 65, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 10098 DurationUs: 27000 Tasks { TaskId: 65 StageId: 1 CpuTimeUs: 510 FinishTimeMs: 1750862832960 ComputeCpuTimeUs: 207 BuildCpuTimeUs: 303 HostName: "ghrun-kqfvx6aroe" NodeId: 28 StartTimeMs: 1750862832933 CreateTimeMs: 1750862832851 UpdateTimeMs: 1750862832962 } MaxMemoryUsage: 1048576 } 2025-06-25T14:47:13.003283Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976710670. Ctx: { TraceId: 01jykrx7r9dq88ynze0enmv4wd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7519898604351673075:3173] 2025-06-25T14:47:13.003320Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [28:7519898604351672983:3099] TxId: 281474976710670. Ctx: { TraceId: 01jykrx7r9dq88ynze0enmv4wd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [28:7519898604351673076:3174], 2025-06-25T14:47:13.003397Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1852: SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, ActorId: [28:7519898600056705629:3099], ActorState: ExecuteState, TraceId: 01jykrx7r9dq88ynze0enmv4wd, Forwarded TEvStreamData to [28:7519898600056705627:3098] 2025-06-25T14:47:13.006305Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:423: TxId: 281474976710670, send ack to channelId: 66, seqNo: 1, enough: 0, freeSpace: 8388469, to: [28:7519898604351673113:3174] 2025-06-25T14:47:13.006388Z node 28 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [28:7519898604351673076:3174], TxId: 281474976710670, task: 66. Ctx: { TraceId : 01jykrx7r9dq88ynze0enmv4wd. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. 
CA StateFunc 271646922 2025-06-25T14:47:13.006490Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 66. Tasks execution finished, don't wait for ack delivery in input channelId: 65, seqNo: [1] 2025-06-25T14:47:13.006516Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710670, task: 66. Tasks execution finished 2025-06-25T14:47:13.006542Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [28:7519898604351673076:3174], TxId: 281474976710670, task: 66. Ctx: { TraceId : 01jykrx7r9dq88ynze0enmv4wd. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-25T14:47:13.006696Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710670, task: 66. pass away 2025-06-25T14:47:13.006859Z node 28 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710670;task_id=66;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:47:13.007385Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [28:7519898604351672983:3099] TxId: 281474976710670. Ctx: { TraceId: 01jykrx7r9dq88ynze0enmv4wd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7519898604351673076:3174], task: 66, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1760 Tasks { TaskId: 66 StageId: 2 CpuTimeUs: 236 FinishTimeMs: 1750862833006 ComputeCpuTimeUs: 52 BuildCpuTimeUs: 184 HostName: "ghrun-kqfvx6aroe" NodeId: 28 CreateTimeMs: 1750862832836 UpdateTimeMs: 1750862833006 } MaxMemoryUsage: 1048576 } 2025-06-25T14:47:13.007444Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976710670. Ctx: { TraceId: 01jykrx7r9dq88ynze0enmv4wd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7519898604351673076:3174] 2025-06-25T14:47:13.007614Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [28:7519898604351672983:3099] TxId: 281474976710670. Ctx: { TraceId: 01jykrx7r9dq88ynze0enmv4wd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T14:47:13.007708Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [28:7519898604351672983:3099] TxId: 281474976710670. Ctx: { TraceId: 01jykrx7r9dq88ynze0enmv4wd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.163541s ReadRows: 0 ReadBytes: 0 ru: 109 rate limiter was not found force flag: 1 2025-06-25T14:47:13.007811Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, ActorId: [28:7519898600056705629:3099], ActorState: ExecuteState, TraceId: 01jykrx7r9dq88ynze0enmv4wd, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-25T14:47:13.008242Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, ActorId: [28:7519898600056705629:3099], ActorState: ExecuteState, TraceId: 01jykrx7r9dq88ynze0enmv4wd, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 335.483 QueriesCount: 1 2025-06-25T14:47:13.008349Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, ActorId: [28:7519898600056705629:3099], ActorState: ExecuteState, TraceId: 01jykrx7r9dq88ynze0enmv4wd, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-25T14:47:13.008509Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, ActorId: [28:7519898600056705629:3099], ActorState: ExecuteState, TraceId: 01jykrx7r9dq88ynze0enmv4wd, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:47:13.008576Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, ActorId: [28:7519898600056705629:3099], ActorState: ExecuteState, TraceId: 01jykrx7r9dq88ynze0enmv4wd, EndCleanup, isFinal: 1 2025-06-25T14:47:13.008670Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, ActorId: [28:7519898600056705629:3099], ActorState: ExecuteState, TraceId: 01jykrx7r9dq88ynze0enmv4wd, Sent query response back to proxy, proxyRequestId: 5, proxyId: [28:7519898514157355901:2185] 2025-06-25T14:47:13.008731Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, ActorId: [28:7519898600056705629:3099], ActorState: unknown state, TraceId: 01jykrx7r9dq88ynze0enmv4wd, Cleanup temp tables: 0 2025-06-25T14:47:13.013886Z node 28 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862832005, txId: 18446744073709551615] shutting down 2025-06-25T14:47:13.014061Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=28&id=YWE4NDA5OGYtMjg5MWM0MjMtYTQ0MDJkMzEtMzU0YjcyNGU=, ActorId: [28:7519898600056705629:3099], ActorState: unknown state, TraceId: 01jykrx7r9dq88ynze0enmv4wd, Session actor destroyed RESULT: [] --------------------- STATS: total CPU: 2867 duration: 1584 usec cpu: 1584 usec duration: 318947 usec cpu: 348146 usec { name: "/Root/OlapStore/log1" } 2025-06-25T14:47:13.240782Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[28:7519898531337226031:2286];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037890; >> 
KqpJoinOrder::FiveWayJoinWithComplexPreds2+ColumnStore [GOOD] |85.6%| [TA] $(B)/ydb/core/persqueue/ut/slow/test-results/unittest/{meta.json ... results_accumulator.log} >> OlapEstimationRowsCorrectness::TPCDS96 [GOOD] >> KqpJoin::IdxLookupSelf [GOOD] >> KqpJoinOrder::DatetimeConstantFold+ColumnStore >> KqpFlipJoin::Inner_2 [GOOD] >> YdbOlapStore::DuplicateRows [GOOD] >> YdbOlapStore::LogCountByResource >> YdbOlapStore::LogExistingUserId [GOOD] |85.6%| [TA] {RESULT} $(B)/ydb/core/persqueue/ut/slow/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUint64ToInt64-StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 11056, MsgBus: 17835 2025-06-25T14:47:00.937182Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898553244161977:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:00.937595Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e1c/r3tmp/tmpnGihJb/pdisk_1.dat 2025-06-25T14:47:01.640600Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:01.646827Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898553244161883:2080] 1750862820896498 != 1750862820896501 2025-06-25T14:47:01.661762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:01.661874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:01.665548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11056, node 1 2025-06-25T14:47:01.872923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:01.872946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:01.872957Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:01.873080Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:02.002501Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17835 TClient is connected to server localhost:17835 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:02.736574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:02.755009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:02.773100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:03.066795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:47:03.292098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:03.476256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:05.933666Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898553244161977:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:05.933756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:06.277246Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898579013967300:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:06.277336Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:06.922354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:06.977803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:07.031115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:07.074741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:07.133975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:07.178261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:07.257840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:07.344037Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898583308935263:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:07.344111Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:07.344527Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898583308935268:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:07.349014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:07.366361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:47:07.367275Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898583308935270:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:07.433259Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898583308935321:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:08.675027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is u ... called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 1869, MsgBus: 5388 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e1c/r3tmp/tmpAsyaff/pdisk_1.dat 2025-06-25T14:47:11.374435Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:47:11.509122Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:11.509876Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:11.509946Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:11.510286Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519898603190135065:2080] 1750862831185389 != 1750862831185392 2025-06-25T14:47:11.533876Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1869, node 2 2025-06-25T14:47:11.772892Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:11.772913Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:11.772922Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:11.773043Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5388 2025-06-25T14:47:12.247854Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5388 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:12.522441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:12.536537Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:47:12.554469Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:12.651451Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:12.925961Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:47:13.028192Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:15.704840Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898620370005876:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:15.704929Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:15.800465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:15.846001Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:15.903259Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:15.956998Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:16.022673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:16.100810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:16.150809Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:16.271398Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898624664973829:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:16.271478Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:16.271843Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898624664973834:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:16.274901Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:16.293113Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898624664973836:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:47:16.378720Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898624664973888:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:17.504878Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:17.582210Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query [GOOD] Test command err: 2025-06-25T14:40:54.525068Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896981054015454:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:54.525214Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019e5/r3tmp/tmpgc718D/pdisk_1.dat 2025-06-25T14:40:54.783452Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:40:54.996060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:54.996195Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:55.040539Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896981054015256:2080] 1750862454511374 != 1750862454511377 2025-06-25T14:40:55.040855Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:55.082035Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7942, node 1 2025-06-25T14:40:55.212947Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0019e5/r3tmp/yandexYuMtnm.tmp 2025-06-25T14:40:55.212982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0019e5/r3tmp/yandexYuMtnm.tmp 2025-06-25T14:40:55.215906Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0019e5/r3tmp/yandexYuMtnm.tmp 2025-06-25T14:40:55.216030Z node 
1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:55.418789Z INFO: TTestServer started on Port 3139 GrpcPort 7942 2025-06-25T14:40:55.533847Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3139 PQClient connected to localhost:7942 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:55.828504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:55.880627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:40:55.905265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:40:55.910712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:40:56.085259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:40:56.100699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-25T14:40:58.898292Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896998233885237:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.898395Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.899014Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896998233885249:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.903270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:58.932815Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896998233885251:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:40:59.145730Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897002528852611:2446] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:59.219255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:59.260810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:59.293132Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897002528852626:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:40:59.295210Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODE1ZDA3M2ItNzE0ZTkwZmYtYzNhYjIxMTktOTRjMjU3NjA=, ActorId: [1:7519896998233885234:2299], ActorState: ExecuteState, TraceId: 01jykrhw0869ds163nb6jt0r3h, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:40:59.297709Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:40:59.338379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:40:59.558141Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896981054015454:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:59.558767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Subcribe to ClusterTracker from [1:7519897002528852917:2626] === CheckClustersList. Ok 2025-06-25T14:41:05.235133Z :Sinks_Oltp_WriteToTopic_1_Table INFO: TTopicSdkTestSetup started 2025-06-25T14:41:05.274427Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:41:05.336812Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519897028298656896:2718] connected; active server actors: 1 2025-06-25T14:41:05.337294Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-25T14:41:05.338541Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-25T14:41:05.338704Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-25T14:41:05.338819Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:41:05.340291Z node 1 :PERSQUEUE D ... mer test-consumer session test-consumer_20_3_1772808415203153944_v1 grpc read done: success# 0, data# { } 2025-06-25T14:46:48.634139Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [20:7519898482767097440:3354]: session cookie 4 consumer test-consumer session test-consumer_20_3_1772808415203153944_v1grpc read failed 2025-06-25T14:46:48.634175Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [20:7519898482767097440:3354]: session cookie 4 consumer test-consumer session test-consumer_20_3_1772808415203153944_v1 grpc closed 2025-06-25T14:46:48.634199Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [20:7519898482767097440:3354]: session cookie 4 consumer test-consumer session test-consumer_20_3_1772808415203153944_v1 proxy is DEAD 2025-06-25T14:46:48.635260Z node 20 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer test-consumer session test-consumer_20_3_1772808415203153944_v1 grpc read done: success# 0, data# { } 2025-06-25T14:46:48.635286Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer test-consumer session test-consumer_20_3_1772808415203153944_v1 grpc read failed 2025-06-25T14:46:48.635313Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer test-consumer session test-consumer_20_3_1772808415203153944_v1 grpc closed 2025-06-25T14:46:48.635346Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer test-consumer session test-consumer_20_3_1772808415203153944_v1 is DEAD 2025-06-25T14:46:48.636758Z node 20 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037897][topic_B] pipe [20:7519898482767097432:3349] disconnected; active server actors: 1 2025-06-25T14:46:48.636786Z node 20 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037897][topic_B] pipe [20:7519898482767097432:3349] client test-consumer disconnected session test-consumer_20_3_1772808415203153944_v1 2025-06-25T14:46:48.636877Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037896] Destroy direct read session test-consumer_20_3_1772808415203153944_v1 2025-06-25T14:46:48.636907Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [20:7519898482767097435:3352] destroyed 2025-06-25T14:46:48.636947Z node 20 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_20_3_1772808415203153944_v1 2025-06-25T14:46:48.637513Z :INFO: [/Root] [/Root] [9655cffc-cdf099f2-fe364e9c-fc54e4dc] Closing read session. 
Close timeout: 0.000000s 2025-06-25T14:46:48.637562Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:0:1 2025-06-25T14:46:48.637590Z :INFO: [/Root] [/Root] [9655cffc-cdf099f2-fe364e9c-fc54e4dc] Counters: { Errors: 0 CurrentSessionLifetimeMs: 7375 BytesRead: 144 MessagesRead: 1 BytesReadCompressed: 144 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:46:48.637648Z :NOTICE: [/Root] [/Root] [9655cffc-cdf099f2-fe364e9c-fc54e4dc] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:46:48.637678Z :DEBUG: [/Root] [/Root] [9655cffc-cdf099f2-fe364e9c-fc54e4dc] [] Abort session to cluster 2025-06-25T14:46:48.638329Z :DEBUG: [/Root] 0x000051E00016C190 TDirectReadSessionManager ServerSessionId=test-consumer_20_1_10113169284785426403_v1 Close 2025-06-25T14:46:48.638594Z :DEBUG: [/Root] 0x000051E00016C190 TDirectReadSessionManager ServerSessionId=test-consumer_20_1_10113169284785426403_v1 Close 2025-06-25T14:46:48.638665Z :NOTICE: [/Root] [/Root] [9655cffc-cdf099f2-fe364e9c-fc54e4dc] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:46:48.638567Z node 20 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_20_1_10113169284785426403_v1 grpc read done: success# 0, data# { } 2025-06-25T14:46:48.638591Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_20_1_10113169284785426403_v1 grpc read failed 2025-06-25T14:46:48.638619Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_20_1_10113169284785426403_v1 grpc closed 2025-06-25T14:46:48.638650Z node 20 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_20_1_10113169284785426403_v1 is DEAD 2025-06-25T14:46:48.639863Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037894] Destroy direct read session test-consumer_20_1_10113169284785426403_v1 2025-06-25T14:46:48.639900Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [20:7519898474177162757:3312] destroyed 2025-06-25T14:46:48.639936Z node 20 :PQ_READ_PROXY DEBUG: caching_service.cpp:398: Direct read cache: close session for proxy [20:7519898474177162762:3314] 2025-06-25T14:46:48.639978Z node 20 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_20_1_10113169284785426403_v1 2025-06-25T14:46:48.640470Z node 20 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [20:7519898474177162762:3314]: session cookie 2 consumer test-consumer session test-consumer_20_1_10113169284785426403_v1 grpc read done: success# 0, data# { } 2025-06-25T14:46:48.640500Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [20:7519898474177162762:3314]: session cookie 2 consumer test-consumer session test-consumer_20_1_10113169284785426403_v1grpc read failed 2025-06-25T14:46:48.640531Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [20:7519898474177162762:3314]: session cookie 2 consumer test-consumer session test-consumer_20_1_10113169284785426403_v1 grpc closed 2025-06-25T14:46:48.640554Z node 20 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [20:7519898474177162762:3314]: session cookie 2 consumer test-consumer session test-consumer_20_1_10113169284785426403_v1 proxy is DEAD 2025-06-25T14:46:48.640697Z node 20 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [20:7519898474177162754:3309] disconnected; active server actors: 1 2025-06-25T14:46:48.640719Z node 20 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [20:7519898474177162754:3309] client test-consumer disconnected session test-consumer_20_1_10113169284785426403_v1 2025-06-25T14:46:48.652863Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|65457f69-3675db0c-cabba3-5f3e049b_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-25T14:46:48.652925Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|65457f69-3675db0c-cabba3-5f3e049b_0] PartitionId [0] Generation [1] Write session will now close 2025-06-25T14:46:48.652976Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|65457f69-3675db0c-cabba3-5f3e049b_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-25T14:46:48.653441Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|65457f69-3675db0c-cabba3-5f3e049b_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-25T14:46:48.653490Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|65457f69-3675db0c-cabba3-5f3e049b_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-25T14:46:48.655964Z node 20 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message_group_id|65457f69-3675db0c-cabba3-5f3e049b_0 grpc read done: success: 0 data: 2025-06-25T14:46:48.655996Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message_group_id|65457f69-3675db0c-cabba3-5f3e049b_0 grpc read failed 2025-06-25T14:46:48.656049Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message_group_id|65457f69-3675db0c-cabba3-5f3e049b_0 grpc closed 2025-06-25T14:46:48.656071Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message_group_id|65457f69-3675db0c-cabba3-5f3e049b_0 is DEAD 2025-06-25T14:46:48.656948Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:46:48.656987Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:46:48.657624Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [20:7519898469882195214:3274] destroyed 2025-06-25T14:46:48.657662Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037896] server disconnected, pipe [20:7519898469882195217:3274] destroyed 2025-06-25T14:46:48.657699Z node 20 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:46:48.664510Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|c348bf66-95a739bd-1f799de9-9161d235_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-25T14:46:48.664544Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|c348bf66-95a739bd-1f799de9-9161d235_0] PartitionId [0] Generation [1] Write session will now close 2025-06-25T14:46:48.664577Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|c348bf66-95a739bd-1f799de9-9161d235_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-25T14:46:48.664882Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|c348bf66-95a739bd-1f799de9-9161d235_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-25T14:46:48.664909Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|c348bf66-95a739bd-1f799de9-9161d235_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-25T14:46:48.672167Z node 20 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|c348bf66-95a739bd-1f799de9-9161d235_0 grpc read done: success: 0 data: 2025-06-25T14:46:48.672200Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|c348bf66-95a739bd-1f799de9-9161d235_0 grpc read failed 2025-06-25T14:46:48.672245Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|c348bf66-95a739bd-1f799de9-9161d235_0 grpc closed 2025-06-25T14:46:48.672266Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|c348bf66-95a739bd-1f799de9-9161d235_0 is DEAD 2025-06-25T14:46:48.673139Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:46:48.673180Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:46:48.673814Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [20:7519898469882195168:3264] destroyed 2025-06-25T14:46:48.673849Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037894] server disconnected, pipe [20:7519898469882195171:3264] destroyed 2025-06-25T14:46:48.673892Z node 20 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::IdxLookupSelf [GOOD] Test command err: Trying to start YDB, gRPC: 25457, MsgBus: 29490 2025-06-25T14:47:04.654357Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898570548736760:2084];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:04.903314Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e19/r3tmp/tmpswP8Rg/pdisk_1.dat 2025-06-25T14:47:05.173267Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898570548736712:2080] 1750862824627489 != 1750862824627492 2025-06-25T14:47:05.226147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:05.226231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:05.260691Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:05.262400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25457, node 1 2025-06-25T14:47:05.568928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:05.568945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:05.568954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:05.569046Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:05.692639Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29490 TClient is connected to server localhost:29490 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:47:06.631135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:06.680590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:06.835376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:07.020454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:07.098556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:09.041559Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898592023574827:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.041665Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.439924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.489672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.544603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.608448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.654600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.660755Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898570548736760:2084];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:09.660939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:09.759030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.841287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.919056Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898592023575485:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.919117Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.919373Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898592023575490:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.923719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:09.940436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:47:09.941422Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898592023575492:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:10.036479Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898596318542843:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:11.274363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 3.343632Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:13.343712Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:13.344479Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519898610109311293:2080] 1750862833062693 != 1750862833062696 2025-06-25T14:47:13.364845Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24013, node 2 2025-06-25T14:47:13.588977Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:13.589000Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:13.589009Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:13.589126Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16703 2025-06-25T14:47:14.073228Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16703 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:47:14.462791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:14.493467Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:14.679849Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:14.859935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:14.983742Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:17.601755Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898627289182107:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:17.601843Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:17.653851Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:17.704424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:17.747186Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:17.822314Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:17.864971Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:17.978588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:18.074740Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:18.080530Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898610109311500:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:18.081049Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:18.177304Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519898631584150066:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.177391Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.177617Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898631584150071:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.181909Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:18.199338Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898631584150073:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:47:18.253834Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898631584150126:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:19.822517Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.884961Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.946540Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithComplexPreds2+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 9043, MsgBus: 65343 2025-06-25T14:45:41.533303Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898216445526988:2183];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:41.541251Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e7e/r3tmp/tmp2rZkLo/pdisk_1.dat 2025-06-25T14:45:41.922868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:41.922956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:41.939699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:42.005060Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:42.008431Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898216445526841:2080] 1750862741493035 != 1750862741493038 TServer::EnableGrpc on GrpcPort 9043, node 1 2025-06-25T14:45:42.108911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:42.108929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T14:45:42.108935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:42.109033Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65343 2025-06-25T14:45:42.513182Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:65343 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:42.835528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:42.872818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:44.652137Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898229330429378:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.652148Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898229330429370:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.652225Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.662604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:44.681319Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898229330429384:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:44.748081Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898229330429435:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:45.052376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:45:45.275231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898233625397011:2318];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:45.275231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898233625396899:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:45.275367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898233625396899:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:45.275568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898233625396899:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:45.275643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898233625396899:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:45.275703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898233625396899:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:45.275758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898233625396899:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:45.275813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898233625396899:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:45.275867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898233625396899:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:45.275958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037903;self_id=[1:7519898233625396899:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:45.276021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898233625397011:2318];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:45.276069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898233625396899:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:45.276145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898233625397011:2318];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:45.276179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898233625396899:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:45:45.276232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898233625397011:2318];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:45.276332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898233625397011:2318];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:45.276427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898233625397011:2318];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:45.276520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898233625397011:2318];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:45.276606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898233625397011:2318];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:45.276685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898233625397011:2318];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; ... 
4;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.872180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.872219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.881516Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.882110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039225;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.886478Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.887001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.895840Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.896690Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039225;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.897135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039283;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.900969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.906901Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039283;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.907852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.910429Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.915516Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.915893Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039269;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.920904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.924542Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039269;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.925042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.925950Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.926444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039215;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.929441Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.930021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.930898Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039215;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.931371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.936292Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.936848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.940275Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.940619Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.941127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.944957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.949047Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.949550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.953179Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.953704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039191;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.958226Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.958717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039209;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.962249Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039191;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.962737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.966836Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039209;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.970777Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.974053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:02.978271Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.094244Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrvwtxcxn07cjjbm6bzyce", SessionId: 
ydb://session/3?node_id=1&id=N2FhNTM0OS02ZjU3M2YzNS1iOWRmZWZkNC1kMjEyODBiNg==, Slow query, duration: 35.672165s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:03.463048Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:03.463071Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:03.463950Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::Inner_2 [GOOD] Test command err: Trying to start YDB, gRPC: 13349, MsgBus: 4276 2025-06-25T14:47:05.617141Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898576271991901:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:05.617267Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e10/r3tmp/tmpuLr4nq/pdisk_1.dat 2025-06-25T14:47:06.192107Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:06.192200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:06.219582Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:06.224431Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898576271991804:2080] 1750862825576876 != 1750862825576879 2025-06-25T14:47:06.230515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13349, node 1 2025-06-25T14:47:06.508776Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:06.508792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:06.508797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:06.508881Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:06.641624Z node 1 :TX_CONVEYOR 
ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4276 TClient is connected to server localhost:4276 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:07.440009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:07.478242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:07.499670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:07.738612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:07.956184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:08.073193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:09.705837Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898593451862627:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.705917Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:10.059603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.111304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.150851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.178922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.260748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.330596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.374407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.437328Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898597746830584:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:10.437399Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:10.437600Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898597746830589:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:10.441308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:10.456382Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898597746830591:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:10.515942Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898597746830642:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:10.604544Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898576271991901:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:10.604596Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:11.772808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... nnecting -> Connected TServer::EnableGrpc on GrpcPort 20821, node 2 2025-06-25T14:47:14.440869Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:14.440889Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:14.440896Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:14.440987Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9050 2025-06-25T14:47:14.972790Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9050 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:15.329257Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:47:15.337205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:47:15.355024Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:15.524398Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:15.711829Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:15.811504Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:18.175727Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898631276230897:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.175795Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.251816Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:18.345370Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:18.432834Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:18.491995Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:18.539847Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:18.602913Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:18.667353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:18.756187Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898631276231556:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.756280Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.756559Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898631276231561:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.760893Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:18.774053Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898631276231563:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:47:18.871057Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898631276231614:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:18.976752Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898609801392973:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:19.018037Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:20.044409Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:20.118926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:20.197030Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:20.262036Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCDS96 [GOOD] Test command err: Trying to start YDB, gRPC: 19142, MsgBus: 22144 2025-06-25T14:45:35.502404Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898189447214900:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:35.503401Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e88/r3tmp/tmpyU2HNP/pdisk_1.dat 2025-06-25T14:45:35.896369Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898189447214869:2080] 1750862735495263 != 1750862735495266 2025-06-25T14:45:35.901091Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:35.909247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:35.909332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:35.911019Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19142, node 1 2025-06-25T14:45:36.043005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:36.043030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:36.043038Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:36.043149Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22144 2025-06-25T14:45:36.526770Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22144 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:36.751965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:36.774049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:45:38.562463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898202332117409:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:38.562821Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898202332117401:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:38.562910Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:38.567761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:38.581150Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898202332117415:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:45:38.640235Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898202332117466:2335] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:39.004623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:45:39.223420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898206627085007:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:39.223421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898206627085020:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:39.223657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898206627085020:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:39.223926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898206627085020:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:39.224020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898206627085020:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:39.224319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898206627085020:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:39.224425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898206627085020:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:39.224545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898206627085020:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:39.224663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898206627085020:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:39.224765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[1:7519898206627085020:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:39.224855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898206627085020:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:39.224859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898206627085007:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:39.225109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898206627085020:2317];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:45:39.225173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898206627085007:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:39.225327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898206627085007:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:39.225456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898206627085007:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:39.225559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898206627085007:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:39.225677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898206627085007:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:39.225865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898206627085007:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:39.225981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898206627085007:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunk ... 
93085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.798331Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.802517Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.803162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.805030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.811578Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.812202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039222;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.813411Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.813965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.819192Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.819775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.824245Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039222;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.824588Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.825156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.830208Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.830777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.833448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.836188Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.836869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.838384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.838927Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.842135Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.842674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.847855Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.851857Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.852715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.856596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.862100Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.862628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.865911Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.866390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.871971Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.875498Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.876160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.877736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.884515Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.885279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.891426Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.893881Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:46:59.894476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:46:59.903832Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:00.156874Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrvktt2wr41gdy6jwk0j0d", SessionId: ydb://session/3?node_id=1&id=YzY1NjA2ZjQtY2NlMjJjM2ItZjU4ZTU4MS1mMzFjMDAyNA==, Slow query, duration: 41.954262s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:00.854204Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:47:00.854721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:47:00.857127Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519898498684916145:9633];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-25T14:47:00.857582Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpJoinOrder::SortingsWithLookupJoin1-RemoveLimitOperator [GOOD] >> KqpBatchUpdate::SimplePartitions [GOOD] >> KqpJoinOrder::SortingsByPKWithLookupJoin+RemoveLimitOperator [GOOD] >> KqpJoin::RightSemiJoin_ComplexSecondaryIndex [GOOD] >> KqpFlipJoin::RightSemi_1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/batch_operations/unittest >> KqpBatchUpdate::SimplePartitions [GOOD] Test command err: Trying to start YDB, gRPC: 8032, MsgBus: 1569 2025-06-25T14:42:40.739483Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897437409322168:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:40.739768Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bd4/r3tmp/tmpSRbNkt/pdisk_1.dat 2025-06-25T14:42:41.589759Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:41.595860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:42:41.595957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:42:41.600201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8032, node 1 2025-06-25T14:42:41.738508Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:42:42.728343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:42:42.728365Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-06-25T14:42:42.728370Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:42:42.728560Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1569 TClient is connected to server localhost:1569 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:42:44.994541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:42:45.124695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.402180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.562772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:42:45.650770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:42:45.756890Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897437409322168:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:45.764046Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:42:45.836936Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897458884160100:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:45.837006Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:47.940676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.002683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.024209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.046724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.110274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.173555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.224710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:42:48.259230Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471769062664:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.259299Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.259380Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897471769062669:2442], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:42:48.262045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:42:48.270242Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897471769062671:2443], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:42:48.326228Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897471769062722:3440] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 22360, MsgBus: 8500 2025-06-25T14:42:56.059222Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519897504569997198:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:42:56.059280Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bd4/r3tmp/tmpDazpGR/pdisk_1.dat 2025-06-25T14:42:56.174590Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:42:56.18 ... duled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:46:46.847940Z node 15 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [15:7519898492782237786:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:54.832416Z node 15 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:46:54.832452Z node 15 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Trying to start YDB, gRPC: 25854, MsgBus: 16974 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bd4/r3tmp/tmpJ5fDqc/pdisk_1.dat 2025-06-25T14:47:03.674451Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:47:03.675464Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:03.675549Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:03.703635Z node 16 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:03.704473Z node 16 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [16:7519898567787717868:2080] 1750862823281628 != 1750862823281631 2025-06-25T14:47:03.707354Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25854, node 16 2025-06-25T14:47:03.899552Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:03.899581Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:03.899590Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:03.899742Z node 16 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:04.304429Z node 16 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16974 TClient is connected to server localhost:16974 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:04.873861Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:04.889273Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:47:04.915026Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:05.061249Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:05.341225Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:05.467749Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:09.207448Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519898593557523275:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.207540Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.287040Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.364821Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.449359Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.542548Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.623599Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.703887Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.753935Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.849793Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519898593557523944:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.849889Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.850126Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519898593557523949:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.855727Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:09.878780Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [16:7519898593557523951:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:47:09.966959Z node 16 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [16:7519898593557524002:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:18.637051Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:47:18.637074Z node 16 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded >> KqpJoinOrder::SortingsWithLookupJoin4-RemoveLimitOperator >> OlapEstimationRowsCorrectness::TPCH3 >> KqpJoinOrder::TestJoinOrderHintsComplex+ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCDS64_small >> KqpJoinOrder::CanonizedJoinOrderTPCH17 |85.6%| [TA] $(B)/ydb/core/kqp/ut/batch_operations/test-results/unittest/{meta.json ... results_accumulator.log} |85.6%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/batch_operations/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin1-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 14575, MsgBus: 8009 2025-06-25T14:46:36.036798Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898451645752706:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:36.037124Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e3b/r3tmp/tmpFn8pDp/pdisk_1.dat 2025-06-25T14:46:36.800698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:36.800797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:36.806305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:36.936429Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:36.938148Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898451645752522:2080] 1750862796007157 != 1750862796007160 TServer::EnableGrpc on GrpcPort 14575, node 1 2025-06-25T14:46:37.008581Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:46:37.203670Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:37.203687Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:37.203693Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:37.203801Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8009 TClient is connected to server localhost:8009 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:38.149795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:38.189277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:40.504243Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898468825622353:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:40.504395Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:40.504779Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898468825622365:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:40.508419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:40.521704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:46:40.521928Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898468825622367:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:40.580013Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898468825622418:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:40.976079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.036384Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898451645752706:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:41.036455Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:41.128690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.164795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.200946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.239364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.413347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.451384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.547463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.597053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.682172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.727603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.769592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:41.855080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation ... 
28145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.829674Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.830163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.832861Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.833352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.835140Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.835657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.838551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.839364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.840447Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.840968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.845458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.845990Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.846456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.846876Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.851735Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.851818Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.852807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.853238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.858509Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.859396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.861380Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.862244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.863363Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.864502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.866774Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.868079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.868233Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.873187Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.885490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.885679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.891242Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.892183Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.894045Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.894692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.897032Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.897597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.904368Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.904441Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.904897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:17.909673Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:17.966296Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrwebc1ffe08mqet6c3rce", SessionId: ydb://session/3?node_id=1&id=MzJmYzhiMTctZWYyZWE1ZWMtZDg4MDBjNjEtN2VmNjhhYTg=, Slow query, duration: 32.609291s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:18.241470Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:18.241985Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:18.242535Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519898516070272958:3455];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-25T14:47:18.242964Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPKWithLookupJoin+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 21690, MsgBus: 3614 2025-06-25T14:46:32.824954Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898434240060995:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:32.868693Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e3f/r3tmp/tmpeSpeLg/pdisk_1.dat 2025-06-25T14:46:33.330752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:33.330850Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:33.333547Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:33.339132Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898434240060801:2080] 1750862792705059 != 1750862792705062 2025-06-25T14:46:33.344070Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21690, node 1 2025-06-25T14:46:33.544796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:33.544829Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:33.544835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:33.544935Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:33.868558Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3614 TClient is connected to server localhost:3614 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:34.462990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:34.512809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:36.590049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898451419930630:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:36.590181Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:36.591420Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898451419930642:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:36.595482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:36.608660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:46:36.609040Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898451419930644:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:36.706034Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898451419930695:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:37.217193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:37.421169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:37.503098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:37.545959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:37.632654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:37.807824Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898434240060995:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:37.808105Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:37.817320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:37.872557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:37.905776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:37.984796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:38.024218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:38.111317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:38.145351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:38.174907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation ... 
25495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.228691Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038539;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.229237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.229853Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.230340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.233784Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.238261Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.239070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.243849Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.244947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.250027Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.250713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.252637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.259882Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.261227Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.261755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.264942Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.270991Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.271532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.274128Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.274788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.281140Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.281677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.288408Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.289332Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.289713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.289895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.302384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.303195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.307210Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.307813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.312256Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.316916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.316914Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.321000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.326821Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.327792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.330039Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.341077Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.374134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038448;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:19.398839Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038448;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:19.520023Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrwaed0m95fsncd39kk5wn", SessionId: ydb://session/3?node_id=1&id=OTU3MTQzNGItYTg5YTgzYTYtZjhiNTNmNjktNmFlZmVjMTE=, Slow query, duration: 38.162079s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:19.907842Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:19.908372Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:19.908880Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519898481484707592:2773];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-25T14:47:19.909321Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::RightSemiJoin_ComplexSecondaryIndex [GOOD] Test command err: Trying to start YDB, gRPC: 18821, MsgBus: 5529 2025-06-25T14:47:05.618388Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898575641120643:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:05.627426Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e11/r3tmp/tmpBqn52y/pdisk_1.dat 2025-06-25T14:47:06.187817Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:06.187916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:06.204632Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:06.207802Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898575641120526:2080] 1750862825569893 != 1750862825569896 2025-06-25T14:47:06.224082Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18821, node 1 2025-06-25T14:47:06.426219Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:06.426233Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:06.426237Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:06.426326Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:06.612427Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5529 TClient is connected to server localhost:5529 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:07.236387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:07.248709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:07.276557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:07.454413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:07.665475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:07.743863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:09.677251Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898592820991351:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.677350Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:10.027137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.093411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.148066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.188117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.237064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.301302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.366116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.477641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898597115959305:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:10.477718Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:10.478032Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898597115959310:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:10.481947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:10.502779Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898597115959312:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:10.602362Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898597115959363:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:10.604347Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898575641120643:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:10.604420Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:11.888178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:16.388865Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28117 TClient is connected to server localhost:28117 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:16.910483Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:16.917170Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:16.929391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:16.994589Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:47:17.016350Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:17.275637Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:17.406977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:19.829368Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898633993519975:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:19.829430Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:19.899691Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.962117Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:20.027295Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:20.073443Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:20.141275Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:20.223974Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:20.309016Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:20.508708Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898638288487934:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:20.508801Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:20.509158Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898638288487939:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:20.512976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:20.530696Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898638288487941:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:20.611138Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898638288487992:3413] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:20.942316Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898616813649182:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:20.942711Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:22.018632Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:22.084161Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:22.125717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:22.169302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:22.235715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> OlapEstimationRowsCorrectness::TPCH2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogExistingUserId [GOOD] Test command err: 2025-06-25T14:44:52.828222Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898005522874052:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:52.837401Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ad/r3tmp/tmpbwfD5N/pdisk_1.dat 2025-06-25T14:44:53.255462Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:53.278009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:53.278088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:53.281720Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24749, node 1 2025-06-25T14:44:53.336949Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:44:53.424644Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:53.424665Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:53.424676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:53.424772Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61051 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:44:53.844643Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:53.877753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:61051 2025-06-25T14:44:54.216699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) waiting... 
2025-06-25T14:44:54.384247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898014112809647:2287];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:44:54.384688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898014112809647:2287];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:44:54.384972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898014112809647:2287];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:44:54.385111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898014112809647:2287];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:44:54.385209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898014112809647:2287];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:44:54.385320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898014112809647:2287];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:44:54.385432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898014112809647:2287];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:44:54.385541Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898014112809647:2287];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:44:54.385638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898014112809647:2287];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:44:54.385746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898014112809647:2287];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:44:54.385860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898014112809647:2287];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:44:54.462481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898014112809629:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:44:54.462558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7519898014112809629:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:44:54.462795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898014112809629:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:44:54.462895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898014112809629:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:44:54.462999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898014112809629:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:44:54.463093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898014112809629:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:44:54.463210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898014112809629:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:44:54.463315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898014112809629:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:44:54.463430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898014112809629:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:44:54.463523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898014112809629:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:44:54.463633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898014112809629:2284];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:44:54.506012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898014112809630:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:44:54.506092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898014112809630:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:44:54.506343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7519898014112809630:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:44:54.506433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898014112809630:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:44:54.506548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898014112809630:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:44:54.506633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898014112809630:2285];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:44:54.506717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:751 ... task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 41, seqNo: [1] 2025-06-25T14:47:22.229557Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 42, seqNo: [1] 2025-06-25T14:47:22.229574Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 43, seqNo: [1] 2025-06-25T14:47:22.229593Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 44, seqNo: [1] 2025-06-25T14:47:22.229609Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 45, seqNo: [1] 2025-06-25T14:47:22.229625Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 46, seqNo: [1] 2025-06-25T14:47:22.229642Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 47, seqNo: [1] 2025-06-25T14:47:22.229658Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 48, seqNo: [1] 2025-06-25T14:47:22.229676Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 49, seqNo: [1] 2025-06-25T14:47:22.229692Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 50, seqNo: [1] 2025-06-25T14:47:22.229709Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 51, seqNo: [1] 2025-06-25T14:47:22.229726Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. 
Tasks execution finished, don't wait for ack delivery in input channelId: 52, seqNo: [1] 2025-06-25T14:47:22.229742Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 53, seqNo: [1] 2025-06-25T14:47:22.229759Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 54, seqNo: [1] 2025-06-25T14:47:22.229776Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 55, seqNo: [1] 2025-06-25T14:47:22.229793Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 56, seqNo: [1] 2025-06-25T14:47:22.229808Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 57, seqNo: [1] 2025-06-25T14:47:22.229825Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 58, seqNo: [1] 2025-06-25T14:47:22.229842Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 59, seqNo: [1] 2025-06-25T14:47:22.229859Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 60, seqNo: [1] 2025-06-25T14:47:22.229875Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 61, seqNo: [1] 2025-06-25T14:47:22.229891Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 62, seqNo: [1] 2025-06-25T14:47:22.229908Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 63, seqNo: [1] 2025-06-25T14:47:22.229926Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 64, seqNo: [1] 2025-06-25T14:47:22.229954Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710670, task: 65. Tasks execution finished 2025-06-25T14:47:22.229985Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [28:7519898637671060792:3156], TxId: 281474976710670, task: 65. Ctx: { TraceId : 01jykrxf5r4qca2aqa7m03z04b. SessionId : ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-25T14:47:22.230179Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710670, task: 65. pass away 2025-06-25T14:47:22.230345Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [28:7519898633376093417:3082] TxId: 281474976710670. 
Ctx: { TraceId: 01jykrxf5r4qca2aqa7m03z04b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7519898637671060792:3156], task: 65, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 10796 Tasks { TaskId: 65 StageId: 1 CpuTimeUs: 702 FinishTimeMs: 1750862842228 InputRows: 1 InputBytes: 310 OutputRows: 1 OutputBytes: 310 ResultRows: 1 ResultBytes: 310 ComputeCpuTimeUs: 315 BuildCpuTimeUs: 387 HostName: "ghrun-kqfvx6aroe" NodeId: 28 CreateTimeMs: 1750862840074 UpdateTimeMs: 1750862842230 } MaxMemoryUsage: 1048576 } 2025-06-25T14:47:22.230412Z node 28 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710670;task_id=65;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:47:22.230446Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976710670. Ctx: { TraceId: 01jykrxf5r4qca2aqa7m03z04b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7519898637671060792:3156] 2025-06-25T14:47:22.230609Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [28:7519898633376093417:3082] TxId: 281474976710670. Ctx: { TraceId: 01jykrxf5r4qca2aqa7m03z04b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T14:47:22.230689Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [28:7519898633376093417:3082] TxId: 281474976710670. Ctx: { TraceId: 01jykrxf5r4qca2aqa7m03z04b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.103523s ReadRows: 50 ReadBytes: 16000 ru: 69 rate limiter was not found force flag: 1 2025-06-25T14:47:22.230783Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz, ActorId: [28:7519898629081126068:3082], ActorState: ExecuteState, TraceId: 01jykrxf5r4qca2aqa7m03z04b, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-25T14:47:22.231185Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz, ActorId: [28:7519898629081126068:3082], ActorState: ExecuteState, TraceId: 01jykrxf5r4qca2aqa7m03z04b, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 2301.051 QueriesCount: 1 2025-06-25T14:47:22.231271Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz, ActorId: [28:7519898629081126068:3082], ActorState: ExecuteState, TraceId: 01jykrxf5r4qca2aqa7m03z04b, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-25T14:47:22.231403Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz, ActorId: [28:7519898629081126068:3082], ActorState: ExecuteState, TraceId: 01jykrxf5r4qca2aqa7m03z04b, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:47:22.231472Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz, ActorId: [28:7519898629081126068:3082], ActorState: ExecuteState, TraceId: 01jykrxf5r4qca2aqa7m03z04b, EndCleanup, isFinal: 1 2025-06-25T14:47:22.231566Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz, ActorId: [28:7519898629081126068:3082], ActorState: ExecuteState, TraceId: 01jykrxf5r4qca2aqa7m03z04b, Sent query response back to proxy, proxyRequestId: 5, proxyId: [28:7519898560361645470:2184] 2025-06-25T14:47:22.231623Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: 
ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz, ActorId: [28:7519898629081126068:3082], ActorState: unknown state, TraceId: 01jykrxf5r4qca2aqa7m03z04b, Cleanup temp tables: 0 2025-06-25T14:47:22.235582Z node 28 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862839000, txId: 18446744073709551615] shutting down 2025-06-25T14:47:22.237141Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=28&id=ZWY3MWIzYzAtNjIwMDA5N2YtNzk5ZjQzNC00MjNkMWUz, ActorId: [28:7519898629081126068:3082], ActorState: unknown state, TraceId: 01jykrxf5r4qca2aqa7m03z04b, Session actor destroyed RESULT: [[42000u;"nginx";"resource_6";"19";[2];["message"];["{\"auth\":{\"org_id\":7704,\"service\":{\"internal\":\"false\",\"ip\":\"258.258.258.258\"},\"type\":\"token\",\"user\":{\"id\":1000042,\"ip\":\"257.257.257.257\",\"is_cloud\":\"false\"}}}"]]] --------------------- STATS: total CPU: 1061 duration: 2287107 usec cpu: 441458 usec { name: "/Root/OlapStore/log1" reads { rows: 50 bytes: 16000 } } 2025-06-25T14:47:22.488551Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;self_id=[28:7519898573246548289:2286];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037891; 2025-06-25T14:47:22.528698Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[28:7519898573246548309:2288];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037889; >> KqpJoin::LeftJoinWithNull+StreamLookupJoin [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_Simple >> KqpJoinOrder::TPCDS94+ColumnStore >> KqpJoinOrder::FiveWayJoinWithPreds+ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH3 >> KqpIndexLookupJoin::LeftSemi >> KqpJoinOrder::CanonizedJoinOrderTPCH2 >> KqpJoinOrder::SortingsSimpleOrderByAliasIndexDesc+RemoveLimitOperator [GOOD] >> LabeledDbCounters::TwoTablets [GOOD] >> LabeledDbCounters::TwoTabletsKillOneTablet >> KqpJoinOrder::TPCDS95-ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH2 [GOOD] Test command err: Trying to start YDB, gRPC: 17103, MsgBus: 63549 2025-06-25T14:45:41.209916Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898213985314092:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:41.209945Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e7f/r3tmp/tmplfnRon/pdisk_1.dat 2025-06-25T14:45:41.790641Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:41.790787Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:41.802560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:41.848422Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898213985314007:2080] 1750862741196395 != 1750862741196398 
2025-06-25T14:45:41.859785Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17103, node 1 2025-06-25T14:45:41.924248Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:41.924276Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:41.924283Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:41.924400Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63549 2025-06-25T14:45:42.188843Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63549 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:42.515102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:44.511471Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898226870216537:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.511675Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898226870216548:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.514911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:44.519317Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.527633Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898226870216551:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:44.626229Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898226870216602:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:44.967353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:45:45.152823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898231165184163:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:45.152990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898231165184131:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:45.153060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898231165184163:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:45.153072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898231165184131:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:45.153310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898231165184131:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:45.153418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898231165184131:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:45.153452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898231165184163:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:45.153516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898231165184131:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:45.153556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898231165184163:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:45.153607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037903;self_id=[1:7519898231165184131:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:45.153640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898231165184163:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:45.153697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898231165184131:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:45.153771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898231165184163:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:45.153830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898231165184131:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:45.153873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898231165184163:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:45.153923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898231165184131:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:45.153957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898231165184163:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:45.154048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898231165184131:2311];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:45.154053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898231165184163:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:45.154138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898231165184163:2316];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:45.154157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898231165184131:2311];tablet_id=72075186224037903;process=TTxInitSche ... 
57819Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.160490Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.160952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.170031Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.170640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.170789Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.171207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.176008Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.179957Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.181063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.185850Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.186445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.187148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.191473Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.195917Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.196643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.201052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.205362Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.205923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.209309Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.209828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.215187Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.215772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.218291Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.218784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.224075Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.227299Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.227858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.228940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.237157Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.237740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.236300Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.241104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.246062Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.246946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.249696Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.250511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.255536Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.258347Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.517169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.542304Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.597275Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrvx3t0dph5sdg24kvrygw", SessionId: ydb://session/3?node_id=1&id=ZDQwNDIzYzMtMzZiMTg1N2EtNTVkOTQ2YzAtZTc4ODlkOTE=, Slow query, duration: 40.890790s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:09.023063Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:09.023565Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:09.024327Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519898501748171807:8733];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-25T14:47:09.024727Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> OlapEstimationRowsCorrectness::TPCH21 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsSimpleOrderByAliasIndexDesc+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 24922, MsgBus: 4026 2025-06-25T14:46:39.692740Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898464824226890:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:39.693058Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e33/r3tmp/tmpntaON6/pdisk_1.dat 2025-06-25T14:46:40.246899Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:40.246986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:40.259433Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:40.312106Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898464824226708:2080] 1750862799635571 != 1750862799635574 2025-06-25T14:46:40.330734Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24922, node 1 2025-06-25T14:46:40.496815Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:40.496834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:40.496841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:46:40.496948Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:40.680411Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4026 TClient is connected to server localhost:4026 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:41.419165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:41.454577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:43.782566Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898482004096534:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:43.782662Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898482004096542:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:43.782727Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:43.788275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:43.813475Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898482004096548:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:43.905063Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898482004096599:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:44.333077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.476434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.555150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.589872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.626655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:44.680414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898464824226890:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:44.680488Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:44.983076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.061894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.140804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.247059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.299084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.370930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.418765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:45.466252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:46.372824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperat ... 
=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.174709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.176686Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.177164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038515;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.179370Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.179956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.184192Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038515;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.184201Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.184772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.185217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.189427Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.190009Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.190717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.190724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.195472Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.196031Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.196862Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.197327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.200776Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.201249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.201763Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.202219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.205747Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.206254Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.207283Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.207856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038541;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.210524Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.211062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.212492Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038541;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.212956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.215800Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.216244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.217626Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.218109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.220778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.221311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.222701Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.223289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:24.225632Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.232625Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.311858Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrwht69s6pgm8eqkya8wqz", SessionId: ydb://session/3?node_id=1&id=MmE2YWEwMTAtMWNmNDRhNTEtY2Q4NWQ1MjctNTk0Y2U0OWU=, Slow query, duration: 35.408664s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:24.591881Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:24.592398Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:24.592994Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519898615148108434:5420];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-25T14:47:24.593379Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
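For readability: the DDL quoted with escaped newlines in the KQP_SLOW_LOG entry immediately above (and repeated verbatim in later test outputs in this log) expands to the following statements. This is the same text as in the log, only unescaped; nothing has been added or changed.

CREATE TABLE t1 (
    id1 Int32 NOT NULL,
    PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
    id2 Int64 NOT NULL,
    t1_id1 Int64 NOT NULL,
    -- random_field2 Int32
    PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
    id3 Int16 NOT NULL,
    -- random_field3 Int32
    PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);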
: Warning: Execution, code: 1060
:2:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503
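The "Given predicate is not suitable for used index" warning above (code 2503, repeated once more below) is generally emitted when a query names a secondary index but the WHERE predicate does not constrain that index's key columns, so the engine cannot do a key lookup through it. A minimal hypothetical YQL sketch of the pattern; the table, columns, and index name here are invented for illustration and are not the ix_bank_document_exec_dt_accounts index from the test:

-- Hypothetical schema: bank_document(id, exec_dt, account_id, ...),
-- with a secondary index ix_doc_exec_dt whose key column is exec_dt.
-- The VIEW clause forces the index, but the predicate does not touch
-- the index key, so a warning like code 2503 is reported and the
-- query falls back to scanning.
SELECT id, exec_dt
FROM bank_document VIEW ix_doc_exec_dt
WHERE account_id = 12345;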
: Warning: Execution, code: 1060
:2:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503 >> TxUsage::WriteToTopic_Demo_47_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS95-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 25080, MsgBus: 19474 2025-06-25T14:46:18.670244Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898375018802502:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:18.670272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e4e/r3tmp/tmps1eaVf/pdisk_1.dat 2025-06-25T14:46:19.373101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:19.373208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:19.374118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:19.385696Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:19.392527Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898375018802417:2080] 1750862778661223 != 1750862778661226 TServer::EnableGrpc on GrpcPort 25080, node 1 2025-06-25T14:46:19.590135Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:19.590156Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:19.590163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:19.590261Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:19.660710Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19474 TClient is connected to server localhost:19474 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:46:20.555037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:20.589315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:22.981875Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898392198672251:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:22.981996Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:22.982254Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898392198672263:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:22.986090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:23.001024Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898392198672265:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:23.079543Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898396493639612:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:23.488305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:23.634212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:23.672086Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898375018802502:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:23.672157Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:23.676385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:23.710877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:23.741420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:23.935437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.001261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.082669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.155608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.197012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.236810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.315039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:24.355710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:25.397998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
ntroller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:02.994773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:02.996904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:02.999347Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.000588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.000958Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.001347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.006495Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.007039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.010500Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.011038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.015324Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.015908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.019979Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.019980Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.020880Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.024538Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.025228Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.025872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.030431Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.030970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.035714Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.036577Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.036981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.041554Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.042393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.052802Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.080642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.081685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.087745Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.088345Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.088480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.088910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:03.093563Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.097438Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:03.205618Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrvx4rcvggdate6v43npqq", SessionId: ydb://session/3?node_id=1&id=NmYwZTVjZDctOTZiZTVjZGQtNWE0YTI3NGItNWI4N2NkNTE=, Slow query, duration: 35.468413s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:04.330226Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:04.331014Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519898439443320822:3168];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-25T14:47:04.331398Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:04.332264Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:26.004585Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrxbps291ndrxpekb71r9p", SessionId: ydb://session/3?node_id=1&id=NmYwZTVjZDctOTZiZTVjZGQtNWE0YTI3NGItNWI4N2NkNTE=, Slow query, duration: 10.586956s, status: 
STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n-- NB: Subquerys\n$ws_wh =\n(select ws1.ws_order_number ws_order_number,ws1.ws_warehouse_sk wh1,ws2.ws_warehouse_sk wh2\n from web_sales ws1 cross join web_sales ws2\n where ws1.ws_order_number = ws2.ws_order_number\n and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk);\n-- start query 1 in stream 0 using template query95.tpl and seed 2031708268\n select\n count(distinct ws1.ws_order_number) as `order count`\n ,sum(ws_ext_ship_cost) as `total shipping cost`\n ,sum(ws_net_profit) as `total net profit`\nfrom\n web_sales ws1\n cross join date_dim\n cross join customer_address\n cross join web_site\nwhere\n cast(d_date as date) between cast('2002-4-01' as date) and\n (cast('2002-4-01' as date) + DateTime::IntervalFromDays(60))\nand ws1.ws_ship_date_sk = d_date_sk\nand ws1.ws_ship_addr_sk = ca_address_sk\nand ca_state = 'AL'\nand ws1.ws_web_site_sk = web_site_sk\nand web_company_name = 'pri'\nand ws1.ws_order_number in (select ws_order_number\n from $ws_wh)\nand ws1.ws_order_number in (select wr_order_number\n from web_returns cross join $ws_wh ws_wh\n where wr_order_number = ws_wh.ws_order_number)\norder by `order count`\nlimit 100;\n", parameters: 0b >> KqpJoinOrder::TPCDS92-ColumnStore >> KqpJoinOrder::DatetimeConstantFold-ColumnStore [GOOD] >> KqpFlipJoin::RightSemi_1 [GOOD] >> KqpFlipJoin::RightOnly_3 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH21 [GOOD] Test command err: Trying to start YDB, gRPC: 14064, MsgBus: 20547 2025-06-25T14:45:41.595545Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898216468450627:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:41.596837Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e7c/r3tmp/tmpzBrtV9/pdisk_1.dat 2025-06-25T14:45:42.072676Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898216468450519:2080] 1750862741581245 != 1750862741581248 2025-06-25T14:45:42.076580Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:42.078912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:42.078985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:42.111075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14064, node 1 2025-06-25T14:45:42.336999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:42.337026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:42.337033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:45:42.337134Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:20547 2025-06-25T14:45:42.596569Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20547 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:43.035304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:44.924251Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898229353353049:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.924405Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.924885Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898229353353061:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:44.929554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:44.943146Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898229353353063:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:45.022452Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898233648320410:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:45.317717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:45:45.539179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898233648320647:2322];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:45.539348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898233648320647:2322];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:45.539633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898233648320647:2322];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:45.539735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898233648320647:2322];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:45.540218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898233648320647:2322];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:45.540333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898233648320647:2322];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:45.540448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898233648320647:2322];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:45.540544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898233648320647:2322];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:45.540633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898233648320647:2322];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:45.540744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7519898233648320647:2322];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:45:45.540834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898233648320647:2322];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:45:45.554026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898233648320675:2324];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:45:45.554080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898233648320675:2324];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:45:45.554253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898233648320675:2324];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:45:45.554362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898233648320675:2324];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:45:45.554451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898233648320675:2324];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:45:45.554548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898233648320675:2324];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:45:45.554650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898233648320675:2324];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:45:45.562873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898233648320675:2324];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:45:45.563087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898233648320675:2324];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:45:45.563228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898233648320675:2324];tablet_id=72075186224037901;process=TTxInitSchema:: ... 
02408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.303122Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.306587Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.307141Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.307706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.316026Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.316696Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.320686Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.322313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.323406Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.324102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.328790Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.329467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.335231Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.335892Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.335906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.341257Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.341979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.343388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.347905Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.348662Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.348816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.349124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.354237Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.354926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.357289Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.357899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.360749Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.361348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.363119Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.363693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.366787Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.367448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.373179Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.373811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.374700Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.375156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.379925Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.380680Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.381886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:08.388356Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:08.574777Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrvxv984trtf5kwfsp7tym", SessionId: ydb://session/3?node_id=1&id=MmU4MzIyZGEtMTVlYmM1YWYtNTAwM2Y3ODgtM2MxZmE3YTc=, Slow query, duration: 40.117357s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:08.853906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:08.854376Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:08.855142Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519898542886020697:9755];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-25T14:47:08.855555Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TxUsage::WriteToTopic_Demo_48_Table >> KqpIndexLookupJoin::LeftJoinOnlyRightColumn+StreamLookup >> KqpJoin::LeftJoinPushdownPredicate_Simple [GOOD] >> KqpIndexLookupJoin::InnerJoinCustomColumnOrder+StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::LeftJoinPushdownPredicate_Simple [GOOD] Test command err: Trying to start YDB, gRPC: 1029, MsgBus: 30413 2025-06-25T14:47:19.971814Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898636998769423:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:19.971965Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000df5/r3tmp/tmplyFFkp/pdisk_1.dat 2025-06-25T14:47:20.555275Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898636998769260:2080] 1750862839941955 != 1750862839941958 2025-06-25T14:47:20.561271Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:20.569014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:20.569116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:20.582607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1029, node 1 2025-06-25T14:47:20.836621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:20.836653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-06-25T14:47:20.836662Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:20.836787Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:20.976460Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30413 TClient is connected to server localhost:30413 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:21.699416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:21.725905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:21.755385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:21.975864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:22.177387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
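
For readability: the slow-query entry above (TraceId "01jykrvxv984trtf5kwfsp7tym", duration 40.117357s) reports its statement text with escaped newlines. Unescaping that text, verbatim from the log and with nothing added, gives the following DDL:

CREATE TABLE t1 (
  id1 Int32 NOT NULL,
  PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
  id2 Int64 NOT NULL,
  t1_id1 Int64 NOT NULL,
  -- random_field2 Int32
  PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
  id3 Int16 NOT NULL,
  -- random_field3 Int32
  PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

The same statement text appears again later in the KqpJoinOrder::DatetimeConstantFold-ColumnStore output and is not repeated there.
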
2025-06-25T14:47:22.277361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:24.183802Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898658473607371:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:24.183876Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:24.446282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.484008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.534793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.579573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.626709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.677137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.758969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.876659Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898658473608033:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:24.876745Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:24.876983Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898658473608038:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:24.880662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:24.909718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:47:24.910007Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898658473608040:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:24.964436Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898636998769423:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:24.964496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:24.991661Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898658473608091:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:26.161543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is und ... 25-06-25T14:47:29.222433Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:29.222503Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:29.223795Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28824, node 2 2025-06-25T14:47:29.396906Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:29.396932Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:29.396940Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:29.397063Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4495 2025-06-25T14:47:29.912420Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4495 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:47:30.273614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:47:30.283997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:30.290974Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:30.421534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:30.583571Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:30.671656Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:33.006076Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898693998706947:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:33.006148Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:33.102225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:33.151552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:33.198083Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:33.236623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:33.297815Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:33.343485Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:33.423047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:33.505595Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898693998707606:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:33.505677Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:33.505890Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898693998707611:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:33.509045Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:33.518029Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898693998707613:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:33.589699Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898693998707664:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:33.888457Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898672523868850:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:33.888510Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:34.770835Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:34.821669Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:34.883316Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::SortingsByPK-RemoveLimitOperator >> KqpJoinOrder::FiveWayJoinWithConstantFoldOpt-ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::DatetimeConstantFold-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 3387, MsgBus: 1954 2025-06-25T14:46:50.103384Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898511754012786:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:50.103624Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e2a/r3tmp/tmpA6Y6WZ/pdisk_1.dat 2025-06-25T14:46:50.892180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:50.892271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:50.902615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:50.971478Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-25T14:46:50.976479Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898511754012587:2080] 1750862810050515 != 1750862810050518 TServer::EnableGrpc on GrpcPort 3387, node 1 2025-06-25T14:46:51.100154Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:46:51.228939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:51.228970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:51.228986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:51.229112Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1954 TClient is connected to server localhost:1954 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:52.261455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:54.622201Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898528933882414:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:54.622339Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:54.625006Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898528933882426:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:54.630120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:54.658536Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898528933882428:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:54.719212Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898528933882479:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:55.056508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.100568Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898511754012786:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:55.100668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:55.171941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.208115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.239149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.271205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.574940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.633168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.671645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.745068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.781063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.819322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.861308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:55.914588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:56.668002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/ ... 
23222Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.223787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.228889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038459;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.233207Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.233793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.238031Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038459;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.238699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.245728Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.246263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.251823Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.252435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.259090Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.259664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.265389Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.266104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.268698Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.269165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.273588Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.274108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.279422Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.279900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.279939Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.281195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.285381Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.285955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.288224Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.288834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.294002Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.294593Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.294611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.295185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.299615Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.300275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.300980Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.301526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.306367Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.306366Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.306941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.307341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:30.311690Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.311732Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:30.417293Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrwvg0f2stxgv1ssrc20c7", SessionId: ydb://session/3?node_id=1&id=ZjM3ZDFhZGQtYTZlMWQxOGEtZGQ1ZGJhYmMtNjUzZTBhMjY=, Slow query, duration: 31.599906s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:30.776277Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:30.776799Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:30.777398Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519898606243310418:4277];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-25T14:47:30.777785Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::LeftSemi [GOOD] >> KqpIndexLookupJoin::LeftSemiJoinWithDuplicatesInRightTable+StreamLookupJoin >> KqpJoinOrder::CanonizedJoinOrderTPCH13 [GOOD] >> KqpJoinOrder::FourWayJoinWithPredsAndEquivAndLeft-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithConstantFoldOpt-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 28793, MsgBus: 19298 2025-06-25T14:46:47.593417Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898497291205493:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:47.593525Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e31/r3tmp/tmpVlp35a/pdisk_1.dat 2025-06-25T14:46:48.274312Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898497291205467:2080] 1750862807567910 != 1750862807567913 2025-06-25T14:46:48.281040Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:48.310989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:48.311105Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:48.312688Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28793, node 1 2025-06-25T14:46:48.541315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:48.541343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: 
will try to initialize from file: (empty maybe) 2025-06-25T14:46:48.541352Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:48.541482Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:48.688570Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19298 TClient is connected to server localhost:19298 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:49.771236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:49.800766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:52.112287Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898518766042596:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:52.112428Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:52.112854Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898518766042608:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:52.116726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:52.130981Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898518766042610:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:52.224274Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898518766042661:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:52.596519Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898497291205493:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:52.596574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:52.600430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.727339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.765351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.805097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.882477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.059890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.093758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.139344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.173379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.208748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.260116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.344496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.382473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:54.044941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
45060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.551699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038457;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.552296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038443;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.553425Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.553951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038485;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.562725Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038485;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.563345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038541;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.565015Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038443;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.565474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.572193Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038541;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.573755Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.574272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.576828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.583004Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.583552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.585395Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.585878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.592069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.594239Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.594739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.596954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.603142Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.604484Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.604943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.608797Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.613608Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.614171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.617503Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.618018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038461;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.622814Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.623315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038463;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.626625Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038461;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.627278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038447;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.632172Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038463;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.632749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.636990Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038447;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.637460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.642436Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.643001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:28.648746Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.656283Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:28.852732Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrws6dd6t2r64tdc7mv5dp", SessionId: ydb://session/3?node_id=1&id=MmUxOGRiYTAtYjk1MzdmZGItYzJlOTEwMzYtZGY5ODNjNGI=, Slow query, duration: 32.391267s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:29.329512Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:29.330039Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:29.330624Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519898639025152378:5384];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-25T14:47:29.331018Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::AllowJoinsForComplexPredicates-StreamLookup >> KqpJoinOrder::CanonizedJoinOrderTPCH12 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH13 [GOOD] Test command err: Trying to start YDB, gRPC: 11410, MsgBus: 21157 2025-06-25T14:45:56.173103Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898280450238383:2240];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:56.188851Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e73/r3tmp/tmpibqke8/pdisk_1.dat 2025-06-25T14:45:56.783122Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:45:56.783244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:45:56.800505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:45:56.828598Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:45:56.831376Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898280450238166:2080] 1750862756114569 != 1750862756114572 TServer::EnableGrpc on GrpcPort 11410, node 1 2025-06-25T14:45:57.012979Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:45:57.012999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:45:57.013016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from 
file: (empty maybe) 2025-06-25T14:45:57.013136Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:45:57.172428Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21157 TClient is connected to server localhost:21157 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:45:57.766847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:57.786482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:59.880723Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898293335140695:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:59.880829Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:59.881186Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898293335140707:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:59.885148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:59.899272Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898293335140709:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:59.996892Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898293335140760:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:00.374956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:46:00.654142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898297630108258:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:00.654357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898297630108258:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:00.654595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898297630108258:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:00.654736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898297630108258:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:00.654834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898297630108258:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:00.654930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898297630108258:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:00.655037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898297630108258:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:00.655124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898297630108258:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:00.655231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898297630108258:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:46:00.655347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519898297630108258:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:46:00.655454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898297630108258:2310];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:46:00.697046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898297630108312:2316];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:00.697144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898297630108312:2316];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:00.697364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898297630108312:2316];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:00.697470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898297630108312:2316];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:00.697566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898297630108312:2316];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:00.697669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898297630108312:2316];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:00.697766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898297630108312:2316];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:00.697865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898297630108312:2316];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:00.697990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898297630108312:2316];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunk ... 
63128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.867746Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.868140Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.869330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.869623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.873689Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.874419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.874682Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.875412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.879189Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.879984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.883846Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.883910Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.885145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.885609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.890024Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.890234Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.891102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.891393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.898752Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.899635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.905140Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.905771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.911281Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.912073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.914791Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.915646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.923411Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.923411Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.923907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.928697Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.929003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.929590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.933929Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.933966Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.937299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039354;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.941961Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039354;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.942429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.946584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:23.947687Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:23.951559Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:24.112617Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrwfrh5w4fxkp947d816tt", SessionId: ydb://session/3?node_id=1&id=MzU1MGFkMzMtZWQ5Nzg2NmQtY2NiZjY1MDEtODMwZDRhMGY=, Slow query, duration: 37.310815s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:24.448301Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519898589687935082:9327];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-25T14:47:24.448438Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:24.449969Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:24.450206Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpFlipJoin::RightOnly_3 [GOOD] >> KqpIndexLookupJoin::LeftJoinOnlyRightColumn+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftJoinOnlyRightColumn-StreamLookup >> KqpJoinOrder::CanonizedJoinOrderLookupBug [GOOD] >> KqpIndexLookupJoin::InnerJoinCustomColumnOrder+StreamLookup [GOOD] >> KqpIndexLookupJoin::InnerJoinCustomColumnOrder-StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::RightOnly_3 [GOOD] Test command err: Trying to start YDB, gRPC: 17997, MsgBus: 26913 2025-06-25T14:47:27.351888Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898668226626212:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:27.366622Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000df1/r3tmp/tmpobOvGw/pdisk_1.dat 2025-06-25T14:47:27.959480Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:27.959576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:27.985049Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:27.989790Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898668226626115:2080] 1750862847328579 != 1750862847328582 2025-06-25T14:47:27.996451Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17997, node 1 2025-06-25T14:47:28.188030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-25T14:47:28.188048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:28.188054Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:28.188153Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:28.352224Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26913 TClient is connected to server localhost:26913 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:29.243724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:29.271309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:29.470700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:29.705492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:29.818492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:31.837026Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898685406496920:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:31.837122Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:32.177767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.213962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.255396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.285639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.317910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.357296Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898668226626212:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:32.357338Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:32.391586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.497615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.568640Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898689701464877:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:32.568730Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:32.568903Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898689701464882:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:32.572724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:32.588139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:47:32.589038Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898689701464884:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:47:32.680090Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898689701464935:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:34.201908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... necting -> Connected TServer::EnableGrpc on GrpcPort 4660, node 2 2025-06-25T14:47:36.835481Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:36.835501Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:36.835509Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:36.835607Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10323 2025-06-25T14:47:37.325129Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10323 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:37.646069Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:37.663369Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:47:37.757916Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:37.944037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:38.029478Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:40.656765Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898726031390717:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:40.656846Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:40.746691Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.793853Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.838377Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.882471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.922377Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.994465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:41.053100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:41.168680Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898730326358666:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:41.168764Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:41.168991Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898730326358671:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:41.178203Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:41.200498Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:47:41.204603Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898730326358673:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:41.272542Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898730326358724:3409] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:41.277340Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898708851519947:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:41.277469Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:42.875720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:42.939554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:42.986214Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:43.040422Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::FiveWayJoin+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderLookupBug [GOOD] Test command err: Trying to start YDB, gRPC: 23793, MsgBus: 2454 2025-06-25T14:46:52.718464Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898520489313877:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:52.718865Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e23/r3tmp/tmpdQRvet/pdisk_1.dat 2025-06-25T14:46:53.354048Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:53.354129Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:53.356872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:53.368405Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898520489313694:2080] 1750862812657982 != 1750862812657985 2025-06-25T14:46:53.384501Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23793, node 1 2025-06-25T14:46:53.579757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:53.579776Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:53.579783Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:53.579913Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:53.639457Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2454 TClient is connected to server localhost:2454 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:54.389325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:54.448566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:46:56.666888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898537669183523:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:56.667044Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:56.667451Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898537669183535:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:56.671450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:56.685942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:46:56.686190Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898537669183537:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:46:56.763932Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898537669183590:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:57.292667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:57.491470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:57.536808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:57.581328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:57.640897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:57.684874Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898520489313877:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:57.684951Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:57.910284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:57.977847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:58.023013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:58.057032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:58.106909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:58.166135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:58.240044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:58.292575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation ... 
0Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:35.963844Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:35.964985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:35.970752Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:35.971195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038575;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:35.973780Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:35.974343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:35.980166Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038575;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:35.983161Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:35.983727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:35.984826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:35.993381Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:35.993915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:35.996624Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:35.997035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:36.002512Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.003065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:36.006145Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.006693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:36.011866Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.015296Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.015901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:36.016670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:36.025200Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.025661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:36.028902Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.029363Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:36.034133Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.036107Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.036668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:36.040891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:36.045663Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.046183Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:36.049329Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.049781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:36.058313Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.060714Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.115242Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038492;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:36.122014Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038492;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:47:36.179717Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrwxxw45kmv5d1j3tvxe01", SessionId: ydb://session/3?node_id=1&id=ODI2Yzg4MmMtYjhmNTg0NmItZjVjN2RmZWItZDYyYjUyMDc=, Slow query, duration: 34.870730s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:36.493675Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:47:36.494187Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:47:36.495168Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519898567733961092:2885];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-25T14:47:36.495615Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716;
:3:9: Warning: Symbol $limit is not used, code: 4527
:2:9: Warning: Symbol $browserGroup is not used, code: 4527
:1:9: Warning: Symbol $quotaName is not used, code: 4527
:4:9: Warning: Symbol $offset is not used, code: 4527
:3:9: Warning: Symbol $limit is not used, code: 4527
:2:9: Warning: Symbol $browserGroup is not used, code: 4527
:1:9: Warning: Symbol $quotaName is not used, code: 4527
:4:9: Warning: Symbol $offset is not used, code: 4527 >> KqpIndexLookupJoin::LeftSemiJoinWithDuplicatesInRightTable+StreamLookupJoin [GOOD] >> KqpJoinOrder::TPCHRandomJoinViewJustWorks+ColumnStore [GOOD] >> KqpIndexLookupJoin::LeftJoinSkipNullFilter+StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftSemiJoinWithDuplicatesInRightTable+StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 15094, MsgBus: 21802 2025-06-25T14:47:30.936443Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898682473986875:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:30.983577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dcf/r3tmp/tmpYeqYaM/pdisk_1.dat 2025-06-25T14:47:31.594652Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:31.608863Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:31.613400Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:31.625344Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898682473986848:2080] 1750862850925061 != 1750862850925064 2025-06-25T14:47:31.638120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15094, node 1 2025-06-25T14:47:32.050045Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:47:32.050680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:32.050690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:32.050698Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:32.050809Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21802 TClient is connected to server localhost:21802 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:33.199410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:33.215812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:33.223344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:33.430136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:33.695835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:33.775535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:35.668350Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898703948824971:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:35.668607Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:35.944091Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898682473986875:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:35.944152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:36.167095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:36.213418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:36.294800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:36.345154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:36.394137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:36.465182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:36.551831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:36.632435Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898708243792933:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:36.632506Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:36.632793Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898708243792938:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:36.646720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:36.660272Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898708243792940:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:36.740797Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898708243792994:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:37.758589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... files were not loaded 2025-06-25T14:47:41.085820Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519898727146941434:2080] 1750862860781510 != 1750862860781513 2025-06-25T14:47:41.089075Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:41.089178Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:41.090572Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3384, node 2 2025-06-25T14:47:41.261023Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:41.261043Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:41.261051Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:41.261187Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23232 2025-06-25T14:47:41.816533Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23232 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:47:42.137339Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:42.148909Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:47:42.174928Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:42.263705Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:42.483623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:42.616147Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:45.784561Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898727146941456:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:45.834756Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:46.405203Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898752916746835:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:46.405306Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:46.444022Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:46.497175Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:46.584914Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:46.653627Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:46.706695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:46.762901Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:46.842904Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:46.936467Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898752916747501:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:46.936556Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:46.936863Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898752916747506:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:46.945318Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:46.962747Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:47:46.963616Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898752916747508:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:47:47.059297Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898757211714855:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:48.186106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:48.242773Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::TPCDS90-ColumnStore >> KqpJoin::JoinAggregateSingleRow ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCHRandomJoinViewJustWorks+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 24549, MsgBus: 63277 2025-06-25T14:46:02.935721Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898305218102451:2165];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:02.942053Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e66/r3tmp/tmpzZj8eC/pdisk_1.dat 2025-06-25T14:46:03.315831Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898305218102311:2080] 1750862762878214 != 1750862762878217 2025-06-25T14:46:03.342596Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24549, node 1 2025-06-25T14:46:03.404607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:03.404703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:03.409083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:03.516798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:03.516817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:03.516831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:03.516922Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63277 2025-06-25T14:46:03.924489Z node 1 :TX_CONVEYOR ERROR: 
log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63277 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:04.331526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:04.354566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:06.440278Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898322397972141:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:06.440431Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:06.440728Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898322397972153:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:06.444551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:06.457768Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898322397972155:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:06.519881Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898322397972206:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:06.849533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:46:07.125016Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898326692939752:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:07.125204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898326692939752:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:07.125433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898326692939752:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:07.125577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898326692939752:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:07.125679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898326692939752:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:07.125776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898326692939752:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:07.125887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898326692939752:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:07.126109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898326692939752:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:07.126216Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898326692939752:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:46:07.126309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037902;self_id=[1:7519898326692939752:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:46:07.126440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898326692939752:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:46:07.143549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898326692939745:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:07.143599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898326692939745:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:07.143790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898326692939745:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:07.143903Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898326692939745:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:07.143998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898326692939745:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:07.144083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898326692939745:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:07.144160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898326692939745:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:07.144269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898326692939745:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:07.148708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898326692939745:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunk ... 
64540Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.569923Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.570473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.571074Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.571614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.576874Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.577505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.580673Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.581201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.586645Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.587229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.591763Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.592190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.592949Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.593442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.597340Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.597881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.604653Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.605098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.609863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.610314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.614462Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.615038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.617254Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.617670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.621895Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.622471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.627425Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.627882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.632273Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.635417Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.636606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.640879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.646456Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.648007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.649444Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.650681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.656137Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.661199Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:35.931518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:35.947566Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:36.036210Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrws7754m8pg6k8mf6zfyn", SessionId: ydb://session/3?node_id=1&id=NjFkMTQ3YWUtYTA1MjQ0NDUtOGFiNmJkNzctNDVjNjBjZDk=, Slow query, duration: 39.548662s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:36.327101Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:36.327646Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:36.328162Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519898550031274051:6848];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-25T14:47:36.328619Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::AllowJoinsForComplexPredicates-StreamLookup [GOOD] >> KqpJoin::ComplexJoin >> KqpJoinOrder::TPCDS88+ColumnStore >> KqpIndexLookupJoin::InnerJoinCustomColumnOrder-StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftJoinOnlyRightColumn-StreamLookup [GOOD] >> ExternalIndex::Simple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::InnerJoinCustomColumnOrder-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 13427, MsgBus: 10610 2025-06-25T14:47:37.533355Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898712816131656:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:37.594106Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dc8/r3tmp/tmpz4yHyj/pdisk_1.dat 2025-06-25T14:47:38.236266Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:38.240453Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898712816131626:2080] 1750862857510092 != 1750862857510095 2025-06-25T14:47:38.253009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:38.253089Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:38.258911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13427, node 1 2025-06-25T14:47:38.526370Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-25T14:47:38.526390Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:38.526401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:38.526512Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:38.600426Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10610 TClient is connected to server localhost:10610 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:39.611081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:39.634699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:39.647370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:39.813470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:40.119616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:40.272547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:42.536405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898712816131656:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:42.536494Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:42.773180Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898734290969748:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:42.773291Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:43.254285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:43.301589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:43.363224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:43.443678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:43.497639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:43.542028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:43.603046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:43.665701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898738585937706:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:43.665776Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:43.666015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898738585937711:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:43.669824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:43.682385Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898738585937713:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:43.752634Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898738585937766:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:45.216689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:48.421106Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9974 2025-06-25T14:47:48.864666Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9974 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:47:49.166818Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:47:49.176373Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:49.191804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:49.292380Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:49.587506Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:49.723935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:51.905531Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898774560468279:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:51.905605Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:52.014006Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:52.073453Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:52.126846Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:52.178116Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:52.250082Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:52.311728Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:52.389643Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:52.489650Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898778855436240:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:52.489803Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:52.490054Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898778855436245:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:52.494033Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:52.509136Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898778855436247:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:52.600064Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898778855436298:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:53.844396Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:53.912849Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:53.983040Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:54.043966Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:54.099228Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:54.182273Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftJoinOnlyRightColumn-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 64421, MsgBus: 2286 2025-06-25T14:47:36.990399Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898709565111898:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:36.990484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dca/r3tmp/tmppLyk2J/pdisk_1.dat 2025-06-25T14:47:37.607122Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898709565111714:2080] 1750862856849874 != 1750862856849877 2025-06-25T14:47:37.638791Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:37.648581Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:37.648681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:37.725813Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64421, node 1 2025-06-25T14:47:37.931489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:37.931518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:37.931525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:37.931624Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:38.016753Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2286 TClient is connected to server localhost:2286 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:39.000936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:47:39.062362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:39.270378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:39.554325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:39.691762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:41.782167Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898731039949848:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:41.782261Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:41.946385Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898709565111898:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:41.946450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:42.164112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:42.248901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:42.280792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:42.350174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:42.434162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:42.477966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:42.522018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:42.625727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898735334917807:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:42.625800Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:42.626094Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898735334917812:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:42.629764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:42.643530Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898735334917814:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:42.732071Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898735334917866:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:43.813744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:43.851793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part ... on TClient is connected to server localhost:63584 2025-06-25T14:47:47.684502Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63584 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:47.886261Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:47.896426Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:47.913128Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:48.040345Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:48.230123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:48.313389Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:51.196441Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898773902940007:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:51.196530Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:51.314016Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:51.477261Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:51.558323Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:51.619959Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:51.675750Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:51.761588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:51.839823Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:51.932438Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898773902940667:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:51.932514Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:51.932794Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898773902940672:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:51.936551Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:51.952649Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:47:51.953060Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898773902940674:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:52.010870Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898778197908022:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:53.414480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:53.464820Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:53.516391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:53.572674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:53.624727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:53.658372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::FiveWayJoinWithConstantFold+ColumnStore [GOOD] >> KqpJoinOrder::SortingsWithLookupJoin3-RemoveLimitOperator [GOOD] >> KqpIndexLookupJoin::LeftJoinSkipNullFilter+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftJoinSkipNullFilter-StreamLookup >> KqpJoinOrder::FiveWayJoinWithComplexPreds2-ColumnStore >> KqpFlipJoin::Right_3 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ext_index/ut/unittest >> ExternalIndex::Simple [GOOD] Test command err: 2025-06-25T14:43:05.070098Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:109:2155], 
Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:43:05.070541Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:43:05.070667Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:43:05.070796Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/cs_index/external;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001212/r3tmp/tmpfYOMIs/pdisk_1.dat 2025-06-25T14:43:05.473458Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 15979, node 1 TClient is connected to server localhost:20449 2025-06-25T14:43:06.026541Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [1:62:2109] Handle TEvGetProxyServicesRequest 2025-06-25T14:43:06.027051Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [1:62:2109] Handle TEvGetProxyServicesRequest 2025-06-25T14:43:06.037860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:43:06.086527Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:06.093410Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:06.093474Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:06.093500Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:06.094736Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:06.095141Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750862582488775 != 1750862582488779 2025-06-25T14:43:06.141266Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:62:2109] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:43:06.142262Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:43:06.143845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:06.143962Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:06.155250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:43:06.272337Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:62:2109] Handle TEvProposeTransaction 
2025-06-25T14:43:06.272407Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:62:2109] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:43:06.273240Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:62:2109] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:640:2533] 2025-06-25T14:43:06.347245Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:640:2533] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "olapStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "resource_id" Type: "Utf8" DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Name: "uid" Type: "Utf8" NotNull: true StorageId: "__MEMORY" } Columns { Name: "level" Type: "Int32" } Columns { Name: "message" Type: "Utf8" StorageId: "__MEMORY" } Columns { Name: "json_payload" Type: "JsonDocument" } KeyColumnNames: "timestamp" KeyColumnNames: "uid" } } } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T14:43:06.347344Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:640:2533] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:43:06.347905Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:640:2533] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:43:06.348045Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:640:2533] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:43:06.348344Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:640:2533] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:43:06.348620Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:640:2533] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:43:06.348712Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:640:2533] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:43:06.353037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) 2025-06-25T14:43:06.354088Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:640:2533] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:43:06.356218Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:640:2533] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:43:06.356285Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:640:2533] txid# 281474976715657 SEND to# [1:639:2532] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T14:43:06.457474Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:696:2579];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:43:06.486681Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:43:06.487063Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037888 2025-06-25T14:43:06.496111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:43:06.496561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:43:06.496872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:43:06.497012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:43:06.497129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:43:06.497288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:43:06.497394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:43:06.497499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:43:06.497631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:43:06.497735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:43:06.497854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:43:06.521017Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 72075186224037888 2025-06-25T14:43:06.521517Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:43:06.521607Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:43:06.521804Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:43:06.521950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:43:06.522051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:43:06.522096Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;e ... '"instant" '"modificationId")) (let $3 (Uint64 '"1001")) (let $4 (KqpRowsSourceSettings $1 $2 '('('"ItemsLimit" $3) '('"Sequential" '1)) (Void) '())) (let $5 (OptionalType (DataType 'Utf8))) (let $6 (StructType '('"componentId" $5) '('"instant" (OptionalType (DataType 'Uint32))) '('"modificationId" $5))) (let $7 '('('"_logical_id" '338) '('"_id" '"7fe80a51-18324215-e10f0ffe-99717c6e") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $4)) (lambda '($12) (block '( (let $13 (lambda '($14) (Member $14 '"componentId") (Member $14 '"instant") (Member $14 '"modificationId"))) (return (FromFlow (ExpandMap (Take (ToFlow $12) $3) $13))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($15) (FromFlow (NarrowMap (Take (ToFlow $15) $3) (lambda '($16 $17 $18) (AsStruct '('"componentId" $16) '('"instant" $17) '('"modificationId" $18)))))) '('('"_logical_id" '351) '('"_id" '"1648ae03-3f923941-b185c661-7db1078b")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($8 $10) '($11) '() '('('"type" '"data")))) '((KqpTxResultBinding (ListType $6) '"0" '"0")) '('('"type" '"data_query")))) ) 2025-06-25T14:47:56.823686Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.823 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:466: Register async execution for node #268 2025-06-25T14:47:56.823812Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.823 TRACE ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:387: {3}, callable #277 2025-06-25T14:47:56.823899Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.823 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:577: Node #277 finished execution 2025-06-25T14:47:56.823956Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.823 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:594: Node #277 created 0 trackable nodes: 
2025-06-25T14:47:56.824033Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.824 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:87: Finish, output #280, status: Async 2025-06-25T14:47:56.824709Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.824 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:133: Completed async execution for node #268 2025-06-25T14:47:56.824786Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.824 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:153: State is ExecutionRequired after apply async changes for node #268 2025-06-25T14:47:56.824849Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.824 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:59: Begin, root #280 2025-06-25T14:47:56.824903Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.824 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:72: Collect unused nodes for root #280, status: Ok 2025-06-25T14:47:56.824953Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.824 TRACE ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:387: {0}, callable #280 2025-06-25T14:47:56.825006Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.824 TRACE ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:387: {1}, callable #279 2025-06-25T14:47:56.825059Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.825 TRACE ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:387: {2}, callable #278 2025-06-25T14:47:56.825148Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.825 TRACE ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:387: {3}, callable #275 2025-06-25T14:47:56.825202Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.825 TRACE ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:387: {4}, callable #268 2025-06-25T14:47:56.825347Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.825 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:577: Node #268 finished execution 2025-06-25T14:47:56.825424Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.825 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:594: Node #268 created 0 trackable nodes: 2025-06-25T14:47:56.825480Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.825 TRACE ydb-services-ext_index-ut(pid=312769, 
tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:387: {3}, callable #275 2025-06-25T14:47:56.825536Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.825 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:577: Node #275 finished execution 2025-06-25T14:47:56.825602Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.825 TRACE ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:387: {2}, callable #278 2025-06-25T14:47:56.825807Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.825 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:577: Node #278 finished execution 2025-06-25T14:47:56.825865Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.825 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:594: Node #278 created 0 trackable nodes: 2025-06-25T14:47:56.825921Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.825 TRACE ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:387: {1}, callable #279 2025-06-25T14:47:56.825991Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.825 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:577: Node #279 finished execution 2025-06-25T14:47:56.826047Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.826 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:594: Node #279 created 0 trackable nodes: 2025-06-25T14:47:56.826109Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.826 TRACE ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:387: {0}, callable #280 2025-06-25T14:47:56.826172Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.826 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:577: Node #280 finished execution 2025-06-25T14:47:56.826226Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.826 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:594: Node #280 created 0 trackable nodes: 2025-06-25T14:47:56.826291Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.826 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:87: Finish, output #280, status: Ok 2025-06-25T14:47:56.826355Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jykrykpg2pr3jeb17ymjsk9k, SessionId: CompileActor 2025-06-25 14:47:56.826 INFO ydb-services-ext_index-ut(pid=312769, tid=0x00007F173DB01D40) [core exec] yql_execution.cpp:93: Creating finalizing transformer, output #280 2025-06-25T14:47:56.847136Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [1:62:2109] Handle TEvExecuteKqpTransaction 
2025-06-25T14:47:56.847197Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [1:62:2109] TxId# 281474976716232 ProcessProposeKqpTransaction 2025-06-25T14:47:56.871156Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [1:62:2109] Handle TEvExecuteKqpTransaction 2025-06-25T14:47:56.871218Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [1:62:2109] TxId# 281474976716233 ProcessProposeKqpTransaction 2025-06-25T14:47:57.045845Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;parent=[1:696:2579];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:47:57.046079Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;parent=[1:701:2581];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:47:57.046134Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;parent=[1:704:2583];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:47:57.046186Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;parent=[1:709:2585];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:47:57.058428Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:696:2579];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037888; 2025-06-25T14:47:57.058561Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:701:2581];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037889; 2025-06-25T14:47:57.058621Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;self_id=[1:704:2583];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037891; 2025-06-25T14:47:57.058679Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:709:2585];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037890; REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 E0625 14:47:57.914826834 312769 backup_poller.cc:113] run_poller: UNKNOWN:Timer list shutdown {created_time:"2025-06-25T14:47:57.914595977+00:00"} >> KqpJoinOrder::FourWayJoinLeftFirst-ColumnStore [GOOD] |85.7%| [TA] $(B)/ydb/services/ext_index/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin3-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 32748, MsgBus: 2318 2025-06-25T14:47:09.392741Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898592880787151:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:09.392780Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e0e/r3tmp/tmpSbMwCv/pdisk_1.dat 2025-06-25T14:47:10.073203Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:10.073287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:10.075982Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:10.117274Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32748, node 1 2025-06-25T14:47:10.324936Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:10.325014Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:10.325023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:10.325129Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:10.386808Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2318 TClient is connected to server localhost:2318 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:11.329629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:47:11.364676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:13.623291Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898610060656937:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:13.623405Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:13.623700Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898610060656949:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:13.627276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:13.640783Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898610060656951:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:13.736149Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898610060657002:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:14.019599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.223102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.303577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.363154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.393210Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898592880787151:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:14.393264Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:14.396284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.561377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.618133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.662660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.721370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.769148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.810399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.854456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.888736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:15.661048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemesha ... 
95666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.106980Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.107441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.112117Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.112573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038458;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.116564Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.117093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038492;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.121337Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038458;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.121914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038498;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.125676Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038492;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.126238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.134599Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038498;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.135056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038570;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.138991Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.139425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.147990Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038570;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.152213Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.156579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038504;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.160716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.169647Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038504;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.170091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038516;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.173515Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.173940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.182975Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038516;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.187201Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.187622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038474;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.204699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038474;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.205136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.213964Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.214528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.222556Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.223134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.231952Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.236663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.237339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038431;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.251592Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038431;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.252033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038520;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.254167Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.254653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:52.259306Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.265239Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038520;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:52.412532Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrxedn7618g4hhwz681v41", SessionId: ydb://session/3?node_id=1&id=MmQzMDE3YTQtMmIyYzI2Y2EtODUyZmY5ZGUtYTFmOGI3ZTk=, Slow query, duration: 34.214457s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:52.763273Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:52.763717Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:52.764256Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519898730319765759:5291];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-25T14:47:52.764677Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; |85.7%| [TA] {RESULT} $(B)/ydb/services/ext_index/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FourWayJoinLeftFirst-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 5661, MsgBus: 23812 2025-06-25T14:47:05.020397Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898572634078178:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:05.020454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e16/r3tmp/tmpfA2JLI/pdisk_1.dat 2025-06-25T14:47:05.744661Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5661, node 1 2025-06-25T14:47:05.892823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:05.892920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:05.894463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:06.040851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:06.040869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:06.040876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:06.040976Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:06.084424Z node 1 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23812 TClient is connected to server localhost:23812 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:06.885084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:06.905174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:09.011580Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898594108915265:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.011692Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.012112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898594108915277:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:09.015835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:09.032959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:47:09.034304Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898594108915279:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:09.096602Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898594108915332:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:09.425855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.583590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.617876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.651480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.686039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.903579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.939121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:09.975469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.022611Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898572634078178:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:10.022684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:10.053006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.129672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.170946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.213277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:10.270884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:11.112098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESc ... 
25034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.028066Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.030033Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.030583Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.032713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.035386Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.035964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.040740Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.041185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.045740Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.046315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.050184Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.050647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.051138Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.052840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.058962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.059545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.062319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.062880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.072172Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.073253Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.073793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.076940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.083384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.083871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.086272Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.086896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.097468Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.098005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.092301Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.100925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.106851Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.107452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.109753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.110603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.115434Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.119167Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038601;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.119694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.121264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:50.124581Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.130010Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:50.337560Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrx9rc7pxcpvhpct5pnkm6", SessionId: ydb://session/3?node_id=1&id=MmE2MjRjNTUtY2Y5MzFmOTAtNWU4MGJjYy1kMmQ2OGFhZQ==, Slow query, duration: 36.915025s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:50.771163Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:50.771655Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:50.772275Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519898735842861848:5503];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-25T14:47:50.772747Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithConstantFold+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 20315, MsgBus: 8374 2025-06-25T14:46:10.796305Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898338713850697:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:10.796520Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e56/r3tmp/tmp9efE87/pdisk_1.dat 2025-06-25T14:46:11.424064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:11.424178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:11.430077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:11.451860Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:11.456545Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898338713850511:2080] 1750862770757234 != 1750862770757237 TServer::EnableGrpc on GrpcPort 20315, node 1 2025-06-25T14:46:11.681373Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:11.681391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:11.681398Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:11.681496Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:11.796471Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8374 TClient is connected to server localhost:8374 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:12.579474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:12.589517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:15.042220Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898360188687634:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.042326Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.042570Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898360188687646:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:15.046445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:15.065894Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898360188687648:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:15.156137Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898360188687699:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:15.603319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:46:15.796704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898338713850697:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:15.796794Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:15.919983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898360188687924:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:15.920231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898360188687924:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:15.921190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898360188687924:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:15.921334Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898360188687924:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:15.921468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898360188687924:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:15.921590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898360188687924:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:15.921688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898360188687924:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:15.921805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898360188687924:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:15.921932Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898360188687924:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:46:15.922052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898360188687924:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:46:15.922177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898360188687924:2313];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:46:15.930884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898360188688019:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:15.931040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898360188688019:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:15.931271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898360188688019:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:15.931380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898360188688019:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:15.931489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898360188688019:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:15.931609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898360188688019:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:15.931712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898360188688019:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:15.931803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898360188688019:2322];tablet_ ... 
25652Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039326;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.926086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.926087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.931071Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.931110Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.931604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.931607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.936714Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.937299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.937339Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.937818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.942712Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.943233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.943460Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.943937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.948270Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.948925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.950107Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.950650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039332;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.954365Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.954887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.957672Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039332;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.958197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.959992Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.961569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.963308Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039320;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.963810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039316;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.967259Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.967887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.969274Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039316;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.969755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039354;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.973131Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.973645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.974730Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039354;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.975189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039322;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.980050Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039322;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.981550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:37.985962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:37.986079Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:38.119220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:38.124206Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:38.212698Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrwyac47th1k2hzrk2q8em", SessionId: ydb://session/3?node_id=1&id=ZTM0MjUwZmQtNjYxYjgzODItODZiYWMwMzctMTAzMzdjODY=, Slow query, duration: 36.503852s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:38.572304Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:38.572834Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:38.573539Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519898635066643509:8794];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-25T14:47:38.573987Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::JoinAggregateSingleRow [GOOD] >> KqpJoin::JoinConvert >> KqpJoinOrder::ShuffleEliminationTpcdsMapJoinBug >> KqpJoinOrder::SortingsComplexOrderBy+RemoveLimitOperator [GOOD] >> KqpIndexLookupJoin::JoinWithSubquery+StreamLookup >> KqpJoinOrder::TestJoinHint2+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsComplexOrderBy+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 29222, MsgBus: 24083 2025-06-25T14:47:09.969078Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898592976893469:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:09.991046Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e0a/r3tmp/tmpnJwKkt/pdisk_1.dat 2025-06-25T14:47:10.586759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:10.586824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:10.590005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:10.631624Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:10.640513Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898592976893263:2080] 1750862829873971 != 1750862829873974 TServer::EnableGrpc on GrpcPort 29222, node 1 2025-06-25T14:47:10.836766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:10.836785Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:10.836792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:10.836883Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:10.964626Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24083 TClient is connected to server localhost:24083 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:11.852394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:11.880699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:14.036636Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898614451730392:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:14.036778Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:14.037086Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898614451730404:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:14.040952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:14.055076Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898614451730406:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:14.157357Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898614451730457:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:14.596460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.718266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.802323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.856887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.897525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:14.964630Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898592976893469:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:14.964703Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:15.116347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:15.197345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:15.242498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:15.279750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:15.351306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:15.396342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:15.434841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:15.487150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:16.294087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
38361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038504;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.747516Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038504;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.749510Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.749998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.753179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.767633Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.770906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.771422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.774726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038482;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.780282Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038482;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.785248Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.785764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038490;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.789054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.796516Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038490;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.797087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.801546Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.802031Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038458;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.807829Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038458;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.809814Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.810641Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.813288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.819987Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.822363Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.823043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038431;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.825427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.834760Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.835466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.837132Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038431;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.837584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038564;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.846909Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038564;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.847640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.849937Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.850424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038429;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.856164Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038429;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.861321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038445;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.871155Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038445;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.871689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.885876Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.886424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:54.896227Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:54.973630Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:55.235161Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrxf053hn3pa3t4v0epq0v", SessionId: ydb://session/3?node_id=1&id=ZWYxNWUyOGYtNGE2NjNiMGQtMzM1MmE5MjktZWVmMThlZjg=, Slow query, duration: 36.445661s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:55.475755Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:55.476229Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:55.476890Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519898743300774935:5423];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-25T14:47:55.477319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::CanonizedJoinOrderTPCH5 >> KqpJoin::ComplexJoin [GOOD] >> KqpJoinOrder::TPCDS96+ColumnStore [GOOD] >> KqpJoinOrder::OltpJoinTypeHintCBOTurnOFF ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::ComplexJoin [GOOD] Test command err: Trying to start YDB, gRPC: 28043, MsgBus: 28571 2025-06-25T14:47:42.665560Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898734148539634:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:42.665987Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dc2/r3tmp/tmplAsSIE/pdisk_1.dat 2025-06-25T14:47:43.374580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:43.374696Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:43.378399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:43.427962Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:43.432075Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898734148539428:2080] 1750862862583353 != 1750862862583356 TServer::EnableGrpc on GrpcPort 28043, node 1 2025-06-25T14:47:43.587566Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:47:43.632862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-25T14:47:43.632882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:43.632889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:43.633017Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28571 TClient is connected to server localhost:28571 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:44.702830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:44.732757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:44.754943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:44.950196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:45.196177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:45.313995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:47.261036Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898755623377568:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:47.261129Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:47.648444Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898734148539634:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:47.648532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:47.680256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.724916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.804369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.844686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.925752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.985598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:48.052882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:48.192138Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898759918345526:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:48.192244Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:48.196425Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898759918345531:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:48.205103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:48.239585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:47:48.239902Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898759918345533:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:48.316901Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898759918345586:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 3054, MsgBus: 14603 2025-06-25T14:47:56.238893Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54 ... nt is connected to server localhost:14603 2025-06-25T14:47:57.234390Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14603 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:47:57.521487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:47:57.526784Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:57.542381Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:57.639038Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:57.841846Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:57.935117Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:00.448801Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898812233983195:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:00.448872Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:00.515112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:00.560935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:00.613778Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:00.694141Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:00.746717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:00.830568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:00.887495Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:00.967751Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898812233983853:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:00.967850Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:00.968207Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898812233983858:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:00.972011Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:00.986170Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:48:00.986404Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898812233983860:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:01.048757Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898816528951207:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:01.228459Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898795054112521:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:01.228519Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:02.247513Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:02.294771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:02.338501Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:02.418609Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:02.475053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::SortingsSimpleOrderByAliasIndexDesc-RemoveLimitOperator [GOOD] >> YdbOlapStore::LogWithUnionAllDescending [GOOD] >> YdbOlapStore::LogTsRangeDescending >> KqpJoinOrder::TPCDS61-ColumnStore [GOOD] >> KqpIndexLookupJoin::LeftJoinSkipNullFilter-StreamLookup [GOOD] >> KqpFlipJoin::Right_3 [GOOD] >> KqpIndexLookupJoin::CheckAllKeyTypesCast >> KqpFlipJoin::RightOnly_1 ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> ttl_delete_s3.py::TestDeleteS3Ttl::test_delete_s3_tiering 2025-06-25 14:47:49,999 WARNING 
devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 14:47:50,698 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 246683 729M 719M 651M ydb-tests-olap-ttl_tiering --basetemp /home/runner/.ya/build/build_root/yft8/000cfc/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modu 259703 3.2G 3.1G 2.6G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/yft8/000cfc/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3 262227 540M 535M 522M └─ moto_server s3 --port 10665 Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol reports.append(call_and_report(item, "call", log)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File 
"contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call item.runtest() File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call pyfuncitem.retval = testfunction(**testargs) File "ydb/tests/olap/ttl_tiering/ttl_delete_s3.py", line 255, in test_delete_s3_tiering self.ydb_client.query(""" File "ydb/tests/olap/common/ydb_client.py", line 24, in query return self.session_pool.execute_with_retries(statement) File "contrib/python/ydb/py3/ydb/query/pool.py", line 204, in execute_with_retries return retry_operation_sync(wrapped_callee, retry_settings) File "contrib/python/ydb/py3/ydb/retries.py", line 133, in retry_operation_sync for next_opt in opt_generator: File "contrib/python/ydb/py3/ydb/retries.py", line 94, in retry_operation_impl result = YdbRetryOperationFinalResult(callee(*args, **kwargs)) File "contrib/python/ydb/py3/ydb/query/pool.py", line 202, in wrapped_callee return [result_set for result_set in it] File "contrib/python/ydb/py3/ydb/_utilities.py", line 173, in __next__ return self._next() File "contrib/python/ydb/py3/ydb/_utilities.py", line 164, in _next res = self.wrapper(next(self.it)) File "contrib/python/grpcio/py3/grpc/_channel.py", line 475, in __next__ return self._next() File "contrib/python/grpcio/py3/grpc/_channel.py", line 872, in _next _common.wait(self._state.condition.wait, _response_ready) File "contrib/python/grpcio/py3/grpc/_common.py", line 150, in wait _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb) File "contrib/python/grpcio/py3/grpc/_common.py", line 112, in _wait_once wait_fn(timeout=timeout) File "contrib/tools/python3/Lib/threading.py", line 359, in wait gotit = waiter.acquire(True, timeout) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...d_root/yft8/000cfc/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/000cfc/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/000cfc', '--source-root', '/home/runner/.ya/build/build_root/yft8/000cfc/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/000cfc/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/testing_out_stuff', '--durations', '0', '--project-path', 
'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '3', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...d_root/yft8/000cfc/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/000cfc/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/000cfc', '--source-root', '/home/runner/.ya/build/build_root/yft8/000cfc/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/000cfc/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk3/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '3', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {}) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsSimpleOrderByAliasIndexDesc-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 21398, MsgBus: 14380 2025-06-25T14:47:19.255698Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898636314418657:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:19.272156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/000df8/r3tmp/tmp2sXBpH/pdisk_1.dat 2025-06-25T14:47:19.917093Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:19.917184Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:19.927637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:19.976944Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:19.980421Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898636314418448:2080] 1750862839154927 != 1750862839154930 TServer::EnableGrpc on GrpcPort 21398, node 1 2025-06-25T14:47:20.155962Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:47:20.197867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:20.198930Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:20.198946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:20.199058Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14380 TClient is connected to server localhost:14380 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:21.395051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:21.414357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:23.550634Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898653494288280:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:23.550708Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:23.551062Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898653494288292:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:23.555363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:23.570476Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898653494288294:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:23.661849Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898653494288345:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:23.951048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.058143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.085472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.116629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.148580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.252806Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898636314418657:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:24.252854Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:24.332955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.375139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.442850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.474040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.499727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.567607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.600988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:24.679836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:25.218955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.265973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038478;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.269451Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.270001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038508;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.274617Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038478;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.275171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.278634Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038508;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.279252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038560;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.283948Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.287989Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038560;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.288888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038570;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.292672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.297112Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.297708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.302643Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.303016Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038570;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.303221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.303448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.311769Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.312427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.317319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038435;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.317913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038520;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.321058Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.321848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038456;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.326818Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038456;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.327466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.330820Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038520;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.331892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.340790Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.341392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.344921Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.345516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038538;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.350587Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.351258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038516;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.354390Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038538;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.355216Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038433;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.363919Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038433;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.364624Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038516;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.365304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:03.374174Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:03.596648Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrxqfafbcv10b05vy6erd3", SessionId: ydb://session/3?node_id=1&id=ODFlMzlmNDEtZjAwNTVlNmEtNTgzZmFlMjUtNjJkYzQyODg=, Slow query, duration: 36.129227s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:03.913524Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:03.914020Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:03.914452Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519898786638300790:5512];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-25T14:48:03.914836Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
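For readability, the DDL quoted in the KQP_SLOW_LOG record above (the same statement text is repeated verbatim in the TPCDS96 slow-query record further down), with the escaped \n sequences expanded into plain YQL. This is only a transcription of the logged query text for easier reading, not a change to what the test actually submitted:

CREATE TABLE t1 (
  id1 Int32 NOT NULL,
  PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
  id2 Int64 NOT NULL,
  t1_id1 Int64 NOT NULL,
  -- random_field2 Int32
  PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
  id3 Int16 NOT NULL,
  -- random_field3 Int32
  PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);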
: Warning: Execution, code: 1060
:2:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503
: Warning: Execution, code: 1060
:2:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftJoinSkipNullFilter-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 24364, MsgBus: 30539 2025-06-25T14:47:50.660929Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898769410905900:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:50.661425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dbd/r3tmp/tmpzPPILp/pdisk_1.dat 2025-06-25T14:47:51.317576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:51.317683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:51.324423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:51.348850Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24364, node 1 2025-06-25T14:47:51.636651Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:51.636672Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:51.636679Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:51.636787Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:51.684467Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30539 TClient is connected to server localhost:30539 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:47:52.625575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:52.654670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:52.846738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:53.088296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:53.189819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:55.102383Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898790885743952:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:55.102479Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:55.416537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:55.492267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:55.541671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:55.581970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:55.647069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:55.664395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898769410905900:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:55.664466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:55.735902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:55.827909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:55.944701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898790885744612:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:55.944788Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:55.945013Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898790885744617:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:55.953283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:55.976014Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898790885744619:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:56.038897Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898795180711968:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:57.708858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:57.766426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /ho ... PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:01.729571Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:01.735921Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:01.743855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:01.830393Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:48:02.064606Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:02.167919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:05.092578Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898831172287579:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:05.092649Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:05.155066Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:05.194518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:05.277551Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:05.292422Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898809697449493:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:05.292472Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:05.339013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:05.408116Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:05.471446Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:05.674045Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:05.766266Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519898831172288248:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:05.766353Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:05.766562Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898831172288253:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:05.770224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:05.790445Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:48:05.796953Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898831172288255:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:05.867185Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898831172288306:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:07.534644Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:07.591615Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:07.638351Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:07.704755Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:07.739169Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:07.791379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS96+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 9630, MsgBus: 9768 2025-06-25T14:46:13.965565Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898351443958885:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:13.966565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e4f/r3tmp/tmppLPAUz/pdisk_1.dat 2025-06-25T14:46:14.353676Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:14.358798Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898351443958715:2080] 1750862773937901 != 1750862773937904 2025-06-25T14:46:14.367333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:14.367427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 9630, node 1 2025-06-25T14:46:14.375700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:14.511248Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:14.511271Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:14.511277Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:14.511379Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9768 2025-06-25T14:46:14.968438Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9768 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:15.290697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:46:15.303379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:17.334246Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898368623828543:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:17.334376Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:17.334772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898368623828555:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:17.338755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:17.362757Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898368623828557:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:17.452895Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898368623828608:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:17.855333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:46:18.120539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898368623828845:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:18.120723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898368623828845:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:18.120955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898368623828845:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:18.121094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898368623828845:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:18.121230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898368623828845:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:18.121351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898368623828845:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:18.121472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898368623828845:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:18.121583Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898368623828845:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:18.121668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898372918796162:2320];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:18.121687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519898368623828845:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:46:18.121704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898372918796162:2320];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:18.121787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898368623828845:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:46:18.121846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898372918796162:2320];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:18.121877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898368623828845:2309];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:46:18.121950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898372918796162:2320];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:18.122073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898372918796162:2320];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:18.122163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898372918796162:2320];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:18.122267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898372918796162:2320];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:18.122370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898372918796162:2320];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:18.122459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898372918796162:2320];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 20 ... 
76134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.681010Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.682707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.686592Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039348;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.687168Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.749290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.749290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.754427Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.754966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.755936Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.757397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.759332Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.761466Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.762280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.762472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.766559Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.767171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.775376Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.775920Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.779503Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.780069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.780462Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.781004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.785072Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.785582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039350;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.792189Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.792642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.794958Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039350;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.795514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.796590Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.797100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.801853Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.802390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.808037Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.810195Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.810700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.812271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.817982Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.820153Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.821107Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:47:44.829619Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:44.938434Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrx1reec0x1xxcyzpmm26f", SessionId: ydb://session/3?node_id=1&id=NzU0N2E1MTctODJmNWIzOTgtYmYxNGQ2NC0xZWU5Yzc3Nw==, Slow query, duration: 39.707001s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:46.039256Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:46.039854Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:46.040900Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519898639206813269:8170];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-25T14:47:46.041352Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS61-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 31679, MsgBus: 22304 2025-06-25T14:46:47.155572Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898498315841680:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:47.175886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e32/r3tmp/tmpILjTZL/pdisk_1.dat 2025-06-25T14:46:47.878591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:47.878697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:47.890547Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:47.945170Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898498315841482:2080] 1750862807097829 != 1750862807097832 2025-06-25T14:46:47.966684Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31679, node 1 2025-06-25T14:46:48.134699Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:46:48.176133Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:48.176151Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:48.176158Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:48.176260Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22304 TClient is connected to server localhost:22304 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:49.182946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:51.804376Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898515495711324:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:51.804849Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898515495711313:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:51.804948Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:51.808304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:51.822496Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898515495711327:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:51.880371Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898515495711378:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:52.144580Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898498315841680:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:52.144668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:52.203219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.327481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.366550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.428119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.475325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.713438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.791069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.836075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.898921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:52.932144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.005536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.052356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.096857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:46:53.825216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.345560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.350423Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.351030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.354169Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.354682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.361721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.363265Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.363873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.368997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.372598Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.373180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.375276Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.375807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.385027Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.385453Z 
node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.388468Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.389193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.394251Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.395324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.399488Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.400026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.401352Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.401945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.406267Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.406767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038515;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.415171Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038515;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.415736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.419955Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.420515Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.428612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.436988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.474319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038437;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:31.487737Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038437;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:31.599935Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrws112vz2cw4ta9nc1505", SessionId: ydb://session/3?node_id=1&id=NWNmMDQzOTctZGUwZmM4OWEtMzFhYjNmOTMtNWUwY2U4NDQ=, Slow query, duration: 35.309576s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:32.361385Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:32.361857Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:32.362758Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519898558445392840:3207];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-25T14:47:32.363154Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:04.200155Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrybrp07rjb6c17sr7q5fr", SessionId: ydb://session/3?node_id=1&id=NWNmMDQzOTctZGUwZmM4OWEtMzFhYjNmOTMtNWUwY2U4NDQ=, Slow query, duration: 15.953094s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query61.tpl and seed 1930872976\nselect 
promotions,total,cast(promotions as float)/cast(total as float)*100\nfrom\n (select sum(ss_ext_sales_price) promotions\n from store_sales\n cross join store\n cross join promotion\n cross join date_dim\n cross join customer\n cross join customer_address\n cross join item\n where ss_sold_date_sk = d_date_sk\n and ss_store_sk = s_store_sk\n and ss_promo_sk = p_promo_sk\n and ss_customer_sk= c_customer_sk\n and ca_address_sk = c_current_addr_sk\n and ss_item_sk = i_item_sk\n and ca_gmt_offset = -6\n and i_category = 'Sports'\n and (p_channel_dmail = 'Y' or p_channel_email = 'Y' or p_channel_tv = 'Y')\n and s_gmt_offset = -6\n and d_year = 2001\n and d_moy = 12) promotional_sales cross join\n (select sum(ss_ext_sales_price) total\n from store_sales\n cross join store\n cross join date_dim\n cross join customer\n cross join customer_address\n cross join item\n where ss_sold_date_sk = d_date_sk\n and ss_store_sk = s_store_sk\n and ss_customer_sk= c_customer_sk\n and ca_address_sk = c_current_addr_sk\n and ss_item_sk = i_item_sk\n and ca_gmt_offset = -6\n and i_category = 'Sports'\n and s_gmt_offset = -6\n and d_year = 2001\n and d_moy = 12) all_sales\norder by promotions, total\nlimit 100;\n", parameters: 0b >> KqpJoinOrder::SortingsWithLookupJoin2+RemoveLimitOperator >> KqpJoin::JoinDupColumnRight >> KqpJoin::JoinConvert [GOOD] >> KqpIndexLookupJoin::LeftJoinOnlyLeftColumn+StreamLookup >> KqpJoinOrder::TPCDS96-ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH15 >> YdbOlapStore::LogCountByResource [GOOD] >> KqpIndexLookupJoin::JoinWithSubquery+StreamLookup [GOOD] >> KqpIndexLookupJoin::JoinWithSubquery-StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinConvert [GOOD] Test command err: Trying to start YDB, gRPC: 29914, MsgBus: 3296 2025-06-25T14:47:54.045658Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898787541430641:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:54.045991Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dba/r3tmp/tmpacAqwk/pdisk_1.dat 2025-06-25T14:47:54.610978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:54.611098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:54.634126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:54.637836Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:54.644576Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898783246463146:2080] 1750862874002185 != 1750862874002188 TServer::EnableGrpc on GrpcPort 29914, node 1 2025-06-25T14:47:54.904265Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:54.904286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:54.904293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to 
initialize from file: (empty maybe) 2025-06-25T14:47:54.904409Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:55.032497Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3296 TClient is connected to server localhost:3296 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:55.627431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:55.649027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:55.658561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:55.886923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:56.056785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:47:56.145489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:47:58.114056Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898804721301254:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:58.114143Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:58.531234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:58.573764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:58.617543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:58.657544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:58.700146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:58.753602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:58.809663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:58.895214Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898804721301907:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:58.895320Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:58.900750Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898804721301912:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:58.905196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:58.920578Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898804721301914:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:47:58.992188Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898804721301965:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:59.064971Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898787541430641:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:59.065129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:00.302509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... ecting -> Connected TServer::EnableGrpc on GrpcPort 29130, node 2 2025-06-25T14:48:04.658286Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:04.658311Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:04.658321Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:04.658439Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27168 2025-06-25T14:48:05.036608Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27168 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:05.286124Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:48:05.297192Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:05.316303Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:05.444581Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:05.659704Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:48:05.731756Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:08.645902Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898845176928321:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:08.645970Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:08.678123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:08.735538Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:08.785910Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:08.829781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:08.922694Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:08.989808Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:09.048337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:09.095416Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898827997057649:2195];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:09.095531Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:09.133669Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519898849471896275:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:09.133748Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:09.133940Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898849471896280:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:09.137697Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:09.160596Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898849471896282:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:09.235347Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898849471896333:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:10.735404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:10.780060Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:10.818525Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:11.417950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::TPCDS16+ColumnStore >> KqpJoinOrder::ShuffleEliminationManyKeysJoinPredicate [GOOD] >> KqpJoinOrder::SortingsWithLookupJoin4-RemoveLimitOperator [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogCountByResource [GOOD] Test command err: 2025-06-25T14:43:57.646161Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897768930554400:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:43:57.646265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001854/r3tmp/tmpeRaazJ/pdisk_1.dat 2025-06-25T14:43:58.212653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:43:58.212729Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:43:58.215217Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:43:58.237739Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 
18199, node 1 2025-06-25T14:43:58.416924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:43:58.416944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:43:58.416957Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:43:58.417077Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:43:58.656589Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6702 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:43:58.772524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-1" reason: "YELLOW-e9e2-1231c6b1-2" reason: "YELLOW-e9e2-1231c6b1-3" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-1" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 1 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-2" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 2 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 1 host: "::1" port: 12001 } 2025-06-25T14:44:01.996355Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897783257230981:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:01.996399Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:02.038947Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519897788938419955:2180];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:02.040296Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519897790375672404:2158];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001854/r3tmp/tmpKc7LVj/pdisk_1.dat 2025-06-25T14:44:02.256866Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:02.314845Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:44:02.381837Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:02.404797Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:02.404908Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:02.406433Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:02.406505Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:02.410090Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:02.410171Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:02.415252Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:02.416132Z node 4 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-06-25T14:44:02.417150Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:44:02.417505Z node 4 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-25T14:44:02.418244Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20081, node 4 2025-06-25T14:44:02.648436Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:02.648454Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:02.648459Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:02.648570Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:03.026623Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:03.040431Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:03.044417Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8598 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:03.153597Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:44:06.997366Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519897783257230981:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:06.997456Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:07.027525Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519897788938419955:2180];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:07.027598Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:44:07.025373Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519897790375672404:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:07.025442Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/ ... Id: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [47:7519898857685650677:3383], channels: 1 2025-06-25T14:48:11.806246Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [47:7519898857685650674:3094] TxId: 281474976710674. Ctx: { TraceId: 01jykryzpwakngtwc0fd1z7af5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [47:7519898857685650677:3383], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-25T14:48:11.806282Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [47:7519898857685650674:3094] TxId: 281474976710674. Ctx: { TraceId: 01jykryzpwakngtwc0fd1z7af5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [47:7519898857685650677:3383], 2025-06-25T14:48:11.806461Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:357: ActorId: [47:7519898857685650674:3094] TxId: 281474976710674. Ctx: { TraceId: 01jykryzpwakngtwc0fd1z7af5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [47:7519898844800747916:3094], seqNo: 1, nRows: 1 2025-06-25T14:48:11.806603Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [47:7519898857685650674:3094] TxId: 281474976710674. Ctx: { TraceId: 01jykryzpwakngtwc0fd1z7af5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, got execution state from compute actor: [47:7519898857685650677:3383], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 374 Tasks { TaskId: 1 CpuTimeUs: 215 FinishTimeMs: 1750862891805 OutputRows: 1 OutputBytes: 3 ResultRows: 1 ResultBytes: 3 ComputeCpuTimeUs: 69 BuildCpuTimeUs: 146 HostName: "ghrun-kqfvx6aroe" NodeId: 47 CreateTimeMs: 1750862891805 CurrentWaitOutputTimeUs: 42 UpdateTimeMs: 1750862891805 } MaxMemoryUsage: 1048576 } 2025-06-25T14:48:11.806699Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [47:7519898857685650674:3094] TxId: 281474976710674. Ctx: { TraceId: 01jykryzpwakngtwc0fd1z7af5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [47:7519898857685650677:3383], 2025-06-25T14:48:11.806746Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1852: SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, ActorId: [47:7519898844800747916:3094], ActorState: ExecuteState, TraceId: 01jykryzpwakngtwc0fd1z7af5, Forwarded TEvStreamData to [47:7519898844800747914:3093] 2025-06-25T14:48:11.806770Z node 47 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [47:7519898857685650677:3383], TxId: 281474976710674, task: 1. Ctx: { SessionId : ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=. CustomerSuppliedId : . TraceId : 01jykryzpwakngtwc0fd1z7af5. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646926 2025-06-25T14:48:11.806902Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [47:7519898857685650677:3383], TxId: 281474976710674, task: 1. Ctx: { SessionId : ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=. CustomerSuppliedId : . TraceId : 01jykryzpwakngtwc0fd1z7af5. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 SrcEndpoint { ActorId { RawX1: 7519898857685650677 RawX2: 4503801490836791 } } DstEndpoint { ActorId { RawX1: 7519898857685650674 RawX2: 4503801490836502 } } InMemory: true } 2025-06-25T14:48:11.806948Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976710674, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-25T14:48:11.807465Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:423: TxId: 281474976710674, send ack to channelId: 1, seqNo: 1, enough: 0, freeSpace: 8388552, to: [47:7519898857685650678:3383] 2025-06-25T14:48:11.807513Z node 47 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [47:7519898857685650677:3383], TxId: 281474976710674, task: 1. Ctx: { SessionId : ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=. CustomerSuppliedId : . TraceId : 01jykryzpwakngtwc0fd1z7af5. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-25T14:48:11.807539Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710674, task: 1. Tasks execution finished 2025-06-25T14:48:11.807557Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [47:7519898857685650677:3383], TxId: 281474976710674, task: 1. 
Ctx: { SessionId : ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=. CustomerSuppliedId : . TraceId : 01jykryzpwakngtwc0fd1z7af5. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-25T14:48:11.807639Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710674, task: 1. pass away 2025-06-25T14:48:11.807735Z node 47 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710674;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:48:11.808033Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [47:7519898857685650674:3094] TxId: 281474976710674. Ctx: { TraceId: 01jykryzpwakngtwc0fd1z7af5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [47:7519898857685650677:3383], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1038 Tasks { TaskId: 1 CpuTimeUs: 218 FinishTimeMs: 1750862891807 OutputRows: 1 OutputBytes: 3 ResultRows: 1 ResultBytes: 3 ComputeCpuTimeUs: 72 BuildCpuTimeUs: 146 HostName: "ghrun-kqfvx6aroe" NodeId: 47 CreateTimeMs: 1750862891805 UpdateTimeMs: 1750862891807 } MaxMemoryUsage: 1048576 } 2025-06-25T14:48:11.808088Z node 47 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976710674. Ctx: { TraceId: 01jykryzpwakngtwc0fd1z7af5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [47:7519898857685650677:3383] 2025-06-25T14:48:11.808191Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [47:7519898857685650674:3094] TxId: 281474976710674. Ctx: { TraceId: 01jykryzpwakngtwc0fd1z7af5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T14:48:11.808236Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [47:7519898857685650674:3094] TxId: 281474976710674. Ctx: { TraceId: 01jykryzpwakngtwc0fd1z7af5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.001038s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-25T14:48:11.808291Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, ActorId: [47:7519898844800747916:3094], ActorState: ExecuteState, TraceId: 01jykryzpwakngtwc0fd1z7af5, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-25T14:48:11.812298Z node 47 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, ActorId: [47:7519898844800747916:3094], ActorState: ExecuteState, TraceId: 01jykryzpwakngtwc0fd1z7af5, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 2123.419 QueriesCount: 1 2025-06-25T14:48:11.812392Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, ActorId: [47:7519898844800747916:3094], ActorState: ExecuteState, TraceId: 01jykryzpwakngtwc0fd1z7af5, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-25T14:48:11.812499Z node 47 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, ActorId: [47:7519898844800747916:3094], ActorState: ExecuteState, TraceId: 01jykryzpwakngtwc0fd1z7af5, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:48:11.812534Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, ActorId: [47:7519898844800747916:3094], ActorState: ExecuteState, TraceId: 01jykryzpwakngtwc0fd1z7af5, EndCleanup, isFinal: 1 2025-06-25T14:48:11.812598Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, ActorId: [47:7519898844800747916:3094], ActorState: ExecuteState, TraceId: 01jykryzpwakngtwc0fd1z7af5, Sent query response back to proxy, proxyRequestId: 5, proxyId: [47:7519898763196365418:2148] 2025-06-25T14:48:11.812631Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, ActorId: [47:7519898844800747916:3094], ActorState: unknown state, TraceId: 01jykryzpwakngtwc0fd1z7af5, Cleanup temp tables: 0 2025-06-25T14:48:11.813724Z node 47 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862889000, txId: 18446744073709551615] shutting down 2025-06-25T14:48:11.813829Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=47&id=ZGM5MDZhOTQtOTA1MzRkYjgtODkxNzcxNDMtNzA1MzM2NWI=, ActorId: [47:7519898844800747916:3094], ActorState: unknown state, TraceId: 01jykryzpwakngtwc0fd1z7af5, Session actor destroyed 2025-06-25T14:48:11.814589Z node 47 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;parent=[47:7519898771786300981:2286];fline=actor.cpp:33;event=skip_flush_writing; RESULT: [[3u]] --------------------- STATS: total CPU: 4746 duration: 2098134 usec cpu: 984126 usec { name: "/Root/OlapStore/log1" reads { rows: 2 bytes: 16 } } duration: 4467 usec cpu: 6329 usec 2025-06-25T14:48:11.892739Z node 47 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=72075186224037890;parent=[47:7519898771786300976:2285];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:48:11.897026Z node 47 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;parent=[47:7519898771786300986:2287];fline=actor.cpp:33;event=skip_flush_writing; >> TxUsage::Transactions_Conflict_On_SeqNo_Table [GOOD] >> KqpFlipJoin::RightOnly_1 [GOOD] >> KqpFlipJoin::RightOnly_2 >> TxUsage::WriteToTopic_Demo_48_Table [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin4-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 8076, MsgBus: 26703 2025-06-25T14:47:27.394227Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898671349721023:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:27.395680Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000de8/r3tmp/tmpDwHWgq/pdisk_1.dat 2025-06-25T14:47:28.015726Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:28.023382Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898671349720825:2080] 1750862847371033 != 1750862847371036 2025-06-25T14:47:28.039312Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:28.039427Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8076, node 1 2025-06-25T14:47:28.062035Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:28.260912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:28.260935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:28.260942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:28.261046Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:28.360573Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26703 TClient is connected to server localhost:26703 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:29.270331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:29.285303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:47:31.379644Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898688529590654:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:31.379751Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:31.380374Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898688529590666:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:31.388547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:31.407269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:47:31.408185Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898688529590668:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:47:31.479684Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898688529590721:2338] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:31.900715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.068559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.110751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.146571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.182473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.395585Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898671349721023:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:32.395653Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:32.421538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.461574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.499758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.536226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.570229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.605558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.679093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:32.760370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operatio ... 
99458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.003754Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.004249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.004534Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038602;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.005042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.008384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.008904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.012817Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.013176Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.013289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.013579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.020531Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.021063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038586;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.021094Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.021706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.025498Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038586;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.025964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.026169Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.026639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.030404Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.030929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.031116Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.031589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.035417Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.035935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.036185Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.036679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.040236Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.040737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.041578Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.042568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.044948Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.045669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.047127Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.047582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.049784Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.050239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.054127Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.054979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:11.057181Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.060189Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:11.175909Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrxztp7gvsqeq3c61xh3r7", SessionId: ydb://session/3?node_id=1&id=NDdlMWFmNzctZTg3OTNiMDMtZDJjZWM1MTEtYzZhYmU5MTc=, Slow query, duration: 35.152734s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:11.424576Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:48:11.424891Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:48:11.425293Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519898722889335675:2895];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-25T14:48:11.425699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpJoinOrder::CanonizedJoinOrderTPCH14 [GOOD] >> KqpJoin::LeftJoinWithNull-StreamLookupJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationManyKeysJoinPredicate [GOOD] Test command err: Trying to start YDB, gRPC: 20736, MsgBus: 6338 2025-06-25T14:46:33.311069Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898438883313675:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:33.311337Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e3e/r3tmp/tmpg0xEbA/pdisk_1.dat 2025-06-25T14:46:33.832825Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:33.849077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:33.849171Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:33.850492Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898438883313486:2080] 1750862793265278 != 1750862793265281 2025-06-25T14:46:33.857994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20736, node 1 2025-06-25T14:46:34.103805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:34.103822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:34.103831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to 
initialize from file: (empty maybe) 2025-06-25T14:46:34.103920Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:34.280302Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6338 TClient is connected to server localhost:6338 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:35.034921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:37.438348Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898456063183320:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:37.438367Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898456063183332:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:37.438468Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:37.448524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:37.466850Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898456063183334:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:37.532109Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898456063183385:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:38.052168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:46:38.296385Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898438883313675:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:38.296448Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:38.522923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898460358150892:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:38.523155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898460358150892:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:38.523380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898460358150892:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:38.523499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898460358150892:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:38.523602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898460358150892:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:38.523704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898460358150892:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:38.523800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898460358150892:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:38.523914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898460358150892:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:38.524010Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898460358150892:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:46:38.524123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898460358150892:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:46:38.524251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898460358150892:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:46:38.549564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898460358150905:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:38.549614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898460358150905:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:38.549811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898460358150905:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:38.549909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898460358150905:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:38.550006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898460358150905:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:38.550119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898460358150905:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:38.550211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898460358150905:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:38.550313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898460358150905:2315];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:38.550405Z node 1 :TX_COLUMNS ... 
70784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.274750Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.275648Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.276123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.280204Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.281324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.283195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.291429Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.291910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.293886Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.294304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.298630Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.299458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.299890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.299982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.310131Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.310667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.312577Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.313081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.319510Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.321212Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.321789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.322492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.329149Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.329925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.333966Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.334462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.340012Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.343562Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.345908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.350580Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.351577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.356234Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.357002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.361659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.367104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.367703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.371015Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039247;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.371522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.376082Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.379373Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.568196Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrxnzr3e1zwpgk5ybbcy4y", SessionId: ydb://session/3?node_id=1&id=NWQ4ZGJmZWYtYmQyNjBlZTMtYTc2ZWY3Y2UtMWY2OTAxNDU=, Slow query, duration: 39.623251s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:05.989065Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:05.989587Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:05.990681Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519898730941135064:8199];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-25T14:48:05.991133Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TxUsage::WriteToTopic_Demo_48_Query >> KqpIndexLookupJoin::LeftJoinOnlyLeftColumn+StreamLookup [GOOD] >> KqpIndexLookupJoin::LeftJoinOnlyLeftColumn-StreamLookup >> KqpIndexLookupJoin::JoinWithSubquery-StreamLookup [GOOD] >> KqpJoin::JoinDupColumnRight [GOOD] >> KqpJoin::JoinDupColumnRightPure >> KqpJoinOrder::TPCDS87-ColumnStore [GOOD] >> KqpJoinOrder::TestJoinOrderHintsManyHintTrees >> KqpJoinOrder::TPCDS61+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH14 [GOOD] Test command err: Trying to start YDB, gRPC: 31724, MsgBus: 24079 2025-06-25T14:46:36.783022Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898449612941433:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:36.783215Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e39/r3tmp/tmpo9B5AM/pdisk_1.dat 2025-06-25T14:46:37.590829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:37.590908Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:37.600134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:37.625697Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898449612941275:2080] 1750862796758801 != 1750862796758804 2025-06-25T14:46:37.647331Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31724, node 1 2025-06-25T14:46:37.824530Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:46:37.920956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:37.920983Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:37.920992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:37.921086Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24079 TClient is connected to server localhost:24079 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:39.058322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:39.093814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:41.784468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898449612941433:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:41.784569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:41.974120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898471087778408:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:41.974243Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:41.974768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898471087778420:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:41.978309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:41.997362Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898471087778422:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:42.053613Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898475382745769:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:42.403288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:46:42.781591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898475382746025:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:42.781843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898475382746025:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:42.782058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898475382746025:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:42.782158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898475382746025:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:42.782239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898475382746025:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:42.782315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898475382746025:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:42.782393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898475382746025:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:42.782479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898475382746025:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:42.782597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898475382746025:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:46:42.782689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7519898475382746025:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:46:42.782787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898475382746025:2324];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:46:42.825353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898475382746017:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:42.825401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898475382746017:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:42.825594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898475382746017:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:42.825700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898475382746017:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:42.825793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898475382746017:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:42.825905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898475382746017:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:42.826017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898475382746017:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:42.826147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519898475382746017:2316];tabl ... 
57819Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.062670Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.065823Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.066225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.068814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.075597Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.076094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.081240Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.081800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.084963Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.085384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.090071Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.090616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.094497Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.094884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.099460Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.099962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.103760Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.108177Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.108706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.112971Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.117290Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.121911Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.124199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.128220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.137204Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.137619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.141130Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.141546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.146728Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.147236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.150350Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.150937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.156237Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.159628Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.160170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.160972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.169091Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.170655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:05.173124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.186473Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:05.457094Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrxrs5fkgyg9vdpezte4kq", SessionId: ydb://session/3?node_id=1&id=MzQ5N2E3ODYtZGE2MGI1NTYtN2IwMTg2MmItN2JjODg2ODU=, Slow query, duration: 36.650984s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:05.982561Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:05.983129Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:05.983683Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519898793210381402:9824];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-25T14:48:05.984124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::JoinWithSubquery-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 2302, MsgBus: 24384 2025-06-25T14:48:05.707558Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898831169481494:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:05.707648Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dad/r3tmp/tmp73ReBH/pdisk_1.dat 2025-06-25T14:48:06.341456Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:06.367138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:06.367238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:06.369241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2302, node 1 2025-06-25T14:48:06.676653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:06.676678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:06.676687Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:06.676788Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:06.738045Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24384 TClient is connected to server localhost:24384 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:07.697059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:07.726891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:07.734935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:07.923469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:08.132009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:08.244195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:48:10.541405Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898852644319578:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:10.541512Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:10.712419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898831169481494:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:10.755292Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:10.920865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:10.959039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:11.032583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:11.077389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:11.148423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:11.188489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:11.229169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:11.302471Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898856939287533:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:11.302544Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:11.302848Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898856939287538:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:11.306616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:11.320290Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898856939287540:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:11.377900Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898856939287591:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:12.700917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:12.748938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, ... d to server localhost:21401 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:48:16.458566Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:48:16.468754Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:16.486184Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:48:16.578448Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:48:16.934032Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:17.053591Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:19.556355Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898891244899959:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.556546Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.613634Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.666388Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.706871Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.743962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.778062Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.825008Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.881230Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.973720Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898891244900614:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.973807Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.974306Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898891244900619:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.978316Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:19.996454Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898891244900621:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:20.078907Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898895539867968:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:20.246891Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898874065029192:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:20.246945Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:21.176291Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:21.232133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:21.286089Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:21.330327Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:21.374856Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:21.459562Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS87-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 5714, MsgBus: 5433 
2025-06-25T14:47:13.769647Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898611164357364:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:13.769901Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e06/r3tmp/tmpwAHape/pdisk_1.dat 2025-06-25T14:47:14.506476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:14.506585Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:14.516781Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:14.520558Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898611164357203:2080] 1750862833730016 != 1750862833730019 2025-06-25T14:47:14.536847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5714, node 1 2025-06-25T14:47:14.765427Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:14.765444Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:14.765451Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:14.765538Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:14.770133Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5433 TClient is connected to server localhost:5433 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:15.842883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:47:15.860721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:18.302136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898632639194333:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.302262Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.302648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898632639194345:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.306527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:18.326756Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898632639194347:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:18.431340Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898632639194398:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:18.760441Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898611164357364:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:18.760501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:18.775896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:18.922840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:18.970333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.006528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.081921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.307066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.341413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.382000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.410993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.443769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.500025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.569362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:19.626526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:20.666066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperatio ... 
lve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.534988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.535546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.538588Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.539156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.548257Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.548428Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.549032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.553113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.558360Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.558958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.562429Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.563021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.567990Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.571733Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.572430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.577144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.581905Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.582499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.585416Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.585923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.591436Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.593865Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.594426Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.597086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.601894Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.604913Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.605424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.608944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.614013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.614590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:47:58.618124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.625626Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:47:58.784557Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrxk64acbsp04rcr75fdez", SessionId: ydb://session/3?node_id=1&id=NDYxZDAwNjctNjFlMDg5MmMtNTNiNWE0NzYtNDcwYTFlNGY=, Slow query, duration: 35.707459s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:47:59.379880Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:59.380346Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:47:59.381613Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519898765783206061:5449];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-25T14:47:59.382014Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:19.486456Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrz0d703pfkvt3qpy69s53", SessionId: ydb://session/3?node_id=1&id=NDYxZDAwNjctNjFlMDg5MmMtNTNiNWE0NzYtNDcwYTFlNGY=, Slow query, duration: 10.102815s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "PRAGMA TablePathPrefix='/Root/test/ds';\n\n-- NB: Subquerys\n$bla1 = (select distinct\n COALESCE(c_last_name,'') as c_last_name,\n COALESCE(c_first_name,'') as c_first_name,\n COALESCE(cast(d_date as date), cast(0 as Date)) as d_date\n from store_sales as store_sales\n cross join date_dim as date_dim\n cross join customer as customer\n where store_sales.ss_sold_date_sk = date_dim.d_date_sk\n and 
store_sales.ss_customer_sk = customer.c_customer_sk\n and d_month_seq between 1221 and 1221+11);\n\n$bla2 = ((select distinct\n COALESCE(c_last_name,'') as c_last_name,\n COALESCE(c_first_name,'') as c_first_name,\n COALESCE(cast(d_date as date), cast(0 as Date)) as d_date\n from catalog_sales as catalog_sales\n cross join date_dim as date_dim\n cross join customer as customer\n where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk\n and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk\n and d_month_seq between 1221 and 1221+11)\n union all\n (select distinct\n COALESCE(c_last_name,'') as c_last_name,\n COALESCE(c_first_name,'') as c_first_name,\n COALESCE(cast(d_date as date), cast(0 as Date)) as d_date\n from web_sales as web_sales\n cross join date_dim as date_dim\n cross join customer as customer\n where web_sales.ws_sold_date_sk = date_dim.d_date_sk\n and web_sales.ws_bill_customer_sk = customer.c_customer_sk\n and d_month_seq between 1221 and 1221+11));\n\n-- start query 1 in stream 0 using template query87.tpl and seed 1819994127\nselect count(*)\nfrom $bla1 bla1 left only join $bla2 bla2 using (c_last_name, c_first_name, d_date)\n;\n\n-- end query 1 in stream 0 using template query87.tpl", parameters: 0b >> KqpJoinOrder::SortingsByPK-RemoveLimitOperator [GOOD] >> KqpJoinOrder::Sortings4Year-RemoveLimitOperator >> KqpJoinOrder::CanonizedJoinOrderTPCH16 >> KqpJoinOrder::FiveWayJoinWithPredsAndEquiv-ColumnStore >> KqpFlipJoin::RightOnly_2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPK-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 30506, MsgBus: 27435 2025-06-25T14:47:39.317005Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898719959763040:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:39.317193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dc5/r3tmp/tmps2x9nw/pdisk_1.dat 2025-06-25T14:47:40.138514Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:40.138599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:40.157317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:40.228392Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898719959762858:2080] 1750862859275966 != 1750862859275969 2025-06-25T14:47:40.237233Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30506, node 1 2025-06-25T14:47:40.321171Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:47:40.456876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:40.456897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:40.456904Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:40.456993Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27435 TClient is connected to server localhost:27435 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:41.530761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:43.759282Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898737139632699:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:43.759352Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898737139632690:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:43.759599Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:43.762488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:43.772712Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898737139632704:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:43.847902Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898737139632755:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:44.209960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:44.286878Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898719959763040:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:44.286981Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:44.363030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:44.411365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:44.451929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:44.498029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:44.738735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:44.812499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:44.878176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:44.926448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:44.979609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:45.039010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:45.072900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:45.122699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:46.345218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
24982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.630599Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038602;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.631123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.652509Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038556;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.653073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.658261Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.658919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.663592Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.664202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.670359Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.671028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.675912Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.676768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.681838Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.682432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.687246Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.687859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.693125Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.693711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.698365Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.698974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038560;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.703678Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038560;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.704251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.709935Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.710422Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.711020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.711028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.716745Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.719779Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.720580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.721494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.726031Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.726588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.730510Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.731168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.731429Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.732013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.741603Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.744364Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.748366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:22.758006Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:22.960739Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykryc995xkkhhkb4tz0mj1g", SessionId: ydb://session/3?node_id=1&id=YmQ1YWIxZGUtZDlkZDI1ZDItZjAwNjZiYjEtODdjYjhjZA==, Slow query, duration: 34.182719s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:23.247029Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:23.247564Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:23.248475Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519898861693709275:5299];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-25T14:48:23.248921Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::RightOnly_2 [GOOD] Test command err: Trying to start YDB, gRPC: 64576, MsgBus: 20040 2025-06-25T14:48:11.384867Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898858433556218:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:11.397954Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000da6/r3tmp/tmpfHgcLw/pdisk_1.dat 2025-06-25T14:48:12.026057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:12.026142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:12.040491Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898858433556115:2080] 1750862891374797 != 1750862891374800 2025-06-25T14:48:12.050547Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:12.052837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64576, node 1 2025-06-25T14:48:12.335543Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:12.335561Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:12.335568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:12.335694Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration 2025-06-25T14:48:12.412630Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20040 TClient is connected to server localhost:20040 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:13.506943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:13.532921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:13.551718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:13.778127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:13.976324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:14.098281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:48:16.305119Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898879908394214:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:16.305212Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:16.411838Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898858433556218:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:16.412093Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:16.807666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:16.856950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:16.923223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:16.977393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:17.023128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:17.067018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:17.116722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:17.206046Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898884203362172:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:17.206157Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:17.206483Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898884203362177:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:17.210331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:17.224482Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898884203362179:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:17.309456Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898884203362232:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:18.718658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... onnecting -> Connected TServer::EnableGrpc on GrpcPort 7391, node 2 2025-06-25T14:48:21.548873Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:21.548890Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:21.548896Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:21.548991Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:21.937081Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1150 TClient is connected to server localhost:1150 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:22.389465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:48:22.400907Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:22.418169Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:22.536590Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:22.872515Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:22.967246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:25.796428Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898918725174458:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:25.796517Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:25.845633Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:25.864502Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898897250336360:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:25.864549Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:25.895732Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:25.961939Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:25.996763Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:26.040728Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:26.121736Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:26.179139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:26.241983Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519898923020142415:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:26.242059Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:26.242328Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898923020142420:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:26.245548Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:26.258814Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898923020142422:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:26.329361Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898923020142473:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:27.746243Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:27.795378Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:27.853310Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:27.898495Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoin::LeftJoinWithNull-StreamLookupJoin [GOOD] >> KqpJoin::PushdownPredicateNoFullScan >> KqpJoinOrder::CanonizedJoinOrderTPCH18 [GOOD] >> KqpJoinOrder::SortingsByPKWithLookupJoin-RemoveLimitOperator >> KqpIndexLookupJoin::LeftJoinOnlyLeftColumn-StreamLookup [GOOD] >> KqpJoin::JoinDupColumnRightPure [GOOD] >> KqpJoinOrder::FourWayJoinWithPredsAndEquivAndLeft+ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::LeftJoinOnlyLeftColumn-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 14099, MsgBus: 1528 2025-06-25T14:48:14.347533Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898871080148894:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:14.352692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000da4/r3tmp/tmpwrITBi/pdisk_1.dat 2025-06-25T14:48:14.972009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:14.972113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-25T14:48:15.004782Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898871080148756:2080] 1750862894299305 != 1750862894299308 2025-06-25T14:48:15.031434Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:15.031657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14099, node 1 2025-06-25T14:48:15.224748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:15.224765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:15.224771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:15.224860Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:15.365145Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1528 TClient is connected to server localhost:1528 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:16.212370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:16.241257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:16.264519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:48:16.463886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:16.673273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:16.806975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:19.025108Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898892554986877:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.025196Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.320443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898871080148894:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:19.320495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:19.328492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.394506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.427073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.455329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.530265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.603306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.682076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.742540Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898892554987539:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.742611Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.742825Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898892554987544:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.746787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:19.760996Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898892554987546:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:19.857308Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898892554987598:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:21.085957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... maybe) 2025-06-25T14:48:24.020889Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:24.020897Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:24.021008Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:24.292425Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23635 TClient is connected to server localhost:23635 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:48:25.037575Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:48:25.058687Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:48:25.155956Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:25.490225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:48:25.576376Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:28.803962Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898933187547682:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:28.804050Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:28.863341Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:28.938255Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:28.985477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.025400Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.074066Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.143536Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.193897Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.273130Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898937482515635:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:29.273266Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:29.273576Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898937482515640:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:29.278119Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:29.296225Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898937482515642:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:29.387036Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898937482515693:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:30.897828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:30.954659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:31.002306Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:31.053109Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:31.120298Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:31.209146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::FourWayJoinWithPredsAndEquivAndLeft-ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Transactions_Conflict_On_SeqNo_Table [GOOD] Test command err: 2025-06-25T14:40:54.533453Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896979786932782:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:54.535928Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect 
path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019af/r3tmp/tmp399A9B/pdisk_1.dat 2025-06-25T14:40:54.848711Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:40:55.052754Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:55.054931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:55.054981Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:55.058910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21116, node 1 2025-06-25T14:40:55.212935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0019af/r3tmp/yandexCKARAO.tmp 2025-06-25T14:40:55.212956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0019af/r3tmp/yandexCKARAO.tmp 2025-06-25T14:40:55.216482Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0019af/r3tmp/yandexCKARAO.tmp 2025-06-25T14:40:55.216600Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:55.418841Z INFO: TTestServer started on Port 20473 GrpcPort 21116 2025-06-25T14:40:55.541353Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20473 PQClient connected to localhost:21116 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:55.786512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:55.812181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:40:55.837067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:40:55.855411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:40:56.049269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:40:56.064850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-25T14:40:58.324534Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896996966802610:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.324657Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.332697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896996966802623:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.341202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:58.359613Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896996966802625:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:40:58.452267Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896996966802689:2443] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:59.188982Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519896996966802697:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:40:59.190827Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NWI3MTdkOC05Mjg3YzNhYy1jMmNiYWQ0Yy04NjcxN2JiYQ==, ActorId: [1:7519896996966802601:2298], ActorState: ExecuteState, TraceId: 01jykrhve934ttd9sr790mbnaa, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:40:59.192897Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:40:59.213427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:59.240919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:59.340547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:40:59.532564Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896979786932782:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:59.532612Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Subcribe to ClusterTracker from [1:7519897001261770271:2621] === CheckClustersList. 
Ok 2025-06-25T14:41:05.300569Z :TwoSessionOneConsumer_Table INFO: TTopicSdkTestSetup started 2025-06-25T14:41:05.347061Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:41:05.393702Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:41:05.394588Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-25T14:41:05.394810Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:41:05.395027Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-25T14:41:05.395055Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:41:05.395071Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-25T14:41:05.395102Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:41:05.395140Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:41:05.395187Z node 1 :PERSQUEUE INFO: pq ... PartitionWriter 72075186224037898 (partition=4) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.826906Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037898 (partition=4) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.826934Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037898 (partition=4) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.826968Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037898 (partition=4) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.827171Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037898] server disconnected, pipe [19:7519898820193463670:2430] destroyed 2025-06-25T14:48:19.827199Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037898] server disconnected, pipe [19:7519898820193463639:2430] destroyed 2025-06-25T14:48:19.827221Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037898] server disconnected, pipe [19:7519898820193463331:2430] destroyed 2025-06-25T14:48:19.827246Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037898] server disconnected, pipe [19:7519898820193463609:2430] destroyed 2025-06-25T14:48:19.827265Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::DropOwner. 
2025-06-25T14:48:19.827272Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037898] server disconnected, pipe [19:7519898820193463711:2430] destroyed 2025-06-25T14:48:19.832493Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4450 sessionId: test-message_group_id_3|8f30e90-4f395e34-42c10913-529b9dfa_0 grpc read done: success: 0 data: 2025-06-25T14:48:19.832518Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4450 sessionId: test-message_group_id_3|8f30e90-4f395e34-42c10913-529b9dfa_0 grpc read failed 2025-06-25T14:48:19.832552Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4450 sessionId: test-message_group_id_3|8f30e90-4f395e34-42c10913-529b9dfa_0 grpc closed 2025-06-25T14:48:19.832575Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4450 sessionId: test-message_group_id_3|8f30e90-4f395e34-42c10913-529b9dfa_0 is DEAD 2025-06-25T14:48:19.833554Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037904 (partition=3) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.833606Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037904 (partition=3) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.833644Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037904 (partition=3) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.833680Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037904 (partition=3) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.833712Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037904 (partition=3) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.834340Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037904] server disconnected, pipe [19:7519898798718615324:5062] destroyed 2025-06-25T14:48:19.834372Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037904] server disconnected, pipe [19:7519898798718615256:5062] destroyed 2025-06-25T14:48:19.834400Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037904] server disconnected, pipe [19:7519898798718615137:5062] destroyed 2025-06-25T14:48:19.834427Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037904] server disconnected, pipe [19:7519898798718615212:5062] destroyed 2025-06-25T14:48:19.834458Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037904] server disconnected, pipe [19:7519898798718615353:5062] destroyed 2025-06-25T14:48:19.834499Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037904, Partition: 3, State: StateIdle] TPartition::DropOwner. 
2025-06-25T14:48:19.842195Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 6 sessionId: test-message_group_id_2|9b247891-54a5099a-f632f409-b2b7671f_0 grpc read done: success: 0 data: 2025-06-25T14:48:19.842218Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 6 sessionId: test-message_group_id_2|9b247891-54a5099a-f632f409-b2b7671f_0 grpc read failed 2025-06-25T14:48:19.842253Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 6 sessionId: test-message_group_id_2|9b247891-54a5099a-f632f409-b2b7671f_0 grpc closed 2025-06-25T14:48:19.842272Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 6 sessionId: test-message_group_id_2|9b247891-54a5099a-f632f409-b2b7671f_0 is DEAD 2025-06-25T14:48:19.843181Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037906 (partition=2) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.843229Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037906 (partition=2) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.843263Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037906 (partition=2) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.843290Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037906 (partition=2) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.843322Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037906 (partition=2) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.843959Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037906] server disconnected, pipe [19:7519898373516664769:2764] destroyed 2025-06-25T14:48:19.843989Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037906] server disconnected, pipe [19:7519898373516664468:2764] destroyed 2025-06-25T14:48:19.844014Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037906] server disconnected, pipe [19:7519898270437412993:2764] destroyed 2025-06-25T14:48:19.844038Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037906] server disconnected, pipe [19:7519898369221696820:2764] destroyed 2025-06-25T14:48:19.844060Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037906] server disconnected, pipe [19:7519898373516665086:2764] destroyed 2025-06-25T14:48:19.844265Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037906, Partition: 2, State: StateIdle] TPartition::DropOwner. 
2025-06-25T14:48:19.852477Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5027 sessionId: test-message_group_id_1|7ab9b258-9252c60c-8e5d0c54-c8c55253_0 grpc read done: success: 0 data: 2025-06-25T14:48:19.852501Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5027 sessionId: test-message_group_id_1|7ab9b258-9252c60c-8e5d0c54-c8c55253_0 grpc read failed 2025-06-25T14:48:19.852545Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5027 sessionId: test-message_group_id_1|7ab9b258-9252c60c-8e5d0c54-c8c55253_0 grpc closed 2025-06-25T14:48:19.852568Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5027 sessionId: test-message_group_id_1|7ab9b258-9252c60c-8e5d0c54-c8c55253_0 is DEAD 2025-06-25T14:48:19.853513Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037908 (partition=1) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.853568Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037908 (partition=1) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.853599Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037908 (partition=1) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.853624Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037908 (partition=1) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.853663Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037908 (partition=1) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.854299Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037908] server disconnected, pipe [19:7519898850258248432:4752] destroyed 2025-06-25T14:48:19.854336Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037908] server disconnected, pipe [19:7519898850258248429:4752] destroyed 2025-06-25T14:48:19.854365Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037908] server disconnected, pipe [19:7519898850258248344:4752] destroyed 2025-06-25T14:48:19.854389Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037908] server disconnected, pipe [19:7519898850258248427:4752] destroyed 2025-06-25T14:48:19.854415Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037908] server disconnected, pipe [19:7519898850258248447:4752] destroyed 2025-06-25T14:48:19.854456Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037908, Partition: 1, State: StateIdle] TPartition::DropOwner. 
2025-06-25T14:48:19.860488Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 15 sessionId: test-message_group_id_0|fef7a53b-7bb3b3d-a50dbb96-5be0095b_0 grpc read done: success: 0 data: 2025-06-25T14:48:19.860513Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 15 sessionId: test-message_group_id_0|fef7a53b-7bb3b3d-a50dbb96-5be0095b_0 grpc read failed 2025-06-25T14:48:19.860549Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 15 sessionId: test-message_group_id_0|fef7a53b-7bb3b3d-a50dbb96-5be0095b_0 grpc closed 2025-06-25T14:48:19.860571Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 15 sessionId: test-message_group_id_0|fef7a53b-7bb3b3d-a50dbb96-5be0095b_0 is DEAD 2025-06-25T14:48:19.861929Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037897 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.861979Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037897 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.862011Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037897 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.862039Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037897 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.862077Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037897 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:48:19.862523Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037897] server disconnected, pipe [19:7519898373516664772:2794] destroyed 2025-06-25T14:48:19.862556Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037897] server disconnected, pipe [19:7519898369221697020:2794] destroyed 2025-06-25T14:48:19.862582Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037897] server disconnected, pipe [19:7519898270437413105:2794] destroyed 2025-06-25T14:48:19.862604Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037897] server disconnected, pipe [19:7519898369221696805:2794] destroyed 2025-06-25T14:48:19.862645Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037897, Partition: 0, State: StateIdle] TPartition::DropOwner. 
2025-06-25T14:48:19.862852Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037897] server disconnected, pipe [19:7519898373516665052:2794] destroyed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinDupColumnRightPure [GOOD] Test command err: Trying to start YDB, gRPC: 5868, MsgBus: 8246 2025-06-25T14:48:14.320759Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898873012799440:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:14.321107Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000da3/r3tmp/tmpld7zh6/pdisk_1.dat 2025-06-25T14:48:14.928416Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898873012799336:2080] 1750862894260807 != 1750862894260810 2025-06-25T14:48:14.951112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:14.951207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:14.963317Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:14.966024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5868, node 1 2025-06-25T14:48:15.240813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:15.240831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:15.240838Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:15.240924Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:15.322481Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8246 TClient is connected to server localhost:8246 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:48:16.174564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:16.190532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:16.199420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:16.418241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:16.713992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:16.813911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:19.053967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898894487637483:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.054069Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.312418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898873012799440:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:19.312470Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:19.387637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.420741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.455601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.549326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.657660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.738014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.800802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.901534Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898894487638142:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.901598Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.901903Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898894487638147:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.905535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:19.920038Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898894487638149:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:19.980895Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898894487638201:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:21.354225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work ... 4.740716Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519898915492915525:2080] 1750862904284131 != 1750862904284134 2025-06-25T14:48:24.765154Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:24.765227Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:24.773818Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14389, node 2 2025-06-25T14:48:24.980917Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:24.980937Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:24.980945Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:24.981072Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:25.412991Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21169 TClient is connected to server localhost:21169 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:48:25.945225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:25.955534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:26.051286Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:26.268418Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:26.365813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:28.872697Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898932672786366:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:28.872779Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:28.981780Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.035629Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.090783Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.141478Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.209787Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.320169Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.348504Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519898915492915586:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:29.350733Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:29.391585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.468115Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519898936967754322:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:29.468228Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:29.468517Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519898936967754327:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:29.472719Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:29.488596Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519898936967754329:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:29.562112Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519898936967754381:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:31.531226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:31.593275Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:31.636152Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH18 [GOOD] Test command err: Trying to start YDB, gRPC: 64487, MsgBus: 6574 2025-06-25T14:46:39.694474Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898464327541386:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:39.694516Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e37/r3tmp/tmpwhXHL6/pdisk_1.dat 2025-06-25T14:46:40.425212Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:40.425296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:40.453318Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:40.536403Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898464327541366:2080] 1750862799658773 != 1750862799658776 2025-06-25T14:46:40.573619Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64487, node 1 2025-06-25T14:46:40.799423Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:40.799440Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T14:46:40.799452Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:40.799550Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:40.816417Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6574 TClient is connected to server localhost:6574 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:41.919603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:41.934177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:46:44.531212Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898485802378491:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:44.531340Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:44.531728Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898485802378503:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:44.535760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:44.553244Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898485802378505:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:46:44.659702Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898485802378556:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:44.700388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898464327541386:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:44.700498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:45.187462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:46:45.489164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898490097346084:2314];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:45.489365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898490097346084:2314];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:45.489643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898490097346084:2314];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:45.489788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898490097346084:2314];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:45.489896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898490097346084:2314];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:45.490017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898490097346084:2314];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:45.490128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898490097346084:2314];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:45.490247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898490097346084:2314];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:45.490368Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898490097346084:2314];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:46:45.490470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898490097346084:2314];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:46:45.490558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898490097346084:2314];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:46:45.500208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898490097346082:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:45.500273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898490097346082:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:45.500496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898490097346082:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:45.500602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898490097346082:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:45.500702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898490097346082:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:45.500803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898490097346082:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:45.500903Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898490097346082:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:45.500998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898490097346082:2312];tablet_ ... 
96400Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.596680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.596978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.602278Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.602915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039209;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.606472Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.606953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.616761Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039209;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.617277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.612305Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.620826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.627663Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.630188Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.630703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.632869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.640474Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.640973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039223;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.646481Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.646994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.654342Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039223;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.654873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.656871Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.657327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.664083Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.667123Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.667625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.669053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.681814Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.682246Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.682316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.682646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.687300Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.691644Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.693174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.696567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.701979Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.702601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.705499Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.711705Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.733335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039240;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:14.738107Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039240;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:14.840530Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrxxpa4h1mtfwn1z4a32vz", SessionId: ydb://session/3?node_id=1&id=ZjlmZjBjZmYtNGZlYTBjZjgtZWVkYWE3NDYtZDE5MWEyYzA=, Slow query, duration: 41.006071s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:15.441375Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:15.441851Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:15.442364Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519898709140713771:7025];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224039392; 2025-06-25T14:48:15.442786Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::TwoJoinsWithQueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FourWayJoinWithPredsAndEquivAndLeft-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 62954, MsgBus: 22230 2025-06-25T14:47:41.588652Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898728415962589:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:41.596396Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dc3/r3tmp/tmpiY7I7L/pdisk_1.dat 2025-06-25T14:47:42.262989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:42.263060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:42.271099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:42.316406Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898728415962367:2080] 1750862861498843 != 1750862861498846 2025-06-25T14:47:42.330887Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62954, node 1 2025-06-25T14:47:42.561221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:42.561239Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:42.561246Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:47:42.561336Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:42.580431Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22230 TClient is connected to server localhost:22230 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:43.764203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:43.777341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:46.034220Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898749890799499:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:46.034343Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:46.034656Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898749890799511:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:46.038324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:46.068498Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898749890799513:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:46.176155Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898749890799564:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:46.579626Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898728415962589:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:46.579763Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:46.583193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:46.713515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:46.779156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:46.818700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:46.878330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.115242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.160218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.214361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.263663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.300878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.333550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.397266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:47.428783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:48.418215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
13914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.623057Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.623721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.625826Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.626302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.635484Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.636006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038463;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.641104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.641614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038513;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.646850Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038513;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.647490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.649098Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038463;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.649544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038570;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.658415Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.658945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.664019Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.664872Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038570;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.665333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.670293Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.670930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.673026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.679838Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.681474Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.683160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.688827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.697580Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.698071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.700075Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.700685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.709691Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.711583Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.713059Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.713944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.723621Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.724144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.720302Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.729724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.734742Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.737491Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.819545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:26.826615Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:26.988515Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykryec191fz8fvjj7kffgpx", SessionId: ydb://session/3?node_id=1&id=NWYzYzA3OTItMmFlZDJjMWItNDVlNzY2ZC1kMjY2Y2ZkMA==, Slow query, duration: 36.074884s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:27.494932Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:27.495502Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:27.496104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519898779955576992:2840];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-25T14:48:27.496558Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::SortingsByPrefixWithAttrEquiToPK-RemoveLimitOperator >> KqpFlipJoin::Right_1 >> KqpIndexLookupJoin::JoinByComplexKeyWithNullComponents+StreamLookupJoin >> KqpJoinOrder::FiveWayJoinWithPreds-ColumnStore >> KqpJoinOrder::TPCDS92-ColumnStore [GOOD] >> KqpJoin::PushdownPredicateNoFullScan [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::PushdownPredicateNoFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 21348, MsgBus: 21456 2025-06-25T14:48:22.184740Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898907107702433:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:22.184771Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d9a/r3tmp/tmpfhxPkw/pdisk_1.dat 2025-06-25T14:48:22.882075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:22.882168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:22.894368Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:22.918326Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:22.920444Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898907107702220:2080] 1750862902141506 != 1750862902141509 TServer::EnableGrpc on GrpcPort 21348, node 1 2025-06-25T14:48:23.130441Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 
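For readability, the table-creation script that appears escaped (with \n sequences) inside the KQP_SLOW_LOG "Slow query" records in this section is shown below in unescaped form. The statements are taken verbatim from the quoted query text in the log, with only whitespace normalized; nothing is added beyond what the log itself records. This is the ~36 s DDL run that created the column-store tables with a 240-partition minimum.

CREATE TABLE t1 (
    id1 Int32 NOT NULL,
    PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
    id2 Int64 NOT NULL,
    t1_id1 Int64 NOT NULL,
    -- random_field2 Int32
    PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
    id3 Int16 NOT NULL,
    -- random_field3 Int32
    PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);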
2025-06-25T14:48:23.150013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:23.150031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:23.150038Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:23.150152Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21456 TClient is connected to server localhost:21456 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:24.221604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:24.252014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:24.445752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:24.666230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:48:24.770822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:26.743089Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898924287573035:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:26.743210Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:27.100047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:27.156733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:27.188993Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898907107702433:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:27.189060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:27.229599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:27.299138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:27.372167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:27.458746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:27.500804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:27.591601Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898928582540994:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:27.591681Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:27.591941Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898928582540999:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:27.599059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:27.616813Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898928582541001:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:27.721081Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898928582541053:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:29.158685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:29.233711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation pa ... askId\":2,\"OutputBytes\":4}],\"PeakMemoryUsageBytes\":65536,\"CpuTimeUs\":7324}],\"UseLlvm\":\"undefined\",\"OutputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"PhysicalStageId\":1,\"FinishedTasks\":1,\"InputBytes\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[8,1048576]},\"BaseTimeMs\":1750862920590,\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":7,\"Max\":7,\"Min\":7},\"FirstMessageMs\":{\"Count\":1,\"Sum\":7,\"Max\":7,\"Min\":7},\"Bytes\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4,\"History\":[8,4]}},\"Name\":\"RESULT\",\"Push\":{\"LastMessageMs\":{\"Count\":1,\"Sum\":7,\"Max\":7,\"Min\":7},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":7,\"Max\":7,\"Min\":7},\"FirstMessageMs\":{\"Count\":1,\"Sum\":7,\"Max\":7,\"Min\":7},\"PauseMessageMs\":{\"Count\":1,\"Sum\":6,\"Max\":6,\"Min\":6},\"WaitTimeUs\":{\"Count\":1,\"Sum\":145,\"Max\":145,\"Min\":145,\"History\":[8,145]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"WaitMessageMs\":{\"Count\":1,\"Max\":7,\"Min\":6}}}],\"CpuTimeUs\":{\"Count\":1,\"Sum\":490,\"Max\":490,\"Min\":490,\"History\":[8,490]},\"StageDurationUs\":0,\"ResultRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResultBytes\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"OutputBytes\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"Input\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":7,\"Max\":7,\"Min\":7},\"FirstMessageMs\":{\"Count\":1,\"Sum\":7,\"Max\":7,\"Min\":7},\"Bytes\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4,\"History\":[8,4]}},\"Name\":\"2\",\"Push\":{\"LastMessageMs\":{\"Count\":1,\"Sum\":7,\"Max\":7,\"Min\":7},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":7,\"Max\":7,\"Min\":7},\"FirstMessageMs\":{\"Count\":1,\"Sum\":7,\"Max\":7,\"Min\":7},\"Bytes\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4,\"History\":[8,4]},\"PauseMessageMs\":{\"Count\":1,\"Sum\":6,\"Max\":6,\"Min\":6},\"WaitTimeUs\":{\"Count\":1,\"Sum\":108,\"Max\":108,\"Min\":108,\"History\":[8,108]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"WaitMessageMs\":{\"Count\":1,\"Max\":7,\"Min
\":6}}}],\"UpdateTimeMs\":7,\"InputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Tasks\":1}}],\"Node Type\":\"Precompute_1\",\"Parent Relationship\":\"InitPlan\",\"PlanNodeType\":\"Materialize\"}],\"Node Type\":\"Query\",\"Stats\":{\"Compilation\":{\"FromCache\":false,\"DurationUs\":809489,\"CpuTimeUs\":803836},\"ProcessCpuTimeUs\":2341,\"TotalDurationUs\":876843,\"ResourcePoolId\":\"default\",\"QueuedTimeUs\":0},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":6,\"Plans\":[{\"PlanNodeId\":7,\"Operators\":[{\"E-Rows\":\"1\",\"Columns\":[\"id\",\"value\"],\"E-Size\":\"0\",\"E-Cost\":\"0\",\"Name\":\"TableLookup\",\"Table\":\"TableRight\",\"LookupKeyColumns\":[\"id\"]}],\"Node Type\":\"TableLookup\",\"PlanNodeType\":\"Connection\"}],\"Operators\":[{\"E-Rows\":\"1\",\"Predicate\":\"Exist(item.id)\",\"Name\":\"Filter\",\"E-Size\":\"0\",\"E-Cost\":\"0\"}],\"Node Type\":\"Filter\"},{\"PlanNodeId\":12,\"Plans\":[{\"PlanNodeId\":13,\"Operators\":[{\"Scan\":\"Parallel\",\"ReadRange\":[\"hash_key (9488119898155926451)\"],\"E-Size\":\"0\",\"Name\":\"TablePointLookup\",\"Path\":\"\\/Root\\/TableLeft\",\"E-Rows\":\"1\",\"Table\":\"TableLeft\",\"ReadColumns\":[\"ref_id\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TablePointLookup\"}],\"Operators\":[{\"E-Size\":\"0\",\"A-SelfCpu\":0.493,\"Name\":\"Filter\",\"Predicate\":\"Exist(item.ref_id)\",\"A-Rows\":1,\"E-Rows\":\"1\",\"A-Cpu\":0.493,\"E-Cost\":\"0\",\"A-Size\":4}],\"Node Type\":\"Filter\"}],\"Operators\":[{\"E-Rows\":\"0.2\",\"Condition\":\"r.id = l.ref_id\",\"Name\":\"InnerJoin (MapJoin)\",\"E-Size\":\"0\",\"E-Cost\":\"4.5\"}],\"Node Type\":\"InnerJoin (MapJoin)\"}],\"Operators\":[{\"A-Rows\":1,\"A-SelfCpu\":1.076,\"A-Cpu\":1.569,\"A-Size\":7,\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Operators\":[{\"A-Rows\":1,\"A-SelfCpu\":0.456,\"A-Cpu\":2.025,\"A-Size\":7,\"Name\":\"Limit\",\"Limit\":\"1001\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet_2\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"PlanNodeType\":\"Query\"}}" query_ast: "(\n(declare %kqp%tx_result_binding_0_0 (TupleType (DataType \'Uint64) (DataType \'Uint64)))\n(declare %kqp%tx_result_binding_1_0 (ListType (StructType \'(\'\"ref_id\" (OptionalType (DataType \'Uint64))))))\n(let $1 (DataType \'Uint64))\n(let $2 (OptionalType $1))\n(let $3 \'(\'\"_partition_mode\" \'\"single\"))\n(let $4 \'(\'(\'\"_logical_id\" \'1146) \'(\'\"_id\" \'\"9166bbc8-32838d28-d34197be-d29eb64a\") $3))\n(let $5 (DqPhyStage \'() (lambda \'() (block \'(\n (let $40 \'((DataType \'String) \'\"\" \'1))\n (let $41 (CallableType \'(\'1) \'($1) $40 \'($2 \'\"Init\")))\n (let $42 (Udf \'\"Digest.MurMurHash\" (Void) (VoidType) \'\"\" $41 (VoidType) \'\"\" \'(\'(\'\"strict\"))))\n (let $43 (Apply $42 (ToString (Utf8 \'\"target\"))))\n (return (ToStream (Just \'($43 $43))))\n))) $4))\n(let $6 (DqCnValue (TDqOutput $5 \'0)))\n(let $7 (KqpPhysicalTx \'($5) \'($6) \'() \'(\'(\'\"type\" \'\"compute\"))))\n(let $8 (KqpTable \'\"/Root/TableLeft\" \'\"72057594046644480:18\" \'\"\" \'1))\n(let $9 \'(\'\"ref_id\"))\n(let $10 \'\"%kqp%tx_result_binding_0_0\")\n(let $11 (TupleType $1 $1))\n(let $12 (KqlKeyInc (Nth %kqp%tx_result_binding_0_0 \'1)))\n(let $13 (KqpRowsSourceSettings $8 $9 \'() \'($12 $12)))\n(let $14 (DqPhyStage \'((DqSource (DataSource \'\"KqpReadRangesSource\") $13)) 
(lambda \'($44) (FromFlow (OrderedFilter (ToFlow $44) (lambda \'($45) (Exists (Member $45 \'\"ref_id\")))))) \'(\'(\'\"_logical_id\" \'1228) \'(\'\"_id\" \'\"669daff0-a6b8f493-6b23d511-1c388a6\"))))\n(let $15 (DqCnUnionAll (TDqOutput $14 \'0)))\n(let $16 (DqPhyStage \'($15) (lambda \'($46) $46) \'(\'(\'\"_logical_id\" \'1744) \'(\'\"_id\" \'\"4332d4b1-6d250dbc-b3f3e565-477cfc74\"))))\n(let $17 (DqCnResult (TDqOutput $16 \'0) \'()))\n(let $18 (KqpTxResultBinding $11 \'0 \'0))\n(let $19 \'(\'(\'\"type\" \'\"data\")))\n(let $20 (KqpPhysicalTx \'($14 $16) \'($17) \'(\'($10 $18)) $19))\n(let $21 \'\"%kqp%tx_result_binding_1_0\")\n(let $22 (StructType \'(\'\"ref_id\" $2)))\n(let $23 (ListType $22))\n(let $24 %kqp%tx_result_binding_1_0)\n(let $25 \'(\'(\'\"_logical_id\" \'1270) \'(\'\"_id\" \'\"50fa9fa5-e94d98a0-ed51a8bf-1fd752ed\") $3))\n(let $26 (DqPhyStage \'() (lambda \'() (Iterator (PartitionByKey $24 (lambda \'($47) (Member $47 \'\"ref_id\")) (Void) (Void) (lambda \'($48) (Map (Filter (FlatMap $48 (lambda \'($49) (Take (Nth $49 \'1) (Uint64 \'1)))) (lambda \'($50) (Exists (Member $50 \'\"ref_id\")))) (lambda \'($51) (AsStruct \'(\'\"id\" (Member $51 \'\"ref_id\"))))))))) $25))\n(let $27 (KqpTable \'\"/Root/TableRight\" \'\"72057594046644480:17\" \'\"\" \'1))\n(let $28 (KqpCnStreamLookup (TDqOutput $26 \'0) $27 \'(\'\"id\" \'\"value\") (ListType (StructType \'(\'\"id\" $2))) \'(\'(\'\"Strategy\" \'\"LookupRows\"))))\n(let $29 (Uint64 \'\"1001\"))\n(let $30 (StructType \'(\'\"r.value\" (OptionalType (DataType \'Utf8)))))\n(let $31 \'(\'(\'\"_logical_id\" \'1536) \'(\'\"_id\" \'\"71ef9228-bd4b8df0-401cbceb-c58194f1\") \'(\'\"_wide_channels\" $30)))\n(let $32 (DqPhyStage \'($28) (lambda \'($52) (block \'(\n (let $53 \'(\'Many \'Hashed \'Compact))\n (let $54 (SqueezeToDict (FlatMap (ToFlow $24) (lambda \'($55) (block \'(\n (let $56 (Member $55 \'\"ref_id\"))\n (let $57 (Nothing (OptionalType (TupleType $1 $22))))\n (let $58 (IfPresent $56 (lambda \'($59) (Just \'($59 $55))) $57))\n (return (If (Exists $56) $58 $57))\n )))) (lambda \'($60) (Nth $60 \'0)) (lambda \'($61) (Nth $61 \'1)) $53))\n (return (FromFlow (ExpandMap (Take (FlatMap $54 (lambda \'($62) (MapJoinCore (OrderedFilter (ToFlow $52) (lambda \'($63) (Exists (Member $63 \'\"id\")))) $62 \'\"Inner\" \'(\'\"id\") $9 \'(\'\"value\" \'\"r.value\") \'() \'(\'\"r.id\") \'(\'\"l.ref_id\")))) $29) (lambda \'($64) (Member $64 \'\"r.value\")))))\n))) $31))\n(let $33 (DqCnUnionAll (TDqOutput $32 \'0)))\n(let $34 (DqPhyStage \'($33) (lambda \'($65) (FromFlow (NarrowMap (Take (ToFlow $65) $29) (lambda \'($66) (AsStruct \'(\'\"r.value\" $66)))))) \'(\'(\'\"_logical_id\" \'1549) \'(\'\"_id\" \'\"29d6250c-21c6ecc3-a447757-dd7c232a\"))))\n(let $35 \'($26 $32 $34))\n(let $36 (DqCnResult (TDqOutput $34 \'0) \'(\'\"r.value\")))\n(let $37 (KqpTxResultBinding $23 \'1 \'0))\n(let $38 (KqpPhysicalTx $35 \'($36) \'(\'($21 $37)) $19))\n(let $39 \'($7 $20 $38))\n(return (KqpPhysicalQuery $39 \'((KqpTxResultBinding (ListType $30) \'\"2\" \'0)) \'(\'(\'\"type\" \'\"data_query\"))))\n)\n" total_duration_us: 876843 total_cpu_time_us: 825384 query_meta: 
"{\"query_database\":\"/Root\",\"query_parameter_types\":{},\"table_metadata\":[\"{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/TableLeft\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":18},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"data\\\",\\\"Id\\\":3,\\\"Type\\\":\\\"Utf8\\\",\\\"TypeId\\\":4608,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"hash_key\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"Uint64\\\",\\\"TypeId\\\":4,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"ref_id\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"Uint64\\\",\\\"TypeId\\\":4,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"hash_key\\\"],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}\",\"{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/TableRight\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":17},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"id\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"Uint64\\\",\\\"TypeId\\\":4,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"value\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"Utf8\\\",\\\"TypeId\\\":4608,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"id\\\"],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}\"],\"table_meta_serialization_type\":2,\"created_at\":\"1750862920\",\"query_type\":\"QUERY_TYPE_SQL_DML\",\"query_syntax\":\"1\",\"query_cluster\":\"db\",\"query_id\":\"33115150-5a2624c4-513b7a97-dd561779\",\"version\":\"1.0\"}" ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS92-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 9629, MsgBus: 12848 2025-06-25T14:47:34.748977Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898701522981102:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:34.754172Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dcb/r3tmp/tmp82ZwSX/pdisk_1.dat 2025-06-25T14:47:35.577077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:35.577177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:35.593669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:35.692900Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:35.700846Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898701522981058:2080] 1750862854667853 != 1750862854667856 TServer::EnableGrpc on GrpcPort 9629, node 1 2025-06-25T14:47:35.756632Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:47:35.985159Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:35.985180Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:35.985193Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:35.985302Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12848 TClient is connected to server localhost:12848 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:37.034270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:37.056852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:47:39.201280Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898722997818184:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:39.201408Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:39.204473Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898722997818196:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:39.208886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:39.224837Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898722997818198:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:47:39.296573Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898722997818250:2341] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:39.749381Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898701522981102:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:39.749493Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:39.827727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:39.970860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.038290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.093684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.170615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.406075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.460808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.507820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.593185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.644516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.705200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.795460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:40.858157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:41.760423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subopera ... 
49314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.254312Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.254846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.260076Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.260493Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.266050Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.266432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.269424Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.269977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.275371Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.275936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.278660Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.279188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.284757Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.284905Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.285366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.285397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.290228Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.290601Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.290835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.291132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.296488Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.297066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.298630Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.299151Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.301908Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.302532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.305449Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.305961Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.306784Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.307293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.310387Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.310974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.314257Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.314789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.316149Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.316652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.318812Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.319415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:21.321838Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.323426Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.477061Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykry87w2ak3z2nbwmkh4tbj", SessionId: ydb://session/3?node_id=1&id=YWFhNWU0NWYtOWUzY2Y2MzUtYTZhNGY3MmMtMjMzZGZjZmE=, Slow query, duration: 36.839511s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:22.069261Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:48:22.069792Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:48:22.070354Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519898860436797108:5355];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-25T14:48:22.070774Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpJoin::TwoJoinsWithQueryService [GOOD] >> KqpJoinOrder::TPCDS88-ColumnStore >> KqpJoinOrder::FiveWayJoinWithComplexPreds+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::TwoJoinsWithQueryService [GOOD] Test command err: Trying to start YDB, gRPC: 22809, MsgBus: 25246 2025-06-25T14:48:37.729752Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898969508183886:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:37.729798Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d8c/r3tmp/tmpqf4OZJ/pdisk_1.dat 2025-06-25T14:48:38.440413Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898969508183866:2080] 1750862917728518 != 1750862917728521 2025-06-25T14:48:38.477236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:38.477352Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:38.487206Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:38.488726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22809, node 1 2025-06-25T14:48:38.716793Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:38.716815Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:38.716822Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:38.716915Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:38.814698Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25246 TClient is connected to server localhost:25246 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:39.967150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:39.988929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:42.649849Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898990983020998:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:42.649958Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:42.732440Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898969508183886:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:42.732499Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:43.012084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.208540Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898995277988404:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:43.208608Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:43.232220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.332463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898995277988483:2318], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:43.332556Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:43.351486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.418776Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898995277988560:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:43.418856Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:43.419227Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898995277988566:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:43.422963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:43.452816Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898995277988568:2333], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:48:43.548590Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898995277988620:2496] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpJoinOrder::CanonizedJoinOrderTPCH9 >> KqpFlipJoin::Right_1 [GOOD] >> KqpFlipJoin::Right_2 >> KqpIndexLookupJoin::JoinByComplexKeyWithNullComponents+StreamLookupJoin [GOOD] >> KqpIndexLookupJoin::JoinByComplexKeyWithNullComponents-StreamLookupJoin >> KqpJoinOrder::SortingsByPrefixWithConstant+RemoveLimitOperator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithComplexPreds+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 31350, MsgBus: 62910 2025-06-25T14:46:58.073870Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898547014254899:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:58.074717Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e1d/r3tmp/tmp9FMzy7/pdisk_1.dat 2025-06-25T14:46:58.516057Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:58.539224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:58.539303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:58.541518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31350, node 1 2025-06-25T14:46:58.800471Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:58.800503Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:58.800509Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:58.800602Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:46:59.081117Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62910 TClient is connected to server localhost:62910 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:59.743183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:59.777727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:02.054430Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898564194124615:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:02.054556Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:02.054869Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898564194124627:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:02.059885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:02.080112Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898564194124629:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:02.163291Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898564194124682:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:02.561509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:02.961414Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898564194124926:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:02.961625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898564194124926:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:02.961862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898564194124926:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:02.961986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898564194124926:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:02.962100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898564194124926:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:02.962201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898564194124926:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:02.962287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898564194124926:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:02.962381Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898564194124926:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:02.962479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898564194124926:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:02.962585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519898564194124926:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:02.962669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898564194124926:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:02.984155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898564194124924:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:02.984210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898564194124924:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:02.984551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898564194124924:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:02.984662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898564194124924:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:02.984750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898564194124924:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:02.984849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898564194124924:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:02.984943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898564194124924:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:02.985042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898564194124924:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:02.985141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898564194124924:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:02.985244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898564194124924:2311];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline= ... 
70066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.172962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.173497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.179124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.179670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.182417Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.182933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.190692Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.191116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.195565Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.197864Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.198317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.203248Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.203801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.204953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.212292Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.212880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.218408Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.218864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039217;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.223333Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039217;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.223860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.227527Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.228030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.233654Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.234196Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.239844Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.240272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039294;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.242960Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.243512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.252151Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.252714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.258040Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039294;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.258492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.263089Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.263754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.265478Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.266000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.271447Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.272077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:28.276747Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.279020Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:28.640543Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykryd4g3c30yzb6pay0jmhc", SessionId: ydb://session/3?node_id=1&id=NDQ0NTFmYmEtZWE3YWNiODYtZTQxNTk1ZWQtODA5ODMzZTk=, Slow query, duration: 38.991546s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:29.024022Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:29.024812Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:29.025111Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519898774647557563:6891];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224039392; 2025-06-25T14:48:29.025535Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::FiveWayJoinWithPredsAndEquiv+ColumnStore >> KqpJoinOrder::FiveWayJoinWithComplexPreds2-ColumnStore [GOOD] >> KqpJoinOrder::TPCDS90-ColumnStore [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH6 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithComplexPreds2-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 32763, MsgBus: 15271 2025-06-25T14:48:00.857099Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898812833110810:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:00.857423Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000db3/r3tmp/tmpOESKw8/pdisk_1.dat 2025-06-25T14:48:01.533277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:01.533395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:01.544834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:01.602059Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:01.605908Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898812833110628:2080] 1750862880805769 != 1750862880805772 TServer::EnableGrpc on GrpcPort 32763, node 1 2025-06-25T14:48:01.844412Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:48:01.868571Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable 
config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:01.868589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:01.868602Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:01.868697Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15271 TClient is connected to server localhost:15271 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:02.751012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:02.783552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:05.431779Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898834307947754:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:05.431906Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:05.432212Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898834307947766:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:05.436369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:05.450804Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898834307947768:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:05.528436Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898834307947819:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:05.844721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898812833110810:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:05.844827Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:06.059822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.180553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.218625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.290860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.336275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.502378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.539657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.575587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.610754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.701551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.753072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.833865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.885681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:07.602443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
41920Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.042333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.042398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.047182Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.047677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.048419Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038577;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.048973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.051224Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.051772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.054482Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.057019Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.057518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.062349Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.083432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.088658Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.089129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.093878Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.095049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.100171Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.100916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.106177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.107091Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.108182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.111140Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.111790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.120730Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.121378Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.121385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.122025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.129664Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.131348Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.131919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.136608Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.137171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.137370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.141559Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.141961Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.142252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.142575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:44.146597Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.148022Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:44.236575Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrz11dakxbqw4m5ztq35jt", SessionId: ydb://session/3?node_id=1&id=M2IxODg4ZGEtZjczNDVmYzItYjJkOTVmNmQtODRiZTNjY2Q=, Slow query, duration: 34.207012s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:44.506113Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:44.506637Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:44.506916Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519898928797247260:4608];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-25T14:48:44.507258Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS90-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 4720, MsgBus: 20734 2025-06-25T14:47:51.933620Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898774752415127:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:51.933668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dbc/r3tmp/tmpudy29w/pdisk_1.dat 2025-06-25T14:47:52.591763Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898774752415091:2080] 1750862871914538 != 1750862871914541 2025-06-25T14:47:52.592448Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:52.629496Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:52.629590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:52.631050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4720, node 1 2025-06-25T14:47:52.786382Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:52.786401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:52.786407Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:52.786504Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got 
bad distributable configuration 2025-06-25T14:47:52.968239Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20734 TClient is connected to server localhost:20734 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:53.867123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:53.937405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:56.082352Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898796227252222:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:56.082476Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:56.082831Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898796227252234:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:56.086646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:56.113438Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898796227252236:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:56.192162Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898796227252287:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:56.603695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:56.794613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:56.849104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:56.882896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:56.914566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:56.934342Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898774752415127:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:56.934388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:57.175371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:57.245149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:57.308713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:57.366355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:57.443775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:57.479139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:57.519299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:57.570722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:47:58.324956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subopera ... 
51922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.556211Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.560756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.563735Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.564169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.568094Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.570062Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.570614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.572911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.579486Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.580021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.581557Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.581979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.587173Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.587690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.591396Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.591898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.596017Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.600061Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.600579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038462;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.603981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.607863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.609434Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038462;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.609959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.613065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038539;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.618553Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.619127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.621429Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038539;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.622048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.627289Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.627909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.632039Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.636892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.640836Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.641356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.646367Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.647527Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.648186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.653224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:33.656640Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.662648Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:33.759354Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykryrde5cs0qvcd19j4mba0", SessionId: ydb://session/3?node_id=1&id=Mzc3MWY2MzctMjNiMzVjZTgtNGFkZjMxYy04N2QzMzE1MQ==, Slow query, duration: 32.559945s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:34.295704Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:34.296246Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:34.296994Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519898869241712609:4261];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-25T14:48:34.297422Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> YdbOlapStore::LogTsRangeDescending [GOOD] >> KqpFlipJoin::Right_2 [GOOD] >> KqpIndexLookupJoin::JoinByComplexKeyWithNullComponents-StreamLookupJoin [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::Right_2 [GOOD] Test command err: Trying to start YDB, gRPC: 11831, MsgBus: 24932 2025-06-25T14:48:39.088608Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898977279975599:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:39.089056Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d89/r3tmp/tmpx0jB05/pdisk_1.dat 2025-06-25T14:48:39.835979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:39.836061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:39.856912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:39.883347Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:39.884396Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898972985008190:2080] 1750862919037505 != 1750862919037508 TServer::EnableGrpc on GrpcPort 11831, node 1 2025-06-25T14:48:40.117294Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:48:40.129479Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:40.129500Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:40.129508Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:40.129616Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24932 TClient is connected to server localhost:24932 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:41.289598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:41.304861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:41.313677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:41.554873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:41.797205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:48:41.910593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:44.072605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898977279975599:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:44.072666Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:44.469056Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898998754813616:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:44.469156Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:44.775127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:44.829634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:44.866997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:44.938004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:44.974789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.038470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.128639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.235475Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899003049781585:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:45.235528Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:45.235791Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899003049781590:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:45.239410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:45.255305Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899003049781592:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:45.359857Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899003049781643:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:46.592422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... ecting -> Connected TServer::EnableGrpc on GrpcPort 12950, node 2 2025-06-25T14:48:49.040728Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:49.040744Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:49.040752Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:49.040846Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27897 2025-06-25T14:48:49.608552Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27897 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:49.985494Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:48:49.998384Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:50.010945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:50.121421Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:50.293039Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:50.378579Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:52.882478Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899036047725131:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:52.882565Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:52.946354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:52.987098Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:53.061189Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:53.100132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:53.175451Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:53.262398Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:53.335164Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:53.493753Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899040342693097:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:53.493841Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:53.494164Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899040342693102:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:53.498332Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:53.521350Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899040342693104:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:53.594597Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899040342693155:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:53.608489Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899018867854533:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:53.608545Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:54.907356Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:54.966073Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:55.018294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:55.073135Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::CanonizedJoinOrderTPCDS64 >> KqpJoinOrder::CanonizedJoinOrderTPCH10 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH6 [GOOD] Test command err: Trying to start YDB, gRPC: 19124, MsgBus: 11442 2025-06-25T14:47:08.349178Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898587546331574:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:08.361767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e0f/r3tmp/tmpveFYu0/pdisk_1.dat 2025-06-25T14:47:09.023650Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:09.023750Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:09.062932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:09.147490Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19124, node 1 2025-06-25T14:47:09.367804Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:47:09.416732Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:09.416752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:09.416759Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:09.416849Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11442 TClient is connected to server localhost:11442 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:10.317454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:12.585753Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898604726201184:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:12.585872Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:12.585968Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898604726201195:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:12.590186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:12.607959Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898604726201198:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:12.675818Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898604726201249:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:13.045765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:13.318479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898609021168795:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:13.318667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898609021168795:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:13.318911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898609021168795:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:13.319030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898609021168795:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:13.319117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898609021168795:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:13.319202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898609021168795:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:13.319289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898609021168795:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:13.319406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898609021168795:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:13.319517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898609021168795:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:13.319618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[1:7519898609021168795:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:13.319717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898609021168795:2319];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:13.366471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898609021168794:2318];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:13.366524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898609021168794:2318];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:13.366726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898609021168794:2318];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:13.366841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898609021168794:2318];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:13.366936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898609021168794:2318];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:13.367019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898609021168794:2318];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:13.367098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898609021168794:2318];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:13.367183Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898609021168794:2318];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:13.367260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898609021168794:2318];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:13.367335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898609021168794:2318];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:13.367412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;s ... 
.622834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.627925Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.627928Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.628466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.628467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.632916Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.633195Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.633509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.633734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.638524Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.638551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.639094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.639094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.644007Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.644049Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.644587Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.644827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.649808Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.649823Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.650420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.651197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.656761Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.657390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.662883Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.663482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.669263Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.669723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.673582Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.673939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.677893Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.678734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.678805Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.679520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.684470Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.684921Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.685595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:41.691288Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:41.841006Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrys2d1gwz66wxy3yzb4sd", SessionId: ydb://session/3?node_id=1&id=OTkwOGI1YjEtNWFkN2I5ZTAtZWIzNWI3ZmMtNzk3ZmRhOA==, Slow query, duration: 39.970735s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:42.421547Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:42.422109Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:42.422678Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039094;self_id=[1:7519898879604152946:8215];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-25T14:48:42.423135Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:43.421484Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyks00ww34t4apy33f9x925g, SessionId: CompileActor 2025-06-25 14:48:43.420 WARN ydb-core-kqp-ut-join(pid=430889, tid=0x00007F3E0AB75640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-25T14:48:44.668584Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyks02jk7bnvmd4mnc3z4xkk, SessionId: CompileActor 2025-06-25 14:48:44.667 WARN ydb-core-kqp-ut-join(pid=430889, tid=0x00007F3E0A366640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogTsRangeDescending [GOOD] Test command err: 2025-06-25T14:44:18.798186Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897857496926867:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:18.798291Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001825/r3tmp/tmpoHEe3B/pdisk_1.dat 2025-06-25T14:44:19.342211Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:19.358534Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:19.358607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:19.362032Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26578, node 1 2025-06-25T14:44:19.637989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:19.638012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:19.638018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:19.638118Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:44:19.843017Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31014 
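For reference, the query text embedded in the KQP_SLOW_LOG entry above (Slow query, duration: 39.970735s) is shown here with its escaped newlines expanded; the statements are reproduced verbatim from that log entry, with only whitespace normalized and nothing added:

CREATE TABLE t1 (
  id1 Int32 NOT NULL,
  PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
  id2 Int64 NOT NULL,
  t1_id1 Int64 NOT NULL,
  -- random_field2 Int32
  PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
  id3 Int16 NOT NULL,
  -- random_field3 Int32
  PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);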
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:19.988886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:31014 2025-06-25T14:44:20.333690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "OlapStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "message" Type: "Utf8" } Columns { Name: "json_payload" Type: "JsonDocument" } Columns { Name: "resource_id" Type: "Utf8" NotNull: true } Columns { Name: "uid" Type: "Utf8" NotNull: true } Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "resource_type" Type: "Utf8" NotNull: true } Columns { Name: "level" Type: "Int32" } Columns { Name: "ingested_at" Type: "Timestamp" } Columns { Name: "saved_at" Type: "Timestamp" } Columns { Name: "request_id" Type: "Utf8" } KeyColumnNames: "timestamp" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" } } } } TxId: 281474976710658 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:44:20.334099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /Root/OlapStore, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:44:20.334644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: OlapStore, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-25T14:44:20.334682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-25T14:44:20.334702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710658:0 type: TxCreateOlapStore target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-25T14:44:20.334768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created 
for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:44:20.334823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:44:20.334871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-25T14:44:20.334901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-06-25T14:44:20.335163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-06-25T14:44:20.337035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710658:0 1 -> 2 2025-06-25T14:44:20.337320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:44:20.337350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_store.cpp:451) 2025-06-25T14:44:20.337468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:44:20.337502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 6 2025-06-25T14:44:20.341117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710658, response: Status: StatusAccepted TxId: 281474976710658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-06-25T14:44:20.341321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710658, database: /Root, subject: , status: StatusAccepted, operation: CREATE COLUMN STORE, path: /Root/OlapStore 2025-06-25T14:44:20.341520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:44:20.341540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-25T14:44:20.341697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T14:44:20.341794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:44:20.341813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: 
[1:7519897861791894716:2376], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 1 2025-06-25T14:44:20.341825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519897861791894716:2376], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 2 2025-06-25T14:44:20.341881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:44:20.341910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710658:0 ProgressState, operation type: TxCreateOlapStore, at tablet# 72057594046644480 2025-06-25T14:44:20.342518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976710658:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 1 TabletType: ColumnShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 2 BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolNam ... eqNo: [1] 2025-06-25T14:48:53.834371Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 42, seqNo: [1] 2025-06-25T14:48:53.834385Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 43, seqNo: [1] 2025-06-25T14:48:53.834400Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 44, seqNo: [1] 2025-06-25T14:48:53.834414Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. 
Tasks execution finished, don't wait for ack delivery in input channelId: 45, seqNo: [1] 2025-06-25T14:48:53.834429Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 46, seqNo: [1] 2025-06-25T14:48:53.834444Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 47, seqNo: [1] 2025-06-25T14:48:53.834459Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 48, seqNo: [1] 2025-06-25T14:48:53.834474Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 49, seqNo: [1] 2025-06-25T14:48:53.834489Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 50, seqNo: [1] 2025-06-25T14:48:53.834503Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 51, seqNo: [1] 2025-06-25T14:48:53.834517Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 52, seqNo: [1] 2025-06-25T14:48:53.834532Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 53, seqNo: [1] 2025-06-25T14:48:53.834546Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 54, seqNo: [1] 2025-06-25T14:48:53.834562Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 55, seqNo: [1] 2025-06-25T14:48:53.834577Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 56, seqNo: [1] 2025-06-25T14:48:53.834590Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 57, seqNo: [1] 2025-06-25T14:48:53.834603Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 58, seqNo: [1] 2025-06-25T14:48:53.834617Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 59, seqNo: [1] 2025-06-25T14:48:53.834631Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 60, seqNo: [1] 2025-06-25T14:48:53.834644Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. 
Tasks execution finished, don't wait for ack delivery in input channelId: 61, seqNo: [1] 2025-06-25T14:48:53.834659Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 62, seqNo: [1] 2025-06-25T14:48:53.834674Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 63, seqNo: [1] 2025-06-25T14:48:53.834689Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 64, seqNo: [1] 2025-06-25T14:48:53.834710Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715670, task: 65. Tasks execution finished 2025-06-25T14:48:53.834748Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [28:7519899040617316492:3169], TxId: 281474976715670, task: 65. Ctx: { TraceId : 01jyks0aee8hh9tdny4rn4gf90. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-06-25T14:48:53.834945Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715670, task: 65. pass away 2025-06-25T14:48:53.835191Z node 28 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715670;task_id=65;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T14:48:53.836019Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [28:7519899040617316408:3095] TxId: 281474976715670. Ctx: { TraceId: 01jyks0aee8hh9tdny4rn4gf90, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7519899040617316492:3169], task: 65, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 10378 Tasks { TaskId: 65 StageId: 1 CpuTimeUs: 542 FinishTimeMs: 1750862933833 ComputeCpuTimeUs: 108 BuildCpuTimeUs: 434 HostName: "ghrun-kqfvx6aroe" NodeId: 28 CreateTimeMs: 1750862933633 UpdateTimeMs: 1750862933834 } MaxMemoryUsage: 1048576 } 2025-06-25T14:48:53.836114Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715670. Ctx: { TraceId: 01jyks0aee8hh9tdny4rn4gf90, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7519899040617316492:3169] 2025-06-25T14:48:53.836269Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [28:7519899040617316408:3095] TxId: 281474976715670. Ctx: { TraceId: 01jyks0aee8hh9tdny4rn4gf90, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T14:48:53.840476Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [28:7519899040617316408:3095] TxId: 281474976715670. Ctx: { TraceId: 01jyks0aee8hh9tdny4rn4gf90, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.062469s ReadRows: 0 ReadBytes: 0 ru: 41 rate limiter was not found force flag: 1 2025-06-25T14:48:53.840595Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1754: SessionId: ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=, ActorId: [28:7519899036322349054:3095], ActorState: ExecuteState, TraceId: 01jyks0aee8hh9tdny4rn4gf90, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-25T14:48:53.841016Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2013: SessionId: ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=, ActorId: [28:7519899036322349054:3095], ActorState: ExecuteState, TraceId: 01jyks0aee8hh9tdny4rn4gf90, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 350.749 QueriesCount: 1 2025-06-25T14:48:53.841094Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2168: SessionId: ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=, ActorId: [28:7519899036322349054:3095], ActorState: ExecuteState, TraceId: 01jyks0aee8hh9tdny4rn4gf90, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-25T14:48:53.841249Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2528: SessionId: ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=, ActorId: [28:7519899036322349054:3095], ActorState: ExecuteState, TraceId: 01jyks0aee8hh9tdny4rn4gf90, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-25T14:48:53.841309Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=, ActorId: [28:7519899036322349054:3095], ActorState: ExecuteState, TraceId: 01jyks0aee8hh9tdny4rn4gf90, EndCleanup, isFinal: 1 2025-06-25T14:48:53.841392Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=, ActorId: [28:7519899036322349054:3095], ActorState: ExecuteState, TraceId: 01jyks0aee8hh9tdny4rn4gf90, Sent query response back to proxy, proxyRequestId: 5, proxyId: [28:7519898950422999176:2088] 2025-06-25T14:48:53.841457Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2601: SessionId: ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=, ActorId: [28:7519899036322349054:3095], ActorState: unknown state, TraceId: 01jyks0aee8hh9tdny4rn4gf90, Cleanup temp tables: 0 2025-06-25T14:48:53.861432Z node 28 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750862933001, txId: 18446744073709551615] shutting down 2025-06-25T14:48:53.861626Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2692: SessionId: ydb://session/3?node_id=28&id=NTM3NmZiMjAtMzMzNjhhMGYtNjg3ZDI3OTYtOTYxYWNiNmE=, ActorId: [28:7519899036322349054:3095], ActorState: unknown state, TraceId: 01jyks0aee8hh9tdny4rn4gf90, Session actor destroyed 2025-06-25T14:48:53.896676Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[28:7519898963307902154:2289];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037888; 2025-06-25T14:48:54.032510Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;parent=[28:7519898963307902118:2286];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:48:54.032617Z 
node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;parent=[28:7519898963307902135:2288];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-25T14:48:54.073830Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;self_id=[28:7519898963307902135:2288];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=72075186224037891; 2025-06-25T14:48:54.143233Z node 28 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037889;parent=[28:7519898963307902119:2287];fline=actor.cpp:33;event=skip_flush_writing; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::JoinByComplexKeyWithNullComponents-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 26315, MsgBus: 6781 2025-06-25T14:48:39.870514Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898977401288034:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:39.870742Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d88/r3tmp/tmpwDNeBU/pdisk_1.dat 2025-06-25T14:48:40.477655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:40.477734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:40.528824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:40.559828Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:40.564406Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898977401287849:2080] 1750862919795752 != 1750862919795755 TServer::EnableGrpc on GrpcPort 26315, node 1 2025-06-25T14:48:40.884470Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:48:40.924831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:40.924854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:40.924861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:40.924957Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6781 TClient is connected to server localhost:6781 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:42.042574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:42.084694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:48:42.105967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:42.289637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:42.527470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:42.662515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:48:44.863047Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898977401288034:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:44.870981Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:45.090790Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899003171093268:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:45.090890Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:45.560497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.620675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.701344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.763163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.806879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.852447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.925998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:46.021395Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899007466061237:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:46.021465Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:46.021788Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899007466061242:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:46.026094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:46.048070Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899007466061244:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:48:46.112551Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899007466061295:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:47.157584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... ed at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 26116, MsgBus: 10726 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d88/r3tmp/tmpvJDl6z/pdisk_1.dat 2025-06-25T14:48:49.430884Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:49.430951Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:49.432458Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:48:49.442635Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:49.451839Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:49.454356Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899022617225531:2080] 1750862929007129 != 1750862929007132 TServer::EnableGrpc on GrpcPort 26116, node 2 2025-06-25T14:48:49.728793Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:49.728817Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:49.728823Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:49.728938Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:50.012441Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10726 TClient is connected to server localhost:10726 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:50.751305Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:50.768678Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:50.790441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:50.931137Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:51.152640Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:51.243160Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:55.045560Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899044092063646:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:55.045633Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:55.105712Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:55.183838Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:55.223180Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:55.273216Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:55.334944Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:55.414550Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:55.470562Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:55.570766Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899048387031603:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:55.570842Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:55.571145Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899048387031608:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:55.574942Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:55.592193Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899048387031610:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:55.654754Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899048387031661:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:56.903515Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:56.957881Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::CanonizedJoinOrderTPCH19 >> KqpIndexLookupJoin::SimpleLeftSemiJoin-StreamLookup >> KqpJoinOrder::TPCDS95+ColumnStore [GOOD] >> KqpIndexLookupJoin::CheckCastInt32ToInt16-StreamLookupJoin-NotNull >> KqpJoinOrder::SortingsWithLookupJoin3+RemoveLimitOperator >> KqpJoinOrder::FiveWayJoinWithConstantFoldOpt+ColumnStore [GOOD] >> KqpJoinOrder::OltpJoinTypeHintCBOTurnOFF [GOOD] >> KqpJoinOrder::TPCDS92+ColumnStore [GOOD] >> KqpJoinOrder::DatetimeConstantFold+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::OltpJoinTypeHintCBOTurnOFF [GOOD] Test command err: Trying to start YDB, gRPC: 21217, MsgBus: 7479 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000da9/r3tmp/tmpcTpPxv/pdisk_1.dat 2025-06-25T14:48:09.320458Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:48:09.452101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:09.461640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:09.473317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:09.553128Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898844089275427:2080] 1750862888789576 != 1750862888789579 2025-06-25T14:48:09.558017Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21217, node 1 2025-06-25T14:48:09.840221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:09.840239Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:09.840249Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:09.844439Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:09.904709Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7479 TClient is connected to server localhost:7479 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:10.598468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:10.618792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:48:13.191200Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898865564112550:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:13.191313Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:13.191634Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898865564112562:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:13.194830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:13.213788Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898865564112564:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:48:13.319899Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898865564112615:2338] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:13.711844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:13.857411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:13.930130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:13.967359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:14.006839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:14.216503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:14.260077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:14.305461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:14.335755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:14.368088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:14.402228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:14.437058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:14.481517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:15.505616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:15.557353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operat ... 
85264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.388703Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.389219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.394692Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.395274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.402308Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.402901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.408246Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.412277Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.413013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.417135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.422384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.423083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.426712Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.427259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.432488Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.433146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.434554Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.435785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.445236Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.445677Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.445781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.446141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.450834Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.451460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.456721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.457340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.461198Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.461947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.466682Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.467423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.471465Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.472058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.480888Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.481894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.485655Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.486150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038456;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.491385Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.495883Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038456;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.551542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038494;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:48:56.557398Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038494;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:56.616014Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrz92r3vdaqw087bkwxx0h", SessionId: ydb://session/3?node_id=1&id=YzZlYWNiM2QtODljODUwNTEtNTk2YmI3ZTItY2ZlN2M2Yjc=, Slow query, duration: 38.351033s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:56.934701Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:48:56.935180Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:48:56.936041Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519899007298058647:5349];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-25T14:48:56.936466Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithConstantFoldOpt+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 6929, MsgBus: 19037 2025-06-25T14:47:12.280212Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898605802917006:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:12.280654Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e08/r3tmp/tmpkCyFmP/pdisk_1.dat 2025-06-25T14:47:12.810331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:12.810419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:12.891574Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898605802916807:2080] 1750862832210106 != 1750862832210109 2025-06-25T14:47:12.910311Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:12.912433Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6929, node 1 2025-06-25T14:47:13.196771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:13.196789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:13.196798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:13.196883Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:13.280416Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19037 TClient is connected to server localhost:19037 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:14.327296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:14.353098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:16.420866Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898622982786637:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:16.420950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898622982786649:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:16.421010Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:16.425207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:16.448480Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898622982786651:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:16.543911Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898622982786702:2341] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:16.879440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:17.132496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898627277754245:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:17.132811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898627277754245:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:17.133096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898627277754245:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:17.133216Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898627277754245:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:17.133313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898627277754245:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:17.133424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898627277754245:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:17.133527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898627277754245:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:17.133625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898627277754245:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:17.133711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898627277754245:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:17.133829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519898627277754245:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:17.133933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898627277754245:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:17.137451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898627277754248:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:17.137504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898627277754248:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:17.137727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898627277754248:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:17.137818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898627277754248:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:17.137933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898627277754248:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:17.138026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898627277754248:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:17.138111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898627277754248:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:17.138199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898627277754248:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:17.138284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898627277754248:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; ... 
14162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.323368Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.323944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.324833Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.325288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.330611Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.335741Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.336223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.336224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.341285Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.341917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.341969Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.342494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.347421Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.347500Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.348006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.348006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.353657Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.354242Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.357117Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.357706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.359557Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.360109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.365461Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.366430Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.368864Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.369361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.371716Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.372268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.377302Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.377842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.379844Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.380336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.386742Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.387294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.389074Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.389592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.392870Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.401194Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.437612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:45.443241Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:45.575002Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykryxp88tqde6npf6c32dk0", SessionId: ydb://session/3?node_id=1&id=YzJmMTYzYTEtOWJjNWRkNjgtOTE0MGVhMGQtOTc2NzBhNA==, Slow query, duration: 38.973977s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:45.861965Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:45.862403Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:45.862818Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519898897860738281:8223];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-25T14:48:45.863241Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS95+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 31774, MsgBus: 24596 2025-06-25T14:46:53.465222Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898525403735005:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:53.476654Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e29/r3tmp/tmphJjkr5/pdisk_1.dat 2025-06-25T14:46:54.140563Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:54.159179Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:54.159265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:54.162444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31774, node 1 2025-06-25T14:46:54.448962Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:46:54.469075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:54.469093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:54.469103Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:54.469208Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
TClient is connected to server localhost:24596 TClient is connected to server localhost:24596 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:55.432327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:55.481999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:46:57.786201Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898542583604633:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:57.786347Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:57.786643Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898542583604645:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:46:57.790578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:46:57.812186Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898542583604647:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:46:57.876762Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898542583604698:2339] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:46:58.278682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:46:58.452497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898525403735005:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:58.452584Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:46:58.690047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898546878572246:2318];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:58.690265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898546878572246:2318];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:58.690529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898546878572246:2318];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:58.690672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898546878572246:2318];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:58.690809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898546878572246:2318];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:58.690934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898546878572246:2318];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:58.691046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898546878572246:2318];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:58.691149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898546878572246:2318];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:58.691275Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898546878572246:2318];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:46:58.691401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898546878572246:2318];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:46:58.691495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898546878572246:2318];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:46:58.691582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898546878572248:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:46:58.691607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898546878572248:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:46:58.691724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898546878572248:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:46:58.691832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898546878572248:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:46:58.691924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898546878572248:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:46:58.692013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898546878572248:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:46:58.692110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898546878572248:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:46:58.692191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898546878572248:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:46:58.697169Z node 1 :TX_COLUMNSHARD WARN: ... 
anager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.669972Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.672149Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.672565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.676783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.681249Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.681763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.685754Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.686226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.690549Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.691066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.695101Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.695600Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.699900Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.704555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.708488Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.708928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.713141Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.713531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039269;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.717627Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.718169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.722451Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039269;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.723103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.727115Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.727883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.733366Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.734877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.740145Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.740574Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.740926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.741140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.746674Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.747086Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.747673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:21.752923Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:48:21.998980Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykry79gbyv7mbntq0wk8qw3", SessionId: ydb://session/3?node_id=1&id=Y2YwM2RhYzgtNzY3YmVkYzEtNDYwZWEyNTQtODU5OTVlMzk=, Slow query, duration: 38.333999s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:22.583994Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:48:22.584532Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:48:22.584782Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519898864706207792:9860];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-25T14:48:22.585224Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:48:53.408629Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrztjs0fb3k7fc1t9nws14", SessionId: ydb://session/3?node_id=1&id=Y2YwM2RhYzgtNzY3YmVkYzEtNDYwZWEyNTQtODU5OTVlMzk=, Slow query, 
duration: 17.223096s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n-- NB: Subquerys\n$ws_wh =\n(select ws1.ws_order_number ws_order_number,ws1.ws_warehouse_sk wh1,ws2.ws_warehouse_sk wh2\n from web_sales ws1 cross join web_sales ws2\n where ws1.ws_order_number = ws2.ws_order_number\n and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk);\n-- start query 1 in stream 0 using template query95.tpl and seed 2031708268\n select\n count(distinct ws1.ws_order_number) as `order count`\n ,sum(ws_ext_ship_cost) as `total shipping cost`\n ,sum(ws_net_profit) as `total net profit`\nfrom\n web_sales ws1\n cross join date_dim\n cross join customer_address\n cross join web_site\nwhere\n cast(d_date as date) between cast('2002-4-01' as date) and\n (cast('2002-4-01' as date) + DateTime::IntervalFromDays(60))\nand ws1.ws_ship_date_sk = d_date_sk\nand ws1.ws_ship_addr_sk = ca_address_sk\nand ca_state = 'AL'\nand ws1.ws_web_site_sk = web_site_sk\nand web_company_name = 'pri'\nand ws1.ws_order_number in (select ws_order_number\n from $ws_wh)\nand ws1.ws_order_number in (select wr_order_number\n from web_returns cross join $ws_wh ws_wh\n where wr_order_number = ws_wh.ws_order_number)\norder by `order count`\nlimit 100;\n", parameters: 0b
>> TxUsage::WriteToTopic_Demo_48_Query [GOOD]
>> KqpJoinOrder::CanonizedJoinOrderTPCH21
>> TxUsage::WriteToTopic_Demo_50_Query
>> KqpJoinOrder::TPCDS34+ColumnStore
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS92+ColumnStore [GOOD]
Test command err:
Trying to start YDB, gRPC: 22847, MsgBus: 29374 2025-06-25T14:46:57.549603Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898540675242968:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:46:57.549630Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e1e/r3tmp/tmpKLEb1F/pdisk_1.dat 2025-06-25T14:46:58.189507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:46:58.189613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:46:58.194385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:46:58.244301Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:46:58.250893Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898540675242950:2080] 1750862817545010 != 1750862817545013 TServer::EnableGrpc on GrpcPort 22847, node 1 2025-06-25T14:46:58.477322Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:46:58.477343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:46:58.477355Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:46:58.477449Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-06-25T14:46:58.596733Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29374 TClient is connected to server localhost:29374 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:46:59.244704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:46:59.257908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:01.816630Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898557855112774:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:01.816735Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:01.817020Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898557855112786:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:01.824343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:01.840499Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898557855112788:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:01.911626Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898557855112839:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:02.286445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:02.552424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898540675242968:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:02.552504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:02.705516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898562150080402:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:02.705728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898562150080402:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:02.706016Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898562150080402:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:02.706129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898562150080402:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:02.706214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898562150080402:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:02.706341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898562150080402:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:02.706451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898562150080402:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:02.706555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898562150080402:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:02.706645Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898562150080402:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:02.706758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898562150080402:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:02.706870Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898562150080402:2322];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:02.709667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898562150080382:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:02.709712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898562150080382:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:02.709898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898562150080382:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:02.709990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898562150080382:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:02.710078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898562150080382:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:02.710174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898562150080382:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:02.710267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898562150080382:2312];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:02.710368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898562150080382:2312];tabl ... 
6-25T14:48:32.259999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039236;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.266942Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.267947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.273387Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.274089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039276;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.276603Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039236;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.277065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039242;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.282333Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039242;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.282951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039272;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.288299Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039272;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.288964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039244;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.291366Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039276;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.291784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039232;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.297922Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039244;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.298591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.304420Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039232;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.304933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.309501Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.310047Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.310544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039208;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.315138Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039208;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.316358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039270;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.320875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.325406Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039270;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.326227Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.331133Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.331780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.333825Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.334336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.339695Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.344830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.349565Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.352103Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.549179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:32.563338Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:32.687351Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrydfe8x2p02dq54fzr4dm", SessionId: ydb://session/3?node_id=1&id=MTliNzE4MzUtZWE3YWZiNDUtYmFmYTE3N2UtMzdjNzFjNDQ=, Slow query, duration: 42.650254s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:33.376286Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:33.376831Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:33.379858Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519898781193447893:6980];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-25T14:48:33.380384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:55.129381Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks01qb1gmcgm2ytzgjggnx", SessionId: ydb://session/3?node_id=1&id=MTliNzE4MzUtZWE3YWZiNDUtYmFmYTE3N2UtMzdjNzFjNDQ=, Slow query, duration: 11.629068s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n$bla = (\n SELECT\n web_sales.ws_item_sk bla_item_sk,\n avg(ws_ext_discount_amt) bla_ext_discount_amt\n FROM\n web_sales\n cross join date_dim\n WHERE\n cast(d_date as date) between cast('2001-03-12' as date) and\n (cast('2001-03-12' as date) + DateTime::IntervalFromDays(90))\n and d_date_sk = ws_sold_date_sk\n group by web_sales.ws_item_sk\n );\n\n-- start query 1 in stream 0 using template query92.tpl and seed 2031708268\nselect\n sum(ws_ext_discount_amt) as `Excess Discount Amount`\nfrom\n web_sales\n cross join item\n cross join date_dim\n join $bla bla on (item.i_item_sk = bla.bla_item_sk)\nwhere\ni_manufact_id = 356\nand i_item_sk = ws_item_sk\nand cast(d_date as date) between cast('2001-03-12' as date) and\n (cast('2001-03-12' as date) + DateTime::IntervalFromDays(90))\nand d_date_sk = ws_sold_date_sk\nand ws_ext_discount_amt\n > 1.3 * bla.bla_ext_discount_amt\norder by `Excess Discount Amount`\nlimit 100;\n", parameters: 0b
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::DatetimeConstantFold+ColumnStore [GOOD]
Test command err:
Trying to start YDB, gRPC: 20743, MsgBus: 7608 2025-06-25T14:47:22.790329Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898650415273985:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:22.790531Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000df3/r3tmp/tmplfQvTX/pdisk_1.dat 2025-06-25T14:47:23.357099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:23.357152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:23.361481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:23.408361Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:23.408949Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898650415273799:2080] 1750862842711810 != 1750862842711813 TServer::EnableGrpc on GrpcPort 20743, node 1 2025-06-25T14:47:23.636892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:23.636912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:23.636919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:23.637028Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad
distributable configuration 2025-06-25T14:47:23.800360Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7608 TClient is connected to server localhost:7608 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:24.704670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:24.777141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:26.791459Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898667595143632:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:26.791591Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:26.791966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898667595143644:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:26.796826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:26.811126Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898667595143646:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:26.891380Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898667595143699:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:27.326879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:27.639604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898671890111239:2310];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:27.639836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898671890111239:2310];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:27.640105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898671890111239:2310];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:27.640255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898671890111239:2310];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:27.640378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898671890111239:2310];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:27.640478Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898671890111239:2310];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:27.640568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898671890111239:2310];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:27.640670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898671890111239:2310];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:27.640762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898671890111239:2310];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:27.640855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037900;self_id=[1:7519898671890111239:2310];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:27.640949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898671890111239:2310];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:27.649852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898671890111245:2316];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:27.649909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898671890111245:2316];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:27.650104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898671890111245:2316];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:27.650199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898671890111245:2316];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:27.650290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898671890111245:2316];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:27.650377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898671890111245:2316];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:27.650463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898671890111245:2316];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:27.650554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898671890111245:2316];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:27.650642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898671890111245:2316];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; ... 
88999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.095152Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.095708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.100281Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.101117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.101671Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.102056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.105988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.106610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.116009Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.119997Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.121344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.124633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.129961Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.130489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.134646Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.135157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.136987Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.137392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039283;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.141985Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039283;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.142487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.148146Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.152160Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.152660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.156848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.161697Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.162187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.165522Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.166022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.171701Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.174393Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.175008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.177007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.183700Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.184263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.185952Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.186475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.195551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.196038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:54.198971Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.199996Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:54.459021Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrz7xdc95pjfh26q030w2y", SessionId: ydb://session/3?node_id=1&id=YTYwZTNlMjctYWUzYWEyMjEtZmVhZWE1YjMtZDA3ZmViNzc=, Slow query, duration: 37.389466s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:54.802144Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:54.802873Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:54.803220Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519898942473095483:8221];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-25T14:48:54.803606Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
>> KqpJoinOrder::CanonizedJoinOrderTPCH4
>> TxUsage::Transactions_Conflict_On_SeqNo_Query [GOOD]
>> KqpJoinOrder::SortingsWithLookupJoin2+RemoveLimitOperator [GOOD]
>> KqpIndexLookupJoin::CheckCastInt32ToInt16-StreamLookupJoin-NotNull [GOOD]
>> KqpIndexLookupJoin::CheckCastInt32ToInt16-StreamLookupJoin+NotNull
>> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other
>> KqpIndexLookupJoin::InnerJoinLeftFilter+StreamLookup
>> KqpJoin::JoinLeftPureInnerConverted
>> OlapEstimationRowsCorrectness::TPCH9 [GOOD]
>> KqpIndexLookupJoin::SimpleLeftSemiJoin-StreamLookup [GOOD]
>> KqpJoin::AllowJoinsForComplexPredicates+StreamLookup
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin2+RemoveLimitOperator [GOOD]
Test command err:
Trying to start YDB, gRPC: 16594, MsgBus: 15142 2025-06-25T14:48:14.160788Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898871176830186:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:14.189124Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000da1/r3tmp/tmpsfyvHO/pdisk_1.dat 2025-06-25T14:48:14.725220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:14.725325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:14.745927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:14.833108Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:14.836471Z node 1 :CONFIGS_DISPATCHER ERROR:
configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898871176830159:2080] 1750862894157093 != 1750862894157096 TServer::EnableGrpc on GrpcPort 16594, node 1 2025-06-25T14:48:15.028020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:15.028038Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:15.028045Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:15.028161Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:15.182146Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15142 TClient is connected to server localhost:15142 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:16.002463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:16.017134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:18.402099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898888356699991:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:18.402233Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:18.402679Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898888356700003:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:18.406680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:18.435168Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898888356700005:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:18.520569Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898888356700057:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:18.859890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.027253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.062511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.104753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.133337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.161216Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898871176830186:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:19.161263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:19.297511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.339163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.376971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.407498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.439506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.529284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.562885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.601230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:20.365840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.507535Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.508200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.510634Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038544;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.511133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.512518Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.512998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.515790Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.516273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.517322Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.517789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.520669Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038582;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.521180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.522586Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.523462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.525717Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.526276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.527978Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.529538Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.531585Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.532110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.534728Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.535163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.536972Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.537492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.540559Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038526;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.541067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.542728Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.543228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.546209Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.546678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.550402Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.550892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.554411Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.554863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.558309Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.558771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.562200Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.562750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:02.566165Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.608338Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.637986Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrzdbzetsk1hxq95301wmr", SessionId: ydb://session/3?node_id=1&id=NWM5MzRhNGItMjgyNWZmZmMtODgyYjI1ZWQtZWYzODUyNDI=, Slow query, duration: 39.982364s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:02.932158Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:02.932674Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:02.934631Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519898969961094888:4208];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-25T14:49:02.935070Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
:8:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504
:8:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504 >> KqpJoinOrder::TPCDS96-ColumnStore [GOOD] >> KqpJoin::JoinLeftPureInner ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH9 [GOOD] Test command err: Trying to start YDB, gRPC: 8369, MsgBus: 28724 2025-06-25T14:47:13.765066Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898608789117361:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:13.765907Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e04/r3tmp/tmpsiJTxk/pdisk_1.dat 2025-06-25T14:47:14.405326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:14.405449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:14.410264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:14.473750Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:14.476538Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898608789117336:2080] 1750862833738430 != 1750862833738433 TServer::EnableGrpc on GrpcPort 8369, node 1 2025-06-25T14:47:14.728772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:14.728789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:14.728796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:14.728896Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:14.806870Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28724 TClient is connected to server localhost:28724 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:47:15.931862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:15.961395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:18.112525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898630263954473:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.112597Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898630263954463:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.112918Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:18.116405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:18.130294Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898630263954477:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:18.191290Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898630263954528:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:18.545900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:18.828438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898630263954748:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:18.828646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898630263954748:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:18.828881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898630263954748:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:18.828989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898630263954748:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:18.829109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898630263954748:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:18.829217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898630263954748:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:18.829316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898630263954748:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:18.829412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898630263954748:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:18.829515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898630263954748:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:18.829609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037900;self_id=[1:7519898630263954748:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:18.829700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898630263954748:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:18.840068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898630263954740:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:18.840136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898630263954740:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:18.848806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898630263954740:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:18.849022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898630263954740:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:18.849147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898630263954740:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:18.849278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898630263954740:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:18.849367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898630263954740:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:18.849484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898630263954740:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:18.849573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898630263954740:2313];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; ... 
24480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.431832Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.432954Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.433394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.440421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039356;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.442321Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.442881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.454171Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039356;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.454705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039358;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.458725Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.459215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.464169Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.466641Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039358;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.467140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.468946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.471959Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.477705Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.478280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.481501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.486568Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.487158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.488375Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.488840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.499850Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.501218Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.501925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.502994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.507291Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.510567Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.511519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.517893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.524233Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.533113Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.533776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.534418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.539148Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.539717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.543537Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.546702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:50.551869Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039237;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.552260Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:50.774757Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykryxtnekr6g2xaray1vv07", SessionId: ydb://session/3?node_id=1&id=NTBkNDdhYzUtZjYyZTExMi0zMDY4ZWNlMi02MjY2OGVhMw==, Slow query, duration: 44.032534s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:51.438674Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:51.439164Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:51.439786Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519898905141906206:8251];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-25T14:48:51.440210Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS96-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 18437, MsgBus: 17521 2025-06-25T14:48:14.859666Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898872328574401:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:14.860022Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000da0/r3tmp/tmpiNaYqd/pdisk_1.dat 2025-06-25T14:48:15.449295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:15.449404Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:15.452485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:15.498308Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:15.500452Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898872328574284:2080] 1750862894783445 != 1750862894783448 TServer::EnableGrpc on GrpcPort 18437, node 1 2025-06-25T14:48:15.844878Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:15.844898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:15.844905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:15.845037Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: 
got bad distributable configuration 2025-06-25T14:48:15.872428Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17521 TClient is connected to server localhost:17521 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:16.705925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:19.130282Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898893803411414:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.130408Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.130653Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898893803411426:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:19.134305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:19.161100Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898893803411428:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:19.223300Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898893803411479:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:19.548594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.695616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.738250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.771477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.810258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:19.840873Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898872328574401:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:19.840947Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:19.984426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:20.056447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:20.096895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:20.153560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:20.187750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:20.214330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:20.256329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:20.290588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:20.927226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
50477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.453664Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.454072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.454386Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.454765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.458824Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.459330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.462342Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.462842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.468124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.468647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.472526Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.473031Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.477135Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038529;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.477648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038515;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.481818Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.482264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.486330Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038531;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.486859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.490936Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.491463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.493959Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038515;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.494456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.498726Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.499246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.503535Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.503965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.507394Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.507931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.511851Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038507;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.516069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.516513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.520729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.524441Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.524893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.526519Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.526944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.531015Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.534436Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.535003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:00.544998Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038537;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:00.737114Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrze18dmjzdvy9jrn1pqz1", SessionId: ydb://session/3?node_id=1&id=MTM3MDQzYWQtNmFkNDQ3YzMtMzlhYWI4ZjUtNzEwNGZjMTQ=, Slow query, duration: 37.399985s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:01.355046Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:01.355513Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:01.356443Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519899026947423812:5482];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-25T14:49:01.356833Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::TPCDSEveryQueryWorks+ColumnStore >> OlapEstimationRowsCorrectness::TPCH3 [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCDS64_small [GOOD] >> KqpJoinOrder::TestJoinOrderHintsComplex+ColumnStore [GOOD] >> KqpJoinOrder::FiveWayJoin-ColumnStore >> KqpJoinOrder::Sortings4Year-RemoveLimitOperator [GOOD] >> KqpIndexLookupJoin::CheckCastInt32ToInt16-StreamLookupJoin+NotNull [GOOD] >> KqpJoin::JoinLeftPureInnerConverted [GOOD] >> KqpJoin::JoinMismatchDictKeyTypes >> KqpJoinOrder::CanonizedJoinOrderTPCH17 [GOOD] >> KqpIndexLookupJoin::InnerJoinLeftFilter+StreamLookup [GOOD] >> KqpIndexLookupJoin::InnerJoinLeftFilter-StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::Sortings4Year-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 28249, MsgBus: 11663 2025-06-25T14:48:28.645598Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898932118213736:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:28.645887Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d92/r3tmp/tmpZHTfbQ/pdisk_1.dat 2025-06-25T14:48:29.382278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:29.382406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:29.399141Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:29.456947Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:29.460460Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: 
Notification cookie mismatch for subscription [1:7519898932118213554:2080] 1750862908581459 != 1750862908581462 TServer::EnableGrpc on GrpcPort 28249, node 1 2025-06-25T14:48:29.607978Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:48:29.729157Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:29.729175Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:29.729185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:29.729285Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11663 TClient is connected to server localhost:11663 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:30.731651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:30.764546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:32.735874Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898949298083382:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:32.735995Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:32.736258Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898949298083394:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:32.740200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:32.752729Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898949298083396:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:32.840880Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898949298083449:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:33.496042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:33.620459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898932118213736:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:33.620548Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:33.630554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:33.667149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:33.708240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:33.785636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:34.003226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:34.064435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:34.118514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:34.201321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:34.259355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:34.314242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:34.407117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:34.456026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:35.221019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
86820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038568;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.392228Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.396045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.401476Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038568;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.401995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.407911Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038540;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.411176Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.411792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.412553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.420615Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.421257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.424906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.425373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.430851Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.431449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.434629Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.435071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.443796Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.445154Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.445593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.448882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.455069Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.455710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.458229Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.458810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.469149Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.469990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.472859Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.473661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.479996Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.481807Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.482401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.485039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.490808Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.491389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.493533Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.494082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.498647Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.504208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.511997Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.516233Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.665165Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrzw4qarr15m64y8ycdrv6", SessionId: ydb://session/3?node_id=1&id=YWRjZDY4MGYtNDRjM2E4MzMtM2IxMzMwMTgtNDIzZDJhNWI=, Slow query, duration: 35.880483s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:14.225270Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:14.225713Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:14.226330Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519899095326999740:5778];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-25T14:49:14.226710Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::FiveWayJoinWithPredsAndEquiv-ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastInt32ToInt16-StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 17163, MsgBus: 2652 2025-06-25T14:49:03.359121Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899081428968906:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:03.359514Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d79/r3tmp/tmprAgxGY/pdisk_1.dat 2025-06-25T14:49:04.016511Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899081428968704:2080] 1750862943281407 != 1750862943281410 2025-06-25T14:49:04.042410Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:04.057485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:04.057563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:04.065070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17163, node 1 2025-06-25T14:49:04.284472Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:04.360393Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:04.360415Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:04.360422Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:04.360515Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2652 TClient is connected to server localhost:2652 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:05.279170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:05.303837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:05.315358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:05.486017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:05.682682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:49:05.764803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:07.570248Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899098608839527:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:07.570354Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:07.865660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:07.940848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:07.978527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.017119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.052332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.143942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.207226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.280272Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899102903807483:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:08.280355Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:08.280474Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899102903807488:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:08.283731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:08.296886Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899102903807490:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:08.351218Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899081428968906:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:08.351265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:08.403793Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899102903807541:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:09.519542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:11.419132Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:11.421710Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:11.428450Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899117429026483:2080] 1750862951159719 != 1750862951159722 2025-06-25T14:49:11.430808Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11501, node 2 2025-06-25T14:49:11.632806Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:11.632823Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:11.632831Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:11.632938Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15457 2025-06-25T14:49:12.192437Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15457 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:12.390337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:12.395538Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:12.402289Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:12.482965Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:12.756567Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:12.871670Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:16.090063Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899138903864580:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:16.090142Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:16.156916Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.192518Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.194002Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899117429026704:2237];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:16.194121Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:16.235667Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.278440Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.324864Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.402899Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.464768Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.564436Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519899138903865242:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:16.564564Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:16.564923Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899138903865247:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:16.568251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:16.582061Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:49:16.582279Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899138903865249:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:16.683006Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899138903865300:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:17.905251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:18.010620Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinOrderHintsComplex+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 21399, MsgBus: 15886 2025-06-25T14:47:27.582086Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898669190915572:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:27.587012Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000de6/r3tmp/tmp0FLXZH/pdisk_1.dat 2025-06-25T14:47:28.215327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:28.215423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:28.218830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:28.268475Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898669190915534:2080] 1750862847553307 != 1750862847553310 2025-06-25T14:47:28.285336Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21399, node 1 2025-06-25T14:47:28.461966Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:28.461993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:28.462040Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:28.462185Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:28.536997Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is 
connected to server localhost:15886 TClient is connected to server localhost:15886 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:29.447210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:31.562134Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898686370785363:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:31.562327Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:31.563751Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898686370785375:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:31.577533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:31.613327Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898686370785378:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:31.692712Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898686370785430:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:32.108397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:32.436653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898690665752981:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:32.436872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898690665752981:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:32.437208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898690665752981:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:32.437337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898690665752981:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:32.437458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898690665752981:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:32.437574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898690665752981:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:32.437675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898690665752981:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:32.437774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898690665752981:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:32.437877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898690665752981:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:32.437987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[1:7519898690665752981:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:32.438094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898690665752981:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:32.470475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898690665753008:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:32.470575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898690665753008:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:32.470787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898690665753008:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:32.470892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898690665753008:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:32.470995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898690665753008:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:32.471101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898690665753008:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:32.471204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898690665753008:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:32.471310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898690665753008:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:32.471435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898690665753008:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:32.471542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898690665753008:2321];tablet_id=72075186224037900;process=TTxInitSchema:: ... 
: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.514914Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.515495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.518260Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.518721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.520461Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039287;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.521022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.529940Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039333;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.530670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.533370Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.533878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.539503Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.541848Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.542509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.544929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.551777Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039299;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.552374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.556677Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.557116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.561794Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.562432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.565754Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.566254Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.572165Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.572940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.573734Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.574266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.578078Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.578483Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.578622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.578929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.583888Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.587306Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.588041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.592265Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.592869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.597383Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.597949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.606616Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039233;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.852369Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrzcjpeyhytm0j0xwsfdck", SessionId: ydb://session/3?node_id=1&id=MzNhYTJkNzEtOTQ1MjNhMDgtOTdlMzlmODYtODI4ZTliNGY=, Slow query, duration: 40.005831s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:02.266929Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:02.267619Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:02.268200Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899029968224924:10011];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-25T14:49:02.268608Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
: Warning: Execution, code: 1060
: Warning: Unapplied hint: JoinOrder( (Unused1 Unused2) (Unused3 Unused4) ), code: 4534
: Warning: Unapplied hint: Rows(Unused # 10e8), code: 4534
: Warning: Unapplied hint: Rows(R T # 1), code: 4534
: Warning: Execution, code: 1060
: Warning: Unapplied hint: JoinOrder( (Unused1 Unused2) (Unused3 Unused4) ), code: 4534
: Warning: Unapplied hint: Rows(Unused # 10e8), code: 4534
: Warning: Unapplied hint: Rows(R T # 1), code: 4534 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH3 [GOOD] Test command err: Trying to start YDB, gRPC: 15394, MsgBus: 4968 2025-06-25T14:47:27.476397Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898668639960950:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:27.476708Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dea/r3tmp/tmpW9KBMS/pdisk_1.dat 2025-06-25T14:47:28.031759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:28.031840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:28.110023Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898668639960768:2080] 1750862847433777 != 1750862847433780 2025-06-25T14:47:28.146061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:28.149340Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15394, node 1 2025-06-25T14:47:28.378584Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:28.378605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:28.378612Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:28.378697Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:28.431595Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4968 TClient is connected to server localhost:4968 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:47:29.233916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:31.532243Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898685819830601:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:31.532413Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:31.532697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898685819830613:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:31.536833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:31.547676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:47:31.547881Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898685819830615:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:31.660539Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898685819830666:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:32.021597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:32.357499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898690114798156:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:32.357735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898690114798156:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:32.358047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898690114798156:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:32.358225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898690114798156:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:32.358355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898690114798156:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:32.358487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898690114798156:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:32.358617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898690114798156:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:32.358747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898690114798156:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:32.358883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898690114798156:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:32.359020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7519898690114798156:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:32.359148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898690114798156:2311];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:32.361044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898690114798163:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:32.361088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898690114798163:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:32.361248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898690114798163:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:32.361349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898690114798163:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:32.361462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898690114798163:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:32.361557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898690114798163:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:32.361646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898690114798163:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:32.361757Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898690114798163:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:32.361855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898690114798163:2318];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; ... 
33484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.434253Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.434756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.438468Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.439299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.439605Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.440078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.445405Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.445468Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.445992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.445992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.450915Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.450915Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.451421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.451643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.456274Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.456874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.456892Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.457376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.461652Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.462191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.462535Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.463326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.467100Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.467636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.468190Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.468936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.472634Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.473189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.473462Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.474008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.477858Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.478690Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.478836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.479454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.483857Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.484127Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.484709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.484966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:02.489529Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.489932Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:02.688628Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrzagc658kpdk8d723gv5m", SessionId: ydb://session/3?node_id=1&id=NWM1NTdmNGEtNmRhNmI5NzktZTI2ODM4ZGMtMjJmYzY5NTI=, Slow query, duration: 42.963272s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:02.978511Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:02.980852Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519898904863197044:6782];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-25T14:49:02.981523Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:02.982551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::CanonizedJoinOrderTPCH3 [GOOD] >> TxUsage::WriteToTopic_Demo_50_Query [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCDS64_small [GOOD] Test command err: Trying to start YDB, gRPC: 28974, MsgBus: 16777 2025-06-25T14:47:27.697613Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898671339651492:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:27.697659Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000de7/r3tmp/tmpVyBG9I/pdisk_1.dat 2025-06-25T14:47:28.365901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:28.366017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:28.367763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:28.455148Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28974, node 1 2025-06-25T14:47:28.600811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:28.600830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:28.600837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:28.600926Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:28.683789Z node 1 :TX_CONVEYOR ERROR: 
log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16777 TClient is connected to server localhost:16777 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:29.686363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:29.715736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:32.134107Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898692814488585:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:32.134234Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:32.134613Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898692814488597:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:32.138777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:32.154829Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898692814488599:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:32.236193Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898692814488650:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:32.704571Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898671339651492:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:32.704620Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:32.722879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:33.017349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898692814488879:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:33.017534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898692814488879:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:33.017801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898692814488879:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:33.017902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898692814488879:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:33.017990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898692814488879:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:33.018084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898692814488879:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:33.018213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898692814488879:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:33.018298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898692814488879:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:33.018420Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898692814488879:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:33.018569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898692814488879:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:33.018683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519898692814488879:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:33.020526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898692814488877:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:33.020574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898692814488877:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:33.020740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898692814488877:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:33.020863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898692814488877:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:33.020942Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898692814488877:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:33.021020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898692814488877:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:33.021131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898692814488877:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:33.021211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898692814488877:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:33.021578Z node 1 :TX_COLUMNSHARD WARN: ... 
5760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.229962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.230580Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039267;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.230982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.235226Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.236088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.240883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.244891Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.245432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.246852Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.247406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.251702Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.256805Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.257760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.260790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.267102Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.267632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.272208Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.273411Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.273843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.278267Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.278827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.280957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.285331Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.292676Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.309965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.310018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.315041Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.318650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.323106Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.324953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.325873Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.329371Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.343071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.349557Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.453967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039372;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.462757Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.464963Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039372;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.467176Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:01.467242Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.471151Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:01.539785Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrza4gacvpc7t0ens6tayd", SessionId: ydb://session/3?node_id=1&id=MTVkNjhmY2MtZTkzNzkzZmMtYTNlOTBkMGEtZTdiZTJmOTk=, Slow query, duration: 42.193284s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:02.266613Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:02.267071Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:02.267969Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899027821994124:10070];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-25T14:49:02.268386Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::FiveWayJoinWithPreds+ColumnStore [GOOD] >> KqpJoinOrder::SortingsByPKWithLookupJoin-RemoveLimitOperator [GOOD] >> KqpJoin::JoinLeftPureInner [GOOD] >> KqpJoin::JoinLeftPureFull >> KqpJoinOrder::ShuffleEliminationDifferentJoinPredicateKeyTypeCorrectness2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithPredsAndEquiv-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 25574, MsgBus: 12270 2025-06-25T14:48:30.364195Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898940002225121:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:30.364271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d91/r3tmp/tmp97FZX4/pdisk_1.dat 2025-06-25T14:48:31.055248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:31.055327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:31.057012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:31.180464Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898940002224911:2080] 1750862910225894 != 1750862910225897 2025-06-25T14:48:31.209625Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25574, node 1 2025-06-25T14:48:31.404481Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:48:31.480811Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:31.480840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:31.480849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:31.480946Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12270 TClient is connected to server localhost:12270 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:32.513330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:35.009259Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898961477062035:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:35.009402Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:35.020480Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898961477062047:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:35.028868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:35.055421Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898961477062049:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:35.136478Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898961477062100:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:35.362906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898940002225121:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:35.362979Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:35.623759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:35.871330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:35.901252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:35.969711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:36.014822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:36.204763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:36.242324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:36.275419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:36.325048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:36.365175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:36.414429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:36.454488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:36.488732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:37.346240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
09637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.012993Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.013488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.013743Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038628;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.014163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.017483Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.017878Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.017948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.018351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.022024Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038634;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.022573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.027080Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.027298Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.027595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.027738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.032101Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.032101Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.032622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.033157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.037013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.037093Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.037635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.037735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.042676Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.042685Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.043184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.043184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.047777Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.047825Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.048340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.048400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.053145Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.053168Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038611;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.053573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.054038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.058229Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.058245Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.058756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.058812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:13.063489Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.068288Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:13.158414Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrzy1r4m3x9nyy02ewrckg", SessionId: ydb://session/3?node_id=1&id=MjEzZjFmYzAtYWZiMDgyM2YtMTIwNDhiYjctMjExNmM3ODc=, Slow query, duration: 33.421265s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:13.525627Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:13.526104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:13.527082Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519898991541839200:2874];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038629; 2025-06-25T14:49:13.527450Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::CanonizedJoinOrderTPCDS78 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH17 [GOOD] Test command err: Trying to start YDB, gRPC: 9947, MsgBus: 21914 2025-06-25T14:47:27.780957Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898668135592659:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:27.781030Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000de1/r3tmp/tmppyjlKa/pdisk_1.dat 2025-06-25T14:47:28.401684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:28.401762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:28.406752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:28.437203Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9947, node 1 2025-06-25T14:47:28.692801Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:28.692821Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:28.692829Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:28.692921Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:28.840540Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21914 TClient is connected to server localhost:21914 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:29.581935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:29.601201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:32.078900Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898689610429767:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:32.079019Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:32.079325Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898689610429779:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:32.083296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:32.113193Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898689610429781:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:32.186566Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898689610429832:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:32.741525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:32.782656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898668135592659:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:32.782714Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:33.147213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898689610430078:2314];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:33.147454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898689610430078:2314];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:33.147751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898689610430078:2314];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:33.147859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898689610430078:2314];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:33.147956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898689610430078:2314];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:33.148061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898689610430078:2314];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:33.148175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898689610430078:2314];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:33.148277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898689610430078:2314];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:33.156566Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898689610430078:2314];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:33.156800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898689610430078:2314];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:33.156951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898689610430078:2314];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:33.195441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898693905397565:2327];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:33.195532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898693905397565:2327];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:33.195764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898693905397565:2327];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:33.195884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898693905397565:2327];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:33.195995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898693905397565:2327];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:33.196103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898693905397565:2327];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:33.196210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898693905397565:2327];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:33.204420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898693905397565:2327];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:33.204611Z node 1 :TX_COLUMNSHARD WARN: lo ... 
3.014524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.020726Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.021737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.027356Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.027806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039316;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.034336Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.034757Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.038560Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039316;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.038934Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.039012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.040091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.043685Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.044194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.052289Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.052438Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.052876Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.056868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.060890Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.061561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.066453Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.066935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.070351Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.070937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.075823Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.080253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.084437Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.084967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.089841Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.090408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.095387Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.095917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.097051Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.101633Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.102418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.106769Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.107334Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.111602Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.112119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.116918Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.117450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:03.121472Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:03.368789Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrzbvce0pq44qewqtc2yyz", SessionId: ydb://session/3?node_id=1&id=YjVkMjQ1MmUtYzQwNGM1ZS1iYWU5NDY1Ny1iYzg1NzVj, Slow query, duration: 42.267530s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:03.835790Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:03.836213Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:03.836678Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899033207870155:10133];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-25T14:49:03.837055Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoin::RightTableKeyPredicate >> KqpJoinOrder::TPCDS87+ColumnStore [GOOD] >> KqpJoinOrder::TestJoinOrderHintsSimple+ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH8 >> KqpJoin::AllowJoinsForComplexPredicates+StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_50_Query [GOOD] Test command err: 2025-06-25T14:40:58.001114Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896994715900038:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:58.009353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:58.539998Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001994/r3tmp/tmpA6IgzM/pdisk_1.dat 2025-06-25T14:40:58.888665Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896994715899848:2080] 1750862457977737 != 1750862457977740 2025-06-25T14:40:58.891567Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:58.899636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:58.899730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:58.905484Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11388, node 1 2025-06-25T14:40:58.991381Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:40:59.145125Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001994/r3tmp/yandexchRb3R.tmp 2025-06-25T14:40:59.145151Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001994/r3tmp/yandexchRb3R.tmp 2025-06-25T14:40:59.145323Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001994/r3tmp/yandexchRb3R.tmp 2025-06-25T14:40:59.145454Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:59.225865Z INFO: TTestServer started on Port 8992 GrpcPort 11388 TClient is connected to server localhost:8992 PQClient connected to localhost:11388 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:59.780438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:59.794281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:40:59.808467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:40:59.814398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:40:59.993574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:41:02.306929Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897016190737119:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:02.306929Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897016190737131:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:02.307039Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:02.311578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:02.335507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710662, at schemeshard: 72057594046644480 2025-06-25T14:41:02.341299Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897016190737133:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:41:02.565796Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897016190737197:2444] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:02.600108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:02.780968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:02.860765Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897016190737205:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:41:02.863051Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YmIyYWViNzktNjJlYWQzYWItMzA4OGYzZTctMjUyOGMwNGY=, ActorId: [1:7519897016190737101:2299], ActorState: ExecuteState, TraceId: 01jykrhza1ep7panpxzrjnze6h, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:41:02.869722Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:41:02.931023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:41:03.003924Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896994715900038:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:03.003981Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Subcribe to ClusterTracker from [1:7519897020485704788:2624] === CheckClustersList. Ok 2025-06-25T14:41:08.817727Z :WriteToTopic_Demo_4_Table INFO: TTopicSdkTestSetup started 2025-06-25T14:41:08.837679Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:41:08.869809Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:41:08.870619Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-25T14:41:08.870840Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:41:08.871046Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-25T14:41:08.871076Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:41:08.871093Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-25T14:41:08.871118Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:41:08.871 ... nd: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "test-topic" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/test-topic" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519899106597520501 RawX2: 81604380768 } Partitions { Partition { PartitionId: 0 } } 2025-06-25T14:49:22.350340Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:49:22.356504Z node 19 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-25T14:49:22.360401Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-25T14:49:22.360659Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:49:22.360683Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-25T14:49:22.360703Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976710672, State EXECUTED 2025-06-25T14:49:22.360727Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976710672 State EXECUTED FrontTxId 281474976710672 2025-06-25T14:49:22.360745Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-25T14:49:22.360764Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976710672, NewState WAIT_RS_ACKS 2025-06-25T14:49:22.360782Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976710672 moved from EXECUTED to WAIT_RS_ACKS 2025-06-25T14:49:22.360810Z node 19 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976710672] PredicateAcks: 0/0 2025-06-25T14:49:22.360819Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037892] 
HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-25T14:49:22.360837Z node 19 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976710672] PredicateAcks: 0/0 2025-06-25T14:49:22.360855Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037892] add an TxId 281474976710672 to the list for deletion 2025-06-25T14:49:22.360878Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976710672, NewState DELETING 2025-06-25T14:49:22.360904Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037892] delete key for TxId 281474976710672 2025-06-25T14:49:22.360971Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:49:22.362593Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:49:22.362620Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-25T14:49:22.362637Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976710672, State DELETING 2025-06-25T14:49:22.362658Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037892] delete TxId 281474976710672 2025-06-25T14:49:22.364694Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-25T14:49:22.365066Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-25T14:49:22.365103Z :INFO: [/Root] MessageGroupId [src] SessionId [] Start write session. Will connect to endpoint: localhost:62359 2025-06-25T14:49:22.401685Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-25T14:49:22.408479Z node 19 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-25T14:49:22.408519Z node 19 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-25T14:49:22.412566Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-25T14:49:22.412701Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:60926 2025-06-25T14:49:22.412719Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:60926 proto=v1 topic=test-topic durationSec=0 2025-06-25T14:49:22.412730Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:49:22.418824Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-25T14:49:22.418993Z node 19 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-25T14:49:22.419009Z node 19 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; 
DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T14:49:22.419022Z node 19 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-25T14:49:22.419065Z node 19 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [19:7519899162432096507:2433] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-25T14:49:22.419093Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T14:49:22.419705Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 19, Generation: 1 2025-06-25T14:49:22.419753Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [19:7519899162432096510:2433], now have 1 active actors on pipe 2025-06-25T14:49:22.419842Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-25T14:49:22.419866Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-25T14:49:22.419942Z node 19 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|53890b85-63a89935-d7964c74-f6c34d34_0 generated for partition 0 topic 'test-topic' owner src 2025-06-25T14:49:22.420047Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-25T14:49:22.420097Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:49:22.420238Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-25T14:49:22.420256Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-25T14:49:22.420338Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:49:22.420412Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|53890b85-63a89935-d7964c74-f6c34d34_0 2025-06-25T14:49:22.424976Z :INFO: [/Root] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750862962424 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:49:22.425098Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session established. 
Init response: session_id: "src|53890b85-63a89935-d7964c74-f6c34d34_0" topic: "test-topic" 2025-06-25T14:49:22.426051Z :INFO: [/Root] MessageGroupId [src] SessionId [src|53890b85-63a89935-d7964c74-f6c34d34_0] Write session: close. Timeout = 0 ms 2025-06-25T14:49:22.426094Z :INFO: [/Root] MessageGroupId [src] SessionId [src|53890b85-63a89935-d7964c74-f6c34d34_0] Write session will now close 2025-06-25T14:49:22.426134Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|53890b85-63a89935-d7964c74-f6c34d34_0] Write session: aborting 2025-06-25T14:49:22.426528Z :INFO: [/Root] MessageGroupId [src] SessionId [src|53890b85-63a89935-d7964c74-f6c34d34_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:49:22.426564Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|53890b85-63a89935-d7964c74-f6c34d34_0] Write session: destroy 2025-06-25T14:49:22.436545Z node 19 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|53890b85-63a89935-d7964c74-f6c34d34_0 grpc read done: success: 0 data: 2025-06-25T14:49:22.436573Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|53890b85-63a89935-d7964c74-f6c34d34_0 grpc read failed 2025-06-25T14:49:22.436601Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|53890b85-63a89935-d7964c74-f6c34d34_0 grpc closed 2025-06-25T14:49:22.436615Z node 19 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|53890b85-63a89935-d7964c74-f6c34d34_0 is DEAD 2025-06-25T14:49:22.437294Z node 19 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:49:22.444563Z node 19 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [19:7519899162432096510:2433] destroyed 2025-06-25T14:49:22.444632Z node 19 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
2025-06-25T14:49:22.448480Z :WriteToTopic_Demo_50_Query INFO: Topic created ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithPreds+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 11309, MsgBus: 9542 2025-06-25T14:47:30.682829Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898682829777655:2215];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:30.682876Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dd9/r3tmp/tmpPH2ryk/pdisk_1.dat 2025-06-25T14:47:31.413276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:31.413369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:31.460489Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898682829777480:2080] 1750862850654717 != 1750862850654720 2025-06-25T14:47:31.479073Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:31.479605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11309, node 1 2025-06-25T14:47:31.680399Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:47:31.728768Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:31.728787Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:31.728804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:31.728913Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9542 TClient is connected to server localhost:9542 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:47:32.619164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:32.656878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:47:35.023926Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898704304614607:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:35.024072Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:35.024366Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898704304614619:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:35.028697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:35.041011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:47:35.041858Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898704304614621:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:47:35.108192Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898704304614672:2340] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:35.470793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:35.705395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898704304614925:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:35.705623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898704304614925:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:35.705850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898704304614925:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:35.705968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898704304614925:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:35.706070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898704304614925:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:35.706178Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898704304614925:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:35.706302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898704304614925:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:35.706406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898704304614925:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:35.706510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898704304614925:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:35.706633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7519898704304614925:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:35.706719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519898704304614925:2322];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:35.769943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898704304614920:2317];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:35.770007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898704304614920:2317];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:35.770186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898704304614920:2317];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:35.770277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898704304614920:2317];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:35.770368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898704304614920:2317];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:35.770449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898704304614920:2317];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:35.770524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898704304614920:2317];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:35.770608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898704304614920:2317];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:35.770686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=7207518622403 ... 
13041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.715670Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.716226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.717858Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.718627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.721458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.722044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.724248Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.724743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.728007Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.728553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.729493Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.730044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.733997Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.734578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.734780Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.735614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.740780Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.740806Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.741305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039205;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.741305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.746038Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039205;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.746575Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.746595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039191;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.747151Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.751666Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039191;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.752204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.752217Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.753267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.757746Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.758075Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.758471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039209;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.758557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.763102Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039209;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.763102Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.763641Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039207;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.763640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039199;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.768008Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039199;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.768397Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039207;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.772772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.777788Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:05.855612Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrzg0w7nhvww58ch88jbex", SessionId: ydb://session/3?node_id=1&id=N2VkNjg0OWEtNzFjNGJhZjEtZGJmMDRlNGYtYmQ4NzQzMTM=, Slow query, duration: 40.482592s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:06.216166Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:49:06.216697Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:49:06.217919Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519898983477533358:8255];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-25T14:49:06.218365Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH3 [GOOD] Test command err: Trying to start YDB, gRPC: 27544, MsgBus: 11374 2025-06-25T14:47:30.960276Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898682524685742:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:30.960623Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dd8/r3tmp/tmpsaPoVq/pdisk_1.dat 2025-06-25T14:47:31.540557Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:31.540634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:31.542673Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898682524685630:2080] 1750862850859233 != 1750862850859236 2025-06-25T14:47:31.566303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:31.572481Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27544, node 1 2025-06-25T14:47:31.804860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:31.804881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:31.804888Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:31.804984Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:31.952428Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11374 TClient is connected to server localhost:11374 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:32.847448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:32.904948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:35.258109Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898703999522753:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:35.258229Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:35.258515Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898703999522765:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:35.262115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:35.281950Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898703999522767:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:35.384061Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898703999522818:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:35.936655Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898682524685742:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:35.936962Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:35.952152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:36.320410Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898708294490309:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:36.320666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898708294490309:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:36.320932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898708294490309:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:36.321078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898708294490309:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:36.321201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898708294490309:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:36.321331Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898708294490309:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:36.321453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898708294490309:2313];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:36.322395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898708294490308:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:36.322436Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898708294490308:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:36.322565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898708294490308:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:36.322657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898708294490308:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:36.322740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898708294490308:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:36.322817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898708294490308:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:36.322900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898708294490308:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:36.322991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898708294490308:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:36.323079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898708294490308:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:36.323173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898708294490308:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:36.323274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898708294490308:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:36.329075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519898708294490309:2313];tabl ... 
42884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.652225Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.653265Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.653799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.656898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.662880Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.663424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.666190Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.666760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.671454Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.674695Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.675212Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.676959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.684286Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.686023Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.686606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.689237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.695689Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.697645Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.698248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.701104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.707465Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.710326Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.710893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.713152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.719841Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.722118Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.722648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.724745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.731622Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.733508Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.734145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.736444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.743124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.743698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.745080Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.745520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.754404Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.756039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.757454Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.769141Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:06.036556Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrzjb59nf9wb4c63wbdn7w", SessionId: ydb://session/3?node_id=1&id=ZjcxNDU3MzAtOGVlZmEyYTUtY2QxZWU0MGYtN2NkYTkyM2Y=, Slow query, duration: 38.286163s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:06.388424Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:06.389003Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:06.389658Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519898944517727357:7097];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-25T14:49:06.390133Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::AllowJoinsForComplexPredicates+StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 23238, MsgBus: 29701 2025-06-25T14:49:03.088808Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899077273595810:2085];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d7b/r3tmp/tmpOefLgx/pdisk_1.dat 2025-06-25T14:49:03.463245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:49:03.768431Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899077273595745:2080] 1750862942969488 != 1750862942969491 2025-06-25T14:49:03.785839Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:03.787404Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:03.787507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:03.795825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23238, node 1 2025-06-25T14:49:03.958766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:03.958789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:03.958794Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:03.958880Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:04.169224Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29701 TClient is connected to server localhost:29701 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:04.983417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:05.044922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:05.290446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:05.524082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:05.655605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:49:07.721245Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899098748433852:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:07.721362Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:08.092443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899077273595810:2085];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:08.092492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:08.132734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.184248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.236282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.281689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.325630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.406077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.528175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.666826Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899103043401814:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:08.666899Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:08.667259Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899103043401819:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:08.671408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:08.690952Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899103043401821:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:08.769507Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899103043401874:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:10.156199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:10.218363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation pa ... n.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 14849, MsgBus: 28916 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d7b/r3tmp/tmpATqsc5/pdisk_1.dat 2025-06-25T14:49:12.790236Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899119530006495:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:12.884576Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:49:12.959916Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:12.968454Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899119530006281:2080] 1750862952513006 != 1750862952513009 2025-06-25T14:49:13.001272Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:13.001349Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:13.007939Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14849, node 2 2025-06-25T14:49:13.263163Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:13.263188Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:13.263196Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:13.263324Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28916 2025-06-25T14:49:13.592707Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28916 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:13.940546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:13.946717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:13.954809Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:14.062730Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:14.373479Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:14.534967Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:17.064430Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899141004844382:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:17.064526Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:17.145312Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:17.189629Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:17.235621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:17.275892Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:17.311918Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:17.350126Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:17.396337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:17.504447Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899141004845039:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:17.510601Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:17.511475Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899141004845044:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:17.515448Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:17.538962Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899141004845046:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:17.596532Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899119530006495:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:17.596635Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:17.599068Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899141004845097:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPKWithLookupJoin-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 27278, MsgBus: 23420 2025-06-25T14:48:33.341423Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898952566859311:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:33.341466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d8f/r3tmp/tmpHGsvbP/pdisk_1.dat 2025-06-25T14:48:34.000456Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:34.012422Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898952566859285:2080] 1750862913313629 != 1750862913313632 2025-06-25T14:48:34.033264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:34.033372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:34.042474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27278, node 1 2025-06-25T14:48:34.295092Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:34.295112Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:34.295143Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:34.295255Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:34.378877Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23420 TClient is connected to server localhost:23420 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:35.488277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:38.344447Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898952566859311:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:38.348447Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:38.402676Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898974041696414:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:38.402784Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:38.403040Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898974041696426:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:38.406894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:38.438676Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898974041696428:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:38.542935Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898974041696479:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:39.112029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:39.263754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:39.302007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:39.347902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:39.380353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:39.562069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:39.591965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:39.625130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:39.707017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:39.796461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:39.869668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:39.913247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:39.950796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:41.045054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
15997Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038570;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.916236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.916570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.920014Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.920423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038486;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.922678Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.923245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.923909Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038486;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.924284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038500;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.928182Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.928769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.930673Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038500;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.931253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038558;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.933962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.935150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.936548Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038558;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.937103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038478;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.940687Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.941251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.942220Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038478;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.942730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038480;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.947351Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038480;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.947692Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.948200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038432;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.948205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.955055Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038432;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.955058Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038528;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.955629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038508;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.955740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038560;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.960982Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038508;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.961549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.961686Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038560;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.962412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.967639Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.967673Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038594;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.968199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038494;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.968229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038429;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.973115Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038494;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.973676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038530;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:16.978949Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038530;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:16.981277Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038429;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:17.140787Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks01p10rnsg0a3m5jrgggy", SessionId: ydb://session/3?node_id=1&id=MWRiNmM1OGItYjg5M2NmNTItMmU1ZGMwMjQtOGUxNDU2MzE=, Slow query, duration: 33.682977s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:17.402884Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:17.403166Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519899068530995712:4596];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-25T14:49:17.404052Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:17.404672Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::TPCHRandomJoinViewJustWorks-ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH20 >> KqpJoin::JoinMismatchDictKeyTypes [GOOD] >> KqpJoinOrder::SortingsByPrefixWithAttrEquiToPK-RemoveLimitOperator [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS87+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 10834, MsgBus: 26617 2025-06-25T14:47:16.326770Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898620704650947:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:16.326807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dff/r3tmp/tmpUxBfCw/pdisk_1.dat 2025-06-25T14:47:16.867138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:16.867224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:16.897033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:16.969976Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:16.974128Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898620704650847:2080] 1750862836302795 != 1750862836302798 TServer::EnableGrpc on GrpcPort 10834, node 1 2025-06-25T14:47:17.209553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:17.209572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-06-25T14:47:17.209578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:17.209670Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:17.361582Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26617 TClient is connected to server localhost:26617 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:18.315060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:18.365372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:20.721943Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898637884520678:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:20.722076Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:20.723431Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898637884520690:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:20.727204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:20.755929Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898637884520692:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:20.827857Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898637884520743:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:21.329273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898620704650947:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:21.329325Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:21.506178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:21.794224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898642179488240:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:21.794404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898642179488240:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:21.794637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898642179488240:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:21.794735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898642179488240:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:21.794854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898642179488240:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:21.794945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898642179488240:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:21.795024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898642179488240:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:21.795111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898642179488240:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:21.795228Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898642179488240:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:21.795319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898642179488240:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:21.795413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898642179488240:2316];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:21.797566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898642179488236:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:21.797627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898642179488236:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:21.797831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898642179488236:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:21.797934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898642179488236:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:21.798029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898642179488236:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:21.798118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898642179488236:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:21.798206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898642179488236:2312];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:21.798301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519898642179488236:2312];tabl ... 
ve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.149183Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.149712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.152504Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.153048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.168359Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.168842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.173910Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.174366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.177431Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.178082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.178593Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.179080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.189642Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.190065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.194525Z node 1 :TX_COLUMNSHARD_TX WARN: 
log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.196112Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.196560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.200947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.202399Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.202895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.207089Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.211326Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.211771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.216148Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.216590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.221223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.222486Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.223599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.228038Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.232571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.236704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:48:49.249406Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:48:49.478520Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrz0r5ab6q93zzv271r020", SessionId: ydb://session/3?node_id=1&id=ZWJkZGFiZjAtYTg1MzBhYmMtNTZmZmY4YjgtZmFjZjBlNDQ=, Slow query, duration: 39.744770s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:48:50.208427Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:50.208911Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:48:50.209478Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519898985776930084:10427];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-25T14:48:50.209918Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:16.083361Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks0kmvb1vbyfvcdjyb91gg", SessionId: ydb://session/3?node_id=1&id=ZWJkZGFiZjAtYTg1MzBhYmMtNTZmZmY4YjgtZmFjZjBlNDQ=, Slow query, duration: 14.227075s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "PRAGMA TablePathPrefix='/Root/test/ds';\n\n-- NB: Subquerys\n$bla1 = (select distinct\n COALESCE(c_last_name,'') as c_last_name,\n COALESCE(c_first_name,'') as c_first_name,\n COALESCE(cast(d_date as date), cast(0 as Date)) as d_date\n from store_sales as store_sales\n cross join date_dim as date_dim\n cross join customer as customer\n where store_sales.ss_sold_date_sk = date_dim.d_date_sk\n and 
store_sales.ss_customer_sk = customer.c_customer_sk\n and d_month_seq between 1221 and 1221+11);\n\n$bla2 = ((select distinct\n COALESCE(c_last_name,'') as c_last_name,\n COALESCE(c_first_name,'') as c_first_name,\n COALESCE(cast(d_date as date), cast(0 as Date)) as d_date\n from catalog_sales as catalog_sales\n cross join date_dim as date_dim\n cross join customer as customer\n where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk\n and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk\n and d_month_seq between 1221 and 1221+11)\n union all\n (select distinct\n COALESCE(c_last_name,'') as c_last_name,\n COALESCE(c_first_name,'') as c_first_name,\n COALESCE(cast(d_date as date), cast(0 as Date)) as d_date\n from web_sales as web_sales\n cross join date_dim as date_dim\n cross join customer as customer\n where web_sales.ws_sold_date_sk = date_dim.d_date_sk\n and web_sales.ws_bill_customer_sk = customer.c_customer_sk\n and d_month_seq between 1221 and 1221+11));\n\n-- start query 1 in stream 0 using template query87.tpl and seed 1819994127\nselect count(*)\nfrom $bla1 bla1 left only join $bla2 bla2 using (c_last_name, c_first_name, d_date)\n;\n\n-- end query 1 in stream 0 using template query87.tpl", parameters: 0b >> KqpJoinOrder::SortingsDifferentDirs+RemoveLimitOperator >> KqpJoinOrder::TestJoinHint2-ColumnStore >> KqpJoinOrder::TPCH12_100 >> KqpFlipJoin::RightSemi_2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinMismatchDictKeyTypes [GOOD] Test command err: Trying to start YDB, gRPC: 9007, MsgBus: 24229 2025-06-25T14:49:11.993113Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899117528893128:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:11.993160Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d72/r3tmp/tmp19LtZz/pdisk_1.dat 2025-06-25T14:49:12.655462Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899117528893084:2080] 1750862951986181 != 1750862951986184 2025-06-25T14:49:12.705764Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:12.740825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:12.740899Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:12.749232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9007, node 1 2025-06-25T14:49:13.024538Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:13.052856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:13.052875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:13.052884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:49:13.053001Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24229 TClient is connected to server localhost:24229 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:14.167243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:14.187193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:14.197753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:14.410744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:14.587816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:14.658245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:49:16.382877Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899139003731197:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:16.382969Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:16.644964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.717580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.791413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.835828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.865236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.939893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.982054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.996456Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899117528893128:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:16.996513Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:17.053040Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899143298699157:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:17.053111Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:17.053403Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899143298699162:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:17.058530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:17.071378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:49:17.073059Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899143298699164:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:17.163550Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899143298699218:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:18.492019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is und ... 06-25T14:49:20.463114Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d72/r3tmp/tmp6ilH8N/pdisk_1.dat 2025-06-25T14:49:20.639812Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:20.644490Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899154900366492:2080] 1750862960408263 != 1750862960408266 TServer::EnableGrpc on GrpcPort 13651, node 2 2025-06-25T14:49:20.657426Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:20.657491Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:20.663232Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:20.787950Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:20.787970Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:20.787977Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:20.788073Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65473 TClient is connected to server localhost:65473 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:49:21.350509Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:21.364509Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:49:21.382099Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:21.473924Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:21.511021Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:21.675840Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:21.775489Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:24.288790Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899172080237291:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:24.288861Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:24.352013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.398499Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.439945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.488237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.542850Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.602619Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.658171Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.782178Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899172080237953:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:24.782257Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:24.782471Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899172080237958:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:24.785597Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:24.798053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:49:24.798485Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899172080237960:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:49:24.874800Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899172080238011:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:25.444902Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899154900366699:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:25.445012Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:26.064535Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpFlipJoin::Inner_3 >> KqpJoinOrder::CanonizedJoinOrderTPCH2 [GOOD] >> KqpIndexLookupJoin::InnerJoinLeftFilter-StreamLookup [GOOD] >> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other [GOOD] >> KqpJoinOrder::SortingsByPrefixWithAttrEquiToPK+RemoveLimitOperator ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPrefixWithAttrEquiToPK-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 63042, MsgBus: 5151 2025-06-25T14:48:38.437681Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898974436093051:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:38.455727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d8b/r3tmp/tmpHFaJfl/pdisk_1.dat 2025-06-25T14:48:39.026453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:39.026545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:39.031775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:39.137688Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:39.140501Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898974436092985:2080] 1750862918397150 != 1750862918397153 TServer::EnableGrpc on GrpcPort 63042, node 1 2025-06-25T14:48:39.353164Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:39.353187Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:39.353194Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-25T14:48:39.353289Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:39.465275Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5151 TClient is connected to server localhost:5151 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:40.587204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:40.613275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:42.771429Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898991615962819:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:42.771555Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:42.771855Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898991615962831:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:42.775647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:42.792423Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898991615962833:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:42.873823Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898991615962884:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:43.283788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.433470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.438624Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898974436093051:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:43.438691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:43.508554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.541296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.606547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.740798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.777596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.822792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.853089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.884820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:43.979143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:44.016836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:44.091909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.028430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperat ... 
=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.639581Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.640073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.648927Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.653373Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.653880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.658510Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.658998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.663446Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.663923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.672489Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.672977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.677575Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.678137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.682133Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.682725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.687183Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.687792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.692038Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.696469Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.697046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.700962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.706004Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.706588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.710241Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.711065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.715892Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.720426Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.721063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038495;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.723890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.733087Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038495;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.733581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.736459Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.736989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.742185Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.742757Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.747028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.747534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:20.751906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.756097Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:20.884723Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks05k6c69e58zrpvtp2970", SessionId: ydb://session/3?node_id=1&id=YWI2YmZhMGEtZTQ1YWVkYTEtYTFjZGQ0NzUtYTI1YTA3MDM=, Slow query, duration: 33.421935s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 
NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:21.239837Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:21.240406Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:21.240750Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519899120465007422:5438];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-25T14:49:21.241255Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
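(Editor's note: for readability, the escaped query text in the KQP_SLOW_LOG entry above, the 33.421935s slow query, unescapes to the YQL below. This is only a reconstruction of the logged statement text with whitespace normalized; it is not additional test code.)

    CREATE TABLE t1 (
        id1 Int32 NOT NULL,
        PRIMARY KEY (id1)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t2 (
        id2 Int64 NOT NULL,
        t1_id1 Int64 NOT NULL,
        -- random_field2 Int32
        PRIMARY KEY (id2)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

    CREATE TABLE t3 (
        id3 Int16 NOT NULL,
        -- random_field3 Int32
        PRIMARY KEY (id3)
    ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);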
: Warning: Execution, code: 1060
:4:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503
: Warning: Execution, code: 1060
:4:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503 >> KqpJoinOrder::FiveWayJoinWithPreds-ColumnStore [GOOD] >> TxUsage::TestRetentionOnLongTxAndBigMessages ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::InnerJoinLeftFilter-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 19936, MsgBus: 63081 2025-06-25T14:49:11.955312Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899115029942550:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:11.955508Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d70/r3tmp/tmpFWlAMP/pdisk_1.dat 2025-06-25T14:49:12.506471Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:12.506570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:12.512792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:12.588490Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899115029942330:2080] 1750862951912882 != 1750862951912885 2025-06-25T14:49:12.592620Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19936, node 1 2025-06-25T14:49:12.876610Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:12.876627Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:12.876637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:12.876746Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:12.954273Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63081 TClient is connected to server localhost:63081 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:49:14.062048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:14.085789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:14.099009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:14.306533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:14.497152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:14.603100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:16.433059Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899136504780435:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:16.433160Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:16.731912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.796271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.873278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.901134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:16.952363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899115029942550:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:16.952442Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:16.973443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:17.006303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:17.082912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:17.152758Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899140799748399:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:17.152846Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:17.153138Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899140799748404:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:17.157144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:17.168985Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899140799748406:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:17.230136Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899140799748457:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:18.622462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... d to server localhost:25515 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:22.477393Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:22.488494Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:22.506943Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:22.602322Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:49:22.785137Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:22.917880Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:25.154587Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899176909725553:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:25.154652Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:25.212457Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:25.253661Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:25.301203Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:25.349592Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:25.410075Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:25.479991Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:25.558644Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:25.672718Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899176909726214:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:25.672799Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:25.673325Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899176909726219:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:25.677054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:25.693883Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899176909726221:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:25.772973Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899176909726272:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:26.105041Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899159729854793:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:26.105087Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:27.330220Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:27.380370Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:27.422449Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:27.464209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:27.513312Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:27.601608Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::TestJoinHint1+ColumnStore >> KqpJoin::JoinLeftPureFull [GOOD] >> KqpJoinOrder::FiveWayJoinWithComplexPreds-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH2 [GOOD] Test command err: Trying to start YDB, gRPC: 62963, MsgBus: 11847 2025-06-25T14:47:31.098026Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898686409495263:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:31.098266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dce/r3tmp/tmpDY9v46/pdisk_1.dat 2025-06-25T14:47:31.803690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:31.803796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:31.814081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:31.888607Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898686409495071:2080] 1750862851024892 != 1750862851024895 2025-06-25T14:47:31.900382Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62963, node 1 2025-06-25T14:47:32.181797Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:47:32.182315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:32.182321Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:32.182327Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:32.182418Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11847 TClient is connected to server localhost:11847 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:47:33.254978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:35.583971Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898703589364904:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:35.583992Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898703589364916:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:35.584087Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:35.587989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:35.603869Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898703589364918:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:47:35.708035Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898703589364969:2338] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:36.080980Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898686409495263:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:36.081056Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:36.098127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:36.439666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898707884332558:2324];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:36.439856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898707884332558:2324];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:36.440078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898707884332558:2324];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:36.440166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898707884332558:2324];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:36.440240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898707884332558:2324];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:36.441101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898707884332552:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:36.441143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898707884332552:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:36.441309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898707884332552:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:36.441395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037893;self_id=[1:7519898707884332552:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:36.441472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898707884332552:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:36.441548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898707884332552:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:36.441629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898707884332552:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:36.441711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898707884332552:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:36.441802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898707884332552:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:36.441909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898707884332552:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:36.442001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898707884332552:2321];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:36.444473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898707884332558:2324];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:36.444630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898707884332558:2324];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:36.444712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898707884332558:2324];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:36.444788Z node 1 :TX_COLU ... 
71900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.578678Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.579310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.583148Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.583623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.589190Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.590376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.597264Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.597815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.604184Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.607545Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.608121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039296;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.612982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.617904Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039296;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.618422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.622799Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.623347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.629449Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.630155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.634994Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.635540Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.639799Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.640290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.645146Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.645754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.653890Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.654552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.659515Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.660157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.669901Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.670444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.672624Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.673312Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.676983Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.677696Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.678352Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.678963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.683461Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.684181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:04.690062Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.690963Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:04.880901Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrzckf99xym7czhf828ncr", SessionId: ydb://session/3?node_id=1&id=YzU1ZjIyZDUtMzI1OGI3YmMtYjdjOGY0OC1mNzcyYmE3Ng==, Slow query, duration: 43.008810s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:05.336113Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:49:05.336149Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:49:05.336542Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519898978467317082:8198];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-25T14:49:05.337460Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpJoin::RightTableKeyPredicate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinLeftPureFull [GOOD] Test command err: Trying to start YDB, gRPC: 18446, MsgBus: 5900 2025-06-25T14:49:15.366058Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899135019438864:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:15.395685Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d6f/r3tmp/tmpYj8pKL/pdisk_1.dat 2025-06-25T14:49:16.008327Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:16.011146Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899135019438672:2080] 1750862955309646 != 1750862955309649 2025-06-25T14:49:16.021468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:16.021552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:16.033236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18446, node 1 2025-06-25T14:49:16.352405Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:16.420011Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:16.420030Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T14:49:16.420037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:16.420141Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5900 TClient is connected to server localhost:5900 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:17.343667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:17.382326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:49:17.402642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:17.595465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:17.873421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:49:17.975033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:19.951641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899152199309496:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:19.951748Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:20.185650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:20.240789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:20.319370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:20.356699Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899135019438864:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:20.368418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:20.386535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:20.437894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:20.482537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:20.562634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:20.643042Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899156494277455:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:20.643118Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:20.643463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899156494277460:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:20.647466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:20.667848Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899156494277462:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:49:20.764016Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899156494277514:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 24410, MsgBus: 6325 2025-06-25T14:49:23.532652Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899168879311828:2089];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d6f/r3tmp/tmpaFDIUU/pdisk_1.dat 2025-06-25T14:49:23.650583Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:49:23.783267Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:23.789545Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:23.797995Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899168879311760:2080] 1750862963454060 != 1750862963454063 2025-06-25T14:49:23.811707Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:23.814126Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24410, node 2 2025-06-25T14:49:23.992764Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:23.992784Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:23.992790Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:23.992905Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6325 2025-06-25T14:49:24.540420Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6325 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:24.845102Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:24.853100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:49:24.859610Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:25.052635Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:25.281571Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:25.375794Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:28.009323Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899190354149877:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:28.009388Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:28.070202Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.119489Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.155399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.229942Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.328140Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.420593Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.514772Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.557857Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899168879311828:2089];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:28.559554Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:28.632842Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519899190354150534:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:28.632953Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:28.636406Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899190354150539:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:28.643935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:28.660966Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899190354150541:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:49:28.758958Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899190354150594:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpJoinOrder::ShuffleEliminationDifferentJoinPredicateKeyTypeCorrectness1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithPreds-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 9304, MsgBus: 19089 2025-06-25T14:48:40.684472Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898983226825142:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:40.685501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d86/r3tmp/tmpdMcSgM/pdisk_1.dat 2025-06-25T14:48:41.304561Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898983226825114:2080] 1750862920673110 != 1750862920673113 2025-06-25T14:48:41.322713Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:41.324489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:41.324563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:41.340065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9304, node 1 2025-06-25T14:48:41.648971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:41.648995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:41.649005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:41.649158Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:41.679888Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19089 TClient is connected to server localhost:19089 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:42.712712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:42.729192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:44.886133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899000406694947:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:44.886246Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:44.886566Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899000406694959:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:44.890157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:44.907214Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899000406694961:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:44.995996Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899000406695012:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:45.352732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.491110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.540099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.601326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.648615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.677143Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898983226825142:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:45.677194Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:45.864052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.893520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.925532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:45.983810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:46.022983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:46.055041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:46.123314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:46.152338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:46.805599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subopera ... 
49267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.554607Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038511;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.555225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.555269Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.555731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.565806Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.566163Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.566449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038496;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.566615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.572179Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038496;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.573059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.576003Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.576653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.579454Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.580091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.582498Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.583124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.586561Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.587145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.589552Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.590137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.596232Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038574;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.598105Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.598614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038576;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.600850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.608131Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038576;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.610458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.610969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.612786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.618814Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038554;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.619365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038520;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.619564Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.620198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.625194Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038520;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.625891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038534;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.626274Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038572;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.626906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.633228Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038534;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.633950Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.634040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:21.639610Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:21.728679Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks070aa31cqfackj2fta2t", SessionId: ydb://session/3?node_id=1&id=ZmJhM2Y1YWMtY2NlNDRkMjEtZjBmZTUzNzAtZTMzYjU4YWY=, Slow query, duration: 32.821427s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:22.007386Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:22.007904Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:22.008707Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519899077716122539:4198];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-25T14:49:22.009160Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::RightTableKeyPredicate [GOOD] Test command err: Trying to start YDB, gRPC: 18106, MsgBus: 3116 2025-06-25T14:49:24.829175Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899171814880923:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:24.859770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d69/r3tmp/tmp5P9oKe/pdisk_1.dat 2025-06-25T14:49:25.441445Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:25.441562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:25.446657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:25.456481Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:25.472744Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899171814880741:2080] 1750862964775921 != 1750862964775924 TServer::EnableGrpc on GrpcPort 18106, node 1 2025-06-25T14:49:25.774325Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:25.807834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:25.807856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:25.807975Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:25.808080Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3116 TClient is connected to server localhost:3116 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:26.512333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:26.529754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:49:26.546267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:26.726832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:26.991978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:49:27.126435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:28.866217Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899188994751565:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:28.866310Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:29.170347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:29.242273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:29.276796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:29.340790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:29.405626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:29.460556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:29.538004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:29.643203Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899193289719542:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:29.643292Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:29.643635Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899193289719547:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:29.647481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:29.664866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:49:29.665494Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899193289719549:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:29.757062Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899193289719600:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:29.879304Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899171814880923:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:29.879378Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:31.109485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpIndexLookupJoin::SimpleLeftJoin-StreamLookup >> OlapEstimationRowsCorrectness::TPCH11 >> KqpJoinOrder::TPCDS90+ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH12 [GOOD] >> KqpJoinOrder::TestJoinOrderHintsComplex-ColumnStore >> KqpFlipJoin::RightSemi_2 [GOOD] >> KqpFlipJoin::RightSemi_3 >> LabeledDbCounters::TwoTabletsKillOneTablet [GOOD] >> ShowCreateView::Basic >> KqpFlipJoin::Inner_3 [GOOD] >> KqpFlipJoin::LeftSemi_1 >> KqpJoinOrder::TPCDS94+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH12 [GOOD] Test command err: Trying to start YDB, gRPC: 19188, MsgBus: 21929 2025-06-25T14:47:44.895838Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898741499832686:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:44.914363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dc0/r3tmp/tmpBtqwfb/pdisk_1.dat 2025-06-25T14:47:45.549249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:45.549334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:45.559420Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:45.564477Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898741499832586:2080] 1750862864870150 != 1750862864870153 2025-06-25T14:47:45.582949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19188, node 1 2025-06-25T14:47:45.812738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:45.812757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: 
will try to initialize from file: (empty maybe) 2025-06-25T14:47:45.812764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:45.812861Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:45.926001Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21929 TClient is connected to server localhost:21929 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:46.920007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:46.957780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:47:49.274587Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898762974669711:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:49.274659Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898762974669723:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:49.274700Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:49.279196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:49.295764Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898762974669725:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:47:49.369512Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898762974669776:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:49.887699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:49.898857Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898741499832686:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:49.898918Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:50.210765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898767269637283:2320];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:50.226398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898767269637269:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:50.226638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898767269637269:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:50.226879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898767269637269:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:50.226970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898767269637269:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:50.227054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898767269637269:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:50.227164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898767269637269:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:50.227286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898767269637269:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:50.227388Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898767269637269:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:50.227481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898767269637269:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:50.227567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898767269637269:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:50.227688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898767269637269:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:50.232551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898767269637283:2320];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:50.232707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898767269637283:2320];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:50.232804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898767269637283:2320];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:50.232889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898767269637283:2320];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:50.232974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898767269637283:2320];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:50.233069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898767269637283:2320];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:50.233167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898767269637283:2320];tabl ... 
97257Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039205;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.297756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.301967Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.302561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039209;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.307086Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039209;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.307926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.311646Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.312099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039196;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.316600Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039196;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.317488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.322317Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.322923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.326420Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.327322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039194;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.332001Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039194;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.333183Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039214;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.337040Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039214;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.337593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039248;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.341781Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039248;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.342325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039218;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.347011Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039218;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.347509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039230;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.352093Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039230;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.352753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039226;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.357501Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039226;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.358055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039207;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.362476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.362753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039207;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.363395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.367728Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039229;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.372374Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039241;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.372931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.374506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.379197Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.379703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.384369Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.384805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039215;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.393103Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039227;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.396198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:21.398101Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039215;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.405432Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:21.591225Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrzwfk91dqm1tzndfncdt4", SessionId: ydb://session/3?node_id=1&id=NDAyODU0OGUtODBhMmRhZDAtZjg4YTc5ZWMtMmVjY2QxY2Q=, Slow query, duration: 43.458876s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:22.007275Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:49:22.007736Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:49:22.008644Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519899046442556428:8265];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-25T14:49:22.009045Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpJoinOrder::FiveWayJoin+ColumnStore [GOOD] >> KqpJoinOrder::SortingsByPrefixWithConstant+RemoveLimitOperator [GOOD] >> TxUsage::TestRetentionOnLongTxAndBigMessages [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_NoPushdown >> KqpIndexLookupJoin::SimpleLeftJoin-StreamLookup [GOOD] >> KqpIndexLookupJoin::SimpleLeftOnlyJoin+StreamLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS94+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 7726, MsgBus: 18200 2025-06-25T14:47:29.731227Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898679109936587:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:29.731597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ddb/r3tmp/tmpasDl0C/pdisk_1.dat 2025-06-25T14:47:30.346958Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:30.347052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:30.353855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:30.468015Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:30.468758Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898679109936477:2080] 1750862849678462 != 1750862849678465 TServer::EnableGrpc on GrpcPort 7726, node 1 2025-06-25T14:47:30.632977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-25T14:47:30.633000Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:30.633007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:30.633107Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:30.729213Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18200 TClient is connected to server localhost:18200 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:31.635480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:31.661606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:34.273280Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898700584773604:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:34.273426Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:34.273785Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898700584773616:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:34.277654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:34.290542Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898700584773618:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:34.392543Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898700584773669:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:34.719909Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898679109936587:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:34.719976Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:34.836433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:35.239474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898704879741219:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:35.239709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898704879741219:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:35.239960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898704879741219:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:35.240109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898704879741219:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:35.240217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898704879741219:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:35.240367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898704879741219:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:35.240488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898704879741219:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:35.240596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898704879741219:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:35.240707Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898704879741219:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:35.240831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898704879741219:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:35.240926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898704879741219:2319];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:35.242405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898704879741221:2321];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:35.242456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898704879741221:2321];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:35.242619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898704879741221:2321];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:35.242704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898704879741221:2321];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:35.242803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898704879741221:2321];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:35.242913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898704879741221:2321];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:35.243000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898704879741221:2321];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:35.243122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898704879741221:2321];tablet ... 
path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.499760Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.499762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.500754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.506486Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.510395Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.510845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039358;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.512874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.517103Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.517647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039200;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.517980Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039358;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.518514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.524897Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039200;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.525471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.528691Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.529114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.533062Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.533583Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039193;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.537783Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.538224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.541988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.542551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.545163Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039193;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.545602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.551150Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.552639Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.553161Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.556908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.561008Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.561560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.564650Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.565093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.569058Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.571346Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.571781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:05.639818Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:05.876430Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykrzepy92vvsvsh42xp44f2", SessionId: ydb://session/3?node_id=1&id=ZDZjZTY1ODUtZjYxMmQ1NmYtNDQ5YzMxOGMtNTJjY2UxZDU=, Slow query, duration: 41.845754s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:06.488953Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:06.489410Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:06.490463Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899057067117752:10462];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-25T14:49:06.490880Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:32.288115Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks13rd0q06wtcxq211tf80", SessionId: ydb://session/3?node_id=1&id=ZDZjZTY1ODUtZjYxMmQ1NmYtNDQ5YzMxOGMtNTJjY2UxZDU=, Slow query, duration: 13.937833s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n-- NB: Subquerys\n$bla1 = (select ws_order_number\n from web_sales\n group by ws_order_number\n having COUNT(DISTINCT ws_warehouse_sk) > 1);\n\n-- start query 1 in stream 0 using template query94.tpl and seed 2031708268\nselect\n count(distinct ws1.ws_order_number) as `order count`\n ,sum(ws_ext_ship_cost) as `total shipping cost`\n ,sum(ws_net_profit) as `total net profit`\nfrom\n web_sales ws1\n cross join date_dim\n cross join customer_address\n cross join web_site\n left semi join $bla1 bla1 on (ws1.ws_order_number = bla1.ws_order_number)\n left only join web_returns on (ws1.ws_order_number = web_returns.wr_order_number)\nwhere\n cast(d_date as date) between cast('1999-4-01' as date) and\n (cast('1999-4-01' as date) + DateTime::IntervalFromDays(60))\nand ws1.ws_ship_date_sk = d_date_sk\nand ws1.ws_ship_addr_sk = ca_address_sk\nand ca_state = 'NE'\nand ws1.ws_web_site_sk = web_site_sk\nand web_company_name = 'pri'\norder by `order count`\nlimit 100;\n", parameters: 0b ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoin+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 20847, MsgBus: 28308 2025-06-25T14:47:48.341196Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898760887903116:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:48.341248Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dbf/r3tmp/tmpgPfPbo/pdisk_1.dat 2025-06-25T14:47:49.035087Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:49.039232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:49.039334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:49.041825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20847, node 1 2025-06-25T14:47:49.213675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:49.213704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:49.213714Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:49.213826Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:47:49.344466Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28308 TClient is connected to server localhost:28308 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:50.054196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:50.069761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:53.057462Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898782362740040:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:53.057584Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:53.060409Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898782362740052:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:53.068271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:53.087630Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898782362740054:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:53.192018Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898782362740105:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:53.345095Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898760887903116:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:53.345217Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:53.745354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:54.052241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898782362740360:2321];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:54.052536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898782362740360:2321];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:54.052836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898782362740360:2321];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:54.052979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898782362740360:2321];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:54.053078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898782362740360:2321];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:54.053165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898782362740360:2321];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:54.053260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898782362740360:2321];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:54.053393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898782362740360:2321];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:54.053494Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898782362740360:2321];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:54.053598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898782362740360:2321];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:54.053690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898782362740360:2321];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:54.055925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898782362740362:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:54.055966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898782362740362:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:54.056150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898782362740362:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:54.056253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898782362740362:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:54.056363Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898782362740362:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:54.056474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898782362740362:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:54.056561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898782362740362:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:54.056642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898782362740362:2323];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:54.056733Z node 1 :TX_COLUMNSHARD WARN: ... 
96837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.002585Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.003005Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.003169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.003632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.008799Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.009388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.009828Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.010384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.014326Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.015042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.015174Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.015885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.020383Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.020597Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.020927Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.021082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.027599Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.027608Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.028233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.028570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.033810Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.033890Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.034653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.034656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.040153Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.041227Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.041237Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.041820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.046519Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.047086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.047097Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.047865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039238;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.053692Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039238;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.053979Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.054515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.054862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.060753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.060948Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.061378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:24.068085Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:24.228509Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks03h85w6z09kpyyvcnwx3", SessionId: ydb://session/3?node_id=1&id=YTc1ZTkyZTEtY2NlMWVjZGItYTdiNWUyZWQtM2QxMWE5OTM=, Slow query, duration: 38.875508s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:24.556685Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:24.557244Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:24.557786Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899130255146897:9994];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-25T14:49:24.558212Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPrefixWithConstant+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 10125, MsgBus: 22240 2025-06-25T14:48:49.868728Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899022407951344:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:49.908748Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d82/r3tmp/tmprryFVC/pdisk_1.dat 2025-06-25T14:48:50.492563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:50.492663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:50.508088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:50.526671Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899022407951307:2080] 1750862929827611 != 1750862929827614 2025-06-25T14:48:50.533466Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10125, node 1 2025-06-25T14:48:50.728537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:50.728560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:50.728567Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:50.728656Z node 1 :NET_CLASSIFIER 
ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:50.872608Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22240 TClient is connected to server localhost:22240 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:52.040661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:52.080834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:54.901367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899022407951344:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:54.901620Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:54.959369Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899043882788444:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:54.959855Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899043882788431:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:54.959957Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:54.973019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:54.992526Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899043882788447:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:55.064256Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899048177755794:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:55.644139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:55.926618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:55.972628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:56.015737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:56.087618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:56.275778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:56.343014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:56.378407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:56.409161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:56.455637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:56.487365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:56.525029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:56.594152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:57.318334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
95899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.200488Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.201029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038469;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.206850Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.207264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.211460Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.212392Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038469;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.212815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.216984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.221522Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.222078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.225058Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.225487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.226827Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.227371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.232020Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.232901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.235410Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.235835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.244207Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.244595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.245241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.248853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.253482Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.254402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.257348Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.258269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.263083Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.263677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.266922Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.267491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.276297Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.276888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.280521Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.281051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.282312Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.282889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.285692Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.286282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:35.287838Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.295473Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.484757Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks0hny9r2y0g62hdk748r6", SessionId: ydb://session/3?node_id=1&id=MzFmNmJmZjctYjQ5N2EzODgtNmYwOTAzNmMtOTAzNTBiNTk=, Slow query, duration: 35.646153s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:35.825827Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:35.826319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:35.830791Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519899138372085362:4179];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038629; 2025-06-25T14:49:35.831246Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::SortingsSimpleOrderByPKAlias+RemoveLimitOperator >> KqpFlipJoin::RightSemi_3 [GOOD] >> KqpFlipJoin::LeftSemi_1 [GOOD] >> KqpIndexLookupJoin::CheckCastInt64ToUint64+StreamLookupJoin-NotNull >> OlapEstimationRowsCorrectness::TPCH5 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::LeftSemi_1 [GOOD] Test command err: Trying to start YDB, gRPC: 29645, MsgBus: 24123 2025-06-25T14:49:29.269336Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899195689126915:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:29.276560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d5d/r3tmp/tmpDN8riC/pdisk_1.dat 2025-06-25T14:49:29.981265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:29.981353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:29.983418Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:29.995732Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:30.008433Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899195689126709:2080] 1750862969203713 != 1750862969203716 TServer::EnableGrpc on GrpcPort 29645, node 1 2025-06-25T14:49:30.276404Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:30.285058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable 
config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:30.285075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:30.285085Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:30.285198Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24123 TClient is connected to server localhost:24123 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:31.348038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:31.403422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:31.576266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:31.824487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:49:31.925932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.782260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899212868997526:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:33.782353Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:34.252413Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899195689126915:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:34.252523Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:34.369746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.467317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.513483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.593924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.646297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.731058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.788577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.856158Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899217163965485:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:34.856228Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:34.856718Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899217163965490:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:34.860984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:34.885333Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899217163965492:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:34.989225Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899217163965543:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:36.330705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:36.382214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation pa ... iles were not loaded TServer::EnableGrpc on GrpcPort 8571, node 2 2025-06-25T14:49:39.688840Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:39.688859Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:39.688868Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:39.688978Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:40.020576Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63261 TClient is connected to server localhost:63261 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:40.490392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:49:40.508722Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:49:40.519070Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:40.635100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:40.943022Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:41.078086Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:44.016439Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899237189564726:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:44.016499Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:44.518694Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899258664402619:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:44.518765Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:44.614448Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:44.718324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:44.783145Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:44.843983Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:44.893715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:44.993497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:45.059786Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:45.132670Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899262959370575:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:45.132761Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:45.133186Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899262959370580:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:45.137234Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:45.155007Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899262959370582:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:49:45.255216Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899262959370633:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:46.416010Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:46.490080Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:46.526020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:46.568983Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpFlipJoin::RightSemi_3 [GOOD] Test command err: Trying to start YDB, gRPC: 25702, MsgBus: 5699 2025-06-25T14:49:29.055682Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899193730621511:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:29.088481Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d5a/r3tmp/tmpKISKmD/pdisk_1.dat 2025-06-25T14:49:29.559786Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:29.571835Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899193730621398:2080] 1750862969017527 != 1750862969017530 2025-06-25T14:49:29.595916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:29.596007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:29.605748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25702, node 1 2025-06-25T14:49:29.868863Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:29.868880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:29.868887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:29.869001Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:30.063677Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5699 TClient is connected to server localhost:5699 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:30.817838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:30.848215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:31.083515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:31.293597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:49:31.404045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:33.358518Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899210910492205:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:33.358609Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:33.690199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.730139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.816702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.892537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.948596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.032729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.054922Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899193730621511:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:34.055016Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:34.100666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.168151Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899215205460168:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:34.168222Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:34.168507Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899215205460173:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:34.172609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:34.185333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:49:34.185860Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899215205460175:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:34.276716Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899215205460226:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:35.557429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... ecting -> Connected TServer::EnableGrpc on GrpcPort 20724, node 2 2025-06-25T14:49:38.928861Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:38.928902Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:38.928925Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:38.929094Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:39.325409Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19921 TClient is connected to server localhost:19921 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:39.962600Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:39.973388Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:49:39.986321Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:40.064604Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:40.274415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:40.401230Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:42.765297Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899249433242440:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:42.765393Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:42.825602Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.919645Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.982501Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:43.060526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:43.107606Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:43.159940Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:43.219064Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:43.301053Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899232253371629:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:43.301119Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:43.326949Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519899253728210395:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:43.327051Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:43.331140Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899253728210400:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:43.336039Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:43.390960Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899253728210402:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:43.467886Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899253728210453:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:44.934720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:44.994773Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:45.073375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:45.132001Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> ShowCreateView::Basic [FAIL] >> ShowCreateView::FromTable >> KqpJoinOrder::TestJoinHint2+ColumnStore [GOOD] >> KqpJoinOrder::TPCDS16-ColumnStore >> KqpJoinOrder::CanonizedJoinOrderTPCH11 >> KqpJoinOrder::SortingsWithLookupJoin3+RemoveLimitOperator [GOOD] >> KqpIndexLookupJoin::SimpleLeftOnlyJoin+StreamLookup [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_NoPushdown [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_Nulls ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::SimpleLeftOnlyJoin+StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 63066, MsgBus: 31498 2025-06-25T14:49:35.588737Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899218107665969:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:35.589285Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d51/r3tmp/tmp0sLZrC/pdisk_1.dat 2025-06-25T14:49:36.158049Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:36.166591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-25T14:49:36.166673Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:36.169487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63066, node 1 2025-06-25T14:49:36.456777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:36.456795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:36.456801Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:36.456906Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:36.587848Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31498 TClient is connected to server localhost:31498 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:37.309226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:37.343262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:37.531319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:49:37.760630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:37.864350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:39.636798Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899235287536646:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:39.636892Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:40.014094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:40.056151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:40.105933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:40.181605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:40.232652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:40.281607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:40.335710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:40.457274Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899239582504601:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:40.457349Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:40.457668Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899239582504606:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:40.462846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:40.478717Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899239582504608:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:40.540225Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899239582504659:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:40.595820Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899218107665969:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:40.595865Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:41.733503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:41.822178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /ho ... d to server localhost:64242 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:46.394205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:46.401074Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:49:46.417981Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:46.546813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:46.750699Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:49:46.884037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:49.376852Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899281646645107:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:49.376946Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:49.490728Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:49.541664Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:49.591896Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:49.638447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:49.689731Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:49.774366Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:49.849205Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:49.936572Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899281646645765:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:49.936664Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:49.936786Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899281646645770:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:49.941646Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:49.956437Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899260171807247:2245];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:49.956491Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:49.975437Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899281646645772:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:50.061411Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899285941613120:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:51.546962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:51.640857Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:51.734096Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:51.780684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:51.828707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:51.929385Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinHint2+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 25908, MsgBus: 1153 2025-06-25T14:48:06.489583Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898835967706485:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:06.489802Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000daa/r3tmp/tmp7XLCgh/pdisk_1.dat 2025-06-25T14:48:07.075063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:07.075145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:07.097887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:07.112336Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898835967706373:2080] 1750862886444967 != 1750862886444970 2025-06-25T14:48:07.132012Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25908, node 1 2025-06-25T14:48:07.348781Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:07.348804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:07.348810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:07.348924Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:07.493520Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1153 TClient is connected to server localhost:1153 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:08.452270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:48:08.506767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:48:11.053310Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898857442543497:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:11.053506Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:11.053894Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898857442543509:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:11.057975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:11.070558Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898857442543511:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:48:11.176656Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898857442543562:2338] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:11.480443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898835967706485:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:11.480539Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:11.871829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:48:12.202309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898861737511085:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:12.203322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898861737511081:2323];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:12.203547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898861737511081:2323];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:12.203793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898861737511081:2323];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:12.203889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898861737511081:2323];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:12.203986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898861737511081:2323];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:12.204088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898861737511081:2323];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:12.204196Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898861737511081:2323];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:12.204294Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898861737511081:2323];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:12.204410Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898861737511081:2323];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:48:12.204418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898861737511085:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:12.204544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898861737511081:2323];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:48:12.204606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898861737511085:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:12.204643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898861737511081:2323];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:48:12.204697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898861737511085:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:12.204781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898861737511085:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:12.204869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898861737511085:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:12.204947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898861737511085:2326];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:12.205042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898861737511085:2326];tablet_ ... 
95787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.398472Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.399086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.404274Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.405195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.409733Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.410244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.410426Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.411006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.415477Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.416102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.416211Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.416729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.421136Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.421776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.426101Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.426686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.428991Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.429504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.431428Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.431981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.436945Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.437507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.437855Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.438368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.441622Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.442158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.442837Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.443311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.457749Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.458260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.458951Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.459387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.462945Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.463595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.464013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.464179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.468860Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.479598Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.571299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039298;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:39.579379Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039298;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:39.652987Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks0kntf0d3sy04jrhnczbh", SessionId: ydb://session/3?node_id=1&id=YTNiZGE1ZGQtMmQzNjI4NDQtYWY1YThiNjAtZjlhOWIwZDg=, Slow query, duration: 37.768970s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:39.997384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:49:39.997897Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:49:39.998514Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519899089370813362:6996];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224039392; 2025-06-25T14:49:39.998955Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsWithLookupJoin3+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 2257, MsgBus: 20046 2025-06-25T14:49:03.321617Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899081889880260:2216];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:03.321763Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d78/r3tmp/tmpR7cMG4/pdisk_1.dat 2025-06-25T14:49:03.979430Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:03.979530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:03.992894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:04.055322Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899081889880082:2080] 1750862943274282 != 1750862943274285 2025-06-25T14:49:04.061443Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2257, node 1 2025-06-25T14:49:04.312811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:04.312826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:04.312832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:04.312944Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:04.324596Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20046 TClient is connected to server localhost:20046 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:05.200404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:07.471904Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899099069749909:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:07.472022Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:07.472398Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899099069749921:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:07.476637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:07.489747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:49:07.490367Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899099069749923:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:07.568187Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899099069749974:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:08.087627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.235853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.277999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.316395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899081889880260:2216];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:08.316462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:08.369541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.440779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.599265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.635532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.680730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.729537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.779762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.828620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:08.909853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:09.011518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:09.708640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subopera ... 
:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.308584Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.308985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.314010Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038602;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.314596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.317245Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.317793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038506;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.323518Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.324126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.326482Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038506;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.327072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.335973Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.336999Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.337761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.340835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.346861Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.347490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.349554Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.350115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.363047Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.363518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.364879Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.365340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.388601Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.389155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.392663Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.393139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.401994Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.402580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.405877Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.406301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.418420Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.418922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.419105Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.419526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.432543Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.433149Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.433289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.434178Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:46.443596Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.448023Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:46.664820Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks0xvhfmmzsddrvcw2gk3v", SessionId: ydb://session/3?node_id=1&id=Y2I5NDdhMTMtYzBkNDExMTAtMWQ1ODNjY2QtMmYxYTFkYTM=, Slow query, duration: 34.359288s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:47.036959Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:47.037565Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:47.043760Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519899154904336569:3735];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-25T14:49:47.044209Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
:7:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504
:7:3: Warning: ORDER BY without LIMIT in subquery will be ignored, code: 4504 ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::TestRetentionOnLongTxAndBigMessages [GOOD] Test command err: 2025-06-25T14:40:54.692418Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896980015181217:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:54.692545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019a2/r3tmp/tmpPYne0v/pdisk_1.dat 2025-06-25T14:40:54.964580Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:40:55.138510Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:55.140503Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519896980015181199:2080] 1750862454684157 != 1750862454684160 2025-06-25T14:40:55.158639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:55.158721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:55.160654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12668, node 1 2025-06-25T14:40:55.216900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0019a2/r3tmp/yandexdEDIb4.tmp 2025-06-25T14:40:55.216922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0019a2/r3tmp/yandexdEDIb4.tmp 2025-06-25T14:40:55.217072Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0019a2/r3tmp/yandexdEDIb4.tmp 2025-06-25T14:40:55.217194Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:40:55.418927Z INFO: TTestServer started on Port 19115 GrpcPort 12668 TClient is connected to server localhost:19115 PQClient connected to localhost:12668 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:40:55.728741Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:55.748502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:55.786250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:40:55.797785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:40:55.804359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:40:56.033039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:40:58.740440Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896997195051191:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.741363Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896997195051178:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.741453Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.746310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:40:58.758876Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519896997195051223:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.758994Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:40:58.773008Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519896997195051194:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:40:58.846772Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519896997195051250:2442] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:40:59.228529Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519896997195051259:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:40:59.228826Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MTc4MDQ5YTctYTZjZDllZDAtYzI1MWUyNy1kMzQzODRjYg==, ActorId: [1:7519896997195051173:2298], ActorState: ExecuteState, TraceId: 01jykrhvswa694ypvgccfgnx9x, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:40:59.231465Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:40:59.233139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:59.267506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:40:59.361140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:40:59.688556Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519896980015181217:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:59.688651Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Subcribe to ClusterTracker from [1:7519897001490018853:2621] === CheckClustersList. 
Ok 2025-06-25T14:41:06.745022Z :Sinks_Oltp_WriteToTopic_2_Table INFO: TTopicSdkTestSetup started 2025-06-25T14:41:06.792697Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:41:06.831884Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7519897031554790166:2728] connected; active server actors: 1 2025-06-25T14:41:06.832135Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-25T14:41:06.836487Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594 ... t" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "test-topic" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/test-topic" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519899201609240536 RawX2: 64424511591 } Partitions { Partition { PartitionId: 0 } } 2025-06-25T14:49:43.775232Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:49:43.780571Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-25T14:49:43.781322Z node 15 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-25T14:49:43.781700Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:49:43.781723Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-25T14:49:43.781745Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715672, State EXECUTED 2025-06-25T14:49:43.781767Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976715672 State EXECUTED FrontTxId 281474976715672 2025-06-25T14:49:43.781785Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-25T14:49:43.781806Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715672, NewState WAIT_RS_ACKS 2025-06-25T14:49:43.781823Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976715672 moved from EXECUTED to WAIT_RS_ACKS 2025-06-25T14:49:43.781848Z node 15 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 
281474976715672] PredicateAcks: 0/0 2025-06-25T14:49:43.781856Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-25T14:49:43.781870Z node 15 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715672] PredicateAcks: 0/0 2025-06-25T14:49:43.781886Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037892] add an TxId 281474976715672 to the list for deletion 2025-06-25T14:49:43.781907Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715672, NewState DELETING 2025-06-25T14:49:43.781930Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037892] delete key for TxId 281474976715672 2025-06-25T14:49:43.781988Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:49:43.783997Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:49:43.784025Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-25T14:49:43.784044Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715672, State DELETING 2025-06-25T14:49:43.784065Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037892] delete TxId 281474976715672 2025-06-25T14:49:43.788957Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-25T14:49:43.789313Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-25T14:49:43.789343Z :INFO: [/Root] MessageGroupId [src] SessionId [] Start write session. Will connect to endpoint: localhost:9501 2025-06-25T14:49:43.822699Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-25T14:49:43.827220Z node 15 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-25T14:49:43.827260Z node 15 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-25T14:49:43.832567Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-25T14:49:43.832727Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:48268 2025-06-25T14:49:43.832747Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:48268 proto=v1 topic=test-topic durationSec=0 2025-06-25T14:49:43.832760Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:49:43.837106Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-25T14:49:43.837261Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-25T14:49:43.837273Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE 
$SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T14:49:43.837282Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-25T14:49:43.837320Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [15:7519899253148849208:2420] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-25T14:49:43.837341Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T14:49:43.838231Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 15, Generation: 1 2025-06-25T14:49:43.838287Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [15:7519899253148849211:2420], now have 1 active actors on pipe 2025-06-25T14:49:43.838457Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-25T14:49:43.838487Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-25T14:49:43.838571Z node 15 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|215db702-9061de68-1564c932-63aad5f6_0 generated for partition 0 topic 'test-topic' owner src 2025-06-25T14:49:43.838699Z node 15 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-25T14:49:43.838757Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:49:43.839061Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-25T14:49:43.839084Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-25T14:49:43.839153Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:49:43.839226Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|215db702-9061de68-1564c932-63aad5f6_0 2025-06-25T14:49:43.840284Z :INFO: [/Root] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750862983840 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:49:43.840409Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session established. 
Init response: session_id: "src|215db702-9061de68-1564c932-63aad5f6_0" topic: "test-topic" 2025-06-25T14:49:43.840694Z :INFO: [/Root] MessageGroupId [src] SessionId [src|215db702-9061de68-1564c932-63aad5f6_0] Write session: close. Timeout = 0 ms 2025-06-25T14:49:43.840727Z :INFO: [/Root] MessageGroupId [src] SessionId [src|215db702-9061de68-1564c932-63aad5f6_0] Write session will now close 2025-06-25T14:49:43.840762Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|215db702-9061de68-1564c932-63aad5f6_0] Write session: aborting 2025-06-25T14:49:43.841082Z :INFO: [/Root] MessageGroupId [src] SessionId [src|215db702-9061de68-1564c932-63aad5f6_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:49:43.841113Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|215db702-9061de68-1564c932-63aad5f6_0] Write session: destroy 2025-06-25T14:49:43.852413Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|215db702-9061de68-1564c932-63aad5f6_0 grpc read done: success: 0 data: 2025-06-25T14:49:43.852451Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|215db702-9061de68-1564c932-63aad5f6_0 grpc read failed 2025-06-25T14:49:43.852495Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|215db702-9061de68-1564c932-63aad5f6_0 grpc closed 2025-06-25T14:49:43.852510Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|215db702-9061de68-1564c932-63aad5f6_0 is DEAD 2025-06-25T14:49:43.853897Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:49:43.860520Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [15:7519899253148849211:2420] destroyed 2025-06-25T14:49:43.860592Z node 15 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
2025-06-25T14:49:43.860718Z :TestRetentionOnLongTxAndBigMessages INFO: Topic created >> KqpIndexLookupJoin::CheckCastInt64ToUint64+StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastInt64ToUint64+StreamLookupJoin+NotNull >> KqpJoinOrder::UdfConstantFold-ColumnStore >> KqpJoinOrder::TPCDS94-ColumnStore >> KqpJoin::FullOuterJoinSizeCheck >> KqpIndexLookupJoin::CheckCastInt32ToInt16+StreamLookupJoin-NotNull >> KqpJoinOrder::ShuffleEliminationTpcdsMapJoinBug [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH5 [GOOD] >> ShowCreateView::FromTable [FAIL] >> ShowCreateView::WithPairedTablePathPrefix ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationTpcdsMapJoinBug [GOOD] Test command err: Trying to start YDB, gRPC: 26945, MsgBus: 11500 2025-06-25T14:48:04.332902Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898828287999665:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:04.333184Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000db0/r3tmp/tmp8xOyFB/pdisk_1.dat 2025-06-25T14:48:05.026825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:05.026918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:05.042409Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:05.111926Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898828287999483:2080] 1750862884268729 != 1750862884268732 2025-06-25T14:48:05.146465Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26945, node 1 2025-06-25T14:48:05.269134Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:48:05.318407Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:05.332375Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:05.332400Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:05.332559Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11500 TClient is connected to server localhost:11500 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:06.384234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:06.440652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:09.056884Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898849762836608:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:09.057002Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:09.057454Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898849762836620:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:09.061613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:09.092158Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898849762836622:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:09.168613Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898849762836673:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:09.336407Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898828287999665:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:09.336474Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:09.573699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:48:09.806161Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898849762836922:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:09.806384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898849762836922:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:09.806642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898849762836922:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:09.806746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898849762836922:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:09.806864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898849762836922:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:09.806987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898849762836922:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:09.807108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898849762836922:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:09.807213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898849762836922:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:09.807332Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898849762836922:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:48:09.807447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898849762836922:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:48:09.807564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898849762836922:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:48:09.810453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898849762836925:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:09.810500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898849762836925:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:09.810639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898849762836925:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:09.811020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898849762836925:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:09.811197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898849762836925:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:09.811293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898849762836925:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:09.811393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898849762836925:2320];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:09.811477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898849762836925:2320];tabl ... 
78506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.879217Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.879774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.891759Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.894334Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.894921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.901273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.905241Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.905729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.910341Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.911375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.914730Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.915295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.920267Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.924098Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.924688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.928842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.933479Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.934127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.935876Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039273;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.936287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.939080Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.939613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.944381Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.945050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.946789Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.947261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.949778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.950305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.951560Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039291;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.952531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.958118Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.958842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039283;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.962466Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.963103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.963856Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039283;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.965048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.970722Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.971438Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:35.971452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:35.992803Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:36.076548Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks0gv06yg4ce5d2pz4gcr6", SessionId: ydb://session/3?node_id=1&id=ZTQ1ZjFmNWYtZDg1ZjExNDktYjhlYTFjOWItYjI0NTZlMTg=, Slow query, duration: 37.099653s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:36.676672Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:36.677219Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:36.679195Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519899128935755558:8227];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-25T14:49:36.679615Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::CheckCastInt64ToUint64+StreamLookupJoin+NotNull [GOOD] >> KqpJoin::LeftJoinPushdownPredicate_Nulls [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH5 [GOOD] Test command err: Trying to start YDB, gRPC: 3905, MsgBus: 7239 2025-06-25T14:48:06.820530Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898837546008819:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:06.822006Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dab/r3tmp/tmpz4W7Ei/pdisk_1.dat 2025-06-25T14:48:07.329762Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898837546008702:2080] 1750862886763783 != 1750862886763786 2025-06-25T14:48:07.377048Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:07.404952Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:07.405031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:07.413162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3905, node 1 2025-06-25T14:48:07.638502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:07.638535Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:07.638544Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: 
failed to initialize from file: (empty maybe) 2025-06-25T14:48:07.638634Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:07.822931Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7239 TClient is connected to server localhost:7239 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:08.717161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:11.023872Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898859020845827:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:11.024015Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:11.031085Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898859020845839:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:11.034735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:11.056405Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898859020845841:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:48:11.120272Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898859020845892:2338] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:11.528146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:48:11.812529Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898837546008819:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:11.812595Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:11.904636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898859020846128:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:11.904906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898859020846128:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:11.905181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898859020846128:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:11.905311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898859020846128:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:11.905408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898859020846128:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:11.905506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898859020846128:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:11.905621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898859020846128:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:11.905744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898859020846128:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:11.905839Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898859020846128:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:48:11.905955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898859020846128:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:48:11.906045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519898859020846128:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:48:11.955153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898859020846117:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:11.955202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898859020846117:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:11.955390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898859020846117:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:11.955492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898859020846117:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:11.955606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898859020846117:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:11.955694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898859020846117:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:11.955776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898859020846117:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:11.955873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898859020846117:2310];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:11.955993Z node 1 :TX_COLUMNSHA ... 
73449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039334;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.879589Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039344;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.880196Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.882171Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039334;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.882689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.902424Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.902919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.905003Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.905466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.910609Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.911185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.914668Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.915141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.922751Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.923325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.924978Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.925547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.933771Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.935678Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.936122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.941007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.944758Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.945398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.950250Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.950934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.954856Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.955486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.963344Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.963896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.967765Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.968222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.980209Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.984556Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.985048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.987075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.993518Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:36.994215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:36.999799Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:37.000329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:37.005091Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:37.006998Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:37.296661Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks0j3c2tyy7xxkc0gn751n", SessionId: ydb://session/3?node_id=1&id=Mzc0ZWJlYzUtZTVhMzZmYTYtNzZmYzVmN2QtZjc5ZWU0NjU=, Slow query, duration: 37.024119s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:37.756828Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:49:37.757331Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:49:37.757927Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899181143448525:9976];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-25T14:49:37.758340Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpJoinOrder::CanonizedJoinOrderTPCH15 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastInt64ToUint64+StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 5313, MsgBus: 5088 2025-06-25T14:49:49.336345Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899277547285553:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:49.340440Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d4a/r3tmp/tmpJx03rL/pdisk_1.dat 2025-06-25T14:49:49.913762Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:49.919583Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899277547285500:2080] 1750862989244936 != 1750862989244939 2025-06-25T14:49:49.963275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:49.963382Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:49.973778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5313, node 1 2025-06-25T14:49:50.160209Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:50.160228Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:50.160236Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
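For readability, the query text from the KQP_SLOW_LOG entry above (Slow query, duration: 37.024119s) is reproduced here with its escaped \n sequences expanded; this is the DDL already present in that log line, not additional statements:

-- Query text taken from the KQP_SLOW_LOG entry above; only the newline escapes have been expanded.
CREATE TABLE t1 (
 id1 Int32 NOT NULL,
 PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
 id2 Int64 NOT NULL,
 t1_id1 Int64 NOT NULL,
 -- random_field2 Int32
 PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
 id3 Int16 NOT NULL,
 -- random_field3 Int32
 PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

All three are column-store tables created with AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240, which is the statement the slow-query warning was reported for.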
2025-06-25T14:49:50.160340Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:50.356659Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5088 TClient is connected to server localhost:5088 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:51.077557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:51.123041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:51.142723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:51.381709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:51.622701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:49:51.711508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:53.567434Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899294727156320:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:53.567528Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:53.940198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:53.971764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:54.012966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:54.056830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:54.096427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:54.141627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:54.185506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:54.302623Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899299022124272:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:54.302686Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:54.302883Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899299022124277:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:54.306975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:54.329510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:49:54.330865Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899299022124279:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:54.336853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899277547285553:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:54.336909Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:54.423710Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899299022124332:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:55.701568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo u ... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d4a/r3tmp/tmpUEge9k/pdisk_1.dat 2025-06-25T14:49:57.875037Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:57.875104Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:57.878039Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:57.884421Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899313736371293:2080] 1750862997595920 != 1750862997595923 2025-06-25T14:49:57.886848Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24788, node 2 2025-06-25T14:49:58.040773Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:58.040790Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:58.040797Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:58.040894Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16480 TClient is connected to server localhost:16480 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:49:58.767010Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:58.813417Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:58.821051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:58.838586Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:58.958491Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:59.115485Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:59.261413Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:02.096467Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899335211209401:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:02.096567Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:02.141131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:02.188231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:02.268376Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:02.311038Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:02.352195Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:02.433007Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:02.493722Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:02.567070Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899335211210060:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:02.567147Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:02.567532Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899335211210065:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:02.571805Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:02.584719Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899335211210067:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:02.633156Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899313736371414:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:02.633237Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:02.647462Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899335211210118:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:04.120238Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.242529Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::LeftJoinPushdownPredicate_Nulls [GOOD] Test command err: Trying to start YDB, gRPC: 16212, MsgBus: 16703 2025-06-25T14:49:44.753256Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899257911463885:2124];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:44.756483Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d4d/r3tmp/tmpnNASBy/pdisk_1.dat 2025-06-25T14:49:45.389285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:45.389378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:45.408404Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899257911463783:2080] 1750862984690431 != 1750862984690434 2025-06-25T14:49:45.413976Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:45.422369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16212, node 1 2025-06-25T14:49:45.655765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:45.655783Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T14:49:45.655789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:45.655891Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:45.793271Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16703 TClient is connected to server localhost:16703 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:46.738961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:46.792080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:46.804468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:47.078996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:47.266896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:49:47.368108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:49.636477Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899279386301893:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:49.636559Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:49.752517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899257911463885:2124];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:49.801050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:50.076646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:50.162019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:50.214977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:50.282446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:50.331877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:50.426338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:50.490706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:50.604760Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899283681269859:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:50.604835Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:50.605161Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899283681269864:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:50.609492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:50.629101Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899283681269866:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:50.717620Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899283681269917:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:52.489861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:55.002023Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899299851341457:2080] 1750862994553790 != 1750862994553793 2025-06-25T14:49:55.013087Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29887, node 2 2025-06-25T14:49:55.172081Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:55.172096Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:55.172103Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:55.172208Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29613 2025-06-25T14:49:55.640527Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29613 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:55.929369Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:49:55.937188Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:55.950834Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:56.060505Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:56.260430Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:56.343412Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:49:58.508429Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899317031212262:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:58.508511Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:58.611029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.665093Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.694449Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.743440Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.781781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.857475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.937998Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:59.024267Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899321326180218:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:59.024406Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:59.026132Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899321326180223:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:59.028887Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:59.046628Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899321326180225:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:49:59.134796Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899321326180276:3414] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:59.577211Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899299851341476:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:59.577268Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:00.150072Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:00.193854Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:00.307372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpIndexLookupJoin::InnerJoinOnlyRightColumn+StreamLookup >> KqpJoin::FullOuterJoinSizeCheck [GOOD] >> KqpJoin::FullOuterJoinNotNullJoinKey >> KqpIndexLookupJoin::CheckCastInt32ToInt16+StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastInt32ToInt16+StreamLookupJoin+NotNull >> KqpJoinOrder::SortingsDifferentDirs-RemoveLimitOperator >> KqpJoinOrder::CanonizedJoinOrderTPCH22 >> KqpIndexLookupJoin::CheckCastUint32ToUint16+StreamLookupJoin-NotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH15 [GOOD] Test command err: Trying to start YDB, gRPC: 21423, MsgBus: 14604 2025-06-25T14:48:15.024754Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898875584700945:2232];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d9d/r3tmp/tmpVCcggL/pdisk_1.dat 2025-06-25T14:48:15.404807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:48:15.655718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:15.655808Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:15.660827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:15.698271Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:15.700486Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898871289733443:2080] 1750862894984954 != 1750862894984957 TServer::EnableGrpc on GrpcPort 21423, node 1 2025-06-25T14:48:15.941399Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:15.941509Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:15.941518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:15.941651Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:16.013597Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14604 TClient is connected to server localhost:14604 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:16.758913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:16.801276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:18.950335Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898888469603271:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:18.950456Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:18.951635Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898888469603283:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:18.956840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:18.978478Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898888469603285:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:19.077190Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898892764570633:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:19.431110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:48:19.658646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898892764570914:2322];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:19.658858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898892764570914:2322];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:19.659102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898892764570914:2322];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:19.659644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898892764570914:2322];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:19.659818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898892764570914:2322];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:19.659925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898892764570914:2322];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:19.660050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898892764570914:2322];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:19.660160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898892764570914:2322];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:19.660271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898892764570914:2322];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:48:19.660382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037893;self_id=[1:7519898892764570914:2322];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:48:19.660477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898892764570914:2322];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:48:19.693996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898892764570876:2310];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:19.694061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898892764570876:2310];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:19.694256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898892764570876:2310];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:19.694356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898892764570876:2310];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:19.694460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898892764570876:2310];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:19.694569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898892764570876:2310];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:19.694675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898892764570876:2310];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:19.694787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898892764570876:2310];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:19.694873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519898892764570876:2310];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunk ... 
.619116Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.619541Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.623061Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.623579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.623609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.628175Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.628714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.633433Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.633973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.641486Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.641982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.642242Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.642722Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.647068Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.649687Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.650229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.654971Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.655532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.656715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.661458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.661995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.668303Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.668811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.670669Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.671366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.673385Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.673933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.678298Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.678842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.682984Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.683505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039364;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.687620Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.692094Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039364;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.692967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.696826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:45.701050Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.706344Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:45.916268Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks0re58xsw9bw7m1wyaqyg", SessionId: ydb://session/3?node_id=1&id=ODM3ZTA3MDItZTE3YmJmOTYtYmQzNGM5ZTYtOWZkMGE4MDk=, Slow query, duration: 39.157988s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:46.423441Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:46.424001Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:46.424530Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039392;self_id=[1:7519899219182140914:9914];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-25T14:49:46.424940Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:47.749699Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyks1zet6fy72dgagda3tp7k, SessionId: CompileActor 2025-06-25 14:49:47.749 WARN ydb-core-kqp-ut-join(pid=436429, tid=0x00007F247EAC5640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-25T14:49:51.965145Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyks23j9fbw2vnmt9w5kwpp7, SessionId: CompileActor 2025-06-25 14:49:51.964 WARN ydb-core-kqp-ut-join(pid=436429, tid=0x00007F247EAC5640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed >> TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Query >> KqpJoinOrder::FiveWayJoin-ColumnStore [GOOD] >> KqpJoinOrder::TestJoinOrderHintsManyHintTrees [GOOD] >> KqpJoin::IndexLoookupJoinStructJoin-StreamLookupJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoin-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 12894, MsgBus: 3128 2025-06-25T14:49:19.099106Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899150009738950:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:19.102872Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d6b/r3tmp/tmpGhZXeD/pdisk_1.dat 2025-06-25T14:49:19.624907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:19.625008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:19.633686Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:19.634598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:19.634811Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899150009738851:2080] 1750862959049676 != 1750862959049679 TServer::EnableGrpc on GrpcPort 12894, node 1 2025-06-25T14:49:19.908607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:19.908625Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-06-25T14:49:19.908632Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:19.908727Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:20.099678Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3128 TClient is connected to server localhost:3128 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:20.726215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:23.093230Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899167189608691:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:23.093310Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899167189608680:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:23.093630Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:23.100459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:23.132456Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899167189608694:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:23.208181Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899167189608747:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:23.627248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:23.757103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:23.798204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:23.831019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:23.863396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.059336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.094473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899150009738950:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:24.094522Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:24.109225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.187142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.240348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.306080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.346788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.386224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:24.434581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:25.227632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/cor ... 
14438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.217670Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038586;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.218257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.223076Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.223642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.226889Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.227456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.232670Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.233341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.238511Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.239008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.243948Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038610;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.244580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.249409Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.249976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.251608Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038578;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.252093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.254685Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.255233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.260028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038606;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.260812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.264823Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.265148Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.265458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.265819Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.270270Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.270778Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038576;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.274247Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.274800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.275358Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038576;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.279072Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.352130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.353954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.361429Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.362043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.364462Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.364961Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038534;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.367079Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.367609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:02.372797Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.374194Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038534;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:02.524633Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1cvn8b3nazmh3mx8eac3", SessionId: ydb://session/3?node_id=1&id=ODAyY2M4NTQtN2VjNjcyNDEtNzZjMTc4ZjAtMTExMWM3OTg=, Slow query, duration: 34.854560s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:02.929600Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:02.930151Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:02.930931Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519899291743686390:5448];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038170;receive=72075186224038331; 2025-06-25T14:50:02.931363Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinOrderHintsManyHintTrees [GOOD] Test command err: Trying to start YDB, gRPC: 30593, MsgBus: 23039 2025-06-25T14:48:25.086331Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898920718807810:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:25.086786Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d97/r3tmp/tmpLgkc33/pdisk_1.dat 2025-06-25T14:48:25.782917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:25.782992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:25.798914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:25.832438Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898920718807612:2080] 1750862905013944 != 1750862905013947 2025-06-25T14:48:25.857140Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30593, node 1 2025-06-25T14:48:26.080550Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:48:26.167704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:26.167730Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:26.167744Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:26.167876Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23039 TClient is connected to server localhost:23039 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:27.351419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:29.398930Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898937898677442:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:29.399068Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:29.399331Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898937898677454:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:29.402835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:29.430977Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898937898677456:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:29.528965Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898937898677507:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:30.061138Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898920718807810:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:30.062265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:30.067658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:48:30.331996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898942193645057:2322];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:30.332190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898942193645057:2322];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:30.332588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898942193645057:2322];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:30.332719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898942193645057:2322];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:30.332830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898942193645057:2322];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:30.332953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898942193645057:2322];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:30.332994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898942193645048:2313];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:30.333060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898942193645048:2313];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:30.333108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037902;self_id=[1:7519898942193645057:2322];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:30.333168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898942193645048:2313];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:30.333211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898942193645057:2322];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:30.333250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898942193645048:2313];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:30.333334Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898942193645057:2322];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:48:30.333379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898942193645048:2313];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:30.333448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898942193645057:2322];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:48:30.333477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898942193645048:2313];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:30.333547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898942193645057:2322];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:48:30.333565Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898942193645048:2313];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:30.337537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519898942193645048:2313];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:30.337659Z node 1 :TX_COLU ... 
976710714; 2025-06-25T14:49:55.581703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.581801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.586164Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.586708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.591165Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.591718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.598786Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.600206Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.600729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.605132Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.605710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.608887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.617132Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.617612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.620395Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.620807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.627150Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.627603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.629938Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.630504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.634420Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.634941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.636372Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.637553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.647254Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.647682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.651034Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.651457Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.651588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.652056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.661091Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.661756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039252;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.664376Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.665656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.674203Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039252;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.675514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.677438Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.680225Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.834318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:55.857871Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:55.914225Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks12bj0shwh61qgxnj660d", SessionId: ydb://session/3?node_id=1&id=ZTU3NDYwNTItZGRjOGZmM2YtMzI4YjljNDgtYzNkNmJlOWM=, Slow query, duration: 38.999332s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 
NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:56.136744Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:56.137162Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:56.138668Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899268611215743:10086];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-25T14:49:56.139048Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
: Warning: Execution, code: 1060
: Warning: Unapplied hint: Rows(R T # 1), code: 4534
: Warning: Execution, code: 1060
: Warning: Unapplied hint: Rows(R T # 1), code: 4534 >> KqpIndexLookupJoin::InnerJoinOnlyRightColumn+StreamLookup [GOOD] >> KqpIndexLookupJoin::InnerJoinOnlyRightColumn-StreamLookup >> KqpJoin::FullOuterJoinNotNullJoinKey [GOOD] >> KqpJoinOrder::TPCDS23 >> KqpJoinOrder::SortingsDifferentDirs+RemoveLimitOperator [GOOD] >> KqpIndexLookupJoin::CheckCastInt32ToInt16+StreamLookupJoin+NotNull [GOOD] >> ShowCreateView::WithPairedTablePathPrefix [FAIL] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::FullOuterJoinNotNullJoinKey [GOOD] Test command err: Trying to start YDB, gRPC: 6503, MsgBus: 20519 2025-06-25T14:49:59.342009Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899323661463971:2241];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:59.345711Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d40/r3tmp/tmplGlLdj/pdisk_1.dat 2025-06-25T14:49:59.834662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:59.834788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:59.842170Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:59.844434Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899323661463745:2080] 1750862999165927 != 1750862999165930 2025-06-25T14:49:59.860533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6503, node 1 2025-06-25T14:50:00.052789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:00.052808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:00.052817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:00.052924Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:00.252444Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20519 TClient is connected to server localhost:20519 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:01.079698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:01.129376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:50:01.410074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:01.687346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:01.788786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:03.762013Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899340841334557:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:03.762095Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:04.179911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.241134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.252387Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899323661463971:2241];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:04.252451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:04.283875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.335869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.440987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.491916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.542716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.635832Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899345136302517:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:04.635931Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:04.636199Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899345136302522:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:04.639908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:04.661000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:50:04.661473Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899345136302524:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:04.735683Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899345136302577:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:06.083242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d40/r3tmp/tmpp83EJm/pdisk_1.dat 2025-06-25T14:50:09.353095Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:09.414247Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:09.435720Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:09.435795Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:09.436421Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899364184358512:2080] 1750863009081075 != 1750863009081078 2025-06-25T14:50:09.442136Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27043, node 2 2025-06-25T14:50:09.624742Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:09.624759Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:09.624766Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:09.624847Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:10.084411Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:61649 TClient is connected to server localhost:61649 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:10.646265Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:10.652773Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:10.658972Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:10.762288Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:10.963081Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:11.069650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:13.897203Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899381364229336:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:13.897286Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:13.956941Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:13.996161Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.067294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.118409Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.181759Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.261287Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.310397Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.400590Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899385659197302:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.400669Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.400973Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899385659197307:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.404693Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:14.427051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:50:14.427517Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899385659197309:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:14.507048Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899385659197361:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:15.834458Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.873599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::ShuffleEliminationDifferentJoinPredicateKeyTypeCorrectness2 [GOOD] >> KqpIndexLookupJoin::CheckCastUint32ToUint16+StreamLookupJoin-NotNull [GOOD] >> KqpIndexLookupJoin::CheckCastUint32ToUint16+StreamLookupJoin+NotNull >> KqpJoinOrder::TPCHRandomJoinViewJustWorks-ColumnStore [GOOD] >> KqpJoinOrder::Chain65Nodes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastInt32ToInt16+StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 24336, MsgBus: 20168 2025-06-25T14:50:00.476585Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899325645554558:2190];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:00.768491Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d3e/r3tmp/tmp5RgMKI/pdisk_1.dat 2025-06-25T14:50:01.079974Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:01.092438Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899325645554393:2080] 1750863000366944 != 1750863000366947 2025-06-25T14:50:01.128936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:01.129040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:01.136108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24336, node 1 2025-06-25T14:50:01.444572Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:01.444803Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:01.444814Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:01.444820Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:01.460386Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20168 TClient is connected to server localhost:20168 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:02.378425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:02.430568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:02.449879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:02.651052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:02.876123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:50:02.963340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.861406Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899342825425226:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:04.861534Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:05.274241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:05.339250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:05.400278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:05.415309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899325645554558:2190];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:05.415381Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:05.447838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:05.497262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:05.549224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:05.631291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:05.736668Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899347120393187:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:05.736810Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:05.737121Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899347120393192:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:05.741111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:05.768524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:50:05.768957Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899347120393194:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:05.832928Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899347120393247:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:07.614214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is u ... lled at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 8580, MsgBus: 10647 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d3e/r3tmp/tmpP8jGmX/pdisk_1.dat 2025-06-25T14:50:09.924838Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899363680975558:2080] 1750863009554973 != 1750863009554976 2025-06-25T14:50:09.924934Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:09.965595Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:10.087230Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:10.087309Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:10.089762Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8580, node 2 2025-06-25T14:50:10.272757Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:10.272775Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:10.272784Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:10.272876Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:10.554044Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10647 TClient is connected to server localhost:10647 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:11.330227Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:11.347926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:11.364509Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:11.464904Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:11.815769Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:11.935633Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:14.309350Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899385155813667:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.309415Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.380512Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.463749Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.533860Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.614181Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.651795Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.714576Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.774053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:14.862119Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899385155814328:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.862220Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.862472Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899385155814333:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.866226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:14.883502Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899385155814335:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:14.978881Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899385155814388:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:16.926907Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:17.031770Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsDifferentDirs+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 12599, MsgBus: 27583 2025-06-25T14:49:28.637360Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899189414777314:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:28.670437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d5f/r3tmp/tmp5AOzNF/pdisk_1.dat 2025-06-25T14:49:29.424067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:29.424167Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:29.433345Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:29.495472Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:29.500492Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899189414777154:2080] 1750862968576031 != 1750862968576034 TServer::EnableGrpc on GrpcPort 12599, node 1 2025-06-25T14:49:29.660396Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:29.713502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:29.713521Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:29.713528Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:29.713622Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient 
is connected to server localhost:27583 TClient is connected to server localhost:27583 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:30.705875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:30.721582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:32.858938Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899206594646982:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:32.859057Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:32.859329Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899206594646994:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:32.863068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:32.882587Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899206594646996:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:32.956305Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899206594647047:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:33.404631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.540398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.614423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.628417Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899189414777314:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:33.628492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:33.668873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.718061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.894924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.982374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.022470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.059875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.089127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.147057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.178724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.216991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.947582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
10714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.928692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.940591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.944593Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.944996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.948918Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038532;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.949420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038568;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.953364Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.953860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.957984Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038568;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.958460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038536;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.961932Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038592;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.962329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.966877Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038536;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.967339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-25T14:50:12.970544Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.971029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.975408Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.975921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.979027Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.979508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.983469Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.987036Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.987770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.989156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038464;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.995662Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.996205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.998354Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038464;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.998830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:13.006698Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:13.007144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:13.008602Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:13.009007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:13.013481Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:13.016565Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038658;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:13.017116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:13.021022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:13.025559Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:13.026038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:13.029559Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:13.029991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:13.034286Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:13.038425Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:13.220566Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1pcv24exajbqt0pzg52k", SessionId: 
ydb://session/3?node_id=1&id=ZjZiZjIwYi0xZDI3M2Q4NC05Yjc1Y2Y4LWIwZjY4YWFk, Slow query, duration: 35.784249s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:13.513820Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:13.514409Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:13.520720Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationDifferentJoinPredicateKeyTypeCorrectness2 [GOOD] Test command err: Trying to start YDB, gRPC: 17646, MsgBus: 10497 2025-06-25T14:49:24.054018Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899171387843176:2193];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:24.054068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d6a/r3tmp/tmpPRenbo/pdisk_1.dat 2025-06-25T14:49:24.582801Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:24.582902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:24.584991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17646, node 1 2025-06-25T14:49:24.631250Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:24.796864Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:24.796880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:24.796886Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:24.796986Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:25.036451Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server 
localhost:10497 TClient is connected to server localhost:10497 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:25.643667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:25.668809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:27.530027Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899184272745522:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:27.530144Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:27.530452Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899184272745534:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:27.534173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:27.545485Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899184272745536:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:27.607596Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899184272745587:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:27.878720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.020765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.055778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.127785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.172944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.300698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.333763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.413274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.449306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.498888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.538845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.569534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:28.602461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:29.056544Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899171387843176:2193];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:29.056596Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:29.248863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/scheme ... 
51987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038541;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.855824Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038541;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.855824Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.856287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038453;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.856296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.860960Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038453;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.860960Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.861436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.861436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.865286Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.865603Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.865793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.866072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.869716Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.870196Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.870494Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.870967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.874225Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038517;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.874774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.875140Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.875596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.879022Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.879474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.879781Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.880249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.883675Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.884265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.884603Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038589;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.885078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.888505Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038489;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.889000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.889760Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.890221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.893217Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.893729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.894409Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038583;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.895084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.898142Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038581;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.898658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:06.899151Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:06.910547Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:07.036555Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1gsw8hwa7h69sycxe09c", SessionId: ydb://session/3?node_id=1&id=ZDgxNWY3YjgtOTEyYTkyZGYtZDM3ZmI1M2QtNjBlMjIxY2E=, Slow query, duration: 35.327643s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:07.307036Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:07.307523Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:07.308060Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519899308826822397:5301];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-25T14:50:07.308550Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCHRandomJoinViewJustWorks-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 9216, MsgBus: 2945 2025-06-25T14:49:27.141413Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899186130044802:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:27.495450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d63/r3tmp/tmpzbZ7mV/pdisk_1.dat 2025-06-25T14:49:27.859573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:27.859658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:27.863322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:27.911984Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899186130044632:2080] 1750862967079190 != 1750862967079193 2025-06-25T14:49:27.943333Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9216, node 1 2025-06-25T14:49:28.178037Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:28.178707Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:28.178717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
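For readability, the setup DDL that the KQP_SLOW_LOG entries above record as a single escaped string (duration ~35s, status STATUS_CODE_UNSPECIFIED) is reproduced here unescaped. This is taken verbatim from the logged query text, nothing added; the commented-out random_field columns are part of the logged statement:

CREATE TABLE t1 (
    id1 Int32 NOT NULL,
    PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
    id2 Int64 NOT NULL,
    t1_id1 Int64 NOT NULL,
    -- random_field2 Int32
    PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
    id3 Int16 NOT NULL,
    -- random_field3 Int32
    PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);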
2025-06-25T14:49:28.178726Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:28.178845Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2945 TClient is connected to server localhost:2945 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:29.287335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:29.332604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:31.413400Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899203309914458:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:31.413492Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:31.413718Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899203309914470:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:31.417633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:31.444519Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899203309914472:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:31.544673Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899203309914526:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:32.000282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:32.124508Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899186130044802:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:32.124630Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:32.269302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:32.314288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:32.392483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:32.432405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:32.590100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:32.619285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:32.692271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:32.729384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:32.785116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:32.829661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:32.864102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:32.900870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.731178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperatio ... 
25421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.929300Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038514;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.929431Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038614;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.929880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.929932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.934152Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.934671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.938534Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.939548Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038580;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.940001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038518;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.940613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.945721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038550;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.946226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.951560Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038552;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.951634Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038518;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.952059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.952059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.955901Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.956665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.960229Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038590;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.960895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.964832Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.965360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.969410Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.970310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.974318Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.974842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.978821Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038608;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.979306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.983238Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.984174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.987859Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.988671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.992272Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.993458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:11.997463Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:11.997932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.001101Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038656;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.014482Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.034861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:12.039973Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:12.097408Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1n1xa4678gpd92398bc8", SessionId: ydb://session/3?node_id=1&id=NmJjMjQ2ZDYtYTBiZjM0YWUtNTIwMzUwNGEtZDY5M2JhNzI=, Slow query, duration: 36.035428s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:12.433845Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:12.434186Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:12.434717Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519899233374691846:2847];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-25T14:50:12.435019Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::CanonizedJoinOrderTPCH16 [GOOD] >> KqpJoinOrder::TestJoinHint2-ColumnStore [GOOD] >> KqpJoin::IndexLoookupJoinStructJoin-StreamLookupJoin [GOOD] >> KqpJoin::JoinAggregate >> KqpJoinOrder::SortingsByPrefixWithAttrEquiToPK+RemoveLimitOperator [GOOD] >> KqpJoinOrder::FourWayJoinWithPredsAndEquivAndLeft+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/ut/unittest >> ShowCreateView::WithPairedTablePathPrefix [FAIL] Test command err: 2025-06-25T14:40:48.877181Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519896954491203309:2240];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:48.877258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00128b/r3tmp/tmpUxITyv/pdisk_1.dat 2025-06-25T14:40:49.348952Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:49.349055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:49.357002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:49.365275Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:49.425856Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 28852, node 1 2025-06-25T14:40:49.496290Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:40:49.496338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize 
from file: (empty maybe) 2025-06-25T14:40:49.496346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:40:49.496486Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28238 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:40:49.868426Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:40:49.888719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:40:49.916698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) waiting... 2025-06-25T14:40:49.952294Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519896960203950910:2158];send_to=[0:7307199536658146131:7762515]; waiting... 
2025-06-25T14:40:50.043921Z node 3 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [3:7519896955908983441:2062] 2025-06-25T14:40:50.051822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:50.051896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:50.059023Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:40:50.059855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:40:50.105495Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Database1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:40:50.178905Z node 3 :SYSTEM_VIEWS DEBUG: partition_stats.cpp:32: NSysView::TPartitionStatsCollector bootstrapped 2025-06-25T14:40:50.183414Z node 3 :SYSTEM_VIEWS INFO: processor_impl.cpp:41: [72075186224037893] OnActivateExecutor 2025-06-25T14:40:50.183455Z node 3 :SYSTEM_VIEWS DEBUG: tx_init_schema.cpp:15: [72075186224037893] TTxInitSchema::Execute 2025-06-25T14:40:50.197310Z node 3 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:658: Handle TEvPrivate::TEvProcessCounters: service id# [3:7519896960203950813:2073] 2025-06-25T14:40:50.200416Z node 2 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:669: Handle TEvPrivate::TEvProcessLabeledCounters: service id# [2:7519896957329573993:2062] 2025-06-25T14:40:50.199607Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:40:50.204432Z node 3 :SYSTEM_VIEWS DEBUG: tx_init_schema.cpp:42: [72075186224037893] TTxInitSchema::Complete 2025-06-25T14:40:50.204476Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:136: [72075186224037893] TTxInit::Execute 2025-06-25T14:40:50.204978Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:257: [72075186224037893] Loading interval summaries: query count# 0, node ids count# 0, total count# 0 2025-06-25T14:40:50.205010Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:284: [72075186224037893] Loading interval metrics: query count# 0 2025-06-25T14:40:50.205036Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:362: [72075186224037893] Loading interval query tops: total query count# 0 2025-06-25T14:40:50.205060Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:408: [72075186224037893] Loading nodes to request: nodes count# 0, hashes count# 0 2025-06-25T14:40:50.205112Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 6, result count# 0 2025-06-25T14:40:50.205177Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 7, result count# 0 2025-06-25T14:40:50.205219Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 8, result count# 0 2025-06-25T14:40:50.205250Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 9, result count# 0 2025-06-25T14:40:50.205288Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 10, result count# 0 2025-06-25T14:40:50.205311Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 11, result count# 0 2025-06-25T14:40:50.205335Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading 
results: table# 12, result count# 0 2025-06-25T14:40:50.205357Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 13, result count# 0 2025-06-25T14:40:50.205403Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 14, result count# 0 2025-06-25T14:40:50.205437Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 15, result count# 0 2025-06-25T14:40:50.205474Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:129: [72075186224037893] Loading results: table# 16, partCount count# 0 2025-06-25T14:40:50.205518Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:129: [72075186224037893] Loading results: table# 19, partCount count# 0 2025-06-25T14:40:50.205552Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 17, result count# 0 2025-06-25T14:40:50.205579Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 18, result count# 0 2025-06-25T14:40:50.205609Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 20, result count# 0 2025-06-25T14:40:50.205635Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 21, result count# 0 2025-06-25T14:40:50.205735Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:333: [72075186224037893] Reset: interval end# 2025-06-25T14:40:50.000000Z 2025-06-25T14:40:50.215667Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:488: [72075186224037893] TTxInit::Complete 2025-06-25T14:40:50.222539Z node 3 :SYSTEM_VIEWS DEBUG: tx_configure.cpp:20: [72075186224037893] TTxConfigure::Execute: database# /Root/Database1 2025-06-25T14:40:50.232345Z node 3 :SYSTEM_VIEWS DEBUG: tx_configure.cpp:30: [72075186224037893] TTxConfigure::Complete 2025-06-25T14:40:50.234625Z node 3 :SYSTEM_VIEWS INFO: partition_stats.cpp:522: NSysView::TPartitionStatsCollector initialized: domain key# [OwnerId: 72057594046644480, LocalPathId: 2], sysview processor id# 72075186224037893 2025-06-25T14:40:50.268440Z node 3 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:778: Handle TEvSysView::TEvRegisterDbCounters: service id# [3:7519896960203950813:2073], path id# [OwnerId: 72057594046644480, LocalPathId: 2], service# 2 2025-06-25T14:40:50.271609Z node 3 :SYSTEM_VIEWS INFO: sysview_service.cpp:860: Navigate by path id succeeded: service id# [3:7519896960203950813:2073], path id# [OwnerId: 72057594046644480, LocalPathId: 2], database# /Root/Database1 2025-06-25T14:40:50.272488Z node 3 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [3:7519896960203950813:2073], database# /Root/Database1, processor id# 72075186224037893 2025-06-25T14:40:50.281701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) waiting... 
2025-06-25T14:40:50.329517Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519896965919508773:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:40:50.350316Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Database2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; waiting... 2025-06-25T14:40:50.379389Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:40:50.379467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:40:50.389758Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:40:50.390466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting ... e(19, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29097, node 19 2025-06-25T14:49:41.212527Z node 19 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:41.421186Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:41.421211Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:41.421221Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:41.421423Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24903 TClient is connected to server localhost:24903 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:42.929308Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:49:45.079515Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[19:7519899242596865817:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:45.079599Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:48.324572Z node 19 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyks1w14d3y6zg58j1rjfnwa", Request deadline has expired for 0.121215s seconds (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession 2025-06-25T14:49:52.648839Z node 24 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[24:7519899294244532396:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:52.648941Z node 24 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00128b/r3tmp/tmpI7mwq9/pdisk_1.dat 2025-06-25T14:49:53.389536Z node 24 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:53.609929Z node 24 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(24, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:53.610050Z node 24 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(24, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:53.644239Z node 24 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(24, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7520, node 24 2025-06-25T14:49:53.673298Z node 24 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 24 Type# 268639257 2025-06-25T14:49:53.721213Z node 24 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:54.120781Z node 24 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:54.120814Z node 24 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:54.120826Z node 24 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:54.121034Z node 24 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19108 TClient is connected to server localhost:19108 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:55.662303Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:57.652681Z node 24 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[24:7519899294244532396:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:57.652768Z node 24 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:01.736480Z node 24 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyks28acea8j3z1k0m3tcbw9", Request deadline has expired for 0.959974s seconds (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. 
TCreateSessionResult::GetSession 2025-06-25T14:50:05.658375Z node 29 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[29:7519899350307164856:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:05.682011Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00128b/r3tmp/tmpLuwfLW/pdisk_1.dat 2025-06-25T14:50:06.382208Z node 29 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:06.394464Z node 29 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(29, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:06.394599Z node 29 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(29, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:06.409488Z node 29 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(29, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:06.444540Z node 29 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 29 Type# 268639257 TServer::EnableGrpc on GrpcPort 4931, node 29 2025-06-25T14:50:06.683297Z node 29 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:06.793417Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:06.793447Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:06.793459Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:06.793692Z node 29 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20162 TClient is connected to server localhost:20162 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:08.213955Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:50:10.640506Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[29:7519899350307164856:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:10.640605Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:15.802732Z node 29 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyks2mjr8kwmc4c4kv4ce2p5", Request deadline has expired for 2.459385s seconds (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinHint2-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 15705, MsgBus: 24168 2025-06-25T14:49:28.888899Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899189867771339:2191];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:28.888955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d60/r3tmp/tmp4N5N2i/pdisk_1.dat 2025-06-25T14:49:29.666339Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:29.666420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:29.680561Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:29.681385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:29.712653Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899189867771181:2080] 1750862968834759 != 1750862968834762 TServer::EnableGrpc on GrpcPort 15705, node 1 2025-06-25T14:49:30.028453Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:30.028898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:30.028905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:30.028911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:30.029015Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24168 TClient is connected to server localhost:24168 WaitRootIsUp 'Root'... 
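The ShowCreateView::WithPairedTablePathPrefix failure above ends with NYdb::Dev::TContractViolation raised from TCreateSessionResult::GetSession: the request deadline had already expired, and GetSession() was called on a result whose status was not successful. Below is a minimal sketch of the guard that avoids this contract violation, assuming the standard YDB C++ SDK session API (TDriverConfig, TDriver, NTable::TTableClient); the header path and endpoint are placeholders, and this is not the test's actual code:

#include <ydb/public/sdk/cpp/client/ydb_table/table.h> // header path differs between SDK versions
#include <iostream>

int main() {
    auto config = NYdb::TDriverConfig()
        .SetEndpoint("localhost:2136") // placeholder endpoint
        .SetDatabase("/Root");
    NYdb::TDriver driver(config);
    NYdb::NTable::TTableClient client(driver);

    // CreateSession() returns a status-carrying result; calling GetSession()
    // on a non-successful result is a contract violation, so check first.
    auto result = client.CreateSession().GetValueSync();
    if (!result.IsSuccess()) {
        std::cerr << "CreateSession failed: "
                  << result.GetIssues().ToString().c_str() << "\n";
        driver.Stop(true);
        return 1;
    }
    auto session = result.GetSession();
    // ... use session, e.g. session.ExecuteSchemeQuery(...) ...
    driver.Stop(true);
    return 0;
}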
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:31.001436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:31.036826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:33.157221Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899211342608309:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:33.157300Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:33.157554Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899211342608321:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:33.161266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:33.173156Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899211342608323:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:33.258881Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899211342608374:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:33.699482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.800265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.836927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.876783Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899189867771339:2191];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:33.876842Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:33.918727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:33.971245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.165124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.197582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.226557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.257230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.291866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.324815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.358050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.397243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:35.069051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
07973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.416415Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.416995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.419887Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.420330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.427274Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.427712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.430627Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.431088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.435702Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038625;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.440386Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.440828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.444791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.449396Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.449967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.453387Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.453973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.454991Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.455473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.462623Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.463169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.464669Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.465201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.471851Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.473869Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.474385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.479104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.479633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.480502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.484348Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.484718Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.485075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.485075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.489652Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.490170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.494752Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.495251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.495778Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.497174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.500932Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.503037Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038596;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.648778Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1pbf7vznwa6aj512v0d4", SessionId: ydb://session/3?node_id=1&id=NGYwNTkwOWItZmJkODc3YWMtNmI2ZjdjNWYtZjcxNTliYTM=, Slow query, duration: 39.256834s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:16.927898Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:16.928378Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:16.928663Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;self_id=[1:7519899237112417949:2768];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038170;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038331; 2025-06-25T14:50:16.929022Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpIndexLookupJoin::InnerJoinOnlyRightColumn-StreamLookup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsByPrefixWithAttrEquiToPK+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 31934, MsgBus: 13833 2025-06-25T14:49:30.411721Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899197267024892:2129];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:30.412237Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d58/r3tmp/tmpFy27f9/pdisk_1.dat 2025-06-25T14:49:31.101312Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899197267024801:2080] 1750862970370596 != 1750862970370599 2025-06-25T14:49:31.101707Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:31.124898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:31.124981Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:31.134347Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31934, node 1 2025-06-25T14:49:31.421608Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:31.422009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:31.422022Z node 1 :NET_CLASSIFIER WARN: 
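(The slow-query entry above embeds the offending statement with escaped newlines; the same CREATE TABLE batch is reproduced here unescaped, purely for readability — no content beyond what the log already records.)

CREATE TABLE t1 (
    id1 Int32 NOT NULL,
    PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
    id2 Int64 NOT NULL,
    t1_id1 Int64 NOT NULL,
    -- random_field2 Int32
    PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
    id3 Int16 NOT NULL,
    -- random_field3 Int32
    PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);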
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:31.422029Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:31.422115Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13833 TClient is connected to server localhost:13833 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:32.147818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:32.161498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:34.277988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899214446894633:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:34.278132Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:34.278435Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899214446894645:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:34.281989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:34.295124Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899214446894647:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:34.396037Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899214446894698:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:34.796706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:34.987181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:35.019884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:35.051818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:35.122428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:35.376191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:35.391342Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899197267024892:2129];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:35.391417Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:35.412622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:35.462009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:35.496488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:35.576063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:35.621434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:35.698408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:35.743433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:36.552280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.516302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.523242Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.523727Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.523801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.524146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.528629Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.529193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.529738Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.530220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.537523Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.538112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.539131Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.539650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.546368Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.546368Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.546895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.546895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.551656Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.552148Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.552235Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.552631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.557210Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038621;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.557209Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.557733Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.557923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.562212Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038551;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.562765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.564975Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038635;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.565511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.566039Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.566408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.570093Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.570266Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038653;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.570629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.570765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.576107Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.576107Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.598570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:17.610766Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:17.644554Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1qn91b3k1wypxp1t3e22", SessionId: ydb://session/3?node_id=1&id=MjdjYjEyY2QtNzAzYzJkNjgtZjI2OWNjNWUtNzY2YzIxY2U=, Slow query, duration: 38.915058s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:17.933634Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:17.934053Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:17.934736Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519899330411033453:5085];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-25T14:50:17.935148Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716;
: Warning: Execution, code: 1060
:4:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503
: Warning: Execution, code: 1060
:4:1: Warning: Given predicate is not suitable for used index: ix_bank_document_exec_dt_accounts, code: 2503 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH16 [GOOD] Test command err: Trying to start YDB, gRPC: 9960, MsgBus: 5115 2025-06-25T14:48:28.774854Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898932857226658:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:28.774924Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d94/r3tmp/tmprcgNRu/pdisk_1.dat 2025-06-25T14:48:29.386299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:29.386388Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:29.397872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:29.448428Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898932857226517:2080] 1750862908734201 != 1750862908734204 2025-06-25T14:48:29.477382Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9960, node 1 2025-06-25T14:48:29.752800Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:29.752826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:29.752833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:29.752946Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:29.796416Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5115 TClient is connected to server localhost:5115 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:48:30.590467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:30.613512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:48:32.930152Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898950037096351:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:32.930274Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:32.930599Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898950037096363:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:32.934777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:32.954381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:48:32.954703Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898950037096365:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:48:33.016284Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898954332063712:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:33.509609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:48:33.829653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898954332063954:2311];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:33.829912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898954332063954:2311];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:33.830186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898954332063954:2311];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:33.830322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898954332063954:2311];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:33.830429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898954332063954:2311];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:33.830550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898954332063954:2311];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:33.830651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898954332063954:2311];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:33.830765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898954332063954:2311];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:33.830913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898954332063954:2311];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:48:33.831031Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037899;self_id=[1:7519898954332063954:2311];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:48:33.831123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898954332063954:2311];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:48:33.833717Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898932857226658:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:33.837558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898954332063958:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:33.837603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898954332063958:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:33.837811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898954332063958:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:33.837918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898954332063958:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:33.838047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898954332063958:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:33.838150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898954332063958:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:33.838276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898954332063958:2315];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:33.838405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519898954332063958:2315];tablet_id=72075186224037896;p ... 
51690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.060219Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.065482Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.065980Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.070866Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.071465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.072992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.078253Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.078760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.083185Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.083761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.091557Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.093298Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.093769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.097603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.103262Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.103767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.108147Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.110412Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.110814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.115461Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.115983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.120194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.129280Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.129912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.133167Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.133603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.138241Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.140813Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.141307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.144960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.146988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.149568Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.150204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.154966Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.155741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.158256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.163334Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.165163Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.378391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:02.392053Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:02.504596Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks181hc26z88gqa8etgbx1", SessionId: ydb://session/3?node_id=1&id=N2I2YmQ2MWItZTFmNmY5MjItZmFhOWQ0MzctMmI4MzU1YjA=, Slow query, duration: 39.766436s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:02.921783Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:02.922267Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:02.922756Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519899233504981822:8184];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-25T14:50:02.923126Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpJoinOrder::FiveWayJoinWithComplexPreds-ColumnStore [GOOD] >> KqpJoinOrder::TestJoinOrderHintsComplex-ColumnStore [GOOD] >> KqpIndexLookupJoin::CheckCastUint32ToUint16+StreamLookupJoin+NotNull [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FourWayJoinWithPredsAndEquivAndLeft+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 10830, MsgBus: 29328 2025-06-25T14:48:34.764587Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898956554580954:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:34.765437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d8e/r3tmp/tmpouV0X4/pdisk_1.dat 2025-06-25T14:48:35.343905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:35.343996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:35.353807Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:35.413239Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:35.416582Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898956554580735:2080] 1750862914646345 != 1750862914646348 TServer::EnableGrpc on GrpcPort 10830, node 1 2025-06-25T14:48:35.613282Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:35.613315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize 
from file: (empty maybe) 2025-06-25T14:48:35.613321Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:35.613462Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:35.684559Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29328 TClient is connected to server localhost:29328 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:36.615042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:36.634898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:39.595931Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898978029417859:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:39.596055Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:39.596330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898978029417871:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:39.603946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:39.621161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:48:39.624563Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898978029417873:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:39.701044Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898978029417924:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:39.733348Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898956554580954:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:39.733414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:40.290264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:48:40.605549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898982324385470:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:40.605766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898982324385470:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:40.605995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898982324385470:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:40.606100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898982324385470:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:40.606203Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898982324385470:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:40.606306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898982324385470:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:40.606401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898982324385470:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:40.606484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898982324385470:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:40.606570Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898982324385470:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:48:40.606658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898982324385470:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:48:40.606739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898982324385470:2314];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:48:40.616771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898982324385475:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:40.616822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898982324385475:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:40.617010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898982324385475:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:40.617099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898982324385475:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:40.617174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898982324385475:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:40.617253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898982324385475:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:40.617337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898982324385475:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_regis ... 
59714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.263972Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039261;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.264567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.273578Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.274051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.274663Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039355;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.275045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.280148Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039279;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.285734Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.286203Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.289925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.297445Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.297908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.301416Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.301873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.307245Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.307956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.311609Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.314629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.319842Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.323823Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.324907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.328945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.336260Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039323;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.336880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.340671Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.341478Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.347456Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.349265Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.349821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.353037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.359686Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.361588Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.362171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.364989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.371329Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.371987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.374255Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.379903Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.460560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:04.468459Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:04.588197Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1dc04v0jq1gyfqww0rjv", SessionId: ydb://session/3?node_id=1&id=ZWM2NDNlODAtMjc1ZTEzOWYtMmYxOWNhNDYtYzUyMDMxZjU=, Slow query, duration: 36.395152s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:04.943343Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:04.943821Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:04.944341Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899295857052377:9663];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-25T14:50:04.944741Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::InnerJoinOnlyRightColumn-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 65253, MsgBus: 4464 2025-06-25T14:50:08.120103Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899360423699690:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:08.120265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d3d/r3tmp/tmpFovdm5/pdisk_1.dat 2025-06-25T14:50:08.662816Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:08.664512Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899360423699483:2080] 1750863008022263 != 1750863008022266 2025-06-25T14:50:08.677932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:08.678015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:08.692272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 65253, node 1 2025-06-25T14:50:08.996870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:08.996905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:08.996927Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:08.997032Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:09.016492Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4464 TClient is connected to server localhost:4464 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:09.923310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:09.958436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:10.214455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:10.475544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:10.597558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:13.105064Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899381898537611:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:13.105164Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:13.122917Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899360423699690:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:13.122970Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:13.467573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:13.502320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:13.569727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:13.603728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:13.639267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:13.723367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:13.763089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:13.840876Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899381898538270:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:13.840925Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:13.841133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899381898538275:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:13.845303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:13.856952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:50:13.860477Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899381898538277:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:13.946817Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899381898538328:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:15.295961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... d to server localhost:19588 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:18.654594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:18.664957Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:50:18.680946Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:18.780138Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:19.025760Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:19.115835Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:21.953366Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899417227874896:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:21.953443Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:22.073826Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:22.180057Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:22.225035Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:22.293474Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:22.341618Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899400048004292:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:22.341696Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:22.368954Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:22.434849Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:22.483011Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:22.579470Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[2:7519899421522842856:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:22.579567Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:22.584178Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899421522842861:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:22.587684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:22.607144Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899421522842863:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:50:22.663404Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899421522842915:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:23.965344Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:24.006068Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:24.043143Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:24.085637Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:24.114389Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:24.143808Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpJoinOrder::ShuffleEliminationDifferentJoinPredicateKeyTypeCorrectness1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckCastUint32ToUint16+StreamLookupJoin+NotNull [GOOD] Test command err: Trying to start YDB, gRPC: 17550, MsgBus: 10909 2025-06-25T14:50:10.356398Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899371003847947:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:10.356588Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d33/r3tmp/tmpybSG30/pdisk_1.dat 2025-06-25T14:50:11.018822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:11.018911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:11.081012Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899371003847862:2080] 1750863010323547 != 1750863010323550 2025-06-25T14:50:11.085771Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:11.093201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17550, node 1 2025-06-25T14:50:11.383690Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:11.393229Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:11.393253Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:11.393271Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:11.393368Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10909 TClient is connected to server localhost:10909 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:12.428905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:50:12.485247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:12.502369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:12.797875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:13.031181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:13.120293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:15.149079Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899392478685977:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:15.149160Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:15.340470Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899371003847947:2125];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:15.340545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:15.648096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.711734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.767174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.813197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.873956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.938231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:16.033485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:16.130081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899396773653937:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:16.130144Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:16.130333Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899396773653942:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:16.134672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:16.161840Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899396773653944:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:16.251916Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899396773653995:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:17.493929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 7 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:19.570107Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:19.573779Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:19.574840Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:19.574977Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899409076238666:2080] 1750863019315241 != 1750863019315244 TServer::EnableGrpc on GrpcPort 5097, node 2 2025-06-25T14:50:19.797002Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:19.797022Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:19.797033Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:19.797126Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13183 2025-06-25T14:50:20.444451Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13183 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:50:20.665336Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:20.670376Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:20.686133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:20.766771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:50:20.958758Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:21.143314Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:23.157149Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899426256109500:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:23.157253Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:23.218395Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:23.318734Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:23.405840Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:23.485789Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:23.605353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:23.706033Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:23.801206Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:23.955915Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899426256110171:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:23.956084Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:23.956380Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899426256110176:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:23.961475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:23.980383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:50:23.980678Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899426256110178:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:24.071902Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899430551077525:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:24.368455Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899409076238742:2090];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:24.368552Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:25.377801Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:25.491018Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinOrderHintsComplex-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 12459, MsgBus: 10925 2025-06-25T14:49:37.450768Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899228094902531:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:37.453383Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d4e/r3tmp/tmpTi4Xaf/pdisk_1.dat 2025-06-25T14:49:38.084244Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:38.088360Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899228094902349:2080] 1750862977383743 != 1750862977383746 2025-06-25T14:49:38.093099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:38.093176Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:38.097435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12459, node 1 2025-06-25T14:49:38.280754Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:38.280771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T14:49:38.280778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:38.280882Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:38.386759Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10925 TClient is connected to server localhost:10925 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:39.080137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:39.097556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:49:41.410403Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899245274772185:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:41.410753Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899245274772174:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:41.410812Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:41.415007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:41.441043Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899245274772188:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:49:41.508826Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899245274772241:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:41.892033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.047476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.091732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.141678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.191219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.375652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.412668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899228094902531:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:42.412839Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:42.454947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.530285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.606630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.661739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.713127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.790721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:42.841221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:43.674029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subope ... 
rnal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.333958Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.334393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.338170Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038593;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.338508Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.338902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.339217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.343227Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.343227Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.343611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.343615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.347822Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.347823Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.348298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.348300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.352614Z node 1 :TX_COLUMNSHARD_TX WARN: 
log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.352614Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038473;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.353117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.353117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038495;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.357662Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038495;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.357665Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.358166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038463;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.358652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.363359Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.364128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.373754Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.374263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.376982Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038463;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.377349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.379119Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.379589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.381851Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.382351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.384254Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.384776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.394296Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.395764Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038584;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.395848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038586;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.396192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:19.403738Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038586;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.408367Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038563;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:19.524682Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1yz6fbnatykpbwvvdr1b", SessionId: ydb://session/3?node_id=1&id=ZjYyOTVhNzctYTEzNzdlOTEtYWFhMGMxNDMtZDU4YzRiYzE=, Slow query, duration: 33.309901s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:19.813831Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:19.813973Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:19.814519Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716;
: Warning: Execution, code: 1060
: Warning: Unapplied hint: JoinOrder( (Unused1 Unused2) (Unused3 Unused4) ), code: 4534
: Warning: Unapplied hint: Rows(Unused # 10e8), code: 4534
: Warning: Unapplied hint: Rows(R T # 1), code: 4534
: Warning: Execution, code: 1060
: Warning: Unapplied hint: JoinOrder( (Unused1 Unused2) (Unused3 Unused4) ), code: 4534
: Warning: Unapplied hint: Rows(Unused # 10e8), code: 4534
: Warning: Unapplied hint: Rows(R T # 1), code: 4534 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithComplexPreds-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 30245, MsgBus: 3325 2025-06-25T14:49:32.805399Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899207347166961:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:32.805767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d55/r3tmp/tmp2SVAxn/pdisk_1.dat 2025-06-25T14:49:33.432721Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899207347166779:2080] 1750862972763091 != 1750862972763094 2025-06-25T14:49:33.468160Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:33.468476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:33.468571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:33.473632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30245, node 1 2025-06-25T14:49:33.728749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:33.728771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:33.728778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:33.728868Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:33.764413Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3325 TClient is connected to server localhost:3325 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:49:34.662247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:34.686646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:36.765558Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899224527036603:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:36.765683Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:36.765890Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899224527036615:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:36.772697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:36.791860Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899224527036617:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:36.860022Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899224527036671:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:37.248223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:37.355750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:37.387927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:37.459958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:37.510567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:37.655068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:37.685250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:37.758461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:37.786674Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899207347166961:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:37.786731Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:37.813262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:37.892887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:37.940332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:37.978059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:38.052499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:38.746103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperat ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.342727Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038493;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.343395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.346770Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038514;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.347326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038516;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.357033Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.357615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.361274Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038516;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.361807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038482;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.363201Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038501;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.363841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038477;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.369701Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038477;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.371913Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038482;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.372438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.383365Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.383963Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.388244Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.392980Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.402303Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038524;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.402893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.412282Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038499;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.412983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038500;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.424076Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038500;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.424637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.428607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038560;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.433264Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038560;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.433855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038460;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.436726Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.437281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.442769Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038522;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.444197Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038460;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.444758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.448900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038530;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.449942Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038535;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.450488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038469;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.454053Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038530;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.454567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.461257Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038469;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.467701Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038488;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.504967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.518686Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038598;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.522663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:16.534976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038562;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.660693Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1t3m03292f8hkxt3rp06", SessionId: 
ydb://session/3?node_id=1&id=N2FhNjRhZDEtZjU5YzE5Y2QtOWIzNzhmMGMtZmFmYmI2OTM=, Slow query, duration: 35.423363s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:17.043400Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:17.043480Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:17.043964Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> DataShardWrite::UpsertImmediate >> DataShardWrite::WriteImmediateBadRequest >> DataShardWrite::IncrementImmediate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::ShuffleEliminationDifferentJoinPredicateKeyTypeCorrectness1 [GOOD] Test command err: Trying to start YDB, gRPC: 6095, MsgBus: 6376 2025-06-25T14:49:33.878867Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899209279304468:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:33.884873Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d54/r3tmp/tmpDFTyBS/pdisk_1.dat 2025-06-25T14:49:34.525148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:34.525274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:34.529231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:34.574009Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:34.576485Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899209279304429:2080] 1750862973807130 != 1750862973807133 TServer::EnableGrpc on GrpcPort 6095, node 1 2025-06-25T14:49:34.744777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:34.744792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:34.744802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
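For readability, the CREATE TABLE text embedded (with \n escapes) in the KQP_SLOW_LOG entry above unescapes to the following YQL DDL; this is only a rendering of the query text already captured in the log, not additional test code:

CREATE TABLE t1 (
  id1 Int32 NOT NULL,
  PRIMARY KEY (id1)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t2 (
  id2 Int64 NOT NULL,
  t1_id1 Int64 NOT NULL,
  -- random_field2 Int32
  PRIMARY KEY (id2)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

CREATE TABLE t3 (
  id3 Int16 NOT NULL,
  -- random_field3 Int32
  PRIMARY KEY (id3)
) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);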
2025-06-25T14:49:34.744894Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:34.872575Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6376 TClient is connected to server localhost:6376 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:35.611265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:37.917133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899226459174260:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:37.917276Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:37.924428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899226459174272:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:37.931317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:37.940105Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899226459174274:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:49:38.020516Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899230754141621:2339] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:38.439271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:38.590266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:38.631305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:38.660628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:38.685564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:38.824391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:38.856575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:38.882143Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899209279304468:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:38.882532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:38.904090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:38.938267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:38.970917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:39.001495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:39.039039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:39.072929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:39.752767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/ ... 
01390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.204664Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.205178Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038439;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.206218Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038465;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.206781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.210316Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038439;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.210843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.211524Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.211974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.215840Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038600;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.216447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.216792Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038612;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.217347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.221590Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.222095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.222187Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.222744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.227018Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038618;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.227579Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.227659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.228230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.231591Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038588;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.232591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038481;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.235582Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.236674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.237283Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038481;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.237848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.241575Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.242489Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038449;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.270460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.271467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038433;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.276348Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038471;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.277454Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038433;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.278055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038429;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.278332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.283436Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038451;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.283937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038487;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.289595Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038487;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.292977Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038429;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.293740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:20.298645Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038604;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:20.377780Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1v9wb24mgap5cwbk8jqn", SessionId: ydb://session/3?node_id=1&id=OGEwYTE0ZGEtMTBjODY1ZDAtNmQyMjVjYTEtNjYyYWFmZDk=, Slow query, duration: 37.917025s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:20.689428Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:20.689902Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:20.690083Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519899372488088578:5495];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-25T14:50:20.690531Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> DataShardWrite::UpsertImmediateManyColumns >> KqpJoinOrder::TPCDS16+ColumnStore [GOOD] |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest |86.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::InnerJoinSelect >> KqpSysColV1::StreamInnerJoinTables >> KqpSysColV0::SelectRange >> KqpSystemView::ReadSuccess >> KqpSysColV0::InnerJoinSelectAsterisk >> KqpSystemView::PartitionStatsRanges >> KqpSystemView::Sessions >> KqpSysColV1::StreamSelectRowById >> KqpSystemView::PartitionStatsRange1 >> KqpJoin::JoinAggregate [GOOD] |86.2%| [TA] $(B)/ydb/core/sys_view/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpSysColV1::StreamSelectRowAsterisk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS16+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 13084, MsgBus: 10417 2025-06-25T14:48:18.564601Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898889104179829:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:18.577906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d9b/r3tmp/tmpWErc6S/pdisk_1.dat 2025-06-25T14:48:19.281863Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:19.281976Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:19.288830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:19.356144Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:19.364065Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898889104179644:2080] 1750862898488341 != 1750862898488344 TServer::EnableGrpc on GrpcPort 13084, node 1 2025-06-25T14:48:19.556485Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:48:19.652876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:19.652900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:19.652907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:19.653034Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10417 TClient is connected to server localhost:10417 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:48:20.622013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:20.657461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:22.830863Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898906284049470:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:22.830956Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:22.831427Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898906284049482:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:22.835171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:22.851315Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898906284049484:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:22.952153Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898906284049535:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:23.274482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:48:23.577124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898910579017078:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:23.577339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898910579017078:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:23.577593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898910579017078:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:23.577719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898910579017078:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:23.577824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898910579017078:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:23.577931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898910579017078:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:23.578073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898910579017078:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:23.578164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898910579017078:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:23.578825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898910579017078:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:48:23.578991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037899;self_id=[1:7519898910579017078:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:48:23.579103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519898910579017078:2315];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:48:23.579289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898910579017118:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:23.579328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898910579017118:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:23.579480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898910579017118:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:23.579577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898910579017118:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:23.579674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898910579017118:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:23.579767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898910579017118:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:23.579863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898910579017118:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:23.579953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898910579017118:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:23.580045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898910579017118:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunk ... 
_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.051174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.052862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.061854Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.062358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.063575Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.064003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.072277Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.072572Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.073186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.075246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.078354Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.078958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.080645Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.081289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-25T14:49:51.084176Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.085066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.086388Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.086959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.091744Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.092109Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.092405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.092705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.101857Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.102365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.105296Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.105975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.107720Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.111352Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.113573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.116093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.118877Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.130147Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.231714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:51.236292Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:51.308376Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks0x2rbz93r0x01weyj03r", SessionId: ydb://session/3?node_id=1&id=NTE4NGJjNmYtYzg0YWVhNWQtYzRjNWRhMDgtNDJkZGVhYWY=, Slow query, duration: 39.795333s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:51.961252Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:51.961854Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:51.962220Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519899181162001189:8215];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-25T14:49:51.962975Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:21.800202Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2j8wb7skkd1j1qvxtdtx", SessionId: ydb://session/3?node_id=1&id=NTE4NGJjNmYtYzg0YWVhNWQtYzRjNWRhMDgtNDJkZGVhYWY=, Slow query, 
duration: 15.816258s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "-- NB: Subquerys\n$orders_with_several_warehouses = (\n select cs_order_number\n from `/Root/test/ds/catalog_sales`\n group by cs_order_number\n having count(distinct cs_warehouse_sk) > 1\n);\n\n-- start query 1 in stream 0 using template query16.tpl and seed 171719422\nselect\n count(distinct cs1.cs_order_number) as `order count`\n ,sum(cs_ext_ship_cost) as `total shipping cost`\n ,sum(cs_net_profit) as `total net profit`\nfrom\n `/Root/test/ds/catalog_sales` cs1\n cross join `/Root/test/ds/date_dim`\n cross join `/Root/test/ds/customer_address`\n cross join `/Root/test/ds/call_center`\n left semi join $orders_with_several_warehouses cs2 on cs1.cs_order_number = cs2.cs_order_number\n left only join `/Root/test/ds/catalog_returns` cr1 on cs1.cs_order_number = cr1.cr_order_number\nwhere\n cast(d_date as date) between cast('1999-4-01' as date) and\n (cast('1999-4-01' as date) + DateTime::IntervalFromDays(60))\nand cs1.cs_ship_date_sk = d_date_sk\nand cs1.cs_ship_addr_sk = ca_address_sk\nand ca_state = 'IL'\nand cs1.cs_call_center_sk = cc_call_center_sk\nand cc_county in ('Richland County','Bronx County','Maverick County','Mesa County',\n 'Raleigh County'\n)\norder by `order count`\nlimit 100;\n", parameters: 0b >> KqpSystemView::NodesRange2 >> KqpJoinOrder::CanonizedJoinOrderTPCH9 [GOOD] >> DataShardWrite::WriteImmediateBadRequest [GOOD] >> DataShardWrite::WriteImmediateSeveralOperations ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoin::JoinAggregate [GOOD] Test command err: Trying to start YDB, gRPC: 1414, MsgBus: 3135 2025-06-25T14:50:14.377699Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899386334275990:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:14.378134Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d2f/r3tmp/tmpU4IwjC/pdisk_1.dat 2025-06-25T14:50:15.179423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:15.179510Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:15.182097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:15.206255Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:15.207519Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899386334275896:2080] 1750863014340121 != 1750863014340124 TServer::EnableGrpc on GrpcPort 1414, node 1 2025-06-25T14:50:15.420416Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:15.472749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:15.472774Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:15.472780Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:15.472869Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3135 TClient is connected to server localhost:3135 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:16.458604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:50:16.494706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:16.688030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:16.865211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:16.955387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:19.367332Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899386334275990:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:19.367389Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:19.413394Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899407809114001:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:19.413511Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:19.774560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:19.859113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:19.912045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:19.941105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:19.982293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:20.072535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:20.125743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:20.209111Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899412104081959:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:20.209298Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:20.209590Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899412104081964:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:20.213638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:20.232673Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899412104081966:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:50:20.292563Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899412104082017:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:21.796771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:21.929314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part pr ... 25-06-25T14:50:24.526406Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:24.526467Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:24.534149Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29663, node 2 2025-06-25T14:50:24.696777Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:24.696797Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:24.696803Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:24.696915Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4639 2025-06-25T14:50:25.192327Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4639 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:50:25.309484Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:25.333584Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:25.350928Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:25.452123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:50:25.665416Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:25.775143Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:27.981368Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899441572579792:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:27.981445Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:28.039031Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:28.076912Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:28.123111Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:28.157132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:28.219594Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:28.305742Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:28.385455Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:28.495853Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899445867547752:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:28.495932Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:28.496183Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899445867547757:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:28.499916Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:28.515260Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899445867547759:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:28.581482Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899445867547810:3410] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:29.192432Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899428687676515:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:29.192493Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:29.668414Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:29.701267Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:29.734888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> DataShardWrite::UpsertImmediate [GOOD] >> DataShardWrite::ReplaceImmediate >> DataShardWrite::UpsertImmediateManyColumns [GOOD] >> DataShardWrite::UpsertPrepared+Volatile >> DataShardWrite::IncrementImmediate [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite-Volatile >> KqpSystemView::QueryStatsSimple |86.2%| [TA] {RESULT} $(B)/ydb/core/sys_view/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpJoinOrder::SortingsSimpleOrderByPKAlias+RemoveLimitOperator [GOOD] >> KqpSysColV0::SelectRowById ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH9 [GOOD] Test command err: Trying to start YDB, gRPC: 5655, MsgBus: 16982 2025-06-25T14:48:47.295881Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899014321697446:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:47.296118Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d83/r3tmp/tmpiyuByF/pdisk_1.dat 2025-06-25T14:48:47.836578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:47.836693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:47.867503Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:47.868590Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899014321697261:2080] 1750862927251035 != 1750862927251038 2025-06-25T14:48:47.882856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5655, node 1 2025-06-25T14:48:48.076128Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:48.076148Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:48.076158Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:48.076275Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:48.313729Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16982 TClient is connected to server localhost:16982 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
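The DataShardWrite suites listed above (UpsertImmediate, ReplaceImmediate, UpsertPrepared with and without Volatile) exercise the datashard write path for the corresponding YQL statements. Illustrative statement forms, with placeholder table and column names that are not taken from the tests themselves:

    UPSERT INTO example (id, value) VALUES (1u, "a");   -- blind upsert, as in the Upsert* suites
    REPLACE INTO example (id, value) VALUES (1u, "b");  -- full-row replace, as in the Replace* suites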
2025-06-25T14:48:49.080411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:49.113429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:51.954375Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899031501567088:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:51.954504Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:51.955103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899031501567101:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:51.958893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:52.027035Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899031501567103:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:52.099802Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899035796534450:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:52.284395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899014321697446:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:52.284451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:52.569667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:48:52.921032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899035796534713:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:52.921312Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899035796534713:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:52.921603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899035796534713:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:52.921710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899035796534713:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:52.921821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899035796534713:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:52.921940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899035796534713:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:52.922064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899035796534713:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:52.922169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899035796534713:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:52.922315Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899035796534713:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:48:52.922438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899035796534713:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:48:52.922542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899035796534713:2321];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:48:52.923275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899035796534685:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:52.923309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899035796534685:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:52.923461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899035796534685:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:52.923579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899035796534685:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:52.923685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899035796534685:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:52.923804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899035796534685:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:52.923912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899035796534685:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:52.924009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899035796534685:2312];tablet ... 
ternal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.159120Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.159643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.162089Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.162599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.164267Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.164897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.168520Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.169138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.172130Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.174440Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.174884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.176686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.178958Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.179412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.181218Z node 1 :TX_COLUMNSHARD_TX WARN: 
log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.181751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.184362Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.184893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.186802Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039343;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.187329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.189646Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.190115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.191678Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.192136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.194550Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.195055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.196203Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.197433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.199872Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.200374Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039259;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.201883Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.202824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.205260Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039259;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.205766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.207072Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.207984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.210363Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.212245Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.213553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:16.217452Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:16.385022Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1q5ragyk3ddyegkd9h17", SessionId: ydb://session/3?node_id=1&id=YTlhNjEzNGUtODkzNTczOWUtMjlhNTZkYTctNzJlMDNiNjg=, Slow query, duration: 38.151970s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:16.789444Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:16.789968Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:16.790936Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519899254839903879:7280];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224039392; 2025-06-25T14:50:16.791324Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:16.941979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710717, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsSimpleOrderByPKAlias+RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 4433, MsgBus: 64720 2025-06-25T14:49:47.567841Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899272159199936:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:47.572800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d4b/r3tmp/tmpg6aa9F/pdisk_1.dat 2025-06-25T14:49:48.250138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:48.250213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:48.273876Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:48.302330Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:48.308440Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899272159199748:2080] 1750862987488839 != 1750862987488842 TServer::EnableGrpc on GrpcPort 4433, node 1 2025-06-25T14:49:48.570376Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:48.624788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:48.624809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:48.624818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from 
file: (empty maybe) 2025-06-25T14:49:48.624916Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64720 TClient is connected to server localhost:64720 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:49.675210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:49.716462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:51.824664Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899289339069575:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:51.824760Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:51.829523Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899289339069586:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:51.835579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:51.857995Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899289339069589:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:51.965461Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899289339069640:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:52.290061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:52.393906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:52.427766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:52.463237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:52.507597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:52.530227Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899272159199936:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:52.530392Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:52.669832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:52.718340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:52.757829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:52.792433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:52.823424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:52.852984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:52.882700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:52.927523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:53.601071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, subopera ... 
65075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038441;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.366679Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038497;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.367150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038447;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.379846Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038447;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.381918Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038441;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.382399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.383530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.388859Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038479;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.389481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.395901Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038605;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.396400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.401856Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038597;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.402574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.407016Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.407501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.415900Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038624;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.416425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.421116Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038638;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.421634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.423806Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.424509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.433738Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038646;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.434296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.438543Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.439046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.444056Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038648;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.448721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.452664Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038626;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.453900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.458779Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038660;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.459317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.463503Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038622;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.464136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.473446Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.474018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.478176Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.478726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.483594Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.484132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:32.488133Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038543;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.497747Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:32.700593Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks28sg3q4xhkq0fvdfff5t", SessionId: ydb://session/3?node_id=1&id=ZDFjNzhlMzctY2U3YmQ2ZDItOTNmZjEyYWEtYjMxMDk5NzE=, Slow query, duration: 36.427671s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:33.229487Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:33.230002Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:33.231145Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;self_id=[1:7519899422483080972:5341];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038629;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038331;receive=72075186224038170; 2025-06-25T14:50:33.231597Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpSysColV0::SelectRange [GOOD] >> KqpSystemView::ReadSuccess [GOOD] >> KqpSystemView::PartitionStatsRange1 [GOOD] >> KqpSysColV1::StreamSelectRowById [GOOD] >> KqpSysColV1::StreamInnerJoinTables [GOOD] >> KqpSysColV0::SelectRowAsterisk >> DataShardWrite::UpsertPrepared+Volatile [GOOD] >> DataShardWrite::UpsertPrepared-Volatile >> KqpSystemView::PartitionStatsRanges [GOOD] >> DataShardWrite::WriteImmediateSeveralOperations [GOOD] >> DataShardWrite::UpsertPreparedManyTables+Volatile >> KqpSysColV0::InnerJoinSelect [GOOD] >> KqpSysColV0::InnerJoinSelectAsterisk [GOOD] >> DataShardWrite::ReplaceImmediate [GOOD] >> DataShardWrite::ReplaceImmediate_DefaultValue >> KqpSysColV1::StreamSelectRowAsterisk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::SelectRange [GOOD] Test command err: Trying to start YDB, gRPC: 28865, MsgBus: 15982 2025-06-25T14:50:33.241392Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899470303247858:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:33.241498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001abc/r3tmp/tmpHeKSPo/pdisk_1.dat 2025-06-25T14:50:33.821101Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899470303247740:2080] 1750863033196965 != 1750863033196968 2025-06-25T14:50:33.863723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:33.866660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:33.870253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:33.948996Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28865, node 1 2025-06-25T14:50:34.230928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:34.230946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:34.230952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:34.231049Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:34.248648Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15982 TClient is connected to server localhost:15982 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:35.572852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:35.604924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.775152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:35.956678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:36.039860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:37.955502Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899487483118570:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:37.955638Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.217828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899470303247858:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:38.217903Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:38.411007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.457203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.522508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.568540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.606015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.707503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.759449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.837695Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899491778086531:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.837812Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.838263Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899491778086536:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.842956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:38.854400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:50:38.855015Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899491778086538:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:38.918038Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899491778086590:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::ReadSuccess [GOOD] Test command err: Trying to start YDB, gRPC: 1602, MsgBus: 4630 2025-06-25T14:50:33.250851Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899467179433261:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:33.250927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ab3/r3tmp/tmp8A6Qbz/pdisk_1.dat 2025-06-25T14:50:33.940587Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899467179433042:2080] 1750863033218946 != 1750863033218949 2025-06-25T14:50:33.975156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:33.975238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:33.975475Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:33.987128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1602, node 1 2025-06-25T14:50:34.255119Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:34.305106Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:34.305123Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:34.305130Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:34.305225Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4630 TClient is connected to server localhost:4630 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:35.345971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:35.410167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.691571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:50:35.922366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:36.004013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:37.475320Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899484359303852:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:37.475419Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.202728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.248709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.252807Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899467179433261:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:38.253243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:38.290764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.332233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.404209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.513064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.561784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.666302Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899488654271819:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.666367Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.666685Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899488654271824:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.670518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:38.686750Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899488654271826:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:38.770149Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899488654271877:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:40.118223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:50:40.420556Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710674. Ctx: { TraceId: 01jyks3kn24zr6ertshdj9p7yw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDQ1ZDQ1NjEtZWFhYWJmZDgtYzE4N2NmOWMtODg1OThmYmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:50:40.470383Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863040397, txId: 281474976710673] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamInnerJoinTables [GOOD] Test command err: Trying to start YDB, gRPC: 32396, MsgBus: 8290 2025-06-25T14:50:33.196693Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899466829290328:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:33.196750Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001aa7/r3tmp/tmpPFqlGP/pdisk_1.dat 2025-06-25T14:50:33.877250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:33.877325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:33.878638Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:33.880506Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899466829290232:2080] 1750863033188795 != 1750863033188798 2025-06-25T14:50:33.909321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32396, node 1 2025-06-25T14:50:34.212549Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:34.229096Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:34.229113Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:34.229120Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:50:34.229214Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8290 TClient is connected to server localhost:8290 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:35.374866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:35.401305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:35.432369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.649558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.848709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.948404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:37.463530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899484009161065:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:37.463632Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.199232Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899466829290328:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:38.199298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:38.206323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.253739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.330220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.374031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.413729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.494807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.561514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.672062Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899488304129032:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.672129Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.672365Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899488304129037:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.676083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:38.687420Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899488304129039:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:38.777478Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899488304129090:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:40.762658Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863040766, txId: 281474976710672] shutting down [[[108u];["One"];[8];["Value5"];[108u];["One"];#;["Value31"]]] >> KqpJoinOrder::TPCDS88-ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRange1 [GOOD] Test command err: Trying to start YDB, gRPC: 11558, MsgBus: 61586 2025-06-25T14:50:33.224911Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899466516785127:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:33.224958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a9d/r3tmp/tmpeOv6N6/pdisk_1.dat 2025-06-25T14:50:33.824636Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899466516785107:2080] 1750863033214517 != 1750863033214520 2025-06-25T14:50:33.857944Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:33.868919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:33.870253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:33.874159Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11558, node 1 2025-06-25T14:50:34.250329Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:34.250358Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:34.250370Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:34.250466Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:34.293183Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:61586 TClient is connected to server localhost:61586 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:35.515785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:35.538817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:35.553206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.768167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.947935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:50:36.072780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:37.458339Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899483696655944:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:37.458456Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.206429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.228429Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899466516785127:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:38.228522Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:38.279462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.306769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.342738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.384150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.462772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.518960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.647127Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899487991623908:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.647225Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.647649Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899487991623913:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.650620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:38.664629Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899487991623915:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:38.744708Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899487991623966:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:40.506891Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863040482, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRanges [GOOD] Test command err: Trying to start YDB, gRPC: 63986, MsgBus: 10858 2025-06-25T14:50:33.193248Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899467747739873:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:33.193310Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001abe/r3tmp/tmpB8o2Gc/pdisk_1.dat 2025-06-25T14:50:33.928417Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899467747739856:2080] 1750863033189571 != 1750863033189574 2025-06-25T14:50:33.958984Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:33.976056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:33.976148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:33.984028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63986, node 1 2025-06-25T14:50:34.239672Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:34.240172Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:34.240191Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:34.240201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:34.240297Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10858 TClient is connected to server localhost:10858 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:35.442177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:35.477689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.653467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.878191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.961019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:37.293129Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899484927610702:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:37.293221Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.196542Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899467747739873:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:38.196918Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:38.202683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.274180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.338681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.380341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.427114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.480800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.560785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.671660Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899489222578668:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.671724Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.672094Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899489222578673:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.676093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:38.688987Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899489222578675:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:50:38.746881Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899489222578726:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:40.846869Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863040831, txId: 281474976715672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamSelectRowById [GOOD] Test command err: Trying to start YDB, gRPC: 18042, MsgBus: 22347 2025-06-25T14:50:33.208901Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899468983545909:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:33.208951Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ab2/r3tmp/tmpTEbf6e/pdisk_1.dat 2025-06-25T14:50:33.799443Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899468983545878:2080] 1750863033189168 != 1750863033189171 2025-06-25T14:50:33.907964Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:33.912676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:33.912922Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:33.921522Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18042, node 1 2025-06-25T14:50:34.268569Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:34.272868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:34.272887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:34.272903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:34.273019Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22347 TClient is connected to server localhost:22347 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:35.399408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:35.424517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:35.447010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.676845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.858714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.940897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:38.082026Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899490458383988:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.082146Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.212363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899468983545909:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:38.212443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:38.440251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.480860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.517241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.563096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.620178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.670208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.745042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.847190Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899490458384652:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.847273Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.847530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899490458384657:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.851496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:38.873759Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899490458384659:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:38.972594Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899490458384711:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:40.538070Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863040542, txId: 281474976710672] shutting down >> KqpSystemView::NodesSimple ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::InnerJoinSelectAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 20220, MsgBus: 28980 2025-06-25T14:50:33.235786Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899468625844206:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:33.240784Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001aa1/r3tmp/tmpQKieeN/pdisk_1.dat 2025-06-25T14:50:33.849430Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:33.849552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:33.876071Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:33.883697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20220, node 1 2025-06-25T14:50:34.221133Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:34.221153Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:34.221165Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:34.221265Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:34.228673Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28980 TClient is connected to server localhost:28980 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:35.429375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:35.454405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:35.466604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.693572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.887681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.977464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:37.280926Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899485805714801:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:37.281110Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.207040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.232506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899468625844206:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:38.232575Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:38.271989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.308302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.343644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.427119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.470502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.516460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.644571Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899490100682761:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.644650Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.644802Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899490100682766:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.649992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:38.665213Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899490100682768:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:38.762219Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899490100682819:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::InnerJoinSelect [GOOD] Test command err: Trying to start YDB, gRPC: 21209, MsgBus: 19953 2025-06-25T14:50:33.208687Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899467424417677:2215];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:33.234785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ab6/r3tmp/tmpRPrKz2/pdisk_1.dat 2025-06-25T14:50:33.893232Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:33.896536Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899467424417503:2080] 1750863033188967 != 1750863033188970 2025-06-25T14:50:33.911393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:33.911480Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:33.921605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21209, node 1 2025-06-25T14:50:34.198760Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:34.229678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:34.229696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:34.229702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:34.229781Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19953 TClient is connected to server localhost:19953 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:35.413123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:35.442594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:50:35.668092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:35.881891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.999295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:37.613299Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899484604288316:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:37.613450Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.200514Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899467424417677:2215];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:38.200589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:38.203191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.273570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.309399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.347226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.388253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.451679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.521372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.644176Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899488899256273:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.644249Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.644659Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899488899256278:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.650585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:38.672475Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899488899256280:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:38.763818Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899488899256331:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpJoinOrder::TPCDS61+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamSelectRowAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 11383, MsgBus: 2832 2025-06-25T14:50:35.489791Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899478101822537:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:35.489992Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a9b/r3tmp/tmpNnJIHf/pdisk_1.dat 2025-06-25T14:50:36.016280Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:36.022343Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899478101822353:2080] 1750863035445507 != 1750863035445510 2025-06-25T14:50:36.032126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:36.032194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:36.037641Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11383, node 1 2025-06-25T14:50:36.280919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:36.280942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:36.280954Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:36.281063Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:36.482960Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2832 TClient is connected to server localhost:2832 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:37.323148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:37.355956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:50:37.374097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:37.651022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:37.825745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:37.911368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:39.282094Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899495281693164:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:39.282246Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:39.631503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:39.672849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:39.791715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:39.830411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:39.883940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:39.945763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:40.007079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:40.091428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899499576661118:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:40.091524Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:40.091890Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899499576661123:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:40.095548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:40.110802Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899499576661125:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:50:40.237860Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899499576661176:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:40.488392Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899478101822537:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:40.488451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:41.529459Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863041564, txId: 281474976715672] shutting down >> KqpJoinOrder::FiveWayJoinWithPredsAndEquiv+ColumnStore [GOOD] |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite-Volatile [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite-Volatile >> KqpSysColV1::InnerJoinSelectAsterisk >> KqpJoinOrder::CanonizedJoinOrderTPCH19 [GOOD] >> KqpSysColV0::UpdateAndDelete >> KqpSystemView::Join ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS88-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 15217, MsgBus: 6575 2025-06-25T14:48:45.776839Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899003181101396:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:45.791721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d85/r3tmp/tmpJNrCNR/pdisk_1.dat 2025-06-25T14:48:46.441923Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899003181101222:2080] 1750862925691885 != 1750862925691888 2025-06-25T14:48:46.467959Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:46.469735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:46.469852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:46.478999Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15217, node 1 2025-06-25T14:48:46.772852Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:48:46.788586Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-25T14:48:46.788610Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:46.788622Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:46.788798Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6575 TClient is connected to server localhost:6575 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:47.718689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:50.291637Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899024655938351:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:50.291769Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:50.292157Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899024655938363:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:50.298964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:50.316833Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899024655938365:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:50.396026Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899024655938416:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:50.764577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899003181101396:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:50.764658Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:50.866898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:51.006355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:51.061845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:51.116229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:51.175447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:51.371381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:51.405217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:51.439649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:51.488165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:51.528852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:51.568348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:51.624244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:51.677150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:52.628651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/cor ... 
ProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:30.477157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:30.477224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:30.482290Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:30.487053Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038491;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:30.487574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:30.488897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:30.497120Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:30.497755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:30.501164Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038599;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:30.501688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:30.506202Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:30.506770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:30.510395Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:30.511328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:49:30.512083Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:30.524488Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:30.704336Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks0d2s3y7065sp2rdr60yq", SessionId: ydb://session/3?node_id=1&id=NWE2ZGQwOGYtMWYxOWFhY2UtZTljYmFmYzctZDM0ZTUzYjM=, Slow query, duration: 35.574246s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:31.577535Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:31.578050Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:31.582372Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519899101965366148:4171];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-25T14:49:31.582858Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:38.194107Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2e5tezr1ewy0q95b58jx", SessionId: ydb://session/3?node_id=1&id=NWE2ZGQwOGYtMWYxOWFhY2UtZTljYmFmYzctZDM0ZTUzYjM=, Slow query, duration: 36.407134s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query88.tpl and seed 318176889\nselect *\nfrom\n (select count(*) h8_30_to_9\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 8\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) 
or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s1 cross join\n (select count(*) h9_to_9_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 9\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s2 cross join\n (select count(*) h9_30_to_10\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 9\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s3 cross join\n (select count(*) h10_to_10_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 10\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s4 cross join\n (select count(*) h10_30_to_11\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 10\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s5 cross join\n (select count(*) h11_to_11_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 11\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s6 cross join\n (select count(*) h11_30_to_12\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and 
ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 11\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s7 cross join\n (select count(*) h12_to_12_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 12\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s8\n;", parameters: 0b >> KqpSystemView::NodesRange1 >> KqpSysColV1::UpdateAndDelete |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest |86.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS61+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 29902, MsgBus: 1244 2025-06-25T14:48:25.097127Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898917686962247:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:25.108572Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d96/r3tmp/tmpiZ5ujA/pdisk_1.dat 2025-06-25T14:48:25.687504Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:25.687613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:25.690247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:25.698062Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898917686962215:2080] 1750862905047899 != 1750862905047902 2025-06-25T14:48:25.711699Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29902, node 1 2025-06-25T14:48:25.944012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:25.944041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:25.944047Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:25.944150Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:26.152577Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for 
task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1244 TClient is connected to server localhost:1244 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:27.098888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:27.128745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:29.540185Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898934866832049:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:29.540328Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:29.543277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898934866832061:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:29.546955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:29.558833Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898934866832063:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:29.664030Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898934866832114:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:30.027165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:48:30.100599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898917686962247:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:30.100660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:30.360327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898939161799699:2325];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:30.360484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898939161799699:2325];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:30.360689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898939161799699:2325];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:30.360807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898939161799699:2325];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:30.360911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898939161799699:2325];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:30.361021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898939161799699:2325];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:30.361113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898939161799699:2325];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:30.361190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898939161799699:2325];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:30.361290Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898939161799699:2325];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:48:30.361387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898939161799699:2325];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:48:30.361484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519898939161799699:2325];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:48:30.392180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898939161799656:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:30.392278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898939161799656:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:30.400540Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898939161799656:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:30.400657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898939161799656:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:30.400945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898939161799656:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:30.401057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898939161799656:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:30.401193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898939161799656:2312];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:30.401306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519898939161799656:2312];tablet_ ... 
les_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.522710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.527805Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.532690Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.533243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.538452Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.539165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.540853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.547704Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.550269Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.550963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.553287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.560514Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.561021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.566698Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.567197Z 
node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.574084Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.574588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.576828Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.577259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.583558Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.584108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.586636Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.587208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.593942Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.594593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.595934Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.596855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039242;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.601940Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039242;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.602477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.604214Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.607333Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.796198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:54.824144Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:49:54.893172Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks12ke9swgy3a66nzsxbx2", SessionId: ydb://session/3?node_id=1&id=ZTBmZDZjMjMtNDY5NGY3OTEtNjc3ZjJiZmMtMWU2M2Y5NTk=, Slow query, duration: 37.725878s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:55.718487Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:55.719003Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:49:55.719958Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899287054212416:11011];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-25T14:49:55.720445Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:35.102097Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2ve819djwb2v8bb9a5xy", SessionId: ydb://session/3?node_id=1&id=ZTBmZDZjMjMtNDY5NGY3OTEtNjc3ZjJiZmMtMWU2M2Y5NTk=, Slow query, duration: 19.733221s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query61.tpl and seed 1930872976\nselect 
promotions,total,cast(promotions as float)/cast(total as float)*100\nfrom\n (select sum(ss_ext_sales_price) promotions\n from store_sales\n cross join store\n cross join promotion\n cross join date_dim\n cross join customer\n cross join customer_address\n cross join item\n where ss_sold_date_sk = d_date_sk\n and ss_store_sk = s_store_sk\n and ss_promo_sk = p_promo_sk\n and ss_customer_sk= c_customer_sk\n and ca_address_sk = c_current_addr_sk\n and ss_item_sk = i_item_sk\n and ca_gmt_offset = -6\n and i_category = 'Sports'\n and (p_channel_dmail = 'Y' or p_channel_email = 'Y' or p_channel_tv = 'Y')\n and s_gmt_offset = -6\n and d_year = 2001\n and d_moy = 12) promotional_sales cross join\n (select sum(ss_ext_sales_price) total\n from store_sales\n cross join store\n cross join date_dim\n cross join customer\n cross join customer_address\n cross join item\n where ss_sold_date_sk = d_date_sk\n and ss_store_sk = s_store_sk\n and ss_customer_sk= c_customer_sk\n and ca_address_sk = c_current_addr_sk\n and ss_item_sk = i_item_sk\n and ca_gmt_offset = -6\n and i_category = 'Sports'\n and s_gmt_offset = -6\n and d_year = 2001\n and d_moy = 12) all_sales\norder by promotions, total\nlimit 100;\n", parameters: 0b >> KqpSystemView::Sessions [GOOD] >> KqpSysColV0::SelectRowById [GOOD] >> KqpSystemView::PartitionStatsRange2 >> DataShardWrite::UpsertPrepared-Volatile [GOOD] >> DataShardWrite::UpsertNoLocksArbiter >> DataShardWrite::ReplaceImmediate_DefaultValue [GOOD] >> DataShardWrite::UpdateImmediate >> KqpSystemView::PartitionStatsFollower >> DataShardWrite::UpsertPreparedManyTables+Volatile [GOOD] >> DataShardWrite::UpsertPreparedManyTables-Volatile >> KqpSystemView::QueryStatsScan >> KqpSysColV0::InnerJoinTables ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH19 [GOOD] Test command err: Trying to start YDB, gRPC: 27351, MsgBus: 5209 2025-06-25T14:49:02.157588Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899079912407001:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:02.157628Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d7c/r3tmp/tmpqbs70e/pdisk_1.dat 2025-06-25T14:49:02.933733Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:02.933839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:03.015792Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899079912406983:2080] 1750862942117324 != 1750862942117327 2025-06-25T14:49:03.017263Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:03.029174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27351, node 1 2025-06-25T14:49:03.189121Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:03.197272Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is 
empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:03.197279Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:03.197287Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:03.197399Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5209 TClient is connected to server localhost:5209 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:04.256291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:04.292760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:06.649341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899097092276814:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:06.649434Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:06.652374Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899097092276825:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:06.658606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:06.677540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:49:06.679866Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899097092276828:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:06.760187Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899097092276879:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:07.105787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:07.162086Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899079912407001:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:07.162156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:07.536578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899101387244422:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:07.536771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899101387244422:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:07.536981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899101387244422:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:07.537082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899101387244422:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:07.537162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899101387244422:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:07.537244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899101387244422:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:07.537322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899101387244422:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:07.537411Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899101387244422:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:07.537518Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899101387244422:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:07.537624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899101387244422:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:07.537707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899101387244422:2319];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:07.538147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899101387244449:2321];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:07.538176Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899101387244449:2321];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:07.538306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899101387244449:2321];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:07.538387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899101387244449:2321];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:07.538460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899101387244449:2321];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:07.538561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899101387244449:2321];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:07.538646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899101387244449:2321];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register ... 
60975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.066610Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.067219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.072098Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.076697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.080731Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.081229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.085912Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.086379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.090071Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.090725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.096127Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.097291Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.097559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.097905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.102729Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.103377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.108042Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.108117Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.108973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.108979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.114224Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.114476Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.114972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.115185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.121374Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.121388Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.122015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.122153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.128456Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.128609Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.129147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.130100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.135132Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.135957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.136059Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.136935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.141377Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.142007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.142314Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.147238Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.324680Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2641em1a15mzjjd13nrq", SessionId: ydb://session/3?node_id=1&id=ODVjYmViN2UtMzBjMzQwNmEtZDQ1NjMzOWQtYWMyMTE0OTE=, Slow query, duration: 34.783716s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:28.795439Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:28.795776Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519899380560167422:8895];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-25T14:50:28.796185Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:28.796246Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::FiveWayJoinWithPredsAndEquiv+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 1823, MsgBus: 62820 2025-06-25T14:48:53.692228Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899039263622852:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:53.696681Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d80/r3tmp/tmpk2jCTJ/pdisk_1.dat 2025-06-25T14:48:54.301201Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:54.304512Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899039263622660:2080] 1750862933623737 != 1750862933623740 2025-06-25T14:48:54.325074Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:54.325172Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:54.334809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1823, node 1 2025-06-25T14:48:54.569029Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:54.569048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:54.569054Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:54.569141Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:54.625175Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:62820 TClient is connected to server localhost:62820 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:55.581960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:55.611310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:48:58.118081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899060738459786:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:58.118231Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:58.118622Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899060738459798:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:58.122380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:58.135649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:48:58.135863Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899060738459800:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:48:58.208215Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899060738459851:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:58.667570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:48:58.680683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899039263622852:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:58.680809Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:59.018805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899060738460101:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:59.022774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899060738460096:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:59.022938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899060738460096:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:59.023158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899060738460096:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:59.023275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899060738460096:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:59.023361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899060738460096:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:59.023444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899060738460096:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:59.023525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899060738460096:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:59.023606Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899060738460096:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:59.023691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899060738460096:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:48:59.023771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899060738460096:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:48:59.023881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899060738460096:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:48:59.028002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899060738460101:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:59.028174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899060738460101:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:59.028263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899060738460101:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:59.028357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899060738460101:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:59.028444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899060738460101:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:59.028523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899060738460101:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_registe ... 
11087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.512568Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.513030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.518330Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.518948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.524655Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.524667Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.525168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.525200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.530568Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.535547Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.536069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.536863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.546065Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.546841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.549891Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.550460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.559786Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.560753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.561323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.564652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.570725Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.571352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.574080Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.574894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.583966Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.584121Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.584731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.584832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.589666Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.590373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.590599Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.591245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.595206Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.595849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.599590Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.604844Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.614805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.620650Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.740760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:25.750979Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:25.872379Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1yah3db4ehvjqgqrkts4", SessionId: ydb://session/3?node_id=1&id=ZjZlMjA1YmItNGQxOTQzM2ItN2U4Mjg2YzctZTkwYTdkZWY=, Slow query, duration: 40.318808s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:26.177658Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:26.181021Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:26.181532Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519899331321444558:8202];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-25T14:50:26.182395Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::CanonizedJoinOrderTPCH10 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::Sessions [GOOD] Test command err: Trying to start YDB, gRPC: 63216, MsgBus: 15527 2025-06-25T14:50:33.270933Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899467095946151:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:33.271444Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ac4/r3tmp/tmpVfZx1b/pdisk_1.dat 2025-06-25T14:50:33.909019Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:33.909132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:33.913639Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:33.916181Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899467095945950:2080] 1750863033201534 != 1750863033201537 2025-06-25T14:50:33.979798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63216, node 1 2025-06-25T14:50:34.231819Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:34.232043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:34.232057Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T14:50:34.232064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:34.232144Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15527 TClient is connected to server localhost:15527 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:35.409708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:35.432838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:35.450610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:50:35.466700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.697001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:35.857804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:35.966832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:37.661538Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899484275816769:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:37.661647Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.202806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.232272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899467095946151:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:38.232347Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:38.281338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.328101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.366317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.455500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.535428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.610299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:38.723936Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899488570784741:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.724033Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.724194Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899488570784746:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:38.727940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:38.740124Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899488570784748:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2025-06-25T14:50:38.807867Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899488570784799:3429] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 1 ydb-cpp-sdk/dev 2025-06-25T14:50:45.996037Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863045956, txId: 281474976710685] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::SelectRowById [GOOD] Test command err: Trying to start YDB, gRPC: 13052, MsgBus: 23599 2025-06-25T14:50:39.528423Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899494171615426:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:39.528940Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a84/r3tmp/tmpabuMVn/pdisk_1.dat 2025-06-25T14:50:40.166289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:40.166385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:40.169656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:40.204500Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899494171615220:2080] 1750863039437522 != 1750863039437525 2025-06-25T14:50:40.226559Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13052, node 1 2025-06-25T14:50:40.396649Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:40.396686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:40.396694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:40.396807Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:40.500733Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23599 TClient is connected to server localhost:23599 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:41.379646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:41.416536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:41.673099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:41.892830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:42.003262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:43.773539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899511351486057:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:43.773643Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:44.113645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:44.153547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:44.198799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:44.239800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:44.278827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:44.339822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:44.438831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:44.511347Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899494171615426:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:44.511434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:44.584507Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899515646454015:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:44.584584Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:44.584880Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899515646454020:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:44.588704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:44.609161Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899515646454022:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:44.689003Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899515646454073:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpJoinOrder::UdfConstantFold-ColumnStore [GOOD] >> KqpSysColV1::SelectRowAsterisk >> KqpSysColV0::SelectRowAsterisk [GOOD] >> KqpSystemView::PartitionStatsSimple |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::SelectRowAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 63942, MsgBus: 7758 2025-06-25T14:50:41.729867Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899504184320233:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:41.730040Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a82/r3tmp/tmpmFnRHD/pdisk_1.dat 2025-06-25T14:50:42.361426Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:42.391879Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:42.391959Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:42.401028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63942, node 1 2025-06-25T14:50:42.596776Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:42.596797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:42.596803Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:42.596888Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:42.714479Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7758 TClient is connected to server localhost:7758 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:43.417533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:43.431709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:43.443066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:43.625032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:43.812579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:43.920487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:45.806088Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899521364190865:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:45.806242Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:46.177461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:46.249121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:46.292518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:46.381338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:46.418885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:46.484266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:46.527672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:46.615938Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899525659158819:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:46.616036Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:46.616435Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899525659158824:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:46.620777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:46.641056Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899525659158826:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:46.720460Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899504184320233:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:46.720558Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:46.736990Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899525659158877:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpSysColV1::StreamInnerJoinSelect >> KqpSysColV1::SelectRange |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite-Volatile [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite+Volatile ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::UdfConstantFold-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 8942, MsgBus: 3495 2025-06-25T14:49:58.584725Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899317864029129:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:58.588246Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d44/r3tmp/tmppHatL2/pdisk_1.dat 2025-06-25T14:49:59.242163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:59.242272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:59.243913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:59.305649Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899317864029093:2080] 1750862998535065 != 1750862998535068 2025-06-25T14:49:59.306293Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8942, node 1 2025-06-25T14:49:59.498777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:59.498796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:59.498802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:59.498910Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:59.604855Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3495 TClient is connected to server localhost:3495 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:00.504889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:00.523372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:50:02.779590Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899335043898923:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:02.779726Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:02.780106Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899335043898935:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:02.784184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:02.804977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:50:02.805218Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899335043898937:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:50:02.892170Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899335043898988:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:03.204336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.331326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.378596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.417496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.445266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.579467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.586383Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899317864029129:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:03.586428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:03.630117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.669850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.714561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.756575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.804445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.846130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.903965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_c ... 
92918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.096351Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038567;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.096809Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.096820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.097287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.100914Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038565;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.101423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.101506Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.102314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.105916Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.106387Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038623;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.106435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.106897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.111955Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.112290Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.112618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.112781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.116929Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.116929Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038559;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.117464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.117736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.121550Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.121578Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.122086Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.122083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.126790Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038619;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.126790Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038613;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.127321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.127393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.131634Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.131634Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.132067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.132133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.136518Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.136518Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.137077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.137314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.141688Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.141690Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038661;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.142124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.146917Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:43.246369Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2ke48r23fnf9ren5ej73", SessionId: ydb://session/3?node_id=1&id=NmM3ODRiZDktYTNhNjRiOTktYzk1ZjFhYjEtZTBlM2QzMTQ=, Slow query, duration: 36.069514s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:43.488214Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:43.488779Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:43.490268Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519899425238229988:4378];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-25T14:50:43.490743Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH10 [GOOD] Test command err: Trying to start YDB, gRPC: 32585, MsgBus: 12163 2025-06-25T14:48:59.884174Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899064022792189:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:59.886040Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d7e/r3tmp/tmpp98DKy/pdisk_1.dat 2025-06-25T14:49:00.545149Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:00.545242Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:00.556777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:00.611566Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:00.612460Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899064022791988:2080] 1750862939828564 != 1750862939828567 TServer::EnableGrpc on GrpcPort 32585, node 1 2025-06-25T14:49:00.864642Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:00.876809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:00.876831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:00.876838Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:00.876926Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12163 TClient is connected to server localhost:12163 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:02.091316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:04.326018Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899085497629118:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:04.326118Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:04.326476Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899085497629130:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:04.330344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:04.343753Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899085497629132:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:04.400193Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899085497629183:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:04.689959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:04.865281Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899064022792189:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:04.868492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:05.094399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899085497629429:2315];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:05.094622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899085497629429:2315];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:05.094872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899085497629429:2315];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:05.094960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899085497629429:2315];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:05.095032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899085497629429:2315];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:05.095116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899085497629429:2315];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:05.095196Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899085497629429:2315];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:05.095280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899085497629429:2315];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:05.095392Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899085497629429:2315];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:05.095489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899085497629429:2315];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:05.095564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899085497629429:2315];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:05.098899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899085497629427:2313];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:05.098943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899085497629427:2313];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:05.099103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899085497629427:2313];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:05.099204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899085497629427:2313];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:05.099310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899085497629427:2313];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:05.099397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899085497629427:2313];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:05.099482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899085497629427:2313];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:05.099563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899085497629427:2313];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:05.099640Z node 1 :TX_COLU ... 
74634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.680176Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.683542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039318;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.685077Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039336;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.685556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039338;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.690390Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039338;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.690995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039312;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.695964Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039312;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.696909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039292;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.702129Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039292;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.702732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.707749Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.711364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039344;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.716478Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039344;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.717149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.722525Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.723128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039310;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.728408Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039310;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.729089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039282;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.731474Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039318;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.731931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.734415Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039282;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.734966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039322;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.737631Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.738341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039286;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.740576Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039322;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.741236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039284;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.748946Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039286;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.749534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039316;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.754278Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039284;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.754533Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039316;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.754857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039260;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.755282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.759903Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039260;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.760147Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.760964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.762525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.766319Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.767563Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.838093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.844359Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.916909Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks23qpesmz12ptqt87xbwa", SessionId: ydb://session/3?node_id=1&id=NzY1YTkyYTItYWI5NGU4NjctZjI0OGNmOTctMTdhNWU5NTU=, Slow query, duration: 37.810704s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:29.200026Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:29.200528Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:29.200790Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519899364670551769:8747];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039392; 2025-06-25T14:50:29.201199Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> DataShardWrite::UpdateImmediate [GOOD] >> DataShardWrite::RejectOnChangeQueueOverflow |86.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsParametricRanges >> KqpJoinOrder::CanonizedJoinOrderTPCH4 [GOOD] >> KqpSysColV1::SelectRowById >> KqpSystemView::FailNavigate >> DataShardWrite::UpsertPreparedManyTables-Volatile [GOOD] >> DataShardWrite::UpsertPreparedNoTxCache+Volatile >> KqpSysColV1::InnerJoinSelectAsterisk [GOOD] >> DataShardWrite::UpsertNoLocksArbiter [GOOD] >> DataShardWrite::UpsertLostPrepareArbiter >> KqpSysColV1::InnerJoinSelect >> KqpSysColV1::InnerJoinTables >> KqpSysColV0::UpdateAndDelete [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::InnerJoinSelectAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 5584, MsgBus: 17279 2025-06-25T14:50:44.873433Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899514469686103:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:44.898254Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a6e/r3tmp/tmpOSh5bU/pdisk_1.dat 2025-06-25T14:50:45.597148Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899514469686075:2080] 1750863044845815 != 1750863044845818 2025-06-25T14:50:45.598740Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:45.624917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:45.625004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-25T14:50:45.633458Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5584, node 1 2025-06-25T14:50:45.924087Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:45.924626Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:45.924634Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:45.924642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:45.924779Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17279 TClient is connected to server localhost:17279 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:46.896121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:46.921320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:47.135170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:47.348358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:47.466917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:49.370104Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899535944524189:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:49.370207Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:49.773753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.820057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.876415Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899514469686103:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:49.876520Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:49.902997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.986333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.033133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.083303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.131790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.232441Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899540239492151:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.232579Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.233041Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899540239492156:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.237156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:50.254729Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899540239492158:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:50.327978Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899540239492211:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpJoinOrder::SortingsDifferentDirs-RemoveLimitOperator [GOOD] >> KqpSystemView::PartitionStatsRange2 [GOOD] >> KqpSysColV1::UpdateAndDelete [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::UpdateAndDelete [GOOD] Test command err: Trying to start YDB, gRPC: 10355, MsgBus: 7547 2025-06-25T14:50:45.036988Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899522243858935:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:45.037372Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a75/r3tmp/tmpRxjhqF/pdisk_1.dat 2025-06-25T14:50:45.644440Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899517948891465:2080] 1750863044973911 != 1750863044973914 2025-06-25T14:50:45.649896Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:45.663048Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:45.663177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:45.674133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10355, node 1 2025-06-25T14:50:45.834732Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:45.834759Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:45.834769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:45.834879Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:46.040447Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7547 TClient is connected to server localhost:7547 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:46.874669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:46.898771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:46.914925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:47.202569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:47.412045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:47.511196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:49.309189Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899539423729573:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:49.309284Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:49.742012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.823338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.896453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.936944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.976394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.027621Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899522243858935:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:50.027692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:50.056845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.168076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.282310Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899543718697544:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.282379Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.282721Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899543718697549:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.286555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:50.301671Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899543718697551:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:50.380761Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899543718697604:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRange2 [GOOD] Test command err: Trying to start YDB, gRPC: 14097, MsgBus: 12526 2025-06-25T14:50:47.098276Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899529867178681:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:47.098546Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a5c/r3tmp/tmpoSCXzW/pdisk_1.dat 2025-06-25T14:50:47.729596Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899529867178543:2080] 1750863047051288 != 1750863047051291 2025-06-25T14:50:47.739498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:47.744450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:47.821420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:47.824636Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14097, node 1 2025-06-25T14:50:48.008259Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:48.008283Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:48.008297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:48.008417Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:48.104053Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12526 TClient is connected to server localhost:12526 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:48.895195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:48.909391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:48.927552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:49.108624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:49.329060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:49.439995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:51.299105Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899547047049369:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:51.299244Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:51.788606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:51.864141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:51.906956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:51.953187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.009375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.088650Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899529867178681:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:52.088770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:52.095851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.178870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.249284Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899551342017329:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:52.249381Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:52.249644Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899551342017334:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:52.254126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:52.267956Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899551342017336:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:52.360914Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899551342017389:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:53.866368Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863053846, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH4 [GOOD] Test command err: Trying to start YDB, gRPC: 13855, MsgBus: 15274 2025-06-25T14:49:09.773608Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899107039827361:2191];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:09.773891Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d73/r3tmp/tmpQ0J8lA/pdisk_1.dat 2025-06-25T14:49:10.379351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:10.379433Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:10.424620Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:10.427678Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899107039827208:2080] 1750862949700383 != 1750862949700386 2025-06-25T14:49:10.434577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13855, node 1 2025-06-25T14:49:10.652854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:10.652891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:10.652901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:10.652991Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:10.704473Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15274 TClient is connected to server localhost:15274 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:11.507624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:13.790389Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899124219697041:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:13.790489Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:13.790808Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899124219697053:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:13.796941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:13.822409Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899124219697055:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:49:13.912849Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899124219697106:2339] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:14.447203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:14.689233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899128514664634:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:14.689236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899128514664638:2320];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:14.689475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899128514664638:2320];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:14.689788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899128514664638:2320];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:14.689987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899128514664638:2320];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:14.690108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899128514664638:2320];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:14.690236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899128514664638:2320];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:14.690268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899128514664634:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:14.690353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899128514664638:2320];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:14.690435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037894;self_id=[1:7519899128514664634:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:14.690461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899128514664638:2320];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:14.690573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899128514664634:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:14.690586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899128514664638:2320];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:14.690684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899128514664634:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:14.690716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899128514664638:2320];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:14.690783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899128514664634:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:14.690842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899128514664638:2320];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:14.690898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899128514664634:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:14.690997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899128514664634:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:14.691087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899128514664634:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:14.691186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899128514664634:2317];tablet_id=72075186224037894;process=TTxInitSchema:: ... 
02081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.006382Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.006908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.010772Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.011296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.015847Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039371;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.016487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.018990Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.019516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.020112Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.020534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.023444Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.023964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.024121Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.024776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.028028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.028549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.028955Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.029402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.032438Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.033022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.034408Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.034872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.037313Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.037790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.039089Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.039561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.042501Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.043032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.044290Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.044807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.048002Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.048796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.048833Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.049386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.053432Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.053601Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.054443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.055340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:38.058794Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.059915Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:38.215370Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2e01bg3dwdszm1whnac2", SessionId: ydb://session/3?node_id=1&id=OTIzNWZhZGQtNjhjZGFlYmEtZWI2Y2Q1MjItZDg4NWU4ODE=, Slow query, duration: 36.613660s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:38.528809Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:38.529257Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:38.529522Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519899343263064724:6982];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-25T14:50:38.530095Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> KqpSysColV0::InnerJoinTables [GOOD] >> KqpSystemView::QueryStatsScan [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::UpdateAndDelete [GOOD] Test command err: Trying to start YDB, gRPC: 31881, MsgBus: 13385 2025-06-25T14:50:45.784220Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899520094923914:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:45.784631Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a60/r3tmp/tmpkKW9mK/pdisk_1.dat 2025-06-25T14:50:46.288461Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899520094923728:2080] 1750863045721933 != 1750863045721936 2025-06-25T14:50:46.289434Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:46.327752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:46.327865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:46.340219Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31881, node 1 2025-06-25T14:50:46.635676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:46.635696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:46.635705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:50:46.635783Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:46.784429Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13385 TClient is connected to server localhost:13385 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:47.775218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:47.801329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:50:47.825050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:48.039366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:48.236073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:48.333317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:50.158375Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899541569761841:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.158512Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.569419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.619083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.661730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.700158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.738807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.776548Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899520094923914:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:50.779134Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:50.795891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.861373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.934069Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899541569762500:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.934146Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.934318Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899541569762505:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.938174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:50.962760Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899541569762507:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:51.024740Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899545864729854:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpSysColV1::SelectRowAsterisk [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite+Volatile [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite+Volatile >> KqpSystemView::PartitionStatsRange3 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::SortingsDifferentDirs-RemoveLimitOperator [GOOD] Test command err: Trying to start YDB, gRPC: 28340, MsgBus: 11281 2025-06-25T14:50:09.662424Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899363915830774:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:09.662834Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d3a/r3tmp/tmpuA6amA/pdisk_1.dat 2025-06-25T14:50:10.409860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:10.409936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:10.415657Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:10.424401Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899363915830565:2080] 1750863009569274 != 1750863009569277 2025-06-25T14:50:10.429348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28340, node 1 2025-06-25T14:50:10.644851Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:10.665575Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:10.665592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:10.665607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:10.665692Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11281 TClient is connected to server localhost:11281 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:11.935755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:14.644411Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899363915830774:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:14.644473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:14.824034Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899385390667699:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.824153Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.824471Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899385390667711:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.828101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:14.860217Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899385390667713:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:50:14.931923Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899385390667764:2341] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:15.180943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.293124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.334712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.375280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.412264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.580558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.630464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.712433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.746317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.788156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.855732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.885379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:15.934160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:16.669130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/ ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.787572Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038585;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.788242Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.790743Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038603;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.791274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.793276Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038509;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.794258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.795769Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038595;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.796267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.799166Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038627;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.799681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.800455Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038569;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.800948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.804288Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038654;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.804865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-25T14:50:49.805209Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038571;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.805689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.809343Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.809845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.809969Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038620;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.810444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.814645Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.815030Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038640;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.815674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.815946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.820417Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038632;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.820448Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038630;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.821033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.821033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.825635Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038644;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.825773Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038652;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.826276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.826779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.831488Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038636;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.831506Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038642;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.832040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.832152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.836770Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.842174Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038650;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.868179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.868392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:49.877421Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.878900Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:49.981924Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2z1040wxpxvthfg8h9g4", SessionId: 
ydb://session/3?node_id=1&id=ZjFkZjZjOWQtODFlYmUzMGYtNWE2N2QwYjUtMjM4OTI2MmE=, Slow query, duration: 30.940730s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:50.249167Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:50.249167Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:50.249934Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpSystemView::PartitionStatsSimple [GOOD] >> KqpSysColV1::SelectRange [GOOD] >> DataShardWrite::RejectOnChangeQueueOverflow [GOOD] >> DataShardWrite::UpsertBrokenLockArbiter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV0::InnerJoinTables [GOOD] Test command err: Trying to start YDB, gRPC: 17494, MsgBus: 24167 2025-06-25T14:50:48.081133Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899532013091713:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:48.083865Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a58/r3tmp/tmp6xm11C/pdisk_1.dat 2025-06-25T14:50:48.708512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:48.708617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:48.710741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:48.761130Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:48.762771Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899532013091531:2080] 1750863048024125 != 1750863048024128 TServer::EnableGrpc on GrpcPort 17494, node 1 2025-06-25T14:50:48.927673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:48.927693Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:48.927704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from 
file: (empty maybe) 2025-06-25T14:50:48.927823Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:49.045356Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24167 TClient is connected to server localhost:24167 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:49.877132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:49.905096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:49.913871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:50.147320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:50.420122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:50.524383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:52.334395Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899549192962361:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:52.334494Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:52.682477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.715406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.750290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.808111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.847377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.902656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.946730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.048434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899532013091713:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:53.048527Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:53.048617Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899553487930311:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:53.048713Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:53.052500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899553487930316:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:53.057991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:53.070427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:50:53.070984Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899553487930318:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:53.151961Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899553487930371:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpJoinOrder::CanonizedJoinOrderTPCH21 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::QueryStatsScan [GOOD] Test command err: Trying to start YDB, gRPC: 25381, MsgBus: 13738 2025-06-25T14:50:47.641299Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899528878839880:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:47.643869Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a59/r3tmp/tmpaxn2cX/pdisk_1.dat 2025-06-25T14:50:48.242348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:48.242432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:48.243660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:48.252860Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:48.256891Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899528878839783:2080] 1750863047594176 != 1750863047594179 TServer::EnableGrpc on GrpcPort 25381, node 1 2025-06-25T14:50:48.530016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:48.530038Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:48.530049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:48.530213Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:48.650737Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13738 TClient is connected to server localhost:13738 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:49.270003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:49.297769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:49.323201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:49.555811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:49.768266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:49.875789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:51.686014Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899546058710602:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:51.686107Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:51.941186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:51.995026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.044940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.109826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.177603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.225080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.271089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:52.360958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899550353678566:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:52.361045Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:52.361271Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899550353678571:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:52.365532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:52.377702Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899550353678573:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:52.456460Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899550353678624:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:52.640427Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899528878839880:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:52.653360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:55.365194Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863053982, txId: 281474976710672] shutting down 2025-06-25T14:50:55.471836Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863055464, txId: 281474976710675] shutting down >> KqpSysColV1::StreamInnerJoinSelect [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::SelectRowAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 18998, MsgBus: 29808 2025-06-25T14:50:49.126350Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899539490177146:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:49.126716Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a56/r3tmp/tmpHjTexC/pdisk_1.dat 2025-06-25T14:50:49.772897Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:49.774016Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899539490176964:2080] 1750863049101470 != 1750863049101473 2025-06-25T14:50:49.798435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:49.798518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:49.802545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18998, node 1 2025-06-25T14:50:50.100754Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:50.179542Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:50.179569Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:50.179640Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:50.180037Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:29808 TClient is connected to server localhost:29808 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:51.069807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:51.083531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:51.091951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:51.281516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:51.441215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:51.540919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:53.322462Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899556670047786:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:53.322588Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:53.785996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.830838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.863578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.891318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.923926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.964824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.036276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.112588Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899539490177146:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:54.112660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:54.168456Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899560965015744:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.168582Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.169417Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899560965015749:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.173247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:54.181158Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899560965015751:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:54.239637Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899560965015804:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpSystemView::NodesSimple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsSimple [GOOD] Test command err: Trying to start YDB, gRPC: 27561, MsgBus: 6147 2025-06-25T14:50:49.294453Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899538531226424:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:49.294497Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a53/r3tmp/tmptPBYTn/pdisk_1.dat 2025-06-25T14:50:49.987172Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:49.987265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:49.992456Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:49.995159Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27561, node 1 2025-06-25T14:50:50.349469Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:50.349756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:50.349764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:50.349771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:50.349860Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6147 TClient is connected to server localhost:6147 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:51.269518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:51.294971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:51.302168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:51.534209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:51.765508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:51.861022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:53.762077Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899555711097234:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:53.762172Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.097726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.136194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.176736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.264299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.296525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.299451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899538531226424:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:54.300173Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:54.366554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.409372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.496494Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899560006065192:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.496615Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.497016Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899560006065197:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.500842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:54.517913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:50:54.518116Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899560006065199:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:54.588924Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899560006065250:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:55.894256Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863055880, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::SelectRange [GOOD] Test command err: Trying to start YDB, gRPC: 27514, MsgBus: 26447 2025-06-25T14:50:50.652278Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899539782377176:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:50.657675Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a4a/r3tmp/tmpBE8Z4F/pdisk_1.dat 2025-06-25T14:50:51.265407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:51.265487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:51.279163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:51.320741Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899539782377028:2080] 1750863050609332 != 1750863050609335 2025-06-25T14:50:51.334369Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27514, node 1 2025-06-25T14:50:51.501015Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:51.501036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:51.501045Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:51.501169Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:51.609265Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26447 TClient is connected to server localhost:26447 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:52.183051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:52.230598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:52.385159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:52.547245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:52.642913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:54.312793Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899556962247834:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.312897Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.619375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.667358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.738359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.777352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.812085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.847798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.888873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.967664Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899556962248495:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.967744Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.968015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899556962248500:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.971682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:54.984857Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899556962248502:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:55.055332Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899561257215849:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:55.654060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899539782377176:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:55.654115Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> DataShardWrite::UpsertPreparedNoTxCache+Volatile [GOOD] >> DataShardWrite::UpsertPreparedNoTxCache-Volatile >> KqpSystemView::FailResolve >> DataShardWrite::UpsertLostPrepareArbiter [GOOD] >> DataShardWrite::UpsertNoLocksArbiterRestart ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamInnerJoinSelect [GOOD] Test command err: Trying to start YDB, gRPC: 13578, MsgBus: 19340 2025-06-25T14:50:50.568876Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899540501215955:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:50.572903Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a4d/r3tmp/tmpRAoyAW/pdisk_1.dat 2025-06-25T14:50:51.134201Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:51.159941Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899540501215914:2080] 1750863050538502 != 1750863050538505 2025-06-25T14:50:51.174176Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:51.174269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 13578, node 1 2025-06-25T14:50:51.176896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:51.466726Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:51.466749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:51.466757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:51.466878Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:51.593148Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19340 TClient is connected to server localhost:19340 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:52.195218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:52.211969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:52.218284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:52.379320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:52.618710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:52.715758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:54.442891Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899557681086740:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.442986Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:54.733493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.775511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.819309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.899323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:54.951163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:55.013556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:55.069095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:55.171050Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899561976054702:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:55.171131Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:55.171455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899561976054707:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:55.175593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:55.200572Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899561976054709:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:55.264136Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899561976054760:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:55.576560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899540501215955:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:55.576669Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:56.781162Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863056810, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::NodesSimple [GOOD] Test command err: Trying to start YDB, gRPC: 64750, MsgBus: 1375 2025-06-25T14:50:43.972671Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899512300784031:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:43.972721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:44.160375Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899516334967083:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:44.160724Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:44.236058Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519899515293878546:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:44.249661Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a81/r3tmp/tmpfGBZNC/pdisk_1.dat 2025-06-25T14:50:45.039802Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:45.170935Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:45.176089Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:45.263287Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 
2025-06-25T14:50:45.262237Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:45.308974Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:45.713245Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:45.713907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:45.713983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:45.715112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:45.715173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:45.745860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:45.745927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:45.779358Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:50:45.779403Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:50:45.874489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:45.875072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:45.877285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64750, node 1 2025-06-25T14:50:46.404917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:46.404936Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:46.404944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:46.405070Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1375 TClient is connected to server localhost:1375 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:48.342619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:48.424139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:49.000066Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899512300784031:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:49.000133Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:49.121687Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899516334967083:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:49.121774Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:49.135317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:49.216438Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519899515293878546:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:49.216516Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:49.684742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:50.262712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:52.478096Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899550955491694:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:52.478183Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:52.912384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.040254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.134025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.292064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.442914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.511182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.622345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:53.865357Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899555250459853:2381], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:53.865479Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:53.865769Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899555250459858:2384], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:53.870381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:53.908513Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899555250459860:2385], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:54.014888Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899559545427233:4241] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:55.462622Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863055440, txId: 281474976710672] shutting down >> KqpSysColV1::SelectRowById [GOOD] >> KqpSystemView::FailNavigate [GOOD] >> KqpSystemView::PartitionStatsParametricRanges [GOOD] >> KqpJoinOrder::TPCDS16-ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::SelectRowById [GOOD] Test command err: Trying to start YDB, gRPC: 8699, MsgBus: 65021 2025-06-25T14:50:52.851899Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899550773931430:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:52.856636Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a3b/r3tmp/tmpYQHM35/pdisk_1.dat 2025-06-25T14:50:53.271499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:53.271616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:53.277415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:53.304550Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:53.307901Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899550773931248:2080] 1750863052777390 != 1750863052777393 TServer::EnableGrpc on GrpcPort 8699, node 1 2025-06-25T14:50:53.455707Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:53.455728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:53.455739Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:53.455838Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:53.776439Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:65021 TClient is connected to server localhost:65021 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:54.207253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:54.229850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:50:54.240642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:54.462799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:54.677023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:54.768719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:56.489047Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899567953802071:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:56.489150Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:56.764406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.797526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.868808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.898378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.931385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.970445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:57.039999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:57.140565Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899572248770033:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:57.140629Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:57.140913Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899572248770038:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:57.144110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:57.154754Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899572248770040:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:50:57.216789Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899572248770091:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:57.810085Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899550773931430:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:57.810139Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH21 [GOOD] Test command err: Trying to start YDB, gRPC: 10703, MsgBus: 11467 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d76/r3tmp/tmpJsdDbp/pdisk_1.dat 2025-06-25T14:49:09.464457Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:49:09.621214Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:09.621283Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:09.625036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:09.688322Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899103066903664:2080] 1750862948936416 != 1750862948936419 2025-06-25T14:49:09.695622Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10703, node 1 2025-06-25T14:49:09.833151Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:09.833167Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:09.833174Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:09.840929Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:10.021059Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11467 TClient is connected to server localhost:11467 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:10.748232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:10.776678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:12.870630Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899120246773491:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:12.870758Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:12.872553Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899120246773503:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:12.876084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:12.895246Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899120246773505:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:13.000394Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899120246773556:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:13.453578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:13.725574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899124541741115:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:13.725749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899124541741115:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:13.725975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899124541741115:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:13.726095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899124541741115:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:13.726181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899124541741115:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:13.726289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899124541741115:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:13.726382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899124541741115:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:13.726486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899124541741115:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:13.726568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899124541741115:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:13.726689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037902;self_id=[1:7519899124541741115:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:13.726787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899124541741115:2323];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:13.749219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899124541741098:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:13.749270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899124541741098:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:13.749443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899124541741098:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:13.749533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899124541741098:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:13.749619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899124541741098:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:13.749706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899124541741098:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:13.749803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899124541741098:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:13.749887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899124541741098:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:13.749978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899124541741098:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:13.750051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899124541741098:2312];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:1 ... 
4263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.555626Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.557098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039289;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.558233Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.558701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.562800Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039289;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.563405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.563489Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.563990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.568278Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.568682Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.568847Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.569177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.575361Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.575627Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.575929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.576200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.581506Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.582093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.582802Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.583337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.588567Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.589193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.591141Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.591710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.593796Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.594294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.596782Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039375;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.597271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.601197Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.601763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.601935Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.602440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.606337Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.606696Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.607148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.607309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.611573Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.613074Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.706467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039292;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:35.711635Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039292;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.749434Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2bpjc96wyec0w9qvy9th", SessionId: ydb://session/3?node_id=1&id=M2FjNjI5Y2ItN2FkOGVhNy05MjJhNDk5My1iMTQzZWU4OQ==, Slow query, duration: 36.496816s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:36.078221Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:36.078691Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:36.079215Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899442369377257:10005];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-25T14:50:36.079564Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::FailNavigate [GOOD] Test command err: Trying to start YDB, gRPC: 14592, MsgBus: 25178 2025-06-25T14:50:53.068252Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899554505461941:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:53.070913Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a36/r3tmp/tmpd9MHus/pdisk_1.dat 2025-06-25T14:50:53.471377Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899554505461749:2080] 1750863053044453 != 1750863053044456 2025-06-25T14:50:53.575876Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14592, node 1 2025-06-25T14:50:53.587719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:53.587818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:53.637365Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:53.696856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:53.696884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:53.696892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:53.697004Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got 
bad distributable configuration 2025-06-25T14:50:54.059797Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25178 TClient is connected to server localhost:25178 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:54.715383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:54.746499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:54.757760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:50:54.775424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:55.022774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:55.261057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:55.354040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:56.955493Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899567390365269:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:56.955619Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:57.226532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:57.273041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:57.336109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:57.383352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:57.417556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:57.498668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:57.548971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:57.659567Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899571685333227:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:57.659649Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:57.659717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899571685333232:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:57.663799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:57.684880Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899571685333234:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2025-06-25T14:50:57.779262Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899571685333285:3423] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:58.080914Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899554505461941:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:58.081160Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:58.709092Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7519899575980300870:3603], for# user0@builtin, access# DescribeSchema 2025-06-25T14:50:58.709132Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7519899575980300870:3603], for# user0@builtin, access# DescribeSchema 2025-06-25T14:50:58.720318Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519899575980300860:2480], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[/Root/.sys/partition_stats]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:50:58.720956Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YjRmYjU1OWEtZWQyMmZiNTUtMWU0MDVlOWItOTRhYjIwMzM=, ActorId: [1:7519899575980300853:2476], ActorState: ExecuteState, TraceId: 01jyks45q98dxwwy68213wbbwh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: >> KqpSysColV1::InnerJoinSelect [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsParametricRanges [GOOD] Test command err: Trying to start YDB, gRPC: 16931, MsgBus: 21100 2025-06-25T14:50:52.577056Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899550339015986:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:52.577143Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a44/r3tmp/tmpO8FZ3m/pdisk_1.dat 2025-06-25T14:50:53.041412Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:53.044141Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899550339015953:2080] 1750863052575912 != 1750863052575915 TServer::EnableGrpc on GrpcPort 16931, node 1 2025-06-25T14:50:53.061609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:53.061694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:53.079142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:53.152915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:53.152933Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:53.152946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:53.153063Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21100 2025-06-25T14:50:53.559391Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21100 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:53.905379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:53.933484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:53.950913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:54.141944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:50:54.366597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:54.445865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.321580Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899567518886765:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:56.321684Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:56.619424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.667110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.739410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.821640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.852078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.897443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.981636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:57.037750Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899571813854725:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:57.037815Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:57.037875Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899571813854730:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:57.041061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:57.051291Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899571813854732:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:57.109133Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899571813854783:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:57.577661Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899550339015986:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:57.577759Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:58.736908Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863058707, txId: 281474976710672] shutting down >> KqpSysColV1::InnerJoinTables [GOOD] >> KqpSystemView::Join [GOOD] >> KqpJoinOrder::TestJoinOrderHintsSimple+ColumnStore [GOOD] >> KqpSystemView::NodesRange2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS16-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 11724, MsgBus: 1904 2025-06-25T14:49:52.973462Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899291178888478:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:52.977960Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d47/r3tmp/tmppivrvk/pdisk_1.dat 2025-06-25T14:49:53.729025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:53.729112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:53.749238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:53.791878Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:53.792243Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899291178888445:2080] 1750862992926121 != 1750862992926124 TServer::EnableGrpc on GrpcPort 11724, node 1 2025-06-25T14:49:53.961677Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:54.069968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:54.069987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:54.069998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:54.070110Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1904 TClient is 
connected to server localhost:1904 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:55.078816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:55.104775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:57.563503Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899312653725569:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:57.563606Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:57.563839Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899312653725581:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:57.566951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:57.589522Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899312653725583:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:57.655915Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899312653725634:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:57.976450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899291178888478:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:57.976525Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:58.084299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.222441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.251327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.294768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.342114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.713937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.769225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.848862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.938246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:58.991034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:59.052675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:59.107813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:59.160282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:49:59.888947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperat ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.515098Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038573;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.515237Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.515692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.515692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.519841Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038631;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.521081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.523597Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038637;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.524192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.525517Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038641;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.526058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.528096Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038607;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.528867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.530384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038649;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.530878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 
2025-06-25T14:50:35.536437Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038579;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.536983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.541544Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038587;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.541890Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.542448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.542510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.546963Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038659;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.548800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.551160Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038609;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.552133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.556613Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038651;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.557163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.562511Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038657;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.563113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.567556Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224038655;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.568138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.572524Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038643;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.573078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.573507Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038617;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.574071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.577624Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038633;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.578188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.579363Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038645;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.579942Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.582602Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038639;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.583109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:35.588296Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038615;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.591390Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038647;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:35.724389Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2enydswmrxjhedzbsz15", SessionId: 
ydb://session/3?node_id=1&id=YmY0ZDVmNzAtNzU5NjM1ZDgtMTRiOTIzMzYtODdkY2NkOTQ=, Slow query, duration: 33.409848s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:36.397544Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:36.397694Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:36.398150Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::InnerJoinSelect [GOOD] Test command err: Trying to start YDB, gRPC: 7400, MsgBus: 8640 2025-06-25T14:50:53.799675Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899556151742703:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:53.800979Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a35/r3tmp/tmpW6oMIP/pdisk_1.dat 2025-06-25T14:50:54.312487Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899556151742502:2080] 1750863053722535 != 1750863053722538 2025-06-25T14:50:54.319639Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:54.326247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:54.326318Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:54.328735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7400, node 1 2025-06-25T14:50:54.538729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:54.538750Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:54.538756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:54.538870Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:54.780620Z node 1 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8640 TClient is connected to server localhost:8640 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:55.405537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:55.442629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:55.639229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:55.866811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:55.962830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:57.759809Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899573331613320:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:57.759894Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:58.041994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.070091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.113735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.143338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.175506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.207715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.277323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.362927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899577626581283:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:58.363015Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:58.363398Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899577626581288:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:58.367049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:58.380708Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899577626581290:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:58.451998Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899577626581341:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:58.780417Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899556151742703:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:58.780500Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite+Volatile [GOOD] >> DataShardWrite::InsertImmediate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::Join [GOOD] Test command err: Trying to start YDB, gRPC: 23969, MsgBus: 6482 2025-06-25T14:50:45.051886Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899518259842155:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:45.052107Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a6b/r3tmp/tmplaFncK/pdisk_1.dat 2025-06-25T14:50:45.570361Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:45.570447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:45.572034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:45.599543Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:45.600690Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899513964874671:2080] 1750863045026805 != 1750863045026808 TServer::EnableGrpc on GrpcPort 23969, node 1 2025-06-25T14:50:45.832870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:45.832891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:45.832898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:45.833005Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:46.052433Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6482 TClient is connected to server localhost:6482 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:46.720980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:46.752752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:46.767380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:46.944868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:47.214814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:47.306530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:49.154364Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899535439712789:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:49.154482Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:49.594690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.663547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.723963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.765662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.805471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.849559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:49.896206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:50.000468Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899535439713457:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.000557Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.000961Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899535439713462:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.005180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:50.015247Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899539734680760:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:50.040551Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899518259842155:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:50.040683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:50.121451Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899539734680811:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:51.431808Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863051396, txId: 281474976710672] shutting down waiting... 2025-06-25T14:50:52.651014Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863052628, txId: 281474976710674] shutting down waiting... 2025-06-25T14:50:53.944879Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863053926, txId: 281474976710676] shutting down waiting... 2025-06-25T14:50:55.228124Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863055204, txId: 281474976710678] shutting down waiting... 2025-06-25T14:50:56.395255Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863056388, txId: 281474976710680] shutting down waiting... 2025-06-25T14:50:57.651401Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863057601, txId: 281474976710682] shutting down waiting... 2025-06-25T14:50:58.852117Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863058840, txId: 281474976710684] shutting down waiting... 
2025-06-25T14:51:00.038494Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863060022, txId: 281474976710686] shutting down 2025-06-25T14:51:00.416885Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863060396, txId: 281474976710688] shutting down 2025-06-25T14:51:00.476633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:51:00.476665Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::InnerJoinTables [GOOD] Test command err: Trying to start YDB, gRPC: 13018, MsgBus: 31265 2025-06-25T14:50:54.307854Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899559963064535:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:54.337243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a2e/r3tmp/tmpyzypgk/pdisk_1.dat 2025-06-25T14:50:54.978311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:54.978390Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:54.980785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:55.031959Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899559963064417:2080] 1750863054296734 != 1750863054296737 2025-06-25T14:50:55.044012Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13018, node 1 2025-06-25T14:50:55.178457Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:55.178477Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:55.178483Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:55.178567Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:55.294736Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31265 TClient is connected to server localhost:31265 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:55.914660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:55.929333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:50:55.936613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.131415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:50:56.301362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.399306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:57.933987Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899572847967943:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:57.934069Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:58.205483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.273541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.306853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.355896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.432072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.477234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.518938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:58.587973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899577142935904:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:58.588035Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:58.591454Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899577142935909:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:58.595083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:58.607029Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899577142935911:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:50:58.713146Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899577142935962:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:59.308498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899559963064535:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:59.308553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Query 2025-06-25 14:50:58,274 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 14:50:58,559 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 285042 48.0M 47.4M 24.6M test_tool run_ut @/home/runner/.ya/build/build_root/yft8/001976/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/chunk9/testi 285243 2.1G 2.1G 1.6G └─ src-client-topic-ut-with_direct_read_ut --trace-path-append /home/runner/.ya/build/build_root/yft8/001976/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test Test command err: 2025-06-25T14:41:00.401435Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897008398034444:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:00.402447Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:41:00.673466Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001976/r3tmp/tmpDzLcSw/pdisk_1.dat 2025-06-25T14:41:00.965933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:41:00.966036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:41:00.978953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:41:00.981295Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:41:00.984297Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519897008398034259:2080] 1750862460384117 != 1750862460384120 TServer::EnableGrpc on GrpcPort 23712, node 1 2025-06-25T14:41:01.122378Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001976/r3tmp/yandex19a5lA.tmp 2025-06-25T14:41:01.122401Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from 
file: /home/runner/.ya/build/build_root/yft8/001976/r3tmp/yandex19a5lA.tmp 2025-06-25T14:41:01.122551Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001976/r3tmp/yandex19a5lA.tmp 2025-06-25T14:41:01.122658Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:41:01.178665Z INFO: TTestServer started on Port 12694 GrpcPort 23712 TClient is connected to server localhost:12694 2025-06-25T14:41:01.399307Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; PQClient connected to localhost:23712 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:41:01.637211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:41:01.654855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:41:01.676624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:41:01.904563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:41:05.404444Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519897008398034444:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:41:05.404519Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:41:05.596735Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897029872871535:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:05.596884Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:05.597194Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897029872871541:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:05.608642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:41:05.616739Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897029872871582:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:05.616858Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:41:05.642846Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519897029872871550:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:41:05.727984Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897029872871606:2446] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:41:06.139470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:06.150623Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519897029872871615:2312], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:41:06.153095Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODQ3ZDgwMjgtMmNhODk5ZS1iZDdiMTgyLTIxODQ3YmE=, ActorId: [1:7519897029872871533:2300], ActorState: ExecuteState, TraceId: 01jykrj2ft7qn60806tf2basdm, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:41:06.155671Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:41:06.202770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:41:06.329076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7519897034167839200:2628] === CheckClustersList. Ok 2025-06-25T14:41:12.794370Z :WriteToTopic_Invalid_Session_Table INFO: TTopicSdkTestSetup started 2025-06-25T14:41:12.813645Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-25T14:41:12.838466Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:41:12.838578Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3107: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-25T14:41:12.839303Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:41:12.839475Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72075186224037892] doesn't have tx info 2025-06-25T14:41:12.839511Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:41:12.839526Z node 1 :PERSQUEUE DEBUG: pq ... 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] TPartition::ReplyWrite. 
Partition: {0, {17, 281474976710698}, 100002} 2025-06-25T14:50:58.253709Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id_2_0', Topic: 'topic_A', Partition: {0, {17, 281474976710698}, 100002}, SeqNo: 1, partNo: 2, Offset: 0 is stored on disk 2025-06-25T14:50:58.253729Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] TPartition::ReplyWrite. Partition: {0, {17, 281474976710698}, 100002} 2025-06-25T14:50:58.256599Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id_2_0|67e2811f-ecd250a0-d79c933e-19972a13_0] PartitionId [0] Generation [1] Write session got write response: acks { seq_no: 1 written_in_tx { } } write_statistics { persisting_time { nanos: 64000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-25T14:50:58.253749Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id_2_0', Topic: 'topic_A', Partition: {0, {17, 281474976710698}, 100002}, SeqNo: 1, partNo: 3, Offset: 0 is stored on disk 2025-06-25T14:50:58.253769Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] TPartition::ReplyWrite. Partition: {0, {17, 281474976710698}, 100002} 2025-06-25T14:50:58.253790Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id_2_0', Topic: 'topic_A', Partition: {0, {17, 281474976710698}, 100002}, SeqNo: 1, partNo: 4, Offset: 0 is stored on disk 2025-06-25T14:50:58.256650Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id_2_0|67e2811f-ecd250a0-d79c933e-19972a13_0] PartitionId [0] Generation [1] OnAck: seqNo=1, txId={ydb://session/3?node_id=17&id=NzZmNmZhMWUtMTY1NzM5M2ItZjIxZjdhM2YtNDdjZWJkZWI=, 01jyks4545dkqyzkebe8h51jjs}, WriteCount=1, AckCount=1 2025-06-25T14:50:58.253809Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] TPartition::ReplyWrite. Partition: {0, {17, 281474976710698}, 100002} 2025-06-25T14:50:58.253829Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id_2_0', Topic: 'topic_A', Partition: {0, {17, 281474976710698}, 100002}, SeqNo: 1, partNo: 5, Offset: 0 is stored on disk 2025-06-25T14:50:58.256695Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id_2_0|67e2811f-ecd250a0-d79c933e-19972a13_0] PartitionId [0] Generation [1] Write session: acknoledged message 1 2025-06-25T14:50:58.253847Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] TPartition::ReplyWrite. 
Partition: {0, {17, 281474976710698}, 100002} 2025-06-25T14:50:58.253869Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id_2_0', Topic: 'topic_A', Partition: {0, {17, 281474976710698}, 100002}, SeqNo: 1, partNo: 6, Offset: 0 is stored on disk 2025-06-25T14:50:58.253888Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] TPartition::ReplyWrite. Partition: {0, {17, 281474976710698}, 100002} 2025-06-25T14:50:58.253908Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id_2_0', Topic: 'topic_A', Partition: {0, {17, 281474976710698}, 100002}, SeqNo: 1, partNo: 7, Offset: 0 is stored on disk 2025-06-25T14:50:58.253925Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] TPartition::ReplyWrite. Partition: {0, {17, 281474976710698}, 100002} 2025-06-25T14:50:58.253945Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id_2_0', Topic: 'topic_A', Partition: {0, {17, 281474976710698}, 100002}, SeqNo: 1, partNo: 8, Offset: 0 is stored on disk 2025-06-25T14:50:58.253964Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] TPartition::ReplyWrite. Partition: {0, {17, 281474976710698}, 100002} 2025-06-25T14:50:58.253984Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id_2_0', Topic: 'topic_A', Partition: {0, {17, 281474976710698}, 100002}, SeqNo: 1, partNo: 9, Offset: 0 is stored on disk 2025-06-25T14:50:58.254016Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] TPartition::ReplyWrite. Partition: {0, {17, 281474976710698}, 100002} 2025-06-25T14:50:58.254035Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id_2_0', Topic: 'topic_A', Partition: {0, {17, 281474976710698}, 100002}, SeqNo: 1, partNo: 10, Offset: 0 is stored on disk 2025-06-25T14:50:58.254822Z node 17 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037897, Partition: {0, {17, 281474976710698}, 100002}, State: StateIdle] need more data for compaction. cumulativeSize=5121033, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:50:58.254863Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:1382: [PQ: 72075186224037897] Topic 'topic_A' counters. 
CacheSize 5121033 CachedBlobs 1 2025-06-25T14:50:58.254933Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic_A' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-25T14:50:58.255006Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037897 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:50:58.257077Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:114: PQ Cache (L2). Evicting blob. Tablet '72075186224037910' partition 100001 offset 0 partno 16 count 1 parts 6 suffix '124' size 3073664 2025-06-25T14:50:58.257140Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:114: PQ Cache (L2). Evicting blob. Tablet '72075186224037912' partition 100001 offset 0 partno 0 count 1 parts 16 suffix '63' size 8193572 2025-06-25T14:50:58.257207Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037897' partition 100002 offset 0 partno 0 count 1 parts 10 suffix '63' size 5121033 2025-06-25T14:50:58.257509Z node 17 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037904, Partition: {3, {17, 281474976710682}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=2048510, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:50:58.257598Z node 17 :PERSQUEUE DEBUG: cache_eviction.h:435: Erasing blob in L1. Partition 100001 offset 0 size 3073664 cause it's been evicted from L2. Actual L1 size: 0 2025-06-25T14:50:58.257627Z node 17 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic topic_A, tablet id72075186224037910, cookie 0 2025-06-25T14:50:58.257695Z node 17 :PERSQUEUE DEBUG: cache_eviction.h:435: Erasing blob in L1. Partition 100001 offset 0 size 8193572 cause it's been evicted from L2. Actual L1 size: 0 2025-06-25T14:50:58.257737Z node 17 :PERSQUEUE NOTICE: read.h:371: Have to remove new data from cache. Topic topic_A, tablet id72075186224037912, cookie 0 2025-06-25T14:50:58.263377Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [1] Generation [0] Write session: try to update token 2025-06-25T14:50:58.407988Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [1] Generation [0] Get partition location async, partition 1, delay 0.000000s 2025-06-25T14:50:58.408057Z :TRACE: [/Root] TRACE_EVENT DescribePartitionRequest path=/Root/topic_A partition_id=1 2025-06-25T14:50:58.408418Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [1] Generation [0] Getting partition location, partition 1 2025-06-25T14:50:58.464254Z node 17 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037903, Partition: {10, {17, 281474976710682}, 100000}, State: StateIdle] need more data for compaction. cumulativeSize=6657308, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:50:58.465876Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:50:58.468480Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 41 sessionId: test-message_group_id_1_18|b2d59274-adb7fac9-91ae86b3-994ca30_0 describe result for acl check 2025-06-25T14:50:58.496247Z node 17 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037901, Partition: {9, {17, 281474976710690}, 100001}, State: StateIdle] need more data for compaction. 
cumulativeSize=1536423, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:50:58.524899Z node 17 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:166: new Describe partition request 2025-06-25T14:50:58.525020Z node 17 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1209: TDescribePartitionActor for request path: "/Root/topic_A" partition_id: 1 include_location: true 2025-06-25T14:50:58.525074Z node 17 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1219: TDescribePartitionActor[17:7519899575275868649:3354]: Bootstrap 2025-06-25T14:50:58.528453Z node 17 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [17:7519899575275868649:3354]: Request location Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/001976/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/001976/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::RacyAlterTableAndConditionalErase |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::AlterColumnTable >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiersAlterTable >> TSchemeShardColumnTableTTL::CreateColumnTable >> TSchemeShardTTLTests::AlterTableShouldSucceedOnIndexedTable >> TSchemeShardTTLTests::CreateTableShouldSucceedOnIndexedTable >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-false >> TSchemeShardTTLUtility::ValidateTiers [GOOD] >> TSchemeShardTTLTestsWithReboots::AlterTable >> DataShardWrite::UpsertBrokenLockArbiter [GOOD] >> DataShardWrite::PreparedDistributedWritePageFault >> KqpSystemView::PartitionStatsRange3 [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLUtility::ValidateTiers [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::NodesRange2 [GOOD] Test command err: Trying to start YDB, gRPC: 12001, MsgBus: 20227 2025-06-25T14:50:36.464878Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899483678165998:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:36.464920Z node 1 :METADATA_PROVIDER 
ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:36.646301Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899479598104968:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:36.646370Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:36.664481Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519899482024015774:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:36.664532Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:36.712983Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519899482085299997:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:37.710764Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:38.061467Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:38.061983Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:38.062455Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a92/r3tmp/tmpU1wXOR/pdisk_1.dat 2025-06-25T14:50:38.110948Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:38.150905Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:38.151710Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:38.212708Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:38.295866Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:38.301216Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:39.181264Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:39.227328Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:39.304499Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:39.502231Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:39.564651Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:39.940516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:39.940632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:39.946235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:39.946293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:39.946980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:39.947014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:39.949796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:39.949892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:39.976651Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-25T14:50:39.976695Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:50:39.976858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:39.976915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:39.977047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:39.997106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:39.999049Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:40.012978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:40.015312Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-25T14:50:40.041860Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:40.042423Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:50:40.055788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12001, node 1 2025-06-25T14:50:41.129016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:41.129040Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:41.129047Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:41.129167Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:41.468483Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899483678165998:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:41.468601Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:41.644554Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899479598104968:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:41.644625Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:41.664414Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519899482024015774:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:41.664492Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:41.687136Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519899482085299997:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:41.687225Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:20227 TClient is connected to server localhost:20227 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:45.326749Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 2146435072 Duration# 0.182152s 2025-06-25T14:50:45.326789Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.190354s 2025-06-25T14:50:45.469482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:45.969784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:47.430251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:49.167761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:50.096742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:54.485135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:50:54.485176Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:55.391160Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899565282546674:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:55.391313Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:55.866322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:55.970652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.062264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.150636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.288131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.472082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.636773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:56.804500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899569577514786:2383], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:56.804580Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:56.804785Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899569577514791:2386], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:56.813947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:56.838993Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899569577514793:2387], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:50:56.895131Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899569577514868:4245] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:58.339523Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863058298, txId: 281474976710672] shutting down >> DataShardWrite::UpsertPreparedNoTxCache-Volatile [GOOD] >> DataShardWrite::WriteCommitVersion >> TSchemeShardTTLUtility::GetExpireAfter [GOOD] >> TSchemeShardTTLTestsWithReboots::MoveTable |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsRange3 [GOOD] Test command err: Trying to start YDB, gRPC: 4999, MsgBus: 30845 2025-06-25T14:50:56.703582Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899567578562209:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:56.703936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a21/r3tmp/tmpj9bsVK/pdisk_1.dat 2025-06-25T14:50:57.319628Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899567578562027:2080] 1750863056652683 != 1750863056652686 2025-06-25T14:50:57.334609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:57.334715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:57.337493Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:57.339425Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4999, node 1 2025-06-25T14:50:57.516891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:57.516917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:57.516924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:57.517023Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:57.652722Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30845 TClient is connected to server localhost:30845 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:58.255380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:58.284001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:58.458259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:58.652527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:58.757346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:00.330342Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899584758432864:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:00.330464Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:00.584974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:00.652764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:00.682810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:00.721269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:00.764803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:00.838222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:00.871999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:00.955749Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899584758433528:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:00.955887Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:00.962597Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899584758433533:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:00.966444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:00.975554Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899584758433535:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:51:01.067762Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899589053400882:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:01.661732Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899567578562209:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:01.661828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:02.187091Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863062169, txId: 281474976710672] shutting down |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceedOnIndexedTable [GOOD] >> TSchemeShardTTLTestsWithReboots::CreateTable >> TSchemeShardTTLTests::AlterTableShouldSucceedOnIndexedTable [GOOD] >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiersAlterTable [GOOD] |86.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLUtility::GetExpireAfter [GOOD] >> TSchemeShardTTLTestsWithReboots::CopyTable >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-true >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiers >> KqpJoinOrder::TPCDS34+ColumnStore [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinOrderHintsSimple+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 6457, MsgBus: 19387 2025-06-25T14:49:24.990963Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899174275623555:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:24.991121Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d66/r3tmp/tmpnLaEmr/pdisk_1.dat 2025-06-25T14:49:25.624120Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:25.639730Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899174275623458:2080] 1750862964973616 != 1750862964973619 2025-06-25T14:49:25.643303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:25.643423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:25.661160Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6457, node 1 2025-06-25T14:49:25.868894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: (empty maybe) 2025-06-25T14:49:25.868921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:25.868932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:25.869043Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:25.996413Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19387 TClient is connected to server localhost:19387 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:26.528579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:28.404237Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899191455493284:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:28.404333Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:28.404426Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899191455493296:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:28.408120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:28.426818Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899191455493298:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:49:28.495298Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899191455493349:2336] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:28.818868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:29.083494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899191455493536:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:29.083695Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899191455493536:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:29.083911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899191455493536:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:29.083999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899191455493536:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:29.084109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899191455493536:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:29.084206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899191455493536:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:29.084299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899191455493536:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:29.084688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899191455493536:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:29.084790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899191455493536:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:29.084879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037902;self_id=[1:7519899191455493536:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:29.084977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899191455493536:2309];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:29.088009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899195750460884:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:29.088058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899195750460884:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:29.088204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899195750460884:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:29.088285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899195750460884:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:29.088715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899195750460884:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:29.088811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899195750460884:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:29.088953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899195750460884:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:29.089052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899195750460884:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:29.089139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899195750460884:2320];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:29.089233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899195750460884:2320];tablet_id=72075186224037896;process=TTxInitSchema::Ex ... 
current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.304903Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.310866Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039186;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.311400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.313364Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.313846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.320596Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.321193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.326842Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.327264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039244;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.332229Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039244;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.333642Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.336978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039314;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.345256Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039314;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.345835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039208;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.358959Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039208;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.359672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.364984Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.365582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.373858Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.374453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.382654Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.383185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039312;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.391191Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039312;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.391746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.396049Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.396923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.400723Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.401295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039332;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.405510Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039332;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.406025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.410473Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.410997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.415229Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.415753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.417288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039300;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.419977Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.421065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.424709Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.424942Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039300;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.530838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.543906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:50.620748Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2we0e8xb8dx8m8x7nynd", SessionId: 
ydb://session/3?node_id=1&id=NTdhMTA5M2YtN2Y5NTQxMDUtMzI3MTE4YzgtMjFlYzlmODI=, Slow query, duration: 34.235660s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:50.878145Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:50.878575Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:50.879693Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716;
: Warning: Execution, code: 1060
: Warning: Unapplied hint: Rows(R T # 1), code: 4534
: Warning: Execution, code: 1060
: Warning: Unapplied hint: Rows(R T # 1), code: 4534 >> DataShardWrite::UpsertNoLocksArbiterRestart [GOOD] >> DataShardWrite::UpsertLostPrepareArbiterRestart >> KqpSystemView::FailResolve [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiersAlterTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:03.031807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:03.031890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.031930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:03.031964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:03.032536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:03.032587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:03.032647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.032780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:03.033527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:03.035200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:03.125978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:03.126052Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:03.146508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:03.146951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:03.147131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:03.153158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:03.153475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear 
TempDirsState with owners number: 0 2025-06-25T14:51:03.154076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.154334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:03.157396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.157545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:03.160523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.160587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.160721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:03.160768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:03.160817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:03.160892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.169496Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:03.300293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:03.300542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.300724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:03.300780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:03.300981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:03.301055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:03.308417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.308649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:03.308793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.308843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:03.308890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:03.308918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:03.312969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.313135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:03.313194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:03.315040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.315102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.315175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.315239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:03.318754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:03.320660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:03.320857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:03.321768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.321888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:03.321937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.322156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:03.322197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.322332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:03.322388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:03.324106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.324141Z node 1 :FLAT_TX_SCHEMESHARD ... meBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:51:03.671217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.671269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:51:03.671816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:03.671918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:03.671963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:51:03.671997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-25T14:51:03.672035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:03.672106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-25T14:51:03.675475Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:03.687609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1160 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:51:03.687677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:03.687800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1160 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:51:03.687890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1160 } } CommitVersion { Step: 5000003 TxId: 102 } FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:51:03.688854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:03.688904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:03.689037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:03.689093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:03.689175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:03.689233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.689297Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.689336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:03.689379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:51:03.692091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.692422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.692689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.692727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:51:03.692838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:51:03.692874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:03.692908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:51:03.692937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:03.692987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:51:03.693061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 102 2025-06-25T14:51:03.693113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:03.693147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:51:03.693174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:51:03.693304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:51:03.695611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:51:03.695661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:398:2368] TestWaitNotification: OK eventTxId 102 2025-06-25T14:51:03.696304Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:03.696601Z node 1 
:SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 272us result status StatusSuccess 2025-06-25T14:51:03.697052Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceedOnIndexedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:03.036288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, 
Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:03.036410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.036449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:03.036490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:03.036529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:03.036556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:03.036620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.036703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:03.037418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:03.037752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:03.129006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:03.129071Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:03.147303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:03.147727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:03.147873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:03.154483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:03.154813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:03.155412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.155689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:03.158946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.159096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:03.160533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.160593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.160762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:03.160821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:03.160869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:03.160977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.167577Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:03.310603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:03.310834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.311028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:03.311093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:03.311343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:03.311413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:03.313554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.313790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:03.313952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.314025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: 
TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:03.314097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:03.314136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:03.315999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.316058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:03.316395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:03.318107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.318164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.318234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.318311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:03.336787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:03.340823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:03.341023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:03.342100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.342247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:03.342311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.342616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:03.342684Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.342874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:03.342945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:03.345383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.345435Z node 1 :FLAT_TX_SCHEMESHARD ... 8977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:2 129 -> 240 2025-06-25T14:51:03.679503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 332 RawX2: 4294969609 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:03.679540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409547, partId: 0 2025-06-25T14:51:03.679641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 332 RawX2: 4294969609 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:03.679681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:03.679736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 332 RawX2: 4294969609 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:03.679778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.679803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.679846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:51:03.679901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 129 -> 240 2025-06-25T14:51:03.682280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:03.682366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:03.682462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:03.689020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:03.689165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:03.689264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.689360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:03.689651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.689895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:03.690012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:2 ProgressState 2025-06-25T14:51:03.690140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:2 progress is 2/3 2025-06-25T14:51:03.690182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-25T14:51:03.690219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:2 progress is 2/3 2025-06-25T14:51:03.690253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-25T14:51:03.690290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-06-25T14:51:03.690523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.690559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:51:03.690613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 3/3 2025-06-25T14:51:03.690638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:03.690675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 3/3 2025-06-25T14:51:03.690705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:03.690739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-06-25T14:51:03.690811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to 
actorId: [1:385:2351] message: TxId: 101 2025-06-25T14:51:03.690861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:03.690919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:51:03.690962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:51:03.691143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:03.691188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:1 2025-06-25T14:51:03.691210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:1 2025-06-25T14:51:03.691238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:51:03.691258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:2 2025-06-25T14:51:03.691277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:2 2025-06-25T14:51:03.691344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:51:03.693815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:03.693863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:386:2352] TestWaitNotification: OK eventTxId 101 2025-06-25T14:51:03.694392Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:03.694658Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 279us result status StatusSuccess 2025-06-25T14:51:03.695258Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: 
"UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSucceedOnIndexedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:03.031034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:03.031137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.031182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:03.031224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:03.033706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:03.033774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:03.033864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.033940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:03.034966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:03.035362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:03.120092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:03.120152Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:03.137159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:03.137543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:03.137805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:03.143257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:03.143662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:03.145951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.147059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:03.154076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.154959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:03.160497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.160557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.160697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:03.160744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:03.160823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:03.160896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.167629Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:03.298086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:03.298297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.298489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:03.298546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:03.298758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:03.298826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:03.300880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.301054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:03.301216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.301273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:03.301345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:03.301382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:03.303080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.303170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:03.303215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:03.304704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.304756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.304827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.304889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:03.308280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:03.309800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:03.309948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:03.310767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.310883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:03.310943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.311181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:03.311230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.311376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:03.311433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:03.313141Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.313185Z node 1 :FLAT_TX_SCHEMESHARD ... 025-06-25T14:51:03.678369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:51:03.679166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:03.679268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:03.679312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:51:03.679347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-25T14:51:03.679390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:51:03.679479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-25T14:51:03.683936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:03.709897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1197 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:51:03.709971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-25T14:51:03.710133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1197 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:51:03.710253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1197 } } CommitVersion { Step: 5000003 TxId: 102 } FAKE_COORDINATOR: Erasing txId 102 
2025-06-25T14:51:03.711406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 332 RawX2: 4294969609 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:03.711462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-25T14:51:03.711614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 332 RawX2: 4294969609 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:03.711675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:03.711775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 332 RawX2: 4294969609 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:03.711845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.711888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.711940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:51:03.711992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:51:03.721500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.722910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.723290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.723343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:51:03.723456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:51:03.723514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:03.723559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:51:03.723593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 
2025-06-25T14:51:03.723649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:51:03.723734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:385:2351] message: TxId: 102 2025-06-25T14:51:03.723785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:03.723828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:51:03.723866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:51:03.723993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:03.726688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:51:03.726766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:468:2427] TestWaitNotification: OK eventTxId 102 2025-06-25T14:51:03.727434Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:03.727732Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 302us result status StatusSuccess 2025-06-25T14:51:03.728278Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTTLTests::AlterTableShouldSuccess >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiers [GOOD] >> KqpJoinOrder::TPCDS88+ColumnStore [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::FailResolve [GOOD] Test command err: Trying to start YDB, gRPC: 1064, MsgBus: 30807 2025-06-25T14:50:58.580903Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899574178424087:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:58.581116Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a1f/r3tmp/tmpni9XNf/pdisk_1.dat 2025-06-25T14:50:58.984724Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1064, node 1 2025-06-25T14:50:59.010927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:59.012251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:59.034726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:59.129405Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:59.129427Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:59.129433Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:50:59.129545Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30807 2025-06-25T14:50:59.577084Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30807 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:59.775280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:59.790743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:59.805384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:59.952068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:00.120254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:51:00.212583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:01.855705Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899587063327409:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:01.855829Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:02.130077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:02.169798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:02.248479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:02.284305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:02.322225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:02.373811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:02.419289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:02.525406Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899591358295369:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:02.525490Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:02.525918Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899591358295374:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:02.530933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:02.548733Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899591358295376:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:51:02.631672Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899591358295429:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:03.588815Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899574178424087:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:03.588947Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:03.640635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:51:03.781819Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7519899595653263049:3623], for# user0@builtin, access# SelectRow 2025-06-25T14:51:03.781982Z node 1 :KQP_EXECUTER ERROR: kqp_table_resolver.cpp:275: TxId: 281474976710674. Error resolving keys for entry: { TableId: [OwnerId: 72057594046644480, LocalPathId: 1] Access: 1 SyncVersion: false Status: AccessDenied Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Uint64 : NULL, Uint64 : NULL, Uint64 : NULL, Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-25T14:51:03.790716Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ODBlYjg5M2EtNmQyMmIxMGEtOTRiMzgyMTktMTM4ZmUzMDQ=, ActorId: [1:7519899595653263022:2478], ActorState: ExecuteState, TraceId: 01jyks4akp9rjz56f5h8dn2qyy, Create QueryResponse for error on request, msg: 2025-06-25T14:51:03.792050Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863063780, txId: 281474976710673] shutting down 2025-06-25T14:51:03.792999Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyks4akp9rjz56f5h8dn2qyy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODBlYjg5M2EtNmQyMmIxMGEtOTRiMzgyMTktMTM4ZmUzMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> TSchemeShardTTLTests::TtlTiersValidation >> TSchemeShardColumnTableTTL::AlterColumnTable_Negative >> TSchemeShardTTLTests::AlterTableShouldSucceedOnAsyncIndexedTable >> KqpSystemView::NodesRange1 [GOOD] >> TSchemeShardTTLTests::ShouldSkipDroppedColumn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::LegacyTtlSettingsNoTiers [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:04.507685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:04.507754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:04.507783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:04.507808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:04.507840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:04.507861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:04.507902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:04.507955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:04.508704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:04.509002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:04.576714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:04.576782Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:04.595041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:04.595475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:04.595662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:04.601070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:04.601369Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:04.602049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.602344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:04.605504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:04.605694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:04.606923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:04.606986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:04.607124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:04.607181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:04.607222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:04.607303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.617132Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:04.767476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:04.767739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.767980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:04.768027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:04.768254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:04.768375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at 
schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:04.777130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.777373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:04.777571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.777626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:04.777688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:04.777723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:04.779646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.779751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:04.779795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:04.781449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.781511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.781590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:04.781644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:04.790869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:04.792747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:04.792914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 
5000001 2025-06-25T14:51:04.793811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.793944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:04.794002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:04.794290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:04.794348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:04.794516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:04.794595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:04.796492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:04.796542Z node 1 :FLAT_TX_SCHEMESHARD ... 
ish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:51:05.050628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:05.051676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:51:05.051761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:51:05.051791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:51:05.051823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:51:05.051853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:05.051924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-25T14:51:05.052633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1301 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:05.052684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:05.052813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1301 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:05.052928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1301 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:05.053470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 
2025-06-25T14:51:05.053512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:05.053612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:05.053684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:05.053836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:05.053912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:05.053965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.054009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:05.054051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 129 -> 240 2025-06-25T14:51:05.057498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:05.059210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:05.059309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.059434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.059746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.059799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:51:05.059898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:05.059936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:05.059974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:05.060022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:05.060063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-25T14:51:05.060141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 101 2025-06-25T14:51:05.060191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:05.060231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:51:05.060265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:51:05.060453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:51:05.062415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:05.062460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:341:2318] TestWaitNotification: OK eventTxId 101 2025-06-25T14:51:05.063033Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:05.063301Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 245us result status StatusSuccess 2025-06-25T14:51:05.063780Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 
ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTTLTests::AlterTableShouldSuccess [GOOD] >> TSchemeShardTTLTests::CreateTableShouldFailOnBeforeEpochTTL >> KqpJoinOrder::TPCDS94-ColumnStore [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceedAsyncOnIndexedTable >> TSchemeShardTTLTests::BuildIndexShouldSucceed >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_UnknownColumn >> DataShardWrite::InsertImmediate [GOOD] >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSuccess [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:05.270840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:05.270940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:05.270978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:05.271014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:05.271054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:05.271081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:05.271139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, 
InflightLimit# 10 2025-06-25T14:51:05.271211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:05.271943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:05.272282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:05.350584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:05.350647Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:05.367524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:05.367904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:05.368072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:05.373551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:05.373856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:05.374523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:05.374829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:05.378424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:05.378613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:05.379901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:05.379960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:05.380118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:05.380175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:05.380223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:05.380346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.386536Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 
2025-06-25T14:51:05.510489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:05.510733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.510938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:05.510985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:05.511198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:05.511281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:05.517457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:05.517673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:05.517872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.517931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:05.517999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:05.518045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:05.520008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.520111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:05.520164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:05.521818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.521878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.521952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:05.522008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:05.525804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:05.527675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:05.527857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:05.528863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:05.529027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:05.529090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:05.529372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:05.529423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:05.529594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:05.529667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:05.531650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:05.531701Z node 1 :FLAT_TX_SCHEMESHARD ... 
__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-25T14:51:05.918485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.918541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:51:05.919019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:51:05.919124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:51:05.919163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:51:05.919206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T14:51:05.919260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:05.919348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-25T14:51:05.929112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:51:05.941039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000004 OrderId: 104 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1296 } } CommitVersion { Step: 5000004 TxId: 104 } 2025-06-25T14:51:05.941092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:05.941243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000004 OrderId: 104 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1296 } } CommitVersion { Step: 5000004 TxId: 104 } 2025-06-25T14:51:05.941353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000004 OrderId: 104 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { 
PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1296 } } CommitVersion { Step: 5000004 TxId: 104 } FAKE_COORDINATOR: Erasing txId 104 2025-06-25T14:51:05.942238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-25T14:51:05.942304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:05.942432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-25T14:51:05.942480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:05.942628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-25T14:51:05.942695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 104:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:05.942730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.942764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 104:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:05.942830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 129 -> 240 2025-06-25T14:51:05.945422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.945760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.946032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:51:05.946076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-25T14:51:05.946195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:51:05.946232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:51:05.946272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:51:05.946306Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:51:05.946354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-25T14:51:05.946415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 104 2025-06-25T14:51:05.946470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:51:05.946519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:51:05.946550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:51:05.946693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:51:05.948304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:51:05.948366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:446:2416] TestWaitNotification: OK eventTxId 104 2025-06-25T14:51:05.948959Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:05.949174Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 226us result status StatusSuccess 2025-06-25T14:51:05.949617Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 3 TTLSettings { Disabled { } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 
WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTTLTests::AlterTableShouldSucceedOnAsyncIndexedTable [GOOD] >> TSchemeShardTTLTests::CreateTableShouldFailOnBeforeEpochTTL [GOOD] >> TSchemeShardTTLTests::TtlTiersValidation [GOOD] |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ShouldCheckQuotas >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongColumnType ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS34+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 6517, MsgBus: 26200 2025-06-25T14:49:09.448016Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899106685682579:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:09.451894Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d75/r3tmp/tmpr8W3sd/pdisk_1.dat 2025-06-25T14:49:10.105358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:10.105440Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:10.108245Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:10.156688Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:10.157498Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899106685682476:2080] 1750862949430566 != 1750862949430569 TServer::EnableGrpc on GrpcPort 6517, node 1 2025-06-25T14:49:10.376773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:10.376791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:10.376796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: 
failed to initialize from file: (empty maybe) 2025-06-25T14:49:10.376877Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:10.447352Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26200 TClient is connected to server localhost:26200 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:11.395291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:11.457268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:13.591212Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899123865552305:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:13.591306Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:13.591753Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899123865552317:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:13.595538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:13.619952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:49:13.620180Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899123865552319:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:13.699000Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899123865552370:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:14.131642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:14.473619Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899128160519865:2312];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:14.473868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899128160519865:2312];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:14.474163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899128160519865:2312];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:14.474284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899128160519865:2312];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:14.474378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899128160519865:2312];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:14.474480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899128160519865:2312];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:14.474610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899128160519865:2312];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:14.474748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899128160519865:2312];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:14.474874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899128160519865:2312];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:14.475050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519899128160519865:2312];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:14.475192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899128160519865:2312];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:14.477647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899128160519863:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:14.477705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899128160519863:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:14.477937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899128160519863:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:14.478039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899128160519863:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:14.478126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899128160519863:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:14.478205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899128160519863:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:14.478289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899128160519863:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:14.478400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899128160519863:2310];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:14.478512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=720751862240 ... 
;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.153434Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.153941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.156650Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.157183Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.158551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039309;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.159375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.161416Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.162363Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.164098Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.165114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.167555Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.168187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.169506Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039365;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.170090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.172875Z node 1 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039419;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.173561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.174764Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.175466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.178858Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.183200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.183468Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.184080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.189652Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.190246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.195204Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039363;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.195813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.203204Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039331;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.205734Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039319;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.258675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039262;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.264104Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039262;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.291506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:37.298472Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:37.435007Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2c66cytj8x8cw3r7crpv", SessionId: ydb://session/3?node_id=1&id=ZDE0OWUwMTAtZWZkYmFhYTEtNTFmNjU4NTUtZjExMDY0ZTI=, Slow query, duration: 37.683991s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:37.997273Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:37.997766Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:37.998205Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899437398220333:9730];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-25T14:50:37.998605Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:55.987559Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks3ryxd6zss414e0a9n89c", SessionId: ydb://session/3?node_id=1&id=ZDE0OWUwMTAtZWZkYmFhYTEtNTFmNjU4NTUtZjExMDY0ZTI=, Slow query, duration: 10.389310s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query34.tpl and seed 1971067816\nselect c_last_name\n ,c_first_name\n ,c_salutation\n ,c_preferred_cust_flag\n ,ss_ticket_number\n ,cnt from\n (select store_sales.ss_ticket_number ss_ticket_number\n ,store_sales.ss_customer_sk ss_customer_sk\n ,count(*) cnt\n from store_sales as store_sales 
\n cross join date_dim as date_dim\n cross join store as store\n cross join household_demographics as household_demographics\n where store_sales.ss_sold_date_sk = date_dim.d_date_sk\n and store_sales.ss_store_sk = store.s_store_sk\n and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk\n and (date_dim.d_dom between 1 and 3 or date_dim.d_dom between 25 and 28)\n and (household_demographics.hd_buy_potential = '>10000' or\n household_demographics.hd_buy_potential = 'Unknown')\n and household_demographics.hd_vehicle_count > 0\n and (case when household_demographics.hd_vehicle_count > 0\n\t then household_demographics.hd_dep_count/ household_demographics.hd_vehicle_count\n\t else null\n\t end) > 1.2\n and date_dim.d_year in (2000,2000+1,2000+2)\n and store.s_county in ('Salem County','Terrell County','Arthur County','Oglethorpe County',\n 'Lunenburg County','Perry County','Halifax County','Sumner County')\n group by store_sales.ss_ticket_number,store_sales.ss_customer_sk) dn \n cross join customer as customer\n where ss_customer_sk = c_customer_sk\n and cnt between 15 and 20\n order by c_last_name,c_first_name,c_salutation,c_preferred_cust_flag desc, ss_ticket_number;\n\n-- end query 1 in stream 0 using template query34.tpl", parameters: 0b ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnBeforeEpochTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:06.568257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:06.568389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.568428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:06.568459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:06.568495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:06.568521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:06.568572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.568632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:06.569352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: 
ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:06.569717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:06.645671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:06.645726Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:06.660575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:06.660898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:06.661070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:06.665925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:06.666187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:06.666779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.667032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:06.669825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:06.669977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:06.671000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:06.671049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:06.671166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:06.671211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:06.671246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:06.671323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.677042Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:06.794002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:06.794232Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.794419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:06.794456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:06.794651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:06.794731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:06.796891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.797067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:06.797217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.797265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:06.797327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:06.797366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:06.799079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.799127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:06.799216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:06.800498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.800539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.800594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.800672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 
2025-06-25T14:51:06.807367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:06.809134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:06.809266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:06.810188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.810333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:06.810391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.810627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:06.810676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.810837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:06.810907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:06.812866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:06.812905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:06.813078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:06.813126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:51:06.813400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:51:06.813438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:51:06.813517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:06.813549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:06.813587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:06.813624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:06.813657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:51:06.813694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:06.813729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:51:06.813761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:51:06.813817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:06.813849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:51:06.813914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:51:06.815502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:06.815595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:06.815630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:51:06.815665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:51:06.815698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:06.815773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:51:06.818304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:51:06.818712Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR W0000 00:00:1750863066.819610 450915 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TTableDescription: 9:35: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 101 2025-06-25T14:51:06.819950Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:274:2263] Bootstrap 2025-06-25T14:51:06.837819Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:51:06.840159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3153600000 Tiers { ApplyAfterSeconds: 3153600000 Delete { } } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:06.840528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.840649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3153600000 Tiers { ApplyAfterSeconds: 3153600000 Delete { } } } }, at schemeshard: 72057594046678944 2025-06-25T14:51:06.841025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: TTL should be less than 1750863066 seconds (20264 days, 55 years). The ttl behaviour is undefined before 1970., at schemeshard: 72057594046678944 2025-06-25T14:51:06.842055Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:51:06.844553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "TTL should be less than 1750863066 seconds (20264 days, 55 years). The ttl behaviour is undefined before 1970." TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:06.844787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: TTL should be less than 1750863066 seconds (20264 days, 55 years). 
The ttl behaviour is undefined before 1970., operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-25T14:51:06.845221Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::NodesRange1 [GOOD] Test command err: Trying to start YDB, gRPC: 15141, MsgBus: 16489 2025-06-25T14:50:46.684904Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899523118492851:2213];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:46.684979Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:46.764781Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899525053650262:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:46.764825Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:46.921809Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519899524089544706:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:46.921878Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:47.033867Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519899529441545324:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:47.033901Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:47.416882Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519899524150571904:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:47.917452Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a65/r3tmp/tmpN6jnzk/pdisk_1.dat 2025-06-25T14:50:48.110635Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:48.415242Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:48.418670Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:48.453712Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:48.477985Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:48.530027Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:48.499419Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:48.599445Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:48.620151Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:49.284729Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:49.474207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:49.474307Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:49.548640Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:49.596707Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:49.632996Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:49.643022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:49.643104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:49.643610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:49.643659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:49.643736Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:49.643764Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:49.643933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:49.643966Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-25T14:50:49.646252Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:49.656297Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-25T14:50:49.656340Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-25T14:50:49.669602Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:50:49.717079Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:50:49.717224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:49.717453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:49.717552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:49.717633Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:49.717929Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:49.718277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:50.272941Z node 1 :BS_CONTROLLER ERROR: {BSC07@impl.h:2206} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.121100s 2025-06-25T14:50:50.273088Z node 1 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:705} StateWork event processing took too much time Type# 2146435078 Duration# 0.121266s 2025-06-25T14:50:50.312359Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 15141, node 1 2025-06-25T14:50:51.048913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:51.048932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:51.048939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:51.049094Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:51.634522Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899523118492851:2213];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:51.635894Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:51.784425Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899525053650262:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:51.792454Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:51.924476Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519899524089544706:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:51.924557Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:51.968584Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519899524150571904:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:51.968631Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:52.034414Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519899529441545324:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:52.034472Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; TClient is connected to server localhost:16489 TClient is connected to server localhost:16489 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:55.235114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:55.468674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:56.461263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:57.516936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:58.151985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:00.556120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899583248036797:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:00.556259Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:00.874200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:00.939262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:00.993847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:01.041463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:01.088390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:01.166383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:01.203029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:01.258846Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899587543004863:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:01.258915Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899587543004868:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:01.258922Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:01.262066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:01.279530Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899587543004870:2370], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:51:01.383701Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899587543004947:4172] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:02.786539Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863062758, txId: 281474976710672] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::TtlTiersValidation [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:06.115954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:06.116025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.116061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:06.116098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:06.116131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:06.116153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:06.116200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.116263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:06.116887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:06.117157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:06.187290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:06.187360Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:06.201679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:06.202099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:06.202282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:06.207739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:06.208035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:06.208790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.209082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:06.212281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:06.212495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:06.213793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:06.213854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:06.214003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:06.214070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:06.214115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:06.214208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.221549Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:06.347568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:06.347806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.347980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:06.348027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:06.348243Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:06.348359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:06.351080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.351318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:06.351528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.351590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:06.351654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:06.351693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:06.354836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.354958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:06.355017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:06.357038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.357100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.357169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.357228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:06.360834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:06.362469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 
2025-06-25T14:51:06.362623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:06.363514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.363639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:06.363694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.363956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:06.364010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.364162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:06.364250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:06.366827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:06.366887Z node 1 :FLAT_TX_SCHEMESHARD ... 
2025-06-25T14:51:06.615287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:06.616206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:51:06.616297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:51:06.616343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:51:06.616377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:51:06.616407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:06.616464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-25T14:51:06.617110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1245 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:06.617161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:06.617292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1245 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:06.617383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1245 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:06.617876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:06.617917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation 
FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:06.618038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:06.618093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:06.618194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:06.618256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.618302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.618343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:06.618379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 129 -> 240 2025-06-25T14:51:06.621161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:06.622953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:06.623050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.623145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.623461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.623503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:51:06.623588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:06.623624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:06.623677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:06.623715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:06.623749Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-25T14:51:06.623810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 101 2025-06-25T14:51:06.623853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:06.623888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:51:06.623925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:51:06.624073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:51:06.625802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:06.625850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:341:2318] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-25T14:51:06.628964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "TTLEnabledTable" TTLSettings { Enabled { ColumnName: "modified_at" Tiers { ApplyAfterSeconds: 3600 Delete { } } Tiers { ApplyAfterSeconds: 7200 Delete { } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:06.629188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:506: TAlterTable Propose, path: /MyRoot/TTLEnabledTable, pathId: , opId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.629516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Tier 0: only the last tier in TTL settings can have Delete action, at schemeshard: 72057594046678944 2025-06-25T14:51:06.631695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Tier 0: only the last tier in TTL settings can have Delete action" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:06.631916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Tier 0: only the last tier in TTL settings can have Delete action, operation: ALTER TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 2025-06-25T14:51:06.634850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "TTLEnabledTable" TTLSettings { Enabled { ColumnName: "modified_at" Tiers { ApplyAfterSeconds: 3600 EvictToExternalStorage { Storage: "/Root/abc" } } Tiers { ApplyAfterSeconds: 7200 Delete { } } } } } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-06-25T14:51:06.635111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:506: TAlterTable Propose, path: /MyRoot/TTLEnabledTable, pathId: , opId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.635426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 103:1, propose status:StatusInvalidParameter, reason: Only DELETE via TTL is allowed for row-oriented tables, at schemeshard: 72057594046678944 2025-06-25T14:51:06.637526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 103, response: Status: StatusInvalidParameter Reason: "Only DELETE via TTL is allowed for row-oriented tables" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:06.637723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Only DELETE via TTL is allowed for row-oriented tables, operation: ALTER TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 103, wait until txId: 103 |86.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_UnknownColumn [GOOD] >> TSchemeShardTTLTests::CreateTableShouldSucceedAsyncOnIndexedTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSucceedOnAsyncIndexedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:06.179356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:06.179432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.179464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:06.179497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:06.179532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:06.179560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:06.179600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.179665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-06-25T14:51:06.182965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:06.183330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:06.254349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:06.254412Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:06.278205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:06.278561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:06.278714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:06.283364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:06.283596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:06.284193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.284480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:06.287192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:06.287342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:06.288404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:06.288455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:06.288604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:06.288650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:06.288685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:06.288757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.294240Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:06.397106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" 
Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:06.397341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.397503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:06.397543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:06.397737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:06.397810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:06.399692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.399867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:06.400006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.400050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:06.400109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:06.400142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:06.401772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.401856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:06.401898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:06.403212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.403263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.403323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.403369Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:06.406537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:06.408011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:06.408172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:06.409176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.409292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:06.409344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.409611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:06.409659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.409819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:06.409886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:06.411572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:06.411610Z node 1 :FLAT_TX_SCHEMESHARD ... 
5-06-25T14:51:06.737496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:51:06.738499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:06.738590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:06.738638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:51:06.738680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-25T14:51:06.738717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:51:06.738793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-25T14:51:06.743551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:06.769595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 995 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:51:06.769657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-25T14:51:06.769795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 995 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:51:06.769899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 995 } } CommitVersion { Step: 5000003 TxId: 102 } FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:51:06.770893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { 
RawX1: 332 RawX2: 4294969609 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:06.770937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-25T14:51:06.771047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 332 RawX2: 4294969609 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:06.771095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:06.771179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 332 RawX2: 4294969609 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:06.771254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.771291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.771324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:51:06.771370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:51:06.773915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.774291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.774553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.774593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:51:06.774693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:51:06.774726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:06.774762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:51:06.774789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:06.774834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:51:06.774893Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:385:2351] message: TxId: 102 2025-06-25T14:51:06.774937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:06.774977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:51:06.775011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:51:06.775126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:06.776629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:51:06.776684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:515:2438] TestWaitNotification: OK eventTxId 102 2025-06-25T14:51:06.777479Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:06.777729Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 262us result status StatusSuccess 2025-06-25T14:51:06.778273Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 
ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTTLTests::BuildIndexShouldSucceed [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_UnknownColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:06.993416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:06.993491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.993528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:06.993559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:06.993593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:06.993622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:06.993666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.993727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:06.994429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: 
HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:06.994753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:07.070650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:07.070703Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:07.088708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:07.089096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:07.089273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:07.094838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:07.095150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:07.095796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:07.096089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:07.099251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:07.099429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:07.100603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:07.100660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:07.100788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:07.100835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:07.100885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:07.100966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.107313Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:07.237095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:07.237312Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.237490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:07.237532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:07.237751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:07.237827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:07.239751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:07.239939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:07.240111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.240188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:07.240247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:07.240291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:07.242125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.242220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:07.242260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:07.243925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.243990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.244058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:07.244104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:07.254349Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:07.256533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:07.256699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:07.257699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:07.257835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:07.257897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:07.258160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:07.258220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:07.258378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:07.258466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:07.260585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:07.260645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:07.260834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:07.260883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:51:07.261212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.261257Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:51:07.261357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:07.261388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:07.261424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:07.261475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:07.261512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:51:07.261560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:07.261603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:51:07.261634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:51:07.261694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:07.261729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:51:07.261759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:51:07.263846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:07.263950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:07.263982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:51:07.264016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:51:07.264053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:07.264150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:51:07.267070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:51:07.267595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, 
unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T14:51:07.268937Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:274:2263] Bootstrap 2025-06-25T14:51:07.289187Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:51:07.291819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TTLEnabledTable" Schema { Columns { Name: "key" Type: "Uint64" NotNull: true } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" } TtlSettings { Enabled { ColumnName: "created_at" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:07.292145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.292558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Incorrect ttl column - not found in scheme, at schemeshard: 72057594046678944 2025-06-25T14:51:07.293588Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:51:07.295856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Incorrect ttl column - not found in scheme" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:07.296054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Incorrect ttl column - not found in scheme, operation: CREATE COLUMN TABLE, path: /MyRoot/ 2025-06-25T14:51:07.296460Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceedAsyncOnIndexedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:06.913976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:06.914063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.914101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:06.914135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:06.914177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:06.914206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:06.914258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.914322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:06.915090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:06.915447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:06.993762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:06.993829Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:07.023585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:07.024030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:07.024232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:07.037329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:07.037786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:07.038419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:07.038742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:07.053121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:07.053335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:07.054652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:07.054719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:07.054865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:07.054918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, 
domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:07.054962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:07.055072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.070879Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:07.220682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:07.220903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.221081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:07.221157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:07.221364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:07.221438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:07.224039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:07.224237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:07.224427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.224484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:07.224559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:07.224604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:07.228788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.228889Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:07.229069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:07.230922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.230998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.231066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:07.231121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:07.234572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:07.243380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:07.243571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:07.244650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:07.244805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:07.244866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:07.245125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:07.245177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:07.245343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:07.245433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:07.248024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:07.248082Z node 1 :FLAT_TX_SCHEMESHARD ... node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:2 129 -> 240 2025-06-25T14:51:07.512656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 332 RawX2: 4294969609 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:07.512685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409547, partId: 0 2025-06-25T14:51:07.512767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 332 RawX2: 4294969609 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:07.512799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:07.512848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 332 RawX2: 4294969609 } Origin: 72075186233409547 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:07.512894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:07.512921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.512948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:51:07.512976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 129 -> 240 2025-06-25T14:51:07.517672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:07.517863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:07.517938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:07.528090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:07.528347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:2, at schemeshard: 
72057594046678944 2025-06-25T14:51:07.528704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.529023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:07.529214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.529512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:07.529543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:2 ProgressState 2025-06-25T14:51:07.529619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:2 progress is 2/3 2025-06-25T14:51:07.529646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-25T14:51:07.529677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:2 progress is 2/3 2025-06-25T14:51:07.529707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-25T14:51:07.529734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-06-25T14:51:07.530087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.530112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:51:07.530169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 3/3 2025-06-25T14:51:07.530186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:07.530213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 3/3 2025-06-25T14:51:07.530229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:07.530244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-06-25T14:51:07.530298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:385:2351] message: TxId: 101 2025-06-25T14:51:07.530335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:07.530366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:51:07.530392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:51:07.530496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId 
[OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:07.530527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:1 2025-06-25T14:51:07.530540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:1 2025-06-25T14:51:07.530566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:51:07.530579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:2 2025-06-25T14:51:07.530599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:2 2025-06-25T14:51:07.530627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:51:07.533254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:07.533304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:386:2352] TestWaitNotification: OK eventTxId 101 2025-06-25T14:51:07.533807Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:07.534080Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 257us result status StatusSuccess 2025-06-25T14:51:07.534622Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 
RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTTLTests::CheckCounters >> DataShardWrite::PreparedDistributedWritePageFault [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS94-ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 3109, MsgBus: 23859 2025-06-25T14:49:58.760765Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899319020373855:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:58.764762Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d42/r3tmp/tmpdupIKU/pdisk_1.dat 2025-06-25T14:49:59.411729Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:59.416451Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:59.484057Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:59.512282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3109, node 1 2025-06-25T14:49:59.704625Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:59.704646Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:59.704652Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:59.704741Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:59.819214Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is 
connected to server localhost:23859 TClient is connected to server localhost:23859 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:00.892211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:00.957033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:03.094623Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899340495210941:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:03.094740Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:03.095010Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899340495210953:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:03.098718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:03.118477Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899340495210955:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:50:03.183631Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899340495211006:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:03.451698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.575211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.608984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.667345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.704999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.768505Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899319020373855:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:03.768559Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:03.864688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.898434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.936349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:03.969036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.023868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.071027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.115040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.155712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:04.907260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemesh ... 
32482Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038533;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.032979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.033016Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.037189Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038553;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.037303Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038527;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.037695Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.038119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.042356Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038548;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.042835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.042853Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038521;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.043353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.047943Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038523;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.048673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.051265Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038545;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.051736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.053136Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038519;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.053612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.055622Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038547;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.056109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038541;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.058382Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038505;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.059070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.060610Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038541;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.061083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.063296Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038566;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.063770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.065380Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038503;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.065883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.068294Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038591;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.069265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038539;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.070061Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038561;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.070645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.074509Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038539;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.075180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.080989Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038557;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.081610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.089645Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038525;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.090433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.090789Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.095586Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038555;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.138922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=67;result=not_found; 2025-06-25T14:50:43.147445Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038616;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:43.176195Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2kbd5r2qtd63656k3hrk", SessionId: ydb://session/3?node_id=1&id=NDBiNWE0ZGMtMTU5MTEyZi1hNzMwMjNhMy1iNWFiM2I0YQ==, Slow query, duration: 36.090391s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:43.713372Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038170;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:43.713861Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038629;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:43.714182Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;self_id=[1:7519899422099605826:4217];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038331;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038629;receive=72075186224038170; 2025-06-25T14:50:43.715772Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038331;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpJoinOrder::TestJoinHint1+ColumnStore [GOOD] >> TSchemeShardTTLTests::AlterTableShouldFailOnSimultaneousDropColumnAndEnableTTL >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongColumnType [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS88+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 18141, MsgBus: 31523 2025-06-25T14:47:56.567845Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898795489276908:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:56.572446Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000db9/r3tmp/tmptc3h1u/pdisk_1.dat 2025-06-25T14:47:57.218744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:57.218861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:57.225248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:47:57.233371Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:57.234623Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898795489276726:2080] 1750862876500825 != 1750862876500828 TServer::EnableGrpc on GrpcPort 18141, node 1 2025-06-25T14:47:57.503744Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:47:57.571604Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: (empty maybe) 2025-06-25T14:47:57.571623Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:57.571629Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:57.571754Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31523 TClient is connected to server localhost:31523 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:58.425846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:48:01.025439Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898816964113853:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:01.025579Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:01.026043Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898816964113865:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:01.030204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:01.055898Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898816964113867:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:48:01.145824Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898816964113918:2338] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:01.521230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:48:01.532506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898795489276908:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:01.532566Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:01.758681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898816964114116:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:01.758864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898816964114116:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:01.759098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898816964114116:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:01.759189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898816964114116:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:01.759303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898816964114116:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:01.759400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898816964114116:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:01.759485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898816964114116:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:01.759573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898816964114116:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:01.759672Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898816964114116:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:48:01.759785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898816964114116:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:48:01.759878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519898816964114116:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:48:01.778104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898816964114118:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:48:01.778153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898816964114118:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:48:01.778329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898816964114118:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:48:01.778432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898816964114118:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:48:01.778569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898816964114118:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:48:01.778665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898816964114118:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:48:01.778781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898816964114118:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:48:01.778873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519898816964114118:2317];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:48:01.778995Z node 1 :TX_COLU ... 
gressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:32.216994Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039231;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:32.217910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:32.221928Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:32.222462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:32.222780Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:32.223264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:32.227424Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039201;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:32.227846Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:32.228060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:32.228441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:32.233040Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:32.233553Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:32.233690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:32.234052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:49:32.238829Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:32.240043Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039245;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:49:32.381904Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks0bmr2whywy7wn8zqzgev", SessionId: ydb://session/3?node_id=1&id=MzFlZTVmNGMtYjIyZWZmY2YtZWViYmUwOTQtNWQ2NjdlNjA=, Slow query, duration: 38.724583s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:49:32.937440Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:49:32.937878Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:49:32.938480Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519899053187349693:6928];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-25T14:49:32.938864Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:50:55.238433Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2s6p7meb8f4ph6d4be73", SessionId: ydb://session/3?node_id=1&id=MzFlZTVmNGMtYjIyZWZmY2YtZWViYmUwOTQtNWQ2NjdlNjA=, Slow query, duration: 42.159046s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query88.tpl and seed 318176889\nselect *\nfrom\n (select count(*) h8_30_to_9\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 8\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) 
or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s1 cross join\n (select count(*) h9_to_9_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 9\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s2 cross join\n (select count(*) h9_30_to_10\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 9\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s3 cross join\n (select count(*) h10_to_10_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 10\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s4 cross join\n (select count(*) h10_30_to_11\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 10\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s5 cross join\n (select count(*) h11_to_11_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 11\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s6 cross join\n (select count(*) h11_30_to_12\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and 
ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 11\n and time_dim.t_minute >= 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s7 cross join\n (select count(*) h12_to_12_30\n from store_sales cross join household_demographics cross join time_dim cross join store\n where ss_sold_time_sk = time_dim.t_time_sk\n and ss_hdemo_sk = household_demographics.hd_demo_sk\n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 12\n and time_dim.t_minute < 30\n and ((household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or\n (household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or\n (household_demographics.hd_dep_count = 3 and household_demographics.hd_vehicle_count<=3+2))\n and store.s_store_name = 'ese') s8\n;", parameters: 0b >> TSchemeShardTTLTests::ShouldCheckQuotas [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BuildIndexShouldSucceed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:06.951397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:06.951489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.951528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:06.951565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:06.951618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:06.951646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:06.951703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.951774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:06.952594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:06.952951Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:07.035243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:07.035300Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:07.053237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:07.053630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:07.053795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:07.059905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:07.060204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:07.060906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:07.061198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:07.064729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:07.064910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:07.066100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:07.066174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:07.066307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:07.066364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:07.066404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:07.066480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.073049Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:07.214713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:07.214958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, 
path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.215185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:07.215245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:07.216745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:07.216857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:07.222197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:07.222404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:07.222616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.222673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:07.222747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:07.222790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:07.226021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.226130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:07.226175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:07.229254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.229322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.229403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:07.229464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:07.233205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation 
DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:07.235199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:07.235403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:07.236668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:07.236804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:07.236867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:07.237158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:07.237212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:07.237392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:07.237463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:07.239809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:07.239855Z node 1 :FLAT_TX_SCHEMESHARD ... 
10760, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-06-25T14:51:07.913793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:07.913872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:07.913903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-25T14:51:07.913935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710760:0 128 -> 240 2025-06-25T14:51:07.915011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.915043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-06-25T14:51:07.915092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-25T14:51:07.915110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:51:07.915131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-25T14:51:07.915149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:51:07.915184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-25T14:51:07.915226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:126:2150] message: TxId: 281474976710760 2025-06-25T14:51:07.915252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:51:07.915279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-25T14:51:07.915296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710760:0 2025-06-25T14:51:07.915333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-25T14:51:07.916484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-25T14:51:07.916539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 
281474976710760 2025-06-25T14:51:07.916591Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710760 2025-06-25T14:51:07.916675Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:389:2359], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710760 2025-06-25T14:51:07.917818Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking 2025-06-25T14:51:07.917883Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:389:2359], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:51:07.917926Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-25T14:51:07.919020Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-25T14:51:07.919098Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Done, IsBroken: 0, 
IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:389:2359], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:51:07.919147Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:333: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-25T14:51:07.919238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:51:07.919273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:480:2439] TestWaitNotification: OK eventTxId 102 2025-06-25T14:51:07.919788Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:07.920014Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 239us result status StatusSuccess 2025-06-25T14:51:07.920534Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByValue" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongColumnType [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:07.924618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:07.924690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:07.924718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:07.924746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:07.924785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:07.924810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:07.924848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:07.924901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 
0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:07.925516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:07.925764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:07.996117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:07.996168Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:08.010909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:08.011314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:08.011517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:08.017850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:08.018198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:08.018839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:08.019178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:08.022336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:08.022513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:08.023725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:08.023780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:08.023910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:08.023963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:08.024007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:08.024093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.030932Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:08.153754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:08.153942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.154088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:08.154123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:08.154288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:08.154458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:08.159889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:08.160115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:08.160302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.160368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:08.160431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:08.160478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:08.163253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.163305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:08.163374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:08.164830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.164889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.164950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:08.165012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:08.167618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:08.171359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:08.171557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:08.172565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:08.172719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:08.172782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:08.173062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:08.173121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:08.173298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:08.173395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:08.175381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:08.175445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:08.175639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:08.175686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: 
TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:51:08.176031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.176080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:51:08.176180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:08.176217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:08.176256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:08.176340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:08.176382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:51:08.176427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:08.176469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:51:08.176506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:51:08.176572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:08.176611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:51:08.176643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:51:08.178533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:08.178644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:08.178682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:51:08.178721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:51:08.178760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:08.178846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 
1, subscribers: 0 2025-06-25T14:51:08.181759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:51:08.182249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T14:51:08.183528Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:274:2263] Bootstrap 2025-06-25T14:51:08.204539Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:51:08.207264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "String" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:08.207620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.207732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "String" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" } }, at schemeshard: 72057594046678944 2025-06-25T14:51:08.208131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Unsupported column type, at schemeshard: 72057594046678944 2025-06-25T14:51:08.209184Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:51:08.214358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Unsupported column type" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:08.214617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Unsupported column type, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-25T14:51:08.215081Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ShouldCheckQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:07.801840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:07.801930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:07.801971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:07.802004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:07.802049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:07.802078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:07.802133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:07.802211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:07.803298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:07.803669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:07.884609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:07.884675Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:07.901107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:07.901658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:07.901843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:07.908006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:07.908350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:07.909279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:07.909589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:07.913106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:07.913307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 
2025-06-25T14:51:07.914553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:07.914617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:07.914760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:07.914816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:07.914861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:07.914948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:07.921687Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:08.055699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:08.055947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.056146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:08.056209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:08.056483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:08.056573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:08.058788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:08.059000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:08.059173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:51:08.059238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:08.059333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:08.059373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:08.061245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.061351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:08.061407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:08.063134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.063192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.063269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:08.063327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:08.066991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:08.068931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:08.069098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:08.070040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:08.070184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:08.070248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:08.070551Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:08.070608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:08.070782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:08.070853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:08.072893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:08.072941Z node 1 :FLAT_TX_SCHEMESHARD ... 46678944 2025-06-25T14:51:08.597228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:51:08.597328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:51:08.597368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:51:08.597400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-06-25T14:51:08.597445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:51:08.598261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:51:08.598315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:51:08.598333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:51:08.598360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-25T14:51:08.598386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:51:08.598427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-25T14:51:08.602059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 103 Step: 200 OrderId: 103 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1020 } } CommitVersion { Step: 200 TxId: 103 } 2025-06-25T14:51:08.602119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 103, tablet: 72075186233409549, partId: 0 2025-06-25T14:51:08.602245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 103 Step: 200 OrderId: 103 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1020 } } CommitVersion { Step: 200 TxId: 103 } 2025-06-25T14:51:08.602339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 103 Step: 200 OrderId: 103 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 1020 } } CommitVersion { Step: 200 TxId: 103 } 2025-06-25T14:51:08.603378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 551 RawX2: 4294969789 } Origin: 72075186233409549 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-25T14:51:08.603424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 103, tablet: 72075186233409549, partId: 0 2025-06-25T14:51:08.603536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: Source { RawX1: 551 RawX2: 4294969789 } Origin: 72075186233409549 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-25T14:51:08.603590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:08.603685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 551 RawX2: 4294969789 } Origin: 72075186233409549 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-25T14:51:08.603760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 103:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:08.603797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.603829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, 
operation: 103:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-25T14:51:08.603867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 103:0 129 -> 240 2025-06-25T14:51:08.621605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:51:08.621853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:51:08.628621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.628767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.629071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.629114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-25T14:51:08.629204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:51:08.629244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:51:08.629287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:51:08.629320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:51:08.629350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-25T14:51:08.629415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:410:2374] message: TxId: 103 2025-06-25T14:51:08.629460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:51:08.629493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:51:08.629523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:51:08.629625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:51:08.631235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:51:08.631285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:579:2513] TestWaitNotification: OK eventTxId 103 W0000 00:00:1750863068.631773 451481 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TTableDescription: 9:35: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 104 2025-06-25T14:51:08.634836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/SubDomain" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table4" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 SysSettings { RunInterval: 1799999999 } Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 104 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:08.635171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /MyRoot/SubDomain/Table4, opId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.635289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /MyRoot/SubDomain/Table4, opId: 104:0, schema: Name: "Table4" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 SysSettings { RunInterval: 1799999999 } Tiers { ApplyAfterSeconds: 3600 Delete { } } } }, at schemeshard: 72057594046678944 2025-06-25T14:51:08.635685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 104:1, propose status:StatusSchemeError, reason: TTL run interval cannot be less than limit: 1800, at schemeshard: 72057594046678944 2025-06-25T14:51:08.640648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 104, response: Status: StatusSchemeError Reason: "TTL run interval cannot be less than limit: 1800" TxId: 104 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:08.640928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 104, database: /MyRoot/SubDomain, subject: , status: StatusSchemeError, reason: TTL run interval cannot be less than limit: 1800, operation: CREATE TABLE, path: /MyRoot/SubDomain/Table4 TestModificationResult got TxId: 104, wait until txId: 104 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::PreparedDistributedWritePageFault [GOOD] Test command err: 2025-06-25T14:50:34.833392Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:50:34.833555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:34.833607Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00110a/r3tmp/tmpNJQfeh/pdisk_1.dat 2025-06-25T14:50:35.496239Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:50:35.508530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:50:35.576343Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:35.584024Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863031225113 != 1750863031225117 2025-06-25T14:50:35.643084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:35.643224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:35.657213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:35.774973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:35.858067Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:50:35.859129Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:50:35.859859Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:50:35.860119Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:50:35.910065Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:50:35.912410Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:50:35.912578Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:50:35.914373Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T14:50:35.914465Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:50:35.914527Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:50:35.914954Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:50:35.915094Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:50:35.915207Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:50:35.927458Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:50:36.010107Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:50:36.010300Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:50:36.010422Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:50:36.010480Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:50:36.010538Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:50:36.010579Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:50:36.010783Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:50:36.010837Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:50:36.011217Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:50:36.011314Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:50:36.011401Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:50:36.011438Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:50:36.011491Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:50:36.011523Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:50:36.011556Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:50:36.011607Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:50:36.011663Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:50:36.012042Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:50:36.012083Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:50:36.012124Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:50:36.012213Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:50:36.012263Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:50:36.012436Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:50:36.012675Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:50:36.012754Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:50:36.012858Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:50:36.013008Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14:50:36.013068Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T14:50:36.013124Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T14:50:36.013162Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:50:36.013480Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T14:50:36.013529Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T14:50:36.013573Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T14:50:36.013609Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:50:36.013655Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T14:50:36.013682Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T14:50:36.013714Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T14:50:36.013744Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T14:50:36.013766Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T14:50:36.015101Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T14:50:36.015150Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:50:36.028937Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:50:36.029019Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:50:36.029055Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:50:36.029106Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... Add [3500:1234567890011] at 72075186224037888 to execution unit LoadWriteDetails 2025-06-25T14:51:07.779974Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit LoadTxDetails 2025-06-25T14:51:07.780281Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 1234567890011 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxId: 1234567890011 TxMode: MODE_PREPARE Locks { Op: Commit } 2025-06-25T14:51:07.780515Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:213: Table /Root/table, shard: 72075186224037888, write point (Int32 : 1) 2025-06-25T14:51:07.780584Z node 7 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Int32 : 1) table: [72057594046644480:2:1] 2025-06-25T14:51:07.780674Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:683: LoadWriteDetails at 72075186224037888 loaded writeOp from db 3500:1234567890011 keys extracted: 1 2025-06-25T14:51:07.780721Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-25T14:51:07.780753Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit LoadWriteDetails 2025-06-25T14:51:07.780780Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T14:51:07.780808Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T14:51:07.780874Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:1234567890011] is the new logically complete end at 72075186224037888 2025-06-25T14:51:07.780917Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:1234567890011] is the new logically incomplete end at 72075186224037888 2025-06-25T14:51:07.780961Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:1234567890011] at 72075186224037888 2025-06-25T14:51:07.781005Z node 7 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-25T14:51:07.781029Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T14:51:07.781054Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit BuildWriteOutRS 2025-06-25T14:51:07.781079Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit BuildWriteOutRS 2025-06-25T14:51:07.781131Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-25T14:51:07.781157Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit BuildWriteOutRS 2025-06-25T14:51:07.781185Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit StoreAndSendWriteOutRS 2025-06-25T14:51:07.781210Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit StoreAndSendWriteOutRS 2025-06-25T14:51:07.781235Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-25T14:51:07.781260Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit StoreAndSendWriteOutRS 2025-06-25T14:51:07.781281Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit PrepareWriteTxInRS 2025-06-25T14:51:07.781304Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit PrepareWriteTxInRS 2025-06-25T14:51:07.781333Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-25T14:51:07.781370Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit PrepareWriteTxInRS 2025-06-25T14:51:07.781397Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit LoadAndWaitInRS 2025-06-25T14:51:07.781422Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit LoadAndWaitInRS 2025-06-25T14:51:07.781448Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-25T14:51:07.781475Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit LoadAndWaitInRS 2025-06-25T14:51:07.781496Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit ExecuteWrite 2025-06-25T14:51:07.781519Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit ExecuteWrite 2025-06-25T14:51:07.781551Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [3500:1234567890011] at 72075186224037888 
2025-06-25T14:51:07.781955Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:122: Tablet 72075186224037888 is not ready for [3500:1234567890011] execution 2025-06-25T14:51:07.782074Z node 7 :TX_DATASHARD DEBUG: datashard_write_operation.cpp:454: tx 1234567890011 at 72075186224037888 released its data 2025-06-25T14:51:07.782133Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Restart 2025-06-25T14:51:07.782166Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:51:07.782213Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T14:51:07.782267Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:51:07.782317Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:51:07.782676Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:51:07.782739Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit ExecuteWrite 2025-06-25T14:51:07.782789Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [3500:1234567890011] at 72075186224037888 2025-06-25T14:51:07.783128Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 1234567890011 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxId: 1234567890011 TxMode: MODE_PREPARE Locks { Op: Commit } 2025-06-25T14:51:07.783217Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:213: Table /Root/table, shard: 72075186224037888, write point (Int32 : 1) 2025-06-25T14:51:07.783294Z node 7 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Int32 : 1) table: [72057594046644480:2:1] 2025-06-25T14:51:07.783380Z node 7 :TX_DATASHARD DEBUG: datashard_write_operation.cpp:547: tx 1234567890011 at 72075186224037888 restored its data 2025-06-25T14:51:07.783536Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [3500:1234567890011] at 72075186224037888, row count=1 2025-06-25T14:51:07.783592Z node 7 :TX_DATASHARD TRACE: locks.cpp:194: Lock 1234567890001 marked broken at v{min} 2025-06-25T14:51:07.783691Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-25T14:51:07.783764Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14:51:07.783826Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit ExecuteWrite 2025-06-25T14:51:07.783875Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit CompleteWrite 2025-06-25T14:51:07.783915Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit CompleteWrite 2025-06-25T14:51:07.784124Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status 
for [3500:1234567890011] at 72075186224037888 is DelayComplete 2025-06-25T14:51:07.784161Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit CompleteWrite 2025-06-25T14:51:07.784205Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T14:51:07.784245Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit CompletedOperations 2025-06-25T14:51:07.784295Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-25T14:51:07.784392Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T14:51:07.784438Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:1234567890011] at 72075186224037888 has finished 2025-06-25T14:51:07.784489Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:51:07.784533Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T14:51:07.784588Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:51:07.784634Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:51:07.785182Z node 7 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-25T14:51:07.785525Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:51:07.785583Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:1234567890011] at 72075186224037888 on unit CompleteWrite 2025-06-25T14:51:07.785648Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [3500 : 1234567890011] from 72075186224037888 at tablet 72075186224037888 send result to client [7:754:2612] 2025-06-25T14:51:07.785707Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> TSchemeShardTTLTests::CreateTableShouldFailOnUnspecifiedTTL >> TSchemeShardTTLTests::ConditionalErase >> TSchemeShardTTLTests::AlterTableShouldSuccessOnSimultaneousAddColumnAndEnableTTL >> TSchemeShardTTLTests::BackupCopyHasNoTtlSettings >> KqpJoinOrder::TPCH12_100 [GOOD] >> TSchemeShardTTLTests::CreateTableShouldFailOnUnknownColumn >> TSchemeShardTTLTests::AlterTableShouldFailOnSimultaneousDropColumnAndEnableTTL [GOOD] >> DataShardWrite::WriteCommitVersion [GOOD] |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BuildAsyncIndexShouldSucceed >> KqpSystemView::PartitionStatsFollower [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldFailOnSimultaneousDropColumnAndEnableTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: 
[1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:08.960951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:08.961028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:08.961059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:08.961089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:08.961130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:08.961153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:08.961196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:08.961263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:08.961981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:08.962293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:09.034023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:09.034072Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:09.055298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:09.055746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:09.055915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:09.061978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:09.062285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:09.062865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.063172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:09.066324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot 
DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.066500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:09.067613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:09.067664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.067837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:09.067899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:09.067942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:09.068028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.074365Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:09.191823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:09.192040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.192257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:09.192296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:09.192513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:09.192593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:09.194640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.194844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, 
path: //MyRoot 2025-06-25T14:51:09.195004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.195050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:09.195121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:09.195156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:09.196926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.197033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:09.197074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:09.198454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.198503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.198571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:09.198619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:09.201747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:09.203330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:09.203478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:09.204383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.204516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:09.204573Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:09.204801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:09.204848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:09.205013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:09.205076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:09.206830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:09.206871Z node 1 :FLAT_TX_SCHEMESHARD ... to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-25T14:51:09.427585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 2025-06-25T14:51:09.428123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.428189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 101:0 ProgressState at tablet: 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:51:09.431146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:51:09.431313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:51:09.431359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:51:09.431399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:51:09.431460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:09.432545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:51:09.432640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:51:09.432676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:51:09.432706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:51:09.432745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:09.432822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-25T14:51:09.433608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1233 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:09.433660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:09.433807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1233 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:09.433913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1233 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:09.434511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:09.434563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:09.434678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:09.434740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 101:0 HandleReply 
TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:09.434859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:09.434931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.434985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.435029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:09.435068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 129 -> 240 2025-06-25T14:51:09.437203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:09.438929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:09.439032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.439140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.439458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.439509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:51:09.439615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:09.439656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:09.439701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:09.439735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:09.439772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-25T14:51:09.439844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 101 2025-06-25T14:51:09.439895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:09.439938Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:51:09.439972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:51:09.440154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:51:09.441779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:09.441828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:341:2318] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-25T14:51:09.445125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "TTLEnabledTable" DropColumns { Name: "modified_at" } TTLSettings { Enabled { ColumnName: "modified_at" } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:09.445369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:506: TAlterTable Propose, path: /MyRoot/TTLEnabledTable, pathId: , opId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.445721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Cannot enable TTL on dropped column: 'modified_at', at schemeshard: 72057594046678944 2025-06-25T14:51:09.447829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Cannot enable TTL on dropped column: \'modified_at\'" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:09.448068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Cannot enable TTL on dropped column: 'modified_at', operation: ALTER TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 102, wait until txId: 102 >> KqpJoinOrder::CanonizedJoinOrderTPCH20 [GOOD] >> TSchemeShardTTLTests::CreateTableShouldFailOnUnspecifiedTTL [GOOD] >> TSchemeShardTTLTests::CreateTableShouldFailOnUnknownColumn [GOOD] >> DataShardWrite::UpsertLostPrepareArbiterRestart [GOOD] >> TSchemeShardTTLTests::AlterTableShouldSuccessOnSimultaneousAddColumnAndEnableTTL [GOOD] |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnUnknownColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:09.997352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:09.997446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.997482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:09.997517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:09.997563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:09.997599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:09.997646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.997705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:09.998376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:09.998702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:10.056209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:10.056267Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:10.068507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:10.068836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:10.069006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:10.073694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:10.073935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:10.074452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.074669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:10.077286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:10.077455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:10.078337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:10.078384Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:10.078513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:10.078554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:10.078589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:10.078653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.083584Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:10.201521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:10.201713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.201869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:10.201900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:10.202097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:10.202175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:10.204285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.204463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:10.204597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.204633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-06-25T14:51:10.204686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:10.204716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:10.206436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.206489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:10.206583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:10.208267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.208361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.208422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.208477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:10.217172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:10.218838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:10.218957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:10.219637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.219735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:10.219775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.219965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:10.220001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.220116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:10.220159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:10.221672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:10.221702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:10.221831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:10.221862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:51:10.222086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.222139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:51:10.222222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:10.222248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:10.222272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:10.222303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:10.222326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:51:10.222352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:10.222379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:51:10.222403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:51:10.222444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:10.222469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:51:10.222493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:51:10.223749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle 
TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:10.223817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:10.223841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:51:10.223866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:51:10.223910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:10.224011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:51:10.226463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:51:10.226955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T14:51:10.228083Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:274:2263] Bootstrap 2025-06-25T14:51:10.247662Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:51:10.250221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "created_at" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:10.250513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.250598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "created_at" } }, at schemeshard: 72057594046678944 2025-06-25T14:51:10.250981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Cannot enable TTL on unknown column: 'created_at', at schemeshard: 72057594046678944 2025-06-25T14:51:10.251969Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:51:10.254524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose 
Complete, txId: 101, response: Status: StatusSchemeError Reason: "Cannot enable TTL on unknown column: \'created_at\'" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:10.254752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Cannot enable TTL on unknown column: 'created_at', operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-25T14:51:10.255134Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> TSchemeShardTTLTests::BackupCopyHasNoTtlSettings [GOOD] >> TSchemeShardColumnTableTTL::AlterColumnTable [GOOD] >> ResultFormatter::EmptyResultSet [GOOD] >> ResultFormatter::EmptyList [GOOD] >> ResultFormatter::EmptyTuple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnUnspecifiedTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:09.786631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:09.786702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.786741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:09.786803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:09.786843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:09.786875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:09.786933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.787002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:09.787750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:09.788066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:09.863319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 
2025-06-25T14:51:09.863384Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:09.880890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:09.881275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:09.881472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:09.887192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:09.887498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:09.888202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.888542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:09.891988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.892192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:09.893495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:09.893563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.893704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:09.893759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:09.893804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:09.893892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.901770Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:10.034854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:10.035078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.035291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId 
[OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:10.035339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:10.035532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:10.035620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:10.037539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.037721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:10.037903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.037953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:10.038028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:10.038071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:10.039582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.039623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:10.039691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:10.041006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.041053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.041104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.041146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:10.043588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } 
CoordinatorID: 72057594046316545 2025-06-25T14:51:10.044963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:10.045124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:10.046063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.046181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:10.046223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.046432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:10.046477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.046609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:10.046657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:10.048994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:10.049038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:10.049184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:10.049227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:51:10.049528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.049573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:51:10.049665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:10.049698Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:10.049729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:10.049776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:10.049813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:51:10.049848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:10.049885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:51:10.049918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:51:10.049978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:10.050008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:51:10.050042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:51:10.051611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:10.051698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:10.051728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:51:10.051757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:51:10.051785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:10.051861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:51:10.054677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:51:10.055130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T14:51:10.056204Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:274:2263] Bootstrap 2025-06-25T14:51:10.075818Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] 
Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:51:10.078412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:10.078720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.078812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { }, at schemeshard: 72057594046678944 2025-06-25T14:51:10.079197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: TTL status must be specified, at schemeshard: 72057594046678944 2025-06-25T14:51:10.080155Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:51:10.082776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "TTL status must be specified" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:10.083014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: TTL status must be specified, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-25T14:51:10.083406Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TestJoinHint1+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 63271, MsgBus: 2425 2025-06-25T14:49:31.711691Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899203031324797:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:31.716661Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d57/r3tmp/tmpBFfgcW/pdisk_1.dat 2025-06-25T14:49:32.266846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:32.266922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:32.282057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:32.364716Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: 
Notification cookie mismatch for subscription [1:7519899203031324610:2080] 1750862971626348 != 1750862971626351 2025-06-25T14:49:32.384561Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63271, node 1 2025-06-25T14:49:32.667050Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:32.667072Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:32.667078Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:32.667177Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:32.705824Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2425 TClient is connected to server localhost:2425 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:33.603199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:33.616348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:49:35.730927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899220211194443:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:35.731024Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:35.731271Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899220211194455:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:35.734976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:35.758320Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899220211194457:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:49:35.832008Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899220211194510:2337] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:36.415214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:36.712758Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899203031324797:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:36.712889Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:36.852047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899224506162070:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:36.852251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899224506162070:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:36.852738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899224506161994:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:36.852778Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899224506161994:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:36.856664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899224506161994:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:36.856828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899224506161994:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:36.856938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899224506161994:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:36.857056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899224506161994:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:36.857112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7519899224506162070:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:36.857149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899224506161994:2311];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:36.857476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899224506162070:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:36.857599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899224506162070:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:36.857699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899224506162070:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:36.857793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899224506162070:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:36.857887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899224506162070:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:36.857985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899224506162070:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:36.858116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899224506162070:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:36.858222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899224506162070:2325];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:36.860801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899224506161994:2311];tablet_ ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:59.998452Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:59.998678Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:50:59.999143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:59.999239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.002959Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.003571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.003690Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.004425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.007825Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.008637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.008826Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.012873Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.025128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.025209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-25T14:51:00.031951Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039424;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.032196Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.032642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.032812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.036729Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.036985Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.037348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.037412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.041557Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.042191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.042320Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039409;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.042884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.046902Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.047526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.047690Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.048571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.054506Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.055074Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.055172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.055662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.060576Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.060610Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.061224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.061224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.066177Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.066402Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039401;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.067516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.073736Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715714; 2025-06-25T14:51:00.167075Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks36c1ayf8nbtxw94ezvar", SessionId: 
ydb://session/3?node_id=1&id=Yzc1NjkxZDctNWFmZmE3YmYtMTBkYjFjZC0xYjkzODgyYw==, Slow query, duration: 33.605557s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:51:00.412807Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:51:00.413115Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; 2025-06-25T14:51:00.413423Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715716; >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_ColumnType ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::PartitionStatsFollower [GOOD] Test command err: Trying to start YDB, gRPC: 19736, MsgBus: 21474 2025-06-25T14:50:47.216299Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899528948863870:2180];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:47.224795Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a5e/r3tmp/tmpvjx7yf/pdisk_1.dat 2025-06-25T14:50:47.831115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:47.831187Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:47.867597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:47.896178Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:47.905973Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899528948863728:2080] 1750863047183735 != 1750863047183738 TServer::EnableGrpc on GrpcPort 19736, node 1 2025-06-25T14:50:48.044863Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:48.044892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:48.044905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:48.045011Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: 
got bad distributable configuration 2025-06-25T14:50:48.220932Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21474 TClient is connected to server localhost:21474 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:48.947385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:48.973164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:49.828444Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519899528948864052:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:50:49.828514Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:50:49.828620Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519899528948864052:2146], Recipient [1:7519899528948864052:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:50:49.828638Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:50:50.845499Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519899528948864052:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:50:50.845540Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:50:50.845613Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519899528948864052:2146], Recipient [1:7519899528948864052:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:50:50.845633Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event 
TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:50:50.881096Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899541833766256:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:50.881197Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:51.132288Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519899546128733578:2310], Recipient [1:7519899528948864052:2146]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:50:51.132335Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:50:51.132349Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:50:51.132394Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519899546128733574:2307], Recipient [1:7519899528948864052:2146]: {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-25T14:50:51.132410Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:50:51.206623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Followers" Columns { Name: "Key" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } FollowerGroups { FollowerCount: 3 RequireAllDataCenters: false } } Temporary: false } } TxId: 281474976710658 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:50:51.207011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /Root/Followers, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:50:51.207133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /Root/Followers, opId: 281474976710658:0, schema: Name: "Followers" Columns { Name: "Key" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } FollowerGroups { FollowerCount: 3 RequireAllDataCenters: false } } Temporary: false, at schemeshard: 72057594046644480 2025-06-25T14:50:51.207648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: Followers, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-25T14:50:51.207696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-25T14:50:51.207712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710658:0 type: TxCreateTable target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-25T14:50:51.207746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 
72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:50:51.207829Z node 1 :SYSTEM_VIEWS TRACE: partition_stats.cpp:83: TEvSysView::TEvSetPartitioning: domainKey [OwnerId: 72057594046644480, LocalPathId: 1] pathId [OwnerId: 72057594046644480, LocalPathId: 2] path /Root/Followers ShardIndices size 1 2025-06-25T14:50:51.208342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new path created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:50:51.208357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710658:0 1 -> 2 2025-06-25T14:50:51.209069Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_create_table.cpp:741: TCreateTable Propose creating new table opId# 281474976710658:0 path# /Root/Followers pathId# [OwnerId: 72057594046644480, LocalPathId: 2] schemeshard# 72057594046644480 tx# WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Followers" Columns { Name: "Key" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } FollowerGroups { FollowerCount: 3 RequireAllDataCenters: false } } Temporary: false } FailOnExist: false 2025-06-25T14:50:51.209188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:50:51.209218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:51.209308Z node 1 :FLAT_TX_SCHEM ... 
7:2561] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T14:51:06.314628Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3275: StateWorkAsFollower, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T14:51:06.314710Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [1:7519899546128733658:2301], Recipient [1:7519899528948864052:2146]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 1 Round: 0 TableStats { ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 1 RangeReadRows: 2 LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { } ShardState: 3 NodeId: 1 StartTime: 1750863051298 TableOwnerId: 72057594046644480 FollowerId: 3 2025-06-25T14:51:06.314721Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:51:06.314750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 3 pathId [OwnerId: 72057594046644480, LocalPathId: 2] state 'Readonly' dataSize 0 rowCount 0 cpuUsage 0 2025-06-25T14:51:06.314792Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 3 pathId [OwnerId: 72057594046644480, LocalPathId: 2] raw table stats: ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 1 RangeReadRows: 2 LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:51:06.314814Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099996s, queue# 1 2025-06-25T14:51:06.317838Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3269: StateWorkAsFollower, received event# 2146435079, Sender [0:0:0], Recipient [1:7519899546128733659:2302]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-25T14:51:06.317868Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3285: StateWorkAsFollower, processing event TEvPrivate::TEvPeriodicWakeup 2025-06-25T14:51:06.414395Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [1:7519899528948864052:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:51:06.414428Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:51:06.414446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-25T14:51:06.414495Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-25T14:51:06.414508Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000303s, queue# 1 2025-06-25T14:51:06.414553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046644480:1 data size 0 row count 0 2025-06-25T14:51:06.414599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: 
TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037888 maps to shardIdx: 72057594046644480:1 followerId=3, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], pathId map=Followers, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:06.414608Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037888, followerId 3 2025-06-25T14:51:06.414668Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:51:06.414900Z node 1 :SYSTEM_VIEWS TRACE: partition_stats.cpp:152: TEvSysView::TEvSendPartitionStats: domainKey [OwnerId: 72057594046644480, LocalPathId: 1] pathId [OwnerId: 72057594046644480, LocalPathId: 2] shardIdx 72057594046644480 1 followerId 3 stats DataSize: 0 RowCount: 0 IndexSize: 0 CPUCores: 0 TabletId: 72075186224037888 NodeId: 1 StartTime: 1750863051298 AccessTime: 0 UpdateTime: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 1 RangeReadRows: 2 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 ByKeyFilterSize: 0 FollowerId: 3 LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:51:06.415358Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [1:7519899528948864052:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:51:06.415374Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:51:06.415385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:51:06.869100Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519899528948864052:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:51:06.869147Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:51:06.869199Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519899528948864052:2146], Recipient [1:7519899528948864052:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:51:06.869215Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:51:07.682792Z node 1 :SYSTEM_VIEWS INFO: sysview_service.cpp:886: Navigate by database succeeded: service id# [1:7519899528948863748:2060], database# /Root, no sysview processor 2025-06-25T14:51:07.869589Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519899528948864052:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:51:07.869627Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:51:07.869667Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519899528948864052:2146], Recipient [1:7519899528948864052:2146]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:51:07.869682Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime ... SELECT from partition_stats for /Root/Followers , attempt 1 2025-06-25T14:51:08.869967Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519899528948864052:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:51:08.870010Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:51:08.870051Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519899528948864052:2146], Recipient [1:7519899528948864052:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:51:08.870069Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:51:09.107627Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519899623438145427:2435], owner: [1:7519899623438145424:2433], scan id: 0, sys view info: Type: EPartitionStats SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:51:09.116029Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519899623438145427:2435], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:51:09.116301Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274595843, Sender [1:7519899623438145427:2435], Recipient [1:7519899528948864052:2146]: NKikimrSysView.TEvGetPartitionStats DomainKeyOwnerId: 72057594046644480 DomainKeyPathId: 1 From { } FromInclusive: true To { } ToInclusive: false IncludePathColumn: true 2025-06-25T14:51:09.116395Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5020: StateWork, processing event NSysView::TEvSysView::TEvGetPartitionStats 2025-06-25T14:51:09.116558Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519899623438145427:2435], row count: 2, finished: 1 2025-06-25T14:51:09.116644Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519899623438145427:2435], owner: [1:7519899623438145424:2433], scan id: 0, sys view info: Type: EPartitionStats SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:51:09.118806Z node 1 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [1:7519899528948863748:2060], database# /Root, query hash# 3266603936201095014, cpu time# 252315 SELECT * FROM `/Root/.sys/partition_stats` WHERE FollowerId != 0 AND (RowReads != 0 OR RangeReadRows != 0) AND Path = '/Root/Followers' ... 
SELECT from partition_stats, attempt 0 2025-06-25T14:51:09.459751Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:47: Scan started, actor: [1:7519899623438145449:2445], owner: [1:7519899623438145445:2443], scan id: 0, sys view info: Type: EPartitionStats SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:51:09.461375Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:323: Scan prepared, actor: [1:7519899623438145449:2445], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-25T14:51:09.461605Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274595843, Sender [1:7519899623438145449:2445], Recipient [1:7519899528948864052:2146]: NKikimrSysView.TEvGetPartitionStats DomainKeyOwnerId: 72057594046644480 DomainKeyPathId: 1 From { } FromInclusive: true To { } ToInclusive: false IncludePathColumn: true 2025-06-25T14:51:09.461627Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5020: StateWork, processing event NSysView::TEvSysView::TEvGetPartitionStats 2025-06-25T14:51:09.461760Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:67: Sending scan batch, actor: [1:7519899623438145449:2445], row count: 2, finished: 1 2025-06-25T14:51:09.461830Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:122: Scan finished, actor: [1:7519899623438145449:2445], owner: [1:7519899623438145445:2443], scan id: 0, sys view info: Type: EPartitionStats SourceObject { OwnerId: 72057594046644480 LocalId: 1 } 2025-06-25T14:51:09.464936Z node 1 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [1:7519899528948863748:2060], database# /Root, query hash# 14960494650040056739, cpu time# 324038 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::WriteCommitVersion [GOOD] Test command err: 2025-06-25T14:50:34.970526Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:50:34.970684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:34.970737Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0010e1/r3tmp/tmpgzylIp/pdisk_1.dat 2025-06-25T14:50:35.532679Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:50:35.536121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:50:35.585304Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:35.595719Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863031225476 != 1750863031225480 2025-06-25T14:50:35.649067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:35.649242Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:35.660858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:35.777235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:35.852230Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:50:35.857665Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:50:35.858178Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:50:35.858459Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:50:35.913894Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:50:35.914658Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:50:35.914787Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:50:35.916518Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T14:50:35.916608Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:50:35.916665Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:50:35.917079Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:50:35.917228Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:50:35.917335Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:50:35.929104Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:50:35.973453Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:50:35.973675Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:50:35.973822Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:50:35.973859Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:50:35.973907Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:50:35.973951Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:50:35.974175Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:50:35.974231Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:50:35.974644Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:50:35.974745Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:50:35.974832Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:50:35.974885Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:50:35.974949Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:50:35.974984Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:50:35.975019Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:50:35.975060Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:50:35.975106Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:50:35.975544Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:50:35.975587Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:50:35.975626Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:50:35.975710Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:50:35.975743Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:50:35.975873Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:50:35.976223Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:50:35.976476Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:50:35.976566Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:50:35.976682Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14:50:35.976746Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T14:50:35.976782Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T14:50:35.976815Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:50:35.977137Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T14:50:35.977171Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T14:50:35.977226Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T14:50:35.977267Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:50:35.977316Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T14:50:35.977344Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T14:50:35.977376Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T14:50:35.977415Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T14:50:35.977450Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T14:50:35.978947Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T14:50:35.979026Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:50:35.992952Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:50:35.993032Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:50:35.993065Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:50:35.993127Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 3:281474976715666] at 72075186224037890 on unit BuildAndWaitDependencies 2025-06-25T14:51:09.195392Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1543:281474976715666] is the new logically complete end at 72075186224037890 2025-06-25T14:51:09.195415Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1543:281474976715666] is the new logically incomplete end at 72075186224037890 2025-06-25T14:51:09.195441Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1543:281474976715666] at 72075186224037890 2025-06-25T14:51:09.195469Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1543:281474976715666] at 72075186224037890 is Executed 2025-06-25T14:51:09.195509Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1543:281474976715666] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-25T14:51:09.195536Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1543:281474976715666] at 72075186224037890 to execution unit ExecuteWrite 2025-06-25T14:51:09.195558Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1543:281474976715666] at 72075186224037890 on unit ExecuteWrite 2025-06-25T14:51:09.195585Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [1543:281474976715666] at 72075186224037890 2025-06-25T14:51:09.195625Z node 7 :TX_DATASHARD TRACE: datashard_kqp.cpp:694: Send commit decision from 72075186224037890 to 72075186224037889 2025-06-25T14:51:09.195655Z node 7 :TX_DATASHARD TRACE: datashard_kqp.cpp:725: Will wait for volatile decision from 72075186224037889 to 72075186224037890 2025-06-25T14:51:09.195747Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [1543:281474976715666] at 72075186224037890, row count=1 2025-06-25T14:51:09.195853Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:180: Deleted RS at 72075186224037890 source 72075186224037890 dest 72075186224037888 consumer 72075186224037888 seqno 1 txId 281474976715660 2025-06-25T14:51:09.195891Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:180: Deleted RS at 72075186224037890 source 72075186224037890 dest 72075186224037889 consumer 72075186224037889 seqno 2 txId 281474976715660 2025-06-25T14:51:09.195953Z node 7 :TX_DATASHARD TRACE: 
execute_write_unit.cpp:47: add locks to result: 0 2025-06-25T14:51:09.196002Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1543:281474976715666] at 72075186224037890 is DelayCompleteNoMoreRestarts 2025-06-25T14:51:09.196027Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1543:281474976715666] at 72075186224037890 executing on unit ExecuteWrite 2025-06-25T14:51:09.196050Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1543:281474976715666] at 72075186224037890 to execution unit CompleteWrite 2025-06-25T14:51:09.196075Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1543:281474976715666] at 72075186224037890 on unit CompleteWrite 2025-06-25T14:51:09.196167Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1543:281474976715666] at 72075186224037890 is DelayComplete 2025-06-25T14:51:09.196193Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1543:281474976715666] at 72075186224037890 executing on unit CompleteWrite 2025-06-25T14:51:09.196217Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1543:281474976715666] at 72075186224037890 to execution unit CompletedOperations 2025-06-25T14:51:09.196242Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1543:281474976715666] at 72075186224037890 on unit CompletedOperations 2025-06-25T14:51:09.196285Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1543:281474976715666] at 72075186224037890 is Executed 2025-06-25T14:51:09.196322Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1543:281474976715666] at 72075186224037890 executing on unit CompletedOperations 2025-06-25T14:51:09.196350Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1543:281474976715666] at 72075186224037890 has finished 2025-06-25T14:51:09.196377Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:51:09.196402Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-25T14:51:09.196427Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-25T14:51:09.196448Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-25T14:51:09.197076Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:666:2551], Recipient [7:670:2553]: {TEvReadSet step# 1543 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-25T14:51:09.197125Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T14:51:09.197176Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715666 2025-06-25T14:51:09.197260Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 1543 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 
Seqno# 0 Flags# 7} 2025-06-25T14:51:09.197490Z node 7 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1543} 2025-06-25T14:51:09.197654Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:670:2553], Recipient [7:666:2551]: {TEvReadSet step# 1543 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-25T14:51:09.197703Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T14:51:09.197735Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037890 dest 72075186224037889 producer 72075186224037890 txId 281474976715666 2025-06-25T14:51:09.197781Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 1543 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-25T14:51:09.198063Z node 7 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 1543} 2025-06-25T14:51:09.198457Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:51:09.198511Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1543:281474976715666] at 72075186224037889 on unit ExecuteWrite 2025-06-25T14:51:09.198564Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 3 at 72075186224037889 from 72075186224037889 to 72075186224037890 txId 281474976715666 2025-06-25T14:51:09.198620Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1543:281474976715666] at 72075186224037889 on unit CompleteWrite 2025-06-25T14:51:09.198676Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:51:09.198778Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-25T14:51:09.198918Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:51:09.198953Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1543:281474976715666] at 72075186224037890 on unit ExecuteWrite 2025-06-25T14:51:09.198982Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 3 at 72075186224037890 from 72075186224037890 to 72075186224037889 txId 281474976715666 2025-06-25T14:51:09.199025Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1543:281474976715666] at 72075186224037890 on unit CompleteWrite 2025-06-25T14:51:09.199055Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:51:09.199099Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-25T14:51:09.199207Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:666:2551], Recipient [7:670:2553]: {TEvReadSet step# 1543 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 
2025-06-25T14:51:09.199234Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T14:51:09.199260Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715666 2025-06-25T14:51:09.199320Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 1543 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-25T14:51:09.199446Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:755: Complete volatile write [1543 : 281474976715666] from 72075186224037890 at tablet 72075186224037890 send result to client [7:1031:2791] 2025-06-25T14:51:09.199843Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:51:09.200193Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:670:2553], Recipient [7:666:2551]: {TEvReadSet step# 1543 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-25T14:51:09.200222Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T14:51:09.200267Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037890 dest 72075186224037889 producer 72075186224037890 txId 281474976715666 2025-06-25T14:51:09.200327Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 1543 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-25T14:51:09.200404Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:755: Complete volatile write [1543 : 281474976715666] from 72075186224037889 at tablet 72075186224037889 send result to client [7:1031:2791] 2025-06-25T14:51:09.200878Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:51:09.201924Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::EmptyTuple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSuccessOnSimultaneousAddColumnAndEnableTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:09.897788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:09.897878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: 
BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.897916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:09.897951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:09.897993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:09.898024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:09.898074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.898159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:09.898901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:09.899206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:09.965649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:09.965721Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:09.979302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:09.979671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:09.979863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:09.985071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:09.985361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:09.986014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.986305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:09.989363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.989539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:09.990747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:09.990806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 
2025-06-25T14:51:09.990946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:09.990994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:09.991051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:09.991139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.997207Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:10.116825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:10.117074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.117278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:10.117323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:10.117551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:10.117641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:10.120642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.120817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:10.120974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.121056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:10.121102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards 
to create, do next state 2025-06-25T14:51:10.121130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:10.122436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.122484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:10.122513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:10.123593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.123630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.123673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.123710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:10.126420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:10.127565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:10.127673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:10.128447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.128541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:10.128584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.128780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:10.128817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.128955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:10.129007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:10.130586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:10.130645Z node 1 :FLAT_TX_SCHEMESHARD ... schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:51:10.423067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.423116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:51:10.424637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:10.424747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:10.424785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:51:10.424839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-25T14:51:10.424876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:10.424957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:51:10.425997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1074 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:51:10.426031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:10.426142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1074 } } CommitVersion { Step: 5000003 TxId: 102 } 
2025-06-25T14:51:10.426230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1074 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:51:10.427012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:10.427050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:10.427148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:10.427197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:10.427298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:10.427399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.427445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.427487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:10.427527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:51:10.429546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:10.430124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.430491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.430727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.430765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: 
[72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:51:10.430858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:51:10.430886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:10.430923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:51:10.430953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:10.430987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:51:10.431054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 102 2025-06-25T14:51:10.431148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:10.431184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:51:10.431213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:51:10.431345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:51:10.432887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:51:10.432934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:398:2368] TestWaitNotification: OK eventTxId 102 2025-06-25T14:51:10.433497Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:10.433768Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 242us result status StatusSuccess 2025-06-25T14:51:10.434335Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { 
Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BackupCopyHasNoTtlSettings [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:09.905856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:09.905915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.905946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:09.905974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:09.906003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:09.906025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:09.906057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue 
configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.906109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:09.906732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:09.906967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:09.967276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:09.967323Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:09.985425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:09.985821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:09.985988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:09.991434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:09.991732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:09.992440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.992729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:09.995914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.996088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:09.997323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:09.997386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.997542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:09.997595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:09.997638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:09.997722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.003866Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 
is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:10.117720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:10.117893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.118036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:10.118075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:10.118225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:10.118289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:10.121322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.121494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:10.121661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.121711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:10.121807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:10.121845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:10.124906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.125006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:10.125054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:10.126532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.126579Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.126644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.126692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:10.129862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:10.131901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:10.132094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:10.133038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.133170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:10.133228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.133509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:10.133570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.133722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:10.133790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:10.141075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:10.141119Z node 1 :FLAT_TX_SCHEMESHARD ... 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 415 RawX2: 4294969678 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:10.485186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-25T14:51:10.485312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 415 RawX2: 4294969678 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:10.485378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:10.485486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 415 RawX2: 4294969678 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:10.485553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.485600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1056: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged CollectSchemaChanged: false 2025-06-25T14:51:10.488331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.488790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.500771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:10.500830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:10.500940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:10.500990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:10.501084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 
Step: 0 Generation: 2 2025-06-25T14:51:10.501146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.501179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.501215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:10.501253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:51:10.501285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:51:10.503594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.504057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.504119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 102:0ProgressState, operation type TxCopyTable 2025-06-25T14:51:10.504185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1061: Set barrier, OperationId: 102:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-06-25T14:51:10.504244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 102, done: 0, blocked: 1 2025-06-25T14:51:10.504333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 102:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 102 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-06-25T14:51:10.504368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 240 -> 240 2025-06-25T14:51:10.506172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.506221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:51:10.506322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:51:10.506354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:10.506387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:51:10.506422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:10.506468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:51:10.506568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 102 2025-06-25T14:51:10.506619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:10.506668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:51:10.506696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:51:10.506834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:51:10.506865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:51:10.508532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:51:10.508595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:441:2400] TestWaitNotification: OK eventTxId 102 2025-06-25T14:51:10.509144Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTableCopy" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:10.509382Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTableCopy" took 275us result status StatusSuccess 2025-06-25T14:51:10.509846Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTableCopy" PathDescription { Self { Name: "TTLEnabledTableCopy" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTableCopy" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "ts" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: true IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 
WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTTLTests::BuildAsyncIndexShouldSucceed [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::UpsertLostPrepareArbiterRestart [GOOD] Test command err: 2025-06-25T14:50:34.837659Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:50:34.837808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:34.837870Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001109/r3tmp/tmpqk3dGw/pdisk_1.dat 2025-06-25T14:50:35.497558Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:50:35.510415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:50:35.573463Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:35.584466Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863031227503 != 1750863031227507 2025-06-25T14:50:35.645104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:35.645262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:35.658544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:35.772800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:35.849683Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:50:35.850844Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:50:35.851270Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:50:35.851629Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:50:35.935595Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:50:35.936439Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:50:35.936583Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:50:35.938322Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T14:50:35.938405Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:50:35.938461Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:50:35.938844Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:50:35.938989Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:50:35.939076Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:50:35.952582Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:50:36.002039Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:50:36.002242Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:50:36.002361Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:50:36.002397Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:50:36.002442Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:50:36.002482Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:50:36.002730Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:50:36.002811Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:50:36.003167Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:50:36.003263Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:50:36.003329Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:50:36.003373Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:50:36.003431Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:50:36.003464Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:50:36.003496Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:50:36.003546Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:50:36.003594Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:50:36.003981Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:50:36.004021Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:50:36.004077Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:50:36.004170Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:50:36.004205Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:50:36.004323Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:50:36.004529Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:50:36.004589Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:50:36.004668Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:50:36.004744Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14:50:36.004794Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T14:50:36.004843Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T14:50:36.004881Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:50:36.005174Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T14:50:36.005210Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T14:50:36.005250Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T14:50:36.005282Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:50:36.005323Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T14:50:36.005351Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T14:50:36.005382Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T14:50:36.005409Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T14:50:36.005433Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T14:50:36.006699Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T14:50:36.006749Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:50:36.017298Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:50:36.017370Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:50:36.017403Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:50:36.017553Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... : NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:51:10.005568Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:51:10.005602Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [7:924:2741], serverId# [7:925:2742], sessionId# [0:0:0] 2025-06-25T14:51:10.005664Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553169, Sender [7:923:2740], Recipient [7:679:2558]: NKikimrTxDataShard.TEvGetInfoRequest 2025-06-25T14:51:10.006346Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [7:928:2745], Recipient [7:679:2558]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:51:10.006386Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:51:10.006419Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [7:927:2744], serverId# [7:928:2745], sessionId# [0:0:0] 2025-06-25T14:51:10.006560Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [7:926:2743], Recipient [7:679:2558]: NKikimrTxDataShard.TEvRead ReadId: 1002 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-06-25T14:51:10.006656Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 0 2025-06-25T14:51:10.006688Z node 7 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037890 CompleteEdge# v1001/1000001 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-25T14:51:10.006715Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037890 changed HEAD read to non-repeatable v4000/18446744073709551615 2025-06-25T14:51:10.006756Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit CheckRead 2025-06-25T14:51:10.006810Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-25T14:51:10.006835Z node 7 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit CheckRead 2025-06-25T14:51:10.006858Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit BuildAndWaitDependencies 2025-06-25T14:51:10.006882Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit BuildAndWaitDependencies 2025-06-25T14:51:10.006918Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037890 2025-06-25T14:51:10.006947Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-25T14:51:10.006970Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-25T14:51:10.006994Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit ExecuteRead 2025-06-25T14:51:10.007015Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit ExecuteRead 2025-06-25T14:51:10.007081Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037890 Execute read# 1, request: { ReadId: 1002 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-06-25T14:51:10.007207Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037890 Complete read# {[7:926:2743], 1002} after executionsCount# 1 2025-06-25T14:51:10.007246Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037890 read iterator# {[7:926:2743], 1002} sends rowCount# 0, bytes# 0, quota rows left# 18446744073709551615, quota bytes left# 18446744073709551615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T14:51:10.007295Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037890 read iterator# {[7:926:2743], 1002} finished in read 2025-06-25T14:51:10.007335Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-25T14:51:10.007379Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit ExecuteRead 2025-06-25T14:51:10.007407Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit CompletedOperations 2025-06-25T14:51:10.007432Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit CompletedOperations 2025-06-25T14:51:10.007468Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-25T14:51:10.007492Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit CompletedOperations 2025-06-25T14:51:10.007516Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037890 has finished 2025-06-25T14:51:10.007543Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2025-06-25T14:51:10.007609Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890 2025-06-25T14:51:10.008153Z 
node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [7:931:2748], Recipient [7:676:2556]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:51:10.008187Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:51:10.008220Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [7:930:2747], serverId# [7:931:2748], sessionId# [0:0:0] 2025-06-25T14:51:10.008630Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553169, Sender [7:929:2746], Recipient [7:676:2556]: NKikimrTxDataShard.TEvGetInfoRequest 2025-06-25T14:51:10.009268Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [7:934:2751], Recipient [7:676:2556]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:51:10.009304Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:51:10.009337Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [7:933:2750], serverId# [7:934:2751], sessionId# [0:0:0] 2025-06-25T14:51:10.009475Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [7:932:2749], Recipient [7:676:2556]: NKikimrTxDataShard.TEvRead ReadId: 1003 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-06-25T14:51:10.009580Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037891, FollowerId 0 2025-06-25T14:51:10.009619Z node 7 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037891 CompleteEdge# v1000/281474976715657 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-25T14:51:10.009647Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037891 changed HEAD read to non-repeatable v4000/18446744073709551615 2025-06-25T14:51:10.009685Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit CheckRead 2025-06-25T14:51:10.009738Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-25T14:51:10.009764Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit CheckRead 2025-06-25T14:51:10.009789Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to execution unit BuildAndWaitDependencies 2025-06-25T14:51:10.009814Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit BuildAndWaitDependencies 2025-06-25T14:51:10.009850Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 72075186224037891 2025-06-25T14:51:10.009877Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-25T14:51:10.009901Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit BuildAndWaitDependencies 2025-06-25T14:51:10.009926Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to 
execution unit ExecuteRead 2025-06-25T14:51:10.009950Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit ExecuteRead 2025-06-25T14:51:10.010017Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037891 Execute read# 1, request: { ReadId: 1003 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-06-25T14:51:10.010104Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037891 Complete read# {[7:932:2749], 1003} after executionsCount# 1 2025-06-25T14:51:10.010143Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037891 read iterator# {[7:932:2749], 1003} sends rowCount# 0, bytes# 0, quota rows left# 18446744073709551615, quota bytes left# 18446744073709551615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T14:51:10.010192Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037891 read iterator# {[7:932:2749], 1003} finished in read 2025-06-25T14:51:10.010229Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-25T14:51:10.010254Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit ExecuteRead 2025-06-25T14:51:10.010275Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to execution unit CompletedOperations 2025-06-25T14:51:10.010297Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit CompletedOperations 2025-06-25T14:51:10.010334Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-25T14:51:10.010357Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit CompletedOperations 2025-06-25T14:51:10.010380Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037891 has finished 2025-06-25T14:51:10.010405Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037891 2025-06-25T14:51:10.010467Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037891 >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_ColumnType [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::AlterColumnTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:03.032723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:03.032813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, 
InflightLimit# 10 2025-06-25T14:51:03.032861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:03.032899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:03.032946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:03.032979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:03.033041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.033120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:03.033903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:03.040712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:03.120176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:03.120244Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:03.137087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:03.137485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:03.137643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:03.143401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:03.143709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:03.145938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.147060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:03.153773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.154951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:03.160537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.160601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.160755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:03.160811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:03.160855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:03.160934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.167578Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:03.295014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:03.295254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.295457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:03.295510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:03.295752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:03.295837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:03.297823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.298005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:03.298154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.298224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:03.298320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:03.298359Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:03.300192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.300349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:03.300398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:03.301904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.301959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.302035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.302092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:03.305661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:03.307410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:03.307615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:03.308562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.308713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:03.308775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.309027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:03.309075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.309242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path 
for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:03.309330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:03.311230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.311280Z node 1 :FLAT_TX_SCHEMESHARD ... rd: 72057594046678944 2025-06-25T14:51:10.149989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.150077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.150132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.150185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.152709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.152806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.152899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.152936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-25T14:51:10.153046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:51:10.153096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:51:10.153136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:51:10.153170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:51:10.153202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-25T14:51:10.153287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:2743:3943] message: TxId: 103 2025-06-25T14:51:10.153333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:51:10.153405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:51:10.153436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:51:10.154539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId 
[OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-06-25T14:51:10.156908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:51:10.156955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:3945:5082] TestWaitNotification: OK eventTxId 103 2025-06-25T14:51:10.157518Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:10.157768Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 287us result status StatusSuccess 2025-06-25T14:51:10.158418Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 ColumnTableVersion: 3 ColumnTableSchemaVersion: 1 ColumnTableTtlSettingsVersion: 3 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 64 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "TTLEnabledTable" Schema { Columns { Id: 1 Name: "key" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "modified_at" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 
} Columns { Id: 3 Name: "saved_at" Type: "Datetime" TypeId: 49 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 4 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "modified_at" NextColumnId: 5 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } TtlSettings { Disabled { } Version: 3 } ColumnShardCount: 64 Sharding { ColumnShards: 72075186233409546 ColumnShards: 72075186233409547 ColumnShards: 72075186233409548 ColumnShards: 72075186233409549 ColumnShards: 72075186233409550 ColumnShards: 72075186233409551 ColumnShards: 72075186233409552 ColumnShards: 72075186233409553 ColumnShards: 72075186233409554 ColumnShards: 72075186233409555 ColumnShards: 72075186233409556 ColumnShards: 72075186233409557 ColumnShards: 72075186233409558 ColumnShards: 72075186233409559 ColumnShards: 72075186233409560 ColumnShards: 72075186233409561 ColumnShards: 72075186233409562 ColumnShards: 72075186233409563 ColumnShards: 72075186233409564 ColumnShards: 72075186233409565 ColumnShards: 72075186233409566 ColumnShards: 72075186233409567 ColumnShards: 72075186233409568 ColumnShards: 72075186233409569 ColumnShards: 72075186233409570 ColumnShards: 72075186233409571 ColumnShards: 72075186233409572 ColumnShards: 72075186233409573 ColumnShards: 72075186233409574 ColumnShards: 72075186233409575 ColumnShards: 72075186233409576 ColumnShards: 72075186233409577 ColumnShards: 72075186233409578 ColumnShards: 72075186233409579 ColumnShards: 72075186233409580 ColumnShards: 72075186233409581 ColumnShards: 72075186233409582 ColumnShards: 72075186233409583 ColumnShards: 72075186233409584 ColumnShards: 72075186233409585 ColumnShards: 72075186233409586 ColumnShards: 72075186233409587 ColumnShards: 72075186233409588 ColumnShards: 72075186233409589 ColumnShards: 72075186233409590 ColumnShards: 72075186233409591 ColumnShards: 72075186233409592 ColumnShards: 72075186233409593 ColumnShards: 72075186233409594 ColumnShards: 72075186233409595 ColumnShards: 72075186233409596 ColumnShards: 72075186233409597 ColumnShards: 72075186233409598 ColumnShards: 72075186233409599 ColumnShards: 72075186233409600 ColumnShards: 72075186233409601 ColumnShards: 72075186233409602 ColumnShards: 72075186233409603 ColumnShards: 72075186233409604 ColumnShards: 72075186233409605 ColumnShards: 72075186233409606 ColumnShards: 72075186233409607 ColumnShards: 72075186233409608 ColumnShards: 72075186233409609 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "modified_at" } } StorageConfig { DataChannelCount: 64 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 104 2025-06-25T14:51:10.161513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnTable AlterColumnTable { Name: "TTLEnabledTable" AlterSchema { AlterColumns { Name: "data" DefaultValue: "10" } } } } TxId: 104 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:10.161707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: alter_table.cpp:282: TAlterColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.165919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 104:1, propose status:StatusSchemeError, reason: schema update error: 
sparsed columns are disabled, at schemeshard: 72057594046678944 2025-06-25T14:51:10.168364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 104, response: Status: StatusSchemeError Reason: "schema update error: sparsed columns are disabled" TxId: 104 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:10.168548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 104, database: /MyRoot, subject: , status: StatusSchemeError, reason: schema update error: sparsed columns are disabled, operation: ALTER COLUMN TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-25T14:51:10.168883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-25T14:51:10.168941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-25T14:51:10.169394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-25T14:51:10.169480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:51:10.169528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:4281:5418] TestWaitNotification: OK eventTxId 104 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::BuildAsyncIndexShouldSucceed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:10.399204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:10.399297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:10.399340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:10.399375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:10.399410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:10.399432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:10.399484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:10.399552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:10.400298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:10.400648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:10.466657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:10.466715Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:10.480791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:10.481230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:10.481387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:10.486645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:10.486899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:10.487512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.487903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:10.491025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:10.491189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:10.492183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:10.492249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:10.492415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:10.492481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:10.492531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:10.492618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.498009Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:10.621245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:10.621420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.621560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:10.621594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:10.621756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:10.621828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:10.625195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.625349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:10.625482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.625527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:10.625636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:10.625664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:10.629326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.629403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:10.629436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:10.630601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.630653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-25T14:51:10.630710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.630749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:10.633048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:10.634531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:10.634664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:10.635281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.635382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:10.635428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.635747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:10.635783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.635904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:10.635958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:10.637586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:10.637621Z node 1 :FLAT_TX_SCHEMESHARD ... 
d: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-06-25T14:51:11.179823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:11.179910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:11.179972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-25T14:51:11.180015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710760:0 128 -> 240 2025-06-25T14:51:11.181576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-06-25T14:51:11.181637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-06-25T14:51:11.181727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-25T14:51:11.181752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:51:11.181796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-25T14:51:11.181834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:51:11.181869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-25T14:51:11.181927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:126:2150] message: TxId: 281474976710760 2025-06-25T14:51:11.181968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:51:11.181995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-25T14:51:11.182018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710760:0 2025-06-25T14:51:11.182067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-25T14:51:11.183506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-25T14:51:11.183555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710760 
2025-06-25T14:51:11.183617Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710760 2025-06-25T14:51:11.183693Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:389:2359], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710760 2025-06-25T14:51:11.185052Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking 2025-06-25T14:51:11.185124Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:389:2359], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:51:11.185169Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-25T14:51:11.186524Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-25T14:51:11.186596Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndexByValue, IndexColumn: value, State: Done, IsBroken: 0, 
IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:389:2359], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:51:11.186624Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:333: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-25T14:51:11.186717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:51:11.186753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:480:2439] TestWaitNotification: OK eventTxId 102 2025-06-25T14:51:11.187370Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:11.187639Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 260us result status StatusSuccess 2025-06-25T14:51:11.188142Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByValue" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCH12_100 [GOOD] Test command err: Trying to start YDB, gRPC: 18953, MsgBus: 13143 2025-06-25T14:49:29.047217Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899192992227163:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:29.047518Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d5b/r3tmp/tmpORcPy4/pdisk_1.dat 2025-06-25T14:49:29.810471Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:29.810555Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:29.814057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:29.895535Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899192992227025:2080] 1750862969006525 != 1750862969006528 2025-06-25T14:49:29.911409Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18953, node 1 2025-06-25T14:49:30.088435Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:30.226140Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:30.226157Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:30.226163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:49:30.226267Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13143 TClient is connected to server localhost:13143 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:31.181285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:31.200911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:33.165516Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899210172096859:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:33.165576Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:33.166214Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899210172096871:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:33.170920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:33.196767Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899210172096873:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:33.273778Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899210172096924:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:33.730615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:34.000118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899210172097136:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:34.000286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899210172097136:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:34.008608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899210172097136:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:34.008736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899210172097136:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:34.008822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899210172097136:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:34.008914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899210172097136:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:34.008999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899210172097136:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:34.009093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899210172097136:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:34.009181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899210172097136:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:34.009275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519899210172097136:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:34.009365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899210172097136:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:34.016041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899210172097135:2311];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:34.016090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899210172097135:2311];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:34.016262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899210172097135:2311];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:34.024437Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899210172097135:2311];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:34.024603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899210172097135:2311];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:34.024697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899210172097135:2311];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:34.024782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899210172097135:2311];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:34.024871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899210172097135:2311];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:34.024959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899210172097135:2311];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunk ... 
61643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.667248Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.667460Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.668082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.668249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039342;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.673905Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039342;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.674145Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.674585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039352;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.674669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.683353Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039352;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.684033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.685325Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.685753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039328;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.692679Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.693328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039322;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.696977Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039328;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.697448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.697933Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039322;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.698490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.702598Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.702940Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.703659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039358;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.703939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039344;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.712560Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039358;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.713232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.718061Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039344;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.723093Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.723621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039350;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.724567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.733609Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.734131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.737384Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039350;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.737868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.743697Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.744202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039314;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.747465Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039367;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.754014Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039314;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.755631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.761445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039334;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:56.765363Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.772529Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039334;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:56.975049Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks31xf486gn9n970rq0w1q", SessionId: ydb://session/3?node_id=1&id=Y2E0ODhmYzAtZWMzYWIyNDgtZTg4MWQxZWQtZTk2ZGEzZjU=, Slow query, duration: 34.974111s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:57.263765Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:57.264274Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:57.264673Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519899437805400885:7282];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-25T14:50:57.265092Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_ColumnType [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:11.384257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:11.384348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:11.384389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:11.384416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:11.384449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:11.384473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:11.384507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:11.384557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, 
InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:11.385233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:11.385588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:11.458353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:11.458419Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:11.472391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:11.472711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:11.472852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:11.477424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:11.477686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:11.478176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:11.478419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:11.481240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:11.481383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:11.482372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:11.482424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:11.482535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:11.482585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:11.482627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:11.482702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:11.487823Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:11.609315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:11.609536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:11.609727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:11.609773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:11.609972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:11.610060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:11.612006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:11.612191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:11.612404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:11.612462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:11.612553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:11.612602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:11.614281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:11.614375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:11.614461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:11.616892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:11.616944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:11.617010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:11.617058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:11.620778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:11.623414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:11.623577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:11.624574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:11.624703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:11.624765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:11.625008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:11.625060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:11.625220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:11.625286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:11.627225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:11.627273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:11.627474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:11.627531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: 
TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:51:11.627887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:11.627930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:51:11.628019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:11.628056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:11.628092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:11.628143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:11.628181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:51:11.628235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:11.628284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:51:11.628336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:51:11.628401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:11.628445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:51:11.628479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:51:11.630314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:11.630415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:11.630457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:51:11.630495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:51:11.630534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:11.630698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 
1, subscribers: 0 2025-06-25T14:51:11.633525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:51:11.634039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR W0000 00:00:1750863071.635136 452980 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TColumnTableDescription: 11:43: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 101 2025-06-25T14:51:11.635519Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:274:2263] Bootstrap 2025-06-25T14:51:11.657047Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:51:11.659601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TTLEnabledTable" Schema { Columns { Name: "key" Type: "Uint64" NotNull: true } Columns { Name: "modified_at" Type: "String" } KeyColumnNames: "key" } TtlSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:11.659938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:11.660355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Unsupported column type, at schemeshard: 72057594046678944 2025-06-25T14:51:11.661447Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:51:11.663851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Unsupported column type" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:11.664082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Unsupported column type, operation: CREATE COLUMN TABLE, path: /MyRoot/ 2025-06-25T14:51:11.664546Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 W0000 00:00:1750863071.664998 452980 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TColumnTableDescription: 11:43: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 102 2025-06-25T14:51:11.667425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TTLEnabledTable" Schema { Columns { Name: "key" Type: "Uint64" NotNull: true } Columns { Name: "modified_at" Type: "DyNumber" } KeyColumnNames: "key" } TtlSettings { Enabled { ColumnName: 
"modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:11.667727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:11.667916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: Type 'DyNumber' specified for column 'modified_at' is not supported, at schemeshard: 72057594046678944 2025-06-25T14:51:11.669566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "Type \'DyNumber\' specified for column \'modified_at\' is not supported" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:11.669745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: Type 'DyNumber' specified for column 'modified_at' is not supported, operation: CREATE COLUMN TABLE, path: /MyRoot/ TestModificationResult got TxId: 102, wait until txId: 102 |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableCoveredEmbedding >> TVectorIndexTests::CreateTableMultiColumn >> TAsyncIndexTests::CdcAndSplitWithReboots[PipeResets] >> TAsyncIndexTests::MergeIndexWithReboots[PipeResets] |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TSchemeShardColumnTableTTL::AlterColumnTable_Negative [GOOD] >> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH20 [GOOD] Test command err: Trying to start YDB, gRPC: 14674, MsgBus: 27266 2025-06-25T14:49:27.306587Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899184285241908:2164];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:27.306721Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d62/r3tmp/tmpKhAjBx/pdisk_1.dat 2025-06-25T14:49:27.899337Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:27.904398Z 
node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899184285241775:2080] 1750862967251303 != 1750862967251306 2025-06-25T14:49:27.918789Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:27.918869Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:27.925180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14674, node 1 2025-06-25T14:49:28.152724Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:28.152750Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:28.152756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:28.152852Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:28.256527Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27266 TClient is connected to server localhost:27266 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:29.362671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:29.387155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:31.785268Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899201465111607:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:31.785391Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:31.791843Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899201465111619:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:31.795799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:31.809567Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899201465111621:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:31.884067Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899201465111672:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:32.300480Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899184285241908:2164];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:32.300583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:32.336028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:32.660180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899205760079185:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:32.660446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899205760079185:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:32.660482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899205760079219:2323];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:32.660520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899205760079219:2323];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:32.660712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899205760079185:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:32.660821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899205760079185:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:32.660921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899205760079219:2323];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:32.660925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899205760079185:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:32.661023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[1:7519899205760079219:2323];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:32.661043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899205760079185:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:32.661134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899205760079219:2323];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:32.661149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899205760079185:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:32.661225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899205760079219:2323];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:32.661246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899205760079185:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:32.661323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899205760079219:2323];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:32.661340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899205760079185:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:32.661417Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899205760079219:2323];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:32.661465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899205760079185:2316];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:32.661528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519899205760079219:2323];table ... 
03509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.507292Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039330;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.507550Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.507778Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.508077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.512031Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039293;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.512292Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.512568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039334;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.512839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.516722Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039334;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.517056Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039360;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.517239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.517499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.521297Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.521297Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.521794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.521795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039350;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.525805Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039265;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.525828Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039350;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.526314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.526315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039352;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.530760Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039271;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.530784Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039352;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.531187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.531187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.541478Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.541477Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.541906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.541906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039336;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.547015Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039336;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.547015Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039325;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.547615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.548050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.551602Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039263;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.551968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.556703Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039303;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.557200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.559997Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.560959Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.564650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:54.567862Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:54.799062Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks30tt981ykpqa4avkvrzb", SessionId: ydb://session/3?node_id=1&id=NGZjMzE1ZjctNTM4ZjNlNTItNTM2ZDE5ZmMtYTliNmJhNjU=, Slow query, duration: 33.907928s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:55.185112Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:55.185571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:55.188294Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;self_id=[1:7519899429098413487:6918];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224038933;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224039094; 2025-06-25T14:50:55.188952Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace [GOOD] Test command err: 2025-06-25T14:50:34.833901Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:50:34.838005Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:34.838089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00112b/r3tmp/tmpDKzmW7/pdisk_1.dat 2025-06-25T14:50:35.499724Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:50:35.508460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:50:35.570599Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:35.583751Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863031225339 != 1750863031225343 2025-06-25T14:50:35.646306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:35.646445Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:35.658260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:35.771164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:35.852007Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:50:35.853083Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:50:35.853472Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:50:35.853723Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:50:35.903721Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:50:35.904429Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:50:35.904558Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:50:35.907213Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T14:50:35.907299Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:50:35.907348Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:50:35.910077Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:50:35.910258Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:50:35.910373Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:50:35.921283Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:50:35.959831Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:50:35.962968Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:50:35.963182Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:50:35.963222Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:50:35.963278Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:50:35.963361Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:50:35.963598Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:50:35.963677Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:50:35.966833Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:50:35.966951Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:50:35.967024Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:50:35.967064Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:50:35.967129Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T14:50:35.967168Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T14:50:35.967200Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T14:50:35.967250Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:50:35.967295Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:50:35.969799Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:50:35.969867Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:50:35.969921Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:50:35.970040Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T14:50:35.970081Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T14:50:35.971329Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:50:35.976647Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T14:50:35.977453Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:50:35.977603Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:50:35.977684Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T14:50:35.977752Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T14:50:35.977789Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T14:50:35.977828Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:50:35.978166Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T14:50:35.978205Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T14:50:35.978241Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T14:50:35.978282Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:50:35.978335Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T14:50:35.978363Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T14:50:35.978396Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T14:50:35.978436Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T14:50:35.978469Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T14:50:35.979961Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T14:50:35.980019Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:50:35.992038Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:50:35.992120Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T14:50:35.992155Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T14:50:35.992193Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 1234567890011 ... observed 2 more commits after readset unblock ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR 2025-06-25T14:51:11.518770Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:51:11.518845Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2000:1234567890012] at 72075186224037888 on unit StoreAndSendWriteOutRS 2025-06-25T14:51:11.518899Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 2 at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 1234567890012 2025-06-25T14:51:11.519151Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:51:11.519183Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2000:1234567890012] at 72075186224037888 on unit CompleteWrite 2025-06-25T14:51:11.519243Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2000 : 1234567890012] from 72075186224037888 at tablet 72075186224037888 send result to client [7:799:2651] 2025-06-25T14:51:11.519313Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:51:11.519426Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-25T14:51:11.519612Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:51:11.519642Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1500:1234567890011] at 72075186224037888 on unit CompleteWrite 2025-06-25T14:51:11.519681Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [1500 : 1234567890011] from 72075186224037888 at tablet 72075186224037888 send result to client [7:762:2626] 2025-06-25T14:51:11.519750Z 
node 7 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037888 {TEvReadSet step# 1500 txid# 1234567890011 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-25T14:51:11.519810Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:51:11.519963Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [7:658:2546], Recipient [7:661:2548]: {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-06-25T14:51:11.520024Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T14:51:11.520071Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 1234567890012 2025-06-25T14:51:11.520191Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-06-25T14:51:11.520273Z node 7 :TX_DATASHARD TRACE: operation.cpp:67: Filled readset for [2000:1234567890012] from=72075186224037888 to=72075186224037889origin=72075186224037888 2025-06-25T14:51:11.520378Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-25T14:51:11.520528Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:51:11.520559Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:8] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T14:51:11.520607Z node 7 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 8 at tablet 72075186224037888 send to client, propose latency: 3 ms, status: STATUS_COMPLETED 2025-06-25T14:51:11.520701Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:51:11.520857Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [7:658:2546], Recipient [7:661:2548]: {TEvReadSet step# 1500 txid# 1234567890011 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-25T14:51:11.520896Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T14:51:11.520946Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 1234567890011 2025-06-25T14:51:11.521073Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [7:661:2548], Recipient [7:661:2548]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T14:51:11.521105Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T14:51:11.521183Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 
72075186224037889 2025-06-25T14:51:11.521241Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:51:11.521295Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [2000:1234567890012] at 72075186224037889 for LoadAndWaitInRS 2025-06-25T14:51:11.521335Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit LoadAndWaitInRS 2025-06-25T14:51:11.521384Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is Executed 2025-06-25T14:51:11.521437Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit LoadAndWaitInRS 2025-06-25T14:51:11.521482Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2000:1234567890012] at 72075186224037889 to execution unit ExecuteWrite 2025-06-25T14:51:11.521529Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit ExecuteWrite 2025-06-25T14:51:11.521568Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [2000:1234567890012] at 72075186224037889 2025-06-25T14:51:11.521622Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:390: Operation [2000:1234567890012] at 72075186224037889 aborting because locks are not valid 2025-06-25T14:51:11.521684Z node 7 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=1234567890012; 2025-06-25T14:51:11.521763Z node 7 :TX_DATASHARD INFO: datashard_write_operation.cpp:707: Write transaction 1234567890012 at 72075186224037889 has an error: Operation is aborting because locks are not valid 2025-06-25T14:51:11.521821Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is Executed 2025-06-25T14:51:11.521855Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit ExecuteWrite 2025-06-25T14:51:11.521879Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2000:1234567890012] at 72075186224037889 to execution unit CompleteWrite 2025-06-25T14:51:11.521901Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit CompleteWrite 2025-06-25T14:51:11.522141Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is DelayComplete 2025-06-25T14:51:11.522176Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit CompleteWrite 2025-06-25T14:51:11.522228Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2000:1234567890012] at 72075186224037889 to execution unit CompletedOperations 2025-06-25T14:51:11.522272Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit CompletedOperations 2025-06-25T14:51:11.522308Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is Executed 2025-06-25T14:51:11.522330Z node 7 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit CompletedOperations 2025-06-25T14:51:11.522357Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [2000:1234567890012] at 72075186224037889 has finished 2025-06-25T14:51:11.522403Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:51:11.522441Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-25T14:51:11.522494Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-25T14:51:11.522535Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-25T14:51:11.523055Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:51:11.523085Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2000:1234567890012] at 72075186224037889 on unit CompleteWrite 2025-06-25T14:51:11.523119Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2000 : 1234567890012] from 72075186224037889 at tablet 72075186224037889 send result to client [7:799:2651] 2025-06-25T14:51:11.523165Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037889 {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 2} 2025-06-25T14:51:11.523191Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:51:11.523288Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [7:661:2548], Recipient [7:658:2546]: {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 2} 2025-06-25T14:51:11.523315Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T14:51:11.523352Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 1234567890012 >> TVectorIndexTests::CreateTable |86.7%| [TA] $(B)/ydb/core/tx/datashard/ut_write/test-results/unittest/{meta.json ... results_accumulator.log} |86.7%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_write/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSchemeShardTTLTests::ConditionalErase [GOOD] >> TAsyncIndexTests::MergeMainWithReboots[TabletReboots] >> TAsyncIndexTests::CreateTable |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableMultiColumn [GOOD] >> TVectorIndexTests::CreateTableCoveredEmbedding [GOOD] |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::AlterColumnTable_Negative [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:06.186407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:06.186502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.186543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:06.186576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:06.186630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:06.186658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:06.186717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.186780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:06.187485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:06.187804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:06.268850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:06.268907Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:06.276688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:06.276865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:06.277002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 
72057594046678944 2025-06-25T14:51:06.283347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:06.283551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:06.284108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.284297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:06.286637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:06.286803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:06.287910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:06.287981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:06.288170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:06.288222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:06.288259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:06.288358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.296997Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:51:06.433558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:06.433776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.433986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:06.434049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:06.434249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:06.434311Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:06.436247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.436448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:06.436627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.436675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:06.436722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:06.436760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:06.438434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.438484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:06.438522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:06.440068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.440141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.440186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.440234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:06.443595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:06.445144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:06.445339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:06.446238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.446355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:06.446414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.446644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:06.446692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.446862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:06.446945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:06.448664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:06.448704Z node 1 :FLAT_TX_SCHEMESHARD ... 
5T14:51:12.466346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:12.466388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:12.466441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:12.466492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:12.466581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:12.466618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:12.466657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:12.466703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:12.466768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:12.466842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:12.466873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:51:12.466969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:12.467008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:12.467052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:12.467085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:12.467124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-25T14:51:12.467186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:2749:3949] message: TxId: 101 2025-06-25T14:51:12.467225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:12.467269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:51:12.467312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:51:12.468191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-06-25T14:51:12.471203Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:12.471249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:2750:3950] TestWaitNotification: OK eventTxId 101 2025-06-25T14:51:12.471747Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:12.471995Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 260us result status StatusSuccess 2025-06-25T14:51:12.472648Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 64 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "TTLEnabledTable" Schema { Columns { Id: 1 Name: "key" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "modified_at" Type: "Timestamp" TypeId: 50 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 3 Name: "str" Type: "String" TypeId: 4097 NotNull: false StorageId: "" DefaultValue { } 
ColumnFamilyId: 0 } KeyColumnNames: "key" NextColumnId: 4 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } ColumnShardCount: 64 Sharding { ColumnShards: 72075186233409546 ColumnShards: 72075186233409547 ColumnShards: 72075186233409548 ColumnShards: 72075186233409549 ColumnShards: 72075186233409550 ColumnShards: 72075186233409551 ColumnShards: 72075186233409552 ColumnShards: 72075186233409553 ColumnShards: 72075186233409554 ColumnShards: 72075186233409555 ColumnShards: 72075186233409556 ColumnShards: 72075186233409557 ColumnShards: 72075186233409558 ColumnShards: 72075186233409559 ColumnShards: 72075186233409560 ColumnShards: 72075186233409561 ColumnShards: 72075186233409562 ColumnShards: 72075186233409563 ColumnShards: 72075186233409564 ColumnShards: 72075186233409565 ColumnShards: 72075186233409566 ColumnShards: 72075186233409567 ColumnShards: 72075186233409568 ColumnShards: 72075186233409569 ColumnShards: 72075186233409570 ColumnShards: 72075186233409571 ColumnShards: 72075186233409572 ColumnShards: 72075186233409573 ColumnShards: 72075186233409574 ColumnShards: 72075186233409575 ColumnShards: 72075186233409576 ColumnShards: 72075186233409577 ColumnShards: 72075186233409578 ColumnShards: 72075186233409579 ColumnShards: 72075186233409580 ColumnShards: 72075186233409581 ColumnShards: 72075186233409582 ColumnShards: 72075186233409583 ColumnShards: 72075186233409584 ColumnShards: 72075186233409585 ColumnShards: 72075186233409586 ColumnShards: 72075186233409587 ColumnShards: 72075186233409588 ColumnShards: 72075186233409589 ColumnShards: 72075186233409590 ColumnShards: 72075186233409591 ColumnShards: 72075186233409592 ColumnShards: 72075186233409593 ColumnShards: 72075186233409594 ColumnShards: 72075186233409595 ColumnShards: 72075186233409596 ColumnShards: 72075186233409597 ColumnShards: 72075186233409598 ColumnShards: 72075186233409599 ColumnShards: 72075186233409600 ColumnShards: 72075186233409601 ColumnShards: 72075186233409602 ColumnShards: 72075186233409603 ColumnShards: 72075186233409604 ColumnShards: 72075186233409605 ColumnShards: 72075186233409606 ColumnShards: 72075186233409607 ColumnShards: 72075186233409608 ColumnShards: 72075186233409609 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "key" } } StorageConfig { DataChannelCount: 64 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR W0000 00:00:1750863072.473367 450764 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TAlterColumnTable: 6:35: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 102 2025-06-25T14:51:12.475395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnTable AlterColumnTable { Name: "TTLEnabledTable" AlterTtlSettings { Enabled { ColumnName: "str" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:12.475597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: alter_table.cpp:282: TAlterColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:12.476082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose 
status:StatusSchemeError, reason: ttl update error: Unsupported column type. in alter constructor STANDALONE_UPDATE, at schemeshard: 72057594046678944 2025-06-25T14:51:12.478402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "ttl update error: Unsupported column type. in alter constructor STANDALONE_UPDATE" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:12.478572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: ttl update error: Unsupported column type. in alter constructor STANDALONE_UPDATE, operation: ALTER COLUMN TABLE, path: /MyRoot/TTLEnabledTable TestModificationResult got TxId: 102, wait until txId: 102 |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::OnlineBuild >> TAsyncIndexTests::CdcAndMergeWithReboots[TabletReboots] |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TUniqueIndexTests::CreateTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ConditionalErase [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:09.903624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:09.903707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.903739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:09.903767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:09.903806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:09.903828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:09.903889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.903955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:09.904665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:09.904947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:09.973615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:09.973673Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:09.987950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:09.988294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:09.988472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:09.993164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:09.993420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:09.993983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.994228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:09.997098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.997235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:09.998211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:09.998260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.998374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:09.998423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:09.998458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:09.998521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.003734Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:10.121929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: 
"pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:10.122130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.122283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:10.122323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:10.122534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:10.122613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:10.124501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.124661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:10.124799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.124842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:10.124890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:10.124926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:10.126337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.126374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:10.126428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:10.127501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.127545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:10.127599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 
1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.127647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:10.135167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:10.136646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:10.136796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:10.137424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:10.137511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:10.137560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.137744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:10.137779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:10.137907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:10.137963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:10.139657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:10.139698Z node 1 :FLAT_TX_SCHEMESHARD ... 
amp: 1600463040214000 ColumnUnit: UNIT_AUTO } SchemaVersion: 1 Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-06-25T14:51:13.466844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409551, request: TableId: 7 Expiration { ColumnId: 2 WallClockTimestamp: 1600466640214000 ColumnUnit: UNIT_MICROSECONDS } SchemaVersion: 1 Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-06-25T14:51:13.467363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6744: Conditional erase accepted: tabletId: 72075186233409550, at schemeshard: 72057594046678944 2025-06-25T14:51:13.467759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6744: Conditional erase accepted: tabletId: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:51:13.468470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6744: Conditional erase accepted: tabletId: 72075186233409548, at schemeshard: 72057594046678944 2025-06-25T14:51:13.468634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6744: Conditional erase accepted: tabletId: 72075186233409549, at schemeshard: 72057594046678944 2025-06-25T14:51:13.468773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6744: Conditional erase accepted: tabletId: 72075186233409551, at schemeshard: 72057594046678944 2025-06-25T14:51:13.468875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6744: Conditional erase accepted: tabletId: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:13.468946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-25T14:51:13.468982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:51:13.472988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-25T14:51:13.473029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409550, at schemeshard: 72057594046678944 2025-06-25T14:51:13.474028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-25T14:51:13.474060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409549, at schemeshard: 72057594046678944 2025-06-25T14:51:13.475212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-25T14:51:13.475241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409548, at schemeshard: 72057594046678944 2025-06-25T14:51:13.475772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-25T14:51:13.475801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:13.476034Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-25T14:51:13.476168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T14:51:13.476247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:2, run at: 2020-09-18T23:04:00.214000Z, at schemeshard: 72057594046678944 2025-06-25T14:51:13.477993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-25T14:51:13.478073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T14:51:13.478108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:5, run at: 2020-09-18T23:04:00.215000Z, at schemeshard: 72057594046678944 2025-06-25T14:51:13.478159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-25T14:51:13.478211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-25T14:51:13.478296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T14:51:13.478319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:4, run at: 2020-09-18T23:04:00.215000Z, at schemeshard: 72057594046678944 2025-06-25T14:51:13.478355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-25T14:51:13.478381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-25T14:51:13.478411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-25T14:51:13.478425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-25T14:51:13.478450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T14:51:13.478467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:3, run at: 2020-09-18T23:04:00.216000Z, at schemeshard: 72057594046678944 2025-06-25T14:51:13.478501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-25T14:51:13.478540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T14:51:13.478558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:1, run at: 2020-09-18T23:04:00.216000Z, at schemeshard: 72057594046678944 2025-06-25T14:51:13.478575Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-25T14:51:13.545689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 5 2025-06-25T14:51:13.545849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-25T14:51:13.545943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable1, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:13.546052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-25T14:51:13.546105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-25T14:51:13.546142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], pathId map=TTLEnabledTable2, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:13.546178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-25T14:51:13.546220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046678944:4 data size 43 row count 1 2025-06-25T14:51:13.546261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409549 maps to shardIdx: 72057594046678944:4 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], pathId map=TTLEnabledTable4, is column=0, is olap=0, RowCount 1, DataSize 43 2025-06-25T14:51:13.546318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409549 2025-06-25T14:51:13.546352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:3 data size 603 row count 2 2025-06-25T14:51:13.546389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409548 maps to shardIdx: 72057594046678944:3 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTable3, is column=0, is olap=0, RowCount 2, DataSize 603 2025-06-25T14:51:13.546428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409548 2025-06-25T14:51:13.546455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 6 shard idx 72057594046678944:5 data size 627 row count 2 2025-06-25T14:51:13.546518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409550 maps to shardIdx: 72057594046678944:5 followerId=0, pathId: [OwnerId: 72057594046678944, 
LocalPathId: 6], pathId map=TTLEnabledTable5, is column=0, is olap=0, RowCount 2, DataSize 627, with borrowed parts 2025-06-25T14:51:13.546575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409550 2025-06-25T14:51:13.559039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-25T14:51:13.559097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409551, at schemeshard: 72057594046678944 2025-06-25T14:51:13.561048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-25T14:51:13.561177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T14:51:13.561224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:6, run at: 2020-09-18T23:04:00.218000Z, at schemeshard: 72057594046678944 2025-06-25T14:51:13.561281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 |86.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableCoveredEmbedding [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:13.305083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:13.305200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:13.305248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:13.305279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:13.305343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:13.305366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:13.305422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:13.305478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:13.306052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:13.307378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:13.380893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:13.380965Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:13.407906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:13.408217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:13.408362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:13.413557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:13.413854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:13.414488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.414702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:13.417888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:13.418049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:13.419087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:13.419144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:13.419290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:13.419345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:13.419398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:13.419471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.425396Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:13.549806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: 
"pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:13.550007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.550224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:13.550276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:13.550481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:13.550563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:13.553208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.553377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:13.553562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.553664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:13.553711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:13.553749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:13.555886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.555944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:13.555983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:13.557549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.557591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.557640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 
1:0, at tablet# 72057594046678944 2025-06-25T14:51:13.557691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:13.567546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:13.569725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:13.569934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:13.570658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.570782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:13.570822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:13.571014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:13.571051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:13.571203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:13.571274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:13.573522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:13.573571Z node 1 :FLAT_TX_SCHEMESHARD ... 
: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "idx_vector" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "embedding" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataColumnNames: "embedding" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { 
Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:13.922801Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/idx_vector/indexImplLevelTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:51:13.923003Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/idx_vector/indexImplLevelTable" took 208us result status StatusSuccess 2025-06-25T14:51:13.923432Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/idx_vector/indexImplLevelTable" PathDescription { Self { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplLevelTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "__ydb_id" Type: "Uint64" TypeId: 4 Id: 2 NotNull: true IsBuildInProgress: false } Columns { Name: "__ydb_centroid" Type: "String" TypeId: 4097 Id: 3 NotNull: true IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "__ydb_id" KeyColumnIds: 1 KeyColumnIds: 2 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 
InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:13.924039Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/idx_vector/indexImplPostingTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:51:13.924305Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/idx_vector/indexImplPostingTable" took 226us result status StatusSuccess 2025-06-25T14:51:13.924836Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/idx_vector/indexImplPostingTable" PathDescription { Self { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplPostingTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "id" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "embedding" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "id" KeyColumnIds: 1 KeyColumnIds: 2 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTableMultiColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:13.308982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:13.309081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:13.309131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:13.309168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:13.309221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:13.309259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:13.309327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:13.309407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:51:13.310167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:13.310498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:13.384170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:13.384250Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:13.407911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:13.408420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:13.408602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:13.414617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:13.414916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:13.415529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.415754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:13.419002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:13.419153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:13.420166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:13.420237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:13.420404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:13.420467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:13.420523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:13.420599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.426641Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:13.538008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:13.538175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.538313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:13.538350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:13.538497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:13.538549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:13.540757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.540925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:13.541098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.541266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:13.541300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:13.541326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:13.543115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.543174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:13.543205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:13.544597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.544633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.544681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:13.544730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:13.547319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:13.548658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:13.548811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:13.549687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.549802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:13.549842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:13.550078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:13.550120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:13.550244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:13.550367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:13.551937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:13.551981Z node 1 :FLAT_TX_SCHEMESHARD ... 
nNames: "covered1" DataColumnNames: "covered2" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 
ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:13.921105Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/idx_vector/indexImplLevelTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:51:13.921352Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/idx_vector/indexImplLevelTable" took 249us result status StatusSuccess 2025-06-25T14:51:13.922055Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/idx_vector/indexImplLevelTable" PathDescription { Self { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplLevelTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "__ydb_id" Type: "Uint64" TypeId: 4 Id: 2 NotNull: true IsBuildInProgress: false } Columns { Name: "__ydb_centroid" Type: "String" TypeId: 4097 Id: 3 NotNull: true IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "__ydb_id" KeyColumnIds: 1 KeyColumnIds: 2 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 
Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:13.922807Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/idx_vector/indexImplPostingTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:51:13.923025Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/idx_vector/indexImplPostingTable" took 248us result status StatusSuccess 2025-06-25T14:51:13.923615Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/idx_vector/indexImplPostingTable" PathDescription { Self { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplPostingTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "id1" Type: "String" TypeId: 4097 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "id2" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "covered1" Type: "String" TypeId: 4097 Id: 4 NotNull: false IsBuildInProgress: false } Columns { Name: "covered2" Type: "String" TypeId: 4097 Id: 5 NotNull: false IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "id1" KeyColumnNames: "id2" KeyColumnIds: 1 KeyColumnIds: 2 KeyColumnIds: 3 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 
StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TAsyncIndexTests::SplitMainWithReboots[TabletReboots] >> KqpIndexLookupJoin::CheckAllKeyTypesCast [GOOD] >> TAsyncIndexTests::MergeMainWithReboots[PipeResets] >> TVectorIndexTests::CreateTable [GOOD] >> TAsyncIndexTests::MergeBothWithReboots[PipeResets] >> TAsyncIndexTests::CreateTable [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:13.935493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:13.935595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:13.935643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:13.935684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:13.935728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:13.935781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:13.935852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, 
InflightLimit# 10 2025-06-25T14:51:13.935933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:13.936694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:13.937023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:14.013251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:14.013314Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:14.029499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:14.029869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:14.030049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:14.048871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:14.049208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:14.049810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.050006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:14.065296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:14.065477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:14.066886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:14.066942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:14.067055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:14.067095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:14.067139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:14.067206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.077183Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 
2025-06-25T14:51:14.226649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:14.226872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.227063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:14.227111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:14.227316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:14.227388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:14.234621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.234811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:14.235026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.235110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:14.235151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:14.235185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:14.237870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.237931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:14.237976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:14.239873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.239940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.239989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.240035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:14.243447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:14.245325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:14.245507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:14.246366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.246509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:14.246584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.246845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:14.246900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.247063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:14.247161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:14.249234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:14.249283Z node 1 :FLAT_TX_SCHEMESHARD ... 
chemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:14.705565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:51:14.705704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:14.705777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:14.705811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:51:14.706054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:14.706109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:14.706132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:51:14.706154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-25T14:51:14.706177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:51:14.707047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:14.707117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:14.707139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:51:14.707164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-25T14:51:14.707188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 
2025-06-25T14:51:14.707239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/4, is published: true 2025-06-25T14:51:14.710441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:3, at schemeshard: 72057594046678944 2025-06-25T14:51:14.710492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:3 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:14.710789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-25T14:51:14.710921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:3 progress is 2/4 2025-06-25T14:51:14.710957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 2/4 2025-06-25T14:51:14.710988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:3 progress is 2/4 2025-06-25T14:51:14.711032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 2/4 2025-06-25T14:51:14.711071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/4, is published: true 2025-06-25T14:51:14.711801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-25T14:51:14.711839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:2 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:14.712029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:51:14.712110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:2 progress is 3/4 2025-06-25T14:51:14.712132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 3/4 2025-06-25T14:51:14.712153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:2 progress is 3/4 2025-06-25T14:51:14.712216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 3/4 2025-06-25T14:51:14.712243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/4, is published: true 2025-06-25T14:51:14.712527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:14.712807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.712838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:14.713003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: 
DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:14.713086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 4/4 2025-06-25T14:51:14.713107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-25T14:51:14.713130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 4/4 2025-06-25T14:51:14.713155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-25T14:51:14.713187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 4/4, is published: true 2025-06-25T14:51:14.713239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:420:2375] message: TxId: 102 2025-06-25T14:51:14.713283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-25T14:51:14.713322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:51:14.713348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:51:14.713431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:51:14.713465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:1 2025-06-25T14:51:14.713498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:1 2025-06-25T14:51:14.713546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:51:14.713571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:2 2025-06-25T14:51:14.713587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:2 2025-06-25T14:51:14.713648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:51:14.713686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:3 2025-06-25T14:51:14.713707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:3 2025-06-25T14:51:14.713741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-25T14:51:14.713993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:14.714040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, 
cookie: 102 2025-06-25T14:51:14.714173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:14.714197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:14.714219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:14.714290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:14.715588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:14.717274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:51:14.717314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:537:2485] TestWaitNotification: OK eventTxId 102 |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitIndexWithReboots[PipeResets] >> TUniqueIndexTests::CreateTable [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::CreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:14.285663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:14.285754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:14.285800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:14.285832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:14.285870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:14.285936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:14.285991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-25T14:51:14.286054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:14.286776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:14.287107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:14.367600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:14.367670Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:14.386906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:14.387286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:14.387438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:14.392485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:14.392753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:14.393366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.393565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:14.396622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:14.396807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:14.397830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:14.397884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:14.398014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:14.398062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:14.398105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:14.398173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.404107Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 
2025-06-25T14:51:14.511619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:14.511842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.512026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:14.512076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:14.512386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:14.512462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:14.514714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.514881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:14.515041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.515114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:14.515157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:14.515195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:14.517131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.517193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:14.517236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:14.518760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.518802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.518857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.518899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:14.522154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:14.523894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:14.524075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:14.524954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.525100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:14.525164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.525440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:14.525493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.525662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:14.525782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:14.527701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:14.527745Z node 1 :FLAT_TX_SCHEMESHARD ... 
2075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.826055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.826092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:51:14.826126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 129 -> 240 2025-06-25T14:51:14.830751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:14.830947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:14.831036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:14.836217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:14.836463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:14.836790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.837376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:14.837644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.838043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:14.838101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:2 ProgressState 2025-06-25T14:51:14.838210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:2 progress is 2/3 2025-06-25T14:51:14.838250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-25T14:51:14.838289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:2 progress is 2/3 2025-06-25T14:51:14.838323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-25T14:51:14.838356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-06-25T14:51:14.838807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.838858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: 
[72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:51:14.838916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 3/3 2025-06-25T14:51:14.838939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:14.838966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 3/3 2025-06-25T14:51:14.838999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:14.839023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-06-25T14:51:14.839086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:385:2351] message: TxId: 101 2025-06-25T14:51:14.839130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:14.839171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:51:14.839202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:51:14.839356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:14.839404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:1 2025-06-25T14:51:14.839427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:1 2025-06-25T14:51:14.839462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:51:14.839489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:2 2025-06-25T14:51:14.839508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:2 2025-06-25T14:51:14.839555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:51:14.842327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:14.842378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:386:2352] TestWaitNotification: OK eventTxId 101 2025-06-25T14:51:14.842815Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:51:14.843043Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex" took 246us result status StatusSuccess 
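
The DescribeScheme result that follows reports a global async secondary index named UserDefinedIndex over the column "indexed" on /MyRoot/Table (Type: EIndexTypeGlobalAsync). As a rough illustration only, a YQL statement producing a schema of this shape is sketched below; the primary-key column and the column types are assumptions, and only the table path, index name, and indexed column name come from the log.

-- Hypothetical sketch, not taken from the test source: a table with a
-- global async secondary index matching the shape of the describe output below.
CREATE TABLE `/MyRoot/Table` (
    key Uint64,        -- assumed primary key column (not named in the log)
    indexed Uint64,    -- indexed column named in the describe output; type assumed
    PRIMARY KEY (key),
    INDEX UserDefinedIndex GLOBAL ASYNC ON (indexed)
);
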
2025-06-25T14:51:14.843905Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex" PathDescription { Self { Name: "UserDefinedIndex" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "UserDefinedIndex" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 
110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TAsyncIndexTests::OnlineBuild [GOOD] >> TAsyncIndexTests::Decimal |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTablePrefix |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeBothWithReboots[TabletReboots] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpIndexLookupJoin::CheckAllKeyTypesCast [GOOD] Test command err: Trying to start YDB, gRPC: 6017, MsgBus: 2770 2025-06-25T14:48:01.048390Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898813753041209:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:01.050003Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000db6/r3tmp/tmp6n21eh/pdisk_1.dat 2025-06-25T14:48:01.629895Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:48:01.629979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:48:01.639245Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:48:01.678786Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:48:01.708433Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898809458073798:2080] 1750862880969386 != 1750862880969389 TServer::EnableGrpc on GrpcPort 6017, node 1 2025-06-25T14:48:01.960880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:48:01.960901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:48:01.960913Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:48:01.961028Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:48:02.048504Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2770 TClient is connected to server localhost:2770 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:48:02.874702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:48:02.907969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:03.159798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:03.466415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:48:03.597132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
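
Further down, this test's compile errors repeat the same diagnostic, "Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional)", raised by EquiJoin during type annotation. A minimal hypothetical reproducer for that class of error is sketched here; the table paths, column names, and types are invented for illustration and are not the queries used by KqpIndexLookupJoin::CheckAllKeyTypesCast itself.

-- Hypothetical sketch of a join that EquiJoin rejects because the join-key
-- columns have incomparable types; not the test's own query.
CREATE TABLE `/Root/Left`  (Key Int32, Value Utf8, PRIMARY KEY (Key));
CREATE TABLE `/Root/Right` (Key Utf8,  Value Utf8, PRIMARY KEY (Key));

SELECT l.Value, r.Value
FROM `/Root/Left`  AS l
JOIN `/Root/Right` AS r ON l.Key = r.Key;  -- Optional<Int32> vs Optional<Utf8>: no implicit cast

A join of this form fails at compile time with GENERIC_ERROR rather than at execution, which appears to be what the test exercises across many key-type combinations.
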
2025-06-25T14:48:05.736505Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898830932911923:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:05.736647Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:06.040416Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898813753041209:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:06.040509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:48:06.226583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.268819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.302742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.374237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.426728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.516064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.585279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:48:06.676004Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519898835227879883:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:06.676090Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:06.676445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898835227879888:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:48:06.681433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:48:06.696438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:48:06.696760Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898835227879891:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:48:06.800762Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898835227879943:3430] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:48:08.045912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work ... GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:36: Error: At function: EquiJoin
:3:36: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:50:33.131247Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks3cr83t13t7zkawrtbm4j, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:50:33.197972Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899469473435033:5991], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:36: Error: At function: EquiJoin
:3:36: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:50:33.200545Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks3csw3x1esrjqch0p2nbc, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:50:41.413119Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899503833173865:6145], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:36: Error: At function: EquiJoin
:3:36: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:50:41.416246Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks3mv64xh5zrkt4mfcsvsd, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:50:45.543021Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899521013043308:6225], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:36: Error: At function: EquiJoin
:3:36: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:50:45.544557Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks3rv92e70f4xvrq1205s4, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:50:45.578737Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899521013043324:6232], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:36: Error: At function: EquiJoin
:3:36: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:50:45.581886Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks3rxh5sc4xen2y5w3et1v, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:50:45.627382Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899521013043337:6238], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:36: Error: At function: EquiJoin
:3:36: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:50:45.628414Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks3ryn3p8naqm52en07ct4, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:50:53.661687Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899555372782192:6394], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:50:53.665767Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks40syf3r1bmnq440ndmsn, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:50:53.713779Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899555372782205:6400], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:50:53.716555Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks40vgam9nj2gmmq6mz6rw, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:50:57.781759Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899572552651628:6480], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:50:57.784727Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks44ts0jh4hf098jzywpv2, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:50:57.824038Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899572552651641:6486], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:50:57.827617Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks44w35n2jfzr4a59cdry1, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:51:04.368129Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899602617423209:6636], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:51:04.368369Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks4b8d1kc6tymwvn58yz9f, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:51:07.601507Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899615502325323:6713], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:51:07.601792Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks4edeazkb6108hwa3d8vn, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:51:07.642804Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899615502325338:6720], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:51:07.643158Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks4ef06mksn953x9982en1, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:51:07.681422Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899615502325351:6726], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:3:34: Error: At function: EquiJoin
:3:34: Error: Cannot compare key columns (l.Key has type: Optional, r.Key has type: Optional) 2025-06-25T14:51:07.681858Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWFjMzE4MDYtN2U1MDRkMTAtMzUyZDEyNDUtZWUwMTMyNDA=, ActorId: [2:7519898885357871668:2476], ActorState: ExecuteState, TraceId: 01jyks4eg59tdbd0ssf17j5p41, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TUniqueIndexTests::CreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:14.735723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:14.735814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:14.735857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:14.735889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:14.735931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:14.735985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:14.736044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:14.736100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:14.736823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:14.737140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:14.814539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:14.814591Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:14.832849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:14.833265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:14.833429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:14.839110Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:14.839386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:14.839958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.840196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:14.843321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:14.843486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:14.844576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:14.844627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:14.844783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:14.844827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:14.844877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:14.844946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.852138Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:14.974351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:14.974587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.974787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:14.974839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:14.975062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:14.975138Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:14.977205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.977379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:14.977565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.977638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:14.977698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:14.977736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:14.979454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.979517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:14.979557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:14.981291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.981362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.981421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.981466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:14.991277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:14.993318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:14.993478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at 
step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:14.994259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.994385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:14.994433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.994661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:14.994714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.994883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:14.994998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:14.996833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:14.996864Z node 1 :FLAT_TX_SCHEMESHARD ... 
2075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.280968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.280999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:51:15.281039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 129 -> 240 2025-06-25T14:51:15.283444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:15.283525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:15.283579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:15.285941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:15.286082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:15.286170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.286274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:15.286579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.286745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:15.286782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:2 ProgressState 2025-06-25T14:51:15.286905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:2 progress is 2/3 2025-06-25T14:51:15.286939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-25T14:51:15.286978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:2 progress is 2/3 2025-06-25T14:51:15.287012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-25T14:51:15.287048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-06-25T14:51:15.287278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.287311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: 
[72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:51:15.287364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 3/3 2025-06-25T14:51:15.287385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:15.287413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 3/3 2025-06-25T14:51:15.287436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:15.287462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-06-25T14:51:15.287523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:385:2351] message: TxId: 101 2025-06-25T14:51:15.287583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:15.287626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:51:15.287658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:51:15.287807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:15.287849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:1 2025-06-25T14:51:15.287870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:1 2025-06-25T14:51:15.287908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:51:15.287934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:2 2025-06-25T14:51:15.287952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:2 2025-06-25T14:51:15.287996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:51:15.290056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:15.290103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:386:2352] TestWaitNotification: OK eventTxId 101 2025-06-25T14:51:15.290603Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:51:15.290937Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex" took 335us result status StatusSuccess 
2025-06-25T14:51:15.291762Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex" PathDescription { Self { Name: "UserDefinedIndex" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "UserDefinedIndex" LocalPathId: 3 Type: EIndexTypeGlobalUnique State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 
110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> OlapEstimationRowsCorrectness::TPCH11 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::OnlineBuild [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:14.724146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:14.724273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:14.724414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:14.724446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:14.724492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:14.724533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:14.724586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:14.724641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:14.725264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:14.725574Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:14.796813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:14.796868Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:14.812750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:14.813120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:14.813266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:14.819307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:14.819580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:14.820138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.820386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:14.823658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:14.823830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:14.824887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:14.824946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:14.825080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:14.825131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:14.825174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:14.825248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.831191Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:14.958134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:14.958371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, 
path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.958576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:14.958636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:14.958871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:14.958942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:14.965820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.966011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:14.966200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.966287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:14.966327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:14.966362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:14.971168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.971241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:14.971279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:14.973962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.974012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.974064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.974113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:14.977853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation 
DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:14.979879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:14.980073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:14.980974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.981114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:14.981177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.981443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:14.981492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.981689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:14.981781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:14.986303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:14.986354Z node 1 :FLAT_TX_SCHEMESHARD ... 
ockTxId: 281474976710760, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, cookie: 102, record: Status: StatusAccepted TxId: 281474976710760 SchemeshardId: 72057594046678944 PathId: 2, status: StatusAccepted 2025-06-25T14:51:15.564348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.564389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:30: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 ProgressState 2025-06-25T14:51:15.564427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:51:15.564512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 281474976710760 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:15.567746Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking 2025-06-25T14:51:15.567837Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndex, IndexColumn: indexed, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:388:2358], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:51:15.568002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710760:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710760 msg type: 269090816 2025-06-25T14:51:15.568128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710760, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:51:15.568345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710760, at schemeshard: 72057594046678944 2025-06-25T14:51:15.568379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 0/1, is published: true 2025-06-25T14:51:15.568425Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710760, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-06-25T14:51:15.568651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.568743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:15.568786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-25T14:51:15.568830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710760:0 128 -> 240 2025-06-25T14:51:15.570568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.570623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-06-25T14:51:15.570697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-25T14:51:15.570724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:51:15.570756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-25T14:51:15.570779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:51:15.570808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-25T14:51:15.570890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:126:2150] message: TxId: 281474976710760 2025-06-25T14:51:15.570931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-25T14:51:15.570960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-25T14:51:15.570998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710760:0 2025-06-25T14:51:15.571062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-25T14:51:15.572571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-25T14:51:15.572633Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710760 2025-06-25T14:51:15.572689Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 102, txId# 281474976710760 2025-06-25T14:51:15.572780Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndex, IndexColumn: indexed, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:388:2358], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710760 2025-06-25T14:51:15.574851Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking 2025-06-25T14:51:15.574944Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, IndexName: UserDefinedIndex, IndexColumn: indexed, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:388:2358], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:51:15.575009Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-25T14:51:15.576700Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-25T14:51:15.577138Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalAsync, 
IndexName: UserDefinedIndex, IndexColumn: indexed, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:388:2358], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:51:15.577185Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:333: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-25T14:51:15.577328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:51:15.577381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:477:2436] TestWaitNotification: OK eventTxId 102 |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::Decimal [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitIndexWithReboots[TabletReboots] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTablePrefix [GOOD] |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::Decimal [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:16.239819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:16.239913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:16.239947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:16.239971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:16.240022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:16.240049Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:16.240092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:16.240172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:16.240765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:16.241035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:16.291725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:16.291763Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:16.304045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:16.304377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:16.304507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:16.308393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:16.308635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:16.309073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.309255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:16.311792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:16.311954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:16.313000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:16.313055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:16.313191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:16.313234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:16.313281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:16.313348Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.319104Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:16.421881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:16.422041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.422185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:16.422219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:16.422483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:16.422528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:16.424187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.424357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:16.424475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.424526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:16.424554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:16.424577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:16.425865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.425913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:16.425942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for 
txid 1:0 3 -> 128 2025-06-25T14:51:16.426972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.427011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.427047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:16.427079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:16.429359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:16.430498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:16.430652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:16.431239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.431328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:16.431365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:16.431547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:16.431579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:16.431687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:16.431742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:16.433127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:16.433168Z node 1 :FLAT_TX_SCHEMESHARD ... 
2075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.710489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.710513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:51:16.710540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 129 -> 240 2025-06-25T14:51:16.715088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:16.715305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:16.715390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:16.720201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:16.720453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:16.720853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.721300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:16.721571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.722005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:2, at schemeshard: 72057594046678944 2025-06-25T14:51:16.722076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:2 ProgressState 2025-06-25T14:51:16.722179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:2 progress is 2/3 2025-06-25T14:51:16.722213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-25T14:51:16.722255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:2 progress is 2/3 2025-06-25T14:51:16.722282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 2/3 2025-06-25T14:51:16.722313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 2/3, is published: true 2025-06-25T14:51:16.722809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.722845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: 
[72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:51:16.722893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 3/3 2025-06-25T14:51:16.722931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:16.722976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 3/3 2025-06-25T14:51:16.722997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:16.723033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 3/3, is published: true 2025-06-25T14:51:16.723095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:385:2351] message: TxId: 101 2025-06-25T14:51:16.723140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 3/3 2025-06-25T14:51:16.723178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:51:16.723221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:51:16.723353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:16.723412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:1 2025-06-25T14:51:16.723432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:1 2025-06-25T14:51:16.723463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:51:16.723490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:2 2025-06-25T14:51:16.723525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:2 2025-06-25T14:51:16.723581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:51:16.726743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:16.726793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:386:2352] TestWaitNotification: OK eventTxId 101 2025-06-25T14:51:16.727275Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:51:16.727551Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex" took 259us result status StatusSuccess 
2025-06-25T14:51:16.728389Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex" PathDescription { Self { Name: "UserDefinedIndex" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } TableIndex { Name: "UserDefinedIndex" LocalPathId: 3 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 
110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |86.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitBothWithReboots[TabletReboots] >> TVectorIndexTests::CreateTablePrefixInvalidKeyType |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTablePrefix [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:16.197654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:16.197757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:16.197799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:16.197831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:16.197874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:16.197909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:16.197963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:16.198024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, 
InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:16.198713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:16.199007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:16.272185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:16.272238Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:16.285592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:16.285841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:16.285962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:16.290251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:16.290538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:16.291139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.291344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:16.294542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:16.294722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:16.295823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:16.295885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:16.296036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:16.296089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:16.296160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:16.296239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.302518Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:16.419997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:16.420203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.420391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:16.420443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:16.420680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:16.420750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:16.422718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.422893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:16.423050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.423121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:16.423177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:16.423218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:16.424880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.424932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:16.424966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:16.426416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.426455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.426505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:16.426552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:16.430014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:16.431501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:16.431675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:16.432518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.432651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:16.432703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:16.432959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:16.433007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:16.433145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:16.433242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:16.435005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:16.435047Z node 1 :FLAT_TX_SCHEMESHARD ... 
44, LocalPathId: 4], version: 18446744073709551615 2025-06-25T14:51:16.919447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:51:16.919825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:16.919891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:16.919916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:51:16.919940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-25T14:51:16.919967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-25T14:51:16.920742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:16.920815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:16.920841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:51:16.920865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 18446744073709551615 2025-06-25T14:51:16.920888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 4 2025-06-25T14:51:16.920945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/5, is published: true 2025-06-25T14:51:16.921870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:4, at schemeshard: 72057594046678944 2025-06-25T14:51:16.921913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:4 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:16.922140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-25T14:51:16.922236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is 
done id#102:4 progress is 3/5 2025-06-25T14:51:16.922265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 3/5 2025-06-25T14:51:16.922304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:4 progress is 3/5 2025-06-25T14:51:16.922330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 3/5 2025-06-25T14:51:16.922357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/5, is published: true 2025-06-25T14:51:16.923065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-25T14:51:16.923106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:2 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:16.923276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:51:16.923358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:2 progress is 4/5 2025-06-25T14:51:16.923383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 4/5 2025-06-25T14:51:16.923428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:2 progress is 4/5 2025-06-25T14:51:16.923456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 4/5 2025-06-25T14:51:16.923481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 4/5, is published: true 2025-06-25T14:51:16.924265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.924304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:16.924515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:16.924586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 5/5 2025-06-25T14:51:16.924610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 5/5 2025-06-25T14:51:16.924646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 5/5 2025-06-25T14:51:16.924670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 5/5 2025-06-25T14:51:16.924709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 5/5, is published: true 2025-06-25T14:51:16.924771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:455:2399] message: TxId: 
102 2025-06-25T14:51:16.924828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 5/5 2025-06-25T14:51:16.924880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:51:16.924917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:51:16.925015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:51:16.925071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:1 2025-06-25T14:51:16.925093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:1 2025-06-25T14:51:16.925126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:51:16.925146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:2 2025-06-25T14:51:16.925163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:2 2025-06-25T14:51:16.925227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:51:16.925257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:3 2025-06-25T14:51:16.925276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:3 2025-06-25T14:51:16.925318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-25T14:51:16.925339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:4 2025-06-25T14:51:16.925357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:4 2025-06-25T14:51:16.925394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-25T14:51:16.926241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:16.926353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:16.926390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:16.926433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:16.926487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, 
cookie: 102 2025-06-25T14:51:16.928913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:16.928958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:16.929027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:16.929079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:16.929143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:16.930982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:51:16.931036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:603:2540] TestWaitNotification: OK eventTxId 102 >> KqpJoinOrder::CanonizedJoinOrderTPCH8 [GOOD] >> TAsyncIndexTests::SplitBothWithReboots[PipeResets] |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::DropTableWithInflightChanges[TabletReboots] >> TAsyncIndexTests::CdcAndMergeWithReboots[PipeResets] >> TVectorIndexTests::CreateTablePrefixInvalidKeyType [GOOD] >> TAsyncIndexTests::DropTableWithInflightChanges[PipeResets] |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitMainWithReboots[PipeResets] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTablePrefixInvalidKeyType [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:18.011167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:18.011237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:18.011275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:18.011301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 
2025-06-25T14:51:18.011347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:18.011392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:18.011434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:18.011489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:18.012017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:18.012268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:18.076538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:18.076595Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:18.091098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:18.091471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:18.091609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:18.097686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:18.097981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:18.098621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.098842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:18.102046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.102225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:18.103252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:18.103314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.103457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:18.103513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:18.103555Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:18.103651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.111618Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:18.203236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:18.203422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.203602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:18.203638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:18.203839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:18.203894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:18.205485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.205631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:18.205759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.205817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:18.205849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:18.205878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:18.207271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.207313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: 
NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:18.207341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:18.208556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.208587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.208626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:18.208672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:18.210981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:18.213387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:18.213560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:18.214209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.214312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:18.214350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:18.214539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:18.214576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:18.214709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:18.214800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:18.216233Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:18.216270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:18.216438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.216493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:51:18.216787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.216842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:51:18.216927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:18.216954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:18.216980Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:18.217003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:18.217030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:51:18.217064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:18.217099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:51:18.217130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:51:18.217174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:18.217200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:51:18.217222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:51:18.218557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:18.218660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:18.218691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, 
count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:51:18.218720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:51:18.218748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:18.218827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:51:18.220881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:51:18.221292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T14:51:18.222883Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:274:2263] Bootstrap 2025-06-25T14:51:18.240773Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:51:18.242857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "vectors" Columns { Name: "id" Type: "Uint64" } Columns { Name: "embedding" Type: "String" } Columns { Name: "covered" Type: "String" } Columns { Name: "prefix" Type: "Float" } KeyColumnNames: "id" } IndexDescription { Name: "idx_vector" KeyColumnNames: "prefix" KeyColumnNames: "embedding" Type: EIndexTypeGlobalVectorKmeansTree DataColumnNames: "covered" VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:18.243215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:100: TCreateTableIndex construct operation table path: /MyRoot/vectors domain path id: [OwnerId: 72057594046678944, LocalPathId: 1] domain path: /MyRoot shardsToCreate: 2 GetShardsInside: 0 MaxShards: 200000 2025-06-25T14:51:18.243369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: Column 'prefix' has wrong key type Float for being key, at schemeshard: 72057594046678944 2025-06-25T14:51:18.243422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusInvalidParameter, reason: Column 'prefix' has wrong key type Float for being key, at schemeshard: 72057594046678944 2025-06-25T14:51:18.246344Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:51:18.248196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusInvalidParameter Reason: "Column \'prefix\' has wrong key type Float for being key" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:18.248396Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Column 'prefix' has wrong key type Float for being key, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/vectors 2025-06-25T14:51:18.248744Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:51:18.248884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:51:18.248924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:51:18.249213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:51:18.249274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:18.249331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:289:2278] TestWaitNotification: OK eventTxId 101 2025-06-25T14:51:18.249675Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/idx_vector" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:51:18.249819Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/idx_vector" took 146us result status StatusPathDoesNotExist 2025-06-25T14:51:18.249944Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/vectors/idx_vector\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/vectors/idx_vector" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH11 [GOOD] Test command err: Trying to start YDB, gRPC: 20266, MsgBus: 16483 2025-06-25T14:49:35.501241Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899221157558195:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:35.525548Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d52/r3tmp/tmpeGXQHs/pdisk_1.dat 
2025-06-25T14:49:36.200155Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:36.200405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:36.200476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:36.202629Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899221157558097:2080] 1750862975488152 != 1750862975488155 2025-06-25T14:49:36.237113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20266, node 1 2025-06-25T14:49:36.437568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:36.437597Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:36.437611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:36.437709Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:36.536383Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16483 TClient is connected to server localhost:16483 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:37.310918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:37.334646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:39.498928Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899238337427926:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:39.499040Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:39.499371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899238337427938:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:39.503516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:39.523862Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899238337427940:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:39.576204Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899238337427993:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:40.030508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:40.369550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899242632395549:2319];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:40.369726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899242632395549:2319];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:40.370196Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899242632395549:2319];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:40.370364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899242632395549:2319];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:40.370488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899242632395549:2319];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:40.370604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899242632395549:2319];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:40.370692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899242632395549:2319];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:40.370793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899242632395549:2319];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:40.370943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899242632395549:2319];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:40.371057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037903;self_id=[1:7519899242632395549:2319];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:40.371183Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[1:7519899242632395549:2319];tablet_id=72075186224037903;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:40.372154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899242632395533:2311];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:40.372225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899242632395533:2311];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:40.372396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899242632395533:2311];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:40.372502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899242632395533:2311];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:40.372588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899242632395533:2311];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:40.372694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899242632395533:2311];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:40.372787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899242632395533:2311];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:40.373226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899242632395533:2311];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:40.373327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899242632395533:2311];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunk ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.665976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.666622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.666954Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039357;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.667413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.672653Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039377;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.673234Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.673271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.673892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.678896Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.679361Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039257;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.679578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.679981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.685744Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039404;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.685907Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039341;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.686435Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.686554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.691711Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039383;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.692016Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.692358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.692750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.696960Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.697539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.698061Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.698737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.701831Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039415;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.702410Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.703934Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.704837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.706950Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.707475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.713114Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039281;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.713864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.714921Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.715578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.720689Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.720869Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039387;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.721440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.721447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.726654Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039373;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.727936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:02.728150Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.732772Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:02.856636Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks38hbakc0wwacgp432y2x", SessionId: 
ydb://session/3?node_id=1&id=NWFjMGYwZGQtNzc2YWU1ODgtNjNlOWI5ZWUtZDIzOTY4ZjY=, Slow query, duration: 34.074711s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:51:03.111870Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:03.111938Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:03.112612Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest |86.9%| [TA] $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH8 [GOOD] Test command err: Trying to start YDB, gRPC: 21780, MsgBus: 6818 2025-06-25T14:49:25.389056Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899175315866686:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:25.395578Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d64/r3tmp/tmpoyWmIr/pdisk_1.dat 2025-06-25T14:49:25.893926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:25.894020Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:25.905349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:25.987590Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:25.988460Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899175315866584:2080] 1750862965257405 != 1750862965257408 TServer::EnableGrpc on GrpcPort 21780, node 1 2025-06-25T14:49:26.203876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:26.203893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:26.203898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:26.203994Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:26.388545Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6818 TClient is connected to server localhost:6818 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:49:27.388556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:27.405823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:29.596887Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899192495736412:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:29.597023Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:29.597833Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899192495736424:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:29.602461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:29.619088Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899192495736426:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:29.721892Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899192495736477:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:30.037939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:30.367548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899196790703977:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:30.367792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899196790703977:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:30.368080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899196790703977:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:30.368200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899196790703977:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:30.376389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899196790703977:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:30.376701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899196790703977:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:30.376824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899196790703977:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:30.376925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899196790703977:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:30.377030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899196790703977:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:30.377140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519899196790703977:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:30.377243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899196790703977:2316];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:30.381344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899196790703974:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:30.381393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899196790703974:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:30.381584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899196790703974:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:30.381689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899196790703974:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:30.381784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899196790703974:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:30.381889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899196790703974:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:30.381999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899196790703974:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:30.382094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899196790703974:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:30.382184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899196790703974:2313];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; ... 
33661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.338609Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.339119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.343784Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.346995Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.347523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.352584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.356989Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.357505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.361021Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039420;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.361531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.366430Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039398;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.367091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.370136Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.370639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.375473Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.379217Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039396;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.379699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.380891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.389196Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039418;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.389872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.392724Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039379;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.393240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.398753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039416;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.401184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.401244Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.401876Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.406979Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039405;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.407298Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.407680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.407891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.422764Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039385;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.423343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.431216Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.431707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.438734Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.439136Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039384;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.439639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.440144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:55.445000Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.445154Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039388;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:55.651012Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks30qr0xv0mh7c82mjx45c", SessionId: ydb://session/3?node_id=1&id=MjA5ZTJmOGYtNDY3NDJlZGMtNDJkMTU3NmEtMjgwODc0Y2Y=, Slow query, duration: 34.858063s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:55.972727Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:55.972887Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:55.973289Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;self_id=[1:7519899467373688207:8200];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039094;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039392;receive=72075186224038933; 2025-06-25T14:50:55.973696Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpRm::NotEnoughMemory >> KqpRm::ManyTasks >> KqpRm::NodesMembershipByExchanger >> KqpRm::NotEnoughExecutionUnits >> KqpRm::DisonnectNodes >> KqpRm::SnapshotSharingByExchanger >> KqpRm::ResourceBrokerNotEnoughResources >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-false [GOOD] >> KqpJoinOrder::TPCDS90+ColumnStore [GOOD] >> KqpRm::NotEnoughExecutionUnits [GOOD] >> KqpRm::ManyTasks [GOOD] >> KqpRm::NotEnoughMemory [GOOD] >> OlapEstimationRowsCorrectness::TPCH5 [GOOD] >> KqpRm::ResourceBrokerNotEnoughResources [GOOD] >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-false [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NotEnoughExecutionUnits [GOOD] >> TPartitionWriterCacheActorTests::WriteReplyOrder >> KqpRm::DisonnectNodes [GOOD] Test command err: 2025-06-25T14:51:22.880924Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:51:22.881289Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/001ab0/r3tmp/tmpSSQtAz/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:51:22.881718Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/001ab0/r3tmp/tmpSSQtAz/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001ab0/r3tmp/tmpSSQtAz/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 13547951868887724143 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:51:22.924089Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.924343Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.940540Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:460:2102] with ResourceBroker at [2:431:2101] 2025-06-25T14:51:22.940659Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:461:2103] 2025-06-25T14:51:22.940825Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:459:2338] with ResourceBroker at [1:430:2319] 2025-06-25T14:51:22.940920Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:462:2339] 2025-06-25T14:51:22.940984Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:51:22.941025Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.941058Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-25T14:51:22.941145Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.941527Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.958507Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.958690Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.958780Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.959121Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.959244Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.959353Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.959387Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.959480Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.959563Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.959583Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.959652Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.960295Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-25T14:51:22.960400Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.960795Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.961196Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: 
Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.961331Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-25T14:51:22.961497Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.961692Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:22.961877Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.961935Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 >> KqpRm::NodesMembershipByExchanger [GOOD] >> TopicService::OneConsumer_TheRangesDoNotOverlap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NotEnoughMemory [GOOD] Test command err: 2025-06-25T14:51:22.881109Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:51:22.881514Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/001a89/r3tmp/tmpZTIbgd/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:51:22.881955Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/001a89/r3tmp/tmpZTIbgd/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001a89/r3tmp/tmpZTIbgd/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 12598402184611914793 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:51:22.923839Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.924009Z node 2 :RESOURCE_BROKER DEBUG: 
resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.940614Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:460:2102] with ResourceBroker at [2:431:2101] 2025-06-25T14:51:22.940835Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:461:2103] 2025-06-25T14:51:22.940985Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:459:2338] with ResourceBroker at [1:430:2319] 2025-06-25T14:51:22.941071Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:462:2339] 2025-06-25T14:51:22.941125Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:51:22.941159Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.941185Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:51:22.941269Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.941540Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.955479Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.955653Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.955732Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.956035Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.956182Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.956274Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.956323Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.956420Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.956499Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 
2025-06-25T14:51:22.956520Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.956590Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.957203Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-25T14:51:22.957287Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.957671Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.958064Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.958203Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-25T14:51:22.958364Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.958548Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:22.958729Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.958783Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::ResourceBrokerNotEnoughResources [GOOD] Test command err: 2025-06-25T14:51:22.880519Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:51:22.880920Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/001a67/r3tmp/tmpmoorwu/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:51:22.881406Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/001a67/r3tmp/tmpmoorwu/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001a67/r3tmp/tmpmoorwu/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 2908441544548774901 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:51:22.923813Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.924063Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.940254Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:460:2102] with ResourceBroker at [2:431:2101] 2025-06-25T14:51:22.940385Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:461:2103] 2025-06-25T14:51:22.940528Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:459:2338] with ResourceBroker at [1:430:2319] 2025-06-25T14:51:22.940604Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:462:2339] 2025-06-25T14:51:22.940658Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:51:22.940690Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.940715Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-25T14:51:22.940793Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.941520Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.954232Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.954388Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.954481Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 100000000 Memory { Pool: 1 Available: 100000000 } ExecutionUnits: 100 2025-06-25T14:51:22.954778Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.954878Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.954955Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.954983Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.955074Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.955162Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.955192Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.955243Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 100000000 Memory { Pool: 1 Available: 100000000 } ExecutionUnits: 100 2025-06-25T14:51:22.955825Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-25T14:51:22.955905Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.956293Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.956692Z node 2 :KQP_RESOURCE_MANAGER DEBUG: 
kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.956816Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-25T14:51:22.956984Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.957158Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:22.957352Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.957413Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:22.960037Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-1 (1 by [1:459:2338]) priority=0 resources={0, 1000} 2025-06-25T14:51:22.960110Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.960162Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 1000} for task kqp-1-2-1 (1 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:22.960215Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-2-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.960256Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 2.500000 (insert task kqp-1-2-1 (1 by [1:459:2338])) 2025-06-25T14:51:22.966040Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 2. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 1000ExternalMemory: 0 } 2025-06-25T14:51:22.966159Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-2 (2 by [1:459:2338]) priority=0 resources={0, 100000} 2025-06-25T14:51:22.966209Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-2 (2 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.966265Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task kqp-1-2-2 (2 by [1:459:2338]) 2025-06-25T14:51:22.966299Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:499: Removing task kqp-1-2-2 (2 by [1:459:2338]) 2025-06-25T14:51:22.966365Z node 1 :KQP_RESOURCE_MANAGER NOTICE: kqp_rm_service.cpp:338: TxId: 1, taskId: 2. Not enough memory for query, requested: 100000. 
TxResourcesInfo { TxId: 1, Database: , tx initially granted memory: 0B, tx total memory allocations: 1000B, tx largest successful memory allocation: 1000B, tx last failed memory allocation: 0B, tx total execution units: 0, started at: 2025-06-25T14:51:22.959968Z } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::ManyTasks [GOOD] Test command err: 2025-06-25T14:51:22.882187Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:51:22.882640Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/001a9a/r3tmp/tmp2kRF8D/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:51:22.883214Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/001a9a/r3tmp/tmp2kRF8D/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001a9a/r3tmp/tmp2kRF8D/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 3447128505677951923 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:51:22.923851Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.924103Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.940426Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:460:2102] with ResourceBroker at [2:431:2101] 2025-06-25T14:51:22.940576Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:461:2103] 2025-06-25T14:51:22.940735Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:459:2338] with ResourceBroker at [1:430:2319] 2025-06-25T14:51:22.940826Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start 
KqpResourceInfoExchangerActor at [1:462:2339] 2025-06-25T14:51:22.940903Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:51:22.940941Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.940966Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:51:22.941053Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.941537Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.952593Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.952768Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.952863Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.953213Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.953310Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.953390Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.953426Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.953536Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.953635Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.953659Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.953732Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.954395Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: 
kqpexch+/dc-1, with size: 0 2025-06-25T14:51:22.954483Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.954888Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.955358Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.955503Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-25T14:51:22.955672Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.955837Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:22.956009Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.956084Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:22.958859Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-1-1 (1 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:51:22.958923Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-1-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.958978Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-1-1 (1 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:22.959030Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.959075Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-1-1 (1 by [1:459:2338])) 2025-06-25T14:51:22.966057Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 1. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:22.966337Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-2 (2 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:51:22.966393Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-2 (2 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.966454Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-2-2 (2 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:22.966490Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-2-2 (2 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.966532Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.500000 (insert task kqp-1-2-2 (2 by [1:459:2338])) 2025-06-25T14:51:22.966567Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 2. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:22.966693Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-3-3 (3 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:51:22.966723Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-3-3 (3 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.966746Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-3-3 (3 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:22.966768Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-3-3 (3 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.966788Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.500000 to 0.750000 (insert task kqp-1-3-3 (3 by [1:459:2338])) 2025-06-25T14:51:22.966806Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 3. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:22.966880Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-4-4 (4 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:51:22.966906Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-4-4 (4 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.966955Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-4-4 (4 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:22.966983Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-4-4 (4 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.967008Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.750000 to 1.000000 (insert task kqp-1-4-4 (4 by [1:459:2338])) 2025-06-25T14:51:22.967032Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 4. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:22.967141Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-5-5 (5 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:51:22.967173Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-5-5 (5 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.967200Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-5-5 (5 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:22.967226Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-5-5 (5 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.967252Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 1.000000 to 1.250000 (insert task kqp-1-5-5 (5 by [1:459:2338])) 2025-06-25T14:51:22.967275Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 5. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:22.967370Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-6-6 (6 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:51:22.967403Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-6-6 (6 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.967441Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-6-6 (6 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:22.967464Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-6-6 (6 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.967488Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 1.250000 to 1.500000 (insert task kqp-1-6-6 (6 by [1:459:2338])) 2025-06-25T14:51:22.967512Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 6. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:22.967621Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-7-7 (7 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:51:22.967654Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-7-7 (7 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.967682Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-7-7 (7 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:22.967705Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-7-7 (7 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.967741Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 1.500000 to 1.750000 (insert task kqp-1-7-7 (7 by [1:459:2338])) 2025-06-25T14:51:22.967777Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 7. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:22.967880Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-8-8 (8 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:51:22.967919Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-8-8 (8 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.967959Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-8-8 (8 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:22.967984Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-8-8 (8 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.968008Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 1.750000 to 2.000000 (insert task kqp-1-8-8 (8 by [1:459:2338])) 2025-06-25T14:51:22.968037Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 8. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:22.968158Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-9-9 (9 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:51:22.968203Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-9-9 (9 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.968237Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-9-9 (9 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:22.968261Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-9-9 (9 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:22.968284Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 2.000000 to 2.250000 (insert task kqp-1-9-9 (9 by [1:459:2338])) 2025-06-25T14:51:22.968326Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 9. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:22.968409Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-1-1 (1 by [1:459:2338]) (release resources {0, 100}) 2025-06-25T14:51:22.968451Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 2.250000 to 2.000000 (remove task kqp-1-1-1 (1 by [1:459:2338])) 2025-06-25T14:51:22.968497Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 0. ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:127:2151] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:09.448981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:09.449067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.449102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:09.449154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:09.449207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:09.449242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:09.449295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 
15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.449359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:09.450010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:09.450372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:09.518376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:09.518435Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:09.526025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:09.526134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:09.526256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:09.531212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:09.531399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:09.531859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.531995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:09.533742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.533920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:09.534971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:09.535036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.535235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:09.535289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:09.535334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:09.535424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.541591Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:127:2151] 
sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:51:09.656831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:09.657045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.657239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:09.657300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:09.657533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:09.657604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:09.659587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.659785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:09.659946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.659992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:09.660031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:09.660065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:09.661825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.661880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:09.661921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:09.663545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.663587Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.663636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:09.663687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:09.667212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:09.668737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:09.668905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:09.669813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.669927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:09.669986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:09.670216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:09.670264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:09.670565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:09.670647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:09.672542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:09.672589Z node 1 :FLAT_TX_SCHEMESHARD ... 
, LocalPathId: 1] was 2 2025-06-25T14:51:22.704416Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:51:22.704481Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:51:22.704511Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:51:22.704546Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:51:22.704583Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:22.704655Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:51:22.705934Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 934 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:22.705978Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:22.706084Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 934 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:22.706182Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 934 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:22.707302Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 314 RawX2: 77309413627 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:22.707337Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:22.707420Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 314 RawX2: 77309413627 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:22.707455Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:22.707528Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 314 RawX2: 77309413627 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:22.707581Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:22.707612Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:22.707646Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:22.707686Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 129 -> 240 2025-06-25T14:51:22.708594Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:22.709681Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:22.711208Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:22.711340Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:22.711573Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:22.711618Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:51:22.711729Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:22.711766Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:22.711813Z node 18 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:22.711846Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:22.711886Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is 
published: true 2025-06-25T14:51:22.711956Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [18:342:2319] message: TxId: 101 2025-06-25T14:51:22.712012Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:22.712072Z node 18 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:51:22.712106Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:51:22.712230Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:51:22.714000Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:22.714047Z node 18 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [18:343:2320] TestWaitNotification: OK eventTxId 101 2025-06-25T14:51:22.714601Z node 18 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:22.714797Z node 18 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" took 229us result status StatusSuccess 2025-06-25T14:51:22.715348Z node 18 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" PathDescription { Self { Name: "TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLTableWithDyNumberColumn_UNIT_NANOSECONDS" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "DyNumber" TypeId: 4866 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 ColumnUnit: UNIT_NANOSECONDS Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 
WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardTTLTests::RacyAlterTableAndConditionalErase [GOOD] >> TPartitionWriterCacheActorTests::WriteReplyOrder [GOOD] >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-true [GOOD] >> KqpRm::SnapshotSharingByExchanger [GOOD] >> TPartitionWriterCacheActorTests::DropOldWriter >> TPartitionWriterCacheActorTests::DropOldWriter [GOOD] >> TPersQueueCommonTest::Auth_CreateGrpcStreamWithInvalidTokenInInitialMetadata_SessionClosedWithUnauthenticatedError ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::DisonnectNodes [GOOD] Test command err: 2025-06-25T14:51:22.882228Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:51:22.882701Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/001a98/r3tmp/tmpS3ThfI/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:51:22.883231Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/001a98/r3tmp/tmpS3ThfI/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001a98/r3tmp/tmpS3ThfI/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 11208552741025052471 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:51:22.923814Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.924011Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.939946Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:460:2102] with ResourceBroker at [2:431:2101] 2025-06-25T14:51:22.940066Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:461:2103] 2025-06-25T14:51:22.940176Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:459:2338] with ResourceBroker at [1:430:2319] 2025-06-25T14:51:22.940242Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:462:2339] 2025-06-25T14:51:22.940370Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.940428Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:51:22.940465Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.940487Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-25T14:51:22.941491Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.952503Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.952676Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.952756Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.953087Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.953205Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.953317Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.953349Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.953437Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.953572Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.953586Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.953635Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.954089Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.954262Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.954527Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.954682Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.954765Z node 2 :KQP_RESOURCE_MANAGER 
DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.954921Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:22.955024Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.955078Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.955176Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:22.955248Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:23.924719Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-25T14:51:23.924827Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-25T14:51:23.927166Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 2 2025-06-25T14:51:23.927300Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 2 2025-06-25T14:51:23.927762Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 2 2025-06-25T14:51:23.928373Z node 2 :TX_PROXY WARN: proxy_impl.cpp:227: actor# [2:152:2089] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2025-06-25T14:51:23.928552Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-25T14:51:23.928775Z node 2 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:74:2075] ServerId# [1:354:2271] TabletId# 72057594037932033 PipeClientId# [2:74:2075] 2025-06-25T14:51:23.928971Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:495: Subcriber is not available for info exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:23.929010Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:167: Kill previous info exchanger subscriber for 'kqpexch+/dc-1' at [2:464:2105], reason: tenant updated 2025-06-25T14:51:23.929127Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:23.931321Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:23.931441Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:24.289933Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NodesMembershipByExchanger [GOOD] Test command err: 2025-06-25T14:51:22.880551Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:51:22.880904Z node 2 :BS_PDISK CRIT: 
{BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/001a7e/r3tmp/tmpQcOdQj/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:51:22.881395Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/001a7e/r3tmp/tmpQcOdQj/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001a7e/r3tmp/tmpQcOdQj/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 2594255426095076900 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:51:22.924001Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.924280Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.940503Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:460:2102] with ResourceBroker at [2:431:2101] 2025-06-25T14:51:22.940613Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:461:2103] 2025-06-25T14:51:22.940749Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:459:2338] with ResourceBroker at [1:430:2319] 2025-06-25T14:51:22.940830Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:462:2339] 2025-06-25T14:51:22.940888Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:51:22.940922Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.940948Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-25T14:51:22.941030Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.941510Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.957327Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.957500Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.957590Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.957976Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.958088Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.958180Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.958212Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.958309Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.958396Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.958417Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.958487Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.959097Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-25T14:51:22.959196Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.959584Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.960023Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: 
Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.960187Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-25T14:51:22.960385Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.960592Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:22.960769Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.960846Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:23.942265Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-25T14:51:23.942371Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-25T14:51:23.943245Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:24.257153Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:03.031026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:03.031131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.031175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:03.031214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:03.032532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:03.032590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:03.032669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.032751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-25T14:51:03.033558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:03.035180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:03.128836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:03.128925Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:03.144987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:03.145333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:03.145460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:03.150544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:03.150888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:03.151434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.151703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:03.154889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.155038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:03.161410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.161471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.161619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:03.161669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:03.161720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:03.161806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.177149Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:03.331469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:03.331695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.331911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:03.331967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:03.332170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:03.332235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:03.337321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.337569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:03.337772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.337848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:03.337913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:03.337949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:03.340542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.340612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:03.340653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:03.343967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.344038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.344106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.344163Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:03.347875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:03.350541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:03.350724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:03.351701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.351833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:03.351889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.352156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:03.352211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.352415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:03.352494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:03.355213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.355277Z node 1 :FLAT_TX_SCHEMESHARD ... 
Root 2025-06-25T14:51:23.574293Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:23.574388Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:23.574467Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:23.574533Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:23.576559Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:23.576642Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:23.576711Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:23.578547Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:23.578610Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:23.578703Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:23.578796Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:23.579022Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:23.580726Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:23.581041Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:23.582150Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:23.582349Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 115964119149 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:23.582465Z node 27 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:23.582827Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:23.582913Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:23.583185Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:23.583287Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:23.584950Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:23.585016Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:23.585235Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:23.585296Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [27:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:51:23.585671Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:23.585740Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:51:23.585910Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:23.585958Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:23.586008Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:23.586060Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:23.586131Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:51:23.586214Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:23.586279Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:51:23.586335Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:51:23.586419Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:23.586478Z 
node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:51:23.586529Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:51:23.587102Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:23.587234Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:23.587291Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:51:23.587344Z node 27 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:51:23.587406Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:23.587532Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:51:23.589995Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:51:23.590619Z node 27 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T14:51:23.594160Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "DyNumber" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:23.594715Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:23.594877Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "DyNumber" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } }, at schemeshard: 72057594046678944 2025-06-25T14:51:23.595479Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: To enable TTL on integral type column 'ValueSinceUnixEpochModeSettings' should be specified, at schemeshard: 
72057594046678944 2025-06-25T14:51:23.595953Z node 27 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [27:271:2260] Bootstrap 2025-06-25T14:51:23.633237Z node 27 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [27:271:2260] Become StateWork (SchemeCache [27:276:2265]) 2025-06-25T14:51:23.634295Z node 27 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [27:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:51:23.636208Z node 27 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "To enable TTL on integral type column \'ValueSinceUnixEpochModeSettings\' should be specified" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:23.636568Z node 27 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: To enable TTL on integral type column 'ValueSinceUnixEpochModeSettings' should be specified, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-25T14:51:23.637748Z node 27 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::RacyAlterTableAndConditionalErase [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:03.031020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:03.031121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.031158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:03.031194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:03.032518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:03.032570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:03.032652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.032725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:51:03.033486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:03.035185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:03.122891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:03.122961Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:03.140597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:03.141011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:03.141191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:03.147123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:03.147430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:03.148044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.148357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:03.154656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.154950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:03.160439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.160507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.160691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:03.160748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:03.160799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:03.160880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.168039Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:03.278305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:03.282404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.283472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:03.283573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:03.285375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:03.285482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:03.288769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.289701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:03.289924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.290046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:03.290120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:03.290159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:03.292193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.292290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:03.292372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:03.294038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.294095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.294158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.294224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:03.304970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:03.306896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:03.307064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:03.307931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.308085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:03.308140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.308449Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:03.308504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.308669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:03.308749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:03.313204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.313251Z node 1 :FLAT_TX_SCHEMESHARD ... 
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 102 at step: 5000003 2025-06-25T14:51:24.666244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:24.666340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:24.666389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:357: TAlterTable TPropose operationId# 102:0 HandleReply TEvOperationPlan, operationId: 102:0, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-25T14:51:24.666636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 129 2025-06-25T14:51:24.666757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-25T14:51:24.671948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:24.672012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:51:24.672299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:24.672371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:51:24.672783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:24.672826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:51:24.673448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:24.673536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:51:24.673583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:51:24.673637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-25T14:51:24.673694Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:24.673775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-25T14:51:24.674926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1133 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:51:24.674975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:24.675105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1133 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-25T14:51:24.675199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1133 } } CommitVersion { Step: 5000003 TxId: 102 } FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:51:24.676638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:24.676691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:24.676799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:24.676848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:24.676947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 313 RawX2: 4294969594 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:51:24.676999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, 
txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:24.677031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:24.677071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:24.677127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:51:24.679823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:51:24.679963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:24.680232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:24.680365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:51:24.680404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:51:24.680507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:51:24.680577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:24.680612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:51:24.680644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:24.680675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:51:24.680731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:340:2317] message: TxId: 102 2025-06-25T14:51:24.680788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:51:24.680825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:51:24.680853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:51:24.680981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:51:24.682569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:51:24.682613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:616:2568] TestWaitNotification: OK eventTxId 102 2025-06-25T14:51:24.683003Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-25T14:51:24.683058Z node 1 :FLAT_TX_SCHEMESHARD ERROR: schemeshard__conditional_erase.cpp:391: Unsuccessful conditional erase: tabletId: 72075186233409546, status: SCHEME_ERROR, error: Schema version mismatch: got 1, expected 2, retry after: 300.000000s, at schemeshard: 72057594046678944 2025-06-25T14:51:24.684481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-25T14:51:24.684670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T14:51:24.684719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:1, run at: 1970-01-01T00:06:00.039500Z, at schemeshard: 72057594046678944 2025-06-25T14:51:24.684774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS90+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 24580, MsgBus: 14719 2025-06-25T14:49:36.859148Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899222389932646:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:36.859303Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d4f/r3tmp/tmpiHn3Rb/pdisk_1.dat 2025-06-25T14:49:37.523494Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899222389932542:2080] 1750862976813844 != 1750862976813847 2025-06-25T14:49:37.541891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:37.541982Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:37.558852Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:37.562259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24580, node 1 2025-06-25T14:49:37.899143Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:37.899165Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:37.899171Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:37.899278Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:37.906607Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14719 TClient is connected to server localhost:14719 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:38.726615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:38.744736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:41.401302Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899243864769671:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:41.401416Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:41.401800Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899243864769683:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:41.406076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:41.424471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:49:41.424768Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899243864769685:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:41.493035Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899243864769736:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:41.800845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:41.857466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899222389932646:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:41.857561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:42.196101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899248159737234:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:42.196539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899248159737234:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:42.196838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899248159737234:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:42.196977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899248159737234:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:42.197085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899248159737234:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:42.197187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899248159737234:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:42.197316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899248159737234:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:42.197446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899248159737234:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:42.197567Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899248159737234:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:42.197717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899248159737234:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:42.197842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899248159737234:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:42.198614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899243864769928:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:42.198694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899243864769928:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:42.198869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899243864769928:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:42.198969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899243864769928:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:42.199089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899243864769928:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:42.199177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899243864769928:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:42.199277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899243864769928:2312];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_regis ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.810594Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039207;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.810747Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039292;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.811213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039340;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.811392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039338;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.816563Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039340;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.817159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039268;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.817169Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039338;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.817790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039348;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.822386Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039268;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.823052Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039348;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.823057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039260;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.823642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.828545Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039260;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.829086Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.829276Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039228;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.829728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.834564Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039228;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.834968Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039211;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.835224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.835574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039205;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.840296Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039203;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.840876Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039205;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.840948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.841499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.847212Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039197;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.848013Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039195;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.848289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039191;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.849591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.853239Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039191;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.853819Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.854300Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039189;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.854934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039352;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.858345Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.858932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.859719Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039352;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.860294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039364;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.863393Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.864274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039282;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.865122Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039364;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.866063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039215;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:00.868886Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039282;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.870845Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039215;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:00.975758Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks395042cbsv5ccz2hx0dr", SessionId: 
ydb://session/3?node_id=1&id=ODk2ZTQ4YzAtZWY2YTc0Yy1lZDA1N2ZhNy0yZGZjOGY3Zg==, Slow query, duration: 31.566607s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:51:01.535248Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:01.535266Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:01.535863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TPersQueueTest::FetchRequest >> TPersQueueTest::DirectReadPreCached >> TPersQueueTest::UpdatePartitionLocation >> TPersQueueTest::WriteExisting ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-true [GOOD] >> TPersQueueTest::BadTopic Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:04.461262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:04.461348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:04.461386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:04.461421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:04.461461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:04.461491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:04.461538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:04.461605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:04.462334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:04.462681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:04.545221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:04.545273Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:04.561799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:04.562215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:04.562395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:04.568072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:04.568383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:04.568991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.569268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:04.579718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:04.579921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:04.581286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:04.581347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:04.581491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:04.581550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:04.581591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:04.581674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.600046Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:04.750483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:04.750701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.750892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:04.750951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:04.751165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:04.751235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:04.753306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.753495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:04.753670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.753728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:04.753809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:04.753845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:04.755641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.755731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:04.755777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:04.757212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.757253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-25T14:51:04.757302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:04.757344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:04.759605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:04.761221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:04.761372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:04.762280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.762432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:04.762489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:04.762736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:04.762787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:04.762938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:04.762995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:04.764759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:04.764800Z node 1 :FLAT_TX_SCHEMESHARD ... 
57594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:24.988029Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:51:24.988113Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:51:24.988145Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:51:24.988181Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:51:24.988222Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:51:24.988297Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-25T14:51:24.989224Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1197 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:24.989268Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:24.989402Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1197 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:24.989512Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1197 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-25T14:51:24.990169Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 312 RawX2: 120259086585 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:24.990212Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:24.990319Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 312 RawX2: 120259086585 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:24.990370Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:24.990458Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 312 RawX2: 120259086585 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:51:24.990519Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:24.990562Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:24.990602Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:24.990650Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 129 -> 240 2025-06-25T14:51:24.992702Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:24.994405Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:51:24.994537Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:24.994636Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:24.994872Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:24.994921Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:51:24.995021Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:24.995055Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:24.995097Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:24.995133Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:24.995172Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is 
published: true 2025-06-25T14:51:24.995235Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [28:340:2317] message: TxId: 101 2025-06-25T14:51:24.995291Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:24.995333Z node 28 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:51:24.995367Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:51:24.995487Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:51:24.997128Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:24.997172Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [28:341:2318] TestWaitNotification: OK eventTxId 101 2025-06-25T14:51:24.997647Z node 28 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLTableWithpgint8Column_UNIT_NANOSECONDS" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:24.997881Z node 28 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLTableWithpgint8Column_UNIT_NANOSECONDS" took 244us result status StatusSuccess 2025-06-25T14:51:24.998452Z node 28 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLTableWithpgint8Column_UNIT_NANOSECONDS" PathDescription { Self { Name: "TTLTableWithpgint8Column_UNIT_NANOSECONDS" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLTableWithpgint8Column_UNIT_NANOSECONDS" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "pgint8" TypeId: 12288 Id: 2 NotNull: false TypeInfo { PgTypeId: 20 } IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 ColumnUnit: UNIT_NANOSECONDS Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 
ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TPersQueueTest::ReadFromSeveralPartitions >> DemoTx::Scenario_1 >> TSchemeShardTTLTests::ShouldSkipDroppedColumn [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SnapshotSharingByExchanger [GOOD] Test command err: 2025-06-25T14:51:22.881999Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:51:22.882422Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/001a7c/r3tmp/tmpbRDzsf/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:51:22.885714Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/001a7c/r3tmp/tmpbRDzsf/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001a7c/r3tmp/tmpbRDzsf/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 6122906676798498053 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:51:22.923870Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.924103Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:22.940575Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:460:2102] with ResourceBroker at [2:431:2101] 2025-06-25T14:51:22.940685Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:461:2103] 2025-06-25T14:51:22.940812Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:459:2338] with ResourceBroker at [1:430:2319] 2025-06-25T14:51:22.940890Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:462:2339] 2025-06-25T14:51:22.940947Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:51:22.940981Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.941009Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
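For readability, the query text embedded in the KqpJoinOrder::TPCDS90+ColumnStore slow-query record above (the 31.566607s STATUS_CODE_UNSPECIFIED entry) is reproduced here with its \n escape sequences expanded and indentation normalized; this is the same DDL text as logged, not a reconstruction:

  CREATE TABLE t1 (
      id1 Int32 NOT NULL,
      PRIMARY KEY (id1)
  ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

  CREATE TABLE t2 (
      id2 Int64 NOT NULL,
      t1_id1 Int64 NOT NULL,
      -- random_field2 Int32
      PRIMARY KEY (id2)
  ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);

  CREATE TABLE t3 (
      id3 Int16 NOT NULL,
      -- random_field3 Int32
      PRIMARY KEY (id3)
  ) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);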
2025-06-25T14:51:22.941096Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:22.941524Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.951974Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.952201Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.952287Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.952654Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.952772Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:22.952858Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.952889Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.952985Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.953063Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:22.953083Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:22.953146Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863082 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:22.953851Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 0 2025-06-25T14:51:22.953935Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.954315Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.954688Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: 
Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:22.954823Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 2 2025-06-25T14:51:22.954981Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.955199Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:22.955357Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:22.955428Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:23.918471Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-25T14:51:23.918543Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-25T14:51:23.918650Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-1-1 (1 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:51:23.918682Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-1-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:23.918714Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-1-1 (1 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:23.918755Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:23.918790Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-1-1 (1 by [1:459:2338])) 2025-06-25T14:51:23.918997Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 1. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:23.919046Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-2-1-2 (2 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:51:23.919081Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-2-1-2 (2 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:23.919115Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-2-1-2 (2 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:23.919150Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-2-1-2 (2 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:23.919175Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.500000 (insert task kqp-2-1-2 (2 by [1:459:2338])) 2025-06-25T14:51:23.919223Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 2, taskId: 1. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:23.919267Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:23.919371Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: alloc, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863083 AvailableComputeActors: 80 UsedMemory: 200 TotalMemory: 1000 Memory { Pool: 1 Available: 800 } ExecutionUnits: 80 2025-06-25T14:51:23.919581Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:24.245106Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-25T14:51:24.245277Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-1-1 (1 by [2:460:2102]) priority=0 resources={0, 100} 2025-06-25T14:51:24.245354Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-1-1 (1 by [2:460:2102]) to queue queue_kqp_resource_manager 2025-06-25T14:51:24.245431Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-1-1 (1 by [2:460:2102]) from queue queue_kqp_resource_manager 2025-06-25T14:51:24.245470Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-1-1 (1 by [2:460:2102]) to queue queue_kqp_resource_manager 2025-06-25T14:51:24.245525Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-1-1 (1 by [2:460:2102])) 2025-06-25T14:51:24.245636Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 1. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:24.245708Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-2-2-2 (2 by [2:460:2102]) priority=0 resources={0, 100} 2025-06-25T14:51:24.245752Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-2-2-2 (2 by [2:460:2102]) to queue queue_kqp_resource_manager 2025-06-25T14:51:24.245790Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-2-2-2 (2 by [2:460:2102]) from queue queue_kqp_resource_manager 2025-06-25T14:51:24.245823Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-2-2-2 (2 by [2:460:2102]) to queue queue_kqp_resource_manager 2025-06-25T14:51:24.245857Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.500000 (insert task kqp-2-2-2 (2 by [2:460:2102])) 2025-06-25T14:51:24.245944Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 2, taskId: 2. 
Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:24.246006Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:24.246133Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: alloc, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863084 AvailableComputeActors: 80 UsedMemory: 200 TotalMemory: 1000 Memory { Pool: 1 Available: 800 } ExecutionUnits: 80 2025-06-25T14:51:24.246434Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:24.531957Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-25T14:51:24.532120Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-1-1 (1 by [1:459:2338]) (release resources {0, 100}) 2025-06-25T14:51:24.532182Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.500000 to 0.350200 (remove task kqp-1-1-1 (1 by [1:459:2338])) 2025-06-25T14:51:24.532220Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.200400 2025-06-25T14:51:24.532262Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2025-06-25T14:51:24.532319Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-2-1-2 (2 by [1:459:2338]) (release resources {0, 100}) 2025-06-25T14:51:24.532358Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.350200 to 0.200400 (remove task kqp-2-1-2 (2 by [1:459:2338])) 2025-06-25T14:51:24.532390Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 2, taskId: 1. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2025-06-25T14:51:24.532465Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:24.532614Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: alloc, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863085 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:24.532862Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:24.806239Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request 2025-06-25T14:51:24.806422Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-1-1 (1 by [2:460:2102]) (release resources {0, 100}) 2025-06-25T14:51:24.806481Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.500000 to 0.350200 (remove task kqp-1-1-1 (1 by [2:460:2102])) 2025-06-25T14:51:24.806515Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.200400 2025-06-25T14:51:24.806559Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 1. 
Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2025-06-25T14:51:24.806598Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-2-2-2 (2 by [2:460:2102]) (release resources {0, 100}) 2025-06-25T14:51:24.806653Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.350200 to 0.200400 (remove task kqp-2-2-2 (2 by [2:460:2102])) 2025-06-25T14:51:24.806702Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 2, taskId: 2. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 10. 2025-06-25T14:51:24.806762Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:24.806902Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: alloc, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863086 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:24.807230Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:25.091076Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:423: Schedule Snapshot request |87.0%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::ShouldSkipDroppedColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:06.418423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:06.418490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.418524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:06.418552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:06.418582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:06.418601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:06.418634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:06.418694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:06.419263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:06.419514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:06.497109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:06.497163Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:06.513059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:06.513433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:06.513589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:06.519073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:06.519348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:06.519942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.520212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:06.523070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:06.523223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:06.524334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:06.524388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:06.524509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:06.524559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:06.524592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:06.524669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.530805Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:06.654760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { 
Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:06.654980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.655177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:06.655228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:06.655438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:06.655523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:06.657442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.657613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:06.657848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.657906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:06.657970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:06.658004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:06.664558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.664652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:06.664706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:06.666378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.666436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:06.666503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.666561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:06.670001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:06.671734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:06.671875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:06.672640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:06.672732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:06.672773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.672961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:06.672995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:06.673138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:06.673213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:06.674771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:06.674802Z node 1 :FLAT_TX_SCHEMESHARD ... 
25-06-25T14:51:07.141550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:513:2472] TestWaitNotification: OK eventTxId 103 2025-06-25T14:51:12.606238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:51:12.606306Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:14.435818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0389 2025-06-25T14:51:14.446518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0807 2025-06-25T14:51:14.487670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-25T14:51:14.487871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-25T14:51:14.487969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:14.488080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-25T14:51:14.488134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-25T14:51:14.488184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:14.488233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-25T14:51:14.498739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:51:17.950348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0106 2025-06-25T14:51:17.960909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0209 2025-06-25T14:51:18.001998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-25T14:51:18.002176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-25T14:51:18.002245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:18.002345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-25T14:51:18.002403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-25T14:51:18.002468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:18.002516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-25T14:51:18.012908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:51:21.398788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0106 2025-06-25T14:51:21.409341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0209 2025-06-25T14:51:21.450335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-25T14:51:21.450507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-25T14:51:21.450586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:21.450696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-25T14:51:21.450749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-25T14:51:21.450785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:21.450813Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-25T14:51:21.461144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:51:24.736528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0037 2025-06-25T14:51:24.747077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0063 2025-06-25T14:51:24.787984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-25T14:51:24.788154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-25T14:51:24.788248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:24.788357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-25T14:51:24.788411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 0 row count 0 2025-06-25T14:51:24.788457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=TTLEnabledTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:24.788501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-25T14:51:24.798942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:51:28.210441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-06-25T14:51:28.210548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T14:51:28.210708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-25T14:51:28.210937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409547, request: TableId: 2 Expiration { ColumnId: 2 WallClockTimestamp: 60025000 ColumnUnit: UNIT_AUTO } SchemaVersion: 3 Indexes { OwnerId: 72057594046678944 PathId: 4 SchemaVersion: 1 KeyMap { IndexColumnId: 1 MainColumnId: 3 } KeyMap { IndexColumnId: 2 MainColumnId: 1 } } Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 
BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-06-25T14:51:28.211515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6744: Conditional erase accepted: tabletId: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:51:28.211847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-25T14:51:28.211882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:51:28.214885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-25T14:51:28.215046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T14:51:28.215086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:1, run at: 1970-01-01T01:01:00.025000Z, at schemeshard: 72057594046678944 2025-06-25T14:51:28.215141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> OlapEstimationRowsCorrectness::TPCH5 [GOOD] Test command err: Trying to start YDB, gRPC: 29976, MsgBus: 27644 2025-06-25T14:49:49.452707Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899279683622563:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:49.473100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d48/r3tmp/tmpWiEKBC/pdisk_1.dat 2025-06-25T14:49:50.072241Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899279683622535:2080] 1750862989437300 != 1750862989437303 2025-06-25T14:49:50.079991Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:50.085802Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:50.085915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:50.091635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29976, node 1 2025-06-25T14:49:50.360788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:50.360809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:50.360817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:50.360905Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:50.461613Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27644 TClient is connected to server localhost:27644 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:51.438824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:51.472983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:53.469210Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899296863492366:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:53.469322Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899296863492374:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:53.469390Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:53.473225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:53.503909Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899296863492380:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:53.600171Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899296863492433:2338] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:54.239303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:54.454559Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899279683622563:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:54.454628Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:54.669977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519899301158459983:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:54.670168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519899301158459983:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:54.670429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519899301158459983:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:54.670530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519899301158459983:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:54.670628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519899301158459983:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:54.670712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519899301158459983:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:54.670801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519899301158459983:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:54.670912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519899301158459983:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:54.671002Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519899301158459983:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:54.671088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519899301158459983:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:54.671201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[1:7519899301158459983:2321];tablet_id=72075186224037898;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:54.678015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899301158460027:2327];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:54.678091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899301158460027:2327];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:54.678256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899301158460027:2327];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:54.678354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899301158460027:2327];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:54.678428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899301158460027:2327];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:54.678519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899301158460027:2327];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:54.678614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899301158460027:2327];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:54.678727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899301158460027:2327];tabl ... 
4;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.247291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.247407Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039356;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.250801Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039356;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.251226Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039382;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.251442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.251732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.254721Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039378;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.255501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.255577Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.256080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039284;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.259159Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039346;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.259661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.260221Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039284;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.260743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-25T14:51:09.263462Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039370;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.263929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.264431Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039366;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.264869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.268289Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039380;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.268826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.268962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.269480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.272337Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039362;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.272936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039340;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.273419Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039374;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.273896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039372;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.276145Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039340;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.276966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039246;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.277974Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039372;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.278467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.281168Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039246;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.281559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039328;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.282930Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.283432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.285922Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039328;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.286504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.288005Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039368;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.288530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039342;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.291057Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039376;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.291553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039348;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:09.292772Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039342;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.301245Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039348;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:09.342431Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks3j8e4ax8j3exa7c8fcc3", SessionId: 
ydb://session/3?node_id=1&id=ZDI1ODdlN2UtZDM4NmRiNjYtMzExYTQzOTMtMmY5N2FlZGY=, Slow query, duration: 30.607720s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:51:09.577931Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:09.577931Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:09.578417Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TSchemeShardColumnTableTTL::CreateColumnTable [GOOD] >> KqpJoinOrder::CanonizedJoinOrderTPCH11 [GOOD] >> TStorageTenantTest::RemoveStoragePoolAndCreateOneMore >> TStorageTenantTest::CreateSolomonInsideSubDomain >> TStorageTenantTest::CreateTableInsideSubDomain2 >> TStorageTenantTest::LsLs >> TStorageTenantTest::DeclareAndDefine >> TStorageTenantTest::CreateTableInsideSubDomain >> KqpRm::SingleTask ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::CreateColumnTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:03.031239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:03.031347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.031393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:03.031435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:03.033316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:03.033379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:03.033463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.033546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:03.034391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:03.035176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:03.111758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:03.111836Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:03.135595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:03.136117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:03.136350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:03.144170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:03.144519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:03.145965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.147088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:03.154165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.154966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:03.160564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.160637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.160781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:03.160838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:03.160886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:03.160971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.171018Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary 
subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:03.288718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:03.288986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.289205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:03.289270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:03.289513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:03.289589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:03.292066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.292257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:03.292479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.292542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:03.292617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:03.292665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:03.294916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.295009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:03.295070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:03.297086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:51:03.297150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.297220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.297293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:03.300750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:03.302485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:03.302795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:03.304009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.304160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:03.304224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.305943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:03.306010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.306393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:03.306494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:03.310196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.310232Z node 1 :FLAT_TX_SCHEMESHARD ... 
shard: 72057594046678944 2025-06-25T14:51:29.662309Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.662377Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.662524Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.662649Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.662710Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.663522Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.663604Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.663664Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.663708Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.663754Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.663804Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.663882Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.663927Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.666056Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.666198Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.666287Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.666365Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.666478Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:29.666525Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:51:29.666656Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:29.666698Z node 
4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:29.666740Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:51:29.666776Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:29.666821Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-25T14:51:29.666894Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:2718:3917] message: TxId: 101 2025-06-25T14:51:29.666952Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:51:29.667041Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:51:29.667081Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:51:29.668272Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 66 2025-06-25T14:51:29.670575Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:51:29.670625Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [4:2719:3918] TestWaitNotification: OK eventTxId 101 2025-06-25T14:51:29.671048Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:29.671250Z node 4 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 224us result status StatusSuccess 2025-06-25T14:51:29.671730Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 ColumnTableTtlSettingsVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 
WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 64 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "TTLEnabledTable" Schema { Columns { Id: 1 Name: "key" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "modified_at" Type: "Uint64" TypeId: 4 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "modified_at" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } TtlSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 ColumnUnit: UNIT_SECONDS Tiers { ApplyAfterSeconds: 3600 Delete { } } } Version: 1 } ColumnShardCount: 64 Sharding { ColumnShards: 72075186233409546 ColumnShards: 72075186233409547 ColumnShards: 72075186233409548 ColumnShards: 72075186233409549 ColumnShards: 72075186233409550 ColumnShards: 72075186233409551 ColumnShards: 72075186233409552 ColumnShards: 72075186233409553 ColumnShards: 72075186233409554 ColumnShards: 72075186233409555 ColumnShards: 72075186233409556 ColumnShards: 72075186233409557 ColumnShards: 72075186233409558 ColumnShards: 72075186233409559 ColumnShards: 72075186233409560 ColumnShards: 72075186233409561 ColumnShards: 72075186233409562 ColumnShards: 72075186233409563 ColumnShards: 72075186233409564 ColumnShards: 72075186233409565 ColumnShards: 72075186233409566 ColumnShards: 72075186233409567 ColumnShards: 72075186233409568 ColumnShards: 72075186233409569 ColumnShards: 72075186233409570 ColumnShards: 72075186233409571 ColumnShards: 72075186233409572 ColumnShards: 72075186233409573 ColumnShards: 72075186233409574 ColumnShards: 72075186233409575 ColumnShards: 72075186233409576 ColumnShards: 72075186233409577 ColumnShards: 72075186233409578 ColumnShards: 72075186233409579 ColumnShards: 72075186233409580 ColumnShards: 72075186233409581 ColumnShards: 72075186233409582 ColumnShards: 72075186233409583 ColumnShards: 72075186233409584 ColumnShards: 72075186233409585 ColumnShards: 72075186233409586 ColumnShards: 72075186233409587 ColumnShards: 72075186233409588 ColumnShards: 72075186233409589 ColumnShards: 72075186233409590 ColumnShards: 72075186233409591 ColumnShards: 72075186233409592 ColumnShards: 72075186233409593 ColumnShards: 72075186233409594 ColumnShards: 72075186233409595 ColumnShards: 72075186233409596 ColumnShards: 72075186233409597 ColumnShards: 72075186233409598 ColumnShards: 72075186233409599 ColumnShards: 72075186233409600 ColumnShards: 72075186233409601 ColumnShards: 
72075186233409602 ColumnShards: 72075186233409603 ColumnShards: 72075186233409604 ColumnShards: 72075186233409605 ColumnShards: 72075186233409606 ColumnShards: 72075186233409607 ColumnShards: 72075186233409608 ColumnShards: 72075186233409609 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "modified_at" } } StorageConfig { DataChannelCount: 64 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TPersQueueTest::SetupLockSession2 >> KqpRm::SingleTask [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::SingleTask [GOOD] Test command err: 2025-06-25T14:51:31.427723Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T14:51:31.428385Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/001a55/r3tmp/tmpR9fXpg/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T14:51:31.429012Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/001a55/r3tmp/tmpR9fXpg/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001a55/r3tmp/tmpR9fXpg/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 6229157829635485673 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T14:51:31.471945Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:31.472211Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T14:51:31.489400Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [2:460:2102] with ResourceBroker at [2:431:2101] 2025-06-25T14:51:31.489526Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [2:461:2103] 2025-06-25T14:51:31.489640Z node 1 :KQP_RESOURCE_MANAGER 
DEBUG: kqp_rm_service.cpp:599: Start KqpResourceManagerActor at [1:459:2338] with ResourceBroker at [1:430:2319] 2025-06-25T14:51:31.489695Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:121: Start KqpResourceInfoExchangerActor at [1:462:2339] 2025-06-25T14:51:31.489804Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:31.489837Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:51:31.489859Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-25T14:51:31.489873Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-25T14:51:31.490026Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:31.504657Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863091 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:31.504877Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:31.504953Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: data_center update, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863091 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:31.505317Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:31.505457Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_resource_info_exchanger.cpp:465: Received tenant pool status for exchanger, serving tenant: /dc-1, board: kqpexch+/dc-1 2025-06-25T14:51:31.505624Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:31.505661Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:31.505761Z node 2 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 2 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372045444738657 } Timestamp: 1750863091 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:31.505994Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:753: Received tenant pool status, serving tenant: /dc-1, board: kqprm+/dc-1 2025-06-25T14:51:31.506021Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:913: Don't set KqpProxySharedResources 2025-06-25T14:51:31.506105Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: tenant updated, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863091 AvailableComputeActors: 100 UsedMemory: 0 TotalMemory: 1000 Memory { 
Pool: 1 Available: 1000 } ExecutionUnits: 100 2025-06-25T14:51:31.506830Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:31.507102Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:31.507562Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:479: Get board info from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:31.507793Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:31.507888Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:31.508121Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:31.508260Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:501: Get board info update from subscriber, serving tenant: /dc-1, board: kqpexch+/dc-1, with size: 1 2025-06-25T14:51:31.508377Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:31.508527Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 2 2025-06-25T14:51:31.508608Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:526: Get resources info from node: 1 2025-06-25T14:51:31.511098Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new kqp_query task kqp-1-2-1 (1 by [1:459:2338]) priority=0 resources={0, 100} 2025-06-25T14:51:31.511163Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task kqp-1-2-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:31.511230Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {0, 100} for task kqp-1-2-1 (1 by [1:459:2338]) from queue queue_kqp_resource_manager 2025-06-25T14:51:31.511274Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task kqp-1-2-1 (1 by [1:459:2338]) to queue queue_kqp_resource_manager 2025-06-25T14:51:31.511316Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_kqp_resource_manager from 0.000000 to 0.250000 (insert task kqp-1-2-1 (1 by [1:459:2338])) 2025-06-25T14:51:31.511532Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:351: TxId: 1, taskId: 2. Allocated TKqpResourcesRequest{ MemoryPool: 1, Memory: 100ExternalMemory: 0 } 2025-06-25T14:51:31.511678Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task kqp-1-2-1 (1 by [1:459:2338]) (release resources {0, 100}) 2025-06-25T14:51:31.511725Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_kqp_resource_manager from 0.250000 to 0.000000 (remove task kqp-1-2-1 (1 by [1:459:2338])) 2025-06-25T14:51:31.511759Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 1, taskId: 2. Released resources, Memory: 100, Free Tier: 0, ExecutionUnits: 0. >> KqpJoinOrder::CanonizedJoinOrderTPCDS78 [GOOD] |87.0%| [TA] $(B)/ydb/core/kqp/rm_service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |87.0%| [TA] {RESULT} $(B)/ydb/core/kqp/rm_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TStorageTenantTest::Boot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH11 [GOOD] Test command err: Trying to start YDB, gRPC: 10686, MsgBus: 9341 2025-06-25T14:49:53.136519Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899296697659255:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:53.141204Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d45/r3tmp/tmpXynauk/pdisk_1.dat 2025-06-25T14:49:53.856548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:53.856629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:53.864992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:53.906154Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:53.908740Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899296697659055:2080] 1750862993071529 != 1750862993071532 TServer::EnableGrpc on GrpcPort 10686, node 1 2025-06-25T14:49:54.188564Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:54.189385Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:54.189395Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:54.189402Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:54.189983Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9341 TClient is connected to server localhost:9341 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:49:55.330771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:55.373269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:57.870012Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899313877528888:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:57.870130Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:57.870410Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899313877528900:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:57.874028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:57.884751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:49:57.884925Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899313877528902:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:57.958501Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899313877528953:2339] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:58.124430Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899296697659255:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:58.136732Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:58.264527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:58.500020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899318172496456:2323];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:58.500219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899318172496456:2323];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:58.500431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899318172496456:2323];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:58.500548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899318172496456:2323];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:58.500636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899318172496456:2323];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:58.500731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899318172496456:2323];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:58.500825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899318172496456:2323];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:58.500957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899318172496456:2323];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:58.512488Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899318172496456:2323];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:58.512679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899318172496456:2323];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:58.512775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899318172496456:2323];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:58.513604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899318172496449:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:58.513633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899318172496449:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:58.513806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899318172496449:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:58.513904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899318172496449:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:58.514011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899318172496449:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:58.514105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899318172496449:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:58.514207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899318172496449:2316];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register ... 
line=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.027630Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039351;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.028047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.028436Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039289;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.028902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.031694Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.032358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.032875Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039335;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.033412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.037056Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039305;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.037662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.037681Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.038189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.042139Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.042407Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039311;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.042752Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.042977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.047816Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039345;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.047825Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039369;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.048435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039225;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.048468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.052945Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039225;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.052962Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039339;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.053542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.053544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039221;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.058245Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.058435Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039221;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.058857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.059049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.063900Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039275;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.063956Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.064964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.065964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039223;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.069641Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039359;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.070287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.070635Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039223;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.071330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.075504Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.075654Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.076193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.076304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:14.081060Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039321;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.081061Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039315;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:14.181028Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks3rh0byb95apxvtz1fyyy", SessionId: 
ydb://session/3?node_id=1&id=Y2QxN2YwOTUtZDBmMzdmNjUtYmZmODljNmYtOTU4ZmFiZWY=, Slow query, duration: 29.028021s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:51:14.418514Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:14.418514Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:14.419155Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> TStorageTenantTest::RemoveStoragePoolBeforeDroppingTablet >> TopicService::OneConsumer_TheRangesDoNotOverlap [GOOD] >> TStorageTenantTest::GenericCases >> TStorageTenantTest::LsLs [GOOD] >> TopicService::OneConsumer_TheRangesOverlap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCDS78 [GOOD] Test command err: Trying to start YDB, gRPC: 5728, MsgBus: 1721 2025-06-25T14:49:24.346911Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899174437766524:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:24.347309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d67/r3tmp/tmpEknVtI/pdisk_1.dat 2025-06-25T14:49:24.857208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:24.857299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:24.863024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:49:24.937080Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:24.940227Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899174437766342:2080] 1750862964309722 != 1750862964309725 TServer::EnableGrpc on GrpcPort 5728, node 1 2025-06-25T14:49:25.152923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:25.152943Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:25.152951Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:25.153043Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:25.300538Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1721 TClient is connected to server localhost:1721 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:26.051923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:27.950711Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899187322668879:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:27.950837Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:27.952504Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899187322668891:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:27.956550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:27.973972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:49:27.974820Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899187322668893:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:28.045274Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899191617636241:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:28.439647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:28.716638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899191617636498:2322];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:28.716816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899191617636498:2322];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:28.717112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899191617636498:2322];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:28.717120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899191617636489:2313];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:28.717187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899191617636489:2313];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:28.717239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899191617636498:2322];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:28.717282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899191617636489:2313];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:28.717353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899191617636498:2322];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:28.717396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899191617636489:2313];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:28.717444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037892;self_id=[1:7519899191617636498:2322];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:28.717468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899191617636489:2313];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:28.717523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899191617636498:2322];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:28.717542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899191617636489:2313];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:28.718034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899191617636498:2322];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:28.718142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899191617636498:2322];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:28.718231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899191617636498:2322];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:28.718359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899191617636498:2322];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:28.724785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899191617636489:2313];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:28.724907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899191617636489:2313];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:28.724990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899191617636489:2313];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 20 ... 
ablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.298320Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039255;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.298379Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.298821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.298821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.303699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039295;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.303728Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039313;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.304237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.304238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.309093Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039285;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.309601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.313357Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039337;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.313857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.318265Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.324004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.328248Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039235;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.328755Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039243;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.329183Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.333725Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039317;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.334313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.336103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039283;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.339298Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.339802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.341687Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039283;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.342232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:50.344596Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039329;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.356722Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:50.508340Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2whj6g2d6ztqfq6v4xfr", SessionId: ydb://session/3?node_id=1&id=OGRkYmU5NmYtY2UyYTNmZTctMWZhZTU2MzMtNGYxNDEzMGE=, Slow query, duration: 34.009229s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, 
AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:50.993684Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:50.994166Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:50.994745Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899513740239806:9988];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224039094;receive=72075186224038933; 2025-06-25T14:50:50.995136Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:22.241996Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks4cs55p9n1eam6k68gk51", SessionId: ydb://session/3?node_id=1&id=OGRkYmU5NmYtY2UyYTNmZTctMWZhZTU2MzMtNGYxNDEzMGE=, Slow query, duration: 16.344897s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "PRAGMA TablePathPrefix='/Root/test/ds';\n\n-- NB: Subquerys\n\n$ws =\n\n (select date_dim.d_year AS ws_sold_year, web_sales.ws_item_sk ws_item_sk,\n\n web_sales.ws_bill_customer_sk ws_customer_sk,\n\n sum(ws_quantity) ws_qty,\n\n sum(ws_wholesale_cost) ws_wc,\n\n sum(ws_sales_price) ws_sp\n\n from web_sales as web_sales\n\n left join web_returns as web_returns on web_returns.wr_order_number=web_sales.ws_order_number and web_sales.ws_item_sk=web_returns.wr_item_sk\n\n join date_dim as date_dim on web_sales.ws_sold_date_sk = date_dim.d_date_sk\n\n where wr_order_number is null\n\n group by date_dim.d_year, web_sales.ws_item_sk, web_sales.ws_bill_customer_sk\n\n );\n\n$cs =\n\n (select date_dim.d_year AS cs_sold_year, catalog_sales.cs_item_sk cs_item_sk,\n\n catalog_sales.cs_bill_customer_sk cs_customer_sk,\n\n sum(cs_quantity) cs_qty,\n\n sum(cs_wholesale_cost) cs_wc,\n\n sum(cs_sales_price) cs_sp\n\n from catalog_sales as catalog_sales\n\n left join catalog_returns as catalog_returns on catalog_returns.cr_order_number=catalog_sales.cs_order_number and catalog_sales.cs_item_sk=catalog_returns.cr_item_sk\n\n join date_dim as date_dim on catalog_sales.cs_sold_date_sk = date_dim.d_date_sk\n\n where cr_order_number is null\n\n group by date_dim.d_year, catalog_sales.cs_item_sk, catalog_sales.cs_bill_customer_sk\n\n );\n\n$ss=\n\n (select date_dim.d_year AS ss_sold_year, store_sales.ss_item_sk ss_item_sk,\n\n store_sales.ss_customer_sk ss_customer_sk,\n\n sum(ss_quantity) ss_qty,\n\n sum(ss_wholesale_cost) ss_wc,\n\n sum(ss_sales_price) ss_sp\n\n from store_sales as store_sales\n\n left join store_returns as store_returns on store_returns.sr_ticket_number=store_sales.ss_ticket_number and 
store_sales.ss_item_sk=store_returns.sr_item_sk\n\n join date_dim as date_dim on store_sales.ss_sold_date_sk = date_dim.d_date_sk\n\n where sr_ticket_number is null\n\n group by date_dim.d_year, store_sales.ss_item_sk, store_sales.ss_customer_sk\n\n );\n\n-- start query 1 in stream 0 using template query78.tpl and seed 1819994127\n\n select\n\nss_sold_year, ss_item_sk, ss_customer_sk,\n\ncast(ss_qty as double)/(coalesce(ws_qty,0)+coalesce(cs_qty,0)) ratio,\n\nss_qty store_qty, ss_wc store_wholesale_cost, ss_sp store_sales_price,\n\ncoalesce(ws_qty,0)+coalesce(cs_qty,0) other_chan_qty,\n\ncoalesce(ws_wc,0)+coalesce(cs_wc,0) other_chan_wholesale_cost,\n\ncoalesce(ws_sp,0)+coalesce(cs_sp,0) other_chan_sales_price\n\nfrom $ss ss\n\nleft join $ws ws on (ws.ws_sold_year=ss.ss_sold_year and ws.ws_item_sk=ss.ss_item_sk and ws.ws_customer_sk=ss.ss_customer_sk)\n\nleft join $cs cs on (cs.cs_sold_year=ss.ss_sold_year and cs.cs_item_sk=ss.ss_item_sk and cs.cs_customer_sk=ss.ss_customer_sk)\n\nwhere (coalesce(ws_qty,0)>0 or coalesce(cs_qty, 0)>0) and ss_sold_year=2001\n\norder by\n\n ss_sold_year, ss_item_sk, ss_customer_sk,\n\n store_qty desc, store_wholesale_cost desc, store_sales_price desc,\n\n other_chan_qty,\n\n other_chan_wholesale_cost,\n\n other_chan_sales_price,\n\n ratio\n\nlimit 100;\n\n\n\n-- end query 1 in stream 0 using template query78.tpl", parameters: 0b >> TStorageTenantTest::DeclareAndDefine [GOOD] >> TStorageTenantTest::CreateSolomonInsideSubDomain [GOOD] >> TStorageTenantTest::CreateTableInsideSubDomain [GOOD] >> TSchemeShardTTLTests::CheckCounters [GOOD] >> TStorageTenantTest::Boot [GOOD] >> TStorageTenantTest::CopyTableAndConcurrentSplit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::LsLs [GOOD] Test command err: 2025-06-25T14:51:31.133172Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899719601779494:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:31.133225Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b7c/r3tmp/tmpgGa3VK/pdisk_1.dat 2025-06-25T14:51:31.398646Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:51:31.517082Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:31.517186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:31.523784Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:31.543173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:31.543270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:31.550362Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:51:31.550598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:31.551382Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20440 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:51:31.760101Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899719601779687:2144] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:31.780809Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899719601780121:2441] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:31.780921Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899719601779710:2157], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:31.780944Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519899719601779710:2157], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:51:31.781110Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519899719601780122:2442][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:31.782684Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899719601779361:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899719601780126:2442] 2025-06-25T14:51:31.782764Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899719601779361:2052] Subscribe: subscriber# [1:7519899719601780126:2442], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.782824Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899719601779364:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899719601780127:2442] 2025-06-25T14:51:31.782852Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899719601779364:2055] Subscribe: subscriber# [1:7519899719601780127:2442], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.782886Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899719601779367:2058] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899719601780128:2442] 2025-06-25T14:51:31.782903Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899719601779367:2058] Subscribe: subscriber# [1:7519899719601780128:2442], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.782942Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899719601780126:2442][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899719601779361:2052] 2025-06-25T14:51:31.782962Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899719601780127:2442][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899719601779364:2055] 2025-06-25T14:51:31.782986Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: 
[replica][1:7519899719601780128:2442][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899719601779367:2058] 2025-06-25T14:51:31.783044Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899719601780122:2442][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899719601780123:2442] 2025-06-25T14:51:31.783100Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899719601780122:2442][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899719601780124:2442] 2025-06-25T14:51:31.783163Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519899719601780122:2442][/dc-1] Set up state: owner# [1:7519899719601779710:2157], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:31.783244Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899719601780122:2442][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899719601780125:2442] 2025-06-25T14:51:31.783273Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519899719601780122:2442][/dc-1] Path was already updated: owner# [1:7519899719601779710:2157], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:31.783304Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899719601780126:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899719601780123:2442], cookie# 1 2025-06-25T14:51:31.783317Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899719601780127:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899719601780124:2442], cookie# 1 2025-06-25T14:51:31.783343Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899719601780128:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899719601780125:2442], cookie# 1 2025-06-25T14:51:31.783366Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899719601779361:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899719601780126:2442] 2025-06-25T14:51:31.783394Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899719601779361:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899719601780126:2442], cookie# 1 2025-06-25T14:51:31.783437Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899719601779364:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899719601780127:2442] 2025-06-25T14:51:31.783449Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899719601779364:2055] Handle 
NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899719601780127:2442], cookie# 1 2025-06-25T14:51:31.783465Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899719601779367:2058] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899719601780128:2442] 2025-06-25T14:51:31.783473Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899719601779367:2058] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899719601780128:2442], cookie# 1 2025-06-25T14:51:31.784367Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899719601780126:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899719601779361:2052], cookie# 1 2025-06-25T14:51:31.784384Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899719601780127:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899719601779364:2055], cookie# 1 2025-06-25T14:51:31.784411Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899719601780128:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899719601779367:2058], cookie# 1 2025-06-25T14:51:31.784451Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899719601780122:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899719601780123:2442], cookie# 1 2025-06-25T14:51:31.784475Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899719601780122:2442][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:51:31.784486Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899719601780122:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899719601780124:2442], cookie# 1 2025-06-25T14:51:31.784494Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899719601780122:2442][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:51:31.784502Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899719601780122:2442][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899719601780125:2442], cookie# 1 2025-06-25T14:51:31.784522Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519899719601780122:2442][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:51:31.823765Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899719601779710:2157], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPath ... 
ookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:34.336849Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519899730372752058:2124], recipient# [2:7519899730372752038:2267], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:34.337197Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519899730372752038:2267], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:51:34.411873Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519899717487850094:2109], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:34.411972Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519899717487850094:2109], cacheItem# { Subscriber: { Subscriber: [2:7519899721782817415:2115] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:34.412029Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519899730372752060:2125], recipient# [2:7519899730372752059:2268], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:34.412261Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:51:34.709693Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519899717487850094:2109], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:34.709816Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519899717487850094:2109], cacheItem# { Subscriber: { Subscriber: [2:7519899730372752041:2120] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 
0 } 2025-06-25T14:51:34.709860Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519899717487850094:2109], cacheItem# { Subscriber: { Subscriber: [2:7519899730372752042:2121] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:34.709950Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519899730372752062:2126], recipient# [2:7519899730372752038:2267], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:34.710090Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519899730372752038:2267], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:51:35.096974Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519899717487850094:2109], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:35.097104Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519899717487850094:2109], cacheItem# { Subscriber: { Subscriber: [2:7519899730372752021:2118] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:35.097190Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519899734667719361:2127], recipient# [2:7519899734667719360:2269], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:35.097421Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:51:35.167761Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519899717487850094:2109], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:35.167905Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519899717487850094:2109], cacheItem# { Subscriber: { Subscriber: [2:7519899730372752041:2120] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true 
ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:35.167955Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519899717487850094:2109], cacheItem# { Subscriber: { Subscriber: [2:7519899730372752042:2121] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:35.168047Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519899734667719362:2128], recipient# [2:7519899730372752038:2267], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:35.168412Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7519899730372752038:2267], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } >> TStorageTenantTest::CreateTableInsideSubDomain2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CheckCounters [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:08.555020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:08.555087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:08.555114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:08.555139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:08.555169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:08.555189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:08.555223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:08.555284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:08.555846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:08.556098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:08.626450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:08.626497Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:08.641923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:08.642044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:08.642165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:08.657109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:08.657334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:08.657930Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:08.658202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:08.659984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:08.660145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:08.661318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:08.661375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:08.661513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:08.661556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:08.661623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:08.661741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.666139Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:51:08.792651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:08.792961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.793183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:08.793238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:08.793436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:08.793499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:08.796265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:08.796465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:08.796636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.796702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:08.796745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:08.796778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:08.799631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.799701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:08.799753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:08.801313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.801353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:08.801446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:08.801491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:08.805039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:08.806836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:08.806985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:08.807829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:08.807942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:08.808023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:08.808301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:08.808376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:08.808526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:08.808595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:08.811994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:08.812038Z node 1 :FLAT_TX_SCHEMESHARD D ... l.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:35.512239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-25T14:51:35.512296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:1346:3243] 2025-06-25T14:51:35.512622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 107 Name: "SchemeShard/NumShardsByTtlLag" Ranges: "0" Ranges: "900" Ranges: "1800" Ranges: "3600" Ranges: "7200" Ranges: "14400" Ranges: "28800" Ranges: "57600" Ranges: "86400" Ranges: "inf" Values: 0 Values: 2 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 2025-06-25T14:51:35.626679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409548 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0001 2025-06-25T14:51:35.626932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409549 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0001 2025-06-25T14:51:35.627414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:3 data size 0 row count 0 2025-06-25T14:51:35.627517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409548 maps to shardIdx: 72057594046678944:3 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:35.627632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409548 2025-06-25T14:51:35.627906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:4 data size 0 row count 0 2025-06-25T14:51:35.627957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409549 maps to shardIdx: 72057594046678944:4 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:35.628003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409549 Name: "SchemeShard/NumShardsByTtlLag" Ranges: "0" Ranges: "900" Ranges: "1800" Ranges: "3600" Ranges: "7200" Ranges: "14400" Ranges: "28800" Ranges: "57600" Ranges: "86400" Ranges: "inf" Values: 0 Values: 0 Values: 2 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 2025-06-25T14:51:35.724287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-06-25T14:51:35.724423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T14:51:35.724535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-25T14:51:35.724683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409549, request: TableId: 4 Expiration { ColumnId: 2 WallClockTimestamp: 1750876358999291 ColumnUnit: UNIT_AUTO } SchemaVersion: 4 Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-06-25T14:51:35.725192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:213: Run conditional erase, tabletId: 72075186233409548, request: TableId: 4 Expiration { ColumnId: 2 WallClockTimestamp: 1750876358999291 ColumnUnit: UNIT_AUTO } SchemaVersion: 4 Limits { BatchMaxBytes: 512000 BatchMinKeys: 1 BatchMaxKeys: 256 }, at schemeshard: 72057594046678944 2025-06-25T14:51:35.725919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6744: Conditional erase accepted: tabletId: 72075186233409548, at schemeshard: 72057594046678944 2025-06-25T14:51:35.726293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6744: Conditional erase accepted: tabletId: 72075186233409549, at schemeshard: 72057594046678944 2025-06-25T14:51:35.727078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-25T14:51:35.727128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409548, at schemeshard: 72057594046678944 2025-06-25T14:51:35.727478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:347: 
TTxScheduleConditionalErase Execute: at schemeshard: 72057594046678944 2025-06-25T14:51:35.727502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:397: Successful conditional erase: tabletId: 72075186233409549, at schemeshard: 72057594046678944 2025-06-25T14:51:35.741133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-25T14:51:35.741264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T14:51:35.741307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:3, run at: 2025-06-25T19:32:38.999291Z, at schemeshard: 72057594046678944 2025-06-25T14:51:35.741404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:453: TTxScheduleConditionalErase Complete: at schemeshard: 72057594046678944 2025-06-25T14:51:35.741444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-25T14:51:35.741490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T14:51:35.741512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__conditional_erase.cpp:106: Skip conditional erase: shardIdx: 72057594046678944:3, run at: 2025-06-25T19:32:38.999291Z, at schemeshard: 72057594046678944 2025-06-25T14:51:35.741544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-25T14:51:35.764239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:51:35.822306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409548 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-25T14:51:35.822428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409549 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-25T14:51:35.822482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:3 data size 0 row count 0 2025-06-25T14:51:35.822553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409548 maps to shardIdx: 72057594046678944:3 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:35.822641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409548 2025-06-25T14:51:35.822785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:4 data size 0 row count 0 2025-06-25T14:51:35.822825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from 
datashardId(TabletID)=72075186233409549 maps to shardIdx: 72057594046678944:4 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:35.822863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409549 Name: "SchemeShard/NumShardsByTtlLag" Ranges: "0" Ranges: "900" Ranges: "1800" Ranges: "3600" Ranges: "7200" Ranges: "14400" Ranges: "28800" Ranges: "57600" Ranges: "86400" Ranges: "inf" Values: 2 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 2025-06-25T14:51:35.856500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:51:35.912207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409548 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-25T14:51:35.912365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409549 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-25T14:51:35.912421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:3 data size 0 row count 0 2025-06-25T14:51:35.912500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409548 maps to shardIdx: 72057594046678944:3 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:35.912593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409548 2025-06-25T14:51:35.912746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:4 data size 0 row count 0 2025-06-25T14:51:35.912784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409549 maps to shardIdx: 72057594046678944:4 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=TTLEnabledTableMoved, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:51:35.912826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409549 Name: "SchemeShard/NumShardsByTtlLag" Ranges: "0" Ranges: "900" Ranges: "1800" Ranges: "3600" Ranges: "7200" Ranges: "14400" Ranges: "28800" Ranges: "57600" Ranges: "86400" Ranges: "inf" Values: 0 Values: 2 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::DeclareAndDefine [GOOD] Test command err: 2025-06-25T14:51:31.082504Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899715678066004:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:31.082599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b68/r3tmp/tmpHOt3mk/pdisk_1.dat 2025-06-25T14:51:31.476237Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:31.521388Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:31.521465Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:31.533441Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22772 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:51:31.660130Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899715678066201:2125] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:31.677589Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899715678066651:2434] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:31.677707Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899715678066262:2158], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:31.677737Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519899715678066262:2158], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:51:31.677886Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519899715678066652:2435][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:31.683377Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715678065901:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899715678066656:2435] 2025-06-25T14:51:31.683390Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715678065904:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899715678066657:2435] 2025-06-25T14:51:31.683463Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715678065904:2053] Subscribe: subscriber# [1:7519899715678066657:2435], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.683463Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715678065901:2050] Subscribe: subscriber# [1:7519899715678066656:2435], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.683513Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715678065907:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899715678066658:2435] 2025-06-25T14:51:31.683527Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715678065907:2056] Subscribe: subscriber# [1:7519899715678066658:2435], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.683561Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: 
[replica][1:7519899715678066656:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715678065901:2050] 2025-06-25T14:51:31.683587Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899715678066657:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715678065904:2053] 2025-06-25T14:51:31.683610Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715678065901:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899715678066656:2435] 2025-06-25T14:51:31.683613Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899715678066658:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715678065907:2056] 2025-06-25T14:51:31.683627Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715678065904:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899715678066657:2435] 2025-06-25T14:51:31.683636Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715678065907:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899715678066658:2435] 2025-06-25T14:51:31.683652Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899715678066652:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715678066653:2435] 2025-06-25T14:51:31.683701Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899715678066652:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715678066654:2435] 2025-06-25T14:51:31.683750Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519899715678066652:2435][/dc-1] Set up state: owner# [1:7519899715678066262:2158], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:31.683881Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899715678066652:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715678066655:2435] 2025-06-25T14:51:31.683952Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519899715678066652:2435][/dc-1] Path was already updated: owner# [1:7519899715678066262:2158], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:31.683996Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899715678066656:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715678066653:2435], cookie# 1 2025-06-25T14:51:31.684010Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:381: [replica][1:7519899715678066657:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715678066654:2435], cookie# 1 2025-06-25T14:51:31.684029Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899715678066658:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715678066655:2435], cookie# 1 2025-06-25T14:51:31.684052Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899715678065904:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715678066657:2435], cookie# 1 2025-06-25T14:51:31.684070Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899715678065907:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715678066658:2435], cookie# 1 2025-06-25T14:51:31.684096Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899715678066657:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715678065904:2053], cookie# 1 2025-06-25T14:51:31.684109Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899715678066658:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715678065907:2056], cookie# 1 2025-06-25T14:51:31.684134Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899715678066652:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715678066654:2435], cookie# 1 2025-06-25T14:51:31.684160Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899715678066652:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:51:31.684176Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899715678066652:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715678066655:2435], cookie# 1 2025-06-25T14:51:31.684187Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899715678066652:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:51:31.684202Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899715678065901:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715678066656:2435], cookie# 1 2025-06-25T14:51:31.684217Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899715678066656:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715678065901:2050], cookie# 1 2025-06-25T14:51:31.684250Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899715678066652:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715678066653:2435], cookie# 1 2025-06-25T14:51:31.684271Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519899715678066652:2435][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:51:31.735447Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899715678066262:2158], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 
1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:34.213616Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715678065901:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899728562969323:3020] 2025-06-25T14:51:34.213628Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715678065901:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899728562969329:3021] 2025-06-25T14:51:34.213638Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715678065901:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899728562969335:3022] 2025-06-25T14:51:34.213649Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715678065904:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899728562969324:3020] 2025-06-25T14:51:34.213661Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715678065904:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899728562969330:3021] 2025-06-25T14:51:34.213670Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715678065904:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899728562969336:3022] 2025-06-25T14:51:34.213684Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715678065907:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899728562969325:3020] 2025-06-25T14:51:34.213695Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715678065907:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899728562969331:3021] 2025-06-25T14:51:34.213705Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715678065907:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899728562969337:3022] 2025-06-25T14:51:34.213740Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899715678066262:2158], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 } 2025-06-25T14:51:34.213790Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519899715678066262:2158], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: 
/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519899728562969317:3020] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:51:34.213857Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519899715678066262:2158], cacheItem# { Subscriber: { Subscriber: [1:7519899728562969317:3020] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:34.213888Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899715678066262:2158], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-25T14:51:34.213919Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519899715678066262:2158], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519899728562969318:3021] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:51:34.213963Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519899715678066262:2158], cacheItem# { Subscriber: { Subscriber: [1:7519899728562969318:3021] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:34.213987Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899715678066262:2158], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-25T14:51:34.214019Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519899715678066262:2158], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519899728562969319:3022] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:51:34.214055Z node 1 
:TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519899715678066262:2158], cacheItem# { Subscriber: { Subscriber: [1:7519899728562969319:3022] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:34.214102Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519899728562969338:3023], recipient# [1:7519899728562969315:2295], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:34.214153Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519899728562969339:3024], recipient# [1:7519899728562969316:2296], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:35.092816Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899715678066262:2158], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:35.092930Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519899715678066262:2158], cacheItem# { Subscriber: { Subscriber: [1:7519899719973034274:2683] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:35.093017Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519899732857936655:3028], recipient# [1:7519899732857936654:2297], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: 
dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:35.216241Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899715678066262:2158], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:35.216385Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519899715678066262:2158], cacheItem# { Subscriber: { Subscriber: [1:7519899728562969317:3020] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:35.216473Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519899732857936657:3029], recipient# [1:7519899732857936656:2298], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateTableInsideSubDomain [GOOD] Test command err: 2025-06-25T14:51:31.094611Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899719047478012:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:31.100463Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b69/r3tmp/tmpaVyPl9/pdisk_1.dat 2025-06-25T14:51:31.428979Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:31.506513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:31.506603Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:31.513831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24324 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:51:31.623367Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899719047478231:2142] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:31.643932Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899719047478656:2432] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:31.645302Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899719047478258:2157], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:31.645343Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519899719047478258:2157], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:51:31.645548Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519899719047478657:2433][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:31.647321Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899719047477909:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899719047478661:2433] 2025-06-25T14:51:31.647382Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899719047477909:2051] Subscribe: subscriber# [1:7519899719047478661:2433], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.647441Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899719047477915:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899719047478663:2433] 2025-06-25T14:51:31.647455Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899719047477915:2057] Subscribe: subscriber# [1:7519899719047478663:2433], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.647505Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899719047478661:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899719047477909:2051] 2025-06-25T14:51:31.647543Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899719047478663:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899719047477915:2057] 2025-06-25T14:51:31.647586Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899719047478657:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899719047478658:2433] 2025-06-25T14:51:31.647646Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899719047478657:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899719047478660:2433] 2025-06-25T14:51:31.647695Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519899719047478657:2433][/dc-1] Set up state: owner# [1:7519899719047478258:2157], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 
72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:31.647828Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899719047478661:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899719047478658:2433], cookie# 1 2025-06-25T14:51:31.647867Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899719047478662:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899719047478659:2433], cookie# 1 2025-06-25T14:51:31.647880Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899719047478663:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899719047478660:2433], cookie# 1 2025-06-25T14:51:31.647909Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899719047477909:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899719047478661:2433] 2025-06-25T14:51:31.647945Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899719047477909:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899719047478661:2433], cookie# 1 2025-06-25T14:51:31.647962Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899719047477915:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899719047478663:2433] 2025-06-25T14:51:31.647973Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899719047477915:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899719047478663:2433], cookie# 1 2025-06-25T14:51:31.648071Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899719047477912:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899719047478662:2433] 2025-06-25T14:51:31.648122Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899719047477912:2054] Subscribe: subscriber# [1:7519899719047478662:2433], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.648154Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899719047477912:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899719047478662:2433], cookie# 1 2025-06-25T14:51:31.648193Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899719047478661:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899719047477909:2051], cookie# 1 2025-06-25T14:51:31.648207Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899719047478663:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899719047477915:2057], cookie# 1 2025-06-25T14:51:31.648229Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899719047478662:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899719047477912:2054] 2025-06-25T14:51:31.648245Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899719047478662:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899719047477912:2054], cookie# 1 2025-06-25T14:51:31.648283Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: 
[main][1:7519899719047478657:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899719047478658:2433], cookie# 1 2025-06-25T14:51:31.648341Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899719047478657:2433][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:51:31.648356Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899719047478657:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899719047478660:2433], cookie# 1 2025-06-25T14:51:31.648366Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899719047478657:2433][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:51:31.648383Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899719047478657:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899719047478659:2433] 2025-06-25T14:51:31.648429Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519899719047478657:2433][/dc-1] Path was already updated: owner# [1:7519899719047478258:2157], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:31.648452Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899719047478657:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899719047478659:2433], cookie# 1 2025-06-25T14:51:31.648470Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519899719047478657:2433][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:51:31.648526Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899719047477912:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899719047478662:2433] 2025-06-25T14:51:31.688886Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899719047478258:2157], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 
ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... 2075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:51:33.381348Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899721593753748:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:33.381393Z node 3 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [3:7519899721593753748:2109], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480 2025-06-25T14:51:33.381513Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][3:7519899725888721439:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:33.379458Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899719047477915:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0 DomainOwnerId: 72057594046644480 }: sender# [3:7519899725888721437:2348] 2025-06-25T14:51:33.379482Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899719047477915:2057] Subscribe: subscriber# [3:7519899725888721437:2348], path# /dc-1/USER_0, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:33.380234Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899719047477909:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 6 }: sender# [3:7519899725888721435:2348] 2025-06-25T14:51:33.380249Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899719047477912:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 6 }: sender# [3:7519899725888721436:2348] 2025-06-25T14:51:33.380260Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899719047477915:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 6 }: sender# [3:7519899725888721437:2348] 2025-06-25T14:51:33.382595Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899719047477909:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7519899725888721443:2350] 2025-06-25T14:51:33.382616Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7519899719047477909:2051] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-06-25T14:51:33.382662Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899719047477912:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7519899725888721444:2350] 2025-06-25T14:51:33.382677Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899719047477909:2051] Subscribe: subscriber# [3:7519899725888721443:2350], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:33.382689Z node 1 
:SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7519899719047477912:2054] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-06-25T14:51:33.382729Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899719047477915:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7519899725888721445:2350] 2025-06-25T14:51:33.382732Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899719047477912:2054] Subscribe: subscriber# [3:7519899725888721444:2350], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:33.382733Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7519899719047477915:2057] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-06-25T14:51:33.382763Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899719047477915:2057] Subscribe: subscriber# [3:7519899725888721445:2350], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:33.383203Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519899725888721443:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [1:7519899719047477909:2051] 2025-06-25T14:51:33.383240Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519899725888721444:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [1:7519899719047477912:2054] 2025-06-25T14:51:33.383282Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519899725888721445:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [1:7519899719047477915:2057] 2025-06-25T14:51:33.383335Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519899725888721439:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [3:7519899725888721440:2350] 2025-06-25T14:51:33.383397Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519899725888721439:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [3:7519899725888721441:2350] 2025-06-25T14:51:33.383446Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:7519899725888721439:2350][/dc-1/USER_0/.metadata/initialization/migrations] Set up state: owner# [3:7519899721593753748:2109], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:33.383485Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519899725888721439:2350][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [3:7519899725888721442:2350] 2025-06-25T14:51:33.383523Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: 
[main][3:7519899725888721439:2350][/dc-1/USER_0/.metadata/initialization/migrations] Ignore empty state: owner# [3:7519899721593753748:2109], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:33.383589Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519899721593753748:2109], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/initialization/migrations PathId: Strong: 1 } 2025-06-25T14:51:33.383661Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519899721593753748:2109], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/initialization/migrations PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519899725888721439:2350] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:51:33.383753Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519899721593753748:2109], cacheItem# { Subscriber: { Subscriber: [3:7519899725888721439:2350] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:33.383830Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899725888721446:2351], recipient# [3:7519899725888721429:2284], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:33.384450Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899719047477915:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519899725888721445:2350] 2025-06-25T14:51:33.384480Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899719047477909:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519899725888721443:2350] 2025-06-25T14:51:33.384496Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899719047477912:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519899725888721444:2350] 2025-06-25T14:51:33.396101Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:34.385969Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899721593753748:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:34.386136Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519899721593753748:2109], cacheItem# { Subscriber: { Subscriber: [3:7519899725888721439:2350] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:34.386226Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899730183688744:2352], recipient# [3:7519899730183688743:2285], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateSolomonInsideSubDomain [GOOD] Test command err: 2025-06-25T14:51:31.118179Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899715672301552:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:31.121252Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b7d/r3tmp/tmp2Ip3WS/pdisk_1.dat 2025-06-25T14:51:31.455957Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:31.495634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:31.495777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:31.498377Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27834 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:51:31.661607Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899715672301776:2143] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:31.682594Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899715672302214:2445] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:31.682770Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899715672301799:2156], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:31.682826Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519899715672301799:2156], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:51:31.683013Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519899715672302215:2446][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:31.684721Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715672301452:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899715672302219:2446] 2025-06-25T14:51:31.684777Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715672301452:2051] Subscribe: subscriber# [1:7519899715672302219:2446], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.684826Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715672301455:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899715672302220:2446] 2025-06-25T14:51:31.684840Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715672301455:2054] Subscribe: subscriber# [1:7519899715672302220:2446], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.684859Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715672301458:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899715672302221:2446] 2025-06-25T14:51:31.684871Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715672301458:2057] Subscribe: subscriber# [1:7519899715672302221:2446], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.684929Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899715672302219:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715672301452:2051] 2025-06-25T14:51:31.685030Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899715672302220:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715672301455:2054] 2025-06-25T14:51:31.685049Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899715672302221:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715672301458:2057] 2025-06-25T14:51:31.685096Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:7519899715672302215:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715672302216:2446] 2025-06-25T14:51:31.685129Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899715672302215:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715672302217:2446] 2025-06-25T14:51:31.685185Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519899715672302215:2446][/dc-1] Set up state: owner# [1:7519899715672301799:2156], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:31.685263Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899715672302215:2446][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715672302218:2446] 2025-06-25T14:51:31.685310Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519899715672302215:2446][/dc-1] Path was already updated: owner# [1:7519899715672301799:2156], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:31.685377Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899715672302219:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715672302216:2446], cookie# 1 2025-06-25T14:51:31.685391Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899715672302220:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715672302217:2446], cookie# 1 2025-06-25T14:51:31.685408Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899715672302221:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715672302218:2446], cookie# 1 2025-06-25T14:51:31.685448Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715672301452:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899715672302219:2446] 2025-06-25T14:51:31.685477Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899715672301452:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715672302219:2446], cookie# 1 2025-06-25T14:51:31.685494Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715672301455:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899715672302220:2446] 2025-06-25T14:51:31.685505Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899715672301455:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715672302220:2446], cookie# 1 2025-06-25T14:51:31.685517Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715672301458:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899715672302221:2446] 
2025-06-25T14:51:31.685527Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899715672301458:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715672302221:2446], cookie# 1 2025-06-25T14:51:31.686077Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899715672302219:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715672301452:2051], cookie# 1 2025-06-25T14:51:31.686102Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899715672302220:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715672301455:2054], cookie# 1 2025-06-25T14:51:31.686117Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899715672302221:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715672301458:2057], cookie# 1 2025-06-25T14:51:31.686153Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899715672302215:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715672302216:2446], cookie# 1 2025-06-25T14:51:31.686177Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899715672302215:2446][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:51:31.686192Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899715672302215:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715672302217:2446], cookie# 1 2025-06-25T14:51:31.686209Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899715672302215:2446][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:51:31.686224Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899715672302215:2446][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715672302218:2446], cookie# 1 2025-06-25T14:51:31.686263Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519899715672302215:2446][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:51:31.723344Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899715672301799:2156], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 
200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... G: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:6 2025-06-25T14:51:33.100611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:6 tabletId 72075186224037893 2025-06-25T14:51:33.100659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T14:51:33.100669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T14:51:33.100700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:8 2025-06-25T14:51:33.100707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:8 tabletId 72075186224037895 2025-06-25T14:51:33.103213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-25T14:51:33.103246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-25T14:51:33.103289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046644480 2025-06-25T14:51:33.103394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:51:33.103416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:51:33.103433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-25T14:51:33.103490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:51:33.119791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:51:33.138285Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899715672301799:2156], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:33.138413Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519899715672301799:2156], cacheItem# { Subscriber: { Subscriber: [1:7519899719967269816:2666] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist 
Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:33.138483Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519899724262237443:2941], recipient# [1:7519899724262237439:2272], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:33.417278Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899723161075981:2131], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:33.417405Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899723161075981:2131], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:33.417438Z node 3 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [3:7519899723161075981:2131], path# /dc-1/USER_0, domainOwnerId# 72057594046644480 2025-06-25T14:51:33.417600Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][3:7519899727456043553:2306][/dc-1/USER_0] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:33.418150Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519899727456043553:2306][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 Version: 0 }: sender# [3:7519899727456043554:2306] 2025-06-25T14:51:33.418191Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519899727456043553:2306][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 Version: 0 }: sender# [3:7519899727456043555:2306] 2025-06-25T14:51:33.418222Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:7519899727456043553:2306][/dc-1/USER_0] Set up state: owner# [3:7519899723161075981:2131], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:33.418250Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519899727456043553:2306][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 Version: 0 }: sender# [3:7519899727456043556:2306] 2025-06-25T14:51:33.418291Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:7519899727456043553:2306][/dc-1/USER_0] Ignore empty state: owner# [3:7519899723161075981:2131], state# { 
Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:33.418328Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519899723161075981:2131], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: Strong: 0 } 2025-06-25T14:51:33.418446Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519899723161075981:2131], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: Strong: 0 }, by path# { Subscriber: { Subscriber: [3:7519899727456043553:2306] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:51:33.418551Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519899723161075981:2131], cacheItem# { Subscriber: { Subscriber: [3:7519899727456043553:2306] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:33.418625Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899727456043560:2307], recipient# [3:7519899727456043552:2305], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:33.418702Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899723161075981:2131], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:33.418773Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899727456043561:2308], recipient# [3:7519899727456043551:2278], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:33.418862Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:51:33.435587Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:34.420732Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899723161075981:2131], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:34.420866Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899731751010860:2310], recipient# [3:7519899731751010859:2279], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:34.421128Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/initialization/migrations;error=incorrect path status: LookupError; >> TPersQueueTest::WriteExisting [GOOD] >> TPersQueueTest::WriteExistingBigValue ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateTableInsideSubDomain2 [GOOD] Test command err: 2025-06-25T14:51:31.093837Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899715976762530:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:31.093885Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b9e/r3tmp/tmpLg9UXL/pdisk_1.dat 2025-06-25T14:51:31.450634Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:31.514727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:31.514826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:31.521397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26429 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:51:31.660957Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899715976762750:2137] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:31.682419Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899715976763186:2434] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:31.682563Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899715976762774:2150], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:31.682613Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519899715976762774:2150], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:51:31.682805Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519899715976763187:2435][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:31.684668Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715976762435:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899715976763191:2435] 2025-06-25T14:51:31.684717Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715976762435:2051] Subscribe: subscriber# [1:7519899715976763191:2435], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.684771Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715976762441:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899715976763193:2435] 2025-06-25T14:51:31.684799Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715976762441:2057] Subscribe: subscriber# [1:7519899715976763193:2435], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.684850Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899715976763191:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715976762435:2051] 2025-06-25T14:51:31.684871Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899715976763193:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715976762441:2057] 2025-06-25T14:51:31.684905Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899715976763187:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715976763188:2435] 2025-06-25T14:51:31.684938Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899715976763187:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715976763190:2435] 2025-06-25T14:51:31.684981Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519899715976763187:2435][/dc-1] Set up state: owner# [1:7519899715976762774:2150], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 
72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:31.685116Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899715976763191:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715976763188:2435], cookie# 1 2025-06-25T14:51:31.685134Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899715976763192:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715976763189:2435], cookie# 1 2025-06-25T14:51:31.685148Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899715976763193:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715976763190:2435], cookie# 1 2025-06-25T14:51:31.685168Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715976762435:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899715976763191:2435] 2025-06-25T14:51:31.685196Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899715976762435:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715976763191:2435], cookie# 1 2025-06-25T14:51:31.685216Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715976762441:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899715976763193:2435] 2025-06-25T14:51:31.685228Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899715976762441:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715976763193:2435], cookie# 1 2025-06-25T14:51:31.688359Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715976762438:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899715976763192:2435] 2025-06-25T14:51:31.688412Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715976762438:2054] Subscribe: subscriber# [1:7519899715976763192:2435], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.688457Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899715976762438:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715976763192:2435], cookie# 1 2025-06-25T14:51:31.688497Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899715976763191:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715976762435:2051], cookie# 1 2025-06-25T14:51:31.688516Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899715976763193:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715976762441:2057], cookie# 1 2025-06-25T14:51:31.688541Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899715976763192:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715976762438:2054] 2025-06-25T14:51:31.688557Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899715976763192:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715976762438:2054], cookie# 1 2025-06-25T14:51:31.688603Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: 
[main][1:7519899715976763187:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715976763188:2435], cookie# 1 2025-06-25T14:51:31.688627Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899715976763187:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:51:31.688718Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899715976763187:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715976763190:2435], cookie# 1 2025-06-25T14:51:31.688730Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899715976763187:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:51:31.688756Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899715976763187:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715976763189:2435] 2025-06-25T14:51:31.688808Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519899715976763187:2435][/dc-1] Path was already updated: owner# [1:7519899715976762774:2150], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:31.688831Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899715976763187:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715976763189:2435], cookie# 1 2025-06-25T14:51:31.688864Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519899715976763187:2435][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:51:31.688891Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715976762438:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899715976763192:2435] 2025-06-25T14:51:31.741150Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899715976762774:2150], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 
ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... Id: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046644480 }, by path# { Subscriber: { Subscriber: [3:7519899731007616933:2350] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:51:34.601135Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519899726712649271:2129], cacheItem# { Subscriber: { Subscriber: [3:7519899731007616933:2350] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 1750863091978 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:34.601276Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899731007616940:2351], recipient# [3:7519899731007616932:2349], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [72057594046644480:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:51:34.601339Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899726712649271:2129], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:34.601368Z node 3 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [3:7519899726712649271:2129], path# 
/dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480 2025-06-25T14:51:34.601439Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][3:7519899731007616941:2352][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:34.601983Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715976762435:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 6 }: sender# [3:7519899731007616937:2350] 2025-06-25T14:51:34.602018Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715976762435:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7519899731007616945:2352] 2025-06-25T14:51:34.602029Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7519899715976762435:2051] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-06-25T14:51:34.602055Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715976762435:2051] Subscribe: subscriber# [3:7519899731007616945:2352], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:34.602089Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715976762438:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 6 }: sender# [3:7519899731007616938:2350] 2025-06-25T14:51:34.602107Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715976762438:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7519899731007616946:2352] 2025-06-25T14:51:34.602119Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7519899715976762438:2054] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-06-25T14:51:34.602140Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715976762438:2054] Subscribe: subscriber# [3:7519899731007616946:2352], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:34.602163Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715976762441:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 6 }: sender# [3:7519899731007616939:2350] 2025-06-25T14:51:34.602177Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715976762441:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7519899731007616947:2352] 2025-06-25T14:51:34.602182Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7519899715976762441:2057] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-06-25T14:51:34.602195Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715976762441:2057] Subscribe: subscriber# [3:7519899731007616947:2352], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:34.602555Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519899731007616945:2352][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [1:7519899715976762435:2051] 2025-06-25T14:51:34.602592Z node 3 
:SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519899731007616946:2352][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [1:7519899715976762438:2054] 2025-06-25T14:51:34.602620Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7519899731007616947:2352][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [1:7519899715976762441:2057] 2025-06-25T14:51:34.602649Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519899731007616941:2352][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [3:7519899731007616942:2352] 2025-06-25T14:51:34.602679Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519899731007616941:2352][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [3:7519899731007616943:2352] 2025-06-25T14:51:34.602714Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][3:7519899731007616941:2352][/dc-1/USER_0/.metadata/initialization/migrations] Set up state: owner# [3:7519899726712649271:2129], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:34.602736Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][3:7519899731007616941:2352][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [3:7519899731007616944:2352] 2025-06-25T14:51:34.602759Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][3:7519899731007616941:2352][/dc-1/USER_0/.metadata/initialization/migrations] Ignore empty state: owner# [3:7519899726712649271:2129], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:34.602837Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519899726712649271:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/initialization/migrations PathId: Strong: 1 } 2025-06-25T14:51:34.602906Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519899726712649271:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/initialization/migrations PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7519899731007616941:2352] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:51:34.602978Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519899726712649271:2129], cacheItem# { Subscriber: { Subscriber: [3:7519899731007616941:2352] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: 
dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:34.603035Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899731007616948:2353], recipient# [3:7519899731007616931:2284], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:34.603198Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715976762435:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519899731007616945:2352] 2025-06-25T14:51:34.603222Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715976762438:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519899731007616946:2352] 2025-06-25T14:51:34.603237Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715976762441:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7519899731007616947:2352] >> TPersQueueCommonTest::Auth_CreateGrpcStreamWithInvalidTokenInInitialMetadata_SessionClosedWithUnauthenticatedError [GOOD] >> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest >> DemoTx::Scenario_1 [GOOD] >> TStorageTenantTest::CreateDummyTabletsInDifferentDomains >> TPersQueueTest::UpdatePartitionLocation [GOOD] >> TPersQueueTest::TopicServiceCommitOffset >> TPersQueueTest::BadTopic [GOOD] >> TPersQueueTest::CloseActiveWriteSessionOnClusterDisable >> DemoTx::Scenario_2 >> TPersQueueTest::DirectReadPreCached [GOOD] >> TPersQueueTest::DirectReadNotCached >> KqpJoinOrder::CanonizedJoinOrderTPCH22 [GOOD] >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-true [GOOD] >> TStorageTenantTest::RemoveStoragePoolBeforeDroppingTablet [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnWrongUnit-EnableTablePgTypes-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:09.577209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:09.577284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.577316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:09.577345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:09.577384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:09.577410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:09.577465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:09.577519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:09.578203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:09.578518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:09.650545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:09.650598Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:09.664795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:09.665139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:09.665288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:09.670291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:09.670583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:09.671209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.671485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:09.675641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.675820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:09.677065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:09.677120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:09.677248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:09.677297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, 
domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:09.677335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:09.677413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.683404Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:09.840836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:09.841079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.841266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:09.841311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:09.841545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:09.841631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:09.844802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.845003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:09.845187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.845240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:09.845312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:09.845357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:09.847132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.847183Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:09.847273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:09.848804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.848867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:09.848950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:09.849008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:09.852570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:09.854422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:09.854592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:09.855467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:09.855597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:09.855654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:09.855954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:09.856010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:09.856179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:09.856260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:09.858199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:09.858261Z node 1 :FLAT_TX_SCHEMESHARD ... 2025-06-25T14:51:39.337264Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:39.337364Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:39.337446Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:39.337509Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:39.339200Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:39.339281Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:39.339365Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:39.340951Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:39.341001Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:39.341095Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:39.341195Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:39.341427Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:39.342775Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:39.343052Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:39.344076Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:39.344277Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep 
Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 158913792110 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:39.344391Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:39.344734Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:39.344828Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:39.345167Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:39.345289Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:39.347204Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:39.347298Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:39.347581Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:39.347674Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [37:210:2210], at schemeshard: 72057594046678944, txId: 1, path id: 1 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:39.348199Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:39.348278Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:51:39.348523Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:39.348597Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:39.348672Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:51:39.348745Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:39.348825Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:51:39.348911Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:51:39.348991Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:51:39.349056Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:51:39.349167Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:39.349251Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:51:39.349323Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:51:39.350263Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:39.350450Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:51:39.350540Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:51:39.350619Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:51:39.350698Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:39.350860Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:51:39.353439Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:51:39.354161Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T14:51:39.355550Z node 37 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [37:273:2262] Bootstrap 2025-06-25T14:51:39.399117Z node 37 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [37:273:2262] Become StateWork (SchemeCache [37:278:2267]) 2025-06-25T14:51:39.402723Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "pgint8" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:39.403347Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:51:39.403551Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: 
"TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "pgint8" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ColumnUnit: UNIT_AUTO } }, at schemeshard: 72057594046678944 2025-06-25T14:51:39.404352Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: To enable TTL on integral PG type column 'ValueSinceUnixEpochModeSettings' should be specified, at schemeshard: 72057594046678944 2025-06-25T14:51:39.405965Z node 37 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [37:273:2262] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:51:39.408821Z node 37 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "To enable TTL on integral PG type column \'ValueSinceUnixEpochModeSettings\' should be specified" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:39.409245Z node 37 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: To enable TTL on integral PG type column 'ValueSinceUnixEpochModeSettings' should be specified, operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-25T14:51:39.409997Z node 37 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::RemoveStoragePoolBeforeDroppingTablet [GOOD] Test command err: 2025-06-25T14:51:34.600220Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899729156153278:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:34.600259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b4c/r3tmp/tmpKkzOGr/pdisk_1.dat 2025-06-25T14:51:34.898820Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:34.975666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:34.975753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:34.978571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20200 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:51:35.132509Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899729156153467:2142] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:35.154439Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899733451121195:2439] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:35.154580Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899729156153492:2156], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:35.154625Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519899729156153492:2156], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:51:35.154811Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519899733451121196:2440][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:35.162242Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899729156153145:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899733451121200:2440] 2025-06-25T14:51:35.162311Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899729156153145:2051] Subscribe: subscriber# [1:7519899733451121200:2440], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:35.162411Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899733451121200:2440][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899729156153145:2051] 2025-06-25T14:51:35.162451Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899733451121196:2440][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899733451121197:2440] 2025-06-25T14:51:35.162491Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899729156153145:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899733451121200:2440] 2025-06-25T14:51:35.162519Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899729156153151:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899733451121202:2440] 2025-06-25T14:51:35.162545Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899729156153151:2057] Subscribe: subscriber# [1:7519899733451121202:2440], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:35.162617Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899733451121202:2440][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899729156153151:2057] 2025-06-25T14:51:35.162638Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899733451121196:2440][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899733451121199:2440] 2025-06-25T14:51:35.162700Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: 
[main][1:7519899733451121196:2440][/dc-1] Set up state: owner# [1:7519899729156153492:2156], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:35.162826Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899733451121200:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899733451121197:2440], cookie# 1 2025-06-25T14:51:35.162857Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899733451121201:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899733451121198:2440], cookie# 1 2025-06-25T14:51:35.162874Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899733451121202:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899733451121199:2440], cookie# 1 2025-06-25T14:51:35.162900Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899729156153145:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899733451121200:2440], cookie# 1 2025-06-25T14:51:35.162932Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899733451121200:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899729156153145:2051], cookie# 1 2025-06-25T14:51:35.162959Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899733451121196:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899733451121197:2440], cookie# 1 2025-06-25T14:51:35.162985Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899733451121196:2440][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:51:35.163008Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899729156153151:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899733451121202:2440] 2025-06-25T14:51:35.163021Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899729156153151:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899733451121202:2440], cookie# 1 2025-06-25T14:51:35.164512Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899729156153148:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899733451121201:2440] 2025-06-25T14:51:35.164556Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899729156153148:2054] Subscribe: subscriber# [1:7519899733451121201:2440], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:35.164613Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899729156153148:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899733451121201:2440], cookie# 1 2025-06-25T14:51:35.164645Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899733451121202:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899729156153151:2057], cookie# 1 2025-06-25T14:51:35.164688Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899733451121201:2440][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: 
/dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899729156153148:2054] 2025-06-25T14:51:35.164715Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899733451121201:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899729156153148:2054], cookie# 1 2025-06-25T14:51:35.164747Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899733451121196:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899733451121199:2440], cookie# 1 2025-06-25T14:51:35.164767Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899733451121196:2440][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:51:35.164794Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899733451121196:2440][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899733451121198:2440] 2025-06-25T14:51:35.164846Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519899733451121196:2440][/dc-1] Path was already updated: owner# [1:7519899729156153492:2156], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:35.164870Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899733451121196:2440][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899733451121198:2440], cookie# 1 2025-06-25T14:51:35.164891Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519899733451121196:2440][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:51:35.164923Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899729156153148:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899733451121201:2440] 2025-06-25T14:51:35.211708Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899729156153492:2156], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 
200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... arams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "" Kind: "storage-pool-number-1" } StoragePools { Name: "" Kind: "storage-pool-number-2" } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 }, by path# { Subscriber: { Subscriber: [1:7519899733451121196:2440] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 3 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750863095429 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# { Subscriber: { Subscriber: [1:7519899733451121196:2440] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 3 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750863095429 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 } 2025-06-25T14:51:36.026898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046644480, cookie: 281474976715660 2025-06-25T14:51:36.026945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046644480, cookie: 281474976715660 2025-06-25T14:51:36.026953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715660 2025-06-25T14:51:36.026964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715660, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:51:36.026973Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899729156153492:2156], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Strong: 1 } 2025-06-25T14:51:36.026975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 6 2025-06-25T14:51:36.027011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715660, subscribers: 1 2025-06-25T14:51:36.027022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [1:7519899737746088972:2277] 2025-06-25T14:51:36.027047Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519899729156153492:2156], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519899733451121441:2626] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 3 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 1750863095520 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# { Subscriber: { Subscriber: [1:7519899733451121441:2626] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 3 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 1750863095520 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 } 2025-06-25T14:51:36.028761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:51:36.028772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:51:36.028781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:51:36.028789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:51:36.028846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715660 2025-06-25T14:51:36.028866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715660 2025-06-25T14:51:36.030455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-25T14:51:36.030706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-06-25T14:51:36.030930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-25T14:51:36.031063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: 
DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-06-25T14:51:36.031154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-25T14:51:36.031273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-25T14:51:36.031359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T14:51:36.031447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:51:36.031536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:51:36.031553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-25T14:51:36.031663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:51:36.031812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:51:36.031830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-25T14:51:36.031875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:51:36.033427Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037889 2025-06-25T14:51:36.033478Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037888 2025-06-25T14:51:36.033501Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037891 2025-06-25T14:51:36.038685Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037890 2025-06-25T14:51:36.039433Z node 1 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) 2025-06-25T14:51:36.045436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-25T14:51:36.045452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-25T14:51:36.045485Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-25T14:51:36.045489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-25T14:51:36.045500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-25T14:51:36.045504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-25T14:51:36.045512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T14:51:36.045521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T14:51:36.045543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:51:36.045566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:51:36.053134Z node 1 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) >> TStorageTenantTest::GenericCases [GOOD] >> TStorageTenantTest::CopyTableAndConcurrentSplit [GOOD] >> TStorageTenantTest::RemoveStoragePoolAndCreateOneMore [GOOD] >> KqpRbo::Bench_Filter >> KqpRbo::JoinFilter >> KqpRbo::Select >> KqpRbo::LeftJoinToKqpOpJoin >> KqpRbo::Bench_CrossFilter >> KqpRbo::CrossFilter >> KqpRbo::Bench_Select ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::GenericCases [GOOD] Test command err: 2025-06-25T14:51:35.747171Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899734180170769:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:35.747243Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b0a/r3tmp/tmpjtmwWE/pdisk_1.dat 2025-06-25T14:51:36.145319Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:36.159714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:36.159796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:36.167496Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15617 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:51:36.376493Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899734180170887:2118] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:36.396227Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899738475138651:2437] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:36.396358Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899734180170916:2133], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:36.396384Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519899734180170916:2133], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:51:36.396542Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519899738475138652:2438][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:36.398314Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899734180170600:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899738475138656:2438] 2025-06-25T14:51:36.398369Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899734180170600:2050] Subscribe: subscriber# [1:7519899738475138656:2438], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:36.398431Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899734180170603:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899738475138657:2438] 2025-06-25T14:51:36.398448Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899734180170603:2053] Subscribe: subscriber# [1:7519899738475138657:2438], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:36.398681Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899738475138656:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899734180170600:2050] 2025-06-25T14:51:36.398720Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899738475138657:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899734180170603:2053] 2025-06-25T14:51:36.398757Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899738475138652:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899738475138653:2438] 2025-06-25T14:51:36.398816Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899738475138652:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899738475138654:2438] 2025-06-25T14:51:36.398881Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519899738475138652:2438][/dc-1] Set up state: owner# [1:7519899734180170916:2133], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 
72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:36.399017Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899738475138656:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899738475138653:2438], cookie# 1 2025-06-25T14:51:36.399038Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899738475138657:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899738475138654:2438], cookie# 1 2025-06-25T14:51:36.399049Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899738475138658:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899738475138655:2438], cookie# 1 2025-06-25T14:51:36.399070Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899734180170606:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899738475138658:2438] 2025-06-25T14:51:36.399095Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899734180170606:2056] Subscribe: subscriber# [1:7519899738475138658:2438], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:36.399134Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899734180170606:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899738475138658:2438], cookie# 1 2025-06-25T14:51:36.399171Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899734180170600:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899738475138656:2438] 2025-06-25T14:51:36.399188Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899734180170600:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899738475138656:2438], cookie# 1 2025-06-25T14:51:36.399243Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899734180170603:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899738475138657:2438] 2025-06-25T14:51:36.399258Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899734180170603:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899738475138657:2438], cookie# 1 2025-06-25T14:51:36.399355Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899738475138658:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899734180170606:2056] 2025-06-25T14:51:36.399399Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899738475138658:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899734180170606:2056], cookie# 1 2025-06-25T14:51:36.399413Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899738475138656:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899734180170600:2050], cookie# 1 2025-06-25T14:51:36.399428Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899738475138657:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899734180170603:2053], cookie# 1 2025-06-25T14:51:36.399477Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: 
[main][1:7519899738475138652:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899738475138655:2438] 2025-06-25T14:51:36.399527Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519899738475138652:2438][/dc-1] Path was already updated: owner# [1:7519899734180170916:2133], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:36.399557Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899738475138652:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899738475138655:2438], cookie# 1 2025-06-25T14:51:36.399579Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899738475138652:2438][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:51:36.399600Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899738475138652:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899738475138653:2438], cookie# 1 2025-06-25T14:51:36.399611Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899738475138652:2438][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:51:36.399624Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899738475138652:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899738475138654:2438], cookie# 1 2025-06-25T14:51:36.399651Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519899738475138652:2438][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:51:36.399679Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899734180170606:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899738475138658:2438] 2025-06-25T14:51:36.448226Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899734180170916:2133], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 
ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... sts PathId: Strong: 1 } 2025-06-25T14:51:39.084953Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899751360041485:3145][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [1:7519899751360041500:3145] 2025-06-25T14:51:39.084994Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519899751360041485:3145][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Set up state: owner# [1:7519899734180170916:2133], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:39.085005Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519899734180170916:2133], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519899751360041483:3143] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:51:39.085019Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899751360041485:3145][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [1:7519899751360041499:3145] 2025-06-25T14:51:39.085043Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519899751360041485:3145][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Ignore empty state: owner# [1:7519899734180170916:2133], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:39.085072Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519899734180170916:2133], cacheItem# { Subscriber: { Subscriber: [1:7519899751360041483:3143] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:39.085073Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899751360041494:3144][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [1:7519899734180170603:2053] 2025-06-25T14:51:39.085119Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899751360041484:3144][/dc-1/.metadata/workload_manager/running_requests] Handle 
NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [1:7519899751360041489:3144] 2025-06-25T14:51:39.085131Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899734180170916:2133], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 } 2025-06-25T14:51:39.085155Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519899751360041484:3144][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [1:7519899734180170916:2133], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:39.085190Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899734180170600:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899751360041493:3143] 2025-06-25T14:51:39.085191Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [1:7519899734180170916:2133], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [1:7519899751360041485:3145] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:51:39.085219Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899734180170600:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899751360041501:3145] 2025-06-25T14:51:39.085254Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519899734180170916:2133], cacheItem# { Subscriber: { Subscriber: [1:7519899751360041485:3145] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:39.085256Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899734180170603:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899751360041495:3143] 2025-06-25T14:51:39.085285Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899734180170603:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899751360041502:3145] 2025-06-25T14:51:39.085298Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899734180170606:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899751360041503:3145] 2025-06-25T14:51:39.085337Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899734180170603:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [1:7519899751360041494:3144] 2025-06-25T14:51:39.085398Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519899751360041504:3146], recipient# 
[1:7519899751360041480:2297], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:39.085412Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519899751360041505:3147], recipient# [1:7519899751360041482:2299], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:39.761015Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899734180170916:2133], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:39.761138Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519899734180170916:2133], cacheItem# { Subscriber: { Subscriber: [1:7519899738475138818:2560] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:39.761203Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519899751360041519:3148], recipient# [1:7519899751360041518:2300], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.085861Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899734180170916:2133], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.085996Z node 1 
:TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7519899734180170916:2133], cacheItem# { Subscriber: { Subscriber: [1:7519899751360041485:3145] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:40.086081Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7519899755655008823:3152], recipient# [1:7519899755655008822:2301], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> KqpSystemView::QueryStatsSimple [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CopyTableAndConcurrentSplit [GOOD] Test command err: 2025-06-25T14:51:33.485979Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899727377567108:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:33.486033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b54/r3tmp/tmpbVEGxo/pdisk_1.dat 2025-06-25T14:51:33.842622Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:33.842770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:33.847460Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899727377567085:2080] 1750863093484478 != 1750863093484481 2025-06-25T14:51:33.857826Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:33.860656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12071 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:51:34.085324Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899727377567290:2095] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:34.129872Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899731672535096:2434] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:34.129985Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899727377567341:2120], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:34.130008Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519899727377567341:2120], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:51:34.130169Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519899731672535097:2435][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:34.132015Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899727377567054:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899731672535101:2435] 2025-06-25T14:51:34.132067Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899727377567054:2049] Subscribe: subscriber# [1:7519899731672535101:2435], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:34.132109Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899727377567060:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899731672535103:2435] 2025-06-25T14:51:34.132124Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899727377567060:2055] Subscribe: subscriber# [1:7519899731672535103:2435], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:34.132162Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899731672535101:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899727377567054:2049] 2025-06-25T14:51:34.132187Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899731672535103:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899727377567060:2055] 2025-06-25T14:51:34.132219Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899731672535097:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899731672535098:2435] 2025-06-25T14:51:34.132245Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899731672535097:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899731672535100:2435] 2025-06-25T14:51:34.132300Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519899731672535097:2435][/dc-1] Set up state: owner# [1:7519899727377567341:2120], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 
72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:34.132347Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899727377567057:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899731672535102:2435] 2025-06-25T14:51:34.132368Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899727377567057:2052] Subscribe: subscriber# [1:7519899731672535102:2435], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:34.132409Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899727377567054:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899731672535101:2435] 2025-06-25T14:51:34.132422Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899727377567060:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899731672535103:2435] 2025-06-25T14:51:34.132569Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899731672535102:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899727377567057:2052] 2025-06-25T14:51:34.132593Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899731672535101:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899731672535098:2435], cookie# 1 2025-06-25T14:51:34.132609Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899731672535102:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899731672535099:2435], cookie# 1 2025-06-25T14:51:34.132641Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899731672535103:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899731672535100:2435], cookie# 1 2025-06-25T14:51:34.132666Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899731672535097:2435][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899731672535099:2435] 2025-06-25T14:51:34.132871Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519899731672535097:2435][/dc-1] Path was already updated: owner# [1:7519899727377567341:2120], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:34.132926Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899727377567057:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899731672535102:2435] 2025-06-25T14:51:34.132956Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899727377567057:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899731672535102:2435], cookie# 1 2025-06-25T14:51:34.132976Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899727377567054:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899731672535101:2435], cookie# 1 
2025-06-25T14:51:34.132990Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899727377567060:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899731672535103:2435], cookie# 1 2025-06-25T14:51:34.133013Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899731672535102:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899727377567057:2052], cookie# 1 2025-06-25T14:51:34.133027Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899731672535101:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899727377567054:2049], cookie# 1 2025-06-25T14:51:34.133041Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899731672535103:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899727377567060:2055], cookie# 1 2025-06-25T14:51:34.133077Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899731672535097:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899731672535099:2435], cookie# 1 2025-06-25T14:51:34.133110Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899731672535097:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:51:34.133128Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899731672535097:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899731672535098:2435], cookie# 1 2025-06-25T14:51:34.133139Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899731672535097:2435][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:51:34.133158Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899731672535097:2435][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899731672535100:2435], cookie# 1 2025-06-25T14:51:34.133207Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519899731672535097:2435][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:51:34.180444Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899727377567341:2120], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 Shards ... 
kie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:38.403537Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519899749592089857:2750], recipient# [4:7519899749592089849:2748], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:38.403611Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519899745297121527:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:38.403682Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519899749592089858:2751], recipient# [4:7519899749592089848:2320], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:38.403781Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:51:39.408486Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519899745297121527:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:39.408652Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519899753887057156:2752], recipient# [4:7519899753887057155:2321], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:39.408814Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:51:40.408942Z node 4 
:TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519899745297121527:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.409082Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519899758182024454:2753], recipient# [4:7519899758182024453:2322], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.409357Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:51:40.477456Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519899745297121527:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.477598Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519899745297121527:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.477690Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519899758182024469:2754], recipient# [4:7519899758182024465:2332], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.477776Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519899758182024470:2755], recipient# [4:7519899758182024468:2334], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: 
ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.477802Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:51:40.478368Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7519899758182024468:2334], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:51:40.567598Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519899745297121527:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.567796Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519899758182024473:2756], recipient# [4:7519899758182024468:2334], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.567991Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7519899758182024468:2334], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:51:40.670005Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [4:7519899745297121527:2109], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.670177Z node 4 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [4:7519899758182024475:2757], recipient# [4:7519899758182024468:2334], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_0/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.670402Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:7519899758182024468:2334], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCH22 [GOOD] Test command err: Trying to start YDB, gRPC: 31434, MsgBus: 21419 2025-06-25T14:50:10.213526Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899370410775935:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:10.213569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d38/r3tmp/tmpUQMqBX/pdisk_1.dat 2025-06-25T14:50:10.952563Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899370410775774:2080] 1750863010177484 != 1750863010177487 2025-06-25T14:50:10.982106Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:10.983553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:10.983640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:10.987349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31434, node 1 2025-06-25T14:50:11.232658Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:11.324671Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:11.324689Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:11.324695Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:11.324787Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21419 TClient is connected to server localhost:21419 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:50:12.527541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:12.572885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:14.746721Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899387590645604:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.746852Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.747402Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899387590645616:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:14.751107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:14.763746Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899387590645618:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:50:14.860024Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899387590645669:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:15.254799Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899370410775935:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:15.254849Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:15.390929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:50:15.883358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899391885613214:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:50:15.892013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899391885613182:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:50:15.892250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899391885613182:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:50:15.892628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899391885613182:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:50:15.892739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899391885613182:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:50:15.892827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899391885613182:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:50:15.892942Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899391885613182:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:50:15.893052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899391885613182:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:50:15.893145Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899391885613182:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:50:15.893234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899391885613182:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:50:15.893598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899391885613182:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:50:15.893700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899391885613182:2312];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:50:15.904443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899391885613214:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:50:15.904675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899391885613214:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:50:15.904773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899391885613214:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:50:15.904859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899391885613214:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:50:15.904945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899391885613214:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:50:15.905060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899391885613214:2315];tablet_id=72075186224037902;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:50:15.905164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;self_id=[1:7519899391885613214:2315];tabl ... 
4;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.597270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.597424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.600546Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039412;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.600974Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.600981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.601474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.604745Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039402;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.605205Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039414;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.605577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.605808Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.608873Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039389;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.608874Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.609284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.609307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 
2025-06-25T14:51:25.612493Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039406;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.612697Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.612994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.613185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.616722Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.617313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.617353Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039394;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.617956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.620584Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039410;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.620975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.621359Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039347;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.621718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.624726Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039422;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.625154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.625458Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039381;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.625990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.628796Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039408;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.629261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.630469Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.630983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.632618Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039400;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.633050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.635154Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039403;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.635757Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.636300Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039391;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.639286Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.679092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:25.683828Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:25.729194Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks45q9d309ww4g611bxagf", SessionId: 
ydb://session/3?node_id=1&id=YmU0Y2FhZTYtMjRiOWVkZjYtY2U1ZGU3NzItNGExM2Y4Yzg=, Slow query, duration: 27.063018s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:51:25.971108Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:25.973177Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:25.973753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; >> KqpRbo::Filter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::RemoveStoragePoolAndCreateOneMore [GOOD] Test command err: 2025-06-25T14:51:31.113031Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899715751044631:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:31.113373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b59/r3tmp/tmpiz6ec9/pdisk_1.dat 2025-06-25T14:51:31.459171Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:31.485868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:31.485960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:31.490671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12710 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:51:31.635558Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899715751044830:2143] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:31.651676Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899715751045260:2437] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:31.651870Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899715751044900:2169], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:31.651934Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519899715751044900:2169], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:51:31.652119Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519899715751045261:2438][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:31.653831Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715751044507:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899715751045265:2438] 2025-06-25T14:51:31.653840Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715751044510:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899715751045266:2438] 2025-06-25T14:51:31.653889Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715751044510:2054] Subscribe: subscriber# [1:7519899715751045266:2438], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.653890Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715751044507:2051] Subscribe: subscriber# [1:7519899715751045265:2438], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.653954Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899715751044513:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899715751045267:2438] 2025-06-25T14:51:31.653969Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899715751044513:2057] Subscribe: subscriber# [1:7519899715751045267:2438], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:31.653971Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899715751045265:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715751044507:2051] 2025-06-25T14:51:31.653994Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899715751045266:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715751044510:2054] 2025-06-25T14:51:31.653995Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715751044507:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899715751045265:2438] 2025-06-25T14:51:31.654022Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715751044510:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# 
[1:7519899715751045266:2438] 2025-06-25T14:51:31.654031Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899715751045267:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715751044513:2057] 2025-06-25T14:51:31.654045Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899715751044513:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899715751045267:2438] 2025-06-25T14:51:31.654109Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899715751045261:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715751045262:2438] 2025-06-25T14:51:31.654152Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899715751045261:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715751045263:2438] 2025-06-25T14:51:31.654201Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519899715751045261:2438][/dc-1] Set up state: owner# [1:7519899715751044900:2169], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:31.654326Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899715751045261:2438][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899715751045264:2438] 2025-06-25T14:51:31.654384Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519899715751045261:2438][/dc-1] Path was already updated: owner# [1:7519899715751044900:2169], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:31.654445Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899715751045265:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715751045262:2438], cookie# 1 2025-06-25T14:51:31.654482Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899715751045266:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715751045263:2438], cookie# 1 2025-06-25T14:51:31.654500Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899715751045267:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715751045264:2438], cookie# 1 2025-06-25T14:51:31.654546Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899715751044507:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715751045265:2438], cookie# 1 2025-06-25T14:51:31.654568Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899715751044510:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715751045266:2438], cookie# 1 
2025-06-25T14:51:31.654581Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899715751044513:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899715751045267:2438], cookie# 1 2025-06-25T14:51:31.654666Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899715751045265:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715751044507:2051], cookie# 1 2025-06-25T14:51:31.654689Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899715751045266:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715751044510:2054], cookie# 1 2025-06-25T14:51:31.654701Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899715751045267:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715751044513:2057], cookie# 1 2025-06-25T14:51:31.654751Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899715751045261:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715751045262:2438], cookie# 1 2025-06-25T14:51:31.654781Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899715751045261:2438][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:51:31.654796Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899715751045261:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715751045263:2438], cookie# 1 2025-06-25T14:51:31.654806Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899715751045261:2438][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:51:31.654822Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899715751045261:2438][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899715751045264:2438], cookie# 1 2025-06-25T14:51:31.654865Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519899715751045261:2438][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:51:31.700991Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899715751044900:2169], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 
200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 Data ... 2025-06-25T14:51:39.965887Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519899751623007293:2348], recipient# [2:7519899751623007290:2492], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.067236Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899724070774042:2234], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.067357Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519899724070774042:2234], cacheItem# { Subscriber: { Subscriber: [3:7519899736955676059:2300] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:40.067433Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899758430522034:5481], recipient# [3:7519899758430522033:4586], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.075359Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899724070774042:2234], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.075466Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519899724070774042:2234], cacheItem# { Subscriber: { Subscriber: [3:7519899736955676085:2306] 
DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:40.075589Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899758430522037:5482], recipient# [3:7519899758430522035:4587], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.076361Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899724070774042:2234], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.076439Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519899724070774042:2234], cacheItem# { Subscriber: { Subscriber: [3:7519899736955676059:2300] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:40.076501Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899758430522038:5483], recipient# [3:7519899758430522036:4588], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.815446Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519899738738105191:2242], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.815538Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# 
[2:7519899738738105191:2242], cacheItem# { Subscriber: { Subscriber: [2:7519899751623007184:2304] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:40.815606Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519899755917974591:2349], recipient# [2:7519899755917974590:2494], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.834773Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519899738738105191:2242], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.834900Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519899738738105191:2242], cacheItem# { Subscriber: { Subscriber: [2:7519899751623007184:2304] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:40.834982Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519899755917974593:2350], recipient# [2:7519899755917974592:2495], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.841397Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519899738738105191:2242], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:40.841535Z node 2 
:TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519899738738105191:2242], cacheItem# { Subscriber: { Subscriber: [2:7519899751623007201:2312] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:40.841648Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519899755917974595:2351], recipient# [2:7519899755917974594:2496], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TAsyncIndexTests::DropTableWithInflightChanges[PipeResets] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/sysview/unittest >> KqpSystemView::QueryStatsSimple [GOOD] Test command err: Trying to start YDB, gRPC: 12969, MsgBus: 61882 2025-06-25T14:50:38.353202Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899489382282827:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:38.353256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:38.472786Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519899491822834226:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:38.472850Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:38.540648Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899490177290054:2179];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a8c/r3tmp/tmpxxVjIa/pdisk_1.dat 2025-06-25T14:50:39.183919Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:50:39.344504Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:39.401104Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:39.483043Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:50:39.518770Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:39.520824Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:50:39.715366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:39.715462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:39.715602Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:39.715632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:39.715907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:39.715947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:39.728735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:39.729232Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:50:39.729251Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-25T14:50:39.733727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:39.756860Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:39.792246Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12969, node 1 2025-06-25T14:50:40.361016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:40.361039Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:40.361046Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:40.361139Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61882 TClient is connected to server localhost:61882 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:42.271560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:42.514592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:42.891553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:50:43.357683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899489382282827:2239];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:43.357734Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:43.480966Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899490177290054:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:43.481030Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:43.485445Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519899491822834226:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:43.485603Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:43.534663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:43.734861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:50:46.240681Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899523742023012:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:46.240796Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:46.716456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:46.836000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:46.964683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:47.124101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:47.276619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB fi ... 
06-25T14:51:32.174944Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(18, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:32.177399Z node 16 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 17 Cookie 17 2025-06-25T14:51:32.178189Z node 16 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(17, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17006, node 16 2025-06-25T14:51:32.253683Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:32.253719Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:32.253730Z node 16 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:32.253927Z node 16 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24339 2025-06-25T14:51:32.821620Z node 16 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:32.826218Z node 18 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:32.828436Z node 17 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24339 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:33.017191Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:33.046786Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:51:33.155119Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:33.288576Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:33.355695Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:36.800851Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[16:7519899716851717001:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:36.800923Z node 16 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:36.816414Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[17:7519899718167456331:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:36.816499Z node 17 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:36.819764Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[18:7519899719548155594:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:36.819858Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:36.916127Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519899738326555444:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:36.916247Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:36.951452Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:37.087189Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:37.223043Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:37.309370Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:37.493745Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:37.577131Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:37.694923Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:37.791506Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519899742621523614:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:37.791652Z node 16 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:37.791758Z node 16 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [16:7519899742621523619:2371], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:37.795946Z node 16 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:37.822791Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [16:7519899742621523621:2372], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:51:37.907008Z node 16 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [16:7519899742621523706:4254] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:40.091642Z node 16 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863100082, txId: 281474976710674] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::DropTableWithInflightChanges[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:18.840420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:18.840485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:18.840523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:18.840565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:18.840596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:18.840615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:18.840649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:18.840697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:18.841211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:18.841455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:18.899306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:18.899353Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:18.899987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:18.910352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:18.914085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:18.914258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:18.920803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:18.921001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:18.921520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.921745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:18.923988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.924148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:18.924865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:18.924912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.924999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:18.925028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:18.925056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:18.925156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:18.931287Z 
node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:19.025628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:19.025800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.025948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:19.025984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:19.026165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:19.026232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:19.028046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:19.028218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:19.028413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.028471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:19.028512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:19.028551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:19.030084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.030126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:19.030174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:19.031741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.031788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.031834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:19.031924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:19.035076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:19.036751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:19.036898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:19.037789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:19.037898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-25T14:51:42.883249Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-25T14:51:42.883280Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-25T14:51:42.883312Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-25T14:51:42.883378Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1003, ready parts: 2/3, is published: true 2025-06-25T14:51:42.884474Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:51:42.884525Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:42.884744Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:51:42.884851Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 3/3 2025-06-25T14:51:42.884882Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-25T14:51:42.884920Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 3/3 2025-06-25T14:51:42.884956Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-25T14:51:42.884989Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1003, ready parts: 3/3, is published: true 2025-06-25T14:51:42.885022Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-25T14:51:42.885060Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-25T14:51:42.885093Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1003:0 2025-06-25T14:51:42.885186Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:51:42.885228Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:1 2025-06-25T14:51:42.885253Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1003:1 2025-06-25T14:51:42.885287Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:51:42.885314Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and 
all the parts is done, operation id: 1003:2 2025-06-25T14:51:42.885338Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1003:2 2025-06-25T14:51:42.885385Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-25T14:51:42.886005Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:51:42.888171Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:51:42.890268Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:51:42.890326Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:51:42.893080Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:51:42.893181Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:51:42.895149Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 351 RawX2: 111669152029 } TabletId: 72075186233409546 State: 4 2025-06-25T14:51:42.895227Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-06-25T14:51:42.896914Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:51:42.897346Z node 26 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409546 2025-06-25T14:51:42.897558Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:51:42.897797Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 Forgetting tablet 72075186233409546 2025-06-25T14:51:42.900517Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:51:42.900567Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-25T14:51:42.900637Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 
2025-06-25T14:51:42.900682Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-25T14:51:42.900721Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:51:42.903619Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:51:42.903681Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409546 2025-06-25T14:51:42.904053Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-25T14:51:42.904324Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-25T14:51:42.904367Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-25T14:51:42.905099Z node 26 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-25T14:51:42.905184Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-25T14:51:42.905219Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [26:629:2553] 2025-06-25T14:51:42.910795Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 354 RawX2: 111669152031 } TabletId: 72075186233409547 State: 4 2025-06-25T14:51:42.910882Z node 26 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409547, state: Offline, at schemeshard: 72057594046678944 2025-06-25T14:51:42.912568Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:51:42.913012Z node 26 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409547 Forgetting tablet 72075186233409547 2025-06-25T14:51:42.913243Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:42.913493Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:51:42.914021Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:51:42.914066Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: 
TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:51:42.914128Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:51:42.919093Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:51:42.919160Z node 26 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409547 2025-06-25T14:51:42.919737Z node 26 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1003 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted 2025-06-25T14:51:42.920131Z node 26 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-25T14:51:42.920201Z node 26 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 >> KqpRbo::Bench_JoinFilter >> TPersQueueTest::ReadFromSeveralPartitions [GOOD] >> TPersQueueTest::ReadFromSeveralPartitionsMigrated |87.1%| [TA] $(B)/ydb/core/kqp/ut/sysview/test-results/unittest/{meta.json ... results_accumulator.log} |87.1%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/sysview/test-results/unittest/{meta.json ... results_accumulator.log} >> TPersQueueTest::FetchRequest [GOOD] >> TPersQueueTest::Init >> KqpRbo::Bench_10Joins >> TPersQueueTest::SetupLockSession2 [GOOD] >> TPersQueueTest::SetupLockSession >> KqpRbo::Select [GOOD] >> TopicService::OneConsumer_TheRangesOverlap [GOOD] >> KqpRbo::JoinFilter [GOOD] >> KqpRbo::Bench_Select [GOOD] >> KqpRbo::CrossFilter [GOOD] >> KqpRbo::LeftJoinToKqpOpJoin [GOOD] >> KqpRbo::Filter [GOOD] >> KqpRbo::Bench_Filter [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Select [GOOD] Test command err: Trying to start YDB, gRPC: 23056, MsgBus: 63926 2025-06-25T14:51:42.156685Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899763690460451:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:42.156735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d9f/r3tmp/tmpzAtv2W/pdisk_1.dat 2025-06-25T14:51:42.538418Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:42.576871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:42.576995Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:42.578929Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23056, node 1 2025-06-25T14:51:42.727967Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable 
config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:42.727999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:42.728014Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:42.728152Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63926 2025-06-25T14:51:43.174570Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63926 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:43.402001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:44.691488Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899772280395650:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:44.691488Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899772280395658:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:44.691596Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:44.698597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:44.707745Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899772280395664:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:51:44.799544Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899772280395715:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpRbo::Bench_CrossFilter [GOOD] >> TopicService::DifferentConsumers_TheRangesOverlap ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::JoinFilter [GOOD] Test command err: Trying to start YDB, gRPC: 12716, MsgBus: 23886 2025-06-25T14:51:42.156762Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899763014022105:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:42.156818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d53/r3tmp/tmpmf6W5i/pdisk_1.dat 2025-06-25T14:51:42.537671Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:42.584501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:42.584601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:42.586315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12716, node 1 2025-06-25T14:51:42.728873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:42.728905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:42.728913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:42.729012Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23886 2025-06-25T14:51:43.178172Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23886 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:43.384768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:44.770632Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899771603957304:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:44.770711Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.091330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:45.209476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:45.254913Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899775898924778:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.255019Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.255111Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899775898924783:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.259414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:45.268599Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899775898924785:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:51:45.341424Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899775898924836:2441] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Bench_Select [GOOD] Test command err: Trying to start YDB, gRPC: 10573, MsgBus: 13472 2025-06-25T14:51:42.344950Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899764893626976:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:42.345606Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d0a/r3tmp/tmprfArF4/pdisk_1.dat 2025-06-25T14:51:42.720604Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:42.720956Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899764893626951:2080] 1750863102343511 != 1750863102343514 TServer::EnableGrpc on GrpcPort 10573, node 1 2025-06-25T14:51:42.794897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:42.794925Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:42.794935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:42.795050Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:42.817607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:42.817714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:42.821330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13472 TClient is connected to server localhost:13472 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:51:43.350106Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:43.400427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:45.112584Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899777778529489:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.112586Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899777778529481:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.112670Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.115749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:45.124069Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899777778529495:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:51:45.205692Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899777778529546:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Filter [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-31 Test command err: Trying to start YDB, gRPC: 5278, MsgBus: 16237 2025-06-25T14:51:42.788396Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899764001962762:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:42.788465Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000899/r3tmp/tmppoW0FR/pdisk_1.dat 2025-06-25T14:51:43.047275Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899764001962733:2080] 1750863102785871 != 1750863102785874 2025-06-25T14:51:43.048471Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5278, node 1 2025-06-25T14:51:43.127197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:43.127268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:43.128154Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:43.136873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:43.136892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:43.136899Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:43.137140Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16237 TClient is connected to server localhost:16237 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:43.601457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:43.800081Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:45.204106Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899776886865260:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.204197Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.417500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:45.530645Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899776886865364:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.530729Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.531051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899776886865369:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.535879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:45.545456Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899776886865371:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:51:45.642034Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899776886865422:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::CrossFilter [GOOD] Test command err: Trying to start YDB, gRPC: 10517, MsgBus: 16074 2025-06-25T14:51:42.220177Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899765687923553:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:42.232042Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d8a/r3tmp/tmpTasR1c/pdisk_1.dat 2025-06-25T14:51:42.541554Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:42.541662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:42.546551Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:42.549884Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899765687923520:2080] 1750863102196901 != 1750863102196904 2025-06-25T14:51:42.561642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10517, node 1 2025-06-25T14:51:42.728054Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:42.728075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:42.728081Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:42.728195Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16074 2025-06-25T14:51:43.221503Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16074 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:43.399967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:45.176526Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899778572826046:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.176627Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.443651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:45.548738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:45.598598Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899778572826222:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.598709Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.598755Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899778572826227:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.602123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:45.611026Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899778572826229:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T14:51:45.699600Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899778572826280:2440] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::LeftJoinToKqpOpJoin [GOOD] Test command err: Trying to start YDB, gRPC: 62386, MsgBus: 17101 2025-06-25T14:51:42.178693Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899764517812324:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:42.180653Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000db7/r3tmp/tmpWwF5TX/pdisk_1.dat 2025-06-25T14:51:42.561590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:42.561684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:42.594084Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:42.610612Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62386, node 1 2025-06-25T14:51:42.728912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:42.728937Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:42.728944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:42.729094Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17101 2025-06-25T14:51:43.188485Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17101 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:51:43.421656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:44.817733Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899773107747506:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:44.817799Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.091302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:45.213922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:45.239257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:45.264108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:45.305958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899777402715120:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.306086Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.306120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899777402715125:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.309589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:45.317704Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899777402715127:2328], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:51:45.414443Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899777402715178:2533] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-37 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-25 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Bench_Filter [GOOD] Test command err: Trying to start YDB, gRPC: 9227, MsgBus: 12722 2025-06-25T14:51:42.162169Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899767133165086:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:42.163107Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ddf/r3tmp/tmp0gTLJ4/pdisk_1.dat 2025-06-25T14:51:42.518243Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899767133165066:2080] 1750863102159460 != 1750863102159463 2025-06-25T14:51:42.527493Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9227, node 1 2025-06-25T14:51:42.607236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:42.607303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:42.609147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:42.727943Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:42.727965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:42.727970Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:42.728090Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12722 2025-06-25T14:51:43.182179Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12722 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:43.398153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:44.633547Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899775723100299:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:44.633630Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.091233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:45.207488Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899780018067703:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.207546Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.207560Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899780018067708:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.210759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:45.220117Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899780018067710:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:51:45.321213Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899780018067761:2395] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Bench_CrossFilter [GOOD] Test command err: Trying to start YDB, gRPC: 29114, MsgBus: 8986 2025-06-25T14:51:42.188138Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899765405835454:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:42.188266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d71/r3tmp/tmp7iGYaq/pdisk_1.dat 2025-06-25T14:51:42.538229Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:42.583384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:42.583489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:42.585858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29114, node 1 2025-06-25T14:51:42.729859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:42.729884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:42.729896Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:42.730019Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8986 2025-06-25T14:51:43.195073Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8986 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:51:43.384629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:44.841257Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899773995770564:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:44.841369Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.091207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:45.201003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:45.242091Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899778290738038:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.242156Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.242172Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899778290738043:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:45.245711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:45.255411Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899778290738045:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:51:45.352026Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899778290738096:2441] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpRbo::Bench_JoinFilter [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Bench_JoinFilter [GOOD] Test command err: Trying to start YDB, gRPC: 26142, MsgBus: 63914 2025-06-25T14:51:44.153463Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899774402274514:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:44.153662Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00085f/r3tmp/tmp2aYLqj/pdisk_1.dat 2025-06-25T14:51:44.400467Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899774402274492:2080] 1750863104152879 != 1750863104152882 2025-06-25T14:51:44.408686Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26142, node 1 2025-06-25T14:51:44.473969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:44.473993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:44.474003Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:44.474127Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:44.522104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:44.522224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:44.524021Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63914 TClient is connected to server localhost:63914 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:44.896842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:45.160774Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:46.480332Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899782992209722:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:46.480477Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:46.685937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:46.791112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:46.855138Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899782992209897:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:46.855254Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:46.855543Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899782992209902:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:46.860496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:46.873295Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899782992209904:2313], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T14:51:46.944785Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899782992209955:2436] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TSchemeShardTTLTestsWithReboots::AlterTable [GOOD] >> DemoTx::Scenario_2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::AlterTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:03.030985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:03.031109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.031169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:03.031222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:03.032506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:03.032569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:03.032648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:03.032729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:03.033524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:03.035220Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:03.128234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:03.128296Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:03.129163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:03.147352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:03.151077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:03.151244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:03.160286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:03.160586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:03.161253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.161587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:03.164201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.164405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:03.165558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:03.165615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:03.165713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:03.165753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:03.165796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:03.165920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:03.172648Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is 
[1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:03.334526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:03.334819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.335028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:03.335075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:03.335298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:03.335495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:03.341226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.341414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:03.341618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.341681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:03.341721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:03.341777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:03.343976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.344037Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:03.344073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:03.345978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.346012Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:03.346064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:03.346101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:03.349064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:03.350681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:03.350795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:03.351445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:03.351534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 42949694 ... 
51:49.112862Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-25T14:51:49.112956Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-25T14:51:49.112993Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-25T14:51:49.113029Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-25T14:51:49.113067Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:51:49.113145Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-25T14:51:49.113814Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1035 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-25T14:51:49.113860Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:49.113993Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1035 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-25T14:51:49.114095Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1035 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-25T14:51:49.114880Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 337 RawX2: 219043334418 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-25T14:51:49.114923Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-25T14:51:49.115030Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply 
execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Source { RawX1: 337 RawX2: 219043334418 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-25T14:51:49.115078Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:51:49.115161Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 337 RawX2: 219043334418 } Origin: 72075186233409546 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-25T14:51:49.115220Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1003:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:49.115258Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:51:49.115297Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:51:49.115344Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1003:0 129 -> 240 2025-06-25T14:51:49.118710Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:51:49.118862Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:51:49.118960Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:51:49.119221Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:51:49.119263Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-25T14:51:49.119357Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-25T14:51:49.119388Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-25T14:51:49.119426Z node 51 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-25T14:51:49.119453Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-25T14:51:49.119486Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-25T14:51:49.119524Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-25T14:51:49.119564Z node 51 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-25T14:51:49.119597Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1003:0 2025-06-25T14:51:49.119717Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-25T14:51:49.121979Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-25T14:51:49.122022Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-25T14:51:49.122342Z node 51 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-25T14:51:49.122419Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-25T14:51:49.122469Z node 51 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [51:458:2429] TestWaitNotification: OK eventTxId 1003 2025-06-25T14:51:49.122897Z node 51 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:51:49.123095Z node 51 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 225us result status StatusSuccess 2025-06-25T14:51:49.123548Z node 51 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 
0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TStorageTenantTest::CreateDummyTabletsInDifferentDomains [GOOD] >> TPersQueueTest::TopicServiceCommitOffset [GOOD] >> TPersQueueTest::TopicServiceCommitOffsetBadOffsets >> DemoTx::Scenario_3 >> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest [GOOD] >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-10 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-25 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-26 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-31 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-32 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateDummyTabletsInDifferentDomains [GOOD] Test command err: 2025-06-25T14:51:38.951521Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899749445406000:2186];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:38.951753Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:39.018823Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899752153428061:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:39.167836Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a7f/r3tmp/tmp2z8kJw/pdisk_1.dat 2025-06-25T14:51:39.355110Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:39.370449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2025-06-25T14:51:39.370544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:39.390202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:39.390303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:39.391240Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:51:39.392113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:39.392608Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:51:39.393546Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2749 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:51:39.561565Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899749445406070:2144] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:39.583946Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899753740373769:2423] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:39.584640Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519899753740373391:2157], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:39.584689Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519899753740373391:2157], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:51:39.585326Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519899753740373770:2424][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:51:39.587560Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899749445405744:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899753740373775:2424] 2025-06-25T14:51:39.587575Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899749445405747:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899753740373776:2424] 2025-06-25T14:51:39.587642Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899749445405747:2055] Subscribe: subscriber# [1:7519899753740373776:2424], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:39.587772Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519899749445405744:2052] Subscribe: subscriber# [1:7519899753740373775:2424], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:39.587788Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519899749445405750:2058] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519899753740373777:2424] 2025-06-25T14:51:39.587820Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: 
[1:7519899749445405750:2058] Subscribe: subscriber# [1:7519899753740373777:2424], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:51:39.588081Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899753740373776:2424][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899749445405747:2055] 2025-06-25T14:51:39.588137Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899753740373775:2424][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899749445405744:2052] 2025-06-25T14:51:39.588164Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519899753740373777:2424][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899749445405750:2058] 2025-06-25T14:51:39.588232Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899753740373770:2424][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899753740373773:2424] 2025-06-25T14:51:39.588276Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899753740373770:2424][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899753740373772:2424] 2025-06-25T14:51:39.588368Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519899753740373770:2424][/dc-1] Set up state: owner# [1:7519899753740373391:2157], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:39.588512Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519899753740373770:2424][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519899753740373774:2424] 2025-06-25T14:51:39.588564Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899749445405747:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899753740373776:2424] 2025-06-25T14:51:39.588584Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899749445405744:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899753740373775:2424] 2025-06-25T14:51:39.588597Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519899749445405750:2058] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519899753740373777:2424] 2025-06-25T14:51:39.588609Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519899753740373770:2424][/dc-1] Path was already updated: owner# [1:7519899753740373391:2157], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:51:39.588659Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:381: [replica][1:7519899753740373775:2424][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899753740373772:2424], cookie# 1 2025-06-25T14:51:39.588687Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899753740373776:2424][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899753740373773:2424], cookie# 1 2025-06-25T14:51:39.588730Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519899753740373777:2424][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899753740373774:2424], cookie# 1 2025-06-25T14:51:39.588848Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899749445405744:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899753740373775:2424], cookie# 1 2025-06-25T14:51:39.588887Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899749445405747:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899753740373776:2424], cookie# 1 2025-06-25T14:51:39.588928Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519899749445405750:2058] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519899753740373777:2424], cookie# 1 2025-06-25T14:51:39.589075Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899753740373775:2424][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899749445405744:2052], cookie# 1 2025-06-25T14:51:39.589104Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899753740373776:2424][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899749445405747:2055], cookie# 1 2025-06-25T14:51:39.589121Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519899753740373777:2424][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899749445405750:2058], cookie# 1 2025-06-25T14:51:39.589157Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899753740373770:2424][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899753740373772:2424], cookie# 1 2025-06-25T14:51:39.589186Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899753740373770:2424][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:51:39.589211Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899753740373770:2424][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899753740373773:2424], cookie# 1 2025-06-25T14:51:39.589223Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519899753740373770:2424][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:51:39.589266Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519899753740373770:2424][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519899753740373774:2424], cookie# 1 2025-06-25T14:51:39.589291Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519899753740373770:2424][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 
2025-06-25T14:51:39.634277Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519899753740 ... er: { Subscriber: [3:7519899780803684388:2217] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:49.121828Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899793688586350:2237], recipient# [3:7519899793688586349:2490], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:49.126240Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899780803684334:2186], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:49.126380Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519899780803684334:2186], cacheItem# { Subscriber: { Subscriber: [3:7519899780803684388:2217] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:49.126466Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899793688586352:2238], recipient# [3:7519899793688586351:2491], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:49.187686Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519899752153428164:2109], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:49.187807Z 
node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519899752153428164:2109], cacheItem# { Subscriber: { Subscriber: [2:7519899756448395481:2114] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:49.187968Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519899795103101233:2140], recipient# [2:7519899795103101232:2286], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:49.258864Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899780803684334:2186], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:49.259006Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519899780803684334:2186], cacheItem# { Subscriber: { Subscriber: [3:7519899780803684399:2220] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:49.259126Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899793688586355:2239], recipient# [3:7519899793688586354:2492], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:49.298513Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7519899752153428164:2109], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true 
ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:49.298645Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7519899752153428164:2109], cacheItem# { Subscriber: { Subscriber: [2:7519899765038330098:2118] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:49.298731Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7519899795103101235:2141], recipient# [2:7519899795103101234:2287], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:49.359200Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519899780803684334:2186], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:49.359352Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519899780803684334:2186], cacheItem# { Subscriber: { Subscriber: [3:7519899789393619033:2231] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:49.359403Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519899780803684334:2186], cacheItem# { Subscriber: { Subscriber: [3:7519899789393619034:2232] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true 
SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-25T14:51:49.359507Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519899793688586356:2240], recipient# [3:7519899789393619030:2488], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-25T14:51:49.359678Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7519899789393619030:2488], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-37 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-38 >> TCdcStreamTests::Basic >> TCdcStreamTests::VirtualTimestamps |87.1%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/test-results/unittest/{meta.json ... results_accumulator.log} |87.1%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpTx::CommitRequired >> KqpSnapshotIsolation::TConflictWriteOltp [GOOD] >> KqpSnapshotIsolation::TConflictWriteOlap [GOOD] >> KqpSinkMvcc::SnapshotExpiration >> KqpSinkMvcc::OlapReadOnlyTxCommitsOnConcurrentWrite >> KqpSinkLocks::UncommittedRead >> KqpLocks::Invalidate >> KqpSnapshotRead::ReadOnlyTxWithIndexCommitsOnConcurrentWrite-withSink >> KqpTx::InteractiveTx >> TPersQueueTest::CloseActiveWriteSessionOnClusterDisable [GOOD] >> TPersQueueTest::Cache |87.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotIsolation::TConflictWriteOlap [GOOD] >> KqpSnapshotRead::ReadOnlyTxCommitsOnConcurrentWrite-withSink >> TCdcStreamTests::Basic [GOOD] >> TCdcStreamTests::Attributes >> TCdcStreamTests::VirtualTimestamps [GOOD] >> TCdcStreamTests::ResolvedTimestamps >> TAsyncIndexTests::SplitIndexWithReboots[PipeResets] [GOOD] >> KqpSnapshotRead::TestSnapshotExpiration-withSink >> TCdcStreamTests::Attributes [GOOD] >> TCdcStreamTests::DocApi ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitIndexWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:16.005360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:16.005456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:16.005495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:16.005529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:16.005571Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:16.005596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:16.005640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:16.005714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:16.006315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:16.006618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:16.077363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:16.077408Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:16.078039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:16.088543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:16.092162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:16.092347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:16.098973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:16.099174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:16.099716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.099933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:16.102350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:16.102525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:16.103433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:16.103496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:16.103589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:16.103622Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:16.103659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:16.103759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:16.109774Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:16.221489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:16.221674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.221841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:16.221877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:16.222100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:16.222165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:16.224163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.224326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:16.224479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.224539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:16.224575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:16.224602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:16.226210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.226257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:16.226295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:16.227788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.227837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.227885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:16.227926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:16.235953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:16.237613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:16.237758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:16.238651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.238763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
ntToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false 
BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409547 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:54.240970Z node 22 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:51:54.241234Z node 22 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 292us result status StatusSuccess 2025-06-25T14:51:54.242060Z node 22 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: 
EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\004\000\000\0002\000\000\000\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-10 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-11 >> TCdcStreamTests::ResolvedTimestamps [GOOD] >> TCdcStreamTests::SchemaChanges >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-38 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-39 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-26 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-27 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-32 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-33 >> TCdcStreamTests::DocApi [GOOD] >> TCdcStreamTests::DocApiNegative >> KqpRbo::Bench_10Joins [GOOD] >> TopicService::DifferentConsumers_TheRangesOverlap [GOOD] >> TCdcStreamTests::SchemaChanges [GOOD] >> TCdcStreamTests::RetentionPeriod >> TCdcStreamTests::DocApiNegative [GOOD] >> TCdcStreamTests::Negative >> TPersQueueTest::DirectReadNotCached [GOOD] >> TPersQueueTest::DirectReadBudgetOnRestart >> KqpSinkMvcc::ReadOnlyTxCommitsOnConcurrentWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/rbo/unittest >> KqpRbo::Bench_10Joins [GOOD] Test command err: Trying to start YDB, gRPC: 18759, MsgBus: 13142 2025-06-25T14:51:44.907179Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899773051411159:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:44.907271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00082e/r3tmp/tmpRjfTi1/pdisk_1.dat 2025-06-25T14:51:45.207144Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899773051411139:2080] 1750863104906531 != 1750863104906534 2025-06-25T14:51:45.213267Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: 
Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18759, node 1 2025-06-25T14:51:45.252907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:45.252936Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:45.252947Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:45.253085Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:45.289094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:45.289207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:45.290858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13142 TClient is connected to server localhost:13142 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:45.705766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:51:45.916627Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Trying to start YDB, gRPC: 8173, MsgBus: 32249 2025-06-25T14:51:47.493150Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899784744060136:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:47.495378Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00082e/r3tmp/tmpV7RReA/pdisk_1.dat 2025-06-25T14:51:47.733913Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:47.733994Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:47.736796Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:47.743393Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:47.744472Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899784744060035:2080] 1750863107483891 != 1750863107483894 TServer::EnableGrpc on GrpcPort 8173, node 2 2025-06-25T14:51:47.791036Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:47.791062Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:47.791067Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:47.791148Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32249 TClient is connected to server localhost:32249 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:51:48.316944Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:48.496625Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:49.907270Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899773051411159:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:49.907355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:50.222455Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899797628962556:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:50.222537Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:50.460888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:50.550350Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:50.575785Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:50.599858Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:50.625666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:50.651732Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:50.677890Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:50.702877Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:50.726363Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo 
unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:50.750568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:50.781397Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899797628963291:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:50.781448Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:50.781493Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899797628963296:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:50.784696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710668:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:50.793360Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899797628963298:2368], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710668 completed, doublechecking } 2025-06-25T14:51:50.863800Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899797628963349:2804] txid# 281474976710669, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 15], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:52.491978Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899784744060136:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:52.492058Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TopicService::UnknownConsumer |87.1%| [TA] $(B)/ydb/core/kqp/ut/rbo/test-results/unittest/{meta.json ... results_accumulator.log} |87.1%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/rbo/test-results/unittest/{meta.json ... results_accumulator.log} >> TCdcStreamTests::Negative [GOOD] >> TCdcStreamTests::DisableProtoSourceIdInfo >> TCdcStreamTests::RetentionPeriod [GOOD] >> TCdcStreamTests::TopicPartitions >> TCdcStreamTests::DisableProtoSourceIdInfo [GOOD] >> TCdcStreamTests::CreateStream >> KqpSinkTx::SnapshotROInteractive2 >> TPersQueueTest::Init [GOOD] >> TPersQueueTest::EventBatching >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-39 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-40 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-11 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-12 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-27 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-28 >> TAsyncIndexTests::SplitMainWithReboots[PipeResets] [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-33 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-34 >> TCdcStreamTests::CreateStream [GOOD] >> TCdcStreamTests::AlterStream >> TPersQueueTest::WriteExistingBigValue [GOOD] >> TPersQueueTest::WriteEmptyData >> TCdcStreamTests::TopicPartitions [GOOD] >> TCdcStreamTests::ReplicationAttribute >> KqpTx::CommitRequired [GOOD] >> KqpTx::CommitPrepared ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitMainWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 
2025-06-25T14:51:19.233394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:19.233481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:19.233525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:19.233569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:19.233611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:19.233637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:19.233682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:19.233745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:19.234407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:19.234714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:19.305135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:19.305181Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:19.305783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:19.317019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:19.320798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:19.320961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:19.327788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:19.327982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:19.328564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:19.328816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 
72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:19.331166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:19.331349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:19.332303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:19.332383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:19.332494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:19.332531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:19.332580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:19.332689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:19.338676Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:19.450617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:19.450799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.450969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:19.451006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:19.451202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:19.451269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:19.453190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:19.453347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:19.453486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.453544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:19.453586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:19.453613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:19.455117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.455162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:19.455202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:19.456587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.456632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.456684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:19.456724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:19.459951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:19.461593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:19.461757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for 
txId: 1 at step: 5000001 2025-06-25T14:51:19.462746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:19.462864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... ompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\001\000\004\000\000\0002\000\000\000" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 
Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:59.308457Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409546][24:787:2621] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:51:59.308559Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][24:727:2621] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:51:59.308694Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409546][24:787:2621] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750863119283451 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750863119283451 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750863119283451 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:51:59.310854Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409546][24:787:2621] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-25T14:51:59.310955Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][24:727:2621] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:51:59.492807Z node 24 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false 
ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:51:59.493074Z node 24 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 292us result status StatusSuccess 2025-06-25T14:51:59.493907Z node 24 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 
2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpSinkLocks::UncommittedRead [GOOD] >> KqpSinkLocks::VisibleUncommittedRows >> KqpTx::InteractiveTx [GOOD] >> KqpTx::InvalidateOnError >> KqpLocks::Invalidate [GOOD] >> KqpLocks::InvalidateOnCommit >> TSchemeShardTTLTestsWithReboots::MoveTable [GOOD] >> TCdcStreamTests::AlterStream [GOOD] >> TCdcStreamTests::DropStream >> KqpSnapshotRead::ReadOnlyTxCommitsOnConcurrentWrite-withSink [GOOD] >> KqpSnapshotRead::ReadOnlyTxWithIndexCommitsOnConcurrentWrite+withSink >> TCdcStreamTests::ReplicationAttribute [GOOD] >> TCdcStreamTests::StreamOnIndexTableNegative ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::MoveTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:04.211845Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:04.211910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:04.211942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:04.211975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:04.212007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:04.212025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:04.212062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:04.212117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:04.212853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:04.213083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:04.292705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:04.292762Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:04.293468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:04.310465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:04.314539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:04.314725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:04.328183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:04.328397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:04.328853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.329116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at 
schemeshard: 72057594046678944 2025-06-25T14:51:04.331060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:04.331200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:04.332124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:04.332164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:04.332227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:04.332256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:04.332281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:04.332453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:04.339829Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:04.471780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:04.471991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.472224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:04.472278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:04.481508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:04.481680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:04.484108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.484333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:04.484530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.484581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:04.484627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:04.484677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:04.487285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.487343Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:04.487381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:04.489258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.489302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.489374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:04.489424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:04.492565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:04.494179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:04.494351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for 
txId: 1 at step: 5000001 2025-06-25T14:51:04.495147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.495253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 42949694 ... 3] 2025-06-25T14:52:00.857937Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:52:00.857980Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [62:212:2212], at schemeshard: 72057594046678944, txId: 1003, path id: 1 2025-06-25T14:52:00.858031Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [62:212:2212], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-06-25T14:52:00.858390Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:52:00.858439Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 1003:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:52:00.858528Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:52:00.858566Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:52:00.858611Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1003:0 129 -> 240 2025-06-25T14:52:00.859629Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-25T14:52:00.859720Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-25T14:52:00.859757Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-25T14:52:00.859795Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-25T14:52:00.859834Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-25T14:52:00.860938Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-25T14:52:00.861027Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-25T14:52:00.861060Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-25T14:52:00.861096Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-25T14:52:00.861133Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:52:00.861209Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-25T14:52:00.869572Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:52:00.869645Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:564: TMoveTable TDone, operationId: 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:52:00.869698Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:574: TMoveTable TDone, operationId: 1003:0 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 3], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-25T14:52:00.869814Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-25T14:52:00.869850Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-25T14:52:00.869893Z node 62 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-25T14:52:00.869925Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-25T14:52:00.869965Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-25T14:52:00.870013Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-25T14:52:00.870057Z node 62 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-25T14:52:00.870091Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1003:0 2025-06-25T14:52:00.870225Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:52:00.870264Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:52:00.870845Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at 
schemeshard: 72057594046678944 2025-06-25T14:52:00.870892Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:52:00.870962Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:52:00.871438Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:52:00.877468Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:52:00.880781Z node 62 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-25T14:52:00.881113Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-25T14:52:00.881160Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-25T14:52:00.881526Z node 62 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-25T14:52:00.881621Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-25T14:52:00.881657Z node 62 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [62:474:2445] TestWaitNotification: OK eventTxId 1003 2025-06-25T14:52:00.882110Z node 62 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTableMoved" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:52:00.882319Z node 62 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTableMoved" took 248us result status StatusSuccess 2025-06-25T14:52:00.883028Z node 62 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTableMoved" PathDescription { Self { Name: "TTLEnabledTableMoved" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "TTLEnabledTableMoved" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 
TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TPersQueueTest::ReadFromSeveralPartitionsMigrated [GOOD] >> TPersQueueTest::SchemeshardRestart >> KqpSinkLocks::InvalidateOnCommit >> TCdcStreamTests::StreamOnIndexTableNegative [GOOD] >> TCdcStreamTests::StreamOnIndexTable >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-28 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-29 >> DemoTx::Scenario_3 [GOOD] >> KqpSnapshotRead::TestReadOnly-withSink >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-12 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-13 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-40 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-41 >> KqpSinkMvcc::ReadOnlyTxCommitsOnConcurrentWrite [GOOD] >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite1 >> KqpSnapshotRead::ReadOnlyTxWithIndexCommitsOnConcurrentWrite-withSink [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite1+withSink >> TCdcStreamTests::DropStream [GOOD] >> TCdcStreamTests::AlterStreamImplShouldFail >> TCdcStreamTests::StreamOnIndexTable [GOOD] >> TCdcStreamTests::StreamOnBuildingIndexTable >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-34 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-35 >> DemoTx::Scenario_4 >> TCdcStreamTests::AlterStreamImplShouldFail [GOOD] >> TCdcStreamTests::DropStreamImplShouldFail >> TAsyncIndexTests::MergeIndexWithReboots[PipeResets] [GOOD] >> KqpTx::CommitPrepared [GOOD] >> TPersQueueTest::TopicServiceCommitOffsetBadOffsets [GOOD] >> TPersQueueTest::TopicServiceReadBudget >> KqpSinkTx::SnapshotROInteractive2 [GOOD] 
>> KqpSnapshotIsolation::TConflictReadWriteOlap [GOOD] >> TCdcStreamTests::StreamOnBuildingIndexTable [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanEnabled >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse [GOOD] >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError >> TAsyncIndexTests::MergeMainWithReboots[PipeResets] [GOOD] >> KqpTx::InvalidateOnError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeIndexWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:13.305712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:13.305810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:13.305852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:13.305898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:13.305941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:13.305969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:13.306030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:13.306113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:13.306856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:13.307346Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:13.375531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:13.375601Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:13.376248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:13.385812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:13.388916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:13.391167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:13.401121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:13.401470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:13.404035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.404354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:13.409526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:13.409697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:13.413747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:13.413827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:13.413936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:13.413979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:13.414027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:13.414153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:13.420598Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 
is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:13.521671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:13.521875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.522031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:13.522082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:13.522243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:13.522310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:13.525057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.525196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:13.526308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.531754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:13.531832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:13.531871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:13.537176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.537236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:13.537335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:13.539375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.539424Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.539467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:13.539507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:13.544001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:13.545934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:13.546845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:13.547926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.548063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
rceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 
8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:52:05.349098Z node 26 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:52:05.349382Z node 26 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 301us result status StatusSuccess 2025-06-25T14:52:05.350347Z node 26 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess 
Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 
StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpLocks::InvalidateOnCommit [GOOD] >> KqpLocks::MixedTxFail+useSink >> TCdcStreamTests::DropStreamImplShouldFail [GOOD] >> TCdcStreamTests::CopyTableShouldNotCopyStream ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::CommitPrepared [GOOD] Test command err: Trying to start YDB, gRPC: 25673, MsgBus: 28064 2025-06-25T14:51:52.706477Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899807370047363:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:52.706860Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012f9/r3tmp/tmppTwRUr/pdisk_1.dat 2025-06-25T14:51:53.116176Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25673, node 1 2025-06-25T14:51:53.160356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:53.160444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:53.161840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:53.296798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:53.296820Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:53.296825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:53.296921Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28064 2025-06-25T14:51:53.711189Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28064 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:54.061408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:54.115782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:54.283954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:54.438930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:51:54.498908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:55.729269Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899820254950784:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.729468Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.629925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.706114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899807370047363:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:57.706219Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:57.714778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.795709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.818747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.852260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.892367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.935753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:58.016903Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899833139853347:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:58.017002Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:58.017355Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899833139853352:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:58.021771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:58.034506Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899833139853354:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:51:58.113991Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899833139853405:3430] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 17411, MsgBus: 15091 2025-06-25T14:52:00.651058Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899841552386953:2148];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012f9/r3tmp/tmpUmFaO9/pdisk_1.dat 2025-06-25T14:52:00.671534Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:52:00.745418Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:00.748642Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899841552386830:2080] 1750863120638627 != 1750863120638630 TServer::EnableGrpc on GrpcPort 17411, node 2 2025-06-25T14:52:00.775563Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:00.775641Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:00.776744Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:00.803386Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:00.803407Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:00.803412Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:00.803485Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15091 TClient is connected to server localhost:15091 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:01.187061Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:01.210435Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:01.291017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:01.441206Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:01.515989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:01.661151Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:03.515437Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899854437290342:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:03.515512Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:03.593259Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:03.620385Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:03.650223Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:03.684998Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:03.714033Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:03.788482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:03.859684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:03.975146Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899854437291014:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:03.975227Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:03.975444Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899854437291019:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:03.979666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:03.993069Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899854437291021:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:04.086274Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899858732258368:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeMainWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:15.298354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:15.298431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:15.298458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:15.298489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:15.298546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:15.298573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:15.298621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:15.298689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:15.299361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:15.299731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 
2025-06-25T14:51:15.381967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:15.382029Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:15.382753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:15.395101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:15.399383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:15.399562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:15.406483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:15.406646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:15.407117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.407313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:15.409340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:15.409485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:15.410200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:15.410246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:15.410312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:15.410354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:15.410382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:15.410490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:15.415797Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:15.542033Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:15.542226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.542399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:15.542442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:15.542652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:15.542735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:15.544859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.545022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:15.545183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.545254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:15.545296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:15.545330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:15.547147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.547198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:15.547237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:15.548844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.548893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, 
operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.548952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:15.548995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:15.552454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:15.554238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:15.554398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:15.555313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.555451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 
MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:52:05.816812Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409549:2][72075186233409546][30:834:2676] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:52:05.816925Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409549:2][30:787:2676] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:52:05.817083Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409549:2][72075186233409546][30:834:2676] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750863125793047 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750863125793047 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750863125793047 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:52:05.821785Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409549:2][72075186233409546][30:834:2676] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-25T14:52:05.821865Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409549:2][30:787:2676] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:52:06.009100Z node 30 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:52:06.009417Z node 30 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 343us result status StatusSuccess 2025-06-25T14:52:06.010366Z node 30 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpSinkLocks::VisibleUncommittedRows [GOOD] >> KqpSinkLocks::VisibleUncommittedRowsUpdate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotIsolation::TConflictReadWriteOlap [GOOD] Test command err: Trying to start YDB, gRPC: 30817, MsgBus: 18748 2025-06-25T14:51:59.070952Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899836015417770:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:59.071071Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012ac/r3tmp/tmpSSILHI/pdisk_1.dat 2025-06-25T14:51:59.384819Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:59.388109Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899836015417751:2080] 1750863119069765 != 1750863119069768 TServer::EnableGrpc on GrpcPort 30817, node 1 2025-06-25T14:51:59.445119Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:59.445160Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:59.445170Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:59.445317Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:59.445705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:59.445818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:59.447446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is 
connected to server localhost:18748 TClient is connected to server localhost:18748 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:59.918317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:59.941072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:00.080550Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:01.872529Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899844605352979:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:01.872616Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:01.872720Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899844605352992:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:01.876001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:01.887811Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899844605352995:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:52:01.962727Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899844605353047:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:02.247094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:02.373746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:03.181440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.070914Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899836015417770:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:04.071012Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TCdcStreamWithInitialScanTests::InitialScanEnabled [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanDisabled ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::InvalidateOnError [GOOD] Test command err: Trying to start YDB, gRPC: 2783, MsgBus: 65119 2025-06-25T14:51:52.707728Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899809146233604:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:52.707806Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012df/r3tmp/tmpzrxrXU/pdisk_1.dat 2025-06-25T14:51:53.114669Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899809146233575:2080] 1750863112704845 != 1750863112704848 2025-06-25T14:51:53.128866Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:53.134374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:53.134469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 2783, node 1 2025-06-25T14:51:53.163149Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:53.300894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:53.300917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:53.300923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:53.301041Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65119 2025-06-25T14:51:53.717698Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:65119 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:54.164714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:54.196817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:51:54.201742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:54.342560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:51:54.488195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:54.569870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:55.734270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899822031137105:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.734383Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.630449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.654385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.682295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.705004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.707862Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899809146233604:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:57.707916Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:57.740533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.773342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.799723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.939121Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899830621072364:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.939203Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.939582Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899830621072369:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.943661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:57.960837Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899830621072371:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:51:58.031876Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899834916039719:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 22349, MsgBus: 14642 2025-06-25T14:52:01.088744Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899845371384401:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:01.088785Z node 2 :METADATA_PROVIDER ERROR: log.cpp: ... PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:01.749476Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:01.757081Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:01.768568Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:01.853353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:02.019617Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:02.104298Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-25T14:52:02.109445Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:03.894751Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899853961320605:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:03.894831Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:03.952330Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.019362Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.049258Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.093470Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.125272Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.172088Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.209614Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.318902Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899858256288562:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:04.318972Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899858256288567:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:04.318971Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:04.322460Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:04.331346Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899858256288569:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:04.424488Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899858256288620:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:05.398320Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-25T14:52:05.408060Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037911 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:52:05.408404Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 3 at tablet 72075186224037911 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:52:05.408676Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [2:7519899862551256209:2473], Table: `/Root/KeyValue` ([72057594046644480:6:1]), SessionActorId: [2:7519899862551256184:2473]Got CONSTRAINT VIOLATION for table `/Root/KeyValue`. ShardID=72075186224037911, Sink=[2:7519899862551256209:2473].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:52:05.409200Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519899862551256202:2473], SessionActorId: [2:7519899862551256184:2473], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/KeyValue`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[2:7519899862551256184:2473]. isRollback=0 2025-06-25T14:52:05.409433Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=YjJmNWRkLTcyZmE2ZGMwLTQzOWFiOGViLWQ1NzQ2Nzk4, ActorId: [2:7519899862551256184:2473], ActorState: ExecuteState, TraceId: 01jyks66tm8y27qmv5x23xeabw, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [2:7519899862551256203:2473] from: [2:7519899862551256202:2473] 2025-06-25T14:52:05.409534Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519899862551256203:2473] TxId: 281474976715672. Ctx: { TraceId: 01jyks66tm8y27qmv5x23xeabw, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjJmNWRkLTcyZmE2ZGMwLTQzOWFiOGViLWQ1NzQ2Nzk4, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/KeyValue`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:52:05.409774Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YjJmNWRkLTcyZmE2ZGMwLTQzOWFiOGViLWQ1NzQ2Nzk4, ActorId: [2:7519899862551256184:2473], ActorState: ExecuteState, TraceId: 01jyks66tm8y27qmv5x23xeabw, Create QueryResponse for error on request, msg: 2025-06-25T14:52:05.501324Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YjJmNWRkLTcyZmE2ZGMwLTQzOWFiOGViLWQ1NzQ2Nzk4, ActorId: [2:7519899862551256184:2473], ActorState: ExecuteState, TraceId: 01jyks66x95x1ds78bcj15g3k8, Create QueryResponse for error on request, msg: >> TSchemeShardTTLTestsWithReboots::CreateTable [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-13 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-14 >> TopicService::UnknownConsumer [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-41 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-42 >> KqpTx::LocksAbortOnCommit >> TCdcStreamWithInitialScanTests::InitialScanDisabled [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanProgress >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-29 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-30 >> TCdcStreamTests::CopyTableShouldNotCopyStream [GOOD] >> TCdcStreamTests::MoveTableShouldFail ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::CreateTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:04.492192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:04.492285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:04.492363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:04.492408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:04.492455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:04.492482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:04.492540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:04.492610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:04.493388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:04.493708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:04.570108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:04.570171Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:04.571439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:04.586810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:04.590678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:04.590888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:04.607270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:04.607505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:04.608177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.608560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:04.611329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:04.611591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:04.612898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:04.612979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:04.613070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:04.613120Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:04.613160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:04.613371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:04.621666Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:04.791232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:04.791473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.791660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:04.791703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:04.791924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:04.792065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:04.794196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.794407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:04.794613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.794666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:04.794717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:04.794772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:04.798818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.798882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:04.798924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:04.800603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.800648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.800723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:04.800774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:04.804206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:04.805867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:04.806079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:04.806992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.807114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 42949694 ... 
4, cookie: 1002 2025-06-25T14:52:07.201983Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-25T14:52:07.202015Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1002 2025-06-25T14:52:07.202045Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-25T14:52:07.202080Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:52:07.202152Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1002, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 1002 2025-06-25T14:52:07.204051Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1002 Step: 5000003 OrderId: 1002 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1010 } } CommitVersion { Step: 5000003 TxId: 1002 } 2025-06-25T14:52:07.204094Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409546, partId: 0 2025-06-25T14:52:07.204221Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1002 Step: 5000003 OrderId: 1002 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1010 } } CommitVersion { Step: 5000003 TxId: 1002 } 2025-06-25T14:52:07.204337Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 1002 Step: 5000003 OrderId: 1002 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 1010 } } CommitVersion { Step: 5000003 TxId: 1002 } 2025-06-25T14:52:07.204856Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 331 RawX2: 309237647628 } Origin: 72075186233409546 State: 2 TxId: 1002 Step: 0 Generation: 2 2025-06-25T14:52:07.204894Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409546, partId: 0 2025-06-25T14:52:07.204989Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: Source { RawX1: 331 RawX2: 309237647628 } Origin: 72075186233409546 State: 2 TxId: 1002 Step: 0 Generation: 2 
2025-06-25T14:52:07.205036Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 1002:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:52:07.205111Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 1002:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 331 RawX2: 309237647628 } Origin: 72075186233409546 State: 2 TxId: 1002 Step: 0 Generation: 2 2025-06-25T14:52:07.205169Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1002:0, shardIdx: 72057594046678944:1, shard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:52:07.205204Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-25T14:52:07.205239Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 1002:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-25T14:52:07.205278Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1002:0 129 -> 240 2025-06-25T14:52:07.208072Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-25T14:52:07.208261Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-25T14:52:07.209598Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-25T14:52:07.209682Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-25T14:52:07.209950Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-25T14:52:07.209983Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1002:0 ProgressState 2025-06-25T14:52:07.210054Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1002:0 progress is 1/1 2025-06-25T14:52:07.210076Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-25T14:52:07.210104Z node 72 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1002:0 progress is 1/1 2025-06-25T14:52:07.210125Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-25T14:52:07.210151Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1002, ready parts: 1/1, is published: true 2025-06-25T14:52:07.210178Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-25T14:52:07.210206Z node 72 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1002:0 2025-06-25T14:52:07.210230Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1002:0 2025-06-25T14:52:07.210343Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 TestModificationResult got TxId: 1002, wait until txId: 1002 TestWaitNotification wait txId: 1002 2025-06-25T14:52:07.212868Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1002: send EvNotifyTxCompletion 2025-06-25T14:52:07.212911Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1002 2025-06-25T14:52:07.213250Z node 72 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1002, at schemeshard: 72057594046678944 2025-06-25T14:52:07.213337Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-25T14:52:07.213367Z node 72 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [72:408:2379] TestWaitNotification: OK eventTxId 1002 2025-06-25T14:52:07.213781Z node 72 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:52:07.213965Z node 72 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 220us result status StatusSuccess 2025-06-25T14:52:07.214487Z node 72 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 
Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpSnapshotIsolation::TConflictReadWriteOltp [GOOD] >> KqpSnapshotIsolation::TConflictReadWriteOltpNoSink [GOOD] >> KqpLocks::DifferentKeyUpdate >> TPersQueueTest::SetupLockSession [GOOD] >> TPersQueueTest::StreamReadCreateAndDestroyMsgs >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite1 >> TPersQueueTest::Cache [GOOD] >> TPersQueueTest::CacheHead |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotIsolation::TConflictReadWriteOltpNoSink [GOOD] >> KqpSinkLocks::InvalidateOnCommit [GOOD] >> KqpSinkLocks::InvalidateOlapOnCommit >> KqpSnapshotIsolation::TReadOnlyOltp [GOOD] >> KqpSnapshotIsolation::TReadOnlyOltpNoSink [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-35 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-36 >> KqpSinkMvcc::OlapReadOnlyTxCommitsOnConcurrentWrite [GOOD] >> KqpSinkMvcc::OlapNamedStatement >> TopicService::UnknownTopic |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotIsolation::TReadOnlyOltpNoSink [GOOD] >> KqpSnapshotRead::ReadOnlyTxWithIndexCommitsOnConcurrentWrite+withSink [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite1+withSink [GOOD] >> KqpSnapshotRead::TestReadOnly-withSink [GOOD] >> KqpSnapshotRead::TestSnapshotExpiration+withSink >> TCdcStreamWithInitialScanTests::InitialScanProgress [GOOD] >> TCdcStreamWithInitialScanTests::WithoutPqTransactions >> TCdcStreamTests::MoveTableShouldFail [GOOD] >> TCdcStreamTests::CheckSchemeLimits >> KqpLocksTricky::TestNoLocksIssue-withSink >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite1 [GOOD] >> KqpSinkTx::OlapLocksAbortOnCommit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadOnlyTxWithIndexCommitsOnConcurrentWrite+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 26958, MsgBus: 14371 2025-06-25T14:51:53.464181Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899812061188348:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:53.478506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012c7/r3tmp/tmpxIfwsk/pdisk_1.dat 2025-06-25T14:51:53.869811Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:53.885373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:53.885472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:53.887303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26958, node 1 2025-06-25T14:51:53.972779Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:53.972798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:53.972808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:53.972898Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14371 TClient is connected to server localhost:14371 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:51:54.468538Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:54.571995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:54.585052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:51:54.593807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:51:54.736563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:54.854224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:54.918675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:56.409416Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899824946091738:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:56.409561Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.629349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.657114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.685285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.722594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.788424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.855327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.881937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.974576Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899829241059701:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.974671Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.974874Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899829241059706:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.978351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:57.988084Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899829241059708:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:51:58.053311Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899833536027055:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:58.460686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899812061188348:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:58.460748Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 18244, MsgBus: 26602 2025-06-25T14:52:01.695379Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899844778002232:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:01.695465Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/run ... ver::EnableGrpc on GrpcPort 18244, node 2 2025-06-25T14:52:01.837841Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:01.837926Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:01.839551Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:01.855753Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:01.855771Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:01.855781Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:01.855870Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26602 TClient is connected to server localhost:26602 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:02.308526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:02.315027Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:02.324034Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:02.410265Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:02.546318Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:02.605055Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:02.731633Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:04.394311Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899857662905747:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:04.394469Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:04.454794Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.524488Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.554342Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.584289Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.611548Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.642411Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.676437Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.763530Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899857662906409:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:04.763614Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:04.763693Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899857662906414:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:04.767199Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:04.784521Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899857662906416:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:04.859506Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899857662906467:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:05.907225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:05.943306Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:06.010676Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:06.695715Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899844778002232:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:06.695798Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite1+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 29025, MsgBus: 28105 2025-06-25T14:51:52.705524Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899807307238307:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:52.709124Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012ea/r3tmp/tmpcl2pVe/pdisk_1.dat 2025-06-25T14:51:53.111376Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:53.114420Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899807307238275:2080] 1750863112698804 != 1750863112698807 2025-06-25T14:51:53.159713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:53.159818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 29025, node 1 
2025-06-25T14:51:53.161846Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:53.293017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:53.293042Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:53.293048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:53.293177Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28105 2025-06-25T14:51:53.725820Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28105 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:54.123571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:54.153281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:51:54.175107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:54.338238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:51:54.495475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:54.558776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:55.741287Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899820192141799:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.741424Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.629836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.652851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.687781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.705695Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899807307238307:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:57.705748Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:57.715282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.746358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.781839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.852743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.932234Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899828782077061:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.932328Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.932616Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899828782077066:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.936711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:57.950468Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899828782077068:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:51:58.039852Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899833077044415:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:00.063500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... from file: (empty maybe) 2025-06-25T14:52:04.371897Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5956 TClient is connected to server localhost:5956 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:52:04.805329Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:52:04.817618Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:04.889371Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:05.058717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:05.119539Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:05.183887Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:07.244441Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899873766513594:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:07.244545Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:07.301322Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.327575Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.361363Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.391609Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.420843Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.455081Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.492411Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.562899Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899873766514250:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:07.562988Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:07.563152Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899873766514255:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:07.567298Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:07.580125Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899873766514257:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:07.641930Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899873766514308:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:08.978822Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976710674; 2025-06-25T14:52:08.987197Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [2:7519899878061481940:2473], Table: `/Root/TwoShard` ([72057594046644480:2:1]), SessionActorId: [2:7519899878061481873:2473]Got LOCKS BROKEN for table `/Root/TwoShard`. ShardID=72075186224037888, Sink=[2:7519899878061481940:2473].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-25T14:52:08.987717Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519899878061481933:2473], SessionActorId: [2:7519899878061481873:2473], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/TwoShard`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[2:7519899878061481873:2473]. isRollback=0 2025-06-25T14:52:08.987950Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=ZTI5NmRlMWQtNTljMjQ4MWQtY2QyMzg0NzUtNmEyMGIwODM=, ActorId: [2:7519899878061481873:2473], ActorState: ExecuteState, TraceId: 01jyks6aaw64dgfa1dftdgy83c, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519899878061481934:2473] from: [2:7519899878061481933:2473] 2025-06-25T14:52:08.988022Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519899878061481934:2473] TxId: 281474976710674. Ctx: { TraceId: 01jyks6aaw64dgfa1dftdgy83c, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTI5NmRlMWQtNTljMjQ4MWQtY2QyMzg0NzUtNmEyMGIwODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/TwoShard`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T14:52:08.988193Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZTI5NmRlMWQtNTljMjQ4MWQtY2QyMzg0NzUtNmEyMGIwODM=, ActorId: [2:7519899878061481873:2473], ActorState: ExecuteState, TraceId: 01jyks6aaw64dgfa1dftdgy83c, Create QueryResponse for error on request, msg: 2025-06-25T14:52:09.162772Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899860881610227:2155];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:09.162856Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpTx::CommitRoTx ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite1 [GOOD] Test command err: Trying to start YDB, gRPC: 2388, MsgBus: 19783 2025-06-25T14:51:57.141815Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899831065309987:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:57.141895Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012b8/r3tmp/tmpVUX3CC/pdisk_1.dat 2025-06-25T14:51:57.411983Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899831065309967:2080] 1750863117140629 != 1750863117140632 2025-06-25T14:51:57.419844Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2388, node 1 2025-06-25T14:51:57.465795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:57.465816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:57.465825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:57.465990Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:57.500802Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:57.500914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:57.502562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19783 TClient is connected to server localhost:19783 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:57.962261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:57.978142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:51:58.151248Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:59.684258Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899839655245179:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:59.684458Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:59.684859Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899839655245206:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:59.689151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:59.699984Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899839655245208:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:51:59.804527Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899839655245259:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:00.055028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:00.187146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:01.087454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:02.141506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899831065309987:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:02.141577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 29675, MsgBus: 28405 2025-06-25T14:52:03.945846Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899853936165355:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:03.945885Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012b8/r3tmp/tmps7hrN3/pdisk_1.dat 2025-06-25T14:52:04.098181Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:04.099922Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899853936165338:2080] 1750863123944820 != 1750863123944823 TServer::EnableGrpc on GrpcPort 29675, node 2 2025-06-25T14:52:04.129533Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:04.129603Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:04.144225Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:04.173925Z node 2 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:04.173946Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:04.173972Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:04.174079Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28405 TClient is connected to server localhost:28405 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:04.572848Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:04.979075Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:06.987853Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899866821067859:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:06.987913Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899866821067867:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:06.987975Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:06.991231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:06.999880Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899866821067873:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:52:07.055553Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899871116035220:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:07.102559Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.137208Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:08.101501Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:09.015580Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899853936165355:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:09.018102Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:09.609236Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976715666; 2025-06-25T14:52:09.610382Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [2:7519899879705977844:2930], Table: `/Root/KV` ([72057594046644480:7:1]), SessionActorId: [2:7519899879705977778:2930]Got LOCKS BROKEN for table `/Root/KV`. ShardID=72075186224037889, Sink=[2:7519899879705977844:2930].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-25T14:52:09.610821Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519899879705977837:2930], SessionActorId: [2:7519899879705977778:2930], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/KV`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[2:7519899879705977778:2930]. isRollback=0 2025-06-25T14:52:09.611051Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=N2MzNmUwMDUtOTNiODI4ZjYtZjNmZDMxNjMtNWNjZTVlNzQ=, ActorId: [2:7519899879705977778:2930], ActorState: ExecuteState, TraceId: 01jyks6ay972evh202zahm0206, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519899879705977838:2930] from: [2:7519899879705977837:2930] 2025-06-25T14:52:09.611118Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519899879705977838:2930] TxId: 281474976715666. Ctx: { TraceId: 01jyks6ay972evh202zahm0206, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=N2MzNmUwMDUtOTNiODI4ZjYtZjNmZDMxNjMtNWNjZTVlNzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/KV`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T14:52:09.611296Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=N2MzNmUwMDUtOTNiODI4ZjYtZjNmZDMxNjMtNWNjZTVlNzQ=, ActorId: [2:7519899879705977778:2930], ActorState: ExecuteState, TraceId: 01jyks6ay972evh202zahm0206, Create QueryResponse for error on request, msg: >> TCdcStreamWithInitialScanTests::WithoutPqTransactions [GOOD] >> TCdcStreamWithInitialScanTests::WithPqTransactions >> KqpSnapshotRead::TestSnapshotExpiration-withSink [GOOD] >> KqpTx::BeginTransactionBadMode >> TCdcStreamTests::CheckSchemeLimits [GOOD] >> TCdcStreamTests::RebootSchemeShard >> TPersQueueTest::EventBatching [GOOD] >> TPersQueueTest::NoDecompressionMemoryLeaks >> TPersQueueTest::WriteEmptyData [GOOD] >> TPersQueueTest::WriteNonExistingPartition >> KqpSinkTx::ExplicitTcl >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-CreateUser-14 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-25 >> KqpSinkLocks::TInvalidate >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-42 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-43 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-30 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-31 >> TPersQueueTest::SchemeshardRestart [GOOD] >> TPersQueueTest::SameOffset >> KqpLocksTricky::TestNoWrite >> TCdcStreamWithInitialScanTests::WithPqTransactions [GOOD] >> TCdcStreamWithInitialScanTests::AlterStream >> KqpSinkLocks::VisibleUncommittedRowsUpdate [GOOD] >> KqpLocks::MixedTxFail+useSink [GOOD] >> TCdcStreamTests::RebootSchemeShard [GOOD] >> TCdcStreamTests::MeteringServerless >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-36 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-37 >> KqpTx::LocksAbortOnCommit [GOOD] >> KqpTx::MixEnginesOldNew >> KqpSinkMvcc::SnapshotExpiration [GOOD] >> KqpSinkTx::DeferredEffects >> KqpLocks::DifferentKeyUpdate [GOOD] >> KqpLocks::EmptyRange ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::VisibleUncommittedRowsUpdate [GOOD] Test command err: Trying to start YDB, gRPC: 4733, MsgBus: 7299 2025-06-25T14:51:52.703576Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899808218722806:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:52.703759Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012f5/r3tmp/tmpCpq0TA/pdisk_1.dat 2025-06-25T14:51:53.111967Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:53.113297Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899808218722784:2080] 1750863112698790 != 1750863112698793 2025-06-25T14:51:53.139113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:53.142814Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:53.145306Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4733, node 1 2025-06-25T14:51:53.292919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:53.292942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:53.292949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:53.293063Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7299 2025-06-25T14:51:53.721030Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7299 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:54.100894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:55.678128Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899821103625312:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.678253Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899821103625322:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.678312Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.700204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:55.712232Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899821103625326:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:51:55.796139Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899821103625379:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:57.708369Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899808218722806:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:57.708428Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:57.712343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.858403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:58.621288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 24499, MsgBus: 23401 2025-06-25T14:52:01.044141Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899845900989507:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:01.044188Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012f5/r3tmp/tmpdZZuKN/pdisk_1.dat 2025-06-25T14:52:01.201489Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:01.202382Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899845900989489:2080] 1750863121038864 != 1750863121038867 2025-06-25T14:52:01.218359Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:01.218446Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:01.220721Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24499, node 2 2025-06-25T14:52:01.356884Z node 2 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:01.356906Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:01.356913Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:01.357031Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23401 TClient is connected to server localhost:23401 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:01.830460Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:01.837375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:02.069913Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:03.943694Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899854490924711:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:03.943753Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899854490924716:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:03.943802Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:03.947283Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:03.961610Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899854490924725:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:52:04.031163Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899858785892073:2330] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:04.074406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:04.156510Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:05.101828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:06.044358Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899845900989507:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:06.044431Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 5788, MsgBus: 8148 2025-06-25T14:52:07.325877Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519899872870174009:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:07.325978Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012f5/r3tmp/tmp1vASOM/pdisk_1.dat 2025-06-25T14:52:07.429363Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:07.430049Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519899872870173990:2080] 1750863127325394 != 1750863127325397 TServer::EnableGrpc on GrpcPort 5788, node 3 2025-06-25T14:52:07.469225Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:07.469314Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:07.471576Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:07.488832Z node 3 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:07.488855Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:07.488863Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:07.488978Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8148 TClient is connected to server localhost:8148 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:07.938778Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:07.945138Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:08.414895Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:10.204129Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519899885755076494:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:10.204259Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:10.205974Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519899885755076520:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:10.212857Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:10.222815Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519899885755076522:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:52:10.300227Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519899885755076573:2330] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:10.359142Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:10.403216Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:11.324359Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.326103Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519899872870174009:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:12.326170Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TCdcStreamWithInitialScanTests::AlterStream [GOOD] >> TCdcStreamWithInitialScanTests::DropStream ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpLocks::MixedTxFail+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 8332, MsgBus: 16004 2025-06-25T14:51:52.702400Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899806458729147:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:52.702488Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012dc/r3tmp/tmpomnUkD/pdisk_1.dat 2025-06-25T14:51:53.131976Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899806458729122:2080] 1750863112698866 != 1750863112698869 2025-06-25T14:51:53.150138Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:53.169912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:53.169981Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-25T14:51:53.178005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8332, node 1 2025-06-25T14:51:53.296896Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:53.296926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:53.296938Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:53.297079Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16004 2025-06-25T14:51:53.736409Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16004 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:54.066141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:54.115890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:54.278882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:51:54.423853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:54.484044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:55.856095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899819343632635:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.856227Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.630345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.652599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.703212Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899806458729147:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:57.703292Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:57.738382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.809323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.837650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.873727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.944731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:58.015717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[1:7519899832228535192:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:58.015799Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:58.016078Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899832228535197:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:58.019354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:58.028451Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899832228535199:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:51:58.118664Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899832228535250:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:00.393940Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519899840818470191:2481], SessionActorId: [1:7519899836523502846:2481], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 . sessionActorId=[1:7519899836523502846:2481]. isRollback=0 2025-06-25T14:52:00.394243Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=ZjE0NTQ3YzgtM2I5NTU4ZWUtZTU3NjM2ZWEtZTA3MTkzZWM=, ActorId: [1:751989983652 ... og.cpp:784: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.193151Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037944;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.197355Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.197876Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.198418Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037944;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.199119Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.201767Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.202310Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.204716Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.205355Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.206493Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.206962Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.210929Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.211062Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.211606Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.211656Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.215466Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.216000Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.216946Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.217656Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.219544Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.220063Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.223231Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.223975Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.224558Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.225149Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.229603Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.229696Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.230424Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037936;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:52:12.235746Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037936;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:52:12.465365Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;self_id=[3:7519899883200804228:2330];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037947;local_tx_no=13;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037899,72075186224037903;receive=72075186224037907; 2025-06-25T14:52:12.465760Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:12.466226Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:12.466331Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:12.466718Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037907;local_tx_no=12;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715664; 2025-06-25T14:52:12.466918Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:12.769651Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976715668; 2025-06-25T14:52:12.772000Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2751: SelfId: [3:7519899891790741063:2728], SessionActorId: [3:7519899891790741008:2728], Got LOCKS BROKEN for table. ShardID=72075186224037888, Sink=[3:7519899891790741063:2728].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-25T14:52:12.772141Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519899891790741063:2728], SessionActorId: [3:7519899891790741008:2728], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/DataShard`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:7519899891790741008:2728]. isRollback=0 2025-06-25T14:52:12.772349Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=NTAxNzk1MWUtOWQ2MWQ1N2EtMTljMWRkMGUtNGE1YjMwZDM=, ActorId: [3:7519899891790741008:2728], ActorState: ExecuteState, TraceId: 01jyks6e0h0xgsty3jchjk6ksn, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:7519899891790741089:2728] from: [3:7519899891790741063:2728] 2025-06-25T14:52:12.772430Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519899891790741089:2728] TxId: 281474976715668. Ctx: { TraceId: 01jyks6e0h0xgsty3jchjk6ksn, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NTAxNzk1MWUtOWQ2MWQ1N2EtMTljMWRkMGUtNGE1YjMwZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/DataShard`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T14:52:12.772608Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NTAxNzk1MWUtOWQ2MWQ1N2EtMTljMWRkMGUtNGE1YjMwZDM=, ActorId: [3:7519899891790741008:2728], ActorState: ExecuteState, TraceId: 01jyks6e0h0xgsty3jchjk6ksn, Create QueryResponse for error on request, msg: 2025-06-25T14:52:12.772906Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037936;tx_state=TTxProgressTx::Execute;tx_current=281474976715668;tx_id=281474976715668;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715668; 2025-06-25T14:52:12.773685Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037936;self_id=[3:7519899883200804376:2360];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037936;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:12.774186Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037936;tx_state=TTxProgressTx::Complete;fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=lock invalidated;tx_id=281474976715668; >> KqpSinkMvcc::OltpNamedStatementNoSink >> KqpTx::SnapshotRO >> TCdcStreamWithInitialScanTests::DropStream [GOOD] >> TCdcStreamWithInitialScanTests::RacyAlterStreamAndRestart >> KqpTx::CommitRoTx [GOOD] >> KqpTx::CommitRoTx_TLI >> KqpTx::BeginTransactionBadMode [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-43 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-44 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-31 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-32 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-25 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-26 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::BeginTransactionBadMode [GOOD] Test command err: Trying to start YDB, gRPC: 14715, MsgBus: 9373 2025-06-25T14:51:54.871874Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899814784203923:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:54.871946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012bf/r3tmp/tmp8WqxfW/pdisk_1.dat 2025-06-25T14:51:55.223140Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:55.223386Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899814784203903:2080] 1750863114870987 != 1750863114870990 TServer::EnableGrpc on GrpcPort 14715, node 1 2025-06-25T14:51:55.270335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:55.270436Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:55.272184Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:55.281250Z node 
1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:55.281287Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:55.281296Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:55.281462Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9373 TClient is connected to server localhost:9373 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:55.739105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:55.753964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:55.879637Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:55.891255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:56.047030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:51:56.139919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:57.541172Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899827669107428:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.541304Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:57.834325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.866178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.906045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.935803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.971259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:58.040787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:58.074893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:58.131356Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899831964075387:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:58.131458Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:58.131576Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899831964075392:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:58.135140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:58.147346Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899831964075394:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:51:58.226893Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899831964075445:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:59.872452Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899814784203923:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:59.872528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:10.180860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:52:10.180891Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:11.324500Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [1:7519899887798650952:2611], TxId: 281474976710682, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ZjBiZTliOGQtYTdjMTZkY2UtZDQ4ZGQyMjgtNzhlZWVlOTE=. TraceId : 01jyks6cg1ejk4 ... PoolId : default. }. InternalError: ABORTED DEFAULT_ERROR: {
: Error: Read request aborted subissue: {
: Error: Table id 2 has no snapshot at v1750863120055/18446744073709551615 shard 72075186224037888 with lowWatermark v1750863120370/18446744073709551615 (node# 1 state# Ready) } }. 2025-06-25T14:52:11.325360Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519899887798650954:2612], TxId: 281474976710682, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=ZjBiZTliOGQtYTdjMTZkY2UtZDQ4ZGQyMjgtNzhlZWVlOTE=. TraceId : 01jyks6cg1ejk473rpbfj5vqn8. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7519899887798650948:2466], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:52:11.325658Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZjBiZTliOGQtYTdjMTZkY2UtZDQ4ZGQyMjgtNzhlZWVlOTE=, ActorId: [1:7519899840554010270:2466], ActorState: ExecuteState, TraceId: 01jyks6cg1ejk473rpbfj5vqn8, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 23236, MsgBus: 10518 2025-06-25T14:52:12.065896Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899893084313343:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:12.069905Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012bf/r3tmp/tmpX7HSZU/pdisk_1.dat 2025-06-25T14:52:12.175148Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23236, node 2 2025-06-25T14:52:12.198871Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:12.198953Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:12.200743Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:12.218543Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:12.218558Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:12.218563Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:12.218711Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10518 TClient is connected to server localhost:10518 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:12.654286Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:52:12.665129Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:12.728399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:12.877662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:12.960136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:13.114434Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:15.033743Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899905969216839:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.033847Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.103237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:15.136828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:15.172996Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:15.222856Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:15.255643Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:15.295090Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:15.329025Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:15.427926Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899905969217498:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.428032Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.428295Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899905969217503:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.432919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:15.448723Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899905969217505:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:15.538901Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899905969217556:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> DemoTx::Scenario_4 [GOOD] >> TCdcStreamWithInitialScanTests::RacyAlterStreamAndRestart [GOOD] >> TCdcStreamWithInitialScanTests::MeteringServerless >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-37 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-38 >> KqpSinkLocks::TInvalidate [GOOD] >> KqpSinkLocks::TInvalidateOlap >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError [GOOD] >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError >> KqpTx::MixEnginesOldNew [GOOD] >> KqpSinkTx::ExplicitTcl [GOOD] >> KqpSinkTx::Interactive >> KqpTx::ExplicitTcl >> DemoTx::Scenario_5 >> TopicService::UnknownTopic [GOOD] >> TPersQueueTest::TopicServiceReadBudget [GOOD] >> TPersQueueTest::TopicServiceSimpleHappyWrites >> KqpLocks::EmptyRange [GOOD] >> KqpLocks::EmptyRangeAlreadyBroken ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::MixEnginesOldNew [GOOD] Test command err: Trying to start YDB, gRPC: 14625, MsgBus: 64432 2025-06-25T14:52:08.337957Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899878281855586:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:08.338034Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001293/r3tmp/tmp3sMfIs/pdisk_1.dat 2025-06-25T14:52:08.700290Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899878281855486:2080] 1750863128326278 != 1750863128326281 2025-06-25T14:52:08.711013Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14625, node 1 2025-06-25T14:52:08.757326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:08.757601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:08.775318Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:08.800854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:08.800891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:08.800897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:08.801020Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64432 TClient is connected to server 
localhost:64432 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:52:09.342747Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:09.353024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:09.381203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:09.491932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:09.672386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:09.762363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:11.322590Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899891166759016:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:11.322729Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:11.643498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:11.671275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:11.696819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:11.761915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:11.789130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:11.817328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:11.842363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:11.891325Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899891166759682:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:11.891447Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:11.891534Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899891166759687:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:11.894512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:11.902650Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899891166759689:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:11.960291Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899891166759740:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:13.352257Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899878281855586:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:13.366996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:13.782573Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NDJjZWJlNzktOWE2NzkzYjYtYTQ3MDVhODctYmNhOTRlNA==, ActorId: [1:7519899899756694560:2464], ActorState: ExecuteState, TraceId: 01jyks6ey156ngas414fzzvbpr, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken 2025-06-25T14:52:13.794353Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NDJjZWJlNzktOWE2NzkzYjYtYTQ3MDVhODctYmNhOTRlNA==, ActorId: [1:7519899899756694560:2464], ActorState: ReadyState, TraceId: 01jyks6f31a80wgegaft82g5fv, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 7928, MsgBus: 4692 2025-06-25T14:52:14.537729Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899904342832607:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:14.537777Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001293/r3tmp/tmpijcswu/pdisk_1.dat 2025-06-25T14:52:14.649334Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:14.650386Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899904342832578:2080] 1750863134536080 != 1750863134536083 TServer::EnableGrpc on GrpcPort 7928, node 2 2025-06-25T14:52:14.678758Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:14.678825Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:14.682066Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:14.699982Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:14.700014Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:14.700021Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:14.700130Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4692 TClient is 
connected to server localhost:4692 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:52:15.189318Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:52:15.199572Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:15.273767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:15.407808Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:15.495197Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:15.611135Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:17.607120Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899917227736117:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:17.607192Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:17.673752Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.707797Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.744744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.772590Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.798404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.867048Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.896883Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.956041Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899917227736776:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:17.956118Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899917227736781:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:17.956127Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:17.959355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:17.968144Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899917227736783:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:18.033392Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899921522704130:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpSinkTx::DeferredEffects [GOOD] >> TCdcStreamWithInitialScanTests::MeteringServerless [GOOD] >> TCdcStreamWithInitialScanTests::MeteringDedicated >> TopicService::UseDoubleSlashInTopicPath >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite1 [GOOD] >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite2 >> KqpLocksTricky::TestNoLocksIssue-withSink [GOOD] >> KqpLocksTricky::TestNoLocksIssueInteractiveTx+withSink >> KqpTx::CommitRoTx_TLI [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::DeferredEffects [GOOD] Test command err: Trying to start YDB, gRPC: 6809, MsgBus: 2451 2025-06-25T14:51:52.705207Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899807057465020:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:52.705266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012cb/r3tmp/tmpFaOD01/pdisk_1.dat 2025-06-25T14:51:53.114192Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:53.120555Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899807057464991:2080] 1750863112700525 != 1750863112700528 TServer::EnableGrpc on GrpcPort 6809, node 1 2025-06-25T14:51:53.180481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:53.180568Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:53.182103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:53.291856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:53.291875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:53.291880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:53.291975Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2451 2025-06-25T14:51:53.717867Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2451 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:54.059751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:55.686641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899819942367528:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.686651Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899819942367519:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.686739Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.700201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:55.727716Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899819942367534:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:51:55.799888Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899819942367587:2335] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:57.705263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899807057465020:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:57.705353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:57.706191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:57.834693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:58.676817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:08.082493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:52:08.082519Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:13.670809Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [1:7519899897251787876:3218], TxId: 281474976715678, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=MmIyMjAzZjMtYmY1MjkzN2QtZjYwOGYzOTMtOTIwY2ExMDU=. TraceId : 01jyks6ez0enca3spetpnfxbvm. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Source[0] fatal error: {
: Error: Read request aborted subissue: {
: Error: Table id 7 has no snapshot at v1750863120000/18446744073709551615 shard 72075186224037889 with lowWatermark v1750863120209/18446744073709551615 (node# 1 state# Ready) } } 2025-06-25T14:52:13.671234Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519899897251787876:3218], TxId: 281474976715678, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=MmIyMjAzZjMtYmY1MjkzN2QtZjYwOGYzOTMtOTIwY2ExMDU=. TraceId : 01jyks6ez0enca3spetpnfxbvm. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: ABORTED DEFAULT_ERROR: {
: Error: Read request aborted subissue: {
: Error: Table id 7 has no snapshot at v1750863120000/18446744073709551615 shard 72075186224037889 with lowWatermark v1750863120209/18446744073709551615 (node# 1 state# Ready) } }. 2025-06-25T14:52:13.671794Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519899897251787877:3219], TxId: 281474976715678, task: 2. Ctx: { TraceId : 01jyks6ez0enca3spetpnfxbvm. SessionId : ydb://session/3?node_id=1&id=MmIyMjAzZjMtYmY1MjkzN2QtZjYwOGYzOTMtOTIwY2ExMDU=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7519899897251787872:2934], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:52:13.672337Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MmIyMjAzZjMtYmY1MjkzN2QtZjYwOGYzOTMtOTIwY2ExMDU=, ActorId: [1:7519899837122244738:2934], ActorState: ExecuteState, TraceId: 01jyks6ez0enca3spetpnfxbvm, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 25039, MsgBus: 16786 2025-06-25T14:52:14.593105Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899904005749520:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:14.593811Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012cb/r3tmp/tmpehn2HK/pdisk_1.dat 2025-06-25T14:52:14.770541Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:14.785702Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:14.785780Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:14.787509Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25039, node 2 2025-06-25T14:52:14.873008Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:14.873034Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:14.873041Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:14.873197Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16786 TClient is connected to server localhost:16786 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:15.322468Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
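The commit-time aborts seen earlier in this log ("tx has deferred effects, but locks are broken") call for the same recovery, but applied to the whole interactive transaction: under optimistic locking a concurrent writer can invalidate the transaction's locks, and the only safe reaction is to re-run every statement, not just the commit. A hedged sketch follows, again assuming the in-repo C++ SDK; the table, key and value are illustrative only, and the exact API surface should be checked against the SDK version in use.

// Hedged sketch: the entire read-then-write transaction lives inside the retried
// callable, so a commit that fails with ABORTED (broken locks) re-runs it from the top.
#include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>

using namespace NYdb;
using namespace NYdb::NTable;

int main() {
    TDriver driver(TDriverConfig()
        .SetEndpoint("grpc://localhost:2136")   // placeholder endpoint
        .SetDatabase("/Root"));                 // placeholder database
    TTableClient client(driver);

    TStatus status = client.RetryOperationSync([](TSession session) -> TStatus {
        // Open an explicit serializable transaction.
        auto begin = session.BeginTransaction(TTxSettings::SerializableRW()).GetValueSync();
        if (!begin.IsSuccess()) {
            return begin;
        }
        auto tx = begin.GetTransaction();

        // Read inside the transaction (acquires optimistic locks on the read range).
        auto read = session.ExecuteDataQuery(
            "SELECT Value FROM `/Root/KV` WHERE Key = 1;",   // placeholder query
            TTxControl::Tx(tx)).GetValueSync();
        if (!read.IsSuccess()) {
            return read;
        }

        // Write inside the same transaction.
        auto write = session.ExecuteDataQuery(
            "UPSERT INTO `/Root/KV` (Key, Value) VALUES (1, 42);",   // placeholder query
            TTxControl::Tx(tx)).GetValueSync();
        if (!write.IsSuccess()) {
            return write;
        }

        // Commit last; if another writer broke our locks, this returns ABORTED
        // and RetryOperationSync re-runs the whole lambda.
        return tx.Commit().GetValueSync();
    });

    driver.Stop(true);
    return status.IsSuccess() ? 0 : 1;
}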
2025-06-25T14:52:15.612420Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:17.742464Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899916890651994:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:17.742525Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899916890652008:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:17.742551Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:17.745421Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:17.755013Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899916890652010:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:52:17.812839Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899916890652061:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:17.866969Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.910597Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:18.854038Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:19.598232Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899904005749520:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:19.598982Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSinkTx::LocksAbortOnCommit >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-44 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-45 >> KqpTx::SnapshotRO [GOOD] >> KqpTx::SnapshotROInteractive1 >> KqpSinkLocks::InvalidateOlapOnCommit [GOOD] >> KqpSinkLocks::OlapInsertWithBulkUpsert+UseBulkUpsert [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-32 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-33 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-26 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-27 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::CommitRoTx_TLI [GOOD] Test command err: Trying to start YDB, gRPC: 30432, MsgBus: 3448 2025-06-25T14:52:11.572408Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899889793496468:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:11.572528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001245/r3tmp/tmpJPZeYH/pdisk_1.dat 2025-06-25T14:52:11.904511Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:11.904643Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:11.906421Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:11.926906Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:11.927330Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899889793496449:2080] 1750863131571743 != 1750863131571746 TServer::EnableGrpc on GrpcPort 30432, node 1 2025-06-25T14:52:11.989813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:11.989844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:11.989852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:11.989970Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3448 TClient is connected to server localhost:3448 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:12.497990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:12.511143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:12.517763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:12.583912Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:12.642202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:12.773466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:12.844822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:14.535058Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899902678399988:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:14.535374Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:14.822711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:14.854778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:14.902718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:14.971395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:15.019488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:15.057660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:15.090496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:15.146613Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899906973367948:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.146701Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.146959Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899906973367953:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.150042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:15.161279Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899906973367955:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:15.254961Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899906973368006:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:16.572870Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899889793496468:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:16.572949Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 2816, MsgBus: 64860 2025-06-25T14:52:17.252778Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899914067707562:2088];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:17.325574Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001245/r3tmp/tmpU5vyKk/pdisk_1.dat 2025-06-25T14:52:17.418906Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:17.421357Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899914067707489:2080] 1750863137217382 != 1750863137217385 TServer::EnableGrpc on GrpcPort 2816, node 2 2025-06-25T14:52:17.445911Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:17.445981Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:17.451884Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:17.494730Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:17.494750Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:17.494756Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:17.494859Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64860 TClient is connected to server localhost:64860 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:52:17.894776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:52:17.908771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:17.979662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:18.115527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:18.173732Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:18.309857Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:20.101291Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899926952611000:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:20.101390Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:20.157616Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.203087Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.233772Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.259348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.284861Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.310767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.345262Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.451442Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899926952611659:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:20.451561Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:20.451852Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899926952611664:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:20.455374Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:20.465672Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899926952611666:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:20.562766Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899926952611717:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpSinkMvcc::OltpNamedStatementNoSink [GOOD] >> KqpSinkMvcc::OltpNamedStatement >> KqpLocksTricky::TestNoWrite [GOOD] >> KqpSinkLocks::DifferentKeyUpdate >> KqpSinkTx::SnapshotRO >> TPersQueueTest::StreamReadCreateAndDestroyMsgs [GOOD] >> TPersQueueTest::StreamReadCommitAndStatusMsgs >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-38 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-39 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::OlapInsertWithBulkUpsert+UseBulkUpsert [GOOD] Test command err: Trying to start YDB, gRPC: 62334, MsgBus: 19376 2025-06-25T14:52:02.552892Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899852958810077:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:02.553057Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012a9/r3tmp/tmp8Ia7uU/pdisk_1.dat 2025-06-25T14:52:02.802706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:02.802804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:02.804126Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899852958809980:2080] 1750863122537824 != 1750863122537827 2025-06-25T14:52:02.804130Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:02.812140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62334, node 1 2025-06-25T14:52:02.880213Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:02.880233Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:02.880242Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:02.880388Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19376 TClient is connected to server localhost:19376 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:03.416193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:03.436877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:03.558384Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:05.205808Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899865843712517:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:05.205823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899865843712505:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:05.205907Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:05.209655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:05.220217Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899865843712519:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:52:05.292550Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899865843712570:2333] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:05.595150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:05.748924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:06.542852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.549797Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899852958810077:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:07.554164Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:07.962409Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519899874433655226:2930], SessionActorId: [1:7519899874433655185:2930], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 . sessionActorId=[1:7519899874433655185:2930]. isRollback=0 2025-06-25T14:52:07.962609Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=MWFhOWJmZDEtYWZmMGFmODktNjIyYTM4NS05OTVhYjc0NQ==, ActorId: [1:7519899874433655185:2930], ActorState: ExecuteState, TraceId: 01jyks69cdb3jcr006ec7cwrn0, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519899874433655227:2930] from: [1:7519899874433655226:2930] 2025-06-25T14:52:07.962663Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519899874433655227:2930] TxId: 281474976715665. Ctx: { TraceId: 01jyks69cdb3jcr006ec7cwrn0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWFhOWJmZDEtYWZmMGFmODktNjIyYTM4NS05OTVhYjc0NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 } 2025-06-25T14:52:07.962809Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MWFhOWJmZDEtYWZmMGFmODktNjIyYTM4NS05OTVhYjc0NQ==, ActorId: [1:7519899874433655185:2930], ActorState: ExecuteState, TraceId: 01jyks69cdb3jcr006ec7cwrn0, Create QueryResponse for error on request, msg:
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 Trying to start YDB, gRPC: 30116, MsgBus: 4404 2025-06-25T14:52:09.167409Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899881909602135:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:09.167462Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012a9/r3tmp/tmpsCjjOq/pdisk_1.dat 2025-06-25T14:52:09.298324Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:09.299432Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899881909602101:2080] 1750863129166388 != 1750863129166391 TServer::EnableGrpc on GrpcPort 30116, node 2 2025-06-25T14:52:09.330976Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:09.331067Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:09.346876Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:09.376813Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:09.376836Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:09.376843Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:09.376945Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4404 TClient is connected to server localhost:4404 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Child ... 
2129Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:20.292295Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:20.292537Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899894794504874:2310];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=30;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037993,72075186224037996;receive=72075186224037981; 2025-06-25T14:52:20.292700Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899894794504874:2310];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=32;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993,72075186224037996;receive=72075186224037981; 2025-06-25T14:52:20.292756Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899894794504874:2310];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=33;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:20.292802Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899894794504874:2310];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=34;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:20.292977Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899894794504874:2310];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=36;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037996;receive=72075186224037993; 2025-06-25T14:52:20.293021Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899894794504874:2310];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=37;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037996;receive=72075186224037993; 2025-06-25T14:52:20.293405Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:21.115980Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:21.116598Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Complete;commit_tx_id=281474976715667;commit_lock_id=281474976715666;fline=manager.cpp:94;broken_lock_id=281474976715665; 2025-06-25T14:52:21.129689Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[2:7519899894794504873:2309];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=tablet lock have another internal generation counter: 18446744073709551615 != 0;tx_id=281474976715668; 2025-06-25T14:52:21.131501Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2751: SelfId: [2:7519899929154252794:3690], SessionActorId: [2:7519899929154252239:3690], Got LOCKS BROKEN for table. ShardID=72075186224037897, Sink=[2:7519899929154252794:3690].{
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 } 2025-06-25T14:52:21.131627Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519899929154252794:3690], SessionActorId: [2:7519899929154252239:3690], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 . sessionActorId=[2:7519899929154252239:3690]. isRollback=0 2025-06-25T14:52:21.131903Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=OWE4MjMxMjAtNmZlZmMwNTYtYTk1NWYzMDktMWRmMTU0ZTY=, ActorId: [2:7519899929154252239:3690], ActorState: ExecuteState, TraceId: 01jyks6p85ee7x56de6mmjkakr, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519899933449220256:3690] from: [2:7519899929154252794:3690] 2025-06-25T14:52:21.131969Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519899933449220256:3690] TxId: 281474976715668. Ctx: { TraceId: 01jyks6p85ee7x56de6mmjkakr, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OWE4MjMxMjAtNmZlZmMwNTYtYTk1NWYzMDktMWRmMTU0ZTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 } } 2025-06-25T14:52:21.132106Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=OWE4MjMxMjAtNmZlZmMwNTYtYTk1NWYzMDktMWRmMTU0ZTY=, ActorId: [2:7519899929154252239:3690], ActorState: ExecuteState, TraceId: 01jyks6p85ee7x56de6mmjkakr, Create QueryResponse for error on request, msg: 2025-06-25T14:52:21.133285Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037888 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-25T14:52:21.133406Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[2:7519899894794504858:2306];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:21.133558Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037890 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-25T14:52:21.133601Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[2:7519899894794504875:2311];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:21.133713Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037891 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-25T14:52:21.133757Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[2:7519899894794504865:2307];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:21.133816Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[2:7519899894794504873:2309];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:21.133819Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037892 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-25T14:52:21.133865Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899894794504874:2310];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:21.133870Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037893 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-25T14:52:21.133912Z node 2 
:TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[2:7519899894794504966:2314];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:21.133914Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037889 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-25T14:52:21.133945Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[2:7519899894794504866:2308];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:21.134082Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037894 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-25T14:52:21.134103Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037895 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-25T14:52:21.134113Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[2:7519899894794504879:2312];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:21.134142Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[2:7519899894794504856:2305];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:21.134184Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037896 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715668 2025-06-25T14:52:21.134212Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519899894794504898:2313];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0;
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 >> TPersQueueTest::DirectReadBudgetOnRestart [GOOD] >> TPersQueueTest::DirectReadCorrectOffsetsOnRestart >> KqpTx::RollbackManyTx >> TPersQueueTest::WriteNonExistingPartition [GOOD] >> TPersQueueTest::WriteNonExistingTopic >> KqpSnapshotRead::TestSnapshotExpiration+withSink [GOOD] >> KqpSinkMvcc::OlapNamedStatement [GOOD] >> KqpSinkMvcc::OlapMultiSinks >> KqpTx::ExplicitTcl [GOOD] >> KqpTx::EmptyTxOnCommit >> KqpSnapshotIsolation::TConflictWriteOltpNoSink [GOOD] >> KqpSnapshotIsolation::TReadOnlyOlap [GOOD] >> KqpSinkTx::Interactive [GOOD] >> KqpLocks::EmptyRangeAlreadyBroken [GOOD] |87.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotIsolation::TReadOnlyOlap [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::TestSnapshotExpiration+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 3269, MsgBus: 31496 2025-06-25T14:52:03.889077Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899853689498875:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:03.889137Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012a7/r3tmp/tmpYhNNr0/pdisk_1.dat 2025-06-25T14:52:04.225828Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3269, node 1 2025-06-25T14:52:04.304744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:04.304848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:04.306700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:04.312269Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:04.312300Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:04.312330Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:04.312427Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31496 TClient is connected to server localhost:31496 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:04.791292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:04.825948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:04.903029Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:04.966801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:52:05.110090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:05.180228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:06.688673Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899866574402365:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:06.688815Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:06.995691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.022418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.050436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.082077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.110855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.151151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.190794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:07.249980Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899870869370322:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:07.250053Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:07.250486Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899870869370327:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:07.253987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:07.265492Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899870869370329:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:07.326388Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899870869370380:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:08.891324Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899853689498875:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:08.891423Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 11268, MsgBus: 27842 2025-06-25T14:52:09.851562Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899879638433835:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:09.854146Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012a7/r3tmp/tmpjkaqp9/pdisk_1.dat 2025-06-25T14:52:09.961439Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:0 ... ient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:10.505776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:10.512631Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:52:10.523795Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:10.581595Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:10.726415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:10.801424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:10.858206Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:12.545332Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899892523337189:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:12.545427Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:12.588399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.617511Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.650371Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.675587Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.707225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.738684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.806622Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.894443Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899892523337852:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:12.894524Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:12.894724Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899892523337857:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:12.898147Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:12.907002Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899892523337859:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:13.012991Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899896818305208:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:14.852413Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899879638433835:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:14.852556Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:24.951716Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:52:24.951747Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:25.584973Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [2:7519899948357913439:2618], TxId: 281474976715682, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyks6tcc7vfh3rybkk8ev5q8. SessionId : ydb://session/3?node_id=2&id=N2RjNzE0YTUtYjczOWM4YzktZTM3OTMwN2UtNzg0OWVkODg=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Source[0] fatal error: {
: Error: Read request aborted subissue: {
: Error: Table id 2 has no snapshot at v1750863134000/18446744073709551615 shard 72075186224037888 with lowWatermark v1750863134300/18446744073709551615 (node# 2 state# Ready) } } 2025-06-25T14:52:25.585435Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7519899948357913439:2618], TxId: 281474976715682, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyks6tcc7vfh3rybkk8ev5q8. SessionId : ydb://session/3?node_id=2&id=N2RjNzE0YTUtYjczOWM4YzktZTM3OTMwN2UtNzg0OWVkODg=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: ABORTED DEFAULT_ERROR: {
: Error: Read request aborted subissue: {
: Error: Table id 2 has no snapshot at v1750863134000/18446744073709551615 shard 72075186224037888 with lowWatermark v1750863134300/18446744073709551615 (node# 2 state# Ready) } }. 2025-06-25T14:52:25.585932Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519899948357913440:2619], TxId: 281474976715682, task: 2. Ctx: { TraceId : 01jyks6tcc7vfh3rybkk8ev5q8. SessionId : ydb://session/3?node_id=2&id=N2RjNzE0YTUtYjczOWM4YzktZTM3OTMwN2UtNzg0OWVkODg=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519899948357913435:2473], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:52:25.586314Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=N2RjNzE0YTUtYjczOWM4YzktZTM3OTMwN2UtNzg0OWVkODg=, ActorId: [2:7519899896818305475:2473], ActorState: ExecuteState, TraceId: 01jyks6tcc7vfh3rybkk8ev5q8, Create QueryResponse for error on request, msg: >> KqpSinkTx::OlapLocksAbortOnCommit [GOOD] >> KqpSinkTx::OlapSnapshotRO >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-45 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-46 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::Interactive [GOOD] Test command err: Trying to start YDB, gRPC: 63590, MsgBus: 5734 2025-06-25T14:52:12.859028Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899894704617815:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:12.859154Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001244/r3tmp/tmp36h7hw/pdisk_1.dat 2025-06-25T14:52:13.261909Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:13.312336Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:13.312434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 63590, node 1 2025-06-25T14:52:13.324338Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:13.508838Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:13.508859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:13.508865Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:13.508978Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5734 2025-06-25T14:52:13.868605Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5734 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:14.143906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:16.043853Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899911884487605:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:16.043985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899911884487613:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:16.044065Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:16.048141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:16.059857Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899911884487619:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:52:16.158130Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899911884487670:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:16.425812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:16.552354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.539277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:18.181173Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899894704617815:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:18.207315Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:18.864374Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MTMzOGZlNzEtZTFiMjJhYTUtM2I1ZTkwMjgtY2ZmZDlmNTc=, ActorId: [1:7519899920474430147:2930], ActorState: ReadyState, TraceId: 01jyks6m16e6121ja4ywbnfke6, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 63030, MsgBus: 63438 2025-06-25T14:52:20.086565Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899927223586134:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:20.117014Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001244/r3tmp/tmpaX9Dfx/pdisk_1.dat 2025-06-25T14:52:20.253191Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:20.284438Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899927223586096:2080] 1750863140074559 != 1750863140074562 TServer::EnableGrpc on GrpcPort 63030, node 2 2025-06-25T14:52:20.290311Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-25T14:52:20.290366Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:20.293678Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:20.333673Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:20.333694Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:20.333701Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:20.333807Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63438 TClient is connected to server localhost:63438 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:20.762087Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:20.766359Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:21.132442Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:23.348191Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899940108488594:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:23.348300Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899940108488624:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:23.348467Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:23.352863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:23.364092Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899940108488631:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:52:23.460336Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899940108488682:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:23.521486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:23.565845Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:24.602125Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:25.303133Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899927223586134:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:25.338854Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpLocks::EmptyRangeAlreadyBroken [GOOD] Test command err: Trying to start YDB, gRPC: 30333, MsgBus: 9949 2025-06-25T14:52:08.815021Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899877432313720:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:08.815724Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001284/r3tmp/tmppg2hfI/pdisk_1.dat 2025-06-25T14:52:09.161821Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30333, node 1 2025-06-25T14:52:09.212995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:09.213029Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:09.213047Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:09.213203Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2025-06-25T14:52:09.229215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:09.229497Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:09.231435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9949 TClient is connected to server localhost:9949 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:09.822089Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:09.824268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:09.847835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:09.875401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:10.030335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:10.192683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:10.260814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:11.872642Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899890317217048:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:11.872721Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:12.151275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.174653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.199296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.224857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.255120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.321538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.355645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:12.401848Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899894612185007:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:12.401919Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:12.402135Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899894612185012:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:12.405253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:12.413865Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899894612185014:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:12.502380Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899894612185065:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:13.820435Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899877432313720:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:13.820496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17830, MsgBus: 9112 2025-06-25T14:52:14.757626Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899901383525953:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:14.758315Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/ ... zation/migrations;error=timeout; Trying to start YDB, gRPC: 18102, MsgBus: 7857 2025-06-25T14:52:20.681086Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519899926210402917:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:20.681467Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001284/r3tmp/tmpH37cYD/pdisk_1.dat 2025-06-25T14:52:20.857998Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:20.865131Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:20.865220Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:20.893558Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18102, node 3 2025-06-25T14:52:21.002657Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:21.002685Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:21.002693Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:21.002811Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7857 TClient is connected to server localhost:7857 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:21.549925Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:21.557281Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:21.566885Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:52:21.657159Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:21.725431Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:21.829597Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:21.893621Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:24.147576Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519899943390273688:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:24.147679Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:24.210769Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:24.298462Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:24.342052Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:24.375373Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:24.408677Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:24.439355Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:24.470561Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:24.552709Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519899943390274346:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:24.552792Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:24.552865Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519899943390274351:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:24.556495Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:24.565419Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519899943390274353:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:24.650919Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519899943390274404:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:25.681342Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519899926210402917:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:25.681404Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:26.279527Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=MTEzYjg2NjQtNGY1ODRlZTgtYWFiN2IzMmMtYTY1NTE1ZDQ=, ActorId: [3:7519899947685241968:2473], ActorState: ExecuteState, TraceId: 01jyks6v363ezp48g1nqhen8mv, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001
: Error: tx has deferred effects, but locks are broken >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-33 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-34 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-27 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-28 >> TPersQueueTest::NoDecompressionMemoryLeaks [GOOD] >> TPersQueueTest::PreferredCluster_TwoEnabledClustersAndWriteSessionsWithDifferentPreferredCluster_SessionWithMismatchedClusterDiesAndOthersAlive >> KqpTx::SnapshotROInteractive1 [GOOD] >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite2 >> KqpSnapshotIsolation::TSimpleOltpNoSink [GOOD] >> KqpSnapshotRead::ReadOnlyTxCommitsOnConcurrentWrite+withSink >> TCdcStreamWithInitialScanTests::MeteringDedicated [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-39 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-40 >> KqpSinkTx::LocksAbortOnCommit [GOOD] >> KqpSinkTx::InvalidateOnError ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_cdc_stream/unittest >> TCdcStreamWithInitialScanTests::MeteringDedicated [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:52.475737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:52.475858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:52.475908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:52.475955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:52.476719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:52.476770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:52.476847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:52.476923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:52.477655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:52.478972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 
2025-06-25T14:51:52.559471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:52.559525Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:52.576847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:52.577229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:52.577396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:52.583981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:52.584320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:52.587483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:52.587873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:52.595347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:52.595550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:52.601662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:52.601724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:52.601846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:52.601888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:52.601951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:52.602013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:52.607818Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:52.724537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:52.726204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:52.727471Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:52.727533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:52.729459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:52.729570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:52.735166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:52.735977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:52.736253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:52.736419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:52.736486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:52.736530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:52.738753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:52.738821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:52.738868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:52.740831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:52.740880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:52.740933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:52.740994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:52.745676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { 
TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:52.748099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:52.749449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:52.750499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:52.750649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:52.750713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:52.752271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:52.752354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:52.752577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:52.752651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:52.755575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:52.755640Z node 1 :FLAT_TX_SCHEMESHARD ... 
wner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72075186233409546, cookie: 281474976715657 2025-06-25T14:52:23.046060Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72075186233409546, cookie: 281474976715657 2025-06-25T14:52:23.046090Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 281474976715657 2025-06-25T14:52:23.046123Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 281474976715657, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 5 2025-06-25T14:52:23.046159Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 6 2025-06-25T14:52:23.046245Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 2/3, is published: true 2025-06-25T14:52:23.052274Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 281474976715657 2025-06-25T14:52:23.052447Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 281474976715657 2025-06-25T14:52:23.065335Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72075186233409546, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 250 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1761 } } CommitVersion { Step: 250 TxId: 281474976715657 } 2025-06-25T14:52:23.065417Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 281474976715657, tablet: 72075186233409552, partId: 1 2025-06-25T14:52:23.065589Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 250 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1761 } } CommitVersion { Step: 250 TxId: 281474976715657 } 2025-06-25T14:52:23.065738Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72075186233409546, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 250 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 1761 } } CommitVersion { Step: 250 TxId: 281474976715657 } 2025-06-25T14:52:23.066993Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72075186233409546, at schemeshard: 
72075186233409546, message: Source { RawX1: 756 RawX2: 81604381267 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-25T14:52:23.067104Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 281474976715657, tablet: 72075186233409552, partId: 1 2025-06-25T14:52:23.067349Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546, message: Source { RawX1: 756 RawX2: 81604381267 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-25T14:52:23.067470Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 281474976715657:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72075186233409546 2025-06-25T14:52:23.067645Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976715657:1 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72075186233409546 message: Source { RawX1: 756 RawX2: 81604381267 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-25T14:52:23.067772Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715657:1, shardIdx: 72075186233409546:4, shard: 72075186233409552, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72075186233409546 2025-06-25T14:52:23.067842Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-25T14:52:23.067915Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715657:1, datashard: 72075186233409552, at schemeshard: 72075186233409546 2025-06-25T14:52:23.068000Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:1 129 -> 240 2025-06-25T14:52:23.072274Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-25T14:52:23.072850Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-25T14:52:23.073048Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-25T14:52:23.073111Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72075186233409546] TDone opId# 281474976715657:1 ProgressState 2025-06-25T14:52:23.073347Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715657:1 progress is 3/3 2025-06-25T14:52:23.073408Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-06-25T14:52:23.073485Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715657:1 progress is 3/3 2025-06-25T14:52:23.073550Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: 
TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-06-25T14:52:23.073615Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 3/3, is published: true 2025-06-25T14:52:23.073686Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-06-25T14:52:23.073754Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715657:0 2025-06-25T14:52:23.073818Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715657:0 2025-06-25T14:52:23.073951Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 3 2025-06-25T14:52:23.074009Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715657:1 2025-06-25T14:52:23.074033Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715657:1 2025-06-25T14:52:23.074121Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 5 2025-06-25T14:52:23.074162Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715657:2 2025-06-25T14:52:23.074183Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715657:2 2025-06-25T14:52:23.074226Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-25T14:52:26.162768Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:52:26.163219Z node 19 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 4 took 474us result status StatusNameConflict 2025-06-25T14:52:26.163500Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/Shared/Table/Stream/streamImpl\', error: path is not a common path (id: [OwnerId: 72075186233409546, LocalPathId: 4], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" Path: "/MyRoot/Shared/Table/Stream/streamImpl" PathId: 4 LastExistedPrefixPath: "/MyRoot/Shared/Table/Stream/streamImpl" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409554 } } PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-25T14:52:28.996265Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72075186233409546, at schemeshard: 
72075186233409546 2025-06-25T14:52:28.996759Z node 19 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 4 took 494us result status StatusNameConflict 2025-06-25T14:52:28.996981Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/Shared/Table/Stream/streamImpl\', error: path is not a common path (id: [OwnerId: 72075186233409546, LocalPathId: 4], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" Path: "/MyRoot/Shared/Table/Stream/streamImpl" PathId: 4 LastExistedPrefixPath: "/MyRoot/Shared/Table/Stream/streamImpl" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409554 } } PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::SnapshotROInteractive1 [GOOD] Test command err: Trying to start YDB, gRPC: 14598, MsgBus: 2689 2025-06-25T14:52:17.249348Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899915608186870:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:17.249404Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001234/r3tmp/tmpmrQen4/pdisk_1.dat 2025-06-25T14:52:17.692983Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899915608186845:2080] 1750863137240307 != 1750863137240310 2025-06-25T14:52:17.698357Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:17.701198Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:17.701286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:17.706977Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14598, node 1 2025-06-25T14:52:17.791319Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:17.791339Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:17.791345Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:17.791437Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2689 TClient is connected to server localhost:2689 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:52:18.286552Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:18.297144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:18.309275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:18.326594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:18.450272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:52:18.600850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:18.690345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:20.305966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899928493090375:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:20.306085Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:20.551882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.584278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.611059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.639613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.668504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.741102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.813099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.902093Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899928493091043:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:20.902173Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:20.902323Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899928493091048:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:20.905895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:20.915656Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899928493091050:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:20.985409Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899928493091101:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:22.280662Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899915608186870:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:22.280728Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:22.555003Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=OGRkMjk4OWItMWM3Mzg2YTgtMTUwNjFlMGMtNDIzMTk2NDc=, ActorId: [1:7519899932788058664:2473], ActorState: ExecuteState, TraceId: 01jyks6qj018c1gmss5km6r2wq, Create QueryResponse for error on request, msg:
:3:25: Error: Operation 'Upsert' can't be performed in read only transaction, code: 2008 Trying to start YDB, gRPC: 64387, MsgBus: 64367 2025-06-25T14:52:23.297071Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899939090308300:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:23.297114Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001234/r3tmp/tmpDmH7NJ/pdisk_1.dat 2025-06-25T14:52:23.451673Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:23.459291Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:23.459369Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:23.463807Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64387, node 2 2025-06-25T14:52:23.520883Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:23.520905Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:23.520913Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:23.521014Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64367 TClient is connected to server localhost:64367 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:23.971017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:52:23.981066Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:24.000148Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:24.080289Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:24.290988Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:24.319482Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:24.376138Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:26.467201Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899951975211810:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:26.467298Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:26.525771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:26.558838Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:26.592591Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:26.624320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:26.692492Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:26.767273Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:26.841242Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:26.912780Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899951975212474:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:26.913057Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:26.913404Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899951975212479:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:26.917449Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:26.935785Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899951975212481:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:27.012618Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899956270179828:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:28.297274Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899939090308300:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:28.297366Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSinkTx::OlapDeferredEffects >> KqpSinkLocks::OlapUncommittedRead >> TPersQueueTest::SameOffset [GOOD] >> TPersQueueTest::SchemeOperationsTest >> KqpSinkLocks::DifferentKeyUpdate [GOOD] >> KqpSinkLocks::DifferentKeyUpdateOlap >> KqpTx::EmptyTxOnCommit [GOOD] >> KqpTx::DeferredEffects >> KqpSinkMvcc::OltpNamedStatement [GOOD] >> KqpSinkTx::SnapshotRO [GOOD] >> KqpSinkTx::SnapshotROInteractive1 >> KqpSinkTx::OlapInvalidateOnError >> TPersQueueTest::CacheHead [GOOD] >> TPersQueueTest::CheckACLForGrpcWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::EmptyTxOnCommit [GOOD] Test command err: Trying to start YDB, gRPC: 11757, MsgBus: 26604 2025-06-25T14:52:20.367030Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899929627868100:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:20.367143Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00122e/r3tmp/tmp0XGB6a/pdisk_1.dat 2025-06-25T14:52:20.742574Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:20.742773Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899929627868008:2080] 1750863140363883 != 1750863140363886 TServer::EnableGrpc on GrpcPort 11757, node 1 2025-06-25T14:52:20.787360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:20.787472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:20.801257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:20.887307Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:20.887329Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:20.887338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:20.887432Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26604 TClient is connected to server localhost:26604 
WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:52:21.421836Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:21.565918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:21.589557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:21.599857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:21.735011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:21.895226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:21.982680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:23.510083Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899942512771536:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:23.510193Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:23.757934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:23.795354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:23.829006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:23.875409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:23.920456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:23.995038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:24.041507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:24.146063Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899946807739493:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:24.146116Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:24.146241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899946807739498:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:24.150334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:24.168827Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899946807739500:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:24.256120Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899946807739551:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:25.368418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899929627868100:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:25.368492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:25.715958Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NmQ3YjYxYTctZjMwMmNiMjgtZmNjYmI3ZTQtYTUzZDEyZGY=, ActorId: [1:7519899951102707116:2473], ActorState: ReadyState, TraceId: 01jyks6tqb21hwehntfvtq8m8g, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 9074, MsgBus: 8797 2025-06-25T14:52:26.568776Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899954721788126:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:26.568818Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00122e/r3tmp/tmpTCZ41N/pdisk_1.dat 2025-06-25T14:52:26.712944Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:26.723680Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:26.723754Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:26.726634Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9074, node 2 2025-06-25T14:52:26.812814Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:26.812835Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:26.812841Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:26.812941Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8797 TClient is connected to server localhost:8797 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:27.325706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:27.333908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:27.351858Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:27.408707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:27.546421Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:27.587645Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:27.610608Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:29.552450Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899967606691631:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:29.552544Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:29.630406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.681041Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.718606Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.754969Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.789630Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.830950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.861146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.948566Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899967606692293:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:29.948674Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:29.948979Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899967606692298:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:29.952420Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:29.966783Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899967606692300:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:30.033680Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899971901659647:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-46 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-47 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::OltpNamedStatement [GOOD] Test command err: Trying to start YDB, gRPC: 16404, MsgBus: 22139 2025-06-25T14:52:16.876937Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899912447422065:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:16.877489Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001237/r3tmp/tmphIuRFR/pdisk_1.dat 2025-06-25T14:52:17.274551Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:17.279355Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899912447421844:2080] 1750863136818376 != 1750863136818379 TServer::EnableGrpc on GrpcPort 16404, node 1 2025-06-25T14:52:17.305494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:17.305674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:17.347096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:17.368800Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:17.368820Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:17.368826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:17.368928Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22139 TClient is connected to server localhost:22139 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:52:17.855242Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:17.862450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:19.570070Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899925332324370:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:19.570139Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899925332324375:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:19.570213Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:19.573904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:19.581837Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899925332324384:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:52:19.684294Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899925332324435:2331] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:19.965397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.082518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:21.039156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:21.865213Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899912447422065:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:21.865875Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 65306, MsgBus: 11083 2025-06-25T14:52:24.573394Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899943690928716:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:24.573463Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001237/r3tmp/tmpnHTBEd/pdisk_1.dat 2025-06-25T14:52:24.695634Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899943690928696:2080] 1750863144572911 != 1750863144572914 2025-06-25T14:52:24.702929Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65306, node 2 2025-06-25T14:52:24.710148Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:24.710215Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:24.713133Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:24.734888Z node 2 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:24.734909Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:24.734927Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:24.735018Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11083 TClient is connected to server localhost:11083 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:25.160622Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:25.580335Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:27.503425Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899956575831215:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:27.503516Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:27.503892Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899956575831227:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:27.508071Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:27.518068Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899956575831229:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:52:27.584348Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899956575831282:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:27.636712Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:27.713762Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:28.703180Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.654256Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899943690928716:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:29.657591Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TopicService::UseDoubleSlashInTopicPath [GOOD] >> DemoTx::Scenario_5 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-34 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-35 >> TPersQueueTest::TopicServiceSimpleHappyWrites [GOOD] >> TPersQueueTest::WhenDisableNodeAndCreateTopic_ThenAllPartitionsAreOnOtherNode >> YdbYqlClient::SimpleColumnFamilies [GOOD] >> YdbYqlClient::TableKeyRangesSinglePartition >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-28 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-29 >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError [GOOD] >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError >> KqpSinkLocks::TInvalidateOlap [GOOD] >> KqpSinkLocks::OlapVisibleUncommittedRowsUpdate [GOOD] >> KqpSnapshotRead::TestReadOnly+withSink >> KqpLocks::TwoPhaseTx >> TopicService::RelativePath >> TFstClassSrcIdPQTest::TestTableCreated >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-40 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-41 >> TAsyncIndexTests::CdcAndSplitWithReboots[PipeResets] [GOOD] >> KqpTx::RollbackManyTx [GOOD] >> KqpTx::RollbackRoTx >> KqpSnapshotRead::ReadOnlyTxCommitsOnConcurrentWrite+withSink [GOOD] 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::OlapVisibleUncommittedRowsUpdate [GOOD] Test command err: Trying to start YDB, gRPC: 22486, MsgBus: 19649 2025-06-25T14:52:12.882381Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899894390815397:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:12.885825Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00123e/r3tmp/tmpctFLp8/pdisk_1.dat 2025-06-25T14:52:13.307177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:13.307325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:13.309868Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899894390815378:2080] 1750863132879630 != 1750863132879633 2025-06-25T14:52:13.310259Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:13.324073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22486, node 1 2025-06-25T14:52:13.397320Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:13.397342Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:13.397348Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:13.397471Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19649 TClient is connected to server localhost:19649 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:52:13.909114Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:14.052260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:14.066651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:15.825951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899907275717912:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.827349Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899907275717900:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.827461Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.829768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:15.838870Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899907275717914:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:52:15.939965Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899907275717967:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:16.185496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:16.296443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.158866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.884673Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899894390815397:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:17.886054Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:18.658124Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519899920160627817:2930], SessionActorId: [1:7519899920160627770:2930], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 . sessionActorId=[1:7519899920160627770:2930]. isRollback=0 2025-06-25T14:52:18.676434Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=YjViMzIyMTItZTg2ZDI1YTEtOGY4YTcwODUtZjFhNWUzMjU=, ActorId: [1:7519899920160627770:2930], ActorState: ExecuteState, TraceId: 01jyks6ks014pzbk9770g5kwxb, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519899920160627818:2930] from: [1:7519899920160627817:2930] 2025-06-25T14:52:18.676552Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519899920160627818:2930] TxId: 281474976710665. Ctx: { TraceId: 01jyks6ks014pzbk9770g5kwxb, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjViMzIyMTItZTg2ZDI1YTEtOGY4YTcwODUtZjFhNWUzMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 } 2025-06-25T14:52:18.676756Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YjViMzIyMTItZTg2ZDI1YTEtOGY4YTcwODUtZjFhNWUzMjU=, ActorId: [1:7519899920160627770:2930], ActorState: ExecuteState, TraceId: 01jyks6ks014pzbk9770g5kwxb, Create QueryResponse for error on request, msg:
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001 Trying to start YDB, gRPC: 3290, MsgBus: 24751 2025-06-25T14:52:19.633767Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899925474068035:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:19.633836Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00123e/r3tmp/tmpr6Ujoo/pdisk_1.dat 2025-06-25T14:52:19.721821Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:19.722652Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899925474068016:2080] 1750863139633312 != 1750863139633315 TServer::EnableGrpc on GrpcPort 3290, node 2 2025-06-25T14:52:19.768612Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:19.768698Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:19.781445Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:19.809217Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:19.809241Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:19.809248Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:19.809359Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24751 TClient is connected to server localhost:24751 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Chil ... 
Kikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=64;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037897;receive=72075186224037996; 2025-06-25T14:52:31.210706Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899942653938083:2309];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=66;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037896; 2025-06-25T14:52:31.210770Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899942653938083:2309];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=67;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037896; 2025-06-25T14:52:31.210848Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899942653938083:2309];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=68;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037896; 2025-06-25T14:52:31.210914Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899942653938083:2309];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=69;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037896; 2025-06-25T14:52:31.210967Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899942653938083:2309];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=70;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037896; 2025-06-25T14:52:31.211034Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899942653938083:2309];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=71;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037896; 2025-06-25T14:52:31.211126Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899942653938083:2309];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037892;local_tx_no=72;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037896; 2025-06-25T14:52:31.211863Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:32.066233Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:32.067313Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Complete;commit_tx_id=281474976715667;commit_lock_id=281474976715666;fline=manager.cpp:94;broken_lock_id=281474976715665; 2025-06-25T14:52:32.154302Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[2:7519899942653938113:2314];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=tablet lock have another internal generation counter: 18446744073709551615 != 0;tx_id=281474976715669; 2025-06-25T14:52:32.154486Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2751: SelfId: [2:7519899977013685993:3692], SessionActorId: [2:7519899977013685438:3692], Got LOCKS BROKEN for table. ShardID=72075186224037897, Sink=[2:7519899977013685993:3692].{
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 } 2025-06-25T14:52:32.154609Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519899977013685993:3692], SessionActorId: [2:7519899977013685438:3692], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 . sessionActorId=[2:7519899977013685438:3692]. isRollback=0
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 2025-06-25T14:52:32.155229Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=NjVhNDdlZGQtMTA0NWZkNDQtOGZlYWY2NWEtMTYyOTM4YWE=, ActorId: [2:7519899977013685438:3692], ActorState: ExecuteState, TraceId: 01jyks70yg63gkwjjpm4ymgqnj, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519899981308653478:3692] from: [2:7519899977013685993:3692] 2025-06-25T14:52:32.155316Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519899981308653478:3692] TxId: 281474976715669. Ctx: { TraceId: 01jyks70yg63gkwjjpm4ymgqnj, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NjVhNDdlZGQtMTA0NWZkNDQtOGZlYWY2NWEtMTYyOTM4YWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 } } 2025-06-25T14:52:32.155427Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037893 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-25T14:52:32.155448Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NjVhNDdlZGQtMTA0NWZkNDQtOGZlYWY2NWEtMTYyOTM4YWE=, ActorId: [2:7519899977013685438:3692], ActorState: ExecuteState, TraceId: 01jyks70yg63gkwjjpm4ymgqnj, Create QueryResponse for error on request, msg: 2025-06-25T14:52:32.155498Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[2:7519899942653938082:2308];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:32.155731Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037895 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-25T14:52:32.155777Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[2:7519899942653938091:2313];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:32.155886Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037896 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-25T14:52:32.155927Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[2:7519899938358970784:2306];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:32.156017Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037894 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-25T14:52:32.156056Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[2:7519899942653938081:2307];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:32.156106Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[2:7519899942653938113:2314];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:32.156224Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037888 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-25T14:52:32.156262Z node 2 
:TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[2:7519899938358970751:2305];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:32.156281Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037890 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-25T14:52:32.156356Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[2:7519899942653938088:2310];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:32.156407Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037889 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-25T14:52:32.156422Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037891 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-25T14:52:32.156447Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[2:7519899942653938089:2311];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:32.156493Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[2:7519899942653938090:2312];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:32.156706Z node 2 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037892 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-25T14:52:32.156750Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[2:7519899942653938083:2309];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; >> KqpJoinOrder::CanonizedJoinOrderTPCDS64 [GOOD] >> KqpSinkTx::InvalidateOnError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::CdcAndSplitWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] 
IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:13.305118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:13.305231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:13.305298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:13.305345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:13.305396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:13.305423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:13.305467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:13.305527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:13.306266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:13.307361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:13.385238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:13.385289Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:13.385842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:13.394896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:13.397307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:13.397430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:13.404333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:13.404581Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:13.405168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.405404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:13.408942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:13.409113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:13.413715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:13.413792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:13.413924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:13.413973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:13.414048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:13.414172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:13.420413Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:13.536961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:13.537165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.537360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:13.537406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:13.537620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, 
reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:13.537703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:13.541292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.541443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:13.541603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.541651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:13.541696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:13.541725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:13.543394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.543446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:13.543484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:13.544950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.544989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.545042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:13.545080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:13.553189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:13.554803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:13.554942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation 
RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:13.555736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.555843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... epInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 2 IsBackup: false CdcStreams { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 6 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\001\000\004\000\000\0002\000\000\000" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { 
} ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:52:34.982290Z node 34 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409550:2][72075186233409546][34:1076:2858] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:52:34.982403Z node 34 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][34:1034:2858] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:52:34.982592Z node 34 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409550:2][72075186233409546][34:1076:2858] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750863154924469 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750863154924469 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 5 Group: 1750863154924469 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:52:34.986030Z node 34 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409550:2][72075186233409546][34:1076:2858] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 5 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 5 2025-06-25T14:52:34.986121Z node 34 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][34:1034:2858] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:52:35.281062Z node 34 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:52:35.281316Z node 34 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 290us result status StatusSuccess 2025-06-25T14:52:35.282177Z node 34 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 
TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadOnlyTxCommitsOnConcurrentWrite+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 1188, MsgBus: 13267 2025-06-25T14:52:29.506961Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899968584264697:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:29.508982Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001214/r3tmp/tmpzZIReg/pdisk_1.dat 2025-06-25T14:52:29.873028Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:29.898179Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899968584264588:2080] 1750863149493185 != 1750863149493188 TServer::EnableGrpc on GrpcPort 1188, node 1 2025-06-25T14:52:29.944792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:29.944910Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:29.953411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:30.010604Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:30.010626Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:30.010643Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:30.010813Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13267 2025-06-25T14:52:30.508345Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13267 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:30.720794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:30.739754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:30.750973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:30.884111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:31.031563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:52:31.101076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:32.853927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899981469168125:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:32.854024Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.184151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:33.217523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:33.244764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:33.272367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:33.313657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:33.349309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:33.423814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:33.496950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899985764136086:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.497029Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.497452Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899985764136091:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.501183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:33.519052Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899985764136093:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:33.623509Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899985764136146:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:34.502426Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899968584264697:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:34.502486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSinkTx::OlapSnapshotROInteractive1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::InvalidateOnError [GOOD] Test command err: Trying to start YDB, gRPC: 13613, MsgBus: 11276 2025-06-25T14:52:23.148764Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899939883062599:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:23.156128Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00122d/r3tmp/tmpma7ScJ/pdisk_1.dat 2025-06-25T14:52:23.493904Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13613, node 1 2025-06-25T14:52:23.571276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:23.571401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:23.573021Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:23.586892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:23.586916Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:23.586922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:23.587064Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11276 TClient is connected to server localhost:11276 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:52:24.160402Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:24.253275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:24.266257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:26.203245Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899952767965072:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:26.203245Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899952767965081:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:26.203335Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:26.206362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:26.215912Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899952767965086:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:52:26.296474Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899952767965137:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:26.542593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:26.655833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:27.513627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:28.181966Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899939883062599:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:28.189973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:29.201970Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519899965652875302:2930], SessionActorId: [1:7519899961357907711:2930], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/KV`, code: 2001 . sessionActorId=[1:7519899961357907711:2930]. isRollback=0 2025-06-25T14:52:29.203038Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=N2IxMmY4MjAtMmU5Yjg1YTctMTRiN2FlNDAtNmJhNzU5ODA=, ActorId: [1:7519899961357907711:2930], ActorState: ExecuteState, TraceId: 01jyks6y3x2t5wbpyjb8bt2eyr, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519899965652875303:2930] from: [1:7519899965652875302:2930] 2025-06-25T14:52:29.203111Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519899965652875303:2930] TxId: 281474976710667. Ctx: { TraceId: 01jyks6y3x2t5wbpyjb8bt2eyr, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2IxMmY4MjAtMmU5Yjg1YTctMTRiN2FlNDAtNmJhNzU5ODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/KV`, code: 2001 } 2025-06-25T14:52:29.203284Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=N2IxMmY4MjAtMmU5Yjg1YTctMTRiN2FlNDAtNmJhNzU5ODA=, ActorId: [1:7519899961357907711:2930], ActorState: ExecuteState, TraceId: 01jyks6y3x2t5wbpyjb8bt2eyr, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 22414, MsgBus: 3120 2025-06-25T14:52:30.157781Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899971295991318:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:30.159609Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00122d/r3tmp/tmpaILNp8/pdisk_1.dat 2025-06-25T14:52:30.290047Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:30.303148Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:30.303232Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:30.306441Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22414, node 2 2025-06-25T14:52:30.343800Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:30.343825Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:30.343832Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:30.343944Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3120 TClient is connected to server localhost:3120 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:30.757462Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:30.763681Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:31.170802Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:33.099780Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899984180893796:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.099883Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.100214Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899984180893808:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.106414Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:33.123012Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899984180893810:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:52:33.178834Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899984180893863:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:33.226472Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:33.282581Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:34.258079Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:35.193779Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899971295991318:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:35.196933Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:35.531349Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-25T14:52:35.531538Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037889 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:52:35.531661Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 3 at tablet 72075186224037889 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:52:35.531818Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [2:7519899992770836448:2930], Table: `/Root/KV` ([72057594046644480:7:1]), SessionActorId: [2:7519899992770836424:2930]Got CONSTRAINT VIOLATION for table `/Root/KV`. ShardID=72075186224037889, Sink=[2:7519899992770836448:2930].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:52:35.531900Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519899992770836441:2930], SessionActorId: [2:7519899992770836424:2930], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/KV`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[2:7519899992770836424:2930]. isRollback=0 2025-06-25T14:52:35.532052Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=YTBmZmYzYmMtYWY1MWE1MzMtMzhiNTc4OGEtMmI3ZjU4NGE=, ActorId: [2:7519899992770836424:2930], ActorState: ExecuteState, TraceId: 01jyks748heryj3816abfxw2te, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [2:7519899992770836442:2930] from: [2:7519899992770836441:2930] 2025-06-25T14:52:35.532110Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519899992770836442:2930] TxId: 281474976715664. Ctx: { TraceId: 01jyks748heryj3816abfxw2te, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTBmZmYzYmMtYWY1MWE1MzMtMzhiNTc4OGEtMmI3ZjU4NGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/KV`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:52:35.532255Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YTBmZmYzYmMtYWY1MWE1MzMtMzhiNTc4OGEtMmI3ZjU4NGE=, ActorId: [2:7519899992770836424:2930], ActorState: ExecuteState, TraceId: 01jyks748heryj3816abfxw2te, Create QueryResponse for error on request, msg:
: Error: Constraint violated. Table: `/Root/KV`., code: 2012
: Error: Conflict with existing key., code: 2012 2025-06-25T14:52:35.589495Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YTBmZmYzYmMtYWY1MWE1MzMtMzhiNTc4OGEtMmI3ZjU4NGE=, ActorId: [2:7519899992770836424:2930], ActorState: ExecuteState, TraceId: 01jyks74ajcr355brzm6m9g3q7, Create QueryResponse for error on request, msg:
: Error: Transaction not found: 01jyks74861b95g85cd607xmz6, code: 2015 >> KqpTx::DeferredEffects [GOOD] >> KqpTx::CommitStats >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite2 [GOOD] >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite3 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-47 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-48 >> KqpSinkTx::SnapshotROInteractive1 [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite1-withSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-35 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-36 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::CanonizedJoinOrderTPCDS64 [GOOD] Test command err: Trying to start YDB, gRPC: 2896, MsgBus: 4044 2025-06-25T14:48:59.665937Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899065964175559:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:48:59.665977Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d7f/r3tmp/tmpjpcm3t/pdisk_1.dat 2025-06-25T14:49:00.335085Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:00.336450Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899065964175348:2080] 1750862939617373 != 1750862939617376 2025-06-25T14:49:00.353402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:00.353479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:00.364208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2896, node 1 2025-06-25T14:49:00.584739Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:49:00.584756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:00.584763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:00.584845Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:49:00.648591Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4044 TClient is connected to server localhost:4044 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:01.385053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:01.405048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:04.098587Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899087439012477:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:04.098705Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:04.098965Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899087439012489:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:04.102492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:04.136742Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899087439012491:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:04.207780Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899087439012542:2340] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:04.476047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:04.668161Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899065964175559:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:04.668227Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:04.852398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899087439012812:2325];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:04.852630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899087439012812:2325];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:04.852890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899087439012812:2325];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:04.852998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899087439012812:2325];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:04.853116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899087439012812:2325];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:04.853240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899087439012812:2325];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:04.853352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899087439012812:2325];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:04.853464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899087439012812:2325];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:04.853893Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899087439012812:2325];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:04.854044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899087439012812:2325];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:04.854142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[1:7519899087439012812:2325];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:04.854435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899087439012786:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:04.854462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899087439012786:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:04.854579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899087439012786:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:04.854671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899087439012786:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:04.854766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899087439012786:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:04.854851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899087439012786:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:04.854924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899087439012786:2313];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:04.855021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899087439012786:2313];tablet_id ... 
rrent=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.797331Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.798178Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039411;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.798862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.802037Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039421;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.802567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.803784Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039187;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.804627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.807257Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039395;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.807759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.809130Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039423;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.809701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.812159Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039397;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.812642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.814067Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039399;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 
2025-06-25T14:50:28.817084Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039393;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.818277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.818291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.822631Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039413;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.823168Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039407;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:28.823244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:50:28.827683Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039417;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:50:29.036248Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks23z5fntrz358yq7s9x96", SessionId: ydb://session/3?node_id=1&id=OGM3YzUwNDYtMjY1NTgxZTktYWM0MWFmOTEtNWJkYzQyN2E=, Slow query, duration: 37.702150s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:50:29.579325Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:29.579425Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:50:29.579703Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;self_id=[1:7519899405266647733:9751];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224039392;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224038933;receive=72075186224039094; 2025-06-25T14:50:29.580115Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:52:24.384644Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks4xdk04gyfq971wrkex0x", SessionId: ydb://session/3?node_id=1&id=OGM3YzUwNDYtMjY1NTgxZTktYWM0MWFmOTEtNWJkYzQyN2E=, Slow query, duration: 61.452763s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "PRAGMA TablePathPrefix='/Root/test/ds';\n\n-- NB: Subquerys\n\n$cs_ui =\n\n (select catalog_sales.cs_item_sk cs_item_sk\n\n ,sum(cs_ext_list_price) as sale,sum(cr_refunded_cash+cr_reversed_charge+cr_store_credit) as refund\n\n from catalog_sales as catalog_sales\n\n cross join catalog_returns as catalog_returns\n\n where cs_item_sk = cr_item_sk\n\n and cs_order_number = cr_order_number\n\n group by catalog_sales.cs_item_sk\n\n having sum(cs_ext_list_price)>2*sum(cr_refunded_cash+cr_reversed_charge+cr_store_credit));\n\n$cross_sales =\n\n (select item.i_product_name product_name\n\n ,item.i_item_sk item_sk\n\n ,store.s_store_name store_name\n\n ,store.s_zip store_zip\n\n ,ad1.ca_street_number b_street_number\n\n ,ad1.ca_street_name b_street_name\n\n ,ad1.ca_city b_city\n\n ,ad1.ca_zip b_zip\n\n ,ad2.ca_street_number c_street_number\n\n ,ad2.ca_street_name c_street_name\n\n ,ad2.ca_city c_city\n\n ,ad2.ca_zip c_zip\n\n ,d1.d_year as syear\n\n ,d2.d_year as fsyear\n\n ,d3.d_year s2year\n\n ,count(*) cnt\n\n ,sum(ss_wholesale_cost) s1\n\n ,sum(ss_list_price) s2\n\n ,sum(ss_coupon_amt) s3\n\n FROM store_sales as store_sales\n\n cross join store_returns as store_returns\n\n cross join $cs_ui cs_ui\n\n cross join date_dim d1\n\n cross join date_dim d2\n\n cross join date_dim d3\n\n cross join store as store\n\n cross join customer as customer\n\n cross join customer_demographics cd1\n\n cross join customer_demographics cd2\n\n cross join promotion as promotion\n\n cross join household_demographics hd1\n\n cross join household_demographics hd2\n\n cross join customer_address ad1\n\n cross join customer_address ad2\n\n cross join income_band ib1\n\n cross join income_band ib2\n\n cross join item as item\n\n WHERE ss_store_sk = s_store_sk AND\n\n ss_sold_date_sk = d1.d_date_sk AND\n\n ss_customer_sk = c_customer_sk AND\n\n ss_cdemo_sk= cd1.cd_demo_sk AND\n\n ss_hdemo_sk = hd1.hd_demo_sk AND\n\n ss_addr_sk = ad1.ca_address_sk and\n\n ss_item_sk = i_item_sk and\n\n ss_item_sk = sr_item_sk and\n\n ss_ticket_number = sr_ticket_number and\n\n ss_item_sk = cs_ui.cs_item_sk and\n\n c_current_cdemo_sk = cd2.cd_demo_sk AND\n\n c_current_hdemo_sk = hd2.hd_demo_sk AND\n\n c_current_addr_sk = ad2.ca_address_sk and\n\n c_first_sales_date_sk = d2.d_date_sk and\n\n c_first_shipto_date_sk = d3.d_date_sk and\n\n ss_promo_sk = p_promo_sk and\n\n hd1.hd_income_band_sk = ib1.ib_income_band_sk and\n\n hd2.hd_income_band_sk = ib2.ib_income_band_sk and\n\n cd1.cd_marital_status <> cd2.cd_marital_status and\n\n i_color in ('azure','gainsboro','misty','blush','hot','lemon') and\n\n i_current_price between 80 and 80 + 10 and\n\n i_current_price between 80 + 1 and 80 + 15\n\ngroup by item.i_product_name\n\n ,item.i_item_sk\n\n ,store.s_store_name\n\n ,store.s_zip\n\n ,ad1.ca_street_number\n\n ,ad1.ca_street_name\n\n ,ad1.ca_city\n\n ,ad1.ca_zip\n\n ,ad2.ca_street_number\n\n ,ad2.ca_street_name\n\n ,ad2.ca_city\n\n ,ad2.ca_zip\n\n ,d1.d_year\n\n ,d2.d_year\n\n ,d3.d_year\n\n);\n\n-- start query 1 in stream 0 using template query64.tpl 
and seed 1220860970\n\nselect cs1.product_name\n\n ,cs1.store_name\n\n ,cs1.store_zip\n\n ,cs1.b_street_number\n\n ,cs1.b_street_name\n\n ,cs1.b_city\n\n ,cs1.b_zip\n\n ,cs1.c_street_number\n\n ,cs1.c_street_name\n\n ,cs1.c_city\n\n ,cs1.c_zip\n\n ,cs1.syear\n\n ,cs1.cnt\n\n ,cs1.s1 as s11\n\n ,cs1.s2 as s21\n\n ,cs1.s3 as s31\n\n ,cs2.s1 as s12\n\n ,cs2.s2 as s22\n\n ,cs2.s3 as s32\n\n ,cs2.syear\n\n ,cs2.cnt\n\nfrom $cross_sales cs1 cross join $cross_sales cs2\n\nwhere cs1.item_sk=cs2.item_sk and\n\n cs1.syear = 1999 and\n\n cs2.syear = 1999 + 1 and\n\n cs2.cnt <= cs1.cnt and\n\n cs1.store_name = cs2.store_name and\n\n cs1.store_zip = cs2.store_zip\n\norder by cs1.product_name\n\n ,cs1.store_name\n\n ,cs2.cnt\n\n ,s11\n\n ,s21\n\n ,s22;\n\n\n\n-- end query 1 in stream 0 using template query64.tpl\n", parameters: 0b >> TPersQueueTest::StreamReadCommitAndStatusMsgs [GOOD] >> TPersQueueTest::StreamReadManyUpdateTokenAndRead >> TPersQueueTest::WriteNonExistingTopic [GOOD] >> TPersQueueTest::WriteAfterAlter >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-29 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-30 >> TSchemeShardTTLTestsWithReboots::CopyTable [GOOD] >> YdbYqlClient::TableKeyRangesSinglePartition [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::SnapshotROInteractive1 [GOOD] Test command err: Trying to start YDB, gRPC: 21392, MsgBus: 1960 2025-06-25T14:52:24.851477Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899946938722103:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:24.851515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001227/r3tmp/tmpGDoKV7/pdisk_1.dat 2025-06-25T14:52:25.203538Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899946938722080:2080] 1750863144849190 != 1750863144849193 2025-06-25T14:52:25.223451Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21392, node 1 2025-06-25T14:52:25.259228Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:25.259329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:25.273338Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:25.306444Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:25.306473Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:25.306484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:25.306628Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1960 TClient is connected to server localhost:1960 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:25.805661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:25.825394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:25.861083Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:27.710857Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899959823624614:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:27.710865Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899959823624606:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:27.710970Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:27.715425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:27.724273Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899959823624620:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:52:27.796901Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899959823624671:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:28.077192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:28.210828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.073980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.891279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899946938722103:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:29.898847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:31.098129Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NjViYzQxNWMtZWVjODYyNDctZGRlN2I5MTgtNWFiOGQ2NjE=, ActorId: [1:7519899972708534432:2930], ActorState: ExecuteState, TraceId: 01jyks6zxc2a7rffkqey4thkax, Create QueryResponse for error on request, msg:
:3:29: Error: Operation 'Upsert' can't be performed in read only transaction, code: 2008 Trying to start YDB, gRPC: 4638, MsgBus: 9154 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001227/r3tmp/tmpd8uIip/pdisk_1.dat 2025-06-25T14:52:32.382607Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:52:32.388811Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:32.389760Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899978966187330:2080] 1750863152183860 != 1750863152183863 2025-06-25T14:52:32.418203Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:32.418286Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:32.421444Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4638, node 2 2025-06-25T14:52:32.471613Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:32.471629Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:32.471635Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:32.471745Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9154 TClient is connected to server localhost:9154 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:32.924815Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:52:32.937222Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:33.216444Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:35.393703Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899991851089838:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:35.393846Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:35.399307Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899991851089866:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:35.404342Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:35.416703Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:52:35.416831Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899991851089868:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:52:35.482871Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899991851089919:2334] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:35.540508Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:35.620206Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:36.755977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite2 [GOOD] >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite3 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-41 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-42 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTestsWithReboots::CopyTable [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:04.577342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:04.577426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, 
WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:04.577471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:04.577507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:04.577569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:04.577596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:04.577649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:04.577722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:04.578425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:04.578756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:04.649527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:04.649581Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:04.650277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:04.666806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:04.670491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:04.670665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:04.678114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:04.678335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:04.678973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.679305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:04.681874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:04.682051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:04.683271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:04.683327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:04.683426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:04.683469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:04.683508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:04.683638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:04.690306Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:04.804689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:04.804903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.805123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:04.805167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:04.805348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:04.805470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:04.810385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.810564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, 
path: //MyRoot 2025-06-25T14:51:04.810728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.810791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:04.810842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:04.810899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:04.813998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.814056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:04.814088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:04.817859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.817908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:04.817979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:04.818025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:04.821493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:04.824122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:04.824338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:04.825290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:04.825416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 42949694 ... 
{ PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1312 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-25T14:52:39.670159Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409547, partId: 0 2025-06-25T14:52:39.670292Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 3 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1312 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-25T14:52:39.670392Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 1003 Step: 5000004 OrderId: 1003 ExecLatency: 3 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 1312 } } CommitVersion { Step: 5000004 TxId: 1003 } 2025-06-25T14:52:39.670762Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:52:39.671149Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 435 RawX2: 416611830115 } Origin: 72075186233409547 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-25T14:52:39.671184Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409547, partId: 0 2025-06-25T14:52:39.671280Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Source { RawX1: 435 RawX2: 416611830115 } Origin: 72075186233409547 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-25T14:52:39.671328Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:52:39.671409Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 1003:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 435 RawX2: 416611830115 } Origin: 72075186233409547 State: 2 TxId: 1003 Step: 0 Generation: 2 2025-06-25T14:52:39.671461Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 1003:0, shardIdx: 72057594046678944:2, shard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:52:39.671495Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:52:39.671534Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 1003:0, datashard: 72075186233409546, at 
schemeshard: 72057594046678944 2025-06-25T14:52:39.671580Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 1003:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-25T14:52:39.671613Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1003:0 129 -> 240 2025-06-25T14:52:39.674431Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:52:39.674957Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:52:39.675462Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:52:39.675514Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:93: TCopyTable::TWaitCopyTableBarrier operationId: 1003:0ProgressState, operation type TxCopyTable 2025-06-25T14:52:39.675561Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1061: Set barrier, OperationId: 1003:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-06-25T14:52:39.675597Z node 97 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 1003, done: 0, blocked: 1 2025-06-25T14:52:39.675655Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_states.h:76: TCopyTable::TWaitCopyTableBarrier operationId: 1003:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 1003 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-06-25T14:52:39.675694Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1003:0 240 -> 240 2025-06-25T14:52:39.679281Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:52:39.679331Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-25T14:52:39.679423Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-25T14:52:39.679455Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-25T14:52:39.679494Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 1/1 2025-06-25T14:52:39.679523Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-25T14:52:39.679555Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-25T14:52:39.679598Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-25T14:52:39.679636Z node 97 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-25T14:52:39.679667Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1003:0 2025-06-25T14:52:39.679820Z node 97 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:52:39.679861Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-25T14:52:39.681629Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-25T14:52:39.681675Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-25T14:52:39.682025Z node 97 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-25T14:52:39.682100Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-25T14:52:39.682132Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [97:532:2491] TestWaitNotification: OK eventTxId 1003 2025-06-25T14:52:39.682540Z node 97 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTableCopy" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:52:39.682736Z node 97 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTableCopy" took 235us result status StatusSuccess 2025-06-25T14:52:39.683255Z node 97 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTableCopy" PathDescription { Self { Name: "TTLEnabledTableCopy" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTableCopy" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 
WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpSnapshotRead::TestReadOnly+withSink [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3-withSink >> KqpTx::RollbackRoTx [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::TableKeyRangesSinglePartition [GOOD] Test command err: 2025-06-25T14:44:11.448710Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519897827955657992:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:11.448761Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183d/r3tmp/tmpc5LETo/pdisk_1.dat 2025-06-25T14:44:11.943438Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:11.959601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:11.959674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:11.963332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5334, node 1 2025-06-25T14:44:12.129137Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:12.129168Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:12.129175Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:12.129287Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20577 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:44:12.459040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:12.502369Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:14.218182Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897840840560872:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:14.218332Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:14.476978Z node 1 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [1:7519897840840560898:2634] txid# 281474976715658, Access denied for badguy@builtin on path /Root, with access CreateTable 2025-06-25T14:44:14.477138Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519897840840560898:2634] txid# 281474976715658, issues: { message: "Access denied for badguy@builtin on path /Root" issue_code: 200000 severity: 1 } 2025-06-25T14:44:14.572172Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519897840840560910:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:14.572254Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:14.589547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:16.349939Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519897848168933503:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:16.351088Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183d/r3tmp/tmpXNcc5V/pdisk_1.dat 2025-06-25T14:44:16.504005Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:16.531225Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:16.531309Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:44:16.537110Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25200, node 4 2025-06-25T14:44:16.609486Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:44:16.609507Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:44:16.609513Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:44:16.609654Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19808 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:44:16.841853Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:44:17.360713Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:44:19.216425Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897861053836357:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.216499Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.223570Z node 4 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [4:7519897861053836378:2628] txid# 281474976710658, Access denied for badguy@builtin on path /Root, with access CreateTable 2025-06-25T14:44:19.223666Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519897861053836378:2628] txid# 281474976710658, issues: { message: "Access denied for badguy@builtin on path /Root" issue_code: 200000 severity: 1 } 2025-06-25T14:44:19.299718Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519897861053836390:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.300286Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:44:19.311163Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:44:21.064724Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519897873008069428:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:44:21.064820Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183d/r3tmp/tmpouTPja/pdisk_1.dat 2025-06-25T14:44:21.455710Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:44:21.489520Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:44:21.489619Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: D ... t, use /Root 2025-06-25T14:52:30.301307Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714647. Ctx: { TraceId: 01jyks6z4mf5xt4nzspzz70scj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:30.429058Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714648. Ctx: { TraceId: 01jyks6z7serqhav5jgs97fqk0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:30.533000Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714649. Ctx: { TraceId: 01jyks6zbjerdc2c1ht9w6x1dk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:30.626661Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714650. Ctx: { TraceId: 01jyks6zev6m1mnn1xk6bb2zdx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:30.751006Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714651. Ctx: { TraceId: 01jyks6zj40tqksmxjdhsdxhx5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:30.865828Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714652. Ctx: { TraceId: 01jyks6znvfyc1mgz054ajggyk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:52:30.973005Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714653. Ctx: { TraceId: 01jyks6zsb3xkq1yaxw6gka1tx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:31.105090Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714654. Ctx: { TraceId: 01jyks6zxt43819deftvc5szcn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:31.188488Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714655. Ctx: { TraceId: 01jyks700h2vc87gc1bzj15nmy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:31.297430Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714656. Ctx: { TraceId: 01jyks703y00rhbmwabkj6dtfs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:31.397563Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714657. Ctx: { TraceId: 01jyks706rachg03w107c7tmnb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:31.488285Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714658. Ctx: { TraceId: 01jyks709j58n1btw89vb5gr0f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:31.629603Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714659. Ctx: { TraceId: 01jyks70e39dc07d10j1gez0a7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:31.745444Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714660. Ctx: { TraceId: 01jyks70he679q0br3ajk8sr5d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:31.853573Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714661. Ctx: { TraceId: 01jyks70n4cwjxecpbe6jd5ywd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:31.979597Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714662. Ctx: { TraceId: 01jyks70re0w08a88wqqfgg3sz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:52:32.074713Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714663. Ctx: { TraceId: 01jyks70w82s6a06d1w3trw7s4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:32.162886Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714664. Ctx: { TraceId: 01jyks70z146r97bndrg2nbxmx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:32.269528Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714665. Ctx: { TraceId: 01jyks711v0hc43t5jv98cd8mr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:32.373426Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714666. Ctx: { TraceId: 01jyks71555mcpgfr6v3k652qb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:32.468837Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714667. Ctx: { TraceId: 01jyks718823taf9e1bhasvd1r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:52:32.569419Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976714668. Ctx: { TraceId: 01jyks71b733fyf03dmq5071rd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=OGIzNjAyMmQtYTI2Njg5ZC04YmEyMjhlZi05ZmM0Njg1YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:52:32.605331Z node 7 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 9 2025-06-25T14:52:32.606250Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:52:34.700834Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519899986316356013:2197];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183d/r3tmp/tmpiVIsLv/pdisk_1.dat 2025-06-25T14:52:34.919543Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:52:35.086334Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:35.118279Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:35.118416Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:35.135964Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1712, node 10 2025-06-25T14:52:35.256960Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:35.256991Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:35.257000Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:35.257147Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4928 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:35.616348Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:52:35.652597Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:38.679099Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpLocks::TwoPhaseTx [GOOD] >> KqpLocks::MixedTxFail-useSink |87.3%| [TA] $(B)/ydb/core/tx/schemeshard/ut_ttl/test-results/unittest/{meta.json ... results_accumulator.log} |87.3%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ttl/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::RollbackRoTx [GOOD] Test command err: Trying to start YDB, gRPC: 20670, MsgBus: 18385 2025-06-25T14:52:25.758073Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899949670258600:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:25.758146Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001223/r3tmp/tmp5Olryg/pdisk_1.dat 2025-06-25T14:52:26.085591Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20670, node 1 2025-06-25T14:52:26.141275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:26.141745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:26.151578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:26.180823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:26.180875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:26.180883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:26.180998Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18385 TClient is connected to server localhost:18385 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:26.689518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:26.711172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:52:26.725560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:26.774865Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:26.845024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:27.002979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:27.073515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:28.638132Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899962555162084:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:28.638224Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:28.987058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.028359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.058899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.090906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.140923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.172667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.212747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:29.273724Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899966850130041:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:29.273821Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:29.274865Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899966850130046:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:29.278304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:29.289540Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899966850130048:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:29.383975Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899966850130099:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:30.758906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899949670258600:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:30.758955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 23941, MsgBus: 24879 2025-06-25T14:52:36.149894Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899998915112483:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:36.234213Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001223/r3tmp/tmpOAQjLT/pdisk_1.dat 2025-06-25T14:52:36.410952Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:36.420444Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519899998915112345:2080] 1750863156069987 != 1750863156069990 2025-06-25T14:52:36.429478Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:36.429540Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:36.431134Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23941, node 2 2025-06-25T14:52:36.503959Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:36.503979Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:36.503986Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:36.504097Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24879 TClient is connected to server localhost:24879 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:37.011728Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:37.017830Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:37.024196Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:37.092575Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:37.226639Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:52:37.286744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:37.368637Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:39.429698Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900011800015872:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:39.429795Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:39.489489Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.518319Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.549603Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.581169Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.622738Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.665475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.740911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.795135Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900011800016529:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:39.795231Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:39.795546Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900011800016534:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:39.798320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:39.805764Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900011800016536:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:39.869593Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900011800016587:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:40.938809Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NmJkOWZlZWYtM2QyMGNmMzgtODQxMzIyNWUtYjk3OTI3MmI=, ActorId: [2:7519900016094984153:2473], ActorState: ReadyState, TraceId: 01jyks79k229mkd8xcb09rbkcz, Create QueryResponse for error on request, msg: 2025-06-25T14:52:41.092409Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519899998915112483:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:41.092500Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSinkMvcc::OlapMultiSinks [GOOD] >> TPersQueueTest::DirectReadCorrectOffsetsOnRestart [GOOD] >> TPersQueueTest::DirectReadBadCases >> KqpNewEngine::LocksSingleShard >> KqpSinkLocks::OlapUncommittedRead [GOOD] >> KqpSinkLocks::OlapInsertWithBulkUpsert-UseBulkUpsert [GOOD] >> KqpSinkLocks::OlapVisibleUncommittedRows [GOOD] >> KqpNewEngine::StreamLookupWithView >> KqpNewEngine::Update-UseSink >> KqpSort::ReverseOptimized >> KqpTx::CommitStats [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex-UseSink-UseDataQuery >> KqpNotNullColumns::ReplaceNotNullPk >> KqpSinkTx::OlapSnapshotRO [GOOD] >> TPersQueueTest::PreferredCluster_TwoEnabledClustersAndWriteSessionsWithDifferentPreferredCluster_SessionWithMismatchedClusterDiesAndOthersAlive [GOOD] >> TPersQueueTest::PreferredCluster_DisabledRemoteClusterAndWriteSessionsWithDifferentPreferredClusterAndLaterRemoteClusterEnabled_SessionWithMismatchedClusterDiesAfterPreferredClusterEnabledAndOtherSessionsAlive >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-48 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-1 >> KqpSinkTx::OlapDeferredEffects [GOOD] >> KqpSinkTx::OlapExplicitTcl >> KqpNewEngine::PkSelect1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::OlapMultiSinks [GOOD] Test command err: Trying to start YDB, gRPC: 22188, MsgBus: 63427 2025-06-25T14:51:52.702485Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899808368998175:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:52.702523Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012e2/r3tmp/tmpnovm3t/pdisk_1.dat 2025-06-25T14:51:53.140049Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899808368998152:2080] 1750863112700931 != 1750863112700934 2025-06-25T14:51:53.140381Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22188, node 1 2025-06-25T14:51:53.162887Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:53.162974Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:53.164031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:53.304628Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:53.304651Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:53.304658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:53.304802Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63427 2025-06-25T14:51:53.711966Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63427 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:54.066594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:51:54.107148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:51:55.678144Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899821253900690:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.678260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899821253900682:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.678394Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:55.700263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:55.717946Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899821253900696:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:51:55.796071Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899821253900749:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:57.702712Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899808368998175:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:57.702780Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:51:57.750638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:51:57.937452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899829843835497:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:51:57.937469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899829843835503:2311];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:51:57.937798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899829843835497:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:51:57.938103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899829843835503:2311];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:51:57.938108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899829843835497:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:51:57.938259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899829843835497:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:51:57.938332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899829843835503:2311];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:51:57.938384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899829843835497:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:51:57.938475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037895;self_id=[1:7519899829843835503:2311];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:51:57.938498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899829843835497:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:51:57.938648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899829843835497:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:51:57.938680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899829843835503:2311];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:51:57.938747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899829843835497:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:51:57.938792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899829843835503:2311];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:51:57.938839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899829843835497:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:51:57.938891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899829843835503:2311];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:51:57.938950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899829843835497:2310];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:51:57.939009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899829843835503:2311];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:51:57.939063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899829843835497:2310];table ... 
rrent=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.736623Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038043;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T14:52:39.738287Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038089;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.738792Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038071;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T14:52:39.743201Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038043;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.743554Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038071;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.743819Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038077;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T14:52:39.744142Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038083;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T14:52:39.750347Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038083;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.750350Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038077;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.751033Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038022;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T14:52:39.751078Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038045;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T14:52:39.757310Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038045;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.757379Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038022;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.758006Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038026;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 
2025-06-25T14:52:39.758085Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038095;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T14:52:39.764486Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038095;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.764534Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038026;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.765159Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038050;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T14:52:39.765172Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038065;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T14:52:39.770425Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038065;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.771068Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038075;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T14:52:39.771437Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038050;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.771969Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038024;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T14:52:39.777306Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038075;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.778059Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038024;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:39.778308Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038049;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T14:52:39.784890Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038049;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:52:40.037692Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:52:40.038218Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037970;self_id=[3:7519899975872921190:2458];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037970;local_tx_no=18;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037896,72075186224037897,72075186224037969,72075186224037981,72075186224037993,72075186224037996;receive=72075186224037913; 2025-06-25T14:52:40.038293Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:52:40.038424Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037970;self_id=[3:7519899975872921190:2458];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037970;local_tx_no=20;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037981,72075186224037993,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:40.038781Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:52:40.038820Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:52:40.039244Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:52:40.039244Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:52:40.039708Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037970;self_id=[3:7519899975872921190:2458];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037970;local_tx_no=23;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037981,72075186224037993,72075186224037996;receive=72075186224037892; 2025-06-25T14:52:40.039784Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037970;self_id=[3:7519899975872921190:2458];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037970;local_tx_no=24;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037981,72075186224037993,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:40.039937Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:52:40.040112Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037970;self_id=[3:7519899975872921190:2458];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037970;local_tx_no=27;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037996;receive=72075186224037981; 2025-06-25T14:52:40.040177Z node 3 :TX_COLUMNSHARD_TX WARN: 
log.cpp:784: tablet_id=72075186224037970;self_id=[3:7519899975872921190:2458];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037970;local_tx_no=28;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037996;receive=72075186224037993; 2025-06-25T14:52:40.040399Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037970;self_id=[3:7519899975872921190:2458];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037970;local_tx_no=30;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037996;receive=72075186224037897; 2025-06-25T14:52:40.041026Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037970;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:52:40.041895Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:52:40.284928Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;tx_state=TTxProgressTx::Execute;tx_current=281474976710667;tx_id=281474976710667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710667; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpTx::CommitStats [GOOD] Test command err: Trying to start YDB, gRPC: 2966, MsgBus: 10939 2025-06-25T14:52:32.204483Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899981088612377:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:32.206480Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00120c/r3tmp/tmpbbuTkM/pdisk_1.dat 2025-06-25T14:52:32.613112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:32.613188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:32.633393Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:32.646277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2966, node 1 2025-06-25T14:52:32.730566Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:32.730612Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:32.730623Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:32.730756Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10939 2025-06-25T14:52:33.223433Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10939 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:33.388599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:33.401771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:33.413615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:33.547142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:33.711600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:33.776775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:35.317825Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899993973515883:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:35.317957Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:35.602988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:35.678988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:35.711136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:35.738298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:35.787969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:35.827657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:35.864304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:35.959907Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899993973516551:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:35.959974Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:35.960282Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899993973516556:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:35.964426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:35.978789Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899993973516558:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:36.045757Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899998268483905:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:37.200442Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899981088612377:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:37.200509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17761, MsgBus: 22322 2025-06-25T14:52:38.327176Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900003992100668:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:38.327231Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00120c/r3tmp/tmpbxF82T/pdisk_1.dat 2025-06-25T14:52:38.423071Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17761, node 2 2025-06-25T14:52:38.436797Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:38.436864Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:38.438474Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:38.495025Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:38.495043Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:38.495050Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:38.495142Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22322 TClient is connected to server localhost:22322 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:38.979810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:38.985026Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:38.991446Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:39.072178Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:39.211846Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:52:39.300024Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.352257Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:41.439582Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900016877004156:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:41.439673Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:41.488300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:41.518811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:41.541905Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:41.565576Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:41.595406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:41.639231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:41.711169Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:41.795389Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900016877004821:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:41.795474Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:41.795634Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900016877004826:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:41.799267Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:41.808726Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900016877004828:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:41.874860Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900016877004879:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-ModifyUser-36 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-55 >> KqpSinkTx::OlapInvalidateOnError [GOOD] >> KqpSinkTx::OlapInteractive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::OlapVisibleUncommittedRows [GOOD] Test command err: Trying to start YDB, gRPC: 5275, MsgBus: 6636 2025-06-25T14:52:30.550991Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899971022739994:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:30.551414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00120e/r3tmp/tmpS0b6P3/pdisk_1.dat 2025-06-25T14:52:30.963756Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899971022739897:2080] 1750863150521547 != 1750863150521550 2025-06-25T14:52:30.966763Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:30.970421Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:30.970509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:30.975369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5275, node 1 2025-06-25T14:52:31.059344Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:31.059373Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:31.059381Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:31.059488Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6636 TClient is connected to server localhost:6636 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:52:31.551145Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:31.643670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:31.658289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:33.570619Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899983907642401:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.570818Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.584435Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899983907642428:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.606984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:33.626759Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899983907642432:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:52:33.712224Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899983907642483:2333] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:33.989535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:52:34.127950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899988202609959:2308];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:52:34.127950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899988202609951:2306];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:52:34.128157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899988202609951:2306];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:52:34.128469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899988202609951:2306];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:52:34.128617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899988202609951:2306];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:52:34.128756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899988202609951:2306];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:52:34.128878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899988202609951:2306];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:52:34.128995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899988202609951:2306];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:52:34.129109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899988202609951:2306];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:52:34.129214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519899988202609951:2306];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:52:34.129291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899988202609959:2308];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:52:34.129363Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899988202609951:2306];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:52:34.129505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899988202609959:2308];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:52:34.129510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899988202609951:2306];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:52:34.129625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899988202609959:2308];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:52:34.129719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899988202609959:2308];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:52:34.129826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899988202609959:2308];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:52:34.129930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899988202609959:2308];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:52:34.130037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899988202609959:2308];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:52:34.130139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899988202609959:2308];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 20 ... 
elf_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=34;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037970,72075186224037993;receive=72075186224037996; 2025-06-25T14:52:41.633551Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=36;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037969,72075186224037970,72075186224037993;receive=72075186224037892; 2025-06-25T14:52:41.633596Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=37;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037969,72075186224037970,72075186224037993;receive=72075186224037892; 2025-06-25T14:52:41.633641Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=38;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037969,72075186224037970,72075186224037993;receive=72075186224037892; 2025-06-25T14:52:41.633698Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=39;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037969,72075186224037970,72075186224037993;receive=72075186224037892; 2025-06-25T14:52:41.633753Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=40;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037969,72075186224037970,72075186224037993;receive=72075186224037892; 2025-06-25T14:52:41.633860Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=42;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970,72075186224037993;receive=72075186224037897; 2025-06-25T14:52:41.633904Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=43;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970,72075186224037993;receive=72075186224037897; 2025-06-25T14:52:41.633946Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=44;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970,72075186224037993;receive=72075186224037897; 2025-06-25T14:52:41.633988Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=45;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970,72075186224037993;receive=72075186224037897; 2025-06-25T14:52:41.634028Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=46;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970,72075186224037993;receive=72075186224037897; 2025-06-25T14:52:41.634143Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=48;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970;receive=72075186224037993; 2025-06-25T14:52:41.634192Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=49;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970;receive=72075186224037993; 2025-06-25T14:52:41.634253Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=50;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970;receive=72075186224037993; 2025-06-25T14:52:41.634293Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=51;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970;receive=72075186224037993; 2025-06-25T14:52:41.634333Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=52;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970;receive=72075186224037993; 2025-06-25T14:52:41.635859Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:52:41.636065Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037970;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:52:41.636735Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=54;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970;receive=72075186224037969; 2025-06-25T14:52:41.636788Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=55;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970;receive=72075186224037969; 2025-06-25T14:52:41.636843Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=56;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970;receive=72075186224037969; 2025-06-25T14:52:41.636889Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=57;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970;receive=72075186224037969; 2025-06-25T14:52:41.636950Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[1:7519899988202610673:2445];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=58;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970;receive=72075186224037969; 2025-06-25T14:52:41.637367Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:52:42.482862Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519899988202609981:2313];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:42.483016Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899988202609951:2306];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:42.492696Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899988202609966:2312];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:42.492810Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519899988202609965:2311];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:42.492882Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899988202609959:2308];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:42.492940Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[1:7519899988202609964:2310];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:42.493000Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899988202609963:2309];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:42.493570Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899988202609950:2305];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:42.493654Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519899988202609982:2314];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:42.493714Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899988202609954:2307];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; >> KqpSinkLocks::DifferentKeyUpdateOlap [GOOD] >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite3 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-30 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-31 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::OlapSnapshotRO [GOOD] Test command err: Trying to start YDB, gRPC: 18474, MsgBus: 4293 2025-06-25T14:52:11.000174Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899885559144356:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:11.001881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00124d/r3tmp/tmpn2iC3C/pdisk_1.dat 2025-06-25T14:52:11.303495Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899885559144325:2080] 1750863130987242 != 1750863130987245 2025-06-25T14:52:11.304042Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:11.315325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:11.315439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:11.320793Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18474, node 1 2025-06-25T14:52:11.392790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, 
broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:11.392811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:11.392816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:11.392934Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4293 TClient is connected to server localhost:4293 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:11.889775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:12.016061Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:13.666049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899898444046837:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:13.666150Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:13.666239Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899898444046858:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:13.669958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:13.681599Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899898444046860:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:52:13.752910Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899898444046911:2331] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:14.080700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:52:14.231072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899902739014385:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:52:14.231261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899902739014385:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:52:14.231585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899902739014385:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:52:14.231697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899902739014385:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:52:14.231790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899902739014385:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:52:14.231875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899902739014385:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:52:14.231965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899902739014385:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:52:14.232050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899902739014385:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:52:14.232138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899902739014385:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:52:14.232224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519899902739014385:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:52:14.232372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899902739014385:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:52:14.240072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899902739014384:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:52:14.240128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899902739014384:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:52:14.240301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899902739014384:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:52:14.240618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899902739014384:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:52:14.240716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899902739014384:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:52:14.240812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899902739014384:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:52:14.240896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899902739014384:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:52:14.240987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899902739014384:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:52:14.241079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899902739014384:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:52:14.241170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899902739014384:2305];tablet_id=72075186224037888;process=TTxInitSchema::Exe ... 
r.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:40.346510Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:40.346735Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:40.347346Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=24;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:40.347479Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=25;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:40.347576Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=26;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:40.347663Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=27;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:40.347731Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=28;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037970,72075186224037993,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:40.347940Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=30;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993,72075186224037996;receive=72075186224037970; 2025-06-25T14:52:40.347993Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=31;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993,72075186224037996;receive=72075186224037970; 2025-06-25T14:52:40.348051Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=32;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993,72075186224037996;receive=72075186224037970; 2025-06-25T14:52:40.348109Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=33;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993,72075186224037996;receive=72075186224037970; 2025-06-25T14:52:40.348164Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=34;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037897,72075186224037969,72075186224037993,72075186224037996;receive=72075186224037970; 2025-06-25T14:52:40.348299Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=36;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037969,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-25T14:52:40.348379Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=37;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037969,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-25T14:52:40.348433Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=38;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037969,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-25T14:52:40.348489Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=39;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037969,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-25T14:52:40.348544Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=40;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037969,72075186224037993,72075186224037996;receive=72075186224037897; 2025-06-25T14:52:40.348683Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=42;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037969,72075186224037996;receive=72075186224037993; 2025-06-25T14:52:40.348734Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=43;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037969,72075186224037996;receive=72075186224037993; 2025-06-25T14:52:40.348788Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=44;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037969,72075186224037996;receive=72075186224037993; 2025-06-25T14:52:40.348840Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=45;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037969,72075186224037996;receive=72075186224037993; 2025-06-25T14:52:40.348895Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=46;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037969,72075186224037996;receive=72075186224037993; 2025-06-25T14:52:40.349081Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:40.349247Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=48;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:40.349313Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=49;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:40.349377Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=50;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:40.349433Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=51;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:40.349483Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7519899977982768622:2366];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037913;local_tx_no=52;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037892,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:40.350028Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:40.350087Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:40.351860Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:42.261303Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZDc0NDU0OC05OGQ0OThmYS1hMjkxYzMzOS1lMzUxZDc1Yw==, ActorId: [2:7519900012342515272:3618], ActorState: ExecuteState, TraceId: 01jyks7athfpetwafsq7476nb9, Create QueryResponse for error on request, msg:
:3:29: Error: Operation 'Upsert' can't be performed in read only transaction, code: 2008 >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite1-withSink [GOOD] >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite2+withSink >> KqpNotNullColumns::ReplaceNotNull >> KqpSort::TopSortParameter >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-42 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-43 >> TPersQueueTest::CheckACLForGrpcWrite [GOOD] >> TPersQueueTest::CheckACLForGrpcRead ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite3 [GOOD] Test command err: Trying to start YDB, gRPC: 8793, MsgBus: 2125 2025-06-25T14:52:29.276952Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899965987372849:2129];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:29.276999Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00121a/r3tmp/tmpe0mAyo/pdisk_1.dat 2025-06-25T14:52:29.733100Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899965987372760:2080] 1750863149269974 != 1750863149269977 2025-06-25T14:52:29.795509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:29.795594Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:29.796827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:29.820161Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8793, node 1 2025-06-25T14:52:29.835524Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:52:29.876816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:29.876843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:29.876849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:29.876971Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2125 2025-06-25T14:52:30.296456Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2125 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:30.427903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:30.454294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:32.231879Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899978872275274:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:32.231985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899978872275292:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:32.232047Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:32.236238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:32.252729Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899978872275303:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:52:32.351134Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899978872275356:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:32.674688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:32.780407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:33.674384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:34.359910Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899965987372849:2129];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:34.364938Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:37.510595Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519900000347119847:2930], SessionActorId: [1:7519899987462217877:2930], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/KV2`, code: 2001 . sessionActorId=[1:7519899987462217877:2930]. isRollback=0 2025-06-25T14:52:37.510845Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=YzQwNGE3OTYtY2Y5ZDk4OGItNzgxZDg4NDYtOTkzOGJhODM=, ActorId: [1:7519899987462217877:2930], ActorState: ExecuteState, TraceId: 01jyks75znaqzcge9h2pv9jjkz, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519900000347119848:2930] from: [1:7519900000347119847:2930] 2025-06-25T14:52:37.510914Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519900000347119848:2930] TxId: 281474976710666. Ctx: { TraceId: 01jyks75znaqzcge9h2pv9jjkz, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzQwNGE3OTYtY2Y5ZDk4OGItNzgxZDg4NDYtOTkzOGJhODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/KV2`, code: 2001 } 2025-06-25T14:52:37.511161Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YzQwNGE3OTYtY2Y5ZDk4OGItNzgxZDg4NDYtOTkzOGJhODM=, ActorId: [1:7519899987462217877:2930], ActorState: ExecuteState, TraceId: 01jyks75znaqzcge9h2pv9jjkz, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 64722, MsgBus: 11730 2025-06-25T14:52:38.594115Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900003930762993:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:38.594200Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00121a/r3tmp/tmpe7HCT6/pdisk_1.dat 2025-06-25T14:52:38.749878Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:38.750884Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900003930762972:2080] 1750863158592781 != 1750863158592784 TServer::EnableGrpc on GrpcPort 64722, node 2 2025-06-25T14:52:38.779672Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:38.779775Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:38.835223Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:38.868793Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:38.868813Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:38.868820Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:38.868932Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11730 TClient is connected to server localhost:11730 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:39.493209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:39.504682Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:39.604832Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:41.760649Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900016815665479:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:41.760800Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:41.761048Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900016815665506:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:41.763893Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:41.772152Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900016815665508:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:52:41.831365Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900016815665559:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:41.879436Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:41.913191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:42.814570Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:43.662988Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519900003930762993:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:43.665327Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:44.468268Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YmJlOGRkYTQtNzcwM2M3OTMtYTU2YzRkODktZTQ2YTE4ZmU=, ActorId: [2:7519900025405608090:2930], ActorState: ExecuteState, TraceId: 01jyks7cy781mnnq0h96070zz4, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::DifferentKeyUpdateOlap [GOOD] Test command err: Trying to start YDB, gRPC: 26462, MsgBus: 20585 2025-06-25T14:52:16.298601Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:52:16.298775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:52:16.298824Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00123d/r3tmp/tmp7XCLAO/pdisk_1.dat 2025-06-25T14:52:16.628903Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 26462, node 1 2025-06-25T14:52:16.751509Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:16.759956Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:16.760023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:16.760063Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:16.760528Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:52:16.760811Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863133688782 != 1750863133688786 2025-06-25T14:52:16.814585Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:16.814733Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:16.829559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20585 TClient is connected to server localhost:20585 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:17.166966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:17.268048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:17.417576Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:17.645180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:18.022543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:18.324336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:19.082081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1685:3282], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:19.082398Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:19.111019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:19.299871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:19.540101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:19.806256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.057917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.402479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:20.695913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:21.028932Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2357:3777], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:21.029031Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:21.029294Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2362:3782], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:21.034186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:21.197033Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2364:3784], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:21.259462Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2422:3823] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:22.278822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:22.502725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ES ... x_id=281474976715664; 2025-06-25T14:52:42.886696Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=42;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970,72075186224037981,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:42.886783Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=43;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970,72075186224037981,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:42.886854Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=44;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970,72075186224037981,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:42.886916Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=45;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970,72075186224037981,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:42.886972Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=46;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970,72075186224037981,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:42.887048Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=47;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970,72075186224037981,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:42.887120Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=48;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037970,72075186224037981,72075186224037996;receive=72075186224037896; 2025-06-25T14:52:42.887170Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:42.887342Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=50;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:42.887414Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=51;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:42.887500Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=52;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:42.887593Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=53;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:42.887671Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=54;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:42.887727Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=55;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:42.887780Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=56;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981,72075186224037996;receive=72075186224037969; 2025-06-25T14:52:42.887802Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:42.888079Z node 3 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=58;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981;receive=72075186224037996; 2025-06-25T14:52:42.888146Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=59;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981;receive=72075186224037996; 2025-06-25T14:52:42.888198Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=60;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981;receive=72075186224037996; 2025-06-25T14:52:42.888251Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=61;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981;receive=72075186224037996; 2025-06-25T14:52:42.888299Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=62;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981;receive=72075186224037996; 2025-06-25T14:52:42.888369Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=63;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981;receive=72075186224037996; 2025-06-25T14:52:42.888424Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=64;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037970,72075186224037981;receive=72075186224037996; 2025-06-25T14:52:42.888425Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037970;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:42.888602Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=66;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981;receive=72075186224037970; 2025-06-25T14:52:42.888660Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=67;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981;receive=72075186224037970; 2025-06-25T14:52:42.888712Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=68;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981;receive=72075186224037970; 2025-06-25T14:52:42.888761Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=69;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981;receive=72075186224037970; 2025-06-25T14:52:42.888810Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=70;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981;receive=72075186224037970; 2025-06-25T14:52:42.888943Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=71;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981;receive=72075186224037970; 2025-06-25T14:52:42.889011Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519899987972684865:2308];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=72;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037981;receive=72075186224037970; 2025-06-25T14:52:42.889057Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:42.889829Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:43.643470Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; >> KqpNamedExpressions::NamedExpressionSimple+UseSink >> KqpNewEngine::JoinWithParams >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3-withSink [GOOD] >> TopicService::RelativePath [GOOD] >> KqpNewEngine::BlindWrite >> TAsyncIndexTests::SplitBothWithReboots[PipeResets] [GOOD] >> KqpNewEngine::Select1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite3-withSink [GOOD] Test command err: Trying to start YDB, gRPC: 17042, MsgBus: 5109 2025-06-25T14:52:34.897448Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899987568445653:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:34.897489Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0011f4/r3tmp/tmpZwu5jz/pdisk_1.dat 2025-06-25T14:52:35.363864Z node 1 :CONFIGS_DISPATCHER ERROR: 
configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899987568445465:2080] 1750863154869615 != 1750863154869618 2025-06-25T14:52:35.370052Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:35.374299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:35.374376Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:35.376894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17042, node 1 2025-06-25T14:52:35.476355Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:35.476380Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:35.476390Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:35.476563Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5109 2025-06-25T14:52:35.888535Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5109 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:36.157359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:36.202797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:36.425138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:36.602105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:36.680210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:38.336436Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900004748316273:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:38.336557Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:38.581681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:38.620899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:38.663754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:38.695190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:38.728192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:38.810182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:38.871268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:38.962700Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900004748316939:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:38.962760Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:38.963006Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900004748316944:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:38.966449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:38.977513Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900004748316946:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:39.051380Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900009043284293:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:39.899886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899987568445653:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:39.899956Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 13255, MsgBus: 16778 2025-06-25T14:52:41.725464Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900019103406188:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:41.725545Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0011f4/r3tmp/tmpBb1omr/pdisk_1.dat 2025-06-25T14:52:41.830288Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:41.831444Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900019103406169:2080] 1750863161724917 != 1750863161724920 TServer::EnableGrpc on GrpcPort 13255, node 2 2025-06-25T14:52:41.861415Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:41.861528Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:41.864732Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:41.886264Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:41.886285Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:41.886293Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:41.886384Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16778 TClient is connected to server localhost:16778 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:42.275504Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:42.284727Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:42.300499Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:42.375398Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:42.512419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:42.586080Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:42.731118Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:44.392100Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900031988309694:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:44.392172Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:44.452460Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:44.486417Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:44.517950Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:44.563935Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:44.609297Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:44.670147Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:44.740044Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:44.801865Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900031988310350:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:44.801940Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:44.802145Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900031988310355:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:44.805949Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:44.814915Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:52:44.815202Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900031988310357:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:44.905109Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900031988310408:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:46.728437Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519900019103406188:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:46.728510Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:46.757389Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NTM4MjFiMGItYTQ2OWFiZGYtM2Y4NzVkYzItZTE2YmQ0Njk=, ActorId: [2:7519900036283277932:2464], ActorState: ExecuteState, TraceId: 01jyks7f759r3n4fn04b6g18ze, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken >> KqpNotNullColumns::ReplaceNotNullPk [GOOD] >> KqpNotNullColumns::ReplaceNotNullPkPg >> KqpLocks::MixedTxFail-useSink [GOOD] >> KqpLocksTricky::TestNoLocksIssue+withSink >> TopicService::AccessRights ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitBothWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:18.374010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:18.374112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:18.374158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:18.374195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:18.374243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 
10000 2025-06-25T14:51:18.374271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:18.374331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:18.374409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:18.375141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:18.375476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:18.446524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:18.446580Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:18.447149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:18.462716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:18.466951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:18.467148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:18.478737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:18.478960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:18.479616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.479902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:18.486007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.486183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:18.487083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:18.487137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.487211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:18.487246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a 
bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:18.487275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:18.487379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:18.493081Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:18.613712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:18.613947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.614145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:18.614189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:18.614395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:18.614469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:18.616701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.616878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:18.617063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.617131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:18.617179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, 
do next state 2025-06-25T14:51:18.617216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:18.618985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.619039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:18.619081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:18.620677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.620731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.620792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:18.620832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:18.629621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:18.631831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:18.632025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:18.633045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.633190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
mpty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:52:48.152018Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [AsyncIndexChangeSenderMain][72075186233409548:2][40:831:2700] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046678944, LocalPathId: 5] Access: 0 SyncVersion: false Status: OkData Kind: KindAsyncIndexTable PartitionsCount: 2 DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL, Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:52:48.152601Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:229: [AsyncIndexChangeSenderMain][72075186233409548:2][40:831:2700] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750863168124419 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750863168124419 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750863168124419 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:52:48.152814Z node 40 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [40:274:2263] Handle TEvGetProxyServicesRequest 2025-06-25T14:52:48.152856Z node 40 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [40:274:2263] Handle TEvGetProxyServicesRequest 2025-06-25T14:52:48.152913Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:40: [TableChangeSenderShard][72075186233409548:2][72075186233409550][40:986:2700] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-25T14:52:48.152992Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:40: [TableChangeSenderShard][72075186233409548:2][72075186233409551][40:987:2700] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-25T14:52:48.174527Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409550][40:986:2700] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:52:48.174619Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409551][40:987:2700] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:52:48.174688Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][40:831:2700] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409550 } 2025-06-25T14:52:48.174756Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][40:831:2700] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-25T14:52:48.174881Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409550][40:986:2700] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750863168124419 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750863168124419 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:52:48.175042Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409551][40:987:2700] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 3 Group: 1750863168124419 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:52:48.187496Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409550][40:986:2700] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 2 2025-06-25T14:52:48.187843Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][40:831:2700] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409550 } 2025-06-25T14:52:48.188477Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409551][40:987:2700] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-25T14:52:48.188550Z node 40 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][40:831:2700] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-25T14:52:48.382664Z node 40 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:52:48.383010Z node 40 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 353us result status StatusSuccess 2025-06-25T14:52:48.383967Z node 40 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable 
CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\004\000\000\0002\000\000\000\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError [GOOD] >> TPersQueueCommonTest::TestWriteWithRateLimiterWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-2 >> KqpNewEngine::LocksSingleShard [GOOD] >> KqpNewEngine::LocksMultiShardOk >> KqpSort::ReverseOptimized [GOOD] >> KqpSort::ReverseOptimizedWithPredicate >> TPersQueueTest::WhenDisableNodeAndCreateTopic_ThenAllPartitionsAreOnOtherNode [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterDecompressingTheData_Compressed >> KqpNewEngine::Update-UseSink [GOOD] >> KqpNewEngine::UpdateFromParams >> TPersQueueTest::SchemeOperationsTest [GOOD] >> TPersQueueTest::SchemeOperationFirstClassCitizen >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-55 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-56 >> KqpNotNullColumns::CreateTableWithDisabledNotNullDataColumns >> KqpNewEngine::PkSelect1 [GOOD] >> KqpNewEngine::PkSelect2 >> KqpNotNullColumns::ReplaceNotNull [GOOD] >> KqpNotNullColumns::JoinLeftTableWithNotNullPk+StreamLookup >> KqpSinkTx::OlapSnapshotROInteractive1 [GOOD] >> KqpSinkTx::OlapSnapshotROInteractive2 >> KqpMergeCn::TopSortBy_PK_Uint64_Limit3 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-31 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-32 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-43 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-44 >> KqpNewEngine::StreamLookupWithView [GOOD] >> KqpNewEngine::Truncated >> KqpSort::TopSortParameter [GOOD] >> KqpSort::TopSortExpr >> KqpNewEngine::Select1 [GOOD] >> KqpNewEngine::ShuffleWrite >> KqpNewEngine::JoinWithParams [GOOD] >> KqpNewEngine::JoinPure >> KqpNamedExpressions::NamedExpressionSimple+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionSimple-UseSink >> KqpNotNullColumns::ReplaceNotNullPkPg [GOOD] >> 
KqpNotNullColumns::SelectNotNullColumns >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite2+withSink [GOOD] >> KqpNewEngine::BlindWrite [GOOD] >> KqpNewEngine::BlindWriteParameters >> KqpNotNullColumns::CreateTableWithDisabledNotNullDataColumns [GOOD] >> KqpNotNullColumns::InsertNotNull >> TPersQueueTest::DirectReadBadCases [GOOD] >> TPersQueueTest::DirectReadWrongGeneration ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSnapshotRead::ReadWriteTxFailsOnConcurrentWrite2+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 17701, MsgBus: 4240 2025-06-25T14:52:39.569889Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900007933319984:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:39.570328Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0011d5/r3tmp/tmp4bWr9e/pdisk_1.dat 2025-06-25T14:52:40.041828Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:40.045984Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900007933319876:2080] 1750863159558030 != 1750863159558033 TServer::EnableGrpc on GrpcPort 17701, node 1 2025-06-25T14:52:40.058617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:40.058685Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:40.070737Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:40.139678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:40.139702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:40.139709Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:40.139819Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4240 2025-06-25T14:52:40.549957Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4240 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:40.789411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:40.824195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:40.972482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:41.155017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:41.239378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:42.716605Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900020818223404:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:42.716698Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:43.014530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:43.045928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:43.112077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:43.138585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:43.165589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:43.211781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:43.240791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:43.323502Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900025113191363:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:43.323573Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:43.323624Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900025113191368:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:43.327451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:43.337750Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900025113191370:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:43.439324Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900025113191421:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:44.563585Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900007933319984:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:44.563641Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:45.142950Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZDdlMGQ5ZmMtZWQ3N2FiNzgtYTRlNDU2MDAtMTAzM2NhNTg=, ActorId: [1:7519900029408158949:2465], ActorState: ExecuteState, TraceId: 01jyks7djh7z68mapfjc0kkvj4, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 19038, MsgBus: 30054 2025-06-25T14:52:46.013531Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:751 ... o.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:46.159488Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:46.192617Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:46.228765Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:46.228787Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:46.228793Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:46.228894Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30054 TClient is connected to server localhost:30054 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:46.627838Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:46.645001Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:46.730880Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:52:46.870190Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:46.929660Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:47.058020Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:49.108132Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900053883972298:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:49.108216Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:49.172499Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:49.206461Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:49.239304Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:49.277361Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:49.305404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:49.365102Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:49.435999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:49.508365Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900053883972958:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:49.508440Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:49.508583Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900053883972963:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:49.513108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:49.528213Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900053883972965:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:49.589323Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900053883973016:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:51.016430Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519900040999068820:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:51.016503Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:53.254029Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519900071063842577:2473], SessionActorId: [2:7519900058178940581:2473], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/EightShard`, code: 2001 . sessionActorId=[2:7519900058178940581:2473]. isRollback=0 2025-06-25T14:52:53.254291Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=ZGU5OWIzZTQtZWUyYzE1YjEtNDZjMmI3NzAtNzExM2MxNGE=, ActorId: [2:7519900058178940581:2473], ActorState: ExecuteState, TraceId: 01jyks7n9w55j0231p27hhcfm1, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7519900071063842578:2473] from: [2:7519900071063842577:2473] 2025-06-25T14:52:53.254363Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519900071063842578:2473] TxId: 281474976715674. Ctx: { TraceId: 01jyks7n9w55j0231p27hhcfm1, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZGU5OWIzZTQtZWUyYzE1YjEtNDZjMmI3NzAtNzExM2MxNGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/EightShard`, code: 2001 } 2025-06-25T14:52:53.254569Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZGU5OWIzZTQtZWUyYzE1YjEtNDZjMmI3NzAtNzExM2MxNGE=, ActorId: [2:7519900058178940581:2473], ActorState: ExecuteState, TraceId: 01jyks7n9w55j0231p27hhcfm1, Create QueryResponse for error on request, msg:
>> TPersQueueTest::StreamReadManyUpdateTokenAndRead [GOOD]
>> TPersQueueTest::SetupWriteSession
>> KqpSort::ReverseOptimizedWithPredicate [GOOD]
>> KqpSort::ReverseMixedOrderNotOptimized
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-2 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-3
>> KqpNewEngine::LocksMultiShardOk [GOOD]
>> KqpNewEngine::LocksNoMutations
>> KqpNewEngine::UpdateFromParams [GOOD]
>> KqpNewEngine::UpsertEmptyInput
>> TFstClassSrcIdPQTest::TestTableCreated [GOOD]
>> TFstClassSrcIdPQTest::NoMapping
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-56 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-57
>> KqpNewEngine::PkSelect2 [GOOD]
>> KqpNewEngine::PkRangeSelect3
>> KqpNewEngine::DuplicatedResults
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-32 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-33
>> KqpNotNullColumns::JoinLeftTableWithNotNullPk+StreamLookup [GOOD]
>> KqpNotNullColumns::JoinLeftTableWithNotNullPk-StreamLookup
>> KqpNotNullColumns::SelectNotNullColumns [GOOD]
>> KqpNotNullColumns::ReplaceNotNullPg
>> KqpMergeCn::TopSortBy_PK_Uint64_Limit3 [GOOD]
>> KqpMergeCn::TopSortBy_Int32_Limit3
>> TPersQueueTest::WriteAfterAlter [GOOD]
>> TPersQueueTest::WhenTheTopicIsDeletedBeforeDataIsDecompressed_Compressed
>> KqpNewEngine::Truncated [GOOD]
>> KqpNewEngine::Update+UseSink
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-44 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-45
>> KqpSort::TopSortExpr [GOOD]
>> KqpSort::TopSortExprPk
>> KqpNotNullColumns::InsertNotNull [GOOD]
>> KqpNotNullColumns::InsertFromSelect
>> KqpNamedExpressions::NamedExpressionSimple-UseSink [GOOD]
>> KqpNamedExpressions::NamedExpressionRandomUpsertReturning-UseSink-UseDataQuery
>> KqpNewEngine::ShuffleWrite [GOOD]
>> KqpNewEngine::SelfJoin
>> KqpNewEngine::JoinPure [GOOD]
>> KqpNewEngine::JoinPureUncomparableKeys
>> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite3 [GOOD]
>> KqpJoinOrder::TPCDS23 [GOOD]
>> TPersQueueTest::PreferredCluster_DisabledRemoteClusterAndWriteSessionsWithDifferentPreferredClusterAndLaterRemoteClusterEnabled_SessionWithMismatchedClusterDiesAfterPreferredClusterEnabledAndOtherSessionsAlive [GOOD]
>> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndCloseClientSessionWithEnabledRemotePreferredClusterDelaySec_SessionDiesOnlyAfterDelay
>> KqpNewEngine::BlindWriteParameters [GOOD]
>> KqpNewEngine::BlindWriteListParameter
>> KqpSinkTx::OlapExplicitTcl [GOOD]
>> TopicService::AccessRights [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite3 [GOOD]
Test command err: Trying to start YDB, gRPC: 1705, MsgBus: 29462 2025-06-25T14:52:09.110437Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899881591466994:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:09.113281Z node 1 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001262/r3tmp/tmpVnjEnX/pdisk_1.dat 2025-06-25T14:52:09.508952Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1705, node 1 2025-06-25T14:52:09.532071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:09.532191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:09.594794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:09.661063Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:09.661093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:09.661108Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:09.661231Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29462 TClient is connected to server localhost:29462 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:52:10.141485Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:10.219550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:11.928647Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899890181402197:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:11.928763Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899890181402189:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:11.928858Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:11.932998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:11.945950Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899890181402203:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:52:12.004326Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899894476369550:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:12.232495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:52:12.384142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899894476369705:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:52:12.384536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899894476369705:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:52:12.384844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899894476369705:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:52:12.385012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899894476369705:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:52:12.385136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899894476369705:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:52:12.385256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899894476369705:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:52:12.385449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899894476369705:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:52:12.385581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899894476369705:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:52:12.385694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899894476369705:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:52:12.385828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[1:7519899894476369705:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:52:12.385970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519899894476369705:2311];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:52:12.417076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899894476369699:2305];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:52:12.417145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899894476369699:2305];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:52:12.417393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899894476369699:2305];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:52:12.417497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899894476369699:2305];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:52:12.417610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899894476369699:2305];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:52:12.417709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899894476369699:2305];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:52:12.417809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899894476369699:2305];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:52:12.417912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899894476369699:2305];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:52:12.418047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899894476369699:2305];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:52:12.418166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519899894476369699:2305];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:52:12.418266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;sel ... 
tablet_id=72075186224037960;self_id=[3:7519900037975232130:2430];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037960;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.262816Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037973;self_id=[3:7519900037975232040:2423];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037973;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.262861Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037945;self_id=[3:7519900037975232230:2448];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037945;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.262873Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037958;self_id=[3:7519900037975232006:2402];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037958;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.262911Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037946;self_id=[3:7519900037975232209:2445];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037946;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.262940Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037975;self_id=[3:7519900037975232023:2418];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037975;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.262963Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037947;self_id=[3:7519900037975232169:2434];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037947;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.262995Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037976;self_id=[3:7519900037975231909:2376];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037976;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263019Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037948;self_id=[3:7519900037975232172:2436];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037948;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263078Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037961;self_id=[3:7519900037975231996:2392];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037961;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263084Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037965;self_id=[3:7519900037975232020:2416];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037965;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263151Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037962;self_id=[3:7519900037975231920:2379];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037962;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263153Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037966;self_id=[3:7519900037975231966:2385];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037966;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263204Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037963;self_id=[3:7519900037975232015:2411];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037963;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263205Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037987;self_id=[3:7519900037975231912:2377];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037987;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263254Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037988;self_id=[3:7519900037975231919:2378];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037988;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263257Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037964;self_id=[3:7519900037975232016:2412];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037964;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263302Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037993;self_id=[3:7519900037975231908:2375];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037993;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263307Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7519900037975232011:2407];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037981;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263363Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037990;self_id=[3:7519900037975231923:2382];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037990;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263368Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037982;self_id=[3:7519900037975232013:2409];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037982;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263419Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037971;self_id=[3:7519900037975232012:2408];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037971;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263425Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037994;self_id=[3:7519900037975231906:2373];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037994;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263477Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037972;self_id=[3:7519900037975232041:2424];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037972;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263481Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037995;self_id=[3:7519900037975231903:2371];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037995;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263541Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037989;self_id=[3:7519900037975231991:2388];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037989;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263542Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[3:7519900037975231921:2380];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037996;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263596Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037974;self_id=[3:7519900037975232019:2415];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037974;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263598Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037997;self_id=[3:7519900037975231967:2386];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037997;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263645Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037991;self_id=[3:7519900037975231990:2387];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037991;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263702Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037992;self_id=[3:7519900037975231873:2368];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037992;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263763Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037977;self_id=[3:7519900037975232018:2414];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037977;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263821Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037978;self_id=[3:7519900037975232014:2410];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037978;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263880Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037979;self_id=[3:7519900037975231965:2384];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037979;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:52:57.263920Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037980;self_id=[3:7519900037975231992:2389];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037980;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0;
>> KqpNewEngine::UpsertEmptyInput [GOOD]
>> KqpNotNullColumns::AlterAddNotNullColumn
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-3 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-10
>> KqpSort::ReverseMixedOrderNotOptimized [GOOD]
>> KqpSort::ReverseRangeOptimized
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::OlapExplicitTcl [GOOD]
Test command err: Trying to start YDB, gRPC: 13072, MsgBus: 12585 2025-06-25T14:52:30.570074Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899972795393572:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:30.570187Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001213/r3tmp/tmpjBGift/pdisk_1.dat 2025-06-25T14:52:31.032094Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:31.033462Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899972795393551:2080] 1750863150569051 != 1750863150569054 2025-06-25T14:52:31.057876Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:31.057942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 13072, node 1 2025-06-25T14:52:31.064033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:31.110082Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:31.110100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:31.110107Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:31.110229Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12585 TClient is connected to server localhost:12585 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:52:31.576621Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:31.695905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:31.733033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:33.460539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899985680296073:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.460922Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899985680296063:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.460996Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:33.464573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:33.474756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:52:33.474943Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899985680296092:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:52:33.553627Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899985680296143:2333] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:33.824052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:52:33.967164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899985680296318:2306];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:52:33.967397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899985680296318:2306];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:52:33.967639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899985680296318:2306];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:52:33.967742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899985680296318:2306];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:52:33.967854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899985680296318:2306];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:52:33.967970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899985680296318:2306];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:52:33.968063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899985680296318:2306];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:52:33.968168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899985680296318:2306];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:52:33.968294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899985680296318:2306];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:52:33.968798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037895;self_id=[1:7519899985680296318:2306];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:52:33.968932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899985680296318:2306];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:52:33.986067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899985680296314:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:52:33.986113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899985680296314:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:52:33.986310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899985680296314:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:52:33.986376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899985680296314:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:52:33.986433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899985680296314:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:52:33.986490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899985680296314:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:52:33.986596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899985680296314:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:52:33.986710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519899985680296314:2305];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:52:33.986795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=7207518622 ... 
72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037897; 2025-06-25T14:52:57.073058Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=34;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037897; 2025-06-25T14:52:57.073119Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=35;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037892; 2025-06-25T14:52:57.073179Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=36;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037897; 2025-06-25T14:52:57.073235Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=37;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037892; 2025-06-25T14:52:57.073293Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=38;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037892; 2025-06-25T14:52:57.073353Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=39;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037892; 2025-06-25T14:52:57.073426Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=40;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037892; 2025-06-25T14:52:57.073636Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=42;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037981,72075186224037993;receive=72075186224037969; 2025-06-25T14:52:57.073712Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=43;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037981,72075186224037993;receive=72075186224037969; 2025-06-25T14:52:57.073787Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=44;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037981,72075186224037993;receive=72075186224037969; 2025-06-25T14:52:57.073853Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=45;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037981,72075186224037993;receive=72075186224037969; 2025-06-25T14:52:57.073918Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=46;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037981,72075186224037993;receive=72075186224037969; 2025-06-25T14:52:57.074060Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=48;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037981; 2025-06-25T14:52:57.074114Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=49;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037981; 2025-06-25T14:52:57.074167Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=50;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037981; 2025-06-25T14:52:57.074169Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:57.074221Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=51;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037981; 2025-06-25T14:52:57.074279Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=52;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037981; 
2025-06-25T14:52:57.074423Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=53;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037981; 2025-06-25T14:52:57.074520Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=54;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037981; 2025-06-25T14:52:57.074691Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=56;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037913; 2025-06-25T14:52:57.074747Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=57;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037913; 2025-06-25T14:52:57.074798Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=58;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037913; 2025-06-25T14:52:57.074850Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=59;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037913; 2025-06-25T14:52:57.074883Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:57.074899Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900049307451055:2382];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=60;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037993;receive=72075186224037913; 2025-06-25T14:52:57.075085Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037993;local_tx_no=21;method=execute;tx_info=;fline=secondary.h:109;event=duplication_tablet_broken_flag;txId=281474976715664; 2025-06-25T14:52:57.075150Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037993;local_tx_no=22;method=execute;tx_info=;fline=secondary.h:68;event=duplication_tablet_ack_flag;txId=281474976715664; 2025-06-25T14:52:57.075180Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037993;local_tx_no=23;method=execute;tx_info=;fline=secondary.h:109;event=duplication_tablet_broken_flag;txId=281474976715664; 2025-06-25T14:52:57.075737Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037996;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:52:57.077021Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037913;local_tx_no=16;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715664; 2025-06-25T14:52:57.077049Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037913;local_tx_no=17;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715664; 2025-06-25T14:52:57.077333Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037993;local_tx_no=21;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715664; 2025-06-25T14:52:57.077363Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037993;local_tx_no=23;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715664; 2025-06-25T14:52:58.210884Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037991;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:58.662233Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZWI5Y2Q2LWZiN2NkYTZlLTk4NjQ3NzA3LWQxODFlZWZi, ActorId: [2:7519900087962164542:3620], ActorState: ReadyState, TraceId: 01jyks7tww33j3y6sbes0hqqv7, Create QueryResponse for error on request, msg:
>> KqpNotNullColumns::ReplaceNotNullPg [GOOD]
>> KqpNotNullColumns::SecondaryKeyWithNotNullColumn
>> KqpNewEngine::LocksNoMutations [GOOD]
>> KqpNewEngine::LocksNoMutationsSharded
>> KqpLocksTricky::TestNoLocksIssue+withSink [GOOD]
>> TopicService::ThereAreGapsInTheOffsetRanges
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-57 [GOOD]
>> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-58
>> KqpNewEngine::PkRangeSelect3 [GOOD]
>> KqpNewEngine::PkRangeSelect4
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDS23 [GOOD]
Test command err: Trying to start YDB, gRPC: 64029, MsgBus: 18901 2025-06-25T14:50:18.275156Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899402686443437:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:18.275542Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d2b/r3tmp/tmpi3Pscq/pdisk_1.dat 2025-06-25T14:50:18.848677Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:18.848773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:18.850375Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:18.895473Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:50:18.902382Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch 
for subscription [1:7519899402686443229:2080] 1750863018163549 != 1750863018163552 TServer::EnableGrpc on GrpcPort 64029, node 1 2025-06-25T14:50:19.084228Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:19.084254Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:50:19.084261Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:19.084366Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:19.165151Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18901 TClient is connected to server localhost:18901 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:20.062196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:20.074325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:22.220478Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899419866313053:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:22.220590Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:22.220973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899419866313065:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:22.229743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:22.245593Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899419866313067:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:50:22.329368Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899419866313120:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:22.743054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:50:22.978960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899419866313350:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:50:22.979185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899419866313350:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:50:22.979410Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899419866313350:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:50:22.979511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899419866313350:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:50:22.979592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899419866313350:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:50:22.979704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899419866313350:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:50:22.979796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899419866313350:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:50:22.979901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899419866313350:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:50:22.979985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899419866313350:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:50:22.980074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519899419866313350:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:50:22.980157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519899419866313350:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:50:22.987621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899419866313390:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:50:22.992540Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899419866313390:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:50:22.992731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899419866313390:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:50:22.992862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899419866313390:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:50:22.992994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899419866313390:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:50:22.993110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899419866313390:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:50:22.993225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899419866313390:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:50:22.993350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899419866313390:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:50:22.993468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[1:7519899419866313390:2322];tablet_id=72075186224037901;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunk ... 
:Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.133728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:31.137674Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039349;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.138277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:31.138759Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039390;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.139327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:31.143636Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039307;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.144353Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039386;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.144738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:31.144944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:31.151809Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039353;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.152368Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039361;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.152532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:31.153057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:31.157664Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224039213;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.158262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:31.158354Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039301;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.159034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:31.163005Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039327;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.163654Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039239;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:31.163964Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039297;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.164582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:31.168685Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039239;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.169300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=63;result=not_found; 2025-06-25T14:51:31.169830Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039249;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.174354Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039277;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710714;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710714; 2025-06-25T14:51:31.279840Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks4c2q0mfwxbe1k1jw5ppv", SessionId: ydb://session/3?node_id=1&id=YWJjZmY2NDMtMzg3OWRkMzYtMTI2ZThkYTEtYzkxMWYyYjc=, Slow query, duration: 26.103972s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "CREATE TABLE t1 (\n id1 Int32 NOT NULL,\n PRIMARY KEY (id1)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t2 (\n id2 Int64 NOT NULL,\n t1_id1 Int64 NOT NULL,\n -- random_field2 Int32\n PRIMARY KEY (id2)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n\nCREATE TABLE t3 (\n id3 Int16 NOT NULL,\n -- 
random_field3 Int32\n PRIMARY KEY (id3)\n) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 240);\n", parameters: 0b 2025-06-25T14:51:31.751248Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039094;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:31.751283Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224039392;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:51:31.752446Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038933;tx_state=TTxProgressTx::Execute;tx_current=281474976710716;tx_id=281474976710716;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710716; 2025-06-25T14:52:52.632140Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks66xx9mdme1zw1tv135te", SessionId: ydb://session/3?node_id=1&id=YWJjZmY2NDMtMzg3OWRkMzYtMTI2ZThkYTEtYzkxMWYyYjc=, Slow query, duration: 47.193601s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma TablePathPrefix = \"/Root/test/ds/\";\n\n-- NB: Subquerys\n\n$blabla = (\n\n select substring(cast(item.i_item_desc as string),0,30) itemdesc,item.i_item_sk item_sk,date_dim.d_date solddate\n\n from store_sales as store_sales\n\n cross join date_dim as date_dim\n\n cross join item as item\n\n where ss_sold_date_sk = d_date_sk\n\n and ss_item_sk = i_item_sk\n\n and d_year in (2000,2000+1,2000+2,2000+3)\n\n);\n\n$frequent_ss_items =\n\n (select itemdesc, item_sk, solddate,count(*) cnt\n\n from $blabla\n\n group by itemdesc,item_sk,solddate\n\n having count(*) >4);\n\n\n\n$max_store_sales =\n\n (select max(csales) tpcds_cmax\n\n from (select customer.c_customer_sk c_customer_sk,sum(ss_quantity*ss_sales_price) csales\n\n from store_sales as store_sales\n\n cross join customer as customer\n\n cross join date_dim as date_dim\n\n where ss_customer_sk = c_customer_sk\n\n and ss_sold_date_sk = d_date_sk\n\n and d_year in (2000,2000+1,2000+2,2000+3)\n\n group by customer.c_customer_sk) x);\n\n\n\n$best_ss_customer =\n\n (select customer.c_customer_sk c_customer_sk,sum(ss_quantity*ss_sales_price) ssales\n\n from store_sales as store_sales\n\n cross join customer as customer\n\n where ss_customer_sk = c_customer_sk\n\n group by customer.c_customer_sk\n\n having sum(ss_quantity*ss_sales_price) > (95/100.0) * $max_store_sales);\n\n\n\n-- start query 1 in stream 0 using template query23.tpl and seed 2031708268\n\nselect sum(sales)\n\n from (select cs_quantity*cs_list_price sales\n\n from catalog_sales as catalog_sales\n\n cross join date_dim as date_dim\n\n where d_year = 2000\n\n and d_moy = 3\n\n and cs_sold_date_sk = d_date_sk\n\n and cs_item_sk in (select item_sk from $frequent_ss_items)\n\n and cs_bill_customer_sk in (select c_customer_sk from $best_ss_customer)\n\n union all\n\n select ws_quantity*ws_list_price sales\n\n from web_sales as web_sales\n\n cross join date_dim as date_dim\n\n where d_year = 2000\n\n and d_moy = 3\n\n and ws_sold_date_sk = d_date_sk\n\n and ws_item_sk in (select item_sk from $frequent_ss_items)\n\n and ws_bill_customer_sk in (select c_customer_sk from $best_ss_customer)) y\n\n limit 100;\n\n\n\nselect c_last_name,c_first_name,sales\n\n from (select customer.c_last_name c_last_name,customer.c_first_name c_first_name,sum(cs_quantity*cs_list_price) sales\n\n from catalog_sales as 
catalog_sales\n\n cross join customer as customer\n\n cross join date_dim as date_dim\n\n where d_year = 2000\n\n and d_moy = 3\n\n and cs_sold_date_sk = d_date_sk\n\n and cs_item_sk in (select item_sk from $frequent_ss_items)\n\n and cs_bill_customer_sk in (select c_customer_sk from $best_ss_customer)\n\n and cs_bill_customer_sk = c_customer_sk\n\n group by customer.c_last_name,customer.c_first_name\n\n union all\n\n select customer.c_last_name c_last_name,customer.c_first_name c_first_name,sum(ws_quantity*ws_list_price) sales\n\n from web_sales as web_sales\n\n cross join customer as customer\n\n cross join date_dim as date_dim\n\n where d_year = 2000\n\n and d_moy = 3\n\n and ws_sold_date_sk = d_date_sk\n\n and ws_item_sk in (select item_sk from $frequent_ss_items)\n\n and ws_bill_customer_sk in (select c_customer_sk from $best_ss_customer)\n\n and ws_bill_customer_sk = c_customer_sk\n\n group by customer.c_last_name,customer.c_first_name) y\n\n order by c_last_name,c_first_name,sales\n\n limit 100;\n\n\n\n-- end query 1 in stream 0 using template query23.tpl", parameters: 0b >> TPersQueueTest::CheckACLForGrpcRead [GOOD] >> TPersQueueTest::CheckKillBalancer >> KqpSinkTx::OlapInteractive [GOOD] >> KqpNewEngine::ContainerRegistryCombiner >> KqpNotNullColumns::InsertFromSelect [GOOD] >> KqpNotNullColumns::FailedMultiEffects >> KqpNewEngine::DuplicatedResults [GOOD] >> KqpNewEngine::FlatmapLambdaMutiusedConnections >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-33 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-34 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpLocksTricky::TestNoLocksIssue+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 14064, MsgBus: 64632 2025-06-25T14:52:35.334011Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899991687626888:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:35.334679Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0011e9/r3tmp/tmpdV72ZO/pdisk_1.dat 2025-06-25T14:52:35.641572Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14064, node 1 2025-06-25T14:52:35.749021Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:35.749138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:35.760721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:35.803489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:35.803517Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:35.803523Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:35.803611Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64632 TClient is connected to server 
localhost:64632 2025-06-25T14:52:36.352455Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:36.490857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:36.513932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:36.531964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:52:36.673783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:36.835655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:36.930863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:38.675914Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900004572530224:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:38.676020Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:38.978155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.015619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.091829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.124390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.198339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.233194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.279282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:39.348485Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900008867498185:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:39.348581Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:39.348816Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900008867498190:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:39.353032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:39.364235Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900008867498192:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:39.468126Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900008867498245:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:40.329390Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899991687626888:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:40.329466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:41.197313Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976715676; 2025-06-25T14:52:41.213317Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [1:7519900013162465842:2474], Table: `/Root/Test` ([72057594046644480:9:1]), SessionActorId: [1:7519900013162465810:2474]Got LOCKS BROKEN for table `/Root/Test`. ShardID=72075186224037914, Sink=[1:7519900013162465842:2474].{
: Error: ... on=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0011e9/r3tmp/tmpStkanl/pdisk_1.dat 2025-06-25T14:52:53.177016Z node 3 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 3 Type# 268639257 TServer::EnableGrpc on GrpcPort 19871, node 3 2025-06-25T14:52:53.328233Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:53.329318Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:53.329377Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:53.329435Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:53.329921Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:52:53.330248Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:33:2080] 1750863169264388 != 1750863169264391 2025-06-25T14:52:53.377018Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:53.377182Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:53.388665Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20269 TClient is connected to server localhost:20269 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:53.870272Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:52:53.881681Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:54.042859Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:54.351011Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:54.787384Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:55.074819Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:55.691534Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1683:3281], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:55.691732Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:55.717497Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:55.932066Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:56.198619Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:56.458927Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:56.772009Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:57.084529Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:57.366546Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:57.681137Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:2354:3776], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:57.681262Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:57.681621Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:2359:3781], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:57.687879Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:57.867729Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:2361:3783], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:57.922333Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:2419:3822] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:59.134521Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:59.409135Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:59.771758Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpRanges::UpdateMulti >> KqpNotNullColumns::JoinLeftTableWithNotNullPk-StreamLookup [GOOD] >> KqpNotNullColumns::JoinRightTableWithNotNullColumns+StreamLookup >> TPersQueueTest::SchemeOperationFirstClassCitizen [GOOD] >> TPersQueueTest::SchemeOperationsCheckPropValues >> KqpMergeCn::TopSortBy_Int32_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Float_Limit4 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-45 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-46 >> KqpLocksTricky::TestNoLocksIssueInteractiveTx+withSink [GOOD] >> KqpLocksTricky::TestNoLocksIssueInteractiveTx-withSink >> KqpRanges::WhereInSubquery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::OlapInteractive [GOOD] Test command err: Trying to start YDB, gRPC: 27715, MsgBus: 27326 2025-06-25T14:52:32.408443Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899981376064506:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:32.419141Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001201/r3tmp/tmpVuc3LW/pdisk_1.dat 2025-06-25T14:52:32.872433Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899981376064488:2080] 1750863152393900 != 1750863152393903 2025-06-25T14:52:32.887939Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:32.912035Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-25T14:52:32.912134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 27715, node 1 2025-06-25T14:52:32.916590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:32.953886Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:32.953909Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:32.953920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:32.954027Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27326 TClient is connected to server localhost:27326 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:52:33.410879Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:33.536880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:35.470913Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899994260967004:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:35.471161Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:35.471469Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899994260967033:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:35.475564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:35.490337Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899994260967035:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:52:35.582470Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899994260967086:2335] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:35.837098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:52:36.000612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899994260967255:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:52:36.000837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899994260967255:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:52:36.001093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899994260967255:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:52:36.001208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899994260967255:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:52:36.001314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899994260967255:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:52:36.001420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899994260967255:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:52:36.001517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899994260967255:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:52:36.001654Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899994260967255:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:52:36.001760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899994260967255:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:52:36.001865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037893;self_id=[1:7519899994260967255:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:52:36.001970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519899994260967255:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:52:36.005023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:52:36.005078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:52:36.005170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:52:36.005193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:52:36.005387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:52:36.005411Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:52:36.005509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:52:36.005537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:52:36.005575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:52:36.005597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:52:36.005788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:52:36.005815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_s ... 
4037923;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:58.507393Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:58.507542Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037965;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:58.507590Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037938;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:58.507725Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037975;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:58.507771Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:58.507919Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037953;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:58.508045Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:58.508155Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037959;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:58.508282Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:52:58.509145Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.030554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:53:00.030594Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:00.597053Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037971;self_id=[2:7519900053948947513:2384];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.003916s; 2025-06-25T14:53:00.599358Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037971;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.600896Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037943;self_id=[2:7519900053948947728:2464];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.002728s; 2025-06-25T14:53:00.602658Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037939;self_id=[2:7519900053948947595:2412];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.001887s; 2025-06-25T14:53:00.603761Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037995;self_id=[2:7519900053948947495:2374];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.001947s; 2025-06-25T14:53:00.604084Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037977;self_id=[2:7519900053948947529:2389];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.001835s; 2025-06-25T14:53:00.606338Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037985;self_id=[2:7519900053948947490:2370];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.001369s; 2025-06-25T14:53:00.606857Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.609120Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.609907Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037935;self_id=[2:7519900053948947596:2413];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.004701s; 2025-06-25T14:53:00.610828Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037995;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.611033Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037977;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.611936Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037945;self_id=[2:7519900053948947658:2424];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.003188s; 2025-06-25T14:53:00.614085Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037985;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.615561Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037909;self_id=[2:7519900053948947679:2443];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.002659s; 2025-06-25T14:53:00.616062Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037987;self_id=[2:7519900053948947512:2383];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.002605s; 2025-06-25T14:53:00.616231Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037979;self_id=[2:7519900053948947487:2367];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.002622s; 2025-06-25T14:53:00.617826Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.618488Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[2:7519900053948947501:2380];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.004499s; 2025-06-25T14:53:00.621666Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.621794Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037973;self_id=[2:7519900053948947670:2434];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.004108s; 2025-06-25T14:53:00.623059Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037989;self_id=[2:7519900053948947539:2394];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.006161s; 2025-06-25T14:53:00.624813Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[2:7519900053948947493:2372];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.005540s; 2025-06-25T14:53:00.627455Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[2:7519900053948947726:2463];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.005279s; 2025-06-25T14:53:00.631565Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037979;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.631826Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.633774Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037987;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.644816Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037969;self_id=[2:7519900053948947479:2365];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.012555s; 2025-06-25T14:53:00.644946Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037957;self_id=[2:7519900053948947600:2416];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.014267s; 2025-06-25T14:53:00.646110Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037983;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.647393Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037973;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.647639Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.647644Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037989;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.647853Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.649365Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037957;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:53:00.649887Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; >> KqpNewEngine::Update+UseSink [GOOD] >> KqpNewEngine::StaleRO_IndexFollowers-EnableFollowers >> KqpSort::TopSortExprPk [GOOD] >> KqpSort::TopSortTableExpr >> KqpNewEngine::SelfJoin [GOOD] >> KqpNewEngine::ScalarFunctions >> KqpNewEngine::JoinPureUncomparableKeys [GOOD] >> KqpNewEngine::JoinWithPrecompute >> KqpSqlIn::CantRewrite >> KqpNewEngine::BlindWriteListParameter [GOOD] >> KqpNewEngine::BrokenLocksAtROTx >> KqpSort::ReverseFirstKeyOptimized >> KqpNotNullColumns::FailedMultiEffects [GOOD] >> KqpNotNullColumns::CreateIndexedTableWithDisabledNotNullDataColumns >> KqpNotNullColumns::SecondaryKeyWithNotNullColumn [GOOD] >> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumn >> KqpNotNullColumns::AlterAddNotNullColumn [GOOD] >> KqpNotNullColumns::AlterAddNotNullColumnPg >> KqpSinkTx::OlapSnapshotROInteractive2 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-10 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-11 >> TPersQueueTest::DirectReadWrongGeneration [GOOD] >> TPersQueueTest::DirectReadStop >> KqpSort::ReverseRangeOptimized [GOOD] >> KqpSort::ReverseRangeLimitOptimized >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-58 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-59 >> KqpNewEngine::ContainerRegistryCombiner [GOOD] >> KqpNewEngine::DeferredEffects >> KqpNewEngine::LocksNoMutationsSharded [GOOD] >> KqpNewEngine::MultiEffects >> KqpNewEngine::PkRangeSelect4 [GOOD] >> KqpNewEngine::PruneEffectPartitions+UseSink >> KqpNewEngine::FlatmapLambdaMutiusedConnections [GOOD] >> KqpNewEngine::EmptyMapWithBroadcast ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::OlapSnapshotROInteractive2 [GOOD] Test command err: Trying to start YDB, gRPC: 24137, MsgBus: 11039 2025-06-25T14:52:38.244914Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900007657161529:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:38.245002Z node 1 :METADATA_PROVIDER 
ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0011de/r3tmp/tmpYrgq30/pdisk_1.dat 2025-06-25T14:52:38.608499Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:38.625574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:38.625658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 24137, node 1 2025-06-25T14:52:38.644339Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:38.698486Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:38.698510Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:38.698515Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:38.698714Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11039 TClient is connected to server localhost:11039 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:52:39.252074Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:39.351355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:39.369571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:41.297158Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900020542064036:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:41.297186Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900020542064047:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:41.297306Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:41.300776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:41.310067Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900020542064050:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:52:41.394466Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900020542064101:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:41.672245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:52:41.820568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900020542064277:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:52:41.820814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900020542064277:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:52:41.821104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900020542064277:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:52:41.821214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900020542064277:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:52:41.821328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900020542064277:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:52:41.821454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900020542064277:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:52:41.821556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900020542064277:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:52:41.821666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900020542064277:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:52:41.821766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900020542064277:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:52:41.821874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519900020542064277:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:52:41.821969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900020542064277:2306];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:52:41.843787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900020542064278:2307];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:52:41.843905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900020542064278:2307];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:52:41.844142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900020542064278:2307];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:52:41.844277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900020542064278:2307];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:52:41.844587Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900020542064278:2307];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:52:41.844735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900020542064278:2307];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:52:41.844835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900020542064278:2307];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:52:41.845094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900020542064278:2307];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:52:41.845224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900020542064278:2307];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:52:41.845325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900020542064278:2307];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline= ... 
execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037897; 2025-06-25T14:53:04.237774Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=39;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037897; 2025-06-25T14:53:04.237811Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=40;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037896,72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037897; 2025-06-25T14:53:04.237898Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=42;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037896; 2025-06-25T14:53:04.237936Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=43;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037896; 2025-06-25T14:53:04.237974Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=44;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037896; 2025-06-25T14:53:04.238013Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=45;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037896; 2025-06-25T14:53:04.238053Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=46;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037896; 2025-06-25T14:53:04.238092Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=47;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037896; 2025-06-25T14:53:04.238130Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=48;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981,72075186224037993;receive=72075186224037896; 2025-06-25T14:53:04.238220Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=50;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981;receive=72075186224037993; 2025-06-25T14:53:04.238276Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=51;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981;receive=72075186224037993; 2025-06-25T14:53:04.238340Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=52;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981;receive=72075186224037993; 2025-06-25T14:53:04.238390Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=53;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981;receive=72075186224037993; 2025-06-25T14:53:04.238432Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=54;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981;receive=72075186224037993; 2025-06-25T14:53:04.238481Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=55;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981;receive=72075186224037993; 2025-06-25T14:53:04.238532Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=56;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037969,72075186224037981;receive=72075186224037993; 2025-06-25T14:53:04.238690Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=58;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981;receive=72075186224037913; 2025-06-25T14:53:04.238746Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=59;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981;receive=72075186224037913; 2025-06-25T14:53:04.238817Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=60;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981;receive=72075186224037913; 2025-06-25T14:53:04.238901Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=61;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981;receive=72075186224037913; 2025-06-25T14:53:04.238945Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=62;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981;receive=72075186224037913; 2025-06-25T14:53:04.238981Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=63;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981;receive=72075186224037913; 2025-06-25T14:53:04.239018Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=64;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969,72075186224037981;receive=72075186224037913; 2025-06-25T14:53:04.239111Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=66;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969;receive=72075186224037981; 2025-06-25T14:53:04.239146Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=67;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969;receive=72075186224037981; 2025-06-25T14:53:04.239180Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=68;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969;receive=72075186224037981; 2025-06-25T14:53:04.239225Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=69;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969;receive=72075186224037981; 2025-06-25T14:53:04.239264Z node 2 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=70;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969;receive=72075186224037981; 2025-06-25T14:53:04.239297Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=71;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969;receive=72075186224037981; 2025-06-25T14:53:04.239333Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7519900084699998883:2377];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037996;local_tx_no=72;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037969;receive=72075186224037981; 2025-06-25T14:53:04.241783Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037996;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:53:05.671037Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037993;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-34 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-35 >> TPersQueueTest::WhenTheTopicIsDeletedAfterDecompressingTheData_Compressed [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterDecompressingTheData_Uncompressed >> KqpRanges::UpdateMulti [GOOD] >> KqpRanges::UpdateWhereInBigLiteralList >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-46 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-47 >> TFstClassSrcIdPQTest::NoMapping [GOOD] >> TFstClassSrcIdPQTest::ProperPartitionSelected >> KqpNotNullColumns::JoinRightTableWithNotNullColumns+StreamLookup [GOOD] >> KqpNotNullColumns::JoinRightTableWithNotNullColumns-StreamLookup >> KqpMergeCn::TopSortBy_Float_Limit4 [GOOD] >> KqpMergeCn::TopSortBy_String_Limit3 >> KqpRanges::WhereInSubquery [GOOD] >> KqpRanges::UpdateWhereInNoFullScan+UseSink >> KqpNotNullColumns::CreateIndexedTableWithDisabledNotNullDataColumns [GOOD] >> KqpNotNullColumns::Describe >> KqpReturning::ReturningWorksIndexedDelete+QueryService >> KqpSort::ReverseFirstKeyOptimized [GOOD] >> KqpSort::ReverseLimitOptimized >> KqpNewEngine::BrokenLocksAtROTx [GOOD] >> KqpNewEngine::BrokenLocksAtROTxSharded >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex-UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex+UseSink-UseDataQuery >> TopicService::ThereAreGapsInTheOffsetRanges [GOOD] >> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumn [GOOD] >> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumnPg >> KqpNewEngine::JoinWithPrecompute [GOOD] >> KqpNewEngine::JoinProjectMulti >> KqpSort::TopSortTableExpr [GOOD] >> KqpSort::TopSortTableExprOffset >> KqpSqlIn::CantRewrite [GOOD] >> KqpSqlIn::KeySuffix >> KqpNotNullColumns::AlterAddNotNullColumnPg [GOOD] >> KqpNotNullColumns::AlterDropNotNullColumn >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-11 [GOOD] >> 
SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-12 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-59 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-60 >> TPersQueueTest::SetupWriteSession [GOOD] >> TPersQueueTest::StoreNoMoreThanXSourceIDs >> KqpNewEngine::DeferredEffects [GOOD] >> KqpNewEngine::Delete+UseSink >> TCdcStreamTests::MeteringServerless [GOOD] >> TCdcStreamTests::MeteringDedicated >> KqpSort::ReverseRangeLimitOptimized [GOOD] >> KqpSort::TopParameter >> KqpNewEngine::PruneEffectPartitions+UseSink [GOOD] >> KqpNewEngine::PrecomputeKey >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit >> KqpNewEngine::MultiEffects [GOOD] >> KqpNewEngine::MultiEffectsOnSameTable >> TopicService::OnePartitionAndNoGapsInTheOffsets >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-35 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-36 >> KqpNewEngine::EmptyMapWithBroadcast [GOOD] >> KqpNewEngine::FlatMapLambdaInnerPrecompute >> KqpNotNullColumns::Describe [GOOD] >> KqpNotNullColumns::CreateTableWithNotNullColumns >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-47 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-48 >> TPersQueueTest::WhenTheTopicIsDeletedBeforeDataIsDecompressed_Compressed [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterReadingTheData_Compressed >> KqpRanges::UpdateWhereInBigLiteralList [GOOD] >> KqpRanges::UpdateWhereInBigLiteralListPrefix >> KqpMergeCn::TopSortBy_String_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Utf8_Limit2 >> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndCloseClientSessionWithEnabledRemotePreferredClusterDelaySec_SessionDiesOnlyAfterDelay [GOOD] >> TPersQueueTest::PreferredCluster_NonExistentPreferredCluster_SessionDiesOnlyAfterDelay >> KqpRanges::UpdateWhereInNoFullScan+UseSink [GOOD] >> KqpRanges::UpdateWhereInNoFullScan-UseSink >> KqpLocksTricky::TestNoLocksIssueInteractiveTx-withSink [GOOD] >> KqpNotNullColumns::JoinRightTableWithNotNullColumns-StreamLookup [GOOD] >> KqpNotNullColumns::OptionalParametersDataQuery >> KqpSort::ReverseLimitOptimized [GOOD] >> KqpSort::ReverseEightShardOptimized >> KqpNotNullColumns::AlterDropNotNullColumn [GOOD] >> KqpNotNullColumns::AlterAddIndex >> KqpNewEngine::BrokenLocksAtROTxSharded [GOOD] >> KqpNewEngine::BrokenLocksOnUpdate >> KqpReturning::ReturningWorksIndexedDelete+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedDelete-QueryService >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-12 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-13 >> TPersQueueTest::SchemeOperationsCheckPropValues [GOOD] >> TPersQueueTest::ReadRuleServiceType >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-60 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-61 >> KqpNewEngine::Delete+UseSink [GOOD] >> KqpNewEngine::Delete-UseSink >> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumnPg [GOOD] >> KqpNewEngine::JoinProjectMulti [GOOD] >> KqpNewEngine::JoinMultiConsumer >> KqpNotNullColumns::CreateTableWithNotNullColumns [GOOD] >> KqpSort::TopSortTableExprOffset [GOOD] >> KqpSort::TopSortResults >> TPersQueueTest::DirectReadStop [GOOD] >> TPersQueueTest::DirectReadCleanCache >> KqpNewEngine::ScalarFunctions [GOOD] >> KqpNewEngine::ScalarMultiUsage >> 
SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-36 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-37 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-ModifyUser-48 [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::SecondaryIndexWithNotNullDataColumnPg [GOOD] Test command err: Trying to start YDB, gRPC: 30543, MsgBus: 14927 2025-06-25T14:52:43.919679Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900025441556248:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:43.919756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a9b/r3tmp/tmp1gd7vs/pdisk_1.dat 2025-06-25T14:52:44.292736Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:44.292831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:44.298003Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900025441556035:2080] 1750863163908371 != 1750863163908374 2025-06-25T14:52:44.317070Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:44.322154Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30543, node 1 2025-06-25T14:52:44.515271Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:44.515292Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:44.515299Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:44.515413Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14927 2025-06-25T14:52:44.919168Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14927 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:45.354517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:45.389139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:46.998525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900038326458563:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:46.998624Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.601755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.737135Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900042621425962:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.737273Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.737456Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900042621425967:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.744551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:47.757653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-25T14:52:47.758734Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900042621425969:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:52:47.842770Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900042621426020:2395] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:48.276181Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900046916393363:2318], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:14: Error: Missing key column in input: Key for table: /Root/TestReplaceNotNullPk, code: 2029 2025-06-25T14:52:48.277295Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZmE1ZDdhZTctY2RlYmMxY2ItNjNmOWViN2QtMWU1ZWZhYzc=, ActorId: [1:7519900038326458559:2290], ActorState: ExecuteState, TraceId: 01jyks7gqx5kf1w91e93s437ht, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: 2025-06-25T14:52:48.308900Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900046916393372:2322], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:49: Error: Failed to convert type: Struct<'Key':Null,'Value':String> to Struct<'Key':Uint64,'Value':String?>
:1:49: Error: Failed to convert 'Key': Null to Uint64
:1:49: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T14:52:48.309219Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZmE1ZDdhZTctY2RlYmMxY2ItNjNmOWViN2QtMWU1ZWZhYzc=, ActorId: [1:7519900038326458559:2290], ActorState: ExecuteState, TraceId: 01jyks7gs41m446v1h7rexkk10, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 10806, MsgBus: 63592 2025-06-25T14:52:49.084056Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900053397762662:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:49.086943Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a9b/r3tmp/tmpbMD7a3/pdisk_1.dat 2025-06-25T14:52:49.253794Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:49.255157Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900053397762646:2080] 1750863169080485 != 1750863169080488 TServer::EnableGrpc on GrpcPort 10806, node 2 2025-06-25T14:52:49.272922Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:49.273027Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:49.275344Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:49.320374Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:49.320398Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:49.320406Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:49.320522Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63592 TClient is connected to server localhost:63592 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 202 ... 
6644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:14.538680Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:14.545399Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:14.664566Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:17.365756Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900173172174129:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:17.365851Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:17.400019Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:17.506516Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900173172174278:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:17.506622Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:17.506824Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900173172174283:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:17.512915Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:17.526815Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900173172174285:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:53:17.615883Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900173172174336:2427] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:18.659034Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900155992304345:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:18.659111Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:18.690788Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [7:7519900177467141781:2343], TxId: 281474976710663, task: 1. Ctx: { TraceId : 01jyks8e1x3vh8arqn2rcdge9z. SessionId : ydb://session/3?node_id=7&id=ZDlhNDU3NzktOTk3ZThkOWItNGUyMWMyZDAtNmQ1MGMzZjY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: BAD_REQUEST KIKIMR_BAD_COLUMN_TYPE: {
: Error: Tried to insert NULL value into NOT NULL column: Index1, code: 2031 }. 2025-06-25T14:53:18.691255Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [7:7519900177467141782:2344], TxId: 281474976710663, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jyks8e1x3vh8arqn2rcdge9z. SessionId : ydb://session/3?node_id=7&id=ZDlhNDU3NzktOTk3ZThkOWItNGUyMWMyZDAtNmQ1MGMzZjY=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [7:7519900177467141776:2290], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-25T14:53:18.691330Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [7:7519900177467141783:2345], TxId: 281474976710663, task: 3. Ctx: { TraceId : 01jyks8e1x3vh8arqn2rcdge9z. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=7&id=ZDlhNDU3NzktOTk3ZThkOWItNGUyMWMyZDAtNmQ1MGMzZjY=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [7:7519900177467141776:2290], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-25T14:53:18.691562Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [7:7519900177467141784:2346], TxId: 281474976710663, task: 4. Ctx: { CustomerSuppliedId : . TraceId : 01jyks8e1x3vh8arqn2rcdge9z. SessionId : ydb://session/3?node_id=7&id=ZDlhNDU3NzktOTk3ZThkOWItNGUyMWMyZDAtNmQ1MGMzZjY=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [7:7519900177467141776:2290], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-25T14:53:18.691867Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=7&id=ZDlhNDU3NzktOTk3ZThkOWItNGUyMWMyZDAtNmQ1MGMzZjY=, ActorId: [7:7519900173172174126:2290], ActorState: ExecuteState, TraceId: 01jyks8e1x3vh8arqn2rcdge9z, Create QueryResponse for error on request, msg: 2025-06-25T14:53:18.779469Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519900177467141810:2350], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing not null column in input: Index1. All not null columns should be initialized, code: 2032 2025-06-25T14:53:18.782094Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=ZDlhNDU3NzktOTk3ZThkOWItNGUyMWMyZDAtNmQ1MGMzZjY=, ActorId: [7:7519900173172174126:2290], ActorState: ExecuteState, TraceId: 01jyks8eh5c6n5hnn12ja29bv5, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T14:53:18.809453Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519900177467141829:2358], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing not null column in input: Index1. All not null columns should be initialized, code: 2032 2025-06-25T14:53:18.811707Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=ZDlhNDU3NzktOTk3ZThkOWItNGUyMWMyZDAtNmQ1MGMzZjY=, ActorId: [7:7519900173172174126:2290], ActorState: ExecuteState, TraceId: 01jyks8ej6bkhsswv9r1m221kn, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T14:53:18.836917Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519900177467141849:2366], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:14: Error: Missing not null column in input: Index1. All not null columns should be initialized, code: 2032 2025-06-25T14:53:18.838911Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=ZDlhNDU3NzktOTk3ZThkOWItNGUyMWMyZDAtNmQ1MGMzZjY=, ActorId: [7:7519900173172174126:2290], ActorState: ExecuteState, TraceId: 01jyks8ek26e1npacx0qmbmp4a, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T14:53:19.189289Z node 7 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-25T14:53:19.216492Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519900177467141868:2374], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
: Error: Tried to insert NULL value into NOT NULL column: Index1, code: 2031 2025-06-25T14:53:19.217841Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=ZDlhNDU3NzktOTk3ZThkOWItNGUyMWMyZDAtNmQ1MGMzZjY=, ActorId: [7:7519900173172174126:2290], ActorState: ExecuteState, TraceId: 01jyks8ekz5zynmc9s6j8dqtj4, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T14:53:19.787872Z node 7 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-25T14:53:19.792989Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519900181762109185:2383], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
: Error: Tried to insert NULL value into NOT NULL column: Index1, code: 2031 2025-06-25T14:53:19.796581Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=ZDlhNDU3NzktOTk3ZThkOWItNGUyMWMyZDAtNmQ1MGMzZjY=, ActorId: [7:7519900173172174126:2290], ActorState: ExecuteState, TraceId: 01jyks8ezsegw1yqfnrgcqdna9, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T14:53:20.424820Z node 7 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-25T14:53:20.429689Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7519900181762109211:2395], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
: Error: Tried to insert NULL value into NOT NULL column: Index1, code: 2031 2025-06-25T14:53:20.429941Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=7&id=ZDlhNDU3NzktOTk3ZThkOWItNGUyMWMyZDAtNmQ1MGMzZjY=, ActorId: [7:7519900173172174126:2290], ActorState: ExecuteState, TraceId: 01jyks8fjd49x3ce7y2gep8vny, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::CreateTableWithNotNullColumns [GOOD] Test command err: Trying to start YDB, gRPC: 16226, MsgBus: 17127 2025-06-25T14:52:50.657119Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900055898598992:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:50.669719Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a20/r3tmp/tmpLumk2R/pdisk_1.dat 2025-06-25T14:52:51.157582Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:51.175133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:51.179081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:51.181678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16226, node 1 2025-06-25T14:52:51.452894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:51.452917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:51.452924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:51.453022Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:52:51.672529Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17127 TClient is connected to server localhost:17127 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:52.267386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:52.289096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:54.053144Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900073078468768:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:54.053238Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:54.262423Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900073078468789:2308] txid# 281474976710658, issues: { message: "It is not allowed to create not null data column: Value" severity: 1 } 2025-06-25T14:52:54.297185Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900073078468797:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:54.297251Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:54.311351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 25481, MsgBus: 21938 2025-06-25T14:52:55.051151Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900077625676111:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:55.051192Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a20/r3tmp/tmpJtMOcN/pdisk_1.dat 2025-06-25T14:52:55.275121Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:55.284333Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:55.284407Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:55.287682Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25481, node 2 2025-06-25T14:52:55.360450Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:55.360482Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:55.360494Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:55.360622Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21938 TClient is connected to server localhost:21938 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:55.787310Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:55.796802Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:56.062590Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:57.961423Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900086215611313:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:57.961496Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:57.975722Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:58.022259Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900090510578711:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:58.022359Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:58.028436Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900090510578716:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:58.032770Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:58.043454Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900090510578718:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transactio ... Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:08.352227Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:08.365774Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:08.606591Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:10.709492Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519900143950749355:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:10.709596Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:10.745018Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519900143950749376:2302] txid# 281474976715658, issues: { message: "It is not allowed to create not null data column: Value" severity: 1 } 2025-06-25T14:53:10.762558Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519900143950749384:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:10.762667Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:10.787765Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 11148, MsgBus: 15788 2025-06-25T14:53:11.578351Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519900148291884861:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:11.578401Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a20/r3tmp/tmpyq25K5/pdisk_1.dat 2025-06-25T14:53:11.706303Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11148, node 6 2025-06-25T14:53:11.729030Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:11.729132Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:11.730583Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:11.768977Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:11.769011Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:11.769022Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:11.769178Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15788 TClient is connected to server localhost:15788 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:53:12.433856Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:12.443069Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:12.607395Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:15.368494Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519900165471754648:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:15.368606Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:15.390082Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 25179, MsgBus: 27015 2025-06-25T14:53:16.335781Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900169424249352:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:16.422761Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a20/r3tmp/tmpVVU8j4/pdisk_1.dat 2025-06-25T14:53:16.570211Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:16.572468Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900169424249258:2080] 1750863196316147 != 1750863196316150 2025-06-25T14:53:16.583687Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:16.583779Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:16.589741Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25179, node 7 2025-06-25T14:53:16.648006Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:16.648030Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:16.648045Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:16.648189Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27015 TClient is connected to server localhost:27015 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:53:17.289949Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:53:17.335236Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:20.499947Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpSort::TopParameter [GOOD] >> KqpSort::TopParameterFilter >> KqpJoinOrder::TPCHEveryQueryWorks+ColumnStore [GOOD] >> KqpNewEngine::StaleRO_IndexFollowers-EnableFollowers [GOOD] >> KqpNewEngine::UnionAllPure >> KqpNewEngine::MultiEffectsOnSameTable [GOOD] >> KqpNewEngine::LookupColumns >> KqpSqlIn::KeySuffix [GOOD] >> KqpSqlIn::KeySuffix_NotPointPrefix >> KqpNewEngine::PrecomputeKey [GOOD] >> KqpNewEngine::PrimaryView >> KqpNewEngine::FlatMapLambdaInnerPrecompute [GOOD] >> KqpNewEngine::DqSourceSequentialLimit >> KqpNewEngine::PkRangeSelect1 >> KqpSort::ReverseEightShardOptimized [GOOD] >> KqpSort::PassLimit >> KqpSqlIn::SimpleKey >> KqpMergeCn::TopSortBy_Utf8_Limit2 [GOOD] >> KqpMergeCn::TopSortBy_Timestamp_Limit2 >> TFstClassSrcIdPQTest::ProperPartitionSelected [GOOD] >> TPQCompatTest::DiscoverTopics ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCHEveryQueryWorks+ColumnStore [GOOD] Test command err: Trying to start YDB, gRPC: 9565, MsgBus: 11111 2025-06-25T14:47:14.844841Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898615950095548:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:14.845026Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e02/r3tmp/tmpBfgibh/pdisk_1.dat 2025-06-25T14:47:15.494181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:47:15.494275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:47:15.505844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9565, node 1 2025-06-25T14:47:15.665216Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:15.679504Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519898615950095351:2080] 1750862834765574 != 1750862834765577 2025-06-25T14:47:15.829094Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 
2025-06-25T14:47:15.949342Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:47:15.949363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:47:15.949370Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:47:15.949478Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11111 TClient is connected to server localhost:11111 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:47:16.819545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:47:16.844195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:47:19.241950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898637424932488:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:19.242031Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898637424932477:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:19.242304Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:47:19.246446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:47:19.266659Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898637424932491:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:47:19.333253Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898637424932542:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:47:19.829204Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898615950095548:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:47:19.829285Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:47:19.840269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:47:20.116261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898637424932813:2316];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:20.116264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898637424932812:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:47:20.116476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898637424932812:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:20.116513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898637424932813:2316];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:47:20.116735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898637424932813:2316];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:20.116801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898637424932812:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:47:20.116865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898637424932813:2316];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:20.116919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898637424932812:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:47:20.116978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037893;self_id=[1:7519898637424932813:2316];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:20.117013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898637424932812:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:47:20.117108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898637424932813:2316];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:20.117114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898637424932812:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:47:20.117224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898637424932812:2315];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:20.117233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898637424932813:2316];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:47:20.117333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898637424932813:2316];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:47:20.117437Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898637424932813:2316];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:47:20.117554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898637424932813:2316];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:47:20.117687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519898637424932813:2316];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:47:20.117963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7519898637424932812:2315];tablet ... 
;result=not_found; 2025-06-25T14:47:26.019016Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038005;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710667; 2025-06-25T14:47:26.019520Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038006;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710667;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=13;result=not_found; 2025-06-25T14:47:26.019657Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038007;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710667; 2025-06-25T14:47:26.023964Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224038006;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710667; 2025-06-25T14:47:26.779998Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jykrxp3c96ygy8vcg3drkc88, SessionId: CompileActor 2025-06-25 14:47:26.779 WARN ydb-core-kqp-ut-join(pid=431424, tid=0x00007FC42D086640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-25T14:47:30.423849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:47:30.423881Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:47:50.029452Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jykrycw8101wb2fv6qf3aewy, SessionId: CompileActor 2025-06-25 14:47:50.028 WARN ydb-core-kqp-ut-join(pid=431424, tid=0x00007FC42D086640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. 
Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-25T14:48:09.150233Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykryn1cb8c3r21pmjgep9qj", SessionId: ydb://session/3?node_id=1&id=ZWM5NWYzZGUtYzY2M2E5Ny1jMjQ2NmFhNS02NzJkMmNj, Slow query, duration: 11.409228s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- TPC-H/TPC-R National Market Share Query (Q8)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 0)\n-- using 1680793381 as a seed to the RNG\n\n$join1 = (\nselect\n l.l_extendedprice * ($z1_12 - l.l_discount) as volume,\n l.l_suppkey as l_suppkey,\n l.l_orderkey as l_orderkey\nfrom\n `/Root/part` as p\njoin\n `/Root/lineitem` as l\non\n p.p_partkey = l.l_partkey\nwhere\n p.p_type = 'ECONOMY ANODIZED STEEL'\n);\n$join2 = (\nselect\n j.volume as volume,\n j.l_orderkey as l_orderkey,\n s.s_nationkey as s_nationkey\nfrom\n $join1 as j\njoin\n `/Root/supplier` as s\non\n s.s_suppkey = j.l_suppkey\n);\n$join3 = (\nselect\n j.volume as volume,\n j.l_orderkey as l_orderkey,\n n.n_name as nation\nfrom\n $join2 as j\njoin\n `/Root/nation` as n\non\n n.n_nationkey = j.s_nationkey\n);\n$join4 = (\nselect\n j.volume as volume,\n j.nation as nation,\n DateTime::GetYear(cast(o.o_orderdate as Timestamp)) as o_year,\n o.o_custkey as o_custkey\nfrom\n $join3 as j\njoin\n `/Root/orders` as o\non\n o.o_orderkey = j.l_orderkey\nwhere o_orderdate between Date('1995-01-01') and Date('1996-12-31')\n);\n$join5 = (\nselect\n j.volume as volume,\n j.nation as nation,\n j.o_year as o_year,\n c.c_nationkey as c_nationkey\nfrom\n $join4 as j\njoin\n `/Root/customer` as c\non\n c.c_custkey = j.o_custkey\n);\n$join6 = (\nselect\n j.volume as volume,\n j.nation as nation,\n j.o_year as o_year,\n n.n_regionkey as n_regionkey\nfrom\n $join5 as j\njoin\n `/Root/nation` as n\non\n n.n_nationkey = j.c_nationkey\n);\n$join7 = (\nselect\n j.volume as volume,\n j.nation as nation,\n j.o_year as o_year\nfrom\n $join6 as j\njoin\n `/Root/region` as r\non\n r.r_regionkey = j.n_regionkey\nwhere\n r.r_name = 'AMERICA'\n);\n\nselect\n o_year,\n sum(case\n when nation = 'BRAZIL' then volume\n else $z0_12\n end) / sum(volume) as mkt_share\nfrom\n $join7 as all_nations\ngroup by\n o_year\norder by\n o_year;\n", parameters: 0b 2025-06-25T14:48:42.081558Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jykrzz1sc8tfvzqvm0vtd7gw, SessionId: CompileActor 2025-06-25 14:48:42.081 WARN ydb-core-kqp-ut-join(pid=431424, tid=0x00007FC42C877640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. 
Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-25T14:49:31.811527Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyks1g299pdy82fd65ebhd8x, SessionId: CompileActor 2025-06-25 14:49:31.811 WARN ydb-core-kqp-ut-join(pid=431424, tid=0x00007FC42D086640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-25T14:49:45.608063Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks1ksfa9yy6vcvbmna4v2d", SessionId: ydb://session/3?node_id=1&id=ZWM5NWYzZGUtYzY2M2E5Ny1jMjQ2NmFhNS02NzJkMmNj, Slow query, duration: 10.839109s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n\n-- TPC-H/TPC-R Minimum Cost Supplier Query (Q2)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 0)\n-- using 1680793381 as a seed to the RNG\n\nselect\n s_acctbal,\n s_name,\n n_name,\n p_partkey,\n p_mfgr,\n s_address,\n s_phone,\n s_comment\nfrom\n `/Root/part`\n cross join `/Root/supplier`\n cross join `/Root/partsupp`\n cross join `/Root/nation`\n cross join `/Root/region`\n cross join (\n select\n `/Root/partsupp`.ps_partkey as sc_ps_partkey,\n min(ps_supplycost) as min_ps_supplycost\n from\n `/Root/partsupp`\n cross join `/Root/supplier`\n cross join `/Root/nation`\n cross join `/Root/region`\n where\n s_suppkey = ps_suppkey\n and s_nationkey = n_nationkey\n and n_regionkey = r_regionkey\n and r_name = 'AMERICA'\n group by `/Root/partsupp`.ps_partkey\n ) as min_ps_supplycosts\nwhere\n p_partkey = ps_partkey\n and s_suppkey = ps_suppkey\n and p_size = 10\n and p_type like '%COPPER'\n and s_nationkey = n_nationkey\n and n_regionkey = r_regionkey\n and r_name = 'AMERICA'\n and ps_supplycost = min_ps_supplycost\n and p_partkey = ps_partkey\norder by\n s_acctbal desc,\n n_name,\n s_name,\n p_partkey\nlimit 100;\n", parameters: 0b 2025-06-25T14:50:01.936116Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyks2dsp4hdjpqm6tcyrdz7b, SessionId: CompileActor 2025-06-25 14:50:01.935 WARN ydb-core-kqp-ut-join(pid=431424, tid=0x00007FC42D086640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. 
Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-25T14:50:24.270605Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks2rmg7sq32fn8rcpa60rg", SessionId: ydb://session/3?node_id=1&id=ZWM5NWYzZGUtYzY2M2E5Ny1jMjQ2NmFhNS02NzJkMmNj, Slow query, duration: 11.773451s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n\n-- TPC-H/TPC-R National Market Share Query (Q8)\n-- TPC TPC-H Parameter Substitution (Version 2.17.2 build 0)\n-- using 1680793381 as a seed to the RNG\n\nselect\n o_year,\n sum(case\n when nation = 'MOZAMBIQUE' then volume\n else 0\n end) / sum(volume) as mkt_share\nfrom\n (\n select\n DateTime::GetYear(o_orderdate) as o_year,\n l_extendedprice * (1 - l_discount) as volume,\n n2.n_name as nation\n from\n `/Root/part`\n cross join `/Root/supplier`\n cross join `/Root/lineitem`\n cross join `/Root/orders`\n cross join `/Root/customer`\n cross join `/Root/nation` n1\n cross join `/Root/nation` n2\n cross join `/Root/region`\n where\n p_partkey = l_partkey\n and s_suppkey = l_suppkey\n and l_orderkey = o_orderkey\n and o_custkey = c_custkey\n and c_nationkey = n1.n_nationkey\n and n1.n_regionkey = r_regionkey\n and r_name = 'AFRICA'\n and s_nationkey = n2.n_nationkey\n and o_orderdate between date('1995-01-01') and date('1996-12-31')\n and p_type = 'ECONOMY PLATED COPPER'\n ) as all_nations\ngroup by\n o_year\norder by\n o_year;\n\n", parameters: 0b 2025-06-25T14:50:56.329045Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyks42b6etktkx6x0qb9py6f, SessionId: CompileActor 2025-06-25 14:50:56.328 WARN ydb-core-kqp-ut-join(pid=431424, tid=0x00007FC42D086640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-25T14:51:36.196671Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyks59r5bgy5sk2meb3vgrzg, SessionId: CompileActor 2025-06-25 14:51:36.196 WARN ydb-core-kqp-ut-join(pid=431424, tid=0x00007FC42D086640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-25T14:51:57.600619Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyks5yxw2dkp8tn2d905xdwx, SessionId: CompileActor 2025-06-25 14:51:57.600 WARN ydb-core-kqp-ut-join(pid=431424, tid=0x00007FC42C877640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed 2025-06-25T14:52:41.187164Z node 1 :KQP_YQL WARN: log.cpp:67: TraceId: 01jyks78zh7458b6xcq3wffhej, SessionId: CompileActor 2025-06-25 14:52:41.186 WARN ydb-core-kqp-ut-join(pid=431424, tid=0x00007FC42D086640) [KQP] kqp_opt_phy_olap_agg.cpp:48: Expected TCoMember callable to get column under aggregation. 
Got: Failed to render expression to pretty string: yql/essentials/ast/yql_expr.cpp:1973 BuildValueNode(): requirement ctx.AllowFreeArgs failed, message: Free arguments are not allowed >> KqpNotNullColumns::OptionalParametersDataQuery [GOOD] >> KqpNotNullColumns::OptionalParametersScanQuery >> KqpRanges::UpdateWhereInBigLiteralListPrefix [GOOD] >> KqpRanges::UpdateWhereInMultipleUpdate >> TAsyncIndexTests::DropTableWithInflightChanges[TabletReboots] [GOOD] >> KqpRanges::UpdateWhereInNoFullScan-UseSink [GOOD] >> KqpRanges::UpdateWhereInWithNull >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-13 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-14 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-61 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-62 >> KqpNewEngine::KeyColumnOrder >> KqpNewEngine::BrokenLocksOnUpdate [GOOD] >> KqpNewEngine::ComplexLookupLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::DropTableWithInflightChanges[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:18.778445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:18.778544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:18.778595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:18.778636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:18.778683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:18.778712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:18.778765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:18.778844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:18.779561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:18.779861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:18.854007Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:18.854061Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:18.854775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:18.865364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:18.868966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:18.869123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:18.875664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:18.875872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:18.876537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.876762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:18.879067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.879247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:18.880225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:18.880289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.880414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:18.880461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:18.880500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:18.880609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 
Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:18.886533Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:18.996241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:18.996455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.996648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:18.996689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:18.996901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:18.996975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:18.999180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.999356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:18.999525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.999602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:18.999645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:18.999678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:19.001394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.001448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: 
NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:19.001504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:19.003013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.003059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:19.003116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:19.003160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:19.006551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:19.008045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:19.008206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:19.009134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:19.009251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-25T14:53:25.602850Z node 114 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-25T14:53:25.602882Z node 114 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-25T14:53:25.602918Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-25T14:53:25.602988Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1003, ready parts: 2/3, is published: true 2025-06-25T14:53:25.604164Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-25T14:53:25.604217Z node 114 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:53:25.604479Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:53:25.604595Z node 114 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 3/3 2025-06-25T14:53:25.604634Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-25T14:53:25.604672Z node 114 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1003:0 progress is 3/3 2025-06-25T14:53:25.604703Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-25T14:53:25.604738Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1003, ready parts: 3/3, is published: true 2025-06-25T14:53:25.604773Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1003 ready parts: 3/3 2025-06-25T14:53:25.604811Z node 114 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:0 2025-06-25T14:53:25.604842Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1003:0 2025-06-25T14:53:25.604937Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:53:25.604974Z node 114 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:1 2025-06-25T14:53:25.604999Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1003:1 2025-06-25T14:53:25.605031Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:53:25.605057Z node 114 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1003:2 
2025-06-25T14:53:25.605079Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1003:2 2025-06-25T14:53:25.605121Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-25T14:53:25.605760Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:53:25.605909Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:53:25.607881Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:53:25.607956Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:53:25.609481Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:53:25.609562Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-25T14:53:25.614157Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 350 RawX2: 489626274076 } TabletId: 72075186233409546 State: 4 2025-06-25T14:53:25.614223Z node 114 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-06-25T14:53:25.615823Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:53:25.616146Z node 114 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409546 2025-06-25T14:53:25.616316Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:53:25.616543Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 Forgetting tablet 72075186233409546 2025-06-25T14:53:25.618704Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:53:25.618751Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-25T14:53:25.618820Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-25T14:53:25.618861Z node 114 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-25T14:53:25.618899Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:53:25.621128Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:53:25.621176Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409546 2025-06-25T14:53:25.621470Z node 114 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-25T14:53:25.621636Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-25T14:53:25.621667Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-25T14:53:25.622447Z node 114 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-25T14:53:25.622530Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-25T14:53:25.622557Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [114:639:2563] 2025-06-25T14:53:25.626520Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 353 RawX2: 489626274078 } TabletId: 72075186233409547 State: 4 2025-06-25T14:53:25.626591Z node 114 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409547, state: Offline, at schemeshard: 72057594046678944 2025-06-25T14:53:25.627868Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:53:25.628216Z node 114 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409547 2025-06-25T14:53:25.628392Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:53:25.628610Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409547 2025-06-25T14:53:25.630745Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:53:25.630789Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: 
TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:53:25.630843Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:53:25.633312Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:53:25.633368Z node 114 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409547 2025-06-25T14:53:25.633495Z node 114 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1003 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted 2025-06-25T14:53:25.633825Z node 114 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-25T14:53:25.633870Z node 114 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 >> TopicService::OnePartitionAndNoGapsInTheOffsets [GOOD] >> KqpNotNullColumns::AlterAddIndex [GOOD] >> KqpNewEngine::Delete-UseSink [GOOD] >> KqpNewEngine::DecimalColumn >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-37 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-38 >> KqpReturning::ReturningWorksIndexedDelete-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedDeleteV2+QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::AlterAddIndex [GOOD] Test command err: Trying to start YDB, gRPC: 2543, MsgBus: 5959 2025-06-25T14:52:43.920929Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900025454960580:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:43.926750Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a8d/r3tmp/tmpkgy7A9/pdisk_1.dat 2025-06-25T14:52:44.312714Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2543, node 1 2025-06-25T14:52:44.386969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:44.387051Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:44.388596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:44.509712Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:44.509739Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:44.509749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-25T14:52:44.509873Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5959 2025-06-25T14:52:44.936393Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5959 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:45.407063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:45.436889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:45.606246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:45.772064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:45.859357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:47.160334Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900042634831353:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.160485Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.603553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.647169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.680364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.750857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.790595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.824885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.864035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.925622Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900042634832010:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.925727Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.925786Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900042634832015:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.929855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:47.938539Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900042634832017:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:48.021455Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900046929799366:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:48.920735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900025454960580:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:48.920796Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 64811, MsgBus: 10297 2025-06-25T14:52:50.337934Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900057655030588:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:50.346292Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a8d/r3tmp/tmpGo7pj1/pdisk_1.dat 2025-06-25T14:52:50.498834Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subsc ... RN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:19.749206Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:19.750820Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3040, node 7 2025-06-25T14:53:19.807670Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:19.807692Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:19.807702Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:19.807838Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11009 TClient is connected to server localhost:11009 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:20.554969Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:20.560509Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:20.575024Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:20.616782Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:20.663083Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:20.915324Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:21.008017Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:23.761688Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900197423912139:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:23.761791Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:23.843207Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:23.896732Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:23.944585Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:23.981333Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:24.035135Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:24.092093Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:24.170804Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:24.275395Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900201718880092:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:24.275499Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:24.275735Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900201718880097:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:24.281164Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:24.294095Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900201718880099:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:24.388586Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900201718880150:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:24.592759Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900180244041340:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:24.592834Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:25.685743Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:25.783415Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:25.832863Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) >> KqpNewEngine::PureExpr >> KqpNewEngine::JoinMultiConsumer [GOOD] >> KqpNewEngine::JoinSameKey >> TopicService::MultiplePartitionsAndNoGapsInTheOffsets >> KqpNewEngine::ScalarMultiUsage [GOOD] >> KqpNewEngine::SequentialReadsPragma+Enabled >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning-UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert-UseSink-UseDataQuery >> KqpNewEngine::PkRangeSelect1 [GOOD] >> KqpNewEngine::PkRangeSelect2 >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant-StrictAclCheck >> KqpSqlIn::TableSource >> KqpNewEngine::UnionAllPure [GOOD] >> KqpNewEngine::StreamLookupForDataQuery+StreamLookupJoin >> KqpNewEngine::DqSourceSequentialLimit [GOOD] >> KqpNewEngine::DqSourceLocksEffects >> TPersQueueTest::WhenTheTopicIsDeletedAfterDecompressingTheData_Uncompressed [GOOD] >> TPersQueueTest::TestWriteStat >> TPersQueueTest::CheckKillBalancer [GOOD] >> TPersQueueTest::CheckDeleteTopic >> KqpSort::TopParameterFilter [GOOD] >> KqpNewEngine::LookupColumns [GOOD] >> KqpSort::PassLimit [GOOD] >> KqpSort::Offset >> KqpSqlIn::KeySuffix_NotPointPrefix [GOOD] >> KqpSqlIn::ComplexKey >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-62 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-63 >> 
SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-14 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-15 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSort::TopParameterFilter [GOOD] Test command err: Trying to start YDB, gRPC: 4055, MsgBus: 2336 2025-06-25T14:52:43.907021Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900025318346016:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:43.907656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a83/r3tmp/tmpmsB8SL/pdisk_1.dat 2025-06-25T14:52:44.307121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:44.307270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:44.311335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:44.344805Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:44.347486Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900025318345917:2080] 1750863163899695 != 1750863163899698 TServer::EnableGrpc on GrpcPort 4055, node 1 2025-06-25T14:52:44.516357Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:44.516378Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:44.516383Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:44.516470Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:52:44.923005Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2336 TClient is connected to server localhost:2336 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:45.447495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:45.472907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:45.488142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:45.648875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:45.810547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:45.880637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:47.096128Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900042498216729:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.096243Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.606394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.652281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.681706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.709570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.746493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.781936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.854783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.914018Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900042498217389:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.914190Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.914447Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900042498217394:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.919408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:47.928584Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900042498217396:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:48.021173Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900046793184743:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:48.908418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900025318346016:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:48.908490Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 1656, MsgBus: 5584 2025-06-25T14:52:50.153322Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900057036158859:2157];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a83/r ... tate: Unknown -> Disconnected 2025-06-25T14:53:23.161075Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:23.162363Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8199, node 7 2025-06-25T14:53:23.211183Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:23.211210Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:23.211220Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:23.211355Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4937 TClient is connected to server localhost:4937 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:53:23.816665Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:23.836884Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:23.926416Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:24.025155Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:24.120634Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:24.205382Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:27.357596Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900215575481477:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.357696Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.487734Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.529862Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.574829Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.612243Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.655969Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.731616Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.774963Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.870126Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900215575482143:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.870229Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.870553Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900215575482148:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.875184Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:27.886619Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900215575482150:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:27.972630Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900215575482201:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:28.008723Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900198395610688:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:28.008801Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ( (declare $limit (DataType 'Uint64)) (declare $value (DataType 'Int32)) (let $1 (KqpTable '"/Root/TwoShard" '"72057594046644480:2" '"" '1)) (let $2 '('"Key" '"Value1" '"Value2")) (let $3 (KqpRowsSourceSettings $1 $2 '() (Void) '())) (let $4 (DataType 'Int32)) (let $5 (Min (Uint64 '"1001") $limit)) (let $6 (StructType '('"Key" (OptionalType (DataType 'Uint32))) '('"Value1" (OptionalType (DataType 'String))) '('"Value2" (OptionalType $4)))) (let $7 '('('"_logical_id" '497) '('"_id" '"1241767f-b58b86f8-2f93c9b9-7bafd180") '('"_wide_channels" $6))) (let $8 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $3)) (lambda '($12) (block '( (let $13 (lambda '($16) (block '( (let $17 (Member $16 '"Value2")) (return (Member $16 '"Key") (Member $16 '"Value1") $17 (Coalesce (!= $17 $value) (Bool 'false))) )))) (let $14 (WideFilter (ExpandMap (ToFlow $12) $13) (lambda '($18 $19 $20 $21) $21) $5)) (let $15 (lambda '($22 $23 $24 $25) $22 $23 $24)) (return (FromFlow (WideMap $14 $15))) ))) $7)) (let $9 (DqCnUnionAll (TDqOutput $8 '"0"))) (let $10 (DqPhyStage '($9) (lambda '($26) (FromFlow (NarrowMap (Take (ToFlow $26) $5) (lambda '($27 $28 $29) (AsStruct '('"Key" $27) '('"Value1" $28) '('"Value2" $29)))))) '('('"_logical_id" '510) '('"_id" '"7573e668-b3146f60-f67759d7-a34fae33")))) (let $11 (DqCnResult (TDqOutput $10 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($8 $10) '($11) '('('"$limit") '('"$value")) '('('"type" '"data")))) '((KqpTxResultBinding (ListType $6) '"0" '"0")) '('('"type" '"data_query")))) ) >> TAsyncIndexTests::MergeBothWithReboots[PipeResets] [GOOD] >> KqpNewEngine::KeyColumnOrder [GOOD] >> KqpNewEngine::KeyColumnOrder2 >> KqpMergeCn::TopSortBy_Timestamp_Limit2 [GOOD] >> KqpMergeCn::TopSortBy_Interval_Limit3 >> KqpSort::TopSortResults [GOOD] >> KqpSort::UnionAllSortLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::LookupColumns [GOOD] Test command err: Trying to start YDB, gRPC: 1077, MsgBus: 64065 2025-06-25T14:52:43.903667Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900026279803817:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:43.903723Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a92/r3tmp/tmpuUrLas/pdisk_1.dat 2025-06-25T14:52:44.354146Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-25T14:52:44.358471Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900026279803796:2080] 1750863163898820 != 1750863163898823 2025-06-25T14:52:44.370303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:44.370418Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 1077, node 1 2025-06-25T14:52:44.373430Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:44.509216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:44.509246Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:44.509255Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:44.509379Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64065 2025-06-25T14:52:44.938219Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64065 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:45.341110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:52:45.392548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:45.583304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:45.739107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:45.827666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:47.005846Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900043459674617:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.005936Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.602189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.649593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.719095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.748536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.817733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.888247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.956189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.015068Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900047754642583:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:48.015158Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:48.015381Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900047754642588:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:48.018742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:48.029503Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900047754642590:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:48.113092Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900047754642641:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:48.903739Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900026279803817:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:48.903806Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 6775, MsgBus: 10472 2025-06-25T14:52:50.098297Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900055439527419:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:50.098369Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath ... d: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:20.857687Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900165228947522:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:20.857782Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 25175, MsgBus: 32018 2025-06-25T14:53:23.400030Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900199193844716:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:23.400191Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a92/r3tmp/tmpNwBVZd/pdisk_1.dat 2025-06-25T14:53:23.513091Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25175, node 7 2025-06-25T14:53:23.537753Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:23.537874Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:23.543755Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:23.584900Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:23.584928Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:23.584941Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:53:23.585095Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32018 TClient is connected to server localhost:32018 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:24.219688Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:24.229076Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:24.239447Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:24.317779Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:24.412508Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:24.514540Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:24.601861Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:27.748245Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900216373715500:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.748367Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.813527Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.892499Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.937287Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.984015Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:28.026055Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:28.065546Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:28.138612Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:28.229827Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900220668683459:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:28.229942Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:28.230005Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900220668683464:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:28.237509Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:28.259256Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900220668683466:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:28.335437Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900220668683517:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:28.400580Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900199193844716:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:28.400692Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSqlIn::SimpleKey [GOOD] >> KqpSqlIn::SelectNotAllElements ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeBothWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:15.319171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:15.319266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:15.319322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:15.319361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:15.319410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:15.319439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:15.319489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:15.319571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] 
Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:15.320389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:15.320709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:15.392896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:15.392947Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:15.393651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:15.403319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:15.406951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:15.407105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:15.413697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:15.413888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:15.414499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.414729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:15.417067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:15.417238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:15.418179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:15.418250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:15.418351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:15.418396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:15.418435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:15.418543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] 
recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:15.424827Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:15.560117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:15.560342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.560526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:15.560568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:15.560762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:15.560831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:15.567712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.567894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:15.568067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.568134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:15.568211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:15.568247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:15.570144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.570193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 
72057594046678944 2025-06-25T14:51:15.570238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:15.571917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.571968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.572022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:15.572067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:15.575543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:15.577186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:15.577377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:15.578280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.578399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
xImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 
MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:53:32.037287Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409550:2][72075186233409551][54:1108:2882] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:53:32.037412Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][54:1068:2882] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-25T14:53:32.037600Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409550:2][72075186233409551][54:1108:2882] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750863212010222 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750863212010222 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750863212010222 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:53:32.042818Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409550:2][72075186233409551][54:1108:2882] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-25T14:53:32.042950Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][54:1068:2882] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-25T14:53:32.245098Z node 54 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:53:32.245421Z node 54 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 353us result status StatusSuccess 2025-06-25T14:53:32.246388Z node 54 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 
StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpNotNullColumns::OptionalParametersScanQuery [GOOD] >> KqpNewEngine::PrimaryView [GOOD] >> KqpRanges::UpdateWhereInWithNull [GOOD] >> KqpRanges::ValidatePredicates >> KqpNewEngine::DecimalColumn [GOOD] >> KqpNewEngine::DecimalColumn35 >> KqpRanges::UpdateWhereInMultipleUpdate [GOOD] >> KqpRanges::UpdateWhereInFullScan+UseSink >> KqpNotNullColumns::UpdateNotNullPk >> KqpNewEngine::PureExpr [GOOD] >> KqpNewEngine::PureTxMixedWithDeferred >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-38 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-39 >> TPersQueueTest::ReadRuleServiceType [GOOD] >> TPersQueueTest::ReadRuleServiceTypeLimit >> KqpKv::ReadRows_SpecificKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::OptionalParametersScanQuery [GOOD] Test command err: Trying to start YDB, gRPC: 21943, MsgBus: 14808 2025-06-25T14:52:46.672133Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900041657017585:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:46.674773Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a72/r3tmp/tmpVD82h9/pdisk_1.dat 2025-06-25T14:52:47.024884Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:47.044006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:47.044084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:47.046071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21943, node 1 2025-06-25T14:52:47.106428Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will 
use file: (empty maybe) 2025-06-25T14:52:47.106453Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:47.106461Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:47.106587Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14808 TClient is connected to server localhost:14808 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:52:47.639890Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:47.666160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:49.410715Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900054541919920:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:49.410849Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:49.686826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:49.820738Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900054541920024:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:49.820805Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:49.821170Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900054541920029:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:49.826430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:49.848863Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900054541920031:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:52:49.905228Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900054541920082:2391] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:50.119670Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900058836887420:2316], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:14: Error: Missing not null column in input: Value. All not null columns should be initialized, code: 2032 2025-06-25T14:52:50.120367Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Yzc1NThkYzMtZGZhNGNhZjYtMjgzODlmZmQtMjM0NDU3MmQ=, ActorId: [1:7519900054541919908:2288], ActorState: ExecuteState, TraceId: 01jyks7jgk9y65b54xaez1b0t9, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T14:52:50.151769Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900058836887429:2320], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:47: Error: Failed to convert type: Struct<'Key':Int32,'Value':Null> to Struct<'Key':Uint64?,'Value':String>
:1:47: Error: Failed to convert 'Value': Null to String
:1:47: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T14:52:50.152052Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Yzc1NThkYzMtZGZhNGNhZjYtMjgzODlmZmQtMjM0NDU3MmQ=, ActorId: [1:7519900054541919908:2288], ActorState: ExecuteState, TraceId: 01jyks7jjqbwxw0v0j9y9d4dta, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 24694, MsgBus: 2635 2025-06-25T14:52:51.057509Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900056163021310:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:51.057772Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a72/r3tmp/tmpDw3YKr/pdisk_1.dat 2025-06-25T14:52:51.320670Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:51.320739Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:51.328648Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:51.332537Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900056163021125:2080] 1750863170969352 != 1750863170969355 2025-06-25T14:52:51.346055Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24694, node 2 2025-06-25T14:52:51.521169Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:51.521189Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:51.521201Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:51.521305Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2635 2025-06-25T14:52:51.973428Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2635 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:52.150855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) ... ; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a72/r3tmp/tmpjJtK2H/pdisk_1.dat 2025-06-25T14:53:25.432269Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:25.449578Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:25.449691Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:25.453851Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12026, node 7 2025-06-25T14:53:25.566671Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:25.566692Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:25.566700Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:25.566800Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3894 TClient is connected to server localhost:3894 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:53:26.236521Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:53:26.243865Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:26.252741Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:26.306620Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:26.346411Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:26.533555Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:26.624497Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:29.548958Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900223456272404:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:29.549060Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:29.618322Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:29.662737Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:29.743278Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:29.783450Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:29.829574Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:29.890905Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:29.971881Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:30.078069Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900227751240367:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:30.078201Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:30.078814Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900227751240372:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:30.083027Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:30.095447Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900227751240374:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:30.167555Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900227751240425:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:30.304409Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900206276401620:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:30.304561Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:31.537933Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:32.026698Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863212056, txId: 281474976715674] shutting down 2025-06-25T14:53:32.271962Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863212301, txId: 281474976715676] shutting down 2025-06-25T14:53:32.484639Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863212525, txId: 281474976715678] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PrimaryView [GOOD] Test command err: Trying to start YDB, gRPC: 20382, MsgBus: 19626 2025-06-25T14:52:44.699029Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900030577138406:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:44.699144Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a78/r3tmp/tmp3ITPai/pdisk_1.dat 2025-06-25T14:52:45.089514Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20382, node 1 2025-06-25T14:52:45.135882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:45.135980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:45.140889Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:45.198999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:45.199027Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T14:52:45.199037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:45.199164Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19626 TClient is connected to server localhost:19626 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:52:45.737991Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:45.871729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:45.883681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:45.898002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:46.057108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:46.204175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:46.289542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:47.886207Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900043462041831:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.886295Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:48.169409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.242114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.279380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.356646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.391488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.422888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.459213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.552393Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900047757009791:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:48.552466Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:48.552628Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900047757009796:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:48.555497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:48.565265Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900047757009798:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:48.635143Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900047757009849:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:49.699112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900030577138406:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:49.699185Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 7619, MsgBus: 23424 2025-06-25T14:52:50.807233Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900055574635268:2228];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a78/r3tmp/tmpLV82Yc/pdisk_1.dat 2025-06-25T14:52:50.933926Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/in ... # /home/runner/.ya/build/build_root/yft8/000a78/r3tmp/tmpmbRz6f/pdisk_1.dat 2025-06-25T14:53:23.549147Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:23.549238Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:23.552656Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:23.553565Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26794, node 7 2025-06-25T14:53:23.626271Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:23.626311Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:23.626325Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:23.626465Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13867 TClient is connected to server localhost:13867 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:24.253974Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:24.276741Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:53:24.349113Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:24.467455Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:24.543262Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:24.636083Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:27.510608Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900216158113596:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.510727Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.575383Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.655115Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.728981Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.800605Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.838385Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.917696Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.965222Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:28.069000Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900220453081563:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:28.069088Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:28.069131Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900220453081568:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:28.073159Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:28.086772Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900220453081570:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:28.144996Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900220453081621:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:28.416434Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900198978242819:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:28.416518Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:29.620566Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:29.761206Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:29.812842Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpNewEngine::ComplexLookupLimit [GOOD] >> TPersQueueTest::PreferredCluster_NonExistentPreferredCluster_SessionDiesOnlyAfterDelay [GOOD] >> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndRemoteClusterEnabledDelaySec_SessionDiesOnlyAfterDelay >> KqpReturning::ReturningTwice >> KqpNewEngine::PkRangeSelect2 [GOOD] >> KqpNewEngine::OnlineRO_Consistent >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit >> TPersQueueTest::DirectReadCleanCache [GOOD] >> TPersQueueTest::DirectReadRestartPQRB ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::ComplexLookupLimit [GOOD] Test command err: Trying to start YDB, gRPC: 1896, MsgBus: 21042 2025-06-25T14:52:48.712981Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900050587437593:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:48.713047Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a5e/r3tmp/tmpsjAq1V/pdisk_1.dat TServer::EnableGrpc on GrpcPort 1896, node 1 2025-06-25T14:52:49.133396Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:49.133463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:49.134966Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:49.148583Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:49.293548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:49.293572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:49.293584Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:49.293732Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21042 TClient is connected to server localhost:21042 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:52:49.740462Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:49.787035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:49.804006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:49.822726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:49.971781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:50.125909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:50.207973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:51.842379Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900063472341081:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:51.842478Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:52.144661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:52.179219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:52.207862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:52.235543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:52.308714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:52.377586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:52.407430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:52.461587Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900067767309041:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:52.461670Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:52.462000Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900067767309046:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:52.466054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:52.479616Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900067767309048:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:52.555300Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900067767309099:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:53.716233Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900050587437593:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:53.716290Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 9880, MsgBus: 12341 2025-06-25T14:52:54.737395Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900075716075009:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:54.737437Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner ... ED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } Trying to start YDB, gRPC: 13552, MsgBus: 4083 2025-06-25T14:53:26.926815Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900210398945487:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:26.926873Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a5e/r3tmp/tmptqpfUa/pdisk_1.dat 2025-06-25T14:53:27.063661Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:27.068659Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900210398945468:2080] 1750863206926244 != 1750863206926247 TServer::EnableGrpc on GrpcPort 13552, node 7 2025-06-25T14:53:27.091044Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:27.091192Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:27.092276Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:27.166334Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:27.166360Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:27.166372Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:27.166534Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4083 TClient is connected to server localhost:4083 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:27.829878Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:27.840193Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:27.906057Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:27.993049Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:28.101843Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:28.236775Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:31.231535Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900231873783592:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:31.231630Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:31.302815Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:31.350609Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:31.398565Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:31.461630Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:31.510787Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:31.553884Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:31.628141Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:31.708738Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900231873784252:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:31.708847Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:31.709105Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900231873784257:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:31.714562Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:31.731779Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900231873784259:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:31.817773Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900231873784310:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:31.926870Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900210398945487:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:31.926962Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:33.334542Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TAsyncIndexTests::CdcAndMergeWithReboots[PipeResets] [GOOD] >> KqpNamedExpressions::NamedExpressionChanged+UseSink >> KqpNewEngine::SequentialReadsPragma+Enabled [GOOD] >> KqpNewEngine::SequentialReadsPragma-Enabled >> TPersQueueTest::WhenTheTopicIsDeletedAfterReadingTheData_Compressed [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedBeforeDataIsDecompressed_Uncompressed >> KqpNewEngine::JoinSameKey [GOOD] >> KqpRanges::NullInKey >> KqpReturning::ReturningWorksIndexedDeleteV2+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedDeleteV2-QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::CdcAndMergeWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:18.768003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:18.768080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:18.768128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:18.768168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:18.768204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:18.768225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:18.768262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:18.768351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:18.768923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:18.769192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:18.830396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:18.830435Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:18.831009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:18.839428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:18.841939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:18.842071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:18.847868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:18.848033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:18.848569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.848784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:18.850781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.850949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:18.851673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:18.851739Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.851811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:18.851844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:18.851876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:18.851974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:18.857265Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:18.944453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:18.944656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.944839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:18.944881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:18.945084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:18.945165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:18.947279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.947458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:18.947644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.947718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:18.947768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:18.947805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:18.949725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.949782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:18.949829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:18.952555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.952613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.952675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:18.952727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:18.962571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:18.964585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:18.964805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:18.966015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.966157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
ion { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 2 IsBackup: false CdcStreams { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 6 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 
MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:53:36.020382Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409551:2][72075186233409546][54:1164:2942] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:53:36.020509Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409551:2][54:1133:2942] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:53:36.020682Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409551:2][72075186233409546][54:1164:2942] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750863215974923 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750863215974923 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 5 Group: 1750863215974923 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:53:36.024974Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409551:2][72075186233409546][54:1164:2942] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 5 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 5 2025-06-25T14:53:36.025098Z node 54 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409551:2][54:1133:2942] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:53:36.298640Z node 54 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:53:36.298952Z node 54 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 344us result status StatusSuccess 2025-06-25T14:53:36.299854Z node 54 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpNewEngine::KeyColumnOrder2 [GOOD] >> KqpNewEngine::LocksMultiShard >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-63 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-64 >> KqpSqlIn::TableSource [GOOD] >> KqpSqlIn::SimpleKey_Negated >> KqpRanges::IsNull >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-15 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-16 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::JoinSameKey [GOOD] Test command err: Trying to start YDB, gRPC: 63225, MsgBus: 61171 2025-06-25T14:52:47.369986Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900043631686084:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:47.370348Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a69/r3tmp/tmpCULkiR/pdisk_1.dat 2025-06-25T14:52:47.672726Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900043631686065:2080] 1750863167368358 != 1750863167368361 2025-06-25T14:52:47.693033Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63225, node 1 2025-06-25T14:52:47.791344Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:47.791445Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:47.792501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:47.798512Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:47.798534Z node 
1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:47.798561Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:47.798670Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61171 TClient is connected to server localhost:61171 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:48.302131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:48.314825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:48.322239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:48.382633Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:48.461818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:48.614817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:48.695109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:50.117957Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900056516589601:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:50.118074Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:50.540122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.581901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.620997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.657789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.695669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.767953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.847106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.945406Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900056516590269:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:50.945486Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:50.945832Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900056516590274:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:50.950264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:50.967673Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900056516590276:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:51.055744Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900060811557623:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:52.372443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900043631686084:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:52.372528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 14064, MsgBus: 7203 2025-06-25T14:52:53.351106Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900070748926738:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:53.351137Z node 2 :METADATA_PROVIDER ERROR: log.cpp ... :7519900206408139262:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:26.342914Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900189228267749:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:26.343003Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 62388, MsgBus: 2684 2025-06-25T14:53:28.888161Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900221231411965:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:28.888234Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a69/r3tmp/tmp7GA2vP/pdisk_1.dat 2025-06-25T14:53:29.034334Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:29.056176Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:29.056279Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:29.061395Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62388, node 7 2025-06-25T14:53:29.113725Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:29.113759Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:29.113773Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:53:29.113933Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2684 TClient is connected to server localhost:2684 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:29.890953Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:29.896194Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:29.903950Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:30.008623Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:30.255406Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:30.387972Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:33.017354Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900238411282746:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:33.017457Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:33.065399Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.111266Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.154668Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.199976Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.254996Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.341152Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.422957Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.526014Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900242706250707:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:33.526138Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900242706250712:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:33.526177Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:33.530974Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:33.543965Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900242706250714:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:33.640820Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900242706250765:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:33.888183Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900221231411965:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:33.888252Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpNewEngine::DqSourceLocksEffects [GOOD] >> KqpNewEngine::FullScanCount >> KqpSort::Offset [GOOD] >> KqpSort::OffsetPk >> KqpNotNullColumns::UpdateNotNullPk [GOOD] >> KqpNotNullColumns::UpdateNotNullPkPg >> KqpKv::ReadRows_SpecificKey [GOOD] >> KqpKv::ReadRows_NonExistentKeys >> KqpNewEngine::StreamLookupForDataQuery+StreamLookupJoin [GOOD] >> KqpNewEngine::StreamLookupForDataQuery-StreamLookupJoin >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant-StrictAclCheck [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant-DomainLoginOnly >> KqpReturning::ReturningWorksIndexedUpsert+QueryService >> KqpNewEngine::PureTxMixedWithDeferred [GOOD] >> KqpNewEngine::PrunePartitionsByLiteral >> KqpNotNullColumns::InsertNotNullPk >> TPQCompatTest::DiscoverTopics [GOOD] >> TPQCompatTest::SetupLockSession >> KqpSqlIn::SelectNotAllElements [GOOD] >> KqpSqlIn::SecondaryIndex_SimpleKey_In_And >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-39 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-40 >> TopicService::MultiplePartitionsAndNoGapsInTheOffsets [GOOD] >> KqpNewEngine::DecimalColumn35 [GOOD] >> KqpNewEngine::DeleteByKey >> KqpSort::UnionAllSortLimit [GOOD] >> KqpSqlIn::ComplexKey [GOOD] >> KqpSqlIn::Dict >> KqpMergeCn::TopSortBy_Interval_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Decimal_Limit5 >> KqpNewEngine::OnlineRO_Consistent [GOOD] >> KqpNewEngine::OnlineRO_Inconsistent >> KqpReturning::ReturningTwice [GOOD] >> KqpReturning::ReplaceSerial >> TTopicYqlTest::DropTopicYql ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSort::UnionAllSortLimit [GOOD] Test command err: Trying to start YDB, gRPC: 24216, MsgBus: 7404 2025-06-25T14:52:46.717507Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900039534625832:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:46.722238Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a6f/r3tmp/tmpP16zqk/pdisk_1.dat 2025-06-25T14:52:47.105024Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24216, node 1 2025-06-25T14:52:47.111422Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:47.112207Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:47.114372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:47.179477Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:47.179501Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:47.179522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:47.179647Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7404 TClient is connected to server localhost:7404 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:52:47.724538Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:47.735738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:47.749138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:52:47.762977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:47.847213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:47.988601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:52:48.065427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:49.670697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900052419529224:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:49.670802Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:49.950061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:49.986961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.013069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.038208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.063722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.097325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.166001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.239052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900056714497187:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:50.239134Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:50.239319Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900056714497192:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:50.242539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:50.253613Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900056714497194:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:50.346270Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900056714497245:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:51.749272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900039534625832:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:51.749310Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 6521, MsgBus: 20862 2025-06-25T14:52:53.019295Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900070280889411:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:53.019346Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/ ... r_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:27.489619Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 6339, MsgBus: 14891 2025-06-25T14:53:33.143471Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900242320469946:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:33.143539Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a6f/r3tmp/tmpRscCrK/pdisk_1.dat 2025-06-25T14:53:33.331312Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:33.345428Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:33.345530Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:33.351461Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6339, node 7 2025-06-25T14:53:33.430545Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:33.430581Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:33.430597Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-25T14:53:33.430765Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14891 TClient is connected to server localhost:14891 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:34.078327Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:34.085246Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:34.100853Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:34.157228Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:53:34.200627Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:34.435857Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:34.520563Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:37.542079Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900259500340744:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:37.542175Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:37.605059Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:37.655426Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:37.727046Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:37.775066Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:37.815944Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:37.862561Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:37.940664Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:38.053051Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900263795308701:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:38.053126Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:38.053328Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900263795308706:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:38.057859Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:38.073768Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900263795308708:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:38.146751Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900242320469946:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:38.146855Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:38.160046Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900263795308759:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpRanges::UpdateWhereInFullScan+UseSink [GOOD] >> KqpRanges::UpdateWhereInFullScan-UseSink >> KqpKv::ReadRows_NonExistentKeys [GOOD] >> KqpKv::ReadRows_NotFullPK >> KqpNotNullColumns::UpdateNotNullPkPg [GOOD] >> KqpNotNullColumns::UpdateNotNull >> KqpNamedExpressions::NamedExpressionChanged+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionChanged-UseSink >> KqpNewEngine::LocksMultiShard [GOOD] >> KqpNewEngine::LocksEffects >> KqpRanges::NullInKey [GOOD] >> KqpRanges::NullInKeySuffix >> KqpNotNullColumns::InsertNotNullPk [GOOD] >> KqpNotNullColumns::InsertNotNullPkPg+useSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-64 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-65 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-16 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-17 >> KqpNewEngine::SequentialReadsPragma-Enabled [GOOD] >> KqpNotNullColumns::UpsertNotNullPk >> KqpRanges::IsNull [GOOD] >> KqpRanges::IsNotNullSecondComponent ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/tx/unittest >> KqpLocksTricky::TestNoLocksIssueInteractiveTx-withSink [GOOD] Test command err: Trying to start YDB, gRPC: 26585, MsgBus: 20928 2025-06-25T14:52:12.985772Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:52:12.985923Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:52:12.986045Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00124e/r3tmp/tmpAKwbJg/pdisk_1.dat 2025-06-25T14:52:13.311635Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 26585, node 1 2025-06-25T14:52:13.457755Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:13.462611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:13.462673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:13.462720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:13.463156Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:52:13.463470Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863130325909 != 1750863130325913 2025-06-25T14:52:13.518558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:13.518707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:13.533616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20928 TClient is connected to server localhost:20928 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:13.895100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:13.971529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:14.139413Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:14.357187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:14.744469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:15.100264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:15.923307Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1686:3282], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.923580Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:15.952667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:16.147490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:16.407108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:16.685929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:16.953492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.307942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.610079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:17.987636Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2357:3777], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:17.987746Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:17.988040Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2362:3782], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:17.993449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:18.132557Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2364:3784], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:52:18.197605Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2422:3823] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:19.473915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:19.686208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ES ... lude/__memory/allocate_at_least.h:41:19 #5 0x204cbd35 in __split_buffer /-S/contrib/libs/cxxsupp/libcxx/include/__split_buffer:354:25 #6 0x204cbd35 in __push_back_slow_path /-S/contrib/libs/cxxsupp/libcxx/include/vector:1541:47 #7 0x204cbd35 in push_back /-S/contrib/libs/cxxsupp/libcxx/include/vector:1569:13 #8 0x204cbd35 in grpc_core::Server::Start() /-S/contrib/libs/grpc/src/core/lib/surface/server.cc:702:17 #9 0x204dac6e in grpc_server_start /-S/contrib/libs/grpc/src/core/lib/surface/server.cc:1715:37 #10 0x36d13c17 in grpc::Server::Start(grpc::ServerCompletionQueue**, unsigned long) /-S/contrib/libs/grpc/src/cpp/server/server_cc.cc:1214:3 #11 0x36d0630a in grpc::ServerBuilder::BuildAndStart() /-S/contrib/libs/grpc/src/cpp/server/server_builder.cc:445:11 #12 0x36cf7322 in NYdbGrpc::TGRpcServer::Start() /-S/ydb/library/grpc/server/grpc_server.cpp:249:23 #13 0x3c726c99 in NKikimr::Tests::TServer::EnableGRpc(NYdbGrpc::TServerOptions const&, unsigned int, std::__y1::optional>> const&) /-S/ydb/core/testlib/test_client.cpp:759:21 #14 0x3c72b71a in NKikimr::Tests::TServer::EnableGRpc(unsigned short, unsigned int, std::__y1::optional>> const&) /-S/ydb/core/testlib/test_client.cpp:763:9 #15 0x4a32d492 in NKikimr::NKqp::TKikimrRunner::TKikimrRunner(NKikimr::NKqp::TKikimrSettings const&) /-S/ydb/core/kqp/ut/common/kqp_ut_common.cpp:172:17 #16 0x19985883 in NKikimr::NKqp::NTestSuiteKqpLocksTricky::TTestCaseTestNoLocksIssueInteractiveTx::Execute_(NUnitTest::TTestContext&) /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:139:23 #17 0x1995f4e7 in operator() /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:31:1 #18 0x1995f4e7 in __invoke<(lambda at /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:31:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #19 0x1995f4e7 in __call<(lambda at /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:31:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #20 0x1995f4e7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #21 0x1995f4e7 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #22 0x1a4589d5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #23 0x1a4589d5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #24 0x1a4589d5 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) 
/-S/library/cpp/testing/unittest/utmain.cpp:525:20 #25 0x1a428378 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #26 0x1995e393 in NKikimr::NKqp::NTestSuiteKqpLocksTricky::TCurrentTest::Execute() /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:31:1 #27 0x1a429c45 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #28 0x1a452f4c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #29 0x7f801e9dad8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) Indirect leak of 8 byte(s) in 1 object(s) allocated from: #0 0x19c928dd in operator new(unsigned long) /-S/contrib/libs/clang18-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x204cf4f2 in __libcpp_operator_new /-S/contrib/libs/cxxsupp/libcxx/include/new:271:10 #2 0x204cf4f2 in __libcpp_allocate /-S/contrib/libs/cxxsupp/libcxx/include/new:295:10 #3 0x204cf4f2 in allocate /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocator.h:103:32 #4 0x204cf4f2 in __allocate_at_least > /-S/contrib/libs/cxxsupp/libcxx/include/__memory/allocate_at_least.h:41:19 #5 0x204cf4f2 in __split_buffer /-S/contrib/libs/cxxsupp/libcxx/include/__split_buffer:354:25 #6 0x204cf4f2 in __push_back_slow_path /-S/contrib/libs/cxxsupp/libcxx/include/vector:1541:47 #7 0x204cf4f2 in push_back /-S/contrib/libs/cxxsupp/libcxx/include/vector:1557:13 #8 0x204cf4f2 in grpc_core::Server::RegisterCompletionQueue(grpc_completion_queue*) /-S/contrib/libs/grpc/src/core/lib/surface/server.cc:794:8 #9 0x36d05dd7 in grpc::ServerBuilder::BuildAndStart() /-S/contrib/libs/grpc/src/cpp/server/server_builder.cc:396:5 #10 0x36cf7322 in NYdbGrpc::TGRpcServer::Start() /-S/ydb/library/grpc/server/grpc_server.cpp:249:23 #11 0x3c726c99 in NKikimr::Tests::TServer::EnableGRpc(NYdbGrpc::TServerOptions const&, unsigned int, std::__y1::optional>> const&) /-S/ydb/core/testlib/test_client.cpp:759:21 #12 0x3c72b71a in NKikimr::Tests::TServer::EnableGRpc(unsigned short, unsigned int, std::__y1::optional>> const&) /-S/ydb/core/testlib/test_client.cpp:763:9 #13 0x4a32d492 in NKikimr::NKqp::TKikimrRunner::TKikimrRunner(NKikimr::NKqp::TKikimrSettings const&) /-S/ydb/core/kqp/ut/common/kqp_ut_common.cpp:172:17 #14 0x19985883 in NKikimr::NKqp::NTestSuiteKqpLocksTricky::TTestCaseTestNoLocksIssueInteractiveTx::Execute_(NUnitTest::TTestContext&) /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:139:23 #15 0x1995f4e7 in operator() /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:31:1 #16 0x1995f4e7 in __invoke<(lambda at /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:31:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #17 0x1995f4e7 in __call<(lambda at /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:31:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #18 0x1995f4e7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #19 0x1995f4e7 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #20 0x1a4589d5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #21 0x1a4589d5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #22 0x1a4589d5 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #23 0x1a428378 in 
NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #24 0x1995e393 in NKikimr::NKqp::NTestSuiteKqpLocksTricky::TCurrentTest::Execute() /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:31:1 #25 0x1a429c45 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #26 0x1a452f4c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #27 0x7f801e9dad8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) Indirect leak of 8 byte(s) in 1 object(s) allocated from: #0 0x19c928dd in operator new(unsigned long) /-S/contrib/libs/clang18-rt/lib/asan/asan_new_delete.cpp:86:3 #1 0x1fb5b8a1 in grpc_core::internal::StatusAllocHeapPtr(y_absl::lts_y_20240722::Status) /-S/contrib/libs/grpc/src/core/lib/gprpp/status_helper.cc:427:25 #2 0x1fca9f12 in grpc_core::CallCombiner::Cancel(y_absl::lts_y_20240722::Status) /-S/contrib/libs/grpc/src/core/lib/iomgr/call_combiner.cc:233:25 #3 0x1fc5253e in grpc_core::FilterStackCall::CancelWithError(y_absl::lts_y_20240722::Status) /-S/contrib/libs/grpc/src/core/lib/surface/call.cc:1037:18 #4 0x1fc4dcac in grpc_core::Call::CancelWithStatus(grpc_status_code, char const*) /-S/contrib/libs/grpc/src/core/lib/surface/call.cc:366:3 #5 0x1fc6f783 in grpc_call_cancel_with_status /-S/contrib/libs/grpc/src/core/lib/surface/call.cc:3499:30 #6 0x36d33116 in grpc::ServerContextBase::TryCancel() const /-S/contrib/libs/grpc/src/cpp/server/server_context.cc:347:7 #7 0x36cf3f2c in NYdbGrpc::TGrpcServiceProtectiable::StopService() /-S/ydb/library/grpc/server/grpc_server.cpp:64:26 #8 0x36cfa63e in NYdbGrpc::TGRpcServer::Stop() /-S/ydb/library/grpc/server/grpc_server.cpp:277:18 #9 0x3c74d127 in Shutdown /-S/ydb/core/testlib/test_client.h:408:33 #10 0x3c74d127 in ShutdownGRpc /-S/ydb/core/testlib/test_client.h:365:30 #11 0x3c74d127 in NKikimr::Tests::TServer::~TServer() /-S/ydb/core/testlib/test_client.cpp:1712:9 #12 0x3c74d8dd in NKikimr::Tests::TServer::~TServer() /-S/ydb/core/testlib/test_client.cpp:1711:25 #13 0x19957f49 in CheckedDelete /-S/util/generic/ptr.h:36:5 #14 0x19957f49 in Destroy /-S/util/generic/ptr.h:57:9 #15 0x19957f49 in DoDestroy /-S/util/generic/ptr.h:376:13 #16 0x19957f49 in Reset /-S/util/generic/ptr.h:319:13 #17 0x19957f49 in Destroy /-S/util/generic/ptr.h:310:9 #18 0x19957f49 in Reset /-S/util/generic/ptr.h:329:9 #19 0x19957f49 in NKikimr::NKqp::TKikimrRunner::~TKikimrRunner() /-S/ydb/core/kqp/ut/common/kqp_ut_common.h:171:16 #20 0x19988942 in NKikimr::NKqp::NTestSuiteKqpLocksTricky::TTestCaseTestNoLocksIssueInteractiveTx::Execute_(NUnitTest::TTestContext&) /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:249:5 #21 0x1995f4e7 in operator() /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:31:1 #22 0x1995f4e7 in __invoke<(lambda at /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:31:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #23 0x1995f4e7 in __call<(lambda at /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:31:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #24 0x1995f4e7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #25 0x1995f4e7 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #26 0x1a4589d5 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #27 0x1a4589d5 in operator() 
/-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #28 0x1a4589d5 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #29 0x1a428378 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #30 0x1995e393 in NKikimr::NKqp::NTestSuiteKqpLocksTricky::TCurrentTest::Execute() /-S/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp:31:1 #31 0x1a429c45 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #32 0x1a452f4c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #33 0x7f801e9dad8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) SUMMARY: AddressSanitizer: 413440 byte(s) leaked in 5076 allocation(s). ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::SequentialReadsPragma-Enabled [GOOD] Test command err: Trying to start YDB, gRPC: 24549, MsgBus: 24596 2025-06-25T14:52:48.810937Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900049699764951:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:48.810991Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a53/r3tmp/tmptvWkcU/pdisk_1.dat 2025-06-25T14:52:49.268576Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24549, node 1 2025-06-25T14:52:49.275587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:49.275713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:49.277139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:49.363885Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:49.363918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:49.363928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:49.364067Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24596 TClient is connected to server localhost:24596 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:52:49.827275Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:49.867097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:49.890424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:51.858567Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900062584667440:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:51.858656Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:51.858961Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900062584667452:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:51.862602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:51.876403Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900062584667454:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:52:51.936931Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900062584667505:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 5519, MsgBus: 18262 2025-06-25T14:52:53.120652Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900068264578357:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:53.120723Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a53/r3tmp/tmpvqOwst/pdisk_1.dat 2025-06-25T14:52:53.333245Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:53.334170Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900068264578335:2080] 1750863173120301 != 1750863173120304 2025-06-25T14:52:53.347188Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:53.347253Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:53.351012Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5519, node 2 2025-06-25T14:52:53.452904Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:53.452926Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:53.452934Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:53.453034Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18262 TClient is connected to server localhost:18262 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:52:53.993596Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:54.016472Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:54.030736Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:54.180427Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:54.184654Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:52:54.384441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:54.452188Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:56.283582Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900081149481860:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:56.283671Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool defaul ... log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900222525402268:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:34.276520Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:35.581053Z node 6 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037892 Cancelled read: {[6:7519900248295208676:2486], 2} Trying to start YDB, gRPC: 13603, MsgBus: 26067 2025-06-25T14:53:36.882162Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900254857720481:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:36.882227Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a53/r3tmp/tmphCvoeA/pdisk_1.dat 2025-06-25T14:53:37.073417Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:37.074537Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900254857720452:2080] 1750863216881447 != 1750863216881450 2025-06-25T14:53:37.088159Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:37.088537Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:37.092782Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13603, node 7 2025-06-25T14:53:37.260933Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:37.260962Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:37.260973Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:37.261125Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26067 TClient is connected to server localhost:26067 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:53:37.891754Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:53:37.926588Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:53:37.952621Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:38.025128Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:38.227995Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:38.309087Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:41.270690Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900276332558559:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:41.270811Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:41.344900Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:41.394959Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:41.439113Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:41.485196Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:41.564642Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:41.618723Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:41.701866Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:41.787955Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900276332559220:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:41.788060Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:41.788269Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900276332559225:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:41.793666Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:41.812221Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900276332559227:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:41.875408Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900276332559278:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:41.884560Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900254857720481:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:41.884636Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpSqlIn::SimpleKey_Negated [GOOD] >> KqpSqlIn::TupleParameter >> KqpNewEngine::PrunePartitionsByLiteral [GOOD] >> KqpNewEngine::PrunePartitionsByExpr >> KqpSort::OffsetPk [GOOD] >> KqpSort::OffsetTopSort >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-40 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-41 >> KqpKv::ReadRows_NotFullPK [GOOD] >> KqpKv::ReadRows_SpecificReturnValue >> KqpReturning::ReturningWorksIndexedDeleteV2-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedInsert+QueryService |87.5%| [TA] $(B)/ydb/core/kqp/ut/tx/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpNewEngine::FullScanCount [GOOD] >> KqpNewEngine::DeleteOn+UseSink >> KqpReturning::ReplaceSerial [GOOD] >> KqpReturning::ReturningSerial >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex+UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex-UseSink+UseDataQuery >> KqpKv::ReadRows_UnknownTable >> KqpNewEngine::StreamLookupForDataQuery-StreamLookupJoin [GOOD] |87.5%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/tx/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpReturning::ReturningWorksIndexedUpsert+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedUpsert-QueryService >> KqpNotNullColumns::UpdateNotNull [GOOD] >> KqpNotNullColumns::UpdateTable_DontChangeNotNull >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant-DomainLoginOnly [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant-DomainLoginOnly-StrictAclCheck >> KqpNewEngine::OnlineRO_Inconsistent [GOOD] >> KqpNewEngine::Nondeterministic >> KqpNotNullColumns::InsertNotNullPkPg+useSink [GOOD] >> KqpNotNullColumns::InsertNotNullPkPg-useSink >> KqpNotNullColumns::UpsertNotNullPk [GOOD] >> KqpNotNullColumns::UpsertNotNull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::FullScanCount [GOOD] Test command err: Trying to start YDB, gRPC: 22447, MsgBus: 4610 2025-06-25T14:52:57.248755Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900088589657353:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:57.248802Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a0b/r3tmp/tmpKvined/pdisk_1.dat 2025-06-25T14:52:57.666142Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:57.668079Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900088589657333:2080] 1750863177242043 != 1750863177242046 TServer::EnableGrpc on GrpcPort 22447, node 1 2025-06-25T14:52:57.725927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:57.726043Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:57.729683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:57.828848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:57.828871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:57.828883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:57.828995Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4610 TClient is connected to server localhost:4610 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:52:58.283805Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:58.428472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:58.451761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:58.461269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:58.595146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:58.730964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:58.808674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:00.324347Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900101474560855:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:00.324494Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:00.625744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:00.655223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:00.684272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:00.710604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:00.788324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:00.857688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:00.926875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:00.975511Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900101474561524:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:00.975564Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:00.975623Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900101474561529:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:00.978730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:00.989438Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900101474561531:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:01.089450Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900105769528878:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:02.249134Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900088589657353:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:02.249219Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 25620, MsgBus: 61962 2025-06-25T14:53:03.345232Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900111219159833:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:03.360804Z node 2 :METADATA_PROVIDER ERROR: log.cpp:7 ... :7307199536658146131:7762515]; 2025-06-25T14:53:38.774160Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a0b/r3tmp/tmpmiiGl5/pdisk_1.dat 2025-06-25T14:53:39.008087Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:39.010064Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:39.024299Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:39.028773Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64067, node 7 2025-06-25T14:53:39.135070Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:39.135094Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:39.135104Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:39.135233Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22123 TClient is connected to server localhost:22123 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:53:39.824534Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:39.834702Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:39.855881Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:39.981908Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:40.208669Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:40.335695Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:43.017648Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900278524778757:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.017766Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.091127Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.165605Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.201638Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.250208Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.291487Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.369797Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.424672Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.573818Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900282819746716:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.573944Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.574316Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900282819746721:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.578892Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:43.601231Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900282819746723:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:43.678074Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900282819746774:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:43.776405Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900261344907963:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:43.776481Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Warning: Type annotation, code: 1030
:3:17: Warning: At function: RemovePrefixMembers, At function: PersistableRepr, At function: SqlProject
:3:33: Warning: At function: Filter, At lambda, At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108
: Warning: Type annotation, code: 1030
:3:17: Warning: At function: RemovePrefixMembers, At function: PersistableRepr, At function: SqlProject
:3:33: Warning: At function: Filter, At lambda, At function: Coalesce
:3:58: Warning: At function: SqlIn
:3:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 >> TPersQueueTest::ReadRuleServiceTypeLimit [GOOD] >> TPersQueueTest::ReadRuleDisallowDefaultServiceType ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::StreamLookupForDataQuery-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 32265, MsgBus: 21791 2025-06-25T14:52:43.925387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900027314805197:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:43.929637Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a88/r3tmp/tmpKSc1gG/pdisk_1.dat 2025-06-25T14:52:44.310126Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900027314805011:2080] 1750863163905722 != 1750863163905725 2025-06-25T14:52:44.318152Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32265, node 1 2025-06-25T14:52:44.385888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:44.385987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:44.387971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:44.507573Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:44.507599Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:44.507637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:44.507799Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:52:44.883557Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21791 TClient is connected to server localhost:21791 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:45.377672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:46.920733Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900040199707540:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:46.920837Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.601935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.729379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.774732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:52:47.819232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710762:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.850600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710763:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:52:47.876695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.952258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710766:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.989109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710767:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:52:48.015138Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710770:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.049287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710771:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:52:48.078158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710774:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.123057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710775:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:52:48.143820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.184034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710778:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.253235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710779:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:52:48.288053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710782:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:48.317157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose 
itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710783:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:52:48.358109Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900048789643472:2373], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resou ... /tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:35.306583Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519900248992359384:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:35.413556Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519900248992359435:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:35.620430Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900227517520628:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:35.690407Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 29282, MsgBus: 9162 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a88/r3tmp/tmp3KhBVr/pdisk_1.dat 2025-06-25T14:53:39.364450Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900266973849698:2080] 1750863219166917 != 1750863219166920 2025-06-25T14:53:39.364764Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:39.379489Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:53:39.382282Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:39.382380Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:39.384808Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29282, node 7 2025-06-25T14:53:39.448929Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:39.448953Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:39.448964Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:39.449126Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9162 TClient is connected to server localhost:9162 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:53:40.148461Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:40.217751Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:40.232629Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:40.365553Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:40.568168Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:40.661303Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:43.721748Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900284153720508:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.721841Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.803328Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.851230Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.893707Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.934590Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:44.020611Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:44.075260Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:44.149861Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:44.303883Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900288448688469:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:44.304000Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:44.304266Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900288448688474:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:44.308670Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:44.324925Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900288448688476:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:44.394550Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900288448688528:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpSqlIn::SecondaryIndex_SimpleKey_In_And [GOOD] >> KqpSqlIn::SecondaryIndex_SimpleKey_In_And_In >> KqpNewEngine::LocksEffects [GOOD] >> KqpNewEngine::LeftSemiJoin >> KqpNamedExpressions::NamedExpressionChanged-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged+UseSink >> KqpNewEngine::DeleteByKey [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-17 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-18 >> KqpKv::ReadRows_SpecificReturnValue [GOOD] >> KqpKv::ReadRows_TimeoutCancelsReads >> KqpMergeCn::TopSortBy_Decimal_Limit5 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-65 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-66 >> KqpNewEngine::BatchUpload >> KqpRanges::NullInKeySuffix [GOOD] >> KqpRanges::NullInPredicate >> KqpNotNullColumns::UpsertNotNullPkPg >> KqpRanges::UpdateWhereInFullScan-UseSink [GOOD] >> KqpRanges::ScanKeyPrefix ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::DeleteByKey [GOOD] Test command err: Trying to start YDB, gRPC: 27909, MsgBus: 9958 2025-06-25T14:53:03.120378Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900111906212891:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:03.120454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a06/r3tmp/tmpvPryhl/pdisk_1.dat 2025-06-25T14:53:03.590485Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27909, node 1 2025-06-25T14:53:03.620916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:03.621776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:03.639173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:03.672833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:03.672860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:03.672865Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:03.672968Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9958 2025-06-25T14:53:04.140497Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server 
localhost:9958 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:04.376551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:04.412740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:06.245465Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900124791115390:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:06.245564Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:06.532046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:06.713546Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900124791115492:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:06.713644Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:06.714197Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900124791115497:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:06.718700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:06.736515Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900124791115499:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:53:06.815672Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900124791115550:2394] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:08.120021Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900111906212891:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:08.120088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 20163, MsgBus: 21001 2025-06-25T14:53:08.654722Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900132457093400:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:08.654775Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a06/r3tmp/tmpAW045N/pdisk_1.dat 2025-06-25T14:53:08.805010Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:08.806773Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900132457093379:2080] 1750863188653814 != 1750863188653817 TServer::EnableGrpc on GrpcPort 20163, node 2 2025-06-25T14:53:08.820997Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:08.821073Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:08.822660Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:08.868717Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:08.868736Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:08.868742Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:08.868827Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21001 TClient is connected to server localhost:21001 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:53:09.284582Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:53:09.294327Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:09.376372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:09.512581Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/a ... 
ion.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 1046, MsgBus: 29443 2025-06-25T14:53:41.394212Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900274379109726:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:41.437912Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a06/r3tmp/tmpt8ERsz/pdisk_1.dat 2025-06-25T14:53:41.566480Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:41.571223Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900274379109589:2080] 1750863221372510 != 1750863221372513 2025-06-25T14:53:41.583166Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:41.583266Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:41.585307Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1046, node 7 2025-06-25T14:53:41.663945Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:41.663976Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:41.663987Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:41.664138Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29443 TClient is connected to server localhost:29443 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:53:42.362021Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:42.372813Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:42.396748Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:42.399374Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:53:42.453834Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:42.634586Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:42.723725Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:45.853773Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900291558980394:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:45.853886Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:45.932748Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:45.986819Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.062665Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.103904Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.161829Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.249838Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.292330Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.373776Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900295853948353:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:46.373881Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:46.374339Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900295853948358:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:46.379763Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:46.389440Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900274379109726:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:46.389630Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:46.400340Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900295853948360:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:46.483822Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900295853948414:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpRanges::IsNotNullSecondComponent [GOOD] >> KqpRanges::IsNotNullInValue >> KqpSqlIn::Dict [GOOD] >> KqpSqlIn::Delete >> KqpKv::ReadRows_UnknownTable [GOOD] >> KqpMergeCn::TopSortByDesc_Double_Limit3 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpMergeCn::TopSortBy_Decimal_Limit5 [GOOD] Test command err: Trying to start YDB, gRPC: 62184, MsgBus: 28042 2025-06-25T14:52:51.652424Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900063031344300:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:51.691141Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a12/r3tmp/tmpAfmtln/pdisk_1.dat 2025-06-25T14:52:52.274596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:52.274693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:52.277620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:52.317641Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900063031344270:2080] 1750863171638689 != 1750863171638692 2025-06-25T14:52:52.324651Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62184, node 1 2025-06-25T14:52:52.464618Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:52.464639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:52.464660Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:52.464771Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28042 2025-06-25T14:52:52.698671Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28042 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:53.007409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:53.041658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:52:53.049888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:53.211669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:53.403865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:53.483474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:54.984611Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900075916247798:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:54.984726Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:55.326845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:55.362438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:55.394051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:55.426573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:55.458571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:55.533603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:55.603612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:55.701702Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900080211215764:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:55.701752Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:55.701923Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900080211215770:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:55.704901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:55.714611Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900080211215772:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:55.777671Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900080211215823:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:56.664601Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900063031344300:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:56.677884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:56.779201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... anager: discarding snapshot; our snapshot: [step: 1750863220491, txId: 281474976710674] shutting down Trying to start YDB, gRPC: 26558, MsgBus: 23648 2025-06-25T14:53:41.642842Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7519900275637223790:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:41.642919Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a12/r3tmp/tmpQha4mc/pdisk_1.dat 2025-06-25T14:53:41.773540Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:41.791924Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:41.792026Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:41.793220Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26558, node 8 2025-06-25T14:53:41.877145Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:41.877168Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:41.877180Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:41.877315Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23648 TClient is connected to server localhost:23648 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:42.557648Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:42.574622Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:42.668273Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:42.696476Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:42.968478Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:43.067808Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:45.997444Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519900292817094583:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:45.997532Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:46.074635Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.122265Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.199866Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.253195Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.328001Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.414839Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.516802Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:46.598278Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519900297112062543:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:46.598411Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:46.598747Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519900297112062548:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:46.604947Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:46.619449Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7519900297112062550:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:46.643180Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7519900275637223790:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:46.643293Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:46.687661Z node 8 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [8:7519900297112062601:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:48.306518Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:49.738695Z node 8 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863229759, txId: 281474976715674] shutting down >> KqpNotNullColumns::UpsertNotNull [GOOD] >> KqpNotNullColumns::UpsertNotNullPg >> TPersQueueTest::PreferredCluster_EnabledRemotePreferredClusterAndRemoteClusterEnabledDelaySec_SessionDiesOnlyAfterDelay [GOOD] >> TPersQueueTest::PreferredCluster_RemotePreferredClusterEnabledWhileSessionInitializing_SessionDiesOnlyAfterInitializationAndDelay >> KqpNewEngine::PrunePartitionsByExpr [GOOD] >> KqpNewEngine::PruneWritePartitions+UseSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-41 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-42 >> KqpNewEngine::InShardsWrite >> KqpNotNullColumns::UpdateTable_DontChangeNotNull [GOOD] >> KqpNotNullColumns::UpdateNotNullPg >> KqpNewEngine::MultiSelect >> KqpNewEngine::DeleteOn+UseSink [GOOD] >> KqpNewEngine::DeleteOn-UseSink >> KqpNotNullColumns::InsertNotNullPkPg-useSink [GOOD] >> KqpNotNullColumns::InsertNotNullPg+useSink >> TPersQueueTest::CheckDeleteTopic [GOOD] >> TPersQueueTest::CheckDecompressionTasksWithoutSession >> KqpNewEngine::Nondeterministic [GOOD] >> KqpNewEngine::OrderedScalarContext >> KqpSqlIn::TupleParameter [GOOD] >> KqpSqlIn::TupleLiteral >> KqpNotNullColumns::UpsertNotNullPkPg [GOOD] >> KqpRanges::DateKeyPredicate >> KqpReturning::ReturningSerial [GOOD] >> KqpReturning::ReturningWorks+QueryService >> TPersQueueTest::WhenTheTopicIsDeletedBeforeDataIsDecompressed_Uncompressed [GOOD] >> TPersQueueTest::WhenTheTopicIsDeletedAfterReadingTheData_Uncompressed >> KqpSort::OffsetTopSort [GOOD] >> KqpReturning::ReturningWorksIndexedUpsert-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedReplace+QueryService >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-66 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-67 >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] >> TPersQueueTest::AllEqual [GOOD] >> TPersQueueTest::BadSids >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-18 [GOOD] >> 
SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-19 >> TPQCompatTest::SetupLockSession [GOOD] >> TPQCompatTest::BadTopics >> KqpNamedExpressions::NamedExpressionRandomChanged+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged-UseSink >> KqpNotNullColumns::UpsertNotNullPg [GOOD] >> KqpNotNullColumns::UpdateTable_DontChangeNotNullWithIndex >> KqpNewEngine::BatchUpload [GOOD] >> KqpNewEngine::Aggregate >> KqpNewEngine::LeftSemiJoin [GOOD] >> KqpNewEngine::LocksInRoTx >> KqpReturning::ReturningWorksIndexedInsert+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedInsert-QueryService ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSort::OffsetTopSort [GOOD] Test command err: Trying to start YDB, gRPC: 31640, MsgBus: 30240 2025-06-25T14:53:06.799841Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900124683290885:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:06.807097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009fc/r3tmp/tmpIo1Hq6/pdisk_1.dat 2025-06-25T14:53:07.159817Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:07.161108Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900124683290758:2080] 1750863186764424 != 1750863186764427 TServer::EnableGrpc on GrpcPort 31640, node 1 2025-06-25T14:53:07.213061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:07.213208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:07.215327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:07.221283Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:07.221306Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:07.221314Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:07.221427Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30240 TClient is connected to server localhost:30240 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:07.787846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:07.802767Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:07.820462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:07.966758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:08.156488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:08.243919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:09.782291Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900137568194273:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:09.782410Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:10.154541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:10.226652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:10.277822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:10.309126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:10.338316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:10.372268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:10.442944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:10.501682Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900141863162238:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:10.501732Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:10.501777Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900141863162243:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:10.504974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:10.513918Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900141863162245:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:10.588012Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900141863162296:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:11.800400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900124683290885:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:11.800462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17864, MsgBus: 28043 2025-06-25T14:53:12.568252Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900149453596159:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:12.570469Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... sts.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900262055007701:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:43.870227Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 7468, MsgBus: 23030 2025-06-25T14:53:46.686147Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900295589151260:2200];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009fc/r3tmp/tmpamZavz/pdisk_1.dat 2025-06-25T14:53:46.812031Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:53:46.899975Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:46.900081Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:46.906442Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:46.907368Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900295589151073:2080] 1750863226661875 != 1750863226661878 2025-06-25T14:53:46.922711Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7468, node 7 2025-06-25T14:53:46.981611Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:46.981640Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:46.981654Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:53:46.981794Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23030 TClient is connected to server localhost:23030 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:53:47.681126Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:47.698610Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:47.706148Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:47.718579Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:47.819271Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:48.037664Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:48.139545Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:51.138597Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900317063989182:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:51.138714Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:51.225860Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.291365Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.365543Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.421523Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.476728Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.558688Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.643518Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.682770Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900295589151260:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:51.682835Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:51.727801Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519900317063989844:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:51.727906Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:51.728225Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900317063989849:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:51.732954Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:51.752771Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900317063989851:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:51.817900Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900317063989903:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> SchemeReqAdminAccessInTenant::ClusterAdminCanAdministerTenant-DomainLoginOnly-StrictAclCheck [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant >> KqpMergeCn::TopSortByDesc_Double_Limit3 [GOOD] >> KqpMergeCn::TopSortBy_Date_Limit4 >> KqpRanges::NullInPredicate [GOOD] >> KqpRanges::NullInPredicateRow >> TTopicYqlTest::DropTopicYql [GOOD] >> TTopicYqlTest::CreateTopicYqlBackCompatibility >> KqpNotNullColumns::InsertNotNullPg+useSink [GOOD] >> KqpNotNullColumns::InsertNotNullPg-useSink >> KqpNotNullColumns::UpdateNotNullPg [GOOD] >> KqpNotNullColumns::UpdateOnNotNull >> KqpRanges::IsNotNullInValue [GOOD] >> KqpRanges::IsNotNullInJsonValue >> KqpRanges::ScanKeyPrefix [GOOD] >> KqpNewEngine::ReadAfterWrite >> KqpNewEngine::InShardsWrite [GOOD] >> KqpNewEngine::Join >> KqpNewEngine::DeleteOn-UseSink [GOOD] >> KqpNewEngine::DeleteWithBuiltin+UseSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-42 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-43 >> KqpNewEngine::PruneWritePartitions+UseSink [GOOD] >> KqpNewEngine::PruneWritePartitions-UseSink >> KqpSqlIn::SecondaryIndex_SimpleKey_In_And_In [GOOD] >> KqpSqlIn::SecondaryIndex_TupleParameter >> KqpNewEngine::MultiSelect [GOOD] >> KqpNewEngine::MultiOutput ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpRanges::ScanKeyPrefix [GOOD] Test command err: Trying to start YDB, gRPC: 26095, MsgBus: 13511 2025-06-25T14:53:04.067223Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900116952790310:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:04.067373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a01/r3tmp/tmpxp6ZFs/pdisk_1.dat 2025-06-25T14:53:04.474362Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:04.475150Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900112657822806:2080] 1750863183992623 != 1750863183992626 TServer::EnableGrpc on GrpcPort 26095, node 1 2025-06-25T14:53:04.562025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:04.562098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:04.569295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:04.594605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:04.594627Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-06-25T14:53:04.594637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:04.594741Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13511 TClient is connected to server localhost:13511 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:53:05.052478Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:05.163016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:05.177272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:05.195854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:05.389638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:05.559797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:05.655925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:07.114037Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900129837693621:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:07.114133Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:07.396778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:07.426062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:07.458784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:07.490690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:07.517345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:07.566620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:07.638831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:07.731202Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900129837694289:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:07.731268Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:07.731455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900129837694294:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:07.735323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:07.750278Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900129837694296:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:07.851567Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900129837694347:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:09.054180Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900116952790310:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:09.054245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 9524, MsgBus: 25551 2025-06-25T14:53:10.276699Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900143112684585:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:10.276743Z node 2 :METADATA_PROVIDER ERROR: log.cpp ... hemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Warning: Type annotation, code: 1030
:1:44: Warning: At lambda, At function: Coalesce
:1:58: Warning: At function: SqlIn
:1:58: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 Trying to start YDB, gRPC: 11323, MsgBus: 27835 2025-06-25T14:53:51.985857Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900317610221670:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:51.986077Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a01/r3tmp/tmpjzuH1Z/pdisk_1.dat 2025-06-25T14:53:52.265508Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:52.265603Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:52.267493Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:52.268901Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900317610221574:2080] 1750863231961632 != 1750863231961635 2025-06-25T14:53:52.286229Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11323, node 7 2025-06-25T14:53:52.350679Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:52.350705Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:52.350715Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:52.350852Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27835 2025-06-25T14:53:53.001715Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27835 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:53:53.128296Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:53.139564Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:53.225198Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:53.428350Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:53.565143Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:56.218133Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900339085059681:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:56.218236Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:56.278192Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:56.319189Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:56.403440Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:56.450515Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:56.500195Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:56.551647Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:56.604379Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:56.739169Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900339085060336:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:56.739267Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:56.739485Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900339085060341:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:56.744103Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:56.756756Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900339085060343:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:56.824521Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900339085060394:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:56.988627Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900317610221670:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:56.988772Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpRanges::DateKeyPredicate [GOOD] >> KqpRanges::DuplicateKeyPredicateLiteral >> KqpAgg::AggWithLookup >> KqpNewEngine::OrderedScalarContext [GOOD] >> KqpNewEngine::PagingNoPredicateExtract >> TPersQueueTest::StoreNoMoreThanXSourceIDs [GOOD] >> TPersQueueTest::SetupWriteSessionOnDisabledCluster >> KqpSqlIn::Delete [GOOD] >> KqpSqlIn::InWithCast >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-67 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-68 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-19 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-20 >> KqpNewEngine::Aggregate [GOOD] >> KqpNewEngine::AggregateTuple >> KqpNotNullColumns::UpdateTable_DontChangeNotNullWithIndex [GOOD] >> KqpNotNullColumns::UpdateTable_UniqIndex >> KqpNotNullColumns::UpdateOnNotNull [GOOD] >> KqpNotNullColumns::UpdateOnNotNullPg >> KqpNotNullColumns::InsertNotNullPg-useSink [GOOD] >> KqpNotNullColumns::JoinBothTablesWithNotNullPk+StreamLookup >> TPersQueueTest::ReadRuleDisallowDefaultServiceType [GOOD] >> TPersQueueTest::ReadRuleServiceTypeMigration >> KqpNewEngine::LocksInRoTx [GOOD] >> KqpNewEngine::LiteralKeys >> KqpNamedExpressions::NamedExpressionRandomChanged-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged2+UseSink >> KqpMergeCn::TopSortBy_Date_Limit4 [GOOD] >> KqpMergeCn::TopSortByDesc_Datetime_Limit3 >> KqpReturning::ReturningWorks+QueryService [GOOD] >> KqpReturning::ReturningWorks-QueryService >> KqpSqlIn::TupleLiteral [GOOD] >> KqpSqlIn::TupleSelect >> KqpNewEngine::ReadAfterWrite [GOOD] >> KqpNewEngine::Replace >> KqpNewEngine::DeleteWithBuiltin+UseSink [GOOD] >> KqpNewEngine::DeleteWithBuiltin-UseSink >> KqpNewEngine::Join [GOOD] >> KqpNewEngine::JoinIdxLookup >> KqpRanges::NullInPredicateRow [GOOD] >> KqpRanges::NoFullScanAtScanQuery >> KqpNewEngine::MultiOutput [GOOD] >> KqpNewEngine::MultiStatement >> KqpReturning::ReturningWorksIndexedReplace+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedReplace-QueryService >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-43 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-44 >> KqpRanges::IsNotNullInJsonValue [GOOD] >> KqpRanges::IsNotNullInJsonValue2 >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-StrictAclCheck >> KqpNewEngine::PruneWritePartitions-UseSink [GOOD] >> KqpNewEngine::PruneEffectPartitions-UseSink >> KqpRanges::DuplicateKeyPredicateLiteral [GOOD] >> KqpRanges::DuplicateCompositeKeyPredicate >> 
KqpReturning::ReturningWorksIndexedInsert-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedOperationsWithDefault+QueryService >> KqpNotNullColumns::UpdateOnNotNullPg [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-68 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-69 >> KqpNewEngine::AggregateTuple [GOOD] >> KqpNewEngine::AsyncIndexUpdate >> KqpAgg::AggWithLookup [GOOD] >> KqpAgg::AggWithSelfLookup >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-20 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-21 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::UpdateOnNotNullPg [GOOD] Test command err: Trying to start YDB, gRPC: 24761, MsgBus: 8467 2025-06-25T14:53:34.333357Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900247154793395:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:34.333459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e9/r3tmp/tmp7O1OV9/pdisk_1.dat 2025-06-25T14:53:34.743906Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24761, node 1 2025-06-25T14:53:34.800416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:34.800554Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:34.818247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:34.837992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:34.838035Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:34.838044Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:34.838162Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8467 TClient is connected to server localhost:8467 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:53:35.376550Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:35.528333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:35.542656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:37.251757Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900260039695892:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:37.251854Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:37.595746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:37.735609Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900260039695997:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:37.735687Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:37.735975Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900260039696002:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:37.740029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:37.766833Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900260039696004:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:53:37.842357Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900260039696055:2395] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:38.168279Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900264334663416:2322], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:61: Error: At function: KiUpdateTable!
:1:61: Error: Cannot update primary key column: Key 2025-06-25T14:53:38.170142Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NWE2OWViZTYtNzQwZjJiMjgtZjhhZTg1NWUtZWJhNzAxYzM=, ActorId: [1:7519900260039695874:2289], ActorState: ExecuteState, TraceId: 01jyks91eh83fswb0zxh099ezn, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:53:38.192096Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900264334663425:2326], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:63: Error: At function: KiUpdateTable!
:1:63: Error: Cannot update primary key column: Key 2025-06-25T14:53:38.193285Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NWE2OWViZTYtNzQwZjJiMjgtZjhhZTg1NWUtZWJhNzAxYzM=, ActorId: [1:7519900260039695874:2289], ActorState: ExecuteState, TraceId: 01jyks91g10e03yp50menvp2qd, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 21229, MsgBus: 8716 2025-06-25T14:53:39.011121Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900267444318017:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:39.011950Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e9/r3tmp/tmpy9FkST/pdisk_1.dat 2025-06-25T14:53:39.223558Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:39.223632Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:39.225745Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:39.227277Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:39.232410Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900263149350520:2080] 1750863218961714 != 1750863218961717 TServer::EnableGrpc on GrpcPort 21229, node 2 2025-06-25T14:53:39.340907Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:39.340934Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:39.340942Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:39.341058Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8716 TClient is connected to server localhost:8716 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:53:39.753657Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:53:40.008591Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:42.092452Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp: ... 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:00.238924Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:03.044267Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519900371367350528:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:03.044374Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:03.053106Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:03.125867Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519900371367350628:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:03.125986Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:03.126225Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519900371367350633:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:03.131161Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:03.142137Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519900371367350635:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:54:03.235884Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519900371367350686:2391] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:03.451521Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:7519900371367350748:2321], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:63: Error: At function: KiWriteTable!
:1:45: Error: Failed to convert type: Struct<'Key':Int32,'Value':Null> to Struct<'Key':Uint64?,'Value':String>
:1:45: Error: Failed to convert 'Value': Null to String
:1:45: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T14:54:03.453885Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=6&id=YjViYjQ4NzAtOGUxNGIzZjAtZGVmYTg3NmEtZmE4MjU3Mjg=, ActorId: [6:7519900371367350525:2290], ActorState: ExecuteState, TraceId: 01jyks9t5be999jadh54t7cyw1, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 19226, MsgBus: 9470 2025-06-25T14:54:04.418792Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900375508284540:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:04.418872Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e9/r3tmp/tmpFhcc1E/pdisk_1.dat 2025-06-25T14:54:04.658060Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:04.678525Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:04.678617Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:04.681445Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19226, node 7 2025-06-25T14:54:04.756004Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:04.756027Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:04.756037Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:04.756162Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9470 TClient is connected to server localhost:9470 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:54:05.337768Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:05.345075Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:05.426999Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:08.245644Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900392688154331:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:08.245747Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:08.269101Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:08.344519Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900392688154431:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:08.344625Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:08.345300Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900392688154436:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:08.350045Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:08.361240Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900392688154438:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:54:08.433795Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900392688154489:2391] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:08.815692Z node 7 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [7:7519900392688154562:2290], TxId: 281474976715663, task: 1. Ctx: { SessionId : ydb://session/3?node_id=7&id=ODQ3N2YxMzQtOTU3ZTNhMjctMzliYjIyZTctZWMyNzVjMmE=. CustomerSuppliedId : . TraceId : 01jyks9z9jf294sv2fmy90nx2e. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: BAD_REQUEST KIKIMR_BAD_COLUMN_TYPE: {
: Error: Tried to insert NULL value into NOT NULL column: Value, code: 2031 }. 2025-06-25T14:54:08.816200Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=7&id=ODQ3N2YxMzQtOTU3ZTNhMjctMzliYjIyZTctZWMyNzVjMmE=, ActorId: [7:7519900392688154320:2290], ActorState: ExecuteState, TraceId: 01jyks9z9jf294sv2fmy90nx2e, Create QueryResponse for error on request, msg: >> TPersQueueTest::DirectReadRestartPQRB [GOOD] >> TPersQueueTest::DirectReadRestartTablet >> KqpNewEngine::PagingNoPredicateExtract [GOOD] >> KqpSqlIn::SecondaryIndex_TupleParameter [GOOD] >> KqpSqlIn::SecondaryIndex_TupleLiteral >> KqpNewEngine::Replace [GOOD] >> KqpNewEngine::ReadRangeWithParams >> KqpSqlIn::InWithCast [GOOD] >> KqpNotNullColumns::JoinBothTablesWithNotNullPk+StreamLookup [GOOD] >> KqpNotNullColumns::JoinBothTablesWithNotNullPk-StreamLookup >> TPersQueueTest::PreferredCluster_RemotePreferredClusterEnabledWhileSessionInitializing_SessionDiesOnlyAfterInitializationAndDelay [GOOD] >> TPersQueueTest::PartitionsMapping >> TAsyncIndexTests::SplitIndexWithReboots[TabletReboots] [GOOD] >> TPersQueueTest::CheckDecompressionTasksWithoutSession [GOOD] >> TPersQueueTest::Codecs_InitWriteSession_DefaultTopicSupportedCodecsInInitResponse >> KqpNewEngine::MultiStatement [GOOD] >> KqpNewEngine::MultiStatementMixPure >> KqpNewEngine::JoinIdxLookup [GOOD] >> KqpNewEngine::JoinIdxLookupWithPredicate >> KqpNewEngine::DeleteWithBuiltin-UseSink [GOOD] >> KqpNewEngine::DeleteON >> KqpNewEngine::SimpleUpsertSelect >> KqpNotNullColumns::UpdateTable_UniqIndex [GOOD] >> KqpNotNullColumns::UpdateTable_UniqIndexPg ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PagingNoPredicateExtract [GOOD] Test command err: Trying to start YDB, gRPC: 29734, MsgBus: 8443 2025-06-25T14:53:24.294865Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900202520839077:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:24.294914Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009f9/r3tmp/tmpQ5TdD3/pdisk_1.dat 2025-06-25T14:53:24.753496Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:24.753607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:24.758985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:24.768220Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29734, node 1 2025-06-25T14:53:24.875123Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:24.875207Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:24.875219Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:24.875357Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8443 
TClient is connected to server localhost:8443 2025-06-25T14:53:25.280911Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:25.433769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:25.445214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:25.458044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:25.616220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:25.776167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:25.862998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:27.330129Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900215405742369:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.330232Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.663924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.696251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.734005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.800590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.870503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.937367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.970029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:28.059219Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900219700710340:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:28.059291Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:28.059575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900219700710345:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:28.063432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:28.072358Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900219700710347:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:28.171082Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900219700710398:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:29.296058Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900202520839077:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:29.296144Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 18787, MsgBus: 4229 2025-06-25T14:53:30.110944Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900230394728793:2156];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009f9/r3tmp/tmpTbio5a/pdisk_1.dat 2025-06-25T14:53:30.162386Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initi ... 19900358562463945:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:00.656397Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900337087625293:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:00.672573Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 18263, MsgBus: 17146 2025-06-25T14:54:03.089133Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900369121968412:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:03.089604Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009f9/r3tmp/tmpAEjjFW/pdisk_1.dat 2025-06-25T14:54:03.254671Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:03.271323Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:03.271592Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:03.273240Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18263, node 7 2025-06-25T14:54:03.345835Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:03.345864Z node 7 :NET_CLASSIFIER 
WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:03.345877Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:03.346055Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17146 TClient is connected to server localhost:17146 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:54:04.089256Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:04.094489Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:04.105755Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:04.225955Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:04.435306Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:04.525513Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:07.412446Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900386301839189:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:07.412555Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:07.490357Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:07.595185Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:07.640324Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:07.690596Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:07.740079Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:07.801880Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:07.849793Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:07.951744Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900386301839851:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:07.951857Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:07.952266Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900386301839856:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:07.956581Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:07.973978Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900386301839858:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:08.067707Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900390596807205:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:08.097891Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900369121968412:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:08.097975Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpNewEngine::LiteralKeys [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged2+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged2-UseSink >> TTopicYqlTest::CreateTopicYqlBackCompatibility [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitIndexWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:17.545637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:17.545748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:17.545792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:17.545827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:17.545870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:17.545895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:17.545937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 
1.000000s, InflightLimit# 10 2025-06-25T14:51:17.546011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:17.546628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:17.546922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:17.614558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:17.614601Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:17.615230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:17.623794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:17.626434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:17.626595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:17.632038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:17.632212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:17.632653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:17.632821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:17.634908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:17.635084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:17.636012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:17.636075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:17.636185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:17.636222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:17.636268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:17.636396Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:17.642539Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:17.752181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:17.752378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:17.752516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:17.752544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:17.752742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:17.752808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:17.754279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:17.754411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:17.754537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:17.754583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:17.754615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:17.754635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:17.755867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:51:17.755908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:17.755936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:17.757017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:17.757053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:17.757089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:17.757121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:17.764808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:17.766456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:17.766609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:17.767420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:17.767533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
ntToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false 
BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409547 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:54:12.064418Z node 88 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:54:12.064770Z node 88 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 424us result status StatusSuccess 2025-06-25T14:54:12.065805Z node 88 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: 
EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\004\000\000\0002\000\000\000\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-44 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-45 >> KqpMergeCn::TopSortByDesc_Datetime_Limit3 [GOOD] >> KqpMergeCn::TopSortByDesc_Bool_And_PKUint64_Limit4 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::InWithCast [GOOD] Test command err: Trying to start YDB, gRPC: 22105, MsgBus: 3536 2025-06-25T14:53:05.858274Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900119441425135:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:05.859469Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009fe/r3tmp/tmpaBFJr9/pdisk_1.dat 2025-06-25T14:53:06.264447Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900119441425035:2080] 1750863185821176 != 1750863185821179 2025-06-25T14:53:06.300863Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:06.306470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:06.306716Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:06.344434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22105, node 1 2025-06-25T14:53:06.473170Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:06.473201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:06.473215Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize 
from file: (empty maybe) 2025-06-25T14:53:06.473343Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3536 2025-06-25T14:53:06.864760Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3536 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:07.037648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:07.058943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:07.187355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:07.343798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:07.413531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:09.042797Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900136621295862:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:09.042882Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:09.373647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:09.408272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:09.443635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:09.478864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:09.544372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:09.575964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:09.616393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:09.701782Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900136621296528:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:09.701860Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:09.702061Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900136621296533:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:09.705360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:09.714206Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900136621296535:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:09.806194Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900136621296586:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:10.814920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:10.851824Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900119441425135:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:10.852191Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:10.855305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part ... ration.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 1740, MsgBus: 1422 2025-06-25T14:54:03.181986Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900369452176370:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:03.182034Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009fe/r3tmp/tmpf7oPHH/pdisk_1.dat 2025-06-25T14:54:03.445459Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:03.446315Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900369452176352:2080] 1750863243181270 != 1750863243181273 2025-06-25T14:54:03.464295Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:03.464577Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:03.466259Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1740, node 7 2025-06-25T14:54:03.540886Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:03.540910Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:03.540920Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:54:03.541077Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1422 TClient is connected to server localhost:1422 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:54:04.189656Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:04.230704Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:04.237956Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:04.246673Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:04.354251Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:54:04.633159Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:04.727273Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:08.028428Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900390927014463:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:08.028522Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:08.112864Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:08.174586Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:08.182658Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900369452176370:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:08.182736Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:08.224257Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:08.275522Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:08.317987Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:08.410043Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:08.462841Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:08.567632Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519900390927015125:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:08.567741Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:08.568038Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900390927015130:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:08.573790Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:08.591036Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900390927015132:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:08.698458Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900390927015183:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TPersQueueTest::BadSids [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::LiteralKeys [GOOD] Test command err: Trying to start YDB, gRPC: 19726, MsgBus: 19505 2025-06-25T14:53:26.905128Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900211910659685:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:26.906969Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009f3/r3tmp/tmp3VMNca/pdisk_1.dat 2025-06-25T14:53:27.266567Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19726, node 1 2025-06-25T14:53:27.313116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:27.313238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:27.317442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:27.329821Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:27.329846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:27.329853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:27.329978Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19505 TClient is connected to server localhost:19505 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:53:27.805841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:53:27.833294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.905267Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:27.975863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:28.123600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:28.188427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:29.777718Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900224795563058:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:29.777807Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:30.109207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:30.150043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:30.179060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:30.225705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:30.261713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:30.302431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:30.341039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:30.411391Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900229090531012:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:30.411479Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:30.411879Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900229090531017:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:30.415478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:30.426979Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900229090531019:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:30.536625Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900229090531070:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:31.650594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:31.846256Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:53:31.901761Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900211910659685:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:31.901816Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 20910, MsgBus: 9818 2025-06-25T14:53:32.748529Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivere ... 644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:02.760430Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900343731357782:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:02.760502Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 12243, MsgBus: 24063 2025-06-25T14:54:05.192218Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900377821489102:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:05.192291Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009f3/r3tmp/tmpXnjpre/pdisk_1.dat 2025-06-25T14:54:05.351581Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:05.356415Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900377821489082:2080] 1750863245191610 != 1750863245191613 2025-06-25T14:54:05.368681Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:05.368755Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:05.372564Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12243, node 7 2025-06-25T14:54:05.432914Z 
node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:05.432940Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:05.432950Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:05.433105Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24063 TClient is connected to server localhost:24063 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:06.142366Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:06.151945Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:06.210517Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:06.261079Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:06.501181Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:06.590843Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:09.496452Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900395001359891:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:09.496550Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:09.572626Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:09.615711Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:09.705870Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:09.746990Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:09.791880Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:09.878998Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:09.921334Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:10.011975Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900399296327844:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:10.012121Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:10.012625Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900399296327849:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:10.017384Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:10.030948Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900399296327851:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:10.120636Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900399296327902:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:10.194638Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900377821489102:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:10.194720Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TCdcStreamTests::MeteringDedicated [GOOD] >> TCdcStreamTests::ChangeOwner >> KqpSqlIn::KeySuffix_OnlyTail ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TTopicYqlTest::CreateTopicYqlBackCompatibility [GOOD] Test command err: 2025-06-25T14:51:24.526124Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899689619409632:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:24.526724Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:24.558167Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899686906811044:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:24.558256Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:24.672194Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b77/r3tmp/tmpm28p1p/pdisk_1.dat 2025-06-25T14:51:24.681982Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:24.854636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:24.854730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:24.868707Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:24.882458Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:51:24.883197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:24.886454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:24.886506Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:24.889367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12973, 
node 1 2025-06-25T14:51:25.043192Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b77/r3tmp/yandexWCgnQa.tmp 2025-06-25T14:51:25.043228Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b77/r3tmp/yandexWCgnQa.tmp 2025-06-25T14:51:25.044367Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b77/r3tmp/yandexWCgnQa.tmp 2025-06-25T14:51:25.044554Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:25.272136Z INFO: TTestServer started on Port 26392 GrpcPort 12973 TClient is connected to server localhost:26392 PQClient connected to localhost:12973 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:51:25.526305Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:25.564030Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:25.564747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:51:25.612953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-25T14:51:27.130366Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899702504312631:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:27.130490Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:27.132737Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899702504312643:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:27.139247Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899702504312649:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:27.139320Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:27.143676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:27.164542Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899702504312645:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:51:27.246054Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899702504312723:2800] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:27.621595Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519899702504312741:2313], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:51:27.627670Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTY5NDY2ODMtMmYzYWNiZDctMzViYjE4MzYtMjI2ZmExYTk=, ActorId: [1:7519899702504312613:2301], ActorState: ExecuteState, TraceId: 01jyks51g9avjhgnekyfgnmnc2, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:51:27.630528Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:51:27.685467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:27.773434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:27.862902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:51:28.273557Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jyks52a278752t9x669cds87, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWZmZGVjYjgtMmY5NmI5M2MtNGViYTM5ZTgtYTU4YTYwZjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7519899706799280414:3081] 2025-06-25T14:51:29.522299Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899689619409632:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:29.522394Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=access ... 
ersion: 1 TabletId: 0 } ReadRuleGenerations: 0 PartitionStrategy { MinPartitionCount: 2 MaxPartitionCount: 5 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } AllPartitions { PartitionId: 1 KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519900353391792912 RawX2: 107374184615 } Partitions { Partition { PartitionId: 1 } } 2025-06-25T14:54:10.327509Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:54:10.329102Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037893] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:54:10.332767Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037893] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:54:10.332797Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037893] Try execute txs with state EXECUTED 2025-06-25T14:54:10.332815Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037893] TxId 281474976715676, State EXECUTED 2025-06-25T14:54:10.332835Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037893] TxId 281474976715676 State EXECUTED FrontTxId 281474976715676 2025-06-25T14:54:10.332854Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037893] TPersQueue::SendEvReadSetAckToSenders 2025-06-25T14:54:10.332872Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037893] TxId 281474976715676, NewState WAIT_RS_ACKS 2025-06-25T14:54:10.332889Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037893] TxId 281474976715676 moved from EXECUTED to WAIT_RS_ACKS 2025-06-25T14:54:10.332913Z node 26 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715676] PredicateAcks: 0/0 2025-06-25T14:54:10.332922Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037893] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-25T14:54:10.332945Z node 26 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715676] PredicateAcks: 0/0 2025-06-25T14:54:10.332973Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037893] add an TxId 281474976715676 to the list for deletion 2025-06-25T14:54:10.332996Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037893] TxId 281474976715676, NewState DELETING 2025-06-25T14:54:10.333033Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037893] delete key for TxId 281474976715676 2025-06-25T14:54:10.333091Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037893] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:54:10.336650Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:54:10.336688Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-25T14:54:10.336707Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715676, State EXECUTED 2025-06-25T14:54:10.336728Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976715676 State EXECUTED FrontTxId 281474976715676 2025-06-25T14:54:10.336747Z node 25 :PERSQUEUE 
DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-25T14:54:10.336766Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715676, NewState WAIT_RS_ACKS 2025-06-25T14:54:10.336783Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976715676 moved from EXECUTED to WAIT_RS_ACKS 2025-06-25T14:54:10.336805Z node 25 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715676] PredicateAcks: 0/0 2025-06-25T14:54:10.336814Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-25T14:54:10.336827Z node 25 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715676] PredicateAcks: 0/0 2025-06-25T14:54:10.336845Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037892] add an TxId 281474976715676 to the list for deletion 2025-06-25T14:54:10.336873Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715676, NewState DELETING 2025-06-25T14:54:10.336906Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037892] delete key for TxId 281474976715676 2025-06-25T14:54:10.336966Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:54:10.340916Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037893] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:54:10.340961Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037893] Try execute txs with state DELETING 2025-06-25T14:54:10.340982Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037893] TxId 281474976715676, State DELETING 2025-06-25T14:54:10.341011Z node 26 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037893] delete TxId 281474976715676 2025-06-25T14:54:10.343208Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:54:10.343243Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-25T14:54:10.343260Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715676, State DELETING 2025-06-25T14:54:10.343280Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037892] delete TxId 281474976715676 TClient::Ls request: /Root/PQ/rt3.dc1--legacy--topic1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "rt3.dc1--legacy--topic1" PathId: 13 SchemeshardId: 72057594046644480 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976715676 CreateStep: 1750863250339 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186224037894 } PersQueueGroup { Name: "rt3.dc1--legacy--topic1" PathId: 13 TotalGroupCount: 2 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 10... 
(TRUNCATED) === PATH DESCRIPTION: Name: "rt3.dc1--legacy--topic1" PathId: 13 TotalGroupCount: 2 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } LocalDC: true RequireAuthWrite: true RequireAuthRead: true Producer: "legacy" Ident: "legacy" Topic: "topic1" DC: "dc1" FormatVersion: 0 Codecs { } YdbDatabasePath: "/Root" PartitionStrategy { MinPartitionCount: 2 MaxPartitionCount: 5 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } Partitions { PartitionId: 0 TabletId: 72075186224037893 KeyRange { ToBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active } Partitions { PartitionId: 1 TabletId: 72075186224037892 KeyRange { FromBound: "\177\377\377\377\377\377\377\377\377\377\377\377\377\377\377\376" } Status: Active } AlterVersion: 1 BalancerTabletID: 72075186224037894 NextPartitionId: 2 2025-06-25T14:54:10.937929Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715677, task: 1, CA Id [25:7519900400636434980:2433]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 0 2025-06-25T14:54:10.984623Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715677, task: 1, CA Id [25:7519900400636434980:2433]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:54:11.052454Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715677, task: 1, CA Id [25:7519900400636434980:2433]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:54:11.107344Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715677, task: 1, CA Id [25:7519900400636434980:2433]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:54:11.214142Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715677, task: 1, CA Id [25:7519900400636434980:2433]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:54:11.352401Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715677, task: 1, CA Id [25:7519900400636434980:2433]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:54:11.675335Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715677, task: 1, CA Id [25:7519900400636434980:2433]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:54:11.756462Z node 25 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715678. Failed to resolve tablet: 72075186224037891 after several retries. 
2025-06-25T14:54:11.756635Z node 25 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [25:7519900404931402342:2436] TxId: 281474976715678. Ctx: { TraceId: 01jyksa1mrcz132dj3v9mj4wre, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=25&id=YTVmZjE2OS0xZDZhMzU5Yi03NjUwNjI2Yi0yNDk5Njk2Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-25T14:54:11.756876Z node 25 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=25&id=YTVmZjE2OS0xZDZhMzU5Yi03NjUwNjI2Yi0yNDk5Njk2Yw==, ActorId: [25:7519900404931402305:2436], ActorState: ExecuteState, TraceId: 01jyksa1mrcz132dj3v9mj4wre, Create QueryResponse for error on request, msg: 2025-06-25T14:54:11.757962Z node 25 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jyksa23bedwaqmh3e87dbckm" } } YdbStatus: UNAVAILABLE ConsumedRu: 299 } >> KqpRanges::IsNullInValue >> KqpNamedExpressions::NamedExpressionRandomInsert+UseSink >> TPQCompatTest::BadTopics [GOOD] >> TPQCompatTest::CommitOffsets ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::BadSids [GOOD] Test command err: 2025-06-25T14:51:24.685973Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:51:24.686089Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:51:25.178900Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:51:25.178979Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info === Server->StartServer(false); 2025-06-25T14:51:25.538486Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519899692436802763:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:25.538675Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:25.554995Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519899693479111747:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:25.555076Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b92/r3tmp/tmpMDsVf0/pdisk_1.dat 2025-06-25T14:51:25.686670Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:25.690411Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:25.825513Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:25.843311Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:25.843421Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:25.850474Z node 3 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-25T14:51:25.851273Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> 
Connected TServer::EnableGrpc on GrpcPort 9124, node 3 2025-06-25T14:51:25.902474Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:25.902548Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:25.907996Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:25.942124Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b92/r3tmp/yandexZgdQDt.tmp 2025-06-25T14:51:25.942147Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b92/r3tmp/yandexZgdQDt.tmp 2025-06-25T14:51:25.942342Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b92/r3tmp/yandexZgdQDt.tmp 2025-06-25T14:51:25.942482Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:26.009287Z INFO: TTestServer started on Port 13312 GrpcPort 9124 TClient is connected to server localhost:13312 PQClient connected to localhost:9124 === TenantModeEnabled() = 0 === Init PQ - start server on port 9124 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:51:26.407400Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:51:26.407599Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:26.407812Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:51:26.407868Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:51:26.408146Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:51:26.408194Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:26.410448Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:51:26.410631Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:51:26.410798Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:26.410846Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:51:26.410857Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-25T14:51:26.410887Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 2 -> 3 2025-06-25T14:51:26.416994Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:26.417037Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:51:26.417058Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 3 -> 128 waiting... 
2025-06-25T14:51:26.420619Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:26.420656Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:26.420681Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:51:26.420726Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-25T14:51:26.425109Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:26.426745Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:51:26.426767Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-25T14:51:26.426800Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:51:26.427173Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-25T14:51:26.427384Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:51:26.430024Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750863086476, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:51:26.430170Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750863086476 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:51:26.430216Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:51:26.430437Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 128 -> 240 2025-06-25T14:51:26.430471Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:51:26.430625Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId ... 
o=(NULL) 2025-06-25T14:54:11.903307Z node 21 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [21:7519900404335736439:2500] (SourceId=base64:aa, PreferedPartition=(NULL)) Start idle 2025-06-25T14:54:11.903353Z node 21 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 5 sessionId: partition: 3 expectedGeneration: (NULL) 2025-06-25T14:54:11.906154Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=3) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 22, Generation: 1 2025-06-25T14:54:11.907771Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 5 partition: 3 MaxSeqNo: 0 sessionId: base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0 2025-06-25T14:54:11.905817Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [21:7519900404335736471:2500], now have 1 active actors on pipe 2025-06-25T14:54:11.906455Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-25T14:54:11.906486Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 3 2025-06-25T14:54:11.906584Z node 22 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0 generated for partition 3 topic 'rt3.dc1--topic1' owner base64:aa 2025-06-25T14:54:11.906689Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 3, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 3 2025-06-25T14:54:11.906743Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--topic1' partition: 3 messageNo: 0 requestId: cookie: 0 2025-06-25T14:54:11.907353Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-25T14:54:11.908605Z :INFO: [] MessageGroupId [base64:aa] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750863251908 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:54:11.907375Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 3 2025-06-25T14:54:11.907457Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--topic1' partition: 3 messageNo: 0 requestId: cookie: 0 2025-06-25T14:54:11.908743Z :INFO: [] MessageGroupId [base64:aa] SessionId [] Write session established. Init response: session_id: "base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0" topic: "topic1" cluster: "dc1" partition_id: 3 supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-25T14:54:11.912474Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0] Write 1 messages with Id from 1 to 1 2025-06-25T14:54:11.912618Z :INFO: [] MessageGroupId [base64:aa] SessionId [base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0] Write session: close. 
Timeout = 18446744073709551 ms 2025-06-25T14:54:11.913051Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0] Write session: try to update token 2025-06-25T14:54:11.913139Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0] Send 1 message(s) (0 left), first sequence number is 1 2025-06-25T14:54:11.915763Z node 21 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-25T14:54:11.916078Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=3) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-25T14:54:11.917551Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-25T14:54:11.918128Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=3) Received event: NActors::IEventHandle 2025-06-25T14:54:11.917622Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 3 2025-06-25T14:54:11.917730Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--topic1' partition: 3 messageNo: 0 requestId: cookie: 1 2025-06-25T14:54:11.919262Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-25T14:54:11.919295Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 3 2025-06-25T14:54:11.919711Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72075186224037892] got client message topic: rt3.dc1--topic1 partition: 3 SourceId: '\0base64:aa' SeqNo: 1 partNo : 0 messageNo: 1 size 92 offset: -1 2025-06-25T14:54:11.920000Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037892, Partition: 3, State: StateIdle] Topic 'rt3.dc1--topic1' partition 3 part blob processing sourceId '\0base64:aa' seqNo 1 partNo 0 2025-06-25T14:54:11.920880Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037892, Partition: 3, State: StateIdle] Topic 'rt3.dc1--topic1' partition 3 part blob complete sourceId '\0base64:aa' seqNo 1 partNo 0 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 169 count 1 nextOffset 1 batches 1 2025-06-25T14:54:11.921690Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037892, Partition: 3, State: StateIdle] Add new write blob: topic 'rt3.dc1--topic1' partition 3 compactOffset 0,1 HeadOffset 0 endOffset 0 curOffset 1 d0000000003_00000000000000000000_00000_0000000001_00000? size 157 WTime 1750863251919 2025-06-25T14:54:11.921896Z node 22 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:54:11.921990Z node 22 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 3 offset 0 partNo 0 count 1 size 157 2025-06-25T14:54:11.927968Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=3) Received event: NActors::IEventHandle 2025-06-25T14:54:11.925750Z node 22 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. 
Partition 3 offset 0 count 1 size 157 actorID [22:7519900397527399633:2362] 2025-06-25T14:54:11.925864Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 3, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 102 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:54:11.925912Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 3, State: StateIdle] TPartition::ReplyWrite. Partition: 3 2025-06-25T14:54:11.925961Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 3, State: StateIdle] Answering for message sourceid: '\0base64:aa', Topic: 'rt3.dc1--topic1', Partition: 3, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-06-25T14:54:11.926213Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037892, Partition: 3, State: StateIdle] Topic 'rt3.dc1--topic1' partition 3 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:54:11.926247Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72075186224037892, Partition: 3, State: StateIdle] Topic 'rt3.dc1--topic1' partition 3 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-25T14:54:11.926298Z node 22 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 3, State: StateIdle] need more data for compaction. cumulativeSize=157, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:54:11.926365Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--topic1' partition: 3 messageNo: 1 requestId: cookie: 1 2025-06-25T14:54:11.926498Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 3, State: StateIdle] read cookie 2 Topic 'rt3.dc1--topic1' partition 3 user user offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-06-25T14:54:11.926765Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 3, State: StateIdle] read cookie 2 added 1 blobs, size 157 count 1 last offset 0, current partition end offset: 1 2025-06-25T14:54:11.926792Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 3, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-25T14:54:11.926859Z node 22 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 3 offset 0 partno 0 count 1 parts_count 0 source 1 size 157 accessed 0 times before, last time 2025-06-25T14:54:11.000000Z 2025-06-25T14:54:11.926883Z node 22 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-25T14:54:11.926933Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-25T14:54:11.927093Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 137 from pos 0 cbcount 1 2025-06-25T14:54:11.927169Z node 22 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'rt3.dc1--topic1' partition 3 user user readTimeStamp done, result 1750863251919 queuesize 0 startOffset 0 2025-06-25T14:54:11.927469Z node 22 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 3 offset 0 partno 0 count 1 parts 0 suffix '63' size 157 2025-06-25T14:54:11.927531Z node 22 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 3 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:54:11.929070Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0] Write session got write response: sequence_numbers: 1 offsets: 0 already_written: false partition_id: 3 write_statistics { persist_duration_ms: 4 } 2025-06-25T14:54:11.929124Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0] Write session: acknoledged message 1 2025-06-25T14:54:12.012796Z :INFO: [] MessageGroupId [base64:aa] SessionId [base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0] Write session will now close 2025-06-25T14:54:12.012901Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0] Write session: aborting 2025-06-25T14:54:12.013626Z :INFO: [] MessageGroupId [base64:aa] SessionId [base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:54:12.013698Z :DEBUG: [] MessageGroupId [base64:aa] SessionId [base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0] Write session: destroy 2025-06-25T14:54:12.015613Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0 grpc closed 2025-06-25T14:54:12.015655Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: base64:aa|30b045a2-3d622442-eac8f9fa-374defc7_0 is DEAD 2025-06-25T14:54:12.016920Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=3) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:54:12.017447Z node 22 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [21:7519900404335736471:2500] destroyed 2025-06-25T14:54:12.017484Z node 22 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 3, State: StateIdle] TPartition::DropOwner. 
>> KqpRanges::IsNotNullInJsonValue2 [GOOD] >> KqpRanges::DuplicateKeyPredicateParam >> KqpSqlIn::TupleSelect [GOOD] >> KqpSqlIn::SimpleKey_In_And_In >> KqpReturning::ReturningWorks-QueryService [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-69 [GOOD] >> KqpReturning::ReturningColumnsOrder >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-70 >> TCdcStreamTests::ChangeOwner [GOOD] >> TCdcStreamTests::DropIndexWithStream >> KqpExtractPredicateLookup::OverflowLookup >> KqpNewEngine::PruneEffectPartitions-UseSink [GOOD] >> KqpNewEngine::AsyncIndexUpdate [GOOD] >> KqpNewEngine::AutoChooseIndex >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-21 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-22 >> KqpReturning::ReturningWorksIndexedReplace-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedOperationsWithDefault-QueryService >> TPersQueueTest::WhenTheTopicIsDeletedAfterReadingTheData_Uncompressed [GOOD] >> TTopicYqlTest::CreateAndAlterTopicYql >> KqpAgg::AggWithSelfLookup [GOOD] >> KqpAgg::AggWithSelfLookup2 >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-StrictAclCheck [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-DomainLoginOnly >> KqpNewEngine::DeleteWithInputMultiConsumptionLimit+UseSink >> KqpRanges::DuplicateCompositeKeyPredicate [GOOD] >> KqpRanges::DeleteNotFullScan+UseSink >> TAsyncIndexTests::SplitMainWithReboots[TabletReboots] [GOOD] >> KqpNewEngine::SimpleUpsertSelect [GOOD] >> KqpNewEngine::StaleRO+EnableFollowers ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PruneEffectPartitions-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 6195, MsgBus: 13353 2025-06-25T14:53:28.837741Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900219270749547:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:28.838015Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009ec/r3tmp/tmpUMqTZL/pdisk_1.dat 2025-06-25T14:53:29.236398Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:29.240469Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900219270749409:2080] 1750863208821361 != 1750863208821364 TServer::EnableGrpc on GrpcPort 6195, node 1 2025-06-25T14:53:29.294011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:29.294117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:29.297573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:29.408848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:29.408879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:29.408886Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-25T14:53:29.408993Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13353 TClient is connected to server localhost:13353 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:53:29.865603Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:29.953891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:29.966561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:29.976061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:30.148708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:30.293241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:30.356767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:31.897990Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900232155652939:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:31.898337Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:32.176388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:32.250587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:32.296268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:32.331993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:32.363404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:32.438474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:32.477913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:32.532632Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900236450620902:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:32.532717Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:32.532770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900236450620907:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:32.536205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:32.547617Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900236450620909:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:32.639281Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900236450620960:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:33.835406Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900219270749547:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:33.835476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 32532, MsgBus: 4270 2025-06-25T14:53:34.504246Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900246960784672:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:34.504471Z node 2 :METADATA_PROVIDER ERROR: log.cpp:7 ... exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900358274425388:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:05.418706Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4536, MsgBus: 8350 2025-06-25T14:54:08.092591Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900391299015122:2240];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009ec/r3tmp/tmphwCFY4/pdisk_1.dat 2025-06-25T14:54:08.117051Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:54:08.266286Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:08.266383Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:08.267691Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:08.270195Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:08.276477Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900391299014897:2080] 1750863248016696 != 1750863248016699 TServer::EnableGrpc on GrpcPort 4536, node 7 2025-06-25T14:54:08.385008Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:08.385041Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:08.385053Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:08.385225Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8350 
2025-06-25T14:54:09.032418Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8350 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:09.156426Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:09.170026Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:09.183068Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:09.253866Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:09.452663Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:09.548404Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:12.355178Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900408478885701:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:12.355285Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:12.410683Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:12.452377Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:12.496548Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:12.545228Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:12.594849Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:12.656405Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:12.699437Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:12.819699Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900408478886355:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:12.819786Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:12.820125Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900408478886360:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:12.826260Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:12.838715Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900408478886362:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:12.912114Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900408478886413:3411] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:13.032667Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900391299015122:2240];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:13.032805Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TCdcStreamTests::DropIndexWithStream [GOOD] >> TCdcStreamTests::DropTableWithIndexWithStream >> KqpNewEngine::ReadRangeWithParams [GOOD] >> KqpNewEngine::ReadDifferentColumns >> KqpReturning::ReturningWorksIndexedOperationsWithDefault+QueryService [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitMainWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:15.190555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:15.190663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:15.190707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:15.190764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:15.190813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:15.190842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:15.190904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 
15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:15.190987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:15.191717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:15.192085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:15.270107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:15.270166Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:15.270904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:15.282684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:15.286616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:15.286784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:15.294958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:15.295208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:15.295902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.296194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:15.299763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:15.299913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:15.300732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:15.300806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:15.300918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:15.300957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:15.300998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:15.301141Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:15.308377Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:15.435740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:15.435944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.436135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:15.436198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:15.436434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:15.436516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:15.438657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.438824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:15.439005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.439081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:15.439128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:15.439161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:15.441079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-25T14:51:15.441143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:15.441188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:15.442863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.442915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.442975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:15.443018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:15.446371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:15.448148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:15.448403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:15.449379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.449514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
ompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\001\000\004\000\000\0002\000\000\000" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 
MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:54:17.352712Z node 93 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409546][93:793:2625] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:54:17.352794Z node 93 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][93:732:2625] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:54:17.352918Z node 93 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409546][93:793:2625] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750863257318687 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750863257318687 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750863257318687 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:54:17.365994Z node 93 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409546][93:793:2625] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-25T14:54:17.366135Z node 93 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][93:732:2625] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:54:17.573186Z node 93 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:54:17.573501Z node 93 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 356us result status StatusSuccess 2025-06-25T14:54:17.574465Z node 93 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpNewEngine::MultiStatementMixPure [GOOD] >> KqpNewEngine::MultiUsagePrecompute >> KqpNotNullColumns::JoinBothTablesWithNotNullPk-StreamLookup [GOOD] >> TCdcStreamTests::DropTableWithIndexWithStream [GOOD] >> KqpNewEngine::DeleteON [GOOD] >> KqpNewEngine::DeleteWithInputMultiConsumption+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpReturning::ReturningWorksIndexedOperationsWithDefault+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 15057, MsgBus: 8301 2025-06-25T14:53:11.618842Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900147696697451:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:11.622617Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009fa/r3tmp/tmpCwRCOH/pdisk_1.dat 2025-06-25T14:53:11.945856Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15057, node 1 2025-06-25T14:53:12.036872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:12.036981Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:12.038579Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:12.046983Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:12.046999Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:12.047005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:12.047116Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:8301 TClient is connected to server localhost:8301 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:12.588255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:12.613772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:12.620650Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:53:12.740874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:12.883544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:12.966921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:14.409148Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900160581600857:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:14.409497Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:14.739803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:14.765073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:14.797204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:14.825092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:14.850799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:14.890519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:14.921250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:14.971878Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900160581601513:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:14.971942Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:14.972094Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900160581601518:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:14.975099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:14.986756Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900160581601520:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:15.055151Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900164876568867:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:16.061025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:16.101848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:16.146855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/a ... /runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 28763, MsgBus: 17407 2025-06-25T14:54:09.409004Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900396046443938:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:09.409101Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009fa/r3tmp/tmps0Z8r1/pdisk_1.dat 2025-06-25T14:54:09.610496Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:09.611629Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900396046443917:2080] 1750863249391851 != 1750863249391854 2025-06-25T14:54:09.639062Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:09.639171Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:09.640738Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28763, node 7 2025-06-25T14:54:09.781042Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:09.781076Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:09.781091Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:09.781262Z 
node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17407 TClient is connected to server localhost:17407 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:54:10.463565Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:10.502955Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:10.608759Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:10.708187Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:10.977409Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:54:11.129129Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:14.421920Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900396046443938:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:14.422063Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900417521282055:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:14.422218Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:14.422288Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:14.519655Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:14.590815Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:14.632606Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:14.680856Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:14.720829Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:14.797023Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:14.873434Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:14.969991Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900417521282723:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:14.970109Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:14.970346Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900417521282728:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:14.975214Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:14.990475Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900417521282730:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:15.066924Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900421816250077:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:16.563844Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpNewEngine::JoinIdxLookupWithPredicate [GOOD] >> KqpNewEngine::ItemsLimit >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-45 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-46 |87.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_cdc_stream/unittest >> TCdcStreamTests::DropTableWithIndexWithStream [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:51:52.475529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:52.475669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:52.475716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:52.475755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:52.476710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:52.476774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:52.476854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:52.476921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:52.477658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 
2025-06-25T14:51:52.478926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:52.557177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:51:52.557241Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:52.576507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:52.576861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:52.577139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:52.583673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:52.584077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:52.587491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:52.587824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:52.593902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:52.594917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:52.601658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:52.601730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:52.601853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:52.601893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:52.601949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:52.602032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:51:52.607790Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:51:52.745886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:52.746172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:52.746383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:52.746427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:52.746710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:52.746787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:52.752476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:52.752687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:52.752918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:52.752997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:52.753049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:52.753098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:52.755400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:52.755469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:52.755512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:52.761158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:52.761221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:52.761292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:52.761372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:52.766627Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:52.769318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:52.769554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:52.770624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:52.770789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:51:52.770847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:52.771154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:51:52.771207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:52.771407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:51:52.771479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:51:52.774576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:52.774628Z node 1 :FLAT_TX_SCHEMESHARD ... 
18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:54:19.753678Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:54:19.753706Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:54:19.755063Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:54:19.755146Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:54:19.755181Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:54:19.755212Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-25T14:54:19.755247Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:54:19.755340Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 4/5, is published: true 2025-06-25T14:54:19.756450Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:54:19.756505Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:54:19.756821Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:54:19.756947Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 5/5 2025-06-25T14:54:19.756985Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-06-25T14:54:19.757039Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 5/5 2025-06-25T14:54:19.757071Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-06-25T14:54:19.757107Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 5/5, is published: true 2025-06-25T14:54:19.757193Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [20:379:2345] message: TxId: 103 2025-06-25T14:54:19.757289Z node 20 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-06-25T14:54:19.757370Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:54:19.757444Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:54:19.757621Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:54:19.757701Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:1 2025-06-25T14:54:19.757732Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:1 2025-06-25T14:54:19.757771Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:54:19.757798Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:2 2025-06-25T14:54:19.757823Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:2 2025-06-25T14:54:19.757872Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:54:19.757901Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:3 2025-06-25T14:54:19.757924Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:3 2025-06-25T14:54:19.757959Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-25T14:54:19.757982Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:4 2025-06-25T14:54:19.758005Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:4 2025-06-25T14:54:19.758071Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-06-25T14:54:19.759071Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:54:19.759156Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 6], at schemeshard: 72057594046678944 2025-06-25T14:54:19.759281Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-25T14:54:19.759359Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-25T14:54:19.759395Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path 
for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:54:19.759744Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:54:19.761394Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:54:19.761516Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:54:19.761553Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:54:19.764036Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:54:19.764138Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:54:19.764421Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:54:19.764508Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [20:757:2658] 2025-06-25T14:54:19.764681Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 103 2025-06-25T14:54:19.765534Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:54:19.765929Z node 20 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable/Stream" took 471us result status StatusPathDoesNotExist 2025-06-25T14:54:19.766201Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/Index/indexImplTable/Stream\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table/Index/indexImplTable\' (id: [OwnerId: 72057594046678944, LocalPathId: 4])" Path: "/MyRoot/Table/Index/indexImplTable/Stream" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:54:19.766888Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" Options { 
ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:54:19.767232Z node 20 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" took 388us result status StatusPathDoesNotExist 2025-06-25T14:54:19.767445Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/Index/indexImplTable/Stream/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table/Index/indexImplTable\' (id: [OwnerId: 72057594046678944, LocalPathId: 4])" Path: "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |87.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::JoinBothTablesWithNotNullPk-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 32316, MsgBus: 8079 2025-06-25T14:53:40.373637Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900272931739107:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:40.373689Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009db/r3tmp/tmpEQh3H1/pdisk_1.dat 2025-06-25T14:53:40.748859Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32316, node 1 2025-06-25T14:53:40.806697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:40.806821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:40.808354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:40.810196Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:40.810222Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:40.810234Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:40.810346Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8079 TClient is connected to server localhost:8079 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:41.358406Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:41.364279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:41.385329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:43.082170Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900285816641435:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.082275Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.314199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.471412Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900285816641539:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.471493Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.471758Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900285816641544:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.475485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:43.485235Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900285816641546:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:53:43.555797Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900285816641597:2392] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:43.724040Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900285816641639:2316], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing key column in input: Key for table: /Root/TestInsertNotNullPk, code: 2029 2025-06-25T14:53:43.724563Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NDY3YjZkZWUtODM4YTIxMzMtYzdmZDBiYjUtNjE1OWRjNTA=, ActorId: [1:7519900285816641408:2288], ActorState: ExecuteState, TraceId: 01jyks96wp641b1w1azw8q07rw, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: 2025-06-25T14:53:43.755851Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900285816641648:2320], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:47: Error: Failed to convert type: Struct<'Key':Null,'Value':String> to Struct<'Key':Uint64,'Value':String?>
:1:47: Error: Failed to convert 'Key': Null to Uint64
:1:47: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T14:53:43.756528Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NDY3YjZkZWUtODM4YTIxMzMtYzdmZDBiYjUtNjE1OWRjNTA=, ActorId: [1:7519900285816641408:2288], ActorState: ExecuteState, TraceId: 01jyks96xq3whj50mak5xmq8vs, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 11767, MsgBus: 24095 2025-06-25T14:53:44.393762Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900290725212147:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:44.393838Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009db/r3tmp/tmpuUW7Rd/pdisk_1.dat 2025-06-25T14:53:44.523665Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:44.525155Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900290725212120:2080] 1750863224393354 != 1750863224393357 TServer::EnableGrpc on GrpcPort 11767, node 2 2025-06-25T14:53:44.577654Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:44.577732Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:44.578988Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:44.584865Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:44.584885Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:44.584892Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:44.584985Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24095 TClient is connected to server localhost:24095 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:53:45.089687Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_a ... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009db/r3tmp/tmpVnW0fB/pdisk_1.dat 2025-06-25T14:54:12.391236Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:12.394479Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900408229809546:2080] 1750863252266384 != 1750863252266387 2025-06-25T14:54:12.419666Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:12.419774Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:12.423438Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11085, node 7 2025-06-25T14:54:12.492859Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:12.492884Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:12.492895Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:12.493029Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11104 TClient is connected to server localhost:11104 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:13.095509Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:54:13.103392Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:13.115141Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:13.227722Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:13.363669Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:13.450208Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:13.584872Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:16.217778Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900425409680366:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.217892Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.294529Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:16.339069Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:16.387499Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:16.435088Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:16.497917Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:16.572046Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:16.661688Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:16.768684Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900425409681032:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.768795Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.769206Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900425409681037:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.774592Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:16.792632Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900425409681039:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:16.891700Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900425409681090:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:17.267766Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900408229809578:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:17.267847Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:18.607554Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:18.844582Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TS3WrapperTests::AbortUnknownUpload >> KqpMergeCn::TopSortByDesc_Bool_And_PKUint64_Limit4 [GOOD] >> KqpMergeCn::TopSortBy_Date_And_Datetime_Limit4 |87.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/test-results/unittest/{meta.json ... results_accumulator.log} >> TS3WrapperTests::AbortUnknownUpload [GOOD] |87.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpNamedExpressions::NamedExpressionRandomChanged2-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandom+UseSink >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-70 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-71 |87.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> KqpRanges::IsNullInValue [GOOD] >> KqpRanges::IsNullInJsonValue ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::AbortUnknownUpload [GOOD] Test command err: 2025-06-25T14:54:21.619157Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# AEBD0F7D-B4B7-4364-AA45-7603164AB7DB, request# AbortMultipartUpload { Bucket: TEST Key: key UploadId: uploadId } REQUEST: DELETE /TEST/key?uploadId=uploadId HTTP/1.1 HEADERS: Host: localhost:1787 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 0FE551CF-8E71-4880-9AFE-BAC51C225885 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 6 / /TEST/key / uploadId=uploadId 2025-06-25T14:54:21.665248Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# AEBD0F7D-B4B7-4364-AA45-7603164AB7DB, response# >> TS3WrapperTests::AbortMultipartUpload >> KqpSqlIn::KeySuffix_OnlyTail [GOOD] >> KqpSqlIn::KeyTypeMissmatch_Int |87.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest |87.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::GetUnknownObject >> TS3WrapperTests::AbortMultipartUpload [GOOD] >> TS3WrapperTests::GetUnknownObject [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-22 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-23 >> KqpSqlIn::SecondaryIndex_TupleLiteral [GOOD] >> KqpSqlIn::SecondaryIndex_TupleSelect >> KqpNewEngine::DeleteWithInputMultiConsumptionLimit+UseSink [GOOD] >> KqpNewEngine::DeleteWithInputMultiConsumptionLimit-UseSink ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::AbortMultipartUpload [GOOD] Test command err: 2025-06-25T14:54:23.122090Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# D4A127C2-2A11-43B3-8A8D-D74915DDEAA2, request# CreateMultipartUpload { Bucket: TEST Key: key } REQUEST: POST /TEST/key?uploads HTTP/1.1 HEADERS: Host: localhost:28022 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A2BEFB71-8E64-4125-9583-C01899B2B31A amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-storage-class: STANDARD S3_MOCK::HttpServeAction: 4 / /TEST/key / uploads= 2025-06-25T14:54:23.129117Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# D4A127C2-2A11-43B3-8A8D-D74915DDEAA2, response# CreateMultipartUploadResult { Bucket: Key: TEST/key UploadId: 1 } 2025-06-25T14:54:23.129595Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# C900D253-517E-4CC0-A5B4-403EBA51DDD8, request# AbortMultipartUpload { Bucket: TEST Key: key UploadId: 1 } REQUEST: DELETE /TEST/key?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:28022 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: 
AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7D2342B6-1348-401D-887B-3666D9C3498E amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 6 / /TEST/key / uploadId=1 2025-06-25T14:54:23.131918Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# C900D253-517E-4CC0-A5B4-403EBA51DDD8, response# AbortMultipartUploadResult { } 2025-06-25T14:54:23.132275Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# C36287C3-1951-47C1-BBD2-B6B4562D5FC9, request# HeadObject { Bucket: TEST Key: key } REQUEST: HEAD /TEST/key HTTP/1.1 HEADERS: Host: localhost:28022 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: DB8D1912-7DCB-4F10-84C6-8DBB784F26C4 amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 2025-06-25T14:54:23.137538Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# C36287C3-1951-47C1-BBD2-B6B4562D5FC9, response# No response body. ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::GetUnknownObject [GOOD] Test command err: 2025-06-25T14:54:23.169588Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# CFAF376B-AB95-4AEF-BA02-893C6ACFDB60, request# GetObject { Bucket: TEST Key: key Range: bytes=0-3 } REQUEST: GET /TEST/key HTTP/1.1 HEADERS: Host: localhost:19693 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: DF60E33C-7031-4DD7-B0AE-DACFE339839B amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-3 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 2025-06-25T14:54:23.176126Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# CFAF376B-AB95-4AEF-BA02-893C6ACFDB60, response# No response body. 
>> KqpRanges::DuplicateKeyPredicateParam [GOOD] >> KqpRanges::DuplicateKeyPredicateMixed >> TS3WrapperTests::HeadObject >> TS3WrapperTests::CopyPartUpload >> TPersQueueTest::SetupWriteSessionOnDisabledCluster [GOOD] >> TPersQueueTest::SetupReadSession >> TS3WrapperTests::HeadObject [GOOD] >> TS3WrapperTests::CopyPartUpload [GOOD] >> KqpNewEngine::ReadDifferentColumns [GOOD] >> KqpNewEngine::ReadDifferentColumnsPk |87.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::MultipartUpload ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::HeadObject [GOOD] Test command err: 2025-06-25T14:54:24.129748Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 00C4711E-20A3-4FE6-93B3-CF77AF731266, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:7466 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: E9ABFC76-2A63-42D7-83BF-E3A839411323 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2025-06-25T14:54:24.140111Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 00C4711E-20A3-4FE6-93B3-CF77AF731266, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2025-06-25T14:54:24.140472Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 518FB846-6CAB-4580-8BAE-B32A70B037B6, request# HeadObject { Bucket: TEST Key: key } REQUEST: HEAD /TEST/key HTTP/1.1 HEADERS: Host: localhost:7466 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 6447473D-606E-4741-A130-B8287D79FC8D amz-sdk-request: attempt=1 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key / 4 2025-06-25T14:54:24.142996Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 518FB846-6CAB-4580-8BAE-B32A70B037B6, response# HeadObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc ContentLength: 4 } >> KqpAgg::AggWithSelfLookup2 [GOOD] >> KqpAgg::AggWithHop ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::CopyPartUpload [GOOD] Test command err: 2025-06-25T14:54:24.228414Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 7A651C88-146A-4E65-93DD-FD42D8FA6943, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:2869 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 03D0939A-FD19-4933-A6C1-A42FD1690349 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2025-06-25T14:54:24.233568Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 7A651C88-146A-4E65-93DD-FD42D8FA6943, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2025-06-25T14:54:24.233888Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# B4448F88-E451-40A2-8765-60D44945F51B, request# CreateMultipartUpload { 
Bucket: TEST Key: key1 } REQUEST: POST /TEST/key1?uploads HTTP/1.1 HEADERS: Host: localhost:2869 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: AF88413F-394A-4D45-88FF-0E080F1EB1E4 amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-storage-class: STANDARD S3_MOCK::HttpServeAction: 4 / /TEST/key1 / uploads= 2025-06-25T14:54:24.237064Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# B4448F88-E451-40A2-8765-60D44945F51B, response# CreateMultipartUploadResult { Bucket: Key: TEST/key1 UploadId: 1 } 2025-06-25T14:54:24.239595Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 9B4F54D6-C86F-4555-9737-61151E90B7CA, request# UploadPartCopy { Bucket: TEST Key: key1 UploadId: 1 PartNumber: 1 } REQUEST: PUT /TEST/key1?partNumber=1&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:2869 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 9E5E2DBE-7814-43FE-BA91-B5E2778116FD amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-copy-source: /TEST/key x-amz-copy-source-range: bytes=1-2 S3_MOCK::HttpServeWrite: /TEST/key1 / partNumber=1&uploadId=1 / 0 2025-06-25T14:54:24.248628Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 9B4F54D6-C86F-4555-9737-61151E90B7CA, response# UploadPartCopyResult { } 2025-06-25T14:54:24.249012Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 0F9C209E-2FD6-4169-A5B5-6BB599436428, request# CompleteMultipartUpload { Bucket: TEST Key: key1 UploadId: 1 MultipartUpload: { Parts: [afc7e8a98f75755e513d9d5ead888e1d] } } REQUEST: POST /TEST/key1?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:2869 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 956AEC78-3D02-4DB9-8AF9-BAC0FDF4B08A amz-sdk-request: attempt=1 content-length: 235 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /TEST/key1 / uploadId=1 2025-06-25T14:54:24.255331Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 0F9C209E-2FD6-4169-A5B5-6BB599436428, response# CompleteMultipartUploadResult { Bucket: Key: TEST/key1 ETag: afc7e8a98f75755e513d9d5ead888e1d } 2025-06-25T14:54:24.255633Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 332EBAA1-D454-47B8-B1B8-265D80F557CB, request# GetObject { Bucket: TEST Key: key1 Range: bytes=0-1 } REQUEST: GET /TEST/key1 HTTP/1.1 HEADERS: Host: localhost:2869 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 9F30F20D-FC56-4900-8736-84D91C51F67D amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-1 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key1 / 2 2025-06-25T14:54:24.258117Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 332EBAA1-D454-47B8-B1B8-265D80F557CB, response# GetObjectResult { } >> KqpRanges::DeleteNotFullScan+UseSink [GOOD] >> KqpRanges::DeleteNotFullScan-UseSink >> TPersQueueTest::ReadRuleServiceTypeMigration [GOOD] >> 
TPersQueueTest::ReadRuleServiceTypeMigrationWithDisallowDefault >> TS3WrapperTests::MultipartUpload [GOOD] >> KqpReturning::ReturningColumnsOrder [GOOD] >> KqpReturning::ReturningTypes >> KqpKv::ReadRows_TimeoutCancelsReads [FAIL] >> KqpKv::ReadRows_PgValue >> TS3WrapperTests::CompleteUnknownUpload >> TS3WrapperTests::UploadUnknownPart ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::MultipartUpload [GOOD] Test command err: 2025-06-25T14:54:24.920844Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# ED676A13-309E-4604-BC9A-E836F5694D14, request# CreateMultipartUpload { Bucket: TEST Key: key } REQUEST: POST /TEST/key?uploads HTTP/1.1 HEADERS: Host: localhost:9994 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 191CA3EF-13AA-4838-8F46-325696CD67B6 amz-sdk-request: attempt=1 content-length: 0 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-storage-class: STANDARD S3_MOCK::HttpServeAction: 4 / /TEST/key / uploads= 2025-06-25T14:54:24.926132Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# ED676A13-309E-4604-BC9A-E836F5694D14, response# CreateMultipartUploadResult { Bucket: Key: TEST/key UploadId: 1 } 2025-06-25T14:54:24.926450Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 7E944AC7-53D0-4E41-B051-F05ECC17DF65, request# UploadPart { Bucket: TEST Key: key UploadId: 1 PartNumber: 1 } REQUEST: PUT /TEST/key?partNumber=1&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:9994 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: C29ACB1D-284B-4445-9490-D15DCA9F2EC5 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /TEST/key / partNumber=1&uploadId=1 / 4 2025-06-25T14:54:24.932525Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 7E944AC7-53D0-4E41-B051-F05ECC17DF65, response# UploadPartResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2025-06-25T14:54:24.934480Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 12331AC1-59F2-43D0-AF40-4F4236EEE8C3, request# CompleteMultipartUpload { Bucket: TEST Key: key UploadId: 1 MultipartUpload: { Parts: [841a2d689ad86bd1611447453c22c6fc] } } REQUEST: POST /TEST/key?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:9994 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 6344FC28-7385-4166-9584-A26609885AC0 amz-sdk-request: attempt=1 content-length: 235 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /TEST/key / uploadId=1 2025-06-25T14:54:24.941627Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 12331AC1-59F2-43D0-AF40-4F4236EEE8C3, response# CompleteMultipartUploadResult { Bucket: Key: TEST/key ETag: 841a2d689ad86bd1611447453c22c6fc } 2025-06-25T14:54:24.942115Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 996E9588-4CFE-4D2B-85F5-081A896C9A61, request# GetObject { Bucket: TEST Key: key Range: bytes=0-3 } REQUEST: GET /TEST/key HTTP/1.1 HEADERS: Host: localhost:9994 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: 
AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 6D39EFEF-44AC-408B-BF8A-E9E3F4DC2BBE amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-3 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key / 4 2025-06-25T14:54:24.944709Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 996E9588-4CFE-4D2B-85F5-081A896C9A61, response# GetObjectResult { } >> KqpReturning::ReturningWorksIndexedOperationsWithDefault-QueryService [GOOD] >> KqpSort::ComplexPkExclusiveSecondOptionalPredicate >> TS3WrapperTests::CompleteUnknownUpload [GOOD] >> TS3WrapperTests::UploadUnknownPart [GOOD] >> KqpNewEngine::AutoChooseIndex [GOOD] >> KqpNewEngine::AutoChooseIndexOrderByLimit >> TS3WrapperTests::PutObject ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::CompleteUnknownUpload [GOOD] Test command err: 2025-06-25T14:54:25.731622Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# E488469D-FC62-4E5F-B403-36251A5B05F5, request# CompleteMultipartUpload { Bucket: TEST Key: key UploadId: uploadId MultipartUpload: { Parts: [ETag] } } REQUEST: POST /TEST/key?uploadId=uploadId HTTP/1.1 HEADERS: Host: localhost:20107 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 556EFC79-765F-4631-91E5-51CE946C804E amz-sdk-request: attempt=1 content-length: 207 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /TEST/key / uploadId=uploadId 2025-06-25T14:54:25.737112Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# E488469D-FC62-4E5F-B403-36251A5B05F5, response# |87.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::UploadUnknownPart [GOOD] Test command err: 2025-06-25T14:54:25.844893Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 48BFA7DB-30A9-4EAA-AD42-21A6ADA28645, request# UploadPart { Bucket: TEST Key: key UploadId: uploadId PartNumber: 1 } REQUEST: PUT /TEST/key?partNumber=1&uploadId=uploadId HTTP/1.1 HEADERS: Host: localhost:2202 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 76378253-63FA-40C2-A275-D184C08DD41F amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /TEST/key / partNumber=1&uploadId=uploadId / 4 2025-06-25T14:54:25.854315Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 48BFA7DB-30A9-4EAA-AD42-21A6ADA28645, response# >> TS3WrapperTests::PutObject [GOOD] >> TS3WrapperTests::GetObject >> KqpNewEngine::MultiUsagePrecompute [GOOD] >> KqpNewEngine::MultiUsageInnerConnection >> TS3WrapperTests::GetObject [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-46 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-47 ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::PutObject [GOOD] Test command err: 2025-06-25T14:54:26.662645Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 0525D0C7-9801-4DFC-85B7-04CC429C64E3, request# PutObject { Bucket: TEST Key: key } 
REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:20375 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A40DB1B3-7911-4495-B6C2-CFEE6F140634 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2025-06-25T14:54:26.667775Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 0525D0C7-9801-4DFC-85B7-04CC429C64E3, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } >> KqpNotNullColumns::UpdateTable_UniqIndexPg [GOOD] >> KqpNotNullColumns::UpdateTable_Immediate >> KqpRanges::ValidatePredicates [GOOD] >> KqpRanges::ValidatePredicatesDataQuery >> KqpSqlIn::SimpleKey_In_And_In [GOOD] >> KqpSqlIn::TupleNotOnlyOfKeys ------- [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> TS3WrapperTests::GetObject [GOOD] Test command err: 2025-06-25T14:54:27.147173Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# 169AEC93-F50D-455B-8204-FF4557D06618, request# PutObject { Bucket: TEST Key: key } REQUEST: PUT /TEST/key HTTP/1.1 HEADERS: Host: localhost:8242 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 987AB606-47D2-49D6-857D-46D5A89FA4E1 amz-sdk-request: attempt=1 content-length: 4 content-md5: hBotaJrYa9FhFEdFPCLG/A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /TEST/key / / 4 2025-06-25T14:54:27.154724Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# 169AEC93-F50D-455B-8204-FF4557D06618, response# PutObjectResult { ETag: 841a2d689ad86bd1611447453c22c6fc } 2025-06-25T14:54:27.154978Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:75: Request: uuid# C5641273-E1CB-4D4C-A3DD-3D31173FE556, request# GetObject { Bucket: TEST Key: key Range: bytes=0-3 } REQUEST: GET /TEST/key HTTP/1.1 HEADERS: Host: localhost:8242 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: E1AAA485-E10F-436D-B854-F41E01FE486B amz-sdk-request: attempt=1 content-type: application/xml range: bytes=0-3 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeRead: /TEST/key / 4 2025-06-25T14:54:27.159551Z node 1 :S3_WRAPPER NOTICE: s3_storage.h:63: Response: uuid# C5641273-E1CB-4D4C-A3DD-3D31173FE556, response# GetObjectResult { } |87.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/wrappers/ut/unittest >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-DomainLoginOnly [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-DomainLoginOnly-StrictAclCheck >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-71 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-72 >> KqpNewEngine::DeleteWithInputMultiConsumption+UseSink [GOOD] >> KqpNewEngine::DeleteWithInputMultiConsumption-UseSink |87.6%| [TA] $(B)/ydb/core/wrappers/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.6%| [TA] {RESULT} $(B)/ydb/core/wrappers/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpRanges::IsNullInJsonValue [GOOD] >> KqpRanges::IsNullPartial >> TPersQueueTest::Codecs_InitWriteSession_DefaultTopicSupportedCodecsInInitResponse [GOOD] >> TPersQueueTest::Codecs_WriteMessageWithDefaultCodecs_MessagesAreAcknowledged >> TPersQueueTest::PartitionsMapping [GOOD] >> TPersQueueTest::MessageMetadata >> KqpMergeCn::TopSortBy_Date_And_Datetime_Limit4 [GOOD] >> KqpMergeCn::SortBy_PK_Uint64_Desc >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex-UseSink+UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex+UseSink+UseDataQuery >> KqpNewEngine::DeleteWithInputMultiConsumptionLimit-UseSink [GOOD] >> KqpNewEngine::DependentSelect >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-23 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-24 >> KqpSqlIn::KeyTypeMissmatch_Int [GOOD] >> KqpSqlIn::KeyTypeMissmatch_Str >> KqpNewEngine::ReadDifferentColumnsPk [GOOD] >> KqpNewEngine::PushFlatmapInnerConnectionsToStageInput |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |87.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::NumRowsWithNulls >> TestScript::StepMerging [GOOD] >> KqpAgg::AggWithHop [GOOD] >> KqpAgg::GroupByLimit >> KqpRanges::DuplicateKeyPredicateMixed [GOOD] >> TestProgram::NumRowsWithNulls [GOOD] |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestScript::StepMerging [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::NumRowsWithNulls [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 10001 } Function { Id: 7 Arguments { Id: 2 } } } } Command { Filter { Predicate { Id: 10001 } } } Command { GroupBy { Aggregates { Column { Id: 10002 } Function { Id: 2 } } } } Command { Projection { Columns { Id: 10002 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 10001 } Function { Id: 7 Arguments { Id: 2 } } } } Command { Filter { Predicate { Id: 10001 } } } Command { GroupBy { Aggregates { Column { Id: 10002 } Function { Id: 2 } } } } Command { Projection { Columns { Id: 10002 } } } ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N3(15):{\"i\":\"2\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"10001\",\"t\":\"Calculation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N1[shape=box, label="N1(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N1[label="1"]; N2[shape=box, label="N2(7):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N1 -> N2[label="1"]; N3[shape=box, label="N4(15):{\"i\":\"10001\",\"t\":\"Filter\"}\nREMOVE:10001",style=filled,color="#FFAAAA"]; N0 -> N3[label="1"]; N4[shape=box, label="N5(8):{\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"10002\",\"t\":\"Calculation\"}\n"]; N5[shape=box, 
label="N6(8):{\"i\":\"10002\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N4 -> N5[label="1"]; N6[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N6->N1->N2->N0->N3->N4->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":6}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[{"from":0}]},{"owner_id":4,"inputs":[]},{"owner_id":5,"inputs":[{"from":4}]},{"owner_id":6,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2}]},"o":"2","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"i":"10001","t":"Filter"},"w":15,"id":3},"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":7,"id":2},"6":{"p":{"p":{"data":[{"name":"uid","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":6},"5":{"p":{"i":"10002","t":"Projection"},"w":8,"id":5},"4":{"p":{"p":{"kernel":{"class_name":"SIMPLE"}},"o":"10002","t":"Calculation"},"w":8,"id":4},"0":{"p":{"i":"2","p":{"kernel":{"class_name":"SIMPLE"}},"o":"10001","t":"Calculation"},"w":15,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; >> KqpRanges::DeleteNotFullScan-UseSink [GOOD] >> KqpRanges::CastKeyBounds >> TTopicYqlTest::CreateAndAlterTopicYql [GOOD] >> TTopicYqlTest::AlterAutopartitioning >> KqpNewEngine::ItemsLimit [GOOD] >> KqpNewEngine::JoinDictWithPure |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::SimpleFunction [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpRanges::DuplicateKeyPredicateMixed [GOOD] Test command err: Trying to start YDB, gRPC: 21746, MsgBus: 3196 2025-06-25T14:53:38.518576Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900262800780314:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:38.518833Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e0/r3tmp/tmpbSiPkH/pdisk_1.dat 2025-06-25T14:53:38.965576Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:38.972476Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900262800780210:2080] 1750863218501570 != 1750863218501573 TServer::EnableGrpc on GrpcPort 21746, node 1 2025-06-25T14:53:38.983076Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:38.983140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:38.990370Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:39.036744Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:39.036856Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:39.036864Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:39.036961Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3196 TClient is connected to server localhost:3196 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:53:39.528441Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:39.582527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:39.605487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:39.742833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:39.892069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:39.991630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:41.593172Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900275685683732:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:41.593263Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:41.914551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:41.962793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:42.005011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:42.051788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:42.085958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:42.155907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:42.195689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:42.266253Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900279980651687:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:42.266314Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:42.266514Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900279980651692:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:42.269468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:42.277797Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900279980651694:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:42.355121Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900279980651745:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:43.190218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.397156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.520495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExist ... le_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900424551914961:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:21.172535Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:21.191849Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519900446026753772:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 22064, MsgBus: 12756 2025-06-25T14:54:24.193858Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900462398574682:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:24.193919Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e0/r3tmp/tmpfS13yx/pdisk_1.dat 2025-06-25T14:54:24.356349Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:24.378356Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:24.378473Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:24.380933Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22064, node 7 2025-06-25T14:54:24.437140Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:24.437173Z node 7 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:24.437187Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:24.437353Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12756 TClient is connected to server localhost:12756 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:54:25.070908Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:54:25.094796Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:25.193938Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:25.207974Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:25.406485Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:25.504690Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:28.598711Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900479578445445:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:28.598813Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:28.688481Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:28.732276Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:28.809078Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:28.848765Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:28.921384Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:29.022840Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:29.102608Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:29.193890Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900462398574682:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:29.193962Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:29.269856Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519900483873413418:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:29.269957Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:29.270159Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900483873413423:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:29.274588Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:29.296531Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900483873413425:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:29.389092Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900483873413477:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TPQCompatTest::CommitOffsets [GOOD] >> TPQCompatTest::LongProducerAndLongMessageGroupId >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-47 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-48 |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::SimpleFunction [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Id: 8 Arguments { Id: 2 } } } } Command { Projection { Columns { Id: 15 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Id: 8 Arguments { Id: 2 } } } } Command { Projection { Columns { Id: 15 } } } ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N3(15):{\"i\":\"2\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N1[shape=box, label="N1(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N4 -> N1[label="1"]; N2[shape=box, label="N2(7):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N1 -> N2[label="1"]; N3[shape=box, label="N4(15):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N3[label="1"]; N4[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N4->N1->N2->N0->N3[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":4}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[{"from":0}]},{"owner_id":4,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2}]},"o":"2","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"i":"15","t":"Projection"},"w":15,"id":3},"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":7,"id":2},"4":{"p":{"p":{"data":[{"name":"uid","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":4},"0":{"p":{"i":"2","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":15,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10UInt64TypeE; >> KqpNewEngine::StaleRO+EnableFollowers [GOOD] >> KqpNewEngine::StaleRO-EnableFollowers |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> KqpNamedExpressions::NamedExpressionRandomInsert+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsert-UseSink >> KqpNewEngine::AutoChooseIndexOrderByLimit [GOOD] >> KqpNewEngine::AutoChooseIndexOrderByLambda >> KqpReturning::ReturningTypes [GOOD] |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-72 [GOOD] >> TestProgram::YqlKernelStartsWith >> KqpSort::ComplexPkExclusiveSecondOptionalPredicate [GOOD] >> KqpSort::ComplexPkInclusiveSecondOptionalPredicate >> TestProgram::YqlKernelStartsWith [GOOD] |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> KqpSqlIn::SecondaryIndex_TupleSelect [GOOD] |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> KqpNewEngine::MultiUsageInnerConnection [GOOD] >> KqpNewEngine::MultipleBroadcastJoin |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelStartsWith [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \024StartsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? 
\024StartsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"7,9\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:7,9"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"9\",\"p\":{\"address\":{\"name\":\"substring\",\"id\":9}},\"o\":\"9\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"7,9\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"7,9","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"9","p":{"address":{"name":"substring","id":9}},"o":"9","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"7,9","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-BuiltinUser-DropUser-72 [GOOD] Test command err: Starting YDB, grpc: 15716, msgbus: 6044 2025-06-25T14:51:47.709600Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899785010583569:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:47.711171Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00095a/r3tmp/tmpxV3pdD/pdisk_1.dat 2025-06-25T14:51:48.151993Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:48.165186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:48.165280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:48.175661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15716, node 1 2025-06-25T14:51:48.334073Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:48.334091Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:48.334099Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:48.334221Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6044 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:51:48.722942Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899785010583758:2117] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:48.743148Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899789305551575:2446] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:48.743580Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519899789305551575:2446] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:51:48.766252Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:48.778900Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519899789305551575:2446] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-25T14:51:48.800470Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519899789305551575:2446] Handle TEvDescribeSchemeResult Forward to# [1:7519899789305551573:2444] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 
DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:51:48.820219Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519899785010583758:2117] Handle TEvProposeTransaction 2025-06-25T14:51:48.820247Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519899785010583758:2117] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:51:48.820396Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519899785010583758:2117] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7519899789305551581:2451] 2025-06-25T14:51:48.923451Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519899789305551581:2451] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:51:48.923557Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519899789305551581:2451] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:51:48.923583Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519899789305551581:2451] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:51:48.923689Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519899789305551581:2451] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:51:48.924056Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519899789305551581:2451] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:51:48.924249Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519899789305551581:2451] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 
DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:51:48.924452Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519899789305551581:2451] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:51:48.924652Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519899789305551581:2451] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:51:48.925660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:48.928663Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519899789305551581:2451] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:51:48.928759Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519899789305551581:2451] txid# 281474976715657 SEND to# [1:7519899789305551580:2450] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 2025-06-25T14:51:48.939012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:51:48.942247Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519899785010583758:2117] Handle TEvProposeTransaction 2025-06-25T14:51:48.942268Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519899785010583758:2117] TxId# 281474976715658 ProcessProposeTransaction 2025-06-25T14:51:48.942312Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519899785010583758:2117] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519899789305551622:2488] 2025-06-25T14:51:48.943884Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519899789305551622:2488] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:51:48.943933Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519899789305551622:2488] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:51:48.943949Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519899789305551622:2488] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:51:48.944009Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519899789305551622:2488] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:51:48.944249Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519899789305551622:2488] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:51:48.944338Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: 
Actor# [1:7519899789305551622:2488] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:51:48.944399Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519899789305551622:2488] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:51:48.944516Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519899789305551622:2488] txid# 281474976715658 HANDLE EvClientConnected 2025-06-25T14:51:48.94521 ... ocessProposeTransaction 2025-06-25T14:54:33.400966Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519900477176766071:2112] Cookie# 0 userReqId# "" txid# 281474976715661 SEND to# [59:7519900498651603487:2577] 2025-06-25T14:54:33.403969Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519900498651603487:2577] txid# 281474976715661 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/dc-1" 2025-06-25T14:54:33.404026Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519900498651603487:2577] txid# 281474976715661 Bootstrap, UserSID: metadata@system CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:54:33.404048Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519900498651603487:2577] txid# 281474976715661 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 0 2025-06-25T14:54:33.404212Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519900498651603487:2577] txid# 281474976715661 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-25T14:54:33.404242Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519900498651603487:2577] txid# 281474976715661 HandleResolveDatabase, UserSID: metadata@system CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-25T14:54:33.405625Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [59:7519900498651603487:2577] txid# 281474976715661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:54:33.405733Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519900498651603487:2577] txid# 281474976715661 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:54:33.405936Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# 
[59:7519900498651603487:2577] txid# 281474976715661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:54:33.406097Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519900498651603487:2577] HANDLE EvNavigateKeySetResult, txid# 281474976715661 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:54:33.406150Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519900498651603487:2577] txid# 281474976715661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715661 TabletId# 72057594046644480} 2025-06-25T14:54:33.406295Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519900498651603487:2577] txid# 281474976715661 HANDLE EvClientConnected 2025-06-25T14:54:33.410546Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519900498651603487:2577] txid# 281474976715661 Status StatusAlreadyExists HANDLE {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715661 Reason# Check failed: path: '/dc-1/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)} 2025-06-25T14:54:33.410690Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519900498651603487:2577] txid# 281474976715661, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:33.410733Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519900498651603487:2577] txid# 281474976715661 SEND to# [59:7519900498651603417:2303] Source {TEvProposeTransactionStatus txid# 281474976715661 Status# 48} 2025-06-25T14:54:33.447851Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519900477176766071:2112] Handle TEvProposeTransaction 2025-06-25T14:54:33.447893Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519900477176766071:2112] TxId# 281474976715662 ProcessProposeTransaction 2025-06-25T14:54:33.447945Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519900477176766071:2112] Cookie# 0 userReqId# "" txid# 281474976715662 SEND to# [59:7519900498651603511:2589] 2025-06-25T14:54:33.450810Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519900498651603511:2589] txid# 281474976715662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:45212" 2025-06-25T14:54:33.450898Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519900498651603511:2589] txid# 281474976715662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:54:33.450926Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519900498651603511:2589] txid# 281474976715662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:54:33.450988Z node 59 :TX_PROXY DEBUG: 
schemereq.cpp:1650: Actor# [59:7519900498651603511:2589] txid# 281474976715662 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:54:33.451382Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519900498651603511:2589] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:54:33.451504Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519900498651603511:2589] HANDLE EvNavigateKeySetResult, txid# 281474976715662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:54:33.451561Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519900498651603511:2589] txid# 281474976715662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715662 TabletId# 72057594046644480} 2025-06-25T14:54:33.451713Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519900498651603511:2589] txid# 281474976715662 HANDLE EvClientConnected 2025-06-25T14:54:33.473531Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519900498651603511:2589] txid# 281474976715662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715662} 2025-06-25T14:54:33.473613Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519900498651603511:2589] txid# 281474976715662 SEND to# [59:7519900498651603510:2296] Source {TEvProposeTransactionStatus txid# 281474976715662 Status# 48} 2025-06-25T14:54:33.559225Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519900477176766071:2112] Handle TEvProposeTransaction 2025-06-25T14:54:33.559257Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519900477176766071:2112] TxId# 281474976715663 ProcessProposeTransaction 2025-06-25T14:54:33.559307Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519900477176766071:2112] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [59:7519900498651603544:2603] 2025-06-25T14:54:33.561898Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519900498651603544:2603] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "targetuser" MissingOk: false } } } } UserToken: "\n\024ordinaryuser@builtin\022\030\022\026\n\024all-users@well-known\032\024ordinaryuser@builtin\"\007Builtin*\027ordi****ltin (32520BBF)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:45244" 2025-06-25T14:54:33.561959Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519900498651603544:2603] txid# 281474976715663 Bootstrap, UserSID: ordinaryuser@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:54:33.561983Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519900498651603544:2603] txid# 281474976715663 Bootstrap, UserSID: ordinaryuser@builtin IsClusterAdministrator: 0 2025-06-25T14:54:33.562141Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519900498651603544:2603] txid# 281474976715663 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-25T14:54:33.562180Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519900498651603544:2603] txid# 281474976715663 HandleResolveDatabase, UserSID: ordinaryuser@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 
IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-25T14:54:33.562234Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519900498651603544:2603] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:54:33.562503Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519900498651603544:2603] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:54:33.562525Z node 59 :TX_PROXY ERROR: schemereq.cpp:1103: Actor# [59:7519900498651603544:2603] txid# 281474976715663, Access denied for ordinaryuser@builtin, attempt to manage user 2025-06-25T14:54:33.562608Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519900498651603544:2603] txid# 281474976715663, issues: { message: "Access denied for ordinaryuser@builtin" issue_code: 200000 severity: 1 } 2025-06-25T14:54:33.562635Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519900498651603544:2603] txid# 281474976715663 SEND to# [59:7519900498651603543:2313] Source {TEvProposeTransactionStatus Status# 5} 2025-06-25T14:54:33.563461Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=YmI5NDA2OWEtYzE5OGRkODUtZjVmYzIwYWYtYTZjNWVlOTY=, ActorId: [59:7519900498651603529:2313], ActorState: ExecuteState, TraceId: 01jyksaqj51ek6yxrw3y053k9k, Create QueryResponse for error on request, msg: 2025-06-25T14:54:33.563926Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519900477176766071:2112] Handle TEvExecuteKqpTransaction 2025-06-25T14:54:33.563944Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519900477176766071:2112] TxId# 281474976715664 ProcessProposeKqpTransaction 2025-06-25T14:54:33.834026Z node 59 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[59:7519900477176766149:2155];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:33.834101Z node 59 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> KqpRanges::IsNullPartial [GOOD] >> KqpRanges::LiteralOr ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpReturning::ReturningTypes [GOOD] Test command err: Trying to start YDB, gRPC: 17888, MsgBus: 5870 2025-06-25T14:53:35.530540Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900250722225792:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:35.530583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e5/r3tmp/tmphte84c/pdisk_1.dat 2025-06-25T14:53:35.949636Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17888, node 1 2025-06-25T14:53:35.962952Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:35.963265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:35.993254Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:36.044681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:36.044713Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:36.044721Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:36.044844Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5870 TClient is connected to server localhost:5870 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:53:36.560849Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:36.608151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:36.620640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:36.629608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:36.774766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:36.928778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:36.993384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:38.449302Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900263607129257:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:38.449411Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:38.795859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:38.830997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:38.878493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:38.920738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:38.967265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:39.000416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:39.083996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:39.174310Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900267902097219:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:39.174380Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:39.174659Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900267902097224:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:39.178286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:39.197337Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900267902097226:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:39.292041Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900267902097277:3411] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:40.287148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.329932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.371193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propo ... 4918:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:21.904082Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519900449450154969:3428] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:23.256676Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) [[[2];["321"]];[["111"];[2]]] Trying to start YDB, gRPC: 19705, MsgBus: 9932 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e5/r3tmp/tmpKKbgVS/pdisk_1.dat 2025-06-25T14:54:25.626637Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:54:25.629154Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:25.630525Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900464626023075:2080] 1750863265302093 != 1750863265302096 2025-06-25T14:54:25.644914Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:25.645011Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:25.647697Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19705, node 7 2025-06-25T14:54:25.741160Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:25.741186Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:25.741198Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:25.741350Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9932 2025-06-25T14:54:26.299718Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9932 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:26.525915Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:26.534409Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:54:26.546955Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:26.646873Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:26.872956Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:26.972748Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:30.446120Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900486100861182:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:30.446215Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:30.487047Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:30.540141Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:30.612612Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:30.690226Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:30.726565Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:30.765727Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:30.848479Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:30.974411Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900486100861855:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:30.974528Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:30.974764Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900486100861860:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:30.979746Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:31.001544Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900486100861862:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:54:31.095965Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900490395829209:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpNotNullColumns::UpdateTable_Immediate [GOOD] >> TestProgram::JsonExists |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonExists [GOOD] >> KqpNewEngine::DependentSelect [GOOD] >> KqpNewEngine::DqSourceCount >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-24 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::SecondaryIndex_TupleSelect [GOOD] Test command err: Trying to start YDB, gRPC: 27660, MsgBus: 29385 2025-06-25T14:53:24.460726Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900203627971433:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:24.471574Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009f6/r3tmp/tmpQD0xIr/pdisk_1.dat 2025-06-25T14:53:24.814345Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:24.815279Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900203627971269:2080] 1750863204446176 != 1750863204446179 TServer::EnableGrpc on GrpcPort 27660, node 1 2025-06-25T14:53:24.876616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:24.876744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:24.878317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:24.890090Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:24.890126Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:24.890135Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:24.890241Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29385 TClient is connected to server localhost:29385 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:25.408664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:25.437299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:25.447177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:25.459740Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:25.595331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:25.760170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:25.833438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:27.418058Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900216512874789:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.418234Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.733703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.768459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.803588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.830968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.863255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.899068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.929562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:27.997766Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900216512875445:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.997834Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:27.997885Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900216512875450:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:28.001166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:28.012182Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900216512875452:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:28.096283Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900220807842799:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:29.028544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:29.074618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/yd ... 24.116848Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:24.116881Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:24.116895Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:24.117054Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25683 TClient is connected to server localhost:25683 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:24.868594Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:24.870147Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:54:24.885421Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:24.894548Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:24.977445Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:25.184054Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:25.294473Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:28.661093Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900477894572699:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:28.661212Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:28.705763Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:28.750632Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:28.798739Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:28.839162Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900456419734624:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:28.839237Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:28.844041Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:28.899385Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:28.982905Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:29.079254Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:29.171505Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519900482189540660:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:29.171617Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900482189540665:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:29.171806Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:29.177178Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:29.191622Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900482189540667:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:29.281886Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900482189540718:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:30.954883Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:31.017655Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:31.068699Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Warning: Type annotation, code: 1030
:5:17: Warning: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:6:56: Warning: At function: Filter, At lambda, At function: Coalesce
:7:29: Warning: At function: SqlIn
:7:29: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |87.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::Like >> KqpKv::ReadRows_PgValue [GOOD] >> KqpKv::ReadRows_PgKey >> KqpRanges::NoFullScanAtScanQuery [GOOD] >> KqpRanges::NoFullScanAtDNFPredicate >> KqpNewEngine::DeleteWithInputMultiConsumption-UseSink [GOOD] >> TestProgram::Like [GOOD] |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonExists [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\214\002\214\n\210\203\001H?>?6\016\000\203\004\203\005@\203\004\203\004\207\214\002\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?H\203\005@\200\203\005@\202\022\000\003?d\036Json2.SqlExists\202\003?f\000\002\017\003?J\000\003?L\000\003?N\000\003?P\000\027?T\t\211\014?R\311\002?R\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\200\005\205\004\203\010\203\005@\032\036\003?\206\002\003?\210\000\003\001\003?\202\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\230\203\005@\200\203\005@\202\022\000\003?\244\026Json2.Parse\202\003?\246\000\002\017\003?\232\000\003?\234\000\003?\236\000\003?\240\000?<\036\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\276\203\005@\200\203\005@\202\022\000\003?\312\"Json2.CompilePath\202\003?\314\000\002\017\003?\300\000\003?\302\000\003?\304\000\003?\306\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\214\002\214\n\210\203\001H?>?6\016\000\203\004\203\005@\203\004\203\004\207\214\002\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?H\203\005@\200\203\005@\202\022\000\003?d\036Json2.SqlExists\202\003?f\000\002\017\003?J\000\003?L\000\003?N\000\003?P\000\027?T\t\211\014?R\311\002?R\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\200\005\205\004\203\010\203\005@\032\036\003?\206\002\003?\210\000\003\001\003?\202\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\230\203\005@\200\203\005@\202\022\000\003?\244\026Json2.Parse\202\003?\246\000\002\017\003?\232\000\003?\234\000\003?\236\000\003?\240\000?<\036\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\276\203\005@\200\203\005@\202\022\000\003?\312\"Json2.CompilePath\202\003?\314\000\002\017\003?\300\000\003?\302\000\003?\304\000\003?\306\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "[]" ] FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; >> 
KqpSqlIn::KeyTypeMissmatch_Str [GOOD] >> KqpSqlIn::SecondaryIndex_PgKey |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::Like [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Bytes: "001" } } } Command { Assign { Column { Id: 16 } Constant { Bytes: "uid" } } } Command { Assign { Column { Id: 17 } Function { Id: 33 Arguments { Id: 7 } Arguments { Id: 16 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Assign { Column { Id: 18 } Function { Id: 34 Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 1 } } } Command { Assign { Column { Id: 19 } Function { Id: 18 Arguments { Id: 17 } FunctionType: SIMPLE_ARROW } } } Command { Assign { Column { Id: 20 } Function { Id: 18 Arguments { Id: 18 } FunctionType: SIMPLE_ARROW } } } Command { Assign { Column { Id: 21 } Function { Id: 11 Arguments { Id: 19 } Arguments { Id: 20 } FunctionType: SIMPLE_ARROW } } } Command { Projection { Columns { Id: 21 } } } Kernels: "O\006\006Arg\022BlockFunc\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\004\203\014?\006\001\235?\004\001\235?\010\001\n\000\t\211\004?\016\235?\000\001\235?\002\000\n\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\004?\020\235?\006\001?$\n\000\t\211\006?$\203\005@?\024?\026\006\000\003?(\024StartsWith?\034? \001\t\211\006?$\203\005@?\024?\026\006\000\003?0\020EndsWith?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Bytes: "001" } } } Command { Assign { Column { Id: 16 } Constant { Bytes: "uid" } } } Command { Assign { Column { Id: 17 } Function { Id: 33 Arguments { Id: 7 } Arguments { Id: 16 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Assign { Column { Id: 18 } Function { Id: 34 Arguments { Id: 7 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 1 } } } Command { Assign { Column { Id: 19 } Function { Id: 18 Arguments { Id: 17 } FunctionType: SIMPLE_ARROW } } } Command { Assign { Column { Id: 20 } Function { Id: 18 Arguments { Id: 18 } FunctionType: SIMPLE_ARROW } } } Command { Assign { Column { Id: 21 } Function { Id: 11 Arguments { Id: 19 } Arguments { Id: 20 } FunctionType: SIMPLE_ARROW } } } Command { Projection { Columns { Id: 21 } } } Kernels: "O\006\006Arg\022BlockFunc\030BlockAsTuple\t\211\004\235\213\004\213\004\203\001H\203\005@\213\004\203\014?\006\001\235?\004\001\235?\010\001\n\000\t\211\004?\016\235?\000\001\235?\002\000\n\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\004?\020\235?\006\001?$\n\000\t\211\006?$\203\005@?\024?\026\006\000\003?(\024StartsWith?\034? \001\t\211\006?$\203\005@?\024?\026\006\000\003?0\020EndsWith?\034? 
\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N6(0):{\"p\":{\"v\":\"001\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N0(0):{\"p\":{\"v\":\"uid\"},\"o\":\"16\",\"t\":\"Const\"}\n"]; N2[shape=box, label="N4(15):{\"i\":\"7,16\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"17\",\"t\":\"Calculation\"}\nREMOVE:16"]; N1 -> N2[label="1"]; N4 -> N2[label="2"]; N3[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"7\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N10 -> N3[label="1"]; N4[shape=box, label="N3(7):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N3 -> N4[label="1"]; N5[shape=box, label="N7(15):{\"i\":\"7,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"18\",\"t\":\"Calculation\"}\nREMOVE:7,15"]; N0 -> N5[label="1"]; N4 -> N5[label="2"]; N6[shape=box, label="N5(23):{\"i\":\"17\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"19\",\"t\":\"Calculation\"}\nREMOVE:17"]; N2 -> N6[label="1"]; N7[shape=box, label="N8(23):{\"i\":\"18\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"20\",\"t\":\"Calculation\"}\nREMOVE:18"]; N5 -> N7[label="1"]; N8[shape=box, label="N9(54):{\"i\":\"19,20\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"21\",\"t\":\"Calculation\"}\nREMOVE:19,20"]; N6 -> N8[label="1"]; N7 -> N8[label="2"]; N9[shape=box, label="N10(54):{\"i\":\"21\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N8 -> N9[label="1"]; N10[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N1->N10->N3->N4->N2->N6->N0->N5->N7->N8->N9[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[]},{"owner_id":2,"inputs":[{"from":1},{"from":4}]},{"owner_id":3,"inputs":[{"from":10}]},{"owner_id":4,"inputs":[{"from":3}]},{"owner_id":5,"inputs":[{"from":0},{"from":4}]},{"owner_id":6,"inputs":[{"from":2}]},{"owner_id":7,"inputs":[{"from":5}]},{"owner_id":8,"inputs":[{"from":6},{"from":7}]},{"owner_id":9,"inputs":[{"from":8}]},{"owner_id":10,"inputs":[]}],"nodes":{"1":{"p":{"p":{"v":"uid"},"o":"16","t":"Const"},"w":0,"id":1},"3":{"p":{"i":"0","p":{"data":[{"name":"string","id":7}]},"o":"7","t":"FetchOriginalData"},"w":2,"id":3},"8":{"p":{"i":"19,20","p":{"kernel":{"class_name":"SIMPLE"}},"o":"21","t":"Calculation"},"w":54,"id":8},"2":{"p":{"i":"7,16","p":{"kernel":{"class_name":"SIMPLE"}},"o":"17","t":"Calculation"},"w":15,"id":2},"0":{"p":{"p":{"v":"001"},"o":"15","t":"Const"},"w":0,"id":0},"5":{"p":{"i":"7,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"18","t":"Calculation"},"w":15,"id":5},"9":{"p":{"i":"21","t":"Projection"},"w":54,"id":9},"7":{"p":{"i":"18","p":{"kernel":{"class_name":"SIMPLE"}},"o":"20","t":"Calculation"},"w":23,"id":7},"4":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":7,"id":4},"10":{"p":{"p":{"data":[{"name":"string","id":7}]},"o":"0","t":"ReserveMemory"},"w":0,"id":10},"6":{"p":{"i":"17","p":{"kernel":{"class_name":"SIMPLE"}},"o":"19","t":"Calculation"},"w":23,"id":6}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow11BooleanTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow11BooleanTypeE; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::UpdateTable_Immediate [GOOD] Test command err: Trying to start YDB, gRPC: 7546, MsgBus: 13424 2025-06-25T14:53:44.774984Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900290233425125:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:44.785961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d9/r3tmp/tmpoYBTAi/pdisk_1.dat 2025-06-25T14:53:45.184392Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900290233425028:2080] 1750863224753299 != 1750863224753302 2025-06-25T14:53:45.201000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:45.201112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:45.204954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:45.232892Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7546, node 1 2025-06-25T14:53:45.340694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:45.340716Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:45.340736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:45.340866Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13424 TClient is connected to server localhost:13424 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:53:45.798461Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:45.799534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:45.819437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:47.558130Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900303118327555:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:47.558228Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:47.775576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:47.922057Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900303118327660:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:47.922153Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:47.922198Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900303118327665:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:47.925997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:47.947270Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900303118327667:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:53:48.040746Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900307413295014:2392] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:48.182011Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900307413295056:2316], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing key column in input: Key for table: /Root/TestUpsertNotNullPk, code: 2029 2025-06-25T14:53:48.183529Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MTBkNmRjMjQtZmVlMzQxZmYtYzQyNTU3NmItZDUyZTM3YzI=, ActorId: [1:7519900303118327545:2289], ActorState: ExecuteState, TraceId: 01jyks9b8176f42ecqqryn645c, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: 2025-06-25T14:53:48.209193Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900307413295065:2320], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:47: Error: Failed to convert type: Struct<'Key':Null,'Value':String> to Struct<'Key':Uint64,'Value':String?>
:1:47: Error: Failed to convert 'Key': Null to Uint64
:1:47: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T14:53:48.210648Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MTBkNmRjMjQtZmVlMzQxZmYtYzQyNTU3NmItZDUyZTM3YzI=, ActorId: [1:7519900303118327545:2289], ActorState: ExecuteState, TraceId: 01jyks9b927ww3dz0377ybwv9c, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 24824, MsgBus: 11295 2025-06-25T14:53:48.931202Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900307863805122:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:48.931234Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d9/r3tmp/tmptgyC6W/pdisk_1.dat 2025-06-25T14:53:49.074105Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:49.076583Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900307863805100:2080] 1750863228929316 != 1750863228929319 2025-06-25T14:53:49.094308Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:49.094387Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 24824, node 2 2025-06-25T14:53:49.098684Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:49.145193Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:49.145216Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:49.145223Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:49.145329Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11295 TClient is connected to server localhost:11295 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:53:49.744101Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDo ... cess. 2025-06-25T14:54:14.191107Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:14.201372Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:54:16.887983Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519900426309223772:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.888044Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7519900426309223783:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.888098Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.892908Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:16.908959Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7519900426309223786:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:54:17.007974Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519900430604191133:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:17.047879Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:18.085745Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900413424321395:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:18.085821Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:25.026628Z node 6 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksadf83hc4dtkea5kevbry, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=M2JhZGRkNjAtMmNiNDg0YTgtZDQ3ZmUyZGMtNmUwNjk4MGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-25T14:54:25.026922Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=6&id=M2JhZGRkNjAtMmNiNDg0YTgtZDQ3ZmUyZGMtNmUwNjk4MGE=, ActorId: [6:7519900456373995724:2488], ActorState: ExecuteState, TraceId: 01jyksadf83hc4dtkea5kevbry, Create QueryResponse for error on request, msg: 2025-06-25T14:54:26.845900Z node 6 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksaf8cdasg6egyk32xjp0g, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=MjhlODA1YTAtMzZmYjNhMy0xODY5YWNkYy00YWUxOGMzYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-06-25T14:54:26.846138Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=6&id=MjhlODA1YTAtMzZmYjNhMy0xODY5YWNkYy00YWUxOGMzYg==, ActorId: [6:7519900464963930389:2513], ActorState: ExecuteState, TraceId: 01jyksaf8cdasg6egyk32xjp0g, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 26324, MsgBus: 18137 2025-06-25T14:54:27.757586Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900475406174674:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:27.757651Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d9/r3tmp/tmp30hpfS/pdisk_1.dat 2025-06-25T14:54:27.942913Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:27.963780Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:27.963886Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:27.968541Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26324, node 7 2025-06-25T14:54:28.077133Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:28.077161Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:28.077172Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:28.077315Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18137 2025-06-25T14:54:28.769174Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18137 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:54:28.970982Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:32.286610Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900496881011757:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:32.286743Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:32.307808Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:32.412945Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900496881011910:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:32.413066Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:32.413569Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900496881011915:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:32.418371Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:32.433118Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900496881011917:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:54:32.540266Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900496881011968:2431] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:32.760394Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900475406174674:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:32.760471Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-24 [GOOD] Test command err: Starting YDB, grpc: 3676, msgbus: 4322 2025-06-25T14:51:47.672263Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899786387778749:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:47.675104Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00093d/r3tmp/tmpckNUzu/pdisk_1.dat 2025-06-25T14:51:48.152795Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:48.165415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:48.165536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:48.182201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3676, node 1 2025-06-25T14:51:48.334263Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:48.334292Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:48.334312Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:48.334425Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4322 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:51:48.720463Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899786387778944:2118] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:48.724570Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:48.739596Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899790682746749:2435] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:48.739989Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519899790682746749:2435] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:51:48.792551Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519899790682746749:2435] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-25T14:51:48.801705Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519899790682746749:2435] Handle TEvDescribeSchemeResult Forward to# [1:7519899790682746748:2434] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 
18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:51:48.820499Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519899786387778944:2118] Handle TEvProposeTransaction 2025-06-25T14:51:48.820538Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519899786387778944:2118] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:51:48.820644Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519899786387778944:2118] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7519899790682746755:2440] 2025-06-25T14:51:48.959418Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519899790682746755:2440] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:51:48.959530Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519899790682746755:2440] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:51:48.959551Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519899790682746755:2440] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:51:48.959650Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519899790682746755:2440] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:51:48.960205Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519899790682746755:2440] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:51:48.960437Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519899790682746755:2440] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:51:48.960564Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519899790682746755:2440] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:51:48.960770Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519899790682746755:2440] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:51:48.961674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:48.964490Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519899790682746755:2440] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:51:48.964555Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519899790682746755:2440] txid# 
281474976715657 SEND to# [1:7519899790682746754:2439] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 2025-06-25T14:51:48.981114Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519899786387778944:2118] Handle TEvProposeTransaction 2025-06-25T14:51:48.981137Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519899786387778944:2118] TxId# 281474976715658 ProcessProposeTransaction 2025-06-25T14:51:48.981175Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519899786387778944:2118] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519899790682746799:2480] 2025-06-25T14:51:48.983689Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519899790682746799:2480] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:51:48.983748Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519899790682746799:2480] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:51:48.983763Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519899790682746799:2480] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:51:48.983805Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519899790682746799:2480] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:51:48.984141Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519899790682746799:2480] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:51:48.984223Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519899790682746799:2480] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:51:48.984285Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519899790682746799:2480] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:51:48.984426Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519899790682746799:2480] txid# 281474976715658 HANDLE EvClientConnected 2025-06-25T14:51:48.984914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 28147497671565 ... 
cePool, state: EPathStateNoChanges)} 2025-06-25T14:54:35.167248Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519900509824560390:2577] txid# 281474976710660, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:35.167285Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519900509824560390:2577] txid# 281474976710660 SEND to# [59:7519900509824560322:2302] Source {TEvProposeTransactionStatus txid# 281474976710660 Status# 48} 2025-06-25T14:54:35.192753Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519900488349723136:2115] Handle TEvProposeTransaction 2025-06-25T14:54:35.192784Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519900488349723136:2115] TxId# 281474976710661 ProcessProposeTransaction 2025-06-25T14:54:35.192828Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519900488349723136:2115] Cookie# 0 userReqId# "" txid# 281474976710661 SEND to# [59:7519900509824560414:2589] 2025-06-25T14:54:35.195482Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519900509824560414:2589] txid# 281474976710661 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "ordinaryuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:48282" 2025-06-25T14:54:35.195547Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519900509824560414:2589] txid# 281474976710661 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:54:35.195567Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519900509824560414:2589] txid# 281474976710661 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:54:35.195614Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519900509824560414:2589] txid# 281474976710661 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:54:35.195938Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519900509824560414:2589] txid# 281474976710661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:54:35.196028Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519900509824560414:2589] HANDLE EvNavigateKeySetResult, txid# 281474976710661 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:54:35.196078Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519900509824560414:2589] txid# 281474976710661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710661 TabletId# 72057594046644480} 2025-06-25T14:54:35.196215Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519900509824560414:2589] txid# 281474976710661 HANDLE EvClientConnected 2025-06-25T14:54:35.205135Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519900509824560414:2589] txid# 281474976710661 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 
281474976710661} 2025-06-25T14:54:35.205197Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519900509824560414:2589] txid# 281474976710661 SEND to# [59:7519900509824560413:2295] Source {TEvProposeTransactionStatus txid# 281474976710661 Status# 48} 2025-06-25T14:54:35.337350Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519900488349723136:2115] Handle TEvProposeTransaction 2025-06-25T14:54:35.337385Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519900488349723136:2115] TxId# 281474976710662 ProcessProposeTransaction 2025-06-25T14:54:35.337439Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519900488349723136:2115] Cookie# 0 userReqId# "" txid# 281474976710662 SEND to# [59:7519900509824560434:2603] 2025-06-25T14:54:35.339910Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519900509824560434:2603] txid# 281474976710662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\022\010\001\022\016\032\014ordinaryuser\n\032\010\000\022\026\010\001\020\200\200\002\032\014ordinaryuser \000" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:48290" 2025-06-25T14:54:35.339986Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519900509824560434:2603] txid# 281474976710662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:54:35.340009Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519900509824560434:2603] txid# 281474976710662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:54:35.340067Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519900509824560434:2603] txid# 281474976710662 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:54:35.340420Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519900509824560434:2603] txid# 281474976710662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:54:35.340521Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519900509824560434:2603] HANDLE EvNavigateKeySetResult, txid# 281474976710662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:54:35.340574Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519900509824560434:2603] txid# 281474976710662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710662 TabletId# 72057594046644480} 2025-06-25T14:54:35.340712Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519900509824560434:2603] txid# 281474976710662 HANDLE EvClientConnected 2025-06-25T14:54:35.341252Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:54:35.343581Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519900509824560434:2603] txid# 281474976710662 Status StatusSuccess HANDLE 
{TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710662} 2025-06-25T14:54:35.343641Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519900509824560434:2603] txid# 281474976710662 SEND to# [59:7519900509824560433:2308] Source {TEvProposeTransactionStatus txid# 281474976710662 Status# 48} 2025-06-25T14:54:35.409116Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519900488349723136:2115] Handle TEvProposeTransaction 2025-06-25T14:54:35.409146Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519900488349723136:2115] TxId# 281474976710663 ProcessProposeTransaction 2025-06-25T14:54:35.409188Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519900488349723136:2115] Cookie# 0 userReqId# "" txid# 281474976710663 SEND to# [59:7519900509824560478:2629] 2025-06-25T14:54:35.413023Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519900509824560478:2629] txid# 281474976710663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014ordinaryuser\022\030\022\026\n\024all-users@well-known\032\334\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDkwNjQ3NSwiaWF0IjoxNzUwODYzMjc1LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.mdfM33rNtUzcrN5LdljvcOmg2Tm-1DZig37mP_rbFPgKC2BC_fXvqZA6ORM4h5YvEB_cGruovESge5nmYuv69U2_0kvfLmZEcSO5Cl-Q-0n06q50qCNBmkHEOyAseWr0wsAHxLa0yPXizOeW5gw_lBx4zoqMnyr90c1vjb0HKruhqMN36vVUMdtolES4UGIM6LjLc-oWlu-wDiFpYVkgQrvbGwn1jmAjdM6Sz-ZF3Qz1FaN6ZxsUFzKkvEuAw1u6RVqVbgdUJKIkHywWJnD1fMuRGqQVhND45qjJKUAqyBIZ0HExwAwT5Wio_m6wVwHwM1dN3ayvAc-ETBRgKd-drA\"\005Login*\210\001eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDkwNjQ3NSwiaWF0IjoxNzUwODYzMjc1LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.**" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:48324" 2025-06-25T14:54:35.413097Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519900509824560478:2629] txid# 281474976710663 Bootstrap, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:54:35.413117Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519900509824560478:2629] txid# 281474976710663 Bootstrap, UserSID: ordinaryuser IsClusterAdministrator: 0 2025-06-25T14:54:35.413277Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519900509824560478:2629] txid# 281474976710663 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-25T14:54:35.413318Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519900509824560478:2629] txid# 281474976710663 HandleResolveDatabase, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-25T14:54:35.413361Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519900509824560478:2629] txid# 281474976710663 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:54:35.413612Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519900509824560478:2629] txid# 281474976710663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:54:35.413636Z node 59 :TX_PROXY ERROR: schemereq.cpp:1103: Actor# [59:7519900509824560478:2629] txid# 281474976710663, Access denied for ordinaryuser, attempt to manage user 2025-06-25T14:54:35.413723Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519900509824560478:2629] txid# 
281474976710663, issues: { message: "Access denied for ordinaryuser" issue_code: 200000 severity: 1 } 2025-06-25T14:54:35.413750Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519900509824560478:2629] txid# 281474976710663 SEND to# [59:7519900509824560477:2314] Source {TEvProposeTransactionStatus Status# 5} 2025-06-25T14:54:35.414413Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=OWJhN2JmYmQtMzFmMDgxNWYtMzAxNWQwNTYtZjNiMTQ5NjE=, ActorId: [59:7519900509824560463:2314], ActorState: ExecuteState, TraceId: 01jyksasbv83h6f53tn3d06ec1, Create QueryResponse for error on request, msg: 2025-06-25T14:54:35.414839Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519900488349723136:2115] Handle TEvExecuteKqpTransaction 2025-06-25T14:54:35.414857Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519900488349723136:2115] TxId# 281474976710664 ProcessProposeKqpTransaction 2025-06-25T14:54:35.712295Z node 59 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[59:7519900488349723042:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:35.712396Z node 59 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; >> TestProgram::YqlKernelEndsWith >> TestProgram::YqlKernelEndsWith [GOOD] >> TestProgram::YqlKernelContains |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelContains [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEndsWith [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \020EndsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? 
\020EndsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"7,9\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:7,9"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"9\",\"p\":{\"address\":{\"name\":\"substring\",\"id\":9}},\"o\":\"9\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"7,9\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"7,9","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"9","p":{"address":{"name":"substring","id":9}},"o":"9","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"7,9","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnEmptyTenant-DomainLoginOnly-StrictAclCheck [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::DeleteWithInputMultiConsumption-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 6244, MsgBus: 7024 2025-06-25T14:53:47.892546Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900301686874884:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:47.892858Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d8/r3tmp/tmpO3cFqh/pdisk_1.dat 2025-06-25T14:53:48.327018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:48.327106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:48.330840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:48.358826Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:48.364496Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900301686874702:2080] 1750863227817894 != 1750863227817897 TServer::EnableGrpc on GrpcPort 6244, node 1 2025-06-25T14:53:48.544777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:48.544796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:48.544807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:48.544886Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:53:48.821482Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7024 TClient is connected to server localhost:7024 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:49.323913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:53:49.341212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:49.358777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:49.507136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:49.693381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:53:49.770895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.271768Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900318866745518:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:51.272007Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:51.534569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.557917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.590090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.616101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.645960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.678973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.735046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.827956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900318866746177:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:51.828026Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:51.828427Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900318866746182:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:51.832448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:51.842958Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900318866746184:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:51.937033Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900318866746237:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:52.864671Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900301686874884:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:52.864729Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 26226, MsgBus: 29629 2025-06-25T14:53:54.341313Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900333123291290:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:54.341534Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784 ... s.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900443104323508:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:25.476519Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 23696, MsgBus: 14261 2025-06-25T14:54:28.697059Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900479448867320:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:28.697147Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d8/r3tmp/tmpddkubz/pdisk_1.dat 2025-06-25T14:54:28.854749Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:28.873167Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900479448867301:2080] 1750863268695652 != 1750863268695655 2025-06-25T14:54:28.874011Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:28.874101Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:28.875600Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23696, node 7 2025-06-25T14:54:28.976887Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:28.976907Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:28.976917Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:28.977061Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14261 TClient is connected 
to server localhost:14261 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:54:29.633844Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:54:29.641506Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:29.654729Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:29.713068Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:29.750583Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:30.033272Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:30.134888Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:32.772703Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900496628738119:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:32.772823Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:32.848613Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:32.890205Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:32.933260Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:32.976394Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:33.017100Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:33.106818Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:33.185004Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:33.292737Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900500923706075:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:33.292844Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:33.293047Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900500923706080:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:33.298647Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:33.315042Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900500923706082:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:33.372070Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900500923706133:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:33.700427Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900479448867320:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:33.700517Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelContains [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\005@\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \034StringContains?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\005@\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? 
\034StringContains?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"7,9\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:7,9"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"9\",\"p\":{\"address\":{\"name\":\"substring\",\"id\":9}},\"o\":\"9\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"7,9\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"7,9","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"9","p":{"address":{"name":"substring","id":9}},"o":"9","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"7,9","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> KqpMergeCn::SortBy_PK_Uint64_Desc [GOOD] >> KqpMergeCn::SortBy_Int32 >> TestProgram::YqlKernelEquals >> TestProgram::YqlKernelEquals [GOOD] |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> ResultFormatter::Optional [GOOD] >> ResultFormatter::Pg >> TestProgram::JsonValue >> ResultFormatter::Pg [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEquals [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 10 } Arguments { Id: 11 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\020\203B\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\001\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\014Equals?\034? \001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 10 } Arguments { Id: 11 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\004\006Arg\030BlockAsTuple\t\211\004\235\213\004\213\004\203\020\203B\213\002\203\014\001\235?\004\001\235?\010\001\006\000\t\211\004?\016\235?\000\001\235?\002\001\006\000\t\251\000?\024\002\000\t\251\000?\026\002\000\000\t\211\002?\020\235?\006\001\006\000\t\211\006?$\203\005@?\024?\026$BlockFunc\000\003?(\014Equals?\034? 
\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"i16\",\"id\":10},{\"name\":\"float\",\"id\":11}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"10,11\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:10,11"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"10\",\"p\":{\"address\":{\"name\":\"i16\",\"id\":10}},\"o\":\"10\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"11\",\"p\":{\"address\":{\"name\":\"float\",\"id\":11}},\"o\":\"11\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"i16\",\"id\":10},{\"name\":\"float\",\"id\":11}]},\"o\":\"10,11\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"10","p":{"address":{"name":"i16","id":10}},"o":"10","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"i16","id":10},{"name":"float","id":11}]},"o":"10,11","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"i16","id":10},{"name":"float","id":11}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"11","p":{"address":{"name":"float","id":11}},"o":"11","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"10,11","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9Int16TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9FloatTypeE; digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"i16\",\"id\":10},{\"name\":\"float\",\"id\":11}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, 
label="N4(26):{\"i\":\"10,11\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:10,11"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"10\",\"p\":{\"address\":{\"name\":\"i16\",\"id\":10}},\"o\":\"10\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"11\",\"p\":{\"address\":{\"name\":\"float\",\"id\":11}},\"o\":\"11\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"i16\",\"id\":10},{\"name\":\"float\",\"id\":11}]},\"o\":\"10,11\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; } FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow9UInt8TypeE; >> KqpExtractPredicateLookup::OverflowLookup [GOOD] >> KqpExtractPredicateLookup::SimpleRange >> TestProgram::JsonValue [GOOD] |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Pg [GOOD] |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-48 [GOOD] >> KqpNewEngine::PushFlatmapInnerConnectionsToStageInput [GOOD] >> KqpNewEngine::PushPureFlatmapInnerConnectionsToStage >> PartitionStats::Collector |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> PartitionStats::Collector [GOOD] >> KqpSqlIn::TupleNotOnlyOfKeys [GOOD] >> ResultFormatter::List [GOOD] >> ResultFormatter::Null [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonValue [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\"\000\t\211\004?\020\235?\002\001\235?\004\000\"\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\"\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D 
VisitAll\000\t\211\020?H\211\006?H\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?p6Json2.SqlValueConvertToUtf8\202\003?r\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?d\t\211\014?b\311\002?b\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\214\005\205\004\203\010\203\005@\032\036\003?\222\002\003?\224\000\003\001\003?\216\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\244\203\005@\200\203\005@\202\022\000\003?\260\026Json2.Parse\202\003?\262\000\002\017\003?\246\000\003?\250\000\003?\252\000\003?\254\000?:\036\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\312\203\005@\200\203\005@\202\022\000\003?\326\"Json2.CompilePath\202\003?\330\000\002\017\003?\314\000\003?\316\000\003?\320\000\003?\322\000?2\036\010\000?l\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\370\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\"\000\t\211\004?\020\235?\002\001\235?\004\000\"\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\"\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D VisitAll\000\t\211\020?H\211\006?H\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?p6Json2.SqlValueConvertToUtf8\202\003?r\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?d\t\211\014?b\311\002?b\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\214\005\205\004\203\010\203\005@\032\036\003?\222\002\003?\224\000\003\001\003?\216\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\244\203\005@\200\203\005@\202\022\000\003?\260\026Json2.Parse\202\003?\262\000\002\017\003?\246\000\003?\250\000\003?\252\000\003?\254\000?:\036\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\312\203\005@\200\203\005@\202\022\000\003?\326\"Json2.CompilePath\202\003?\330\000\002\017\003?\314\000\003?\316\000\003?\320\000\003?\322\000?2\036\010\000?l\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\370\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, 
label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "{"key":10}", "{"key":0.1}", "{"key":false}", "{"another":"value"}", "[]" ] Check output for Utf8 FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203\014?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r$Json2.SqlValueBool\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203\014?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r$Json2.SqlValueBool\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\" ... 
} FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203B\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?6 VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r(Json2.SqlValueNumber\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000\t\211\004?6\203\005@?F\030Invoke\000\003?\374\016Convert?\372\001\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "{"key":10}", "{"key":0.1}", "{"key":false}", "{"another":"value"}", "[]" ] Check output for Float FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r(Json2.SqlValueNumber\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r(Json2.SqlValueNumber\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "{"key":10}", "{"key":0.1}", "{"key":false}", "{"another":"value"}", "[]" ] Check output for Double FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:520;T=N5arrow10DoubleTypeE; |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> PartitionStats::Collector [GOOD] |87.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Null [GOOD] >> KqpRanges::CastKeyBounds [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-ModifyUser-48 [GOOD] Test command err: Starting YDB, grpc: 18997, msgbus: 21841 2025-06-25T14:51:47.699544Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899785381124776:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:47.699909Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000988/r3tmp/tmpuT3AIw/pdisk_1.dat 2025-06-25T14:51:48.143395Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:48.155871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:48.155975Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:48.177206Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18997, node 1 2025-06-25T14:51:48.334214Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:48.334241Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:48.334250Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:48.334361Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21841 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:51:48.687749Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899785381124982:2118] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:48.714417Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899789676092796:2445] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:48.715566Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519899789676092796:2445] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:51:48.722066Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:48.748435Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519899789676092796:2445] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-25T14:51:48.763111Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519899789676092796:2445] Handle TEvDescribeSchemeResult Forward to# [1:7519899789676092777:2439] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 
18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:51:48.786070Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519899785381124982:2118] Handle TEvProposeTransaction 2025-06-25T14:51:48.786095Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519899785381124982:2118] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:51:48.787768Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519899785381124982:2118] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7519899789676092802:2450] 2025-06-25T14:51:48.886503Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519899789676092802:2450] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:51:48.887991Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519899789676092802:2450] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:51:48.888027Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519899789676092802:2450] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:51:48.888110Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519899789676092802:2450] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:51:48.888702Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519899789676092802:2450] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:51:48.889234Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519899789676092802:2450] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:51:48.889460Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519899789676092802:2450] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:51:48.889812Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519899789676092802:2450] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:51:48.898008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:48.902546Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519899789676092802:2450] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:51:48.902660Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519899789676092802:2450] txid# 
281474976715657 SEND to# [1:7519899789676092801:2449] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 2025-06-25T14:51:48.932539Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519899785381124982:2118] Handle TEvProposeTransaction 2025-06-25T14:51:48.932573Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519899785381124982:2118] TxId# 281474976715658 ProcessProposeTransaction 2025-06-25T14:51:48.932630Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519899785381124982:2118] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519899789676092846:2490] 2025-06-25T14:51:48.934965Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519899789676092846:2490] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:51:48.935017Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519899789676092846:2490] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:51:48.935031Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519899789676092846:2490] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:51:48.935108Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519899789676092846:2490] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:51:48.935381Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519899789676092846:2490] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:51:48.935479Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519899789676092846:2490] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:51:48.935541Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519899789676092846:2490] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:51:48.935652Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519899789676092846:2490] txid# 281474976715658 HANDLE EvClientConnected 2025-06-25T14:51:48.936139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 2814749767 ... 
DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:54:38.577768Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519900519692292459:2576] txid# 281474976715661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715661 TabletId# 72057594046644480} 2025-06-25T14:54:38.577896Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519900519692292459:2576] txid# 281474976715661 HANDLE EvClientConnected 2025-06-25T14:54:38.593885Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519900519692292459:2576] txid# 281474976715661 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715661} 2025-06-25T14:54:38.593942Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519900519692292459:2576] txid# 281474976715661 SEND to# [59:7519900519692292458:2295] Source {TEvProposeTransactionStatus txid# 281474976715661 Status# 48} 2025-06-25T14:54:38.669816Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519900502512422399:2111] Handle TEvProposeTransaction 2025-06-25T14:54:38.669867Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519900502512422399:2111] TxId# 281474976715662 ProcessProposeTransaction 2025-06-25T14:54:38.669936Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519900502512422399:2111] Cookie# 0 userReqId# "" txid# 281474976715662 SEND to# [59:7519900519692292482:2590] 2025-06-25T14:54:38.672814Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519900519692292482:2590] txid# 281474976715662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\022\010\001\022\016\032\014ordinaryuser\n\032\010\000\022\026\010\001\020\200\200\002\032\014ordinaryuser \000" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:44684" 2025-06-25T14:54:38.672900Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519900519692292482:2590] txid# 281474976715662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:54:38.672926Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519900519692292482:2590] txid# 281474976715662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:54:38.672987Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519900519692292482:2590] txid# 281474976715662 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:54:38.673381Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519900519692292482:2590] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:54:38.673488Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519900519692292482:2590] HANDLE EvNavigateKeySetResult, txid# 281474976715662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:54:38.673544Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519900519692292482:2590] txid# 281474976715662 SEND to# 72057594046644480 
shardToRequest {TEvModifySchemeTransaction txid# 281474976715662 TabletId# 72057594046644480} 2025-06-25T14:54:38.673698Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519900519692292482:2590] txid# 281474976715662 HANDLE EvClientConnected 2025-06-25T14:54:38.674284Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:54:38.678180Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519900519692292482:2590] txid# 281474976715662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715662} 2025-06-25T14:54:38.678243Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519900519692292482:2590] txid# 281474976715662 SEND to# [59:7519900519692292481:2308] Source {TEvProposeTransactionStatus txid# 281474976715662 Status# 48} 2025-06-25T14:54:38.743963Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519900502512422399:2111] Handle TEvProposeTransaction 2025-06-25T14:54:38.743998Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519900502512422399:2111] TxId# 281474976715663 ProcessProposeTransaction 2025-06-25T14:54:38.744045Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519900502512422399:2111] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [59:7519900519692292513:2607] 2025-06-25T14:54:38.746795Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519900519692292513:2607] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:44698" 2025-06-25T14:54:38.746871Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519900519692292513:2607] txid# 281474976715663 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:54:38.746894Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519900519692292513:2607] txid# 281474976715663 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:54:38.746940Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519900519692292513:2607] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:54:38.747272Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519900519692292513:2607] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:54:38.747373Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [59:7519900519692292513:2607] HANDLE EvNavigateKeySetResult, txid# 281474976715663 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:54:38.747422Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7519900519692292513:2607] txid# 281474976715663 SEND to# 72057594046644480 
shardToRequest {TEvModifySchemeTransaction txid# 281474976715663 TabletId# 72057594046644480} 2025-06-25T14:54:38.747589Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [59:7519900519692292513:2607] txid# 281474976715663 HANDLE EvClientConnected 2025-06-25T14:54:38.757332Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519900519692292513:2607] txid# 281474976715663 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715663} 2025-06-25T14:54:38.757394Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519900519692292513:2607] txid# 281474976715663 SEND to# [59:7519900519692292512:2310] Source {TEvProposeTransactionStatus txid# 281474976715663 Status# 48} 2025-06-25T14:54:38.832738Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7519900502512422399:2111] Handle TEvProposeTransaction 2025-06-25T14:54:38.832772Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7519900502512422399:2111] TxId# 281474976715664 ProcessProposeTransaction 2025-06-25T14:54:38.832814Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7519900502512422399:2111] Cookie# 0 userReqId# "" txid# 281474976715664 SEND to# [59:7519900519692292541:2619] 2025-06-25T14:54:38.835391Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [59:7519900519692292541:2619] txid# 281474976715664 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { ModifyUser { User: "targetuser" Password: "passwd" IsHashedPassword: false } } } } UserToken: "\n\014ordinaryuser\022\030\022\026\n\024all-users@well-known\032\334\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDkwNjQ3OCwiaWF0IjoxNzUwODYzMjc4LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.eXYGBSrHU9yK_NuA2gYr0jjunaWCQKcVcjmbbtivgYS5qsR5YqUCohpLR_kf8P1NLvvYu68sAD89gnXjK8E08Zriy5pJaBu43qPmdQOHqw7Rwt243pGI0cHKYcCrcXA_hSuAeQNRqlTjn291iSVgIQTPY0Y58jAfz3CGvqnQUlBYnFRkffP-VW7PIef7c--fhk-VEu498xt8o1TVVGzwNIV0G42WCPGpUJI5N_bKBvlFtGr77wrfPHwjLpSzS86Js3JTJi8JnErvsnWgxxdbjIgApg0Ay4vsxQhYsZEk29_QYFThDZopMU3fMMaZ08bznlBMoBY5VvlYtjU5y457ug\"\005Login*\210\001eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDkwNjQ3OCwiaWF0IjoxNzUwODYzMjc4LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.**" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:44730" 2025-06-25T14:54:38.835453Z node 59 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [59:7519900519692292541:2619] txid# 281474976715664 Bootstrap, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-25T14:54:38.835473Z node 59 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [59:7519900519692292541:2619] txid# 281474976715664 Bootstrap, UserSID: ordinaryuser IsClusterAdministrator: 0 2025-06-25T14:54:38.835640Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1434: Actor# [59:7519900519692292541:2619] txid# 281474976715664 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-25T14:54:38.835678Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1469: Actor# [59:7519900519692292541:2619] txid# 281474976715664 HandleResolveDatabase, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-25T14:54:38.835716Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [59:7519900519692292541:2619] txid# 281474976715664 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:54:38.835973Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [59:7519900519692292541:2619] 
txid# 281474976715664 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:54:38.835999Z node 59 :TX_PROXY ERROR: schemereq.cpp:1103: Actor# [59:7519900519692292541:2619] txid# 281474976715664, Access denied for ordinaryuser, attempt to manage user 2025-06-25T14:54:38.836084Z node 59 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [59:7519900519692292541:2619] txid# 281474976715664, issues: { message: "Access denied for ordinaryuser" issue_code: 200000 severity: 1 } 2025-06-25T14:54:38.836112Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519900519692292541:2619] txid# 281474976715664 SEND to# [59:7519900519692292540:2322] Source {TEvProposeTransactionStatus Status# 5} 2025-06-25T14:54:38.839578Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=59&id=NjI4M2MyNGItY2I0ZDkxNGEtZmJiNTlhNjAtNWMzMmJmMTI=, ActorId: [59:7519900519692292531:2322], ActorState: ExecuteState, TraceId: 01jyksawq0a39y5q5hf4rg8dbb, Create QueryResponse for error on request, msg: 2025-06-25T14:54:38.839907Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7519900502512422399:2111] Handle TEvExecuteKqpTransaction 2025-06-25T14:54:38.839932Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7519900502512422399:2111] TxId# 281474976715665 ProcessProposeKqpTransaction |87.9%| [TA] $(B)/ydb/core/tx/columnshard/engines/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] {RESULT} $(B)/ydb/core/tx/columnshard/engines/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] $(B)/ydb/core/fq/libs/result_formatter/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] {RESULT} $(B)/ydb/core/fq/libs/result_formatter/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpNewEngine::JoinDictWithPure [GOOD] >> KqpNewEngine::IdxLookupExtractMembers ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::TupleNotOnlyOfKeys [GOOD] Test command err: Trying to start YDB, gRPC: 4767, MsgBus: 63009 2025-06-25T14:53:30.425914Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900230177945226:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:30.426314Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009eb/r3tmp/tmpD8VAvg/pdisk_1.dat 2025-06-25T14:53:30.843390Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:30.843490Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:30.847601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:30.853119Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:30.853751Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900230177945132:2080] 1750863210401622 != 1750863210401625 TServer::EnableGrpc on GrpcPort 4767, node 1 2025-06-25T14:53:30.917338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:30.917363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:30.917367Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:30.917466Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63009 TClient is connected to server localhost:63009 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:53:31.425124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:53:31.425467Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:53:31.437966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:31.444582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:31.575147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:31.755780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:31.819832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:33.292294Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900243062848677:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:33.292411Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:33.520258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.592679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.639006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.667926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.702898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.772363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.845635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:33.935461Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900243062849346:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:33.935537Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:33.935571Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900243062849351:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:33.938650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:33.946804Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900243062849353:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:34.046026Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900247357816700:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:35.072425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:35.124205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ ... T_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:28.299851Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16182 2025-06-25T14:54:28.952878Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16182 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:29.033905Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:29.123321Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:29.213802Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:29.437253Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:29.555042Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:32.769309Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900495666199147:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:32.769400Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:32.924089Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:32.930070Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900474191361045:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:32.930194Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:32.975643Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:33.025111Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:33.085044Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:33.164839Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:33.249158Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:33.338057Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:33.435807Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519900499961167113:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:33.435922Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:33.436224Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900499961167118:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:33.440764Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:33.454904Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900499961167120:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:54:33.548068Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900499961167171:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:35.098061Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:35.199109Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:35.249499Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Warning: Type annotation, code: 1030
:5:21: Warning: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:6:26: Warning: At function: Filter, At lambda, At function: Coalesce
:7:37: Warning: At function: SqlIn
:7:37: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108
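(Editor's note, not part of the captured test output: the warning above, which the log repeats below for a second query, refers to YQL's legacy IN semantics and suggests a pragma. As a hedged illustration only, a statement of roughly this shape is what triggers code 1108, and prepending the suggested pragma switches IN to ANSI semantics; the table path and values here are hypothetical, not taken from the test.)

    PRAGMA AnsiInForEmptyOrNullableItemsCollections;
    -- Hypothetical table and values, for illustration only.
    -- With the pragma enabled, IN over a collection containing NULL follows
    -- ANSI three-valued logic (no match + NULL item => NULL) instead of the
    -- legacy behaviour the warning cautions about.
    SELECT * FROM `/Root/SomeTable` WHERE Value IN (1, 2, NULL);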
: Warning: Type annotation, code: 1030
:5:21: Warning: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:6:26: Warning: At function: Filter, At lambda, At function: Coalesce
:7:37: Warning: At function: SqlIn
:7:37: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpRanges::CastKeyBounds [GOOD] Test command err: Trying to start YDB, gRPC: 3836, MsgBus: 12763 2025-06-25T14:53:51.629661Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900319321879508:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:51.631608Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d1/r3tmp/tmpG5DyJQ/pdisk_1.dat 2025-06-25T14:53:51.986687Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:51.998864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:51.998970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:52.019620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3836, node 1 2025-06-25T14:53:52.092472Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:52.092493Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:52.092499Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:52.092591Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12763 TClient is connected to server localhost:12763 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:53:52.632420Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:53:52.660141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:52.672967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:54.457223Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900332206781917:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:54.457331Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:54.713130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:54.845274Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900332206782021:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:54.845361Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:54.845585Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900332206782026:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:54.849106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:54.860251Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900332206782028:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:53:54.946762Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900332206782079:2391] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:55.099615Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900336501749417:2316], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Missing key column in input: Key for table: /Root/TestUpsertNotNullPk, code: 2029 2025-06-25T14:53:55.100565Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTBmMzc1MTQtYzQ5MGIwMS02YjZhZmY0YS1iYjEyYjJjMQ==, ActorId: [1:7519900332206781891:2289], ActorState: ExecuteState, TraceId: 01jyks9j044x7f7zqgrcwdn5ry, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: 2025-06-25T14:53:55.170348Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519900336501749437:2289], TxId: 281474976710662, task: 1. Ctx: { TraceId : 01jyks9j19e916scpfh8et8d6y. SessionId : ydb://session/3?node_id=1&id=NTBmMzc1MTQtYzQ5MGIwMS02YjZhZmY0YS1iYjEyYjJjMQ==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: BAD_REQUEST KIKIMR_BAD_COLUMN_TYPE: {
: Error: Tried to insert NULL value into NOT NULL column: Key, code: 2031 }. 2025-06-25T14:53:55.170789Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NTBmMzc1MTQtYzQ5MGIwMS02YjZhZmY0YS1iYjEyYjJjMQ==, ActorId: [1:7519900332206781891:2289], ActorState: ExecuteState, TraceId: 01jyks9j19e916scpfh8et8d6y, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 2377, MsgBus: 9702 2025-06-25T14:53:55.920040Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900337452373567:2086];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:55.922225Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d1/r3tmp/tmp8IDG0p/pdisk_1.dat 2025-06-25T14:53:56.048452Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:56.052510Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900337452373502:2080] 1750863235889744 != 1750863235889747 TServer::EnableGrpc on GrpcPort 2377, node 2 2025-06-25T14:53:56.093287Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:56.093371Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:56.101764Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:56.124817Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:56.124838Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:56.124846Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:56.124975Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9702 TClient is connected to server localhost:9702 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:53:56.553835Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T1 ... lanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"A-SelfCpu":0.895,"A-Cpu":0.895,"Path":"\/Root\/Join2","Name":"Delete","Table":"Join2"}],"Node Type":"Delete"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query"}} Trying to start YDB, gRPC: 2141, MsgBus: 3708 2025-06-25T14:54:33.171009Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900500037856495:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:33.171065Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d1/r3tmp/tmpo1LRDb/pdisk_1.dat 2025-06-25T14:54:33.496479Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900500037856474:2080] 1750863273166002 != 1750863273166005 2025-06-25T14:54:33.499737Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:33.504487Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:33.504587Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:33.506183Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2141, node 7 2025-06-25T14:54:33.604799Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:33.604824Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:33.604835Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:33.605002Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3708 2025-06-25T14:54:34.213880Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3708 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:34.365972Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:34.374055Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:54:34.387813Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:34.518240Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:34.740667Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:34.853977Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:37.611174Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900517217727308:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:37.611305Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:37.702854Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:37.782510Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:37.852596Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:37.947976Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:38.028724Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:38.075595Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:38.116558Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:38.172054Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900500037856495:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:38.172268Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:38.238050Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519900521512695268:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:38.238162Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:38.238628Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900521512695273:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:38.246272Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:38.264559Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900521512695275:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:54:38.327421Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900521512695327:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/sys_view/partition_stats/ut/unittest >> KqpKv::ReadRows_PgKey [GOOD] >> KqpKv::ReadRows_Nulls |87.9%| [TA] $(B)/ydb/core/sys_view/partition_stats/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TA] {RESULT} $(B)/ydb/core/sys_view/partition_stats/ut/test-results/unittest/{meta.json ... results_accumulator.log} |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> KqpNewEngine::DqSourceCount [GOOD] >> KqpNewEngine::DqSource >> data_correctness.py::TestDataCorrectness::test |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> ObjectStorageListingTest::ListingNoFilter >> KqpRanges::LiteralOr [GOOD] >> KqpRanges::LiteralOrCompisite >> KqpSort::ComplexPkInclusiveSecondOptionalPredicate [GOOD] >> KqpNewEngine::AutoChooseIndexOrderByLambda [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::Booting >> LocalPartitionReader::Booting [GOOD] >> TPersQueueTest::Codecs_WriteMessageWithDefaultCodecs_MessagesAreAcknowledged [GOOD] >> TPersQueueTest::Codecs_WriteMessageWithNonDefaultCodecThatHasToBeConfiguredAdditionally_SessionClosedWithBadRequestError >> TPersQueueTest::ReadRuleServiceTypeMigrationWithDisallowDefault [GOOD] >> TPersQueueTest::ReadWithoutConsumerFederation >> KqpNewEngine::MultipleBroadcastJoin [GOOD] |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest |87.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::Booting [GOOD] >> ObjectStorageListingTest::FilterListing ------- [TM] {asan, default-linux-x86_64, 
release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::AutoChooseIndexOrderByLambda [GOOD] Test command err: Trying to start YDB, gRPC: 31049, MsgBus: 28202 2025-06-25T14:53:51.289850Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900320155701058:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:51.289919Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d4/r3tmp/tmpZl2sSa/pdisk_1.dat 2025-06-25T14:53:51.647620Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31049, node 1 2025-06-25T14:53:51.708100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:51.708204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:51.766220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:51.809715Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:51.809740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:51.809752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:51.809907Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28202 TClient is connected to server localhost:28202 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:53:52.300035Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:52.440708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:53:52.464936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:52.611358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:52.820438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:52.885101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:54.601530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900333040604450:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:54.601620Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:54.941001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:54.972190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:55.060909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:55.096108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:55.143402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:55.220217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:55.284655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:55.364633Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900337335572410:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:55.364697Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:55.364893Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900337335572415:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:55.368403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:55.380701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:53:55.380914Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900337335572417:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:55.493420Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900337335572468:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:56.282501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900320155701058:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:56.282565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 25959, MsgBus: 21703 2025-06-25T14:53:57.625561Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900344691330457:2129];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:57.625713Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/run ... nner/.ya/build/build_root/yft8/0009d4/r3tmp/tmpmQwKwh/pdisk_1.dat 2025-06-25T14:54:35.210494Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900508591133255:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:35.211002Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:54:35.340513Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:35.356452Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900508591133063:2080] 1750863275041880 != 1750863275041883 2025-06-25T14:54:35.360779Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:35.360876Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:35.363144Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28905, node 7 2025-06-25T14:54:35.504916Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:35.504940Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:35.504950Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:35.505093Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22081 2025-06-25T14:54:36.157641Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22081 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:36.254010Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:36.261789Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:54:36.287002Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:36.371868Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:54:36.635822Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:36.734379Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:39.629752Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900525771003885:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:39.629895Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:39.727975Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:39.770734Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:39.818057Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:39.868365Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:39.924189Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:39.982217Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.026798Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.092733Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900508591133255:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:40.093212Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:40.117527Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519900530065971838:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.117635Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.117887Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900530065971843:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.122925Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:40.138434Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900530065971845:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:54:40.204157Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900530065971896:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:41.792864Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSort::ComplexPkInclusiveSecondOptionalPredicate [GOOD] Test command err: Trying to start YDB, gRPC: 28507, MsgBus: 10943 2025-06-25T14:53:39.625083Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900269270529704:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:39.625124Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009de/r3tmp/tmpobKAkG/pdisk_1.dat 2025-06-25T14:53:40.066379Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:40.084815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:40.084907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:40.087901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28507, node 1 2025-06-25T14:53:40.180819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:40.180840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:40.180854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:40.180995Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10943 TClient is connected to server localhost:10943 2025-06-25T14:53:40.632470Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:40.792298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:40.831975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:40.990309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:41.153561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:41.239109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:42.812950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900282155433194:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:42.813069Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.115883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.188662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.211668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.297852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.372442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.424341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.500831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:43.575446Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900286450401161:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.575519Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.575692Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900286450401166:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:43.579463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:43.593918Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900286450401168:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:43.673453Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900286450401222:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:44.625571Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900269270529704:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:44.625640Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:44.667452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:44.709304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /ho ... 963950754:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:54:31.299608Z node 6 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [6:7519900491963950805:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 30665, MsgBus: 14827 2025-06-25T14:54:35.419014Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900509447299609:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:35.419083Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009de/r3tmp/tmpf03QwC/pdisk_1.dat 2025-06-25T14:54:35.650645Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:35.650755Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:35.654452Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:35.658449Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30665, node 7 2025-06-25T14:54:35.778873Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:35.778897Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:35.778907Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:35.779074Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14827 2025-06-25T14:54:36.425836Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14827 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:54:36.618120Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:36.626104Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:36.639842Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:36.731816Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:37.029022Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:37.152817Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:40.162491Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900530922137663:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.162613Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.222517Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.305682Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.350796Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.392449Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.420390Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900509447299609:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:40.420471Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:40.447186Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.494465Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.547649Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.631701Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519900530922138323:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.631793Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.632091Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900530922138328:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.637522Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:40.656204Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900530922138330:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:40.725280Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900530922138382:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpRanges::NoFullScanAtDNFPredicate [GOOD] >> KqpRanges::MergeRanges >> ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete >> test_auditlog.py::test_dynconfig >> TPersQueueTest::MessageMetadata [GOOD] >> TPersQueueTest::LOGBROKER_7820 |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::MultipleBroadcastJoin [GOOD] Test command err: Trying to start YDB, gRPC: 12629, MsgBus: 6046 2025-06-25T14:53:54.376974Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900330772061193:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:54.377447Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009cb/r3tmp/tmphhOpZQ/pdisk_1.dat 2025-06-25T14:53:54.744532Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:54.748837Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900330772060990:2080] 1750863234293746 != 1750863234293749 2025-06-25T14:53:54.804224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:54.804306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 12629, node 1 2025-06-25T14:53:54.805705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:54.843923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:54.843949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:54.843958Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:54.844107Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6046 TClient is connected to server localhost:6046 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:53:55.360449Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:55.529295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:55.549021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:55.556672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:55.707415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:55.877359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:55.955963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:57.431465Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900343656964520:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:57.431562Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:57.761196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:57.806749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:57.847096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:57.892419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:57.938815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:58.016764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:58.045104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:58.136467Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900347951932478:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:58.136540Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:58.136766Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900347951932483:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:58.139448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:58.152390Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900347951932485:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:58.226106Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900347951932536:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:59.361150Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900330772061193:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:59.361209Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 14661, MsgBus: 19474 2025-06-25T14:54:00.692351Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900357499065026:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:00.692402Z node 2 :METADATA_PROVIDER ERROR: log.cpp:7 ... 06-25T14:54:36.099429Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:36.099540Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:36.101192Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13054, node 7 2025-06-25T14:54:36.177124Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:36.177163Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:36.177174Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:36.177350Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9129 TClient is connected to server localhost:9129 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:54:36.905132Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:54:36.912041Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:36.921547Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:54:36.928748Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:37.062102Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:37.280856Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:37.385282Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:40.469198Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900527918313470:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.469313Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.535379Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.576528Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.623718Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.691686Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.738427Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.814018Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.854909Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.917749Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900506443475540:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:40.938932Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:40.971625Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519900527918314130:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.971763Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.971961Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900527918314135:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:40.976657Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:40.993435Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900527918314137:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:54:41.058131Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900532213281486:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:42.492799Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:42.551193Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:42.602254Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) [] >> LocalPartitionReader::Simple >> LocalPartitionReader::Simple [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> KqpKv::ReadRows_Nulls [GOOD] >> KqpNamedExpressions::NamedExpressionRandom+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandom-UseSink >> KqpMergeCn::SortBy_Int32 [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::Simple [GOOD] >> KqpSqlIn::SecondaryIndex_PgKey [GOOD] >> KqpSqlIn::SecondaryIndex_SimpleKey >> KqpNewEngine::PushPureFlatmapInnerConnectionsToStage [GOOD] >> LocalPartitionReader::FeedSlowly |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-StrictAclCheck >> LocalPartitionReader::FeedSlowly [GOOD] >> TTopicYqlTest::AlterAutopartitioning [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TTopicYqlTest::BadRequests >> KqpNewEngine::IdxLookupExtractMembers [GOOD] >> TPopulatorTest::Boot >> TPopulatorQuorumTest::OneRingGroup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpMergeCn::SortBy_Int32 [GOOD] Test command err: Trying to start YDB, gRPC: 25746, MsgBus: 26696 2025-06-25T14:53:48.152744Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900306767221557:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:48.154019Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d7/r3tmp/tmpzSDxgy/pdisk_1.dat 2025-06-25T14:53:48.652082Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:48.665287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:48.665369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:48.669320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25746, node 1 2025-06-25T14:53:48.764749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:48.764772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:48.764779Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:48.764885Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26696 2025-06-25T14:53:49.107731Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26696 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:49.434746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:49.456832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:51.199361Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900319652123991:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:51.199509Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:51.418224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:51.536723Z node 1 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: Unknown table '/Root/WrongTable' Trying to start YDB, gRPC: 61996, MsgBus: 22945 2025-06-25T14:53:52.352755Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900324076374701:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:52.352888Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d7/r3tmp/tmp3kAYYn/pdisk_1.dat 2025-06-25T14:53:52.497897Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:52.524905Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:52.524992Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:52.527142Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61996, node 2 2025-06-25T14:53:52.648927Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:52.648957Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:52.648964Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:52.649067Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22945 TClient is connected to server localhost:22945 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:53:53.166503Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:53.174104Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:53:53.181553Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:53.247677Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:53.378505Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:53.393010Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:53.491790Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:55.424335Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900336961278092:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:55.424454Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:55.504795Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:55.536674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:55.567955Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, ... WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7519900526998574446:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:39.093525Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d7/r3tmp/tmpilsucL/pdisk_1.dat 2025-06-25T14:54:39.320700Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519900526998574428:2080] 1750863279087894 != 1750863279087897 2025-06-25T14:54:39.329682Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:39.337403Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:39.337506Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:39.341608Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11574, node 8 2025-06-25T14:54:39.449183Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:39.449231Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:39.449246Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:39.449417Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30243 2025-06-25T14:54:40.176394Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30243 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:40.253394Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:40.264539Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:54:40.333514Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:40.536820Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:40.626515Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:43.696382Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519900544178445239:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:43.696501Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:43.776513Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:43.810266Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:43.844454Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:43.880357Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:43.921229Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:43.970217Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:44.008483Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:44.094226Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7519900526998574446:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:44.094429Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:44.127532Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[8:7519900548473413191:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:44.127663Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:44.127751Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519900548473413196:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:44.132444Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:44.147321Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7519900548473413198:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:54:44.203850Z node 8 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [8:7519900548473413251:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:45.781707Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:46.815584Z node 8 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863286844, txId: 281474976710674] shutting down >> TPopulatorTest::RemoveDir >> TPopulatorQuorumTest::OneWriteOnlyRingGroup >> TPopulatorTestWithResets::UpdateAck >> TPopulatorQuorumTest::TwoRingGroups >> TPopulatorTest::MakeDir |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::FeedSlowly [GOOD] >> TPopulatorQuorumTest::OneDisconnectedRingGroup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PushPureFlatmapInnerConnectionsToStage [GOOD] Test command err: Trying to start YDB, gRPC: 14325, MsgBus: 26859 2025-06-25T14:53:59.874757Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900352915807883:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:59.894404Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009c7/r3tmp/tmpUqZMAr/pdisk_1.dat 2025-06-25T14:54:00.329834Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:00.367065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:00.367180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 14325, node 1 2025-06-25T14:54:00.370081Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:00.500831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:00.500851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:00.500857Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:00.500977Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26859 2025-06-25T14:54:00.896475Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26859 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:01.132582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:01.150367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:54:01.161555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:01.318730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:01.457404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:01.538263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:03.269647Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900370095678660:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:03.269720Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:03.515688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:03.543400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:03.571789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:03.603624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:03.634876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:03.711077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:03.772541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:03.857392Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900370095679325:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:03.857445Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:03.857572Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900370095679330:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:03.861476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:03.874043Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900370095679332:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:54:03.977604Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900370095679385:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:04.871029Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900352915807883:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:04.872170Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 18182, MsgBus: 19445 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009c7/r3tmp/tmpDkJZAj/pdisk_1.dat 2025-06-25T14:54:05.887823Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900381333496265:2213];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:05.888080Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/i ... s.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900488800676235:2215];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:36.795128Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17005, MsgBus: 27821 2025-06-25T14:54:40.612585Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900528737790768:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:40.612854Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009c7/r3tmp/tmpsQ34WP/pdisk_1.dat 2025-06-25T14:54:40.756253Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:40.756531Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900528737790748:2080] 1750863280611743 != 1750863280611746 2025-06-25T14:54:40.771466Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:40.771571Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:40.777606Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17005, node 7 2025-06-25T14:54:40.821795Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:40.821831Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:40.821847Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-25T14:54:40.822040Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27821 TClient is connected to server localhost:27821 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:41.527459Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:41.535831Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:41.542978Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:41.624438Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:41.630158Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:41.827869Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:41.910250Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:44.928764Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900545917661571:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:44.928905Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:44.987606Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.022105Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.057324Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.128012Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.167530Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.214228Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.254568Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.333800Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900550212629526:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:45.333921Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:45.334145Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900550212629531:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:45.339342Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:45.357856Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900550212629533:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:45.454495Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900550212629584:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:45.612427Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900528737790768:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:45.612525Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TPopulatorTest::MakeDir [GOOD] >> TPopulatorTest::RemoveDir [GOOD] >> TPopulatorTestWithResets::UpdateAck [GOOD] >> TPopulatorTest::Boot [GOOD] |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> KqpAgg::GroupByLimit [GOOD] >> KqpAgg::AggHashShuffle+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::Boot [GOOD] Test command err: 2025-06-25T14:54:49.588435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:54:49.588494Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTestWithResets::UpdateAck [GOOD] Test command err: 2025-06-25T14:54:49.591636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:54:49.591701Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 100 2025-06-25T14:54:49.716288Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: 
"!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:73:2111], cookie# 100, event size# 419, preserialized size# 51 2025-06-25T14:54:49.716438Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 3 2025-06-25T14:54:49.717628Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.717713Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.717765Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.718312Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirC" PathDescription { Self { Name: "DirC" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: false CreateTxId: 100 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:73:2111], cookie# 100, event size# 309, preserialized size# 2 2025-06-25T14:54:49.718360Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 2 FAKE_COORDINATOR: Add transaction: 100 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000001 2025-06-25T14:54:49.720850Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" 
PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:73:2111], cookie# 100, event size# 429, preserialized size# 56 2025-06-25T14:54:49.720895Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 4 FAKE_COORDINATOR: Erasing txId 100 2025-06-25T14:54:49.721487Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirC" PathDescription { Self { Name: "DirC" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 
PathOwnerId: 72057594046678944 }: sender# [1:73:2111], cookie# 100, event size# 314, preserialized size# 2 2025-06-25T14:54:49.721534Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 3 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-25T14:54:49.749518Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:99:2124] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 2 }: sender# [1:12:2059] 2025-06-25T14:54:49.749590Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:99:2124] Successful handshake: replica# [1:12:2059] 2025-06-25T14:54:49.749628Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:260: [1:99:2124] Resume sync: replica# [1:12:2059], fromPathId# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:54:49.749694Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:100:2125] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 2 }: sender# [1:15:2062] 2025-06-25T14:54:49.749718Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:100:2125] Successful handshake: replica# [1:15:2062] 2025-06-25T14:54:49.749746Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:260: [1:100:2125] Resume sync: replica# [1:15:2062], fromPathId# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:54:49.749796Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:101:2126] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 2 }: sender# [1:18:2065] 2025-06-25T14:54:49.749817Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:101:2126] Successful handshake: replica# [1:18:2065] 2025-06-25T14:54:49.749835Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:260: [1:101:2126] Resume sync: replica# [1:18:2065], fromPathId# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:54:49.749921Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:98:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Replica: [1:24339059:0] }: sender# [1:99:2124] 2025-06-25T14:54:49.750030Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:99:2124] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/DirC PathId: [OwnerId: 72057594046678944, LocalPathId: 2] PathVersion: 3 } }: sender# [1:98:2123] 2025-06-25T14:54:49.750156Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:98:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 1] }: sender# [1:99:2124] 2025-06-25T14:54:49.750309Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sen ... 
4:49.750592Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 0 2025-06-25T14:54:49.750652Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:100:2125] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/DirC PathId: [OwnerId: 72057594046678944, LocalPathId: 2] PathVersion: 3 } }: sender# [1:98:2123] 2025-06-25T14:54:49.750723Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:98:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 1] }: sender# [1:100:2125] 2025-06-25T14:54:49.750752Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 0 2025-06-25T14:54:49.750834Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:98:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] }: sender# [1:100:2125] 2025-06-25T14:54:49.750870Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:12:2059], cookie# 0 2025-06-25T14:54:49.750929Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:15:2062], cookie# 0 2025-06-25T14:54:49.750977Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:98:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Replica: [1:2199047594611:0] }: sender# [1:101:2126] 2025-06-25T14:54:49.751041Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 0 2025-06-25T14:54:49.751084Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:101:2126] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/DirC PathId: [OwnerId: 72057594046678944, LocalPathId: 2] PathVersion: 3 } }: sender# [1:98:2123] 2025-06-25T14:54:49.751155Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:98:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 1] }: sender# [1:101:2126] 2025-06-25T14:54:49.751197Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 0 2025-06-25T14:54:49.751268Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:98:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 2] }: sender# [1:101:2126] 2025-06-25T14:54:49.751341Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:15:2062], cookie# 0 
2025-06-25T14:54:49.751382Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:18:2065], cookie# 0 2025-06-25T14:54:49.751420Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 0 2025-06-25T14:54:49.751474Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:98:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Replica: [1:24339059:0] }: sender# [1:99:2124] 2025-06-25T14:54:49.751509Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:99:2124] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:98:2123] 2025-06-25T14:54:49.751570Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:99:2124], cookie# 0 2025-06-25T14:54:49.751601Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:99:2124], cookie# 0 2025-06-25T14:54:49.751639Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:99:2124] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 2 }: sender# [1:12:2059] 2025-06-25T14:54:49.751677Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:18:2065], cookie# 0 2025-06-25T14:54:49.751721Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:99:2124], cookie# 100 2025-06-25T14:54:49.751778Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:98:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Replica: [1:1099535966835:0] }: sender# [1:100:2125] 2025-06-25T14:54:49.751811Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:100:2125] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:98:2123] 2025-06-25T14:54:49.751846Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:99:2124], cookie# 0 2025-06-25T14:54:49.751881Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:99:2124], cookie# 0 2025-06-25T14:54:49.751919Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:99:2124], cookie# 100 2025-06-25T14:54:49.751945Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:100:2125] Handle 
NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 2 }: sender# [1:15:2062] 2025-06-25T14:54:49.751972Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:100:2125], cookie# 0 2025-06-25T14:54:49.751989Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:100:2125], cookie# 0 2025-06-25T14:54:49.752016Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:100:2125], cookie# 100 2025-06-25T14:54:49.752040Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 3 2025-06-25T14:54:49.752102Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 4 2025-06-25T14:54:49.752545Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:98:2123] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Replica: [1:2199047594611:0] }: sender# [1:101:2126] 2025-06-25T14:54:49.752594Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:100:2125], cookie# 0 2025-06-25T14:54:49.752614Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:100:2125], cookie# 0 2025-06-25T14:54:49.752646Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:101:2126] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:98:2123] 2025-06-25T14:54:49.752951Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:100:2125], cookie# 100 2025-06-25T14:54:49.752992Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 2 2025-06-25T14:54:49.753095Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 3 2025-06-25T14:54:49.753139Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:101:2126] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 2 }: sender# [1:18:2065] 2025-06-25T14:54:49.753188Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:101:2126], cookie# 0 2025-06-25T14:54:49.753209Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:101:2126], cookie# 0 
2025-06-25T14:54:49.753594Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:101:2126], cookie# 100 2025-06-25T14:54:49.753620Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:101:2126], cookie# 100 2025-06-25T14:54:49.753711Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:101:2126], cookie# 0 2025-06-25T14:54:49.753741Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:101:2126], cookie# 0 2025-06-25T14:54:49.753799Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:101:2126], cookie# 100 2025-06-25T14:54:49.753829Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:101:2126], cookie# 100 TestWaitNotification: OK eventTxId 100 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::RemoveDir [GOOD] Test command err: 2025-06-25T14:54:49.577256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:54:49.577307Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 100 2025-06-25T14:54:49.697406Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 
72057594046678944 }: sender# [1:73:2111], cookie# 100, event size# 419, preserialized size# 51 2025-06-25T14:54:49.697509Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 3 2025-06-25T14:54:49.698805Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.698900Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.698932Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.700512Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirB" PathDescription { Self { Name: "DirB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: false CreateTxId: 100 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:73:2111], cookie# 100, event size# 309, preserialized size# 2 2025-06-25T14:54:49.700572Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 2 2025-06-25T14:54:49.700719Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:12:2059], cookie# 100 2025-06-25T14:54:49.700786Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:15:2062], cookie# 100 2025-06-25T14:54:49.700848Z node 1 
:SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:18:2065], cookie# 100 2025-06-25T14:54:49.701002Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:99:2124], cookie# 100 2025-06-25T14:54:49.701066Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.701103Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.701131Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.701172Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:100:2125], cookie# 100 2025-06-25T14:54:49.701199Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 3 2025-06-25T14:54:49.701762Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:101:2126], cookie# 100 2025-06-25T14:54:49.701802Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:12:2059], cookie# 100 2025-06-25T14:54:49.701837Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:15:2062], cookie# 100 2025-06-25T14:54:49.701930Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:18:2065], cookie# 100 2025-06-25T14:54:49.702022Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:99:2124], cookie# 100 2025-06-25T14:54:49.702331Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:100:2125], cookie# 100 2025-06-25T14:54:49.702370Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 2 2025-06-25T14:54:49.702636Z node 1 
:SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:101:2126], cookie# 100 2025-06-25T14:54:49.702672Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:101:2126], cookie# 100 FAKE_COORDINATOR: Add transaction: 100 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000001 FAKE_COORDINATOR: Erasing txId 100 2025-06-25T14:54:49.706316Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:73:2111], cookie# 100, event size# 429, preserialized size# 56 2025-06-25T14:54:49.706372Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 4 2025-06-25T14:54:49.706509Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 ... 
ner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 5 }: sender# [1:15:2062], cookie# 101 2025-06-25T14:54:49.722929Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 101 2025-06-25T14:54:49.723076Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 5 }: sender# [1:99:2124], cookie# 101 2025-06-25T14:54:49.723114Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 5 2025-06-25T14:54:49.723188Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:12:2059], cookie# 101 2025-06-25T14:54:49.723229Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:15:2062], cookie# 101 2025-06-25T14:54:49.723258Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:18:2065], cookie# 101 2025-06-25T14:54:49.723465Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 5 }: sender# [1:100:2125], cookie# 101 2025-06-25T14:54:49.723609Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:99:2124], cookie# 101 2025-06-25T14:54:49.723868Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:100:2125], cookie# 101 2025-06-25T14:54:49.723898Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 3 2025-06-25T14:54:49.724136Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:101:2126], cookie# 101 2025-06-25T14:54:49.724173Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:101:2126], cookie# 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:54:49.725737Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { 
Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:73:2111], cookie# 101, event size# 321, preserialized size# 2 2025-06-25T14:54:49.725787Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 101, is deletion# false, version: 6 2025-06-25T14:54:49.725902Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 101 2025-06-25T14:54:49.725949Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 101 2025-06-25T14:54:49.725978Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 101 2025-06-25T14:54:49.726160Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/Root/DirB\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000002, drop txId: 101" Path: "/Root/DirB" PathId: 2 LastExistedPrefixPath: "/Root" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944 }: sender# [1:73:2111], cookie# 101, event size# 306, preserialized size# 0 2025-06-25T14:54:49.726185Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: 
[1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 101, is deletion# true, version: 0 2025-06-25T14:54:49.726240Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 101 2025-06-25T14:54:49.726275Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:15:2062], cookie# 101 2025-06-25T14:54:49.726313Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:18:2065], cookie# 101 2025-06-25T14:54:49.726361Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 101 2025-06-25T14:54:49.726503Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:100:2125], cookie# 101 2025-06-25T14:54:49.726546Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:12:2059], cookie# 101 2025-06-25T14:54:49.726648Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 101 2025-06-25T14:54:49.726712Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:101:2126], cookie# 101 2025-06-25T14:54:49.726735Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 6 2025-06-25T14:54:49.726765Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:12:2059], cookie# 101 2025-06-25T14:54:49.726802Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:18:2065], cookie# 101 2025-06-25T14:54:49.727060Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:99:2124], cookie# 101 2025-06-25T14:54:49.727093Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:15:2062], cookie# 101 
2025-06-25T14:54:49.727183Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:99:2124], cookie# 101 2025-06-25T14:54:49.727284Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:101:2126], cookie# 101 2025-06-25T14:54:49.727316Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 18446744073709551615 2025-06-25T14:54:49.727555Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:100:2125], cookie# 101 2025-06-25T14:54:49.727582Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:100:2125], cookie# 101 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::MakeDir [GOOD] Test command err: 2025-06-25T14:54:49.595702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:54:49.595758Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TestModificationResults wait txId: 100 2025-06-25T14:54:49.698664Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:73:2111], 
cookie# 100, event size# 419, preserialized size# 51 2025-06-25T14:54:49.698774Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 3 2025-06-25T14:54:49.700160Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.700295Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.700350Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.701033Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: false CreateTxId: 100 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:73:2111], cookie# 100, event size# 309, preserialized size# 2 2025-06-25T14:54:49.701117Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 2 2025-06-25T14:54:49.701279Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:12:2059], cookie# 100 2025-06-25T14:54:49.701344Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:15:2062], cookie# 100 2025-06-25T14:54:49.701404Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: 
[1:101:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:18:2065], cookie# 100 2025-06-25T14:54:49.701577Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:99:2124], cookie# 100 2025-06-25T14:54:49.701630Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.701666Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.701694Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.701731Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:100:2125], cookie# 100 2025-06-25T14:54:49.701761Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 3 2025-06-25T14:54:49.702449Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:101:2126], cookie# 100 2025-06-25T14:54:49.702495Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:12:2059], cookie# 100 2025-06-25T14:54:49.702536Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:15:2062], cookie# 100 2025-06-25T14:54:49.702582Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:18:2065], cookie# 100 2025-06-25T14:54:49.702685Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:99:2124], cookie# 100 2025-06-25T14:54:49.702976Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:100:2125], cookie# 100 2025-06-25T14:54:49.703025Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 2 2025-06-25T14:54:49.703311Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: 
[1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:101:2126], cookie# 100 2025-06-25T14:54:49.703346Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:101:2126], cookie# 100 FAKE_COORDINATOR: Add transaction: 100 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000001 FAKE_COORDINATOR: Erasing txId 100 2025-06-25T14:54:49.706442Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:73:2111], cookie# 100, event size# 429, preserialized size# 56 2025-06-25T14:54:49.706510Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 4 2025-06-25T14:54:49.706644Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.706692Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.706724Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.710919Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:98:2123] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirA" PathDescription { Self { Name: "DirA" PathId: 2 
SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:73:2111], cookie# 100, event size# 314, preserialized size# 2 2025-06-25T14:54:49.710989Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:98:2123] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 3 2025-06-25T14:54:49.711101Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:12:2059], cookie# 100 2025-06-25T14:54:49.711171Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:15:2062], cookie# 100 2025-06-25T14:54:49.711239Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:18:2065], cookie# 100 2025-06-25T14:54:49.711289Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.711452Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:99:2124], cookie# 100 2025-06-25T14:54:49.711485Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.711523Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:98:2123], cookie# 100 2025-06-25T14:54:49.711599Z node 1 
:SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:100:2125], cookie# 100 2025-06-25T14:54:49.711627Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 4 2025-06-25T14:54:49.711702Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:12:2059], cookie# 100 2025-06-25T14:54:49.711743Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:100:2125] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:15:2062], cookie# 100 2025-06-25T14:54:49.711789Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:101:2126] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:18:2065], cookie# 100 2025-06-25T14:54:49.712104Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 4 }: sender# [1:101:2126], cookie# 100 2025-06-25T14:54:49.712204Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:99:2124], cookie# 100 2025-06-25T14:54:49.718890Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:100:2125], cookie# 100 2025-06-25T14:54:49.718955Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:98:2123] Ack update: ack to# [1:73:2111], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 3 2025-06-25T14:54:49.719254Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:101:2126], cookie# 100 2025-06-25T14:54:49.719299Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:98:2123] Ack for unknown update (already acked?): sender# [1:101:2126], cookie# 100 TestModificationResult got TxId: 100, wait until txId: 100 >> ObjectStorageListingTest::ListingNoFilter [GOOD] >> ObjectStorageListingTest::FilterListing [GOOD] >> KqpNewEngine::DqSource [GOOD] >> KqpNewEngine::DqSourceLiteralRange ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::IdxLookupExtractMembers [GOOD] Test command err: Trying to start YDB, gRPC: 20622, MsgBus: 8858 2025-06-25T14:53:54.044910Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900333433220723:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:54.044989Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect 
path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d0/r3tmp/tmpjPvUeO/pdisk_1.dat 2025-06-25T14:53:54.534524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:54.534637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:54.537426Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:54.537795Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:54.544361Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900333433220702:2080] 1750863234043906 != 1750863234043909 TServer::EnableGrpc on GrpcPort 20622, node 1 2025-06-25T14:53:54.668874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:54.668894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:54.668901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:54.669005Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8858 TClient is connected to server localhost:8858 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:53:55.077617Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:55.239311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:53:55.261407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:55.278782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:55.431073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:55.581433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:55.656250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:57.090466Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900346318124251:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:57.090540Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:57.381409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:57.422037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:57.453717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:57.488555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:57.559203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:57.612647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:57.662346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:57.738894Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900346318124904:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:57.738958Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:57.739153Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900346318124909:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:57.743055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:57.755347Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900346318124911:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:53:57.850777Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900346318124962:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:59.046520Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900333433220723:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:59.046744Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 28815, MsgBus: 4836 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d0/r3tmp/tmp8adrPU/pdisk_1.dat 2025-06-25T14:53:59.911233Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900352103049027:2250];send_to=[0: ... exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 1037, MsgBus: 2408 2025-06-25T14:54:41.786292Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900531905240953:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:41.786391Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009d0/r3tmp/tmpjrGgLb/pdisk_1.dat 2025-06-25T14:54:41.937990Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:41.940361Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900531905240934:2080] 1750863281785793 != 1750863281785796 2025-06-25T14:54:41.957705Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:41.957822Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:41.964770Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1037, node 7 2025-06-25T14:54:42.004498Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:42.004522Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:42.004532Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:42.004671Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2408 TClient is connected to server localhost:2408 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:42.600443Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:42.608144Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:42.623857Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:42.702889Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:42.836873Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:42.909943Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:42.990899Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:45.507099Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900549085111761:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:45.507195Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:45.588418Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.634578Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.670672Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.702693Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.741430Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.783748Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.853698Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:45.953857Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900549085112418:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:45.953951Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900549085112423:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:45.954017Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:45.957736Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:45.968063Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:54:45.968248Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900549085112425:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:46.028035Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900553380079772:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:46.786604Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900531905240953:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:46.786681Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> KqpNewEngine::StaleRO-EnableFollowers [GOOD] >> KqpNewEngine::StaleRO_Immediate >> TPQCompatTest::LongProducerAndLongMessageGroupId [GOOD] >> TPQCompatTest::ReadWriteSessions |88.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> ObjectStorageListingTest::ListingNoFilter [GOOD] Test command err: 2025-06-25T14:54:47.395990Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:54:47.396132Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:54:47.396195Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001134/r3tmp/tmpccj7VV/pdisk_1.dat 2025-06-25T14:54:47.833013Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:54:47.846628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:54:47.897704Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:47.905656Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863284666467 != 1750863284666471 2025-06-25T14:54:47.955072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:47.955249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:47.969734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:48.073000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:48.157758Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:54:48.158036Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:54:48.200853Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:54:48.200997Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:54:48.205117Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:54:48.205234Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:54:48.205281Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:54:48.206876Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:54:48.207021Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:54:48.207135Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:54:48.217969Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:54:48.247829Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:54:48.249868Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:54:48.250084Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:54:48.250187Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:54:48.250229Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:54:48.250270Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:54:48.251484Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:54:48.251631Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:54:48.251713Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:54:48.251760Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:54:48.251902Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:54:48.251948Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:54:48.252336Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:54:48.252531Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:54:48.252820Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:54:48.253299Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:54:48.255195Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:54:48.266232Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:54:48.266365Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:54:48.428744Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:54:48.433323Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:54:48.433398Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:54:48.434708Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:54:48.434768Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:54:48.434845Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:54:48.435079Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:54:48.435255Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:54:48.435795Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:54:48.435904Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:54:48.438687Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:54:48.440504Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:54:48.442200Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:54:48.442248Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:54:48.442767Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:54:48.442832Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:54:48.443539Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:54:48.443583Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:54:48.443654Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:54:48.443724Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:54:48.443781Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:54:48.443897Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:54:48.452621Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:54:48.454670Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:54:48.454746Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:54:48.455696Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:54:48.506662Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:48.506820Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:48.506900Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:48.518832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:48.527472Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:54:48.577094Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:48.703706Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:54:48.706910Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:54:48.781247Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:49.781468Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyksb65q1wnp3w5z5xqdcm7y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzNkNDNiODMtZDdlZTg1MDUtNGQyYjg5OGEtYjM1ZjhlOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:54:49.846294Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:809:2640], serverId# [1:810:2641], sessionId# [0:0:0] 2025-06-25T14:54:49.849188Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-25T14:54:49.849455Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-25T14:54:49.863530Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:54:49.924418Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:817:2647], serverId# [1:818:2648], sessionId# [0:0:0] 2025-06-25T14:54:49.924667Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test/")), end at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test0")) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-25T14:54:49.924893Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 3 common prefixes: 2 2025-06-25T14:54:49.925103Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [1:817:2647], serverId# [1:818:2648], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> ObjectStorageListingTest::FilterListing [GOOD] Test command err: 2025-06-25T14:54:48.566111Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:54:48.566230Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:54:48.566276Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00105f/r3tmp/tmpMJ5Yw0/pdisk_1.dat 2025-06-25T14:54:48.896053Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:54:48.899383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:54:48.943349Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:48.948539Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863285979585 != 1750863285979589 2025-06-25T14:54:48.999507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:48.999659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:49.011404Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:49.097855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:49.146705Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T14:54:49.146985Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T14:54:49.196672Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T14:54:49.196809Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T14:54:49.198530Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T14:54:49.198623Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T14:54:49.198676Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T14:54:49.199045Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T14:54:49.199191Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T14:54:49.199290Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T14:54:49.210072Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T14:54:49.238737Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T14:54:49.238985Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T14:54:49.239118Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T14:54:49.239218Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:54:49.239256Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T14:54:49.239290Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:54:49.239803Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T14:54:49.239925Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T14:54:49.240003Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:54:49.240048Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:54:49.240107Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T14:54:49.240148Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:54:49.240545Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T14:54:49.240719Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:54:49.240989Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T14:54:49.241095Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T14:54:49.242845Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:54:49.253805Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:54:49.253930Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T14:54:49.419743Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T14:54:49.423244Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T14:54:49.423321Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:54:49.424092Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:54:49.424134Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:54:49.424198Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T14:54:49.425016Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T14:54:49.425293Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:54:49.425972Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T14:54:49.426051Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T14:54:49.428105Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T14:54:49.428572Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:54:49.430347Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T14:54:49.430398Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:54:49.430887Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T14:54:49.430956Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:54:49.431643Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T14:54:49.431685Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T14:54:49.431750Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T14:54:49.431814Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:54:49.431870Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T14:54:49.431941Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:54:49.443532Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:54:49.445678Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T14:54:49.445750Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T14:54:49.446605Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T14:54:49.475709Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:49.475842Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:49.475921Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:49.479618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:49.486230Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:54:49.534677Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:49.659823Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T14:54:49.662875Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:54:49.736698Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:50.114094Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyksb742crp9d198erjt4w7m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmU2OWRiNzEtY2I2YjMzN2UtYTkzNDQ2MTAtMWFkNDY4NGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:54:50.131098Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:809:2640], serverId# [1:810:2641], sessionId# [0:0:0] 2025-06-25T14:54:50.131638Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-25T14:54:50.131851Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-25T14:54:50.144876Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:54:50.169304Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:817:2647], serverId# [1:818:2648], sessionId# [0:0:0] 2025-06-25T14:54:50.169579Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test/")), end at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test0")) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-25T14:54:50.169803Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 2 common prefixes: 1 2025-06-25T14:54:50.170019Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [1:817:2647], serverId# [1:818:2648], sessionId# [0:0:0] 2025-06-25T14:54:50.192719Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:823:2653], serverId# [1:824:2654], sessionId# [0:0:0] 2025-06-25T14:54:50.192988Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test/")), end at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test0")) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-25T14:54:50.193212Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 1 common prefixes: 1 2025-06-25T14:54:50.193438Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [1:823:2653], serverId# [1:824:2654], sessionId# [0:0:0] >> THealthCheckTest::OneIssueListing >> THealthCheckTest::RedGroupIssueWhenDisintegratedGroupStatus >> THealthCheckTest::Basic ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpKv::ReadRows_Nulls [GOOD] Test command err: Trying to start YDB, gRPC: 29097, 
MsgBus: 4881 2025-06-25T14:53:34.990984Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900247506994569:2156];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:34.991237Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e7/r3tmp/tmpmSAgda/pdisk_1.dat 2025-06-25T14:53:35.411969Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:35.413603Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900247506994451:2080] 1750863214943347 != 1750863214943350 2025-06-25T14:53:35.426303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:35.426381Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:35.428801Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29097, node 1 2025-06-25T14:53:35.500808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:35.500826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:35.500832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:35.500925Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4881 TClient is connected to server localhost:4881 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:53:35.992428Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:36.042931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:53:36.063731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:53:37.918251Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900260391896974:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:37.918374Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:38.162142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) IsSuccess(): 1 GetStatus(): SUCCESS Trying to start YDB, gRPC: 26060, MsgBus: 11913 2025-06-25T14:53:39.142674Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900265638438470:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:39.142708Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e7/r3tmp/tmprpb9fy/pdisk_1.dat 2025-06-25T14:53:39.303393Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:39.303461Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:39.307508Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:39.309138Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:39.310409Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900265638438451:2080] 1750863219141898 != 1750863219141901 TServer::EnableGrpc on GrpcPort 26060, node 2 2025-06-25T14:53:39.458589Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:39.458613Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:39.458620Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:39.458720Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11913 TClient is connected to server localhost:11913 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:53:40.045648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:40.177933Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:53:42.269475Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900278523340976:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:42.269532Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:42.283330Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) IsSuccess(): 1 GetStatus(): SUCCESS [] IsSuccess(): 1 GetStatus(): SUCCESS 2025-06-25T14:53:42.404835Z node 2 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: no keys are found in request's proto Trying to start YDB, gRPC: 32192, MsgBus: 18397 2025-06-25T14:53:43.076291Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519900286233748446:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:43.076346Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e7/r3tmp/tmpCUcXi3/pdisk_1.dat 2025-06-25T14:53:43.212945Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:43.216273Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519900286233748425:2080] 1750863223075224 != 1750863223075227 TServer::EnableGrpc on GrpcPort 32192, node 3 2025-06-25T14:53:43.245962Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:43.246041Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:43.249115Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:43.376786Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:43.376808Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:43.376816Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:43.376923Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18397 TClient is conn ... 
.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037936 not found 2025-06-25T14:54:35.917458Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710756:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:36.078835Z node 6 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037937 not found 2025-06-25T14:54:36.091388Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710758:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:36.200347Z node 6 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037938 not found 2025-06-25T14:54:36.214529Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:36.366979Z node 6 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037939 not found Trying to start YDB, gRPC: 17093, MsgBus: 4338 2025-06-25T14:54:37.334050Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900518113284170:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:37.334102Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e7/r3tmp/tmpdYrdRN/pdisk_1.dat 2025-06-25T14:54:37.505073Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:37.527189Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:37.527286Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:37.530683Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17093, node 7 2025-06-25T14:54:37.623122Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:37.623154Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:37.623164Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:37.623328Z node 7 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4338 TClient is connected to server localhost:4338 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:54:38.341076Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:38.366395Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:38.375068Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:41.429631Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:41.532360Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-06-25T14:54:41.539061Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:41.717008Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-06-25T14:54:41.725158Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:41.803661Z node 7 :HIVE WARN: hive_impl.cpp:491: 
HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found Trying to start YDB, gRPC: 13294, MsgBus: 26969 2025-06-25T14:54:42.712431Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7519900539732665938:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:42.712494Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e7/r3tmp/tmpP8Vpjf/pdisk_1.dat 2025-06-25T14:54:42.832442Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:42.851160Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:42.851273Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 13294, node 8 2025-06-25T14:54:42.853655Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:42.890373Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:42.890403Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:42.890414Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:42.890555Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26969 TClient is connected to server localhost:26969 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:43.504895Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:54:43.721741Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:46.586560Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519900556912535720:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:46.586683Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:46.614202Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) IsSuccess(): 1 GetStatus(): SUCCESS |88.1%| [TA] $(B)/ydb/core/tx/datashard/ut_object_storage_listing/test-results/unittest/{meta.json ... results_accumulator.log} |88.1%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/test-results/unittest/{meta.json ... results_accumulator.log} >> TAsyncIndexTests::MergeMainWithReboots[TabletReboots] [GOOD] >> KqpRanges::LiteralOrCompisite [GOOD] >> KqpRanges::LiteralOrCompisiteCollision ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeMainWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:14.315739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:14.315841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:14.315886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:14.315925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:14.315975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:14.316005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:14.316055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:14.316125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:14.316851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:14.317194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:14.378055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:14.378108Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:14.378828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:14.390861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:14.394177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:14.394297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:14.401402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:14.401686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:14.402288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.402576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:14.405430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:14.405617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:14.406677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:14.406744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:14.406850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:14.406889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:14.406928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:14.407057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is 
[0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:14.414014Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:14.548606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:14.548814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.549020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:14.549064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:14.549279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:14.549355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:14.551797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.551980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:14.552221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.552295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:14.552356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:14.552386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:14.554361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.554418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 
ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:14.554463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:14.556116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.556177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:14.556249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:14.556302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:14.560008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:14.561827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:14.562009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:14.562955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.563088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
icy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 
MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:54:51.419263Z node 107 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409549:2][72075186233409546][107:832:2674] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:54:51.419371Z node 107 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409549:2][107:785:2674] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:54:51.419547Z node 107 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409549:2][72075186233409546][107:832:2674] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750863291396157 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750863291396157 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750863291396157 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:54:51.425789Z node 107 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409549:2][72075186233409546][107:832:2674] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-25T14:54:51.425909Z node 107 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409549:2][107:785:2674] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:54:51.599990Z node 107 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:54:51.600359Z node 107 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 385us result status StatusSuccess 2025-06-25T14:54:51.601396Z node 107 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/backup/impl/ut_local_partition_reader/unittest >> TPersQueueTest::SetupReadSession [GOOD] >> TPersQueueTest::TestBigMessage |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest |88.1%| [TA] $(B)/ydb/core/backup/impl/ut_local_partition_reader/test-results/unittest/{meta.json ... results_accumulator.log} |88.1%| [TA] {RESULT} $(B)/ydb/core/backup/impl/ut_local_partition_reader/test-results/unittest/{meta.json ... 
results_accumulator.log} >> THealthCheckTest::StorageLimit95 >> KqpRanges::ValidatePredicatesDataQuery [GOOD] >> KqpReturning::Random >> THealthCheckTest::Basic [GOOD] >> THealthCheckTest::BasicNodeCheckRequest >> test_select.py::TestDML::test_select[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> KqpRanges::MergeRanges [GOOD] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_create_path_second_time_then_it_is_ok >> BasicUsage::WriteSessionWriteInHandlers >> BasicUsage::WaitEventBlocksBeforeDiscovery >> BasicUsage::WriteSessionNoAvailableDatabase >> BasicUsage::BasicWriteSession |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> BasicUsage::RetryDiscoveryWithCancel |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpRanges::MergeRanges [GOOD] Test command err: Trying to start YDB, gRPC: 20416, MsgBus: 4813 2025-06-25T14:53:37.228285Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900260010571160:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:37.228369Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e2/r3tmp/tmpogy6nE/pdisk_1.dat 2025-06-25T14:53:37.632848Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53:37.640510Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900260010571141:2080] 1750863217225085 != 1750863217225088 TServer::EnableGrpc on GrpcPort 20416, node 1 2025-06-25T14:53:37.728663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:37.728739Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:37.792888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:37.892651Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:37.892686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:37.892694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:37.892824Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4813 2025-06-25T14:53:38.255113Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4813 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:38.475386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:38.506440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:38.661840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:38.872685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:38.967908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:40.561293Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900272895474652:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:40.561399Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:40.855832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.891631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.927203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.954301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.986175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:41.067784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:41.137548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:41.215136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900277190442609:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:41.215218Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:41.215574Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900277190442614:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:41.219433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:41.234221Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900277190442616:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:41.304634Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900277190442669:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:42.228882Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900260010571160:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:42.229410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:53:42.292778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:42.527684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part ... 784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e2/r3tmp/tmpHPYdxT/pdisk_1.dat 2025-06-25T14:54:46.681691Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:46.682956Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519900556007796190:2080] 1750863286507964 != 1750863286507967 2025-06-25T14:54:46.702363Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:46.702503Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:46.704737Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13487, node 10 2025-06-25T14:54:46.777037Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:46.777072Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:46.777084Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:46.777267Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22344 TClient is connected to server localhost:22344 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:47.488120Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:47.500720Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:47.516765Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:47.517176Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:47.597785Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:47.787018Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:47.855891Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:51.095536Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519900577482634305:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:51.095638Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:51.171728Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:51.247661Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:51.288286Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:51.331973Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:51.375866Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:51.436437Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:51.508778Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519900556007796209:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:51.508902Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:51.514214Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:51.703712Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[10:7519900577482634971:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:51.703847Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:51.703934Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7519900577482634976:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:51.709454Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:51.727956Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7519900577482634978:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:51.790352Z node 10 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [10:7519900577482635030:3428] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:53.148418Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:53.827198Z node 10 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863293865, txId: 281474976715674] shutting down |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> KqpNewEngine::StaleRO_Immediate [GOOD] >> KqpNewEngine::StaleRO_IndexFollowers+EnableFollowers |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromFile >> KqpNamedExpressions::NamedExpressionRandomInsert-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomDataQuery+UseSink >> BasicUsage::SelectDatabaseByHash [GOOD] >> BasicUsage::SelectDatabase [GOOD] >> THealthCheckTest::BasicNodeCheckRequest [GOOD] >> THealthCheckTest::BlueGroupIssueWhenPartialGroupStatusAndReplicationDisks >> KqpSqlIn::SecondaryIndex_SimpleKey [GOOD] >> KqpSqlIn::SecondaryIndex_ComplexKey_In_And_In >> KqpNewEngine::DqSourceLiteralRange [GOOD] >> KqpNewEngine::DqSourceLimit >> BasicUsage::FallbackToSingleDb |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::SelectDatabase [GOOD] >> KqpAgg::AggHashShuffle+UseSink [GOOD] >> KqpAgg::AggHashShuffle-UseSink >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert-UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink-UseDataQuery >> TNetClassifierTest::TestInitFromBadlyFormattedFile >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-StrictAclCheck [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly >> THealthCheckTest::OneIssueListing [GOOD] >> THealthCheckTest::OnlyDiskIssueOnFaultyPDisks >> THealthCheckTest::RedGroupIssueWhenDisintegratedGroupStatus [GOOD] >> THealthCheckTest::ProtobufUnderLimitFor70LargeVdisksIssues >> test_auditlog.py::test_dynconfig [GOOD] >> TNetClassifierTest::TestInitFromFile [GOOD] >> THealthCheckTest::StorageLimit95 [GOOD] >> THealthCheckTest::StorageLimit87 |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromFile [GOOD] Test command err: 2025-06-25T14:54:57.343843Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900604507910851:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:57.343919Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019f6/r3tmp/tmpXJ9ki5/pdisk_1.dat 2025-06-25T14:54:57.887788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:57.887917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:57.922058Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:57.954488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:58.041064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0019f6/r3tmp/yandexI3bgOy.tmp 2025-06-25T14:54:58.041086Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0019f6/r3tmp/yandexI3bgOy.tmp 2025-06-25T14:54:58.044556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0019f6/r3tmp/yandexI3bgOy.tmp 2025-06-25T14:54:58.044722Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:54:58.352476Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TNetClassifierTest::TestInitFromBadlyFormattedFile [GOOD] >> TPersQueueTest::Codecs_WriteMessageWithNonDefaultCodecThatHasToBeConfiguredAdditionally_SessionClosedWithBadRequestError [GOOD] >> TPersQueueTest::CreateTopicWithMeteringMode |88.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> KqpRanges::LiteralOrCompisiteCollision [GOOD] >> KqpRanges::Like ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromBadlyFormattedFile [GOOD] Test command err: 2025-06-25T14:54:59.028150Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900610151163545:2155];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:59.080700Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0019cb/r3tmp/tmpNSgIrz/pdisk_1.dat 2025-06-25T14:54:59.460061Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:59.464447Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900605856196103:2080] 1750863298888451 != 1750863298888454 2025-06-25T14:54:59.470412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:59.470488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:59.473349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
2025-06-25T14:54:59.508968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0019cb/r3tmp/yandexGyoK73.tmp 2025-06-25T14:54:59.508995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0019cb/r3tmp/yandexGyoK73.tmp 2025-06-25T14:54:59.509110Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:344: invalid NetData format 2025-06-25T14:54:59.509135Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: /home/runner/.ya/build/build_root/yft8/0019cb/r3tmp/yandexGyoK73.tmp 2025-06-25T14:54:59.509247Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> KqpReturning::Random [GOOD] >> BasicUsage::GetAllStartPartitionSessions >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_create_path_second_time_then_it_is_ok [GOOD] >> TTopicYqlTest::BadRequests [GOOD] >> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] [GOOD] >> TPersQueueTest::ReadWithoutConsumerFederation [GOOD] >> TPersQueueTest::ReadWithoutConsumerFirstClassCitizen ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpReturning::Random [GOOD] Test command err: Trying to start YDB, gRPC: 18646, MsgBus: 14562 2025-06-25T14:53:04.828269Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900117415546168:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:04.828706Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009ff/r3tmp/tmpn6tVKo/pdisk_1.dat 2025-06-25T14:53:05.287947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:05.288098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:05.292469Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18646, node 1 2025-06-25T14:53:05.313061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:05.532951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:05.532974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:05.533007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:53:05.533149Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:53:05.848762Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14562 TClient is connected to server localhost:14562 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:06.327569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:06.374707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:06.519325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:06.663437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:06.744376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:08.209891Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900134595416974:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:08.209982Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:08.491644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:08.525406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:08.595310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:08.625504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:08.656263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:08.698572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:08.766277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:08.846821Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900134595417635:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:08.846876Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:08.847121Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900134595417640:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:08.851147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:08.863058Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900134595417642:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:08.957587Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900134595417695:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:09.828473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900117415546168:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:09.828519Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Warning: Type annotation, code: 1030
:4:13: Warning: At function: RemovePrefixMembers, At function: RemoveSystemMembers, At function: PersistableRepr, At function: SqlProject
:4:27: Warning: At function: Filter, At lambda, At function: Coalesce
:4:50: Warning: At function: SqlIn
:4:50: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 Trying to start YDB, gRPC: 25020, MsgBus: 17944 2025-06-25T14:53:11.248988Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:75199001482 ... 25-06-25T14:54:43.059927Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Trying to start YDB, gRPC: 61904, MsgBus: 61158 2025-06-25T14:54:54.523849Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7519900589001318530:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:54.523940Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009ff/r3tmp/tmp1omJqA/pdisk_1.dat 2025-06-25T14:54:54.725258Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:54.725317Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519900589001318511:2080] 1750863294523409 != 1750863294523412 2025-06-25T14:54:54.742744Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:54.742864Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:54.744688Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61904, node 8 2025-06-25T14:54:54.804939Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:54.804970Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:54.804992Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:54.805135Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61158 TClient is connected to server localhost:61158 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:54:55.530282Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:55.550116Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:55.568957Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:55.657309Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:55.877642Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:55.981340Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:59.526329Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7519900589001318530:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:59.526422Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:59.840912Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519900610476156648:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:59.841031Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:59.882153Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:59.935410Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:59.991179Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:00.059388Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:00.119967Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:00.186563Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:00.291047Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:00.400382Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519900614771124600:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.400507Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.400982Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519900614771124605:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.406955Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:00.426418Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7519900614771124607:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:00.495798Z node 8 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [8:7519900614771124658:3433] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:02.115610Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> THealthCheckTest::BlueGroupIssueWhenPartialGroupStatusAndReplicationDisks [GOOD] >> THealthCheckTest::GreenStatusWhenCreatingGroup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeIndexWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:13.616457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:13.616568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:13.616613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:13.616656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:13.616704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:13.616733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:13.616789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:13.616877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:13.617612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:13.617954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:13.697726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:13.697782Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:13.698575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:13.714925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:13.719239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:13.719446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:13.727527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:13.727752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:13.728467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.728743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:13.731766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:13.731950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:13.732965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:13.733034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:13.733132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:13.733168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:13.733213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:13.733345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 
Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:13.739936Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:13.856951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:13.857113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.857255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:13.857285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:13.857453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:13.857507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:13.859303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.859421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:13.859577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.859632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:13.859682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:13.859712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:13.861153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.861195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: 
NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:13.861224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:13.862485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.862519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:13.862560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:13.862594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:13.865351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:13.866712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:13.866842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:13.867598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:13.867696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
SizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 
} Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:55:04.337077Z node 104 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:55:04.337435Z node 104 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 403us result status StatusSuccess 2025-06-25T14:55:04.338406Z node 104 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 
StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TTopicYqlTest::BadRequests [GOOD] Test command err: 2025-06-25T14:51:26.051899Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899697695898726:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:26.057108Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:26.078797Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899696270406992:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:26.078863Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b4e/r3tmp/tmp56YQt7/pdisk_1.dat 2025-06-25T14:51:26.270908Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:26.277159Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:26.489676Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:26.501756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:26.501837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:26.506335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:26.506395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:26.510953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> 
Connected 2025-06-25T14:51:26.514072Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:51:26.515157Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:26.539029Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 25439, node 1 2025-06-25T14:51:26.638703Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b4e/r3tmp/yandex0gDxsx.tmp 2025-06-25T14:51:26.638727Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b4e/r3tmp/yandex0gDxsx.tmp 2025-06-25T14:51:26.638899Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b4e/r3tmp/yandex0gDxsx.tmp 2025-06-25T14:51:26.639076Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:26.700712Z INFO: TTestServer started on Port 15717 GrpcPort 25439 TClient is connected to server localhost:15717 PQClient connected to localhost:25439 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:26.960410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:51:27.004031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:27.048505Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:27.086206Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 
2025-06-25T14:51:28.745784Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899706285834364:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:28.745863Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899706285834375:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:28.745935Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:28.749290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:28.770827Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899706285834378:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:51:28.969301Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899706285834470:2764] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:28.996699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:29.075332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:29.097291Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519899706285834483:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:51:29.097462Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZjIwNTUwODctNTk5YzZhYy1mMjM5OTk3Mi0xYjM2MjcxNA==, ActorId: [1:7519899706285834362:2297], ActorState: ExecuteState, TraceId: 01jyks53383pj9by7k2jcm4jrm, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:51:29.099155Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:51:29.168665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:29.196010Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899709155309286:2282], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:51:29.196192Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NDdlMzM0YzctOTQ0NzhlYmMtYWFiYTQ4M2ItZjRmODM3YmQ=, ActorId: [2:7519899709155309247:2276], ActorState: ExecuteState, TraceId: 01jyks53gn5vm2rqc1xx5bck11, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:51:29.196553Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 ... 369240Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976710678 moved from WAIT_RS to EXECUTING 2025-06-25T14:55:01.369261Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72075186224037892] Received 0, Expected 1 2025-06-25T14:55:01.369330Z node 25 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1750863301390, TxId 281474976710678 2025-06-25T14:55:01.369897Z node 25 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:55:01.373108Z node 25 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:55:01.373199Z node 25 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:55:01.373309Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3583: [PQ: 72075186224037892] Handle TEvPQ::TEvTxCommitDone Step 1750863301390, TxId 281474976710678, Partition 0 2025-06-25T14:55:01.373345Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTING 2025-06-25T14:55:01.373369Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976710678, State EXECUTING 2025-06-25T14:55:01.373402Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976710678 State EXECUTING FrontTxId 281474976710678 2025-06-25T14:55:01.373424Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72075186224037892] Received 1, Expected 1 2025-06-25T14:55:01.373461Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4224: [PQ: 72075186224037892] TxId: 281474976710678 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-25T14:55:01.373508Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4555: [PQ: 72075186224037892] complete TxId 281474976710678 2025-06-25T14:55:01.373967Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72075186224037892] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } 
ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "rt3.dc1--legacy--topic1" Version: 0 LocalDC: true RequireAuthWrite: true RequireAuthRead: true Producer: "legacy" Ident: "legacy" Topic: "topic1" DC: "dc1" FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/rt3.dc1--legacy--topic1" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } Consumers { Name: "c2" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } 2025-06-25T14:55:01.374109Z node 25 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:55:01.374236Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4573: [PQ: 72075186224037892] delete partitions for TxId 281474976710678 2025-06-25T14:55:01.374262Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976710678, NewState EXECUTED 2025-06-25T14:55:01.374290Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976710678 moved from EXECUTING to EXECUTED 2025-06-25T14:55:01.374322Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72075186224037892] write key for TxId 281474976710678 2025-06-25T14:55:01.374858Z node 25 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976710678] save tx TxId: 281474976710678 State: EXECUTED MinStep: 1750863301005 MaxStep: 18446744073709551615 Step: 1750863301390 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "rt3.dc1--legacy--topic1" Version: 0 LocalDC: true RequireAuthWrite: true RequireAuthRead: true Producer: "legacy" Ident: "legacy" Topic: "topic1" DC: "dc1" FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/rt3.dc1--legacy--topic1" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "c1" ReadFromTimestampsMs: 0 FormatVersion: 0 
Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } Consumers { Name: "c2" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519900567940534762 RawX2: 107374184611 } Partitions { Partition { PartitionId: 0 } } 2025-06-25T14:55:01.375137Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:55:01.383115Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:55:01.383201Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-25T14:55:01.383235Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976710678, State EXECUTED 2025-06-25T14:55:01.383261Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976710678 State EXECUTED FrontTxId 281474976710678 2025-06-25T14:55:01.383287Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-25T14:55:01.383317Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976710678, NewState WAIT_RS_ACKS 2025-06-25T14:55:01.383344Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976710678 moved from EXECUTED to WAIT_RS_ACKS 2025-06-25T14:55:01.383382Z node 25 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976710678] PredicateAcks: 0/0 2025-06-25T14:55:01.383394Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-25T14:55:01.383415Z node 25 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976710678] PredicateAcks: 0/0 2025-06-25T14:55:01.383443Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037892] add an TxId 281474976710678 to the list for deletion 2025-06-25T14:55:01.383477Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976710678, NewState DELETING 2025-06-25T14:55:01.383516Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037892] delete key for TxId 281474976710678 2025-06-25T14:55:01.383602Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:55:01.390098Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:55:01.390143Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-25T14:55:01.390171Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976710678, State DELETING 2025-06-25T14:55:01.390202Z node 25 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037892] delete TxId 281474976710678 2025-06-25T14:55:02.152381Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710679, task: 1, CA Id [25:7519900623775111452:2463]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 0 2025-06-25T14:55:02.192884Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710679, task: 1, CA Id [25:7519900623775111452:2463]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:55:02.244883Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710679, task: 1, CA Id [25:7519900623775111452:2463]. 
Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:55:02.317172Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710679, task: 1, CA Id [25:7519900623775111452:2463]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:55:02.395069Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710679, task: 1, CA Id [25:7519900623775111452:2463]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:55:02.541900Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710679, task: 1, CA Id [25:7519900623775111452:2463]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:55:02.765515Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710679, task: 1, CA Id [25:7519900623775111452:2463]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 2025-06-25T14:55:02.765570Z node 25 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976710680. Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-25T14:55:02.765681Z node 25 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [25:7519900623775111499:2461] TxId: 281474976710680. Ctx: { TraceId: 01jyksbkfcdkxn8ktc5zeykj4d, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=25&id=OTgyNDEzMTQtNmQwNGQ1MzYtNjNiZjRkNy1hNjU0OGNkZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-25T14:55:02.765898Z node 25 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=25&id=OTgyNDEzMTQtNmQwNGQ1MzYtNjNiZjRkNy1hNjU0OGNkZA==, ActorId: [25:7519900623775111441:2461], ActorState: ExecuteState, TraceId: 01jyksbkfcdkxn8ktc5zeykj4d, Create QueryResponse for error on request, msg: 2025-06-25T14:55:02.767016Z node 25 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jyksbky37e41361hydb4952b" } } YdbStatus: UNAVAILABLE ConsumedRu: 309 } 2025-06-25T14:55:03.400411Z node 25 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710679, task: 1, CA Id [25:7519900623775111452:2463]. Got EvDeliveryProblem, TabletId: 72075186224037891, NotDelivered: 1 >> KqpNewEngine::DqSourceLimit [GOOD] >> TModifyUserTest::ModifyUser >> THealthCheckTest::ProtobufUnderLimitFor70LargeVdisksIssues [GOOD] >> THealthCheckTest::ServerlessBadTablets >> THealthCheckTest::OnlyDiskIssueOnFaultyPDisks [GOOD] >> THealthCheckTest::NoStoragePools |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> THealthCheckTest::StorageLimit87 [GOOD] >> THealthCheckTest::StorageLimit80 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::DqSourceLimit [GOOD] Test command err: Trying to start YDB, gRPC: 16840, MsgBus: 5556 2025-06-25T14:54:17.914823Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900429613514855:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:17.915167Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009b1/r3tmp/tmpFW9xXK/pdisk_1.dat 2025-06-25T14:54:18.332386Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900429613514649:2080] 1750863257828444 != 1750863257828447 2025-06-25T14:54:18.338593Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:18.341885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:18.341987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:18.351110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16840, node 1 2025-06-25T14:54:18.499318Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:18.499340Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:18.499347Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:18.499462Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5556 2025-06-25T14:54:18.880600Z node 1 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5556 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:19.210714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:19.232216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:54:19.245346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:19.391992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:54:19.541753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:19.605620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:20.933942Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900442498418177:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:20.934021Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:21.229645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:21.258365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:21.294444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:21.326601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:21.362740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:21.424723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:21.459994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:21.539597Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900446793386130:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:21.539656Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:21.539904Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900446793386135:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:21.543253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:21.553023Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900446793386137:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:54:21.624059Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900446793386188:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:22.914245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900429613514855:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:22.914304Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 63954, MsgBus: 15107 2025-06-25T14:54:23.926840Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900456299602640:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:23.945996Z node 2 :METADATA_PROVIDER ERROR: log.cpp:7 ... 644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:55.637117Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519900572340949805:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:55.637212Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 63321, MsgBus: 22485 2025-06-25T14:54:58.152264Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900606611744493:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:58.152329Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009b1/r3tmp/tmpYHmaBU/pdisk_1.dat 2025-06-25T14:54:58.505218Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:58.505314Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:58.508262Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:58.511043Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:58.511829Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900606611744465:2080] 1750863298151446 != 1750863298151449 TServer::EnableGrpc on GrpcPort 63321, node 7 2025-06-25T14:54:58.648988Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:58.649018Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:58.649042Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty 
maybe) 2025-06-25T14:54:58.649433Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22485 2025-06-25T14:54:59.181787Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22485 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:59.687634Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:59.712111Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:59.802361Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:00.014186Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:55:00.117122Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:03.020810Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900628086582574:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:03.020914Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:03.074681Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:03.111136Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:03.152752Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900606611744493:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:03.152847Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:03.183577Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:03.228481Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:03.272979Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:03.355842Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:03.395859Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:03.469573Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519900628086583236:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:03.469663Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:03.469705Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900628086583241:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:03.474363Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:03.487179Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900628086583243:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:03.542547Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900628086583295:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpAgg::AggHashShuffle-UseSink [GOOD] >> KqpExtractPredicateLookup::ComplexRange >> TableCreation::SimpleTableCreation >> KqpProxy::PingNotExistedSession >> TPQCompatTest::ReadWriteSessions [GOOD] >> TBlobStorageStoragePoolMonTest::ReducedSizeClassCalcTest [GOOD] |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest |88.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TBlobStorageStoragePoolMonTest::ReducedSizeClassCalcTest [GOOD] >> TargetDiscoverer::Transfer >> KqpSqlIn::SecondaryIndex_ComplexKey_In_And_In [GOOD] >> KqpSqlIn::PhasesCount |88.2%| [TA] $(B)/ydb/core/blobstorage/storagepoolmon/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/storagepoolmon/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TPersQueueTest::DirectReadRestartTablet [GOOD] >> TPersQueueTest::EachMessageGetsExactlyOneAcknowledgementInCorrectOrder >> TModifyUserTest::ModifyUser [GOOD] >> TModifyUserTest::ModifyLdapUser >> BasicUsage::BasicWriteSession [GOOD] >> BasicUsage::CloseWriteSessionImmediately >> TPersQueueTest::TestBigMessage [GOOD] >> TPersQueueTest::SetMeteringMode ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPQCompatTest::ReadWriteSessions [GOOD] Test command err: 2025-06-25T14:51:27.656832Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899700594939352:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:27.657302Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:27.689470Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899701453568959:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:27.689533Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:27.801080Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b35/r3tmp/tmpaK8lVw/pdisk_1.dat 2025-06-25T14:51:27.803934Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:27.956068Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:27.985302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:27.985391Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:27.990685Z node 1 :HIVE WARN: 
hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:51:27.991483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64022, node 1 2025-06-25T14:51:28.016776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:28.016840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:28.019253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:28.050047Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b35/r3tmp/yandex8Pfbb9.tmp 2025-06-25T14:51:28.050083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b35/r3tmp/yandex8Pfbb9.tmp 2025-06-25T14:51:28.050229Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b35/r3tmp/yandex8Pfbb9.tmp 2025-06-25T14:51:28.050388Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:28.105991Z INFO: TTestServer started on Port 29451 GrpcPort 64022 TClient is connected to server localhost:29451 PQClient connected to localhost:64022 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:28.325064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:51:28.361919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:51:28.507575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976720660, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:51:28.672668Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:28.695192Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:30.392363Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899714338471121:2269], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:30.392364Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899714338471129:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:30.392515Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:30.399338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:30.428014Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899714338471135:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-25T14:51:30.665892Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899714338471163:2131] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:30.692979Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519899713479842397:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:51:30.693605Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MjgwNDkyY2UtOGU0MWJiMWEtNDE5MzEzNzQtY2UzZDNkNjc=, ActorId: [1:7519899713479842326:2300], ActorState: ExecuteState, TraceId: 01jyks54q2a2kpnzvaat2rb6p3, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:51:30.694903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:30.695504Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:51:30.694737Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899714338471178:2277], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:51:30.694903Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZDkyNDA3MzgtOGFkZWNiOWQtYTAzNzk1MDEtZGM2NGE4Nw==, ActorId: [2:7519899714338471119:2268], ActorState: ExecuteState, TraceId: 01jyks54pp9br0ec4581efn89c, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:51:30.695505Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:51:30.773299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:30.848539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720664:0, at schemeshard: 72057594046644480, first GetDB called at ... 
us: Ok Kind: KindTopic DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:55:05.999135Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:613: Got describe topics SC response 2025-06-25T14:55:05.999191Z node 27 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 Handle describe topics response 2025-06-25T14:55:05.999396Z node 27 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 auth is DEAD 2025-06-25T14:55:05.999407Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 auth ok: topics# 1, initDone# 0 2025-06-25T14:55:06.000327Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:1196: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 register session: topic# rt3.dc2--account--topic2 ===Got response: 2025-06-25T14:55:06.000749Z node 27 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037899][topic2-mirrored-from-dc2] pipe [27:7519900636533406200:2615] connected; active server actors: 1 status: SUCCESS init_response { session_id: "shared/user_27_6_10941878248046144843_v1" } 2025-06-25T14:55:06.000864Z node 27 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1699: [72075186224037899][topic2-mirrored-from-dc2] consumer "user" register session for pipe [27:7519900636533406200:2615] session shared/user_27_6_10941878248046144843_v1 2025-06-25T14:55:06.000922Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:635: [72075186224037899][topic2-mirrored-from-dc2] consumer user register readable partition 0 2025-06-25T14:55:06.000998Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:665: [72075186224037899][topic2-mirrored-from-dc2] consumer user family created family=1 (Status=Free, Partitions=[0]) 2025-06-25T14:55:06.001056Z node 27 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:867: [72075186224037899][topic2-mirrored-from-dc2] consumer user register reading session ReadingSession "shared/user_27_6_10941878248046144843_v1" (Sender=[27:7519900636533406197:2615], Pipe=[27:7519900636533406200:2615], Partitions=[], ActiveFamilyCount=0) 2025-06-25T14:55:06.001091Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037899][topic2-mirrored-from-dc2] consumer user rebalancing was scheduled 2025-06-25T14:55:06.001165Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037899][topic2-mirrored-from-dc2] consumer user balancing. 
Sessions=1, Families=1, UnradableFamilies=1 [1 (0), ], RequireBalancing=0 [] 2025-06-25T14:55:06.001252Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1302: [72075186224037899][topic2-mirrored-from-dc2] consumer user balancing family=1 (Status=Free, Partitions=[0]) for ReadingSession "shared/user_27_6_10941878248046144843_v1" (Sender=[27:7519900636533406197:2615], Pipe=[27:7519900636533406200:2615], Partitions=[], ActiveFamilyCount=0) 2025-06-25T14:55:06.001342Z node 27 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:545: [72075186224037899][topic2-mirrored-from-dc2] consumer user family 1 status Active partitions [0] session "shared/user_27_6_10941878248046144843_v1" sender [27:7519900636533406197:2615] lock partition 0 for ReadingSession "shared/user_27_6_10941878248046144843_v1" (Sender=[27:7519900636533406197:2615], Pipe=[27:7519900636533406200:2615], Partitions=[], ActiveFamilyCount=1) generation 1 step 3 2025-06-25T14:55:06.001422Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037899][topic2-mirrored-from-dc2] consumer user start rebalancing. familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-25T14:55:06.001468Z node 27 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037899][topic2-mirrored-from-dc2] consumer user balancing duration: 0.000267s 2025-06-25T14:55:06.002267Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:1315: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 assign: record# { Partition: 0 TabletId: 72075186224037898 Topic: "topic2-mirrored-from-dc2" Generation: 1 Step: 3 Session: "shared/user_27_6_10941878248046144843_v1" ClientId: "user" PipeClient { RawX1: 7519900636533406200 RawX2: 4503715591490103 } Path: "/Root/LbCommunal/account/topic2-mirrored-from-dc2" } 2025-06-25T14:55:06.002404Z node 27 :PQ_READ_PROXY INFO: partition_actor.cpp:1132: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 INITING TopicId: Topic topic2-mirrored-from-dc2 in dc dc2 in database: Root, partition 0(assignId:1) 2025-06-25T14:55:06.002928Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037898] server connected, pipe [27:7519900640828373499:2618], now have 1 active actors on pipe 2025-06-25T14:55:06.003132Z node 27 :PQ_READ_PROXY INFO: partition_actor.cpp:972: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 TopicId: Topic topic2-mirrored-from-dc2 in dc dc2 in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037898 Generation: 1, pipe: [27:7519900640828373499:2618] 2025-06-25T14:55:06.003260Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic2-mirrored-from-dc2' requestId: 2025-06-25T14:55:06.003297Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037898] got client message batch for topic 'rt3.dc2--account--topic2' partition 0 2025-06-25T14:55:06.003346Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:1946: [PQ: 72075186224037898] Created session shared/user_27_6_10941878248046144843_v1 on pipe: [27:7519900640828373499:2618] 2025-06-25T14:55:06.003436Z node 28 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: shared/user_27_6_10941878248046144843_v1:1 with generation 1 2025-06-25T14:55:06.003490Z node 28 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037898, Partition: 0, State: StateIdle] Topic 
'rt3.dc2--account--topic2' partition 0 user user session is set to 0 (startOffset 0) session shared/user_27_6_10941878248046144843_v1 2025-06-25T14:55:06.003647Z node 28 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:55:06.006507Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic2-mirrored-from-dc2' partition: 0 messageNo: 0 requestId: cookie: 18446744073709551615 2025-06-25T14:55:06.006531Z node 28 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037898, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:55:06.006596Z node 28 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037898, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:55:06.006874Z node 27 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 TopicId: Topic topic2-mirrored-from-dc2 in dc dc2 in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 0 SizeLag: 0 WriteTimestampEstimateMS: 0 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-06-25T14:55:06.006941Z node 27 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 INIT DONE TopicId: Topic topic2-mirrored-from-dc2 in dc dc2 in database: Root, partition 0(assignId:1) EndOffset 0 readOffset 0 committedOffset 0 2025-06-25T14:55:06.007056Z node 27 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 sending to client partition status ===Got response: status: SUCCESS start_partition_session_request { partition_session { partition_session_id: 1 path: "account/topic2-mirrored-from-dc2" } partition_offsets { } } 2025-06-25T14:55:06.013643Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037898] Destroy direct read session shared/user_27_6_10941878248046144843_v1 2025-06-25T14:55:06.013718Z node 28 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037898] server disconnected, pipe [27:7519900640828373499:2618] destroyed 2025-06-25T14:55:06.013751Z node 28 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_27_6_10941878248046144843_v1 2025-06-25T14:55:06.013064Z node 27 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 grpc read done: success# 0, data# { } 2025-06-25T14:55:06.013090Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 grpc read failed 2025-06-25T14:55:06.013115Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 grpc closed 2025-06-25T14:55:06.013162Z node 27 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 6 consumer shared/user session shared/user_27_6_10941878248046144843_v1 is DEAD 2025-06-25T14:55:06.013726Z node 27 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037899][topic2-mirrored-from-dc2] pipe [27:7519900636533406200:2615] disconnected; active server actors: 1 2025-06-25T14:55:06.013758Z node 27 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: 
[72075186224037899][topic2-mirrored-from-dc2] pipe [27:7519900636533406200:2615] client user disconnected session shared/user_27_6_10941878248046144843_v1 2025-06-25T14:55:06.548499Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:743: Check version rescan 2025-06-25T14:55:06.572827Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:290: Got config version: 6 2025-06-25T14:55:06.583071Z node 27 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [27:7519900640828373555:2631] TxId: 281474976715706. Ctx: { TraceId: 01jyksbqtk2dxvxbj6prx0d572, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=27&id=YzVhMWE3OGUtYThkNDQwN2QtNGJlYTk3ZDUtZTFjM2IyMmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 28 2025-06-25T14:55:06.583239Z node 27 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [27:7519900640828373559:2631], TxId: 281474976715706, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=27&id=YzVhMWE3OGUtYThkNDQwN2QtNGJlYTk3ZDUtZTFjM2IyMmE=. TraceId : 01jyksbqtk2dxvxbj6prx0d572. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [27:7519900640828373555:2631], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2025-06-25T14:55:06.587982Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:167: HandleClustersUpdate 2025-06-25T14:55:06.588017Z node 27 :PQ_METACACHE DEBUG: msgbus_server_pq_metacache.cpp:169: HandleClustersUpdate LocalCluster !LocalCluster.empty() 2025-06-25T14:55:06.819646Z node 27 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:55:06.819676Z node 27 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly [GOOD] >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly-StrictAclCheck >> BasicUsage::WriteSessionNoAvailableDatabase [GOOD] >> BasicUsage::WriteSessionSwitchDatabases >> BasicUsage::WriteSessionWriteInHandlers [GOOD] >> KqpRanges::Like [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dynconfig [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/0011f3/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk16/testing_out_stuff/test_auditlog.py.test_dynconfig/audit.txt 2025-06-25T14:54:59.668032Z: {"sanitized_token":"**** (B6C6F477)","subject":"root@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionWriteInHandlers [GOOD] Test command err: 2025-06-25T14:54:55.997921Z :WriteSessionWriteInHandlers INFO: Random seed for debugging is 1750863295997889 2025-06-25T14:54:56.413115Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900598556851828:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:56.413236Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:54:56.503029Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900598700385095:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:56.507456Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012bc/r3tmp/tmpizSmm8/pdisk_1.dat 2025-06-25T14:54:56.728731Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:54:56.728994Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:54:57.018268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:57.018354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:57.027916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:57.027970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:57.033689Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:57.035658Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:54:57.037324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:57.080790Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21279, node 1 2025-06-25T14:54:57.120925Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:54:57.121425Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:54:57.312381Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0012bc/r3tmp/yandexKegx0s.tmp 2025-06-25T14:54:57.312409Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0012bc/r3tmp/yandexKegx0s.tmp 2025-06-25T14:54:57.312561Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0012bc/r3tmp/yandexKegx0s.tmp 2025-06-25T14:54:57.312667Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:54:57.467886Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:57.511058Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:57.600415Z INFO: TTestServer started on Port 9525 GrpcPort 21279 TClient is connected to server localhost:9525 PQClient connected to localhost:21279 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:54:58.067616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:55:00.376530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900615736721986:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.376670Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.379029Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900615736722007:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.387603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:00.544493Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900615736722009:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:55:00.892556Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900615736722101:2691] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:00.960082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:00.964956Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519900615880254455:2276], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:00.965265Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NDA4OTgwMmEtNDdlZDQ2ODItNjU5YTY0ZC02YjdjM2IyZA==, ActorId: [2:7519900615880254405:2268], ActorState: ExecuteState, TraceId: 01jyksbj0sadv3kpc52fvetpm3, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:00.964734Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900615736722114:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:00.966855Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=N2E5ODE3MDgtZjc5M2FkYjQtNGQ4NDVhNDMtYjVkNmNlYTU=, ActorId: [1:7519900615736721977:2295], ActorState: ExecuteState, TraceId: 01jyksbhqvcwnv4k7xfbzqqrk7, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:00.969293Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:00.969293Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:01.107374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:01.260198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:21279", true, true, 10 ... 
impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-25T14:55:09.170033Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 Topic 'rt3.dc1--test-topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-06-25T14:55:09.170129Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2025-06-25T14:55:09.170917Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-25T14:55:09.171327Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 1 written { } } write_statistics { persisting_time { nanos: 10000000 } min_queue_wait_time { nanos: 35000000 } max_queue_wait_time { nanos: 35000000 } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-25T14:55:09.170265Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 177 count 1 last offset 0, current partition end offset: 1 2025-06-25T14:55:09.171381Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] OnAck: seqNo=1, txId=? 2025-06-25T14:55:09.170297Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-25T14:55:09.170344Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 177 accessed 0 times before, last time 2025-06-25T14:55:09.000000Z 2025-06-25T14:55:09.171424Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write session: acknoledged message 1 2025-06-25T14:55:09.170373Z node 2 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-25T14:55:09.170418Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-25T14:55:09.170516Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:55:09.170562Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 157 from pos 0 cbcount 1 2025-06-25T14:55:09.170625Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp done, result 1750863309122 queuesize 0 startOffset 0 === Inside AcksHandler 2025-06-25T14:55:09.171705Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write 1 messages with Id from 2 to 2 === Inside ReadyToAcceptHandler === AcksHandler has written a message, closing the session 2025-06-25T14:55:09.172035Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write session: try to update token 2025-06-25T14:55:09.172075Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Send 1 message(s) (0 left), first sequence number is 2 2025-06-25T14:55:09.172564Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-25T14:55:09.172761Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-25T14:55:09.173095Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:55:09.173123Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:55:09.173177Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 2 requestId: cookie: 2 2025-06-25T14:55:09.173484Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2025-06-25T14:55:09.173795Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:55:09.173810Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:55:09.173848Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72075186224037892] got client message topic: rt3.dc1--test-topic partition: 0 SourceId: '\0src_id' SeqNo: 2 partNo : 0 messageNo: 3 size 107 offset: -1 2025-06-25T14:55:09.173968Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob processing sourceId '\0src_id' seqNo 2 partNo 0 2025-06-25T14:55:09.174645Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob complete sourceId '\0src_id' seqNo 2 partNo 0 FormedBlobsCount 0 NewHead: Offset 1 PartNo 0 PackedSize 181 count 1 nextOffset 2 batches 1 2025-06-25T14:55:09.174925Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Add new write blob: topic 'rt3.dc1--test-topic' partition 0 compactOffset 1,1 HeadOffset 1 endOffset 1 curOffset 2 d0000000000_00000000000000000001_00000_0000000001_00000? 
size 169 WTime 1750863309174 2025-06-25T14:55:09.175001Z node 2 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:55:09.175040Z node 2 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 1 partNo 0 count 1 size 169 2025-06-25T14:55:09.177677Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 1 count 1 size 169 actorID [2:7519900645945026094:2376] 2025-06-25T14:55:09.177749Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 1 partno 0 count 1 parts 0 suffix '63' size 169 2025-06-25T14:55:09.177755Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 114 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:55:09.177781Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:55:09.177803Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0src_id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2025-06-25T14:55:09.177815Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 3 requestId: cookie: 2 2025-06-25T14:55:09.177921Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=346, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:55:09.178006Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2025-06-25T14:55:09.179075Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-25T14:55:09.179238Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 2 written { offset: 1 } } write_statistics { persisting_time { nanos: 3000000 } min_queue_wait_time { nanos: 1000000 } max_queue_wait_time { nanos: 1000000 } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-25T14:55:09.179269Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] OnAck: seqNo=2, txId=? 2025-06-25T14:55:09.179298Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write session: acknoledged message 2 === Inside AcksHandler === Inside SessionClosedHandler 2025-06-25T14:55:09.179607Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write 1 messages with Id from 3 to 3 === SessionClosedHandler has 'written' a message 2025-06-25T14:55:09.179728Z :INFO: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write session: close. 
Timeout 0.000000s 2025-06-25T14:55:09.179765Z :INFO: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write session will now close 2025-06-25T14:55:09.179806Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write session: aborting 2025-06-25T14:55:09.180206Z :WARNING: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2025-06-25T14:55:09.181153Z :DEBUG: [/Root] TraceId [] SessionId [src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0] MessageGroupId [src_id] Write session: destroy 2025-06-25T14:55:09.181130Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0 grpc read done: success: 0 data: 2025-06-25T14:55:09.181158Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0 grpc read failed 2025-06-25T14:55:09.181186Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0 grpc closed 2025-06-25T14:55:09.181208Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: src_id|5b092e98-a969addf-9dc91b7f-aac08d00_0 is DEAD 2025-06-25T14:55:09.182680Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [1:7519900654391429041:2482] destroyed 2025-06-25T14:55:09.182728Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:55:09.182202Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:55:09.628396Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710691, task: 1, CA Id [1:7519900654391429070:2489]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-25T14:55:09.668404Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710691, task: 1, CA Id [1:7519900654391429070:2489]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:55:09.715490Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710691, task: 1, CA Id [1:7519900654391429070:2489]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:55:09.773136Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710691, task: 1, CA Id [1:7519900654391429070:2489]. 
Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> THealthCheckTest::NoStoragePools [GOOD] >> THealthCheckTest::NoBscResponse >> TPersQueueTest::LOGBROKER_7820 [GOOD] >> TPersQueueTest::InflightLimit >> THealthCheckTest::ServerlessBadTablets [GOOD] >> THealthCheckTest::ServerlessWhenTroublesWithSharedNodes >> KqpNamedExpressions::NamedExpressionRandomUpsertIndex+UseSink+UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink-UseDataQuery >> KqpProxy::PingNotExistedSession [GOOD] >> ScriptExecutionsTest::AttemptToUpdateDeletedLease >> TargetDiscoverer::Transfer [GOOD] >> BasicUsage::FallbackToSingleDb [GOOD] >> BasicUsage::FallbackToSingleDbAfterBadRequest >> KqpQueryService::Followers ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpRanges::Like [GOOD] Test command err: Trying to start YDB, gRPC: 13288, MsgBus: 21711 2025-06-25T14:54:15.462675Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900422910717887:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:15.462723Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009bb/r3tmp/tmpi6AppF/pdisk_1.dat 2025-06-25T14:54:15.848685Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13288, node 1 2025-06-25T14:54:15.907046Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:15.907230Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:15.914752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:15.996471Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:15.996494Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:15.996505Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:15.996627Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21711 2025-06-25T14:54:16.469308Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21711 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:16.671236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:16.687925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:54:16.696242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:16.847136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:17.011613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:17.089382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:18.598674Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900435795621387:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:18.598768Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:18.870193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:18.941869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:18.984554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.058311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.091636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.127494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.201260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.267713Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900440090589347:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:19.267781Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:19.268026Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900440090589352:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:19.271944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:19.284632Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900440090589354:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:54:19.343793Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900440090589405:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:20.204393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:20.388871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:20.463014Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;even ... :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900622260322521:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:02.948723Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009bb/r3tmp/tmpJXrFj3/pdisk_1.dat 2025-06-25T14:55:03.097238Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:03.098467Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900622260322500:2080] 1750863302947020 != 1750863302947023 2025-06-25T14:55:03.115889Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:03.115997Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:03.121471Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31267, node 7 2025-06-25T14:55:03.199742Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:03.199773Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:03.199784Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:03.199947Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26764 TClient is connected to server localhost:26764 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:03.838956Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:03.849751Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:03.857504Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:03.918097Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:04.047299Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:04.093760Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:04.174619Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:07.390198Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900643735160621:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:07.390292Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:07.438348Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:07.477401Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:07.557857Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:07.597685Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:07.640669Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:07.714129Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:07.787359Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:07.878897Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900643735161292:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:07.878998Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:07.879021Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900643735161297:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:07.883230Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:07.894397Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900643735161299:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:07.949150Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900622260322521:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:07.949232Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:07.976116Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900643735161350:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:09.368494Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpService::Shutdown >> KqpQueryService::SessionFromPoolSuccess >> KqpQueryService::TableSink_ReplaceFromSelectLargeOlap >> KqpQueryService::StreamExecuteQueryPure >> KqpQueryServiceScripts::TestPaging >> TModifyUserTest::ModifyLdapUser [GOOD] >> TModifyUserTest::ModifyUserIsEnabled >> TableCreation::SimpleTableCreation [GOOD] >> TableCreation::SimpleUpdateTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Transfer [GOOD] Test command err: 2025-06-25T14:55:09.233708Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900653161428610:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:09.234199Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c81/r3tmp/tmpBv1P2W/pdisk_1.dat 2025-06-25T14:55:09.697776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:09.697862Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:09.714832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:09.717339Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:7646 TServer::EnableGrpc on GrpcPort 30997, node 1 2025-06-25T14:55:10.049409Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:10.049435Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:10.049443Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:10.049574Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration 2025-06-25T14:55:10.233828Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7646 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:10.598884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:55:10.924661Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Topic, owner: root@builtin, type: Topic, size_bytes: 0, created_at: { plan_step: 1750863310791, tx_id: 281474976710658 } } } 2025-06-25T14:55:10.924710Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root/Topic 2025-06-25T14:55:10.947090Z node 1 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-25T14:55:10.984292Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:166: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTopicResponse { Result: { status: SUCCESS, issues: } } 2025-06-25T14:55:10.984342Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:181: [TargetDiscoverer][rid 1] Describe topic succeeded: path# /Root/Topic 2025-06-25T14:55:10.984401Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:191: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Topic, dstPath# /Root/Replicated/Table, kind# Transfer >> KqpQueryService::TableSink_OltpLiteralUpsert >> THealthCheckTest::GreenStatusWhenCreatingGroup [GOOD] >> THealthCheckTest::DontIgnoreServerlessWithExclusiveNodesWhenNotSpecific |88.2%| [TA] $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/test-results/unittest/{meta.json ... results_accumulator.log} |88.2%| [TA] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpQueryService::ExecuteQueryPg >> THealthCheckTest::StorageLimit80 [GOOD] >> THealthCheckTest::StorageNoQuota >> KqpQueryService::ExecuteQueryWithWorkloadManager >> KqpNewEngine::StaleRO_IndexFollowers+EnableFollowers [GOOD] >> KqpNewEngine::SqlInFromCompact >> THealthCheckTest::ServerlessWhenTroublesWithSharedNodes [GOOD] >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes >> TModifyUserTest::ModifyUserIsEnabled [GOOD] >> KqpNamedExpressions::NamedExpressionRandom-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TModifyUserTest::ModifyUserIsEnabled [GOOD] Test command err: 2025-06-25T14:55:06.668625Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900641934111447:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:06.669171Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016a4/r3tmp/tmpx5cDdn/pdisk_1.dat 2025-06-25T14:55:07.086703Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:07.091280Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900641934111254:2080] 1750863306659493 != 1750863306659496 2025-06-25T14:55:07.125902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:07.126026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:07.130112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18449 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:55:07.365371Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519900641934111489:2105] Handle TEvNavigate describe path dc-1 2025-06-25T14:55:07.387726Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519900646229079059:2258] HANDLE EvNavigateScheme dc-1 2025-06-25T14:55:07.388751Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7519900641934111536:2128], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:55:07.388811Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2321: Create subscriber: self# [1:7519900641934111536:2128], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-25T14:55:07.388971Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:1003: [main][1:7519900646229079060:2259][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-25T14:55:07.390734Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519900641934111223:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519900646229079064:2259] 2025-06-25T14:55:07.390746Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519900641934111226:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519900646229079065:2259] 2025-06-25T14:55:07.390793Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519900641934111226:2052] Subscribe: subscriber# [1:7519900646229079065:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:55:07.390793Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519900641934111223:2049] Subscribe: subscriber# [1:7519900646229079064:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:55:07.390839Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7519900641934111229:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7519900646229079066:2259] 2025-06-25T14:55:07.390856Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7519900641934111229:2055] Subscribe: subscriber# [1:7519900646229079066:2259], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-25T14:55:07.390889Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519900646229079065:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519900641934111226:2052] 2025-06-25T14:55:07.390916Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519900646229079064:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519900641934111223:2049] 2025-06-25T14:55:07.390937Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519900641934111226:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519900646229079065:2259] 2025-06-25T14:55:07.390949Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7519900646229079066:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: 
[OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519900641934111229:2055] 2025-06-25T14:55:07.390955Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519900641934111223:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519900646229079064:2259] 2025-06-25T14:55:07.390965Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7519900641934111229:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7519900646229079066:2259] 2025-06-25T14:55:07.390987Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519900646229079060:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519900646229079062:2259] 2025-06-25T14:55:07.391024Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519900646229079060:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519900646229079061:2259] 2025-06-25T14:55:07.391083Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:852: [main][1:7519900646229079060:2259][/dc-1] Set up state: owner# [1:7519900641934111536:2128], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:55:07.392098Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:817: [main][1:7519900646229079060:2259][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7519900646229079063:2259] 2025-06-25T14:55:07.392176Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:870: [main][1:7519900646229079060:2259][/dc-1] Path was already updated: owner# [1:7519900641934111536:2128], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-25T14:55:07.392224Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519900646229079064:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519900646229079061:2259], cookie# 1 2025-06-25T14:55:07.392262Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519900646229079065:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519900646229079062:2259], cookie# 1 2025-06-25T14:55:07.392278Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7519900646229079066:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519900646229079063:2259], cookie# 1 2025-06-25T14:55:07.392601Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519900641934111223:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519900646229079064:2259], cookie# 1 2025-06-25T14:55:07.392677Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519900641934111226:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519900646229079065:2259], cookie# 1 
2025-06-25T14:55:07.392729Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7519900641934111229:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7519900646229079066:2259], cookie# 1 2025-06-25T14:55:07.392766Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519900646229079064:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519900641934111223:2049], cookie# 1 2025-06-25T14:55:07.392780Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519900646229079065:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519900641934111226:2052], cookie# 1 2025-06-25T14:55:07.392792Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7519900646229079066:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519900641934111229:2055], cookie# 1 2025-06-25T14:55:07.392861Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519900646229079060:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519900646229079061:2259], cookie# 1 2025-06-25T14:55:07.392887Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519900646229079060:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:55:07.392903Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519900646229079060:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519900646229079062:2259], cookie# 1 2025-06-25T14:55:07.392913Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][1:7519900646229079060:2259][/dc-1] Sync is in progress: cookie# 1, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:55:07.392929Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][1:7519900646229079060:2259][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7519900646229079063:2259], cookie# 1 2025-06-25T14:55:07.392956Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][1:7519900646229079060:2259][/dc-1] Sync is done in the ring group: cookie# 1, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:55:07.443548Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [1:7519900641934111536:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 Shards ... 
0672739512614:2119], cacheItem# { Subscriber: { Subscriber: [3:7519900672739512863:2260] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 9 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750863313409 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 9 IsSync: true Partial: 0 } 2025-06-25T14:55:13.582488Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519900672739512947:2328], recipient# [3:7519900672739512946:2327], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } }] } 2025-06-25T14:55:13.582517Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [3:7519900672739512946:2327] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:55:13.582555Z node 3 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [3:7519900672739512946:2327] txid# 281474976715662, Access denied for user2 on path /dc-1, with access AlterSchema 2025-06-25T14:55:13.582622Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900672739512946:2327] txid# 281474976715662, issues: { message: "Access denied for user2 on path /dc-1" issue_code: 200000 severity: 1 } 2025-06-25T14:55:13.582641Z node 3 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [3:7519900672739512946:2327] txid# 281474976715662 SEND to# [3:7519900672739512945:2326] Source {TEvProposeTransactionStatus Status# 5} 2025-06-25T14:55:13.584167Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [3:7519900672739512591:2106] Handle TEvProposeTransaction 2025-06-25T14:55:13.584184Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [3:7519900672739512591:2106] TxId# 281474976715663 ProcessProposeTransaction 2025-06-25T14:55:13.584211Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [3:7519900672739512591:2106] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [3:7519900672739512949:2330] 2025-06-25T14:55:13.586315Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [3:7519900672739512949:2330] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { ModifyUser { User: "user2" Password: "password" CanLogin: false } } } } UserToken: 
"\n\005user2\022\030\022\026\n\024all-users@well-known\032\322\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDkwNjUxMywiaWF0IjoxNzUwODYzMzEzLCJzdWIiOiJ1c2VyMiJ9.UdC6k1rGn88aM-rJDu5WSn6m6e_T5XQ2190cU46HvIPZvIeozaVQC8hoeEyyvs6hBl3PWqHtr0PfuC7uVEo0D-nlb7912eOkFUAfmlciwlj8KtBhj3QFuI-3L_dGRx0gyCfumtNcOjPqRcGYeQdQvJmg4zXxfEhmdGPvEn44c7O9zAVGG0ucnJKsOoeggssh4aIgfaTDntuGNYSoAQeLVyxHDs1_sc_fI-WFio4AODR7BnLMdaTcTG-FF8TlAzWdPOWdaZEkvWcKUsQgVyLs75HG7-f3R1CsB62mF2e6_CTTGz0OZCtiTQzwGJ6b8l2rtQPiUzmDJCoI_KQaJRKbjA\"\005Login*~eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc1MDkwNjUxMywiaWF0IjoxNzUwODYzMzEzLCJzdWIiOiJ1c2VyMiJ9.**" PeerName: "" 2025-06-25T14:55:13.586358Z node 3 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [3:7519900672739512949:2330] txid# 281474976715663 Bootstrap, UserSID: user2 CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:55:13.586372Z node 3 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [3:7519900672739512949:2330] txid# 281474976715663 Bootstrap, UserSID: user2 IsClusterAdministrator: 1 2025-06-25T14:55:13.586408Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [3:7519900672739512949:2330] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:55:13.586467Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2747: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7519900672739512614:2119], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-25T14:55:13.586536Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:887: [main][3:7519900672739512863:2260][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:7519900672739512614:2119], cookie# 10 2025-06-25T14:55:13.586586Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:7519900672739512867:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7519900672739512864:2260], cookie# 10 2025-06-25T14:55:13.586603Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:7519900672739512868:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7519900672739512865:2260], cookie# 10 2025-06-25T14:55:13.586617Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:7519900672739512869:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7519900672739512866:2260], cookie# 10 2025-06-25T14:55:13.586639Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:7519900668444545028:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7519900672739512867:2260], cookie# 10 2025-06-25T14:55:13.586661Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:7519900668444545031:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7519900672739512868:2260], cookie# 10 2025-06-25T14:55:13.586674Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:7519900668444545034:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7519900672739512869:2260], cookie# 10 2025-06-25T14:55:13.586697Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:7519900672739512867:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { 
Version: 8 Partial: 0 }: sender# [3:7519900668444545028:2049], cookie# 10 2025-06-25T14:55:13.586710Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:7519900672739512868:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7519900668444545031:2052], cookie# 10 2025-06-25T14:55:13.586723Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:7519900672739512869:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7519900668444545034:2055], cookie# 10 2025-06-25T14:55:13.586749Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][3:7519900672739512863:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7519900672739512864:2260], cookie# 10 2025-06-25T14:55:13.586766Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][3:7519900672739512863:2260][/dc-1] Sync is in progress: cookie# 10, ring group# 0, size# 3, half# 1, successes# 1, failures# 0 2025-06-25T14:55:13.586781Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][3:7519900672739512863:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7519900672739512865:2260], cookie# 10 2025-06-25T14:55:13.586791Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:971: [main][3:7519900672739512863:2260][/dc-1] Sync is in progress: cookie# 10, ring group# 0, size# 3, half# 1, successes# 2, failures# 0 2025-06-25T14:55:13.586806Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:913: [main][3:7519900672739512863:2260][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7519900672739512866:2260], cookie# 10 2025-06-25T14:55:13.586826Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:984: [main][3:7519900672739512863:2260][/dc-1] Sync is done in the ring group: cookie# 10, ring group# 0, size# 3, half# 1, successes# 3, failures# 0, partial# 0 2025-06-25T14:55:13.586866Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2588: HandleNotify: self# [3:7519900672739512614:2119], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-25T14:55:13.586928Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2463: ResolveCacheItem: self# [3:7519900672739512614:2119], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [3:7519900672739512863:2260] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 10 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750863313409 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-25T14:55:13.586991Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7519900672739512614:2119], cacheItem# { Subscriber: { Subscriber: [3:7519900672739512863:2260] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 10 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1750863313409 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true 
ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 10 IsSync: true Partial: 0 } 2025-06-25T14:55:13.587117Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7519900672739512950:2331], recipient# [3:7519900672739512949:2330], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } }] } 2025-06-25T14:55:13.587145Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [3:7519900672739512949:2330] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:55:13.587183Z node 3 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [3:7519900672739512949:2330] txid# 281474976715663, Access denied for user2 on path /dc-1, with access AlterSchema 2025-06-25T14:55:13.587264Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900672739512949:2330] txid# 281474976715663, issues: { message: "Access denied for user2 on path /dc-1" issue_code: 200000 severity: 1 } 2025-06-25T14:55:13.587287Z node 3 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [3:7519900672739512949:2330] txid# 281474976715663 SEND to# [3:7519900672739512948:2329] Source {TEvProposeTransactionStatus Status# 5} >> TableCreation::SimpleUpdateTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandom-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 19465, MsgBus: 22342 2025-06-25T14:53:36.814336Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900256730183724:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:36.814737Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e4/r3tmp/tmpyOQEOe/pdisk_1.dat 2025-06-25T14:53:37.175166Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19465, node 1 2025-06-25T14:53:37.221539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:53:37.221649Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:53:37.230509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:53:37.387911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:53:37.387930Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:53:37.387937Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-25T14:53:37.388047Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22342 2025-06-25T14:53:37.816438Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22342 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:53:37.993563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:53:38.042586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:53:38.191518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:53:38.348241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:53:38.422014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.029294Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900273910054432:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:40.029404Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:40.412140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.459887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.493509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.526342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.560683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.629037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.702751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:53:40.764005Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900273910055098:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:40.764079Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:40.764288Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900273910055103:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:53:40.767440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:53:40.777264Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900273910055105:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:53:40.867242Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900273910055158:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:53:41.817303Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900256730183724:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:41.821798Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 12692, MsgBus: 15064 2025-06-25T14:53:43.611564Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900284852071925:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:53:43.611649Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e4/r3tmp/tmpBXEFAB/pdisk_1.dat 2025-06-25T14:53:43.753764Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:53 ... 39b-4fc1-a2dc-bd75deb888da"]] Trying to start YDB, gRPC: 17646, MsgBus: 3772 2025-06-25T14:55:06.703830Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7519900641487141620:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:06.703897Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009e4/r3tmp/tmpGnwry4/pdisk_1.dat 2025-06-25T14:55:06.859025Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:06.859631Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:7519900641487141598:2080] 1750863306702957 != 1750863306702960 2025-06-25T14:55:06.877874Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:06.878000Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:06.879522Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17646, node 12 2025-06-25T14:55:06.917016Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:06.917049Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:06.917064Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:06.917241Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got 
bad distributable configuration TClient is connected to server localhost:3772 TClient is connected to server localhost:3772 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:07.522784Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:07.530756Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:07.539245Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:07.650970Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:07.798190Z node 12 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:07.919625Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:07.999404Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:11.661246Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519900662961979711:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:11.661388Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:11.704439Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[12:7519900641487141620:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:11.704519Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:11.710512Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:11.751744Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:11.790989Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:11.828595Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:11.910344Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:11.950921Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:12.027528Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:12.114966Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[12:7519900667256947670:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:12.115092Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:12.115492Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7519900667256947675:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:12.126694Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:12.147179Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7519900667256947677:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:12.204952Z node 12 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [12:7519900667256947728:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } [[["6e7f7518-2dd6-4ed3-8389-9848c634dac2"]];[["a4184e8f-08d2-4f1a-b661-77315f6b9292"]]] [[["6e7f7518-2dd6-4ed3-8389-9848c634dac2"]];[["a4184e8f-08d2-4f1a-b661-77315f6b9292"]]] [["26c5e4cf-9284-4888-8b5e-c88936834ca2"];["cce01128-b1b3-47d4-9353-611e7302b299"]] >> KqpSqlIn::PhasesCount [GOOD] |88.2%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpQueryService::TableSink_OltpLiteralUpsert [GOOD] >> KqpQueryService::TableSink_OltpInsert |88.2%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpQueryService::StreamExecuteQueryPure [GOOD] >> KqpQueryService::StreamExecuteQueryMultiResult >> BasicUsage::GetAllStartPartitionSessions [GOOD] >> BasicUsage::PreferredDatabaseNoFallback >> THealthCheckTest::DontIgnoreServerlessWithExclusiveNodesWhenNotSpecific [GOOD] >> THealthCheckTest::CLusterNotBootstrapped >> ScriptExecutionsTest::AttemptToUpdateDeletedLease [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::SimpleUpdateTable [GOOD] Test command err: 2025-06-25T14:55:07.989643Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900646148769299:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:07.989973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c7e/r3tmp/tmp7pOO43/pdisk_1.dat 2025-06-25T14:55:08.409199Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:08.438567Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:08.438669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:08.447118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8978 TServer::EnableGrpc on GrpcPort 6790, node 1 2025-06-25T14:55:08.700981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:08.700998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:08.701005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:08.701119Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:55:08.993025Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:55:09.122036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:10.731099Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:55:10.733249Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:55:10.745679Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:55:10.745741Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T14:55:10.745780Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:55:10.745870Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:55:10.745984Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:55:10.746017Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:55:10.752138Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-25T14:55:10.752138Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-25T14:55:10.752149Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-25T14:55:10.752161Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-25T14:55:10.752205Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-25T14:55:10.752333Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-25T14:55:10.752339Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. 
Creating table 2025-06-25T14:55:10.752354Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-25T14:55:10.754575Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-25T14:55:10.754987Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:55:10.755030Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:55:10.759066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:10.760704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:10.762622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:10.768291Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-25T14:55:10.768356Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-25T14:55:10.768394Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710658 2025-06-25T14:55:10.769266Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710659 2025-06-25T14:55:10.769713Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-25T14:55:10.769725Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710660 2025-06-25T14:55:10.921668Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 2025-06-25T14:55:10.986479Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 
2025-06-25T14:55:10.986626Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-06-25T14:55:11.001693Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-06-25T14:55:11.037563Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-25T14:55:11.080125Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-25T14:55:11.082354Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: c657f411-5c5082bb-928a0bb5-d0da29c8, Bootstrap. Database: /dc-1 2025-06-25T14:55:11.109392Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993210398.442260s seconds to be completed 2025-06-25T14:55:11.121030Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=YjVkYzBmYjktZDMxYzgzZTQtNTI4OTQ2YjctZGQwNjIzMmU=, workerId: [1:7519900663328639258:2294], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-25T14:55:11.121205Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:55:11.124215Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: c657f411-5c5082bb-928a0bb5-d0da29c8, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-25T14:55:11.127563Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=YjVkYzBmYjktZDMxYzgzZTQtNTI4OTQ2YjctZGQwNjIzMmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQue ... : col4, col5. Existing columns: col1, col2, col3 2025-06-25T14:55:16.859422Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table test_table updater. Full table path:/dc-1/.test/test_table 2025-06-25T14:55:16.860488Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:55:16.861158Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. 
TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715667 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 } 2025-06-25T14:55:16.861192Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:261: Table test_table updater. Subscribe on create table tx: 281474976715667 2025-06-25T14:55:16.874474Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:290: Table test_table updater. Request: alter. Transaction completed: 281474976715667. Doublechecking... 2025-06-25T14:55:16.903832Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 10, sender: [2:7519900685967818577:2331], selfId: [2:7519900673082915691:2065], source: [2:7519900685967818574:2330] 2025-06-25T14:55:16.903996Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 8c6bf721-151a2fe1-efa7aa28-decded8d, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTIxOTk2M2QtOTc0MjNmNWUtZDZjMzliMTItNjlkOGJmYjE=, TxId: 2025-06-25T14:55:16.904026Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 8c6bf721-151a2fe1-efa7aa28-decded8d, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NTIxOTk2M2QtOTc0MjNmNWUtZDZjMzliMTItNjlkOGJmYjE=, TxId: 2025-06-25T14:55:16.904160Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1911: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 8c6bf721-151a2fe1-efa7aa28-decded8d, start saving rows range [0; 1) 2025-06-25T14:55:16.904286Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 8c6bf721-151a2fe1-efa7aa28-decded8d, Bootstrap. Database: /dc-1 2025-06-25T14:55:16.904353Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NTIxOTk2M2QtOTc0MjNmNWUtZDZjMzliMTItNjlkOGJmYjE=, workerId: [2:7519900685967818574:2330], local sessions count: 2 2025-06-25T14:55:16.904420Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993210392.647206s seconds to be completed 2025-06-25T14:55:16.905979Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=NjFmODJiOWQtMjQyYzRhMDYtMzU3M2EwMzgtODA0N2RiZDM=, workerId: [2:7519900685967818727:2344], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-25T14:55:16.906111Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:55:16.906530Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 8c6bf721-151a2fe1-efa7aa28-decded8d, RunDataQuery: -- TSaveScriptExecutionResultQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_id AS Int32; DECLARE $expire_at AS Optional; DECLARE $items AS List>; UPSERT INTO `.metadata/result_sets` SELECT $database as database, $execution_id as execution_id, $result_set_id as result_set_id, T.row_id as row_id, $expire_at as expire_at, T.result_set as result_set, T.accumulated_size as accumulated_size FROM AS_TABLE($items) AS T; 2025-06-25T14:55:16.907000Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NjFmODJiOWQtMjQyYzRhMDYtMzU3M2EwMzgtODA0N2RiZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 12, targetId: [2:7519900685967818727:2344] 2025-06-25T14:55:16.907041Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 12 timeout: 300.000000s actor id: [2:7519900685967818729:2634] 2025-06-25T14:55:16.950739Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-25T14:55:17.003123Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NTUyZTljNC1iMzAzMGEwNi1hMDIxODAzNC00YWNjNzg2Mg==, workerId: [2:7519900685967818565:2328], local sessions count: 2 2025-06-25T14:55:17.072919Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 12, sender: [2:7519900685967818728:2345], selfId: [2:7519900673082915691:2065], source: [2:7519900685967818727:2344] 2025-06-25T14:55:17.073261Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 8c6bf721-151a2fe1-efa7aa28-decded8d, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NjFmODJiOWQtMjQyYzRhMDYtMzU3M2EwMzgtODA0N2RiZDM=, TxId: 2025-06-25T14:55:17.073284Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultQuery] TraceId: 8c6bf721-151a2fe1-efa7aa28-decded8d, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NjFmODJiOWQtMjQyYzRhMDYtMzU3M2EwMzgtODA0N2RiZDM=, TxId: 2025-06-25T14:55:17.073394Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1943: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 8c6bf721-151a2fe1-efa7aa28-decded8d, result part successfully saved 2025-06-25T14:55:17.073408Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:1950: [ScriptExecutions] [TSaveScriptExecutionResultActor] ExecutionId: 8c6bf721-151a2fe1-efa7aa28-decded8d, reply SUCCESS, issues: 2025-06-25T14:55:17.073647Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NjFmODJiOWQtMjQyYzRhMDYtMzU3M2EwMzgtODA0N2RiZDM=, workerId: [2:7519900685967818727:2344], local sessions count: 1 2025-06-25T14:55:17.073726Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 8c6bf721-151a2fe1-efa7aa28-decded8d, Bootstrap. 
Database: /dc-1 2025-06-25T14:55:17.073823Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993210392.477807s seconds to be completed 2025-06-25T14:55:17.075692Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=NGUxMjE3ZS0yZDI0NzJkZC00MWIzZDhlZS1kMTJjYjRjNA==, workerId: [2:7519900690262786056:2353], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-25T14:55:17.075840Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:55:17.076045Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 8c6bf721-151a2fe1-efa7aa28-decded8d, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-25T14:55:17.076346Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NGUxMjE3ZS0yZDI0NzJkZC00MWIzZDhlZS1kMTJjYjRjNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 14, targetId: [2:7519900690262786056:2353] 2025-06-25T14:55:17.076378Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 14 timeout: 300.000000s actor id: [2:7519900690262786058:2649] 2025-06-25T14:55:17.083963Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 14, sender: [2:7519900690262786057:2354], selfId: [2:7519900673082915691:2065], source: [2:7519900690262786056:2353] 2025-06-25T14:55:17.084375Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 8c6bf721-151a2fe1-efa7aa28-decded8d, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NGUxMjE3ZS0yZDI0NzJkZC00MWIzZDhlZS1kMTJjYjRjNA==, TxId: 2025-06-25T14:55:17.084398Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 8c6bf721-151a2fe1-efa7aa28-decded8d, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NGUxMjE3ZS0yZDI0NzJkZC00MWIzZDhlZS1kMTJjYjRjNA==, TxId: 2025-06-25T14:55:17.084719Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NGUxMjE3ZS0yZDI0NzJkZC00MWIzZDhlZS1kMTJjYjRjNA==, workerId: [2:7519900690262786056:2353], local sessions count: 1 2025-06-25T14:55:17.084756Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 8c6bf721-151a2fe1-efa7aa28-decded8d, Bootstrap. 
Database: /dc-1 2025-06-25T14:55:17.084851Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993210392.466776s seconds to be completed 2025-06-25T14:55:17.086694Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=2&id=OTNmNWRhZGUtZDRlYzNhOTItZjg5ODQxNC0xMTMzMWRmNA==, workerId: [2:7519900690262786080:2362], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-25T14:55:17.086831Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:55:17.087018Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 8c6bf721-151a2fe1-efa7aa28-decded8d, RunDataQuery: -- TSaveScriptFinalStatusActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, finalization_status, meta, customer_supplied_id, user_token, script_sinks, script_secret_names FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-25T14:55:17.087335Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=OTNmNWRhZGUtZDRlYzNhOTItZjg5ODQxNC0xMTMzMWRmNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 16, targetId: [2:7519900690262786080:2362] 2025-06-25T14:55:17.087372Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 16 timeout: 300.000000s actor id: [2:7519900690262786082:2654] >> KqpQueryServiceScripts::ValidateScript ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::PhasesCount [GOOD] Test command err: Trying to start YDB, gRPC: 18172, MsgBus: 26613 2025-06-25T14:54:15.083613Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900421075003995:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:15.084209Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009c0/r3tmp/tmpfgmbN5/pdisk_1.dat 2025-06-25T14:54:15.540864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:15.540967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:15.545695Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:15.569535Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:15.570520Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900421075003899:2080] 1750863255064850 != 1750863255064853 TServer::EnableGrpc on GrpcPort 18172, node 1 2025-06-25T14:54:15.628659Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:15.628676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:15.628682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:15.628795Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26613 TClient is connected to server localhost:26613 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:54:16.123437Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:16.278133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:16.300807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:54:16.319289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:16.465941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:16.634717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:16.699181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:18.526703Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900433959907427:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:18.526786Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:18.848329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:18.934286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:18.972227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.001603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.032520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.117397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.167043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.259076Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900438254875394:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:19.259146Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:19.259345Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900438254875399:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:19.263390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:19.279670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:54:19.279871Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900438254875401:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:54:19.348457Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900438254875452:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:20.078434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900421075003995:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:20.078484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:20.294922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is u ... me/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:04.475591Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 3477, MsgBus: 24346 2025-06-25T14:55:09.522788Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900651921455557:2245];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:09.523634Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009c0/r3tmp/tmprZ1FF1/pdisk_1.dat 2025-06-25T14:55:09.657516Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900651921455312:2080] 1750863309502100 != 1750863309502103 2025-06-25T14:55:09.667304Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:09.670196Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:09.670295Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:09.674302Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3477, node 7 2025-06-25T14:55:09.729108Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:09.729134Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:09.729146Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:09.729305Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24346 TClient is connected to 
server localhost:24346 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:10.377048Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:10.396885Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:10.473653Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:10.662276Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:10.725608Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:10.819556Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:13.940477Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900669101326126:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:13.940589Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:14.023511Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:14.085145Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:14.145411Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:14.195306Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:14.243070Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:14.319727Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:14.380278Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:14.499483Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900673396294089:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:14.499594Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:14.499938Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900673396294094:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:14.505722Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:14.524629Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900651921455557:2245];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:14.525235Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:14.526844Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900673396294096:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:14.586568Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900673396294148:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpQueryService::SessionFromPoolSuccess [GOOD] >> KqpQueryService::SeveralCTAS+UseSink >> KqpQueryService::SessionFromPoolError >> KqpQueryService::ExecuteQueryPg [GOOD] >> KqpQueryService::ExecuteQueryPgTableSelect ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/proxy_service/ut/unittest >> ScriptExecutionsTest::AttemptToUpdateDeletedLease [GOOD] Test command err: 2025-06-25T14:55:08.155482Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900650097005322:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:08.155619Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c7a/r3tmp/tmp84OHeE/pdisk_1.dat 2025-06-25T14:55:08.500704Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20800, node 1 2025-06-25T14:55:08.555779Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:08.555855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:08.572121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:08.697405Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:08.697420Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:08.697426Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:08.697508Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13576 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:55:09.182803Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:09.215830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:10.787195Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:55:10.790535Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-25T14:55:10.798913Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:902: Received ping session request, request_id: 2, sender: [1:7519900654391973530:2278], trace_id: 01jyksbtgw4k64t1cmhca6w75g 2025-06-25T14:55:10.801447Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 2 timeout: 5.000000s actor id: [0:0:0] 2025-06-25T14:55:10.801694Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:55:10.801725Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-25T14:55:10.801747Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:55:10.801810Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /Root, empty 2025-06-25T14:55:10.801915Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:55:10.801948Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:55:10.803356Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:55:10.803377Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:55:10.803732Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:573: Session not found, targetId: [2:8678280833929343339:121] requestId: 2 2025-06-25T14:55:10.810340Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jyksbtgw4k64t1cmhca6w75g", Forwarded response to sender actor, requestId: 2, sender: [1:7519900654391973530:2278], selfId: [1:7519900650097005518:2241], source: [1:7519900650097005518:2241] 2025-06-25T14:55:12.081700Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519900666616930916:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:12.081764Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c7a/r3tmp/tmpOXHvjo/pdisk_1.dat 2025-06-25T14:55:12.196256Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:12.197669Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519900666616930897:2080] 1750863312081275 != 1750863312081278 2025-06-25T14:55:12.235493Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:12.235566Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:12.237044Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8816 TServer::EnableGrpc on GrpcPort 4092, node 4 2025-06-25T14:55:12.409526Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:12.409543Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:12.409550Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:12.409668Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:55:12.477339Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:12.486121Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:13.092838Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:14.701895Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:55:14.703172Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T14:55:14.711146Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T14:55:14.711198Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:55:14.711260Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:55:14.711352Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T14:55:14.711392Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T14:55:14.712647Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:55:14.714461Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-25T14:55:14.714492Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-25T14:55:14.714494Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-25T14:55:14.714504Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-25T14:55:14.714537Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. 
Full table path:/dc-1/.metadata/script_execution_leases 2025-06-25T14:55:14.714538Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-25T14:55:14.714633Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-25T14:55:14.714645Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-25T14:55:14.714660Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-25T14:55:14.718990Z node 4 :FLAT_TX_SCHEM ... database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-25T14:55:17.801066Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=NDZhODI1MzAtODNlNmE5MzYtZDEyOGJmODEtMjM2OWM0OTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 24, targetId: [4:7519900688091768676:2418] 2025-06-25T14:55:17.801099Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 24 timeout: 300.000000s actor id: [4:7519900688091768678:2638] 2025-06-25T14:55:17.807204Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 24, sender: [4:7519900688091768677:2419], selfId: [4:7519900666616931023:2155], source: [4:7519900688091768676:2418] 2025-06-25T14:55:17.807408Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 837a6c89-f32c78b7-5efe225a-66c60f17, State: Get operation info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=NDZhODI1MzAtODNlNmE5MzYtZDEyOGJmODEtMjM2OWM0OTE=, TxId: 01jyksc2s9bfcmtyadcnv7nv2n 2025-06-25T14:55:17.807846Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 837a6c89-f32c78b7-5efe225a-66c60f17, State: Get operation info, RunDataQuery: -- TSaveScriptFinalStatusActor::FinishScriptExecution DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $operation_status AS Int32; DECLARE $execution_status AS Int32; DECLARE $finalization_status AS Int32; DECLARE $issues AS JsonDocument; DECLARE $plan AS JsonDocument; DECLARE $stats AS JsonDocument; DECLARE $ast AS Optional; DECLARE $ast_compressed AS Optional; DECLARE $ast_compression_method AS Optional; DECLARE $operation_ttl AS Interval; DECLARE $customer_supplied_id AS Text; DECLARE $user_token AS Text; DECLARE $script_sinks AS Optional; DECLARE $script_secret_names AS Optional; DECLARE $applicate_script_external_effect_required AS Bool; UPDATE `.metadata/script_executions` SET operation_status = $operation_status, execution_status = $execution_status, finalization_status = IF($applicate_script_external_effect_required, $finalization_status, NULL), issues = $issues, plan = $plan, end_ts = CurrentUtcTimestamp(), stats = $stats, ast = $ast, ast_compressed = $ast_compressed, ast_compression_method = $ast_compression_method, expire_at = IF($operation_ttl > CAST(0 AS Interval), CurrentUtcTimestamp() + $operation_ttl, NULL), customer_supplied_id = IF($applicate_script_external_effect_required, $customer_supplied_id, NULL), user_token = IF($applicate_script_external_effect_required, $user_token, NULL), script_sinks = 
IF($applicate_script_external_effect_required, $script_sinks, NULL), script_secret_names = IF($applicate_script_external_effect_required, $script_secret_names, NULL) WHERE database = $database AND execution_id = $execution_id; DELETE FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id; 2025-06-25T14:55:17.808204Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=NDZhODI1MzAtODNlNmE5MzYtZDEyOGJmODEtMjM2OWM0OTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 25, targetId: [4:7519900688091768676:2418] 2025-06-25T14:55:17.808235Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 25 timeout: 300.000000s actor id: [4:7519900688091768701:2645] 2025-06-25T14:55:17.817290Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 25, sender: [4:7519900688091768700:2425], selfId: [4:7519900666616931023:2155], source: [4:7519900688091768676:2418] 2025-06-25T14:55:17.817594Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 837a6c89-f32c78b7-5efe225a-66c60f17, State: Update final status, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=NDZhODI1MzAtODNlNmE5MzYtZDEyOGJmODEtMjM2OWM0OTE=, TxId: 2025-06-25T14:55:17.817662Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 837a6c89-f32c78b7-5efe225a-66c60f17, State: Update final status, Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=NDZhODI1MzAtODNlNmE5MzYtZDEyOGJmODEtMjM2OWM0OTE=, TxId: 2025-06-25T14:55:17.817708Z node 4 :KQP_PROXY DEBUG: kqp_script_executions.cpp:2658: [ScriptExecutions] Finish script execution operation. ExecutionId: 837a6c89-f32c78b7-5efe225a-66c60f17. UNAVAILABLE. Issues: {
: Error: Lease expired } 2025-06-25T14:55:17.817933Z node 4 :KQP_PROXY DEBUG: kqp_script_executions.cpp:633: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: 837a6c89-f32c78b7-5efe225a-66c60f17, successfully finalized script execution operation 2025-06-25T14:55:17.817956Z node 4 :KQP_PROXY DEBUG: kqp_script_executions.cpp:838: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: 837a6c89-f32c78b7-5efe225a-66c60f17, reply success 2025-06-25T14:55:17.818093Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=4&id=NDZhODI1MzAtODNlNmE5MzYtZDEyOGJmODEtMjM2OWM0OTE=, workerId: [4:7519900688091768676:2418], local sessions count: 1 2025-06-25T14:55:17.833980Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jyksc2t9d5xkgke0ctf2bfpm, Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=YzNkOGZmOWMtMjE5NzFlY2YtZTk3YjYxOGItZDZkYjA1YWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 26, targetId: [4:7519900679501833814:2333] 2025-06-25T14:55:17.834035Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 26 timeout: 300.000000s actor id: [4:7519900688091768726:2652] 2025-06-25T14:55:17.935315Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T14:55:18.277443Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jyksc2t9d5xkgke0ctf2bfpm", Forwarded response to sender actor, requestId: 26, sender: [4:7519900688091768725:2430], selfId: [4:7519900666616931023:2155], source: [4:7519900679501833814:2333] 2025-06-25T14:55:18.280966Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TScriptLeaseUpdater] TraceId: 837a6c89-f32c78b7-5efe225a-66c60f17, Bootstrap. Database: /dc-1 2025-06-25T14:55:18.282380Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993210391.269251s seconds to be completed 2025-06-25T14:55:18.283940Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=4&id=YjM0NzI1YTktMmU3OWQ4OWItZDY2MzhjZjMtY2MwY2M4NWY=, workerId: [4:7519900692386736067:2443], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-25T14:55:18.284067Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T14:55:18.284449Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptLeaseUpdater] TraceId: 837a6c89-f32c78b7-5efe225a-66c60f17, RunDataQuery: -- TScriptLeaseUpdater::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT lease_deadline FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-25T14:55:18.284723Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=YjM0NzI1YTktMmU3OWQ4OWItZDY2MzhjZjMtY2MwY2M4NWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 28, targetId: [4:7519900692386736067:2443] 2025-06-25T14:55:18.284750Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 28 timeout: 300.000000s actor id: [4:7519900692386736069:2672] 2025-06-25T14:55:18.495517Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 28, sender: [4:7519900692386736068:2444], selfId: [4:7519900666616931023:2155], source: [4:7519900692386736067:2443] 2025-06-25T14:55:18.495735Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptLeaseUpdater] TraceId: 837a6c89-f32c78b7-5efe225a-66c60f17, State: Get lease info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=YjM0NzI1YTktMmU3OWQ4OWItZDY2MzhjZjMtY2MwY2M4NWY=, TxId: 01jyksc3esf29rqetysqjyne9y 2025-06-25T14:55:18.495833Z node 4 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TScriptLeaseUpdater] TraceId: 837a6c89-f32c78b7-5efe225a-66c60f17, State: Get lease info, Finish with BAD_REQUEST, Issues: {
: Error: No such execution }, SessionId: ydb://session/3?node_id=4&id=YjM0NzI1YTktMmU3OWQ4OWItZDY2MzhjZjMtY2MwY2M4NWY=, TxId: 01jyksc3esf29rqetysqjyne9y 2025-06-25T14:55:18.495875Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:428: [TQueryBase] [TScriptLeaseUpdater] TraceId: 837a6c89-f32c78b7-5efe225a-66c60f17, State: Get lease info, Rollback transaction: 01jyksc3esf29rqetysqjyne9y 2025-06-25T14:55:18.496078Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=YjM0NzI1YTktMmU3OWQ4OWItZDY2MzhjZjMtY2MwY2M4NWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 29, targetId: [4:7519900692386736067:2443] 2025-06-25T14:55:18.496111Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 29 timeout: 600.000000s actor id: [4:7519900692386736095:2683] 2025-06-25T14:55:18.502347Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 29, sender: [4:7519900692386736094:2452], selfId: [4:7519900666616931023:2155], source: [4:7519900692386736067:2443] 2025-06-25T14:55:18.502655Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:437: [TQueryBase] [TScriptLeaseUpdater] TraceId: 837a6c89-f32c78b7-5efe225a-66c60f17, State: Get lease info, RollbackTransactionResult: SUCCESS. Issues: 2025-06-25T14:55:18.503441Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=4&id=YjM0NzI1YTktMmU3OWQ4OWItZDY2MzhjZjMtY2MwY2M4NWY=, workerId: [4:7519900692386736067:2443], local sessions count: 1 2025-06-25T14:55:18.510117Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=4&id=YzNkOGZmOWMtMjE5NzFlY2YtZTk3YjYxOGItZDZkYjA1YWY=, workerId: [4:7519900679501833814:2333], local sessions count: 0 >> KqpService::Shutdown [GOOD] >> KqpService::SessionBusyRetryOperationSync >> KqpQueryService::ExecuteQueryWithWorkloadManager [GOOD] >> KqpQueryService::ExecuteQueryWithResourcePoolClassifier |88.2%| [TA] $(B)/ydb/core/kqp/proxy_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.2%| [TA] {RESULT} $(B)/ydb/core/kqp/proxy_service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpQueryServiceScripts::ExecuteScriptStatsBasic >> KqpService::SessionBusy >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly-StrictAclCheck [GOOD] >> KqpNamedExpressions::NamedExpressionRandomDataQuery+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomDataQuery-UseSink >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes [GOOD] >> KqpQueryService::PeriodicTaskInSessionPool >> KqpQueryServiceScripts::TestPaging [GOOD] >> KqpQueryServiceScripts::TestFetchMoreThanLimit >> KqpQueryService::Followers [GOOD] >> KqpQueryService::FlowControllOnHugeRealTable-LongRow >> THealthCheckTest::StorageNoQuota [GOOD] >> THealthCheckTest::TestBootingTabletIsNotDead >> KqpQueryService::TableSink_OltpInsert [GOOD] >> KqpQueryService::TableSink_OltpInteractive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAdminAccessInTenant::ClusterAdminCanAuthOnNonEmptyTenant-DomainLoginOnly-StrictAclCheck [GOOD] Test command err: Starting YDB, grpc: 5252, msgbus: 15482 2025-06-25T14:51:47.689220Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899787717260784:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:47.689294Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00096f/r3tmp/tmpt0wG6V/pdisk_1.dat 2025-06-25T14:51:48.143470Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:48.165448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:48.165532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:48.178465Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5252, node 1 2025-06-25T14:51:48.334096Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:51:48.334117Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:51:48.334127Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:51:48.334256Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15482 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:51:48.693277Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:48.700222Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519899787717260838:2118] Handle TEvNavigate describe path dc-1 2025-06-25T14:51:48.714804Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519899792012228652:2444] HANDLE EvNavigateScheme dc-1 2025-06-25T14:51:48.715517Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519899792012228652:2444] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:51:48.746012Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519899792012228652:2444] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-25T14:51:48.760877Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519899792012228652:2444] Handle TEvDescribeSchemeResult Forward to# [1:7519899792012228651:2443] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 
18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:51:48.796463Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519899787717260838:2118] Handle TEvProposeTransaction 2025-06-25T14:51:48.796494Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519899787717260838:2118] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T14:51:48.796619Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519899787717260838:2118] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7519899792012228658:2449] 2025-06-25T14:51:48.894247Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519899792012228658:2449] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:51:48.894328Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519899792012228658:2449] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 0 2025-06-25T14:51:48.894344Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519899792012228658:2449] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:51:48.894421Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519899792012228658:2449] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:51:48.894770Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519899792012228658:2449] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:51:48.894927Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519899792012228658:2449] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:51:48.894990Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519899792012228658:2449] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T14:51:48.895142Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519899792012228658:2449] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T14:51:48.897665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:48.902155Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519899792012228658:2449] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T14:51:48.902219Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519899792012228658:2449] txid# 
281474976715657 SEND to# [1:7519899792012228657:2448] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 2025-06-25T14:51:48.927863Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519899787717260838:2118] Handle TEvProposeTransaction 2025-06-25T14:51:48.927886Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519899787717260838:2118] TxId# 281474976715658 ProcessProposeTransaction 2025-06-25T14:51:48.927918Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519899787717260838:2118] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519899792012228697:2484] 2025-06-25T14:51:48.929841Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519899792012228697:2484] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-25T14:51:48.929880Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519899792012228697:2484] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 0 2025-06-25T14:51:48.929894Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519899792012228697:2484] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-25T14:51:48.929994Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519899792012228697:2484] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:51:48.930215Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519899792012228697:2484] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:51:48.930303Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519899792012228697:2484] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:51:48.930346Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519899792012228697:2484] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:51:48.930430Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519899792012228697:2484] txid# 281474976715658 HANDLE EvClientConnected 2025-06-25T14:51:48.930822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715 ... 
ath: /dc-1/tenant-db, operationId: 281474976710665:0, at schemeshard: 72075186224037891 2025-06-25T14:55:18.580552Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5302: ExamineTreeVFS visit path id [OwnerId: 72075186224037891, LocalPathId: 1] name: dc-1/tenant-db type: EPathTypeSubDomain state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72075186224037891, LocalPathId: 1] 2025-06-25T14:55:18.580583Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72075186224037891, LocalPathId: 1] 2025-06-25T14:55:18.580880Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710665:1, propose status:StatusSuccess, reason: , at schemeshard: 72075186224037891 2025-06-25T14:55:18.580948Z node 60 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72075186224037891, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:55:18.581082Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710665:0 progress is 1/1 2025-06-25T14:55:18.581108Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710665 ready parts: 1/1 2025-06-25T14:55:18.581152Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710665:0 progress is 1/1 2025-06-25T14:55:18.581173Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710665 ready parts: 1/1 2025-06-25T14:55:18.581229Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186224037891, LocalPathId: 1] was 4 2025-06-25T14:55:18.581309Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710665, ready parts: 1/1, is published: false 2025-06-25T14:55:18.581343Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72075186224037891, LocalPathId: 1], at schemeshard: 72075186224037891 2025-06-25T14:55:18.581368Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710665 ready parts: 1/1 2025-06-25T14:55:18.581391Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710665:0 2025-06-25T14:55:18.581416Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710665, publications: 1, subscribers: 0 2025-06-25T14:55:18.581439Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710665, [OwnerId: 72075186224037891, LocalPathId: 1], 9 2025-06-25T14:55:18.590054Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710665, response: Status: StatusSuccess TxId: 281474976710665 SchemeshardId: 72075186224037891, at schemeshard: 72075186224037891 2025-06-25T14:55:18.590572Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710665, subject: root@builtin, status: StatusSuccess, operation: MODIFY ACL, path: /dc-1/tenant-db, add access: +(DS):clusteradmin, remove access: -():clusteradmin:- 2025-06-25T14:55:18.590937Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186224037891 2025-06-25T14:55:18.590960Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186224037891, txId: 281474976710665, path id: [OwnerId: 72075186224037891, LocalPathId: 1] 2025-06-25T14:55:18.591294Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186224037891, txId: 281474976710665, path id: [OwnerId: 72075186224037891, LocalPathId: 1] 2025-06-25T14:55:18.591417Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186224037891 2025-06-25T14:55:18.591444Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [60:7519900668053403471:2268], at schemeshard: 72075186224037891, txId: 281474976710665, path id: 1 2025-06-25T14:55:18.591473Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [60:7519900668053403471:2268], at schemeshard: 72075186224037891, txId: 281474976710665, path id: 1 2025-06-25T14:55:18.590932Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [59:7519900692838988491:2871] txid# 281474976710665 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976710665} 2025-06-25T14:55:18.591073Z node 59 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [59:7519900692838988491:2871] txid# 281474976710665 SEND to# [59:7519900692838988490:2320] Source {TEvProposeTransactionStatus txid# 281474976710665 Status# 48} 2025-06-25T14:55:18.593725Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186224037891, msg: Owner: 72075186224037891 Generation: 1 LocalPathId: 1 Version: 9 PathOwnerId: 72075186224037891, cookie: 281474976710665 2025-06-25T14:55:18.593919Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186224037891, msg: Owner: 72075186224037891 Generation: 1 LocalPathId: 1 Version: 9 PathOwnerId: 72075186224037891, cookie: 281474976710665 2025-06-25T14:55:18.593953Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72075186224037891, txId: 281474976710665 2025-06-25T14:55:18.593993Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186224037891, txId: 281474976710665, pathId: [OwnerId: 72075186224037891, LocalPathId: 1], version: 9 2025-06-25T14:55:18.594035Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186224037891, LocalPathId: 1] was 5 2025-06-25T14:55:18.594169Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72075186224037891, txId: 281474976710665, subscribers: 0 TEST clusteradmin triggers auth 
on tenant 2025-06-25T14:55:18.598786Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186224037891, cookie: 281474976710665 TClient is connected to server localhost:19904 TClient::Ls request: /dc-1/tenant-db 2025-06-25T14:55:19.083262Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [59:7519900658479248842:2115] Handle TEvNavigate describe path /dc-1/tenant-db 2025-06-25T14:55:19.132560Z node 59 :TX_PROXY DEBUG: describe.cpp:272: Actor# [59:7519900697133955812:2883] HANDLE EvNavigateScheme /dc-1/tenant-db 2025-06-25T14:55:19.133170Z node 59 :TX_PROXY DEBUG: describe.cpp:356: Actor# [59:7519900697133955812:2883] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:55:19.133352Z node 59 :TX_PROXY DEBUG: describe.cpp:435: Actor# [59:7519900697133955812:2883] SEND to# 72075186224037891 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1/tenant-db" Options { ReturnBoundaries: false ShowPrivateTable: true ReturnRangeKey: false } 2025-06-25T14:55:19.135534Z node 59 :TX_PROXY DEBUG: describe.cpp:448: Actor# [59:7519900697133955812:2883] Handle TEvDescribeSchemeResult Forward to# [59:7519900697133955811:2882] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 0 Record# Status: StatusSuccess Path: "/dc-1/tenant-db" PathDescription { Self { Name: "dc-1/tenant-db" PathId: 1 SchemeshardId: 72075186224037891 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "\n\025\010\001\020\200\004\032\014clusteradmin \003" EffectiveACL: "\n\030\010\001\020\377\377\003\032\014root@builtin \003(\001\n\025\010\001\020\200\004\032\014clusteradmin \003" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 1 EffectiveACLVersion: 2 UserAttrsVersion: 1 ChildrenVersion: 2 SubDomainVersion: 3 SecurityStateVersion: 1 } } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 2 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186224037890 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037889 SchemeShard: 72075186224037891 Hive: 72075186224037888 } DomainKey { SchemeShard: 72057594046644480 PathId: 2 } StoragePools { Name: "name_tenant-db_kind_tenant-db" Kind: "tenant-db" } StoragePools { Name: "name_tenant-db_kind_test" Kind: "test" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Sids { Name: "tenantuser" Type: USER } Audience: "/dc-1/tenant-db" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186224037891 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1/tenant-db" PathId: 1 SchemeshardId: 
72075186224037891 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "\n\025\010\001\020\200\004\032\014clusteradmin \003" EffectiveACL: "\n\030\010\001\020\377\377\003\032\014root@builtin \003(\001\n\025\010\001\020\200\004\032\014clusteradmin \003" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 1 EffectiveACLVersion: 2 UserAttrsVersion: 1 ChildrenVersion: 2 SubDomainVersion: 3 SecurityStateVersion: 1 } } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72075186224037891 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 184467440737095... (TRUNCATED) 2025-06-25T14:55:19.327384Z node 59 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 60 2025-06-25T14:55:19.327955Z node 59 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(60, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:55:19.329600Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 >> BasicUsage::CloseWriteSessionImmediately [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes [GOOD] Test command err: 2025-06-25T14:54:57.089766Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:54:57.090555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:54:57.091089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:54:57.091486Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:54:57.091705Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:54:57.091833Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d66/r3tmp/tmpFfODmo/pdisk_1.dat 2025-06-25T14:54:57.631384Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6176, node 1 TClient is connected to server localhost:14139 2025-06-25T14:54:57.987548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:57.987595Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:57.987623Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:57.987815Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:05.299856Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:05.300207Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:05.300398Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:55:05.301933Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:05.302131Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:05.302356Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d66/r3tmp/tmppsrtLL/pdisk_1.dat 2025-06-25T14:55:05.580999Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13520, node 3 TClient is connected to server localhost:8257 2025-06-25T14:55:05.921260Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:05.921323Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:05.921357Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:05.921802Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:10.248232Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:420:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:10.248615Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:10.248709Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d66/r3tmp/tmpfMISNu/pdisk_1.dat 2025-06-25T14:55:10.555377Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65015, node 5 TClient is connected to server localhost:16343 2025-06-25T14:55:10.915615Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:10.915676Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:10.915714Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:10.916128Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:15.042503Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:15.042724Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:15.042885Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d66/r3tmp/tmpfV4UWd/pdisk_1.dat 2025-06-25T14:55:15.365111Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 TServer::EnableGrpc on GrpcPort 13643, node 7 TClient is connected to server localhost:1133 2025-06-25T14:55:20.022836Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:20.023166Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:20.023327Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d66/r3tmp/tmpxLdBKy/pdisk_1.dat 2025-06-25T14:55:20.364508Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7795, node 8 TClient is connected to server localhost:12389 2025-06-25T14:55:20.871572Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:20.871646Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:20.871688Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:20.871966Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> TPersQueueTest::ReadWithoutConsumerFirstClassCitizen [GOOD] >> KqpNewEngine::SqlInFromCompact [GOOD] >> KqpNewEngine::SqlInAsScalar >> THealthCheckTest::NoBscResponse [GOOD] >> THealthCheckTest::LayoutIncorrect |88.3%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpQueryService::StreamExecuteQueryMultiResult [GOOD] >> KqpQueryService::TableSink_BadTransactions |88.3%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpQueryService::ExecuteQueryPgTableSelect [GOOD] >> KqpQueryService::ExecuteQueryMultiScalar ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::CloseWriteSessionImmediately [GOOD] Test command err: 2025-06-25T14:54:56.049333Z :BasicWriteSession INFO: Random seed for debugging is 1750863296049305 2025-06-25T14:54:56.400828Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900596427668260:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:56.400910Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:54:56.476621Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900599327977940:2098];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:56.486883Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:54:56.631854Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:54:56.660250Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012ae/r3tmp/tmpQNAt2E/pdisk_1.dat 2025-06-25T14:54:57.038494Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:57.058537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:57.058634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:57.061645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:57.061842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:57.061871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:57.067480Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:54:57.068171Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31849, node 1 2025-06-25T14:54:57.300791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0012ae/r3tmp/yandexfq0UUJ.tmp 2025-06-25T14:54:57.300822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0012ae/r3tmp/yandexfq0UUJ.tmp 2025-06-25T14:54:57.300988Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0012ae/r3tmp/yandexfq0UUJ.tmp 2025-06-25T14:54:57.301136Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:54:57.405819Z node 1 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:57.487768Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:57.601310Z INFO: TTestServer started on Port 17217 GrpcPort 31849 TClient is connected to server localhost:17217 PQClient connected to localhost:31849 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:57.973636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:55:00.054716Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900613607538394:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.054826Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.086555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900613607538414:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.088376Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900613607538417:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.088444Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.094330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:00.241275Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900613607538420:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-25T14:55:00.476461Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900613607538499:2675] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:00.601833Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519900616507847382:2275], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:00.604520Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NDg0Yjk4YS1iNWNmNjIxOC0yZmM5MzUwYy02NmYwMzQwMw==, ActorId: [2:7519900616507847354:2269], ActorState: ExecuteState, TraceId: 01jyksbhjv6nzssdcb8a49eh8r, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:00.602008Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900613607538512:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:00.614179Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:00.623988Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZWQyODgyNDgtZjE5MjhjMS0zMTgyMGQ1Yy1kOTNjZWNlNg==, ActorId: [1:7519900613607538390:2296], ActorState: ExecuteState, TraceId: 01jyksbheh05wg9hbt8jc3mrd0, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:00.624580Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:00.744820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:00.975488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:01.217833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESch ... 
ason: "the following topics are not created: rt3.dc1--test-topic, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC CallPersQueueGRPC request to localhost:25482 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--test-topic" } } 2025-06-25T14:55:21.032572Z node 3 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: Status: 1 ErrorCode: OK MetaResponse { CmdGetTopicMetadataResult { TopicInfo { Topic: "rt3.dc1--test-topic" NumPartitions: 1 Config { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } Version: 1 LocalDC: true Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } TopicPath: "/Root/PQ/rt3.dc1--test-topic" YdbDatabasePath: "/Root" Consumers { Name: "user" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } Version: 0 Important: false } } ErrorCode: OK } } } === Topic created, have version: 1 2025-06-25T14:55:21.046310Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-25T14:55:21.046622Z :INFO: [] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-25T14:55:21.046658Z :INFO: [] MessageGroupId [src] SessionId [] Start write session. Will connect to endpoint: localhost:25482 2025-06-25T14:55:21.069686Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-25T14:55:21.070226Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-25T14:55:21.070260Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-25T14:55:21.070569Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-25T14:55:21.070665Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:36134 2025-06-25T14:55:21.070685Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:36134 proto=v1 topic=test-topic durationSec=0 2025-06-25T14:55:21.070693Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:55:21.072029Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-25T14:55:21.072135Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-25T14:55:21.072152Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T14:55:21.072159Z node 3 :PQ_PARTITION_CHOOSER DEBUG: 
partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-25T14:55:21.072177Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7519900705446563858:2466] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-25T14:55:21.074440Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7519900705446563858:2466] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-25T14:55:21.226773Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [3:7519900705446563858:2466] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-25T14:55:21.227111Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519900705446563906:2466] connected; active server actors: 1 2025-06-25T14:55:21.227197Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [3:7519900705446563858:2466] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-25T14:55:21.227212Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7519900705446563858:2466] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-25T14:55:21.228289Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519900705446563906:2466] disconnected; active server actors: 1 2025-06-25T14:55:21.228337Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519900705446563906:2466] disconnected no session 2025-06-25T14:55:21.349326Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7519900705446563858:2466] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-25T14:55:21.349366Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519900705446563858:2466] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-25T14:55:21.349381Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7519900705446563858:2466] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-25T14:55:21.349412Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T14:55:21.352421Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [3:7519900705446563932:2466], now have 1 active actors on pipe 2025-06-25T14:55:21.356618Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2025-06-25T14:55:21.363798Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|b9c73cf-229e53cd-90769752-db9525bd_0 2025-06-25T14:55:21.360441Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:55:21.360487Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:55:21.360587Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|b9c73cf-229e53cd-90769752-db9525bd_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-25T14:55:21.360711Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-25T14:55:21.360760Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:55:21.363400Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:55:21.363426Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:55:21.363498Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:55:21.364786Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750863321364 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:21.364891Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|b9c73cf-229e53cd-90769752-db9525bd_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-25T14:55:21.365031Z :INFO: [] MessageGroupId [src] SessionId [src|b9c73cf-229e53cd-90769752-db9525bd_0] Write session: close. 
Timeout = 0 ms 2025-06-25T14:55:21.365073Z :INFO: [] MessageGroupId [src] SessionId [src|b9c73cf-229e53cd-90769752-db9525bd_0] Write session will now close 2025-06-25T14:55:21.365104Z :DEBUG: [] MessageGroupId [src] SessionId [src|b9c73cf-229e53cd-90769752-db9525bd_0] Write session: aborting 2025-06-25T14:55:21.365423Z :INFO: [] MessageGroupId [src] SessionId [src|b9c73cf-229e53cd-90769752-db9525bd_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:55:21.365448Z :DEBUG: [] MessageGroupId [src] SessionId [src|b9c73cf-229e53cd-90769752-db9525bd_0] Write session: destroy 2025-06-25T14:55:21.373934Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|b9c73cf-229e53cd-90769752-db9525bd_0 grpc closed 2025-06-25T14:55:21.373961Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|b9c73cf-229e53cd-90769752-db9525bd_0 is DEAD 2025-06-25T14:55:21.381695Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7519900705446563932:2466] destroyed 2025-06-25T14:55:21.381747Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:55:21.381252Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison Session was created 2025-06-25T14:55:21.827732Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710689, task: 1, CA Id [3:7519900705446563950:2478]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-25T14:55:21.862004Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710689, task: 1, CA Id [3:7519900705446563950:2478]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:55:21.911829Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710689, task: 1, CA Id [3:7519900705446563950:2478]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:55:21.986696Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710689, task: 1, CA Id [3:7519900705446563950:2478]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:55:22.068435Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710689, task: 1, CA Id [3:7519900705446563950:2478]. 
Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> KqpQueryService::SeveralCTAS+UseSink [GOOD] >> KqpQueryService::SeveralCTAS-UseSink >> KqpQueryService::StreamExecuteQuery >> THealthCheckTest::CLusterNotBootstrapped [GOOD] >> KqpQueryServiceScripts::ValidateScript [GOOD] >> KqpQueryServiceScripts::TestTruncatedByRows ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::ReadWithoutConsumerFirstClassCitizen [GOOD] Test command err: 2025-06-25T14:51:26.048946Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899695829510384:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:26.049240Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:26.107217Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899697609734249:2144];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b53/r3tmp/tmprahtQ1/pdisk_1.dat 2025-06-25T14:51:26.366690Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:26.366949Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:26.372906Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:26.595231Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:26.610039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:26.610144Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:26.610871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:26.610943Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:26.612994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:26.617382Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:51:26.618190Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23575, node 1 2025-06-25T14:51:26.758639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b53/r3tmp/yandexUSaduo.tmp 2025-06-25T14:51:26.758680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b53/r3tmp/yandexUSaduo.tmp 2025-06-25T14:51:26.758868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b53/r3tmp/yandexUSaduo.tmp 2025-06-25T14:51:26.759014Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:26.816213Z INFO: TTestServer started on Port 30573 GrpcPort 23575 TClient is connected to server localhost:30573 PQClient connected to localhost:23575 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:51:27.057876Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:27.101910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:27.106344Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-25T14:51:27.153208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-25T14:51:29.111401Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899708714413246:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:29.111556Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899708714413236:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:29.112036Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:29.115081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:29.133837Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899708714413251:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T14:51:29.418749Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899708714413338:2762] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:29.446177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:29.447244Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519899708714413350:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:51:29.447347Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899710494636439:2278], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:51:29.447529Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=MzAxN2E3ZmEtZjhkOTc4OTItYzEwMzJjYWMtZjY0NDFlNTU=, ActorId: [1:7519899708714413234:2298], ActorState: ExecuteState, TraceId: 01jyks53en91gm8h1mbt4f5xv4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:51:29.447550Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MWRiZGExZWUtY2M1OThjYTktZmM3NGYyZTEtZjQ0Mzk4YTQ=, ActorId: [2:7519899710494636415:2272], ActorState: ExecuteState, TraceId: 01jyks53j04t2nph0k712m4q1t, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:51:29.449930Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:51:29.449926Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:51:29.523792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:29.601015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster ... bytes ..." 
SourceId: "\000source" SeqNo: 38 WriteTimestampMS: 1750863320867 CreateTimestampMS: 1750863320856 UncompressedSize: 6 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 2 SizeLag: 508 RealReadOffset: 36 WaitQuotaTimeMs: 0 EndOffset: 40 StartOffset: 0 } Cookie: 35 } 2025-06-25T14:55:21.140045Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:958: session cookie 2 consumer session _26_2_12464558810118230819_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:3) ready for read with readOffset 37 endOffset 40 2025-06-25T14:55:21.140106Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 2 consumer session _26_2_12464558810118230819_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:3) EndOffset 40 ReadOffset 37 ReadGuid 20b9108f-ab477f6b-6d8618ff-96bb73c8 has messages 1 2025-06-25T14:55:21.140217Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 2 consumer session _26_2_12464558810118230819_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:3), readOffset# 37, endOffset# 40, WTime# 1750863320867, sizeLag# 508 2025-06-25T14:55:21.140243Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 2 consumer session _26_2_12464558810118230819_v1TEvPartitionReady. Aval parts: 0 Bytes readed: 352 Offset: 35 from session 3 Offset: 36 from session 3 2025-06-25T14:55:21.140298Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 2 consumer session _26_2_12464558810118230819_v1 read done: guid# 20b9108f-ab477f6b-6d8618ff-96bb73c8, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:3), size# 352 2025-06-25T14:55:21.140363Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 2 consumer session _26_2_12464558810118230819_v1 response to read: guid# 20b9108f-ab477f6b-6d8618ff-96bb73c8 2025-06-25T14:55:21.140622Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 2 consumer session _26_2_12464558810118230819_v1 Process answer. 
Aval parts: 1 2025-06-25T14:55:21.141897Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer session _26_2_12464558810118230819_v1 grpc read done: success# 1, data# { read_request { bytes_size: 400 } } 2025-06-25T14:55:21.141976Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 2 consumer session _26_2_12464558810118230819_v1 got read request: guid# 22960e3d-4999fe0d-f545af17-87fc8523 2025-06-25T14:55:21.142026Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 2 consumer session _26_2_12464558810118230819_v1 performing read request: guid# 1e7da58b-15ec4ce-c21ba242-aec082c5, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:3), count# 3, size# 332, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-25T14:55:21.142138Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 2 consumer session _26_2_12464558810118230819_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:3)maxCount 3 maxSize 332 maxTimeLagMs 0 readTimestampMs 0 readOffset 37 EndOffset 40 ClientCommitOffset 0 committedOffset 0 Guid 1e7da58b-15ec4ce-c21ba242-aec082c5 2025-06-25T14:55:21.143307Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-25T14:55:21.143368Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 2 2025-06-25T14:55:21.143588Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 2, State: StateIdle] read cookie 36 Topic 'rt3.dc1--topic1' partition 2 user $without_consumer offset 37 count 3 size 332 endOffset 40 max time lag 0ms effective offset 37 2025-06-25T14:55:21.144564Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 2, State: StateIdle] read cookie 36 added 3 blobs, size 468 count 3 last offset 39, current partition end offset: 40 2025-06-25T14:55:21.144643Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 2, State: StateIdle] Reading cookie 36. Send blob request. 2025-06-25T14:55:21.144735Z node 27 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 2 offset 37 partno 0 count 1 parts_count 0 source 1 size 156 accessed 1 times before, last time 2025-06-25T14:55:21.000000Z 2025-06-25T14:55:21.144760Z node 27 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 2 offset 38 partno 0 count 1 parts_count 0 source 1 size 156 accessed 1 times before, last time 2025-06-25T14:55:21.000000Z 2025-06-25T14:55:21.144784Z node 27 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 2 offset 39 partno 0 count 1 parts_count 0 source 1 size 156 accessed 1 times before, last time 2025-06-25T14:55:21.000000Z 2025-06-25T14:55:21.144829Z node 27 :PERSQUEUE DEBUG: read.h:121: Reading cookie 36. All 3 blobs are from cache. 2025-06-25T14:55:21.144895Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 3 blobs 2025-06-25T14:55:21.144976Z node 27 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 2 offset 37 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:55:21.145018Z node 27 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 2 offset 38 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:55:21.145046Z node 27 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 2 offset 39 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:55:21.145163Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 37 totakecount 1 count 1 size 136 from pos 0 cbcount 1 2025-06-25T14:55:21.145252Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 38 totakecount 1 count 1 size 136 from pos 0 cbcount 1 2025-06-25T14:55:21.145339Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 39 totakecount 1 count 1 size 136 from pos 0 cbcount 1 2025-06-25T14:55:21.145513Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--topic1' partition: 2 messageNo: 0 requestId: cookie: 37 2025-06-25T14:55:21.148893Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer session _26_2_12464558810118230819_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:3) initDone 1 event { CmdReadResult { MaxOffset: 40 Result { Offset: 37 Data: "... 94 bytes ..." SourceId: "\000source" SeqNo: 39 WriteTimestampMS: 1750863320893 CreateTimestampMS: 1750863320892 UncompressedSize: 6 PartitionKey: "" ExplicitHash: "" } Result { Offset: 38 Data: "... 94 bytes ..." SourceId: "\000source" SeqNo: 40 WriteTimestampMS: 1750863320907 CreateTimestampMS: 1750863320906 UncompressedSize: 6 PartitionKey: "" ExplicitHash: "" } Result { Offset: 39 Data: "... 94 bytes ..." SourceId: "\000source" SeqNo: 41 WriteTimestampMS: 1750863320920 CreateTimestampMS: 1750863320918 UncompressedSize: 6 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 3 SizeLag: 60 RealReadOffset: 39 WaitQuotaTimeMs: 0 EndOffset: 40 StartOffset: 0 } Cookie: 37 } 2025-06-25T14:55:21.149217Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 2 consumer session _26_2_12464558810118230819_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:3) wait data in partition inited, cookie 1 from offset 40 2025-06-25T14:55:21.149284Z node 26 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 2 consumer session _26_2_12464558810118230819_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:3) EndOffset 40 ReadOffset 40 ReadGuid 1e7da58b-15ec4ce-c21ba242-aec082c5 has messages 1 2025-06-25T14:55:21.149419Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 2 consumer session _26_2_12464558810118230819_v1 read done: guid# 1e7da58b-15ec4ce-c21ba242-aec082c5, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 2(assignId:3), size# 522 2025-06-25T14:55:21.149463Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 2 consumer session _26_2_12464558810118230819_v1 response to read: guid# 1e7da58b-15ec4ce-c21ba242-aec082c5 2025-06-25T14:55:21.149708Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 2 consumer session _26_2_12464558810118230819_v1 Process answer. 
Aval parts: 0 Bytes readed: 522 Offset: 37 from session 3 Offset: 38 from session 3 Offset: 39 from session 3 2025-06-25T14:55:21.153487Z node 26 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer session _26_2_12464558810118230819_v1 grpc read done: success# 1, data# { commit_offset_request { commit_offsets { partition_session_id: 3 offsets { end: 39 } } } } 2025-06-25T14:55:21.153525Z node 26 :PQ_READ_PROXY INFO: read_session_actor.cpp:1640: session cookie 2 consumer session _26_2_12464558810118230819_v1 closed with error: reason# can't commit when reading without a consumer 2025-06-25T14:55:21.153808Z node 26 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer session _26_2_12464558810118230819_v1 is DEAD 2025-06-25T14:55:21.154759Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session _26_2_12464558810118230819_v1 2025-06-25T14:55:21.154828Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [26:7519900701983794193:2543] destroyed 2025-06-25T14:55:21.154855Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session _26_2_12464558810118230819_v1 2025-06-25T14:55:21.154876Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [26:7519900701983794192:2542] destroyed 2025-06-25T14:55:21.154894Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session _26_2_12464558810118230819_v1 2025-06-25T14:55:21.154911Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [26:7519900701983794191:2541] destroyed 2025-06-25T14:55:21.154926Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session _26_2_12464558810118230819_v1 2025-06-25T14:55:21.154963Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [26:7519900701983794190:2540] destroyed 2025-06-25T14:55:21.154986Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session _26_2_12464558810118230819_v1 2025-06-25T14:55:21.155007Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [26:7519900701983794194:2539] destroyed 2025-06-25T14:55:21.155073Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_2_12464558810118230819_v1 2025-06-25T14:55:21.155093Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_2_12464558810118230819_v1 2025-06-25T14:55:21.155111Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_2_12464558810118230819_v1 2025-06-25T14:55:21.155126Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_2_12464558810118230819_v1 2025-06-25T14:55:21.155143Z node 27 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _26_2_12464558810118230819_v1 >> KqpQueryService::IssuesInCaseOfSuccess >> KqpQueryService::SessionFromPoolError [GOOD] >> KqpQueryService::ReturnAndCloseSameTime >> TPersQueueTest::EachMessageGetsExactlyOneAcknowledgementInCorrectOrder [GOOD] >> TPersQueueTest::Delete ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::CLusterNotBootstrapped [GOOD] Test command err: 
2025-06-25T14:54:51.894442Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900577178417768:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:51.894647Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dce/r3tmp/tmpU9RK2o/pdisk_1.dat 2025-06-25T14:54:52.315128Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:52.334564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:52.334672Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:52.338416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20154, node 1 2025-06-25T14:54:52.515354Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:52.515376Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:52.515385Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:52.515490Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24922 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:54:52.903478Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:53.009954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:54:54.748224Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900588533551587:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:54.748332Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dce/r3tmp/tmpc1VoxT/pdisk_1.dat 2025-06-25T14:54:54.864252Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:54.865770Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900588533551568:2080] 1750863294747641 != 1750863294747644 TServer::EnableGrpc on GrpcPort 19131, node 2 2025-06-25T14:54:54.900394Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:54.900449Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:54.903706Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:54.923776Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:54.923802Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:54.923814Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:54.923943Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25387 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:55.095869Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:54:55.104718Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:03.927976Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:03.928370Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:03.928564Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:55:03.930012Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:03.930157Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:03.930322Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dce/r3tmp/tmpfRSBsG/pdisk_1.dat 2025-06-25T14:55:04.233735Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27987, node 3 TClient is connected to server localhost:21409 2025-06-25T14:55:04.607676Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:04.607732Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:04.607763Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:04.608199Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:11.510266Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:11.510614Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:11.510770Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:55:11.512428Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:634:2322], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:11.512749Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:11.512888Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dce/r3tmp/tmp8qDE6X/pdisk_1.dat 2025-06-25T14:55:11.839622Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26622, node 5 TClient is connected to server localhost:15934 2025-06-25T14:55:12.235415Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:12.235473Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:12.235507Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:12.235991Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" reason: "YELLOW-e9e2-1231c6b1-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 5 host: "::1" port: 12001 } 2025-06-25T14:55:16.991416Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:492:2374], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:16.991778Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:55:16.991894Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dce/r3tmp/tmpCbE6sI/pdisk_1.dat 2025-06-25T14:55:17.314831Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1249, node 7 TClient is connected to server localhost:20890 2025-06-25T14:55:17.770801Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:17.770858Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:17.770900Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:17.771477Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:22.788930Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:420:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:22.789215Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:22.789402Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001dce/r3tmp/tmpQ2yIJO/pdisk_1.dat 2025-06-25T14:55:23.177669Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8381, node 10 TClient is connected to server localhost:27362 2025-06-25T14:55:23.644288Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:23.644512Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:23.644576Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:23.645392Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> KqpQueryService::DdlGroup >> KqpQueryService::CreateTempTable >> BasicUsage::FallbackToSingleDbAfterBadRequest [GOOD] >> KqpService::SessionBusy [GOOD] >> KqpService::SessionBusyRetryOperation |88.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_create_path_second_time_then_it_is_ok [GOOD] >> KqpService::SessionBusyRetryOperationSync [GOOD] >> KqpService::SwitchCache+UseCache >> KqpQueryService::Write >> KqpQueryServiceScripts::ExecuteScriptStatsBasic [GOOD] >> KqpQueryServiceScripts::ExecuteScriptStatsFull ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::FallbackToSingleDbAfterBadRequest [GOOD] Test command err: 2025-06-25T14:54:58.250916Z :FallbackToSingleDb INFO: Random seed for debugging is 1750863298250886 2025-06-25T14:54:58.692404Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900607740655921:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:58.692486Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:54:58.825288Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900608404601663:2249];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:58.825340Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:54:59.171477Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001292/r3tmp/tmptcerc4/pdisk_1.dat 2025-06-25T14:54:59.194144Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read 
cache: : Created 2025-06-25T14:54:59.744480Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:59.778613Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:59.805204Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:59.834605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:54:59.833531Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:54:59.837005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:59.837087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:59.837741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:59.837816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:59.845988Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:54:59.846131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:59.846566Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1378, node 1 2025-06-25T14:55:00.067008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/001292/r3tmp/yandexzkzgoU.tmp 2025-06-25T14:55:00.067028Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/001292/r3tmp/yandexzkzgoU.tmp 2025-06-25T14:55:00.067175Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/001292/r3tmp/yandexzkzgoU.tmp 2025-06-25T14:55:00.067264Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:00.137415Z INFO: TTestServer started on Port 22185 GrpcPort 1378 TClient is connected to server localhost:22185 PQClient connected to localhost:1378 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:00.620346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:55:02.989665Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900625584470938:2271], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:02.989666Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900625584470947:2274], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:02.989773Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:02.995512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:03.018052Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900625584470952:2275], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710657 completed, doublechecking } 2025-06-25T14:55:03.091052Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900629879438276:2133] txid# 281474976710658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:03.288220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:03.294337Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900629215493426:2305], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:03.294691Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODFkNzAxYTctOTRlMWZkYjAtNzNkODJhNGUtOWM2OGJkYzE=, ActorId: [1:7519900629215493380:2298], ActorState: ExecuteState, TraceId: 01jyksbmb5awewxbwwbef93ps0, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:03.297030Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:03.296777Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519900629879438283:2279], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:03.298189Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ODViNTE1MzMtZWYwYmMxZGUtMmMyYTA5Y2ItOGI3M2RmNGM=, ActorId: [2:7519900625584470936:2270], ActorState: ExecuteState, TraceId: 01jyksbmacfwv5q03expf96q4s, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:03.298604Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:03.442799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:03.591260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_r ... artition 0 from PQRB for SourceId=src 2025-06-25T14:55:24.175551Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7519900719947774889:2463] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-25T14:55:24.176704Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519900719947774925:2463] disconnected; active server actors: 1 2025-06-25T14:55:24.176734Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519900719947774925:2463] disconnected no session 2025-06-25T14:55:24.271071Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7519900719947774889:2463] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-25T14:55:24.271105Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7519900719947774889:2463] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-25T14:55:24.271121Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7519900719947774889:2463] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-25T14:55:24.271148Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T14:55:24.272737Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2025-06-25T14:55:24.274553Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|8d59f6bb-554705a5-f04001a2-d1abb941_0 2025-06-25T14:55:24.272638Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [3:7519900719947774949:2463], now have 1 active actors on pipe 2025-06-25T14:55:24.273363Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:55:24.273396Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:55:24.273480Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|8d59f6bb-554705a5-f04001a2-d1abb941_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-25T14:55:24.273588Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-25T14:55:24.276499Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750863324276 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:24.273666Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:55:24.274147Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:55:24.274166Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:55:24.274232Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:55:24.276632Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|8d59f6bb-554705a5-f04001a2-d1abb941_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-25T14:55:24.276868Z :INFO: [] MessageGroupId [src] SessionId [src|8d59f6bb-554705a5-f04001a2-d1abb941_0] Write session: close. 
Timeout = 0 ms 2025-06-25T14:55:24.276910Z :INFO: [] MessageGroupId [src] SessionId [src|8d59f6bb-554705a5-f04001a2-d1abb941_0] Write session will now close 2025-06-25T14:55:24.276949Z :DEBUG: [] MessageGroupId [src] SessionId [src|8d59f6bb-554705a5-f04001a2-d1abb941_0] Write session: aborting 2025-06-25T14:55:24.277378Z :INFO: [] MessageGroupId [src] SessionId [src|8d59f6bb-554705a5-f04001a2-d1abb941_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:55:24.277421Z :DEBUG: [] MessageGroupId [src] SessionId [src|8d59f6bb-554705a5-f04001a2-d1abb941_0] Write session: destroy 2025-06-25T14:55:24.279099Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|8d59f6bb-554705a5-f04001a2-d1abb941_0 grpc read done: success: 0 data: 2025-06-25T14:55:24.279123Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|8d59f6bb-554705a5-f04001a2-d1abb941_0 grpc read failed 2025-06-25T14:55:24.279146Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|8d59f6bb-554705a5-f04001a2-d1abb941_0 grpc closed 2025-06-25T14:55:24.279159Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|8d59f6bb-554705a5-f04001a2-d1abb941_0 is DEAD 2025-06-25T14:55:24.279554Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:55:24.280534Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7519900719947774949:2463] destroyed 2025-06-25T14:55:24.280588Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. PORTS 61822 26164 Session was created >>> Ready to answer: ok 2025-06-25T14:55:25.295270Z :INFO: [/Root] OnFederationDiscovery fall back to single mode, database=/Root 2025-06-25T14:55:25.295387Z :INFO: [/Root] [] [21ff86c5-ddf84611-1604425e-5243ab8] Open read subsessions to databases: { name: , endpoint: localhost:26164, path: /Root } 2025-06-25T14:55:25.295542Z :INFO: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] Starting read session 2025-06-25T14:55:25.295565Z :DEBUG: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] Starting single session 2025-06-25T14:55:25.295934Z :DEBUG: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] [] In Reconnect, ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2025-06-25T14:55:25.295966Z :DEBUG: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] [] New values: ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2025-06-25T14:55:25.296004Z :DEBUG: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] [] Reconnecting session to cluster in 0.000000s 2025-06-25T14:55:25.296188Z :ERROR: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] [] Got error. Status: CLIENT_CALL_UNIMPLEMENTED. Description:
: Error: GRpc error: (12):
: Error: Grpc error response on endpoint localhost:26164
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:26164. 2025-06-25T14:55:25.296232Z :DEBUG: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] [] In Reconnect, ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2025-06-25T14:55:25.296262Z :DEBUG: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] [] New values: ReadSizeBudget = 524288, ReadSizeServerDelta = 0 2025-06-25T14:55:25.296407Z :INFO: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] [] Closing session to cluster: SessionClosed { Status: CLIENT_CALL_UNIMPLEMENTED Issues: "
: Error: Failed to establish connection to server "localhost:26164" ( cluster ). Attempts done: 1
: Error: GRpc error: (12):
: Error: Grpc error response on endpoint localhost:26164
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:26164. " } 2025-06-25T14:55:25.297482Z :NOTICE: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:55:25.297534Z :DEBUG: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] [] Abort session to cluster Got new read session event: SessionClosed { Status: CLIENT_CALL_UNIMPLEMENTED Issues: "
: Error: Failed to establish connection to server "localhost:26164" ( cluster ). Attempts done: 1
: Error: GRpc error: (12):
: Error: Grpc error response on endpoint localhost:26164
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:26164. " } 2025-06-25T14:55:25.297649Z :INFO: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] Closing read session. Close timeout: 0.010000s 2025-06-25T14:55:25.297697Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-25T14:55:25.297738Z :INFO: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] Counters: { Errors: 1 CurrentSessionLifetimeMs: 2 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:25.297773Z :INFO: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] Closing read session. Close timeout: 0.000000s 2025-06-25T14:55:25.297805Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-25T14:55:25.297840Z :INFO: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] Counters: { Errors: 1 CurrentSessionLifetimeMs: 2 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:25.297877Z :INFO: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] Closing read session. Close timeout: 0.000000s 2025-06-25T14:55:25.297907Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-25T14:55:25.297966Z :INFO: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] Counters: { Errors: 1 CurrentSessionLifetimeMs: 2 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:25.298047Z :NOTICE: [/Root] [/Root] [d619a9fb-e74134c1-61684075-1e401d0b] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:55:25.575886Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715689, task: 1, CA Id [3:7519900724242742314:2483]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-25T14:55:25.609420Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715689, task: 1, CA Id [3:7519900724242742314:2483]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:55:25.662256Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715689, task: 1, CA Id [3:7519900724242742314:2483]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:55:25.737458Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715689, task: 1, CA Id [3:7519900724242742314:2483]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:55:25.840405Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715689, task: 1, CA Id [3:7519900724242742314:2483]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:55:26.028892Z node 3 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715689, task: 1, CA Id [3:7519900724242742314:2483]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> KqpQueryService::TableSink_OltpInteractive [GOOD] >> KqpQueryServiceScripts::ForgetScriptExecutionOnLongQuery >> THealthCheckTest::LayoutIncorrect [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OltpInteractive [GOOD] Test command err: Trying to start YDB, gRPC: 2737, MsgBus: 10417 2025-06-25T14:55:13.595204Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900671070614238:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:13.595250Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001848/r3tmp/tmpli9CnK/pdisk_1.dat 2025-06-25T14:55:13.975378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:13.975478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:13.981058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:14.004442Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900671070614217:2080] 1750863313585944 != 1750863313585947 2025-06-25T14:55:14.021159Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2737, node 1 2025-06-25T14:55:14.094223Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:14.094254Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:14.094264Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:14.094380Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:10417 TClient is connected to server localhost:10417 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:55:14.620601Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:14.696602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:14.709221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:16.650762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900683955516743:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.650873Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.903625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:17.059282Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900688250484143:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:17.059368Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:17.059610Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900688250484148:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:17.062953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:17.072521Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900688250484150:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:55:17.148030Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900688250484201:2392] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 21438, MsgBus: 14428 2025-06-25T14:55:18.343810Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900694355577862:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:18.344848Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001848/r3tmp/tmpuhXVAj/pdisk_1.dat 2025-06-25T14:55:18.481054Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:18.485420Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900694355577839:2080] 1750863318342038 != 1750863318342041 2025-06-25T14:55:18.502990Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected TServer::EnableGrpc on GrpcPort 21438, node 2 2025-06-25T14:55:18.503873Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:18.505253Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:18.553058Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:18.553088Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:18.553096Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:18.553234Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14428 TClient is connected to server localhost:14428 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:19.049419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:19.056102Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:19.359799Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:21.373239Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900707240480360:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:21.373762Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:21.393534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-0 ... operation_create_resource_pool.cpp:179) 2025-06-25T14:55:21.458553Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900707240480470:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:55:21.535939Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900707240480521:2392] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:21.942395Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=4; 2025-06-25T14:55:21.953192Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 4 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:55:21.953403Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 4 at tablet 72075186224037888 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:55:21.953654Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [2:7519900707240480605:2327], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [2:7519900707240480589:2327]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[2:7519900707240480605:2327].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:55:21.954301Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:7519900707240480598:2327], SessionActorId: [2:7519900707240480589:2327], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[2:7519900707240480589:2327]. isRollback=0 2025-06-25T14:55:21.954561Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=NWIwODU1MDAtMjg3N2U0NWMtOTY2ZDMyYWItOTM2MmQ4MWM=, ActorId: [2:7519900707240480589:2327], ActorState: ExecuteState, TraceId: 01jyksc6rm3m9x3xh3eft6pjsz, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [2:7519900707240480599:2327] from: [2:7519900707240480598:2327] 2025-06-25T14:55:21.954652Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519900707240480599:2327] TxId: 281474976710663. Ctx: { TraceId: 01jyksc6rm3m9x3xh3eft6pjsz, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWIwODU1MDAtMjg3N2U0NWMtOTY2ZDMyYWItOTM2MmQ4MWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:55:21.954886Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NWIwODU1MDAtMjg3N2U0NWMtOTY2ZDMyYWItOTM2MmQ4MWM=, ActorId: [2:7519900707240480589:2327], ActorState: ExecuteState, TraceId: 01jyksc6rm3m9x3xh3eft6pjsz, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 7397, MsgBus: 4231 2025-06-25T14:55:22.719310Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519900711422146975:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:22.719344Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001848/r3tmp/tmpLrCsJA/pdisk_1.dat 2025-06-25T14:55:22.912392Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519900711422146956:2080] 1750863322718629 != 1750863322718632 2025-06-25T14:55:22.921790Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7397, node 3 2025-06-25T14:55:22.946919Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:22.947042Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:22.980848Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:23.034802Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:23.034824Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:23.034831Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:23.034940Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4231 TClient is connected to server localhost:4231 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:23.598759Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:23.606476Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:23.723616Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:26.069791Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900728602016772:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:26.069875Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:26.107058Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:26.331705Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:26.571798Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900728602018165:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:26.571875Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:26.582757Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900728602018169:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:26.582863Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:26.583039Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900728602018174:2409], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:26.586607Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:26.598964Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900728602018176:2410], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:55:26.653971Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900728602018227:3201] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:27.728426Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900711422146975:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:27.728510Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryService::SeveralCTAS-UseSink [GOOD] >> KqpQueryService::ExecuteQueryExplicitTxTLI >> KqpQueryService::ExecuteQueryMultiScalar [GOOD] >> KqpQueryService::StreamExecuteQuery [GOOD] >> KqpQueryService::StreamExecuteCollectMeta ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::LayoutIncorrect [GOOD] Test command err: 2025-06-25T14:54:57.113332Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:54:57.113884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:54:57.114257Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:54:57.114632Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:54:57.114852Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:54:57.114992Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d6b/r3tmp/tmpxtg0bP/pdisk_1.dat 2025-06-25T14:54:57.517450Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3822, node 1 TClient is connected to server localhost:23225 2025-06-25T14:54:57.903322Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:57.903378Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:57.903410Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:57.903638Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:05.244492Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:05.244624Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:05.244808Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:55:05.245035Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:05.245379Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:05.245450Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d6b/r3tmp/tmprQeazW/pdisk_1.dat 2025-06-25T14:55:05.544651Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1181, node 3 TClient is connected to server localhost:25491 2025-06-25T14:55:05.898773Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:05.898836Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:05.898870Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:05.899430Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-3" reason: "YELLOW-e9e2-1231c6b1-4" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-4" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 4 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "RED-a594-3-3-42" status: RED message: "PDisk state is FAULTY" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-42" path: "/home/runner/.ya/build/build_root/yft8/001d6b/r3tmp/tmprQeazW/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-a594-3-3-43" status: RED message: "PDisk state is FAULTY" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-43" path: "/home/runner/.ya/build/build_root/yft8/001d6b/r3tmp/tmprQeazW/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } issue_log { id: "RED-a594-3-3-44" status: RED message: "PDisk state is FAULTY" location { storage { node { id: 3 host: "::1" port: 12001 } pool { group { vdisk { pdisk { id: "3-44" path: "/home/runner/.ya/build/build_root/yft8/001d6b/r3tmp/tmprQeazW/pdisk_1.dat" } } } } } } type: "PDISK" level: 6 } location { id: 3 host: "::1" port: 12001 } 2025-06-25T14:55:10.223019Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:420:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:10.223405Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:10.223490Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d6b/r3tmp/tmpIVsj3V/pdisk_1.dat 2025-06-25T14:55:10.501006Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7376, node 5 TClient is connected to server localhost:8643 2025-06-25T14:55:10.851811Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:10.851877Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:10.851920Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:10.852418Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:15.061831Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:15.062222Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:15.062378Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d6b/r3tmp/tmpLOXMnB/pdisk_1.dat 2025-06-25T14:55:15.402043Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25753, node 7 TClient is connected to server localhost:17246 2025-06-25T14:55:15.821763Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:15.821838Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:15.821899Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:15.822428Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:15.906365Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:15.906542Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:15.930735Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:16.652965Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:27.566430Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:27.566585Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:27.566651Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d6b/r3tmp/tmpHQoNCn/pdisk_1.dat 2025-06-25T14:55:27.967198Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17364, node 9 TClient is connected to server localhost:22064 2025-06-25T14:55:28.497663Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:28.497739Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:28.497783Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:28.498391Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> KqpQueryService::TableSink_BadTransactions [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::SeveralCTAS-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 25127, MsgBus: 22557 2025-06-25T14:55:12.732772Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900668831464263:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:12.732818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185a/r3tmp/tmpKbClhR/pdisk_1.dat 2025-06-25T14:55:13.194230Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:13.261312Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:13.261415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:13.263736Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25127, node 1 2025-06-25T14:55:13.553049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:13.553069Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:13.553077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:13.553200Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:13.754557Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server 
localhost:22557 TClient is connected to server localhost:22557 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:14.400056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:14.412944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:55:14.429499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:14.607738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:14.790286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:14.883079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:16.036365Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900686011335049:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.036473Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.378672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.406349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.448028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.474218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.503913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.534708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.567148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.632563Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900686011335705:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.632647Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.632732Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900686011335710:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.640111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:16.650178Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900686011335712:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:16.720028Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900686011335763:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:17.733261Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900668831464263:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:17.733345Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 1146, MsgBus: 22315 2025-06-25T14:55:19.605693Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900694982720984:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:19.605756Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runn ... schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:22.556545Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900707867623475:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:55:22.651323Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900707867623528:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:22.845149Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:23.028840Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900712162591108:2499] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:23.041215Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900712162591115:2504] txid# 281474976710666, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/NDU3YjUzZDAtMzhlNTU5MWMtZGM5NTFjM2MtZjMzZmY2OWQ=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:23.044047Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:23.205979Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900712162591302:2617] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:23.207817Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900712162591309:2622] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/NDU3YjUzZDAtMzhlNTU5MWMtZGM5NTFjM2MtZjMzZmY2OWQ=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:23.210628Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 22165, MsgBus: 14979 2025-06-25T14:55:24.681556Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519900716483534406:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:24.681603Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185a/r3tmp/tmpK11MS5/pdisk_1.dat 2025-06-25T14:55:24.799733Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22165, node 3 2025-06-25T14:55:24.828711Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:24.828792Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:24.860761Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:24.880852Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:24.880876Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:24.880882Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:24.880998Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14979 TClient is connected to server localhost:14979 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:25.367010Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:25.387300Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:25.691925Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:27.780447Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900729368436885:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:27.780772Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900729368436875:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:27.780824Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:27.788665Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:27.801818Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900729368436904:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:55:27.859357Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900729368436955:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:27.939810Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.122129Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900733663404532:2498] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:28.124266Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900733663404539:2503] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/Nzc3OWUzNmEtYjkzMThiNTctYmNmM2NlNGItNzIzZDlhM2I=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:28.127374Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.324906Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900733663404726:2616] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:28.327205Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900733663404733:2621] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/Nzc3OWUzNmEtYjkzMThiNTctYmNmM2NlNGItNzIzZDlhM2I=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:28.329819Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpQueryService::FlowControllOnHugeRealTable-LongRow [GOOD] >> KqpQueryService::ForbidInteractiveTxOnImplicitSession >> KqpQueryService::TableSink_OltpReplace+HasSecondaryIndex >> KqpQueryService::ExecuteQueryWithResourcePoolClassifier [GOOD] >> KqpQueryService::ExecuteRetryQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> 
KqpQueryService::ExecuteQueryMultiScalar [GOOD] Test command err: Trying to start YDB, gRPC: 12250, MsgBus: 14922 2025-06-25T14:55:14.458854Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900674603841961:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:14.458938Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001847/r3tmp/tmpzrfhsI/pdisk_1.dat 2025-06-25T14:55:14.919795Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:14.943266Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:14.943377Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:14.945295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12250, node 1 2025-06-25T14:55:15.020810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:15.020832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:15.020839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:15.020973Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14922 TClient is connected to server localhost:14922 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:55:15.500276Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:15.562555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:55:15.572113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:15.584719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:15.713601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:15.870688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:15.944065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:17.534502Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900687488745456:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:17.534622Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:17.909251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:17.947373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:17.984499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.019670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.051127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.122105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.155619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.211588Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900691783713412:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:18.211692Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:18.211971Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900691783713417:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:18.215571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:18.225578Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900691783713419:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:18.301046Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900691783713470:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:19.460408Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900674603841961:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:19.460471Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 23786, MsgBus: 21507 2025-06-25T14:55:20.109406Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900700867412271:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:20.109483Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/run ... R: schemereq.cpp:553: Actor# [2:7519900713752314936:2391] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 10356, MsgBus: 18474 2025-06-25T14:55:24.367734Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519900718144106787:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:24.367797Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001847/r3tmp/tmpdXgWOU/pdisk_1.dat 2025-06-25T14:55:24.530965Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:24.531842Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519900718144106757:2080] 1750863324366987 != 1750863324366990 2025-06-25T14:55:24.548069Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:24.548147Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:24.550215Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10356, node 3 2025-06-25T14:55:24.600758Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:24.600776Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:24.600782Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-25T14:55:24.600909Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18474 TClient is connected to server localhost:18474 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:25.113772Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:25.127712Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:25.143816Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:25.254150Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:25.413524Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:25.428106Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:55:25.548774Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:27.604579Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900731029010281:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:27.604659Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:27.664005Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:27.700558Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:27.744060Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:27.777110Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:27.816573Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:27.868993Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:27.907621Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.033735Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900735323978237:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.033825Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.034207Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900735323978242:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.038381Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:28.051267Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900735323978244:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:28.120098Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900735323978295:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:29.368424Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900718144106787:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:29.368501Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_BadTransactions [GOOD] Test command err: Trying to start YDB, gRPC: 13267, MsgBus: 2954 2025-06-25T14:55:12.749108Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900666977132469:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:12.751287Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001858/r3tmp/tmpPPKsYV/pdisk_1.dat 2025-06-25T14:55:13.248323Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:13.248417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:13.256615Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:13.260455Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900666977132277:2080] 1750863312710776 != 1750863312710779 2025-06-25T14:55:13.288337Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13267, node 1 2025-06-25T14:55:13.556851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:13.556868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:13.556873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:13.556956Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:13.751232Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2954 TClient is connected to server localhost:2954 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:14.391467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:14.408976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:14.436378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:14.595063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:14.823408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:14.912355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:16.250178Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900684157003124:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.250279Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.567056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.602174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.636019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.668525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.713847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.742603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.808840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.866264Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900684157003788:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.866331Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.866471Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900684157003793:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.869495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:16.878372Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900684157003795:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:16.941454Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900684157003846:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:17.752415Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900666977132469:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:17.752513Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 26354, MsgBus: 27851 2025-06-25T14:55:18.730466Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900692397808004:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:18.730577Z node 2 :METADATA_PROVIDER ERROR: log.cpp:7 ... COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:27.999384Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:55:27.999775Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:55:28.005346Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:28.005346Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:28.006038Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:55:28.006041Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:55:28.013159Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:28.013159Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:28.013821Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:55:28.013837Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:55:28.018570Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:28.018570Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:28.019203Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:55:28.019203Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:55:28.024268Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:28.024268Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:28.030878Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.151921Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900735953503442:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.152028Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.152337Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900735953503447:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.157673Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:28.170772Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900735953503449:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T14:55:28.258850Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900735953503502:2664] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:28.524915Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:55:28.524916Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:55:28.525315Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:55:28.525322Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519900731658535735:2306];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=14;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037893;receive=72075186224037894; 2025-06-25T14:55:28.525402Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519900731658535735:2306];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=15;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037893;receive=72075186224037889; 2025-06-25T14:55:28.525459Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519900731658535735:2306];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=16;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037893;receive=72075186224037889; 2025-06-25T14:55:28.525514Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519900731658535735:2306];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037897;local_tx_no=17;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037893;receive=72075186224037894; 2025-06-25T14:55:28.525887Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:55:28.972862Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=Y2Y0ZDZiOWYtNTI3YTcyMGMtMjU4Njc0YzQtZmIyYmQ4N2I=, ActorId: [3:7519900735953503627:2461], ActorState: ExecuteState, TraceId: 01jykscdcxae08rkpb2abrzbve, Create QueryResponse for error on request, msg: Write transactions between column and row tables are disabled at current time. 
2025-06-25T14:55:29.125735Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NjQ4MTU3YmQtMjAyM2RhMDAtZDNmOTdmYWUtYzNlOTUyZjk=, ActorId: [3:7519900735953503649:2470], ActorState: ExecuteState, TraceId: 01jykscdq094ewmygasy3vtkdj, Create QueryResponse for error on request, msg: Write transactions between column and row tables are disabled at current time. 2025-06-25T14:55:29.296757Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NTM2YTZlZjgtMjdlM2M3OTQtYzY4ODhhYjQtODY4NWM1ZGI=, ActorId: [3:7519900740248470963:2477], ActorState: ExecuteState, TraceId: 01jykscdve8wqfeepzrea5hw71, Create QueryResponse for error on request, msg: Write transactions between column and row tables are disabled at current time. 2025-06-25T14:55:29.675412Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=N2EwM2UxY2EtYTYyNGJlYjktZjU3MzZlMWQtNGE3ODU0ODM=, ActorId: [3:7519900740248470983:2485], ActorState: ExecuteState, TraceId: 01jyksce0va7pmb8tsv4m0kvgq, Create QueryResponse for error on request, msg: Write transactions between column and row tables are disabled at current time. 2025-06-25T14:55:29.860856Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=MTJiNzkxNDUtYTBjMTE2MWYtYTg5OTEzMDAtMjFkYjAxZTk=, ActorId: [3:7519900740248471002:2493], ActorState: ExecuteState, TraceId: 01jykscecp2bncdseqz0t7qx45, Create QueryResponse for error on request, msg: Write transactions between column and row tables are disabled at current time. 2025-06-25T14:55:30.031230Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715671;tx_id=281474976715671;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715671; 2025-06-25T14:55:30.031322Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715671;tx_id=281474976715671;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715671; 2025-06-25T14:55:30.031639Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715671;tx_id=281474976715671;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715671; 2025-06-25T14:55:30.032387Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715671;tx_id=281474976715671;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715671; >> KqpQueryServiceScripts::TestFetchMoreThanLimit [GOOD] >> KqpQueryServiceScripts::TestAstWithCompression >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithParameters >> KqpQueryService::CreateTempTable [GOOD] >> KqpQueryService::CreateAndDropTopic >> KqpDocumentApi::RestrictWriteExplicitPrepare >> KqpQueryService::IssuesInCaseOfSuccess [GOOD] >> KqpQueryService::MaterializeTxResults >> TPersQueueTest::CreateTopicWithMeteringMode [GOOD] >> TPersQueueTest::DefaultMeteringMode >> KqpQueryService::DdlGroup [GOOD] >> KqpQueryService::DdlPermission >> KqpQueryServiceScripts::ExecuteScriptWithUnspecifiedMode >> KqpService::SessionBusyRetryOperation [GOOD] >> KqpService::RangeCache-UseCache >> KqpQueryService::Write [GOOD] >> KqpQueryServiceScripts::CancelScriptExecution >> 
KqpQueryServiceScripts::ExecuteScriptWithWorkloadManager >> THealthCheckTest::TestBootingTabletIsNotDead [GOOD] >> KqpQueryServiceScripts::ExecuteScriptStatsFull [GOOD] >> KqpQueryServiceScripts::ExecuteScriptStatsNone >> KqpQueryService::ExecuteQueryExplicitTxTLI [GOOD] >> KqpQueryService::ExecuteQueryInteractiveTx >> KqpQueryService::StreamExecuteCollectMeta [GOOD] >> KqpQueryService::ShowCreateViewOnTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/health_check/ut/unittest >> THealthCheckTest::TestBootingTabletIsNotDead [GOOD] Test command err: 2025-06-25T14:54:58.680040Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:54:58.680653Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:54:58.680768Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:54:58.680941Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:54:58.680991Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:54:58.681075Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d5e/r3tmp/tmpcQd6wN/pdisk_1.dat 2025-06-25T14:54:59.119195Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8839, node 1 TClient is connected to server localhost:11497 2025-06-25T14:54:59.555825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:59.555883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:59.555911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:59.556432Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:05.817760Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:05.817840Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:05.817934Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:55:05.818091Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:05.818389Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:05.818433Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d5e/r3tmp/tmpKCypat/pdisk_1.dat 2025-06-25T14:55:06.099585Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26860, node 3 TClient is connected to server localhost:7543 2025-06-25T14:55:06.397624Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:06.397670Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:06.397695Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:06.397931Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:12.390149Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:625:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:12.390438Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:12.390576Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:55:12.392086Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:634:2322], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:12.392384Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:12.392515Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d5e/r3tmp/tmp2LtLCH/pdisk_1.dat 2025-06-25T14:55:12.700221Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16583, node 5 TClient is connected to server localhost:28737 2025-06-25T14:55:13.035954Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:13.036003Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:13.036028Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:13.036814Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:20.526320Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:20.526838Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:20.527052Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:55:20.527449Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:20.527787Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:20.527931Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d5e/r3tmp/tmphQC1hC/pdisk_1.dat 2025-06-25T14:55:20.846776Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19356, node 7 TClient is connected to server localhost:22921 2025-06-25T14:55:21.221317Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:21.221392Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:21.221426Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:21.221908Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:28.977965Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:637:2377], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:28.978721Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:28.979166Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:55:28.980696Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:634:2253], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:28.981061Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T14:55:28.981161Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001d5e/r3tmp/tmpNtjixh/pdisk_1.dat 2025-06-25T14:55:29.464008Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62849, node 9 TClient is connected to server localhost:27512 2025-06-25T14:55:33.485903Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:33.485971Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:33.486011Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:33.486650Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:33.520497Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:33.520727Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:33.561565Z node 9 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 11 Cookie 11 2025-06-25T14:55:33.562939Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-10" reason: "YELLOW-e9e2-1231c6b1-11" reason: "YELLOW-e9e2-1231c6b1-9" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-9" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 9 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-10" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 10 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-11" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 11 host: "::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 9 host: "::1" port: 12001 } |88.3%| [TA] $(B)/ydb/core/health_check/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.3%| [TA] {RESULT} $(B)/ydb/core/health_check/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpQueryService::CreateAndDropTopic [GOOD] >> KqpQueryService::CreateAndAlterTopic >> KqpQueryServiceScripts::ForgetScriptExecutionOnLongQuery [GOOD] >> KqpQueryServiceScripts::ForgetScriptExecutionRace >> KqpQueryService::ForbidInteractiveTxOnImplicitSession [GOOD] >> KqpQueryService::TableSink_OltpReplace+HasSecondaryIndex [GOOD] >> KqpQueryService::TableSink_OltpReplace-HasSecondaryIndex >> KqpQueryService::ExecuteRetryQuery [GOOD] >> KqpQueryService::TableSink_Olap_Replace >> KqpNewEngine::SqlInAsScalar [GOOD] >> KqpDocumentApi::RestrictWriteExplicitPrepare [GOOD] >> KqpDocumentApi::Scripting ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ForbidInteractiveTxOnImplicitSession [GOOD] Test command err: Trying to start YDB, gRPC: 12654, MsgBus: 17243 2025-06-25T14:55:15.514721Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:55:15.514862Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:55:15.514905Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001851/r3tmp/tmpvDlzuv/pdisk_1.dat 2025-06-25T14:55:15.842548Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 12654, node 1 2025-06-25T14:55:15.960632Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:15.964944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:15.965002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:15.965032Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:15.965425Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:15.965706Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863312706904 != 1750863312706908 2025-06-25T14:55:16.014634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:16.014809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:16.027056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17243 TClient is connected to server localhost:17243 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:16.339448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:16.439161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:16.571931Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:16.801883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:17.175988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:17.501722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:18.315370Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1688:3282], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:18.315753Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:18.342018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.558347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.808804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:19.099799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:19.364214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:19.719852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:20.022747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:20.361588Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2361:3780], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:20.361696Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:20.362110Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2366:3785], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:20.367512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:20.501013Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2368:3787], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:20.570350Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2423:3823] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 7241, MsgBus: 7364 2025-06-25T14:55:22.656717Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900710130760667:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:22.660238Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001851/r3tmp/tmpxRrjmp/pdisk_1 ... , but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:27.660687Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519900710130760667:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:27.660755Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 121 Trying to start YDB, gRPC: 25126, MsgBus: 17697 2025-06-25T14:55:31.388584Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519900747310522929:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:31.388639Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001851/r3tmp/tmpj2eYQE/pdisk_1.dat 2025-06-25T14:55:31.604989Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:31.605064Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:31.610325Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:31.622625Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25126, node 3 2025-06-25T14:55:31.682982Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:31.683010Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:31.683021Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:31.683135Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17697 TClient is 
connected to server localhost:17697 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:55:32.333508Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:55:32.362345Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:32.393764Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:32.461879Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:32.680290Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:55:32.766053Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.081339Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900764490393699:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.081415Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.141243Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.282282Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.357308Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.419122Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.456513Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.506500Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.549586Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.623204Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900764490394366:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.623275Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900764490394371:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.623276Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.627177Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:35.637058Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900764490394373:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:35.717210Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900764490394424:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:36.388704Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900747310522929:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:36.388774Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryService::PeriodicTaskInSessionPool [GOOD] >> KqpQueryService::PeriodicTaskInSessionPoolSessionCloseByIdle >> KqpQueryService::MaterializeTxResults [GOOD] >> KqpQueryService::MixedReadQueryWithoutStreamLookup >> KqpExtractPredicateLookup::SimpleRange [GOOD] >> KqpExtractPredicateLookup::PointJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ExecuteRetryQuery [GOOD] Test command err: Trying to start YDB, gRPC: 1311, MsgBus: 24145 2025-06-25T14:55:15.232706Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900679067611443:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:15.232739Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001846/r3tmp/tmp8kyfMy/pdisk_1.dat 2025-06-25T14:55:15.616388Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900679067611417:2080] 1750863315231000 != 1750863315231003 2025-06-25T14:55:15.623801Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1311, node 1 2025-06-25T14:55:15.638960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:15.639435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:15.641381Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:15.733124Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:15.733150Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:15.733159Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:15.733285Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24145 TClient is connected to server localhost:24145 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:16.191346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:16.222190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:16.234642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.239592Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:55:16.340134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:16.478547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:16.575434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:18.029938Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900691952514952:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:18.030042Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:18.362987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.398680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.427600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.461229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.530247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.562341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.631785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:18.689966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900691952515614:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:18.690013Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900691952515619:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:18.690044Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:18.693384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:18.702899Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900691952515621:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:18.771460Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900691952515672:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:19.988486Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900696247483273:2488], DatabaseId: /Root, PoolId: another_pool_id, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool another_pool_id not found or you don't have access permissions } 2025-06-25T14:55:19.988520Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900696247483271:2486], DatabaseId: /Root, PoolId: another_pool_id, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool another_pool_id not found or you don't have access permissions } 2025-06-25T14:55:19.988587Z node 1 :KQP_WORKLOAD_SERVICE WARN: ... s_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Wait resource pool classifier 0.092799s: status = SUCCESS, issues = 2025-06-25T14:55:30.635612Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NjE4Zjk0ZjktNWIwNGUwNzktZWM4MTk5ODEtNjMxNTg3Yjg=, ActorId: [2:7519900743957064198:2767], ActorState: ExecuteState, TraceId: 01jykscfa560gz7cgd7f2sdytt, Create QueryResponse for error on request, msg: Query failed during adding/waiting in workload pool MyPool Trying to start YDB, gRPC: 23347, MsgBus: 7462 2025-06-25T14:55:31.711266Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519900750475590384:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:31.711334Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001846/r3tmp/tmpAqbt3j/pdisk_1.dat 2025-06-25T14:55:31.969133Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:31.976143Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:31.976229Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:31.981733Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23347, node 3 2025-06-25T14:55:32.074374Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:32.074400Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:32.074408Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:32.074550Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7462 TClient is connected to server localhost:7462 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:32.669712Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:32.690726Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:32.725690Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:32.772950Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:32.969460Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:55:33.040334Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.484487Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900767655461152:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.484608Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.585637Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.652534Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.682694Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.722200Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.756211Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.796274Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.876401Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.961325Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900767655461817:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.961475Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.962377Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900767655461822:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.968372Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:35.989505Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:55:35.989772Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900767655461824:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:36.087731Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900771950429171:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:36.711306Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900750475590384:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:36.711356Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryServiceScripts::ExecuteScriptWithUnspecifiedMode [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::SqlInAsScalar [GOOD] Test command err: Trying to start YDB, gRPC: 17672, MsgBus: 22961 2025-06-25T14:54:13.104836Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900414488562747:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:13.104875Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009c1/r3tmp/tmpQbRqGA/pdisk_1.dat 2025-06-25T14:54:13.697656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:13.697758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:13.716773Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17672, node 1 2025-06-25T14:54:13.739251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:13.831505Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:13.831531Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:13.831538Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:13.831643Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22961 2025-06-25T14:54:14.132581Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22961 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:14.599463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:14.616318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:54:16.514164Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900427373465247:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.514296Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.746045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:16.889154Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900427373465350:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.889235Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.889443Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900427373465355:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:16.893056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:16.903854Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900427373465357:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:54:16.974216Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900427373465408:2396] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 27067, MsgBus: 3485 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009c1/r3tmp/tmpiGtYXp/pdisk_1.dat 2025-06-25T14:54:18.291727Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:54:18.345305Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27067, node 2 2025-06-25T14:54:18.404404Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:18.404531Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:18.429187Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:18.440831Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:18.440856Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:18.440865Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:18.440971Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3485 TClient is connected to server localhost:3485 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:18.930088Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:54:18.935168Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:19.212882Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:21.062980Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900448682640820:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:21.063058Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:21.111521Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:21.283070Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900448682641187:2321], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:21.283148Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T1 ... /runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:21.406297Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 21693, MsgBus: 61124 2025-06-25T14:55:23.718661Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519900712129191206:2179];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009c1/r3tmp/tmpJAyxdm/pdisk_1.dat 2025-06-25T14:55:23.792602Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:55:23.875091Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:23.891705Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519900712129191036:2080] 1750863323704554 != 1750863323704557 2025-06-25T14:55:23.891757Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:23.891845Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:23.897176Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21693, node 7 2025-06-25T14:55:23.960384Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:23.960411Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:23.960422Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:23.960605Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61124 TClient is connected to server localhost:61124 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:24.590757Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:24.604183Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:24.670418Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:24.802184Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:24.889993Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:24.968157Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:27.895920Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900729309061862:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:27.896031Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:27.976888Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.032347Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.110777Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.158342Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.257430Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.340687Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.391270Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.470910Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900733604029819:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.471025Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.471570Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519900733604029824:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.476058Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:28.491857Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519900733604029826:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:28.577506Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519900733604029877:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:28.716435Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519900712129191206:2179];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:28.716512Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryServiceScripts::TestAstWithCompression [GOOD] >> KqpQueryService::ExecuteQueryUpsertDoesntChangeIndexedValuesIfNotChanged >> KqpQueryServiceScripts::ExecuteScriptWithParameters [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithForgetAfter >> KqpQueryService::AlterTempTable >> KqpQueryService::DdlPermission [GOOD] >> KqpQueryService::DdlSecret >> TPersQueueTest::SetMeteringMode [GOOD] >> TPersQueueTest::TClusterTrackerTest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::TestAstWithCompression [GOOD] Test command err: Trying to start YDB, gRPC: 17790, MsgBus: 27738 2025-06-25T14:55:13.063279Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900670286939537:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:13.063605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00184f/r3tmp/tmptRfoRb/pdisk_1.dat 2025-06-25T14:55:13.500818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:13.500914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:13.502928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:13.535758Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900670286939355:2080] 1750863313021332 != 1750863313021335 2025-06-25T14:55:13.552531Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17790, node 1 2025-06-25T14:55:13.634623Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:13.634661Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:13.634667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:13.634765Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27738 2025-06-25T14:55:14.052650Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27738 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:14.431459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:14.476929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:14.495847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:55:14.658685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:14.811339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:14.871310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:16.155954Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900683171842876:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.156057Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.501555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.531300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.560275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.594127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.627006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.659150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.685547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.767609Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900683171843534:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.767679Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.767734Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900683171843539:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.770556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:16.778487Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900683171843541:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:16.861617Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900683171843592:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:17.881676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:17.883685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/yd ... 2.676506Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519900753164742024:2080] 1750863332474320 != 1750863332474323 2025-06-25T14:55:32.689857Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:32.689950Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:32.693559Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25861, node 3 2025-06-25T14:55:32.754061Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:32.754091Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:32.754101Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:32.754212Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22305 TClient is connected to server localhost:22305 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:33.353936Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:33.368588Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:33.439082Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:33.496729Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:55:33.646194Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:33.731744Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:36.174889Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900770344612828:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.174958Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.228978Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.258810Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.292529Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.333167Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.378456Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.426207Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.467335Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.549700Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900770344613482:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.549792Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.550046Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900770344613487:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.555314Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:36.567991Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900770344613489:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-06-25T14:55:36.627208Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900770344613542:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
2025-06-25T14:55:37.477843Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900753164742053:2061];send_to=[0:7307199536658146131:7762515];
2025-06-25T14:55:37.494808Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
2025-06-25T14:55:37.685734Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-25T14:55:37.686915Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
2025-06-25T14:55:37.689015Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
>> KqpQueryServiceScripts::ExecuteScriptWithWorkloadManager [GOOD]
>> KqpQueryServiceScripts::ExplainScript
>> KqpQueryServiceScripts::CancelScriptExecution [GOOD]
>> KqpQueryServiceScripts::EmptyNextFetchToken
>> KqpQueryService::TableSink_OltpReplace-HasSecondaryIndex [GOOD]
>> KqpQueryService::TableSink_OltpOrder
>> KqpQueryService::ExecuteQueryInteractiveTx [GOOD]
>> KqpQueryService::ExecuteQueryInteractiveTxCommitWithQuery
>> KqpQueryService::ShowCreateViewOnTable [GOOD]
>> KqpQueryServiceScripts::ExecuteScript
>> KqpQueryServiceScripts::ExecuteScriptStatsNone [GOOD]
>> KqpQueryService::CreateAndAlterTopic [GOOD]
>> KqpQueryService::CreateOrDropTopicOverTable
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDSEveryQueryWorks-ColumnStore
2025-06-25 14:55:38,877 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out
2025-06-25 14:55:39,097 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout.
Process tree before termination:
pid     rss    ref    pdirt
417308  47.4M  45.6M  24.0M  test_tool run_ut @/home/runner/.ya/build/build_root/yft8/000e85/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk173/testing_out_stuff/test_tool.args
417579  2.2G   2.2G   1.8G   └─ ydb-core-kqp-ut-join --trace-path-append /home/runner/.ya/build/build_root/yft8/000e85/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk173/ytest.report
Test command err: Trying to start YDB, gRPC: 63478, MsgBus: 8250
2025-06-25T14:45:40.638693Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519898210763879595:2070];send_to=[0:7307199536658146131:7762515];
2025-06-25T14:45:40.639142Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e85/r3tmp/tmphL9Tsa/pdisk_1.dat
2025-06-25T14:45:41.194276Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 63478, node 1
2025-06-25T14:45:41.229457Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-25T14:45:41.229548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-25T14:45:41.245244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-25T14:45:41.401169Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-25T14:45:41.401192Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-25T14:45:41.401202Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-25T14:45:41.401321Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-06-25T14:45:41.653401Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup;
TClient is connected to server localhost:8250
TClient is connected to server localhost:8250
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-25T14:45:42.210869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:45:42.224271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:45:43.982283Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898223648782084:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:43.982323Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519898223648782096:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:43.982381Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:45:43.985682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:45:43.996777Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519898223648782098:2297], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:45:44.061491Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519898227943749445:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:45:44.918374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.077409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.111839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.147069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.227444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.261422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.309731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.348618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.382660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.418223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.458121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.533032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.571125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.611756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:45:45.640384Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519898210763879595:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:45:45.640668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.me ... 
`/Root/test/ds/customer_demographics` cd2\n cross join `/Root/test/ds/customer_address` as customer_address\n cross join `/Root/test/ds/item` as item\n where cs_sold_date_sk = d_date_sk and\n cs_item_sk = i_item_sk and\n cs_bill_cdemo_sk = cd1.cd_demo_sk and\n cs_bill_customer_sk = c_customer_sk and\n cd1.cd_gender = 'F' and\n cd1.cd_education_status = 'Unknown' and\n c_current_cdemo_sk = cd2.cd_demo_sk and\n c_current_addr_sk = ca_address_sk and\n c_birth_month in (1,6,8,9,12,2) and\n d_year = 1998 and\n ca_state in ('MS','IN','ND'\n ,'OK','NM','VA','MS')\n group by rollup (item.i_item_id, customer_address.ca_country, customer_address.ca_state, customer_address.ca_county)\n order by ca_country,\n ca_state,\n ca_county,\n\ti_item_id, agg6\n limit 100;\n\n-- end query 1 in stream 0 using template query18.tpl\n", parameters: 0b 2025-06-25T14:54:15.696081Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyks9t1cbxd95n98cd1ave3z", SessionId: ydb://session/3?node_id=1&id=ZjMyYmE1MTItMWRmMTJiZjItYjk2NmIwNTktZTE0ODNhNGE=, Slow query, duration: 12.386352s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query22.tpl and seed 1819994127\nselect item.i_product_name\n ,item.i_brand\n ,item.i_class\n ,item.i_category\n ,avg(inv_quantity_on_hand) qoh\n from `/Root/test/ds/inventory` as inventory\n cross join `/Root/test/ds/date_dim` as date_dim\n cross join `/Root/test/ds/item` as item\n where inv_date_sk=d_date_sk\n and inv_item_sk=i_item_sk\n and d_month_seq between 1200 and 1200 + 11\n group by rollup(item.i_product_name\n ,item.i_brand\n ,item.i_class\n ,item.i_category)\norder by qoh, item.i_product_name, item.i_brand, item.i_class, item.i_category\nlimit 100;\n\n-- end query 1 in stream 0 using template query22.tpl\n", parameters: 0b 2025-06-25T14:55:01.337738Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyksa65ae128h9wrvxm0m09t", SessionId: ydb://session/3?node_id=1&id=ZjMyYmE1MTItMWRmMTJiZjItYjk2NmIwNTktZTE0ODNhNGE=, Slow query, duration: 45.614244s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- NB: Subquerys\n$blabla = (\n select substring(cast(item.i_item_desc as string),0,30) itemdesc,item.i_item_sk item_sk,date_dim.d_date solddate\n from `/Root/test/ds/store_sales` as store_sales\n cross join 
`/Root/test/ds/date_dim` as date_dim\n cross join `/Root/test/ds/item` as item\n where ss_sold_date_sk = d_date_sk\n and ss_item_sk = i_item_sk\n and d_year in (2000,2000+1,2000+2,2000+3)\n);\n$frequent_ss_items =\n (select itemdesc, item_sk, solddate,count(*) cnt\n from $blabla\n group by itemdesc,item_sk,solddate\n having count(*) >4);\n\n$max_store_sales =\n (select max(csales) tpcds_cmax\n from (select customer.c_customer_sk c_customer_sk,sum(ss_quantity*ss_sales_price) csales\n from `/Root/test/ds/store_sales` as store_sales\n cross join `/Root/test/ds/customer` as customer\n cross join `/Root/test/ds/date_dim` as date_dim\n where ss_customer_sk = c_customer_sk\n and ss_sold_date_sk = d_date_sk\n and d_year in (2000,2000+1,2000+2,2000+3)\n group by customer.c_customer_sk) x);\n\n$best_ss_customer =\n (select customer.c_customer_sk c_customer_sk,sum(ss_quantity*ss_sales_price) ssales\n from `/Root/test/ds/store_sales` as store_sales\n cross join `/Root/test/ds/customer` as customer\n where ss_customer_sk = c_customer_sk\n group by customer.c_customer_sk\n having sum(ss_quantity*ss_sales_price) > $z0_5_35 * $max_store_sales);\n\n-- start query 1 in stream 0 using template query23.tpl and seed 2031708268\nselect sum(sales)\n from (select cs_quantity*cs_list_price sales\n from `/Root/test/ds/catalog_sales` as catalog_sales\n cross join `/Root/test/ds/date_dim` as date_dim\n where d_year = 2000\n and d_moy = 2\n and cs_sold_date_sk = d_date_sk\n and cs_item_sk in (select item_sk from $frequent_ss_items)\n and cs_bill_customer_sk in (select c_customer_sk from $best_ss_customer)\n union all\n select ws_quantity*ws_list_price sales\n from `/Root/test/ds/web_sales` as web_sales\n cross join `/Root/test/ds/date_dim` as date_dim\n where d_year = 2000\n and d_moy = 2\n and ws_sold_date_sk = d_date_sk\n and ws_item_sk in (select item_sk from $frequent_ss_items)\n and ws_bill_customer_sk in (select c_customer_sk from $best_ss_customer)) y\n limit 100;\n\nselect c_last_name,c_first_name,sales\n from (select customer.c_last_name c_last_name,customer.c_first_name c_first_name,sum(cs_quantity*cs_list_price) sales\n from `/Root/test/ds/catalog_sales` as catalog_sales\n cross join `/Root/test/ds/customer` as customer\n cross join `/Root/test/ds/date_dim` as date_dim\n where d_year = 2000\n and d_moy = 2\n and cs_sold_date_sk = d_date_sk\n and cs_item_sk in (select item_sk from $frequent_ss_items)\n and cs_bill_customer_sk in (select c_customer_sk from $best_ss_customer)\n and cs_bill_customer_sk = c_customer_sk\n group by customer.c_last_name,customer.c_first_name\n union all\n select customer.c_last_name c_last_name,customer.c_first_name c_first_name,sum(ws_quantity*ws_list_price) sales\n from `/Root/test/ds/web_sales` as web_sales\n cross join `/Root/test/ds/customer` as customer\n cross join `/Root/test/ds/date_dim` as date_dim\n where d_year = 2000\n and d_moy = 2\n and ws_sold_date_sk = d_date_sk\n and ws_item_sk in (select item_sk from $frequent_ss_items)\n and ws_bill_customer_sk in (select c_customer_sk from $best_ss_customer)\n and ws_bill_customer_sk = c_customer_sk\n group by customer.c_last_name,customer.c_first_name) y\n order by c_last_name,c_first_name,sales\n limit 100;\n\n-- end query 1 in stream 0 using template query23.tpl\n", parameters: 0b 2025-06-25T14:55:25.749673Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyksbjqh36tzn6phtc7zcmb7", SessionId: ydb://session/3?node_id=1&id=ZjMyYmE1MTItMWRmMTJiZjItYjk2NmIwNTktZTE0ODNhNGE=, Slow query, duration: 24.387589s, 
status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- NB: Subquerys\n$ssales =\n(select customer.c_last_name c_last_name\n ,customer.c_first_name c_first_name\n ,store.s_store_name s_store_name\n ,customer_address.ca_state ca_state\n ,store.s_state s_state\n ,item.i_color i_color\n ,item.i_current_price i_current_price\n ,item.i_manager_id i_manager_id\n ,item.i_units i_units\n ,item.i_size i_size\n ,sum(ss_net_paid) netpaid\nfrom `/Root/test/ds/store_sales` as store_sales\n cross join `/Root/test/ds/store_returns` as store_returns\n cross join `/Root/test/ds/store` as store\n cross join `/Root/test/ds/item` as item\n cross join `/Root/test/ds/customer` as customer\n cross join `/Root/test/ds/customer_address` as customer_address\nwhere ss_ticket_number = sr_ticket_number\n and ss_item_sk = sr_item_sk\n and ss_customer_sk = c_customer_sk\n and ss_item_sk = i_item_sk\n and ss_store_sk = s_store_sk\n and c_current_addr_sk = ca_address_sk\n and c_birth_country <> Unicode::ToUpper(Cast(ca_country as Utf8))\n and s_zip = ca_zip\nand s_market_id=8\ngroup by customer.c_last_name\n ,customer.c_first_name\n ,store.s_store_name\n ,customer_address.ca_state\n ,store.s_state\n ,item.i_color\n ,item.i_current_price\n ,item.i_manager_id\n ,item.i_units\n ,item.i_size);\n\n$avg_netpaid = (select avg(netpaid) from $ssales);\n\n-- start query 1 in stream 0 using template query24.tpl and seed 1220860970\nselect c_last_name\n ,c_first_name\n ,s_store_name\n ,sum(netpaid) paid\nfrom $ssales\nwhere i_color = 'peach'\ngroup by c_last_name\n ,c_first_name\n ,s_store_name\nhaving sum(netpaid) > $z0_05_35*$avg_netpaid\norder by c_last_name\n ,c_first_name\n ,s_store_name\n;\n\nselect c_last_name\n ,c_first_name\n ,s_store_name\n ,sum(netpaid) paid\nfrom $ssales\nwhere i_color = 'saddle'\ngroup by c_last_name\n ,c_first_name\n ,s_store_name\nhaving sum(netpaid) > $z0_05_35*$avg_netpaid\norder by c_last_name\n ,c_first_name\n ,s_store_name\n;\n\n-- end query 1 in stream 0 using template query24.tpl\n", parameters: 0b Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/000e85/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk173/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File 
"library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/000e85/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk173/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ShowCreateViewOnTable [GOOD] Test command err: Trying to start YDB, gRPC: 20987, MsgBus: 29971 2025-06-25T14:55:24.940481Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900719643133820:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:24.940651Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00182e/r3tmp/tmp3SN6IU/pdisk_1.dat 2025-06-25T14:55:25.260393Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900719643133802:2080] 1750863324939791 != 1750863324939794 2025-06-25T14:55:25.276836Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20987, node 1 2025-06-25T14:55:25.350221Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:25.350238Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:25.350243Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:25.350328Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:25.356506Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:25.356608Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:25.361379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29971 TClient is connected to server localhost:29971 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:25.831661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:25.865334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:25.949529Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:26.046493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:26.200915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:26.280056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:27.852951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900732528037326:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:27.853062Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.210463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.285594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.338523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.371764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.400191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.430354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.466569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.557841Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900736823005290:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.557937Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.558181Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900736823005295:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.561937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:28.573521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:55:28.573996Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900736823005297:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:28.637788Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900736823005348:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:29.940942Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900719643133820:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:29.941034Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 13304, MsgBus: 9172 2025-06-25T14:55:30.757166Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900745824379593:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:30.757218Z node 2 :METADATA_PROVIDER ERROR: log.cpp ... subscription [3:7519900769774733511:2080] 1750863336417622 != 1750863336417625 TServer::EnableGrpc on GrpcPort 9837, node 3 2025-06-25T14:55:36.592297Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:36.592425Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:36.594147Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:36.637166Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:36.637200Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:36.637209Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:36.637327Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20960 TClient is connected to server localhost:20960 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:37.147087Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:37.153571Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:37.162430Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:37.249129Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:37.414708Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:37.460539Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:55:37.489597Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:39.659741Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900782659637028:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:39.659825Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:39.720700Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:39.790714Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:39.824548Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:39.864302Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:39.904403Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:39.943343Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:40.020498Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:40.121755Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900786954604992:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:40.121870Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:40.122250Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900786954604997:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:40.126662Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:40.145293Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900786954604999:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:40.205389Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900786954605050:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:41.419481Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900769774733531:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:41.419540Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:41.508908Z node 3 :SYSTEM_VIEWS ERROR: scan_actor_base_impl.h:98: Scan error, actor: [3:7519900791249572637:2484], owner: [3:7519900791249572634:2482], scan id: 0, sys view info: Type: EShowCreate SourceObject { OwnerId: 1 LocalId: 0 }, error: Path type mismatch, expected: View, found: Table 2025-06-25T14:55:41.509832Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519900791249572635:2483], TxId: 281474976715672, task: 2. Ctx: { SessionId : ydb://session/3?node_id=3&id=ODU4Y2JlMGQtNWIzMDk0M2YtYmU2YTZiYWMtOWEwYmExODA=. CustomerSuppliedId : . TraceId : 01jykscsqn8aw651fd5xhphs67. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:7519900791249572631:2473], status: BAD_REQUEST, reason: {
: Error: Terminate execution }
2025-06-25T14:55:41.510340Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ODU4Y2JlMGQtNWIzMDk0M2YtYmU2YTZiYWMtOWEwYmExODA=, ActorId: [3:7519900791249572615:2473], ActorState: ExecuteState, TraceId: 01jykscsqn8aw651fd5xhphs67, Create QueryResponse for error on request, msg:
>> KqpService::SwitchCache-UseCache
>> TPersQueueTest::Delete [GOOD]
>> TPersQueueTest::DisableWrongSettings
>> KqpQueryService::TableSink_Olap_Replace [GOOD]
>> KqpQueryService::TableSink_OlapUpsert
>> KqpDocumentApi::Scripting [GOOD]
>> KqpQueryService::AlterTable_DropNotNull_Valid
------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ExecuteScriptStatsNone [GOOD]
Test command err: Trying to start YDB, gRPC: 2526, MsgBus: 11694
2025-06-25T14:55:21.064669Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900705597497774:2135];send_to=[0:7307199536658146131:7762515];
2025-06-25T14:55:21.068254Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183a/r3tmp/tmpGrJpMg/pdisk_1.dat
2025-06-25T14:55:21.436265Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 2526, node 1
2025-06-25T14:55:21.490270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-25T14:55:21.490412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-25T14:55:21.508211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-25T14:55:21.572769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-25T14:55:21.572803Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-25T14:55:21.572815Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-25T14:55:21.572927Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:11694
TClient is connected to server localhost:11694
WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:55:22.072898Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:22.124413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:22.151531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:22.295154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:22.464694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:22.548119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:24.157210Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900718482401195:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:24.157325Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:24.417101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.454677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.479630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.504208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.571564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.638551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.666522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.752485Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900718482401865:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:24.752603Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:24.752783Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900718482401870:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:24.755993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:24.767887Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900718482401872:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:24.840916Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900718482401923:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:25.789630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:25.791001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:25.792097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/ ... -06-25T14:55:35.547106Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:35.547190Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 11119, node 3 2025-06-25T14:55:35.549435Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:35.611262Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:35.611288Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:35.611297Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:35.611417Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16804 TClient is connected to server localhost:16804 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:36.160096Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:36.167070Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:36.184700Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:36.246078Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:36.379191Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:36.404076Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:36.479724Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:38.637440Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900777744359323:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:38.637523Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:38.712497Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:38.757332Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:38.813753Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:38.848849Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:38.893515Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:38.923953Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:38.992909Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:39.058519Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900782039327276:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:39.058609Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:39.058895Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900782039327281:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:39.062627Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:39.074158Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900782039327283:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:39.162050Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900782039327334:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:40.254105Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:40.255630Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:40.261042Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:40.360478Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900764859455817:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:40.360553Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryService::TableSink_OltpUpsert >> KqpQueryService::TempTablesDrop >> BasicUsage::WaitEventBlocksBeforeDiscovery [GOOD] >> BasicUsage::SimpleHandlers >> KqpQueryService::TableSink_OlapUpdate >> KqpQueryServiceScripts::ForgetScriptExecutionRace [GOOD] >> KqpQueryServiceScripts::InvalidFetchToken >> KqpQueryService::AlterTempTable [GOOD] >> KqpQueryService::CTASWithoutPerStatement >> KqpQueryService::MixedReadQueryWithoutStreamLookup [GOOD] >> KqpService::RangeCache-UseCache [GOOD] >> KqpQueryService::ExecuteQueryInteractiveTxCommitWithQuery [GOOD] >> KqpQueryService::ExecuteQueryMultiResult >> KqpQueryService::AlterTable_DropNotNull_Valid [GOOD] >> KqpQueryService::AlterCdcTopic >> KqpQueryServiceScripts::ExplainScript [GOOD] >> KqpQueryServiceScripts::ForgetScriptExecution ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::MixedReadQueryWithoutStreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 28674, MsgBus: 19290 2025-06-25T14:55:25.324600Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900724525620629:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:25.339199Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00182d/r3tmp/tmpVUgRtm/pdisk_1.dat 2025-06-25T14:55:25.691292Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28674, node 1 2025-06-25T14:55:25.751572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:25.751681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:25.765102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:25.790128Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:25.790154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:25.790163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:25.790278Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19290 TClient is connected to server localhost:19290 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:55:26.309862Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:26.363229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:26.402605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:26.551839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:26.710998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:26.789439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:28.450290Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900737410524038:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.450369Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.778354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.810193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.834649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.864349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.945756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.991212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:29.037839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:29.135661Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900741705491996:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:29.135712Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:29.135877Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900741705492001:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:29.138600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:29.154357Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900741705492003:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:29.216023Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900741705492054:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:30.242511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.292661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.305202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900724525620629:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:30.305305Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_bas ... ound; 2025-06-25T14:55:45.008564Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.013683Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.013683Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.014316Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.014317Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.019246Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.019259Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.019890Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.019905Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.025248Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.025247Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.025876Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.025879Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.031047Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.031149Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037921;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.031677Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.031704Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037952;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.037084Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037952;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.037084Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.037705Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037915;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.037724Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037937;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.043297Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037915;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.043320Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037937;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.043908Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.043917Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.048524Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.049097Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.049120Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.049709Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.053685Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.054119Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.054643Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.055165Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.058791Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.059398Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.060348Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.060918Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:55:45.064528Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.064959Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T14:55:45.372667Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:55:45.372755Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:55:45.372959Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[3:7519900799611067481:2373];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037899;local_tx_no=13;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037907,72075186224037947;receive=72075186224037903; 2025-06-25T14:55:45.372998Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[3:7519900799611067481:2373];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037899;local_tx_no=14;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037907,72075186224037947;receive=72075186224037903; 2025-06-25T14:55:45.373077Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[3:7519900799611067481:2373];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037899;local_tx_no=16;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037947;receive=72075186224037907; 2025-06-25T14:55:45.373109Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[3:7519900799611067481:2373];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037899;local_tx_no=17;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037947;receive=72075186224037907; 2025-06-25T14:55:45.373501Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:55:45.374130Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; >> KqpQueryService::TableSink_OltpUpsert [GOOD] >> KqpQueryService::TableSink_OltpUpdate >> KqpQueryService::TableSink_OlapUpsert [GOOD] >> KqpQueryService::TableSink_OltpDelete >> KqpQueryService::TempTablesDrop [GOOD] >> 
KqpQueryService::Tcl >> KqpQueryService::CreateOrDropTopicOverTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpService::RangeCache-UseCache [GOOD] Test command err: Trying to start YDB, gRPC: 13420, MsgBus: 15463 2025-06-25T14:55:21.369610Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900705794225464:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:21.372053Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001837/r3tmp/tmpHo8Ur0/pdisk_1.dat 2025-06-25T14:55:21.719299Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900705794225357:2080] 1750863321359429 != 1750863321359432 2025-06-25T14:55:21.722377Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13420, node 1 2025-06-25T14:55:21.799478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:21.799616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:21.801125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:21.816766Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:21.816785Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:21.816788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:21.816869Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15463 TClient is connected to server localhost:15463 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:22.368724Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:22.372230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:22.408521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:22.431480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:22.670483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:22.846943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:22.926588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:24.380968Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900718679128870:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:24.381085Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:24.758792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.782435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.813941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.839878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.877333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.914144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:24.987387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:25.055199Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900722974096824:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:25.055295Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:25.055693Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900722974096829:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:25.059333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:25.070408Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900722974096831:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:25.161513Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900722974096882:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:26.208706Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=1&id=OWZmMGZhY2QtYmY2ZjZkYzUtMTA3OWE4YzEtYzIzZWIyMjA=, ActorId: [1:7519900727269064446:2473], ActorState: ExecuteState, TraceId: 01jykscazt8dw0kc4z6kh6hsq4, Reply query error, msg: Pending previous query completion proxyRequestId: 7 2025-06-25T14:55:26.209407Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=1&id=OWZmMGZhY2QtYmY2ZjZkYzUtMTA3OWE4YzEtYzIzZWIyMjA=, ActorId: [1:7519900727269064446:2473], ActorState: ExecuteState, TraceId: 01jykscazt8dw0kc4z6kh6hsq4, Reply query error, msg: Pending previous query completion proxyRequestId: 8 2025-06-25T14:55:26.217965Z node 1 :KQP_SESSION WARN: kqp_s ... roxyRequestId: 64 2025-06-25T14:55:32.928865Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2246: SessionId: ydb://session/3?node_id=2&id=YWQzZTZhNzQtYjhiMDBiNzQtMTdmZDI1OGEtN2YzOTkxY2M=, ActorId: [2:7519900750990569201:2578], ActorState: ExecuteState, TraceId: 01jykschhyfwhgcmrsm5eyr2xq, Reply query error, msg: Pending previous query completion proxyRequestId: 67 Trying to start YDB, gRPC: 1909, MsgBus: 17934 2025-06-25T14:55:34.118403Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519900760062592365:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:34.118478Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001837/r3tmp/tmpGLEgOu/pdisk_1.dat 2025-06-25T14:55:34.272949Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:34.273027Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:34.276073Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:34.283729Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1909, node 3 2025-06-25T14:55:34.354994Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:34.355017Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:34.355024Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:34.355143Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17934 TClient is connected to server localhost:17934 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:55:34.995639Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:55:35.004784Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:55:35.017907Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.083702Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:35.199025Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:35.275537Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:35.357686Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:37.723978Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900772947495840:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:37.724050Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:37.775918Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.810092Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.845062Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.876467Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.907273Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.951146Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.999371Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:38.111531Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900777242463794:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:38.111627Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:38.111925Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900777242463799:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:38.115964Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:38.128754Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900777242463801:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:38.199024Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900777242463852:3412] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:39.119718Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900760062592365:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:39.119795Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; took: 8.179169s took: 8.221127s took: 8.215027s took: 8.243138s took: 8.223562s took: 8.204889s took: 8.206523s took: 8.258892s took: 8.261066s took: 8.241644s >> KqpQueryServiceScripts::ExecuteScript [GOOD] >> KqpQueryServiceScripts::ExecuteMultiScript >> KqpNamedExpressions::NamedExpressionRandomDataQuery-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsertDataQuery+UseSink >> KqpQueryServiceScripts::EmptyNextFetchToken [GOOD] >> KqpQueryService::CTASWithoutPerStatement [GOOD] >> KqpQueryService::CheckIsolationLevelFroPerStatementMode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::CreateOrDropTopicOverTable [GOOD] Test command err: Trying to start YDB, gRPC: 19958, MsgBus: 25429 2025-06-25T14:55:26.969652Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900728669219592:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:26.972537Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001821/r3tmp/tmprr4Jb0/pdisk_1.dat 2025-06-25T14:55:27.325858Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:27.328493Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900728669219495:2080] 1750863326963230 != 1750863326963233 TServer::EnableGrpc on GrpcPort 19958, node 1 2025-06-25T14:55:27.388012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:27.388120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:27.392288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:27.416847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:27.416871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:27.416876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:27.417007Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:25429 TClient is connected to server localhost:25429 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:55:27.972413Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:28.104916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:28.140506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:28.149955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:55:30.074057Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900745849089342:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:30.074128Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900745849089333:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:30.074263Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:30.078269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:30.087548Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900745849089347:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:55:30.143237Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900745849089398:2340] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:30.436331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-25T14:55:30.444899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-25T14:55:30.448989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.769468Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900745849089633:2465] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/OTBhZGZjMWMtNTM5YzhlODMtODlmNWU5ZGMtNjU2OGI5OGQ=\', error: path is temporary (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:30.778828Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=OTBhZGZjMWMtNTM5YzhlODMtODlmNWU5ZGMtNjU2OGI5OGQ=, ActorId: [1:7519900745849089327:2291], ActorState: ExecuteState, TraceId: 01jykscfdre2g7mhtaqvce913b, Create QueryResponse for error on request, msg: 2025-06-25T14:55:30.814016Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900745849089660:2328], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:17: Error: At function: KiReadTable!
:3:17: Error: Cannot find table 'db.[/Root/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:30.814458Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZTM3MDBkOWYtZDE4NDdhNS1mNjVlYjJhZS0yYjEyZDc3, ActorId: [1:7519900745849089655:2326], ActorState: ExecuteState, TraceId: 01jykscffdbe6e8g06e1qm6kac, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:30.822118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) 2025-06-25T14:55:30.828541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) 2025-06-25T14:55:30.836212Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:55:31.972714Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900728669219592:2134];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:31.972795Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 6820, MsgBus: 28304 2025-06-25T14:55:32.862333Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900753597906291:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:32.862379Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001821/r3tmp/tmpFI0Fmu/pdisk_1.dat 2025-06-25T14:55:33.019126Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6820, node 2 2025-06-25T14:55:33.050732Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:33.050820Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:33.052331Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:33.136959Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:33.136980Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:33.136986Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:55:33.137078Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28304 TClient is connected to server localhost:28304 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls ... _work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:48.664670Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519900820873004098:3648] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/TmpTable\', error: unexpected path type (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), expected types: EPathTypePersQueueGroup" severity: 1 } 2025-06-25T14:55:48.664727Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715673, ProxyStatus: ExecError, SchemeShardReason: Check failed: path: '/Root/TmpTable', error: unexpected path type (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), expected types: EPathTypePersQueueGroup 2025-06-25T14:55:48.664864Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZmMzNzk0YzgtNWI5OGMzNjktNjcyOTE4NTktZmRlNmYxNDk=, ActorId: [4:7519900820873004088:2491], ActorState: ExecuteState, TraceId: 01jyksd0x8f1kyyqffmxhjb3x1, Create QueryResponse for error on request, msg: Query failed, status: GENERIC_ERROR:
: Error: Scheme operation failed, status: ExecError, reason: Check failed: path: '/Root/TmpTable', error: unexpected path type (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), expected types: EPathTypePersQueueGroup Scheme entry: { name: .metadata, owner: metadata@system, type: Directory, size_bytes: 0, created_at: { plan_step: 1750863347436, tx_id: 281474976715669 } } Scheme entry: { name: BatchUpload, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863344825, tx_id: 281474976715661 } } Scheme entry: { name: EightShard, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863344559, tx_id: 281474976715659 } } Scheme entry: { name: Join1, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347177, tx_id: 281474976715666 } } Scheme entry: { name: Join2, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347268, tx_id: 281474976715667 } } Scheme entry: { name: KeyValue, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863346995, tx_id: 281474976715662 } } Scheme entry: { name: KeyValue2, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347037, tx_id: 281474976715663 } } Scheme entry: { name: KeyValueLargePartition, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347072, tx_id: 281474976715664 } } Scheme entry: { name: Logs, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863344643, tx_id: 281474976715660 } } Scheme entry: { name: Test, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347107, tx_id: 281474976715665 } } Scheme entry: { name: TmpTable, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863348633, tx_id: 281474976715672 } } Scheme entry: { name: TuplePrimaryDescending, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347338, tx_id: 281474976715668 } } Scheme entry: { name: TwoShard, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863344398, tx_id: 281474976715658 } } Scheme entry: { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } } 2025-06-25T14:55:48.721005Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519900820873004118:3659] txid# 281474976715675, issues: { message: "Check failed: path: \'/Root/TmpTable\', error: unexpected path type (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), expected types: EPathTypePersQueueGroup" severity: 1 } 2025-06-25T14:55:48.721083Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715675, ProxyStatus: ExecError, SchemeShardReason: Check failed: path: '/Root/TmpTable', error: unexpected path type (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), expected types: EPathTypePersQueueGroup 2025-06-25T14:55:48.721248Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZmMzNzk0YzgtNWI5OGMzNjktNjcyOTE4NTktZmRlNmYxNDk=, ActorId: [4:7519900820873004088:2491], ActorState: ExecuteState, TraceId: 01jyksd0yz58gq70v9hhyd820q, Create QueryResponse for error on request, msg: Query failed, status: GENERIC_ERROR:
: Error: Scheme operation failed, status: ExecError, reason: Check failed: path: '/Root/TmpTable', error: unexpected path type (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), expected types: EPathTypePersQueueGroup Scheme entry: { name: .metadata, owner: metadata@system, type: Directory, size_bytes: 0, created_at: { plan_step: 1750863347436, tx_id: 281474976715669 } } Scheme entry: { name: BatchUpload, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863344825, tx_id: 281474976715661 } } Scheme entry: { name: EightShard, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863344559, tx_id: 281474976715659 } } Scheme entry: { name: Join1, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347177, tx_id: 281474976715666 } } Scheme entry: { name: Join2, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347268, tx_id: 281474976715667 } } Scheme entry: { name: KeyValue, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863346995, tx_id: 281474976715662 } } Scheme entry: { name: KeyValue2, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347037, tx_id: 281474976715663 } } Scheme entry: { name: KeyValueLargePartition, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347072, tx_id: 281474976715664 } } Scheme entry: { name: Logs, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863344643, tx_id: 281474976715660 } } Scheme entry: { name: Test, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347107, tx_id: 281474976715665 } } Scheme entry: { name: TmpTable, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863348633, tx_id: 281474976715672 } } Scheme entry: { name: TuplePrimaryDescending, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347338, tx_id: 281474976715668 } } Scheme entry: { name: TwoShard, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863344398, tx_id: 281474976715658 } } Scheme entry: { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } } 2025-06-25T14:55:48.788843Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519900820873004138:3670] txid# 281474976715677, issues: { message: "Check failed: path: \'/Root/TmpTable\', error: path is not a topic (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:48.789077Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715677, ProxyStatus: ExecError, SchemeShardReason: Check failed: path: '/Root/TmpTable', error: path is not a topic (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges) 2025-06-25T14:55:48.789233Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZmMzNzk0YzgtNWI5OGMzNjktNjcyOTE4NTktZmRlNmYxNDk=, ActorId: [4:7519900820873004088:2491], ActorState: ExecuteState, TraceId: 01jyksd10r18sbrbmf5xennzad, Create QueryResponse for error on request, msg: Query failed, status: GENERIC_ERROR:
: Error: Scheme operation failed, status: ExecError, reason: Check failed: path: '/Root/TmpTable', error: path is not a topic (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges) 2025-06-25T14:55:48.818297Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519900820873004153:3677] txid# 281474976715679, issues: { message: "Check failed: path: \'/Root/TmpTable\', error: path is not a topic (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:48.818391Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715679, ProxyStatus: ExecError, SchemeShardReason: Check failed: path: '/Root/TmpTable', error: path is not a topic (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges) 2025-06-25T14:55:48.818623Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZmMzNzk0YzgtNWI5OGMzNjktNjcyOTE4NTktZmRlNmYxNDk=, ActorId: [4:7519900820873004088:2491], ActorState: ExecuteState, TraceId: 01jyksd11x2y0vj90gc1r14n69, Create QueryResponse for error on request, msg: Query failed, status: GENERIC_ERROR:
: Error: Scheme operation failed, status: ExecError, reason: Check failed: path: '/Root/TmpTable', error: path is not a topic (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges) Scheme entry: { name: .metadata, owner: metadata@system, type: Directory, size_bytes: 0, created_at: { plan_step: 1750863347436, tx_id: 281474976715669 } } Scheme entry: { name: BatchUpload, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863344825, tx_id: 281474976715661 } } Scheme entry: { name: EightShard, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863344559, tx_id: 281474976715659 } } Scheme entry: { name: Join1, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347177, tx_id: 281474976715666 } } Scheme entry: { name: Join2, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347268, tx_id: 281474976715667 } } Scheme entry: { name: KeyValue, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863346995, tx_id: 281474976715662 } } Scheme entry: { name: KeyValue2, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347037, tx_id: 281474976715663 } } Scheme entry: { name: KeyValueLargePartition, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347072, tx_id: 281474976715664 } } Scheme entry: { name: Logs, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863344643, tx_id: 281474976715660 } } Scheme entry: { name: Test, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347107, tx_id: 281474976715665 } } Scheme entry: { name: TmpTable, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863348633, tx_id: 281474976715672 } } Scheme entry: { name: TuplePrimaryDescending, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863347338, tx_id: 281474976715668 } } Scheme entry: { name: TwoShard, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1750863344398, tx_id: 281474976715658 } } Scheme entry: { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } } 2025-06-25T14:55:48.879605Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) >> KqpQueryServiceScripts::ExecuteScriptWithForgetAfter [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithResultsTtl >> KqpQueryService::TableSink_HtapInteractive-withOltpSink >> KqpQueryServiceScripts::ParseScript ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::EmptyNextFetchToken [GOOD] Test command err: Trying to start YDB, gRPC: 16181, MsgBus: 23737 2025-06-25T14:55:28.109175Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900737662653067:2109];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:28.115609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00181f/r3tmp/tmpDpoagS/pdisk_1.dat 2025-06-25T14:55:28.567086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:28.567182Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:28.570455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:28.608417Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900737662652984:2080] 1750863328080766 != 1750863328080769 2025-06-25T14:55:28.609135Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16181, node 1 2025-06-25T14:55:28.793009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:28.793041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:28.793056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:28.793219Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23737 2025-06-25T14:55:29.120428Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23737 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:29.463954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:55:29.505369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:29.636503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:29.788108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:29.868336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:31.435026Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900750547556507:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:31.435143Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:31.749740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:31.786760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:31.819781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:31.868763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:31.906839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:31.946277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:31.989341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:32.088387Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900754842524459:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:32.088504Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:32.088793Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900754842524464:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:32.093715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:32.104712Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900754842524466:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:32.169972Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900754842524519:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:33.104388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900737662653067:2109];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:33.104459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 13884, MsgBus: 61654 2025-06-25T14:55:34.121320Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900761892643479:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:34.132289Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:42.263277Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16072, node 3 2025-06-25T14:55:42.376831Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:42.376857Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:42.376865Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:42.376983Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22632 TClient is connected to server localhost:22632 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:43.038564Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:43.048391Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:43.056574Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:43.122604Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:43.134885Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:55:43.285068Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:43.355897Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:45.524429Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900810279598163:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:45.524516Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:45.573518Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:45.616158Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:45.650771Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:45.681847Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:45.756191Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:45.802189Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:45.875535Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:45.943381Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900810279598828:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:45.943463Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:45.943621Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900810279598833:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:45.947112Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:45.967725Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900810279598835:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:46.030797Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900814574566182:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:47.110260Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.111870Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.112832Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.114153Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900797394694674:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:47.114205Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:49.608907Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863349641, txId: 281474976715703] shutting down >> KqpQueryService::FlowControllOnHugeLiteralAsTable >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning-UseSink+UseDataQuery >> KqpQueryService::TableSink_OlapUpdate [GOOD] >> KqpQueryService::TableSink_OlapOrder >> KqpService::CloseSessionsWithLoad >> KqpQueryServiceScripts::InvalidFetchToken [GOOD] >> KqpQueryServiceScripts::TestTruncatedByRows [GOOD] >> KqpQueryServiceScripts::TestTruncatedBySize >> KqpService::SwitchCache+UseCache [GOOD] >> KqpQueryService::TableSink_OltpUpdate [GOOD] >> KqpQueryService::TableSink_Oltp_Replace+UseSink >> KqpQueryService::Tcl [GOOD] >> KqpQueryService::TableSink_ReplaceFromSelectOlap >> KqpQueryService::TableSink_OltpDelete [GOOD] >> KqpQueryService::ExecuteQueryUpsertDoesntChangeIndexedValuesIfNotChanged [GOOD] >> KqpQueryService::ExecuteQueryPure >> KqpQueryService::ExecuteQueryMultiResult [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithTimeout [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithResultsTtlAndForgetAfter ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::InvalidFetchToken [GOOD] Test command err: 
Trying to start YDB, gRPC: 16627, MsgBus: 4588 2025-06-25T14:55:29.208799Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900740050656767:2167];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:29.208887Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001817/r3tmp/tmpv2Mzaw/pdisk_1.dat 2025-06-25T14:55:29.732815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:29.732927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:29.735489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:29.764649Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16627, node 1 2025-06-25T14:55:29.956587Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:29.956614Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:29.956628Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:29.956732Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4588 2025-06-25T14:55:30.202844Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4588 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:30.590600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:55:30.610798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:30.630085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:30.768803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:30.905866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:30.991565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:32.518562Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900752935560155:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:32.518661Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:32.802179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:32.833783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:32.862568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:32.907548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:32.978610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:33.022143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:33.061991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:33.174176Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900757230528109:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:33.174273Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:33.174788Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900757230528114:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:33.179341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:33.193269Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900757230528116:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:33.276456Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900757230528169:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:34.197742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:34.199041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:34.200030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propo ... ubscription [3:7519900813535400771:2080] 1750863346366095 != 1750863346366098 2025-06-25T14:55:46.519663Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:46.521565Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:46.521649Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8468, node 3 2025-06-25T14:55:46.526176Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:46.600750Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:46.600772Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:46.600783Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:46.600882Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63268 TClient is connected to server localhost:63268 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:47.146765Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:55:47.165687Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.251951Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:47.371938Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:55:47.414712Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.524289Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:49.937477Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900826420304280:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:49.937548Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:50.001030Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:50.042545Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:50.075493Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:50.114163Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:50.146119Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:50.194520Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:50.267268Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:50.338251Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900830715272238:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:50.338332Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:50.338786Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900830715272243:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:50.343174Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:50.354842Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900830715272245:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:50.449196Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900830715272296:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:51.368105Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900813535400787:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:51.368201Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:51.532299Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:51.535788Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:51.538380Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpQueryService::AlterCdcTopic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpService::SwitchCache+UseCache [GOOD] Test command err: Trying to start YDB, gRPC: 16616, MsgBus: 14733 2025-06-25T14:55:12.729077Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900668067439378:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:12.736118Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001859/r3tmp/tmp1sNw42/pdisk_1.dat 2025-06-25T14:55:13.248760Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900668067439347:2080] 1750863312709275 != 1750863312709278 2025-06-25T14:55:13.251642Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:13.253241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:13.253748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:13.264930Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16616, node 1 2025-06-25T14:55:13.556818Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:13.556833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:13.556838Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:13.556921Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:13.718891Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14733 TClient is connected to server localhost:14733 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:14.346215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:14.372160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:14.388449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:14.577608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:14.719197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:14.811752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:15.798508Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900680952342883:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:15.798626Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.350182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.375263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.399755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.427252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.460016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.525453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.601089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:16.691798Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900685247310847:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.691897Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.691956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900685247310852:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.695959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:16.711501Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900685247310854:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:16.811406Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900685247310909:3430] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:17.729299Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900668067439378:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:17.729359Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:18.854034Z node 1 :KQP_SESSION ERROR: kqp_session_actor.cpp:2865: SessionId: ydb://session/3?node_id=1&id=ZTM4MzIxY2UtNWI0ODg4OTEtODg1MjY0MzktNmQ2YmI3ZDc=, ActorId: [1:7519900689542278530:2529], ActorState: ReadyState, Internal error, message: TKqpSessionActor in state ReadyState received unexpected event NKikimr::N ... ered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001859/r3tmp/tmprDti7x/pdisk_1.dat 2025-06-25T14:55:27.507874Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:27.520378Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519900731239872332:2080] 1750863327393649 != 1750863327393652 TServer::EnableGrpc on GrpcPort 15162, node 3 2025-06-25T14:55:27.540913Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:27.541027Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:27.551634Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:27.579978Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:27.580000Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:27.580007Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:27.580122Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13951 TClient is connected to server localhost:13951 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:28.095347Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:28.108558Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:28.127136Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:28.199708Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:28.355262Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:28.409106Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:28.437649Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:30.737423Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900744124775849:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:30.737516Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:30.814685Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.847197Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.878823Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.908528Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.943738Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:31.021220Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:31.096519Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:31.166891Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900748419743811:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:31.166977Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:31.167080Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900748419743816:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:31.170775Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:31.215081Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900748419743818:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:31.313777Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900748419743869:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:32.396412Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900731239872354:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:32.396505Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:32.589253Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:42.489656Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:55:42.489682Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded took: 20.675139s took: 20.675676s took: 20.677174s took: 20.678640s took: 20.680721s took: 20.685342s took: 20.684872s took: 20.684602s took: 20.693259s took: 20.706485s ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OltpDelete [GOOD] Test command err: Trying to start YDB, gRPC: 23426, MsgBus: 2917 2025-06-25T14:55:38.723962Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900776684030781:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:38.729388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017fc/r3tmp/tmptPw5or/pdisk_1.dat 2025-06-25T14:55:39.062729Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23426, node 1 2025-06-25T14:55:39.154639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:39.154673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:39.154685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:39.154778Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:39.161008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:39.161122Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:39.162321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> 
Connected TClient is connected to server localhost:2917 TClient is connected to server localhost:2917 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:55:39.729713Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:39.827186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:39.850822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:41.613281Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900789568933259:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:41.613396Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:41.901728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:55:42.075713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900789568933370:2296];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:55:42.075958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900789568933370:2296];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:55:42.076217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900789568933370:2296];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:55:42.076365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900789568933370:2296];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:55:42.076477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900789568933370:2296];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:55:42.076596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900789568933370:2296];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:55:42.076697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900789568933370:2296];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:55:42.076783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900789568933370:2296];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:55:42.076862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900789568933370:2296];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:55:42.076990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900789568933370:2296];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:55:42.077093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7519900789568933370:2296];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:55:42.097588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900793863900733:2304];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:55:42.097644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900793863900733:2304];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:55:42.097824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900793863900733:2304];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:55:42.097911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900793863900733:2304];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:55:42.098001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900793863900733:2304];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:55:42.098085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900793863900733:2304];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:55:42.098159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900793863900733:2304];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:55:42.098234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900793863900733:2304];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:55:42.098340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900793863900733:2304];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:55:42.098447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900793863900733:2304];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:55:42.098525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900793863900733:2304];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:55:42.128647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7519900793863900803:2305];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:55:42.128706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900793863900803:2305];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:55:42.128911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900793863900803:2305];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:55:42.129001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900793863900803:2305];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLAS ... og.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:47.829420Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:55:47.830173Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:47.831000Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:55:47.834368Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:47.836614Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:55:47.860711Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900818653383361:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:47.860784Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:47.864652Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900818653383366:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:47.868796Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:47.887179Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900818653383368:2355], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:55:47.971085Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900818653383419:2569] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:48.122465Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:55:48.122968Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715663;tx_id=281474976715663;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715663; 2025-06-25T14:55:48.685263Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:55:48.685342Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:55:48.719367Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519900822948351065:2504], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Missing key column in input: Col1 for table: /Root/DataShard, code: 2029 2025-06-25T14:55:48.719582Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZjQ0YzQxM2ItYzBlOGMzMmMtNWMxMWQ3MjgtNjBhYzc1YzE=, ActorId: [2:7519900822948351063:2503], ActorState: ExecuteState, TraceId: 01jyksd0ync0t1fy6ecy6kgwpq, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: Trying to start YDB, gRPC: 1647, MsgBus: 11826 2025-06-25T14:55:49.757186Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519900827866208758:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:49.757219Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017fc/r3tmp/tmpE7VbIA/pdisk_1.dat 2025-06-25T14:55:49.934609Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:49.955473Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:49.955576Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:49.960657Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1647, node 3 2025-06-25T14:55:50.016822Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:50.016846Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:50.016854Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:50.016972Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11826 TClient is connected to server localhost:11826 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:50.483853Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:50.491435Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:50.788071Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:53.063059Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900845046078545:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.063149Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.088560Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:53.192368Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900845046078648:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.192465Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.196455Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900845046078653:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.200224Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:53.211938Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900845046078655:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:55:53.311137Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900845046078706:2390] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ExecuteQueryMultiResult [GOOD] Test command err: Trying to start YDB, gRPC: 13686, MsgBus: 19017 2025-06-25T14:55:30.292079Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900742262248887:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:30.298196Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001816/r3tmp/tmpNY43bv/pdisk_1.dat 2025-06-25T14:55:30.663635Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13686, node 1 2025-06-25T14:55:30.737385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:30.737750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:30.739648Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:30.757676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:30.757712Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:30.757744Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:30.757832Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19017 TClient is connected to server localhost:19017 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:55:31.293500Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:31.303027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:55:31.329415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:31.466442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:31.636791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:31.709814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:33.433375Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900755147152318:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:33.433478Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:33.716036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:33.744916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:33.775432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:33.815090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:33.849917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:33.898920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:33.969197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:34.039002Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900759442120271:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:34.039081Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:34.044537Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900759442120276:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:34.048600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:34.058872Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900759442120278:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:34.155129Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900759442120329:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:35.280410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900742262248887:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:35.280479Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:35.578402Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:7519900763737087951:2473], SessionActorId: [1:7519900763737087893:2473], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/TwoShard`, code: 2001 . sessionActorId=[1:7519900763737087893:2473]. isRollback=0 2025-06-25T14:55:35.578655Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=ZjgzZjdhNDQtNTY0NTkwMDctNGEyYWNkYzgtNjBhYmYyYzk=, ActorId: [1:7519900763737087893:2473], ActorState: ExecuteState, TraceId: 01jykscm4c3gk4per2kdgp880c, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7519900763737087952:2473] from: [1:751990076373 ... s.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900793841151849:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:47.345363Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 63288, MsgBus: 16184 2025-06-25T14:55:48.737021Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519900822042080515:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:48.737076Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001816/r3tmp/tmprdwzkl/pdisk_1.dat 2025-06-25T14:55:48.872414Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:48.873385Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519900822042080492:2080] 1750863348736462 != 1750863348736465 TServer::EnableGrpc on GrpcPort 63288, node 4 2025-06-25T14:55:48.894045Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:48.894131Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:48.895320Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:48.936873Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:48.936898Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:48.936906Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:48.937025Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16184 TClient is connected to server localhost:16184 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:49.448899Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:49.457398Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:49.469668Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:49.561866Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:49.746153Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:49.759955Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:49.851140Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:52.080444Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519900839221951303:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:52.080534Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:52.142062Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.214566Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.280967Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.323500Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.367676Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.414332Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.465415Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.597485Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519900839221951965:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:52.597586Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:52.598069Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519900839221951970:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:52.602066Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:52.617537Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519900839221951972:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:52.679360Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519900839221952023:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:53.737562Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519900822042080515:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:53.738135Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryService::PeriodicTaskInSessionPoolSessionCloseByIdle [GOOD] >> KqpQueryService::ReadDatashardAndColumnshard >> KqpQueryServiceScripts::ParseScript [GOOD] >> KqpQueryServiceScripts::ListScriptExecutions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::AlterCdcTopic [GOOD] Test command err: Trying to start YDB, gRPC: 31355, MsgBus: 3965 2025-06-25T14:55:33.134667Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900756687790919:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:33.135203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001808/r3tmp/tmphv2fb3/pdisk_1.dat 2025-06-25T14:55:33.547134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:33.547228Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:33.555870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:33.580390Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900756687790822:2080] 1750863333107420 != 1750863333107423 2025-06-25T14:55:33.583566Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31355, node 1 2025-06-25T14:55:33.642595Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:33.642623Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:33.642629Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:33.642759Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3965 2025-06-25T14:55:34.140587Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3965 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:34.298980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:34.323927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:34.450465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:34.620158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:55:34.706008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.314434Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900769572694356:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.314533Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.665520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.697496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.726705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.768531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.801159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.829473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.865531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.936823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900769572695011:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.936922Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.937212Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900769572695016:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.941567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:36.956828Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900769572695018:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:37.034987Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900773867662365:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:38.039130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:38.112711Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900756687790919:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:38.112756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:38.157226Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, s ... om file: (empty maybe) 2025-06-25T14:55:49.437019Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26172 TClient is connected to server localhost:26172 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:50.065872Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:55:50.072437Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:50.083784Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:55:50.154264Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:50.246554Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:50.336098Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:50.406257Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:52.725016Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519900838979282858:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:52.725111Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:52.788354Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.834475Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.863451Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.936497Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:53.013798Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:53.087446Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:53.160661Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:53.250015Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519900843274250820:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.250086Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.250260Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519900843274250825:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.254013Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:53.268627Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519900843274250827:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:53.347044Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519900843274250880:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:54.169250Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519900826094379362:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:54.169323Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:54.487697Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.646902Z node 4 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037922:1][4:7519900847569218640:2500] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:19:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-25T14:55:54.790001Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T14:55:54.813428Z node 4 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-25T14:55:54.850701Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519900847569218774:3774] txid# 281474976710675, issues: { message: "Cannot change partition count. Use split/merge instead" severity: 1 } 2025-06-25T14:55:54.856662Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Nzg3ODc1MWUtMTA4YjBlNGQtMzQ5Y2YwMDktNDhlNjhlMWU=, ActorId: [4:7519900847569218706:2512], ActorState: ExecuteState, TraceId: 01jyksd6yg009gvd6j900bf5rf, Create QueryResponse for error on request, msg: Query failed, status: BAD_REQUEST:
: Error: Cannot change partition count. Use split/merge instead, code: 2017 2025-06-25T14:55:54.865683Z node 4 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request >> KqpQueryService::TableSink_HtapInteractive-withOltpSink [GOOD] >> KqpQueryService::TableSink_OlapInsert >> KqpQueryService::ReadManyRanges >> KqpQueryService::TableSink_HtapComplex+withOltpSink >> KqpQueryService::CheckIsolationLevelFroPerStatementMode [GOOD] >> KqpQueryService::AlterTable_DropNotNull_WithSetFamily_Valid >> KqpQueryService::DdlUser >> KqpQueryService::ShowCreateTable >> TPersQueueTest::TestWriteStat [GOOD] >> TPersQueueTest::TestWriteSessionsConflicts >> KqpQueryService::ClosedSessionRemovedWhileActiveWithQuery >> KqpQueryServiceScripts::ExecuteMultiScript [GOOD] >> KqpQueryServiceScripts::ExecuteScriptPg >> TPersQueueTest::InflightLimit [GOOD] >> KqpQueryService::FlowControllOnHugeLiteralAsTable [GOOD] >> KqpQueryService::FlowControllOnHugeRealTable+LongRow >> BasicUsage::SimpleHandlers [GOOD] |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] [GOOD] >> TPersQueueTest::DisableWrongSettings [GOOD] >> TPersQueueTest::DisableDeduplication >> KqpQueryService::ExecuteQueryPure [GOOD] >> KqpQueryService::ExecuteQueryScalar >> KqpQueryService::TableSink_Oltp_Replace+UseSink [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithResultsTtl [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::InflightLimit [GOOD] Test command err: 2025-06-25T14:51:25.943666Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899693366488802:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:25.943743Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:25.976434Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899692648989094:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:25.980771Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b62/r3tmp/tmp6SjFyb/pdisk_1.dat 2025-06-25T14:51:26.165556Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:26.165251Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:26.366053Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:26.366127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:26.366750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:26.366783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:26.374000Z node 1 :HIVE WARN: 
hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:51:26.374372Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:26.375138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:26.375387Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12997, node 1 2025-06-25T14:51:26.548778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b62/r3tmp/yandexi7BKB7.tmp 2025-06-25T14:51:26.548811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b62/r3tmp/yandexi7BKB7.tmp 2025-06-25T14:51:26.549001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b62/r3tmp/yandexi7BKB7.tmp 2025-06-25T14:51:26.549157Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:26.597585Z INFO: TTestServer started on Port 27842 GrpcPort 12997 TClient is connected to server localhost:27842 PQClient connected to localhost:12997 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:26.907677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-25T14:51:26.946413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:51:26.952521Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:26.955183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:51:26.981338Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-25T14:51:28.861943Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899705533891180:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:28.862018Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899705533891165:2269], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:28.862165Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:28.867956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:28.896155Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899705533891183:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:51:28.954954Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899705533891211:2131] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:29.225950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:29.228708Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899705533891226:2277], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:51:29.229010Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MjFhZmIxMGYtMWE2MzhkZGItZTdjYmJjYzQtM2NiNzc5NTY=, ActorId: [2:7519899705533891152:2268], ActorState: ExecuteState, TraceId: 01jyks536w1pgrcr3thv2yhxp8, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:51:29.229148Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519899706251391854:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:51:29.229320Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZTQ2ODYxOGYtNzg0OTU2YWYtNzljOGNiZjctYzgzYmZkYjg=, ActorId: [1:7519899706251391814:2302], ActorState: ExecuteState, TraceId: 01jyks53ae40sjrbvf3pwzkt5c, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:51:29.231454Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:51:29.231515Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:51:29.305297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:29.393049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at ... dc1 in database: Root, partition 0(assignId:1) EndOffset 4 ReadOffset 4 ReadGuid 3193e29c-2c6f3010-b319bbdc-43436b11 has messages 1 2025-06-25T14:55:51.137663Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 3 consumer session _29_3_17909495799462276030_v1 read done: guid# 3193e29c-2c6f3010-b319bbdc-43436b11, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 82616 2025-06-25T14:55:51.137690Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 3 consumer session _29_3_17909495799462276030_v1 response to read: guid# 3193e29c-2c6f3010-b319bbdc-43436b11 2025-06-25T14:55:51.138029Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 3 consumer session _29_3_17909495799462276030_v1 Process answer. 
Aval parts: 0 Bytes readed: 82616 Offset: 0 from session 1 Offset: 1 from session 1 Offset: 2 from session 1 Offset: 3 from session 1 2025-06-25T14:55:51.144512Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer session _29_3_17909495799462276030_v1 grpc read done: success# 0, data# { } 2025-06-25T14:55:51.144533Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer session _29_3_17909495799462276030_v1 grpc read failed 2025-06-25T14:55:51.144557Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer session _29_3_17909495799462276030_v1 grpc closed 2025-06-25T14:55:51.144588Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer session _29_3_17909495799462276030_v1 is DEAD 2025-06-25T14:55:51.146559Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session _29_3_17909495799462276030_v1 2025-06-25T14:55:51.146626Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [29:7519900817765925665:2525] destroyed 2025-06-25T14:55:51.146792Z node 30 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _29_3_17909495799462276030_v1 2025-06-25T14:55:52.114142Z node 30 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=82536, count=4, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:55:55.129588Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 5 Topic 'rt3.dc1--topic1' partition 0 user $without_consumer offset 0 count 4 size 99043 endOffset 4 max time lag 0ms effective offset 0 2025-06-25T14:55:55.130742Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 5 added 4 blobs, size 82536 count 4 last offset 3, current partition end offset: 4 2025-06-25T14:55:55.130798Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 5. Send blob request. 2025-06-25T14:55:55.130887Z node 30 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 1 parts_count 0 source 1 size 20634 accessed 3 times before, last time 2025-06-25T14:55:51.000000Z 2025-06-25T14:55:55.130920Z node 30 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 1 partno 0 count 1 parts_count 0 source 1 size 20634 accessed 2 times before, last time 2025-06-25T14:55:51.000000Z 2025-06-25T14:55:55.130943Z node 30 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 2 partno 0 count 1 parts_count 0 source 1 size 20634 accessed 2 times before, last time 2025-06-25T14:55:51.000000Z 2025-06-25T14:55:55.130962Z node 30 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 3 partno 0 count 1 parts_count 0 source 1 size 20634 accessed 2 times before, last time 2025-06-25T14:55:51.000000Z 2025-06-25T14:55:55.131012Z node 30 :PERSQUEUE DEBUG: read.h:121: Reading cookie 5. All 4 blobs are from cache. 
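The "Bytes readed: ... Offset: N from session M" lines above are the test's own read-verification output for the four messages served from cache. As a minimal sketch (a hypothetical helper, not part of the YDB test suite), those offset/session pairs can be pulled out of captured output like this with a regular expression to confirm every expected offset arrived:

```python
import re

# Hypothetical helper: extract "Offset: <n> from session <m>" pairs from
# captured test output such as the lines above and check completeness.
OFFSET_RE = re.compile(r"Offset: (\d+) from session (\d+)")

def read_offsets(log_text: str) -> list[tuple[int, int]]:
    """Return (offset, session) pairs in the order they appear."""
    return [(int(off), int(sess)) for off, sess in OFFSET_RE.findall(log_text)]

def all_offsets_read(log_text: str, expected: int) -> bool:
    """True if offsets 0..expected-1 each appear at least once."""
    seen = {off for off, _ in read_offsets(log_text)}
    return seen >= set(range(expected))

if __name__ == "__main__":
    sample = ("Bytes readed: 82616 Offset: 0 from session 1 Offset: 1 from session 1 "
              "Offset: 2 from session 1 Offset: 3 from session 1")
    print(read_offsets(sample))         # [(0, 1), (1, 1), (2, 1), (3, 1)]
    print(all_offsets_read(sample, 4))  # True
```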
2025-06-25T14:55:55.131098Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 4 blobs 2025-06-25T14:55:55.131471Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-25T14:55:55.131614Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 1 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-25T14:55:55.131725Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 2 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-25T14:55:55.131864Z node 30 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 1 size 20612 from pos 0 cbcount 1 2025-06-25T14:55:55.132028Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--topic1' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:55:55.132120Z node 30 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:55:55.132140Z node 30 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 1 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:55:55.133358Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer session _29_2_7752321200257482112_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 4 Result { Offset: 0 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 2 WriteTimestampMS: 1750863343744 CreateTimestampMS: 1750863343740 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 3 WriteTimestampMS: 1750863343768 CreateTimestampMS: 1750863343765 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 20570 bytes ..." SourceId: "\000source" SeqNo: 4 WriteTimestampMS: 1750863343800 CreateTimestampMS: 1750863343784 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } Result { Offset: 3 Data: "... 20570 bytes ..." 
SourceId: "\000source" SeqNo: 5 WriteTimestampMS: 1750863343824 CreateTimestampMS: 1750863343824 UncompressedSize: 20480 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 4 SizeLag: 88 RealReadOffset: 3 WaitQuotaTimeMs: 7429 EndOffset: 4 StartOffset: 0 } Cookie: 0 } 2025-06-25T14:55:55.133783Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 2 consumer session _29_2_7752321200257482112_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset 4 2025-06-25T14:55:55.133853Z node 29 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 2 consumer session _29_2_7752321200257482112_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 4 ReadOffset 4 ReadGuid 1a34ff9a-32a683b2-565262b5-2228539 has messages 1 2025-06-25T14:55:55.134019Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 2 consumer session _29_2_7752321200257482112_v1 read done: guid# 1a34ff9a-32a683b2-565262b5-2228539, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 82616 2025-06-25T14:55:55.134065Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 2 consumer session _29_2_7752321200257482112_v1 response to read: guid# 1a34ff9a-32a683b2-565262b5-2228539 2025-06-25T14:55:55.134428Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 2 consumer session _29_2_7752321200257482112_v1 Process answer. Aval parts: 0 2025-06-25T14:55:55.132162Z node 30 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:55:55.132187Z node 30 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. 
Tablet '72075186224037892' partition 0 offset 3 partno 0 count 1 parts 0 suffix '63' Bytes readed: 82616 Offset: 0 from session 1 Offset: 1 from session 1 Offset: 2 from session 1 Offset: 3 from session 1 2025-06-25T14:55:55.148412Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session _29_2_7752321200257482112_v1 2025-06-25T14:55:55.148482Z node 30 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [29:7519900817765925663:2524] destroyed 2025-06-25T14:55:55.148548Z node 30 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: _29_2_7752321200257482112_v1 2025-06-25T14:55:55.146622Z node 29 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer session _29_2_7752321200257482112_v1 grpc read done: success# 0, data# { } 2025-06-25T14:55:55.146646Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer session _29_2_7752321200257482112_v1 grpc read failed 2025-06-25T14:55:55.146672Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer session _29_2_7752321200257482112_v1 grpc closed 2025-06-25T14:55:55.146719Z node 29 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer session _29_2_7752321200257482112_v1 is DEAD 2025-06-25T14:55:55.443655Z node 28 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186224037893][rt3.dc1--topic1] TPersQueueReadBalancer::HandleWakeup 2025-06-25T14:55:55.443766Z node 28 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186224037893][rt3.dc1--topic1] Send TEvPersQueue::TEvStatus TabletId: 72075186224037892 Cookie: 1 2025-06-25T14:55:55.445892Z node 28 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][rt3.dc1--topic1] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 1 DataSize: 0 UsedReserveSize: 0 2025-06-25T14:55:55.446521Z node 28 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][rt3.dc1--topic1] ProcessPendingStats. PendingUpdates size 1 2025-06-25T14:55:55.444744Z node 27 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ LifetimeSeconds: 86400 LowWatermark: 1048584 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 10240 BurstSize: 10240 TotalPartitions: 1 SourceIdMaxCounts: 6000000 } 2025-06-25T14:55:55.482415Z node 27 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=82536, count=4, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:55:57.806764Z node 27 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [27:7519900859574681485:2725] TxId: 281474976710734. Ctx: { TraceId: 01jyksd98p26xmh6eqb85tc86t, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=27&id=NjhiYjk1MzctYThjZjBlZjAtNmZlZTc0OWItMjc0Zjc4YmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 28 2025-06-25T14:55:57.806961Z node 27 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [27:7519900859574681490:2725], TxId: 281474976710734, task: 3. Ctx: { SessionId : ydb://session/3?node_id=27&id=NjhiYjk1MzctYThjZjBlZjAtNmZlZTc0OWItMjc0Zjc4YmM=. TraceId : 01jyksd98p26xmh6eqb85tc86t. CustomerSuppliedId : . 
CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [27:7519900859574681485:2725], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> KqpQueryService::TableSink_OltpOrder [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::SimpleHandlers [GOOD] Test command err: 2025-06-25T14:54:56.031687Z :WaitEventBlocksBeforeDiscovery INFO: Random seed for debugging is 1750863296031653 2025-06-25T14:54:56.400121Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900598056082318:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:56.400205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:54:56.453089Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900598936968769:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:56.453151Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:54:56.651133Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012b5/r3tmp/tmpMIqFzu/pdisk_1.dat 2025-06-25T14:54:56.679184Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:54:57.007132Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:57.024973Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:57.025073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:57.041479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:57.041540Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:57.041651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:57.045192Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:54:57.046319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1481, node 1 2025-06-25T14:54:57.302657Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0012b5/r3tmp/yandexXB4Bj7.tmp 2025-06-25T14:54:57.302700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0012b5/r3tmp/yandexXB4Bj7.tmp 2025-06-25T14:54:57.309143Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0012b5/r3tmp/yandexXB4Bj7.tmp 2025-06-25T14:54:57.310439Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:54:57.414928Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:57.476442Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:57.601107Z INFO: TTestServer started on Port 18000 GrpcPort 1481 TClient is connected to server localhost:18000 PQClient connected to localhost:1481 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:57.975472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:54:59.940268Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900611821870929:2270], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:59.940389Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:59.941280Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900611821870941:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:59.976811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:00.018528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-25T14:55:00.021053Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900611821870943:2274], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:55:00.133010Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900616116838267:2132] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:00.609366Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519900616116838274:2278], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:00.610202Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900615235952496:2304], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:00.611943Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZWQwZjJlNWItZGZjYTNmZTQtNDA3OGIyYzQtNTllNWI1Yjg=, ActorId: [1:7519900610940985128:2296], ActorState: ExecuteState, TraceId: 01jyksbhcm8h7z4qdtngdq3yg2, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:00.611864Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=MzI2OTk0NjUtYmZkYTU5MzktYjQ3ZGE3NjQtN2Y1ZDE2OTQ=, ActorId: [2:7519900611821870927:2269], ActorState: ExecuteState, TraceId: 01jyksbhb07z5ct7h1yzcwydsr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:00.613620Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:00.616510Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." 
end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:00.745126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:00.993480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:01.167738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:1481", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", ... 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:58.613683Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|8af6b12b-788080c2-8d14ea0a-9af6219_0] Write session: close. Timeout = 0 ms 2025-06-25T14:55:58.613731Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|8af6b12b-788080c2-8d14ea0a-9af6219_0] Write session will now close 2025-06-25T14:55:58.613760Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|8af6b12b-788080c2-8d14ea0a-9af6219_0] Write session: aborting 2025-06-25T14:55:58.613857Z :INFO: [/Root] MessageGroupId [src_id] SessionId [src_id|8af6b12b-788080c2-8d14ea0a-9af6219_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:55:58.613890Z :DEBUG: [/Root] MessageGroupId [src_id] SessionId [src_id|8af6b12b-788080c2-8d14ea0a-9af6219_0] Write session: destroy 2025-06-25T14:55:58.614964Z :INFO: [/Root] [/Root] [13c39e12-30a1ab7d-ee0e32c4-3eead5fc] Closing read session. Close timeout: 0.000000s 2025-06-25T14:55:58.615019Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-25T14:55:58.615060Z :INFO: [/Root] [/Root] [13c39e12-30a1ab7d-ee0e32c4-3eead5fc] Counters: { Errors: 0 CurrentSessionLifetimeMs: 683 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:58.615096Z :INFO: [/Root] [/Root] [2ec8eb4f-fbb2d1da-7f4df607-cf13757c] Closing read session. 
Close timeout: 0.000000s 2025-06-25T14:55:58.615118Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-25T14:55:58.615137Z :INFO: [/Root] [/Root] [2ec8eb4f-fbb2d1da-7f4df607-cf13757c] Counters: { Errors: 0 CurrentSessionLifetimeMs: 678 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:58.615154Z :INFO: [/Root] [/Root] [70c10425-a8474238-a6c94292-1d3574e1] Closing read session. Close timeout: 0.000000s 2025-06-25T14:55:58.615179Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:299:0 2025-06-25T14:55:58.615202Z :INFO: [/Root] [/Root] [70c10425-a8474238-a6c94292-1d3574e1] Counters: { Errors: 0 CurrentSessionLifetimeMs: 676 BytesRead: 4936800 MessagesRead: 300 BytesReadCompressed: 4936800 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:58.615228Z :INFO: [/Root] [/Root] [70c10425-a8474238-a6c94292-1d3574e1] Closing read session. Close timeout: 0.000000s 2025-06-25T14:55:58.615282Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:299:0 2025-06-25T14:55:58.615315Z :INFO: [/Root] [/Root] [70c10425-a8474238-a6c94292-1d3574e1] Counters: { Errors: 0 CurrentSessionLifetimeMs: 676 BytesRead: 4936800 MessagesRead: 300 BytesReadCompressed: 4936800 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:58.615588Z :NOTICE: [/Root] [/Root] [70c10425-a8474238-a6c94292-1d3574e1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:55:58.615707Z :INFO: [/Root] [/Root] [2ec8eb4f-fbb2d1da-7f4df607-cf13757c] Closing read session. Close timeout: 0.000000s 2025-06-25T14:55:58.615724Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-25T14:55:58.615741Z :INFO: [/Root] [/Root] [2ec8eb4f-fbb2d1da-7f4df607-cf13757c] Counters: { Errors: 0 CurrentSessionLifetimeMs: 678 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:58.615780Z :NOTICE: [/Root] [/Root] [2ec8eb4f-fbb2d1da-7f4df607-cf13757c] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:55:58.615813Z :INFO: [/Root] [/Root] [13c39e12-30a1ab7d-ee0e32c4-3eead5fc] Closing read session. Close timeout: 0.000000s 2025-06-25T14:55:58.615828Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-25T14:55:58.615843Z :INFO: [/Root] [/Root] [13c39e12-30a1ab7d-ee0e32c4-3eead5fc] Counters: { Errors: 0 CurrentSessionLifetimeMs: 684 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:58.615865Z :NOTICE: [/Root] [/Root] [13c39e12-30a1ab7d-ee0e32c4-3eead5fc] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:55:58.620598Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_8240207399671959320_v1 grpc read done: success# 0, data# { } 2025-06-25T14:55:58.620631Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_3_1_8240207399671959320_v1 grpc read failed 2025-06-25T14:55:58.620660Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_3_1_8240207399671959320_v1 grpc closed 2025-06-25T14:55:58.620684Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_3_1_8240207399671959320_v1 is DEAD 2025-06-25T14:55:58.624719Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519900861292999310:2503] disconnected; active server actors: 1 2025-06-25T14:55:58.624260Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_3_2_3469521243860978719_v1 grpc read done: success# 0, data# { } 2025-06-25T14:55:58.624281Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_3_2_3469521243860978719_v1 grpc read failed 2025-06-25T14:55:58.624296Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_3_2_3469521243860978719_v1 grpc closed 2025-06-25T14:55:58.624324Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/user session shared/user_3_2_3469521243860978719_v1 is DEAD 2025-06-25T14:55:58.624929Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer shared/user session shared/user_3_3_16714140418007381502_v1 grpc read done: success# 0, data# { } 2025-06-25T14:55:58.624938Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer shared/user session shared/user_3_3_16714140418007381502_v1 grpc read failed 2025-06-25T14:55:58.624952Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer shared/user session shared/user_3_3_16714140418007381502_v1 grpc closed 2025-06-25T14:55:58.624974Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer shared/user session shared/user_3_3_16714140418007381502_v1 is DEAD 2025-06-25T14:55:58.625532Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: src_id|8af6b12b-788080c2-8d14ea0a-9af6219_0 grpc read done: success: 0 data: 2025-06-25T14:55:58.625542Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: src_id|8af6b12b-788080c2-8d14ea0a-9af6219_0 grpc read failed 2025-06-25T14:55:58.625564Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: src_id|8af6b12b-788080c2-8d14ea0a-9af6219_0 grpc closed 2025-06-25T14:55:58.625576Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: src_id|8af6b12b-788080c2-8d14ea0a-9af6219_0 is DEAD 2025-06-25T14:55:58.626082Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:55:58.626294Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7519900865587966646:2502] destroyed 
2025-06-25T14:55:58.626333Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:55:58.627073Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_3_3_16714140418007381502_v1 2025-06-25T14:55:58.627104Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7519900861292999320:2515] destroyed 2025-06-25T14:55:58.627144Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_3_16714140418007381502_v1 2025-06-25T14:55:58.624755Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519900861292999310:2503] client user disconnected session shared/user_3_1_8240207399671959320_v1 2025-06-25T14:55:58.624807Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-06-25T14:55:58.624858Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037893][rt3.dc1--test-topic] consumer user balancing. Sessions=2, Families=1, UnradableFamilies=0 [], RequireBalancing=0 [] 2025-06-25T14:55:58.624885Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037893][rt3.dc1--test-topic] consumer user start rebalancing. familyCount=1, sessionCount=2, desiredFamilyCount=0, allowPlusOne=1 2025-06-25T14:55:58.624914Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037893][rt3.dc1--test-topic] consumer user balancing duration: 0.000033s 2025-06-25T14:55:58.625194Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519900861292999313:2504] disconnected; active server actors: 1 2025-06-25T14:55:58.625216Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519900861292999313:2504] client user disconnected session shared/user_3_2_3469521243860978719_v1 2025-06-25T14:55:58.625246Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-06-25T14:55:58.625289Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037893][rt3.dc1--test-topic] consumer user balancing. Sessions=1, Families=1, UnradableFamilies=0 [], RequireBalancing=0 [] 2025-06-25T14:55:58.625312Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037893][rt3.dc1--test-topic] consumer user start rebalancing. 
familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-25T14:55:58.625335Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037893][rt3.dc1--test-topic] consumer user balancing duration: 0.000028s 2025-06-25T14:55:58.625693Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519900861292999302:2505] disconnected; active server actors: 1 2025-06-25T14:55:58.625707Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519900861292999302:2505] client user disconnected session shared/user_3_3_16714140418007381502_v1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_Oltp_Replace+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 10356, MsgBus: 14209 2025-06-25T14:55:45.384794Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900806740947422:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:45.384849Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017dc/r3tmp/tmpcnlt3o/pdisk_1.dat 2025-06-25T14:55:45.685108Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10356, node 1 2025-06-25T14:55:45.776989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:45.777075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:45.778463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:45.781333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:45.781350Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:45.781361Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:45.781480Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14209 TClient is connected to server localhost:14209 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:46.310543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:46.321313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:46.391813Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:48.069930Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900819625849916:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:48.070010Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:48.347701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:48.488681Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900819625850022:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:48.488777Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:48.489093Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900819625850027:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:48.492126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:48.501781Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900819625850029:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:55:48.585239Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900819625850080:2393] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:48.963543Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900819625850170:2334], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Missing key column in input: Col1 for table: /Root/DataShard, code: 2029 2025-06-25T14:55:48.963825Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Zjg2NTJmODctOTMzZDFkZGItZGI3M2E5MGYtNjI3NDU3ZTE=, ActorId: [1:7519900819625850168:2333], ActorState: ExecuteState, TraceId: 01jyksd1676apnxn4rt45y8816, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: Trying to start YDB, gRPC: 27911, MsgBus: 23023 2025-06-25T14:55:49.701007Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900825274285255:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:49.701123Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017dc/r3tmp/tmp39HlCr/pdisk_1.dat 2025-06-25T14:55:49.875185Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:49.875641Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:49.875713Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:49.887530Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27911, node 2 2025-06-25T14:55:50.025941Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:50.025963Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:50.025973Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:50.026063Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23023 TClient is connected to server localhost:23023 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:50.505402Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:50.511701Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:50.715027Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:52.941524Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900838159187737:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:52.941616Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:52.959367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:53.000427Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900838159187840:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.000490Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.000631Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900838159187845:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.024979Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:53.036921Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-25T14:55:53.040456Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900842454155143:2306], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:55:53.118517Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900842454155194:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 30215, MsgBus: 6473 2025-06-25T14:55:54.736178Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519900845829173693:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:54.736219Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017dc/r3tmp/tmphIVrxb/pdisk_1.dat 2025-06-25T14:55:54.890728Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:54.896489Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519900845829173667:2080] 1750863354735065 != 1750863354735068 2025-06-25T14:55:54.906238Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:54.906311Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:54.912185Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30215, node 3 2025-06-25T14:55:54.976800Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:54.976818Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:54.976824Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:54.976907Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6473 TClient is connected to server localhost:6473 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:55.482309Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:55.493041Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:55.749750Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:57.968610Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900858714076188:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:57.968689Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:57.983268Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:58.146122Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:58.364889Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900863009044820:2402], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:58.365013Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:58.365168Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900863009044825:2405], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:58.369000Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:58.382409Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900863009044827:2406], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T14:55:58.450835Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900863009044878:3197] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:59.736773Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900845829173693:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:59.736848Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryService::TableSink_Htap+withOltpSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ExecuteScriptWithResultsTtl [GOOD] Test command err: Trying to start YDB, gRPC: 62449, MsgBus: 22643 2025-06-25T14:55:32.815659Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900751601476104:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:32.819186Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00180b/r3tmp/tmpPpa1xE/pdisk_1.dat 2025-06-25T14:55:33.245594Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:33.273083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:33.273166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:33.297651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62449, node 1 2025-06-25T14:55:33.488892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:33.488908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:33.488916Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:33.489003Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22643 2025-06-25T14:55:33.812471Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22643 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:34.165537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:34.192175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:34.365855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:34.526511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:34.595286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:36.241072Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900768781346717:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.241173Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.586539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.613000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.640304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.706285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.780119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.854492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.893267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:36.951688Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900768781347382:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.951775Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.951992Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900768781347387:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:36.955453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:36.969571Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900768781347389:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:37.064978Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900773076314736:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:37.816418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900751601476104:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:37.816503Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:38.045930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:38.053144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /ho ... 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:51.833144Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:51.841238Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:51.850090Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:51.929759Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:52.100719Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:52.101829Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:52.199241Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:54.457580Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900849193097817:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:54.457674Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:54.525811Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.569778Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.639345Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.671577Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.703266Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.784463Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.869950Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.962207Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900849193098478:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:54.962274Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:54.962419Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900849193098483:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:54.966335Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:54.981068Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900849193098485:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:55.059900Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900853488065832:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:56.088419Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900836308194320:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:56.088495Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:56.201726Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.203029Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.203923Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:58.670206Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863358699, txId: 281474976710704] shutting down 2025-06-25T14:55:58.933253Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863358972, txId: 281474976710707] shutting down 2025-06-25T14:55:59.245377Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863359280, txId: 281474976710710] shutting down 2025-06-25T14:55:59.646352Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863359679, txId: 281474976710713] shutting down 2025-06-25T14:55:59.684694Z node 3 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TGetScriptExecutionResultQueryActor] TraceId: 930e858f-4cc4eafe-f4ed500a-3cec66f8, State: Get results info, Finish with NOT_FOUND, Issues: {
: Error: Results are expired }, SessionId: ydb://session/3?node_id=3&id=ZjJmNDZmZDgtMjM4MDMxYjMtZjY1NjZlNC0zMDg5NTBmMQ==, TxId: >> KqpQueryService::TableSink_OlapInsert [GOOD] >> KqpQueryService::TableSink_OlapDelete >> BasicUsage::PreferredDatabaseNoFallback [GOOD] >> KqpQueryService::TableSink_ReplaceFromSelectOlap [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OltpOrder [GOOD] Test command err: Trying to start YDB, gRPC: 10060, MsgBus: 15227 2025-06-25T14:55:31.692963Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900746544455830:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:31.701787Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00180d/r3tmp/tmpovNc7K/pdisk_1.dat 2025-06-25T14:55:32.215206Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900746544455798:2080] 1750863331681067 != 1750863331681070 2025-06-25T14:55:32.220040Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10060, node 1 2025-06-25T14:55:32.232423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:32.232531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:32.233978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:32.268935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:32.268988Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:32.268996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:32.269129Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15227 2025-06-25T14:55:32.696421Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15227 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:32.855246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:32.874587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:34.646900Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900759429358326:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:34.646995Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:34.896455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.011335Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900763724325776:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.011413Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.011851Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900763724325781:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:35.015911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:35.024732Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900763724325783:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:55:35.108486Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900763724325834:2428] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:36.693510Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900746544455830:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:36.693582Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 14211, MsgBus: 15776 2025-06-25T14:55:37.641154Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900773494269273:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:37.641211Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00180d/r3tmp/tmp1mqcRB/pdisk_1.dat 2025-06-25T14:55:37.791213Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:37.792126Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900773494269253:2080] 1750863337640169 != 1750863337640172 2025-06-25T14:55:37.801736Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:37.801809Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:37.802987Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14211, node 2 2025-06-25T14:55:37.884981Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:37.885004Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:37.885011Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:37.885102Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15776 TClient is connected to server localhost:15776 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:55:38.284460Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:55:38.290830Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:38.676835Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:40.967829Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900786379171777:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:40.967932Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permission ... MtYTYxNmYxY2EtMjQzYTgzM2Q=, ActorId: [3:7519900806751369014:2326], ActorState: ExecuteState, TraceId: 01jyksdc0ne3wey1j5r42yf0mx, Create QueryResponse for error on request, msg: 2025-06-25T14:56:00.221957Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=97; 2025-06-25T14:56:00.222174Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [3:7519900871175881776:2326], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7519900806751369014:2326]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7519900871175881776:2326].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:56:00.222225Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519900871175881766:2326], SessionActorId: [3:7519900806751369014:2326], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7519900806751369014:2326]. isRollback=0 2025-06-25T14:56:00.222370Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=YTIyY2JjNTctYmFhYzQyZWMtYTYxNmYxY2EtMjQzYTgzM2Q=, ActorId: [3:7519900806751369014:2326], ActorState: ExecuteState, TraceId: 01jyksdc595yz9e522rm0ksp1d, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7519900871175881767:2326] from: [3:7519900871175881766:2326] 2025-06-25T14:56:00.222427Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519900871175881767:2326] TxId: 281474976715756. Ctx: { TraceId: 01jyksdc595yz9e522rm0ksp1d, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTIyY2JjNTctYmFhYzQyZWMtYTYxNmYxY2EtMjQzYTgzM2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:56:00.222560Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=YTIyY2JjNTctYmFhYzQyZWMtYTYxNmYxY2EtMjQzYTgzM2Q=, ActorId: [3:7519900806751369014:2326], ActorState: ExecuteState, TraceId: 01jyksdc595yz9e522rm0ksp1d, Create QueryResponse for error on request, msg: 2025-06-25T14:56:00.394363Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=98; 2025-06-25T14:56:00.394659Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [3:7519900871175881817:2301], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7519900806751368893:2301]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7519900871175881817:2301].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:56:00.394718Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519900871175881807:2301], SessionActorId: [3:7519900806751368893:2301], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7519900806751368893:2301]. isRollback=0 2025-06-25T14:56:00.394901Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=NzEzNjA0ZDMtMTAwMTk4MmMtYTMzYWRkZWYtNzA4MzBhMmM=, ActorId: [3:7519900806751368893:2301], ActorState: ExecuteState, TraceId: 01jyksdc9w7dsh3ghygn1sgs6d, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7519900871175881808:2301] from: [3:7519900871175881807:2301] 2025-06-25T14:56:00.394962Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519900871175881808:2301] TxId: 281474976715757. Ctx: { TraceId: 01jyksdc9w7dsh3ghygn1sgs6d, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzEzNjA0ZDMtMTAwMTk4MmMtYTMzYWRkZWYtNzA4MzBhMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:56:00.395113Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NzEzNjA0ZDMtMTAwMTk4MmMtYTMzYWRkZWYtNzA4MzBhMmM=, ActorId: [3:7519900806751368893:2301], ActorState: ExecuteState, TraceId: 01jyksdc9w7dsh3ghygn1sgs6d, Create QueryResponse for error on request, msg: 2025-06-25T14:56:00.556363Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=99; 2025-06-25T14:56:00.556645Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [3:7519900871175881851:2326], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7519900806751369014:2326]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7519900871175881851:2326].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:56:00.556704Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519900871175881841:2326], SessionActorId: [3:7519900806751369014:2326], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7519900806751369014:2326]. isRollback=0 2025-06-25T14:56:00.556867Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=YTIyY2JjNTctYmFhYzQyZWMtYTYxNmYxY2EtMjQzYTgzM2Q=, ActorId: [3:7519900806751369014:2326], ActorState: ExecuteState, TraceId: 01jyksdcezbegnv2rvsqvnw5qk, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7519900871175881842:2326] from: [3:7519900871175881841:2326] 2025-06-25T14:56:00.556923Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519900871175881842:2326] TxId: 281474976715758. Ctx: { TraceId: 01jyksdcezbegnv2rvsqvnw5qk, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTIyY2JjNTctYmFhYzQyZWMtYTYxNmYxY2EtMjQzYTgzM2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:56:00.557061Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=YTIyY2JjNTctYmFhYzQyZWMtYTYxNmYxY2EtMjQzYTgzM2Q=, ActorId: [3:7519900806751369014:2326], ActorState: ExecuteState, TraceId: 01jyksdcezbegnv2rvsqvnw5qk, Create QueryResponse for error on request, msg: 2025-06-25T14:56:00.725121Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=100; 2025-06-25T14:56:00.725339Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 100 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:56:00.725451Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 100 at tablet 72075186224037888 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:56:00.725614Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [3:7519900871175881883:2326], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7519900806751369014:2326]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7519900871175881883:2326].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:56:00.725682Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519900871175881875:2326], SessionActorId: [3:7519900806751369014:2326], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7519900806751369014:2326]. isRollback=0 2025-06-25T14:56:00.725893Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=YTIyY2JjNTctYmFhYzQyZWMtYTYxNmYxY2EtMjQzYTgzM2Q=, ActorId: [3:7519900806751369014:2326], ActorState: ExecuteState, TraceId: 01jyksdcm0exhdmr3d33rm28ar, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7519900871175881876:2326] from: [3:7519900871175881875:2326] 2025-06-25T14:56:00.725950Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519900871175881876:2326] TxId: 281474976715759. Ctx: { TraceId: 01jyksdcm0exhdmr3d33rm28ar, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTIyY2JjNTctYmFhYzQyZWMtYTYxNmYxY2EtMjQzYTgzM2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:56:00.726101Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=YTIyY2JjNTctYmFhYzQyZWMtYTYxNmYxY2EtMjQzYTgzM2Q=, ActorId: [3:7519900806751369014:2326], ActorState: ExecuteState, TraceId: 01jyksdcm0exhdmr3d33rm28ar, Create QueryResponse for error on request, msg: 2025-06-25T14:56:00.858385Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=101; 2025-06-25T14:56:00.858658Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [3:7519900871175881919:2301], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7519900806751368893:2301]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7519900871175881919:2301].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:56:00.858720Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:7519900871175881909:2301], SessionActorId: [3:7519900806751368893:2301], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7519900806751368893:2301]. isRollback=0 2025-06-25T14:56:00.858887Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=NzEzNjA0ZDMtMTAwMTk4MmMtYTMzYWRkZWYtNzA4MzBhMmM=, ActorId: [3:7519900806751368893:2301], ActorState: ExecuteState, TraceId: 01jyksdcs0838jzqdtcq8ykw8e, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7519900871175881910:2301] from: [3:7519900871175881909:2301] 2025-06-25T14:56:00.858946Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:7519900871175881910:2301] TxId: 281474976715760. Ctx: { TraceId: 01jyksdcs0838jzqdtcq8ykw8e, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NzEzNjA0ZDMtMTAwMTk4MmMtYTMzYWRkZWYtNzA4MzBhMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:56:00.859088Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NzEzNjA0ZDMtMTAwMTk4MmMtYTMzYWRkZWYtNzA4MzBhMmM=, ActorId: [3:7519900806751368893:2301], ActorState: ExecuteState, TraceId: 01jyksdcs0838jzqdtcq8ykw8e, Create QueryResponse for error on request, msg: >> KqpQueryServiceScripts::ExecuteScriptStatsProfile >> KqpQueryService::AlterTable_DropNotNull_WithSetFamily_Valid [GOOD] >> KqpQueryService::ReadManyRanges [GOOD] >> KqpQueryService::ReadManyShardsRange >> KqpQueryService::ShowCreateTable [GOOD] >> KqpQueryService::ShowCreateTableDisable >> KqpQueryService::DdlUser [GOOD] >> KqpQueryService::DdlTx ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_ReplaceFromSelectOlap [GOOD] Test command err: Trying to start YDB, gRPC: 16319, MsgBus: 11515 2025-06-25T14:55:45.702565Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900807793972870:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:45.702605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017cc/r3tmp/tmpgc8awN/pdisk_1.dat 2025-06-25T14:55:46.095235Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16319, node 1 2025-06-25T14:55:46.137249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:46.137341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:46.138428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:46.201167Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:46.201186Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:46.201193Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:46.201309Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11515 TClient is connected to server localhost:11515 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:55:46.712876Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:46.798588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:46.812899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:48.460541Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900820678875364:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:48.460541Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900820678875356:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:48.460621Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:48.464468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:48.473764Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900820678875370:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:55:48.564007Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900820678875421:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:48.813274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:49.061655Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900824973842958:2464] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:49.073193Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900824973842965:2469] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/YTgyNWFjNzAtNTk5YTQwOTktMjZjMDAyNWYtZGJmZGM5OWY=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:49.099074Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:55:49.112564Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900824973843018:2327], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:17: Error: At function: KiReadTable!
:3:17: Error: Cannot find table 'db.[/Root/test/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:49.112764Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YTgyNWFjNzAtNTk5YTQwOTktMjZjMDAyNWYtZGJmZGM5OWY=, ActorId: [1:7519900820678875328:2288], ActorState: ExecuteState, TraceId: 01jyksd1bd8z154fk3sgwxxzng, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:49.147043Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900824973843029:2334], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:17: Error: At function: KiReadTable!
:3:17: Error: Cannot find table 'db.[/Root/test/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:49.147218Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODlhOTVlY2MtZTEyZmEwN2QtNWVjYTFlOTQtZWJlNmYwYTA=, ActorId: [1:7519900824973843025:2331], ActorState: ExecuteState, TraceId: 01jyksd1cf2h1xm2bb0cv9ss6p, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 9710, MsgBus: 24794 2025-06-25T14:55:49.807533Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900824356802930:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:49.807578Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017cc/r3tmp/tmplmbRvv/pdisk_1.dat 2025-06-25T14:55:49.937137Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9710, node 2 2025-06-25T14:55:49.968511Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:49.968565Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:49.973216Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:50.021710Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:50.021730Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:50.021736Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:50.021837Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24794 TClient is connected to server localhost:24794 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:50.483868Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at ... taEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:56:00.094998Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[3:7519900865101430901:2307];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:56:00.095018Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[3:7519900865101430785:2304];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:56:00.095050Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519900865101430783:2302];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:56:00.095104Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[3:7519900865101430781:2300];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:56:00.095140Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519900865101430782:2301];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:56:00.095178Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519900865101430801:2306];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-25T14:56:00.906063Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-25T14:56:00.907046Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-25T14:56:00.907094Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-25T14:56:00.908001Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-25T14:56:00.909178Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-25T14:56:00.909209Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-25T14:56:00.909535Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-25T14:56:00.909711Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-25T14:56:00.909747Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-25T14:56:00.910001Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-25T14:56:00.910006Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-25T14:56:00.910106Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037898;local_tx_no=13;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715669; 2025-06-25T14:56:00.910126Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037898;local_tx_no=14;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715669; 2025-06-25T14:56:00.910145Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037898;local_tx_no=15;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715669; 2025-06-25T14:56:00.910184Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037898;local_tx_no=16;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715669; 2025-06-25T14:56:00.910219Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037898;local_tx_no=17;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715669; 2025-06-25T14:56:00.910247Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037898;local_tx_no=19;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715669; 2025-06-25T14:56:00.910262Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037898;local_tx_no=20;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715669; 2025-06-25T14:56:00.910263Z node 3 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715669; 2025-06-25T14:56:00.910279Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037898;local_tx_no=21;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976715669; 2025-06-25T14:56:01.354697Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.362093Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.362519Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.362776Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.362954Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.363472Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.363913Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.364370Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037915;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.364604Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.364768Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.364939Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.365160Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.365324Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:01.366778Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;self_id=[3:7519900865101431504:2454];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037909;local_tx_no=23;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903,72075186224037915;receive=72075186224037910; 2025-06-25T14:56:01.366853Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;self_id=[3:7519900865101431504:2454];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037909;local_tx_no=24;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037903,72075186224037915;receive=72075186224037910; 2025-06-25T14:56:01.366985Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;self_id=[3:7519900865101431504:2454];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037909;local_tx_no=26;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037915;receive=72075186224037903; 2025-06-25T14:56:01.367033Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;self_id=[3:7519900865101431504:2454];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037909;local_tx_no=27;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037915;receive=72075186224037903; 2025-06-25T14:56:01.367458Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::PreferredDatabaseNoFallback [GOOD] Test command err: 2025-06-25T14:55:04.180525Z :GetAllStartPartitionSessions INFO: Random seed for debugging is 1750863304180497 2025-06-25T14:55:04.531182Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900634628444504:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:04.531216Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:55:04.586584Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900632545917673:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:04.586664Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00126e/r3tmp/tmphJ4L0W/pdisk_1.dat 2025-06-25T14:55:04.772197Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:55:04.788739Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:55:04.949565Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:04.955370Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:04.955467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:04.961516Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:55:04.962294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:04.993631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:04.993715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 18806, node 1 2025-06-25T14:55:04.997102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:05.022989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/00126e/r3tmp/yandexuaa2Za.tmp 2025-06-25T14:55:05.023016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/00126e/r3tmp/yandexuaa2Za.tmp 2025-06-25T14:55:05.023172Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/00126e/r3tmp/yandexuaa2Za.tmp 2025-06-25T14:55:05.023298Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:05.078907Z INFO: TTestServer started on Port 62206 GrpcPort 18806 TClient is connected to server localhost:62206 PQClient connected to localhost:18806 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:05.345874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 
2025-06-25T14:55:05.570448Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:05.612511Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:07.511958Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900645430819844:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:07.512034Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900645430819834:2269], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:07.512189Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:07.519106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:07.547674Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900645430819849:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:55:07.640501Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900645430819879:2132] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:07.900844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:07.902241Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900647513347354:2303], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:07.902460Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZmQ3MDhhNDItZTA1MTNhMjEtYzJhODlkMDktZjc1MTYzNjY=, ActorId: [1:7519900647513347314:2297], ActorState: ExecuteState, TraceId: 01jyksbrtsd0swagg2wtqtzthk, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:07.903884Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:07.902638Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519900645430819886:2278], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:07.904565Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=YmJmZTMxMGYtYzZlYjI3YmItMmY3NDNlZGItMjIyNjQ2YjQ=, ActorId: [2:7519900645430819818:2268], ActorState: ExecuteState, TraceId: 01jyksbrqp9skasfzje2srpnp2, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:07.904947Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:08.063979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:08.243407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:18806", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, false, 1000); 2025-06-25T14:55:08.451453Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jyksbsh977vwgtwwwsws1fg2, Database: , DatabaseI ... tive actors on pipe 2025-06-25T14:55:31.004917Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:55:31.004945Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:55:31.005029Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|a490c1e8-6657da6f-f196f738-2482ba44_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-25T14:55:31.005226Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-06-25T14:55:31.005327Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:55:31.005860Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:55:31.005895Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:55:31.006018Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:55:31.006246Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|a490c1e8-6657da6f-f196f738-2482ba44_0 2025-06-25T14:55:31.007746Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750863331007 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:55:31.007865Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|a490c1e8-6657da6f-f196f738-2482ba44_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-25T14:55:31.008065Z :INFO: [] MessageGroupId [src] SessionId [src|a490c1e8-6657da6f-f196f738-2482ba44_0] Write session: close. Timeout = 0 ms 2025-06-25T14:55:31.008149Z :INFO: [] MessageGroupId [src] SessionId [src|a490c1e8-6657da6f-f196f738-2482ba44_0] Write session will now close 2025-06-25T14:55:31.008196Z :DEBUG: [] MessageGroupId [src] SessionId [src|a490c1e8-6657da6f-f196f738-2482ba44_0] Write session: aborting 2025-06-25T14:55:31.008644Z :INFO: [] MessageGroupId [src] SessionId [src|a490c1e8-6657da6f-f196f738-2482ba44_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:55:31.008681Z :DEBUG: [] MessageGroupId [src] SessionId [src|a490c1e8-6657da6f-f196f738-2482ba44_0] Write session: destroy 2025-06-25T14:55:31.037080Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|a490c1e8-6657da6f-f196f738-2482ba44_0 grpc read done: success: 0 data: 2025-06-25T14:55:31.037103Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|a490c1e8-6657da6f-f196f738-2482ba44_0 grpc read failed 2025-06-25T14:55:31.037131Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|a490c1e8-6657da6f-f196f738-2482ba44_0 grpc closed 2025-06-25T14:55:31.037147Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|a490c1e8-6657da6f-f196f738-2482ba44_0 is DEAD 2025-06-25T14:55:31.037720Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:55:31.038762Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7519900748311465194:2485] destroyed 2025-06-25T14:55:31.038828Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
====TYdbPqTestRetryPolicy() ====ExpectBreakDown === Session was created, waiting for retries >>> Ready to answer: ok ====CreateRetryState ====CreateRetryState Initialized Test retry state: get retry delay 2025-06-25T14:55:31.104471Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-25T14:55:33.107485Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-25T14:55:34.314686Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:55:34.314728Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:34.589061Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay === In the next federation discovery response dc2 will be available 2025-06-25T14:55:35.112573Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-25T14:55:37.113575Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-25T14:55:39.115232Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-25T14:55:39.597070Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-25T14:55:41.115561Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-25T14:55:43.124446Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-25T14:55:44.593046Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-25T14:55:45.130568Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-25T14:55:47.138542Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-25T14:55:49.144491Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-25T14:55:49.593732Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-25T14:55:51.145525Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-25T14:55:53.147600Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-25T14:55:54.593872Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction Test retry state: get retry delay 2025-06-25T14:55:55.149771Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-25T14:55:57.150539Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s Test retry state: get retry delay 2025-06-25T14:55:59.151527Z :NOTICE: [/Root] [] [] Retry to update federation state in 2.000000s 2025-06-25T14:55:59.525612Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 
WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 TotalPartitions: 1 SourceIdMaxCounts: 6000000 } 2025-06-25T14:55:59.524423Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186224037893][rt3.dc1--test-topic] TPersQueueReadBalancer::HandleWakeup 2025-06-25T14:55:59.524529Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186224037893][rt3.dc1--test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037892 Cookie: 1 2025-06-25T14:55:59.529016Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][rt3.dc1--test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 1 DataSize: 0 UsedReserveSize: 0 2025-06-25T14:55:59.529537Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][rt3.dc1--test-topic] ProcessPendingStats. PendingUpdates size 1 2025-06-25T14:55:59.597927Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction === Waiting for repair >>> Ready to answer: ok 2025-06-25T14:56:01.152812Z :INFO: [/Root] [] [] Start federated write session to database 'dc2' (previous was ) FederationState: { Status: SUCCESS SelfLocation: "fancy_datacenter" DbInfos: [ { name: "dc1" path: "/Root" id: "account-dc1" endpoint: "localhost:1789" location: "dc1" status: AVAILABLE weight: 1000 } { name: "dc2" path: "/Root" id: "account-dc2" endpoint: "localhost:1789" location: "dc2" status: AVAILABLE weight: 500 } { name: "dc3" path: "/Root" id: "account-dc3" endpoint: "localhost:1789" location: "dc3" status: AVAILABLE weight: 500 } ] ControlPlaneEndpoint: cp.logbroker-federation:2135 } === Closing the session 2025-06-25T14:56:01.189028Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: try to update token 2025-06-25T14:56:01.189730Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Start write session. Will connect to nodeId: 0 2025-06-25T14:56:01.210250Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: write to message_group: src_id 2025-06-25T14:56:01.210411Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: send init request: init_request { path: "test-topic" message_group_id: "src_id" } 2025-06-25T14:56:01.210988Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: OnWriteDone gRpcStatusCode: 0 2025-06-25T14:56:01.214653Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: close. 
Timeout 0.000000s 2025-06-25T14:56:01.214704Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session will now close 2025-06-25T14:56:01.214757Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: aborting 2025-06-25T14:56:01.215147Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: gracefully shut down, all writes complete 2025-06-25T14:56:01.215799Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 1, Msg: CANCELLED, Details: , InternalError: 0 2025-06-25T14:56:01.215868Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750863361215 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:56:01.215917Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session is aborting and will not restart 2025-06-25T14:56:01.215994Z :DEBUG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: destroy 2025-06-25T14:56:01.212903Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-25T14:56:01.212942Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-25T14:56:01.216070Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { path: "test-topic" message_group_id: "src_id" } 2025-06-25T14:56:01.216131Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: grpc closed 2025-06-25T14:56:01.216145Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: is DEAD >> KqpQueryService::ReadDatashardAndColumnshard [GOOD] |88.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::AlterTable_DropNotNull_WithSetFamily_Valid [GOOD] Test command err: Trying to start YDB, gRPC: 26409, MsgBus: 4193 2025-06-25T14:55:41.744086Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900791411097371:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:41.750049Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ed/r3tmp/tmp6xTVqE/pdisk_1.dat 2025-06-25T14:55:42.103369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:42.103469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:42.105215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:42.115044Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900791411097352:2080] 1750863341735919 != 1750863341735922 2025-06-25T14:55:42.130050Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26409, node 1 
2025-06-25T14:55:42.231384Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:42.231400Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:42.231408Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:42.231522Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4193 TClient is connected to server localhost:4193 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:55:42.845409Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:42.972516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:42.992689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:44.666641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900804295999878:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:44.666719Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:44.666832Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900804295999886:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:44.671353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:44.679339Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900804295999892:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:55:44.767401Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900804295999943:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:45.047827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:45.280797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:55:45.314211Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900808590967486:2480] txid# 281474976710664, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:45.324591Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900808590967493:2485] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZWJjNjM1MmYtYzA3N2MyMDctY2U0ZWU1OTItNzZhODBjYTE=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:45.344167Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:55:45.363580Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900808590967553:2532] txid# 281474976710667, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:45.365357Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900808590967560:2537] txid# 281474976710668, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZWJjNjM1MmYtYzA3N2MyMDctY2U0ZWU1OTItNzZhODBjYTE=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:45.367528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:45.635663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 
281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:55:45.741062Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900808590967740:2642] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:45.742424Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900808590967747:2647] txid# 281474976710675, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZWJjNjM1MmYtYzA3N2MyMDctY2U0ZWU1OTItNzZhODBjYTE=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:45.762785Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-25T14:55:45.781712Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900808590967801:2368], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:21: Error: At function: KiReadTable!
:3:21: Error: Cannot find table 'db.[/Root/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:45.781876Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZjZiNTlmNWUtNWNiZmQ3MDAtMzViMDhiYWUtZjUxNzZiODc=, ActorId: [1:7519900808590967799:2367], ActorState: ExecuteState, TraceId: 01jykscy3b155t91vj3w9gwrem, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:45.809391Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900808590967811:2373], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:17: Error: At function: KiReadTable!
:3:17: Error: Cannot find table 'db.[/Root/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:45.810424Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YWU0NmQ2NDUtMjhhYjYxYzUtYTMzYzdmYzItYTc0Yzc1Nw==, ActorId: [1:7519900808590967809:2372], ActorState: ExecuteState, TraceId: 01jykscy452wwxq9md41jmgd2c, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 24417, MsgBus: 10832 2025-06-25T14:55:46.591492Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor= ... work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:55.648255Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:55.695938Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:55.960516Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900831116635629:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:55.961072Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:56.197979Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.447342Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.582781Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.723900Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715685:0, at schemeshard: 72057594046644480, first GetDB called at: 
(GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.945417Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715688:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:57.124199Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 12962, MsgBus: 6613 2025-06-25T14:55:57.987058Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519900859371502187:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:57.987196Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017ed/r3tmp/tmpBs5jry/pdisk_1.dat 2025-06-25T14:55:58.128591Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519900859371502166:2080] 1750863357978246 != 1750863357978249 2025-06-25T14:55:58.136053Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:58.140556Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:58.140634Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:58.143226Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12962, node 4 2025-06-25T14:55:58.216766Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:58.216786Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:58.216794Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:58.216898Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6613 TClient is connected to server localhost:6613 WaitRootIsUp 'Root'... 
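The SCHEME_ERROR entries earlier in this block ("Cannot find table 'db.[/Root/Temp]' because it does not exist or you do not have access permissions", issue code 2003) are the standard KQP response to a query that references a path which is no longer present, here most likely a temporary table that has already been cleaned up. A minimal sketch that reproduces this class of error against any missing path (the path is illustrative, not taken from the test source):

  -- Selecting from a path that does not exist fails query compilation with SCHEME_ERROR, issue code 2003:
  SELECT * FROM `/Root/Temp`;
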
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:58.751436Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:58.760570Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:59.000461Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:01.638785Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519900876551371982:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.638804Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519900876551371995:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.638867Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.641941Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:01.651641Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519900876551371998:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:56:01.750751Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519900876551372049:2332] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:01.869016Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:02.153031Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519900880846339498:2317], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:30: Error: At function: KiWriteTable!
:2:84: Error: Failed to convert type: Struct<'id':Int32,'val1':Null,'val2':Int32> to Struct<'id':Int32,'val1':Int32,'val2':Int32?>
:2:84: Error: Failed to convert 'val1': Null to Int32
:2:84: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T14:56:02.153253Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=YTFjMDkyZjYtMTRjZWEwYzctYzdmZDYyYmYtNTdjNjgyZDc=, ActorId: [4:7519900880846339496:2316], ActorState: ExecuteState, TraceId: 01jyksde2nacw26phswprz0ttf, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:56:02.178262Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:56:02.216294Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) >> KqpQueryService::Ddl >> TPersQueueTest::TClusterTrackerTest [GOOD] >> TPersQueueTest::TestReadPartitionByGroupId >> TPersQueueTest::DefaultMeteringMode [GOOD] >> KqpQueryService::FlowControllOnHugeRealTable+LongRow [GOOD] >> KqpQueryService::Explain ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ReadDatashardAndColumnshard [GOOD] Test command err: Trying to start YDB, gRPC: 7953, MsgBus: 21637 2025-06-25T14:55:22.184385Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900710700226658:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:22.184480Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00182f/r3tmp/tmp5SoOMD/pdisk_1.dat 2025-06-25T14:55:22.688533Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900710700226638:2080] 1750863322182794 != 1750863322182797 2025-06-25T14:55:22.740250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:22.740926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:22.741376Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:22.744250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7953, node 1 2025-06-25T14:55:22.884811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:22.884834Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:22.884875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:22.884981Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: 
got bad distributable configuration TClient is connected to server localhost:21637 2025-06-25T14:55:23.212366Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21637 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:23.427960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:23.448955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:23.468425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:23.630752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:23.800544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:23.894547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:25.477277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900723585130188:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:25.477394Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:25.802772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:25.832333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:25.865971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:25.894531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:25.932142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:25.983461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:26.060358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:26.134379Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900727880098146:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:26.134495Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:26.134770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900727880098151:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:26.139028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:26.151898Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900727880098153:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:26.216951Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900727880098204:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:27.184035Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900710700226658:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:27.184102Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:37.736906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:55:37.736937Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Trying to start YDB, gRPC: 9935, MsgBus: 16472 2025-06-25T14:55:39.092081Z node 2 :METADATA_PROVIDER WARN: log.c ... e;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:56:01.137243Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:56:01.137296Z node 3 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037892;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:56:01.137330Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:56:01.137373Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:56:01.137663Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:56:01.137696Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:56:01.141313Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519900873022320571:2315];ev=NActors::IEventHandle;tablet_id=72075186224037890;tx_id=281474976710661;this=88923019834208;method=TTxController::StartProposeOnExecute;tx_info=281474976710661:TX_KIND_SCHEMA;min=1750863361135;max=18446744073709551615;plan=0;src=[3:7519900860137418061:2152];cookie=32:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.141406Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[3:7519900873022320572:2316];ev=NActors::IEventHandle;tablet_id=72075186224037891;tx_id=281474976710661;this=88923004130912;method=TTxController::StartProposeOnExecute;tx_info=281474976710661:TX_KIND_SCHEMA;min=1750863361140;max=18446744073709551615;plan=0;src=[3:7519900860137418061:2152];cookie=42:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.144131Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[3:7519900873022320558:2313];ev=NActors::IEventHandle;tablet_id=72075186224037893;tx_id=281474976710661;this=88923019842496;method=TTxController::StartProposeOnExecute;tx_info=281474976710661:TX_KIND_SCHEMA;min=1750863361143;max=18446744073709551615;plan=0;src=[3:7519900860137418061:2152];cookie=62:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.144131Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[3:7519900873022320536:2309];ev=NActors::IEventHandle;tablet_id=72075186224037896;tx_id=281474976710661;this=88923004132256;method=TTxController::StartProposeOnExecute;tx_info=281474976710661:TX_KIND_SCHEMA;min=1750863361143;max=18446744073709551615;plan=0;src=[3:7519900860137418061:2152];cookie=92:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.144729Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[3:7519900873022320573:2317];ev=NActors::IEventHandle;tablet_id=72075186224037895;tx_id=281474976710661;this=88923004133600;method=TTxController::StartProposeOnExecute;tx_info=281474976710661:TX_KIND_SCHEMA;min=1750863361144;max=18446744073709551615;plan=0;src=[3:7519900860137418061:2152];cookie=82:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.147025Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519900873022320539:2312];ev=NActors::IEventHandle;tablet_id=72075186224037889;tx_id=281474976710661;this=88923004117920;method=TTxController::StartProposeOnExecute;tx_info=281474976710661:TX_KIND_SCHEMA;min=1750863361146;max=18446744073709551615;plan=0;src=[3:7519900860137418061:2152];cookie=22:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.147939Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[3:7519900873022320559:2314];ev=NActors::IEventHandle;tablet_id=72075186224037892;tx_id=281474976710661;this=88923004134272;method=TTxController::StartProposeOnExecute;tx_info=281474976710661:TX_KIND_SCHEMA;min=1750863361147;max=18446744073709551615;plan=0;src=[3:7519900860137418061:2152];cookie=52:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.150008Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;self_id=[3:7519900873022320538:2311];ev=NActors::IEventHandle;tablet_id=72075186224037898;tx_id=281474976710661;this=88923004119488;method=TTxController::StartProposeOnExecute;tx_info=281474976710661:TX_KIND_SCHEMA;min=1750863361149;max=18446744073709551615;plan=0;src=[3:7519900860137418061:2152];cookie=112:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.150722Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[3:7519900873022320574:2318];ev=NActors::IEventHandle;tablet_id=72075186224037897;tx_id=281474976710661;this=88923004138080;method=TTxController::StartProposeOnExecute;tx_info=281474976710661:TX_KIND_SCHEMA;min=1750863361150;max=18446744073709551615;plan=0;src=[3:7519900860137418061:2152];cookie=102:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.152660Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7519900873022320537:2310];ev=NActors::IEventHandle;tablet_id=72075186224037894;tx_id=281474976710661;this=88923004120832;method=TTxController::StartProposeOnExecute;tx_info=281474976710661:TX_KIND_SCHEMA;min=1750863361152;max=18446744073709551615;plan=0;src=[3:7519900860137418061:2152];cookie=72:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.156068Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.156458Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.170176Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710661; 2025-06-25T14:56:01.170236Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710661; 2025-06-25T14:56:01.170708Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.170751Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.175905Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710661; 2025-06-25T14:56:01.175912Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710661; 2025-06-25T14:56:01.176568Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.177397Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.181505Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710661; 2025-06-25T14:56:01.182146Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.182202Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710661; 2025-06-25T14:56:01.183339Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.187022Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710661; 2025-06-25T14:56:01.187713Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.188002Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710661; 2025-06-25T14:56:01.189112Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T14:56:01.192729Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710661; 2025-06-25T14:56:01.193687Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710661; 2025-06-25T14:56:01.422710Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:56:02.076061Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900860137417752:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:02.085452Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryService::ClosedSessionRemovedWhileActiveWithQuery [GOOD] >> KqpQueryService::CloseSessionsWithLoad ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::DefaultMeteringMode [GOOD] Test command err: 2025-06-25T14:51:27.604155Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899700700766912:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:27.604263Z 
node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:27.632193Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899700235551761:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:27.632232Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b40/r3tmp/tmp795omB/pdisk_1.dat 2025-06-25T14:51:27.765170Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:27.778102Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:27.907356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:27.907446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:27.910991Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:27.922023Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:51:27.923248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17568, node 1 2025-06-25T14:51:27.978693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:27.978777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:27.982053Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:27.989684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b40/r3tmp/yandexLNJ8N1.tmp 2025-06-25T14:51:27.989703Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b40/r3tmp/yandexLNJ8N1.tmp 2025-06-25T14:51:27.989811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b40/r3tmp/yandexLNJ8N1.tmp 2025-06-25T14:51:27.989947Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:28.035829Z INFO: TTestServer started on Port 26720 GrpcPort 17568 TClient is connected to server localhost:26720 PQClient connected to localhost:17568 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:51:28.242534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:51:28.281504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-25T14:51:28.611573Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:28.639269Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:30.305942Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899713120453931:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:30.306009Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519899713120453909:2269], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:30.306176Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:51:30.311072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:51:30.329158Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519899713120453936:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:51:30.591521Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519899713120453964:2131] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:51:30.619589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:30.620173Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519899713585669961:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:51:30.620402Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NTBmNzliYzMtZmY3ZGNiYTUtOGYwZGY5Y2UtZGZiZDdlM2E=, ActorId: [1:7519899713585669934:2301], ActorState: ExecuteState, TraceId: 01jyks54mjd6chhvf9v4505qng, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:51:30.622460Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:51:30.623989Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519899713120453979:2277], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:51:30.624196Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=M2RhZmVlZTEtMjA0YTk0N2EtNWJlNTA0NDAtMjU5NzI0Nw==, ActorId: [2:7519899713120453905:2268], ActorState: ExecuteState, TraceId: 01jyks54m0b938htr4y0dzdqdp, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:51:30.624527Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:51:30.700110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:51:30.778708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster ... 
037892] TxId 281474976715672 State CALCULATING FrontTxId 281474976715672 2025-06-25T14:56:03.038412Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72075186224037892] Received 1, Expected 1 2025-06-25T14:56:03.038452Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715672, NewState CALCULATED 2025-06-25T14:56:03.038488Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976715672 moved from CALCULATING to CALCULATED 2025-06-25T14:56:03.038530Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72075186224037892] write key for TxId 281474976715672 2025-06-25T14:56:03.039050Z node 29 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715672] save tx TxId: 281474976715672 State: CALCULATED MinStep: 1750863363004 MaxStep: 18446744073709551615 Step: 1750863363060 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "ttt" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/ttt" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519900831122883129 RawX2: 124554053782 } Partitions { Partition { PartitionId: 0 } } 2025-06-25T14:56:03.039237Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:56:03.046471Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:56:03.046537Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state CALCULATED 2025-06-25T14:56:03.046576Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715672, State CALCULATED 2025-06-25T14:56:03.046613Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976715672 State CALCULATED FrontTxId 281474976715672 2025-06-25T14:56:03.046651Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715672, NewState WAIT_RS 2025-06-25T14:56:03.046695Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976715672 moved from CALCULATED to WAIT_RS 2025-06-25T14:56:03.046807Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4027: [PQ: 72075186224037892] Send TEvTxProcessing::TEvReadSet to 0 receivers. Wait TEvTxProcessing::TEvReadSet from 0 senders. 
2025-06-25T14:56:03.046858Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4521: [PQ: 72075186224037892] HaveParticipantsDecision 1 2025-06-25T14:56:03.046966Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715672, NewState EXECUTING 2025-06-25T14:56:03.047007Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976715672 moved from WAIT_RS to EXECUTING 2025-06-25T14:56:03.047035Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72075186224037892] Received 0, Expected 1 2025-06-25T14:56:03.047132Z node 29 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1750863363060, TxId 281474976715672 2025-06-25T14:56:03.047674Z node 29 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:56:03.051407Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:56:03.051489Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72075186224037892, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:56:03.051554Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3583: [PQ: 72075186224037892] Handle TEvPQ::TEvTxCommitDone Step 1750863363060, TxId 281474976715672, Partition 0 2025-06-25T14:56:03.051591Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTING 2025-06-25T14:56:03.051623Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715672, State EXECUTING 2025-06-25T14:56:03.051658Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976715672 State EXECUTING FrontTxId 281474976715672 2025-06-25T14:56:03.051682Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72075186224037892] Received 1, Expected 1 2025-06-25T14:56:03.051728Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4224: [PQ: 72075186224037892] TxId: 281474976715672 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-25T14:56:03.051767Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4555: [PQ: 72075186224037892] complete TxId 281474976715672 2025-06-25T14:56:03.052253Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72075186224037892] Apply new config PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "ttt" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/ttt" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } 2025-06-25T14:56:03.052360Z node 29 
:PERSQUEUE NOTICE: pq_impl.cpp:1129: [PQ: 72075186224037892] metering mode METERING_MODE_REQUEST_UNITS 2025-06-25T14:56:03.052523Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4573: [PQ: 72075186224037892] delete partitions for TxId 281474976715672 2025-06-25T14:56:03.052569Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715672, NewState EXECUTED 2025-06-25T14:56:03.052620Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976715672 moved from EXECUTING to EXECUTED 2025-06-25T14:56:03.052688Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72075186224037892] write key for TxId 281474976715672 2025-06-25T14:56:03.053161Z node 29 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 281474976715672] save tx TxId: 281474976715672 State: EXECUTED MinStep: 1750863363004 MaxStep: 18446744073709551615 Step: 1750863363060 Predicate: true Kind: KIND_CONFIG TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "ttt" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/PQ/ttt" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } } BootstrapConfig { } SourceActor { RawX1: 7519900831122883129 RawX2: 124554053782 } Partitions { Partition { PartitionId: 0 } } 2025-06-25T14:56:03.053509Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:56:03.065189Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:56:03.065254Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-25T14:56:03.065291Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715672, State EXECUTED 2025-06-25T14:56:03.065327Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72075186224037892] TxId 281474976715672 State EXECUTED FrontTxId 281474976715672 2025-06-25T14:56:03.065362Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-25T14:56:03.065399Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715672, NewState WAIT_RS_ACKS 2025-06-25T14:56:03.065432Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72075186224037892] TxId 281474976715672 moved from EXECUTED to WAIT_RS_ACKS 2025-06-25T14:56:03.065491Z node 29 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715672] PredicateAcks: 0/0 2025-06-25T14:56:03.065509Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 
72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-25T14:56:03.065536Z node 29 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715672] PredicateAcks: 0/0 2025-06-25T14:56:03.065573Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72075186224037892] add an TxId 281474976715672 to the list for deletion 2025-06-25T14:56:03.065615Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72075186224037892] TxId 281474976715672, NewState DELETING 2025-06-25T14:56:03.065664Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72075186224037892] delete key for TxId 281474976715672 2025-06-25T14:56:03.065762Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:56:03.070870Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:56:03.071012Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-25T14:56:03.071048Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72075186224037892] TxId 281474976715672, State DELETING 2025-06-25T14:56:03.071085Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72075186224037892] delete TxId 281474976715672 2025-06-25T14:56:03.087512Z node 29 :PQ_READ_PROXY ERROR: grpc_pq_schema.cpp:148: new Describe topic request 2025-06-25T14:56:03.087627Z node 29 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ/ttt >> KqpQueryServiceScripts::TestTruncatedBySize [GOOD] >> KqpQueryServiceScripts::ForgetScriptExecution [GOOD] >> KqpQueryService::TableSink_HtapComplex+withOltpSink [GOOD] >> KqpQueryService::TableSink_HtapComplex-withOltpSink >> KqpQueryService::ReadManyShardsRange [GOOD] >> KqpQueryService::ReadManyRangesAndPoints >> KqpQueryServiceScripts::ExecuteScriptPg [GOOD] >> KqpService::SwitchCache-UseCache [GOOD] >> KqpService::ToDictCache+UseCache >> KqpQueryService::ExecuteQueryScalar [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::TestTruncatedBySize [GOOD] Test command err: Trying to start YDB, gRPC: 13641, MsgBus: 4447 2025-06-25T14:55:19.333830Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900697913094104:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:19.333873Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183f/r3tmp/tmpHvPMmy/pdisk_1.dat 2025-06-25T14:55:19.786518Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:19.787148Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900697913094084:2080] 1750863319332770 != 1750863319332773 TServer::EnableGrpc on GrpcPort 13641, node 1 2025-06-25T14:55:19.804822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:19.804910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:19.812594Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Connecting -> Connected 2025-06-25T14:55:19.907787Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:19.907811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:19.907817Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:19.907953Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4447 TClient is connected to server localhost:4447 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:55:20.382182Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:20.462082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:20.481258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:55:20.617023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:20.781253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:20.859925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:22.290250Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900710797997607:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:22.290362Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:22.613941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:22.659610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:22.691091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:22.724475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:22.794581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:22.831718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:22.881276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:22.971923Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900710797998273:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:22.971998Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:22.972226Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900710797998278:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:22.975342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:22.984644Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900710797998280:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:23.066605Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900715092965627:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:24.333918Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900697913094104:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:24.334236Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 7960, MsgBus: 3137 2025-06-25T14:55:24.926613Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900717952234902:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:24.926680Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # ... for subscription [3:7519900846337521914:2080] 1750863354322134 != 1750863354322137 2025-06-25T14:55:54.517449Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16896, node 3 2025-06-25T14:55:54.572900Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:54.572939Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:54.572950Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:54.573102Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27469 TClient is connected to server localhost:27469 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:55.218825Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:55.230104Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:55.303619Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:55.366496Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:55.500828Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:55.589327Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:58.550044Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900863517392758:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:58.550130Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:58.610479Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:58.683539Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:58.720596Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:58.757870Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:58.831862Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:58.868066Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:58.939923Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:59.067469Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900867812360717:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:59.067574Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:59.067689Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900867812360722:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:59.071149Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:59.084902Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-25T14:55:59.085308Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900867812360724:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:59.170613Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900867812360775:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:59.322973Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900846337521932:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:59.323043Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:00.627758Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:00.629840Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:00.632299Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:05.791849Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863365825, txId: 281474976715746] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ForgetScriptExecution [GOOD] Test command err: Trying to start YDB, gRPC: 9408, MsgBus: 29536 2025-06-25T14:55:34.337821Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900763495784817:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:34.337861Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017fe/r3tmp/tmp5ZOIpe/pdisk_1.dat 2025-06-25T14:55:34.696345Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900763495784609:2080] 1750863334301829 != 1750863334301832 2025-06-25T14:55:34.706890Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:34.738766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-25T14:55:34.738887Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 9408, node 1 2025-06-25T14:55:34.740244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:34.824769Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:34.824788Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:34.824794Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:34.824882Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29536 TClient is connected to server localhost:29536 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:55:35.338954Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:35.377006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:35.398353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:35.403494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:35.566966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:35.706108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:35.768122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:37.196208Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900776380688149:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:37.196303Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:37.510235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.536055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.562714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.593617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.623173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.659769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.731814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.792001Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900776380688808:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:37.792080Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:37.792099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900776380688813:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:37.795656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:37.807208Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900776380688815:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:37.900207Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900776380688866:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:38.870175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:38.871530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ ... 68897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:49.429772Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62190, node 3 2025-06-25T14:55:49.504795Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:49.504824Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:49.504833Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:49.504942Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13183 TClient is connected to server localhost:13183 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T14:55:50.182019Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:55:50.187683Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:50.200433Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:50.222481Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:50.277102Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:50.451091Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:50.531200Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:52.571834Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900838397207869:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:52.571942Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:52.649049Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.720025Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.752749Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.795370Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.826017Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.904855Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:52.946704Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:53.045742Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900842692175832:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.045844Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.046098Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900842692175837:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:53.049697Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:53.064527Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900842692175839:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:53.138714Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900842692175890:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:54.101671Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.103510Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.105245Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.208500Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900825512304370:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:54.208595Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:04.397310Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:56:04.397343Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndSameParams >> TestYmqHttpProxy::TestSendMessage >> TestYmqHttpProxy::TestGetQueueUrl >> TestKinesisHttpProxy::DifferentContentTypes >> TestYmqHttpProxy::TestCreateQueue ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ExecuteScriptPg [GOOD] Test command err: Trying to start YDB, gRPC: 4324, MsgBus: 10193 2025-06-25T14:55:42.428957Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900796808669610:2128];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:42.428997Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017e7/r3tmp/tmpRoiyQ8/pdisk_1.dat 2025-06-25T14:55:42.823152Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900796808669522:2080] 1750863342413188 != 1750863342413191 2025-06-25T14:55:42.832731Z node 1 
:IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:42.835102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:42.835193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:42.842864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4324, node 1 2025-06-25T14:55:42.931380Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:42.931405Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:42.931411Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:42.931501Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10193 TClient is connected to server localhost:10193 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:55:43.454701Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:43.535399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:43.560554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:43.576011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:43.706867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:43.837337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:43.899805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:45.631254Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900809693573049:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:45.631367Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:45.902495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:45.928031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:45.960036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:45.993839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:46.022921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:46.056701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:46.099565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:46.159698Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900813988541000:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:46.159773Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:46.159973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900813988541005:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:46.163980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:46.178555Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900813988541007:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:46.243132Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900813988541058:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:47.139026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.144389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ ... wn -> Disconnected 2025-06-25T14:55:59.416578Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:59.416952Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:59.429871Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6835, node 3 2025-06-25T14:55:59.552692Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:59.552716Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:59.552725Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:59.552857Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8055 TClient is connected to server localhost:8055 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:56:00.066495Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:56:00.085620Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:00.153387Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:00.275409Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:56:00.331715Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:00.406287Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:02.752532Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900881080861730:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:02.752618Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:02.796013Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:02.829899Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:02.901573Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:02.943452Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:03.020296Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:03.060186Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:03.104343Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:03.167554Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900885375829686:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:03.167635Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:03.168066Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900885375829691:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:03.172553Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:03.183334Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900885375829693:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:56:03.274717Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900885375829744:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:04.244494Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900868195958249:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:04.244556Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:04.365684Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:04.367669Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:04.374908Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:06.775787Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863366812, txId: 281474976715705] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ExecuteQueryScalar [GOOD] Test command err: Trying to start YDB, gRPC: 15761, MsgBus: 29183 2025-06-25T14:55:40.984386Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900786162750521:2056];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:40.984477Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f5/r3tmp/tmpjHBSUr/pdisk_1.dat 2025-06-25T14:55:41.299086Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900786162750506:2080] 1750863340983652 != 1750863340983655 2025-06-25T14:55:41.309007Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15761, node 1 2025-06-25T14:55:41.393946Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Unknown -> Disconnected 2025-06-25T14:55:41.394190Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:41.413422Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:41.444667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:41.444682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:41.444686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:41.444761Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29183 TClient is connected to server localhost:29183 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:41.944373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:41.957686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:41.974862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:42.047105Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:42.138592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:42.298280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:55:42.382488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:44.284836Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900803342621318:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:44.284932Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:44.601914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:44.631783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:44.655336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:44.684184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:44.714611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:44.769364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:44.849999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:44.915039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900803342621977:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:44.915154Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:44.915651Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900803342621982:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:44.919203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:44.934977Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900803342621984:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:45.022455Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900807637589333:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:45.984474Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900786162750521:2056];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:45.984567Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:46.122220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 19900864847988582:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:59.996109Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519900847668117114:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:59.996168Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 18799, MsgBus: 24187 2025-06-25T14:56:00.928697Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519900874458823830:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:00.928752Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017f5/r3tmp/tmplwYMSs/pdisk_1.dat 2025-06-25T14:56:01.135384Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:01.149718Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:01.149813Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:01.152206Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18799, node 3 2025-06-25T14:56:01.279521Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:01.279556Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:01.279564Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T14:56:01.279679Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24187 TClient is connected to server localhost:24187 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:56:01.969230Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:56:01.985791Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:02.003281Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:02.087147Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:02.260371Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:02.346434Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:04.941174Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900891638694627:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:04.941279Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:05.014367Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:05.060401Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:05.098786Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:05.164248Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:05.210944Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:05.287457Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:05.360501Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:05.431779Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900895933662590:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:05.431853Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:05.432176Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900895933662595:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:05.436018Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:05.448168Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900895933662597:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:05.507246Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900895933662648:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:05.929845Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900874458823830:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:05.929942Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQueryService::ShowCreateTableDisable [GOOD] >> KqpQueryService::ShowCreateSysView >> KqpQueryService::DdlSecret [GOOD] >> KqpQueryService::DdlMixedDml >> KqpQueryService::DdlTx [GOOD] >> KqpQueryService::DdlWithExplicitTransaction >> KqpQueryServiceScripts::ExecuteScriptStatsProfile [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfter >> KqpQueryService::Explain [GOOD] >> KqpQueryService::Ddl [GOOD] >> KqpQueryService::DdlColumnTable >> KqpQueryServiceScripts::ListScriptExecutions [GOOD] >> KqpQueryServiceScripts::Tcl >> KqpQueryService::ReadManyRangesAndPoints [GOOD] >> KqpQueryService::TableSink_OlapDelete [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::Explain [GOOD] Test command err: Trying to start YDB, gRPC: 24578, MsgBus: 22227 2025-06-25T14:55:52.856252Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900839078159064:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:52.856755Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b9/r3tmp/tmpSVs38h/pdisk_1.dat 2025-06-25T14:55:53.257724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:53.257827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:53.279737Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24578, node 1 2025-06-25T14:55:53.300862Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:53.362709Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:53.362732Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:53.362742Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:53.362857Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22227 TClient is connected to server localhost:22227 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:53.820664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:55:53.856207Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:55:53.863372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:53.867391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:53.999270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:54.139894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:54.224384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:55.864109Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900851963062482:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:55.864342Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:56.160741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.234855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.270282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.302310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.380748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.459427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.513808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:56.595132Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900856258030444:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:56.595243Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:56.595579Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900856258030449:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:56.599390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:56.609603Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900856258030451:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:56.705736Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900856258030502:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:57.855185Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900839078159064:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:57.855242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17068, MsgBus: 28352 2025-06-25T14:55:59.249404Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900867833448515:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:59.249505Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/run ... 94046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:03.833308Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:04.250706Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519900867833448515:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:04.250763Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 10 Trying to start YDB, gRPC: 29936, MsgBus: 20456 2025-06-25T14:56:05.300199Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519900893951583707:2237];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b9/r3tmp/tmp8qHNWY/pdisk_1.dat 2025-06-25T14:56:05.318867Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:56:05.398464Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:05.405422Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519900893951583494:2080] 1750863365240007 != 1750863365240010 TServer::EnableGrpc on GrpcPort 29936, node 3 2025-06-25T14:56:05.412164Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:05.412225Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:05.413862Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:05.450624Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:05.450644Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:05.450650Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:05.450756Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20456 TClient is connected to server localhost:20456 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:05.943857Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:05.961748Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:06.040782Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:06.176727Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:06.262513Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:56:06.276618Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.399724Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900906836487009:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:08.399827Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:08.458657Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.489257Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.521085Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.553948Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.591435Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.660920Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.732436Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.795800Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900906836487674:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:08.795923Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:08.796160Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900906836487679:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:08.800083Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:08.811668Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900906836487681:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:56:08.900794Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900906836487732:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ReadManyRangesAndPoints [GOOD] Test command err: Trying to start YDB, gRPC: 5397, MsgBus: 24785 2025-06-25T14:55:57.752371Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900858856561392:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:57.753165Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017a8/r3tmp/tmpgvN39h/pdisk_1.dat 2025-06-25T14:55:58.195166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:58.195272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:58.198047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:58.222831Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:58.224444Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900858856561202:2080] 1750863357699351 != 1750863357699354 TServer::EnableGrpc on GrpcPort 5397, node 1 2025-06-25T14:55:58.274777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:58.274797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:58.274803Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:58.274884Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24785 TClient is connected to server localhost:24785 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:55:58.725713Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:58.756366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:00.712300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900871741463728:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:00.712450Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:00.983959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:01.281431Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900876036431592:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.281514Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.281714Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900876036431597:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.285457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:01.300494Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900876036431599:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:56:01.363913Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900876036431650:2683] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:02.730402Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900858856561392:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:02.730461Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21403, MsgBus: 8756 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017a8/r3tmp/tmpOzLOry/pdisk_1.dat 2025-06-25T14:56:03.472283Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:56:03.541844Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:03.542242Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900886374603918:2080] 1750863363338414 != 1750863363338417 2025-06-25T14:56:03.552344Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:03.552416Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:03.554120Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21403, node 2 2025-06-25T14:56:03.612705Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:03.612720Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:03.612725Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:03.612798Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8756 TClient is connected to server localhost:8756 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:04.042356Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:04.049213Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:56:04.380038Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:06.205324Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900899259506442:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:06.205388Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:06.231665Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:06.320569Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900899259506664:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:06.320631Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:06.320797Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900899259506669:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:06.323928Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:06.332338Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900899259506671:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:56:06.433999Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900899259506722:2466] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 26795, MsgBus: 18157 2025-06-25T14:56:07.620270Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519900905307238463:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:07.620366Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017a8/r3tmp/tmpoS8K7x/pdisk_1.dat 2025-06-25T14:56:07.746989Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:07.749025Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519900905307238442:2080] 1750863367619650 != 1750863367619653 TServer::EnableGrpc on GrpcPort 26795, node 3 2025-06-25T14:56:07.770343Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:07.770444Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:07.772533Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:07.822222Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:07.822246Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:07.822255Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:07.822401Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18157 TClient is connected to server localhost:18157 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:56:08.334493Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:08.629671Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:10.884473Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900918192140964:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:10.884581Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:10.915217Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:11.094138Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900922487108699:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:11.094230Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:11.094462Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900922487108704:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:11.099087Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:11.111011Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900922487108706:2333], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:56:11.211691Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900922487108757:2611] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OlapDelete [GOOD] Test command err: Trying to start YDB, gRPC: 13397, MsgBus: 10027 2025-06-25T14:55:51.150002Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900832479768877:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:51.150118Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017c4/r3tmp/tmpqOIXun/pdisk_1.dat 2025-06-25T14:55:51.592945Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900832479768858:2080] 1750863351149064 != 1750863351149067 2025-06-25T14:55:51.603134Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13397, node 1 2025-06-25T14:55:51.637879Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:51.637957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:51.640114Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:51.664806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:51.664833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:51.664839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:51.664927Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10027 TClient is connected to server localhost:10027 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:52.164462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:52.172908Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:54.036950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900845364671388:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:54.037064Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:54.357903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:55:54.534715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900845364671579:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:55:54.534896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900845364671579:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:55:54.535081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900845364671579:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:55:54.535178Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900845364671579:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:55:54.535277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900845364671579:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:55:54.535372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900845364671579:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:55:54.535480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900845364671579:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:55:54.535594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900845364671579:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:55:54.535686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900845364671579:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:55:54.535775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7519900845364671579:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:55:54.535874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037893;self_id=[1:7519900845364671579:2306];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:55:54.559135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900845364671542:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:55:54.559218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900845364671542:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:55:54.559420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900845364671542:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:55:54.559539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900845364671542:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:55:54.559625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900845364671542:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:55:54.559713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900845364671542:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:55:54.559809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900845364671542:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:55:54.559898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900845364671542:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:55:54.559991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900845364671542:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:55:54.560092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900845364671542:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:55:54.560237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7519900845364671542:2300];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:55:54.570354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037892;self_id=[1:7519900845364671544:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:55:54.570412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900845364671544:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:55:54.570594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900845364671544:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:55:54.570685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900845364671544:2302];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register ... [TPoolFetcherActor] ActorId: [3:7519900899994201747:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:06.764931Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:06.774204Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900899994201749:2355], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:56:06.870216Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900899994201800:2571] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:07.050425Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:56:07.050483Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:56:07.051483Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715664; 2025-06-25T14:56:07.904803Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900882814331477:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:07.905769Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:07.942520Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:56:07.943098Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;local_tx_no=27;method=complete;tx_info=281474976715667;fline=primary.h:101;event=repeated shard broken_flag info;shard_id=72075186224037893;reason=absent operator; 2025-06-25T14:56:07.943395Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:56:07.943537Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:56:07.943780Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:56:07.943964Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:56:07.944176Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:56:07.944431Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:56:07.944591Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:56:07.944764Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:56:10.680623Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[3:7519900899994201428:2304];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715666;tx_id=281474976715667;d=2.001384s; 2025-06-25T14:56:10.684406Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715667; 2025-06-25T14:56:11.242879Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[3:7519900899994201400:2297];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.242944Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[3:7519900899994201400:2297];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243048Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519900899994201392:2296];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243069Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7519900899994201392:2296];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243145Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519900899994201408:2300];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243169Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519900899994201408:2300];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243232Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[3:7519900899994201409:2301];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243249Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[3:7519900899994201409:2301];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243302Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[3:7519900899994201429:2305];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243319Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[3:7519900899994201429:2305];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243382Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[3:7519900899994201428:2304];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243403Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[3:7519900899994201428:2304];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243467Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7519900899994201426:2302];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243501Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7519900899994201426:2302];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243571Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[3:7519900899994201406:2298];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243597Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[3:7519900899994201406:2298];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243652Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[3:7519900899994201407:2299];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243671Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[3:7519900899994201407:2299];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243724Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519900899994201427:2303];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.243742Z node 3 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519900899994201427:2303];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished; 2025-06-25T14:56:11.353133Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715672; 2025-06-25T14:56:11.354085Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Complete;commit_tx_id=281474976715672;commit_lock_id=281474976715671;fline=manager.cpp:94;broken_lock_id=281474976715669; >> KqpQueryService::TableSink_Htap+withOltpSink [GOOD] >> KqpQueryService::TableSink_Htap-withOltpSink >> KqpQueryService::DdlWithExplicitTransaction [GOOD] >> KqpQueryService::Ddl_Dml >> KqpQueryService::ShowCreateSysView [GOOD] >> KqpQueryService::TableSink_OlapOrder [GOOD] >> KqpQueryService::TableSink_OlapRWQueries >> TestYmqHttpProxy::TestCreateQueue [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndSameParams [GOOD] >> TestYmqHttpProxy::TestGetQueueUrl [GOOD] >> TestYmqHttpProxy::TestSendMessage [GOOD] >> TestKinesisHttpProxy::DifferentContentTypes [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithBadQueueName >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndDifferentParams >> TestYmqHttpProxy::TestGetQueueUrlOfNotExistingQueue >> TestYmqHttpProxy::TestReceiveMessage >> TestKinesisHttpProxy::GoodRequestPutRecords >> TPersQueueTest::DisableDeduplication [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ShowCreateSysView [GOOD] Test command err: Trying to start YDB, gRPC: 5458, MsgBus: 20961 2025-06-25T14:55:58.412539Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900864304932414:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:58.412717Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001793/r3tmp/tmpbMRN9K/pdisk_1.dat 2025-06-25T14:55:58.726449Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5458, node 1 2025-06-25T14:55:58.818371Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:58.818507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:58.818716Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:58.818742Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:58.818773Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:58.818918Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:58.819518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server 
localhost:20961 TClient is connected to server localhost:20961 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:59.384391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:59.397185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:59.413207Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:59.416284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:59.548737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:59.689517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:59.750681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:01.321128Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900877189835808:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.321216Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.660726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:01.695333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:01.725974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:01.757515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:01.811192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:01.882196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:01.918628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:01.992437Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900877189836468:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.992533Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.992877Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900877189836473:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.998732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:02.011295Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900877189836475:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:56:02.103925Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900881484803822:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:03.105186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:03.409211Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900864304932414:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:03.409295Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 62479, MsgBus: 26160 2025-06-25T14:56:04.042541Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: f ... ET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:09.521046Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19058 TClient is connected to server localhost:19058 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:10.150107Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:10.159583Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:10.167134Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.227910Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:10.255564Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.426850Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.525496Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:12.929485Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900924709766638:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.929560Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.988661Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.039048Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.095091Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.165528Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.214214Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.258701Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.292830Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.387392Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900929004734593:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:13.387481Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:13.387514Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900929004734598:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:13.390470Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:13.404624Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900929004734600:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:13.495171Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900929004734651:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:14.218113Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900911824863146:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:14.218181Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:14.532494Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:14.622403Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519900933299702303:2488], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/.sys/show_create]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:56:14.622722Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=ZjI5OGJlYS0xMTllYzVkYi1mZTQwMDI1ZC1mZGMzZjU5Mw==, ActorId: [3:7519900933299702218:2474], ActorState: ExecuteState, TraceId: 01jyksdt7k6g1gc8zqy33n19x8, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:56:14.670995Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519900933299702316:2491], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/.sys/show_create]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:56:14.672492Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=ZjI5OGJlYS0xMTllYzVkYi1mZTQwMDI1ZC1mZGMzZjU5Mw==, ActorId: [3:7519900933299702218:2474], ActorState: ExecuteState, TraceId: 01jyksdt9d3vkw516x4c3vza6s, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: >> KqpQueryService::DdlMixedDml [GOOD] >> KqpQueryService::TableSink_HtapComplex-withOltpSink [GOOD] >> KqpQueryService::TableSink_HtapInteractive+withOltpSink >> TPersQueueTest::TestWriteSessionsConflicts [GOOD] >> TPersQueueTest::TestReadRuleServiceTypePassword |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest |88.5%| [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::PassRequestId >> FolderServiceTest::TFolderServiceAdapter >> TAccessServiceTest::Authenticate ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::DdlMixedDml [GOOD] Test command err: Trying to start YDB, gRPC: 15817, MsgBus: 26754 2025-06-25T14:55:26.525632Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900728606637841:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:26.525823Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001823/r3tmp/tmpy8mwYf/pdisk_1.dat 2025-06-25T14:55:27.045544Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:27.049306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:27.049410Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:27.051828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15817, node 1 2025-06-25T14:55:27.156771Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:27.156798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:27.156808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:27.156945Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26754 2025-06-25T14:55:27.538706Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26754 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:27.811593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:27.848008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:27.989062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:55:28.160339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.241103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:29.993399Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900741491541320:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:29.993500Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:30.363509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.401450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.453738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.491963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.534483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.617467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.657430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:30.724417Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900745786509278:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:30.724520Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:30.724730Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900745786509283:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:30.728883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:30.741495Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900745786509285:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:30.800947Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900745786509336:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:31.526078Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900728606637841:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:31.526147Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:31.901807Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900750081476927:3604] txid# 281474976710673, issues: { message: "Group already exists" severity: 1 } 2025-06-25T14:55:31.916543Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=YTI5YTBiOGQtYzIxNjZlMGItN2VkZmFhODItOWE2Y2MzYjg=, ActorId: [1:7519900750081476921:2480], ActorState: ExecuteState, TraceId: 01jykscgh03cn37y0gmvst8wke, Create QueryResponse for error on request, msg: 2025-06-25T14:55:32.043935Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900754376444270:3627] txid# 281474976710677, issues: { message: "Group not found" severity: 1 } 2025-06-25T14:55:32.044162Z node 1 : ... ion=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001823/r3tmp/tmpHI0wjD/pdisk_1.dat 2025-06-25T14:56:09.505547Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:09.528503Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519900910098862162:2080] 1750863369296075 != 1750863369296078 2025-06-25T14:56:09.530035Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:09.530132Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:09.537332Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18542, node 4 2025-06-25T14:56:09.625910Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:09.625943Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:09.625953Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:09.626127Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64776 2025-06-25T14:56:10.302156Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64776 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:10.441963Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:10.456650Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:10.465381Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.558664Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.852973Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.950648Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:13.715735Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519900927278732989:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:13.715878Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:13.789636Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.830005Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.864304Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.907879Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.950004Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:14.035019Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:14.114202Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:14.232741Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519900931573700952:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:14.232826Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:14.233128Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519900931573700957:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:14.237256Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:14.256370Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519900931573700959:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:14.300443Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519900910098862183:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:14.300529Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:14.334165Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519900931573701012:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:15.936078Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519900935868668600:2481], status: GENERIC_ERROR, issues:
: Error: Optimization, code: 1070
:8:25: Error: Queries with mixed data and scheme operations are not supported. Use separate queries for different types of operations., code: 2009 2025-06-25T14:56:15.936681Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=MTUxMzU4MDktYWJmNGZlNDgtNTRmODU5MjUtZGQzNWI4ODg=, ActorId: [4:7519900935868668593:2477], ActorState: ExecuteState, TraceId: 01jyksdvgpe6zvj40jq7dzk62g, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> TUserAccountServiceTest::Get >> KqpQueryServiceScripts::ExecuteScriptWithResultsTtlAndForgetAfter [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsertDataQuery+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomInsertDataQuery-UseSink >> KqpQueryServiceScripts::Tcl [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ExecuteScriptWithResultsTtlAndForgetAfter [GOOD] Test command err: Trying to start YDB, gRPC: 21441, MsgBus: 27450 2025-06-25T14:55:33.792797Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900757565104375:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:33.794437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001800/r3tmp/tmpXD9d0W/pdisk_1.dat 2025-06-25T14:55:34.266775Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21441, node 1 2025-06-25T14:55:34.272597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:34.272760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:34.274760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:34.353619Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:34.353637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:34.353644Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:34.353753Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27450 2025-06-25T14:55:34.748615Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27450 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:34.909023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:34.935719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:35.067626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:55:35.224960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:35.290256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:37.003264Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900770450007715:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:37.003357Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:37.361066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.398699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.447416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.522600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.593775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.668672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.709168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:37.803484Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900774744975680:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:37.803611Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:37.803801Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900774744975685:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:37.807496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:37.816921Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900774744975687:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:37.915380Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900774744975740:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:38.789600Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900757565104375:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:38.789669Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 32765, MsgBus: 13808 2025-06-25T14:55:39.889679Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900781893123034:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:39.889796Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001800/r3tmp/tmpTWIOqd/pdisk_1.dat 2025-06-25T14:55:40.058527Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Un ... : ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:07.683564Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:07.688524Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:56:07.698848Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:07.770022Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:07.899790Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:07.952601Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:08.032626Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.999632Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519900914452073810:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:10.999752Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:11.055977Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:11.118816Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:11.159802Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:11.235441Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:11.309630Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:11.385825Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:11.436847Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:11.545352Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519900918747041769:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:11.545446Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:11.545700Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519900918747041774:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:11.549136Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:11.565170Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519900918747041776:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:56:11.630163Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519900918747041827:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:11.835104Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519900897272202995:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:11.835169Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:13.076538Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.078618Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:13.080193Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:15.512851Z node 5 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TGetScriptExecutionOperationQueryActor] TraceId: 881f44ea-2ffc03c1-522c8925-d62a5038, Finish with NOT_FOUND, Issues: {
: Error: No such execution }, SessionId: ydb://session/3?node_id=5&id=YTM2ZmRjZDUtNWQ4YTA4MzktNzJlNzM5N2YtOGZhNTNlOTY=, TxId: 2025-06-25T14:56:16.685848Z node 5 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: 881f44ea-2ffc03c1-522c8925-d62a5038, Finish with NOT_FOUND, Issues: {
: Error: No such execution }, SessionId: ydb://session/3?node_id=5&id=YWI5NzM4MjMtYmI2MDdlNzgtZWJlYmExOTgtYWIwODFjMDU=, TxId: 2025-06-25T14:56:16.874699Z node 5 :KQP_PROXY WARN: kqp_script_executions.cpp:1077: [ScriptExecutions] [TForgetScriptExecutionOperationActor] ExecutionId: 881f44ea-2ffc03c1-522c8925-d62a5038, reply NOT_FOUND, issues: {
: Error: No such execution } 2025-06-25T14:56:16.906086Z node 5 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: 881f44ea-2ffc03c1-522c8925-d62a5038, Finish with NOT_FOUND, Issues: {
: Error: No such execution }, SessionId: ydb://session/3?node_id=5&id=MzQ3ZWZkYTktNjIyODg3ZS1lNTIyZTUwYi02MTExM2VlNw==, TxId: 2025-06-25T14:56:16.906279Z node 5 :KQP_PROXY WARN: kqp_script_executions.cpp:1674: [ScriptExecutions] [TCancelScriptExecutionOperationActor] ExecutionId: 881f44ea-2ffc03c1-522c8925-d62a5038, check lease failed 2025-06-25T14:56:17.343645Z node 5 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TGetScriptExecutionResultQueryActor] TraceId: 881f44ea-2ffc03c1-522c8925-d62a5038, State: Get results info, Finish with NOT_FOUND, Issues: {
: Error: Script execution not found }, SessionId: ydb://session/3?node_id=5&id=ZWU3MTQ2MDgtZjMzM2E0ODAtOWM2MDkxZWYtN2FkNGEwNDA=, TxId: >> FolderServiceTest::TFolderServiceTransitional ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::Tcl [GOOD] Test command err: Trying to start YDB, gRPC: 18223, MsgBus: 16566 2025-06-25T14:55:51.680976Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900836380090014:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:51.685815Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017be/r3tmp/tmpWdKUGp/pdisk_1.dat 2025-06-25T14:55:52.004664Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900836380089916:2080] 1750863351662204 != 1750863351662207 2025-06-25T14:55:52.013757Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18223, node 1 2025-06-25T14:55:52.117015Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:52.117050Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:52.117068Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:52.117254Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:52.121192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:52.121294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:52.126986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16566 TClient is connected to server localhost:16566 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:55:52.644627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:52.658857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:52.669885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:52.685481Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:55:52.802979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:52.958699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:53.032909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:54.622047Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900849264993443:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:54.622145Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:54.920129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.958139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.993251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:55.023940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:55.063813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:55.093988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:55.126900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:55.182860Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900853559961393:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:55.182937Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:55.183298Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900853559961398:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:55.187104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:55.199439Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900853559961400:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:55.299285Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900853559961451:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 22915, MsgBus: 16488 2025-06-25T14:55:57.125317Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900859955502415:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:57.125381Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017be/r3tmp/tmpcC5lCt/pdisk_1.dat 2025-06-25T14:55:57.230048Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:57.238861Z node 2 :HIVE WARN: no ... se: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:12.694168Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:12.716089Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:12.798525Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:12.916825Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:12.963230Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:13.051816Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:15.553688Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900936898888588:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:15.553765Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:15.612624Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:15.644477Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:15.684708Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:15.728271Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:15.761604Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:15.801179Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:15.843029Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:15.922873Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900936898889242:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:15.922955Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:15.923005Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900936898889247:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:15.925868Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:15.936285Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900936898889249:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:56:16.026171Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900941193856598:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:16.888444Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900919719017859:2129];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:16.888563Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:17.199397Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:17.201742Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:17.203086Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:17.552543Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519900945488824427:2501], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:13: Error: At function: Commit!
:3:13: Error: COMMIT not supported inside YDB query, code: 2008 2025-06-25T14:56:17.599748Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmJhNGZlMjQtZjQ0MzY5N2YtOTZhNzFhZmQtMTUyOTQzZWQ=, ActorId: [3:7519900945488824420:2497], ActorState: ExecuteState, TraceId: 01jyksdws92d4w86xss0rewrrx, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:56:18.991980Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519900949783792290:2689], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:13: Error: At function: Commit!
:3:13: Error: ROLLBACK not supported inside YDB query, code: 2008 2025-06-25T14:56:19.002626Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=ODBlMzEzMTgtM2ViZDEyNTMtYzdhNGViZTAtNzE4MTBiNTk=, ActorId: [3:7519900949783792286:2687], ActorState: ExecuteState, TraceId: 01jyksdyfyacnt6t9nc6617746, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> KqpQueryService::TableSink_OlapRWQueries [GOOD] >> TAccessServiceTest::PassRequestId [GOOD] >> TAccessServiceTest::Authenticate [GOOD] >> FolderServiceTest::TFolderServiceAdapter [GOOD] >> TUserAccountServiceTest::Get [GOOD] >> TestYmqHttpProxy::TestGetQueueUrlOfNotExistingQueue [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithBadQueueName [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithSameNameAndDifferentParams [GOOD] >> TServiceAccountServiceTest::IssueToken [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::PassRequestId [GOOD] Test command err: 2025-06-25T14:56:18.588972Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900951544613803:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:18.589042Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016a7/r3tmp/tmpSUWuJ7/pdisk_1.dat 2025-06-25T14:56:19.109570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:19.109684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:19.126112Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:19.128423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11949 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:19.430297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:19.452682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:56:19.490234Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700008e408]{trololo} Connect to grpc://localhost:24322 2025-06-25T14:56:19.494383Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408]{trololo} Request AuthenticateRequest { iam_token: "**** (717F937C)" } 2025-06-25T14:56:19.505117Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700008e408]{trololo} Response AuthenticateResponse { subject { user_account { id: "1234" } } } ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::Authenticate [GOOD] Test command err: 2025-06-25T14:56:18.648779Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900951578107201:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:18.648816Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00169d/r3tmp/tmppAhgqy/pdisk_1.dat 2025-06-25T14:56:19.091368Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:19.096490Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900951578107165:2080] 1750863378647788 != 1750863378647791 2025-06-25T14:56:19.141570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:19.141674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:19.145018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31211 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:19.419315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:19.444598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:56:19.489360Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700008e408] Connect to grpc://localhost:2664 2025-06-25T14:56:19.491738Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request AuthenticateRequest { iam_token: "**** (047D44F1)" } 2025-06-25T14:56:19.501996Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 7 Permission Denied 2025-06-25T14:56:19.502378Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request AuthenticateRequest { iam_token: "**** (342498C1)" } 2025-06-25T14:56:19.504740Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700008e408] Response AuthenticateResponse { subject { user_account { id: "1234" } } } >> TExternalDataSourceTest::ReadOnlyMode ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderServiceAdapter [GOOD] Test command err: 2025-06-25T14:56:18.595715Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900950229642771:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:18.596178Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016c6/r3tmp/tmpZTm2Bo/pdisk_1.dat 2025-06-25T14:56:19.072524Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900950229642677:2080] 1750863378572190 != 1750863378572193 2025-06-25T14:56:19.083025Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:19.088615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:19.088714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:19.092768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2151 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:56:19.383834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:19.414443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:19.456222Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000058708] Connect to grpc://localhost:9824 2025-06-25T14:56:19.458601Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000058708] Request ListFoldersRequest { id: "i_am_exists" } 2025-06-25T14:56:19.497580Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000058708] Response ListFoldersResponse { result { cloud_id: "cloud_from_old_service" } } 2025-06-25T14:56:19.500216Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700007cc08] Connect to grpc://localhost:5481 2025-06-25T14:56:19.500971Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007cc08] Request ResolveFoldersRequest { folder_ids: "i_am_exists" } 2025-06-25T14:56:19.507043Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700007cc08] Response ResolveFoldersResponse { resolved_folders { cloud_id: "cloud_from_new_service" } } 2025-06-25T14:56:19.507376Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700007cc08] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-25T14:56:19.508666Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700007cc08] Status 5 Not Found 2025-06-25T14:56:19.509006Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000058708] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-25T14:56:19.510371Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [517000058708] Status 5 Not Found >> TExternalDataSourceTest::DropTableTwice >> TExternalDataSourceTest::ParallelCreateSameExternalDataSource >> TExternalDataSourceTest::CreateExternalDataSource >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExists >> TestYmqHttpProxy::TestGetQueueUrlWithIAM ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TUserAccountServiceTest::Get [GOOD] Test command err: 2025-06-25T14:56:18.822579Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900950906849624:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:18.822640Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001698/r3tmp/tmpcMs0Pt/pdisk_1.dat 2025-06-25T14:56:19.303599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:19.303693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:19.305288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:19.335023Z node 1 :CONFIGS_DISPATCHER 
ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900950906849604:2080] 1750863378821612 != 1750863378821615 2025-06-25T14:56:19.346708Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:21830 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:19.616975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... >> TestYmqHttpProxy::TestCreateQueueWithEmptyName >> TestKinesisHttpProxy::GoodRequestPutRecords [GOOD] >> TestYmqHttpProxy::TestReceiveMessage [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OlapRWQueries [GOOD] Test command err: Trying to start YDB, gRPC: 9036, MsgBus: 30614 2025-06-25T14:55:46.430581Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900814394080421:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:46.433196Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017cb/r3tmp/tmpmBIp4c/pdisk_1.dat 2025-06-25T14:55:46.776044Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9036, node 1 2025-06-25T14:55:46.886186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:46.886839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:46.893760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:47.000824Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:47.000846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:47.000852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to 
initialize from file: (empty maybe) 2025-06-25T14:55:47.000943Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30614 2025-06-25T14:55:47.434504Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30614 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:47.632151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:47.653426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:49.397159Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900827278982752:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:49.397270Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:49.710146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:55:49.892526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519900827278982894:2296];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:55:49.892727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519900827278982894:2296];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:55:49.892953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519900827278982894:2296];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:55:49.893050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519900827278982894:2296];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:55:49.893172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519900827278982894:2296];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:55:49.893268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519900827278982894:2296];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:55:49.893372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519900827278982894:2296];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:55:49.893466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519900827278982894:2296];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:55:49.893552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519900827278982894:2296];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:55:49.893646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7519900827278982894:2296];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:55:49.893731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[1:7519900827278982894:2296];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:55:49.912948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900827278982897:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:55:49.913041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900827278982897:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:55:49.913201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900827278982897:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:55:49.913302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900827278982897:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:55:49.913384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900827278982897:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:55:49.913482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900827278982897:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:55:49.913568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900827278982897:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:55:49.913656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900827278982897:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:55:49.913773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900827278982897:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:55:49.913890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900827278982897:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:55:49.913984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519900827278982897:2299];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:55:49.933068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519900827278982900:2302];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:55:49.933124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900827278982900:2302];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:55:49.933318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900827278982900:2302];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:55:49.933433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900827278982900:2302];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLA ... 2025-06-25T14:56:19.523333Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519900954012268958:2298];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:56:19.523420Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519900954012268958:2298];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:56:19.523496Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519900954012268958:2298];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:56:19.523569Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519900954012268958:2298];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:56:19.523640Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519900954012268958:2298];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:56:19.523710Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519900954012268958:2298];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:56:19.523800Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519900954012268958:2298];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:56:19.527391Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:56:19.527458Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:56:19.527561Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:56:19.527590Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:56:19.527768Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:56:19.527797Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:56:19.527881Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:56:19.527910Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:56:19.527956Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:56:19.527983Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:56:19.528202Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:56:19.528233Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:56:19.528448Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:56:19.528482Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:56:19.528614Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:56:19.528647Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:56:19.528721Z node 3 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:56:19.528764Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:56:19.528795Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:56:19.529178Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:56:19.529211Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:56:19.534045Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7519900954012268958:2298];ev=NActors::IEventHandle;tablet_id=72075186224037890;tx_id=281474976710658;this=88923099884416;method=TTxController::StartProposeOnExecute;tx_info=281474976710658:TX_KIND_SCHEMA;min=1750863379533;max=18446744073709551615;plan=0;src=[3:7519900936832399378:2150];cookie=32:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:19.538421Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:19.542329Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:19.542961Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:19.543460Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:19.549044Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:19.549049Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:19.584656Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900954012269060:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:19.584756Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:19.584981Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900954012269065:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:19.589364Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:19.599338Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900954012269067:2320], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:56:19.665079Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900954012269118:2428] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:20.742151Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976710665;tx_id=281474976710665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710665; 2025-06-25T14:56:20.742246Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976710665;tx_id=281474976710665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710665; 2025-06-25T14:56:20.743004Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976710665;tx_id=281474976710665;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710665; 2025-06-25T14:56:20.841372Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900936832399159:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:20.841446Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpExtractPredicateLookup::ComplexRange [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithWrongBody >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet >> FolderServiceTest::TFolderService >> KqpQueryService::TableSink_HtapInteractive+withOltpSink [GOOD] >> TServiceAccountServiceTest::Get >> TestYmqHttpProxy::TestReceiveMessageWithAttributes >> TestKinesisHttpProxy::DoubleCreateStream >> TServiceAccountServiceTest::Get [GOOD] >> TExternalDataSourceTest::ParallelCreateSameExternalDataSource [GOOD] >> TExternalDataSourceTest::PreventDeletionOfDependentDataSources >> TExternalDataSourceTest::CreateExternalDataSource [GOOD] >> TExternalDataSourceTest::CreateExternalDataSourceShouldFailIfSuchEntityAlreadyExists >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExists [GOOD] >> TExternalDataSourceTest::DropTableTwice [GOOD] >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] >> TExternalDataSourceTest::ParallelCreateExternalDataSource >> TExternalDataSourceTest::RemovingReferencesFromDataSources >> TExternalDataSourceTest::SchemeErrors ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:56:23.219085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:23.219177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:23.219235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:23.219272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:23.220225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:23.220273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:23.220386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:23.220471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:56:23.221355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:23.223057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:23.298265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:56:23.298337Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:23.299095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:23.319535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:23.319953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:23.320124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:23.330129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:23.330265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:23.330672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.330883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:23.338646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.340364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 
2025-06-25T14:56:23.347900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:23.347997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.348055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:23.348101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:23.348178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:23.348411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.359514Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:56:23.472537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:23.474717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.476334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:23.476404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:23.479945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:23.480065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:23.484662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.485655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:56:23.485911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:56:23.486073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:23.486121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:56:23.486164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:23.489123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.489184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:23.489227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:23.491297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.491341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.491402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.491457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:23.495644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:23.497441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:23.497568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:56:23.498456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.498549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:23.498588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.499837Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:23.499893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.500044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:23.500114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56: ... HEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:23.582806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_external_data_source.cpp:35: [72057594046678944] TAlterExternalDataSource TPropose, operationId: 102:0HandleReply TEvOperationPlan: step# 5000003 2025-06-25T14:56:23.582911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 240 2025-06-25T14:56:23.583060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:23.583121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:56:23.583892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:56:23.584936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:56:23.585661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:23.585695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:23.585784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:56:23.585828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:56:23.585882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.585902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 1 
2025-06-25T14:56:23.585924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:56:23.585936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:56:23.586208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.586242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:56:23.586323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:56:23.586345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:56:23.586385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:56:23.586409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:56:23.586433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-25T14:56:23.586456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:56:23.586481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:56:23.586510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:56:23.586566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:56:23.586598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-25T14:56:23.586619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:56:23.586637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T14:56:23.587021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:56:23.587075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:56:23.587098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:56:23.587120Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:56:23.587155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:56:23.587655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:56:23.587725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:56:23.587750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:56:23.587767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:56:23.587784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:56:23.587832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:56:23.589981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:56:23.590237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:56:23.590409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:56:23.590444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:56:23.590738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:56:23.590794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:56:23.590820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:336:2325] TestWaitNotification: OK eventTxId 102 2025-06-25T14:56:23.591175Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:56:23.591360Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/MyExternalDataSource" took 173us result status StatusSuccess 2025-06-25T14:56:23.591598Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 2 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_new_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TExternalDataSourceTest::ReadOnlyMode [GOOD] >> TExternalDataSourceTest::PreventDeletionOfDependentDataSources [GOOD] >> FolderServiceTest::TFolderServiceTransitional [GOOD] >> TExternalDataSourceTest::CreateExternalDataSourceShouldFailIfSuchEntityAlreadyExists [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:56:23.485252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:23.485329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:23.485365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching 
config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:23.485400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:23.485443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:23.485468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:23.485526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:23.485613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:56:23.486278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:23.486575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:23.558908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:56:23.558953Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:23.559589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:23.573105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:23.573425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:23.573594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:23.581566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:23.581768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:23.582431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.582699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:23.585224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.585423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:56:23.586529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:23.586579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 
2025-06-25T14:56:23.586626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:23.586672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:23.586725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:23.586909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.592728Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:56:23.708807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:23.709053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.709263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:23.709320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:23.709549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:23.709620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:23.713385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.713567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:56:23.713749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.713826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:23.713866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards 
to create, do next state 2025-06-25T14:56:23.713899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:23.715886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.715934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:23.715971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:23.717600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.717648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.717683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.717725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:23.721006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:23.722773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:23.722960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:56:23.723839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.723985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:23.724052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.724342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:23.724392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.724546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:23.724621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56:23.726500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:23.726546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:23.726706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.726751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:56:23.727064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.727104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:56:23.727205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:56:23.727242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:56:23.727275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:56:23.727303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:56:23.727336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:56:23.727372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:56:23.727404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:56:23.727443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:56:23.727525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:56:23.727558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:56:23.727587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:56:23.729216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 
72057594046678944, cookie: 1 2025-06-25T14:56:23.729311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:56:23.729353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:56:23.729395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:56:23.729429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:23.729517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:56:23.732380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:56:23.732878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T14:56:23.733973Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:273:2262] Bootstrap 2025-06-25T14:56:23.753124Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:273:2262] Become StateWork (SchemeCache [1:278:2267]) 2025-06-25T14:56:23.755566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:23.755866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 101:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } 2025-06-25T14:56:23.755938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-25T14:56:23.755978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusPreconditionFailed, reason: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-25T14:56:23.756851Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:273:2262] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 
2025-06-25T14:56:23.759059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusPreconditionFailed Reason: "Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:23.759334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/MyExternalDataSource 2025-06-25T14:56:23.760445Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:56:23.760663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:56:23.760700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:56:23.761074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:56:23.761155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:56:23.761189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:288:2277] TestWaitNotification: OK eventTxId 101 2025-06-25T14:56:23.761617Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:56:23.761810Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 188us result status StatusPathDoesNotExist 2025-06-25T14:56:23.761963Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TExternalDataSourceTest::ParallelCreateExternalDataSource [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpExtractPredicateLookup::ComplexRange [GOOD] Test command err: Trying to start YDB, 
gRPC: 21681, MsgBus: 30985 2025-06-25T14:54:03.111313Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900368579012882:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:03.112748Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009c5/r3tmp/tmpVmoGkV/pdisk_1.dat 2025-06-25T14:54:03.456574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:03.456680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:03.464880Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:03.472645Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900368579012849:2080] 1750863243101681 != 1750863243101684 2025-06-25T14:54:03.485940Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21681, node 1 2025-06-25T14:54:03.612552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:03.612578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:03.612589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:03.612707Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30985 TClient is connected to server localhost:30985 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:54:04.126293Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:04.160684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:54:04.195261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:04.318071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:04.491877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:04.592256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:06.113898Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900381463916363:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:06.114018Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:06.523356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:06.556878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:06.601036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:06.670560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:06.707298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:06.749351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:06.823443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:06.905918Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900381463917029:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:06.905987Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:06.906302Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900381463917034:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:06.910522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:06.925393Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900381463917036:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:06.988782Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900381463917087:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:08.109878Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900368579012882:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:08.109971Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 16634, MsgBus: 25758 2025-06-25T14:54:10.108218Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900400205199010:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:10.108268Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... x/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:11.157250Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.298062Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.590716Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.708402Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:14.652464Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[13:7519900911130719186:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:14.652579Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:15.622621Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519900936900524581:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:15.622753Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:15.738277Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:15.801672Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:15.876437Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:15.929373Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:15.987199Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:16.044704Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:16.107777Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:16.397775Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519900941195492540:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:16.397965Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:16.398814Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7519900941195492545:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:16.404802Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:16.423335Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7519900941195492547:2439], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:16.513363Z node 13 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [13:7519900941195492598:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:18.560778Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:18.719382Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:18.804932Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:18.906069Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:19.072183Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:19.138662Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:19.207681Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:19.266535Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpCreateTable, opId: 281474976710679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:19.333934Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710680:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:19.402877Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_HtapInteractive+withOltpSink [GOOD] Test command err: Trying to start YDB, gRPC: 12386, MsgBus: 1528 2025-06-25T14:55:57.825834Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900861384618738:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:57.826552Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017a4/r3tmp/tmpYhKWTU/pdisk_1.dat 2025-06-25T14:55:58.169561Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:58.169646Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:58.174100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:58.206835Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:58.208504Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900861384618617:2080] 1750863357775303 != 1750863357775306 TServer::EnableGrpc on GrpcPort 12386, node 1 2025-06-25T14:55:58.280777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:58.280808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:58.280836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:58.280948Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1528 TClient is connected to server localhost:1528 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:58.768590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:58.799702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:58.825684Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:00.761455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900874269521139:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:00.761565Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.064711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:56:01.218931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900878564488532:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:56:01.220856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900878564488532:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:56:01.221130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900878564488532:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:56:01.221252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900878564488532:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:56:01.221333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900878564488532:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:56:01.221420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900878564488532:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:56:01.221504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900878564488532:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:56:01.221585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900878564488532:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:56:01.221669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900878564488532:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:56:01.221763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900878564488532:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:56:01.221846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519900878564488532:2299];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:56:01.265185Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900878564488540:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:56:01.265245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900878564488540:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:56:01.265436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900878564488540:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:56:01.265538Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900878564488540:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:56:01.265646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900878564488540:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:56:01.265735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900878564488540:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:56:01.265824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900878564488540:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:56:01.265914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900878564488540:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:56:01.266003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900878564488540:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:56:01.266110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900878564488540:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:56:01.266200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900878564488540:2300];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:56:01.271369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7519900878564488613:2301];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:56:01.271435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900878564488613:2301];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:56:01.271781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900878564488613:2301];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:56:01.271879Z node 1 :TX_COLUMNSHARD WARN ... g.cpp:784: tablet_id=72075186224037896;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:56:21.074298Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:56:21.074332Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:56:21.074519Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:56:21.074560Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:56:21.074678Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:56:21.074716Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:56:21.074732Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519900957635414177:2301];ev=NActors::IEventHandle;tablet_id=72075186224037897;tx_id=281474976710658;this=88923003042272;method=TTxController::StartProposeOnExecute;tx_info=281474976710658:TX_KIND_SCHEMA;min=1750863381074;max=18446744073709551615;plan=0;src=[3:7519900944750511823:2138];cookie=102:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:21.074790Z node 3 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037896;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:56:21.074837Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:56:21.074874Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037896;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:56:21.075275Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:56:21.075320Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:56:21.079409Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[3:7519900957635414178:2302];ev=NActors::IEventHandle;tablet_id=72075186224037896;tx_id=281474976710658;this=88923053238880;method=TTxController::StartProposeOnExecute;tx_info=281474976710658:TX_KIND_SCHEMA;min=1750863381078;max=18446744073709551615;plan=0;src=[3:7519900944750511823:2138];cookie=92:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:21.082957Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:21.083093Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:21.089330Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:21.089345Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:21.090190Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:21.090262Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:21.095463Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:21.095470Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:21.096069Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:21.096139Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:21.101206Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:21.101218Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:21.101900Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:21.101938Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:21.107203Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:21.107216Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:21.107907Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:21.108187Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:21.113651Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:21.113651Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:56:21.119995Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:21.214117Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900961930381920:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:21.214190Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:21.214377Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900961930381925:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:21.217695Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:21.226191Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900961930381927:2368], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:56:21.301802Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900961930381978:2649] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:21.517429Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:56:21.749489Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976710667;tx_id=281474976710667;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710667; 2025-06-25T14:56:22.188194Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900944750511552:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:22.188267Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:56:23.219111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:23.219215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:23.219265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:23.219308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:23.220222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:23.220265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:23.220353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:23.220456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# 
false 2025-06-25T14:56:23.221260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:23.223039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:23.302660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:56:23.302718Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:23.303458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:23.318552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:23.318953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:23.319198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:23.327056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:23.327372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:23.329476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.329855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:23.339323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.340708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:56:23.348666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:23.348766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.348828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:23.348884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:23.348942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:23.349150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.357373Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:56:23.503138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, 
message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:23.503333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.503535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:23.503580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:23.503819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:23.503892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:23.506537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.506706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:56:23.506858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.506911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:23.506966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:56:23.507026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:23.508749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.508806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:23.508855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:23.510328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.510365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.510407Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.510486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:23.519573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:23.521729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:23.521948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:56:23.522822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.522936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:23.522986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.523260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:23.523328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.523488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:23.523588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56: ... 
28:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:24.000263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 128 ready parts: 1/1 2025-06-25T14:56:24.000402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 128 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:24.001150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 128 2025-06-25T14:56:24.001272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 128 2025-06-25T14:56:24.001329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 128 2025-06-25T14:56:24.001385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 128, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-25T14:56:24.001424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-25T14:56:24.002346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 4 Version: 2 PathOwnerId: 72057594046678944, cookie: 128 2025-06-25T14:56:24.002442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 4 Version: 2 PathOwnerId: 72057594046678944, cookie: 128 2025-06-25T14:56:24.002472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 128 2025-06-25T14:56:24.002510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 128, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 2 2025-06-25T14:56:24.002553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:56:24.002620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 128, ready parts: 0/1, is published: true 2025-06-25T14:56:24.005148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 128:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:128 msg type: 269090816 2025-06-25T14:56:24.005311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 128, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 128 at step: 
5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 128 at step: 5000004 2025-06-25T14:56:24.007128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 128 2025-06-25T14:56:24.007341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 128 2025-06-25T14:56:24.007524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:24.007639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 128 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.007758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 128:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000004, at schemeshard: 72057594046678944 2025-06-25T14:56:24.007909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 128:0 128 -> 240 2025-06-25T14:56:24.008068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:56:24.008128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 FAKE_COORDINATOR: Erasing txId 128 2025-06-25T14:56:24.009776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:24.009811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 128, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:24.009981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 128, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-25T14:56:24.010061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:24.010092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:457:2413], at schemeshard: 72057594046678944, txId: 128, path id: 1 2025-06-25T14:56:24.010141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:457:2413], at schemeshard: 72057594046678944, txId: 128, path id: 4 2025-06-25T14:56:24.010486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 128:0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.010533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 128:0 ProgressState 2025-06-25T14:56:24.010638Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:908: Part operation is done id#128:0 progress is 1/1 2025-06-25T14:56:24.010677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 128 ready parts: 1/1 2025-06-25T14:56:24.010712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#128:0 progress is 1/1 2025-06-25T14:56:24.010743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 128 ready parts: 1/1 2025-06-25T14:56:24.010775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 128, ready parts: 1/1, is published: false 2025-06-25T14:56:24.010817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 128 ready parts: 1/1 2025-06-25T14:56:24.010872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 128:0 2025-06-25T14:56:24.010907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 128:0 2025-06-25T14:56:24.010967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:56:24.011010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 128, publications: 2, subscribers: 0 2025-06-25T14:56:24.011053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 128, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-25T14:56:24.011081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 128, [OwnerId: 72057594046678944, LocalPathId: 4], 3 2025-06-25T14:56:24.011594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 128 2025-06-25T14:56:24.011664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 128 2025-06-25T14:56:24.011720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 128 2025-06-25T14:56:24.011767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 128, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-25T14:56:24.011814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-25T14:56:24.012383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 128 2025-06-25T14:56:24.012475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 4 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 128 2025-06-25T14:56:24.012508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 128 2025-06-25T14:56:24.012532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 128, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-25T14:56:24.012560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-25T14:56:24.012626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 128, subscribers: 0 2025-06-25T14:56:24.015045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 128 2025-06-25T14:56:24.015874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 128 TestModificationResult got TxId: 128, wait until txId: 128 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::PreventDeletionOfDependentDataSources [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:56:23.219111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:23.219207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:23.219279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:23.219318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:23.220231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:23.220283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:23.220369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:23.220481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-25T14:56:23.221260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:23.224470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:23.302613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:56:23.302666Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:23.303760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:23.328691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:23.329358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:23.329569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:23.343501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:23.343742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:23.344533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.344831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:23.347941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.348151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:56:23.349354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:23.349411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.349460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:23.349505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:23.349548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:23.349733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.357396Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:56:23.472770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: 
TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:23.475259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.476513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:23.476588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:23.479969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:23.480112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:23.489690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.489895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:56:23.490081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.490169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:23.490211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:56:23.490262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:23.492739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.492818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:23.492864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:23.495100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.495160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:56:23.495222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.495273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:23.499065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:23.501350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:23.501566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:56:23.502546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.502667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:23.502719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.503046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:23.503101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.503284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:23.503411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56: ... 
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:56:24.175668Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:56:24.175750Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:56:24.175775Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:56:24.175798Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T14:56:24.175823Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:56:24.176497Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:56:24.176563Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:56:24.176597Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:56:24.176620Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-25T14:56:24.176649Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:56:24.176716Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:56:24.178339Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:56:24.179174Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:56:24.179262Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:56:24.179427Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:56:24.179463Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:56:24.179855Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:56:24.179933Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:56:24.179965Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:336:2325] TestWaitNotification: OK eventTxId 101 2025-06-25T14:56:24.180402Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:56:24.180576Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 197us result status StatusSuccess 2025-06-25T14:56:24.180879Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 2025-06-25T14:56:24.183662Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpDropExternalDataSource Drop { Name: "ExternalDataSource" } } TxId: 103 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T14:56:24.183809Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_drop_external_data_source.cpp:116: [72057594046678944] TDropExternalDataSource Propose: opId# 103:0, path# /MyRoot/ExternalDataSource 2025-06-25T14:56:24.183918Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 103:1, propose status:StatusSchemeError, reason: Other entities depend on this data source, please remove them at the beginning: /MyRoot/ExternalTable, at schemeshard: 72057594046678944 2025-06-25T14:56:24.186193Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 103, response: Status: StatusSchemeError Reason: "Other entities depend on this data source, please remove them at the beginning: /MyRoot/ExternalTable" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.186401Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusSchemeError, reason: Other entities depend on this data source, please remove them at the beginning: /MyRoot/ExternalTable, operation: DROP EXTERNAL DATA SOURCE, path: /MyRoot/ExternalDataSource TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:56:24.186666Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:56:24.186701Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:56:24.187055Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:56:24.187143Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:56:24.187175Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:344:2333] TestWaitNotification: OK eventTxId 103 2025-06-25T14:56:24.187618Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:56:24.187809Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 201us result status StatusSuccess 2025-06-25T14:56:24.188105Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalDataSource" PathDescription { Self { Name: "ExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 
PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "ExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { References { Path: "/MyRoot/ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } } } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::CreateExternalDataSourceShouldFailIfSuchEntityAlreadyExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:56:23.219158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:23.219242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:23.219294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:23.219337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:23.220940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:23.220996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:23.221081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:23.221207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-25T14:56:23.222030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:23.223255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:23.308048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:56:23.308107Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:23.308900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:23.321917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:23.322252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:23.322372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:23.329812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:23.330017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:23.330675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.330913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:23.340674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.340853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:56:23.348555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:23.348620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.348682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:23.348733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:23.348786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:23.348978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.356046Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:56:23.507430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: 
TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:23.507617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.507832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:23.507876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:23.508110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:23.508202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:23.510150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.510326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:56:23.510469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.510525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:23.510560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:56:23.510606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:23.512136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.512186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:23.512226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:23.513623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.513662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:56:23.513712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.513758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:23.517347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:23.519077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:23.519236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:56:23.520249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.520378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:23.520431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.520733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:23.520780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.520958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:23.521021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56: ... 
2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:56:24.161264Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:56:24.161305Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:56:24.161798Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:56:24.161847Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:56:24.161867Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:56:24.161886Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-25T14:56:24.161905Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:56:24.161956Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:56:24.163927Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:56:24.164915Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:56:24.165172Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:56:24.165218Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:56:24.165627Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:56:24.165723Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:56:24.165764Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:306:2295] TestWaitNotification: OK eventTxId 101 2025-06-25T14:56:24.166191Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false 
ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:56:24.166363Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 222us result status StatusSuccess 2025-06-25T14:56:24.166682Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 102 2025-06-25T14:56:24.169516Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_new_bucket" Auth { None { } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:24.169775Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_new_bucket" Auth { None { } } } 2025-06-25T14:56:24.169845Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 102:0, path# /MyRoot/MyExternalDataSource 
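The two proposes traced above (txId 101 and txId 102) correspond to user-level DDL against the same external data source path. As a rough illustration only — the statements below are reconstructed from the trace, not taken from the test source, and assume the standard YQL syntax for external data sources:

    -- first create (txId 101 in the trace): the path does not exist yet, so it succeeds
    CREATE EXTERNAL DATA SOURCE MyExternalDataSource WITH (
        SOURCE_TYPE = "ObjectStorage",
        LOCATION = "https://s3.cloud.net/my_bucket",
        AUTH_METHOD = "NONE"
    );

    -- second create with a different location (txId 102): the path already exists,
    -- so the schemeshard answers StatusAlreadyExists, as the next trace lines show
    CREATE EXTERNAL DATA SOURCE MyExternalDataSource WITH (
        SOURCE_TYPE = "ObjectStorage",
        LOCATION = "https://s3.cloud.net/my_new_bucket",
        AUTH_METHOD = "NONE"
    );
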
2025-06-25T14:56:24.169985Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/MyExternalDataSource', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeExternalDataSource, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-25T14:56:24.172088Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeExternalDataSource, state: EPathStateNoChanges)" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2 PathCreateTxId: 101, at schemeshard: 72057594046678944 2025-06-25T14:56:24.172301Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/MyExternalDataSource', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeExternalDataSource, state: EPathStateNoChanges), operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/MyExternalDataSource TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:56:24.172579Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:56:24.172617Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:56:24.172986Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:56:24.173063Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:56:24.173100Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:314:2303] TestWaitNotification: OK eventTxId 102 2025-06-25T14:56:24.173543Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:56:24.173701Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 188us result status StatusSuccess 2025-06-25T14:56:24.174013Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/MyExternalDataSource" PathDescription { Self { Name: "MyExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TExternalDataSourceTest::CreateExternalDataSourceWithProperties >> TExternalDataSourceTest::SchemeErrors [GOOD] >> TExternalDataSourceTest::RemovingReferencesFromDataSources [GOOD] >> KqpQueryService::Ddl_Dml [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ParallelCreateExternalDataSource [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:56:23.219141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:23.219237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:23.219314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:23.219359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:23.220249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:23.220330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:23.220403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:23.220504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:56:23.221244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:23.223039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:23.313367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:56:23.313433Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:23.314180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:23.335443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:23.335947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:23.336142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:23.349447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:23.349676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:23.350391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.350690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:23.353566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.353810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:56:23.355162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:23.355215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:23.355264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:23.355309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:23.355353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:23.355560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.362073Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] 
started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:56:23.502924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:23.503130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.503322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:23.503368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:23.503601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:23.503674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:23.505759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.505932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:56:23.506094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.506164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:23.506204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:56:23.506251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:23.507894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.507955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:23.507997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:23.510547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-25T14:56:23.510590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:23.510654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.510707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:23.514420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:23.516125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:23.516324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:56:23.517197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:23.517325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:23.517392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.517711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:23.517764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:23.517937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:23.518004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56: ... 
hId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource1" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.380231Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/MyExternalDataSource2" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:56:24.380407Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/MyExternalDataSource2" took 181us result status StatusSuccess 2025-06-25T14:56:24.380661Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/MyExternalDataSource2" PathDescription { Self { Name: "MyExternalDataSource2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 126 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 
MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource2" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.381245Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:56:24.381390Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 140us result status StatusSuccess 2025-06-25T14:56:24.381746Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 124 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: true } Children { Name: "MyExternalDataSource1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 125 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "MyExternalDataSource2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 126 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.382184Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/MyExternalDataSource1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 
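The DirA description above lists the two data sources that this test creates in parallel under /MyRoot/DirA. At the YQL level this is roughly equivalent to two independent CREATE statements; the sketch below is illustrative only (it assumes path-qualified names are accepted for external data sources — the test itself drives the schemeshard operations directly):

    CREATE EXTERNAL DATA SOURCE `DirA/MyExternalDataSource1` WITH (
        SOURCE_TYPE = "ObjectStorage",
        LOCATION = "https://s3.cloud.net/my_bucket",
        AUTH_METHOD = "NONE"
    );

    CREATE EXTERNAL DATA SOURCE `DirA/MyExternalDataSource2` WITH (
        SOURCE_TYPE = "ObjectStorage",
        LOCATION = "https://s3.cloud.net/my_bucket",
        AUTH_METHOD = "NONE"
    );
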
2025-06-25T14:56:24.382310Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/MyExternalDataSource1" took 141us result status StatusSuccess 2025-06-25T14:56:24.382539Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/MyExternalDataSource1" PathDescription { Self { Name: "MyExternalDataSource1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 125 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource1" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.382967Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/MyExternalDataSource2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:56:24.383114Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/MyExternalDataSource2" took 153us result status StatusSuccess 2025-06-25T14:56:24.383350Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/MyExternalDataSource2" PathDescription { Self { Name: "MyExternalDataSource2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 126 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ExternalDataSourceDescription { Name: "MyExternalDataSource2" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderServiceTransitional [GOOD] Test command err: 2025-06-25T14:56:21.149360Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900963115301272:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:21.149443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001694/r3tmp/tmpAOto0f/pdisk_1.dat 2025-06-25T14:56:21.488022Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:21.572449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:21.572574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:21.574332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2057 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:21.786282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:21.801068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:21.802380Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700008e408] Connect to grpc://localhost:17259 2025-06-25T14:56:21.828339Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-25T14:56:21.836523Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:17259: Failed to connect to remote host: Connection refused 2025-06-25T14:56:21.837721Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-25T14:56:21.838149Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:17259: Failed to connect to remote host: Connection refused 2025-06-25T14:56:22.159676Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:22.840693Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-25T14:56:22.841304Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:17259: Failed to connect to remote host: Connection refused 2025-06-25T14:56:23.844552Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-25T14:56:23.848509Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 5 Not Found 2025-06-25T14:56:23.849431Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ListFoldersRequest { id: "i_am_exists" } 2025-06-25T14:56:23.864005Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700008e408] Response ListFoldersResponse { result { 
cloud_id: "response_cloud_id" } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::RemovingReferencesFromDataSources [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:56:24.517245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:24.517338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:24.517387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:24.517421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:24.517478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:24.517501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:24.517547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:24.517632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:56:24.518297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:24.518599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:24.579851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:56:24.579908Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:24.580573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:24.593534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:24.593920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:24.594093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:24.608187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:24.608463Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:24.609102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:24.609383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:24.612205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:24.612402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:56:24.613550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:24.613616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:24.613664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:24.613701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:24.613735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:24.613919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.621685Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:56:24.732044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:24.732259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.732509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:24.732557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:24.732840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:24.732915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at 
schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:24.735075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:24.735260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:56:24.735432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.735482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:24.735520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:56:24.735555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:24.737377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.737433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:24.737471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:24.739084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.739132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.739173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:24.739250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:24.742597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:24.744426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:24.744620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 
5000001 2025-06-25T14:56:24.745505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:24.745634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.745685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:24.745939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:24.746009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:24.746186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:24.746288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56: ... ation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-06-25T14:56:24.891425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:24.891524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.891578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-06-25T14:56:24.891688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:56:24.891750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 128 -> 240 2025-06-25T14:56:24.891917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:24.891964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:56:24.893036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 
2025-06-25T14:56:24.893158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 FAKE_COORDINATOR: Erasing txId 104 2025-06-25T14:56:24.893999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:24.894031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:24.894160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:56:24.894278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:24.894306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-06-25T14:56:24.894343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:210:2210], at schemeshard: 72057594046678944, txId: 104, path id: 2 2025-06-25T14:56:24.894411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.894444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-25T14:56:24.894568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:56:24.894601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:56:24.894645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:56:24.894674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:56:24.894708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-25T14:56:24.894744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:56:24.894778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:56:24.894808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:56:24.894861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:56:24.894900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-25T14:56:24.894933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-25T14:56:24.894959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-25T14:56:24.895541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:56:24.895619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:56:24.895648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:56:24.895700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:56:24.895784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:56:24.896043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:56:24.896081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:56:24.896156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:56:24.896396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:56:24.896469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:56:24.896492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:56:24.896518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-25T14:56:24.896552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:24.896608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 
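TxId 104 traced above is the drop of the external data source once nothing references it any longer. The user-level equivalent is a single statement — a sketch assuming the standard YQL form; the test issues the scheme operation directly rather than running YQL:

    -- allowed only after referencing external tables have been removed
    DROP EXTERNAL DATA SOURCE ExternalDataSource;

After the drop completes, the describe at the end of this test's output returns StatusPathDoesNotExist for /MyRoot/ExternalDataSource, confirming the path was removed.
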
2025-06-25T14:56:24.899992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:56:24.900064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:56:24.900133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-25T14:56:24.900373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-25T14:56:24.900411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-25T14:56:24.900881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-25T14:56:24.900953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:56:24.900981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:389:2378] TestWaitNotification: OK eventTxId 104 2025-06-25T14:56:24.901448Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:56:24.901598Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 166us result status StatusPathDoesNotExist 2025-06-25T14:56:24.901721Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::SchemeErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 2025-06-25T14:56:24.541946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: 
BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:24.542030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:24.542072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:24.542107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:24.542177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:24.542206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:24.542255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:24.542350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:56:24.543071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:24.543373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:24.610781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:56:24.610831Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:24.611502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:24.622583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:24.622929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:24.623091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:24.630864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:24.631050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:24.631662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:24.631966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:24.635018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, 
at schemeshard: 72057594046678944 2025-06-25T14:56:24.635210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:56:24.636373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:24.636445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:24.636499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:24.636557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:24.636586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:24.636789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.642560Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:56:24.804505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:24.804742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.804949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:24.804998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:24.805241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:24.805323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:24.807406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:24.807586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 
2025-06-25T14:56:24.807767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.807822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:24.807877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:56:24.807923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:24.809785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.809843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:24.809893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:24.811456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.811498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:24.811554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:24.811634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:24.821860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:24.823663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:24.823890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:56:24.824845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:24.824976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.825031Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:24.825335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:24.825401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:24.825575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:24.825643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56: ... 16Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 126:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" } 2025-06-25T14:56:24.900500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 126:0, path# /MyRoot/DirA/MyExternalDataSource 2025-06-25T14:56:24.900661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 126:1, propose status:StatusSchemeError, reason: Authorization method isn't specified, at schemeshard: 72057594046678944 2025-06-25T14:56:24.903302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 126, response: Status: StatusSchemeError Reason: "Authorization method isn\'t specified" TxId: 126 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.903568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 126, database: /MyRoot, subject: , status: StatusSchemeError, reason: Authorization method isn't specified, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/MyExternalDataSource TestModificationResult got TxId: 126, wait until txId: 126 TestModificationResults wait txId: 127 2025-06-25T14:56:24.906449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: 
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } } TxId: 127 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:24.906754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 127:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } 2025-06-25T14:56:24.907068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 127:0, path# /MyRoot/DirA/MyExternalDataSource 2025-06-25T14:56:24.907219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 127:1, propose status:StatusSchemeError, reason: Maximum length of location must be less or equal equal to 1000 but got 1001, at schemeshard: 72057594046678944 2025-06-25T14:56:24.909706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 127, response: Status: StatusSchemeError Reason: "Maximum length of location must be less or equal equal to 1000 but got 1001" TxId: 127 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.909983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 127, database: /MyRoot, subject: , status: 
StatusSchemeError, reason: Maximum length of location must be less or equal equal to 1000 but got 1001, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/MyExternalDataSource TestModificationResult got TxId: 127, wait until txId: 127 TestModificationResults wait txId: 128 2025-06-25T14:56:24.912921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Installation: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:24.913251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 128:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Installation: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" Auth { None { } } } 2025-06-25T14:56:24.913350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 128:0, path# /MyRoot/DirA/MyExternalDataSource 2025-06-25T14:56:24.913499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 128:1, propose status:StatusSchemeError, 
reason: Maximum length of installation must be less or equal equal to 1000 but got 1001, at schemeshard: 72057594046678944 2025-06-25T14:56:24.916974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 128, response: Status: StatusSchemeError Reason: "Maximum length of installation must be less or equal equal to 1000 but got 1001" TxId: 128 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.917232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, subject: , status: StatusSchemeError, reason: Maximum length of installation must be less or equal equal to 1000 but got 1001, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/MyExternalDataSource TestModificationResult got TxId: 128, wait until txId: 128 TestModificationResults wait txId: 129 2025-06-25T14:56:24.920130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } } } TxId: 129 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:24.920376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 129:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } } 2025-06-25T14:56:24.920465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_data_source.cpp:232: [72057594046678944] TCreateExternalDataSource Propose: opId# 129:0, path# /MyRoot/DirA/ 2025-06-25T14:56:24.920596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 129:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/DirA/', error: path part shouldn't be empty, at schemeshard: 72057594046678944 2025-06-25T14:56:24.922886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 129, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/DirA/\', error: path part shouldn\'t be empty" TxId: 129 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:24.923163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 129, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/DirA/', error: path part shouldn't be empty, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/DirA/ TestModificationResult got TxId: 129, wait until txId: 129 >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfter [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfterAndTimeout >> TExternalDataSourceTest::CreateExternalDataSourceWithProperties [GOOD] >> TExternalDataSourceTest::DropExternalDataSource ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TServiceAccountServiceTest::IssueToken [GOOD] Test command err: 2025-06-25T14:56:19.042944Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900950325403652:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:19.048676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016b4/r3tmp/tmp26ziz6/pdisk_1.dat 2025-06-25T14:56:19.365496Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900950325403467:2080] 1750863379002274 != 1750863379002277 2025-06-25T14:56:19.397168Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:19.428719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:19.428783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:19.431693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2367 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:19.685287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:21.957356Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900964636218644:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:21.957405Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016b4/r3tmp/tmptdAhrp/pdisk_1.dat 2025-06-25T14:56:22.089107Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:22.110262Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:22.110392Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:22.111956Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6122 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:22.293304Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
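Both test nodes above go through the same Hive registration sequence, logged as VolatileState transitions Unknown -> Disconnected -> Connecting -> Connected. The following is a minimal standalone C++ sketch of that transition order; the names NextState and ToString are assumptions made for this illustration, not the Hive implementation.

#include <iostream>

// Node availability states as printed by the HIVE lines above.
enum class EVolatileState { Unknown, Disconnected, Connecting, Connected };

const char* ToString(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return "Unknown";
        case EVolatileState::Disconnected: return "Disconnected";
        case EVolatileState::Connecting:   return "Connecting";
        case EVolatileState::Connected:    return "Connected";
    }
    return "?";
}

// Hypothetical helper: advance one step along the observed registration sequence.
EVolatileState NextState(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return EVolatileState::Disconnected;
        case EVolatileState::Disconnected: return EVolatileState::Connecting;
        case EVolatileState::Connecting:   return EVolatileState::Connected;
        case EVolatileState::Connected:    return EVolatileState::Connected;  // terminal in this sketch
    }
    return s;
}

int main() {
    EVolatileState state = EVolatileState::Unknown;
    while (state != EVolatileState::Connected) {
        EVolatileState next = NextState(state);
        std::cout << "VolatileState: " << ToString(state) << " -> " << ToString(next) << "\n";
        state = next;
    }
    return 0;
}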
2025-06-25T14:56:22.300367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 >> FolderServiceTest::TFolderService [GOOD] >> KqpService::ToDictCache+UseCache [GOOD] >> KqpService::ToDictCache-UseCache ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::Ddl_Dml [GOOD] Test command err: Trying to start YDB, gRPC: 27809, MsgBus: 22086 2025-06-25T14:55:58.193456Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900862871616947:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:58.219239Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017a3/r3tmp/tmpDH8UUs/pdisk_1.dat 2025-06-25T14:55:58.593755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:58.593894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:58.597971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:58.620657Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:58.621181Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900862871616917:2080] 1750863358182868 != 1750863358182871 TServer::EnableGrpc on GrpcPort 27809, node 1 2025-06-25T14:55:58.708802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:58.708827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:58.708846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:58.708939Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22086 TClient is connected to server localhost:22086 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:55:59.228415Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:59.296712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:59.336931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:59.358574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:59.574983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:59.698351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:59.759908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:01.345379Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900875756520439:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.345489Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:01.750916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:01.833448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:01.859151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:01.899125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:01.932979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:02.005963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:02.045310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:02.145185Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900880051488403:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:02.145267Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:02.145330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900880051488408:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:02.149541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:02.159951Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900880051488410:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:02.245429Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900880051488463:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:03.193618Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900862871616947:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:03.193799Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:03.327693Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900884346456052:3602] txid# 281474976710673, issues: { message: "User already exists" severity: 1 } 2025-06-25T14:56:03.351911Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZWZlOWUxMGQtYWRlNTc0OTUtYjQ ... .070616Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZTVmNjFhNDUtYjc5MzI2NGItZDlkZWQxNzMtYWQ4OTcwNzY=, ActorId: [4:7519900958885895029:2488], ActorState: ExecuteState, TraceId: 01jykse0chdy3gh1e62dbcs9zx, Create QueryResponse for error on request, msg: 2025-06-25T14:56:21.154983Z node 4 :KQP_COMPILE_SERVICE WARN: kqp_compile_service.cpp:570: queryId in recompile request and queryId in cache are different, queryId in request: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT INTO TestDdlDml2 (Key, Value1) VALUES (1, \"1\");\n SELECT * FROM TestDdlDml2;\n UPSERT INTO TestDdlDml2 (Key, Value1) VALUES (2, \"2\");\n SELECT * FROM TestDdlDml2;\n CREATE TABLE TestDdlDml33 (\n Key Uint64,\n PRIMARY KEY (Key)\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_database":"Root" }, "settings": { "ydb_database":"Root" }, "rollback_settings": { } } }}, queryId in cache: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT INTO TestDdlDml2 (Key, Value1, Value2) VALUES (1, \"1\", \"1\");\n SELECT * FROM TestDdlDml2;\n ALTER TABLE TestDdlDml2 DROP COLUMN Value2;\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_database":"Root" }, "settings": { "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-25T14:56:21.353106Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:21.523244Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715685:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:21.838426Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519900963180862739:2566], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:11:17: Error: At function: KiReadTable!
:11:17: Error: Cannot find table 'db.[/Root/TestDdlDml5]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:56:21.838853Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=NTlhNjU3ZTItOTMwOTg3NC1jMjQ2MDFhNi1hMjYyOWYwYQ==, ActorId: [4:7519900963180862601:2544], ActorState: ExecuteState, TraceId: 01jykse0zje0swy47n0sqmxpsq, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:56:21.910800Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:22.036919Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:22.485278Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519900967475830328:3988] txid# 281474976715697, issues: { message: "Check failed: path: \'/Root/TestDdl1\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 20], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:22.485367Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715697, ProxyStatus: ExecComplete, SchemeShardReason: Check failed: path: '/Root/TestDdl1', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 20], type: EPathTypeTable, state: EPathStateNoChanges) 2025-06-25T14:56:22.485508Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZGVmYjRmNTgtYzFkODhmYzgtOTk3ZTVjYzQtYTA1NzJiOTM=, ActorId: [4:7519900967475830315:2620], ActorState: ExecuteState, TraceId: 01jykse1xq6tb8jnp6j2d5e8tj, Create QueryResponse for error on request, msg: 2025-06-25T14:56:22.520041Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519900967475830352:3999] txid# 281474976715699, issues: { message: "Check failed: path: \'/Root/TestDdl2\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 21], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:22.520120Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715699, ProxyStatus: ExecComplete, SchemeShardReason: Check failed: path: '/Root/TestDdl2', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 21], type: EPathTypeTable, state: EPathStateNoChanges) 2025-06-25T14:56:22.520245Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=MzlmODg2Y2YtN2M2MGE0OWUtYjIyZTQzZTctOWMxZWNmYTg=, ActorId: [4:7519900967475830339:2627], ActorState: ExecuteState, TraceId: 01jykse1yy1mvd1r169kxmjbz6, Create QueryResponse for error on request, msg: 2025-06-25T14:56:22.920732Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo 
unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715703:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:23.147165Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519900971770797830:4095] txid# 281474976715704, issues: { message: "Check failed: path: \'/Root/TestDdl2\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 21], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:23.147252Z node 4 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715704, ProxyStatus: ExecComplete, SchemeShardReason: Check failed: path: '/Root/TestDdl2', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 21], type: EPathTypeTable, state: EPathStateNoChanges) 2025-06-25T14:56:23.147437Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2IyOGE2ODYtZDRhM2RkYmYtYzAxN2YzZGMtOGM3ZTA2ZTY=, ActorId: [4:7519900967475830424:2653], ActorState: ExecuteState, TraceId: 01jykse28g7zkkwfzbttscd4nx, Create QueryResponse for error on request, msg: 2025-06-25T14:56:23.303155Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519900971770797881:2690], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/TestDdl4]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:56:23.304899Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=N2MyY2NhNGUtNDQ5NmZmYzQtMmVhNTAyMmMtNjc3MzIxNmQ=, ActorId: [4:7519900971770797878:2688], ActorState: ExecuteState, TraceId: 01jykse2qm8wry7m6m11d66k66, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:56:23.515257Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715711:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:24.090296Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:468: Get parsing result with error, self: [4:7519900976065765400:2739], owner: [4:7519900954590926622:2355], statement id: 1 2025-06-25T14:56:24.090546Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=MzI5YmQwNzAtN2M5M2FhODYtMmVjMTBmODEtNTFjMzliZjI=, ActorId: [4:7519900976065765398:2738], ActorState: ExecuteState, TraceId: 01jykse3gfftx59sybxc12pt91, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:56:24.278723Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519900976065765444:2756], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:29: Error: At function: KiWriteTable!
:3:44: Error: Failed to convert type: Struct<'Key':Int32,'Value':String> to Struct<'Key':Uint64?,'Value':Uint64?>
:3:44: Error: Failed to convert 'Value': String to Optional
:3:44: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T14:56:24.280068Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=NjE5YzM2NjEtYzI3MmZmZGEtZDhlYmQyNGUtZTA2YTNhYg==, ActorId: [4:7519900976065765427:2750], ActorState: ExecuteState, TraceId: 01jykse3m4f1ddwvwtza21r6mv, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T14:56:24.338830Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715720:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:24.438682Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519900976065765564:2778], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:8:29: Error: At function: KiWriteTable!
:8:44: Error: Failed to convert type: Struct<'Key':Int32,'Value':String> to Struct<'Key':Uint64?,'Value':Uint64?>
:8:44: Error: Failed to convert 'Value': String to Optional
:8:44: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T14:56:24.439170Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=YTg5NTRmNDEtZmIyOTA0NzctMTBhMjRjZjgtYmQ1MDdhMg==, ActorId: [4:7519900976065765471:2765], ActorState: ExecuteState, TraceId: 01jykse3qj738zdnxmhrtn4f7j, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestDisconnectWhileAttaching >> TExternalDataSourceTest::DropExternalDataSource [GOOD] >> TProxyActorTest::TestCreateSemaphoreInterrupted |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestAttachSession >> TProxyActorTest::TestCreateSemaphore ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderService [GOOD] Test command err: 2025-06-25T14:56:23.246335Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900971714792830:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:23.248027Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00168b/r3tmp/tmpXnGT1Y/pdisk_1.dat 2025-06-25T14:56:23.607836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:23.607920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:23.614353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:23.650343Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900971714792811:2080] 1750863383243973 != 1750863383243976 2025-06-25T14:56:23.663207Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:30655 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:23.944833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:23.961755Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [51700008e408] Connect to grpc://localhost:2886 2025-06-25T14:56:23.994685Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-25T14:56:24.007354Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2886: Failed to connect to remote host: Connection refused 2025-06-25T14:56:24.008773Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-25T14:56:24.009289Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:2886: Failed to connect to remote host: Connection refused 2025-06-25T14:56:24.259154Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:25.011186Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-25T14:56:25.014141Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [51700008e408] Status 5 Not Found 2025-06-25T14:56:25.014933Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [51700008e408] Request ResolveFoldersRequest { folder_ids: "i_am_exists" } 2025-06-25T14:56:25.017585Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [51700008e408] Response ResolveFoldersResponse { resolved_folders { cloud_id: "response_cloud_id" } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::DropExternalDataSource [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:131:2058] recipient: [1:113:2143] 
2025-06-25T14:56:25.437783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:25.437883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:25.437958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:25.438012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:25.438062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:25.438093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:25.438149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:25.438244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:56:25.439115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:25.439457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:25.505275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:56:25.505338Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:25.505986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:25.519213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:25.519694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:25.519886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:25.527202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:25.527452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:25.528215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:25.528562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:25.531298Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:25.531468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:56:25.532670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:25.532726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:25.532775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:25.532815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:25.532851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:25.533051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:25.539703Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:129:2153] sender: [1:243:2058] recipient: [1:15:2062] 2025-06-25T14:56:25.670447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:25.670701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:25.670915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:25.670962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:25.671254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:25.671374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:25.673879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:25.674064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: 
AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:56:25.674279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:25.674332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:25.674376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:56:25.674430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:25.676481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:25.676542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:25.676588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:25.678627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:25.678690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:25.678753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:25.678806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:25.682552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:25.684731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:25.684891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:56:25.685551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:25.685654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 4294969455 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 
2025-06-25T14:56:25.685696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:25.685952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:25.685990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:25.686125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:25.686183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56: ... n RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-25T14:56:26.465844Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:26.465956Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 139 RawX2: 8589936752 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:26.466007Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 102:0 HandleReply TEvOperationPlan: step# 5000003 2025-06-25T14:56:26.466105Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:56:26.466185Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 240 2025-06-25T14:56:26.466343Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:26.466399Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:56:26.467317Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:56:26.467727Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T14:56:26.469388Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:26.469430Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at 
schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:26.469559Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:56:26.469690Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:26.469721Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-25T14:56:26.469777Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T14:56:26.470155Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:56:26.470203Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:56:26.470306Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:56:26.470341Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:56:26.470383Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:56:26.470418Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:56:26.470453Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-25T14:56:26.470484Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:56:26.470511Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:56:26.470537Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:56:26.470594Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:56:26.470624Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-25T14:56:26.470651Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:56:26.470697Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-25T14:56:26.471048Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, 
cookie: 102 2025-06-25T14:56:26.471134Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:56:26.471163Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:56:26.471200Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:56:26.471242Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:56:26.471503Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:56:26.471539Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:56:26.471596Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:56:26.471765Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:56:26.471809Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:56:26.471829Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:56:26.471848Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:56:26.471869Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:26.471925Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:56:26.474846Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:56:26.474921Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:56:26.474968Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 
72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:56:26.475128Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:56:26.475158Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:56:26.475544Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:56:26.475617Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:56:26.475646Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:334:2323] TestWaitNotification: OK eventTxId 102 2025-06-25T14:56:26.476078Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:56:26.476266Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 230us result status StatusPathDoesNotExist 2025-06-25T14:56:26.476485Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/MyExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |88.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_external_data_source/test-results/unittest/{meta.json ... results_accumulator.log} |88.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestCreateSemaphoreInterrupted [GOOD] >> TProxyActorTest::TestAttachSession [GOOD] >> TProxyActorTest::TestCreateSemaphore [GOOD] |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |88.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestCreateSemaphoreInterrupted [GOOD] >> TestYmqHttpProxy::TestGetQueueUrlWithIAM [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithEmptyName [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestAttachSession [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestCreateSemaphore [GOOD] >> TProxyActorTest::TestDisconnectWhileAttaching [GOOD] >> TPersQueueTest::TestReadPartitionByGroupId [GOOD] >> TPersQueueTest::SrcIdCompatibility >> TestYmqHttpProxy::TestCreateQueueWithWrongBody [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestDisconnectWhileAttaching [GOOD] Test command err: ... waiting for blocked registrations ... blocking NKikimr::NKesus::TEvKesus::TEvRegisterProxy from KESUS_PROXY_ACTOR to KESUS_TABLET_ACTOR cookie 0 ... waiting for blocked registrations (done) 2025-06-25T14:56:27.320388Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037927937] NodeDisconnected NodeId# 2 ... unblocking NKikimr::NKesus::TEvKesus::TEvRegisterProxy from KESUS_PROXY_ACTOR to KESUS_TABLET_ACTOR >> TestYmqHttpProxy::TestGetQueueAttributes >> TestYmqHttpProxy::TestCreateQueueWithAllAttributes >> TestKinesisHttpProxy::DoubleCreateStream [GOOD] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/proxy/ut/unittest >> TestYmqHttpProxy::TestCreateQueueWithWrongAttribute >> KqpQueryService::TableSink_Htap-withOltpSink [GOOD] >> KqpQueryService::TableSink_DisableSink |88.7%| [TA] $(B)/ydb/core/kesus/proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.7%| [TA] {RESULT} $(B)/ydb/core/kesus/proxy/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ycloud/impl/ut/unittest >> TServiceAccountServiceTest::Get [GOOD] Test command err: 2025-06-25T14:56:22.760951Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900968023382405:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:22.761011Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00168e/r3tmp/tmph6khzF/pdisk_1.dat 2025-06-25T14:56:23.112733Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:23.173020Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:23.173171Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:23.175171Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26007 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:23.399172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:23.420043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:25.842550Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900980179306145:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:25.842671Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00168e/r3tmp/tmpZDmlwT/pdisk_1.dat 2025-06-25T14:56:25.946469Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:25.947411Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519900980179306126:2080] 1750863385841408 != 1750863385841411 2025-06-25T14:56:25.986198Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:25.986307Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:25.987876Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19344 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:26.150282Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... >> TestYmqHttpProxy::TestReceiveMessageWithAttributes [GOOD] >> TestKinesisHttpProxy::GoodRequestGetRecords |88.7%| [TA] $(B)/ydb/library/ycloud/impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.7%| [TA] {RESULT} $(B)/ydb/library/ycloud/impl/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TReplicaTest::Handshake >> TReplicaTest::Update >> TReplicaTest::Subscribe >> TReplicaTest::Unsubscribe >> TReplicaTest::UpdateWithoutHandshake >> TestYmqHttpProxy::TestReceiveMessageWithAttemptId >> TReplicaTest::Update [GOOD] >> TReplicaTest::UnsubscribeWithoutSubscribe >> TReplicaTest::Subscribe [GOOD] >> TReplicaTest::SubscribeUnknownPath >> TReplicaTest::Unsubscribe [GOOD] >> TReplicaTest::UnsubscribeUnknownPath >> TReplicaTest::UpdateWithoutHandshake [GOOD] >> TReplicaTest::UpdateWithStaleGeneration >> TReplicaTest::Handshake [GOOD] >> TReplicaTest::DoubleUnsubscribe |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::QueryLimits [GOOD] >> TComputeScheduler::TTotalLimits [GOOD] >> TReplicaTest::DoubleUnsubscribe [GOOD] >> TReplicaTest::DoubleDelete >> TKqpScanData::FailOnUnsupportedPgType >> TReplicaTest::UnsubscribeWithoutSubscribe [GOOD] >> TReplicaTest::SubscribeUnknownPath [GOOD] >> TReplicaTest::SyncVersion >> TReplicaTest::UnsubscribeUnknownPath [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink-UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert-UseSink+UseDataQuery >> TReplicaTest::UpdateWithStaleGeneration [GOOD] >> TKqpScanData::FailOnUnsupportedPgType [GOOD] >> TReplicaTest::DoubleDelete [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::TTotalLimits [GOOD] Test command err: 1610 1600 1610 1600 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::QueryLimits [GOOD] Test command err: 800 800 800 800 >> TReplicaTest::SyncVersion [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::UnsubscribeWithoutSubscribe [GOOD] Test command err: 2025-06-25T14:56:30.145584Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-25T14:56:30.145665Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:56:30.146510Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2025-06-25T14:56:30.146576Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:56:30.154139Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:56:30.154328Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:8:2055] 2025-06-25T14:56:30.154408Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.154523Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [1:8:2055] 2025-06-25T14:56:30.154570Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: 
[1:7:2054] Unsubscribe: subscriber# [1:8:2055], path# path 2025-06-25T14:56:30.154653Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:8:2055] 2025-06-25T14:56:30.154718Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.154820Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [1:8:2055] 2025-06-25T14:56:30.154859Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7:2054] Unsubscribe: subscriber# [1:8:2055], path# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:56:30.412507Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-25T14:56:30.412565Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:56:30.412673Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-25T14:56:30.412714Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:56:30.412764Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:56:30.412817Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::UnsubscribeUnknownPath [GOOD] Test command err: 2025-06-25T14:56:30.146742Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-25T14:56:30.146813Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:56:30.146922Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:9:2056] 2025-06-25T14:56:30.147046Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7:2054] Upsert description: path# path 2025-06-25T14:56:30.150737Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.150865Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:10:2057] 2025-06-25T14:56:30.150904Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:10:2057], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.151029Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event 
size# 72 2025-06-25T14:56:30.151093Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:56:30.155512Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:56:30.157258Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [1:9:2056] 2025-06-25T14:56:30.157305Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7:2054] Unsubscribe: subscriber# [1:9:2056], path# path 2025-06-25T14:56:30.157367Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 40 2025-06-25T14:56:30.157400Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-25T14:56:30.157428Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [1:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:56:30.408801Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::UpdateWithStaleGeneration [GOOD] Test command err: 2025-06-25T14:56:30.148068Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2025-06-25T14:56:30.148151Z node 1 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:797: [1:7:2054] Reject update from unknown populator: sender# [1:8:2055], owner# 1, generation# 1 2025-06-25T14:56:30.148249Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:8:2055] 2025-06-25T14:56:30.148285Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7:2054] Upsert description: path# path 2025-06-25T14:56:30.150731Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.150872Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [1:8:2055] 2025-06-25T14:56:30.152195Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7:2054] Unsubscribe: subscriber# [1:8:2055], path# path 2025-06-25T14:56:30.152260Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [1:8:2055] 2025-06-25T14:56:30.152290Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:56:30.152401Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.152494Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7:2054] Handle 
NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [1:8:2055] 2025-06-25T14:56:30.152535Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7:2054] Unsubscribe: subscriber# [1:8:2055], path# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:56:30.412522Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-25T14:56:30.412629Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:56:30.412751Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 0 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-25T14:56:30.412814Z node 2 :SCHEME_BOARD_REPLICA ERROR: replica.cpp:805: [2:7:2054] Reject update from stale populator: sender# [2:8:2055], owner# 1, generation# 0, pending generation# 1 2025-06-25T14:56:30.412890Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:8:2055] 2025-06-25T14:56:30.412924Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# path 2025-06-25T14:56:30.412990Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.413079Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] 2025-06-25T14:56:30.413121Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:7:2054] Unsubscribe: subscriber# [2:8:2055], path# path 2025-06-25T14:56:30.413181Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { PathId: [OwnerId: 1, LocalPathId: 1] DomainOwnerId: 0 }: sender# [2:8:2055] 2025-06-25T14:56:30.413217Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:56:30.413263Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# [OwnerId: 1, LocalPathId: 1], domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.413342Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { PathId: [OwnerId: 1, LocalPathId: 1] }: sender# [2:8:2055] 2025-06-25T14:56:30.413394Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:7:2054] Unsubscribe: subscriber# [2:8:2055], path# [OwnerId: 1, LocalPathId: 1] |88.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::FailOnUnsupportedPgType [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::DoubleDelete [GOOD] Test command err: 2025-06-25T14:56:30.145625Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-25T14:56:30.145696Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:56:30.421599Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [2:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [2:8:2055] 2025-06-25T14:56:30.421666Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [2:7:2054] Successful 
handshake: owner# 1, generation# 1 2025-06-25T14:56:30.421810Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [2:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [2:8:2055], cookie# 0, event size# 72 2025-06-25T14:56:30.421868Z node 2 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [2:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:56:30.428438Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [2:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:56:30.428612Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:8:2055] 2025-06-25T14:56:30.428698Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.428817Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] 2025-06-25T14:56:30.428871Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [2:7:2054] Unsubscribe: subscriber# [2:8:2055], path# path 2025-06-25T14:56:30.428927Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [2:7:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: path }: sender# [2:8:2055] 2025-06-25T14:56:30.685991Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-25T14:56:30.686050Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:56:30.686131Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:9:2056] 2025-06-25T14:56:30.686159Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7:2054] Upsert description: path# path 2025-06-25T14:56:30.686222Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:9:2056], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.686345Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 72 2025-06-25T14:56:30.686374Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:56:30.686428Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:56:30.686553Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2025-06-25T14:56:30.686586Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-25T14:56:30.686618Z 
node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [3:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:56:30.686766Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:10:2057] 2025-06-25T14:56:30.686805Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:10:2057], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.686882Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 40 2025-06-25T14:56:30.686908Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_replica/unittest >> TReplicaTest::SyncVersion [GOOD] Test command err: 2025-06-25T14:56:30.149739Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [1:7:2054] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [1:8:2055] 2025-06-25T14:56:30.149814Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [1:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:56:30.149963Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 72 2025-06-25T14:56:30.150018Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:56:30.155567Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [1:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 1, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 30} 2025-06-25T14:56:30.155719Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [1:8:2055] 2025-06-25T14:56:30.155796Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7:2054] Subscribe: subscriber# [1:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.155888Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [1:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [1:8:2055], cookie# 0, event size# 40 2025-06-25T14:56:30.155919Z node 1 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [1:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# true 2025-06-25T14:56:30.155946Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:575: [1:7:2054] Delete description: path# path, pathId# [OwnerId: 1, LocalPathId: 1] 2025-06-25T14:56:30.410021Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [2:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [2:8:2055] 2025-06-25T14:56:30.410081Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [2:7:2054] Upsert description: path# path 2025-06-25T14:56:30.410140Z node 2 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [2:7:2054] Subscribe: subscriber# [2:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.671915Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:751: [3:7:2054] Handle 
NKikimrSchemeBoard.TEvHandshake { Owner: 1 Generation: 1 }: sender# [3:8:2055] 2025-06-25T14:56:30.671967Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:769: [3:7:2054] Successful handshake: owner# 1, generation# 1 2025-06-25T14:56:30.672142Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:782: [3:7:2054] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 1 Generation: 1 }: sender# [3:8:2055], cookie# 0, event size# 76 2025-06-25T14:56:30.672174Z node 3 :SCHEME_BOARD_REPLICA NOTICE: replica.cpp:822: [3:7:2054] Update description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], deletion# false 2025-06-25T14:56:30.672222Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:550: [3:7:2054] Upsert description: path# path, pathId# [OwnerId: 1, LocalPathId: 1], pathDescription# {Status StatusSuccess, Path path, PathId [OwnerId: 1, LocalPathId: 1], PathVersion 100500, SubdomainPathId , PathAbandonedTenantsSchemeShards size 0, DescribeSchemeResultSerialized size 32} 2025-06-25T14:56:30.672289Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: path DomainOwnerId: 0 }: sender# [3:8:2055] 2025-06-25T14:56:30.672353Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7:2054] Subscribe: subscriber# [3:8:2055], path# path, domainOwnerId# 0, capabilities# 2025-06-25T14:56:30.672432Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:7:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [3:8:2055], cookie# 1 |88.7%| [TA] $(B)/ydb/core/kqp/runtime/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.7%| [TA] {RESULT} $(B)/ydb/core/kqp/runtime/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.7%| [TA] $(B)/ydb/core/tx/scheme_board/ut_replica/test-results/unittest/{meta.json ... results_accumulator.log} |88.7%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_replica/test-results/unittest/{meta.json ... results_accumulator.log} >> TBlobStorageWardenTest::TestReceivedPDiskRestartNotAllowed >> TBlobStorageWardenTest::TestReceivedPDiskRestartNotAllowed [GOOD] |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::BaseCase >> ColumnBuildTest::AlreadyExists |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestReceivedPDiskRestartNotAllowed [GOOD] >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows >> ColumnBuildTest::ValidDefaultValue |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> YdbTableSplit::SplitByLoadWithReads >> YdbTableSplit::SplitByLoadWithReadsMultipleSplitsWithData |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> YdbTableSplit::RenameTablesAndSplit >> IcbAsActorTests::TestHttpGetResponse [GOOD] |88.8%| [TA] $(B)/ydb/core/blobstorage/nodewarden/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpGetResponse [GOOD] |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> IcbAsActorTests::TestHttpPostReaction >> IcbAsActorTests::TestHttpPostReaction [GOOD] |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpPostReaction [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithAllAttributes [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithWrongAttribute [GOOD] >> KqpQueryService::TableSink_DisableSink [GOOD] >> BasicUsage::RetryDiscoveryWithCancel [GOOD] >> BasicUsage::RecreateObserver >> TestYmqHttpProxy::TestGetQueueAttributes [GOOD] |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/control/ut/unittest >> TestYmqHttpProxy::BillingRecordsForJsonApi >> TestYmqHttpProxy::TestCreateQueueWithTags >> TestYmqHttpProxy::TestDeleteQueue |88.8%| [TA] $(B)/ydb/core/control/ut/test-results/unittest/{meta.json ... results_accumulator.log} |88.8%| [TA] {RESULT} $(B)/ydb/core/control/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TestYmqHttpProxy::TestReceiveMessageWithAttemptId [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_DisableSink [GOOD] Test command err: Trying to start YDB, gRPC: 29527, MsgBus: 2772 2025-06-25T14:56:02.374220Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900881823483909:2213];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:02.374390Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00178e/r3tmp/tmp3i4ZUa/pdisk_1.dat 2025-06-25T14:56:02.727477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:02.727672Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:02.758060Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900881823483734:2080] 1750863362350060 != 1750863362350063 2025-06-25T14:56:02.762048Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:02.766580Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29527, node 1 2025-06-25T14:56:02.830889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:02.830912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:02.830920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:02.831048Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2772 TClient is 
connected to server localhost:2772 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:03.361472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:03.380416Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:03.380805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:05.213970Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900894708386258:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:05.214075Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:05.516612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:56:05.701013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900894708386364:2297];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:56:05.701013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519900894708386416:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:56:05.701250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519900894708386416:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:56:05.701526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519900894708386416:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:56:05.701650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519900894708386416:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:56:05.701759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519900894708386416:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:56:05.701780Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900894708386364:2297];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:56:05.701871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519900894708386416:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:56:05.701933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900894708386364:2297];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:56:05.701972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519900894708386416:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:56:05.702044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900894708386364:2297];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:56:05.702130Z 
node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519900894708386416:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:56:05.702161Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900894708386364:2297];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:56:05.702232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519900894708386416:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:56:05.702271Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900894708386364:2297];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:56:05.702349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900894708386364:2297];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:56:05.702372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519900894708386416:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:56:05.702411Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900894708386364:2297];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:56:05.702529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7519900894708386416:2305];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:56:05.702563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900894708386364:2297];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:56:05.702673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900894708386364:2297];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:56:05.702767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7519900894708386364:2297];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:56:05.747369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900894708386408:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:56:05.747435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7519900894708386408:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:56:05.747669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7519900894708386408:2299];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:56:05.747795Z node 1 :TX_COLUMNSHARD WARN ... 72075186224037892;tx_id=281474976715658;this=88923011869664;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1750863393204;max=18446744073709551615;plan=0;src=[3:7519900996011521738:2163];cookie=52:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.205846Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;self_id=[3:7519901008896424107:2305];ev=NActors::IEventHandle;tablet_id=72075186224037901;tx_id=281474976715658;this=88923065569408;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1750863393205;max=18446744073709551615;plan=0;src=[3:7519900996011521738:2163];cookie=142:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.206526Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7519901008896424151:2307];ev=NActors::IEventHandle;tablet_id=72075186224037897;tx_id=281474976715658;this=88923011623936;method=TTxController::StartProposeOnExecute;tx_info=281474976715658:TX_KIND_SCHEMA;min=1750863393206;max=18446744073709551615;plan=0;src=[3:7519900996011521738:2163];cookie=102:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.212485Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.212520Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.219134Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.219872Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.221112Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037898;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.221778Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.225585Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.226264Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.226828Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.227373Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.231548Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.232154Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.232380Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.232986Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.237384Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.237723Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.238035Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.238678Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.243288Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.243540Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.243908Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.244110Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.249652Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.249652Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.250292Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.250311Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.255656Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.256179Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.257130Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.258423Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:56:33.260663Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.261922Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715658; 2025-06-25T14:56:33.284662Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901013191391904:2381], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:33.284768Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:33.285121Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901013191391909:2384], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:33.288710Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:33.314056Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901013191391911:2385], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:56:33.399754Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901013191391964:2691] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:33.484091Z node 3 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2029: ActorId: [3:7519901013191391990:2380] TxId: 281474976715661. Ctx: { TraceId: 01jyksecg03b81kx4b303bf8yw, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YjY3ZmM2MzctYzQ0MzkwNWMtOTA3Mzg1ZjktZDllNTE3YzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Data manipulation queries do not support column shard tables. 2025-06-25T14:56:33.484295Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=YjY3ZmM2MzctYzQ0MzkwNWMtOTA3Mzg1ZjktZDllNTE3YzU=, ActorId: [3:7519901013191391902:2380], ActorState: ExecuteState, TraceId: 01jyksecg03b81kx4b303bf8yw, Create QueryResponse for error on request, msg: >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning-UseSink+UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink+UseDataQuery >> TestKinesisHttpProxy::GoodRequestGetRecords [GOOD] >> TestYmqHttpProxy::TestListQueues >> ColumnBuildTest::AlreadyExists [GOOD] >> TestKinesisHttpProxy::GoodRequestGetRecordsCbor ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::AlreadyExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:56:33.587034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:33.587166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:33.587211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:33.587246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:33.587286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:33.587312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:33.587369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:33.587455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: 
Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:56:33.588165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:33.592643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:33.677179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:56:33.677254Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:33.698473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:33.698916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:33.699108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:33.707416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:33.707848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:33.708609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:33.708963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:33.713441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:33.713699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:56:33.714973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:33.715049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:33.715215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:33.715266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:33.715317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:33.715406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.724554Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:56:33.855576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" 
OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:33.855783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.855985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:33.856030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:33.856232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:33.856300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:33.858990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:33.859169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:56:33.859336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.859400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:33.859454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:56:33.859509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:33.861778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.861857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:33.861911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:33.864131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.864189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.864265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:33.864362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:33.868186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:33.872474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:33.872712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:56:33.873899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:33.874061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:33.874124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:33.874488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:33.874551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:33.874732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:33.874814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56:33.877213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:33.877281Z node 1 :FLAT_TX_SCHEMESHARD ... 
LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409551 SchemeShard: 72075186233409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SharedHive: 72057594037968897 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72075186233409549, at schemeshard: 72075186233409549 2025-06-25T14:56:36.381011Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__create.cpp:23: TIndexBuilder::TXTYPE_CREATE_INDEX_BUILD: DoExecute TxId: 106 DatabaseName: "/MyRoot/ServerLessDB" Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } 2025-06-25T14:56:36.383421Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable 2025-06-25T14:56:36.383572Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1163:3031], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:56:36.383620Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.cpp:180: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: AllocateTxId 106 2025-06-25T14:56:36.383771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 106, at schemeshard: 72075186233409549 2025-06-25T14:56:36.383848Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2216: 
TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, id# 106, txId# 281474976725757 2025-06-25T14:56:36.383943Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2219: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1163:3031], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976725757 2025-06-25T14:56:36.385501Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable 2025-06-25T14:56:36.385573Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1163:3031], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:56:36.385797Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:428: AlterMainTablePropose 106 AlterMainTable Transaction { WorkingDir: "/MyRoot/ServerLessDB" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table" Columns { Name: "value" Type: "Uint64" DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: true } } Internal: true } TxId: 281474976725757 TabletId: 72075186233409549 FailOnExist: true 2025-06-25T14:56:36.388441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/ServerLessDB" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table" Columns { Name: "value" Type: "Uint64" DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: true } } Internal: true } TxId: 281474976725757 TabletId: 72075186233409549 FailOnExist: true , at schemeshard: 72075186233409549 2025-06-25T14:56:36.388692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:506: TAlterTable Propose, 
path: /MyRoot/ServerLessDB/Table, pathId: , opId: 281474976725757:0, at schemeshard: 72075186233409549 2025-06-25T14:56:36.389071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976725757:1, propose status:StatusInvalidParameter, reason: Cannot alter type for column 'value', at schemeshard: 72075186233409549 2025-06-25T14:56:36.392489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976725757, response: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549, at schemeshard: 72075186233409549 2025-06-25T14:56:36.392750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976725757, database: /MyRoot/ServerLessDB, subject: , status: StatusInvalidParameter, reason: Cannot alter type for column 'value', operation: ALTER TABLE, path: /MyRoot/ServerLessDB/Table 2025-06-25T14:56:36.392937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6781: Handle: TEvModifySchemeTransactionResult: txId# 281474976725757, status# StatusInvalidParameter 2025-06-25T14:56:36.393026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6783: Message: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549 2025-06-25T14:56:36.393230Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2053: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvModifySchemeTransactionResult, id# 106, cookie: 106, record: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549, status: StatusInvalidParameter 2025-06-25T14:56:36.393373Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2058: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvModifySchemeTransactionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1163:3031], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, cookie: 106, record: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549, status: StatusInvalidParameter 2025-06-25T14:56:36.394460Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:2027: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuilder::TTxReply: ReplyOnCreation, BuildIndexId: 106, status: BAD_REQUEST, error: At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column 'value', replyTo: [1:1163:3031], message: TxId: 106 Status: BAD_REQUEST Issues { message: "At AlterMainTable state got unsuccess 
propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } IndexBuild { Id: 106 Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } State: STATE_PREPARING Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } } BUILDCOLUMN RESPONSE CREATE: NKikimrIndexBuilder.TEvCreateResponse TxId: 106 Status: BAD_REQUEST Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } IndexBuild { Id: 106 Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } State: STATE_PREPARING Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } } |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> ColumnBuildTest::BaseCase [GOOD] |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> ColumnBuildTest::ValidDefaultValue [GOOD] |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::DisableDeduplication [GOOD] Test command err: === Server->StartServer(false); 2025-06-25T14:51:25.776133Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899691360581165:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:25.776193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b63/r3tmp/tmpM8Hgff/pdisk_1.dat 2025-06-25T14:51:25.951952Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:26.139053Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-25T14:51:26.140454Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899691360581142:2080] 1750863085774653 != 1750863085774656 2025-06-25T14:51:26.159596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:26.159706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 9291, node 1 2025-06-25T14:51:26.161645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:26.203706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b63/r3tmp/yandexxdhToB.tmp 2025-06-25T14:51:26.203732Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b63/r3tmp/yandexxdhToB.tmp 2025-06-25T14:51:26.203898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b63/r3tmp/yandexxdhToB.tmp 2025-06-25T14:51:26.204035Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:26.247642Z INFO: TTestServer started on Port 2387 GrpcPort 9291 TClient is connected to server localhost:2387 PQClient connected to localhost:9291 === TenantModeEnabled() = 0 === Init PQ - start server on port 9291 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:51:26.632219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:51:26.632545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:26.632712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:51:26.632741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:51:26.632951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:51:26.632996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:26.633495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:51:26.633689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:51:26.633818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:26.633852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:51:26.633884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state waiting... 
2025-06-25T14:51:26.633903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 2 -> 3 2025-06-25T14:51:26.635013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:26.635038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:51:26.635052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 3 -> 128 2025-06-25T14:51:26.635323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:26.635340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:26.635355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:51:26.635382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-25T14:51:26.639774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:26.640044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:51:26.640062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-25T14:51:26.640093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:51:26.640250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-25T14:51:26.640386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:51:26.641892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750863086686, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:51:26.642025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750863086686 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:51:26.642082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: 
NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:51:26.642363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 128 -> 240 2025-06-25T14:51:26.642398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:51:26.642604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:51:26.642753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-25T14:51:26.643314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:51:26.643339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-25T14:51:26.644155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:51:26.644182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519899695655548932:2245], at schemeshard: 72057594046644480, txId: 281474976710657, path id: 1 2025-06-25T14:51:26.644219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:26.644243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976710657:0 ProgressState 2025-06-25T14:51:26.6443 ... 
nter &> /-S/contrib/libs/cxxsupp/libcxx/include/variant:706:1 #7 0x1b09b9fd in __union<2UL, const grpc_core::ChannelArgs::Pointer &> /-S/contrib/libs/cxxsupp/libcxx/include/variant:706:1 #8 0x1b09b9fd in construct_at >, grpc_core::ChannelArgs::Pointer>, const std::__y1::in_place_index_t<2UL> &, const grpc_core::ChannelArgs::Pointer &, std::__y1::__variant_detail::__union<(std::__y1::__variant_detail::_Trait)1, 0UL, int, TBasicString >, grpc_core::ChannelArgs::Pointer> *> /-S/contrib/libs/cxxsupp/libcxx/include/__memory/construct_at.h:41:46 #9 0x1b09b9fd in __construct_at >, grpc_core::ChannelArgs::Pointer>, const std::__y1::in_place_index_t<2UL> &, const grpc_core::ChannelArgs::Pointer &, std::__y1::__variant_detail::__union<(std::__y1::__variant_detail::_Trait)1, 0UL, int, TBasicString >, grpc_core::ChannelArgs::Pointer> *> /-S/contrib/libs/cxxsupp/libcxx/include/__memory/construct_at.h:49:10 #10 0x1b09b9fd in operator() &> /-S/contrib/libs/cxxsupp/libcxx/include/variant:816:13 #11 0x1b09b9fd in __invoke<(lambda at /-S/contrib/libs/cxxsupp/libcxx/include/variant:815:11), const std::__y1::__variant_detail::__alt<2UL, grpc_core::ChannelArgs::Pointer> &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #12 0x1b09b9fd in decltype(auto) std::__y1::__variant_detail::__visitation::__base::__dispatcher<2ul>::__dispatch[abi:fe200000]>, grpc_core::ChannelArgs::Pointer>>::__generic_construct[abi:fe200000]>, grpc_core::ChannelArgs::Pointer>, (std::__y1::__variant_detail::_Trait)1> const&>(std::__y1::__variant_detail::__ctor>, grpc_core::ChannelArgs::Pointer>>&, std::__y1::__variant_detail::__copy_constructor>, grpc_core::ChannelArgs::Pointer>, (std::__y1::__variant_detail::_Trait)1> const&)::'lambda'(std::__y1::__variant_detail::__copy_constructor>, grpc_core::ChannelArgs::Pointer>, (std::__y1::__variant_detail::_Trait)1> const&)&&, std::__y1::__variant_detail::__base<(std::__y1::__variant_detail::_Trait)1, int, TBasicString>, grpc_core::ChannelArgs::Pointer> const&>(std::__y1::__variant_detail::__copy_constructor>, grpc_core::ChannelArgs::Pointer>, (std::__y1::__variant_detail::_Trait)1> const&, std::__y1::__variant_detail::__base<(std::__y1::__variant_detail::_Trait)1, int, TBasicString>, grpc_core::ChannelArgs::Pointer> const&) /-S/contrib/libs/cxxsupp/libcxx/include/variant:531:14 #13 0x1b097158 in __visit_alt_at<(lambda at /-S/contrib/libs/cxxsupp/libcxx/include/variant:815:11), const std::__y1::__variant_detail::__copy_constructor >, grpc_core::ChannelArgs::Pointer>, (std::__y1::__variant_detail::_Trait)1> &> /-S/contrib/libs/cxxsupp/libcxx/include/variant:493:12 #14 0x1b097158 in __generic_construct >, grpc_core::ChannelArgs::Pointer>, (std::__y1::__variant_detail::_Trait)1> &> /-S/contrib/libs/cxxsupp/libcxx/include/variant:813:7 #15 0x1b097158 in __copy_constructor /-S/contrib/libs/cxxsupp/libcxx/include/variant:888:1 #16 0x1b097158 in __assignment /-S/contrib/libs/cxxsupp/libcxx/include/variant:900:28 #17 0x1b097158 in __move_assignment /-S/contrib/libs/cxxsupp/libcxx/include/variant:986:1 #18 0x1b097158 in __copy_assignment /-S/contrib/libs/cxxsupp/libcxx/include/variant:1016:1 #19 0x1b097158 in __impl /-S/contrib/libs/cxxsupp/libcxx/include/variant:1036:25 #20 0x1b097158 in variant /-S/contrib/libs/cxxsupp/libcxx/include/variant:1183:35 #21 0x1b097158 in grpc_core::AVL>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::Rebalance(TBasicString>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>, std::__y1::shared_ptr>, std::__y1::variant>, 
grpc_core::ChannelArgs::Pointer>>::Node> const&, std::__y1::shared_ptr>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::Node> const&) /-S/contrib/libs/grpc/src/core/lib/avl/avl.h:252:30 #22 0x1b09627a in grpc_core::AVL>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::AddKey(std::__y1::shared_ptr>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::Node> const&, TBasicString>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>) /-S/contrib/libs/grpc/src/core/lib/avl/avl.h:265:14 #23 0x1b096249 in grpc_core::AVL>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::AddKey(std::__y1::shared_ptr>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::Node> const&, TBasicString>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>) /-S/contrib/libs/grpc/src/core/lib/avl/avl.h:266:24 #24 0x1b08e157 in grpc_core::AVL>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>>::Add(TBasicString>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>) const /-S/contrib/libs/grpc/src/core/lib/avl/avl.h:36:16 #25 0x1b08db3a in grpc_core::ChannelArgs::Set(std::__y1::basic_string_view>, std::__y1::variant>, grpc_core::ChannelArgs::Pointer>) const /-S/contrib/libs/grpc/src/core/lib/channel/channel_args.cc:158:28 #26 0x1b08d205 in grpc_core::ChannelArgs::Set(std::__y1::basic_string_view>, grpc_core::ChannelArgs::Pointer) const /-S/contrib/libs/grpc/src/core/lib/channel/channel_args.cc:150:10 #27 0x1b0f73f6 in grpc_core::Channel::Create(char const*, grpc_core::ChannelArgs, grpc_channel_stack_type, grpc_transport*) /-S/contrib/libs/grpc/src/core/lib/surface/channel.cc:218:19 #28 0x1b47d571 in CreateChannel /-S/contrib/libs/grpc/src/core/ext/transport/chttp2/client/chttp2_connector.cc:323:10 #29 0x1b47d571 in grpc_channel_create /-S/contrib/libs/grpc/src/core/ext/transport/chttp2/client/chttp2_connector.cc:365:14 #30 0x1b9b15b0 in grpc::(anonymous namespace)::InsecureChannelCredentialsImpl::CreateChannelWithInterceptors(TBasicString> const&, grpc::ChannelArguments const&, std::__y1::vector>, std::__y1::allocator>>>) /-S/contrib/libs/grpc/src/cpp/client/insecure_credentials.cc:55:13 #31 0x1b9b138b in grpc::(anonymous namespace)::InsecureChannelCredentialsImpl::CreateChannelImpl(TBasicString> const&, grpc::ChannelArguments const&) /-S/contrib/libs/grpc/src/cpp/client/insecure_credentials.cc:40:12 #32 0x1b9a9b44 in grpc::CreateCustomChannel(TBasicString> const&, std::__y1::shared_ptr const&, grpc::ChannelArguments const&) /-S/contrib/libs/grpc/src/cpp/client/create_channel.cc:50:25 #33 0x198ebfb6 in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TDirectReadTestSetup::Connect(NKikimr::NPersQueueTests::TPersQueueV1TestServer&) /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:824:23 #34 0x195d5732 in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TDirectReadTestSetup::TDirectReadTestSetup(NKikimr::NPersQueueTests::TPersQueueV1TestServer&) /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:806:13 #35 0x196229b1 in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TTestCaseDirectReadRestartTablet::Execute_(NUnitTest::TTestContext&) /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:1629:30 #36 0x198cacc7 in operator() /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1 #37 0x198cacc7 in __invoke<(lambda at /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1) &> /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149:25 #38 0x198cacc7 in __call<(lambda at /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1) &> 
/-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224:5 #39 0x198cacc7 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169:12 #40 0x198cacc7 in std::__y1::__function::__func, void ()>::operator()() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314:10 #41 0x1a369f25 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431:12 #42 0x1a369f25 in operator() /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990:10 #43 0x1a369f25 in TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/utmain.cpp:525:20 #44 0x1a3398c8 in NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool) /-S/library/cpp/testing/unittest/registar.cpp:373:18 #45 0x198c9c73 in NKikimr::NPersQueueTests::NTestSuiteTPersQueueTest::TCurrentTest::Execute() /-S/ydb/services/persqueue_v1/persqueue_ut.cpp:134:1 #46 0x1a33b195 in NUnitTest::TTestFactory::Execute() /-S/library/cpp/testing/unittest/registar.cpp:494:19 #47 0x1a36449c in NUnitTest::RunMain(int, char**) /-S/library/cpp/testing/unittest/utmain.cpp:872:44 #48 0x7f978ab10d8f (/lib/x86_64-linux-gnu/libc.so.6+0x29d8f) (BuildId: cd410b710f0f094c6832edd95931006d883af48e) SUMMARY: AddressSanitizer: 6654256 byte(s) leaked in 2204 allocation(s). ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::BaseCase [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:56:33.584033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:33.584128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:33.584158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:33.584186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:33.585997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:33.586060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:33.586147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:33.586238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:56:33.586953Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:33.589791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:33.673047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:56:33.673104Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:33.687757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:33.688069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:33.688202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:33.694166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:33.694419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:33.694819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:33.695001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:33.698683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:33.699685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:56:33.706166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:33.706221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:33.706316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:33.706361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:33.706419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:33.706470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.714394Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:56:33.858831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T14:56:33.859013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.859207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:33.859252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:33.859483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:33.859570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:33.861626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:33.861791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:56:33.861958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.862012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:33.862045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:56:33.862131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:33.863769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.863826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:33.863859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:33.866071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.866119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.866174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:33.866232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:33.869294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:33.872866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:33.873060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:56:33.873977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:33.874094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:33.874139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:33.874427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:33.874476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:33.874635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:33.874716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56:33.878462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:33.878509Z node 1 :FLAT_TX_SCHEMESHARD ... 
2 } ExecLevel: 0 TxId: 281474976725761 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409550 2025-06-25T14:56:37.645457Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-25T14:56:37.645544Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1163:3031], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-25T14:56:37.645713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976725761:4294967295 from tablet: 72075186233409549 to tablet: 72075186233409550 cookie: 0:281474976725761 msg type: 269090816 2025-06-25T14:56:37.645863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976725761, partId: 4294967295, tablet: 72075186233409550 2025-06-25T14:56:37.646000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-25T14:56:37.646033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 0/1, is published: true 2025-06-25T14:56:37.646063Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-25T14:56:37.659896Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [1:1837:3697], Recipient [1:771:2658]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186233409549 ClientId: [1:1837:3697] ServerId: [1:1839:3699] } 2025-06-25T14:56:37.659950Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:56:37.717554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 650, transactions count in step: 1, at schemeshard: 72075186233409549 2025-06-25T14:56:37.717662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976725761 AckTo { RawX1: 0 RawX2: 0 } } Step: 650 MediatorID: 72075186233409551 TabletID: 72075186233409549, at schemeshard: 72075186233409549 2025-06-25T14:56:37.717715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: 
[72075186233409549] TDropLock TPropose opId# 281474976725761:0 HandleReply TEvOperationPlan: step# 650 2025-06-25T14:56:37.717754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976725761:0 128 -> 240 2025-06-25T14:56:37.720186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976725761:0, at schemeshard: 72075186233409549 2025-06-25T14:56:37.720246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72075186233409549] TDone opId# 281474976725761:0 ProgressState 2025-06-25T14:56:37.720353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-25T14:56:37.720379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-25T14:56:37.720408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-25T14:56:37.720431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-25T14:56:37.720459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 1/1, is published: true 2025-06-25T14:56:37.720513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:574:2511] message: TxId: 281474976725761 2025-06-25T14:56:37.720549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-25T14:56:37.720575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976725761:0 2025-06-25T14:56:37.720597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976725761:0 2025-06-25T14:56:37.720667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-06-25T14:56:37.724280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976725761 2025-06-25T14:56:37.724368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976725761 2025-06-25T14:56:37.724424Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 106, txId# 281474976725761 2025-06-25T14:56:37.724514Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1163:3031], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, 
InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }}, txId# 281474976725761 2025-06-25T14:56:37.726145Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-25T14:56:37.726235Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1163:3031], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-25T14:56:37.726272Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-25T14:56:37.727754Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done 2025-06-25T14:56:37.727878Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1163:3031], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-25T14:56:37.727921Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:333: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 106, subscribers count# 1 2025-06-25T14:56:37.728187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for 
txId 106: got EvNotifyTxCompletionResult 2025-06-25T14:56:37.728233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:1183:3051] TestWaitNotification: OK eventTxId 106 2025-06-25T14:56:37.730564Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 106 2025-06-25T14:56:37.730849Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } >> KqpService::CloseSessionsWithLoad [GOOD] >> KqpService::PatternCache ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::ValidDefaultValue [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:56:33.584379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:33.584529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:33.584595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:33.584644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:33.587161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:33.587232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:33.587312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:33.587375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-06-25T14:56:33.588061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:33.590769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:33.667615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:56:33.667675Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:33.683178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:33.683536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:33.683790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:33.689667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:33.690027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:33.692547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:33.692808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:33.698936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:33.699706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:56:33.706987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:33.707057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:33.707193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:33.707237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:33.707301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:33.707371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.713187Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:56:33.831592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" 
Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:33.834527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.838237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:33.838313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:33.839631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:33.839720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:33.843407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:33.844319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:56:33.844506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.844649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:33.844684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:56:33.844732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:33.847018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.847062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:33.847130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:33.850451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.850496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.850555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:33.850617Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:33.860845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:33.862493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:33.863745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:56:33.864665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:33.864760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:33.864803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:33.866555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:33.866610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:33.866796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:33.866857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56:33.873325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:33.873377Z node 1 :FLAT_TX_SCHEMESHARD ... 
} ExecLevel: 0 TxId: 281474976725761 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409550 2025-06-25T14:56:37.664347Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-25T14:56:37.664435Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1163:3031], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-25T14:56:37.664631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976725761:4294967295 from tablet: 72075186233409549 to tablet: 72075186233409550 cookie: 0:281474976725761 msg type: 269090816 2025-06-25T14:56:37.664741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976725761, partId: 4294967295, tablet: 72075186233409550 2025-06-25T14:56:37.664880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-25T14:56:37.664913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 0/1, is published: true 2025-06-25T14:56:37.664941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-25T14:56:37.677151Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [1:1837:3697], Recipient [1:771:2658]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186233409549 ClientId: [1:1837:3697] ServerId: [1:1839:3699] } 2025-06-25T14:56:37.677200Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T14:56:37.733350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 650, transactions count in step: 1, at schemeshard: 72075186233409549 2025-06-25T14:56:37.733464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976725761 AckTo { RawX1: 0 RawX2: 0 } } Step: 650 MediatorID: 72075186233409551 TabletID: 72075186233409549, at schemeshard: 72075186233409549 2025-06-25T14:56:37.733508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: 
[72075186233409549] TDropLock TPropose opId# 281474976725761:0 HandleReply TEvOperationPlan: step# 650 2025-06-25T14:56:37.733760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976725761:0 128 -> 240 2025-06-25T14:56:37.736000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976725761:0, at schemeshard: 72075186233409549 2025-06-25T14:56:37.736053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72075186233409549] TDone opId# 281474976725761:0 ProgressState 2025-06-25T14:56:37.736139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-25T14:56:37.736166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-25T14:56:37.736196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-25T14:56:37.736218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-25T14:56:37.736244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 1/1, is published: true 2025-06-25T14:56:37.736295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:574:2511] message: TxId: 281474976725761 2025-06-25T14:56:37.736372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-25T14:56:37.736397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976725761:0 2025-06-25T14:56:37.736431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976725761:0 2025-06-25T14:56:37.736492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-06-25T14:56:37.748773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976725761 2025-06-25T14:56:37.748837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976725761 2025-06-25T14:56:37.748913Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 106, txId# 281474976725761 2025-06-25T14:56:37.748995Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1163:3031], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, 
InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }}, txId# 281474976725761 2025-06-25T14:56:37.753049Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-25T14:56:37.753144Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1163:3031], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-25T14:56:37.753195Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-25T14:56:37.755117Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done 2025-06-25T14:56:37.755253Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1163:3031], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-25T14:56:37.755319Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:333: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 106, subscribers count# 1 2025-06-25T14:56:37.755445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for 
txId 106: got EvNotifyTxCompletionResult 2025-06-25T14:56:37.755481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:1183:3051] TestWaitNotification: OK eventTxId 106 2025-06-25T14:56:37.757988Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 106 2025-06-25T14:56:37.758616Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "ColumnValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 1111 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "ColumnValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 1111 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows [GOOD] |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |88.9%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> Initializer::Simple |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:56:33.584057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, 
WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:56:33.584175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:33.584221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:56:33.584272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:56:33.586204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:56:33.586258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:56:33.586333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:56:33.586412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:56:33.587120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:56:33.592806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:56:33.675177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:56:33.675240Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:33.692795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:56:33.693193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:56:33.693345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:56:33.699184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:56:33.699581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:56:33.700196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:33.700394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:56:33.703395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:33.703573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:56:33.709218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:33.709347Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:56:33.709523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:56:33.709580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:56:33.709634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:56:33.709716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.723014Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:56:33.875401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:56:33.875593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.875746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:56:33.875783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:56:33.875963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:56:33.876017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:33.877748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:33.877906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:56:33.878026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.878078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 
ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:56:33.878120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:56:33.878160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:56:33.879404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.879447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:56:33.879483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:56:33.880685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.880717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:56:33.880775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:33.880825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:56:33.888940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:56:33.891513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:56:33.891718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:56:33.892720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:56:33.892853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:56:33.892899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:33.893244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:56:33.893301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:56:33.893481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:56:33.893551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:56:33.896300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:56:33.896378Z node 1 :FLAT_TX_SCHEMESHARD ... lMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'28))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.127306Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2070:3930], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'29))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.137655Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2071:3931], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'30))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.144987Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2072:3932], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'31))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.151944Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2073:3933], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'32))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.159142Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2074:3934], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'33))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.166217Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2075:3935], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'34))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.173255Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2076:3936], Recipient 
[1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'35))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.180495Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2077:3937], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'36))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.187817Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2078:3938], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'37))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.195059Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2079:3939], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'38))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.202697Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2080:3940], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'39))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.210463Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2081:3941], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'40))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.218376Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2082:3942], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'41))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.225713Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2083:3943], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'42))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.233286Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2084:3944], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'43))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.240667Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2085:3945], Recipient [1:771:2658]: 
NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'44))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.247946Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2086:3946], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'45))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.255513Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2087:3947], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'46))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.263021Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2088:3948], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'47))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.271227Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2089:3949], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'48))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.279589Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2090:3950], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'49))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-25T14:56:39.287123Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830210, Sender [1:2091:3951], Recipient [1:771:2658]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'50))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TA] $(B)/ydb/core/tx/schemeshard/ut_column_build/test-results/unittest/{meta.json ... results_accumulator.log} |89.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_column_build/test-results/unittest/{meta.json ... 
results_accumulator.log} |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TestYmqHttpProxy::TestCreateQueueWithTags [GOOD] |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.0%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TestKinesisHttpProxy::GoodRequestGetRecordsCbor [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfterAndTimeout [GOOD] >> TestYmqHttpProxy::TestDeleteMessage |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> TestKinesisHttpProxy::GoodRequestGetRecordsLongStreamName |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfterAndTimeout [GOOD] Test command err: Trying to start YDB, gRPC: 61941, MsgBus: 8113 2025-06-25T14:56:03.218023Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900885164037523:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:03.221112Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001787/r3tmp/tmpP9uDVV/pdisk_1.dat 2025-06-25T14:56:03.649627Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61941, node 1 2025-06-25T14:56:03.709252Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:03.709379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:03.711500Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:03.721804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:03.721845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:03.721855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:03.722081Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8113 TClient is connected to server localhost:8113 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:56:04.236442Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:04.292242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:04.314088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:04.330608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:04.481854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:04.653718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:04.751690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:06.159120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900898048941004:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:06.159208Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:06.447899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:06.470650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:06.495815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:06.523931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:06.550023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:06.628135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:06.652837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:06.730250Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900898048941670:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:06.730307Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:06.730370Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900898048941675:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:06.733478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:06.740373Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900898048941677:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:06.814411Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900898048941728:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:07.722656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:07.725609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:07.726849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propo ... (5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:33.687265Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:33.689397Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:33.736557Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:33.736601Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:33.736611Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:33.736782Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17530 TClient is connected to server localhost:17530 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:56:34.290798Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:34.302588Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:34.363624Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:34.555403Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:34.565291Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:34.644431Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:37.499723Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519901031826010151:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:37.499810Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:37.604735Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:37.644935Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:37.717008Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:37.758931Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:37.810715Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:37.850557Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:37.893976Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:38.033905Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519901036120978114:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:38.034011Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:38.034252Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519901036120978119:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:38.038836Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:38.053334Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519901036120978121:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:56:38.129468Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519901036120978172:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:38.552462Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519901014646139347:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:38.627640Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:39.287018Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:39.289613Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:39.293058Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:39.706322Z node 5 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=5&id=Yjc3MzZkYmEtNGI4YWQwMWMtNmU1NDI4YjctZDBiYTZhMzk=, ActorId: [5:7519901040415946014:2500], ActorState: ExecuteState, TraceId: 01jyksejbh181494svx88zg8e0, Create QueryResponse for error on request, msg: >> TestYmqHttpProxy::TestListQueues [GOOD] >> TestYmqHttpProxy::TestPurgeQueue >> KqpService::ToDictCache-UseCache [GOOD] >> TInterconnectTest::TestManyEvents >> TInterconnectTest::TestNotifyUndelivered >> TInterconnectTest::TestConnectAndDisconnect >> TActorActivity::Basic [GOOD] >> ActorBootstrapped::TestBootstrapped >> TestProtocols::TestConnectProtocol >> ActorBootstrapped::TestBootstrapped [GOOD] >> ActorBootstrapped::TestBootstrappedParent >> ActorBootstrapped::TestBootstrappedParent [GOOD] >> TActorTracker::Basic >> TInterconnectTest::TestNotifyUndelivered [GOOD] >> TInterconnectTest::TestNotifyUndeliveredOnMissedActor >> TActorTracker::Basic [GOOD] >> TestProtocols::TestConnectProtocol [GOOD] >> TestProtocols::TestHTTPCollected >> TInterconnectTest::TestConnectAndDisconnect [GOOD] >> TInterconnectTest::TestBlobEventPreSerialized >> KqpQueryService::CloseSessionsWithLoad [GOOD] >> KqpQueryService::ClosedSessionRemovedFromPool |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/persqueue/dread_cache_service/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::OutdatedSession >> TPersQueueTest::SrcIdCompatibility [GOOD] >> TPQCachingProxyTest::TestWrongSessionOrGeneration >> TPQCachingProxyTest::TestDeregister >> TPQCachingProxyTest::MultipleSessions >> TInterconnectTest::TestBlobEventPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventUpToMebibytes >> TPQCachingProxyTest::TestPublishAndForget >> TInterconnectTest::TestNotifyUndeliveredOnMissedActor [GOOD] >> TInterconnectTest::TestPreSerializedBlobEventUpToMebibytes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TActorTracker::Basic [GOOD] Test command err: ASYNC_DESTROYER >> TestProtocols::TestHTTPCollected [GOOD] >> TInterconnectTest::TestTraceIdPassThrough >> TestYmqHttpProxy::BillingRecordsForJsonApi [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpService::ToDictCache-UseCache [GOOD] Test command err: Trying to start YDB, gRPC: 24144, MsgBus: 9696 2025-06-25T14:55:43.922718Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900798833325682:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:43.923176Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017e6/r3tmp/tmp0hmuIT/pdisk_1.dat 2025-06-25T14:55:44.307939Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:44.308230Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900798833325468:2080] 1750863343875393 != 1750863343875396 TServer::EnableGrpc on GrpcPort 24144, node 1 2025-06-25T14:55:44.361869Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:44.362057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:44.364244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:44.385414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:44.385435Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:44.385441Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:44.385572Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9696 TClient is connected to server localhost:9696 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:55:44.918927Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:45.001293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:45.017202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:55:45.024047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:45.136171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:45.294590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:45.353595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:55:46.772720Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900811718228997:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:46.772821Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:47.039083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.070393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.098576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.124781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.155642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.240617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.274243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:47.334799Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900816013196955:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:47.334877Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:47.335167Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900816013196960:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:47.338971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:47.352755Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900816013196962:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:47.418355Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900816013197013:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:48.434414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:48.918228Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900798833325682:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:48.918291Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detec ... R: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017e6/r3tmp/tmpvys2p0/pdisk_1.dat 2025-06-25T14:56:26.608151Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:26.608988Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519900983067077219:2080] 1750863386485616 != 1750863386485619 TServer::EnableGrpc on GrpcPort 17205, node 3 2025-06-25T14:56:26.629014Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:26.629112Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:26.630771Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:26.666464Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:26.666489Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:26.666499Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:26.666661Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7021 TClient is connected to server localhost:7021 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:27.244693Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:27.493375Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:29.759868Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900995951979769:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:29.759899Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900995951979768:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:29.759940Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900995951979767:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:29.759951Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900995951979746:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:29.760253Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:29.762483Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900995951979791:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:29.762553Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519900995951979796:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:29.762609Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:29.766056Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:29.769760Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900995951979778:2309] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:56:29.769911Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900995951979809:2317] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:56:29.773159Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900995951979777:2308] txid# 281474976710658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:56:29.778291Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900995951979808:2317], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:56:29.778371Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900995951979774:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:56:29.778418Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900995951979775:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:56:29.778452Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519900995951979776:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T14:56:29.843704Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900995951979859:2359] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:29.846129Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900995951979864:2363] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:29.860223Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900995951979886:2374] txid# 281474976710664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:29.868592Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519900995951979894:2380] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } took: 3.462128s took: 3.462747s took: 3.463113s took: 3.464310s 2025-06-25T14:56:31.486435Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519900983067077240:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:31.486498Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; took: 0.808979s took: 0.809337s took: 0.809524s took: 0.811403s took: 0.720602s took: 0.721406s took: 0.722514s took: 0.722832s took: 0.816663s took: 0.817036s took: 0.817165s took: 0.819896s took: 0.961403s took: 0.962412s took: 0.964296s took: 0.967689s took: 1.099975s took: 1.100588s took: 1.101402s took: 1.102579s took: 0.942449s took: 0.943355s took: 0.943395s took: 0.944678s took: 0.909204s took: 0.911532s took: 0.913160s took: 0.913856s took: 0.846851s took: 0.852092s took: 0.854885s took: 0.855316s took: 0.937784s took: 0.948063s took: 0.951428s took: 0.956411s took: 1.052185s took: 1.052439s took: 1.058318s took: 1.062677s took: 1.084398s took: 1.088870s took: 1.091280s took: 1.096728s 2025-06-25T14:56:41.577495Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:56:41.577522Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded took: 1.122765s took: 1.123943s took: 1.130059s took: 1.136222s took: 0.984724s took: 0.985066s took: 0.985222s took: 0.985449s took: 1.000975s took: 1.006820s took: 1.007342s took: 1.009703s took: 1.036716s took: 1.037501s took: 1.038752s took: 1.039572s >> TInterconnectTest::TestBlobEventUpToMebibytes [GOOD] >> TInterconnectTest::TestBlobEventsThroughSubChannels >> TInterconnectTest::TestPreSerializedBlobEventUpToMebibytes [GOOD] >> TInterconnectTest::TestPingPongThroughSubChannel >> TInterconnectTest::TestTraceIdPassThrough [GOOD] 
>> TestYmqHttpProxy::TestChangeMessageVisibility >> TPQCachingProxyTest::MultipleSessions [GOOD] >> TPQCachingProxyTest::OutdatedSession [GOOD] >> TPQCachingProxyTest::TestPublishAndForget [GOOD] >> TPQCachingProxyTest::TestWrongSessionOrGeneration [GOOD] >> TPQCachingProxyTest::TestDeregister [GOOD] >> TInterconnectTest::TestBlobEventsThroughSubChannels [GOOD] |89.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestTraceIdPassThrough [GOOD] >> TInterconnectTest::TestManyEvents [GOOD] >> TInterconnectTest::TestCrossConnect >> TInterconnectTest::TestPingPongThroughSubChannel [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::TestPublishAndForget [GOOD] Test command err: 2025-06-25T14:56:47.508147Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:56:47.508258Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:56:47.525175Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:56:47.526171Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 1 2025-06-25T14:56:47.526333Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 1 for session: session1 2025-06-25T14:56:47.526380Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 1 2025-06-25T14:56:47.526486Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:218: Direct read cache: forget read: 1 for session session1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::TestWrongSessionOrGeneration [GOOD] Test command err: 2025-06-25T14:56:47.510405Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:56:47.510478Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:56:47.525383Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:56:47.526172Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 2 2025-06-25T14:56:47.526278Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 1 for session: session1 2025-06-25T14:56:47.526318Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 2 2025-06-25T14:56:47.526379Z node 1 :PQ_READ_PROXY INFO: caching_service.cpp:297: Direct read cache: attempted to register server session: session1:1 with stale generation 1, ignored 2025-06-25T14:56:47.526423Z node 1 :PQ_READ_PROXY ALERT: caching_service.cpp:159: Direct read cache: tried to stage direct read for session session1 with generation 1, previously had this session with generation 2. 
Data ignored 2025-06-25T14:56:47.526470Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 1 2025-06-25T14:56:47.526552Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:218: Direct read cache: forget read: 1 for session session1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::TestDeregister [GOOD] Test command err: 2025-06-25T14:56:47.508422Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:56:47.508510Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:56:47.525105Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:56:47.526170Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 1 2025-06-25T14:56:47.526270Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session2:1 with generation 1 2025-06-25T14:56:47.526374Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: session1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::MultipleSessions [GOOD] Test command err: 2025-06-25T14:56:47.508187Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:56:47.508297Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:56:47.526283Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:56:47.526377Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 1 2025-06-25T14:56:47.526460Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 1 for session: session1 2025-06-25T14:56:47.526511Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 2 for session: session1 2025-06-25T14:56:47.526549Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 1 2025-06-25T14:56:47.526642Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 2 for session session1, Generation: 1 2025-06-25T14:56:47.526722Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session2:1 with generation 2 2025-06-25T14:56:47.526798Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 3 for session: session2 2025-06-25T14:56:47.526839Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 3 for session session2, Generation: 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::OutdatedSession [GOOD] Test command err: 2025-06-25T14:56:47.508172Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:56:47.508269Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx 
writes info 2025-06-25T14:56:47.525762Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:56:47.526203Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 1 2025-06-25T14:56:47.526313Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 1 for session: session1 2025-06-25T14:56:47.526360Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 1 2025-06-25T14:56:47.526464Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:293: Direct read cache: registered server session: session1:1 with generation 2, killed existing session with older generation |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestBlobEventsThroughSubChannels [GOOD] |89.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestPingPongThroughSubChannel [GOOD] |89.2%| [TA] $(B)/ydb/core/persqueue/dread_cache_service/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::SrcIdCompatibility [GOOD] Test command err: === Start server === Server->StartServer(false); 2025-06-25T14:51:31.898530Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899715879359346:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:31.898564Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:31.955285Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899718275602530:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:31.955332Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:32.124975Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b2f/r3tmp/tmpBdvTVC/pdisk_1.dat 2025-06-25T14:51:32.139951Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:32.295874Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:32.298069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:32.298148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:32.307389Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:51:32.307885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23988, node 1 2025-06-25T14:51:32.348579Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:32.348648Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:32.373566Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:32.414352Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b2f/r3tmp/yandexRJN1V0.tmp 2025-06-25T14:51:32.414373Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b2f/r3tmp/yandexRJN1V0.tmp 2025-06-25T14:51:32.414514Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b2f/r3tmp/yandexRJN1V0.tmp 2025-06-25T14:51:32.414642Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:32.467707Z INFO: TTestServer started on Port 14366 GrpcPort 23988 TClient is connected to server localhost:14366 PQClient connected to localhost:23988 === TenantModeEnabled() = 0 === Init PQ - start server on port 23988 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:51:32.851733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:51:32.851958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:32.852144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:51:32.852177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:51:32.852421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:51:32.852466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:32.854680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:51:32.854839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:51:32.854980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:32.855019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:51:32.855037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-25T14:51:32.855056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-06-25T14:51:32.857450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:51:32.857469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-25T14:51:32.857494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:51:32.857628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:32.857655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:51:32.857667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 3 -> 128 2025-06-25T14:51:32.859248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:32.859274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:32.859292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:51:32.859317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-25T14:51:32.862830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:32.864278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-25T14:51:32.864403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:51:32.866835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750863092909, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:51:32.866962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750863092909 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:51:32.866987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:51:32.867236Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 128 -> 240 2025-06-25T14:51:32.867274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T14:51:32.867437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:51:32.869607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-25T14:51:32.871260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:51:32.871286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme ... UG: pq_impl.cpp:2812: [PQ: 72075186224037910] got client message batch for topic 'rt3.dc1--account--topic100' partition 7 2025-06-25T14:56:44.688105Z node 29 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0 generated for partition 7 topic 'rt3.dc1--account--topic100' owner test-src-id-compat2 2025-06-25T14:56:44.688236Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037910, Partition: 7, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 7 2025-06-25T14:56:44.688361Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--account--topic100' partition: 7 messageNo: 0 requestId: cookie: 0 2025-06-25T14:56:44.688567Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--account--topic100' requestId: 2025-06-25T14:56:44.688607Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037910] got client message batch for topic 'rt3.dc1--account--topic100' partition 7 2025-06-25T14:56:44.688717Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--account--topic100' partition: 7 messageNo: 0 requestId: cookie: 0 2025-06-25T14:56:44.688926Z node 29 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 5 partition: 7 MaxSeqNo: 0 sessionId: test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0 2025-06-25T14:56:44.689788Z :INFO: [] MessageGroupId [test-src-id-compat2] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750863404689 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:56:44.689972Z :INFO: [] MessageGroupId [test-src-id-compat2] SessionId [] Write session established. 
Init response: session_id: "test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0" topic: "account/topic100" cluster: "dc1" partition_id: 7 supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-25T14:56:44.690220Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0] Write 1 messages with Id from 1 to 1 2025-06-25T14:56:44.690803Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0] Write session: try to update token 2025-06-25T14:56:44.690876Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0] Send 1 message(s) (0 left), first sequence number is 1 2025-06-25T14:56:44.692387Z node 29 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-25T14:56:44.692750Z node 29 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037910 (partition=7) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-25T14:56:44.692924Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--account--topic100' requestId: 2025-06-25T14:56:44.692960Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037910] got client message batch for topic 'rt3.dc1--account--topic100' partition 7 2025-06-25T14:56:44.693064Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--account--topic100' partition: 7 messageNo: 0 requestId: cookie: 1 2025-06-25T14:56:44.693165Z node 29 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037910 (partition=7) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:56:44.693324Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--account--topic100' requestId: 2025-06-25T14:56:44.693348Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037910] got client message batch for topic 'rt3.dc1--account--topic100' partition 7 2025-06-25T14:56:44.693412Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72075186224037910] got client message topic: rt3.dc1--account--topic100 partition: 7 SourceId: '\0test-src-id-compat2' SeqNo: 1 partNo : 0 messageNo: 1 size 102 offset: -1 2025-06-25T14:56:44.693659Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Topic 'rt3.dc1--account--topic100' partition 7 part blob processing sourceId '\0test-src-id-compat2' seqNo 1 partNo 0 2025-06-25T14:56:44.694756Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Topic 'rt3.dc1--account--topic100' partition 7 part blob complete sourceId '\0test-src-id-compat2' seqNo 1 partNo 0 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 189 count 1 nextOffset 1 batches 1 2025-06-25T14:56:44.695788Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Add new write blob: topic 'rt3.dc1--account--topic100' partition 7 compactOffset 0,1 HeadOffset 0 endOffset 0 curOffset 1 d0000000007_00000000000000000000_00000_0000000001_00000? size 177 WTime 1750863404695 2025-06-25T14:56:44.696117Z node 29 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-25T14:56:44.696278Z node 29 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 7 offset 0 partNo 0 count 1 size 177 2025-06-25T14:56:44.699194Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 7 offset 0 count 1 size 177 actorID [29:7519901047722178671:2465] 2025-06-25T14:56:44.699331Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037910, Partition: 7, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 122 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:56:44.699407Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037910, Partition: 7, State: StateIdle] TPartition::ReplyWrite. Partition: 7 2025-06-25T14:56:44.699478Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Answering for message sourceid: '\0test-src-id-compat2', Topic: 'rt3.dc1--account--topic100', Partition: 7, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-06-25T14:56:44.699513Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037910' partition 7 offset 0 partno 0 count 1 parts 0 suffix '63' size 177 2025-06-25T14:56:44.699800Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Topic 'rt3.dc1--account--topic100' partition 7 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:56:44.699848Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Topic 'rt3.dc1--account--topic100' partition 7 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-25T14:56:44.699905Z node 29 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037910, Partition: 7, State: StateIdle] need more data for compaction. cumulativeSize=177, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:56:44.699986Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--account--topic100' partition: 7 messageNo: 1 requestId: cookie: 1 2025-06-25T14:56:44.700127Z node 29 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037910 (partition=7) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:56:44.700560Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037910, Partition: 7, State: StateIdle] read cookie 2 Topic 'rt3.dc1--account--topic100' partition 7 user user offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-06-25T14:56:44.700878Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0] Write session got write response: sequence_numbers: 1 offsets: 0 already_written: false partition_id: 7 write_statistics { persist_duration_ms: 5 } 2025-06-25T14:56:44.700954Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0] Write session: acknoledged message 1 2025-06-25T14:56:44.700931Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037910, Partition: 7, State: StateIdle] read cookie 2 added 1 blobs, size 177 count 1 last offset 0, current partition end offset: 1 2025-06-25T14:56:44.700972Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037910, Partition: 7, State: StateIdle] Reading cookie 2. Send blob request. 
2025-06-25T14:56:44.701025Z node 29 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 7 offset 0 partno 0 count 1 parts_count 0 source 1 size 177 accessed 0 times before, last time 2025-06-25T14:56:44.000000Z 2025-06-25T14:56:44.701061Z node 29 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 2025-06-25T14:56:44.701112Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-25T14:56:44.701153Z node 29 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037910' partition 7 offset 0 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:56:44.701178Z :INFO: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0] Write session: close. Timeout = 0 ms 2025-06-25T14:56:44.701233Z :INFO: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0] Write session will now close 2025-06-25T14:56:44.701310Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0] Write session: aborting 2025-06-25T14:56:44.701317Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 1 count 1 size 157 from pos 0 cbcount 1 2025-06-25T14:56:44.701425Z node 29 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'rt3.dc1--account--topic100' partition 7 user user readTimeStamp done, result 1750863404693 queuesize 0 startOffset 0 2025-06-25T14:56:44.701872Z :INFO: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:56:44.701937Z :DEBUG: [] MessageGroupId [test-src-id-compat2] SessionId [test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0] Write session: destroy 2025-06-25T14:56:44.702549Z node 29 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0 grpc read done: success: 0 data: 2025-06-25T14:56:44.702580Z node 29 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0 grpc read failed 2025-06-25T14:56:44.702635Z node 29 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 5 sessionId: test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0 2025-06-25T14:56:44.702664Z node 29 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-src-id-compat2|875d3958-13c249b8-b96ae2ab-a891fd22_0 is DEAD 2025-06-25T14:56:44.703215Z node 29 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037910 (partition=7) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:56:44.703375Z node 29 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037910] server disconnected, pipe [29:7519901060607082106:2699] destroyed 2025-06-25T14:56:44.703438Z node 29 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037910, Partition: 7, State: StateIdle] TPartition::DropOwner. |89.2%| [TA] {RESULT} $(B)/ydb/core/persqueue/dread_cache_service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TPersQueueTest::TestReadRuleServiceTypePassword [GOOD] >> TPersQueueTest::TestReadPartitionStatus >> KqpUniqueIndex::UpsertImplicitNullInComplexFk >> KqpIndexes::SecondaryIndexUpsert2Update >> KqpIndexes::UpsertWithoutExtraNullDelete+UseSink >> TestYmqHttpProxy::TestDeleteMessage [GOOD] >> KqpUniqueIndex::UpdateOnFkSelectResultSameValue >> KqpIndexes::SecondaryIndexUpsert1DeleteUpdate >> KqpIndexes::NullInIndexTable >> KqpVectorIndexes::CoveredVectorIndexWithFollowers+StaleRO >> KqpUniqueIndex::ReplaceFkAlreadyExist >> KqpUniqueIndex::InsertFkAlreadyExist >> KqpIndexes::MultipleSecondaryIndex+UseSink >> KqpIndexMetadata::HandleNotReadyIndex >> BasicUsage::RecreateObserver [GOOD] >> KqpIndexes::ForbidViewModification >> KqpIndexes::InnerJoinSecondaryIndexLookupAndRightTablePredicateNonIndexColumn >> TestYmqHttpProxy::TestDeleteMessageBatch >> KqpNamedExpressions::NamedExpressionRandomInsertDataQuery-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomSelect+UseSink >> TestKinesisHttpProxy::GoodRequestGetRecordsLongStreamName [GOOD] >> KqpUniqueIndex::UpdateFkSameValue >> KqpIndexes::InnerJoinWithNonIndexWherePredicate >> KqpVectorIndexes::OrderByCosineLevel1-Nullable-UseSimilarity >> KqpMultishardIndex::SecondaryIndexSelectNull >> KqpIndexes::UpdateDeletePlan+UseSink >> TestKinesisHttpProxy::ErroneousRequestGetRecords ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::RecreateObserver [GOOD] Test command err: 2025-06-25T14:54:56.059367Z :RetryDiscoveryWithCancel INFO: Random seed for debugging is 1750863296059341 2025-06-25T14:54:56.401144Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900597564532787:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:56.401205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:54:56.472596Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900596096709864:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:56.476431Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012d4/r3tmp/tmpz2xgZf/pdisk_1.dat 2025-06-25T14:54:56.685393Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:54:56.695089Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:54:57.057494Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:57.061051Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:57.061143Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:57.062418Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:57.062466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-25T14:54:57.070555Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:54:57.070672Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:57.073717Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18152, node 1 2025-06-25T14:54:57.305029Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0012d4/r3tmp/yandex3YdGhT.tmp 2025-06-25T14:54:57.305061Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0012d4/r3tmp/yandex3YdGhT.tmp 2025-06-25T14:54:57.305204Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0012d4/r3tmp/yandex3YdGhT.tmp 2025-06-25T14:54:57.305315Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:54:57.413970Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:57.475941Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:57.605697Z INFO: TTestServer started on Port 27659 GrpcPort 18152 TClient is connected to server localhost:27659 PQClient connected to localhost:18152 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:58.053819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:55:00.194066Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900614744402934:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.194116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900614744402922:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.194249Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.220807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:00.347261Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900614744402944:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720661 completed, doublechecking } 2025-06-25T14:55:00.412319Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900614744403028:2670] txid# 281474976720662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:00.747566Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519900613276579242:2275], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:00.747859Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900614744403038:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:00.748597Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZTJlN2NiNzQtNjI2ZGU0NGItY2FlYTMzMTQtNWY4MTVkNQ==, ActorId: [2:7519900613276579224:2269], ActorState: ExecuteState, TraceId: 01jyksbhq4c5szcv7cytse6cj4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:00.750885Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:00.749649Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OWRlNjE5OGUtNTcwMDRlZjgtNDc4ZGYwY2MtZWQ2OGUzZDE=, ActorId: [1:7519900614744402902:2295], ActorState: ExecuteState, TraceId: 01jyksbhj111z0wnahy6nzer17, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:00.750821Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." 
end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:00.754425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:00.979457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:01.188971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:18152", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, false, 1000); 2025-06-25T14:55:01.413672Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900597564532787: ... O: [/Root] [/Root] [a09efc3a-564947e8-b9148e4b-d24c2f39] Counters: { Errors: 0 CurrentSessionLifetimeMs: 32 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:56:48.463734Z :NOTICE: [/Root] [/Root] [a09efc3a-564947e8-b9148e4b-d24c2f39] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:56:48.463764Z :DEBUG: [/Root] [/Root] [a09efc3a-564947e8-b9148e4b-d24c2f39] [] Abort session to cluster 2025-06-25T14:56:48.463967Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_3_1_587190748063074774_v1 grpc read done: success# 0, data# { } 2025-06-25T14:56:48.464064Z :INFO: [/Root] [/Root] [6cde34a4-76a296-9a2611b9-e83a9806] Closing read session. Close timeout: 0.000000s 2025-06-25T14:56:48.464007Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_3_1_587190748063074774_v1 grpc read failed 2025-06-25T14:56:48.464030Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_3_1_587190748063074774_v1 grpc closed 2025-06-25T14:56:48.464116Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:0:0 2025-06-25T14:56:48.464066Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_3_1_587190748063074774_v1 is DEAD 2025-06-25T14:56:48.464144Z :INFO: [/Root] [/Root] [6cde34a4-76a296-9a2611b9-e83a9806] Counters: { Errors: 0 CurrentSessionLifetimeMs: 29 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:56:48.464186Z :NOTICE: [/Root] [/Root] [6cde34a4-76a296-9a2611b9-e83a9806] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:56:48.464210Z :DEBUG: [/Root] [/Root] [6cde34a4-76a296-9a2611b9-e83a9806] [] Abort session to cluster 2025-06-25T14:56:48.464480Z :INFO: [/Root] [/Root] [6cde34a4-76a296-9a2611b9-e83a9806] Closing read session. Close timeout: 0.000000s 2025-06-25T14:56:48.464523Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:0:0 2025-06-25T14:56:48.464566Z :INFO: [/Root] [/Root] [6cde34a4-76a296-9a2611b9-e83a9806] Counters: { Errors: 0 CurrentSessionLifetimeMs: 30 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:56:48.464650Z :NOTICE: [/Root] [/Root] [6cde34a4-76a296-9a2611b9-e83a9806] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:56:48.464774Z :INFO: [/Root] [/Root] [a09efc3a-564947e8-b9148e4b-d24c2f39] Closing read session. Close timeout: 0.000000s 2025-06-25T14:56:48.464801Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-25T14:56:48.464827Z :INFO: [/Root] [/Root] [a09efc3a-564947e8-b9148e4b-d24c2f39] Counters: { Errors: 0 CurrentSessionLifetimeMs: 33 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:56:48.464864Z :NOTICE: [/Root] [/Root] [a09efc3a-564947e8-b9148e4b-d24c2f39] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:56:48.464876Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_3_2_5315133695190617838_v1 grpc read done: success# 0, data# { } 2025-06-25T14:56:48.464897Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_3_2_5315133695190617838_v1 grpc read failed 2025-06-25T14:56:48.464915Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_3_2_5315133695190617838_v1 grpc closed 2025-06-25T14:56:48.464930Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/user session shared/user_3_2_5315133695190617838_v1 is DEAD 2025-06-25T14:56:48.464928Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519901079752996185:2490] disconnected; active server actors: 1 2025-06-25T14:56:48.464959Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519901079752996185:2490] client user disconnected session shared/user_3_1_587190748063074774_v1 2025-06-25T14:56:48.465024Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-06-25T14:56:48.465078Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037893][rt3.dc1--test-topic] consumer user balancing. Sessions=2, Families=1, UnradableFamilies=0 [], RequireBalancing=0 [] 2025-06-25T14:56:48.465106Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037893][rt3.dc1--test-topic] consumer user start rebalancing. familyCount=1, sessionCount=2, desiredFamilyCount=0, allowPlusOne=1 2025-06-25T14:56:48.465132Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037893][rt3.dc1--test-topic] consumer user balancing duration: 0.000035s 2025-06-25T14:56:48.465302Z :INFO: [/Root] [/Root] [851baa73-37080b09-d82bb22d-1574ef0d] Closing read session. Close timeout: 0.000000s 2025-06-25T14:56:48.465332Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-25T14:56:48.465356Z :INFO: [/Root] [/Root] [851baa73-37080b09-d82bb22d-1574ef0d] Counters: { Errors: 0 CurrentSessionLifetimeMs: 44 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:56:48.465399Z :NOTICE: [/Root] [/Root] [851baa73-37080b09-d82bb22d-1574ef0d] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:56:48.465367Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519901079752996184:2491] disconnected; active server actors: 1 2025-06-25T14:56:48.465391Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519901079752996184:2491] client user disconnected session shared/user_3_2_5315133695190617838_v1 2025-06-25T14:56:48.465415Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--test-topic] consumer user rebalancing was scheduled 2025-06-25T14:56:48.465446Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037893][rt3.dc1--test-topic] consumer user balancing. Sessions=1, Families=1, UnradableFamilies=0 [], RequireBalancing=0 [] 2025-06-25T14:56:48.465468Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037893][rt3.dc1--test-topic] consumer user start rebalancing. familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-25T14:56:48.465487Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037893][rt3.dc1--test-topic] consumer user balancing duration: 0.000025s 2025-06-25T14:56:48.467484Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 3 consumer shared/user session shared/user_3_3_7193687515553824761_v1 grpc read done: success# 0, data# { } 2025-06-25T14:56:48.467507Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 3 consumer shared/user session shared/user_3_3_7193687515553824761_v1 grpc read failed 2025-06-25T14:56:48.467525Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 3 consumer shared/user session shared/user_3_3_7193687515553824761_v1 grpc closed 2025-06-25T14:56:48.467558Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 3 consumer shared/user session shared/user_3_3_7193687515553824761_v1 is DEAD 2025-06-25T14:56:48.468060Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_3_3_7193687515553824761_v1 2025-06-25T14:56:48.468104Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7519901079752996193:2500] destroyed 2025-06-25T14:56:48.468269Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_3_3_7193687515553824761_v1 2025-06-25T14:56:48.468930Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519901079752996188:2492] disconnected; active server actors: 1 2025-06-25T14:56:48.468954Z node 3 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [3:7519901079752996188:2492] client user disconnected session shared/user_3_3_7193687515553824761_v1 2025-06-25T14:56:48.822823Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [3:7519901079752996210:2502] TxId: 281474976715690. Ctx: { TraceId: 01jyksevct44xx9tc8z17m75cy, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YWYzZjcyOGUtYzBiOGYxNTUtZmE3NzU1ZTctZjBiMDNiNDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 4 2025-06-25T14:56:48.823521Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519901079752996214:2502], TxId: 281474976715690, task: 3. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=YWYzZjcyOGUtYzBiOGYxNTUtZmE3NzU1ZTctZjBiMDNiNDY=. TraceId : 01jyksevct44xx9tc8z17m75cy. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [3:7519901079752996210:2502], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2025-06-25T14:56:49.183224Z node 3 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715691. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T14:56:49.183381Z node 3 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [3:7519901084047963522:2510] TxId: 281474976715691. Ctx: { TraceId: 01jyksevvh4s8r37vv99qn7v4t, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YTE0YzNkNDgtMjNhYjYyNmEtNDZmNjBkNTEtOWI1OTYzNGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T14:56:49.183634Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=YTE0YzNkNDgtMjNhYjYyNmEtNDZmNjBkNTEtOWI1OTYzNGE=, ActorId: [3:7519901084047963518:2510], ActorState: ExecuteState, TraceId: 01jyksevvh4s8r37vv99qn7v4t, Create QueryResponse for error on request, msg: 2025-06-25T14:56:49.185335Z node 3 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyksevvh4s8r37vv9ddz98ke" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } >> TestYmqHttpProxy::TestDeleteQueue [GOOD] >> KqpExtractPredicateLookup::PointJoin [GOOD] >> KqpExtractPredicateLookup::SqlInJoin >> TestYmqHttpProxy::TestListDeadLetterSourceQueues >> TestYmqHttpProxy::TestPurgeQueue [GOOD] >> TestYmqHttpProxy::TestSendMessageBatch >> KqpPrefixedVectorIndexes::PrefixedVectorIndexOrderByCosineDistanceWithCover+Nullable >> TestYmqHttpProxy::TestChangeMessageVisibility [GOOD] >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch >> KqpIndexes::ForbidViewModification [GOOD] >> KqpIndexes::ForbidDirectIndexTableCreation >> TestYmqHttpProxy::TestDeleteMessageBatch [GOOD] >> KqpUniqueIndex::InsertFkAlreadyExist [GOOD] >> KqpUniqueIndex::InsertFkPartialColumnSet >> KqpIndexes::SecondaryIndexUpsert2Update [GOOD] >> KqpIndexes::SecondaryIndexUsingInJoin+UseStreamJoin >> KqpIndexes::UpdateDeletePlan+UseSink [GOOD] >> KqpIndexes::UpdateDeletePlan-UseSink >> KqpIndexes::MultipleSecondaryIndex+UseSink [GOOD] >> KqpIndexes::MultipleSecondaryIndex-UseSink >> KqpIndexes::NullInIndexTable [GOOD] >> KqpIndexes::MultipleSecondaryIndexWithSameComulns+UseSink >> KqpIndexes::UpsertWithoutExtraNullDelete+UseSink [GOOD] >> KqpIndexes::UpsertWithNullKeysSimple >> KqpUniqueIndex::ReplaceFkAlreadyExist [GOOD] >> KqpUniqueIndex::InsertNullInPk >> TestKinesisHttpProxy::ErroneousRequestGetRecords [GOOD] >> KqpIndexes::InnerJoinWithNonIndexWherePredicate [GOOD] >> KqpIndexes::JoinWithNonPKColumnsInPredicate+UseStreamJoin >> KqpIndexes::SecondaryIndexUpsert1DeleteUpdate [GOOD] >> KqpIndexes::SecondaryIndexUpdateOnUsingIndex >> KqpUniqueIndex::UpdateFkSameValue [GOOD] >> KqpUniqueIndex::UpdateImplicitNullInComplexFk2 >> KqpUniqueIndex::UpdateOnFkSelectResultSameValue [GOOD] >> KqpUniqueIndex::UpdateOnFkAlreadyExist >> TestKinesisHttpProxy::GoodRequestCreateStream >> KqpMultishardIndex::SecondaryIndexSelectNull [GOOD] >> KqpMultishardIndex::SecondaryIndexSelect >> KqpIndexMetadata::HandleNotReadyIndex [GOOD] >> KqpIndexMetadata::TestNoReadFromMainTableBeforeJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestDeleteMessageBatch [GOOD] Test command err: 2025-06-25T14:56:09.050054Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900910930093585:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:09.054961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c73/r3tmp/tmpUirJJF/pdisk_1.dat 2025-06-25T14:56:09.490520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:09.490636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:09.599420Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:09.600763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8246, node 1 2025-06-25T14:56:09.743426Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:09.743446Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:09.743453Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:09.743582Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:10.064442Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11715 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:10.331423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:11715 2025-06-25T14:56:10.690035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T14:56:10.720889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:56:10.729606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-25T14:56:10.748924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.967702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:11.040856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-06-25T14:56:11.045268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:11.143856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-06-25T14:56:11.150570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.196008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:11.238002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.276907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.310623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.345924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:11.379596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:12.375824Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900923814996822:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.375823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900923814996834:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.375962Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.380174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:12.391118Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900923814996836:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-25T14:56:12.493171Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900923814996887:2865] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:13.038787Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyksdr2m067kg6v1ccfpxwbw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmQzZTlkMmEtZDc1OGJiYmQtMjc4YjBlZTUtY2E2N2FhMDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:56:13.130930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/sc ... 26919Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [5488a118-ab795aff-c3bd853e-41a9dc72] Sending reply from proxy actor: { DeleteMessageBatch { RequestId: "5488a118-ab795aff-c3bd853e-41a9dc72" Entries { Id: "Id-0" } Entries { Id: "Id-1" } } RequestId: "5488a118-ab795aff-c3bd853e-41a9dc72" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false } 2025-06-25T14:56:57.227228Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [DeleteMessageBatch] requestId [5488a118-ab795aff-c3bd853e-41a9dc72] Got succesfult GRPC response. Http output full {"Successful":[{"Id":"Id-0"},{"Id":"Id-1"}]} 2025-06-25T14:56:57.227352Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DeleteMessageBatch] requestId [5488a118-ab795aff-c3bd853e-41a9dc72] reply ok 2025-06-25T14:56:57.227472Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [DeleteMessageBatch] requestId [5488a118-ab795aff-c3bd853e-41a9dc72] Send metering event. HttpStatusCode: 200 IsFifo: 0 FolderId: folder4 RequestSizeInBytes: 716 ResponseSizeInBytes: 222 SourceAddress: 985c:1801:6050:0:805c:1801:6050:0 ResourceId: 000000000000000101v0 Action: DeleteMessageBatch 2025-06-25T14:56:57.227540Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:32940) <- (200 , 44 bytes) 2025-06-25T14:56:57.227623Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:32940) connection closed 2025-06-25T14:56:57.228823Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:32956) incoming connection opened 2025-06-25T14:56:57.228889Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:32956) -> (POST /Root, 106 bytes) 2025-06-25T14:56:57.228999Z node 7 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [9812:8d00:6050:0:8012:8d00:6050:0] request [ReceiveMessage] url [/Root] database [/Root] requestId: 3c6ef00a-7391216d-a3fbce5c-817e0f1 2025-06-25T14:56:57.229346Z node 7 :HTTP_PROXY INFO: http_req.cpp:520: http request [ReceiveMessage] requestId [3c6ef00a-7391216d-a3fbce5c-817e0f1] got new request from [9812:8d00:6050:0:8012:8d00:6050:0] 2025-06-25T14:56:57.229726Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:454: http request [ReceiveMessage] requestId [3c6ef00a-7391216d-a3fbce5c-817e0f1] Got cloud auth response. 
FolderId: folder4 CloudId: cloud4 UserSid: fake_user_sid@as 2025-06-25T14:56:57.229743Z node 7 :HTTP_PROXY INFO: http_req.cpp:280: http request [ReceiveMessage] requestId [3c6ef00a-7391216d-a3fbce5c-817e0f1] sending grpc request to '' database: '/Root' iam token size: 0 2025-06-25T14:56:57.230425Z node 7 :SQS DEBUG: ymq_proxy.cpp:148: Got new request in YMQ proxy. FolderId: folder4, CloudId: cloud4, UserSid: fake_user_sid@as, RequestId: 3c6ef00a-7391216d-a3fbce5c-817e0f1 2025-06-25T14:56:57.230528Z node 7 :SQS DEBUG: proxy_actor.cpp:263: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Proxy actor: used user_name='cloud4', queue_name='000000000000000101v0', folder_id='folder4' 2025-06-25T14:56:57.230540Z node 7 :SQS DEBUG: proxy_actor.cpp:78: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Request proxy started 2025-06-25T14:56:57.230649Z node 7 :SQS DEBUG: service.cpp:761: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Answer configuration for queue [cloud4/000000000000000101v0] without leader 2025-06-25T14:56:57.230891Z node 7 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] HandleResponse { Status: 48 TxId: 281474976715712 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "messages" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "SentTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } } } Value { Struct { Optional { } } } } } 2025-06-25T14:56:57.230922Z node 7 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Attempt 1 execution duration: 3ms 2025-06-25T14:56:57.231090Z node 7 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Sending mkql execution result: { Status: 48 TxId: 281474976715712 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "messages" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Offset" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "SentTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } } } Value { Struct { Optional { } } } } } 2025-06-25T14:56:57.231120Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Minikql data response: {"messages": []} 2025-06-25T14:56:57.231193Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] execution duration: 6ms 2025-06-25T14:56:57.231298Z node 7 :SQS DEBUG: queue_leader.cpp:556: Request [] Sending executed reply 2025-06-25T14:56:57.231390Z node 7 :SQS DEBUG: queue_leader.cpp:1913: Handle oldest timestamp metrics for [cloud4/000000000000000101v0/3] 2025-06-25T14:56:57.231498Z node 7 :SQS DEBUG: proxy_actor.cpp:97: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Get configuration duration: 0ms 2025-06-25T14:56:57.231577Z node 7 :SQS DEBUG: proxy_service.cpp:246: Request 
[3c6ef00a-7391216d-a3fbce5c-817e0f1] Send get leader node request to sqs service for cloud4/000000000000000101v0 2025-06-25T14:56:57.231595Z node 7 :SQS DEBUG: service.cpp:581: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Leader node for queue [cloud4/000000000000000101v0] is 7 2025-06-25T14:56:57.231614Z node 7 :SQS DEBUG: proxy_service.cpp:170: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Got leader node for queue response. Node id: 7. Status: 0 2025-06-25T14:56:57.231697Z node 7 :SQS TRACE: proxy_service.cpp:303: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Sending request from proxy to leader node 7: ReceiveMessage { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000101v0" } RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" 2025-06-25T14:56:57.231769Z node 7 :SQS DEBUG: proxy_service.cpp:70: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Received Sqs Request: ReceiveMessage { Auth { UserName: "cloud4" FolderId: "folder4" UserSID: "fake_user_sid@as" } QueueName: "000000000000000101v0" } RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" 2025-06-25T14:56:57.231828Z node 7 :SQS DEBUG: action.h:133: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Request started. Actor: [7:7519901118729449100:3694] 2025-06-25T14:56:57.231863Z node 7 :SQS TRACE: service.cpp:1472: Inc local leader ref for actor [7:7519901118729449100:3694] 2025-06-25T14:56:57.231879Z node 7 :SQS DEBUG: service.cpp:754: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Forward configuration request to queue [cloud4/000000000000000101v0] leader 2025-06-25T14:56:57.231907Z node 7 :SQS DEBUG: action.h:627: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Get configuration duration: 0ms 2025-06-25T14:56:57.231922Z node 7 :SQS TRACE: action.h:647: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Got configuration. Root url: http://ghrun-kqfvx6aroe.auto.internal:8771, Shards: 4, Fail: 0 2025-06-25T14:56:57.231942Z node 7 :SQS TRACE: action.h:662: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Got configuration. Attributes: { ContentBasedDeduplication: 0 DelaySeconds: 0.000000s FifoQueue: 0 MaximumMessageSize: 262144 MessageRetentionPeriod: 345600.000000s ReceiveMessageWaitTime: 0.000000s VisibilityTimeout: 30.000000s } 2025-06-25T14:56:57.231954Z node 7 :SQS TRACE: action.h:427: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] DoRoutine 2025-06-25T14:56:57.231993Z node 7 :SQS TRACE: queue_leader.cpp:2424: Increment active message requests for [cloud4/000000000000000101v0/3]. ActiveMessageRequests: 1 2025-06-25T14:56:57.232008Z node 7 :SQS DEBUG: queue_leader.cpp:938: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Received empty result from shard 3 infly. Infly capacity: 0. Messages count: 0 2025-06-25T14:56:57.232015Z node 7 :SQS DEBUG: queue_leader.cpp:1162: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] No known messages in this shard. Skip attempt to add messages to infly 2025-06-25T14:56:57.232021Z node 7 :SQS DEBUG: queue_leader.cpp:1168: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Already tried to add messages to infly 2025-06-25T14:56:57.232060Z node 7 :SQS TRACE: queue_leader.cpp:2434: Decrement active message requests for [[cloud4/000000000000000101v0/3]. 
ActiveMessageRequests: 0 2025-06-25T14:56:57.232094Z node 7 :SQS TRACE: action.h:264: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] SendReplyAndDie from action actor { ReceiveMessage { RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" } } 2025-06-25T14:56:57.232133Z node 7 :SQS DEBUG: queue_leader.cpp:384: Request ReceiveMessage working duration: 0ms 2025-06-25T14:56:57.232168Z node 7 :SQS TRACE: proxy_service.h:35: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Sending sqs response: { ReceiveMessage { RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" } RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false } 2025-06-25T14:56:57.232223Z node 7 :SQS TRACE: service.cpp:1483: Dec local leader ref for actor [7:7519901118729449100:3694]. Found: 1 2025-06-25T14:56:57.232227Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse ReceiveMessage { RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" } RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false 2025-06-25T14:56:57.232273Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7519901118729449099:2504]: ReceiveMessage { RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" } RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false 2025-06-25T14:56:57.232386Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] HandleResponse: { ReceiveMessage { RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" } RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false }, status: OK 2025-06-25T14:56:57.232447Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [3c6ef00a-7391216d-a3fbce5c-817e0f1] Sending reply from proxy actor: { ReceiveMessage { RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" } RequestId: "3c6ef00a-7391216d-a3fbce5c-817e0f1" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: false } 2025-06-25T14:56:57.232618Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [ReceiveMessage] requestId [3c6ef00a-7391216d-a3fbce5c-817e0f1] Got succesfult GRPC response. 2025-06-25T14:56:57.232662Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [ReceiveMessage] requestId [3c6ef00a-7391216d-a3fbce5c-817e0f1] reply ok 2025-06-25T14:56:57.232737Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [ReceiveMessage] requestId [3c6ef00a-7391216d-a3fbce5c-817e0f1] Send metering event. 
HttpStatusCode: 200 IsFifo: 0 FolderId: folder4 RequestSizeInBytes: 526 ResponseSizeInBytes: 178 SourceAddress: 9812:8d00:6050:0:8012:8d00:6050:0 ResourceId: 000000000000000101v0 Action: ReceiveMessage 2025-06-25T14:56:57.232824Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:32956) <- (200 , 2 bytes) Http output full {} 2025-06-25T14:56:57.232897Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:32956) connection closed >> TestYmqHttpProxy::TestListDeadLetterSourceQueues [GOOD] >> KqpQueryService::ClosedSessionRemovedFromPool [GOOD] >> KqpQueryService::CloseConnection >> TPopulatorQuorumTest::OneRingGroup [GOOD] >> TPopulatorQuorumTest::OneWriteOnlyRingGroup [GOOD] >> TPopulatorQuorumTest::OneDisconnectedRingGroup [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorQuorumTest::OneRingGroup [GOOD] Test command err: replicas: [1:24339059:0], [1:1099535966835:0], [1:2199047594611:0] replicaActorToServiceMap: actor: [1:8:2055], service: [1:2199047594611:0] actor: [1:5:2052], service: [1:1099535966835:0] actor: [1:2:2049], service: [1:24339059:0] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult 2025-06-25T14:54:49.399085Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:795: [1:20:2066] Handle NKikimr::TEvStateStorage::TEvListSchemeBoardResult: sender# [1:10:2057] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult (done) 2025-06-25T14:54:49.441327Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:20:2066] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/TestPath" PathDescription { Self { Name: "TestPath" PathId: 100 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1 ParentPathId: 1 PathState: EPathStateNoChanges PathVersion: 1 } } PathId: 100 PathOwnerId: 72057594046678944 }: sender# [1:17:2064], cookie# 12345, event size# 76, preserialized size# 0 2025-06-25T14:54:49.441398Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:20:2066] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], cookie# 12345, is deletion# false, version: 1 ... 
waiting for updates from replica populators 2025-06-25T14:54:49.443694Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:21:2067] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:2:2049] 2025-06-25T14:54:49.443757Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:21:2067] Successful handshake: replica# [1:2:2049] 2025-06-25T14:54:49.443807Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:21:2067] Start full sync: replica# [1:2:2049] 2025-06-25T14:54:49.443885Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:22:2068] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:5:2052] 2025-06-25T14:54:49.443917Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:22:2068] Successful handshake: replica# [1:5:2052] 2025-06-25T14:54:49.443955Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:22:2068] Start full sync: replica# [1:5:2052] 2025-06-25T14:54:49.444014Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:23:2069] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:8:2055] 2025-06-25T14:54:49.444034Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:23:2069] Successful handshake: replica# [1:8:2055] 2025-06-25T14:54:49.444050Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:23:2069] Start full sync: replica# [1:8:2055] 2025-06-25T14:54:49.444115Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:24339059:0] }: sender# [1:21:2067] 2025-06-25T14:54:49.444219Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:21:2067] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:20:2066] 2025-06-25T14:54:49.444388Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:21:2067] 2025-06-25T14:54:49.444448Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:21:2067] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:20:2066], cookie# 0 2025-06-25T14:54:49.444513Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:1099535966835:0] }: sender# [1:22:2068] 2025-06-25T14:54:49.444598Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:22:2068] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:20:2066] 2025-06-25T14:54:49.444683Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:22:2068] 2025-06-25T14:54:49.444752Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:21:2067] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:2:2049], cookie# 0 
2025-06-25T14:54:49.444821Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:22:2068] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:20:2066], cookie# 0 2025-06-25T14:54:49.444879Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:2199047594611:0] }: sender# [1:23:2069] 2025-06-25T14:54:49.444916Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:22:2068] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:5:2052], cookie# 0 2025-06-25T14:54:49.444970Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:23:2069] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:20:2066] 2025-06-25T14:54:49.445046Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:23:2069] 2025-06-25T14:54:49.445101Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:23:2069] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:20:2066], cookie# 0 2025-06-25T14:54:49.445176Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:24339059:0] }: sender# [1:21:2067] 2025-06-25T14:54:49.445219Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:21:2067] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:20:2066] 2025-06-25T14:54:49.445258Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:23:2069] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:8:2055], cookie# 0 2025-06-25T14:54:49.445354Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:1099535966835:0] }: sender# [1:22:2068] 2025-06-25T14:54:49.445397Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:21:2067] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:2:2049] 2025-06-25T14:54:49.445437Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:22:2068] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:20:2066] 2025-06-25T14:54:49.445475Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:22:2068] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:5:2052] 2025-06-25T14:54:49.445513Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:20:2066] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:21:2067], cookie# 0 2025-06-25T14:54:49.445544Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: 
populator.cpp:756: [1:20:2066] Ack for unknown update (already acked?): sender# [1:21:2067], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.445638Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:20:2066] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:22:2068], cookie# 0 2025-06-25T14:54:49.445663Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:20:2066] Ack for unknown update (already acked?): sender# [1:22:2068], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.445712Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:20:2066] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:2199047594611:0] }: sender# [1:23:2069] 2025-06-25T14:54:49.445750Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:23:2069] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:20:2066] 2025-06-25T14:54:49.445807Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:20:2066] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:23:2069], cookie# 0 2025-06-25T14:54:49.445827Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:20:2066] Ack for unknown update (already acked?): sender# [1:23:2069], cookie# 0 2025-06-25T14:54:49.445850Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:23:2069] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:8:2055] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 ... 
waiting for updates from replica populators (done) populatorToReplicaMap: populator: [1:22:2068], replica: [1:1099535966835:0] populator: [1:23:2069], replica: [1:2199047594611:0] populator: [1:21:2067], replica: [1:24339059:0] 2025-06-25T14:54:49.445980Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:20:2066] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:21:2067], cookie# 12345 2025-06-25T14:56:59.799074Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:20:2066] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:22:2068], cookie# 12345 2025-06-25T14:56:59.799161Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:20:2066] Ack update: ack to# [1:17:2064], cookie# 12345, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], version# 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorQuorumTest::OneWriteOnlyRingGroup [GOOD] Test command err: replicas: [1:24339059:0], [1:1099535966835:0], [1:2199047594611:0], [1:3298559222387:0], [1:4398070850163:0], [1:5497582477939:0] replicaActorToServiceMap: actor: [1:8:2055], service: [1:2199047594611:0] actor: [1:5:2052], service: [1:1099535966835:0] actor: [1:17:2064], service: [1:5497582477939:0] actor: [1:2:2049], service: [1:24339059:0] actor: [1:14:2061], service: [1:4398070850163:0] actor: [1:11:2058], service: [1:3298559222387:0] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult 2025-06-25T14:54:49.399471Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:795: [1:29:2075] Handle NKikimr::TEvStateStorage::TEvListSchemeBoardResult: sender# [1:19:2066] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult (done) 2025-06-25T14:54:49.448162Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:29:2075] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/TestPath" PathDescription { Self { Name: "TestPath" PathId: 100 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1 ParentPathId: 1 PathState: EPathStateNoChanges PathVersion: 1 } } PathId: 100 PathOwnerId: 72057594046678944 }: sender# [1:26:2073], cookie# 12345, event size# 76, preserialized size# 0 2025-06-25T14:54:49.448255Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:29:2075] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], cookie# 12345, is deletion# false, version: 1 ... 
waiting for updates from replica populators 2025-06-25T14:54:49.450053Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:30:2076] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:2:2049] 2025-06-25T14:54:49.450105Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:30:2076] Successful handshake: replica# [1:2:2049] 2025-06-25T14:54:49.450132Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:30:2076] Start full sync: replica# [1:2:2049] 2025-06-25T14:54:49.450220Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:31:2077] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:5:2052] 2025-06-25T14:54:49.450255Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:31:2077] Successful handshake: replica# [1:5:2052] 2025-06-25T14:54:49.450291Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:31:2077] Start full sync: replica# [1:5:2052] 2025-06-25T14:54:49.450329Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:32:2078] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:8:2055] 2025-06-25T14:54:49.450348Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:32:2078] Successful handshake: replica# [1:8:2055] 2025-06-25T14:54:49.450363Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:32:2078] Start full sync: replica# [1:8:2055] 2025-06-25T14:54:49.450389Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:33:2079] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:11:2058] 2025-06-25T14:54:49.450419Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:33:2079] Successful handshake: replica# [1:11:2058] 2025-06-25T14:54:49.450440Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:33:2079] Start full sync: replica# [1:11:2058] 2025-06-25T14:54:49.450481Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:34:2080] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:14:2061] 2025-06-25T14:54:49.450503Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:34:2080] Successful handshake: replica# [1:14:2061] 2025-06-25T14:54:49.450540Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:34:2080] Start full sync: replica# [1:14:2061] 2025-06-25T14:54:49.450577Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:35:2081] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:17:2064] 2025-06-25T14:54:49.450596Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:35:2081] Successful handshake: replica# [1:17:2064] 2025-06-25T14:54:49.450613Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:35:2081] Start full sync: replica# [1:17:2064] 2025-06-25T14:54:49.450683Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:24339059:0] }: sender# [1:30:2076] 2025-06-25T14:54:49.450761Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:30:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.450900Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle 
NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:30:2076] 2025-06-25T14:54:49.450974Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:30:2076] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.451056Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:30:2076] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:2:2049], cookie# 0 2025-06-25T14:54:49.451128Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:1099535966835:0] }: sender# [1:31:2077] 2025-06-25T14:54:49.451196Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:31:2077] 2025-06-25T14:54:49.451244Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:31:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.451316Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:2199047594611:0] }: sender# [1:32:2078] 2025-06-25T14:54:49.451354Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:31:2077] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.451398Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:32:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.451458Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:31:2077] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:5:2052], cookie# 0 2025-06-25T14:54:49.451518Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:32:2078] 2025-06-25T14:54:49.451568Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:3298559222387:0] }: sender# [1:33:2079] 2025-06-25T14:54:49.451608Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:32:2078] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.451663Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:33:2079] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.451740Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: 
populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:33:2079] 2025-06-25T14:54:49.451778Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:32:2078] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:8:2055], cookie# 0 2025-06-25T14:54:49.451818Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:33:2079] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.451860Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:33:2079] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:11:2058], cookie# 0 2025-06-25T14:54:49.451935Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:4398070850163:0] }: sender# [1:34:2080] 2025-06-25T14:54:49.451982Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:34:2080] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.452029Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:34:2080] 2025-06-25T14:54:49.452067Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:34:2080] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.452131Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:5497582477939:0] }: sender# [1:35:2081] 2025-06-25T14:54:49.452187Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:35:2081] 2025-06-25T14:54:49.452218Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:34:2080] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:14:2061], cookie# 0 2025-06-25T14:54:49.452260Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:35:2081] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.452325Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:35:2081] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.452390Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:24339059:0] }: sender# [1:30:2076] 2025-06-25T14:54:49.452435Z node 1 :SCHEME_BOARD_POPULATOR 
DEBUG: populator.cpp:271: [1:30:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.452520Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:30:2076] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:2:2049] 2025-06-25T14:54:49.452583Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:35:2081] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:17:2064], cookie# 0 2025-06-25T14:54:49.452634Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:30:2076], cookie# 0 2025-06-25T14:54:49.452674Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:30:2076], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.452761Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:1099535966835:0] }: sender# [1:31:2077] 2025-06-25T14:54:49.452797Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:31:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.452839Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:31:2077] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:5:2052] 2025-06-25T14:54:49.452873Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:2199047594611:0] }: sender# [1:32:2078] 2025-06-25T14:54:49.452907Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:32:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.452951Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:31:2077], cookie# 0 2025-06-25T14:54:49.452974Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:31:2077], cookie# 0 2025-06-25T14:54:49.453013Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:32:2078] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:8:2055] ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.453104Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:3298559222387:0] }: sender# [1:33:2079] 2025-06-25T14:54:49.453149Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:33:2079] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.453199Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:32:2078], cookie# 0 2025-06-25T14:54:49.453217Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:32:2078], cookie# 0 2025-06-25T14:54:49.453239Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:33:2079] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:11:2058] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.453289Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:33:2079], cookie# 0 2025-06-25T14:54:49.453307Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:33:2079], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.453355Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:4398070850163:0] }: sender# [1:34:2080] 2025-06-25T14:54:49.453400Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:34:2080] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.453435Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:34:2080] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:14:2061] 2025-06-25T14:54:49.453476Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:34:2080], cookie# 0 2025-06-25T14:54:49.453504Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:34:2080], cookie# 0 ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.453562Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:5497582477939:0] }: sender# [1:35:2081] 2025-06-25T14:54:49.453609Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:35:2081] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.453653Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:35:2081], cookie# 0 2025-06-25T14:54:49.453674Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:35:2081], cookie# 0 2025-06-25T14:54:49.453710Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:35:2081] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:17:2064] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 ... waiting for updates from replica populators (done) populatorToReplicaMap: populator: [1:33:2079], replica: [1:3298559222387:0] populator: [1:30:2076], replica: [1:24339059:0] populator: [1:34:2080], replica: [1:4398070850163:0] populator: [1:31:2077], replica: [1:1099535966835:0] populator: [1:35:2081], replica: [1:5497582477939:0] populator: [1:32:2078], replica: [1:2199047594611:0] 2025-06-25T14:54:49.453895Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:30:2076], cookie# 12345 2025-06-25T14:56:59.819638Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:31:2077], cookie# 12345 2025-06-25T14:56:59.819737Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:29:2075] Ack update: ack to# [1:26:2073], cookie# 12345, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], version# 1 >> TestYmqHttpProxy::TestListQueueTags >> TPopulatorQuorumTest::TwoRingGroups [GOOD] >> KqpNamedExpressions::NamedExpressionRandomSelect+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomSelect-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorQuorumTest::OneDisconnectedRingGroup [GOOD] Test command err: replicas: [1:24339059:0], [1:1099535966835:0], [1:2199047594611:0], [1:3298559222387:0], [1:4398070850163:0], [1:5497582477939:0] replicaActorToServiceMap: actor: [1:8:2055], service: [1:2199047594611:0] actor: [1:5:2052], service: [1:1099535966835:0] actor: [1:17:2064], service: [1:5497582477939:0] actor: [1:2:2049], service: [1:24339059:0] actor: [1:14:2061], service: [1:4398070850163:0] actor: [1:11:2058], service: [1:3298559222387:0] ... 
waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult 2025-06-25T14:54:49.588688Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:795: [1:29:2075] Handle NKikimr::TEvStateStorage::TEvListSchemeBoardResult: sender# [1:19:2066] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult (done) 2025-06-25T14:54:49.618589Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:29:2075] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/TestPath" PathDescription { Self { Name: "TestPath" PathId: 100 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1 ParentPathId: 1 PathState: EPathStateNoChanges PathVersion: 1 } } PathId: 100 PathOwnerId: 72057594046678944 }: sender# [1:26:2073], cookie# 12345, event size# 76, preserialized size# 0 2025-06-25T14:54:49.618667Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:29:2075] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], cookie# 12345, is deletion# false, version: 1 ... waiting for updates from replica populators 2025-06-25T14:54:49.620971Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:30:2076] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:2:2049] 2025-06-25T14:54:49.621029Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:30:2076] Successful handshake: replica# [1:2:2049] 2025-06-25T14:54:49.621071Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:30:2076] Start full sync: replica# [1:2:2049] 2025-06-25T14:54:49.621150Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:31:2077] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:5:2052] 2025-06-25T14:54:49.621184Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:31:2077] Successful handshake: replica# [1:5:2052] 2025-06-25T14:54:49.621218Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:31:2077] Start full sync: replica# [1:5:2052] 2025-06-25T14:54:49.621254Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:32:2078] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:8:2055] 2025-06-25T14:54:49.621272Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:32:2078] Successful handshake: replica# [1:8:2055] 2025-06-25T14:54:49.621287Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:32:2078] Start full sync: replica# [1:8:2055] 2025-06-25T14:54:49.621314Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:33:2079] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:11:2058] 2025-06-25T14:54:49.621342Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:33:2079] Successful handshake: replica# [1:11:2058] 2025-06-25T14:54:49.621361Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:33:2079] Start full sync: replica# [1:11:2058] 2025-06-25T14:54:49.621407Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:34:2080] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:14:2061] 2025-06-25T14:54:49.621433Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:34:2080] Successful handshake: replica# [1:14:2061] 2025-06-25T14:54:49.621464Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:34:2080] Start full sync: replica# [1:14:2061] 2025-06-25T14:54:49.621498Z node 1 
:SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:35:2081] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:17:2064] 2025-06-25T14:54:49.621516Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:35:2081] Successful handshake: replica# [1:17:2064] 2025-06-25T14:54:49.621532Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:35:2081] Start full sync: replica# [1:17:2064] 2025-06-25T14:54:49.621610Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:24339059:0] }: sender# [1:30:2076] 2025-06-25T14:54:49.621688Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:30:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.621857Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:30:2076] 2025-06-25T14:54:49.621926Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:30:2076] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.622010Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:30:2076] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:2:2049], cookie# 0 2025-06-25T14:54:49.622083Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:1099535966835:0] }: sender# [1:31:2077] 2025-06-25T14:54:49.622128Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:31:2077] 2025-06-25T14:54:49.622170Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:31:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.622435Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:2199047594611:0] }: sender# [1:32:2078] 2025-06-25T14:54:49.622475Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:31:2077] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.622520Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:32:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.622581Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:31:2077] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:5:2052], 
cookie# 0 2025-06-25T14:54:49.622649Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:32:2078] 2025-06-25T14:54:49.622709Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:3298559222387:0] }: sender# [1:33:2079] 2025-06-25T14:54:49.622766Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:32:2078] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.622831Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:33:2079] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.622898Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:33:2079] 2025-06-25T14:54:49.622936Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:32:2078] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:8:2055], cookie# 0 2025-06-25T14:54:49.622977Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:33:2079] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.623022Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:33:2079] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:11:2058], cookie# 0 2025-06-25T14:54:49.623085Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:4398070850163:0] }: sender# [1:34:2080] 2025-06-25T14:54:49.623132Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:34:2080] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.623278Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:34:2080] 2025-06-25T14:54:49.623329Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:34:2080] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.623394Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:5497582477939:0] }: sender# [1:35:2081] 2025-06-25T14:54:49.623428Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:35:2081] 2025-06-25T14:54:49.623449Z node 1 :SCHEME_BOARD_POPULATOR 
DEBUG: populator.cpp:287: [1:34:2080] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:14:2061], cookie# 0 2025-06-25T14:54:49.623474Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:35:2081] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.623510Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:35:2081] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.623563Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:24339059:0] }: sender# [1:30:2076] 2025-06-25T14:54:49.623591Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:30:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.623640Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:30:2076] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:2:2049] 2025-06-25T14:54:49.623718Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:35:2081] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:17:2064], cookie# 0 2025-06-25T14:54:49.623766Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:30:2076], cookie# 0 2025-06-25T14:54:49.623806Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:30:2076], cookie# 0 ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.623877Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:1099535966835:0] }: sender# [1:31:2077] 2025-06-25T14:54:49.623911Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:31:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.623950Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:31:2077] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:5:2052] 2025-06-25T14:54:49.623982Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:2199047594611:0] }: sender# [1:32:2078] 2025-06-25T14:54:49.624014Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:32:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.624048Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:31:2077], cookie# 0 2025-06-25T14:54:49.624061Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:31:2077], cookie# 0 2025-06-25T14:54:49.624084Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:32:2078] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:8:2055] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.624133Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:3298559222387:0] }: sender# [1:33:2079] 2025-06-25T14:54:49.624167Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:33:2079] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.624194Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:32:2078], cookie# 0 2025-06-25T14:54:49.624206Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:32:2078], cookie# 0 2025-06-25T14:54:49.624218Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:33:2079] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:11:2058] ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.624252Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:33:2079], cookie# 0 2025-06-25T14:54:49.624268Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:33:2079], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.624326Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:4398070850163:0] }: sender# [1:34:2080] 2025-06-25T14:54:49.624377Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:34:2080] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.624414Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:34:2080] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:14:2061] 2025-06-25T14:54:49.624457Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:34:2080], cookie# 0 2025-06-25T14:54:49.624492Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:34:2080], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.624545Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:5497582477939:0] }: sender# [1:35:2081] 2025-06-25T14:54:49.624591Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:35:2081] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.624643Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:35:2081], cookie# 0 2025-06-25T14:54:49.624662Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:35:2081], cookie# 0 2025-06-25T14:54:49.624697Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:35:2081] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:17:2064] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 ... 
waiting for updates from replica populators (done) populatorToReplicaMap: populator: [1:33:2079], replica: [1:3298559222387:0] populator: [1:30:2076], replica: [1:24339059:0] populator: [1:34:2080], replica: [1:4398070850163:0] populator: [1:31:2077], replica: [1:1099535966835:0] populator: [1:35:2081], replica: [1:5497582477939:0] populator: [1:32:2078], replica: [1:2199047594611:0] 2025-06-25T14:54:49.624896Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:30:2076], cookie# 12345 2025-06-25T14:57:00.025180Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:31:2077], cookie# 12345 2025-06-25T14:57:00.025278Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:29:2075] Ack update: ack to# [1:26:2073], cookie# 12345, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], version# 1 >> TestYmqHttpProxy::TestSendMessageBatch [GOOD] >> KqpVectorIndexes::OrderByCosineLevel2+Nullable-UseSimilarity ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorQuorumTest::TwoRingGroups [GOOD] Test command err: replicas: [1:24339059:0], [1:1099535966835:0], [1:2199047594611:0], [1:3298559222387:0], [1:4398070850163:0], [1:5497582477939:0] replicaActorToServiceMap: actor: [1:8:2055], service: [1:2199047594611:0] actor: [1:5:2052], service: [1:1099535966835:0] actor: [1:17:2064], service: [1:5497582477939:0] actor: [1:2:2049], service: [1:24339059:0] actor: [1:14:2061], service: [1:4398070850163:0] actor: [1:11:2058], service: [1:3298559222387:0] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult 2025-06-25T14:54:49.399507Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:795: [1:29:2075] Handle NKikimr::TEvStateStorage::TEvListSchemeBoardResult: sender# [1:19:2066] ... waiting for NKikimr::TEvStateStorage::TEvListSchemeBoardResult (done) 2025-06-25T14:54:49.440598Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:668: [1:29:2075] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/TestPath" PathDescription { Self { Name: "TestPath" PathId: 100 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1 ParentPathId: 1 PathState: EPathStateNoChanges PathVersion: 1 } } PathId: 100 PathOwnerId: 72057594046678944 }: sender# [1:26:2073], cookie# 12345, event size# 76, preserialized size# 0 2025-06-25T14:54:49.440689Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:685: [1:29:2075] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], cookie# 12345, is deletion# false, version: 1 ... 
waiting for updates from replica populators 2025-06-25T14:54:49.444973Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:30:2076] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:2:2049] 2025-06-25T14:54:49.445042Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:30:2076] Successful handshake: replica# [1:2:2049] 2025-06-25T14:54:49.445084Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:30:2076] Start full sync: replica# [1:2:2049] 2025-06-25T14:54:49.445145Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:31:2077] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:5:2052] 2025-06-25T14:54:49.445163Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:31:2077] Successful handshake: replica# [1:5:2052] 2025-06-25T14:54:49.445202Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:31:2077] Start full sync: replica# [1:5:2052] 2025-06-25T14:54:49.445241Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:32:2078] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:8:2055] 2025-06-25T14:54:49.445258Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:32:2078] Successful handshake: replica# [1:8:2055] 2025-06-25T14:54:49.445273Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:32:2078] Start full sync: replica# [1:8:2055] 2025-06-25T14:54:49.445300Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:33:2079] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:11:2058] 2025-06-25T14:54:49.445333Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:33:2079] Successful handshake: replica# [1:11:2058] 2025-06-25T14:54:49.445355Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:33:2079] Start full sync: replica# [1:11:2058] 2025-06-25T14:54:49.445403Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:34:2080] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:14:2061] 2025-06-25T14:54:49.445428Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:34:2080] Successful handshake: replica# [1:14:2061] 2025-06-25T14:54:49.445456Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:34:2080] Start full sync: replica# [1:14:2061] 2025-06-25T14:54:49.445502Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:239: [1:35:2081] Handle NKikimrSchemeBoard.TEvHandshake { Owner: 72057594046678944 Generation: 0 }: sender# [1:17:2064] 2025-06-25T14:54:49.445523Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:251: [1:35:2081] Successful handshake: replica# [1:17:2064] 2025-06-25T14:54:49.445539Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:255: [1:35:2081] Start full sync: replica# [1:17:2064] 2025-06-25T14:54:49.445614Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:24339059:0] }: sender# [1:30:2076] 2025-06-25T14:54:49.445698Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:30:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.445845Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle 
NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:30:2076] 2025-06-25T14:54:49.445907Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:30:2076] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.446000Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:30:2076] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:2:2049], cookie# 0 2025-06-25T14:54:49.446087Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:1099535966835:0] }: sender# [1:31:2077] 2025-06-25T14:54:49.446149Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:31:2077] 2025-06-25T14:54:49.446194Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:31:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.446267Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:2199047594611:0] }: sender# [1:32:2078] 2025-06-25T14:54:49.446303Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:31:2077] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.446346Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:32:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.446412Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:31:2077] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:5:2052], cookie# 0 2025-06-25T14:54:49.446462Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:32:2078] 2025-06-25T14:54:49.446524Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:3298559222387:0] }: sender# [1:33:2079] 2025-06-25T14:54:49.446563Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:32:2078] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.446613Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:33:2079] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.446692Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: 
populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:33:2079] 2025-06-25T14:54:49.446724Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:32:2078] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:8:2055], cookie# 0 2025-06-25T14:54:49.446777Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:33:2079] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.446825Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:33:2079] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:11:2058], cookie# 0 2025-06-25T14:54:49.446884Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:4398070850163:0] }: sender# [1:34:2080] 2025-06-25T14:54:49.446941Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:34:2080] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.446992Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:34:2080] 2025-06-25T14:54:49.447032Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:34:2080] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.447089Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: Replica: [1:5497582477939:0] }: sender# [1:35:2081] 2025-06-25T14:54:49.447128Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:630: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestUpdate { PathId: [OwnerId: 72057594046678944, LocalPathId: 100] }: sender# [1:35:2081] 2025-06-25T14:54:49.447170Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:34:2080] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:14:2061], cookie# 0 2025-06-25T14:54:49.447212Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:35:2081] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: false DeletedPathBegin: 0 DeletedPathEnd: 0 { Path: /Root/TestPath PathId: [OwnerId: 72057594046678944, LocalPathId: 100] PathVersion: 1 } }: sender# [1:29:2075] 2025-06-25T14:54:49.447252Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:35:2081] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 1 }: sender# [1:29:2075], cookie# 0 2025-06-25T14:54:49.447307Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:24339059:0] }: sender# [1:30:2076] 2025-06-25T14:54:49.447370Z node 1 :SCHEME_BOARD_POPULATOR 
DEBUG: populator.cpp:271: [1:30:2076] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.447442Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:30:2076] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:2:2049] 2025-06-25T14:54:49.447498Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:287: [1:35:2081] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:17:2064], cookie# 0 2025-06-25T14:54:49.447561Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:30:2076], cookie# 0 2025-06-25T14:54:49.447591Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:30:2076], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.447663Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:1099535966835:0] }: sender# [1:31:2077] 2025-06-25T14:54:49.447713Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:31:2077] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.447758Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:31:2077] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:5:2052] 2025-06-25T14:54:49.447795Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:2199047594611:0] }: sender# [1:32:2078] 2025-06-25T14:54:49.447848Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:32:2078] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.447890Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:31:2077], cookie# 0 2025-06-25T14:54:49.447920Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:31:2077], cookie# 0 2025-06-25T14:54:49.447950Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:32:2078] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:8:2055] ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.448010Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:3298559222387:0] }: sender# [1:33:2079] 2025-06-25T14:54:49.448049Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:33:2079] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.448103Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:32:2078], cookie# 0 2025-06-25T14:54:49.448127Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:32:2078], cookie# 0 2025-06-25T14:54:49.448156Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:33:2079] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:11:2058] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.448209Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:33:2079], cookie# 0 2025-06-25T14:54:49.448230Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:33:2079], cookie# 0 ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.448281Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:4398070850163:0] }: sender# [1:34:2080] 2025-06-25T14:54:49.448335Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:34:2080] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.448376Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:34:2080] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:14:2061] 2025-06-25T14:54:49.448424Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:34:2080], cookie# 0 2025-06-25T14:54:49.448449Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:34:2080], cookie# 0 ... 
blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 2025-06-25T14:54:49.448508Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:536: [1:29:2075] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvRequestDescribe { PathId: [OwnerId: 72057594046678944, LocalPathId: 101] Replica: [1:5497582477939:0] }: sender# [1:35:2081] 2025-06-25T14:54:49.448564Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:35:2081] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvDescribeResult { Commit: true DeletedPathBegin: 0 DeletedPathEnd: 0 }: sender# [1:29:2075] 2025-06-25T14:54:49.448611Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:35:2081], cookie# 0 2025-06-25T14:54:49.448628Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:756: [1:29:2075] Ack for unknown update (already acked?): sender# [1:35:2081], cookie# 0 2025-06-25T14:54:49.448653Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:305: [1:35:2081] Handle NKikimrSchemeBoard.TEvCommitGeneration { Owner: 72057594046678944 Generation: 1 }: sender# [1:17:2064] ... blocking NKikimr::NSchemeBoard::NSchemeshardEvents::TEvUpdateAck from SCHEME_BOARD_REPLICA_POPULATOR_ACTOR to SCHEME_BOARD_POPULATOR_ACTOR cookie 12345 ... waiting for updates from replica populators (done) populatorToReplicaMap: populator: [1:33:2079], replica: [1:3298559222387:0] populator: [1:30:2076], replica: [1:24339059:0] populator: [1:34:2080], replica: [1:4398070850163:0] populator: [1:31:2077], replica: [1:1099535966835:0] populator: [1:35:2081], replica: [1:5497582477939:0] populator: [1:32:2078], replica: [1:2199047594611:0] 2025-06-25T14:54:49.448837Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:30:2076], cookie# 12345 2025-06-25T14:54:49.448889Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:31:2077], cookie# 12345 2025-06-25T14:54:49.448916Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:33:2079], cookie# 12345 2025-06-25T14:57:00.402381Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:750: [1:29:2075] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 1 PathId: [OwnerId: 72057594046678944, LocalPathId: 100] Version: 1 }: sender# [1:34:2080], cookie# 12345 2025-06-25T14:57:00.402445Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:774: [1:29:2075] Ack update: ack to# [1:26:2073], cookie# 12345, pathId# [OwnerId: 72057594046678944, LocalPathId: 100], version# 1 >> KqpUniqueIndex::UpsertImplicitNullInComplexFk [GOOD] >> KqpVectorIndexes::BuildIndexTimesAndUser |89.2%| [TA] $(B)/ydb/core/tx/scheme_board/ut_populator/test-results/unittest/{meta.json ... results_accumulator.log} |89.2%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_populator/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpMultishardIndex::SortedRangeReadDesc >> KqpIndexes::ForbidDirectIndexTableCreation [GOOD] >> KqpIndexes::IndexFilterPushDown >> KqpIndexes::DoUpsertWithoutIndexUpdate-UniqIndex-UseSink >> KqpIndexes::SelectConcurentTX >> KqpUniqueIndex::UpdateFkAlreadyExist >> YdbTableSplit::SplitByLoadWithReads [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestSendMessageBatch [GOOD] Test command err: 2025-06-25T14:56:09.079786Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900910444213001:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:09.080205Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c6a/r3tmp/tmpukMo1u/pdisk_1.dat 2025-06-25T14:56:09.499908Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:09.525849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:09.525951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 1281, node 1 2025-06-25T14:56:09.533667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:09.749564Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:09.749589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:09.749598Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:09.749731Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:10.093981Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9273 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:56:10.268727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:10.305382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:9273 waiting... 2025-06-25T14:56:10.497984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) 2025-06-25T14:56:10.501759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:56:10.503365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-25T14:56:10.524969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.677886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:10.748517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-06-25T14:56:10.753093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:56:10.824628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-06-25T14:56:10.829260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.907088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.942627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.983895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.032257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.131391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.213241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:12.126104Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900923329116225:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.126174Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.126454Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900923329116238:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.130857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:12.141747Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900923329116240:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-25T14:56:12.238884Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900923329116291:2866] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:13.038779Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyksdqtr6a57gjtdftjx7wt7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmFjZjhhYzEtN2YzZGVlZTMtNWRmNWY5OC05Mzc3MWJiMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:56:13.142379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ... 62135ae945eca30bf" MessageId: "55186a07-1513a05e-ba654262-fe0b1027" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "c369efcf-6764e7a5-4bd65a81-99ae934a" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true } 2025-06-25T14:57:01.088061Z node 7 :SQS TRACE: proxy_service.cpp:194: HandleSqsResponse SendMessageBatch { RequestId: "c369efcf-6764e7a5-4bd65a81-99ae934a" Entries { MD5OfMessageAttributes: "3d778967e1fa431d626ffb890c486385" MD5OfMessageBody: "94a29778a1f1f41bf68142847b2e6106" MessageId: "cb447f6a-72e63332-e744f506-b89cad37" SequenceNumber: 1 Id: "Id-0" } Entries { MD5OfMessageBody: "3bf7e6d806a0b8062135ae945eca30bf" MessageId: "55186a07-1513a05e-ba654262-fe0b1027" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "c369efcf-6764e7a5-4bd65a81-99ae934a" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true 2025-06-25T14:57:01.088152Z node 7 :SQS TRACE: proxy_service.cpp:208: Sending answer to proxy actor [7:7519901130098508251:2463]: SendMessageBatch { RequestId: "c369efcf-6764e7a5-4bd65a81-99ae934a" Entries { MD5OfMessageAttributes: "3d778967e1fa431d626ffb890c486385" MD5OfMessageBody: "94a29778a1f1f41bf68142847b2e6106" MessageId: "cb447f6a-72e63332-e744f506-b89cad37" SequenceNumber: 1 Id: "Id-0" } Entries { MD5OfMessageBody: "3bf7e6d806a0b8062135ae945eca30bf" MessageId: "55186a07-1513a05e-ba654262-fe0b1027" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "c369efcf-6764e7a5-4bd65a81-99ae934a" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true 2025-06-25T14:57:01.088207Z node 7 :SQS TRACE: service.cpp:1483: Dec local leader ref for actor [7:7519901130098508258:3512]. 
Found: 1 2025-06-25T14:57:01.089154Z node 7 :SQS TRACE: proxy_actor.cpp:178: Request [c369efcf-6764e7a5-4bd65a81-99ae934a] HandleResponse: { SendMessageBatch { RequestId: "c369efcf-6764e7a5-4bd65a81-99ae934a" Entries { MD5OfMessageAttributes: "3d778967e1fa431d626ffb890c486385" MD5OfMessageBody: "94a29778a1f1f41bf68142847b2e6106" MessageId: "cb447f6a-72e63332-e744f506-b89cad37" SequenceNumber: 1 Id: "Id-0" } Entries { MD5OfMessageBody: "3bf7e6d806a0b8062135ae945eca30bf" MessageId: "55186a07-1513a05e-ba654262-fe0b1027" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "c369efcf-6764e7a5-4bd65a81-99ae934a" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true }, status: OK 2025-06-25T14:57:01.089300Z node 7 :SQS DEBUG: proxy_actor.cpp:147: Request [c369efcf-6764e7a5-4bd65a81-99ae934a] Sending reply from proxy actor: { SendMessageBatch { RequestId: "c369efcf-6764e7a5-4bd65a81-99ae934a" Entries { MD5OfMessageAttributes: "3d778967e1fa431d626ffb890c486385" MD5OfMessageBody: "94a29778a1f1f41bf68142847b2e6106" MessageId: "cb447f6a-72e63332-e744f506-b89cad37" SequenceNumber: 1 Id: "Id-0" } Entries { MD5OfMessageBody: "3bf7e6d806a0b8062135ae945eca30bf" MessageId: "55186a07-1513a05e-ba654262-fe0b1027" SequenceNumber: 2 Id: "Id-1" } Entries { Error { Status: 400 Message: "No MessageGroupId parameter." ErrorCode: "MissingParameter" } Id: "Id-2" } } RequestId: "c369efcf-6764e7a5-4bd65a81-99ae934a" FolderId: "folder4" ResourceId: "000000000000000101v0" IsFifo: true } 2025-06-25T14:57:01.089905Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:379: http request [SendMessageBatch] requestId [c369efcf-6764e7a5-4bd65a81-99ae934a] Got succesfult GRPC response. 2025-06-25T14:57:01.090137Z node 7 :HTTP_PROXY INFO: http_req.cpp:1207: http request [SendMessageBatch] requestId [c369efcf-6764e7a5-4bd65a81-99ae934a] reply ok 2025-06-25T14:57:01.090305Z node 7 :HTTP_PROXY DEBUG: http_req.cpp:1267: http request [SendMessageBatch] requestId [c369efcf-6764e7a5-4bd65a81-99ae934a] Send metering event. 
HttpStatusCode: 200 IsFifo: 1 FolderId: folder4 RequestSizeInBytes: 1063 ResponseSizeInBytes: 644 SourceAddress: 7870:9a00:6050:0:6070:9a00:6050:0 ResourceId: 000000000000000101v0 Action: SendMessageBatch 2025-06-25T14:57:01.090490Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:38700) <- (200 , 465 bytes) 2025-06-25T14:57:01.090584Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:38700) connection closed Http output full {"Successful":[{"SequenceNumber":"1","Id":"Id-0","MD5OfMessageBody":"94a29778a1f1f41bf68142847b2e6106","MD5OfMessageAttributes":"3d778967e1fa431d626ffb890c486385","MessageId":"cb447f6a-72e63332-e744f506-b89cad37"},{"SequenceNumber":"2","Id":"Id-1","MD5OfMessageBody":"3bf7e6d806a0b8062135ae945eca30bf","MessageId":"55186a07-1513a05e-ba654262-fe0b1027"}],"Failed":[{"Message":"No MessageGroupId parameter.","Id":"Id-2","Code":"MissingParameter","SenderFault":true}]} 2025-06-25T14:57:01.091434Z node 7 :SQS TRACE: executor.cpp:256: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Compile program response: { Status: 48 MiniKQLCompileResults { CompiledProgram: "\037\016\nFlags\010Name\010Args\016Payload\022Parameter\014Offset\032SentTimestamp\006\002\206\202\t\211\004\202\203\005@\206\205\004\207\203\010\207\203\010\026\032$SetResult\000\003?\002\020messages\t\211\004?\016\205\004?\016\203\014\020List$Truncated\203\004\030Member\000\t\211\026?\026\203\005\004\200\205\004\203\004\203\004\026\032\213\010\203\010\203\010\203\010\203\010\213\010?$?&\203\010\203\010\203\004\203\010\203\010\203\004\206\203\014\203\014,SelectRange\000\003?\034 \000\001\205\000\000\000\000\001\032\000\000\000\000\000\000\000?\014\005?\"\003?\036\010\003? \006\003\013?,\t\351\000?$\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?R\003?T(QUEUE_ID_NUMBER_HASH\003\022\000\t\351\000?&\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?h\003?j\036QUEUE_ID_NUMBER\003\022\000\t\351\000?(\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?~\003?\200\022TIME_FROM\003\022\000\003?*\000\010\013?2?`?v\003?.\177\377\377\377\377\377\377\377\377\003?0\177\377\377\377\377\377\377\377\377\014\003?4\000\003?6\002\003?8\000\003?:\000\006\010?>\003\203\014\000\003\203\014\000\003\203\014\000\003\203\014\000\017\003?@\000\377\007\003?\030\000\002\001\000/" } } 2025-06-25T14:57:01.091471Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] compilation duration: 3ms 2025-06-25T14:57:01.091515Z node 7 :SQS DEBUG: queue_leader.cpp:464: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) has been prepared 2025-06-25T14:57:01.091530Z node 7 :SQS DEBUG: queue_leader.cpp:514: Request [] Executing compiled query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) 2025-06-25T14:57:01.091612Z node 7 :SQS DEBUG: executor.cpp:83: Request [] Starting executor actor for query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID). 
Mode: COMPILE_AND_EXEC 2025-06-25T14:57:01.091678Z node 7 :SQS TRACE: executor.cpp:154: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Serializing params: {"QUEUE_ID_NUMBER": 2, "QUEUE_ID_NUMBER_HASH": 17472595041006102391, "SHARD": 0, "QUEUE_ID_NUMBER_AND_SHARD_HASH": 12311263855443095412, "TIME_FROM": 0} 2025-06-25T14:57:01.091964Z node 7 :SQS TRACE: executor.cpp:203: Request [] Query(idx=GET_OLDEST_MESSAGE_TIMESTAMP_METRIC_ID) Queue [cloud4/000000000000000101v0] Execute program: { Transaction { MiniKQLTransaction { Mode: COMPILE_AND_EXEC Program { Bin: "\037\016\nFlags\010Name\010Args\016Payload\022Parameter\014Offset\032SentTimestamp\006\002\206\202\t\211\004\202\203\005@\206\205\004\207\203\010\207\203\010\026\032$SetResult\000\003?\002\020messages\t\211\004?\016\205\004?\016\203\014\020List$Truncated\203\004\030Member\000\t\211\026?\026\203\005\004\200\205\004\203\004\203\004\026\032\213\010\203\010\203\010\203\010\203\010\213\010?$?&\203\010\203\010\203\004\203\010\203\010\203\004\206\203\014\203\014,SelectRange\000\003?\034 \000\001\205\000\000\000\000\001\032\000\000\000\000\000\000\000?\014\005?\"\003?\036\010\003? \006\003\013?,\t\351\000?$\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?R\003?T(QUEUE_ID_NUMBER_HASH\003\022\000\t\351\000?&\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?h\003?j\036QUEUE_ID_NUMBER\003\022\000\t\351\000?(\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?~\003?\200\022TIME_FROM\003\022\000\003?*\000\010\013?2?`?v\003?.\177\377\377\377\377\377\377\377\377\003?0\177\377\377\377\377\377\377\377\377\014\003?4\000\003?6\002\003?8\000\003?:\000\006\010?>\003\203\014\000\003\203\014\000\003\203\014\000\003\203\014\000\017\003?@\000\377\007\003?\030\000\002\001\000/" } Params { Bin: "\037\000\005\205\n\203\010\203\010\203\010\203\004\203\010> KqpIndexes::UpdateDeletePlan-UseSink [GOOD] >> KqpIndexes::UpdateIndexSubsetPk >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithReads [GOOD] Test command err: 2025-06-25T14:56:33.690531Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901015995685347:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:33.694088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00137c/r3tmp/tmphqx0VT/pdisk_1.dat 2025-06-25T14:56:34.123514Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:34.142397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:34.142469Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:34.147282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26606, node 1 2025-06-25T14:56:34.322004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-25T14:56:34.322037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:34.322046Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:34.322190Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:34.693036Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5237 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:34.816461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:5237 2025-06-25T14:56:36.496593Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588221:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.496684Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.687087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:36.814031Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588394:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.814116Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.830406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750863396800 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750863396800 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-25T14:56:36.929074Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588495:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.929186Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.929388Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588517:2359], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.929447Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588519:2361], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.929474Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588520:2362], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.929526Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588518:2360], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.929598Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588522:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.929633Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588521:2363], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.931752Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588553:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.931838Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588557:2374], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.931893Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.932539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588574:2378], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.933411Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901028880588593:2381], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.933472Z node 1 :KQP_WORKLOAD_ ... 726491. Ctx: { TraceId: 01jyksf3phdybykqwsrm1yrayc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWFlMTE5ZDEtNTVhODYwYTYtYTU2NDFlNzYtMzgxNTVjZTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:56:57.046691Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976726492. Ctx: { TraceId: 01jyksf3pnc8fjttde9ke7axqp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2QzM2MyNTItZDYzYTg0YzQtZmVjOWFhNzUtZWRlZWQzNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-25T14:56:57.057365Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976726493. Ctx: { TraceId: 01jyksf3pvdhkd6jqjzq5zdhb8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzQxMGFiMDctOGExYjRlMy1kMDAzNWI5MS1jNGJhOTZmYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:56:57.079592Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976726494. Ctx: { TraceId: 01jyksf3q42kbjbxt709jdkqzz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzM4MWI0MWQtNTZlNTliYTQtNTA0MWQ0Y2ItOWNmMDc5MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:56:57.079928Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976726495. Ctx: { TraceId: 01jyksf3q4bbgr7rkyjdqm7twb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWFlMTE5ZDEtNTVhODYwYTYtYTU2NDFlNzYtMzgxNTVjZTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:56:57.080459Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976726497. Ctx: { TraceId: 01jyksf3q7apwz3hm4k0gtd8mk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2QzM2MyNTItZDYzYTg0YzQtZmVjOWFhNzUtZWRlZWQzNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:56:57.080461Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976726498. Ctx: { TraceId: 01jyksf3q9edwb0e3st097q0ws, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzQxMGFiMDctOGExYjRlMy1kMDAzNWI5MS1jNGJhOTZmYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:56:57.081158Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976726496. Ctx: { TraceId: 01jyksf3q71d99s4p6182q9n7v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTk2N2M5MmMtY2I5ZmM1ZDctZjI3MTdkMDQtNWY0MGNhMGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750863396800 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-25T14:56:57.088257Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726464, task: 1, CA Id [1:7519901114780054203:2337]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.088262Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726462, task: 1, CA Id [1:7519901114780054182:2344]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.088291Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726460, task: 1, CA Id [1:7519901114780054171:2322]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.132073Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726461, task: 1, CA Id [1:7519901114780054179:2338]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.132113Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726465, task: 1, CA Id [1:7519901114780054212:2341]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.243167Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726464, task: 1, CA Id [1:7519901114780054203:2337]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.243240Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726460, task: 1, CA Id [1:7519901114780054171:2322]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.243265Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726462, task: 1, CA Id [1:7519901114780054182:2344]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.369510Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726461, task: 1, CA Id [1:7519901114780054179:2338]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.369594Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726465, task: 1, CA Id [1:7519901114780054212:2341]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.507027Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726464, task: 1, CA Id [1:7519901114780054203:2337]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.544510Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726462, task: 1, CA Id [1:7519901114780054182:2344]. 
Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.544556Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726460, task: 1, CA Id [1:7519901114780054171:2322]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.860390Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726465, task: 1, CA Id [1:7519901114780054212:2341]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.922464Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726462, task: 1, CA Id [1:7519901114780054182:2344]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:57.922523Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726460, task: 1, CA Id [1:7519901114780054171:2322]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:58.030558Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726461, task: 1, CA Id [1:7519901114780054179:2338]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:58.113799Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726464, task: 1, CA Id [1:7519901114780054203:2337]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:58.407041Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726465, task: 1, CA Id [1:7519901114780054212:2341]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:58.496814Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726462, task: 1, CA Id [1:7519901114780054182:2344]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:58.625001Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726460, task: 1, CA Id [1:7519901114780054171:2322]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:58.701672Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726464, task: 1, CA Id [1:7519901114780054203:2337]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:58.818962Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726461, task: 1, CA Id [1:7519901114780054179:2338]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:59.000826Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726465, task: 1, CA Id [1:7519901114780054212:2341]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:59.000865Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726462, task: 1, CA Id [1:7519901114780054182:2344]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:59.296233Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726464, task: 1, CA Id [1:7519901114780054203:2337]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:59.595722Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726462, task: 1, CA Id [1:7519901114780054182:2344]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:59.626600Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726460, task: 1, CA Id [1:7519901114780054171:2322]. 
Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:59.712459Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726461, task: 1, CA Id [1:7519901114780054179:2338]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:59.766572Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726465, task: 1, CA Id [1:7519901114780054212:2341]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:56:59.860769Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726464, task: 1, CA Id [1:7519901114780054203:2337]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:57:00.208811Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726460, task: 1, CA Id [1:7519901114780054171:2322]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 2025-06-25T14:57:00.590130Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976726461, task: 1, CA Id [1:7519901114780054179:2338]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 1 TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750863396800 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... 
(TRUNCATED) Table has 2 shards >> KqpIndexes::MultipleSecondaryIndex-UseSink [GOOD] >> KqpIndexes::MultipleModifications >> KqpUniqueIndex::InsertFkPartialColumnSet [GOOD] >> KqpUniqueIndex::InsertFkDuplicate >> KqpIndexes::UpsertWithNullKeysSimple [GOOD] >> KqpIndexes::UpsertWithNullKeysComplex >> KqpIndexes::SecondaryIndexUsingInJoin+UseStreamJoin [GOOD] >> KqpIndexes::SecondaryIndexUsingInJoin-UseStreamJoin >> KqpIndexes::UniqIndexComplexPkComplexFkOverlap >> KqpUniqueIndex::InsertNullInPk [GOOD] >> KqpUniqueIndex::InsertNullInFk >> KqpIndexes::InnerJoinSecondaryIndexLookupAndRightTablePredicateNonIndexColumn [GOOD] >> KqpIndexes::IndexOr ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch [GOOD] Test command err: 2025-06-25T14:56:09.071504Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900912869428870:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:09.072567Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c8f/r3tmp/tmpWNKuYF/pdisk_1.dat 2025-06-25T14:56:09.492173Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:09.492275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:09.541492Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:09.551044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12902, node 1 2025-06-25T14:56:09.745217Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:09.745248Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:09.745253Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:09.745350Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:10.103644Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6975 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:10.267708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:6975 2025-06-25T14:56:10.482956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T14:56:10.489655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:56:10.493446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-25T14:56:10.516579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-25T14:56:10.526964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.678398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:56:10.739502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-06-25T14:56:10.743682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:10.800714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-06-25T14:56:10.805408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.857003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.897163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.972902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.059823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.104397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:11.165943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:12.044263Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900925754332120:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.044446Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900925754332112:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.044559Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.049110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:12.058121Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900925754332126:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-25T14:56:12.140698Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900925754332179:2865] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:13.038985Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyksdqr6baj65fh238e2bx2x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjYxMGVkMDktYjZlMTRmODQtYmJhZmQ4ZWMtOTIxYTZiMjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:56:13.130686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type ... ode: COMPILE_AND_EXEC 2025-06-25T14:57:04.073716Z node 7 :SQS TRACE: executor.cpp:154: Request [56285c0f-116c0ef4-45653f38-dd8c8b34] Query(idx=CHANGE_VISIBILITY_ID) Queue [cloud4/000000000000000101v0] Serializing params: {"QUEUE_ID_NUMBER": 2, "QUEUE_ID_NUMBER_HASH": 17472595041006102391, "SHARD": 2, "QUEUE_ID_NUMBER_AND_SHARD_HASH": 18011340738530590538, "NOW": 1750863424072, "GROUPS_READ_ATTEMPT_IDS_PERIOD": 300000, "KEYS": [{"LockTimestamp": 1750863423888, "Offset": 1, "NewVisibilityDeadline": 1750863425072}, {"LockTimestamp": 1750863423931, "Offset": 2, "NewVisibilityDeadline": 1750863426072}]} 2025-06-25T14:57:04.074141Z node 7 :SQS TRACE: executor.cpp:203: Request [56285c0f-116c0ef4-45653f38-dd8c8b34] Query(idx=CHANGE_VISIBILITY_ID) Queue [cloud4/000000000000000101v0] Execute program: { Transaction { MiniKQLTransaction { Mode: COMPILE_AND_EXEC Program { Bin: "O\034\014Exists*NewVisibilityDeadline\014Offset\006Arg\014Member\nFlags\010Name\010Args\016Payload\022Parameter\006And\032LockTimestamp$VisibilityDeadline\014Invoke\t\211\004\206\202?\000\206\202\030Extend\000\006\002?\000\t\211\004\202\203\005@\206\205\n\203\014\207\203\010\203\014\203\010?\020(ChangeConddCurrentVisibilityDeadline\002\006\n$SetResult\000\003?\006\014result\t\211\006?\024\206\205\006?\020?\020?\020.\006\n?\032?\0220MapParameter\000\t\351\000?\034\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?&\003?(\010KEYS\003&\000\t\251\000?\032\016\000\005?\022\t\211\004?\010\207\203\014?\010 Coalesce\000\t\211\004?<\207\203\014\207\203\014*\000\t\211\006?B\203\005@\203\010?\0146\000\003?J\026LessOrEqual\t\351\000?L\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?X\003?Z\006NOW\003&\000\t\211\004?\014\207\205\004\207\203\010?\014.2\203\004\022\000\t\211\n?n\203\005\004\200\205\004\203\004\203\004.2\213\010\203\010\203\010\203\004?\020\203\004$SelectRow\000\003?t \000\001\205\000\000\000\000\001\030\000\000\000\000\000\000\000?l\005?z\003?v\020\003?x\026\003\013?\202\t\351\000?|\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?\226\003?\230> KqpIndexMetadata::TestNoReadFromMainTableBeforeJoin [GOOD] >> KqpIndexMetadata::HandleWriteOnlyIndex >> TestKinesisHttpProxy::GoodRequestCreateStream [GOOD] >> KqpVectorIndexes::VectorIndexIsNotUpdatable >> KqpIndexes::SecondaryIndexUpdateOnUsingIndex [GOOD] >> KqpIndexes::SecondaryIndexSelectUsingScripting >> TPersQueueTest::TestReadPartitionStatus [GOOD] >> TPersQueueTest::TxCounters >> TestYmqHttpProxy::TestListQueueTags [GOOD] >> 
data_correctness.py::TestDataCorrectness::test [GOOD] >> KqpVectorIndexes::BuildIndexTimesAndUser [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel3 >> KqpUniqueIndex::UpdateOnFkAlreadyExist [GOOD] >> KqpUniqueIndex::UpdateImplicitNullInComplexFk2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::GoodRequestCreateStream [GOOD] Test command err: 2025-06-25T14:56:09.063421Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900912243849197:2230];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:09.063899Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c7f/r3tmp/tmpR36drH/pdisk_1.dat 2025-06-25T14:56:09.521326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:09.521443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:09.523177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:09.569541Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14735, node 1 2025-06-25T14:56:09.749206Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:09.749236Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:09.749244Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:09.749380Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:10.056431Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20920 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:56:10.284550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:10.305288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:20920 2025-06-25T14:56:10.521743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T14:56:10.528019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:56:10.529653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-25T14:56:10.547635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.688046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:10.740197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:10.828932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:10.882432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.929131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.980796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.020100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.065591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.115080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:12.171468Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900925128752258:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.171479Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900925128752269:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.171583Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.175542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:12.185002Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900925128752272:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-25T14:56:12.245765Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900925128752323:2864] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:13.038790Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyksdqw72h3p5zkhzn6apd0e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2ZlOWU1ODEtYTM2NDY3NTctODIxNjBmODAtZjM1ZWJmMzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:56:13.140205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:13.182392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, ... 2898: [PQ: 72075186224037909] server connected, pipe [8:7519901158457312715:2487], now have 1 active actors on pipe 2025-06-25T14:57:06.581478Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037910] server connected, pipe [8:7519901158457312716:2488], now have 1 active actors on pipe 2025-06-25T14:57:06.581542Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037911] server connected, pipe [8:7519901158457312717:2489], now have 1 active actors on pipe 2025-06-25T14:57:06.584133Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037910] server disconnected, pipe [8:7519901158457312716:2488] destroyed 2025-06-25T14:57:06.584180Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037907] server disconnected, pipe [8:7519901158457312712:2485] destroyed 2025-06-25T14:57:06.584214Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037911] server disconnected, pipe [8:7519901158457312717:2489] destroyed 2025-06-25T14:57:06.584602Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037908] server disconnected, pipe [8:7519901158457312714:2486] destroyed 2025-06-25T14:57:06.584635Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037909] server disconnected, pipe [8:7519901158457312715:2487] destroyed Http output full 
{"StreamDescription":{"RetentionPeriodHours":24,"WriteQuotaKbPerSec":1024,"StreamModeDetails":{"StreamMode":"ON_DEMAND"},"StreamArn":"testtopic","PartitioningSettings":{"MinActivePartitions":5,"AutoPartitioningSettings":{"Strategy":"AUTO_PARTITIONING_STRATEGY_DISABLED","PartitionWriteSpeed":{"StabilizationWindow":{"Nanos":0,"Seconds":300},"DownUtilizationPercent":30,"UpUtilizationPercent":90}},"MaxActivePartitions":5},"Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}},{"ShardId":"shard-000002","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"204169420152563078078024764459060926872","StartingHashKey":"136112946768375385385349842972707284582"}},{"ShardId":"shard-000003","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"272225893536750770770699685945414569163","StartingHashKey":"204169420152563078078024764459060926873"}},{"ShardId":"shard-000004","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"340282366920938463463374607431768211455","StartingHashKey":"272225893536750770770699685945414569164"}}],"KeyId":"","Owner":"Service1_id@as","StreamStatus":"ACTIVE","HasMoreShards":false,"EncryptionType":"ENCRYPTION_UNDEFINED","StreamCreationTimestamp":1750863427,"StorageLimitMb":0,"StreamName":"testtopic"}} 2025-06-25T14:57:06.585319Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DescribeStream] requestId [c77b614d-56eb02ca-9eb30121-a82fb47e] reply ok 2025-06-25T14:57:06.585683Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:58988) <- (200 , 1672 bytes) 2025-06-25T14:57:06.585765Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:58988) connection closed 200 
{"StreamDescription":{"RetentionPeriodHours":24,"WriteQuotaKbPerSec":1024,"StreamModeDetails":{"StreamMode":"ON_DEMAND"},"StreamArn":"testtopic","PartitioningSettings":{"MinActivePartitions":5,"AutoPartitioningSettings":{"Strategy":"AUTO_PARTITIONING_STRATEGY_DISABLED","PartitionWriteSpeed":{"StabilizationWindow":{"Nanos":0,"Seconds":300},"DownUtilizationPercent":30,"UpUtilizationPercent":90}},"MaxActivePartitions":5},"Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}},{"ShardId":"shard-000002","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"204169420152563078078024764459060926872","StartingHashKey":"136112946768375385385349842972707284582"}},{"ShardId":"shard-000003","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"272225893536750770770699685945414569163","StartingHashKey":"204169420152563078078024764459060926873"}},{"ShardId":"shard-000004","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"340282366920938463463374607431768211455","StartingHashKey":"272225893536750770770699685945414569164"}}],"KeyId":"","Owner":"Service1_id@as","StreamStatus":"ACTIVE","HasMoreShards":false,"EncryptionType":"ENCRYPTION_UNDEFINED","StreamCreationTimestamp":1750863427,"StorageLimitMb":0,"StreamName":"testtopic"}} 2025-06-25T14:57:06.587093Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:58990) incoming connection opened 2025-06-25T14:57:06.587173Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:58990) -> (POST /Root, 30 bytes) 2025-06-25T14:57:06.587293Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [5849:6000:6050:0:4049:6000:6050:0] request [DescribeStreamSummary] url [/Root] database [/Root] requestId: 60746cdf-cd523321-4c703c94-9419dcf1 2025-06-25T14:57:06.587666Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [DescribeStreamSummary] requestId [60746cdf-cd523321-4c703c94-9419dcf1] got new request from [5849:6000:6050:0:4049:6000:6050:0] database '/Root' stream 'testtopic' Http output full {"StreamDescriptionSummary":{"RetentionPeriodHours":24,"OpenShardCount":5,"StreamArn":"testtopic","ConsumerCount":0,"KeyId":"","StreamStatus":"ACTIVE","EncryptionType":"NONE","StreamCreationTimestamp":1750863.426,"StreamName":"testtopic"}} 200 {"StreamDescriptionSummary":{"RetentionPeriodHours":24,"OpenShardCount":5,"StreamArn":"testtopic","ConsumerCount":0,"KeyId":"","StreamStatus":"ACTIVE","EncryptionType":"NONE","StreamCreationTimestamp":1750863.426,"StreamName":"testtopic"}} 2025-06-25T14:57:06.588160Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [DescribeStreamSummary] requestId [60746cdf-cd523321-4c703c94-9419dcf1] [auth] Authorized successfully 2025-06-25T14:57:06.588276Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [DescribeStreamSummary] requestId [60746cdf-cd523321-4c703c94-9419dcf1] sending grpc request to '' database: '/Root' iam token size: 0 2025-06-25T14:57:06.589555Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DescribeStreamSummary] requestId [60746cdf-cd523321-4c703c94-9419dcf1] reply ok 2025-06-25T14:57:06.589870Z node 8 :HTTP DEBUG: 
http_proxy_incoming.cpp:280: (#37,[::1]:58990) <- (200 , 239 bytes) 2025-06-25T14:57:06.589945Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:58990) connection closed 2025-06-25T14:57:06.591051Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:58992) incoming connection opened 2025-06-25T14:57:06.591113Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:58992) -> (POST /Root, 30 bytes) 2025-06-25T14:57:06.591254Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [3842:8a00:6050:0:2042:8a00:6050:0] request [DescribeStream] url [/Root] database [/Root] requestId: 7ecdab6e-fb5837c9-4b518af5-98b3a1d5 2025-06-25T14:57:06.591540Z node 8 :HTTP_PROXY INFO: http_req.cpp:959: http request [DescribeStream] requestId [7ecdab6e-fb5837c9-4b518af5-98b3a1d5] got new request from [3842:8a00:6050:0:2042:8a00:6050:0] database '/Root' stream 'testtopic' 2025-06-25T14:57:06.591931Z node 8 :HTTP_PROXY DEBUG: http_req.cpp:1500: http request [DescribeStream] requestId [7ecdab6e-fb5837c9-4b518af5-98b3a1d5] [auth] Authorized successfully 2025-06-25T14:57:06.592003Z node 8 :HTTP_PROXY INFO: http_req.cpp:678: http request [DescribeStream] requestId [7ecdab6e-fb5837c9-4b518af5-98b3a1d5] sending grpc request to '' database: '/Root' iam token size: 0 2025-06-25T14:57:06.592935Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037907] server connected, pipe [8:7519901158457312739:2497], now have 1 active actors on pipe 2025-06-25T14:57:06.592990Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037909] server connected, pipe [8:7519901158457312741:2499], now have 1 active actors on pipe 2025-06-25T14:57:06.593031Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037911] server connected, pipe [8:7519901158457312743:2501], now have 1 active actors on pipe 2025-06-25T14:57:06.593036Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037908] server connected, pipe [8:7519901158457312740:2498], now have 1 active actors on pipe 2025-06-25T14:57:06.593066Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037910] server connected, pipe [8:7519901158457312742:2500], now have 1 active actors on pipe Http output full 
{"StreamDescription":{"RetentionPeriodHours":24,"WriteQuotaKbPerSec":1024,"StreamModeDetails":{"StreamMode":"ON_DEMAND"},"StreamArn":"testtopic","PartitioningSettings":{"MinActivePartitions":5,"AutoPartitioningSettings":{"Strategy":"AUTO_PARTITIONING_STRATEGY_DISABLED","PartitionWriteSpeed":{"StabilizationWindow":{"Nanos":0,"Seconds":300},"DownUtilizationPercent":30,"UpUtilizationPercent":90}},"MaxActivePartitions":5},"Shards":[{"ShardId":"shard-000000","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"68056473384187692692674921486353642290","StartingHashKey":"0"}},{"ShardId":"shard-000001","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"136112946768375385385349842972707284581","StartingHashKey":"68056473384187692692674921486353642291"}},{"ShardId":"shard-000002","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"204169420152563078078024764459060926872","StartingHashKey":"136112946768375385385349842972707284582"}},{"ShardId":"shard-000003","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"272225893536750770770699685945414569163","StartingHashKey":"204169420152563078078024764459060926873"}},{"ShardId":"shard-000004","SequenceNumberRange":{"StartingSequenceNumber":"0"},"HashKeyRange":{"EndingHashKey":"340282366920938463463374607431768211455","StartingHashKey":"272225893536750770770699685945414569164"}}],"KeyId":"","Owner":"Service1_id@as","StreamStatus":"ACTIVE","HasMoreShards":false,"EncryptionType":"ENCRYPTION_UNDEFINED","StreamCreationTimestamp":1750863427,"StorageLimitMb":0,"StreamName":"testtopic"}} 2025-06-25T14:57:06.594497Z node 8 :HTTP_PROXY INFO: http_req.cpp:1207: http request [DescribeStream] requestId [7ecdab6e-fb5837c9-4b518af5-98b3a1d5] reply ok 2025-06-25T14:57:06.594949Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:58992) <- (200 , 1672 bytes) 2025-06-25T14:57:06.595025Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:58992) connection closed 2025-06-25T14:57:06.596070Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037907] server disconnected, pipe [8:7519901158457312739:2497] destroyed 2025-06-25T14:57:06.596103Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037908] server disconnected, pipe [8:7519901158457312740:2498] destroyed 2025-06-25T14:57:06.596124Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037909] server disconnected, pipe [8:7519901158457312741:2499] destroyed 2025-06-25T14:57:06.596143Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037910] server disconnected, pipe [8:7519901158457312742:2500] destroyed 2025-06-25T14:57:06.596162Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037911] server disconnected, pipe [8:7519901158457312743:2501] destroyed >> KqpMultishardIndex::SecondaryIndexSelect [GOOD] >> KqpMultishardIndex::SortByPk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::BuildIndexTimesAndUser [GOOD] Test command err: Trying to start YDB, gRPC: 32461, MsgBus: 20521 2025-06-25T14:56:49.691947Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901083663452583:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:49.692179Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c3e/r3tmp/tmpWEEymL/pdisk_1.dat 2025-06-25T14:56:50.056284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.056378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.061197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:50.076090Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901083663452425:2080] 1750863409628562 != 1750863409628565 2025-06-25T14:56:50.090972Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32461, node 1 2025-06-25T14:56:50.289844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.289863Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.289868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.289940Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:50.687773Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20521 TClient is connected to server localhost:20521 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.259182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:51.284212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.470117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.732638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.825464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:53.090268Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100843323264:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.090363Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.582450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.621471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.674412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.705946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.731280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.808964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.848397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.913780Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100843323924:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.913825Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.913862Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100843323929:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.933633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:53.950014Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901100843323931:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.031547Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901105138291278:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:54.691446Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901083663452583:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:54.691498Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.131137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:00.353127Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: ... cription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:02.454686Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:02.488412Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:02.495140Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:02.582898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:02.781204Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:02.852043Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:05.028883Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901154139031697:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:05.028979Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:05.104379Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.183420Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.233709Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.314731Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.398791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.457912Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.542306Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.632642Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901154139032367:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:05.632748Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:05.633700Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901154139032372:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:05.637348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:05.656344Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519901154139032374:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:05.739126Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519901154139032425:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:06.463740Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901136959160913:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:06.463805Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:06.895564Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.173802Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.269945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.337470Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.412610Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715764:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:57:07.493928Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037928 not found 2025-06-25T14:57:07.494013Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037929 not found 2025-06-25T14:57:07.494027Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037927 not found ------- [TM] 
{asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateOnFkAlreadyExist [GOOD] Test command err: Trying to start YDB, gRPC: 23011, MsgBus: 25268 2025-06-25T14:56:49.635657Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901084500057842:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:49.640133Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c5a/r3tmp/tmp3mYijo/pdisk_1.dat 2025-06-25T14:56:50.086054Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:50.100111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.100197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.141503Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23011, node 1 2025-06-25T14:56:50.290701Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.290722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.290728Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.290831Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:50.649179Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25268 TClient is connected to server localhost:25268 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.108520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:51.155587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:51.170992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.365997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.530808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.620437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:53.005412Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901101679928620:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.005553Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.581911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.625015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.669920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.694197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.720799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.765041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.799452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.900580Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901101679929276:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.900668Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.904418Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901101679929281:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.930399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:53.957092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:56:53.957365Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901101679929283:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.056368Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901105974896632:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:54.636150Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901084500057842:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:54.636202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.166094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ ... ty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:59.612960Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:59.612968Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:59.613091Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11478 TClient is connected to server localhost:11478 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:00.161582Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:00.169391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:00.174745Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:00.266866Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:00.466966Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:00.540430Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:00.630945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:02.798523Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901139855142331:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:02.798622Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:02.890286Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:02.931550Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:02.977280Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:03.017207Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:03.097184Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:03.172223Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:03.262461Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:03.368535Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901144150110299:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:03.368623Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:03.369055Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901144150110304:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:03.372386Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:03.387969Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519901144150110306:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:03.486028Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519901144150110357:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:04.320458Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901126970238985:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:04.329338Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:04.563333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:06.622771Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksfc8d684ff8ssb3mr5cek, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDBmOWJmOTMtZWEzYWE3ODgtNWI5OGRkMzYtZDYwNmZjMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-25T14:57:06.646822Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZDBmOWJmOTMtZWEzYWE3ODgtNWI5OGRkMzYtZDYwNmZjMmQ=, ActorId: [2:7519901148445078678:2533], ActorState: ExecuteState, TraceId: 01jyksfc8d684ff8ssb3mr5cek, Create QueryResponse for error on request, msg: 2025-06-25T14:57:07.485333Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksfd3189qcdeggjh8z2zb6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDBmOWJmOTMtZWEzYWE3ODgtNWI5OGRkMzYtZDYwNmZjMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-06-25T14:57:07.485598Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZDBmOWJmOTMtZWEzYWE3ODgtNWI5OGRkMzYtZDYwNmZjMmQ=, ActorId: [2:7519901148445078678:2533], ActorState: ExecuteState, TraceId: 01jyksfd3189qcdeggjh8z2zb6, Create QueryResponse for error on request, msg: >> KqpMultishardIndex::SortedRangeReadDesc [GOOD] >> KqpMultishardIndex::WriteIntoRenamingSyncIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestListQueueTags [GOOD] Test command err: 2025-06-25T14:56:09.054262Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900912206075172:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:09.068181Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c5f/r3tmp/tmpv9LO9S/pdisk_1.dat 2025-06-25T14:56:09.503090Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:09.503183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:09.507042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:09.551741Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1233, node 1 2025-06-25T14:56:09.776940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:09.776959Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:09.776965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:09.777058Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:10.054460Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10281 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:56:10.362576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:10.382482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient is connected to server localhost:10281 2025-06-25T14:56:10.596582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T14:56:10.603230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:56:10.605117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-25T14:56:10.631217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.783092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:10.844731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710663, at schemeshard: 72057594046644480 2025-06-25T14:56:10.848904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:56:10.900458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710665, at schemeshard: 72057594046644480 2025-06-25T14:56:10.908933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.946325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:10.979549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:11.021742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:11.067457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.123155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:11.165331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:12.348756Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900925090978422:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.348807Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900925090978411:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.348902Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:12.352750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:12.362524Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900925090978425:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710673 completed, doublechecking } 2025-06-25T14:56:12.439724Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900925090978476:2866] txid# 281474976710674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:13.038943Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jyksdr1t81zxhnd7hhj9vvjs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzkxNjI4MzAtNWM4OGFjMDgtOGNkMGVhODYtNTJkMjNlOGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:56:13.130722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation typ ... 01H\"\000\t\211\006?\320\203\005@\203\001H?\322\030Invoke\000\003?\326\014Equals\003?\330\000\t\211\004?\322\207\203\001H?\322 Coalesce\000\t\211\004?\342\207\205\004\207\203\001H?\342\026\032\203\004\030Member\000\t\211\n?\354\203\005\004\200\205\004\203\004\203\004\026\032\213\004\203\001H\203\001H\203\004\036\000\003?\362 \000\001\205\000\000\000\000\001\003\000\000\000\000\000\000\000?\352\005?\370\003?\364\004\003?\366 \003\013?\376\t\351\000?\372\005\205\004\206\205\004\203\010\203\005@\002\006\203\005@\n\016\006\000?%\002\003?)\002\022USER_NAME\003\022\000\003?\374(000000000000000301v0\002\003?\001\002\000\037\003?\356\002\002\003?\322\004{}\002\003\003?\302\004{}?a\002\002\002\001\000/" } Params { Bin: "\037\000\005\205\010\203\001H\203\010\203\010\203\001H\020NAME> KqpUniqueIndex::UpdateImplicitNullInComplexFk2 [GOOD] Test command err: Trying to start YDB, gRPC: 16038, MsgBus: 18669 2025-06-25T14:56:50.264136Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901087462810893:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:50.273833Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c28/r3tmp/tmp8wzabB/pdisk_1.dat 2025-06-25T14:56:50.704026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.704096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.713234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:50.721955Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16038, node 1 2025-06-25T14:56:50.863144Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.863178Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.863193Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.863354Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:18669 2025-06-25T14:56:51.274811Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18669 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.651806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:51.667350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:51.679927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.863661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.032508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.104223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:53.529096Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100347714397:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.529210Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.819935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.851903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.883649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.963135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.006936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.044793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.081078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.184632Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901104642682356:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.184704Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.184782Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901104642682361:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.188553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.204768Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901104642682363:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.280181Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901104642682414:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:55.268477Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901087462810893:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:55.268538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.327666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 29181, MsgBus: 21664 2025-06-25T14:56:59.292623Z node 2 :METADATA_PROVIDER WARN: l ... tileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29181, node 2 2025-06-25T14:56:59.639356Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:59.639389Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:59.639397Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:59.639511Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21664 TClient is connected to server localhost:21664 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:00.110347Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:00.131198Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:00.228132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:00.339208Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:00.385767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:00.466478Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:02.729474Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901140530733724:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:02.729565Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:02.783708Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:02.815329Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:02.852643Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:02.927881Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:02.974939Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:03.025169Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:03.106658Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:03.179384Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901144825701684:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:03.179458Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:03.179775Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901144825701689:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:03.183315Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:03.198159Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519901144825701691:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:03.295763Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519901144825701742:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:04.294435Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901127645830203:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:04.296404Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:04.363451Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:06.683353Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksfc54c847amezb3vbkjs1, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWVhNGQyMWUtYzBmMzQ3OTYtYTMwZTYwY2UtYjJmMjMxZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-25T14:57:06.694888Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZWVhNGQyMWUtYzBmMzQ3OTYtYTMwZTYwY2UtYjJmMjMxZDU=, ActorId: [2:7519901149120670058:2532], ActorState: ExecuteState, TraceId: 01jyksfc54c847amezb3vbkjs1, Create QueryResponse for error on request, msg: 2025-06-25T14:57:08.047729Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksfdax8kdmhyczznc7gamf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWVhNGQyMWUtYzBmMzQ3OTYtYTMwZTYwY2UtYjJmMjMxZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-06-25T14:57:08.047997Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZWVhNGQyMWUtYzBmMzQ3OTYtYTMwZTYwY2UtYjJmMjMxZDU=, ActorId: [2:7519901149120670058:2532], ActorState: ExecuteState, TraceId: 01jyksfdax8kdmhyczznc7gamf, Create QueryResponse for error on request, msg: >> KqpIndexes::SelectConcurentTX [GOOD] >> KqpIndexes::SelectConcurentTX2 >> KqpIndexes::IndexFilterPushDown [GOOD] >> KqpIndexes::DoUpsertWithoutIndexUpdate-UniqIndex-UseSink [GOOD] >> KqpIndexes::DoUpsertWithoutIndexUpdate-UniqIndex+UseSink >> KqpVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity >> KqpVectorIndexes::OrderByCosineLevel2-Nullable-UseSimilarity >> TInterconnectTest::TestCrossConnect [GOOD] >> TInterconnectTest::TestManyEventsWithReconnect >> KqpPrefixedVectorIndexes::OrderByCosineLevel1+Nullable-UseSimilarity >> TInterconnectTest::TestManyEventsWithReconnect [GOOD] >> TInterconnectTest::TestEventWithPayloadSerialization >> KqpIndexes::MultipleModifications [GOOD] >> KqpIndexes::CreateTableWithImplicitSyncIndexSQL ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::IndexFilterPushDown [GOOD] Test command err: Trying to start YDB, gRPC: 7598, MsgBus: 5078 2025-06-25T14:56:50.188032Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901088566893105:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:50.189369Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c21/r3tmp/tmpjdAdYJ/pdisk_1.dat 2025-06-25T14:56:50.628348Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7598, node 1 2025-06-25T14:56:50.663124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.663210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.672839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:50.705178Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.705202Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.705209Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.705318Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5078 TClient is connected to server localhost:5078 2025-06-25T14:56:51.208717Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.395049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:51.431647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:51.449447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.630710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.821336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:51.897529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.436454Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901101451796583:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.436572Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.687240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.717718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.749381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.780642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.847846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.892453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.923602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.018774Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901105746764541:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.018841Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.018843Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901105746764546:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.022259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.031302Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901105746764548:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.106236Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901105746764599:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:55.188149Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901088566893105:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:55.188284Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.193775Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901110041732169:3594], Recipient [1:7519901088566893412:2160]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.193818Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.193829Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 ... 668976Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901138096990611:2080] 1750863422524942 != 1750863422524945 2025-06-25T14:57:02.675861Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:02.677407Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17353, node 3 2025-06-25T14:57:02.762039Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:02.762058Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:02.762064Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:02.762163Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28106 TClient is connected to server localhost:28106 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:03.355720Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:03.364597Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:03.379625Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:03.463577Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:03.562385Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:03.602976Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:03.695685Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:06.018048Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901155276861435:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.018154Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.085639Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.132495Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.173476Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.250798Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.293123Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.372925Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.495513Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.594395Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901155276862104:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.594495Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.594655Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901155276862109:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.597381Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:06.610837Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901155276862111:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:06.672274Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901155276862162:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:07.532046Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901138096990643:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:07.532117Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:07.909569Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.966976Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:08.008487Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpIndexes::ExplainCollectFullDiagnostics >> KqpIndexes::MultipleSecondaryIndexWithSameComulns+UseSink [GOOD] >> KqpIndexes::MultipleSecondaryIndexWithSameComulns-UseSink >> KqpQueryService::CloseConnection [GOOD] >> KqpIndexes::UpdateIndexSubsetPk [GOOD] >> TInterconnectTest::TestEventWithPayloadSerialization [GOOD] >> KqpNamedExpressions::NamedExpressionRandomSelect-UseSink [GOOD] >> KqpIndexes::JoinWithNonPKColumnsInPredicate+UseStreamJoin [GOOD] >> KqpIndexes::JoinWithNonPKColumnsInPredicate-UseStreamJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::MultipleModifications [GOOD] Test command err: Trying to start YDB, gRPC: 2848, MsgBus: 18138 2025-06-25T14:56:49.983737Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901085611991266:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:49.984614Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c30/r3tmp/tmpXNlbkG/pdisk_1.dat 2025-06-25T14:56:50.459031Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:50.479171Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.479259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 2848, node 1 2025-06-25T14:56:50.486686Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:50.652297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.653358Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.653377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.653491Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18138 2025-06-25T14:56:50.975428Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18138 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.174706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:51.205000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.398776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:56:51.563719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.653120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.282124Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901102791861881:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.282234Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.581945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.624167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.662840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.738666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.793588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.872757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.919947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.023731Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901107086829847:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.023845Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.024115Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901107086829852:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.027669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.038267Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901107086829854:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.111769Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901107086829905:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:54.980415Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901085611991266:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:55.030693Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.155531Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901111381797477:3597], Recipient [1:7519901089906958742:2181]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.155578Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.155592Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:56:55.155631Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519901111381797473:3594], Recipient [1:75199010 ... _effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:10.170983Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901152760583988:2142], Recipient [3:7519901152760583988:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:10.171006Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:10.171064Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:10.171074Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:10.171149Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901152760583988:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:10.171164Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:10.171201Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:10.171222Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:10.171259Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976710672:2 2025-06-25T14:57:10.171318Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901174235422815:2478] msg 
type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-06-25T14:57:10.171394Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:10.171402Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:10.171408Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976710672:0 2025-06-25T14:57:10.171435Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901174235422812:2477] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-06-25T14:57:10.171502Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901152760583988:2142], Recipient [3:7519901152760583988:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:10.171513Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:10.171551Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901152760583988:2142], Recipient [3:7519901152760583988:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:10.171565Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:10.171621Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:10.171645Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976710672:2 ProgressState 2025-06-25T14:57:10.171720Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:10.171732Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710672:2 progress is 2/3 2025-06-25T14:57:10.171744Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 2/3 2025-06-25T14:57:10.171767Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710672:2 progress is 2/3 2025-06-25T14:57:10.171780Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 2/3 2025-06-25T14:57:10.171795Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710672, ready parts: 2/3, is published: true 2025-06-25T14:57:10.171962Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901152760583988:2142], Recipient [3:7519901152760583988:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:10.171977Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:10.172003Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:10.172020Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976710672:0 ProgressState 2025-06-25T14:57:10.172070Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:10.172080Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710672:0 progress is 3/3 2025-06-25T14:57:10.172088Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-06-25T14:57:10.172103Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710672:0 progress is 3/3 2025-06-25T14:57:10.172112Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-06-25T14:57:10.172121Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710672, ready parts: 3/3, is published: true 2025-06-25T14:57:10.172164Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519901174235422783:2475] message: TxId: 281474976710672 2025-06-25T14:57:10.172180Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-06-25T14:57:10.172203Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710672:0 2025-06-25T14:57:10.172212Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710672:0 2025-06-25T14:57:10.173064Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-25T14:57:10.173089Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710672:1 2025-06-25T14:57:10.173107Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710672:1 2025-06-25T14:57:10.173161Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-25T14:57:10.173183Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710672:2 2025-06-25T14:57:10.173190Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710672:2 2025-06-25T14:57:10.173235Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-25T14:57:10.173562Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:10.173637Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender 
[3:7519901174235422883:3664], Recipient [3:7519901152760583988:2142]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:10.173653Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:10.173664Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:10.173691Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901174235422886:3667], Recipient [3:7519901152760583988:2142]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:10.173701Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:10.173707Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:10.173824Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:10.173871Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901174235422783:2475] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-06-25T14:57:10.175568Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901174235422798:3605], Recipient [3:7519901152760583988:2142]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:10.175586Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:10.175601Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:11.172413Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901152760583988:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:11.172459Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:11.172510Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901152760583988:2142], Recipient [3:7519901152760583988:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:11.172527Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestEventWithPayloadSerialization [GOOD] Test command err: Starting iteration 0 Starting iteration 1 Starting iteration 2 Starting iteration 3 Starting iteration 4 Starting iteration 5 Starting iteration 6 Starting iteration 7 Starting iteration 8 Starting iteration 9 Starting iteration 10 Starting iteration 11 Starting iteration 12 Starting iteration 13 Starting iteration 14 Starting iteration 15 Starting iteration 16 Starting iteration 17 Starting iteration 18 Starting iteration 19 Starting iteration 20 Starting iteration 21 Starting iteration 22 Starting iteration 23 Starting iteration 24 Starting iteration 25 
Starting iteration 26 Starting iteration 27 Starting iteration 28 Starting iteration 29 Starting iteration 30 Starting iteration 31 Starting iteration 32 Starting iteration 33 Starting iteration 34 Starting iteration 35 Starting iteration 36 Starting iteration 37 Starting iteration 38 Starting iteration 39 Starting iteration 40 Starting iteration 41 Starting iteration 42 Starting iteration 43 Starting iteration 44 Starting iteration 45 Starting iteration 46 Starting iteration 47 Starting iteration 48 Starting iteration 49 0 0 0 1 0 3 0 7 0 15 0 31 0 63 0 127 0 255 0 511 0 1023 0 2047 0 4095 0 8191 0 16383 0 32767 0 65535 1 0 1 1 1 3 1 7 1 15 1 31 1 63 1 127 1 255 1 511 1 1023 1 2047 1 4095 1 8191 1 16383 1 32767 1 65535 3 0 3 1 3 3 3 7 3 15 3 31 3 63 3 127 3 255 3 511 3 1023 3 2047 3 4095 3 8191 3 16383 3 32767 3 65535 7 0 7 1 7 3 7 7 7 15 7 31 7 63 7 127 7 255 7 511 7 1023 7 2047 7 4095 7 8191 7 16383 7 32767 7 65535 15 0 15 1 15 3 15 7 15 15 15 31 15 63 15 127 15 255 15 511 15 1023 15 2047 15 4095 15 8191 15 16383 15 32767 15 65535 31 0 31 1 31 3 31 7 31 15 31 31 31 63 31 127 31 255 31 511 31 1023 31 2047 31 4095 31 8191 31 16383 31 32767 31 65535 63 0 63 1 63 3 63 7 63 15 63 31 63 63 63 127 63 255 63 511 63 1023 63 2047 63 4095 63 8191 63 16383 63 32767 63 65535 127 0 127 1 127 3 127 7 127 15 127 31 127 63 127 127 127 255 127 511 127 1023 127 2047 127 4095 127 8191 127 16383 127 32767 127 65535 255 0 255 1 255 3 255 7 255 15 255 31 255 63 255 127 255 255 255 511 255 1023 255 2047 255 4095 255 8191 255 16383 255 32767 255 65535 511 0 511 1 511 3 511 7 511 15 511 31 511 63 511 127 511 255 511 511 511 1023 511 2047 511 4095 511 8191 511 16383 511 32767 511 65535 1023 0 1023 1 1023 3 1023 7 1023 15 1023 31 1023 63 1023 127 1023 255 1023 511 1023 1023 1023 2047 1023 4095 1023 8191 1023 16383 1023 32767 1023 65535 2047 0 2047 1 2047 3 2047 7 2047 15 2047 31 2047 63 2047 127 2047 255 2047 511 2047 1023 2047 2047 2047 4095 2047 8191 2047 16383 2047 32767 2047 65535 4095 0 4095 1 4095 3 4095 7 4095 15 4095 31 4095 63 4095 127 4095 255 4095 511 4095 1023 4095 2047 4095 4095 4095 8191 4095 16383 4095 32767 4095 65535 8191 0 8191 1 8191 3 8191 7 8191 15 8191 31 8191 63 8191 127 8191 255 8191 511 8191 1023 8191 2047 8191 4095 8191 8191 8191 16383 8191 32767 8191 65535 16383 0 16383 1 16383 3 16383 7 16383 15 16383 31 16383 63 16383 127 16383 255 16383 511 16383 1023 16383 2047 16383 4095 16383 8191 16383 16383 16383 32767 16383 65535 32767 0 32767 1 32767 3 32767 7 32767 15 32767 31 32767 63 32767 127 32767 255 32767 511 32767 1023 32767 2047 32767 4095 32767 8191 32767 16383 32767 32767 32767 65535 65535 0 65535 1 65535 3 65535 7 65535 15 65535 31 65535 63 65535 127 65535 255 65535 511 65535 1023 65535 2047 65535 4095 65535 8191 65535 16383 65535 32767 65535 65535 >> KqpIndexes::SecondaryIndexSelectUsingScripting [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UpdateIndexSubsetPk [GOOD] Test command err: Trying to start YDB, gRPC: 18344, MsgBus: 27334 2025-06-25T14:56:50.578403Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901087954678307:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:50.578462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/001c11/r3tmp/tmptDHv6i/pdisk_1.dat 2025-06-25T14:56:51.115057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:51.115131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:51.121564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:51.165432Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:51.165563Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901087954678281:2080] 1750863410577322 != 1750863410577325 TServer::EnableGrpc on GrpcPort 18344, node 1 2025-06-25T14:56:51.404840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:51.404862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:51.404869Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:51.404958Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:51.602755Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27334 TClient is connected to server localhost:27334 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:52.307016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:52.321473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:52.339132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.545443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.700151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.757801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:54.300801Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901105134549091:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.300899Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.579959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.607997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.633910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.664474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.714324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.750937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.792905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.853671Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901105134549747:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.853747Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.853816Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901105134549752:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.857529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.872121Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901105134549754:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.943894Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901105134549805:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:55.582812Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901087954678307:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:55.582878Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:56.060881Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901113724484687:3600], Recipient [1:7519901087954678654:2179]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:56.060925Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:50 ... de 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:09.713387Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715672 2025-06-25T14:57:09.713415Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:09.713573Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:09.713584Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:09.713646Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:09.713655Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:09.713665Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715672:0 2025-06-25T14:57:09.713709Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901169971654009:2483] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:09.713774Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:09.713779Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:09.713785Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715672:2 2025-06-25T14:57:09.713809Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901169971654027:2484] msg type: 269552132 msg: 
NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:09.713871Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901148496815196:2149], Recipient [3:7519901148496815196:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:09.713885Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:09.713916Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:09.713933Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-06-25T14:57:09.714038Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:09.714089Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:0 progress is 2/3 2025-06-25T14:57:09.714150Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-25T14:57:09.714195Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:0 progress is 2/3 2025-06-25T14:57:09.714207Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-25T14:57:09.714220Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-06-25T14:57:09.714382Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901148496815196:2149], Recipient [3:7519901148496815196:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:09.714394Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:09.714418Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:09.714431Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-06-25T14:57:09.714479Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:09.714487Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-25T14:57:09.714495Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:09.714508Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-25T14:57:09.714515Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:09.714525Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: 
TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-06-25T14:57:09.714557Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519901169971653988:2481] message: TxId: 281474976715672 2025-06-25T14:57:09.714576Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:09.714595Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-25T14:57:09.714605Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:0 2025-06-25T14:57:09.714698Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-25T14:57:09.714711Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-25T14:57:09.714717Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:1 2025-06-25T14:57:09.714731Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-25T14:57:09.714737Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-25T14:57:09.714742Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:2 2025-06-25T14:57:09.714770Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-25T14:57:09.715177Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:09.715217Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:09.715261Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901169971653988:2481] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:09.716384Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901169971653998:3600], Recipient [3:7519901148496815196:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:09.716401Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:09.716411Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:09.717913Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901169971654088:3664], Recipient [3:7519901148496815196:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:09.717928Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event 
TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:09.717938Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:09.717967Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901169971654083:3660], Recipient [3:7519901148496815196:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:09.717977Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:09.717984Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:10.384759Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901148496815196:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:10.384789Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:10.384833Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901148496815196:2149], Recipient [3:7519901148496815196:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:10.384848Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:11.391387Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901148496815196:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:11.391428Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:11.391465Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901148496815196:2149], Recipient [3:7519901148496815196:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:11.391476Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpUniqueIndex::InsertFkDuplicate [GOOD] >> KqpVectorIndexes::VectorIndexIsNotUpdatable [GOOD] >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover-Nullable >> KqpVectorIndexes::OrderByCosineLevel1+Nullable-UseSimilarity ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::CloseConnection [GOOD] Test command err: Trying to start YDB, gRPC: 17035, MsgBus: 31892 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00178f/r3tmp/tmp8LlQWB/pdisk_1.dat 2025-06-25T14:55:59.096192Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900870676366358:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:59.097258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:55:59.510849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-25T14:55:59.510973Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:59.519315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17035, node 1 2025-06-25T14:55:59.552422Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:59.555152Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900870676366140:2080] 1750863359031751 != 1750863359031754 2025-06-25T14:55:59.632158Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:59.632181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:59.632190Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:59.632333Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31892 2025-06-25T14:56:00.078535Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31892 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:00.284247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:00.323709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:00.510713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:00.678862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:00.752180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:02.485890Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900883561269667:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:02.485987Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:02.841868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:02.877880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:02.974273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:03.013668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:03.050263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:03.095341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:03.134493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:03.196577Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900887856237624:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:03.196678Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:03.198555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900887856237629:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:03.202870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:03.219020Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900887856237631:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:56:03.297365Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900887856237682:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:04.078227Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900870676366358:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:04.078306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 30530, MsgBus: 22458 2025-06-25T14:56:06.092381Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900899270541027:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:06.092466Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... dle undelivered TEvState event, abort execution 2025-06-25T14:57:09.646311Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787406:2597], TxId: 281474976715686, task: 5. Ctx: { SessionId : ydb://session/3?node_id=4&id=NTUyMzA0NmYtYzIzODRlN2UtY2E2ZjYwZmEtYmUwZGJlZjA=. TraceId : 01jyksfewt71011n831ybb8nt7. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.646501Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787407:2598], TxId: 281474976715687, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=NGE1MDE0NjEtZTgyOGQ5MWItZTE3MWIzMjAtOGYxZjkwOTc=. TraceId : 01jyksfexx33b2g9fw320z4yaz. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.646703Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787418:2607], TxId: 281474976715688, task: 5. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODI0NzQ4M2EtYjNkYWEyNWYtNDEwMzhkNDEtYTg4NDkyY2M=. TraceId : 01jyksfez42nrd9czskjsbhybq. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.647150Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787408:2599], TxId: 281474976715687, task: 2. Ctx: { TraceId : 01jyksfexx33b2g9fw320z4yaz. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NGE1MDE0NjEtZTgyOGQ5MWItZTE3MWIzMjAtOGYxZjkwOTc=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.647340Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787402:2594], TxId: 281474976715686, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NTUyMzA0NmYtYzIzODRlN2UtY2E2ZjYwZmEtYmUwZGJlZjA=. TraceId : 01jyksfewt71011n831ybb8nt7. 
CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.647511Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787403:2595], TxId: 281474976715686, task: 3. Ctx: { TraceId : 01jyksfewt71011n831ybb8nt7. SessionId : ydb://session/3?node_id=4&id=NTUyMzA0NmYtYzIzODRlN2UtY2E2ZjYwZmEtYmUwZGJlZjA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.647669Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787410:2601], TxId: 281474976715687, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=NGE1MDE0NjEtZTgyOGQ5MWItZTE3MWIzMjAtOGYxZjkwOTc=. TraceId : 01jyksfexx33b2g9fw320z4yaz. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.647846Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787412:2602], TxId: 281474976715687, task: 5. Ctx: { TraceId : 01jyksfexx33b2g9fw320z4yaz. SessionId : ydb://session/3?node_id=4&id=NGE1MDE0NjEtZTgyOGQ5MWItZTE3MWIzMjAtOGYxZjkwOTc=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.647998Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787413:2603], TxId: 281474976715688, task: 1. Ctx: { TraceId : 01jyksfez42nrd9czskjsbhybq. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODI0NzQ4M2EtYjNkYWEyNWYtNDEwMzhkNDEtYTg4NDkyY2M=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.648164Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787405:2596], TxId: 281474976715686, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=NTUyMzA0NmYtYzIzODRlN2UtY2E2ZjYwZmEtYmUwZGJlZjA=. TraceId : 01jyksfewt71011n831ybb8nt7. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.648613Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787414:2604], TxId: 281474976715688, task: 2. Ctx: { TraceId : 01jyksfez42nrd9czskjsbhybq. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODI0NzQ4M2EtYjNkYWEyNWYtNDEwMzhkNDEtYTg4NDkyY2M=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.648804Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787416:2605], TxId: 281474976715688, task: 3. Ctx: { CustomerSuppliedId : . TraceId : 01jyksfez42nrd9czskjsbhybq. SessionId : ydb://session/3?node_id=4&id=ODI0NzQ4M2EtYjNkYWEyNWYtNDEwMzhkNDEtYTg4NDkyY2M=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.648970Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787409:2600], TxId: 281474976715687, task: 3. Ctx: { TraceId : 01jyksfexx33b2g9fw320z4yaz. SessionId : ydb://session/3?node_id=4&id=NGE1MDE0NjEtZTgyOGQ5MWItZTE3MWIzMjAtOGYxZjkwOTc=. 
CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.649248Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1155: SelfId: [4:7519901167048787417:2606], TxId: 281474976715688, task: 4. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=ODI0NzQ4M2EtYjNkYWEyNWYtNDEwMzhkNDEtYTg4NDkyY2M=. TraceId : 01jyksfez42nrd9czskjsbhybq. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle undelivered TEvState event, abort execution 2025-06-25T14:57:09.671907Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-25T14:57:09.672521Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519901171343755451:2791] TxId: 281474976715723. Ctx: { TraceId: 01jyksffzaa1dbbcfq59ndwbae, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NjE0YmU0MTktNjJhODkyOGQtMTlmZjE0NGQtNmY5ZjZjMDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-25T14:57:09.730122Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519901171343755460:2795], TxId: 281474976715723, task: 3. Ctx: { TraceId : 01jyksffzaa1dbbcfq59ndwbae. SessionId : ydb://session/3?node_id=4&id=NjE0YmU0MTktNjJhODkyOGQtMTlmZjE0NGQtNmY5ZjZjMDg=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519901171343755451:2791], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:57:09.730551Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=NjE0YmU0MTktNjJhODkyOGQtMTlmZjE0NGQtNmY5ZjZjMDg=, ActorId: [4:7519901171343755448:2791], ActorState: ExecuteState, TraceId: 01jyksffzaa1dbbcfq59ndwbae, Create QueryResponse for error on request, msg: 2025-06-25T14:57:09.929429Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-25T14:57:09.929942Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519901171343755659:2847] TxId: 281474976715731. Ctx: { TraceId: 01jyksfg7nd1fnrs3vs8d3tpes, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OGFhMTY3ODItMzQwMzI2ZmUtMTQ3NGZlNTEtYzQ5NzQ2MTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-25T14:57:09.930077Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519901171343755669:2853], TxId: 281474976715731, task: 5. Ctx: { CustomerSuppliedId : . TraceId : 01jyksfg7nd1fnrs3vs8d3tpes. SessionId : ydb://session/3?node_id=4&id=OGFhMTY3ODItMzQwMzI2ZmUtMTQ3NGZlNTEtYzQ5NzQ2MTM=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519901171343755659:2847], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:57:09.950366Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519901171343755668:2852], TxId: 281474976715731, task: 4. Ctx: { SessionId : ydb://session/3?node_id=4&id=OGFhMTY3ODItMzQwMzI2ZmUtMTQ3NGZlNTEtYzQ5NzQ2MTM=. TraceId : 01jyksfg7nd1fnrs3vs8d3tpes. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519901171343755659:2847], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:57:09.951363Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGFhMTY3ODItMzQwMzI2ZmUtMTQ3NGZlNTEtYzQ5NzQ2MTM=, ActorId: [4:7519901171343755656:2847], ActorState: ExecuteState, TraceId: 01jyksfg7nd1fnrs3vs8d3tpes, Create QueryResponse for error on request, msg: 2025-06-25T14:57:10.296450Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-25T14:57:10.297053Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519901175638723228:2921] TxId: 281474976715741. Ctx: { TraceId: 01jyksfgjj17rgbf2zncfc0hmv, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Mzc0ZjMyMDUtOTMxMDQyODktZDc3OGMxYTUtZmNiZDEzN2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-25T14:57:10.297382Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519901175638723236:2924], TxId: 281474976715741, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=Mzc0ZjMyMDUtOTMxMDQyODktZDc3OGMxYTUtZmNiZDEzN2I=. TraceId : 01jyksfgjj17rgbf2zncfc0hmv. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519901175638723228:2921], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:57:10.306634Z node 4 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2914: ActorId: [4:7519901175638723228:2921] TxId: 281474976715741. Ctx: { TraceId: 01jyksfgjj17rgbf2zncfc0hmv, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Mzc0ZjMyMDUtOTMxMDQyODktZDc3OGMxYTUtZmNiZDEzN2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Unexpected event while waiting for shutdown: NYql::NDq::TEvDqCompute::TEvChannelData 2025-06-25T14:57:10.306860Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519901175638723240:2927], TxId: 281474976715741, task: 5. Ctx: { TraceId : 01jyksfgjj17rgbf2zncfc0hmv. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=Mzc0ZjMyMDUtOTMxMDQyODktZDc3OGMxYTUtZmNiZDEzN2I=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519901175638723228:2921], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T14:57:10.307693Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Mzc0ZjMyMDUtOTMxMDQyODktZDc3OGMxYTUtZmNiZDEzN2I=, ActorId: [4:7519901175638723225:2921], ActorState: ExecuteState, TraceId: 01jyksfgjj17rgbf2zncfc0hmv, Create QueryResponse for error on request, msg: >> KqpIndexes::UpsertWithNullKeysComplex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandomSelect-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 27410, MsgBus: 19553 2025-06-25T14:54:15.744470Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900421442272299:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:15.745138Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009b9/r3tmp/tmpCAyKMY/pdisk_1.dat 2025-06-25T14:54:16.091730Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:16.092258Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900421442272269:2080] 1750863255741909 != 1750863255741912 TServer::EnableGrpc on GrpcPort 27410, node 1 2025-06-25T14:54:16.175121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:16.175145Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:16.175151Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:54:16.175280Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:54:16.186467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:16.186610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:16.188652Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19553 TClient is connected to server localhost:19553 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:54:16.762593Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:16.892384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:16.911254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:54:16.919200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:17.111175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:17.291509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:17.409898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:19.175182Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900438622143088:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:19.175297Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:19.452177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.480629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.510192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.583414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.640804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.675671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.708861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:19.773156Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900438622143741:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:19.773218Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:19.773656Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900438622143746:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:19.777748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:19.789585Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900438622143748:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:54:19.895817Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900438622143801:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:20.744813Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900421442272299:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:20.745291Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; UPSERT INTO [[["775c69a5-d522-4e2e-b156-e9fa84f7df07"]];[["8b60b980-e3f7-4bed-9890-f11cb99521ce"]]] [[["775c69a5-d522-4e2e-b156-e9fa84f7df07"]];[["8b60b980-e3f7-4bed-9890-f11cb99521ce"]]] [["81f072c1-54db-42e3-ad3f-eb6ae6e81809"];["8c0448a9-e416-4bac-9e90-845845bb3c4b"]] Trying to start YDB, gRPC: 1988, MsgBus: 18934 2025-06-25T14:5 ... xists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[20:7519901135595614369:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:01.065431Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009b9/r3tmp/tmpnDirXQ/pdisk_1.dat 2025-06-25T14:57:01.307235Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:01.307350Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:01.327654Z node 20 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:01.329323Z node 20 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(20, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:01.329735Z node 20 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [20:7519901135595614348:2080] 1750863421064474 != 1750863421064477 TServer::EnableGrpc on GrpcPort 4457, node 20 2025-06-25T14:57:01.433196Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:01.433219Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:01.433230Z node 20 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:01.433390Z node 20 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7987 2025-06-25T14:57:02.119927Z node 20 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7987 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:02.294986Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:02.305694Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:02.319631Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:02.407314Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:02.636992Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:02.752698Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:06.066923Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[20:7519901135595614369:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:06.067044Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:06.645681Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519901157070452481:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.645792Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.746184Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.827845Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.884510Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.940169Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.995193Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.117664Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.215566Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.324619Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519901161365420441:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:07.324758Z node 20 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:07.325407Z node 20 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [20:7519901161365420446:2437], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:07.331049Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:07.414081Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [20:7519901161365420448:2438], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:07.508436Z node 20 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [20:7519901161365420501:3436] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:09.650237Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) [[2u]] >> KqpIndexes::IndexOr [GOOD] >> KqpIndexes::IndexTopSortPushDown ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexSelectUsingScripting [GOOD] Test command err: Trying to start YDB, gRPC: 12536, MsgBus: 9223 2025-06-25T14:56:49.633562Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901083933902731:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:49.633674Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c37/r3tmp/tmp8wp0XD/pdisk_1.dat 2025-06-25T14:56:50.119701Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:50.161325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.161423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.165370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12536, node 1 2025-06-25T14:56:50.284897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.284924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.284930Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.285060Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:50.652042Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9223 TClient is connected to server localhost:9223 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.243690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:51.299185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.473626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.707151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.778591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:53.266265Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901101113773527:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.266393Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.582188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.616160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.647252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.715322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.770109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.817933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.877778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.956640Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901101113774196:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.956707Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.956757Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901101113774201:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.960354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:53.970156Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901101113774203:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.036147Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901105408741550:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:54.633640Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901083933902731:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:54.633702Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.160978Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901109703709121:3598], Recipient [1:7519901083933903033:2144]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.161020Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.161031Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:56:55.161066Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519901109703709117:3595], Recipient [1:751990108 ... Origin: 72075186224037922 State: 2 TxId: 281474976710672 Step: 0 Generation: 1 2025-06-25T14:57:12.197406Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 281474976710672:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-25T14:57:12.197450Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976710672:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7519901183570800144 RawX2: 4503612512274866 } Origin: 72075186224037922 State: 2 TxId: 281474976710672 Step: 0 Generation: 1 2025-06-25T14:57:12.197472Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710672:2, shardIdx: 72057594046644480:36, shard: 72075186224037922, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-25T14:57:12.197480Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976710672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:12.197490Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710672:2, datashard: 72075186224037922, at schemeshard: 72057594046644480 2025-06-25T14:57:12.197503Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710672:2 129 -> 240 2025-06-25T14:57:12.197566Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:12.197846Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, 
operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:12.197855Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.197920Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:12.197926Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.198108Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:12.198118Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.198128Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976710672:0 2025-06-25T14:57:12.198173Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901183570800143:2481] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-06-25T14:57:12.200057Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:12.200071Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.200080Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976710672:2 2025-06-25T14:57:12.200118Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901183570800144:2482] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-06-25T14:57:12.200177Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901157800994027:2146], Recipient [3:7519901157800994027:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:12.200193Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:12.200225Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:12.200241Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976710672:0 ProgressState 2025-06-25T14:57:12.200320Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:12.200333Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710672:0 progress is 2/3 2025-06-25T14:57:12.200343Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 2/3 2025-06-25T14:57:12.200355Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710672:0 
progress is 2/3 2025-06-25T14:57:12.200365Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 2/3 2025-06-25T14:57:12.200377Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710672, ready parts: 2/3, is published: true 2025-06-25T14:57:12.200527Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901183570800220:3660], Recipient [3:7519901157800994027:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.200541Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.200551Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:12.200581Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901157800994027:2146], Recipient [3:7519901157800994027:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:12.200593Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:12.200618Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:12.200631Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976710672:2 ProgressState 2025-06-25T14:57:12.200677Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:12.200685Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710672:2 progress is 3/3 2025-06-25T14:57:12.200692Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-06-25T14:57:12.200707Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710672:2 progress is 3/3 2025-06-25T14:57:12.200715Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-06-25T14:57:12.200725Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710672, ready parts: 3/3, is published: true 2025-06-25T14:57:12.200757Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519901183570800118:2479] message: TxId: 281474976710672 2025-06-25T14:57:12.200776Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710672 ready parts: 3/3 2025-06-25T14:57:12.200797Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710672:0 2025-06-25T14:57:12.200814Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710672:0 2025-06-25T14:57:12.200922Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 
72057594046644480, LocalPathId: 17] was 4 2025-06-25T14:57:12.200935Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710672:1 2025-06-25T14:57:12.200941Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710672:1 2025-06-25T14:57:12.200955Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-25T14:57:12.200962Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710672:2 2025-06-25T14:57:12.200968Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710672:2 2025-06-25T14:57:12.200993Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-25T14:57:12.201419Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.201460Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.201503Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901183570800118:2479] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710672 at schemeshard: 72057594046644480 2025-06-25T14:57:12.205778Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901183570800129:3596], Recipient [3:7519901157800994027:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.205807Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.205826Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:12.206327Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901183570800221:3661], Recipient [3:7519901157800994027:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.206342Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.206350Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 >> KqpUniqueIndex::InsertNullInFk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertFkDuplicate [GOOD] Test command err: Trying to start YDB, gRPC: 18521, MsgBus: 21974 2025-06-25T14:56:49.662437Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901082084830288:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:49.662708Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/001c3f/r3tmp/tmpc0zox0/pdisk_1.dat 2025-06-25T14:56:50.072749Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901082084830191:2080] 1750863409652324 != 1750863409652327 2025-06-25T14:56:50.092571Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:50.093874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.093940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.135744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18521, node 1 2025-06-25T14:56:50.284935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.284962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.284975Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.285102Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:50.665556Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21974 TClient is connected to server localhost:21974 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.274939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:51.300844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:51.317967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.454411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.654849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.755894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:53.065762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901099264701006:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.065847Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.582133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.625638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.656526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.687443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.721269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.801087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.841816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.911038Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901099264701666:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.911132Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.913822Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901099264701671:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.924419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:53.971838Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901099264701673:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.053128Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901103559669022:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:54.657942Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901082084830288:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:54.658020Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.162198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 68897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5326, node 3 2025-06-25T14:57:05.458070Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:05.458093Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:05.458101Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:05.458205Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25619 TClient is connected to server localhost:25619 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:06.009238Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:06.033545Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:06.121940Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:06.147136Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:06.343390Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:06.440197Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:08.779598Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901164320613684:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:08.779709Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:08.857622Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:08.901714Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:08.939993Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:08.990781Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.031242Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.105771Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.186331Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.266021Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901168615581642:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:09.266141Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:09.266387Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901168615581647:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:09.271093Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:09.286756Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901168615581649:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:09.340832Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901168615581702:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:10.148428Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901151435710209:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:10.148517Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:10.667273Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:12.300012Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7519901181500484843:2578], TxId: 281474976715677, task: 1. Ctx: { TraceId : 01jyksfj58d120fa9ytp6p40dh. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=ZTc0MDQ0YzctZGQ3MmZmYjItYjdiMjFiOTctNzE0N2NmMjU=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Duplicated keys found., code: 2012 }. 2025-06-25T14:57:12.300221Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7519901181500484845:2579], TxId: 281474976715677, task: 2. Ctx: { SessionId : ydb://session/3?node_id=3&id=ZTc0MDQ0YzctZGQ3MmZmYjItYjdiMjFiOTctNzE0N2NmMjU=. CustomerSuppliedId : . TraceId : 01jyksfj58d120fa9ytp6p40dh. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [3:7519901181500484840:2533], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-25T14:57:12.300537Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZTc0MDQ0YzctZGQ3MmZmYjItYjdiMjFiOTctNzE0N2NmMjU=, ActorId: [3:7519901172910550024:2533], ActorState: ExecuteState, TraceId: 01jyksfj58d120fa9ytp6p40dh, Create QueryResponse for error on request, msg: >> KqpIndexes::WriteWithParamsFieldOrder >> KqpIndexes::SecondaryIndexUsingInJoin-UseStreamJoin [GOOD] >> KqpMultishardIndex::DataColumnWriteNull >> KqpUniqueIndex::UpdateFkAlreadyExist [GOOD] >> KqpUniqueIndex::UpdateFkPkOverlap >> KqpPrefixedVectorIndexes::OrderByCosineLevel1-Nullable-UseSimilarity ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UpsertWithNullKeysComplex [GOOD] Test command err: Trying to start YDB, gRPC: 29050, MsgBus: 25285 2025-06-25T14:56:49.640640Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901081654795267:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:49.640738Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c4d/r3tmp/tmpzeaHkG/pdisk_1.dat 2025-06-25T14:56:50.050679Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:50.054312Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901081654795241:2080] 1750863409630981 != 1750863409630984 2025-06-25T14:56:50.085291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.085374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.086804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29050, node 1 2025-06-25T14:56:50.285053Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.285082Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.285090Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.285176Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25285 2025-06-25T14:56:50.675238Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25285 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.162639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:51.211818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:51.219708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.394752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.621108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.702130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.993143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901094539698757:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:52.993249Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.582483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.619421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.663372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.691742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.721028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.798055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.866760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.992985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901098834666721:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.993082Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.993303Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901098834666726:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.996188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.008236Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901098834666728:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.106172Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901103129634075:3416] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:54.640745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901081654795267:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:54.647577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.164589Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901107424601647:3593], Recipient [1:7519901081654795571:2147]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.164626Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:50 ... r txid 281474976715760:0 2025-06-25T14:57:12.205450Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 5 2025-06-25T14:57:12.205802Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.205855Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901152692920040:2179] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715760 at schemeshard: 72057594046644480 2025-06-25T14:57:12.205977Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124998, Sender [3:7519901152692920040:2179], Recipient [3:7519901152692920040:2179]: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715760 2025-06-25T14:57:12.206011Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5113: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletionResult 2025-06-25T14:57:12.206031Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976715760 2025-06-25T14:57:12.206050Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976715760 2025-06-25T14:57:12.206092Z node 3 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 281474976710678, txId# 281474976715760 2025-06-25T14:57:12.206149Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 281474976710678, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobal, IndexName: IndexName2, IndexColumn: IndexColumn2, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [3:7519901182757693661:2521], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976715757, LockTxStatus: StatusAccepted, LockTxDone: 1, 
InitiateTxId: 281474976715758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1750863432199, ApplyTxId: 281474976715759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976715760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 2, upload bytes: 85, read rows: 2, read bytes: 85 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976715760 2025-06-25T14:57:12.206195Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:12.206433Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.206491Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710678 Unlocking 2025-06-25T14:57:12.206535Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710678 Unlocking TBuildInfo{ IndexBuildId: 281474976710678, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobal, IndexName: IndexName2, IndexColumn: IndexColumn2, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [3:7519901182757693661:2521], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976715757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976715758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1750863432199, ApplyTxId: 281474976715759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976715760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 2, upload bytes: 85, read rows: 2, read bytes: 85 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:57:12.206547Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:12.206560Z node 3 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-25T14:57:12.206764Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.206808Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710678 Done 2025-06-25T14:57:12.206851Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976710678 Done TBuildInfo{ IndexBuildId: 281474976710678, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobal, IndexName: IndexName2, IndexColumn: IndexColumn2, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [3:7519901182757693661:2521], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, 
LockTxId: 281474976715757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976715758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1750863432199, ApplyTxId: 281474976715759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976715760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 2, upload bytes: 85, read rows: 2, read bytes: 85 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:57:12.206865Z node 3 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:333: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 281474976710678, subscribers count# 0 2025-06-25T14:57:12.206873Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:12.206894Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.234065Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [3:7519901182757693841:3886], Recipient [3:7519901152692920040:2179]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:12.234105Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:12.234119Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:57:12.234404Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274792450, Sender [3:7519901182757693838:2531], Recipient [3:7519901152692920040:2179]: NKikimrIndexBuilder.TEvGetRequest DatabaseName: "/Root" IndexBuildId: 281474976710678 2025-06-25T14:57:12.234432Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5090: StateWork, processing event TEvIndexBuilder::TEvGetRequest 2025-06-25T14:57:12.234535Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/Root" IndexBuildId: 281474976710678 2025-06-25T14:57:12.234726Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 281474976710678 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "IndexName2" index_columns: "IndexColumn2" global_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1750863432 } EndTime { seconds: 1750863432 } } 2025-06-25T14:57:12.234745Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:12.234808Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.234922Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901182757693838:2531] msg type: 274792451 msg: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 281474976710678 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "IndexName2" index_columns: "IndexColumn2" global_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1750863432 } EndTime { seconds: 1750863432 } 
} at schemeshard: 72057594046644480 2025-06-25T14:57:12.235579Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901182757693841:3886], Recipient [3:7519901152692920040:2179]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.235599Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.235610Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:12.660514Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901152692920040:2179]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:12.660557Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:12.660605Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901152692920040:2179], Recipient [3:7519901152692920040:2179]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:12.660620Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:13.519809Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:57:13.550438Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:57:13.663191Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901152692920040:2179]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:13.663234Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:13.663274Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901152692920040:2179], Recipient [3:7519901152692920040:2179]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:13.663290Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpUniqueIndex::UpsertExplicitNullInComplexFk ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertNullInFk [GOOD] Test command err: Trying to start YDB, gRPC: 29318, MsgBus: 18946 2025-06-25T14:56:49.633810Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901085212127143:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:49.633955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c6b/r3tmp/tmpldyGiH/pdisk_1.dat 2025-06-25T14:56:50.091320Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:50.096624Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: 
Notification cookie mismatch for subscription [1:7519901085212127124:2080] 1750863409632965 != 1750863409632968 2025-06-25T14:56:50.137268Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.137359Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.144275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29318, node 1 2025-06-25T14:56:50.285154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.285202Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.285211Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.285310Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:50.672422Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18946 TClient is connected to server localhost:18946 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.187658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:51.211569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:51.228669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:51.438799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.621137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.718844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:53.062306Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901102391997946:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.062452Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.584911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.654216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.694087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.721565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.742811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.774244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.808647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.897452Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901102391998603:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.897576Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.898150Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901102391998608:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.924372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:53.936918Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901102391998610:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.033324Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901106686965957:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:54.634360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901085212127143:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:54.634449Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.138998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 245Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MjQ4MDUyODgtZjg0ZmFmZmQtYTA1MDFiZmUtMzdmY2RmODU=, ActorId: [2:7519901141690912299:2531], ActorState: ExecuteState, TraceId: 01jyksfb5pcjp2krze8rtd9nct, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 7184, MsgBus: 11702 2025-06-25T14:57:06.056161Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901155989021097:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:06.064857Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c6b/r3tmp/tmpKYwt41/pdisk_1.dat 2025-06-25T14:57:06.218531Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:06.232909Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:06.232985Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:06.234573Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7184, node 3 2025-06-25T14:57:06.296620Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:06.296643Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:06.296652Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:06.296771Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11702 TClient is connected to server localhost:11702 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:06.935592Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:06.947394Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:07.030005Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:07.132856Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:07.214804Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:07.296294Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:09.822934Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901168873924551:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:09.823038Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:09.888605Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.924217Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.967351Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.012370Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.101849Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.156354Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.213635Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.300578Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901173168892505:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:10.300669Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:10.304851Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901173168892510:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:10.309400Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:10.336818Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901173168892512:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:10.415424Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901173168892563:3411] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:11.060412Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901155989021097:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:11.060486Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:11.699822Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> KqpMultishardIndex::YqWorksFineAfterAlterIndexTableDirectly ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexUsingInJoin-UseStreamJoin [GOOD] Test command err: Trying to start YDB, gRPC: 4251, MsgBus: 17009 2025-06-25T14:56:49.631310Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901083768972086:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:49.631378Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c5f/r3tmp/tmpGrgxyu/pdisk_1.dat 2025-06-25T14:56:50.137201Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:50.138880Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901083768972060:2080] 1750863409623781 != 1750863409623784 2025-06-25T14:56:50.144642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.144734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.146177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4251, node 1 2025-06-25T14:56:50.284045Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.284075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.284087Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.284201Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:50.659880Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17009 TClient is connected to server localhost:17009 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.206348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:51.228829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:51.240002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.406657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.591473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.743561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:53.184627Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100948842883:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.184725Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.582803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.657312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.706630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.772456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.822387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.904131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.942381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.005622Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901105243810840:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.005711Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.006023Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901105243810845:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.009345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.019096Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901105243810847:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.085071Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901105243810898:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:54.632506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901083768972086:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:54.643118Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.155538Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901109538778472:3596], Recipient [1:7519901083768972385:2144]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.155578Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052 ... ed 2025-06-25T14:57:11.104725Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901178909224936 RawX2: 4503612512274870 } Origin: 72075186224037924 State: 2 TxId: 281474976715673 Step: 0 Generation: 1 2025-06-25T14:57:11.104740Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 281474976715673, tablet: 72075186224037924, partId: 0 2025-06-25T14:57:11.104821Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976715673:0, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901178909224936 RawX2: 4503612512274870 } Origin: 72075186224037924 State: 2 TxId: 281474976715673 Step: 0 Generation: 1 2025-06-25T14:57:11.104864Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 281474976715673:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-25T14:57:11.104917Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976715673:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7519901178909224936 RawX2: 4503612512274870 } Origin: 72075186224037924 State: 2 TxId: 281474976715673 Step: 0 Generation: 1 2025-06-25T14:57:11.104949Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715673:0, shardIdx: 72057594046644480:37, shard: 72075186224037924, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-25T14:57:11.104969Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-25T14:57:11.104984Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715673:0, datashard: 72075186224037924, at schemeshard: 72057594046644480 2025-06-25T14:57:11.104999Z node 3 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715673:0 129 -> 240 2025-06-25T14:57:11.105090Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:11.105400Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:11.105549Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-25T14:57:11.105557Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:11.105568Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715673:0 2025-06-25T14:57:11.105608Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901178909224936:2486] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715673 at schemeshard: 72057594046644480 2025-06-25T14:57:11.105680Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901153139418728:2147], Recipient [3:7519901153139418728:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:11.105693Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:11.105764Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-25T14:57:11.105785Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715673:0 ProgressState 2025-06-25T14:57:11.105844Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:11.105856Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715673:0 progress is 3/3 2025-06-25T14:57:11.105866Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 3/3 2025-06-25T14:57:11.105880Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715673:0 progress is 3/3 2025-06-25T14:57:11.105888Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 3/3 2025-06-25T14:57:11.105901Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715673, ready parts: 3/3, is published: true 2025-06-25T14:57:11.105937Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519901178909224903:2483] message: TxId: 281474976715673 2025-06-25T14:57:11.105955Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715673 ready parts: 3/3 2025-06-25T14:57:11.105974Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715673:0 
2025-06-25T14:57:11.106003Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715673:0 2025-06-25T14:57:11.106125Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 4 2025-06-25T14:57:11.106142Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715673:1 2025-06-25T14:57:11.106147Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715673:1 2025-06-25T14:57:11.106160Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 21] was 3 2025-06-25T14:57:11.106166Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715673:2 2025-06-25T14:57:11.106170Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715673:2 2025-06-25T14:57:11.106195Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 3 2025-06-25T14:57:11.106522Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901178909225004:3740], Recipient [3:7519901153139418728:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:11.106555Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:11.106584Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:11.106718Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:11.106779Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901178909224903:2483] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715673 at schemeshard: 72057594046644480 2025-06-25T14:57:11.107275Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901178909224913:3677], Recipient [3:7519901153139418728:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:11.107288Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:11.107294Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:11.108646Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901178909225007:3743], Recipient [3:7519901153139418728:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:11.108675Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:11.108691Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:11.590481Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: 
StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901153139418728:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:11.590521Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:11.590554Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901153139418728:2147], Recipient [3:7519901153139418728:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:11.590565Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:12.596544Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901153139418728:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:12.596585Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:12.596624Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901153139418728:2147], Recipient [3:7519901153139418728:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:12.596639Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:13.596699Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901153139418728:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:13.596739Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:13.596783Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901153139418728:2147], Recipient [3:7519901153139418728:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:13.596799Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpIndexes::UpsertMultipleUniqIndexes >> KqpIndexes::NullInIndexTableNoDataRead >> KqpIndexMetadata::HandleWriteOnlyIndex [GOOD] >> KqpMultishardIndex::DataColumnUpsertMixedSemantic >> KqpUniqueIndex::UpdateOnHidenChanges+DataColumn >> KqpIndexes::DoUpsertWithoutIndexUpdate-UniqIndex+UseSink [GOOD] >> KqpIndexes::DuplicateUpsertInterleave >> KqpJoinOrder::Chain65Nodes [FAIL] >> KqpMultishardIndex::SortByPk [GOOD] >> KqpIndexes::SelectConcurentTX2 [GOOD] >> KqpIndexes::SecondaryIndexWithPrimaryKeySameComulns-UseSink >> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex-UseSink >> KqpIndexes::ExplainCollectFullDiagnostics [GOOD] >> KqpIndexes::DuplicateUpsertInterleaveParams+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexMetadata::HandleWriteOnlyIndex [GOOD] Test command err: Trying to start YDB, gRPC: 25353, MsgBus: 13650 2025-06-25T14:56:50.172507Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901087800924177:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:50.172737Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c2f/r3tmp/tmpUE6EJn/pdisk_1.dat 2025-06-25T14:56:50.687613Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901087800924015:2080] 1750863410121851 != 1750863410121854 2025-06-25T14:56:50.695769Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:50.709722Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.709833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.717538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25353, node 1 2025-06-25T14:56:50.873041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.873075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.873083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.873218Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:51.120045Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13650 TClient is connected to server localhost:13650 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.678041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:51.715149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.844735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:52.036900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:52.099657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:53.713916Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100685827531:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.713994Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.046327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.077873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.107969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.137405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.214823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.251159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.286712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.357939Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901104980795487:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.358041Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.359749Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901104980795492:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.363485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.376041Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901104980795494:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.441245Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901104980795545:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:55.165282Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901087800924177:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:55.165409Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.395760Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901109275763112:3597], Recipient [1:7519901087800924332:2143]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.395803Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.395814Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: ... 94046644480 2025-06-25T14:57:12.494327Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901155460393887:2146], Recipient [3:7519901155460393887:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:12.494342Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:12.494503Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:12.494524Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-06-25T14:57:12.494587Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:12.494598Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:0 progress is 2/3 2025-06-25T14:57:12.494610Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-25T14:57:12.494624Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:0 progress is 2/3 2025-06-25T14:57:12.494633Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-25T14:57:12.494647Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-06-25T14:57:12.494796Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:12.494803Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 
2025-06-25T14:57:12.494810Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715672:2 2025-06-25T14:57:12.494845Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901181230199985:2477] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:12.494911Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901155460393887:2146], Recipient [3:7519901155460393887:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:12.494923Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:12.494947Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:12.494959Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-06-25T14:57:12.495016Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:12.495024Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-25T14:57:12.495031Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:12.495044Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-25T14:57:12.495051Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:12.495059Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-06-25T14:57:12.495092Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519901181230199951:2473] message: TxId: 281474976715672 2025-06-25T14:57:12.495109Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:12.495131Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-25T14:57:12.495141Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:0 2025-06-25T14:57:12.495236Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-25T14:57:12.495250Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-25T14:57:12.495255Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:1 2025-06-25T14:57:12.495267Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId 
[OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-25T14:57:12.495273Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-25T14:57:12.495278Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:2 2025-06-25T14:57:12.495302Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-25T14:57:12.495739Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.495779Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:12.495822Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901181230199951:2473] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:12.495974Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901181230199961:3596], Recipient [3:7519901155460393887:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.495989Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.495998Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:12.496833Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901181230200054:3663], Recipient [3:7519901155460393887:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.496849Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.496858Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:12.497549Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901181230200056:3665], Recipient [3:7519901155460393887:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.497564Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:12.497572Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:12.996976Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901155460393887:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:12.997012Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:12.997054Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901155460393887:2146], Recipient [3:7519901155460393887:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:12.997070Z 
node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:13.998422Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901155460393887:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:13.998464Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:13.998514Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901155460393887:2146], Recipient [3:7519901155460393887:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:13.998535Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:15.002778Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901155460393887:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:15.002826Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:15.002875Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901155460393887:2146], Recipient [3:7519901155460393887:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:15.002891Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:16.004513Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901155460393887:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:16.004556Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:16.004604Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901155460393887:2146], Recipient [3:7519901155460393887:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:16.004621Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpIndexes::CreateTableWithImplicitSyncIndexSQL [GOOD] >> KqpIndexes::CreateTableWithExplicitSyncIndexSQL >> KqpIndexes::UniqIndexComplexPkComplexFkOverlap [GOOD] >> KqpIndexes::UniqAndNoUniqSecondaryIndex ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::SortByPk [GOOD] Test command err: Trying to start YDB, gRPC: 10535, MsgBus: 7429 2025-06-25T14:56:50.708812Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901087016853438:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:50.709078Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c09/r3tmp/tmpNaTDQE/pdisk_1.dat 2025-06-25T14:56:51.186205Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10535, node 1 2025-06-25T14:56:51.204814Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:51.204888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:51.209099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:51.404899Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:51.404922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:51.404931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:51.405037Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7429 2025-06-25T14:56:51.739440Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7429 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:52.210978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:52.244405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:52.259796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.425867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.611863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.715952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:54.163323Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901104196724139:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.163428Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.448620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.491624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.527410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.562242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.595081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.668768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.750337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.803177Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901104196724807:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.803263Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.803470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901104196724812:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.807119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.818127Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901104196724814:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.902700Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901104196724867:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:55.708801Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901087016853438:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:55.708859Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.862300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 13936, MsgBus: 15981 2025-06-25T14:56:59.321068Z node 2 :METADATA_PROVIDER WARN: log. ... PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901169179963621:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:09.316641Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c09/r3tmp/tmp3Dh8Tn/pdisk_1.dat 2025-06-25T14:57:09.475382Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901169179963601:2080] 1750863429315755 != 1750863429315758 2025-06-25T14:57:09.488340Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:09.490077Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:09.490155Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:09.492006Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9603, node 3 2025-06-25T14:57:09.572875Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:09.572899Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:09.572909Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:09.573042Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12456 TClient is connected to server localhost:12456 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:10.238980Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:10.252444Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:10.259267Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:10.400616Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:10.441593Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:10.623892Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:10.718072Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:13.192494Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901186359834409:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:13.192609Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:13.250550Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:13.329421Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:13.367332Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:13.442367Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:13.476866Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:13.553406Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:13.594736Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:13.709030Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901186359835080:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:13.709112Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:13.709276Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901186359835085:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:13.712756Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:13.725335Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901186359835087:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:13.826218Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901186359835138:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:14.320412Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901169179963621:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:14.320487Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:14.899754Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink+UseDataQuery [GOOD] >> KqpIndexes::SelectFromAsyncIndexedTable >> KqpVectorIndexes::CoveredVectorIndexWithFollowers+StaleRO [GOOD] >> KqpVectorIndexes::CoveredVectorIndexWithFollowers-StaleRO >> KqpVectorIndexes::OrderByCosineLevel1-Nullable-UseSimilarity [GOOD] >> KqpVectorIndexes::OrderByCosineLevel2+Nullable+UseSimilarity ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandomUpsertReturning+UseSink+UseDataQuery [GOOD] Test command err: Trying to start YDB, gRPC: 28940, MsgBus: 24463 2025-06-25T14:52:43.915411Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900025397834644:2217];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:43.919695Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a7d/r3tmp/tmpSaNXcP/pdisk_1.dat 2025-06-25T14:52:44.326565Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:52:44.355705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:44.355820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:44.357254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28940, node 1 2025-06-25T14:52:44.507166Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:44.507189Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:44.507199Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:44.507294Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:52:44.933755Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24463 TClient is connected to server localhost:24463 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:45.352889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:45.395747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:45.585873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:45.759390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:45.854621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:52:47.017118Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900042577705297:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.017212Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.602032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.644908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.686456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.717934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.760900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.837288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.878598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:47.959683Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900042577705964:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.959775Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.960072Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900042577705969:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:47.963773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:47.975256Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900042577705971:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:48.056977Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900046872673322:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:48.909601Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900025397834644:2217];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:48.909668Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:52:49.124443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:49.171254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /ho ... #72057594037968897 Node(28, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:08.575803Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:08.582425Z node 28 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(28, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:08.583448Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:57:08.592729Z node 28 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:08.593620Z node 28 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [28:7519901164853677002:2080] 1750863428309980 != 1750863428309983 TServer::EnableGrpc on GrpcPort 6064, node 28 2025-06-25T14:57:08.695510Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:08.695540Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:08.695554Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:08.695752Z node 28 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22581 TClient is connected to server localhost:22581 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:57:09.339340Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:57:09.363997Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.367507Z node 28 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:57:09.455433Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:09.745640Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:57:09.861805Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.295815Z node 28 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [28:7519901190623482425:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:14.295962Z node 28 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:14.419458Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.474948Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.563115Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.619061Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.671265Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.753695Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.812433Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.915823Z node 28 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [28:7519901190623483104:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:14.915981Z node 28 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:14.916219Z node 28 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [28:7519901190623483109:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:14.922992Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:14.946769Z node 28 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [28:7519901190623483111:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:15.004889Z node 28 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [28:7519901190623483162:3433] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:17.038050Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:17.110689Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:17.175831Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) [[["cdd857c3-895b-4d06-8253-d0a06e5c27b8"]]] [[["cdd857c3-895b-4d06-8253-d0a06e5c27b8"]]] >> KqpIndexes::WriteWithParamsFieldOrder [GOOD] >> KqpIndexes::UpsertWithoutExtraNullDelete-UseSink >> KqpIndexes::SecondaryIndexWithPrimaryKeySameComulns+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::Chain65Nodes [FAIL] Test command err: Trying to start YDB, gRPC: 3185, MsgBus: 31179 2025-06-25T14:50:20.192498Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899411011764824:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:20.192540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d25/r3tmp/tmp5aIJrs/pdisk_1.dat 2025-06-25T14:50:20.694653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:50:20.694744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:50:20.707327Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:50:20.810868Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3185, node 1 2025-06-25T14:50:21.009136Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:50:21.009154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from 
file: (empty maybe) 2025-06-25T14:50:21.009165Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:50:21.009263Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:50:21.084395Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31179 TClient is connected to server localhost:31179 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:50:21.989182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:50:22.047194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:50:24.338065Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899428191634437:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:24.338152Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:24.595966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:24.792641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899428191634540:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:24.792725Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:24.805085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:24.892677Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899428191634615:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:24.892760Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:24.918676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:25.008750Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899432486601986:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:25.008817Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:25.027550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:25.096445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899432486602062:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:25.096530Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:25.109799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:25.194030Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899432486602136:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:25.194108Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899411011764824:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:50:25.194152Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:25.194213Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:50:25.210563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:25.300712Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899432486602214:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:25.300774Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:25.307580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:25.388619Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899432486602291:2358], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:25.388709Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have acc ... /ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:29.714016Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899449666475353:2829], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:29.714095Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:29.747212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710716:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:29.840915Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899449666475431:2838], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:29.840990Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:29.846178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710717:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:29.920888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899449666475507:2847], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:29.920952Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:29.934782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710718:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:29.995511Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899449666475583:2856], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:29.995578Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:30.009330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710719:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:30.046277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899453961442954:2865], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:30.046345Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:30.062347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710720:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:30.114356Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899453961443031:2874], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:30.114421Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:30.129028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710721:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:30.188079Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899453961443107:2883], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:30.188182Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:30.196714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710722:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:50:30.278862Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899453961443188:2894], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:30.278949Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:30.279416Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899453961443193:2897], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:50:30.283760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710723:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:50:30.297453Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899453961443195:2898], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710723 completed, doublechecking } 2025-06-25T14:50:30.381455Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899453961443249:5498] txid# 281474976710724, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 70], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:50:35.691131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:50:35.691170Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:30.274882Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MzNhODIzZi0yZWIzMGM1MS1jMzVmODllOC1hOTI3NDVlNA==, ActorId: [1:7519899428191634409:2289], ActorState: ExecuteState, TraceId: 01jyks3a06e06p3ngmg35ywt45, Create QueryResponse for error on request, msg:
: Error: Request timeout 300000ms exceeded
: Error: Cancelling after 299996ms during compilation assertion failed at ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:387, void NKikimr::NKqp::TChainTester::JoinTables(): (result.GetStatus() == EStatus::SUCCESS) failed: (TIMEOUT != SUCCESS) , with diff: (TIM|SUCC)E(OUT|SS) 0. /-S/util/system/backtrace.cpp:284: ?? @ 0x19F5FD3B 1. /tmp//-S/library/cpp/testing/unittest/registar.cpp:46: RaiseError @ 0x1A4319EF 2. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:387: JoinTables @ 0x19B5BAFB 3. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:333: Test @ 0x19B0F900 4. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:554: Execute_ @ 0x19B0F900 5. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:552: operator() @ 0x19B58147 6. /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:149: __invoke<(lambda at /-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:552:1) &> @ 0x19B58147 7. /-S/contrib/libs/cxxsupp/libcxx/include/__type_traits/invoke.h:224: __call<(lambda at /-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:552:1) &> @ 0x19B58147 8. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:169: operator() @ 0x19B58147 9. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:314: operator() @ 0x19B58147 10. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:431: operator() @ 0x1A468BD5 11. /-S/contrib/libs/cxxsupp/libcxx/include/__functional/function.h:990: operator() @ 0x1A468BD5 12. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:525: Run @ 0x1A468BD5 13. /tmp//-S/library/cpp/testing/unittest/registar.cpp:373: Run @ 0x1A438578 14. /tmp//-S/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp:552: Execute @ 0x19B57313 15. /tmp//-S/library/cpp/testing/unittest/registar.cpp:494: Execute @ 0x1A439E45 16. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:872: RunMain @ 0x1A46314C 17. ??:0: ?? @ 0x7F0865D66D8F 18. ??:0: ?? @ 0x7F0865D66E3F 19. ??:0: ?? 
@ 0x16F80028 >> KqpMultishardIndex::YqWorksFineAfterAlterIndexTableDirectly [GOOD] >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix+Nullable-Covered >> KqpMultishardIndex::DataColumnWriteNull [GOOD] >> KqpMultishardIndex::DataColumnWrite-UseSink >> TPersQueueTest::TxCounters [GOOD] >> YdbTableSplit::SplitByLoadWithReadsMultipleSplitsWithData [GOOD] >> BasicUsage::WriteSessionSwitchDatabases [GOOD] >> KqpIndexes::NullInIndexTableNoDataRead [GOOD] >> KqpIndexes::SecondaryIndexOrderBy >> KqpIndexes::DuplicateUpsertInterleave [GOOD] >> KqpIndexes::IndexTopSortPushDown [GOOD] >> KqpUniqueIndex::UpsertExplicitNullInComplexFk [GOOD] >> KqpUniqueIndex::UpdateOnNullInComplexFk >> KqpIndexes::CreateTableWithExplicitSyncIndexSQL [GOOD] >> KqpIndexes::DeleteByIndex >> KqpMultishardIndex::DataColumnUpsertMixedSemantic [GOOD] >> KqpMultishardIndex::DataColumnWrite+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/unittest >> TPersQueueTest::TxCounters [GOOD] Test command err: === Server->StartServer(false); 2025-06-25T14:51:25.924400Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899693754164042:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:51:25.925367Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:25.983934Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519899692796553531:2155];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000b73/r3tmp/tmpYyyju5/pdisk_1.dat 2025-06-25T14:51:26.163397Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:26.173296Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:51:26.195045Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:51:26.322119Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:26.363826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:26.363925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:26.380974Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:51:26.382225Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4816, node 1 2025-06-25T14:51:26.391146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:51:26.391195Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:51:26.403176Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:51:26.456328Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000b73/r3tmp/yandexSlnlTh.tmp 2025-06-25T14:51:26.456363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000b73/r3tmp/yandexSlnlTh.tmp 2025-06-25T14:51:26.456861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000b73/r3tmp/yandexSlnlTh.tmp 2025-06-25T14:51:26.457015Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:51:26.525927Z INFO: TTestServer started on Port 23334 GrpcPort 4816 TClient is connected to server localhost:23334 PQClient connected to localhost:4816 === TenantModeEnabled() = 0 === Init PQ - start server on port 4816 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:51:26.934352Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:26.982952Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:51:27.007209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:51:27.007399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:27.007582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T14:51:27.007605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T14:51:27.007818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:51:27.007876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:27.010333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T14:51:27.010514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:51:27.010621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:27.010654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T14:51:27.010680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-25T14:51:27.010698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 2 -> 3 2025-06-25T14:51:27.012952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:27.013005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 
281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:51:27.013047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 3 -> 128 2025-06-25T14:51:27.014465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:51:27.014495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:51:27.014512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:51:27.014556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-25T14:51:27.018652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:27.019771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:51:27.019786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: true 2025-06-25T14:51:27.019818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:51:27.020232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976715657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715657 msg type: 269090816 2025-06-25T14:51:27.020369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976715657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:51:27.022657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750863087064, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:51:27.022803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750863087064 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:51:27.022825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T14:51:27.023046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 128 -> 240 2025-06-25T14:51:27.023073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976715657:0, at tablet# 
72057594046644480 2025-06-25T14:51:27.023238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:51:27.023281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPa ... nected; active server actors: 1 2025-06-25T14:57:22.627310Z node 32 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][topic] pipe [32:7519901223520403541:2455] disconnected no session 2025-06-25T14:57:22.636541Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: 123|27f5e45b-92666960-276e8eb6-ce460782_0 grpc read done: success: 0 data: 2025-06-25T14:57:22.636579Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: 123|27f5e45b-92666960-276e8eb6-ce460782_0 grpc read failed 2025-06-25T14:57:22.636628Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: 123|27f5e45b-92666960-276e8eb6-ce460782_0 grpc closed 2025-06-25T14:57:22.636651Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: 123|27f5e45b-92666960-276e8eb6-ce460782_0 is DEAD 2025-06-25T14:57:22.637798Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:57:22.638801Z node 32 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-25T14:57:22.638833Z node 32 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 3 2025-06-25T14:57:22.639357Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: grpc read done: success: 1 data: init_request { path: "topic" producer_id: "123" partition_with_generation { generation: 1 } } 2025-06-25T14:57:22.639519Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 3 path: "topic" producer_id: "123" partition_with_generation { generation: 1 } from ipv6:[::1]:58484 2025-06-25T14:57:22.639546Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=3 sessionId= userAgent="topic server" ip=ipv6:[::1]:58484 proto=topic topic=topic durationSec=0 2025-06-25T14:57:22.639558Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T14:57:22.639598Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:475: session to partition: 0, generation: 1 2025-06-25T14:57:22.641246Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 3 sessionId: describe result for acl check 2025-06-25T14:57:22.641502Z node 32 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-25T14:57:22.641529Z node 32 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` 
(Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T14:57:22.641549Z node 32 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-25T14:57:22.641572Z node 32 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [32:7519901223520403546:2457] (SourceId=123, PreferedPartition=0) ReplyResult: Partition=0, SeqNo=0 2025-06-25T14:57:22.641593Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 3 sessionId: partition: 0 expectedGeneration: 1 2025-06-25T14:57:22.642498Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 32, Generation: 1 2025-06-25T14:57:22.642767Z node 32 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 123|821f7563-d625d41c-e9dcd22-844bfa74_0 generated for partition 0 topic 'topic' owner 123 2025-06-25T14:57:22.643327Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 3 partition: 0 MaxSeqNo: 0 sessionId: 123|821f7563-d625d41c-e9dcd22-844bfa74_0 2025-06-25T14:57:22.652874Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|821f7563-d625d41c-e9dcd22-844bfa74_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-25T14:57:22.653342Z node 32 :PQ_WRITE_PROXY INFO: writer.cpp:263: TPartitionWriter 72075186224037892 (partition=0) Start of a request to KQP for a WriteId. SessionId: ydb://session/3?node_id=32&id=Mjg2OGFmMmYtN2JiMzcyZDMtOTJlZTk2OTQtNWM5NTRmZjA= TxId: 01jyksfweq2dmt3yrddsj9mzdw 2025-06-25T14:57:22.654711Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 32, Generation: 1 2025-06-25T14:57:22.655082Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|821f7563-d625d41c-e9dcd22-844bfa74_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-25T14:57:22.655469Z node 32 :PQ_WRITE_PROXY INFO: writer.cpp:283: TPartitionWriter 72075186224037892 (partition=0) End of the request to KQP for the WriteId. 
SessionId: ydb://session/3?node_id=32&id=Mjg2OGFmMmYtN2JiMzcyZDMtOTJlZTk2OTQtNWM5NTRmZjA= TxId: 01jyksfweq2dmt3yrddsj9mzdw 2025-06-25T14:57:22.655508Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:301: SessionId: ydb://session/3?node_id=32&id=Mjg2OGFmMmYtN2JiMzcyZDMtOTJlZTk2OTQtNWM5NTRmZjA= TxId: 01jyksfweq2dmt3yrddsj9mzdw WriteId: {32, 281474976715673} 2025-06-25T14:57:22.655661Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|821f7563-d625d41c-e9dcd22-844bfa74_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-25T14:57:22.655945Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|821f7563-d625d41c-e9dcd22-844bfa74_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-25T14:57:22.659439Z node 32 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72075186224037892, Partition: {0, {32, 281474976715673}, 100000}, State: StateInit] bootstrapping {0, {32, 281474976715673}, 100000} [32:7519901223520403558:2459] 2025-06-25T14:57:22.662592Z node 32 :PERSQUEUE INFO: partition_init.cpp:895: [topic:{0, {32, 281474976715673}, 100000}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:57:22.662674Z node 32 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72075186224037892, Partition: {0, {32, 281474976715673}, 100000}, State: StateInit] init complete for topic 'topic' partition {0, {32, 281474976715673}, 100000} generation 1 [32:7519901223520403558:2459] 2025-06-25T14:57:22.663075Z node 32 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 123|915b5124-bcac7c31-2e3cbcf7-317b0e19_0 generated for partition {0, {32, 281474976715673}, 100000} topic 'topic' owner 123 2025-06-25T14:57:22.663334Z node 32 :PQ_WRITE_PROXY INFO: writer.cpp:422: TPartitionWriter 72075186224037892 (partition=0) Start of a request to KQP to save PartitionId. SessionId: ydb://session/3?node_id=32&id=Mjg2OGFmMmYtN2JiMzcyZDMtOTJlZTk2OTQtNWM5NTRmZjA= TxId: 01jyksfweq2dmt3yrddsj9mzdw 2025-06-25T14:57:22.664923Z node 32 :PQ_WRITE_PROXY INFO: writer.cpp:431: TPartitionWriter 72075186224037892 (partition=0) End of a request to KQP to save PartitionId. 
SessionId: ydb://session/3?node_id=32&id=Mjg2OGFmMmYtN2JiMzcyZDMtOTJlZTk2OTQtNWM5NTRmZjA= TxId: 01jyksfweq2dmt3yrddsj9mzdw 2025-06-25T14:57:22.666043Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:57:22.666124Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:57:22.666155Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:57:22.666183Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:57:22.673979Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:57:22.676167Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:57:22.676193Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:57:22.676213Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:57:22.754657Z node 32 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: 123|821f7563-d625d41c-e9dcd22-844bfa74_0 grpc read done: success: 0 data: 2025-06-25T14:57:22.754703Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 3 sessionId: 123|821f7563-d625d41c-e9dcd22-844bfa74_0 grpc read failed 2025-06-25T14:57:22.754757Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 3 sessionId: 123|821f7563-d625d41c-e9dcd22-844bfa74_0 grpc closed 2025-06-25T14:57:22.754782Z node 32 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: 123|821f7563-d625d41c-e9dcd22-844bfa74_0 is DEAD 2025-06-25T14:57:22.756051Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:57:22.756113Z node 32 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:57:22.767802Z node 32 :PERSQUEUE WARN: pq_impl.cpp:4269: [PQ: 72075186224037892] Unknown transaction 281474976715674 Counters: ================================
name=api.grpc.topic.stream_write.bytes: 20796
name=api.grpc.topic.stream_write.messages: 4
name=topic.compaction.lag_milliseconds_max: 0
name=topic.compaction.unprocessed_bytes_max: 20958
name=topic.compaction.unprocessed_count_max: 2
name=topic.write.bytes: 20796
name=topic.write.discarded_bytes: 0
name=topic.write.discarded_messages: 0
name=topic.write.messages: 4
name=topic.write.uncompressed_bytes: 16
name=topic.write.lag_milliseconds:
    bin=100: 0
    bin=1000: 0
    bin=10000: 0
    bin=180000: 0
    bin=200: 0
    bin=2000: 3
    bin=30000: 0
    bin=500: 0
    bin=5000: 1
    bin=60000: 0
    bin=999999: 0
name=topic.write.message_size_bytes:
    bin=1024: 1
    bin=10240: 2
    bin=102400: 0
    bin=1048576: 0
    bin=10485760: 0
    bin=20480: 1
    bin=204800: 0
    bin=2097152: 0
    bin=5120: 0
    bin=51200: 0
    bin=524288: 0
    bin=5242880: 0
    bin=67108864: 0
    bin=99999999: 0
name=topic.write.partition_throttled_milliseconds:
    bin=0: 4
    bin=1: 0
    bin=10: 0
    bin=100: 0
    bin=1000: 0
    bin=10000: 0
    bin=20: 0
    bin=2500: 0
    bin=5: 0
    bin=50: 0
    bin=500: 0
    bin=5000: 0
    bin=999999: 0
------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithReadsMultipleSplitsWithData [GOOD] Test command err: 2025-06-25T14:56:33.700701Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901014944273202:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:33.700748Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001379/r3tmp/tmpmfDLBk/pdisk_1.dat 2025-06-25T14:56:34.120262Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:34.135972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:34.136034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:34.141510Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19139, node 1 2025-06-25T14:56:34.322653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:34.322676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:34.322684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:34.322821Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21739 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:56:34.711143Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:34.805790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
TClient is connected to server localhost:21739 2025-06-25T14:56:36.176416Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176080:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.176520Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.613591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:36.790208Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176295:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.790296Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.790564Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176313:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.790603Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176314:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.790616Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176315:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.790654Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176316:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.790666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176317:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.791962Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176333:2356], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.791973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176339:2358], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.792028Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176341:2360], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.792042Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.792614Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176351:2363], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.793749Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176379:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.793771Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176381:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.793787Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.794647Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176397:2373], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.794671Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.794672Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901027829176399:2375], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.796989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715666:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:36.803827Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901027829176331:2757] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:56:36.804036Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901027829176335:2760] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:56:36.804168Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901027829176336:2761] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:56:36.804412Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:751 ... 5. Ctx: { TraceId: 01jyksfvdz2gyhb046h4ehpzzw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjYxNzliZGQtMzJlODNiMC0zZjVjOGM4My01NWMwYWQ3YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.348453Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734716. Ctx: { TraceId: 01jyksfvdz7n7pmgzcp7bjb5sv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjZhYjMxZjgtN2UzMmM3ZGEtZWJkMDg0MDItN2M5NDZhMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.350702Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734717. Ctx: { TraceId: 01jyksfvdz1semxh6q952vv54j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTExYjA0NTUtNjMwN2U0NTAtZDNjMmVjYTMtYmI2YmRhNDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.351060Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734718. Ctx: { TraceId: 01jyksfvdz109gqt9yk7tenr0f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmViNDcxNWEtYTQ1ZDk5N2MtYmEwNWYwOTktMTY3YTExNDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.352548Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734719. Ctx: { TraceId: 01jyksfve49m9ya8kzdb011zxx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yzg4ZTRiZjAtNDgxZDc0MGItMmE1OWUzMjYtNTliZTU1Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.357867Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734720. 
Ctx: { TraceId: 01jyksfve8aypk6vtz1yaag4fe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWEyN2ViNTMtYTdjMzA4ZTctYWY3YzM1MjUtZTI2Y2Q4M2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.357903Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734721. Ctx: { TraceId: 01jyksfve86ma1kc6y6738cpq8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjZmNzAwZGItOTQ0MzY4NmUtYzA3NDJiNzMtZDdkMjk2ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.358421Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734722. Ctx: { TraceId: 01jyksfve9ewt9f54p92xsebtq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzNmNzI1MjYtOTNjZTMzODktOGIzYzQ2MGYtZWIxMjdlMDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.358537Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734723. Ctx: { TraceId: 01jyksfvec6er3ch1ypx73kxpy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzliY2Q3Ny1kOWU1ZTc5YS1iNzcwNGVjNC05NWE3YmY0Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.365550Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734724. Ctx: { TraceId: 01jyksfvef8wdtsgew57keweks, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmFlYTdjNDctMzk0NmMwOTMtOTgxOWNmMjgtZTA4MzJkNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.369865Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734725. Ctx: { TraceId: 01jyksfveq6rgk5gwmsak6yt0e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjZhYjMxZjgtN2UzMmM3ZGEtZWJkMDg0MDItN2M5NDZhMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.370615Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734726. Ctx: { TraceId: 01jyksfveqaaqmj8333px7t4cr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjYxNzliZGQtMzJlODNiMC0zZjVjOGM4My01NWMwYWQ3YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.370665Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734727. Ctx: { TraceId: 01jyksfveq2bf0t128h7m7t4py, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yzg4ZTRiZjAtNDgxZDc0MGItMmE1OWUzMjYtNTliZTU1Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.371179Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734728. Ctx: { TraceId: 01jyksfveqcrf34dnydnrswc6m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmViNDcxNWEtYTQ1ZDk5N2MtYmEwNWYwOTktMTY3YTExNDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.371196Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734729. Ctx: { TraceId: 01jyksfveq9hxw4tv5hdw2cxtk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTExYjA0NTUtNjMwN2U0NTAtZDNjMmVjYTMtYmI2YmRhNDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.376539Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734730. 
Ctx: { TraceId: 01jyksfveyfpns1d8vy2dpabym, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmFlYTdjNDctMzk0NmMwOTMtOTgxOWNmMjgtZTA4MzJkNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-25T14:57:21.379254Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734731. Ctx: { TraceId: 01jyksfvf14m8rdvvns8gv1n3b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWEyN2ViNTMtYTdjMzA4ZTctYWY3YzM1MjUtZTI2Y2Q4M2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.379750Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734732. Ctx: { TraceId: 01jyksfvf1b6c4d9xd159frwwc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjZmNzAwZGItOTQ0MzY4NmUtYzA3NDJiNzMtZDdkMjk2ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.410133Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734737. Ctx: { TraceId: 01jyksfvfg2r0nj6eepm15jwsn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yzg4ZTRiZjAtNDgxZDc0MGItMmE1OWUzMjYtNTliZTU1Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.410223Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734738. Ctx: { TraceId: 01jyksfvfgf1b0ss4r3abjshcd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmViNDcxNWEtYTQ1ZDk5N2MtYmEwNWYwOTktMTY3YTExNDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.411079Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734739. Ctx: { TraceId: 01jyksfvfjb3kam8hrz4d8w02f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmFlYTdjNDctMzk0NmMwOTMtOTgxOWNmMjgtZTA4MzJkNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.411242Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734741. Ctx: { TraceId: 01jyksfvfm4f49kq3s3v46end7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWEyN2ViNTMtYTdjMzA4ZTctYWY3YzM1MjUtZTI2Y2Q4M2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.411658Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734740. Ctx: { TraceId: 01jyksfvfm00rbtahcf7kevg58, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjZhYjMxZjgtN2UzMmM3ZGEtZWJkMDg0MDItN2M5NDZhMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.411795Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734742. Ctx: { TraceId: 01jyksfvfq00t2dwb4adev2xz7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjZmNzAwZGItOTQ0MzY4NmUtYzA3NDJiNzMtZDdkMjk2ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.412154Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734734. Ctx: { TraceId: 01jyksfvf41g32wdn3sgqf4tnv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjYxNzliZGQtMzJlODNiMC0zZjVjOGM4My01NWMwYWQ3YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:57:21.412326Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734733. Ctx: { TraceId: 01jyksfvf474z90y2vnkmfmgr1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzliY2Q3Ny1kOWU1ZTc5YS1iNzcwNGVjNC05NWE3YmY0Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750863396737 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 4 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-25T14:57:21.412701Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734735. Ctx: { TraceId: 01jyksfvfcejjjh65rzc08fks4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTExYjA0NTUtNjMwN2U0NTAtZDNjMmVjYTMtYmI2YmRhNDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:21.412868Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976734736. Ctx: { TraceId: 01jyksfvfcdmvf28nnp3n5cda8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzNmNzI1MjYtOTNjZTMzODktOGIzYzQ2MGYtZWIxMjdlMDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1750863396737 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 4 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... 
(TRUNCATED) Table has 4 shards >> KqpIndexes::DuplicateUpsertInterleaveParams+UseSink [GOOD] >> KqpIndexes::DuplicateUpsertInterleaveParams-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::DuplicateUpsertInterleave [GOOD] Test command err: Trying to start YDB, gRPC: 14266, MsgBus: 24668 2025-06-25T14:57:02.684946Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901138176430022:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:02.694707Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001bf5/r3tmp/tmp1IKL7O/pdisk_1.dat 2025-06-25T14:57:03.149360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:03.149504Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:03.155518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:03.190435Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:03.209890Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901138176429992:2080] 1750863422679645 != 1750863422679648 TServer::EnableGrpc on GrpcPort 14266, node 1 2025-06-25T14:57:03.313837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:03.313866Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:03.313873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:03.314012Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24668 2025-06-25T14:57:03.696477Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24668 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:04.128801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:04.165581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:04.347348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:04.509612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:04.595951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:06.314946Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901155356300806:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.315057Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.603749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.642643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.685153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.738962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.770209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.843713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.899998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.001899Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901159651268761:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:07.001980Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:07.002179Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901159651268766:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:07.005955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:07.019934Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901159651268768:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:07.081552Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901159651268819:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:07.685059Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901138176430022:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:07.685132Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:08.337302Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901163946236354:3596], Recipient [1:7519901142471397604:2138]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:08.337345Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:08.337356Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: ... mr::TEvDataShard::TEvProposeTransactionResult> complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:23.467997Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:23.468028Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715672 2025-06-25T14:57:23.468035Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:23.468059Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715672 2025-06-25T14:57:23.468064Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:23.468098Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:23.468105Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:23.468114Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715672:0 2025-06-25T14:57:23.468152Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901228407523823:2476] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:23.468203Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:23.468208Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 
72057594046644480 2025-06-25T14:57:23.468214Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715672:2 2025-06-25T14:57:23.468236Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901228407523837:2477] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:23.468263Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715672 2025-06-25T14:57:23.468267Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:23.468288Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715672 2025-06-25T14:57:23.468294Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:23.468355Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901206932685012:2142], Recipient [3:7519901206932685012:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:23.468369Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:23.468405Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:23.468424Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-06-25T14:57:23.468481Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:23.468492Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:0 progress is 2/3 2025-06-25T14:57:23.468502Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-25T14:57:23.468513Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:0 progress is 2/3 2025-06-25T14:57:23.468522Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-25T14:57:23.468533Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-06-25T14:57:23.468685Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901206932685012:2142], Recipient [3:7519901206932685012:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:23.468697Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:23.468719Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 
2025-06-25T14:57:23.468731Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-06-25T14:57:23.468768Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:23.468777Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-25T14:57:23.468785Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:23.468798Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-25T14:57:23.468805Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:23.468815Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-06-25T14:57:23.468848Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519901228407523801:2474] message: TxId: 281474976715672 2025-06-25T14:57:23.468865Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:23.468882Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-25T14:57:23.468890Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:0 2025-06-25T14:57:23.468986Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-25T14:57:23.468999Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-25T14:57:23.469004Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:1 2025-06-25T14:57:23.469019Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-25T14:57:23.469024Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-25T14:57:23.469030Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:2 2025-06-25T14:57:23.469057Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-25T14:57:23.469304Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:23.469372Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901228407523897:3653], Recipient [3:7519901206932685012:2142]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:23.469386Z node 3 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:23.469395Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:23.469421Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901228407523898:3654], Recipient [3:7519901206932685012:2142]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:23.469431Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:23.469438Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:23.469451Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:23.469492Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901228407523801:2474] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:23.469937Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901228407523809:3591], Recipient [3:7519901206932685012:2142]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:23.469949Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:23.469957Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:23.521701Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901206932685012:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:23.521738Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:23.521784Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901206932685012:2142], Recipient [3:7519901206932685012:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:23.521800Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::IndexTopSortPushDown [GOOD] Test command err: Trying to start YDB, gRPC: 7772, MsgBus: 18754 2025-06-25T14:56:50.247065Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901089567728861:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:50.248984Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c1a/r3tmp/tmpgqRQgU/pdisk_1.dat 2025-06-25T14:56:50.707182Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7519901089567728680:2080] 1750863410232707 != 1750863410232710 2025-06-25T14:56:50.718599Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:50.719723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.719794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.726769Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7772, node 1 2025-06-25T14:56:50.783410Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.783433Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.783439Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.783566Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18754 2025-06-25T14:56:51.215750Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18754 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.537522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:51.550728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:51.562403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:51.744131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.963134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:56:52.047740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.598807Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901102452632221:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.598900Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.872587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.903992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.937035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.973546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.004071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.044242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.114586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.190832Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901106747600176:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.190908Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.191025Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901106747600181:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.194433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.204527Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901106747600183:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.282867Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901106747600236:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:55.240701Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901089567728861:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:55.240760Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.296094Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901111042567822:3598], Recipient [1:7519901089567729058:2180]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.296134Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052 ... -06-25T14:57:15.014726Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:15.014831Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:15.021692Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20901, node 3 2025-06-25T14:57:15.161343Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:15.161368Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:15.161377Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:15.161493Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29313 TClient is connected to server localhost:29313 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:15.754445Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:15.781050Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:15.781460Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:57:15.795532Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.899401Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:16.107883Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:16.173298Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:18.593534Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901207426617374:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:18.593642Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:18.654740Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.695880Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.737232Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.775338Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.845130Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.887017Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.963619Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.073942Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901211721585332:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.074053Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.074623Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901211721585337:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.081400Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:19.097918Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901211721585339:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:19.185853Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901211721585390:3413] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:19.748548Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901190246746587:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:19.748610Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:20.408388Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.504904Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.554006Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionSwitchDatabases [GOOD] Test command err: 2025-06-25T14:54:56.049340Z :WriteSessionNoAvailableDatabase INFO: Random seed for debugging is 1750863296049315 2025-06-25T14:54:56.404144Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900596971191299:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:56.404229Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:54:56.466487Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900600146289426:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:56.466520Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012b2/r3tmp/tmpVCnSY6/pdisk_1.dat 2025-06-25T14:54:56.666604Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:54:56.723365Z 
node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:54:57.010033Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:57.028810Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:57.028885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:57.030785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:57.030855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:57.049381Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:54:57.049793Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:54:57.051218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5872, node 1 2025-06-25T14:54:57.299110Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0012b2/r3tmp/yandexCdVHRS.tmp 2025-06-25T14:54:57.299140Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0012b2/r3tmp/yandexCdVHRS.tmp 2025-06-25T14:54:57.300280Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0012b2/r3tmp/yandexCdVHRS.tmp 2025-06-25T14:54:57.300464Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:54:57.436693Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:57.489809Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:54:57.601248Z INFO: TTestServer started on Port 17686 GrpcPort 5872 TClient is connected to server localhost:17686 PQClient connected to localhost:5872 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:54:58.014001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 2025-06-25T14:54:58.128538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:55:00.362447Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900617326158891:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.362547Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900617326158857:2270], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.362848Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:00.382843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:00.420568Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900617326158894:2274], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:55:00.515913Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900617326158924:2133] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:00.813920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:00.836007Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519900617326158931:2279], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:00.836261Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=M2JkZjkzNzMtM2MwZDRmNmQtMzYzZDRmM2UtMTJhZjExZmQ=, ActorId: [2:7519900617326158853:2267], ActorState: ExecuteState, TraceId: 01jyksbhpd0e3h9qmhkt3heg0f, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:00.838694Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:00.864599Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519900614151061521:2307], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:55:00.864787Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NWExMTBkMzEtMmMxYzI5NGEtNTUyYTk2MzgtNmI1YmQxYzQ=, ActorId: [1:7519900614151061461:2297], ActorState: ExecuteState, TraceId: 01jyksbhwh4x96s0vmrrfbz8je, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:55:00.865177Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:55:00.973178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:01.162463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:5872", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net" ... UG: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-25T14:57:02.665008Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750863422664 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:57:02.665198Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session established. 
Init response: last_seq_no: 2 session_id: "src_id|4f6000fb-2622638e-4df1c128-21175484_0" supported_codecs { codecs: 1 codecs: 2 codecs: 3 } 2025-06-25T14:57:03.663590Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7519901139458560502:3627] (SourceId=src_id, PreferedPartition=(NULL)) Update the table 2025-06-25T14:57:03.691352Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7519901139458560502:3627] (SourceId=src_id, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=1 Status=SUCCESS 2025-06-25T14:57:03.691387Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7519901139458560502:3627] (SourceId=src_id, PreferedPartition=(NULL)) Start idle 2025-06-25T14:57:05.988265Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=320, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:57:10.990013Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=320, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:57:15.992640Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=320, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:57:20.950914Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186224037893][rt3.dc1--test-topic] TPersQueueReadBalancer::HandleWakeup 2025-06-25T14:57:20.950966Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186224037893][rt3.dc1--test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037892 Cookie: 4 2025-06-25T14:57:20.952607Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 TotalPartitions: 1 SourceIdMaxCounts: 6000000 } 2025-06-25T14:57:20.955099Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][rt3.dc1--test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 4 DataSize: 0 UsedReserveSize: 0 2025-06-25T14:57:20.955244Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][rt3.dc1--test-topic] ProcessPendingStats. PendingUpdates size 1 2025-06-25T14:57:20.994512Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=320, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:57:22.652789Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] Write 1 messages with Id from 1 to 1 >>> Got event: ReadyToAcceptEvent >>> Ready to answer: ok 2025-06-25T14:57:22.653224Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] Write session: try to update token 2025-06-25T14:57:22.653263Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] Send 1 message(s) (0 left), first sequence number is 3 2025-06-25T14:57:22.654163Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|4f6000fb-2622638e-4df1c128-21175484_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-25T14:57:22.654390Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-25T14:57:22.654872Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:57:22.654912Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:57:22.654981Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 1 2025-06-25T14:57:22.655071Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:57:22.655377Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:57:22.655398Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:57:22.655445Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72075186224037892] got client message topic: rt3.dc1--test-topic partition: 0 SourceId: '\0src_id' SeqNo: 3 partNo : 0 messageNo: 1 size 98 offset: -1 2025-06-25T14:57:22.655617Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob processing sourceId '\0src_id' seqNo 3 partNo 0 2025-06-25T14:57:22.656542Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob complete sourceId '\0src_id' seqNo 3 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 172 count 1 nextOffset 3 batches 1 2025-06-25T14:57:22.656991Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Add new write blob: topic 'rt3.dc1--test-topic' partition 0 compactOffset 2,1 HeadOffset 2 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000? size 160 WTime 1750863442656 2025-06-25T14:57:22.657114Z node 3 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:57:22.657178Z node 3 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 160 2025-06-25T14:57:22.661412Z node 3 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. 
Partition 0 offset 2 count 1 size 160 actorID [3:7519900701371889431:2444] 2025-06-25T14:57:22.661536Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 105 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:57:22.661581Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:57:22.661623Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0src_id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-25T14:57:22.661804Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=480, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:57:22.661845Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1382: [PQ: 72075186224037892] Topic 'rt3.dc1--test-topic' counters. CacheSize 480 CachedBlobs 3 2025-06-25T14:57:22.661873Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-25T14:57:22.661936Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T14:57:22.662449Z node 3 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 2 partno 0 count 1 parts 0 suffix '63' size 160 2025-06-25T14:57:22.664731Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-25T14:57:22.664900Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 3 written { offset: 2 } } write_statistics { persisting_time { nanos: 5000000 } min_queue_wait_time { nanos: 1000000 } max_queue_wait_time { nanos: 1000000 } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-25T14:57:22.664928Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] OnAck: seqNo=1, txId=? 2025-06-25T14:57:22.664950Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] Write session: acknoledged message 1 2025-06-25T14:57:22.667361Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 1, Msg: Cancelled on the server side, Details: , InternalError: 0 2025-06-25T14:57:22.667470Z :ERROR: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] Got error. Status: CLIENT_CANCELLED, Description:
: Error: GRpc error: (1): Cancelled on the server side 2025-06-25T14:57:22.667512Z :ERROR: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] Write session will not restart after a fatal error 2025-06-25T14:57:22.667552Z :INFO: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] Write session will now close 2025-06-25T14:57:22.667620Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] Write session: aborting 2025-06-25T14:57:22.672582Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|4f6000fb-2622638e-4df1c128-21175484_0 grpc read done: success: 0 data: 2025-06-25T14:57:22.672615Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: src_id|4f6000fb-2622638e-4df1c128-21175484_0 grpc read failed 2025-06-25T14:57:22.672645Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: src_id|4f6000fb-2622638e-4df1c128-21175484_0 grpc closed 2025-06-25T14:57:22.672660Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: src_id|4f6000fb-2622638e-4df1c128-21175484_0 is DEAD 2025-06-25T14:57:22.673070Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:57:22.696711Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [3:7519901139458560534:3627] destroyed 2025-06-25T14:57:22.696821Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:57:22.713434Z :DEBUG: [/Root] TraceId [] SessionId [src_id|4f6000fb-2622638e-4df1c128-21175484_0] MessageGroupId [src_id] Write session: destroy >> KqpPrefixedVectorIndexes::PrefixedVectorIndexOrderByCosineDistanceWithCover+Nullable [GOOD] >> KqpPrefixedVectorIndexes::PrefixedVectorIndexOrderByCosineDistanceWithCover-Nullable >> KqpUniqueIndex::UpdateFkPkOverlap [GOOD] |89.4%| [TA] $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} |89.4%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpIndexes::UpsertMultipleUniqIndexes [GOOD] >> KqpIndexes::UpsertNoIndexColumns >> KqpIndexes::SelectFromAsyncIndexedTable [GOOD] >> KqpIndexes::SelectFromIndexesAndFreeSpaceLogicDoesntTimeout >> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex-UseSink [GOOD] >> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex+UseSink >> KqpUniqueIndex::UpdateOnHidenChanges+DataColumn [GOOD] >> KqpUniqueIndex::UpdateOnHidenChanges-DataColumn >> KqpIndexes::MultipleSecondaryIndexWithSameComulns-UseSink [GOOD] |89.4%| [TA] $(B)/ydb/services/persqueue_v1/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateFkPkOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 11726, MsgBus: 21498 2025-06-25T14:57:03.451365Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901145056372188:2170];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:03.451622Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001bf0/r3tmp/tmpRoZbgt/pdisk_1.dat 2025-06-25T14:57:03.879480Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:03.922778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:03.922855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 11726, node 1 2025-06-25T14:57:03.924490Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:03.981216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:03.981438Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:03.981450Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:03.981542Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21498 TClient is connected to server localhost:21498 2025-06-25T14:57:04.448492Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:04.569328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:04.594749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:04.748900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:04.888067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:04.966280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:06.612210Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901157941275558:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.612341Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.940878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.015506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.064591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.116255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.152563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.226851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.264646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.328401Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901162236243516:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:07.328496Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:07.328996Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901162236243521:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:07.333229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:07.346861Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901162236243523:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:07.409779Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901162236243574:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:08.444823Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901145056372188:2170];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:08.444890Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; waiting... 2025-06-25T14:57:08.494647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.495045Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksffvg5f033j29bbbq527j, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTQ0ZmJkYzItZDU4MTU3OWEtMmY3NTgwYmQtZWVjZTU4Yg==, CurrentExecution ... orState: ExecuteState, TraceId: 01jyksfhvrfxarewfkk84m8cbs, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 19905, MsgBus: 24187 2025-06-25T14:57:15.668516Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901196054106814:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:15.668578Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001bf0/r3tmp/tmptOnJMN/pdisk_1.dat 2025-06-25T14:57:15.804066Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:15.804159Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:15.808192Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:15.813714Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19905, node 2 2025-06-25T14:57:15.959253Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:15.959280Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:15.959289Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:15.959426Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24187 TClient is connected to server localhost:24187 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:16.578100Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:16.584853Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:16.592142Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:16.649124Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:16.675585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:16.838195Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:16.913862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:19.109295Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901213233977568:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.109373Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.167869Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.202089Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.271868Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.314174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.348918Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.417808Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.454507Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.565566Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901213233978228:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.565657Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.565922Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901213233978233:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.570020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:19.600137Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519901213233978235:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:19.669025Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519901213233978288:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:20.668842Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901196054106814:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:20.668900Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:20.734782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... |89.4%| [TA] {RESULT} $(B)/ydb/services/persqueue_v1/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::MultipleSecondaryIndexWithSameComulns-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 9369, MsgBus: 11126 2025-06-25T14:56:49.639004Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901083775672424:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:49.639089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c4b/r3tmp/tmp0ZuH7F/pdisk_1.dat 2025-06-25T14:56:50.122113Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901083775672400:2080] 1750863409637745 != 1750863409637748 2025-06-25T14:56:50.133313Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:50.135591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.135685Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.137439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9369, node 1 2025-06-25T14:56:50.285674Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.285698Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.285707Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.285819Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:50.664554Z node 
1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11126 TClient is connected to server localhost:11126 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.150696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:51.163308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:51.175783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.378555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.619018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.700992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:53.059141Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100955543227:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.059299Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.581983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.616132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.645035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.688420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.722154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.809802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.881358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.975597Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100955543894:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.975716Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.976550Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100955543899:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.983497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.008671Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901100955543901:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.095282Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901105250511250:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:54.640410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901083775672424:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:54.640473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.238676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... Owners: 72075186224037917 NodeId: 3 StartTime: 1750863436515 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:57:26.549048Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:57:26.549075Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037917 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 11] state 'Ready' dataSize 1184 rowCount 6 cpuUsage 0 2025-06-25T14:57:26.549178Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037917 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 11] raw table stats: DataSize: 1184 RowCount: 6 IndexSize: 0 InMemSize: 1184 LastAccessTime: 1750863438089 LastUpdateTime: 1750863438089 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 6 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:57:26.549199Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099997s, queue# 1 2025-06-25T14:57:26.600250Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [3:7519901244133362957:4398], Recipient [3:7519901184003816938:2149]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:26.600300Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:26.600323Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:57:26.600716Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519901201183687972:2422], Recipient [3:7519901184003816938:2149]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037921 TableLocalId: 12 Generation: 1 Round: 0 TableStats { DataSize: 
1240 RowCount: 7 IndexSize: 0 InMemSize: 1240 LastAccessTime: 1750863438079 LastUpdateTime: 1750863438079 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 7 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037921 NodeId: 3 StartTime: 1750863436572 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:57:26.600740Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:57:26.600767Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037921 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 1240 rowCount 7 cpuUsage 0 2025-06-25T14:57:26.600874Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037921 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 1240 RowCount: 7 IndexSize: 0 InMemSize: 1240 LastAccessTime: 1750863438079 LastUpdateTime: 1750863438079 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 7 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:57:26.601576Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [3:7519901244133362961:4401], Recipient [3:7519901184003816938:2149]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:26.601599Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:26.601608Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:57:26.601760Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519901201183687971:2421], Recipient [3:7519901184003816938:2149]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037920 TableLocalId: 12 Generation: 1 Round: 0 TableStats { DataSize: 928 RowCount: 4 IndexSize: 0 InMemSize: 928 LastAccessTime: 1750863438092 LastUpdateTime: 1750863438092 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037920 NodeId: 3 StartTime: 1750863436572 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:57:26.601775Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:57:26.601793Z node 
3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037920 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] state 'Ready' dataSize 928 rowCount 4 cpuUsage 0 2025-06-25T14:57:26.601887Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037920 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 12] raw table stats: DataSize: 928 RowCount: 4 IndexSize: 0 InMemSize: 928 LastAccessTime: 1750863438092 LastUpdateTime: 1750863438092 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 4 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:57:26.649126Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901184003816938:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:26.649174Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:26.649194Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 3 2025-06-25T14:57:26.649245Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 3 2025-06-25T14:57:26.649268Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 3 2025-06-25T14:57:26.649321Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 11 shard idx 72057594046644480:30 data size 1184 row count 6 2025-06-25T14:57:26.649376Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037917 maps to shardIdx: 72057594046644480:30 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 11], pathId map=Join2, is column=0, is olap=0, RowCount 6, DataSize 1184 2025-06-25T14:57:26.649389Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037917, followerId 0 2025-06-25T14:57:26.649444Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:30 with partCount# 0, rowCount# 6, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:26.649495Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037917 2025-06-25T14:57:26.649523Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:34 data size 1240 row count 7 2025-06-25T14:57:26.649553Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037921 maps to shardIdx: 72057594046644480:34 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, 
is olap=0, RowCount 7, DataSize 1240 2025-06-25T14:57:26.649561Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037921, followerId 0 2025-06-25T14:57:26.649594Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:34 with partCount# 0, rowCount# 7, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:26.649614Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037921 2025-06-25T14:57:26.649632Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 12 shard idx 72057594046644480:33 data size 928 row count 4 2025-06-25T14:57:26.649659Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037920 maps to shardIdx: 72057594046644480:33 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], pathId map=TuplePrimaryDescending, is column=0, is olap=0, RowCount 4, DataSize 928 2025-06-25T14:57:26.649669Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037920, followerId 0 2025-06-25T14:57:26.649708Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:33 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:26.649727Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037920 2025-06-25T14:57:26.649778Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:26.650913Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901184003816938:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:26.650939Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:26.650956Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 >> GenericFederatedQuery::YdbManagedSelectAll >> GenericFederatedQuery::IcebergHiveTokenSelectAll >> GenericFederatedQuery::IcebergHadoopBasicSelectAll >> GenericFederatedQuery::PostgreSQLOnPremSelectAll >> GenericFederatedQuery::IcebergHiveBasicSelectAll >> GenericFederatedQuery::IcebergHadoopSaSelectAll >> GenericFederatedQuery::YdbFilterPushdown >> GenericFederatedQuery::IcebergHiveSaSelectAll >> KqpIndexes::JoinWithNonPKColumnsInPredicate-UseStreamJoin [GOOD] >> KqpIndexes::UpsertWithoutExtraNullDelete-UseSink [GOOD] >> KqpMultishardIndex::CheckPushTopSort >> KqpIndexes::UniqAndNoUniqSecondaryIndex [GOOD] >> KqpIndexes::UniqAndNoUniqSecondaryIndexWithCover >> GenericFederatedQuery::IcebergHadoopTokenSelectAll ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::JoinWithNonPKColumnsInPredicate-UseStreamJoin [GOOD] Test command err: Trying to start YDB, gRPC: 25112, MsgBus: 15993 2025-06-25T14:56:50.365557Z node 1 :METADATA_PROVIDER WARN: 
log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901087490911536:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:50.397214Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c12/r3tmp/tmpn9oDFC/pdisk_1.dat 2025-06-25T14:56:50.950802Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901087490911506:2080] 1750863410345639 != 1750863410345642 2025-06-25T14:56:50.979195Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.979725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.979842Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:50.983772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25112, node 1 2025-06-25T14:56:51.140733Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:51.140753Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:51.140759Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:51.140845Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15993 2025-06-25T14:56:51.400461Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15993 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.826364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:56:51.856091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.995501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.154801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.219607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:53.789266Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100375815030:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.789396Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.052467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.127030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.165319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.203578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.242303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.313533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.381701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.462173Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901104670782997:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.462242Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.462351Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901104670783002:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.465865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.480217Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901104670783004:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.535535Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901104670783055:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:55.354023Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901087490911536:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:55.392107Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.491220Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901108965750659:3608], Recipient [1:7519901087490911894:2184]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.491276Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.491287Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: ... hard 72075186224037933 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 31] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-25T14:57:29.036100Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037933 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 31] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750863446702 LastUpdateTime: 1750863444368 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 13 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 1 LocksWholeShard: 0 LocksBroken: 1 2025-06-25T14:57:29.036204Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519901211493096147:2523], Recipient [3:7519901185723289502:2157]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037932 TableLocalId: 33 Generation: 1 Round: 0 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750863446700 LastUpdateTime: 1750863444370 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 1 PartCount: 0 RangeReadRows: 3 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037932 NodeId: 3 StartTime: 1750863439003 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:57:29.036220Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:57:29.036231Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 
72057594046644480 from shard 72075186224037932 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 33] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-25T14:57:29.036288Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037932 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 33] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750863446700 LastUpdateTime: 1750863444370 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 1 PartCount: 0 RangeReadRows: 3 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:57:29.043662Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [3:7519901254442770686:4669], Recipient [3:7519901185723289502:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:29.043690Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:29.043712Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:57:29.043954Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519901211493096129:2521], Recipient [3:7519901185723289502:2157]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037931 TableLocalId: 35 Generation: 1 Round: 0 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750863444367 LastUpdateTime: 1750863444367 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037931 NodeId: 3 StartTime: 1750863439002 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:57:29.043963Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:57:29.043979Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037931 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 35] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0 2025-06-25T14:57:29.044024Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037931 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 35] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750863444367 LastUpdateTime: 1750863444367 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 
HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:57:29.138766Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901185723289502:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:29.138791Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:29.138813Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 4 2025-06-25T14:57:29.138862Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 4 2025-06-25T14:57:29.138875Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 4 2025-06-25T14:57:29.138918Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 37 shard idx 72057594046644480:46 data size 0 row count 0 2025-06-25T14:57:29.138965Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037930 maps to shardIdx: 72057594046644480:46 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 37], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:29.138975Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037930, followerId 0 2025-06-25T14:57:29.139027Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:46 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:29.139067Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037930 2025-06-25T14:57:29.139089Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 31 shard idx 72057594046644480:43 data size 0 row count 0 2025-06-25T14:57:29.139114Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037933 maps to shardIdx: 72057594046644480:43 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 31], pathId map=tab4, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:29.139120Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037933, followerId 0 2025-06-25T14:57:29.139144Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:43 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:29.139154Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037933 2025-06-25T14:57:29.139166Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 33 shard idx 72057594046644480:44 data size 0 row count 0 2025-06-25T14:57:29.139198Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: 
TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037932 maps to shardIdx: 72057594046644480:44 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 33], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:29.139208Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037932, followerId 0 2025-06-25T14:57:29.139237Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:44 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:29.139255Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037932 2025-06-25T14:57:29.139271Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 35 shard idx 72057594046644480:45 data size 0 row count 0 2025-06-25T14:57:29.139293Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037931 maps to shardIdx: 72057594046644480:45 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 35], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:29.139300Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037931, followerId 0 2025-06-25T14:57:29.139321Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:45 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:29.139333Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037931 2025-06-25T14:57:29.139367Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:29.139451Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901185723289502:2157]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:29.139462Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:29.139474Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 >> KqpIndexes::SecondaryIndexWithPrimaryKeySameComulns-UseSink [GOOD] >> KqpIndexes::DeleteByIndex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexWithPrimaryKeySameComulns-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 2357, MsgBus: 9555 2025-06-25T14:57:03.079555Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901145061647334:2144];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001bf3/r3tmp/tmpNdpnO7/pdisk_1.dat 2025-06-25T14:57:03.338011Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot 
detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:57:03.520871Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:03.537215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:03.537304Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:03.540079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2357, node 1 2025-06-25T14:57:03.768823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:03.768849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:03.768859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:03.768965Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:04.079996Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9555 TClient is connected to server localhost:9555 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:04.554063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:04.572005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:04.734555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:04.947066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:05.036004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:06.604890Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901157946550724:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.605006Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.924758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.004769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.071014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.106829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.141950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.190329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.236980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:07.324805Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901162241518685:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:07.324897Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:07.326558Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901162241518690:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:07.331764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:07.346025Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901162241518692:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:07.438655Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901162241518745:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:08.064532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901145061647334:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:08.078329Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:08.393205Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901166536486316:3593], Recipient [1:7519901145061647541:2147]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:08.393249Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:08.393263Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:57:08.393297Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519901166536486312:3590], Recipient [1:75199011450 ... d: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:30.131084Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037908, followerId 0 2025-06-25T14:57:30.131109Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:21 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:30.131120Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037908 2025-06-25T14:57:30.131135Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:22 data size 0 row count 0 2025-06-25T14:57:30.131160Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037909 maps to shardIdx: 72057594046644480:22 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:30.131168Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037909, followerId 0 2025-06-25T14:57:30.131190Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:22 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:30.131198Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037909 2025-06-25T14:57:30.131213Z node 3 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:16 data size 0 row count 0 2025-06-25T14:57:30.131236Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037903 maps to shardIdx: 72057594046644480:16 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:30.131244Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037903, followerId 0 2025-06-25T14:57:30.131264Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:16 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:30.131273Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037903 2025-06-25T14:57:30.131291Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:15 data size 0 row count 0 2025-06-25T14:57:30.131318Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037902 maps to shardIdx: 72057594046644480:15 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:30.131331Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037902, followerId 0 2025-06-25T14:57:30.131354Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:15 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:30.131366Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037902 2025-06-25T14:57:30.131379Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:18 data size 0 row count 0 2025-06-25T14:57:30.131406Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037905 maps to shardIdx: 72057594046644480:18 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:30.131415Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037905, followerId 0 2025-06-25T14:57:30.131433Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:18 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:30.131448Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037905 2025-06-25T14:57:30.131464Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: 
PersistSingleStats for pathId 5 shard idx 72057594046644480:14 data size 0 row count 0 2025-06-25T14:57:30.131488Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037901 maps to shardIdx: 72057594046644480:14 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:30.131495Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037901, followerId 0 2025-06-25T14:57:30.131536Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:14 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:30.131555Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037901 2025-06-25T14:57:30.131575Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:20 data size 0 row count 0 2025-06-25T14:57:30.131601Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037907 maps to shardIdx: 72057594046644480:20 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:30.131608Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037907, followerId 0 2025-06-25T14:57:30.131635Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:20 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:30.131644Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037907 2025-06-25T14:57:30.131658Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:23 data size 0 row count 0 2025-06-25T14:57:30.131682Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037910 maps to shardIdx: 72057594046644480:23 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:30.131688Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037910, followerId 0 2025-06-25T14:57:30.131711Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:23 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:30.131719Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037910 2025-06-25T14:57:30.131733Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 
72057594046644480:17 data size 0 row count 0 2025-06-25T14:57:30.131751Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037904 maps to shardIdx: 72057594046644480:17 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:57:30.131758Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037904, followerId 0 2025-06-25T14:57:30.131776Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:17 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:30.131784Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037904 2025-06-25T14:57:30.131828Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:30.131940Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901208348687155:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:30.131967Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:30.131984Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:57:30.458464Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:57:30.487998Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-25T14:57:30.756406Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901208348687155:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:30.756444Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:30.756502Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901208348687155:2146], Recipient [3:7519901208348687155:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:30.756517Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:30.817396Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> KqpIndexes::DuplicateUpsertInterleaveParams-UseSink [GOOD] >> KqpIndexes::SecondaryIndexWithPrimaryKeySameComulns+UseSink [GOOD] >> KqpIndexes::SecondaryIndexUsingInJoin2+UseStreamJoin >> GenericFederatedQuery::ClickHouseManagedSelectAll ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::DeleteByIndex [GOOD] Test command err: Trying to start YDB, gRPC: 11470, MsgBus: 13366 2025-06-25T14:57:12.290379Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901184159971790:2217];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:12.306464Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b91/r3tmp/tmpTVLfYS/pdisk_1.dat 2025-06-25T14:57:12.768437Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901184159971613:2080] 1750863432263854 != 1750863432263857 2025-06-25T14:57:12.775783Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:12.785038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:12.785117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:12.805407Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11470, node 1 2025-06-25T14:57:12.960918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:12.960943Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:12.960952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:12.961059Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13366 2025-06-25T14:57:13.296014Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13366 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:13.672343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:13.689973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:13.702637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:13.819253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:13.982578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:14.043878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:15.674897Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901197044875125:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.674999Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:16.010183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:16.078796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:16.125330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:16.156195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:16.189458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:16.251856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:16.294347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:16.372427Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901201339843084:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:16.372521Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:16.372773Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901201339843089:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:16.378348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:16.390827Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901201339843091:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:16.459201Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901201339843142:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:17.291715Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901184159971790:2217];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:17.291797Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:17.540463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... CHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [3:7519901261047036540:3665], Recipient [3:7519901239572197654:2146]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:30.478041Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:30.478050Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:57:30.478133Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269551620, Sender [3:7519901261047036466:2480], Recipient [3:7519901239572197654:2146]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 7519901261047036466 RawX2: 4503612512274864 } Origin: 72075186224037923 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-06-25T14:57:30.478152Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4987: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-25T14:57:30.478197Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901261047036466 RawX2: 4503612512274864 } Origin: 72075186224037923 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-06-25T14:57:30.478216Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 281474976715672, tablet: 72075186224037923, partId: 2 2025-06-25T14:57:30.478297Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901261047036466 RawX2: 4503612512274864 } Origin: 72075186224037923 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-06-25T14:57:30.478324Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 281474976715672:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-25T14:57:30.478387Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976715672:2 HandleReply TEvDataShard::TEvSchemaChanged at 
tablet: 72057594046644480 message: Source { RawX1: 7519901261047036466 RawX2: 4503612512274864 } Origin: 72075186224037923 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-06-25T14:57:30.478438Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715672:2, shardIdx: 72057594046644480:36, shard: 72075186224037923, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-25T14:57:30.478453Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:30.478466Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715672:2, datashard: 72075186224037923, at schemeshard: 72057594046644480 2025-06-25T14:57:30.478491Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715672:2 129 -> 240 2025-06-25T14:57:30.478592Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:30.478848Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:30.478863Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:30.478873Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715672:2 2025-06-25T14:57:30.478914Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901261047036466:2480] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:30.478989Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901239572197654:2146], Recipient [3:7519901239572197654:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:30.479006Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:30.479033Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:30.479048Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-06-25T14:57:30.479127Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:30.479144Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-25T14:57:30.479153Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:30.479167Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-25T14:57:30.479175Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:30.479186Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-06-25T14:57:30.479225Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519901261047036437:2477] message: TxId: 281474976715672 2025-06-25T14:57:30.479255Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:30.479284Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-25T14:57:30.479301Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:0 2025-06-25T14:57:30.479407Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-25T14:57:30.479426Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-25T14:57:30.479433Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:1 2025-06-25T14:57:30.479449Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-25T14:57:30.479455Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-25T14:57:30.479460Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:2 2025-06-25T14:57:30.479488Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-25T14:57:30.480507Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901261047036537:3662], Recipient [3:7519901239572197654:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:30.480527Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:30.480536Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:30.480853Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:30.480919Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901261047036437:2477] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:30.482356Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901261047036449:3600], Recipient [3:7519901239572197654:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:30.482371Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, 
processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:30.482379Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:30.484428Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901261047036540:3665], Recipient [3:7519901239572197654:2146]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:30.484445Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:30.484452Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:31.441939Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901239572197654:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:31.441973Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:31.442010Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901239572197654:2146], Recipient [3:7519901239572197654:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:31.442026Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:32.442427Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901239572197654:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:32.442467Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:32.442523Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901239572197654:2146], Recipient [3:7519901239572197654:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:32.442538Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpIndexes::DoUpsertWithoutIndexUpdate+UniqIndex+UseSink [GOOD] >> KqpIndexes::DirectAccessToIndexImplTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::DuplicateUpsertInterleaveParams-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 4593, MsgBus: 2611 2025-06-25T14:57:12.324347Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901181876182303:2154];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:12.326570Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b64/r3tmp/tmpYLNIBT/pdisk_1.dat 2025-06-25T14:57:12.836997Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4593, node 1 2025-06-25T14:57:12.852098Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:12.852183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:12.857261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:12.908587Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:12.908607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:12.908614Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:12.908757Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2611 2025-06-25T14:57:13.326839Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2611 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:13.549060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:13.560150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:13.567802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:13.702128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:13.847047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:13.916886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:15.530331Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901194761085713:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.530436Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.849340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.882466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.928635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:16.000664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:16.073579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:16.118875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:16.159353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:16.233721Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901199056053670:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:16.233797Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:16.234007Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901199056053675:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:16.237826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:16.248744Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901199056053677:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:16.347731Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901199056053728:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:17.328442Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901181876182303:2154];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:17.328511Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:17.502939Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901203351021303:3599], Recipient [1:7519901181876182522:2150]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:17.502985Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:17.502998Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 ... te: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-25T14:57:31.636981Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:31.636990Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715672:0, datashard: 72075186224037922, at schemeshard: 72057594046644480 2025-06-25T14:57:31.637003Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715672:0 129 -> 240 2025-06-25T14:57:31.637079Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:31.637216Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:31.637226Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:31.637289Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:31.637295Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:31.637340Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:31.637347Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:31.637358Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715672:2 2025-06-25T14:57:31.637404Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901262186157795:2469] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:31.637476Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:31.637484Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:31.637491Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715672:0 2025-06-25T14:57:31.637519Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901262186157793:2468] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:31.637578Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901240711319003:2142], Recipient [3:7519901240711319003:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:31.637595Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:31.637631Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-25T14:57:31.637647Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-06-25T14:57:31.637718Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:31.637730Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:2 progress is 2/3 2025-06-25T14:57:31.637742Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-25T14:57:31.637754Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:2 progress is 2/3 2025-06-25T14:57:31.637763Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-25T14:57:31.637777Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-06-25T14:57:31.637924Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901240711319003:2142], Recipient [3:7519901240711319003:2142]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:31.637936Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:31.637959Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-25T14:57:31.637971Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-06-25T14:57:31.638012Z 
node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:31.638019Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:0 progress is 3/3 2025-06-25T14:57:31.638026Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:31.638040Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715672:0 progress is 3/3 2025-06-25T14:57:31.638046Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:31.638056Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-06-25T14:57:31.638091Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519901262186157764:2466] message: TxId: 281474976715672 2025-06-25T14:57:31.638107Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-25T14:57:31.638128Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-25T14:57:31.638138Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:0 2025-06-25T14:57:31.638234Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-25T14:57:31.638248Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-25T14:57:31.638254Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:1 2025-06-25T14:57:31.638267Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-25T14:57:31.638275Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-25T14:57:31.638282Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715672:2 2025-06-25T14:57:31.638308Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-25T14:57:31.638556Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:31.638763Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:31.638822Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901262186157764:2466] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-25T14:57:31.639000Z node 3 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901262186157866:3666], Recipient [3:7519901240711319003:2142]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:31.639018Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:31.639032Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:31.639174Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901262186157865:3665], Recipient [3:7519901240711319003:2142]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:31.639184Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:31.639192Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:31.639368Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901262186157775:3601], Recipient [3:7519901240711319003:2142]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:31.639380Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:31.639387Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:32.248469Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901240711319003:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:32.248513Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:32.248557Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901240711319003:2142], Recipient [3:7519901240711319003:2142]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:32.248573Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:32.852568Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> KqpUniqueIndex::UpdateOnHidenChanges-DataColumn [GOOD] >> KqpIndexes::SelectFromIndexesAndFreeSpaceLogicDoesntTimeout [GOOD] >> KqpIndexes::Uint8Index >> KqpIndexes::SecondaryIndexOrderBy [GOOD] >> KqpIndexes::SecondaryIndexInsert1 |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> data_correctness.py::TestDataCorrectness::test [GOOD] >> KqpMultishardIndex::WriteIntoRenamingSyncIndex [FAIL] >> KqpMultishardIndex::WriteIntoRenamingAsyncIndex >> KqpIndexes::UpsertNoIndexColumns [GOOD] >> KqpIndexes::UpdateOnReadColumns ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateOnHidenChanges-DataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 22293, MsgBus: 4576 2025-06-25T14:57:18.301478Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901209494691787:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:18.301512Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ac0/r3tmp/tmpL63qFw/pdisk_1.dat 2025-06-25T14:57:18.858611Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:18.858691Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:18.860425Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:18.864347Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901209494691768:2080] 1750863438284131 != 1750863438284134 2025-06-25T14:57:18.875160Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22293, node 1 2025-06-25T14:57:18.996836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:18.996860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:18.996870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:18.996971Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4576 2025-06-25T14:57:19.342907Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4576 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:19.732714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:19.754152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:19.920459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:20.087831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:57:20.163374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:21.714159Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901222379595289:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:21.714259Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:21.973763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.044998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.116353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.189344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.259799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.333341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.370507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.440886Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901226674563255:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:22.440992Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:22.444527Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901226674563260:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:22.448479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:22.463466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:57:22.463719Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901226674563262:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:22.538340Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901226674563313:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:23.304434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901209494691787:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:23.316321Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:23.543042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_wo ... T14:57:27.213304Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:27.214205Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901246345179240:2080] 1750863447069521 != 1750863447069524 TServer::EnableGrpc on GrpcPort 14201, node 2 2025-06-25T14:57:27.249970Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:27.250058Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:27.251690Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:27.280402Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:27.280423Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:27.280448Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:27.280566Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23094 TClient is connected to server localhost:23094 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:57:27.756371Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:57:27.773712Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:27.835579Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:28.004057Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:28.074736Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:28.208724Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:30.273501Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901259230082753:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:30.273574Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:30.342383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:30.374668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:30.407811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:30.479732Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:30.517296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:30.558757Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:30.607851Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:30.679414Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901259230083409:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:30.679491Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:30.679712Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901259230083414:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:30.682991Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:30.695353Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519901259230083416:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:30.796558Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519901259230083467:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:31.719203Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:32.084197Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901246345179277:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:32.118419Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:33.602483Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksg6m5a1w3zzwea52g5s2c, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Y2NhYTNjN2UtNzhmMjNkNmYtYTg4MjMzODgtM2QxZjYzMWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-25T14:57:33.602757Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=Y2NhYTNjN2UtNzhmMjNkNmYtYTg4MjMzODgtM2QxZjYzMWE=, ActorId: [2:7519901263525051780:2531], ActorState: ExecuteState, TraceId: 01jyksg6m5a1w3zzwea52g5s2c, Create QueryResponse for error on request, msg: >> KqpMultishardIndex::CheckPushTopSort [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::CheckPushTopSort [GOOD] Test command err: Trying to start YDB, gRPC: 19508, MsgBus: 20513 2025-06-25T14:57:15.390585Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901195019695330:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:15.390683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b4f/r3tmp/tmpnDRqWt/pdisk_1.dat 2025-06-25T14:57:15.870338Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901195019695312:2080] 1750863435389582 != 1750863435389585 2025-06-25T14:57:15.871704Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19508, node 1 2025-06-25T14:57:15.890280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:15.890330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:15.902660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:16.044063Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:16.044089Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:16.044102Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:16.053630Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20513 2025-06-25T14:57:16.420914Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20513 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:16.700634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:16.712704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:16.728959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:16.889389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:17.048646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:17.131196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:18.897200Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901207904598840:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:18.897315Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.150992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.189176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.264706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.296062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.345514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.378966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.458512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.557591Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901212199566809:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.557660Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.557885Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901212199566814:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.561855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:19.582167Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901212199566816:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:19.672454Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901212199566867:3428] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:20.390853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901195019695330:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:20.390936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:20.741056Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901216494534439:3605], Recipient [1:7519901195019695639:2149]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:20.741095Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:50 ... fResponseTime 2025-06-25T14:57:29.352213Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:29.362210Z node 2 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill Trying to start YDB, gRPC: 32054, MsgBus: 13430 2025-06-25T14:57:30.175619Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901257624015766:2146];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b4f/r3tmp/tmpa7Il3P/pdisk_1.dat 2025-06-25T14:57:30.194510Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:57:30.255005Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32054, node 3 2025-06-25T14:57:30.308961Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:30.309079Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:30.369978Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:30.380934Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:30.380956Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:30.380963Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:30.381065Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13430 TClient is connected to server localhost:13430 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:30.998254Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:31.015284Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:31.091915Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:31.216276Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:31.268438Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:31.357939Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:33.650849Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901270508919152:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.650946Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.711139Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:33.788155Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:33.867916Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:33.902446Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:33.943299Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:34.024176Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:34.097498Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:34.194977Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901274803887117:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:34.195077Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:34.195334Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901274803887122:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:34.199164Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:34.216209Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901274803887124:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:34.279274Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901274803887177:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:35.162451Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901257624015766:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:35.162546Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:35.423416Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> GenericFederatedQuery::YdbFilterPushdown [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId1 >> KqpPrefixedVectorIndexes::OrderByCosineLevel1+Nullable-UseSimilarity [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> KqpVectorIndexes::OrderByCosineLevel2+Nullable-UseSimilarity [GOOD] >> KqpVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestUnavailableWithoutNetClassifier >> KqpIndexes::SecondaryIndexInsert1 [GOOD] >> TPQCDTest::TestUnavailableWithoutClustersList >> TPQCDTest::TestRelatedServicesAreRunning >> TPQCDTest::TestPrioritizeLocalDatacenter >> KqpIndexes::SecondaryIndexUsingInJoin2+UseStreamJoin [GOOD] >> KqpIndexes::SecondaryIndexUsingInJoin2-UseStreamJoin >> KqpIndexes::Uint8Index [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectConstant >> GenericFederatedQuery::PostgreSQLOnPremSelectAll [GOOD] >> GenericFederatedQuery::PostgreSQLOnPremSelectConstant >> GenericFederatedQuery::IcebergHadoopSaSelectAll [GOOD] >> GenericFederatedQuery::IcebergHadoopSaSelectConstant >> GenericFederatedQuery::IcebergHiveTokenSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectConstant >> KqpVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity [GOOD] >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel3 >> GenericFederatedQuery::IcebergHiveSaSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectConstant >> KqpPrefixedVectorIndexes::OrderByCosineLevel1-Nullable-UseSimilarity [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectAll [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectConstant >> KqpPrefixedVectorIndexes::OrderByCosineLevel1-Nullable+UseSimilarity >> GenericFederatedQuery::YdbManagedSelectAll [GOOD] >> GenericFederatedQuery::YdbManagedSelectConstant ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexInsert1 [GOOD] Test command err: Trying to start YDB, gRPC: 
32428, MsgBus: 29162 2025-06-25T14:57:16.933069Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901198723749221:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:16.937772Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ae4/r3tmp/tmpmyCWMF/pdisk_1.dat 2025-06-25T14:57:17.483934Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:17.504345Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:17.504414Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 32428, node 1 2025-06-25T14:57:17.505989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:17.556986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:17.557008Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:17.557013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:17.557114Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29162 2025-06-25T14:57:17.942412Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29162 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:18.172175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:18.188942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:18.202998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:18.350580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:18.503388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:18.590473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:20.171790Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901215903619918:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.171900Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.486067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.517974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.554239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.592006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.620570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.649546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.722985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.774623Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901215903620573:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.774680Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.774823Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901215903620578:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.778464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:20.790659Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901215903620580:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:20.885047Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901215903620631:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:21.893067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:21.927371Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901198723749221:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:21.927420Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:21.942206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok ... T14:57:39.608744Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:39.608829Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274137603, Sender [3:7519901279150901201:2245], Recipient [3:7519901279150901036:2149]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046644480 Generation: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 8] Version: 2 } 2025-06-25T14:57:39.608841Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5039: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-25T14:57:39.608867Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 8 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715659 2025-06-25T14:57:39.608904Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 8 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715659 2025-06-25T14:57:39.608910Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715659 2025-06-25T14:57:39.608918Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715659, pathId: [OwnerId: 72057594046644480, LocalPathId: 8], version: 2 2025-06-25T14:57:39.608926Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 8] was 2 2025-06-25T14:57:39.608962Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715659, subscribers: 1 2025-06-25T14:57:39.608986Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [3:7519901296330770670:2304] 2025-06-25T14:57:39.609002Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:39.609086Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:39.609134Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:39.609196Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715659 2025-06-25T14:57:39.609205Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:39.609230Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715659 2025-06-25T14:57:39.609237Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:39.609258Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715659 2025-06-25T14:57:39.609262Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:39.609282Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715659 2025-06-25T14:57:39.609292Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:39.609321Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715659 2025-06-25T14:57:39.609326Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:39.609384Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901296330770670:2304] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 at schemeshard: 72057594046644480 2025-06-25T14:57:39.609537Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901296330770670:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:57:39.609624Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901296330770706:2409], Recipient [3:7519901279150901036:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:39.609648Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:39.609677Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:39.699718Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [3:7519901296330770725:2427], Recipient [3:7519901279150901036:2149]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:39.699752Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:39.699765Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:57:39.699819Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [3:7519901296330770721:2424], Recipient [3:7519901279150901036:2149]: {TEvModifySchemeTransaction txid# 281474976715660 TabletId# 72057594046644480} 2025-06-25T14:57:39.699832Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:57:39.702224Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } TxId: 281474976715660 TabletId: 72057594046644480 Owner: "metadata@system" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:57:39.702551Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046644480] TCreateResourcePool Propose: opId# 281474976715660:0, path# /Root/.metadata/workload_manager/pools/default 2025-06-25T14:57:39.702713Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715660:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/Root/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), at schemeshard: 72057594046644480 2025-06-25T14:57:39.703190Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects 
ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:39.703621Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715660, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" TxId: 281474976715660 SchemeshardId: 72057594046644480 PathId: 8 PathCreateTxId: 281474976715659, at schemeshard: 72057594046644480 2025-06-25T14:57:39.703934Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715660, database: /Root, subject: metadata@system, status: StatusAlreadyExists, reason: Check failed: path: '/Root/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), operation: CREATE RESOURCE POOL, path: default, set owner:metadata@system, add access: +(SR|DS):all-users@well-known, add access: +(SR|DS):root@builtin 2025-06-25T14:57:39.703955Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:39.704126Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901296330770721:2424] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:39.704304Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901296330770725:2427], Recipient [3:7519901279150901036:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:39.704338Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:39.704351Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:39.964815Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901279150901036:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:39.964850Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:39.964897Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901279150901036:2149], Recipient [3:7519901279150901036:2149]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:39.964913Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:40.138828Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> KqpIndexes::UniqAndNoUniqSecondaryIndexWithCover [GOOD] >> TPQCDTest::TestDiscoverClusters >> TPQCDTest::TestUnavailableWithoutBoth >> GenericFederatedQuery::IcebergHadoopTokenSelectAll [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectConstant >> 
GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId1 [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::Uint8Index [GOOD] Test command err: Trying to start YDB, gRPC: 27289, MsgBus: 28945 2025-06-25T14:57:21.063931Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901222386822016:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:21.071982Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001aa2/r3tmp/tmpwyGS6x/pdisk_1.dat 2025-06-25T14:57:21.496612Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901222386821972:2080] 1750863441055937 != 1750863441055940 2025-06-25T14:57:21.511283Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27289, node 1 2025-06-25T14:57:21.513306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:21.513386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:21.516766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:21.576951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:21.576975Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:21.576981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:21.577107Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28945 TClient is connected to server localhost:28945 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:57:22.080478Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:22.138187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:22.148998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:22.167023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:57:22.313067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.461558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:22.550878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:24.032556Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901235271725505:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:24.032705Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:24.326267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:24.361400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:24.399110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:24.424562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:24.451220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:24.486996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:24.555293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:24.640547Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901235271726170:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:24.640629Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:24.640649Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901235271726175:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:24.644260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:24.657629Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901235271726177:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:24.743494Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901235271726228:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:25.756695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:26.021808Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2020: ActorId: [1:7519901243861661275:2473] TxId: 281474976710673. Ctx: { TraceId: 01jyksg001efjah5zjs8x61f9s, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGNlZWZlNmMtYWQ1MGZmMWUtMTYyOTRmNDctNDc2MTEwZmU=, CurrentExecutionId: , Custo ... d_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:35.719998Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901282298400432:2080] 1750863455513021 != 1750863455513024 2025-06-25T14:57:35.736600Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5681, node 3 2025-06-25T14:57:35.807855Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:35.807875Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:35.807885Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:35.807995Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19501 TClient is connected to server localhost:19501 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:36.429771Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:36.448708Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:36.466262Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:36.580327Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:36.625060Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:36.792076Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:36.864983Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:39.212050Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901299478271245:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:39.212154Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:39.276290Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.317709Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.363630Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.407787Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.471356Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.511079Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.586924Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.687524Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901299478271909:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:39.687617Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:39.687992Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901299478271914:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:39.693488Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:39.711143Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901299478271916:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:39.782478Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901299478271967:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:40.514946Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901282298400453:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:40.515016Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:40.914782Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:41.171689Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:41.227689Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) >> KqpMultishardIndex::DataColumnWrite+UseSink [GOOD] >> KqpMultishardIndex::DataColumnSelect ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UniqAndNoUniqSecondaryIndexWithCover [GOOD] Test command err: Trying to start YDB, gRPC: 20649, MsgBus: 9700 2025-06-25T14:57:05.636858Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901150559769958:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:05.636987Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001bec/r3tmp/tmpKUiNoR/pdisk_1.dat 2025-06-25T14:57:06.079651Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:06.097073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:06.097148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:06.102553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> 
Connected TServer::EnableGrpc on GrpcPort 20649, node 1 2025-06-25T14:57:06.261100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:06.261122Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:06.261129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:06.261247Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9700 2025-06-25T14:57:06.652546Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9700 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:06.979226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:07.001731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:07.016022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:07.156975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:07.323985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:07.414725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:09.291951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901167739640747:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:09.292067Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:09.586386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.629524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.663207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.700522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.757269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.806552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.857772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:09.927686Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901167739641408:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:09.927761Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:09.927968Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901167739641413:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:09.931841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:09.947226Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901167739641415:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:10.044977Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901172034608762:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:10.636780Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901150559769958:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:10.636843Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:11.200159Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901176329576334:3598], Recipient [1:7519901150559770244:2146]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:11.200193Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:11.200204Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 7205759404664448 ... 6224037895 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] raw table stats: DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750863456090 LastUpdateTime: 1750863456090 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:57:41.644291Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519901264574628416:2289], Recipient [3:7519901260279660656:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037893 TableLocalId: 3 Generation: 1 Round: 0 TableStats { DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750863456089 LastUpdateTime: 1750863456089 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037893 NodeId: 3 StartTime: 1750863451529 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:57:41.644302Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:57:41.644338Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037893 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 800 rowCount 3 cpuUsage 0 2025-06-25T14:57:41.644405Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at 
tablet 72057594046644480 from shard 72075186224037893 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] raw table stats: DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750863456089 LastUpdateTime: 1750863456089 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:57:41.644527Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519901264574628418:2290], Recipient [3:7519901260279660656:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037896 TableLocalId: 3 Generation: 1 Round: 0 TableStats { DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750863456088 LastUpdateTime: 1750863456088 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037896 NodeId: 3 StartTime: 1750863451530 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:57:41.644541Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:57:41.644557Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037896 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 800 rowCount 3 cpuUsage 0 2025-06-25T14:57:41.644619Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037896 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] raw table stats: DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750863456088 LastUpdateTime: 1750863456088 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:57:41.644732Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519901264574628414:2287], Recipient [3:7519901260279660656:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037891 TableLocalId: 3 Generation: 1 Round: 0 TableStats { DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750863456089 LastUpdateTime: 1750863456089 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: 
false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037891 NodeId: 3 StartTime: 1750863451529 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:57:41.644742Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:57:41.644756Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037891 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 800 rowCount 3 cpuUsage 0 2025-06-25T14:57:41.644817Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037891 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] raw table stats: DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750863456089 LastUpdateTime: 1750863456089 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:57:41.644927Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519901264574628415:2288], Recipient [3:7519901260279660656:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037897 TableLocalId: 3 Generation: 1 Round: 0 TableStats { DataSize: 1088 RowCount: 6 IndexSize: 0 InMemSize: 1088 LastAccessTime: 1750863456090 LastUpdateTime: 1750863456090 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 6 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037897 NodeId: 3 StartTime: 1750863451529 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:57:41.644936Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:57:41.644949Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037897 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 1088 rowCount 6 cpuUsage 0 2025-06-25T14:57:41.645007Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037897 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] raw table stats: DataSize: 1088 RowCount: 6 IndexSize: 0 InMemSize: 1088 LastAccessTime: 1750863456090 LastUpdateTime: 1750863456090 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 6 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false 
LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:57:41.652991Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [3:7519901307524303962:3947], Recipient [3:7519901260279660656:2153]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:41.653026Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:41.653038Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:57:41.653522Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519901264574628391:2283], Recipient [3:7519901260279660656:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037894 TableLocalId: 3 Generation: 1 Round: 0 TableStats { DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750863456085 LastUpdateTime: 1750863456085 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82488 } ShardState: 2 UserTablePartOwners: 72075186224037894 NodeId: 3 StartTime: 1750863451528 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:57:41.653545Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:57:41.653586Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037894 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 800 rowCount 3 cpuUsage 0 2025-06-25T14:57:41.653679Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037894 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] raw table stats: DataSize: 800 RowCount: 3 IndexSize: 0 InMemSize: 800 LastAccessTime: 1750863456085 LastUpdateTime: 1750863456085 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 >> KqpMultishardIndex::DataColumnWrite-UseSink [FAIL] >> KqpMultishardIndex::DuplicateUpsert |89.4%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel3 [GOOD] >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix-Nullable-Covered >> KqpIndexes::DirectAccessToIndexImplTable [GOOD] >> TPQCDTest::TestUnavailableWithoutClustersList [GOOD] >> KqpVectorIndexes::OrderByCosineLevel1+Nullable-UseSimilarity [GOOD] >> KqpVectorIndexes::OrderByCosineLevel1-Nullable+UseSimilarity >> TPQCDTest::TestRelatedServicesAreRunning [GOOD] >> TPQCDTest::TestPrioritizeLocalDatacenter [GOOD] >> KqpUniqueIndex::UpdateOnNullInComplexFk [GOOD] >> 
GenericFederatedQuery::ClickHouseManagedSelectAll [GOOD] >> GenericFederatedQuery::ClickHouseManagedSelectConstant ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestUnavailableWithoutClustersList [GOOD] Test command err: 2025-06-25T14:57:41.310020Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901307394082685:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:41.310093Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c0c/r3tmp/tmpRGADgZ/pdisk_1.dat 2025-06-25T14:57:41.717974Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:41.778005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:41.778131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:41.782145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31146, node 1 2025-06-25T14:57:41.949564Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000c0c/r3tmp/yandexE3LNPd.tmp 2025-06-25T14:57:41.949589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000c0c/r3tmp/yandexE3LNPd.tmp 2025-06-25T14:57:41.955537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000c0c/r3tmp/yandexE3LNPd.tmp 2025-06-25T14:57:41.955689Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:42.322424Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:44.048435Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901320278985231:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:44.048542Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901320278985219:2321], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:44.048639Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:44.058712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:44.077061Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901320278985233:2325], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710657 completed, doublechecking } 2025-06-25T14:57:44.217198Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901320278985298:2354] txid# 281474976710658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:44.713296Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901320278985315:2333], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:57:44.714143Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZTkzNWU2YjktMWRjYjA4NDItMTU3NGYzMTEtNWM3Y2VkMGQ=, ActorId: [1:7519901320278985217:2320], ActorState: ExecuteState, TraceId: 01jyksghk9cbbnj6p453gnzfjp, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:57:44.734091Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::DirectAccessToIndexImplTable [GOOD] Test command err: Trying to start YDB, gRPC: 32027, MsgBus: 19060 2025-06-25T14:57:18.726463Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901209347071732:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:18.729711Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ab1/r3tmp/tmpxqH6A2/pdisk_1.dat 2025-06-25T14:57:19.114245Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:19.116438Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901209347071635:2080] 1750863438710622 != 1750863438710625 TServer::EnableGrpc on GrpcPort 32027, node 1 2025-06-25T14:57:19.162938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:19.163013Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:19.181445Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:19.196855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:19.196888Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:19.196900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:19.197009Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19060 2025-06-25T14:57:19.725832Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to 
server localhost:19060 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:20.011618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:20.056510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:20.075073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:20.258380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:20.420416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:20.508025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:22.023778Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901226526942450:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:22.023866Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:22.294035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.339550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.377334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.417863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.476632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.553157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.602474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.677953Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901226526943110:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:22.678060Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:22.678205Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901226526943115:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:22.682079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:22.695724Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901226526943117:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:22.800090Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901226526943170:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:23.714884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901209347071732:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:23.714941Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:23.881418Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901230821910700:3596], Recipient [1:7519901209347071962:2146]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:23.881457Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:50 ... ist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:57:42.450020Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmI0M2U1ODYtYzhjNmE2MmYtMTdlYmVhNi03MzU0ZWY1OQ==, ActorId: [3:7519901310188184407:2574], ActorState: ExecuteState, TraceId: 01jyksgfzre6rkdv3exbfatqhr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:57:42.496847Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519901310188184421:3965], for# user@builtin, access# DescribeSchema 2025-06-25T14:57:42.496873Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519901310188184421:3965], for# user@builtin, access# DescribeSchema 2025-06-25T14:57:42.501215Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519901310188184418:2579], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/SecondaryKeys/Index/indexImplTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:57:42.503266Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmI0M2U1ODYtYzhjNmE2MmYtMTdlYmVhNi03MzU0ZWY1OQ==, ActorId: [3:7519901310188184407:2574], ActorState: ExecuteState, TraceId: 01jyksgg208rqfyzcppw3x1pek, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:57:42.544907Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519901310188184430:3969], for# user@builtin, access# DescribeSchema 2025-06-25T14:57:42.544965Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519901310188184430:3969], for# user@builtin, access# DescribeSchema 2025-06-25T14:57:42.549836Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519901310188184426:2583], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Cannot find table 'db.[/Root/SecondaryKeys]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:57:42.551860Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmI0M2U1ODYtYzhjNmE2MmYtMTdlYmVhNi03MzU0ZWY1OQ==, ActorId: [3:7519901310188184407:2574], ActorState: ExecuteState, TraceId: 01jyksgg3r8gz2mpwakqzp1sg4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:57:42.588163Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519901310188184437:3972], for# user@builtin, access# DescribeSchema 2025-06-25T14:57:42.588187Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519901310188184437:3972], for# user@builtin, access# DescribeSchema 2025-06-25T14:57:42.589492Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519901310188184434:2586], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Cannot find table 'db.[/Root/SecondaryKeys/Index/indexImplTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:57:42.589782Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmI0M2U1ODYtYzhjNmE2MmYtMTdlYmVhNi03MzU0ZWY1OQ==, ActorId: [3:7519901310188184407:2574], ActorState: ExecuteState, TraceId: 01jyksgg5ac9byt1bempxtyhpb, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:57:42.612294Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519901310188184444:3975], for# user@builtin, access# DescribeSchema 2025-06-25T14:57:42.612335Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519901310188184444:3975], for# user@builtin, access# DescribeSchema 2025-06-25T14:57:42.613680Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519901310188184441:2589], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:92: Error: At function: KiWriteTable!
:2:92: Error: Cannot find table 'db.[/Root/SecondaryKeys/Index/indexImplTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:57:42.615677Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmI0M2U1ODYtYzhjNmE2MmYtMTdlYmVhNi03MzU0ZWY1OQ==, ActorId: [3:7519901310188184407:2574], ActorState: ExecuteState, TraceId: 01jyksgg67cfnwxnqtxxq6jk19, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:57:42.654230Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519901310188184451:3978], for# user@builtin, access# DescribeSchema 2025-06-25T14:57:42.654257Z node 3 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [3:7519901310188184451:3978], for# user@builtin, access# DescribeSchema 2025-06-25T14:57:42.655614Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519901310188184448:2592], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:83: Error: At function: KiDeleteTable!
:2:83: Error: Cannot find table 'db.[/Root/SecondaryKeys/Index/indexImplTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:57:42.657390Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmI0M2U1ODYtYzhjNmE2MmYtMTdlYmVhNi03MzU0ZWY1OQ==, ActorId: [3:7519901310188184407:2574], ActorState: ExecuteState, TraceId: 01jyksgg726hngyg0spttdpf1t, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:57:42.718081Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:43.559822Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519901314483151886:2635], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:2:29: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017
: Error: Execution, code: 1060
:2:29: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017 2025-06-25T14:57:43.560047Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmI0M2U1ODYtYzhjNmE2MmYtMTdlYmVhNi03MzU0ZWY1OQ==, ActorId: [3:7519901310188184407:2574], ActorState: ExecuteState, TraceId: 01jyksgh3a2gff6wmma2awr2m7, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T14:57:43.614819Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519901314483151896:2639], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:2:92: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017
: Error: Execution, code: 1060
:2:92: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017 2025-06-25T14:57:43.615129Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmI0M2U1ODYtYzhjNmE2MmYtMTdlYmVhNi03MzU0ZWY1OQ==, ActorId: [3:7519901310188184407:2574], ActorState: ExecuteState, TraceId: 01jyksgh4wc5v1kc6tts9hm42k, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T14:57:43.657775Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519901314483151907:2643], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:2:83: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017
: Error: Execution, code: 1060
:2:83: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017 2025-06-25T14:57:43.657986Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmI0M2U1ODYtYzhjNmE2MmYtMTdlYmVhNi03MzU0ZWY1OQ==, ActorId: [3:7519901310188184407:2574], ActorState: ExecuteState, TraceId: 01jyksgh68fdbrkt5z9hkpj6my, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T14:57:44.565556Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519901318778119339:2685], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:2:29: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017
: Error: Execution, code: 1060
:2:29: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017 2025-06-25T14:57:44.565820Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmI0M2U1ODYtYzhjNmE2MmYtMTdlYmVhNi03MzU0ZWY1OQ==, ActorId: [3:7519901310188184407:2574], ActorState: ExecuteState, TraceId: 01jyksgj2s81syp3xe9mja5dc7, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T14:57:44.650681Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519901318778119349:2689], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:2:92: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017
: Error: Execution, code: 1060
:2:92: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017 2025-06-25T14:57:44.650998Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmI0M2U1ODYtYzhjNmE2MmYtMTdlYmVhNi03MzU0ZWY1OQ==, ActorId: [3:7519901310188184407:2574], ActorState: ExecuteState, TraceId: 01jyksgj59f1fvvcjx3c6vc7sf, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T14:57:44.685571Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7519901318778119359:2693], status: BAD_REQUEST, issues:
: Error: Execution, code: 1060
:2:83: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017
: Error: Execution, code: 1060
:2:83: Error: Writing to index implementation tables is not allowed. Table: `/Root/SecondaryKeys/Index/indexImplTable`., code: 2017 2025-06-25T14:57:44.685785Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=3&id=NmI0M2U1ODYtYzhjNmE2MmYtMTdlYmVhNi03MzU0ZWY1OQ==, ActorId: [3:7519901310188184407:2574], ActorState: ExecuteState, TraceId: 01jyksgj6k3nsr7xq8aq5dz9jf, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: >> TPQCDTest::TestUnavailableWithoutBoth [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId2 [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestRelatedServicesAreRunning [GOOD] Test command err: 2025-06-25T14:57:41.321837Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901307978178420:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:41.321900Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c1e/r3tmp/tmpJ6LRvn/pdisk_1.dat 2025-06-25T14:57:41.728520Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:41.772704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:41.772788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:41.773778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1451, node 1 2025-06-25T14:57:41.952997Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000c1e/r3tmp/yandex5hfaXu.tmp 2025-06-25T14:57:41.953021Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000c1e/r3tmp/yandex5hfaXu.tmp 2025-06-25T14:57:41.954879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000c1e/r3tmp/yandex5hfaXu.tmp 2025-06-25T14:57:41.955178Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:42.336848Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:4298 PQClient connected to localhost:1451 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:42.501990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:42.523522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:57:42.533811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-25T14:57:44.221888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901320863080992:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:44.221906Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901320863080960:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:44.221978Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:44.225552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:44.239286Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901320863080999:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:57:44.330838Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901320863081064:2389] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:44.656860Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901320863081074:2303], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:57:44.665158Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Y2U5NjkwOWYtNThhMGRiZjctNDdjYTJkNDktOGFhODQ5YWU=, ActorId: [1:7519901320863080957:2288], ActorState: ExecuteState, TraceId: 01jyksghrbfpy58tsqbn62vhex, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:57:44.669729Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:57:44.706176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:44.840443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:44.913804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:57:45.314736Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyksgjjm3g6kev40dwh3yvar, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODQ0Nzc3MmEtNmNhZjlhNjMtNjM5NGQ2YjQtZWU4NzkwYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestPrioritizeLocalDatacenter [GOOD] Test command err: 2025-06-25T14:57:41.379182Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901307212427994:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:41.379412Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c27/r3tmp/tmpwwNj2a/pdisk_1.dat 2025-06-25T14:57:41.844975Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:41.845787Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:41.845856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 16926, node 1 2025-06-25T14:57:41.852537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:41.953039Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000c27/r3tmp/yandexYD1RnD.tmp 2025-06-25T14:57:41.953075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000c27/r3tmp/yandexYD1RnD.tmp 2025-06-25T14:57:41.954738Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000c27/r3tmp/yandexYD1RnD.tmp 2025-06-25T14:57:41.954893Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13074 PQClient connected to localhost:16926 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:57:42.379091Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:42.536195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 2025-06-25T14:57:42.576215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:57:42.594056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 2025-06-25T14:57:44.406788Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901320097330351:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:44.407035Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:44.407424Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901320097330390:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:44.410864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:44.420994Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901320097330392:2299], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:57:44.666223Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901320097330457:2390] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:44.706296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:44.788385Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901320097330465:2305], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:57:44.788738Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZDBjZjI0MmItZDM0ODcwZjQtNGY1NjY1OWEtYjMzNTAxNDg=, ActorId: [1:7519901320097330348:2290], ActorState: ExecuteState, TraceId: 01jyksghy29p548hfmnqab9tpf, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:57:44.790827Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:57:44.829200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:44.912181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:57:45.314459Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyksgjha7pfjmtm4jpaqx14y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmJhYmZjMzMtYjZhYThjNDctM2FmOWI0OC1kYzQ1MjM2NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateOnNullInComplexFk [GOOD] Test command err: Trying to start YDB, gRPC: 6527, MsgBus: 16719 2025-06-25T14:57:16.276296Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901198767697091:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:16.276871Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b00/r3tmp/tmp1kO0GH/pdisk_1.dat 2025-06-25T14:57:16.672483Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901198767696935:2080] 1750863436255158 != 1750863436255161 2025-06-25T14:57:16.722050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:16.722151Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:16.731385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:16.756646Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6527, node 1 2025-06-25T14:57:16.884819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:16.884847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:16.884854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:16.884939Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16719 2025-06-25T14:57:17.256848Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16719 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
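Note on the recurring SCHEME_ERROR in the TPQCD* blocks above: the cluster-discovery tests query `/Root/PQ/Config/V2/Cluster` before the harness has created it, so the first KiReadTable compile fails and the cluster tracker reports "failed to list clusters"; the harness then creates the table and seeds it with the `=== Init DC:` UPSERT that appears in the log. A minimal seeding sketch follows: the UPSERT is copied from the log, while the CREATE TABLE (column types and primary key) is an assumption rather than the test's actual schema.

-- Sketch only: column types and PRIMARY KEY are assumptions; the UPSERT matches the log.
CREATE TABLE `/Root/PQ/Config/V2/Cluster` (
    name Utf8,
    balancer Utf8,
    local Bool,
    enabled Bool,
    weight Uint64,
    PRIMARY KEY (name)
);
UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight)
VALUES ("dc1", "localhost", true, true, 1000),
       ("dc2", "dc2.logbroker.yandex.net", false, true, 1000);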
2025-06-25T14:57:17.596785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:17.621226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:17.642886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:17.780966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:17.971970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:18.046041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:19.561518Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901211652600469:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.561643Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.837818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.874805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.946584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.988710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.061989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.098846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.134675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.203160Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901215947568424:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.203232Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.203428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901215947568429:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.207250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:20.220720Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901215947568431:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:20.276691Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901215947568482:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:21.270314Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901198767697091:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:21.270369Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:21.347555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_w ... db/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:28.004108Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901250282940883:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:28.004208Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:28.074191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:28.109245Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:28.191534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:28.225781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:28.293827Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:28.337101Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:28.412051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:28.494614Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901250282941546:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:28.494786Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:28.495176Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901250282941551:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:28.499290Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:28.511103Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519901250282941553:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:28.585954Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519901250282941604:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:29.615089Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:29.893864Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901233103070095:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:29.893941Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:35.613686Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksg8egfstmjpcknqgtmf82, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Mzk0YWVhMzgtMzdkYTMxNDAtNTBlOTVjNTUtYTMwMWRkNGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-25T14:57:35.626756Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=Mzk0YWVhMzgtMzdkYTMxNDAtNTBlOTVjNTUtYTMwMWRkNGE=, ActorId: [2:7519901254577909938:2531], ActorState: ExecuteState, TraceId: 01jyksg8egfstmjpcknqgtmf82, Create QueryResponse for error on request, msg: 2025-06-25T14:57:40.033465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:57:40.033494Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:40.074149Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037933 not found 2025-06-25T14:57:40.074188Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037931 not found 2025-06-25T14:57:40.074205Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037939 not found 2025-06-25T14:57:40.074238Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037927 not found 2025-06-25T14:57:40.074259Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037934 not found 2025-06-25T14:57:40.074273Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037940 not found 2025-06-25T14:57:40.075828Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037932 not found 2025-06-25T14:57:40.075857Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle 
TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037926 not found 2025-06-25T14:57:43.184061Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksgfdh2qq52d2bm68zmqqr, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Mzk0YWVhMzgtMzdkYTMxNDAtNTBlOTVjNTUtYTMwMWRkNGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-25T14:57:43.184358Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=Mzk0YWVhMzgtMzdkYTMxNDAtNTBlOTVjNTUtYTMwMWRkNGE=, ActorId: [2:7519901254577909938:2531], ActorState: ExecuteState, TraceId: 01jyksgfdh2qq52d2bm68zmqqr, Create QueryResponse for error on request, msg: 2025-06-25T14:57:44.462003Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksggyh8kqnqtphjf4vpc4y, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Mzk0YWVhMzgtMzdkYTMxNDAtNTBlOTVjNTUtYTMwMWRkNGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-25T14:57:44.462253Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=Mzk0YWVhMzgtMzdkYTMxNDAtNTBlOTVjNTUtYTMwMWRkNGE=, ActorId: [2:7519901254577909938:2531], ActorState: ExecuteState, TraceId: 01jyksggyh8kqnqtphjf4vpc4y, Create QueryResponse for error on request, msg: 2025-06-25T14:57:45.024497Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037937 not found 2025-06-25T14:57:45.031982Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037938 not found 2025-06-25T14:57:45.044081Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037930 not found 2025-06-25T14:57:45.044116Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037929 not found 2025-06-25T14:57:45.057987Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037936 not found 2025-06-25T14:57:45.058241Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037935 not found 2025-06-25T14:57:45.702044Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksgk6r0981zszxhy32cx61, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Mzk0YWVhMzgtMzdkYTMxNDAtNTBlOTVjNTUtYTMwMWRkNGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
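Note on the repeated "TKqpLiteralExecuter, TKqpEnsure failed" entries above: in KqpUniqueIndex::UpdateOnNullInComplexFk these appear to be the expected precondition failures raised when an update would violate a unique secondary index, and the session answers each one with "Create QueryResponse for error on request" instead of aborting the run. The sketch below is purely illustrative; the table, columns, and index are invented for the example, are not taken from the test sources, and the GLOBAL UNIQUE SYNC index form is assumed to be available.

-- Illustrative only: schema and values are assumptions, not the test's schema.
CREATE TABLE `/Root/ExampleFk` (
    Key Uint64,
    Fk Utf8,
    PRIMARY KEY (Key),
    INDEX FkIndex GLOBAL UNIQUE SYNC ON (Fk)
);
UPSERT INTO `/Root/ExampleFk` (Key, Fk) VALUES (1u, "a");
-- A second row with the same indexed value trips the uniqueness check and is rejected.
UPSERT INTO `/Root/ExampleFk` (Key, Fk) VALUES (2u, "a");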
2025-06-25T14:57:45.702269Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=Mzk0YWVhMzgtMzdkYTMxNDAtNTBlOTVjNTUtYTMwMWRkNGE=, ActorId: [2:7519901254577909938:2531], ActorState: ExecuteState, TraceId: 01jyksgk6r0981zszxhy32cx61, Create QueryResponse for error on request, msg: >> KqpVectorIndexes::OrderByCosineLevel2-Nullable-UseSimilarity [GOOD] >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover+Nullable ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestUnavailableWithoutBoth [GOOD] Test command err: 2025-06-25T14:57:43.003079Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901311507901006:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:43.003240Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bfa/r3tmp/tmpg08Iuj/pdisk_1.dat 2025-06-25T14:57:43.461424Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:43.476553Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901311507900909:2080] 1750863462919090 != 1750863462919093 2025-06-25T14:57:43.508033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:43.508795Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:43.513294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61314, node 1 2025-06-25T14:57:43.586465Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:43.586485Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:43.586500Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:43.586684Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:44.004431Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:45.706090Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901324392803479:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:45.706090Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901324392803490:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:45.706198Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:45.717100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:45.739351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:45.739575Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901324392803493:2329], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710657 completed, doublechecking } 2025-06-25T14:57:45.870665Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901324392803554:2355] txid# 281474976710658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:46.200741Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901324392803567:2337], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:57:46.201039Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ODMxM2ZmM2QtYTFkZDZlZWItOGQ4Y2VlNzktYWExMjNmODQ=, ActorId: [1:7519901324392803477:2324], ActorState: ExecuteState, TraceId: 01jyksgk78bpkhnmxff427810p, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:57:46.211140Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> TPQCDTest::TestUnavailableWithoutNetClassifier [GOOD] >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel3 [GOOD] |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::RemoveWithAnotherTokenTest >> TGRpcCmsTest::DescribeOptionsTest >> TGRpcCmsTest::SimpleTenantsTest >> TGRpcCmsTest::AuthTokenTest >> TSequence::CreateSequenceParallel >> KqpIndexes::SecondaryIndexUsingInJoin2-UseStreamJoin [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestUnavailableWithoutNetClassifier [GOOD] Test command err: 2025-06-25T14:57:41.312019Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901305183250127:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:41.312093Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c16/r3tmp/tmp0geZFb/pdisk_1.dat 2025-06-25T14:57:41.750118Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:41.768064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:41.768147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:41.771935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14178, node 1 2025-06-25T14:57:41.948879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:41.948900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:41.948908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:41.949006Z node 1 :NET_CLASSIFIER 
ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:42.328722Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63459 PQClient connected to localhost:14178 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:42.506898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:57:44.278330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901318068152705:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:44.278454Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:44.280018Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901318068152718:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:44.283381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:44.296996Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901318068152720:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:57:44.356888Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901318068152787:2390] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:44.656875Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901318068152795:2305], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:57:44.664216Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Mjg0ZGM1YmItOWU3NGU5MzctZDAyOGZkNTctYzE1YmQ5Njc=, ActorId: [1:7519901318068152702:2292], ActorState: ExecuteState, TraceId: 01jyksghtmax4e9f35mge803x5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:57:44.669852Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:57:44.708159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:44.878104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:44.962676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:57:45.315131Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyksgjk9e7ekkjvrz03190br, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzYyMmI1YjItYzA0NWZiN2ItZDU0NWY0M2ItMjU1YWM1ZjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T14:57:46.312506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901305183250127:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:46.312610Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel3 [GOOD] Test command err: Trying to start YDB, gRPC: 11418, MsgBus: 2085 2025-06-25T14:57:11.239567Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901178110482194:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:11.239716Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001bc5/r3tmp/tmpy5Ekf4/pdisk_1.dat 2025-06-25T14:57:11.703896Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:11.724710Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:11.724818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 11418, node 1 2025-06-25T14:57:11.726636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:11.832887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:11.832921Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:11.832933Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:11.833039Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2085 2025-06-25T14:57:12.248528Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2085 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:12.470754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:12.492695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:57:12.513015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:12.676965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:12.824165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:12.911946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:14.473870Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901190995385676:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:14.473966Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:14.762306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.798237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.830167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.861364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.886301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.953081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:14.982848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.047816Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901195290353627:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.047882Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.048052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901195290353632:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.051710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:15.065743Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901195290353634:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:15.169549Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901195290353685:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:16.174830Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901199585321255:3593], Recipient [1:7519901178110482475:2144]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:16.174871Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:16.174882Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:57:16.174919Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519901199585321251:3590], Recipient [1:7519901178110482475:2144]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-25T14:57:16.174952Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvMo ... peration IsReadyToDone TxId: 281474976710771 ready parts: 1/1 2025-06-25T14:57:48.355552Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710771, ready parts: 1/1, is published: true 2025-06-25T14:57:48.355586Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:7519901310192595276:2144] message: TxId: 281474976710771 2025-06-25T14:57:48.355613Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710771 ready parts: 1/1 2025-06-25T14:57:48.355633Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710771:0 2025-06-25T14:57:48.355642Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710771:0 2025-06-25T14:57:48.355687Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 6 2025-06-25T14:57:48.355929Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:48.355977Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [2:7519901310192595276:2144] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710771 at schemeshard: 72057594046644480 2025-06-25T14:57:48.356067Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124998, Sender [2:7519901310192595276:2144], Recipient [2:7519901310192595276:2144]: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710771 2025-06-25T14:57:48.356090Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5113: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletionResult 2025-06-25T14:57:48.356100Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: 
TEvNotifyTxCompletionResult: txId# 281474976710771 2025-06-25T14:57:48.356116Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976710771 2025-06-25T14:57:48.356160Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1930: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, id# 281474976715674, txId# 281474976710771 2025-06-25T14:57:48.356212Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1933: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, TIndexBuildInfo: TBuildInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: emb, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7519901331667434259:2497], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710767, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710771, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 43, upload bytes: 787, read rows: 40, read bytes: 600 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, txId# 281474976710771 2025-06-25T14:57:48.356263Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:48.356515Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:48.356587Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Unlocking 2025-06-25T14:57:48.356641Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Unlocking TBuildInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: emb, State: Unlocking, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7519901331667434259:2497], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710767, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710771, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 43, upload bytes: 787, read rows: 40, read bytes: 600 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:57:48.356660Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:48.356675Z node 2 :BUILD_INDEX INFO: 
schemeshard_build_index_tx_base.cpp:24: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-25T14:57:48.356923Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:48.357005Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1129: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Done 2025-06-25T14:57:48.357056Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1130: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Done TBuildInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index, IndexColumn: emb, State: Done, IsBroken: 0, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:7519901331667434259:2497], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710767, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710771, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 43, upload bytes: 787, read rows: 40, read bytes: 600 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-25T14:57:48.357073Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:333: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 281474976715674, subscribers count# 1 2025-06-25T14:57:48.357092Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:48.357144Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:48.357186Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [2:7519901331667434259:2497] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715674 at schemeshard: 72057594046644480 2025-06-25T14:57:48.357455Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274792450, Sender [2:7519901331667434259:2497], Recipient [2:7519901310192595276:2144]: NKikimrIndexBuilder.TEvGetRequest DatabaseName: "/Root" IndexBuildId: 281474976715674 2025-06-25T14:57:48.357486Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5090: StateWork, processing event TEvIndexBuilder::TEvGetRequest 2025-06-25T14:57:48.357564Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/Root" IndexBuildId: 281474976715674 2025-06-25T14:57:48.357781Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:104: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 281474976715674 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "index" index_columns: "emb" global_vector_kmeans_tree_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1750863467 } EndTime { seconds: 
1750863468 } UserSID: "" } 2025-06-25T14:57:48.357806Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:48.357842Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:48.357969Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [2:7519901331667434259:2497] msg type: 274792451 msg: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 281474976715674 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "index" index_columns: "emb" global_vector_kmeans_tree_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1750863467 } EndTime { seconds: 1750863468 } UserSID: "" } at schemeshard: 72057594046644480 2025-06-25T14:57:48.358774Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [2:7519901331667434262:3716], Recipient [2:7519901310192595276:2144]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:48.358802Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:48.358811Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:48.390454Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [2:7519901335962402544:4477], Recipient [2:7519901310192595276:2144]: NKikimrSchemeOp.TDescribePath Path: "/Root/TestTable" Options { ShowPrivateTable: false } 2025-06-25T14:57:48.390515Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:57:48.576672Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519901310192595276:2144]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:48.576713Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:48.576761Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:7519901310192595276:2144], Recipient [2:7519901310192595276:2144]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:48.576779Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpMultishardIndex::DuplicateUpsert [GOOD] >> GenericFederatedQuery::IcebergHadoopSaSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopSaSelectCount >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken [GOOD] >> GenericFederatedQuery::YdbManagedSelectConstant [GOOD] >> GenericFederatedQuery::YdbSelectCount >> GenericFederatedQuery::PostgreSQLOnPremSelectConstant [GOOD] >> GenericFederatedQuery::PostgreSQLSelectCount >> GenericFederatedQuery::IcebergHiveSaSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectCount ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexUsingInJoin2-UseStreamJoin [GOOD] Test command err: Trying to start YDB, gRPC: 24700, 
MsgBus: 24529 2025-06-25T14:57:22.601055Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901225435996045:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:22.601082Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a93/r3tmp/tmpX4gS3n/pdisk_1.dat 2025-06-25T14:57:23.132560Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901225435996025:2080] 1750863442599116 != 1750863442599119 2025-06-25T14:57:23.139143Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24700, node 1 2025-06-25T14:57:23.146726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:23.146808Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:23.148714Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:23.222986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:23.223016Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:23.223023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:23.223730Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24529 2025-06-25T14:57:23.645985Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24529 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:23.867401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:23.881819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:23.895733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:24.028641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:24.159068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:24.244119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:25.803057Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901238320899561:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:25.803135Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:26.067962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:26.102536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:26.135498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:26.205701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:26.237506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:26.285567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:26.366025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:26.468599Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901242615867527:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:26.468713Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:26.468975Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901242615867532:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:26.472164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:26.482554Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901242615867534:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:26.582036Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901242615867585:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:27.547768Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901246910835155:3598], Recipient [1:7519901225435996341:2137]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:27.547813Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:27.547825Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:57:27.547861Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519901246910835151:3595], Recipient [1:7519901225435996341:2137]: {TEvModifySc ... hemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:47.296774Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710673:0, at schemeshard: 72057594046644480 2025-06-25T14:57:47.296790Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:47.296859Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710673:2, at schemeshard: 72057594046644480 2025-06-25T14:57:47.296871Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:47.296922Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710673:0, at schemeshard: 72057594046644480 2025-06-25T14:57:47.296937Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:47.296947Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976710673:0 2025-06-25T14:57:47.296996Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901333787072188:2486] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710673 at schemeshard: 72057594046644480 2025-06-25T14:57:47.297057Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710673:2, at schemeshard: 72057594046644480 2025-06-25T14:57:47.297070Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:47.297078Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976710673:2 2025-06-25T14:57:47.297106Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901333787072187:2485] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710673 at schemeshard: 
72057594046644480 2025-06-25T14:57:47.297168Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901308017265975:2151], Recipient [3:7519901308017265975:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:47.297191Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:47.297257Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710673:0, at schemeshard: 72057594046644480 2025-06-25T14:57:47.297285Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976710673:0 ProgressState 2025-06-25T14:57:47.297355Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:47.297374Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710673:0 progress is 2/3 2025-06-25T14:57:47.297385Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710673 ready parts: 2/3 2025-06-25T14:57:47.297400Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710673:0 progress is 2/3 2025-06-25T14:57:47.297410Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710673 ready parts: 2/3 2025-06-25T14:57:47.297425Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710673, ready parts: 2/3, is published: true 2025-06-25T14:57:47.297575Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [3:7519901308017265975:2151], Recipient [3:7519901308017265975:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:47.297593Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:47.297617Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710673:2, at schemeshard: 72057594046644480 2025-06-25T14:57:47.297631Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976710673:2 ProgressState 2025-06-25T14:57:47.297673Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T14:57:47.297684Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710673:2 progress is 3/3 2025-06-25T14:57:47.297693Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710673 ready parts: 3/3 2025-06-25T14:57:47.297708Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710673:2 progress is 3/3 2025-06-25T14:57:47.297716Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710673 ready parts: 3/3 2025-06-25T14:57:47.297727Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710673, ready parts: 3/3, is 
published: true 2025-06-25T14:57:47.297764Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7519901333787072153:2483] message: TxId: 281474976710673 2025-06-25T14:57:47.297780Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710673 ready parts: 3/3 2025-06-25T14:57:47.297801Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710673:0 2025-06-25T14:57:47.297810Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710673:0 2025-06-25T14:57:47.297903Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 4 2025-06-25T14:57:47.297916Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710673:1 2025-06-25T14:57:47.297921Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710673:1 2025-06-25T14:57:47.297936Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 21] was 3 2025-06-25T14:57:47.297943Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710673:2 2025-06-25T14:57:47.297949Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710673:2 2025-06-25T14:57:47.297977Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 3 2025-06-25T14:57:47.298238Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:47.298420Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:47.298488Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [3:7519901333787072153:2483] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710673 at schemeshard: 72057594046644480 2025-06-25T14:57:47.298631Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901333787072255:3739], Recipient [3:7519901308017265975:2151]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:47.298653Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:47.298664Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:47.298698Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901333787072254:3738], Recipient [3:7519901308017265975:2151]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:47.298717Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:47.298732Z node 3 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:47.298977Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [3:7519901333787072161:3673], Recipient [3:7519901308017265975:2151]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:47.298994Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:47.299001Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T14:57:48.008236Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901308017265975:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:48.008281Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:48.008341Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901308017265975:2151], Recipient [3:7519901308017265975:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:48.008358Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:49.009167Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901308017265975:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:49.009207Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:49.009245Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901308017265975:2151], Recipient [3:7519901308017265975:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:49.009260Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover-Nullable [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectCount >> TSequence::CreateSequenceParallel [GOOD] >> TSequence::CreateSequenceSequential >> GenericFederatedQuery::IcebergHiveBasicSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectCount >> TPQCDTest::TestDiscoverClusters [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectCount >> GenericFederatedQuery::IcebergHadoopTokenSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectCount >> TSequence::CreateSequenceSequential [GOOD] >> TSequence::CreateSequenceInsideTableThenDropSequence ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken [GOOD] Test command err: Trying to start YDB, gRPC: 64875, MsgBus: 22540 2025-06-25T14:57:29.352832Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901255651637756:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:29.356994Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e9e/r3tmp/tmpFg6GUx/pdisk_1.dat 2025-06-25T14:57:29.844183Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:29.873275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:29.873396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:29.874614Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64875, node 1 2025-06-25T14:57:30.097111Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:30.097140Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:30.097154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:30.097295Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:30.370063Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:22540 TClient is connected to server localhost:22540 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:30.999760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:31.044790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:32.658320Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901268536540234:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:32.658452Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.020057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:33.173396Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901272831507653:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.173469Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.173690Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901272831507658:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.177005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:33.187300Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901272831507660:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:57:33.246115Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901272831507700:2399] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:34.053488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:34.352867Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901255651637756:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:34.352944Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:34.475380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:34.947632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:35.528757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:36.026967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:36.599790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:36.639277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:57:38.330281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710703:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call List ... 0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:39.666494Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30238, node 2 2025-06-25T14:57:39.735835Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:39.735850Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:39.735855Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:39.735970Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61400 TClient is connected to server localhost:61400 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:40.210266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:40.216991Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 4453, MsgBus: 29351 2025-06-25T14:57:43.188775Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901315585430068:2109];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:43.188905Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e9e/r3tmp/tmpH9dcbK/pdisk_1.dat 2025-06-25T14:57:43.379397Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:43.395050Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:43.395136Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:43.396714Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901315585429989:2080] 1750863463182883 != 1750863463182886 2025-06-25T14:57:43.398215Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4453, node 3 2025-06-25T14:57:43.493522Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:43.493545Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:43.493553Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:43.493658Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29351 TClient is connected to server 
localhost:29351 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:43.961651Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:43.969886Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:44.166702Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:46.834325Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:46.836622Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:46.838138Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.068707Z node 3 :KQP_PROXY WARN: kqp_script_executions.cpp:1077: [ScriptExecutions] [TForgetScriptExecutionOperationActor] ExecutionId: , reply BAD_REQUEST, issues: {
: Error: Invalid operation id: ydb/public/sdk/cpp/src/library/operation_id/operation_id.cpp:184: Unable to find key: id } 2025-06-25T14:57:47.071871Z node 3 :KQP_PROXY WARN: kqp_script_executions.cpp:1366: [ScriptExecutions] [TGetScriptExecutionOperationActor] ExecutionId: , reply BAD_REQUEST, issues: {
: Error: Invalid operation id: ydb/public/sdk/cpp/src/library/operation_id/operation_id.cpp:184: Unable to find key: id } Trying to start YDB, gRPC: 27015, MsgBus: 12249 2025-06-25T14:57:47.735025Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519901331769149146:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:47.735094Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e9e/r3tmp/tmpf5JTRq/pdisk_1.dat 2025-06-25T14:57:47.836144Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:47.837495Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519901331769149127:2080] 1750863467734520 != 1750863467734523 TServer::EnableGrpc on GrpcPort 27015, node 4 2025-06-25T14:57:47.869988Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:47.870083Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:47.871828Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:47.900769Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:47.900792Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:47.900801Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:47.900907Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12249 TClient is connected to server localhost:12249 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:48.451164Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
------- [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestDiscoverClusters [GOOD] Test command err: 2025-06-25T14:57:42.881730Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901310435983835:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:42.883224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000bfb/r3tmp/tmpwT9WEp/pdisk_1.dat 2025-06-25T14:57:43.459314Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:43.462040Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901310435983631:2080] 1750863462831576 != 1750863462831579 2025-06-25T14:57:43.482332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:43.482417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 7828, node 1 2025-06-25T14:57:43.486134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:43.557580Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000bfb/r3tmp/yandexbaHnhJ.tmp 2025-06-25T14:57:43.557605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000bfb/r3tmp/yandexbaHnhJ.tmp 2025-06-25T14:57:43.557755Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000bfb/r3tmp/yandexbaHnhJ.tmp 2025-06-25T14:57:43.557863Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10331 PQClient connected to localhost:7828 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:43.846602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:43.867273Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 2025-06-25T14:57:43.896963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:57:45.769036Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901323320886222:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:45.769131Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901323320886231:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:45.769170Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:45.773787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:45.783793Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901323320886241:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:57:45.995411Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901323320886306:2385] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:46.021989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:46.135046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:46.143519Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901323320886319:2307], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:57:46.145451Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YzI4N2RmMWEtOGI1ZWZlZDgtNzQzM2FlNWEtMmVmYmIwZWY=, ActorId: [1:7519901323320886209:2292], ActorState: ExecuteState, TraceId: 01jyksgk8k2x6yaxw3cy42mev7, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:57:46.146995Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:57:46.232533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:57:46.418998Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyksgksy6eyhfjsqrzyp9xbr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDNlODVlZDgtZjBmMzVmY2EtNDI0YzZlNjQtOTI4YTM4NmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:47.745196Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710668. Ctx: { TraceId: 01jyksgmzz2hqc8tkds6j2w4jv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjZkMjc2MjctMzNkMzQ1YWItOGI5YTA4NzktNzI2OWNiZWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:47.864892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901310435983835:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:47.864979Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:48.929962Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710670. Ctx: { TraceId: 01jyksgp6w062bykhy0wzsw93a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmNhYWVkZGMtZTI3MmU0LWZjZDAwMTg2LWRhN2ZkYjU1, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:50.265520Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710674. 
Ctx: { TraceId: 01jyksgqfr5p0axg9efw4q8tw1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDEwOTMzM2UtOWQyZTJjMmMtZjA3NzYzMS00YWI5ZWM5MQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:57:51.470616Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710678. Ctx: { TraceId: 01jyksgrnp9cfzamvtdwhz0sbc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGU5N2Q1MzctMjQyNWI2ZWMtMjVlNjEzMzYtZDI4YmMwYzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover-Nullable [GOOD] Test command err: Trying to start YDB, gRPC: 8720, MsgBus: 3581 2025-06-25T14:57:06.869211Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901156977579395:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:06.869528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001be2/r3tmp/tmpVStZ0y/pdisk_1.dat 2025-06-25T14:57:07.326830Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:07.330951Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901156977579355:2080] 1750863426852496 != 1750863426852499 2025-06-25T14:57:07.339992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:07.340073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:07.343976Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8720, node 1 2025-06-25T14:57:07.436594Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:07.436618Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:07.436664Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:07.436790Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3581 2025-06-25T14:57:07.887426Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3581 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:08.105979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:08.131248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:08.148170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:57:08.311811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:08.463760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:08.554371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:10.138177Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901174157450170:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:10.138293Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:10.471991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.541733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.572220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.615878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.686222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.737307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.786144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:10.847171Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901174157450829:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:10.847237Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:10.847353Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901174157450834:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:10.851044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:10.865798Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901174157450836:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:10.933917Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901174157450889:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:11.871707Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901156977579395:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:11.871768Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:11.976990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work ... ullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:57:48.980493Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519901187302478018:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:48.980536Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:48.980556Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 3 2025-06-25T14:57:48.980594Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 3 2025-06-25T14:57:48.980608Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 3 2025-06-25T14:57:48.980658Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 17 shard idx 72057594046644480:35 data size 896 row count 4 2025-06-25T14:57:48.980705Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037922 maps to shardIdx: 72057594046644480:35 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 17], pathId map=TestTable, is column=0, is olap=0, RowCount 4, DataSize 896 2025-06-25T14:57:48.980717Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037922, followerId 0 2025-06-25T14:57:48.980781Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:35 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:48.980827Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037922 2025-06-25T14:57:48.980851Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 17 shard idx 72057594046644480:36 data size 704 row count 2 
2025-06-25T14:57:48.980897Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037923 maps to shardIdx: 72057594046644480:36 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 17], pathId map=TestTable, is column=0, is olap=0, RowCount 2, DataSize 704 2025-06-25T14:57:48.980913Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037923, followerId 0 2025-06-25T14:57:48.980940Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:36 with partCount# 0, rowCount# 2, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:48.980952Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037923 2025-06-25T14:57:48.980969Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 17 shard idx 72057594046644480:37 data size 896 row count 4 2025-06-25T14:57:48.980992Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037924 maps to shardIdx: 72057594046644480:37 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 17], pathId map=TestTable, is column=0, is olap=0, RowCount 4, DataSize 896 2025-06-25T14:57:48.980998Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037924, followerId 0 2025-06-25T14:57:48.981019Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:37 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:48.981029Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037924 2025-06-25T14:57:48.981066Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:48.981146Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519901187302478018:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:48.981166Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:48.981181Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:57:49.126388Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519901187302478018:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:49.126422Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:49.126471Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:7519901187302478018:2147], Recipient [2:7519901187302478018:2147]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:49.126496Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:50.125070Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [2:7519901195892413347:2309], Recipient [2:7519901187302478018:2147]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037900 TableLocalId: 4 Generation: 1 Round: 2 TableStats { DataSize: 1472 RowCount: 8 IndexSize: 0 InMemSize: 1472 LastAccessTime: 1750863438677 LastUpdateTime: 1750863438677 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 8 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 106 Memory: 137784 } ShardState: 2 UserTablePartOwners: 72075186224037900 NodeId: 2 StartTime: 1750863435037 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:57:50.125107Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:57:50.125140Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037900 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 4] state 'Ready' dataSize 1472 rowCount 8 cpuUsage 0.0106 2025-06-25T14:57:50.125226Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037900 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 4] raw table stats: DataSize: 1472 RowCount: 8 IndexSize: 0 InMemSize: 1472 LastAccessTime: 1750863438677 LastUpdateTime: 1750863438677 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 8 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:57:50.125247Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099994s, queue# 1 2025-06-25T14:57:50.127421Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519901187302478018:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:50.127447Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:57:50.127490Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:7519901187302478018:2147], Recipient [2:7519901187302478018:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:50.127507Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:57:50.225259Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 
2146435092, Sender [0:0:0], Recipient [2:7519901187302478018:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:50.225295Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:50.225307Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-25T14:57:50.225359Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-25T14:57:50.225373Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-25T14:57:50.225425Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046644480:13 data size 1472 row count 8 2025-06-25T14:57:50.225481Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037900 maps to shardIdx: 72057594046644480:13 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 4], pathId map=Logs, is column=0, is olap=0, RowCount 8, DataSize 1472 2025-06-25T14:57:50.225494Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037900, followerId 0 2025-06-25T14:57:50.225549Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:13 with partCount# 0, rowCount# 8, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:57:50.225595Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037900 2025-06-25T14:57:50.225642Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:57:50.225719Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519901187302478018:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:50.225732Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:57:50.225739Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 |89.5%| [TA] $(B)/ydb/services/persqueue_cluster_discovery/ut/test-results/unittest/{meta.json ... results_accumulator.log} |89.5%| [TA] {RESULT} $(B)/ydb/services/persqueue_cluster_discovery/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> Initializer::Simple [GOOD] >> TSequence::CreateSequenceInsideTableThenDropSequence [GOOD] >> TSequence::CreateSequenceInsideTableThenDropTable |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> TGRpcCmsTest::DescribeOptionsTest [GOOD] |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest |89.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> KqpUserConstraint::KqpReadNull-UploadNull >> KqpUserConstraint::KqpReadNull+UploadNull >> KqpMultishardIndex::DataColumnSelect [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::DuplicateUpsert [GOOD] Test command err: Trying to start YDB, gRPC: 61420, MsgBus: 31415 2025-06-25T14:57:15.570105Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901193673357066:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:15.570152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b41/r3tmp/tmpkDzydO/pdisk_1.dat 2025-06-25T14:57:16.024576Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61420, node 1 2025-06-25T14:57:16.058441Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:16.058528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:16.062132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:16.155479Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:16.155495Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:16.155500Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:16.155594Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31415 TClient is connected to server localhost:31415 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:57:16.588734Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:16.713416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:16.745668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:16.758906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:16.940995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:17.130851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:17.223695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:18.981080Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901206558260529:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:18.981175Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.231515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.306467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.377407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.406654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.465255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.508753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.586732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.656641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901210853228493:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.656750Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.657741Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901210853228498:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.662371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:19.677977Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901210853228500:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:19.744579Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901210853228551:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:20.572411Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901193673357066:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:20.572468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:20.767365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 2454, MsgBus: 16679 2025-06-25T14:57:23.736333Z node 2 :METADATA_PROVIDER WARN: lo ... ain @ 0x1AA0869C 15. ??:0: ?? @ 0x7F3A3916ED8F 16. ??:0: ?? @ 0x7F3A3916EE3F 17. ??:0: ?? @ 0x1702E028 Trying to start YDB, gRPC: 1979, MsgBus: 17814 2025-06-25T14:57:44.188132Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901317875040689:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:44.188207Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b41/r3tmp/tmpUnWJ91/pdisk_1.dat 2025-06-25T14:57:44.350806Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:44.360126Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:44.360225Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:44.362354Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1979, node 3 2025-06-25T14:57:44.439326Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:44.439352Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:44.439361Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:44.439494Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17814 TClient is connected to server localhost:17814 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:45.003674Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:45.015124Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:45.028836Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:45.110865Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:45.226815Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:45.293167Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:45.381454Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
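The repeated "waiting..." markers and the "NotifyTxCompletion, unknown transaction" warning above come from the test harness subscribing to a schemeshard transaction that may already have finished by the time the subscription arrives. A minimal, generic sketch of that wait-for-completion pattern follows; WaitForCompletion, isTxDone and the timings are illustrative assumptions, not the actual YDB test-framework API.

// Illustrative only: a generic poll-until-done helper with exponential backoff.
// isTxDone stands in for whatever completion check the real harness performs.
#include <algorithm>
#include <chrono>
#include <functional>
#include <thread>

bool WaitForCompletion(const std::function<bool()>& isTxDone,
                       std::chrono::milliseconds timeout = std::chrono::seconds(30)) {
    const auto deadline = std::chrono::steady_clock::now() + timeout;
    auto delay = std::chrono::milliseconds(10);
    while (std::chrono::steady_clock::now() < deadline) {
        if (isTxDone()) {
            // The transaction may have finished before the subscription landed,
            // which is why the "unknown transaction" notification is harmless here.
            return true;
        }
        std::this_thread::sleep_for(delay);
        delay = std::min<std::chrono::milliseconds>(delay * 2,
                                                    std::chrono::milliseconds(1000));
    }
    return false;  // the caller decides whether a timeout fails the test
}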
2025-06-25T14:57:47.655704Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901330759944196:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:47.655811Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:47.722585Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.765937Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.800738Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.834289Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.865184Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.920423Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.957604Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:48.014892Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901335054912152:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:48.015003Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:48.015110Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901335054912157:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:48.019609Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:48.028544Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901335054912159:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:48.115427Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901335054912210:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:49.185045Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901317875040689:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:49.185123Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:49.335679Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> TSequence::CreateSequenceInsideTableThenDropTable [GOOD] >> TSequence::CreateSequencesWithIndexedTable ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest >> Initializer::Simple [GOOD] Test command err: 2025-06-25T14:56:43.017926Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:56:43.018105Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:56:43.018182Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00153f/r3tmp/tmpacV5lI/pdisk_1.dat 2025-06-25T14:56:43.671112Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 1267, node 1 TClient is connected to server localhost:23613 2025-06-25T14:56:44.255614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:56:44.303971Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:44.317168Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:44.317274Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:44.317309Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:44.318615Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:44.319043Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863400244689 != 1750863400244693 2025-06-25T14:56:44.368135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:44.368288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:44.381582Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:54.595791Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:639:2530], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.595994Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:649:2535], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.596102Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.609721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.712401Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:653:2538], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-25T14:56:54.733711Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:54.812970Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:723:2577] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:55.326174Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:734:2587], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/test]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:56:55.335433Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZWNlMTRkZDktMmJjODhlMjgtYjNjNDkzNjgtNjdhMmI2ZmM=, ActorId: [1:635:2527], ActorState: ExecuteState, TraceId: 01jyksf19t89k1hk0tdzghyc3v, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: REQUEST=SELECT * FROM `/Root/.metadata/test`;RESULT=
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/test]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 ;EXPECTATION=0 REQUEST=SELECT * FROM `/Root/.metadata/test`;EXPECTATION=0 2025-06-25T14:56:55.446618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:57.133195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:56:57.642623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:58.457612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Initialization finished 2025-06-25T14:57:09.465521Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jyksffpza1033nadrf9073ws, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjljYjJhODQtMjUzN2ZmNmMtNjdiNGJiOWUtZWRmMGExZjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT * FROM `/Root/.metadata/test`;RESULT=;EXPECTATION=1 REQUEST=SELECT * FROM `/Root/.metadata/test`;EXPECTATION=1 REQUEST=DROP TABLE `/Root/.metadata/test`;EXPECTATION=0;WAITING=1 2025-06-25T14:57:20.933984Z node 1 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [1:1286:2981] txid# 281474976715678, Access denied for root@builtin on path /Root/.metadata/test, with access RemoveSchema 2025-06-25T14:57:20.934193Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1286:2981] txid# 281474976715678, issues: { message: "Access denied for root@builtin on path /Root/.metadata/test" issue_code: 200000 severity: 1 } REQUEST=DROP TABLE `/Root/.metadata/test`;RESULT=
: Error: Execution, code: 1060
:1:12: Error: Executing DROP TABLE
: Error: Access denied., code: 2018
: Error: Access denied for root@builtin on path /Root/.metadata/test, code: 200000 ;EXPECTATION=0 FINISHED_REQUEST=DROP TABLE `/Root/.metadata/test`;EXPECTATION=0;WAITING=1 2025-06-25T14:57:31.584682Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715681. Ctx: { TraceId: 01jyksg59sc3qz6rh38561j5d1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmM4YWZlMjAtNGQ1NzgxNDktMmEwOGY0NTgtODA5ZjE4MzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=DELETE FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=0;WAITING=1 REQUEST=DELETE FROM `/Root/.metadata/initialization/migrations`;RESULT=
: Fatal: ydb/core/kqp/host/kqp_host.cpp:977 ExecuteDataQuery(): requirement false failed, message: Unexpected query type for execute script action: Ddl, code: 1 ;EXPECTATION=0 FINISHED_REQUEST=DELETE FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=0;WAITING=1 REQUEST=DROP TABLE `/Root/.metadata/initialization/migrations`;EXPECTATION=0;WAITING=1 2025-06-25T14:57:52.950079Z node 1 :TX_PROXY ERROR: schemereq.cpp:1096: Actor# [1:1457:3102] txid# 281474976715686, Access denied for root@builtin on path /Root/.metadata/initialization/migrations, with access RemoveSchema 2025-06-25T14:57:52.950265Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1457:3102] txid# 281474976715686, issues: { message: "Access denied for root@builtin on path /Root/.metadata/initialization/migrations" issue_code: 200000 severity: 1 } REQUEST=DROP TABLE `/Root/.metadata/initialization/migrations`;RESULT=
: Error: Execution, code: 1060
:1:12: Error: Executing DROP TABLE
: Error: Access denied., code: 2018
: Error: Access denied for root@builtin on path /Root/.metadata/initialization/migrations, code: 200000 ;EXPECTATION=0 FINISHED_REQUEST=DROP TABLE `/Root/.metadata/initialization/migrations`;EXPECTATION=0;WAITING=1 E0625 14:57:53.889362324 605768 backup_poller.cc:113] run_poller: UNKNOWN:Timer list shutdown {created_time:"2025-06-25T14:57:53.880617962+00:00"} ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::DescribeOptionsTest [GOOD] Test command err: 2025-06-25T14:57:50.178498Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901346073906924:2097];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:50.179810Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013ad/r3tmp/tmp659R7i/pdisk_1.dat 2025-06-25T14:57:50.653911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:50.654003Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:50.667149Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:50.706979Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16932, node 1 2025-06-25T14:57:50.837043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:50.837065Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:50.837081Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:50.837249Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:51.186225Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24141 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
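In the Initializer::Simple output above, each statement is logged as REQUEST=...;EXPECTATION=0|1 and then re-logged with its RESULT, so the test passes only when the statement's outcome matches the expectation (for example, DROP TABLE on /Root/.metadata/test must be denied, EXPECTATION=0). A minimal sketch of that expectation-driven check is shown below; the executor callable and its result type are assumptions for illustration, not the actual test helpers.

// Illustrative sketch of the REQUEST/RESULT/EXPECTATION check, assuming a
// caller-supplied executor that returns {succeeded, issue text}.
#include <functional>
#include <iostream>
#include <string>
#include <utility>

using QueryResult = std::pair<bool, std::string>;  // {succeeded, issue text}

// Runs one statement and verifies that its outcome matches the expectation
// printed next to it (EXPECTATION=0 means the statement must fail, e.g.
// DROP TABLE on a protected .metadata table).
bool CheckRequest(const std::function<QueryResult(const std::string&)>& execute,
                  const std::string& query, bool expectation) {
    std::cout << "REQUEST=" << query << ";EXPECTATION=" << expectation << "\n";
    auto [ok, issues] = execute(query);
    std::cout << "REQUEST=" << query << ";RESULT=" << issues
              << ";EXPECTATION=" << expectation << "\n";
    return ok == expectation;
}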
2025-06-25T14:57:51.371724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:24141 2025-06-25T14:57:51.620711Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:131: TTxProcessor(tenants) is now locking 2025-06-25T14:57:51.620729Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:143: TTxProcessor(tenants) is now locked by parent 2025-06-25T14:57:51.627260Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:102: TTxProcessor(tenants) is now active 2025-06-25T14:57:51.721708Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285140, Sender [1:7519901350368874918:2275], Recipient [1:7519901346073907305:2198]: NKikimr::NConsole::TEvConsole::TEvDescribeTenantOptionsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:59992" } 2025-06-25T14:57:51.721771Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:964: StateWork, processing event TEvConsole::TEvDescribeTenantOptionsRequest 2025-06-25T14:57:51.725683Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3335: Send TEvConsole::TEvDescribeTenantOptionsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.DescribeDatabaseOptionsResult] { storage_units { kind: "hdd2" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "hdd" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "hdd1" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "ssd" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "test" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } availability_zones { name: "dc-1" labels { key: "collocation" value: "disabled" } labels { key: "fixed_data_center" value: "DC-1" } } availability_zones { name: "any" labels { key: "any_data_center" value: "true" } labels { key: "collocation" value: "disabled" } } computational_units { kind: "slot" labels { key: "slot_type" value: "default" } labels { key: "type" value: "dynamic_slot" } allowed_availability_zones: "any" allowed_availability_zones: "dc-1" } } } } } >> TGRpcCmsTest::SimpleTenantsTest [GOOD] >> KqpVectorIndexes::CoveredVectorIndexWithFollowers-StaleRO [GOOD] >> TGRpcCmsTest::RemoveWithAnotherTokenTest [GOOD] >> TGRpcCmsTest::AuthTokenTest [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::DataColumnSelect [GOOD] Test command err: Trying to start YDB, gRPC: 13674, MsgBus: 6108 2025-06-25T14:57:17.751288Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901205001455557:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:17.751377Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001acf/r3tmp/tmpYrjS6O/pdisk_1.dat 
2025-06-25T14:57:18.280004Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:18.280419Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901205001455534:2080] 1750863437749973 != 1750863437749976 2025-06-25T14:57:18.294752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:18.294838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:18.297000Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13674, node 1 2025-06-25T14:57:18.396777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:18.396805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:18.396811Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:18.396926Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6108 2025-06-25T14:57:18.799563Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6108 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:19.114954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:19.132438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:57:19.310510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.506558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:19.586848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:21.299785Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901222181326355:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:21.299870Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:21.652283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:21.684839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:21.757380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:21.795446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:21.867555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:21.936043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.007902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:22.074312Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901226476294317:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:22.074385Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:22.074575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901226476294322:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:22.077807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:22.092048Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901226476294324:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:22.181665Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901226476294375:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:22.751365Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901205001455557:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:22.751434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:23.141123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 16517, MsgBus: 21605 2025-06-25T14:57:25.351208Z node 2 :METADATA_PROVI ... Connected TServer::EnableGrpc on GrpcPort 1329, node 3 2025-06-25T14:57:44.140959Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:44.140984Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:44.140995Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:44.141130Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10045 TClient is connected to server localhost:10045 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:44.708167Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:44.717131Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:44.729418Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:44.803217Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:44.926437Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:44.978570Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:45.068374Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:47.503241Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901334489606467:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:47.503349Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:47.574714Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.609980Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.640097Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.676788Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.746252Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.818267Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.895550Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:47.963499Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901334489607135:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:47.963599Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:47.963797Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901334489607140:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:47.967174Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:47.978751Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901334489607142:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:48.053323Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901338784574489:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:48.860891Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901317309735891:2244];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:48.860962Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:49.388671Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:49.429822Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:49.512208Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:51.511952Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
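The recurring sequence above -- "Failed to fetch pool default" (NOT_FOUND), ESchemeOpCreateResourcePool, "Transaction ... completed, doublechecking", then "Check failed: path ... error: path exist, request accepts it" -- is a create-if-missing race: several actors try to materialize /Root/.metadata/workload_manager/pools/default at once, and the losers treat "already exists" as success. A generic sketch of that idiom follows; the fetch/create callables are hypothetical stand-ins, not the WorkloadService interface.

// Generic "ensure it exists" idiom that tolerates a concurrent creator,
// mirroring the NOT_FOUND -> create -> "path exist, request accepts it" trace.
#include <functional>
#include <string>

enum class Status { Ok, NotFound, AlreadyExists, Error };

bool EnsureExists(const std::function<Status(const std::string&)>& fetch,
                  const std::function<Status(const std::string&)>& create,
                  const std::string& path) {
    if (fetch(path) == Status::Ok) {
        return true;                           // fast path: the pool is already there
    }
    switch (create(path)) {
        case Status::Ok:
        case Status::AlreadyExists:            // another actor won the race; accept it,
            return fetch(path) == Status::Ok;  // then double-check that it is visible
        default:
            return false;                      // genuine failure; caller schedules a retry
    }
}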
------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::SimpleTenantsTest [GOOD] Test command err: 2025-06-25T14:57:50.136492Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901345714120659:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:50.136771Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013a8/r3tmp/tmpOR1xMH/pdisk_1.dat 2025-06-25T14:57:50.545160Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:50.545281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:50.551593Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:50.579978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32483, node 1 2025-06-25T14:57:50.832977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:50.833001Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:50.833013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:50.833148Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:51.164798Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25042 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:51.344652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:51.491834Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7519901350009088715:2274], Recipient [1:7519901345714121109:2212]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:37632" } 2025-06-25T14:57:51.491877Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-25T14:57:51.491924Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:51.491938Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:51.492082Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:37632" 2025-06-25T14:57:51.493013Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1750863471492510) 2025-06-25T14:57:51.494563Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1750863471492510 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-25T14:57:51.494848Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-25T14:57:51.504167Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-25T14:57:51.514662Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863471492510&action=1" } } } 2025-06-25T14:57:51.517057Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:51.517167Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-25T14:57:51.517358Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-25T14:57:51.518457Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-25T14:57:51.518621Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-25T14:57:51.523996Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, 
Sender [1:7519901350009088723:2275], Recipient [1:7519901345714121109:2212]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863471492510&action=1" } UserToken: "" } 2025-06-25T14:57:51.524030Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-25T14:57:51.524223Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863471492510&action=1" } } 2025-06-25T14:57:51.527396Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-25T14:57:51.527456Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-25T14:57:51.527514Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7519901350009088720:2212], Recipient [1:7519901345714121109:2212]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-25T14:57:51.527528Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-25T14:57:51.527546Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:51.527555Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:51.527598Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-25T14:57:51.532420Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-25T14:57:51.532535Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-25T14:57:51.541463Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-25T14:57:51.541506Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:51.541516Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:51.541523Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:51.541602Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-25T14:57:51.541628Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1750863471492510 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:57:51.545176Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-25T14:57:51.545342Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:51.545373Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-25T14:57:51.545382Z node 1 
:CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-25T14:57:51.550057Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-25T14:57:51.551627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T14:57:51.558003Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 2025-06-25T14:57:51.558090Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request ... nRemoved 2025-06-25T14:57:52.040745Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-25T14:57:52.040759Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:52.040767Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:52.040811Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-25T14:57:52.040847Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1750863472005350 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:57:52.040886Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750863472005350 issue= 2025-06-25T14:57:52.042490Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-25T14:57:52.042573Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.042591Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:52.042731Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519901345714121011:2208], Recipient [1:7519901345714121109:2212]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.042744Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-25T14:57:52.042759Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:52.042767Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:52.042793Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: 
TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-25T14:57:52.042809Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1750863472005350 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:57:52.047129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:39: Got DeleteTabletReply with Forward response from Hive 72075186224037888 to Hive 72057594037968897 shardIdx 72057594046644480:1 2025-06-25T14:57:52.052289Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-25T14:57:52.052367Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:52.052408Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-25T14:57:52.052583Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-25T14:57:52.053277Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-06-25T14:57:52.053371Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-06-25T14:57:52.053548Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037897 not found 2025-06-25T14:57:52.053581Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-25T14:57:52.053592Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037894 not found 2025-06-25T14:57:52.053629Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037896 not found 2025-06-25T14:57:52.053643Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-25T14:57:52.053675Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037895 not found 2025-06-25T14:57:52.053686Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-25T14:57:52.053719Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037893 not found 2025-06-25T14:57:52.053733Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-06-25T14:57:52.055878Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 
72075186224037888 not found 2025-06-25T14:57:52.068457Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-06-25T14:57:52.068568Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7519901354304056702:2212], Recipient [1:7519901345714121109:2212]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-25T14:57:52.071267Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-25T14:57:52.071302Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:52.071316Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:52.071379Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-25T14:57:52.071399Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-25T14:57:52.074992Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519901354304056728:2355], Recipient [1:7519901345714121109:2212]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863472005350&action=2" } UserToken: "" } 2025-06-25T14:57:52.075024Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-25T14:57:52.075210Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863472005350&action=2" } } 2025-06-25T14:57:52.122202Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-25T14:57:52.122232Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:52.122252Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:52.122261Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:52.122318Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1750863472005350 2025-06-25T14:57:52.122329Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750863472005350 issue= 2025-06-25T14:57:52.122340Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1750863472005350 issue= 2025-06-25T14:57:52.122350Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-25T14:57:52.122442Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1750863472005350 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:57:52.156896Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-25T14:57:52.156986Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: 
TTxProcessor(tenants) completed tx 2025-06-25T14:57:52.157466Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519901354304056757:2361], Recipient [1:7519901345714121109:2212]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863472005350&action=2" } UserToken: "" } 2025-06-25T14:57:52.157491Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-25T14:57:52.157646Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863472005350&action=2" ready: true status: SUCCESS } } 2025-06-25T14:57:52.209652Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519901354304056760:2363], Recipient [1:7519901345714121109:2212]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "" PeerName: "ipv6:[::1]:37632" } 2025-06-25T14:57:52.209683Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-25T14:57:52.209804Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3377: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: NOT_FOUND issues { message: "Unknown tenant /Root/users/user-1" severity: 1 } } } 2025-06-25T14:57:52.212853Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285123, Sender [1:7519901354304056763:2364], Recipient [1:7519901345714121109:2212]: NKikimr::NConsole::TEvConsole::TEvListTenantsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:37632" } 2025-06-25T14:57:52.212875Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:967: StateWork, processing event TEvConsole::TEvListTenantsRequest 2025-06-25T14:57:52.213051Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3421: Send TEvConsole::TEvListTenantsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.ListDatabasesResult] { } } } } 2025-06-25T14:57:52.219627Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-25T14:57:52.219778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:57:52.604429Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TSequence::CreateSequencesWithIndexedTable [GOOD] >> TSequence::CreateTableWithDefaultFromSequence ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::AuthTokenTest [GOOD] Test command err: 2025-06-25T14:57:50.100654Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901346682236480:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:50.100722Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/0013a1/r3tmp/tmpGH2uwl/pdisk_1.dat 2025-06-25T14:57:50.571028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:50.571137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:50.591930Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:50.595843Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1874, node 1 2025-06-25T14:57:50.832922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:50.832945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:50.832950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:50.833061Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:51.116654Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63639 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:51.411263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:51.521626Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7519901350977204517:2274], Recipient [1:7519901346682236929:2202]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:49478" } 2025-06-25T14:57:51.521671Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-25T14:57:51.521699Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:51.521724Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:51.521867Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:49478" 2025-06-25T14:57:51.522055Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1750863471516476) 2025-06-25T14:57:51.530618Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1750863471516476 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-25T14:57:51.530869Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-25T14:57:51.543883Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-25T14:57:51.544813Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863471516476&action=1" } } } 2025-06-25T14:57:51.544943Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:51.545010Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-25T14:57:51.545130Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-25T14:57:51.545664Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-25T14:57:51.545781Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" 
ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-25T14:57:51.553244Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-25T14:57:51.553293Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-25T14:57:51.553349Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7519901350977204522:2202], Recipient [1:7519901346682236929:2202]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-25T14:57:51.553363Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-25T14:57:51.553387Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:51.553396Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:51.553434Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-25T14:57:51.553457Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-25T14:57:51.553510Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-25T14:57:51.557990Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519901350977204536:2275], Recipient [1:7519901346682236929:2202]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863471516476&action=1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" } 2025-06-25T14:57:51.558016Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-25T14:57:51.558185Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863471516476&action=1" } } 2025-06-25T14:57:51.564881Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-25T14:57:51.564908Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:51.564916Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:51.564928Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:51.564974Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-25T14:57:51.564996Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1750863471516476 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:57:51.568003Z node 1 :CMS_TENANTS DEBUG: 
console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-25T14:57:51.568170Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:51.568209Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-25T14:57:51.568218Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-25T14:57:51.571205Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "Root" 2025-06-25T14:57:51.574497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/cor ... /users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-25T14:57:52.189196Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519901355272172277:2327], Recipient [1:7519901346682236929:2202]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:49478" } 2025-06-25T14:57:52.189223Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-25T14:57:52.189262Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.189360Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519901346682236799:2200], Recipient [1:7519901346682236929:2202]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.189374Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-25T14:57:52.189864Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-25T14:57:52.196757Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519901355272172282:2328], Recipient [1:7519901346682236929:2202]: 
NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:49478" } 2025-06-25T14:57:52.196790Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-25T14:57:52.196830Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.197188Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519901346682236799:2200], Recipient [1:7519901346682236929:2202]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.197204Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-25T14:57:52.197662Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-25T14:57:52.220891Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519901355272172296:2329], Recipient [1:7519901346682236929:2202]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:49478" } 2025-06-25T14:57:52.220934Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-25T14:57:52.220978Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.221386Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519901346682236799:2200], Recipient [1:7519901346682236929:2202]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.221405Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-25T14:57:52.221883Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-25T14:57:52.232494Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519901355272172324:2330], Recipient [1:7519901346682236929:2202]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: 
"ipv6:[::1]:49478" } 2025-06-25T14:57:52.232530Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-25T14:57:52.232578Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.232706Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519901346682236799:2200], Recipient [1:7519901346682236929:2202]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.232727Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-25T14:57:52.233239Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-25T14:57:52.240350Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715659 2025-06-25T14:57:52.240370Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-25T14:57:52.240434Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-25T14:57:52.240590Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435076, Sender [1:7519901350977204625:2202], Recipient [1:7519901346682236929:2202]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-25T14:57:52.240608Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:979: StateWork, processing event TEvPrivate::TEvSubdomainReady 2025-06-25T14:57:52.240622Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:52.240689Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:52.240729Z node 1 :CMS_TENANTS DEBUG: console__update_confirmed_subdomain.cpp:22: TTxUpdateConfirmedSubdomain for tenant /Root/users/user-1 to 2 2025-06-25T14:57:52.240766Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=RUNNING txid=1750863471516476 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:57:52.240826Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2913: Update database for /Root/users/user-1 confirmedsubdomain=2 2025-06-25T14:57:52.242709Z node 1 :CMS_TENANTS DEBUG: console__update_confirmed_subdomain.cpp:42: TTxUpdateConfirmedSubdomain complete for /Root/users/user-1 2025-06-25T14:57:52.242754Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:52.244565Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7519901355272172346:2331], Recipient [1:7519901346682236929:2202]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: 
"\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:49478" } 2025-06-25T14:57:52.244592Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-25T14:57:52.244634Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.244748Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519901346682236799:2200], Recipient [1:7519901346682236929:2202]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.244760Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-25T14:57:52.245221Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: RUNNING required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } TClient is connected to server localhost:63639 TClient::Ls request: /Root/users/user-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root/users/user-1" PathId: 1 SchemeshardId: 72075186224037897 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72075186224037897 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 3 ProcessingParams { Version: 3 PlanReso... 
(TRUNCATED) 2025-06-25T14:57:52.578020Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-25T14:57:52.578658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:57:52.652553Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/cms/ut/unittest >> TGRpcCmsTest::RemoveWithAnotherTokenTest [GOOD] Test command err: 2025-06-25T14:57:50.103819Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901344542101183:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:50.103946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001399/r3tmp/tmpaMkw6U/pdisk_1.dat 2025-06-25T14:57:50.564428Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:50.594668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:50.594772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:50.609334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29400, node 1 2025-06-25T14:57:50.832949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:50.832976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:50.832982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:50.833086Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:51.140607Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28305 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:51.368252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:28305 2025-06-25T14:57:51.645459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:51.724976Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7519901348837069246:2274], Recipient [1:7519901344542101628:2199]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)" PeerName: "ipv6:[::1]:40972" } 2025-06-25T14:57:51.725016Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-25T14:57:51.725047Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:51.725057Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:51.725168Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)" PeerName: "ipv6:[::1]:40972" 2025-06-25T14:57:51.725332Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1750863471722939) 2025-06-25T14:57:51.725888Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1750863471722939 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-25T14:57:51.726054Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-25T14:57:51.732882Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-25T14:57:51.733806Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863471722939&action=1" } } } 
2025-06-25T14:57:51.733953Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:51.734043Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-25T14:57:51.734231Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-25T14:57:51.734852Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-25T14:57:51.735025Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-25T14:57:51.747872Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-25T14:57:51.747973Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-25T14:57:51.748078Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7519901348837069254:2199], Recipient [1:7519901344542101628:2199]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-25T14:57:51.748098Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-25T14:57:51.748125Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:51.748138Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:51.748185Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-25T14:57:51.748208Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-25T14:57:51.748289Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-25T14:57:51.751584Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-25T14:57:51.751613Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:51.751622Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:51.751629Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:51.751683Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-25T14:57:51.751722Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1750863471722939 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-25T14:57:51.752031Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: 
StateWork, received event# 273285131, Sender [1:7519901348837069262:2275], Recipient [1:7519901344542101628:2199]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863471722939&action=1" } UserToken: "\n\016user-1@builtin\022\030\022\026\n\024all-users@well-known\032\016user-1@builtin\"\007Builtin*\017**** (E3DE7296)" } 2025-06-25T14:57:51.752052Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-25T14:57:51.752280Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863471722939&action=1" } } 2025-06-25T14:57:51.756615Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-25T14:57:51.756811Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:51.756860Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-25T14:57:51.756871Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-25T14:57:51.767364Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 UserToken: "\n\016user-1@builtin\022\ ... 
est { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863472682929&action=2" } UserToken: "" } 2025-06-25T14:57:52.689351Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-25T14:57:52.689474Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863472682929&action=2" } } 2025-06-25T14:57:52.696562Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976710663 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 2025-06-25T14:57:52.696616Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976710663 2025-06-25T14:57:52.696864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5496: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 3] by tx: 281474976710663 2025-06-25T14:57:52.701059Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976710663 2025-06-25T14:57:52.728287Z node 3 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72075186224037888 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found - using supplied 72075186224037888 2025-06-25T14:57:52.729072Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976710663 2025-06-25T14:57:52.729096Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-25T14:57:52.729153Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-25T14:57:52.729319Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7519901353132037191:2199], Recipient [1:7519901344542101628:2199]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-25T14:57:52.729357Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-25T14:57:52.729368Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:52.729375Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:52.729429Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-25T14:57:52.729449Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1750863472682929 errorcode=UNAUTHORIZED issue=AccessDenied: Access denied for request 2025-06-25T14:57:52.729494Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750863472682929 issue=AccessDenied: Access denied for request 2025-06-25T14:57:52.731258Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-25T14:57:52.731332Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.731344Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:52.731976Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7519901344542101495:2197], Recipient [1:7519901344542101628:2199]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-25T14:57:52.731991Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-25T14:57:52.732014Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:52.732033Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:52.732068Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-25T14:57:52.732089Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1750863472682929 errorcode=UNAUTHORIZED issue=AccessDenied: Access denied for request 2025-06-25T14:57:52.734126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:39: Got DeleteTabletReply with Forward response from Hive 72075186224037888 to Hive 72057594037968897 shardIdx 72057594046644480:1 2025-06-25T14:57:52.745239Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-25T14:57:52.745298Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:52.745363Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-25T14:57:52.745489Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-25T14:57:52.745714Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519901353132037270:2351], Recipient [1:7519901344542101628:2199]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863472682929&action=2" } UserToken: "" } 2025-06-25T14:57:52.745742Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-25T14:57:52.745951Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863472682929&action=2" } } 2025-06-25T14:57:52.746460Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 2 } } Success: true ConfigTxSeqNo: 10 2025-06-25T14:57:52.746559Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { 
DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 2 } } } 2025-06-25T14:57:52.747148Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037897 not found 2025-06-25T14:57:52.747182Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-25T14:57:52.751275Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-25T14:57:52.757202Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 11 2025-06-25T14:57:52.757325Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7519901353132037275:2199], Recipient [1:7519901344542101628:2199]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-25T14:57:52.757364Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-25T14:57:52.757383Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:52.757392Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:52.757448Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-25T14:57:52.757468Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-25T14:57:52.760032Z node 1 :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:3277} PDiskId# 1 request from unregistered ownerId# 5 PDiskId# 1 2025-06-25T14:57:52.772004Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-25T14:57:52.772049Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-25T14:57:52.772060Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:52.772066Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-25T14:57:52.772148Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1750863472682929 2025-06-25T14:57:52.772168Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1750863472682929 issue=AccessDenied: Access denied for request 2025-06-25T14:57:52.772182Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1750863472682929 issue=AccessDenied: Access denied for request 2025-06-25T14:57:52.772338Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-25T14:57:52.772429Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1750863472682929 code=SUCCESS errorcode=UNAUTHORIZED issue=AccessDenied: Access denied for request 2025-06-25T14:57:52.780929Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-25T14:57:52.780985Z node 1 :CMS_TENANTS TRACE: 
tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-25T14:57:52.805999Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7519901353132037323:2357], Recipient [1:7519901344542101628:2199]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863472682929&action=2" } UserToken: "" } 2025-06-25T14:57:52.806042Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-25T14:57:52.806241Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1750863472682929&action=2" ready: true status: SUCCESS } } 2025-06-25T14:57:52.810700Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-25T14:57:52.815684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-25T14:57:52.815764Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::CoveredVectorIndexWithFollowers-StaleRO [GOOD] Test command err: Trying to start YDB, gRPC: 22668, MsgBus: 21584 2025-06-25T14:56:49.656613Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901083774938879:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:49.656691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c4f/r3tmp/tmpFOYwdk/pdisk_1.dat 2025-06-25T14:56:50.080783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.080858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.090627Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:50.092882Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901083774938684:2080] 1750863409624642 != 1750863409624645 2025-06-25T14:56:50.109613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22668, node 1 2025-06-25T14:56:50.284854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:50.284890Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:50.284901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:50.285017Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:50.644467Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21584 TClient is connected to server localhost:21584 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:51.300617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:51.317044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:56:51.324581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.487187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.671179Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:51.766155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:53.065951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100954809515:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.066061Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.582039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.619149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.673174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.746513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.779953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.837485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.889128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:53.950579Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100954810175:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.950648Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.951142Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901100954810180:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.954330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:53.964401Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901100954810182:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.035778Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901105249777529:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:54.657115Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901083774938879:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:54.670424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.162273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... 06-25T14:57:21.849020Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25694 TClient is connected to server localhost:25694 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:22.325185Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:22.333089Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:22.415367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:22.596151Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:22.615391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:22.691922Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:25.203191Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901236613511839:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:25.203275Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:25.260753Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:25.340223Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:25.376040Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:25.424294Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:25.498112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:25.545134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:25.614398Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:25.720129Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901236613512510:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:25.720223Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:25.724127Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901236613512515:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:25.729061Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:25.742953Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519901236613512517:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:25.845994Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519901236613512568:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:26.540431Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901219433641047:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:26.540504Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:27.074075Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:27.336743Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:27.468544Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:57:27.530932Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715675:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:57:27.632048Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715676:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:57:36.693265Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:57:36.693288Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded |89.6%| [TA] $(B)/ydb/services/cms/ut/test-results/unittest/{meta.json ... results_accumulator.log} |89.6%| [TA] {RESULT} $(B)/ydb/services/cms/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TSequence::CreateTableWithDefaultFromSequence [GOOD] >> TSequence::CreateTableWithDefaultFromSequenceAndIndex >> GenericFederatedQuery::ClickHouseManagedSelectConstant [GOOD] >> GenericFederatedQuery::ClickHouseSelectCount |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix+Nullable-Covered [GOOD] >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix+Nullable+Covered >> Secret::ValidationQueryService >> Secret::Validation |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> TSequence::CreateTableWithDefaultFromSequenceAndIndex [GOOD] |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.6%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sequence/unittest >> TSequence::CreateTableWithDefaultFromSequenceAndIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:57:50.892880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, 
InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:57:50.892980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:57:50.893023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:57:50.893070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:57:50.894326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:57:50.894374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:57:50.894515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:57:50.894591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:57:50.895437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:57:50.896830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:57:50.985121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:57:50.985184Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:51.002613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:57:51.003076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:57:51.003280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:57:51.023263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:57:51.023940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:57:51.026467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:57:51.027803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:57:51.045059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:57:51.045969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:57:51.056737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-25T14:57:51.056833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:57:51.057062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:57:51.057107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:57:51.057168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:57:51.057261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:57:51.064408Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:57:51.228115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:57:51.230241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:57:51.231227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:57:51.231291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:57:51.232970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:57:51.233074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:57:51.236208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:57:51.237048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:57:51.237245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:57:51.237366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at 
tablet# 72057594046678944 2025-06-25T14:57:51.237436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:57:51.237471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:57:51.239932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:57:51.239982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:57:51.240021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:57:51.241856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:57:51.241910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:57:51.241950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:57:51.242014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:57:51.246453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:57:51.248666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:57:51.249854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:57:51.250944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:57:51.251085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:57:51.251139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:57:51.252360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:57:51.252448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: 
NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:57:51.252635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:57:51.252722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:57:51.255131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:57:51.255176Z node 1 :FLAT_TX_SCHEMESHARD ... ntPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:57:58.287171Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:57:58.287209Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:2 progress is 3/4 2025-06-25T14:57:58.287258Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 3/4 2025-06-25T14:57:58.287307Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:2 progress is 3/4 2025-06-25T14:57:58.287353Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 3/4 2025-06-25T14:57:58.287399Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/4, is published: true 2025-06-25T14:57:58.288407Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:57:58.288450Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:57:58.288713Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [7:134:2156], Recipient [7:134:2156]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T14:57:58.288749Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T14:57:58.288799Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:57:58.288842Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:57:58.289102Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:57:58.289221Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:57:58.289253Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 4/4 2025-06-25T14:57:58.289282Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-25T14:57:58.289317Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 4/4 2025-06-25T14:57:58.289345Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-25T14:57:58.289377Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 4/4, is published: true 2025-06-25T14:57:58.289447Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:414:2370] message: TxId: 102 2025-06-25T14:57:58.289524Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-25T14:57:58.289578Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:57:58.289626Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:57:58.289749Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:57:58.289793Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:1 2025-06-25T14:57:58.289816Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:1 2025-06-25T14:57:58.289847Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:57:58.289873Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:2 2025-06-25T14:57:58.289894Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:2 2025-06-25T14:57:58.289940Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:57:58.289967Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:3 2025-06-25T14:57:58.289987Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:3 2025-06-25T14:57:58.290036Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-25T14:57:58.290564Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435084, Sender [7:134:2156], Recipient [7:134:2156]: NKikimr::NSchemeShard::TEvPrivate::TEvCleanDroppedPaths 2025-06-25T14:57:58.290613Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5127: StateWork, processing event TEvPrivate::TEvCleanDroppedPaths 2025-06-25T14:57:58.290687Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:57:58.290734Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: 
PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-25T14:57:58.290835Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:57:58.291641Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:57:58.291684Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:57:58.291775Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:57:58.291806Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:57:58.291868Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:57:58.291924Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:57:58.291983Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:57:58.292009Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:57:58.297612Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:57:58.297676Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:57:58.297782Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:57:58.300295Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:57:58.300429Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [7:414:2370] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 102 at schemeshard: 72057594046678944 2025-06-25T14:57:58.300584Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:57:58.300627Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [7:520:2469] 2025-06-25T14:57:58.300847Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:57:58.301003Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [7:522:2471], Recipient [7:134:2156]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:58.301040Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, 
processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T14:57:58.301065Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-25T14:57:58.301419Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [7:599:2548], Recipient [7:134:2156]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-25T14:57:58.301473Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:57:58.301568Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:57:58.301801Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 210us result status StatusPathDoesNotExist 2025-06-25T14:57:58.301938Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeTable, state: EPathStateNotExist), drop stepId: 5000003, drop txId: 102" Path: "/MyRoot/Table" PathId: 2 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_sequence/test-results/unittest/{meta.json ... results_accumulator.log} |89.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence/test-results/unittest/{meta.json ... 
results_accumulator.log} |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> KqpQueryService::DdlColumnTable [GOOD] >> KqpQueryService::DdlCache >> KqpUserConstraint::KqpReadNull-UploadNull [GOOD] >> KqpUserConstraint::KqpReadNull+UploadNull [GOOD] |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.7%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::DeactivatedQueryService |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> KqpUserConstraint::KqpReadNull+UploadNull [GOOD] Test command err: 2025-06-25T14:57:57.496039Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:57:57.496168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:57:57.496208Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009b3/r3tmp/tmpaGmWw8/pdisk_1.dat 2025-06-25T14:57:57.829896Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:57:57.840208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:57:57.889989Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:57.900889Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863474564650 != 1750863474564654 2025-06-25T14:57:57.950108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:57.950252Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:57.964063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:58.077804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:58.639480Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:817:2664], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:58.639626Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:827:2669], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:58.639732Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:58.651967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:58.689253Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:58.815228Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:831:2672], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:57:58.886129Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:900:2710] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:59.890732Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyksgzvc0rrzabpjhmwmsnhk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWViMmZhZTAtYWI4NjQyZi1mOTU5Mjc4Yy1jMDUwYTFmMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:58:00.021174Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [1:931:2731], TxId: 281474976715660, task: 1. Ctx: { TraceId : 01jyksgzvc0rrzabpjhmwmsnhk. SessionId : ydb://session/3?node_id=1&id=ZWViMmZhZTAtYWI4NjQyZi1mOTU5Mjc4Yy1jMDUwYTFmMw==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Source[0] fatal error: {
: Fatal: Read from column index 1: got NULL from NOT NULL column, code: 2012 } 2025-06-25T14:58:00.024122Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:931:2731], TxId: 281474976715660, task: 1. Ctx: { TraceId : 01jyksgzvc0rrzabpjhmwmsnhk. SessionId : ydb://session/3?node_id=1&id=ZWViMmZhZTAtYWI4NjQyZi1mOTU5Mjc4Yy1jMDUwYTFmMw==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. InternalError: INTERNAL_ERROR KIKIMR_CONSTRAINT_VIOLATION: {
: Fatal: Read from column index 1: got NULL from NOT NULL column, code: 2012 }. 2025-06-25T14:58:00.036738Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:932:2732], TxId: 281474976715660, task: 2. Ctx: { TraceId : 01jyksgzvc0rrzabpjhmwmsnhk. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ZWViMmZhZTAtYWI4NjQyZi1mOTU5Mjc4Yy1jMDUwYTFmMw==. CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: Terminate execution }. 2025-06-25T14:58:00.045541Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZWViMmZhZTAtYWI4NjQyZi1mOTU5Mjc4Yy1jMDUwYTFmMw==, ActorId: [1:815:2662], ActorState: ExecuteState, TraceId: 01jyksgzvc0rrzabpjhmwmsnhk, Create QueryResponse for error on request, msg: 2025-06-25T14:58:00.048117Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyksgzvc0rrzabpjhmwmsnhk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWViMmZhZTAtYWI4NjQyZi1mOTU5Mjc4Yy1jMDUwYTFmMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data/unittest >> KqpUserConstraint::KqpReadNull-UploadNull [GOOD] Test command err: 2025-06-25T14:57:57.389684Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:57:57.389870Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:57:57.389944Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009bc/r3tmp/tmpDiwGBd/pdisk_1.dat 2025-06-25T14:57:57.828665Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:57:57.840345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:57:57.894937Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:57.900375Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863474561884 != 1750863474561888 2025-06-25T14:57:57.949848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:57.950016Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:57.964279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:58.080653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:58.640840Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:817:2664], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:58.640949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:827:2669], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:58.641037Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:58.650587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:58.685872Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:58.818974Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:831:2672], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:57:58.890066Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:900:2710] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:59.889969Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyksgzva5xnt914zfbvw1w6j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTk4MGY1YjMtNjQyOTdmNzYtZjMxZmJjZTAtMjExYTRhZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> GenericFederatedQuery::IcebergHadoopSaSelectCount [GOOD] >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown >> GenericFederatedQuery::IcebergHiveSaSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveSaFilterPushdown >> GenericFederatedQuery::YdbSelectCount [GOOD] >> KqpMultishardIndex::WriteIntoRenamingAsyncIndex [GOOD] |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.8%| [TA] $(B)/ydb/core/kqp/ut/data/test-results/unittest/{meta.json ... results_accumulator.log} >> GenericFederatedQuery::IcebergHiveTokenSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown |89.8%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/data/test-results/unittest/{meta.json ... results_accumulator.log} >> GenericFederatedQuery::PostgreSQLSelectCount [GOOD] >> GenericFederatedQuery::PostgreSQLFilterPushdown >> KqpPrefixedVectorIndexes::PrefixedVectorIndexOrderByCosineDistanceWithCover-Nullable [GOOD] >> KqpUniqueIndex::InsertComplexFkPkOverlapDuplicate >> GenericFederatedQuery::IcebergHadoopTokenSelectCount [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> GenericFederatedQuery::IcebergHiveBasicSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown >> Secret::Deactivated >> Secret::Simple ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::YdbSelectCount [GOOD] Test command err: Trying to start YDB, gRPC: 3807, MsgBus: 18788 2025-06-25T14:57:29.380580Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901255138469829:2169];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:29.381183Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a05/r3tmp/tmpYXhp3H/pdisk_1.dat 2025-06-25T14:57:29.875709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-25T14:57:29.877156Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:29.882937Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901255138469687:2080] 1750863449306860 != 1750863449306863 2025-06-25T14:57:29.889963Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:29.891117Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3807, node 1 2025-06-25T14:57:30.100951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:30.100972Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:30.100981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:30.101109Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:30.355010Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18788 TClient is connected to server localhost:18788 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:30.992440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:31.012756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:32.584424Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901268023372216:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:32.584573Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.019398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:33.172104Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901272318339634:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.172233Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.172632Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901272318339640:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.175968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:33.187087Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901272318339642:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:57:33.289128Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901272318339682:2400] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:34.045546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:34.364551Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901255138469829:2169];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:34.364611Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:34.440817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:34.867958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:35.508129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:35.919123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:36.402492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:36.453692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:57:38.142111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710704:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:57:38.173647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710705:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.175188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.179945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/action ... nVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:52.294023Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:52.305447Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:52.538668Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:57:54.837848Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901361912601509:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:54.837948Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:54.862953Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:54.928858Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901361912601629:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:54.928951Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:54.929161Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901361912601634:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:54.933161Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:54.941114Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901361912601636:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:57:55.030545Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901366207568972:2393] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:55.542185Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:56.018637Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:56.513890Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:56.520149Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901349027699007:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:56.520206Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:57.135257Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:57.689681Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:58.220767Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:58.304914Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:58:00.414308Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715705:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } columns { name: "col2" type { type_id: DOUBLE } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. GRpcStatusCode: 0 >> GenericFederatedQuery::IcebergHadoopBasicSelectCount [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicFilterPushdown >> Secret::SimpleQueryService >> YdbTableSplit::RenameTablesAndSplit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpMultishardIndex::WriteIntoRenamingAsyncIndex [GOOD] Test command err: Trying to start YDB, gRPC: 64145, MsgBus: 25894 2025-06-25T14:57:02.545445Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901140604820064:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:02.545499Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001bfa/r3tmp/tmpsgJrvV/pdisk_1.dat 2025-06-25T14:57:03.028763Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64145, node 1 2025-06-25T14:57:03.038858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:03.038936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:03.041468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:03.161065Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:03.161104Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:03.161114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:03.161232Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25894 2025-06-25T14:57:03.584544Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25894 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:03.871351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:03.904084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:04.095102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:04.283883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:04.377249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:06.161110Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901157784690843:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.161214Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.393952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.425799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.463732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.507682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.573723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.654345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.732233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:06.828338Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901157784691511:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.828407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.828678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901157784691516:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:06.832618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:06.841495Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901157784691518:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:06.919922Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901157784691569:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:07.547650Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901140604820064:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:07.547714Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:07.945018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... Trying to start YDB, gRPC: 25294, MsgBus: 12350 2025-06-25T14:57:10.115682Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901171893599786:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:10.115769Z nod ... cpp:311) waiting... 2025-06-25T14:57:36.841335Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:36.850911Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:36.851811Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:57:36.922841Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:37.099249Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:37.180046Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:39.486825Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901297303536556:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:39.486918Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:39.558447Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.598964Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.641526Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.692051Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.733816Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.805231Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.864347Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.963195Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901297303537216:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:39.963304Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:39.963601Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901297303537221:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:39.967264Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:39.982432Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901297303537223:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:40.041131Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901301598504570:3413] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:40.840570Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901280123665824:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:40.840634Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:41.171245Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:51.028589Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:57:51.028623Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:51.589422Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037936 not found 2025-06-25T14:57:51.593543Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037938 not found 2025-06-25T14:57:51.593576Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037928 not found 2025-06-25T14:57:51.593593Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037930 not found 2025-06-25T14:57:51.593610Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037935 not found 2025-06-25T14:57:51.593629Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037927 not found 2025-06-25T14:57:51.593646Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037929 not found 2025-06-25T14:57:51.594662Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037939 not found 2025-06-25T14:57:52.424598Z node 3 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:211: [TableChangeSenderShard][72075186224037924:1][72075186224037933][3:7519901305893473486:2612] Apply status: status# 2, reason# 7 2025-06-25T14:57:52.424642Z node 3 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:211: [TableChangeSenderShard][72075186224037924:1][72075186224037925][3:7519901305893473485:2612] Apply status: status# 2, reason# 7 2025-06-25T14:57:52.450109Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037931 not found 2025-06-25T14:57:52.450979Z 
node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037932 not found 2025-06-25T14:57:52.451000Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037926 not found 2025-06-25T14:57:52.454098Z node 3 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:88: [TableChangeSenderShard][72075186224037924:1][72075186224037925][3:7519901353138123146:2612] Handshake status: status# 2, reason# 7 2025-06-25T14:57:52.454180Z node 3 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:88: [TableChangeSenderShard][72075186224037924:1][72075186224037933][3:7519901353138123147:2612] Handshake status: status# 2, reason# 7 2025-06-25T14:57:57.496595Z node 3 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:88: [TableChangeSenderShard][72075186224037923:1][72075186224037925][3:7519901374612964917:2615] Handshake status: status# 2, reason# 7 2025-06-25T14:57:59.627297Z node 3 :CHANGE_EXCHANGE ERROR: change_sender_table_base.cpp:88: [TableChangeSenderShard][72075186224037922:1][72075186224037925][3:7519901383202901393:2616] Handshake status: status# 2, reason# 7 |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::RenameTablesAndSplit [GOOD] Test command err: 2025-06-25T14:56:33.727983Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901016949670282:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:33.728090Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001385/r3tmp/tmp2OndZI/pdisk_1.dat 2025-06-25T14:56:34.119027Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:34.184890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:34.185017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:34.188894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21854, node 1 2025-06-25T14:56:34.324919Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:34.324944Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:34.324951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:34.325090Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20746 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:56:34.772456Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:34.805799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:36.308221Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901029834573177:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.308334Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.610717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /Root/Foo, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:56:36.613602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:56:36.613641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:36.616885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710658, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/Foo 2025-06-25T14:56:36.682335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750863396730, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:56:36.755247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710658:0 2025-06-25T14:56:36.782275Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901029834573402:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.782584Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:36.802088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:506: TAlterTable Propose, path: /Root/Foo, pathId: , opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-06-25T14:56:36.803280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710659:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:56:36.803317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:56:36.804975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710659, database: /Root, subject: , status: StatusAccepted, operation: ALTER TABLE, path: /Root/Foo 2025-06-25T14:56:36.815118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750863396863, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:56:36.822481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710659:0 Fast forward 1m 2025-06-25T14:56:38.728294Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901016949670282:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:38.728388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; partitions 2 Fast forward 1m partitions 2 Fast forward 1m partitions 2 Fast forward 1m 2025-06-25T14:56:46.773149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:804: TSplitMerge Propose, tableStr: /Root/Foo, tableId: , opId: 281474976715657:0, at schemeshard: 72057594046644480, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037888 SourceTabletId: 72075186224037889 SchemeshardId: 72057594046644480 2025-06-25T14:56:46.775657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1083: TSplitMerge Propose accepted, tableStr: /Root/Foo, tableId: , opId: 281474976715657:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "\002\000\004\000\000\000\377\377\377\177\000\000\000\200" TabletID: 72075186224037888 ShardIdx: 1 } SourceRanges { KeyRangeBegin: "\002\000\004\000\000\000\377\377\377\177\000\000\000\200" KeyRangeEnd: "" TabletID: 72075186224037889 ShardIdx: 2 } DestinationRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "" ShardIdx: 3 }, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037888 SourceTabletId: 72075186224037889 SchemeshardId: 72057594046644480 2025-06-25T14:56:46.775705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:56:46.837358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation 
and all the parts is done, operation id: 281474976715657:0 2025-06-25T14:56:46.844132Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:56:46.844175Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found partitions 1 2025-06-25T14:56:48.994431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_move_table.cpp:651: TMoveTable Propose, from: /Root/Foo, to: /Root/Bar, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-06-25T14:56:48.995332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710660:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:56:49.001481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710660, database: /Root, subject: , status: StatusAccepted, operation: ALTER TABLE RENAME, dst path: /Root/Foo, dst path: /Root/Bar 2025-06-25T14:56:49.041578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750863889082, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:56:49.060371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 281474976710660, done: 0, blocked: 1 2025-06-25T14:56:49.062258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:56:49.062279Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:56:49.065495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710660:0 2025-06-25T14:56:49.067909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 Fast forward 1m partitions 1 Fast forward 1m 2025-06-25T14:56:56.803966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037890 followerId 0 p ... 
from: 2025-06-25T14:58:03.330052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710664:0 progress is 1/1 2025-06-25T14:58:03.330063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710664 ready parts: 1/1 2025-06-25T14:58:03.330075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710664, ready parts: 1/1, is published: true 2025-06-25T14:58:03.330098Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037892 Initiating switch from PreOffline to Offline state 2025-06-25T14:58:03.330153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:7519901403496732098:2713] message: TxId: 281474976710664 2025-06-25T14:58:03.330178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710664 ready parts: 1/1 2025-06-25T14:58:03.330191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710664:0 2025-06-25T14:58:03.330210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710664:0 2025-06-25T14:58:03.330329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-25T14:58:03.330605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710664 2025-06-25T14:58:03.330669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710664 2025-06-25T14:58:03.331521Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:58:03.331583Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:58:03.331767Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [1:7519901403496732149:2716], serverId# [1:7519901403496732156:4700], sessionId# [0:0:0] 2025-06-25T14:58:03.331806Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [1:7519901403496732151:2718], serverId# [1:7519901403496732157:4701], sessionId# [0:0:0] 2025-06-25T14:58:03.332746Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037892 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:58:03.332783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901072784246783 RawX2: 4503603922340149 } TabletId: 72075186224037890 State: 4 2025-06-25T14:58:03.332843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:03.333029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 
72057594046644480, message: Source { RawX1: 7519901072784246783 RawX2: 4503603922340149 } TabletId: 72075186224037890 State: 4 2025-06-25T14:58:03.333064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:03.333692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901394906797164 RawX2: 4503603922340479 } TabletId: 72075186224037892 State: 4 2025-06-25T14:58:03.333717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037892, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:03.335171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:03.335236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:03.335353Z node 1 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037891 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:58:03.335418Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037891 Initiating switch from PreOffline to Offline state 2025-06-25T14:58:03.335641Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-25T14:58:03.335661Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-25T14:58:03.339003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:5 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:03.339085Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037891 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:58:03.339144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T14:58:03.339175Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037892 state Offline 2025-06-25T14:58:03.339253Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-25T14:58:03.339402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T14:58:03.339560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T14:58:03.339748Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-25T14:58:03.339830Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-25T14:58:03.340010Z node 1 :HIVE 
WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-25T14:58:03.340241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901394906797163 RawX2: 4503603922340478 } TabletId: 72075186224037891 State: 4 2025-06-25T14:58:03.340298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:03.344249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T14:58:03.344284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T14:58:03.344353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T14:58:03.344442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-25T14:58:03.344651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-25T14:58:03.344910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:03.344946Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037892 reason = ReasonStop 2025-06-25T14:58:03.344989Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-25T14:58:03.345267Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-06-25T14:58:03.345367Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-25T14:58:03.345658Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found 2025-06-25T14:58:03.346811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-25T14:58:03.346836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-25T14:58:03.347777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-25T14:58:03.347977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-25T14:58:03.348029Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-06-25T14:58:03.348169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:58:03.348199Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-25T14:58:03.348248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-25T14:58:03.348507Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-25T14:58:03.348592Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-25T14:58:03.348993Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-25T14:58:03.350437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-25T14:58:03.350466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-25T14:58:03.350516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 |89.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::InitiatingRequest >> TTxAllocatorClientTest::AllocateOverTheEdge >> KqpVectorIndexes::OrderByCosineLevel2+Nullable+UseSimilarity [GOOD] >> TTxAllocatorClientTest::AllocateOverTheEdge [GOOD] >> TTxAllocatorClientTest::InitiatingRequest [GOOD] |89.8%| [TA] $(B)/ydb/services/ydb/table_split_ut/test-results/unittest/{meta.json ... results_accumulator.log} |89.8%| [TA] {RESULT} $(B)/ydb/services/ydb/table_split_ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::InitiatingRequest [GOOD] Test command err: 2025-06-25T14:58:05.591905Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-25T14:58:05.596572Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-25T14:58:05.598159Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-25T14:58:05.631311Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.634790Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-25T14:58:05.647788Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.647873Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.648037Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-25T14:58:05.648177Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.648363Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.648459Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-25T14:58:05.648569Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-25T14:58:05.650427Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#5000 2025-06-25T14:58:05.652894Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.652963Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.653048Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 5000 2025-06-25T14:58:05.653084Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 0 to# 5000 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator_client/ut/unittest >> TTxAllocatorClientTest::AllocateOverTheEdge [GOOD] Test command err: 2025-06-25T14:58:05.594245Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-25T14:58:05.596551Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-25T14:58:05.598208Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-25T14:58:05.631191Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.634784Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-25T14:58:05.648604Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.648708Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.648834Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-25T14:58:05.648937Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.649056Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.649136Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-25T14:58:05.649246Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-25T14:58:05.650438Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#5000 2025-06-25T14:58:05.652898Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.652980Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.653084Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 5000 2025-06-25T14:58:05.653122Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 0 to# 5000 2025-06-25T14:58:05.653289Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-25T14:58:05.653456Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-25T14:58:05.653615Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-25T14:58:05.653796Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. 
Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-25T14:58:05.653919Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#5000 2025-06-25T14:58:05.660717Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.660803Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.660940Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 5000 Reserved to# 10000 2025-06-25T14:58:05.660981Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 5000 to# 10000 2025-06-25T14:58:05.661232Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 500 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-25T14:58:05.661358Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-25T14:58:05.661523Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 2500 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-25T14:58:05.661772Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 1000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 2025-06-25T14:58:05.661905Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#5000 2025-06-25T14:58:05.662320Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.662410Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:05.662515Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 10000 Reserved to# 15000 2025-06-25T14:58:05.662554Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 10000 to# 15000 2025-06-25T14:58:05.662714Z node 1 :TX_ALLOCATOR_CLIENT WARN: client.cpp:38: AllocateTxIds: requested many txIds. Just a warning, request is processed. Requested: 3000 TxAllocators count: 1 RequestPerAllocator: 5000 MaxCapacity: 5000 BatchAllocationWarning: 500 |89.9%| [TA] $(B)/ydb/core/tx/tx_allocator_client/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |89.9%| [TA] {RESULT} $(B)/ydb/core/tx/tx_allocator_client/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpQueryService::DdlCache [GOOD] >> KqpQueryService::DdlExecuteScript ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineLevel2+Nullable+UseSimilarity [GOOD] Test command err: Trying to start YDB, gRPC: 3741, MsgBus: 25224 2025-06-25T14:56:50.504403Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901088937168033:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:50.554787Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c1f/r3tmp/tmp8qO00y/pdisk_1.dat 2025-06-25T14:56:50.929317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:50.929410Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:50.939039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:50.960741Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3741, node 1 2025-06-25T14:56:51.117317Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:51.117334Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:51.117341Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:51.117473Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25224 2025-06-25T14:56:51.545782Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25224 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:56:51.910157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:56:51.949172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:52.090909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.245455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:52.333565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:53.871401Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901101822071516:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:53.871700Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.110310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.136874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.171137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.222267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.254455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.345362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.422227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:54.537914Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901106117039478:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.538003Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.538090Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901106117039483:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:54.542149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:54.558319Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901106117039485:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:54.643858Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901106117039538:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:55.501588Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901088937168033:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:55.501654Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:55.644383Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901110412007108:3596], Recipient [1:7519901088937168362:2187]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.644428Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:55.644442Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:56:55.644475Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519901110412007104:3593], Recipient [1:75199010 ... ght# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:03.033135Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037901 2025-06-25T14:58:03.033148Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:20 data size 0 row count 0 2025-06-25T14:58:03.033171Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037907 maps to shardIdx: 72057594046644480:20 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:03.033179Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037907, followerId 0 2025-06-25T14:58:03.033201Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:20 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:03.033210Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037907 2025-06-25T14:58:03.033226Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:15 data size 0 row count 0 2025-06-25T14:58:03.033252Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037902 maps to shardIdx: 72057594046644480:15 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, 
DataSize 0 2025-06-25T14:58:03.033262Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037902, followerId 0 2025-06-25T14:58:03.033283Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:15 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:03.033291Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037902 2025-06-25T14:58:03.033305Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:19 data size 0 row count 0 2025-06-25T14:58:03.033327Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037906 maps to shardIdx: 72057594046644480:19 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:03.033333Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037906, followerId 0 2025-06-25T14:58:03.033353Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:19 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:03.033362Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037906 2025-06-25T14:58:03.033378Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:16 data size 0 row count 0 2025-06-25T14:58:03.033400Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037903 maps to shardIdx: 72057594046644480:16 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:03.033408Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037903, followerId 0 2025-06-25T14:58:03.033428Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:16 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:03.033437Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037903 2025-06-25T14:58:03.033450Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:22 data size 0 row count 0 2025-06-25T14:58:03.033473Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037909 maps to shardIdx: 72057594046644480:22 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:03.033482Z node 2 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037909, followerId 0 2025-06-25T14:58:03.033502Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:22 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:03.033511Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037909 2025-06-25T14:58:03.033525Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:18 data size 0 row count 0 2025-06-25T14:58:03.033547Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037905 maps to shardIdx: 72057594046644480:18 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:03.033555Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037905, followerId 0 2025-06-25T14:58:03.033575Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:18 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:03.033586Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037905 2025-06-25T14:58:03.033600Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:17 data size 0 row count 0 2025-06-25T14:58:03.033620Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037904 maps to shardIdx: 72057594046644480:17 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:03.033626Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037904, followerId 0 2025-06-25T14:58:03.033647Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:17 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:03.033655Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037904 2025-06-25T14:58:03.033669Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:23 data size 0 row count 0 2025-06-25T14:58:03.033689Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037910 maps to shardIdx: 72057594046644480:23 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:03.033696Z node 2 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037910, followerId 0 2025-06-25T14:58:03.033716Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:23 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:03.033725Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037910 2025-06-25T14:58:03.033773Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:58:03.035220Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519901219119182052:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:03.035259Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:03.035269Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:58:03.800746Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519901219119182052:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:03.800795Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:03.800849Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:7519901219119182052:2147], Recipient [2:7519901219119182052:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:03.800870Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:04.801134Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519901219119182052:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:04.801175Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:04.801218Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:7519901219119182052:2147], Recipient [2:7519901219119182052:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:04.801237Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TFlatTest::CrossRW >> TLocksFatTest::RangeSetRemove >> TFlatTest::SelectRangeForbidNullArgs2 >> TLocksTest::UpdateLockedKey >> TFlatTest::Init >> TFlatTest::SelectRangeItemsLimit >> TFlatTest::CopyCopiedTableAndRead >> TLocksTest::Range_GoodLock0 >> TFlatTest::CopyTableAndRead >> TLocksTest::CK_GoodLock >> TLocksTest::SetLockFail >> TFlatTest::WriteMergeAndRead >> TFlatTest::SelectRangeReverseItemsLimit >> TLocksTest::Range_BrokenLock0 >> TFlatTest::AutoSplitBySize >> TLocksTest::Range_Pinhole >> TFlatTest::Mix_DML_DDL >> 
KqpPrefixedVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel4 >> GenericFederatedQuery::ClickHouseSelectCount [GOOD] >> GenericFederatedQuery::ClickHouseFilterPushdown >> TFlatTest::SelectRangeReverseItemsLimit [GOOD] >> TLocksTest::SetLockFail [GOOD] >> TLocksTest::SetEraseSet >> TFlatTest::SelectRangeReverseIncludeKeys >> TFlatTest::SelectRangeItemsLimit [GOOD] >> TFlatTest::SelectRangeForbidNullArgs4 >> TFlatTest::SelectRangeForbidNullArgs2 [GOOD] >> TFlatTest::SelectRangeForbidNullArgs3 >> TFlatTest::CrossRW [GOOD] >> TFlatTest::GetTabletCounters >> TFlatTest::WriteMergeAndRead [GOOD] >> TFlatTest::WriteSplitAndRead >> TFlatTest::CopyCopiedTableAndRead [GOOD] >> TFlatTest::CopyTableAndAddFollowers >> TFlatTest::CopyTableAndRead [GOOD] >> TFlatTest::CopyTableAndDropOriginal >> TFlatTest::Init [GOOD] >> TFlatTest::LargeDatashardReply >> KqpPrefixedVectorIndexes::OrderByCosineLevel1-Nullable+UseSimilarity [GOOD] >> KqpPrefixedVectorIndexes::OrderByCosineLevel2+Nullable+UseSimilarity >> KqpUniqueIndex::InsertComplexFkPkOverlapDuplicate [GOOD] >> TFlatTest::Mix_DML_DDL [GOOD] >> TFlatTest::OutOfDiskSpace [GOOD] >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown [GOOD] >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown [GOOD] >> GenericFederatedQuery::PostgreSQLFilterPushdown [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert-UseSink+UseDataQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink+UseDataQuery >> KqpExtractPredicateLookup::SqlInJoin [GOOD] >> KqpKv::BulkUpsert >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown [GOOD] >> GenericFederatedQuery::IcebergHiveSaFilterPushdown [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertComplexFkPkOverlapDuplicate [GOOD] Test command err: Trying to start YDB, gRPC: 3809, MsgBus: 17044 2025-06-25T14:56:53.816897Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901102320314062:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:53.825160Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c05/r3tmp/tmpNWfdhW/pdisk_1.dat 2025-06-25T14:56:54.198603Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3809, node 1 2025-06-25T14:56:54.238893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:54.238917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:54.238923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:54.239017Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:56:54.253290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:54.253432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:54.256805Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17044 TClient is connected to server localhost:17044 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:54.815731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:54.826512Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:56:54.846800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:54.969788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:55.123496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:55.194617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:56.830287Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901115205217542:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:56.830408Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:57.115190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:57.157944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:57.193591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:57.226598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:57.305183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:57.347600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:57.381341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:57.446760Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901119500185497:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:57.446836Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:57.447038Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901119500185502:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:57.450579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:57.461176Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901119500185504:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:57.534837Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901119500185555:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:58.510831Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901123795153125:3593], Recipient [1:7519901106615281630:2146]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:58.510873Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:56:58.510885Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:56:58.510921Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519901123795153121:3590], Recipient [1:7519901106615281630:2146]: {TEvModifySchemeTransaction txid# 281474976710672 TabletId# 72057594046644480} 2025-06-25T14:56:58.510936Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:56:58.591748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" ... ROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901395797069151:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:02.294346Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c05/r3tmp/tmpoLGeh1/pdisk_1.dat 2025-06-25T14:58:02.471904Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:02.472009Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:02.479537Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:02.480581Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:02.486482Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901395797069132:2080] 1750863482293806 != 1750863482293809 TServer::EnableGrpc on GrpcPort 23277, node 3 2025-06-25T14:58:02.569015Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:02.569038Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:02.569048Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:02.569213Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15315 TClient is connected to server localhost:15315 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T14:58:03.209270Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:03.216759Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:58:03.224187Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:03.291158Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:03.301334Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:03.496322Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:03.578581Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:06.107785Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901412976939953:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:06.107885Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:06.171771Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:06.205354Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:06.234986Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:06.265752Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:06.297841Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:06.376736Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:06.416000Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:06.473054Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901412976940616:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:06.473133Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:06.473236Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901412976940621:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:06.476563Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:06.486966Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901412976940623:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:58:06.545864Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901412976940674:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:07.294308Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901395797069151:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.294381Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:07.972114Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 24972, MsgBus: 7719 2025-06-25T14:57:29.345128Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901255028256255:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:29.345310Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e67/r3tmp/tmpguAGuU/pdisk_1.dat 2025-06-25T14:57:29.801687Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:29.801795Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:29.805929Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:29.839990Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24972, node 1 2025-06-25T14:57:30.100975Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:30.101002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:30.101010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:30.101104Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:30.357036Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7719 TClient is connected to server localhost:7719 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:30.990951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:32.489277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901267913158722:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:32.489427Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.019448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:33.152484Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901272208126141:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.152550Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.152592Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901272208126146:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.155570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:33.165088Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901272208126148:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:57:33.225842Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901272208126188:2400] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:34.045522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:34.344936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901255028256255:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:34.344979Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:34.423565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:34.861871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:35.420949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:35.892219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:36.368202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:36.414681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:57:38.154331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715704:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:57:38.178462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715705:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.179885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715707:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.181407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_u ... 
ion type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:06.604819Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:06.980706Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519901390959424687:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:06.980767Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:07.305432Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:07.905567Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:08.458939Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:09.023402Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:09.071074Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:58:11.438409Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::OutOfDiskSpace [GOOD] Test command err: 2025-06-25T14:58:09.299094Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901429216983138:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:09.299217Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002082/r3tmp/tmpJdYDzg/pdisk_1.dat 2025-06-25T14:58:09.775238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:09.775322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:09.782139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:09.832458Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:09.836504Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901429216983119:2080] 1750863489297735 != 1750863489297738 TClient is connected to server localhost:30102 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:10.195642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:10.224074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:58:10.233939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:10.323696Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:10.439224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) waiting... proxy error code: Unknown error:
: Error: Resolve failed for table: /dc-1/Table, error: column 'value' not exist, code: 200400 2025-06-25T14:58:10.469731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) waiting... 2025-06-25T14:58:10.492858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) waiting... waiting... 2025-06-25T14:58:10.521586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) proxy error code: Unknown error:
:5:24: Error: At function: AsList
:5:32: Error: At function: SetResult
:4:27: Error: At function: SelectRow
:4:27: Error: Mismatch of key columns count for table [/dc-1/Table], expected: 2, but got 1., code: 2028 >> GenericFederatedQuery::IcebergHadoopBasicFilterPushdown [GOOD] >> TFlatTest::SelectRangeForbidNullArgs4 [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 1348, MsgBus: 27953 2025-06-25T14:57:29.333330Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901253635576441:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:29.333376Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e80/r3tmp/tmpGxutdl/pdisk_1.dat 2025-06-25T14:57:29.787276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:29.787494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:29.790431Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:29.834523Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1348, node 1 2025-06-25T14:57:30.100945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:30.100969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:30.100977Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:30.101093Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:30.349851Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27953 TClient is connected to server localhost:27953 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:31.134534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:31.168695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:32.592968Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901266520478949:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:32.593160Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.020523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:33.159604Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901270815446367:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.159727Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.160021Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901270815446373:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.163360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:33.170885Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901270815446375:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:57:33.225947Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901270815446415:2401] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:34.045334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:34.334241Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901253635576441:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:34.334293Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:34.485598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:34.965843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:35.511795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:35.930455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:36.443489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:36.526799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:57:38.225560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710704:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:57:38.249399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710705:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.250696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.251793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { ... 
ck failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:06.255729Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:06.747545Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:06.782987Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519901390843547664:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:06.783048Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:07.327085Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:07.964365Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:08.683669Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:09.347559Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:09.392958Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:58:11.752059Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715708:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> TFlatTest::SelectRangeForbidNullArgs3 [GOOD] >> TFlatTest::SelectRangeReverseIncludeKeys [GOOD] >> TFlatTest::WriteSplitAndRead [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::PostgreSQLFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 11151, MsgBus: 9755 2025-06-25T14:57:29.319569Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901253702092354:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:29.323438Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e8c/r3tmp/tmpQp7GwZ/pdisk_1.dat 2025-06-25T14:57:29.802402Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:29.812435Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901253702092250:2080] 1750863449309139 != 1750863449309142 2025-06-25T14:57:29.833578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:29.833699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:29.835860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11151, node 1 2025-06-25T14:57:30.101841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:30.101867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:30.101874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:30.101971Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:30.327129Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9755 TClient is connected to server localhost:9755 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:31.057086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:31.088685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:32.517114Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901266586994784:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:32.517252Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.019554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:33.150204Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901270881962202:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.150313Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.150541Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901270881962207:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.156921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:33.167124Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901270881962209:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:57:33.236332Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901270881962249:2398] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:34.046235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:34.331895Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901253702092354:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:34.331955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:34.499697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:34.944137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:35.448937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:35.903208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:36.446289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:36.500401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:57:38.255646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715704:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:57:38.280758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.282123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715705:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.283350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715707:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions ... 519901411406313407:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:05.756257Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:05.767099Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519901411406313409:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:58:05.858899Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519901411406313449:2393] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:06.395714Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:06.892297Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:07.299553Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519901398521410780:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.299639Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:07.550482Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:08.286144Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:08.878561Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:09.487172Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:09.555374Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:58:12.105976Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715708:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 19722, MsgBus: 23697 2025-06-25T14:57:30.962324Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901258694436472:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:30.962633Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009dc/r3tmp/tmp6prwBY/pdisk_1.dat 2025-06-25T14:57:31.352654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:31.352761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:31.356578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:31.360760Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901258694436290:2080] 1750863450930589 != 1750863450930592 2025-06-25T14:57:31.369432Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19722, node 1 2025-06-25T14:57:31.512565Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:31.512586Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:31.512592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:31.512711Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23697 2025-06-25T14:57:31.921182Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23697 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:32.195312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:32.217318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:34.085273Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901275874306118:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:34.085380Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:34.342233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:34.447673Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901275874306239:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:34.447756Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:34.448296Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901275874306245:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:34.452425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:34.462895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-25T14:57:34.463077Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901275874306247:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T14:57:34.526605Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901275874306287:2399] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:35.103082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:35.650140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:35.948460Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901258694436472:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:35.948514Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:36.236104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:36.709566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:37.112185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:37.508531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:37.549399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:57:39.179420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:57:39.225800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715703:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.227895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715704:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.238161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose ... ck failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:06.452655Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:06.949469Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:07.457514Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519901397068815421:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.457599Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:07.530517Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:08.179307Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:08.891544Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:09.495541Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:09.547050Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:58:11.847720Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. 
GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } 
right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. GRpcStatusCode: 0 >> TFlatTest::GetTabletCounters [GOOD] >> TFlatTest::CopyTableAndAddFollowers [GOOD] >> TFlatTest::CopyCopiedTableAndDropFirstCopy >> TFlatTest::CopyTableAndDropOriginal [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveSaFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 6257, MsgBus: 5186 2025-06-25T14:57:29.951439Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901256381235021:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:29.951490Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009ea/r3tmp/tmpT48O2v/pdisk_1.dat 2025-06-25T14:57:30.302318Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901256381234999:2080] 1750863449950128 != 1750863449950131 2025-06-25T14:57:30.313157Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6257, node 1 2025-06-25T14:57:30.351208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:30.351305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:30.357659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:30.449013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:30.449043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:30.449051Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:30.449163Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5186 TClient is connected to server localhost:5186 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:57:30.968461Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:31.047221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:31.074945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:32.993511Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901269266137528:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:32.993631Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.251506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:33.349562Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901273561104946:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.349640Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.349956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901273561104951:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.352825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:33.361146Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901273561104953:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:57:33.430215Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901273561104993:2396] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:34.048804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:34.467117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:34.956770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901256381235021:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:34.956824Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:34.992969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:35.489929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:35.880944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:36.443822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:36.495545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:57:38.098273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710702:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:57:38.131604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710703:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.133180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710704:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.134907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710705:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_r ... 
ion type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:06.575627Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:06.853194Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519901394758736051:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:06.853276Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:07.236010Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:07.793774Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:08.434948Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:09.036812Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:09.094701Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:58:11.506849Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 61576, MsgBus: 31411 2025-06-25T14:57:29.315815Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901254898758523:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:29.316736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a13/r3tmp/tmpY336IN/pdisk_1.dat 2025-06-25T14:57:29.780768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:29.780886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:29.800042Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:29.804531Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901254898758424:2080] 1750863449306884 != 1750863449306887 2025-06-25T14:57:29.868996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61576, node 1 2025-06-25T14:57:30.129043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:30.129066Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:30.129074Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:30.129217Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:30.315923Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31411 TClient is connected to server localhost:31411 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:31.130349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:32.747416Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901267783660960:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:32.747535Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.019433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:33.176929Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901272078628378:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.176994Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.177322Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901272078628383:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.180180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:33.189792Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901272078628385:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:57:33.277634Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901272078628425:2398] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:34.063498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:34.315753Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901254898758523:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:34.315811Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:34.486475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:34.906466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:35.444057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:35.899089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:36.491547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:36.544409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:57:38.220672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710704:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:57:38.243292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710705:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.244497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.246336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" ... 
72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:07.678378Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:07.834302Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519901395844317036:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.834359Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:08.389773Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:09.119535Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:09.711964Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:10.358876Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:10.402406Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:58:12.639354Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715708:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> KqpQueryService::DdlExecuteScript [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopBasicFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 61920, MsgBus: 27800 2025-06-25T14:57:29.334526Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901256193282885:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:29.341883Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009fb/r3tmp/tmpRpioXM/pdisk_1.dat 2025-06-25T14:57:29.829463Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:29.863019Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:29.863121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:29.869947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61920, node 1 2025-06-25T14:57:30.107864Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:30.107884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:30.107889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:30.108021Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:30.336441Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27800 TClient is connected to server localhost:27800 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:31.046193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:32.634353Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901269078185319:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:32.634465Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.019386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:33.154357Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901273373152737:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.154448Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.154727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901273373152743:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:33.158269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:33.168603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-25T14:57:33.169171Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901273373152745:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:57:33.249443Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901273373152785:2400] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:34.045428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:34.343078Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901256193282885:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:34.343244Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:34.488911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:35.040905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:35.500445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:35.964961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:36.569497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:36.621628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:57:38.237351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710704:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:57:38.262266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710705:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.263863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:38.265287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710707:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials ... 
or: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:07.555348Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:08.099898Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:08.656299Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519901403228526306:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:08.656378Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:08.914777Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:09.599429Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:10.188932Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715681:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:10.783660Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:10.838715Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:58:13.141855Z node 4 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715706:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeReverseIncludeKeys [GOOD] Test command err: 2025-06-25T14:58:07.824806Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901416845359497:2245];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.829860Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208f/r3tmp/tmpliopHv/pdisk_1.dat 2025-06-25T14:58:08.227944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.228062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.244064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:08.314769Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:08.319233Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901416845359272:2080] 1750863487784423 != 1750863487784426 TClient is connected to server localhost:30907 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:08.612499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:08.643715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:08.826278Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:11.111104Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901433999878798:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:11.111148Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208f/r3tmp/tmpG5OZyP/pdisk_1.dat 2025-06-25T14:58:11.271155Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:11.285476Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:11.285542Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:11.288363Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26678 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:11.593754Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:11.601360Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:11.617175Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:11.628378Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::WriteSplitAndRead [GOOD] Test command err: 2025-06-25T14:58:07.778162Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901419525574480:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.778400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002087/r3tmp/tmpqqyEpm/pdisk_1.dat 2025-06-25T14:58:08.206549Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:08.224371Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.225833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.227421Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22463 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:08.598448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:08.620902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:08.643302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:08.780114Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:09.029166Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.009s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-25T14:58:09.031040Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.034s,wait=0.003s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1297 647 2154)b }, ecr=1.000 2025-06-25T14:58:09.063655Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.007s,wait=0.002s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1653 647 6413)b }, ecr=1.000 2025-06-25T14:58:09.070436Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.007s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 2025-06-25T14:58:09.094160Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:09.097387Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.21, eph 3} end=Done, 4 blobs 8r (max 9), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (3362 2180 6413)b }, ecr=1.000 2025-06-25T14:58:09.099064Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:58:09.099115Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:58:09.099516Z node 1 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-25T14:58:09.099568Z node 1 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 0, front# 0 2025-06-25T14:58:09.102434Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:09.104228Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:58:09.104269Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T14:58:09.107760Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:09.111273Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.21, eph 3} end=Done, 4 blobs 9r (max 9), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (4073 2983 5183)b }, ecr=1.000 2025-06-25T14:58:09.111480Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-25T14:58:09.111544Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:58:09.111972Z node 1 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037889, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-25T14:58:09.111985Z node 1 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 
72075186224037889, table# 1001, finished edge# 0, front# 0 2025-06-25T14:58:09.115253Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:09.117675Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-25T14:58:09.117724Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750863488822 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) 2025-06-25T14:58:09.153036Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:09.209892Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-25T14:58:09.210173Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:09.211889Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-25T14:58:09.212348Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:09.213005Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037888 restored its data 2025-06-25T14:58:09.213961Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-25T14:58:09.214106Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:09.214593Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037889 restored its data 2025-06-25T14:58:09.215375Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-25T14:58:09.215901Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:09.216443Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037888 restored its data 2025-06-25T14:58:09.217023Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-25T14:58:09.217113Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:09.217606Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037889 restored its data 2025-06-25T14:58:09.218318Z node 1 
:TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-25T14:58:09.218838Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:09.219221Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037888 restored its data 2025-06-25T14:58:09.219889Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-25T14:58:09.219985Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:09.220414Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037889 restored its data 2025-06-25T14:58:09.221048Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710680 released its data 2025-06-25T14:58:09.224647Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:09.225128Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710680 at 72075186224037888 restored its data 2025-06-25T14:58:09.225715Z node 1 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx ... 644480 2025-06-25T14:58:12.127682Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901437514953851 RawX2: 4503608217307419 } TabletId: 72075186224037892 State: 4 2025-06-25T14:58:12.127697Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037892, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:12.127790Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901437514953489 RawX2: 4503608217307346 } TabletId: 72075186224037889 State: 4 2025-06-25T14:58:12.127804Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:12.127891Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901437514953841 RawX2: 4503608217307418 } TabletId: 72075186224037891 State: 4 2025-06-25T14:58:12.127907Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:12.127974Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901437514953841 RawX2: 4503608217307418 } TabletId: 72075186224037891 State: 4 2025-06-25T14:58:12.127989Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:12.128570Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 
72057594037968897 at ss 72057594046644480 2025-06-25T14:58:12.128574Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-25T14:58:12.128836Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:5 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:12.128848Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037892 state Offline 2025-06-25T14:58:12.128922Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037892 state Offline 2025-06-25T14:58:12.128941Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:5 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:12.128992Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-25T14:58:12.128993Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:12.129022Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-25T14:58:12.129026Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:12.129053Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-25T14:58:12.129054Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:12.135442Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T14:58:12.135673Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-25T14:58:12.135826Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-25T14:58:12.135945Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T14:58:12.136027Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-25T14:58:12.136127Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-25T14:58:12.136231Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-25T14:58:12.136330Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK 
Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-25T14:58:12.136430Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-25T14:58:12.136528Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-25T14:58:12.136604Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:58:12.136616Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-25T14:58:12.136658Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:58:12.139276Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T14:58:12.139292Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T14:58:12.139326Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-25T14:58:12.139333Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-25T14:58:12.139350Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-25T14:58:12.139367Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-25T14:58:12.139374Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-25T14:58:12.139390Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-25T14:58:12.139407Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-25T14:58:12.139433Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-25T14:58:12.139456Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:58:12.139979Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-06-25T14:58:12.140018Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [2:7519901437514953969:2615], serverId# [2:7519901437514953972:2618], sessionId# [0:0:0] 2025-06-25T14:58:12.140038Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-25T14:58:12.140053Z node 2 :TX_DATASHARD DEBUG: 
datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [2:7519901437514953968:2614], serverId# [2:7519901437514953971:2617], sessionId# [0:0:0] 2025-06-25T14:58:12.140065Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037892 reason = ReasonStop 2025-06-25T14:58:12.140081Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [2:7519901437514953970:2616], serverId# [2:7519901437514953973:2619], sessionId# [0:0:0] 2025-06-25T14:58:12.140093Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-25T14:58:12.140108Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [2:7519901437514953614:2393], serverId# [2:7519901437514953615:2394], sessionId# [0:0:0] 2025-06-25T14:58:12.140804Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-06-25T14:58:12.140862Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-25T14:58:12.140874Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037892 not found 2025-06-25T14:58:12.140888Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-25T14:58:12.141306Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-25T14:58:12.141389Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 2025-06-25T14:58:12.142779Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-25T14:58:12.142833Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-25T14:58:12.144142Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-25T14:58:12.144177Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-25T14:58:12.146340Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-06-25T14:58:12.146389Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" >> TLocksFatTest::PointSetNotBreak ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeForbidNullArgs4 [GOOD] Test command err: 2025-06-25T14:58:07.780848Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901420198482171:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.781222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002085/r3tmp/tmpffDajw/pdisk_1.dat 2025-06-25T14:58:08.216045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2025-06-25T14:58:08.216181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.237692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:08.239129Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:17650 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:08.647959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:08.672814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:08.696663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:08.707245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:08.814387Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:11.107457Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901435048813057:2085];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:11.109072Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002085/r3tmp/tmpzR99OP/pdisk_1.dat 2025-06-25T14:58:11.332687Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:11.333900Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901435048812988:2080] 1750863491090195 != 1750863491090198 2025-06-25T14:58:11.346500Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:11.346587Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:11.353175Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29224 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:11.654234Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:11.661482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:11.687976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::GetTabletCounters [GOOD] Test command err: 2025-06-25T14:58:07.783231Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901419118712860:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.783299Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002092/r3tmp/tmpqKMnUZ/pdisk_1.dat 2025-06-25T14:58:08.304886Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:08.317894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.317991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.322683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28435 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:08.642957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:08.680523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:08.696522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:08.701153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:08.798840Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:11.297165Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901434050166194:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:11.316575Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002092/r3tmp/tmpCqc9Cu/pdisk_1.dat 2025-06-25T14:58:11.450891Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:11.456469Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901434050166010:2080] 1750863491254123 != 1750863491254126 2025-06-25T14:58:11.477629Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:11.477711Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:11.482988Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26028 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:11.733420Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:11.738520Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:11.754962Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750863491860 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeForbidNullArgs3 [GOOD] Test command err: 2025-06-25T14:58:07.789750Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901419381638601:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.790104Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208a/r3tmp/tmpLfEaK4/pdisk_1.dat 2025-06-25T14:58:08.270313Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.270406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.271857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:08.281331Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901419381638450:2080] 1750863487772638 != 1750863487772641 2025-06-25T14:58:08.296345Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:17960 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:08.598907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:08.621596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:08.642303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.805259Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:11.188784Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901436235552087:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:11.224411Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208a/r3tmp/tmpgiTRn1/pdisk_1.dat 2025-06-25T14:58:11.469867Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:11.471666Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901436235552067:2080] 1750863491178806 != 1750863491178809 2025-06-25T14:58:11.488521Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:11.488592Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:11.490646Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26931 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:11.732864Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:11.740715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:11.755585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> TFlatTest::CopyTableAndReturnPartAfterCompaction >> TFlatTest::ReadOnlyMode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::CopyTableAndDropOriginal [GOOD] Test command err: 2025-06-25T14:58:07.781365Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901418508549885:2168];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.781536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002090/r3tmp/tmpKiW5jn/pdisk_1.dat 2025-06-25T14:58:08.240803Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:08.274118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.274216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.281155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11132 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:08.607448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:08.632586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:08.656365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:08.672118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.785217Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:09.017011Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.023s,wait=0.009s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1297 647 2154)b }, ecr=1.000 2025-06-25T14:58:09.017827Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-25T14:58:09.092710Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.012s,wait=0.004s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1653 647 6413)b }, ecr=1.000 2025-06-25T14:58:09.097331Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.014s,wait=0.003s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 Copy TableOld to Table 2025-06-25T14:58:09.188034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/dc-1/Dir" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table" CopyFromTable: "/dc-1/Dir/TableOld" } } TxId: 281474976715676 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:58:09.188270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_copy_table.cpp:343: TCopyTable Propose, path: /dc-1/Dir/Table, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-06-25T14:58:09.188766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 2], parent name: Dir, child name: Table, child id: [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-06-25T14:58:09.188831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 0 2025-06-25T14:58:09.188842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: 
IncrementPathDbRefCount reason transaction source path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T14:58:09.188857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976715676:0 type: TxCopyTable target path: [OwnerId: 72057594046644480, LocalPathId: 4] source path: [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-25T14:58:09.188893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-25T14:58:09.188906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-06-25T14:58:09.189026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 3 2025-06-25T14:58:09.189131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715676:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:58:09.191049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:58:09.191081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 4 2025-06-25T14:58:09.191629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715676, response: Status: StatusAccepted TxId: 281474976715676 SchemeshardId: 72057594046644480 PathId: 4, at schemeshard: 72057594046644480 2025-06-25T14:58:09.191758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715676, database: /dc-1, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /dc-1/Dir/Table 2025-06-25T14:58:09.191916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:58:09.191929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715676, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T14:58:09.192401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715676, path id: [OwnerId: 72057594046644480, LocalPathId: 4] 2025-06-25T14:58:09.192519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:58:09.192539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519901422803517520:2238], at schemeshard: 72057594046644480, txId: 281474976715676, path id: 2 2025-06-25T14:58:09.192556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519901422803517520:2238], at schemeshard: 72057594046644480, txId: 281474976715676, path id: 4 2025-06-25T14:58:09.192598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress 
Execute, operationId: 281474976715676:0, at schemeshard: 72057594046644480 2025-06-25T14:58:09.192631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715676:0 ProgressState, operation type: TxCopyTable, at tablet# 72057594046644480 2025-06-25T14:58:09.192926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976715676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 3 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-25T14:58:09.193051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976715676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 4 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-25T14:58:09.195428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976715676 2025-06-25T14:58:09.195498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976715676 2025-06-25T14:58:09.195507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976715676 2025-06-25T14:58:09.195521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715676, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 6 waiting... 2025-06-25T14:58:09.195536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-2 ... 
te: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:12.282826Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901437627939078 RawX2: 4503608217307347 } TabletId: 72075186224037889 State: 4 2025-06-25T14:58:12.282847Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:12.282951Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901441922906662 RawX2: 4503608217307405 } TabletId: 72075186224037891 State: 4 2025-06-25T14:58:12.282981Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:12.283071Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901441922906662 RawX2: 4503608217307405 } TabletId: 72075186224037891 State: 4 2025-06-25T14:58:12.283088Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:12.283241Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:12.283303Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:12.283336Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:12.283366Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:12.283394Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:12.284024Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037888 state Offline 2025-06-25T14:58:12.284048Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-25T14:58:12.284073Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-25T14:58:12.284104Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-25T14:58:12.284123Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-25T14:58:12.284866Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-25T14:58:12.285099Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: 
DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-25T14:58:12.285253Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T14:58:12.285395Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-06-25T14:58:12.285530Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-25T14:58:12.285645Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-25T14:58:12.285755Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-25T14:58:12.285914Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-25T14:58:12.286009Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-25T14:58:12.286104Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 2 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:58:12.286131Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-06-25T14:58:12.286196Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:58:12.286232Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-25T14:58:12.286282Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:58:12.286288Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-25T14:58:12.286334Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [2:7519901437627939185:2395], serverId# [2:7519901437627939186:2396], sessionId# [0:0:0] 2025-06-25T14:58:12.286358Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-06-25T14:58:12.286378Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [2:7519901441922906846:2645], serverId# [2:7519901441922906847:2646], sessionId# [0:0:0] 2025-06-25T14:58:12.286389Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: 
OnTabletStop: 72075186224037888 reason = ReasonStop 2025-06-25T14:58:12.286405Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [2:7519901437627939175:2388], serverId# [2:7519901437627939176:2389], sessionId# [0:0:0] 2025-06-25T14:58:12.286415Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-25T14:58:12.286437Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [2:7519901441922906844:2643], serverId# [2:7519901441922906845:2644], sessionId# [0:0:0] 2025-06-25T14:58:12.287347Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-25T14:58:12.287365Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-06-25T14:58:12.287375Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-25T14:58:12.287386Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-25T14:58:12.287401Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-25T14:58:12.287415Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-25T14:58:12.287440Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T14:58:12.287447Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T14:58:12.287460Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-25T14:58:12.287467Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-25T14:58:12.287479Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-25T14:58:12.287496Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-25T14:58:12.287511Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-25T14:58:12.287535Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:58:12.287876Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-25T14:58:12.287982Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 2025-06-25T14:58:12.289298Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-25T14:58:12.289340Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-25T14:58:12.290500Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: 
OnTabletDead: 72075186224037888 2025-06-25T14:58:12.290543Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-25T14:58:12.291571Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-25T14:58:12.291619Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-25T14:58:12.410223Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:12.581701Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) Check that tablet 72075186224037889 was deleted 2025-06-25T14:58:12.582042Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037889) Check that tablet 72075186224037890 was deleted 2025-06-25T14:58:12.582345Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037890) Check that tablet 72075186224037891 was deleted 2025-06-25T14:58:12.582610Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037891) >> TLocksTest::Range_IncorrectDot1 >> TLocksFatTest::RangeSetRemove [GOOD] >> TLocksFatTest::ShardLocks ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::DdlExecuteScript [GOOD] Test command err: Trying to start YDB, gRPC: 15373, MsgBus: 18464 2025-06-25T14:56:04.772911Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900890810899168:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:04.784277Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00177f/r3tmp/tmpn2dt6w/pdisk_1.dat 2025-06-25T14:56:05.295064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:56:05.295158Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:56:05.297440Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:56:05.340298Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900890810899140:2080] 1750863364752830 != 1750863364752833 2025-06-25T14:56:05.343125Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15373, node 1 2025-06-25T14:56:05.448006Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:56:05.448038Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:56:05.448046Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:56:05.448161Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to 
server localhost:18464 2025-06-25T14:56:05.752526Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18464 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:56:06.019511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:56:06.044501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:06.183361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:06.323727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:56:06.398143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:56:07.828149Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900903695802671:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:07.828286Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:08.110622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.147115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.175734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.202812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.249383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.328759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.361413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:08.442358Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900907990770628:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:08.442424Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:08.442499Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900907990770633:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:56:08.445997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:56:08.457741Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900907990770635:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:56:08.556094Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900907990770686:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:56:09.618730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:56:09.776563Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900890810899168:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:56:09.783885Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:56:09.899787Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900912285738392:3668] ... 7 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:07.030531Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64906, node 4 2025-06-25T14:58:07.100238Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:07.100271Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:07.100281Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:07.100435Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25143 TClient is connected to server localhost:25143 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:58:07.671219Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:07.690053Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:07.819248Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:07.919294Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:07.974570Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.119434Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:10.657970Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519901431042120968:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:10.658096Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:10.735429Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:10.789504Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:10.828755Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:10.865476Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:10.907443Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:10.946682Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:10.990400Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:11.131831Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519901435337088918:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:11.131957Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:11.132221Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519901435337088923:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:11.137465Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:11.183560Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519901435337088925:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:58:11.247673Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519901435337088976:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:11.872419Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519901413862250170:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:11.872504Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:12.456283Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:12.459290Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:12.460916Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:12.860115Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TFlatTest::Ls >> TLocksTest::GoodDupLock >> Secret::DeactivatedQueryService [GOOD] >> TCancelTx::CrossShardReadOnly >> TLocksTest::SetEraseSet [GOOD] >> TFlatTest::ShardUnfreezeNonFrozen >> TLocksTest::GoodSameKeyLock >> TLocksTest::Range_CorrectNullDot >> TLocksTest::BrokenLockUpdate >> TFlatTest::SplitEmptyAndWrite >> TFlatTest::CopyCopiedTableAndDropFirstCopy [GOOD] >> TFlatTest::AutoSplitBySize [GOOD] >> TFlatTest::AutoMergeBySize >> TLocksTest::BrokenSameKeyLock ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::DeactivatedQueryService [GOOD] Test command err: 2025-06-25T14:58:03.828000Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:195:2241], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e1b/r3tmp/tmp9lS0nE/pdisk_1.dat 2025-06-25T14:58:04.099554Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 9969, node 1 TClient is connected to server localhost:12444 2025-06-25T14:58:04.291314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:04.330418Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:04.334377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:04.334446Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:04.334478Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:04.334800Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:04.335175Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863481203168 != 1750863481203172 2025-06-25T14:58:04.380864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:04.380998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:04.392467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:04.595324Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-25T14:58:16.083745Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:644:2536], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:16.083873Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:658:2545], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:16.083942Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:16.091310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:16.112146Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:662:2548], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-25T14:58:16.172057Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:713:2580] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:16.549412Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:723:2589], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:1:50: Error: Executing CREATE OBJECT SECRET
: Error: metadata provider service is disabled 2025-06-25T14:58:16.551982Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NGJlY2JlYzAtNWU2NDJjZS1hZDQ3N2NhZC1mNGJhMzRhNw==, ActorId: [1:642:2534], ActorState: ExecuteState, TraceId: 01jykshgw8bxsycrwwp4b2ayc0, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=
: Error: Execution, code: 1060
:1:50: Error: Executing CREATE OBJECT SECRET
: Error: metadata provider service is disabled ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 E0625 14:58:17.213301435 635160 backup_poller.cc:113] run_poller: UNKNOWN:Timer list shutdown {created_time:"2025-06-25T14:58:17.213086762+00:00"} >> TFlatTest::SelectRangeReverse >> TFlatTest::MiniKQLRanges >> Secret::Deactivated [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::SetEraseSet [GOOD] Test command err: 2025-06-25T14:58:07.779807Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901418700244393:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.818722Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208c/r3tmp/tmpUrBh5Y/pdisk_1.dat 2025-06-25T14:58:08.206909Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:08.221129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.221235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.251933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25773 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:08.598249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:08.618910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:08.642277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.820447Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:08.836259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.917056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.959836Z node 1 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976715662: Validate (783): Key validation status: 3 2025-06-25T14:58:08.960129Z node 1 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [1:7519901422995212505:2499] txid# 281474976715662 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-06-25T14:58:08.960204Z node 1 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [1:7519901422995212505:2499] txid# 281474976715662 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-06-25T14:58:08.960239Z node 1 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [1:7519901422995212505:2499] txid# 281474976715662 invalidateDistCache: 1 DIE TDataReq MarkShardError TabletsLeft# 1 2025-06-25T14:58:08.967188Z node 1 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976715663: Validate (783): Key validation status: 3 DataShardErrors: [SCHEME_ERROR] Validate (783): Key validation status: 3 proxy error code: ProxyShardNotAvailable 2025-06-25T14:58:08.967448Z node 1 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [1:7519901422995212527:2506] txid# 281474976715663 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-06-25T14:58:08.967501Z node 1 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [1:7519901422995212527:2506] txid# 281474976715663 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-06-25T14:58:08.967521Z node 1 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [1:7519901422995212527:2506] txid# 281474976715663 invalidateDistCache: 1 DIE TDataReq MarkShardError TabletsLeft# 1 2025-06-25T14:58:08.970592Z node 1 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976715664: Validate (783): Key validation status: 3 
2025-06-25T14:58:08.970823Z node 1 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [1:7519901422995212534:2510] txid# 281474976715664 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-06-25T14:58:08.970879Z node 1 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [1:7519901422995212534:2510] txid# 281474976715664 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-06-25T14:58:08.970922Z node 1 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [1:7519901422995212534:2510] txid# 281474976715664 invalidateDistCache: 1 DIE TDataReq MarkShardError TabletsLeft# 1 2025-06-25T14:58:08.973354Z node 1 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976715665: Validate (783): Key validation status: 3 DataShardErrors: [SCHEME_ERROR] Validate (783): Key validation status: 3 proxy error code: ProxyShardNotAvailable 2025-06-25T14:58:08.973585Z node 1 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [1:7519901422995212540:2513] txid# 281474976715665 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-06-25T14:58:08.973627Z node 1 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [1:7519901422995212540:2513] txid# 281474976715665 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-06-25T14:58:08.973651Z node 1 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [1:7519901422995212540:2513] txid# 281474976715665 invalidateDistCache: 1 DIE TDataReq MarkShardError TabletsLeft# 1 2025-06-25T14:58:11.050165Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901434026638542:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:11.050316Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208c/r3tmp/tmpnG0VAv/pdisk_1.dat 2025-06-25T14:58:11.228644Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:11.230180Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:11.230240Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:11.231278Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901434026638515:2080] 1750863491048131 != 1750863491048134 2025-06-25T14:58:11.243314Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23743 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:11.508350Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:11.516528Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:11.555561Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:11.657534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:11.730911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:14.407093Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901450171390731:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:14.407149Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208c/r3tmp/tmpNJsPcD/pdisk_1.dat 2025-06-25T14:58:14.534162Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:14.564047Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:14.564141Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:14.586763Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26334 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:14.749243Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:14.753625Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:14.773693Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:58:14.832297Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:14.883469Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> TFlatTest::CopyTableAndReturnPartAfterCompaction [GOOD] >> TFlatTest::CopyTableDropOriginalAndReturnPartAfterCompaction >> TLocksTest::Range_BrokenLock2 >> KqpVectorIndexes::OrderByCosineLevel1-Nullable+UseSimilarity [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::Deactivated [GOOD] Test command err: 2025-06-25T14:58:05.234946Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:195:2241], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d2d/r3tmp/tmp5GSGjE/pdisk_1.dat 2025-06-25T14:58:05.532720Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 4588, node 1 TClient is connected to server localhost:10491 2025-06-25T14:58:05.720796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:05.751753Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:05.755093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:05.755153Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:05.755176Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:05.755492Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:05.755843Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863482931695 != 1750863482931699 2025-06-25T14:58:05.801505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:05.801652Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:05.813092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:06.016282Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-25T14:58:17.790929Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:646:2536], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:17.791075Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=
: Error: Execution, code: 1060
:1:50: Error: Executing CREATE OBJECT SECRET
: Error: metadata provider service is disabled ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 >> TFlatTest::ReadOnlyMode [GOOD] >> TFlatTest::RejectByIncomingReadSetSize ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::CopyCopiedTableAndDropFirstCopy [GOOD] Test command err: 2025-06-25T14:58:07.796938Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901420241599622:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.812591Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002088/r3tmp/tmp6AJRYE/pdisk_1.dat 2025-06-25T14:58:08.348033Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:08.362487Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901420241599588:2080] 1750863487767918 != 1750863487767921 2025-06-25T14:58:08.386050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.386137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.388199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19743 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:08.813113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:08.822698Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 
2025-06-25T14:58:08.856880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:09.147644Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.008s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-25T14:58:09.153930Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.003s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-25T14:58:09.185717Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.004s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-25T14:58:09.194606Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.004s,wait=0.002s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 Copy TableOld to Table 2025-06-25T14:58:09.315532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/dc-1/Dir" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table" CopyFromTable: "/dc-1/Dir/TableOld" } } TxId: 281474976710676 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:58:09.315821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_copy_table.cpp:343: TCopyTable Propose, path: /dc-1/Dir/Table, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-06-25T14:58:09.316509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 2], parent name: Dir, child name: Table, child id: [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-06-25T14:58:09.316555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 0 2025-06-25T14:58:09.316571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction source path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T14:58:09.316601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710676:0 type: TxCopyTable target path: [OwnerId: 72057594046644480, LocalPathId: 4] source path: [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-25T14:58:09.316633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-25T14:58:09.316645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-06-25T14:58:09.316761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 3 2025-06-25T14:58:09.316875Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710676:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:58:09.317495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:58:09.317520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 4 2025-06-25T14:58:09.317950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710676, response: Status: StatusAccepted TxId: 281474976710676 SchemeshardId: 72057594046644480 PathId: 4, at schemeshard: 72057594046644480 2025-06-25T14:58:09.318085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710676, database: /dc-1, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /dc-1/Dir/Table 2025-06-25T14:58:09.318225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:58:09.318239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710676, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T14:58:09.318362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710676, path id: [OwnerId: 72057594046644480, LocalPathId: 4] 2025-06-25T14:58:09.318424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:58:09.318441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519901424536567383:2244], at schemeshard: 72057594046644480, txId: 281474976710676, path id: 2 2025-06-25T14:58:09.318467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519901424536567383:2244], at schemeshard: 72057594046644480, txId: 281474976710676, path id: 4 2025-06-25T14:58:09.318495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710676:0, at schemeshard: 72057594046644480 2025-06-25T14:58:09.318524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710676:0 ProgressState, operation type: TxCopyTable, at tablet# 72057594046644480 2025-06-25T14:58:09.318790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976710676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 3 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-25T14:58:09.318892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976710676:0 CreateRequest Event to 
Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 4 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } waiting... 2025-06-25T14:58:09.335805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710676 2025-06-25T14:58:09.336141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710676 2025-06-25T14:58:09.336178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710676 2025-06-25T14:58:09.336256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710676, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 6 2025-06-25T14:58:09.336685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-25T14:58:09.337202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generatio ... 
hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:15.595111Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037893 state Offline 2025-06-25T14:58:15.595114Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:6 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:15.595148Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-25T14:58:15.595153Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:15.596246Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-25T14:58:15.596294Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-25T14:58:15.596373Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-25T14:58:15.596580Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-25T14:58:15.596722Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-25T14:58:15.596799Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-25T14:58:15.596859Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-25T14:58:15.596963Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 5] was 2 2025-06-25T14:58:15.597060Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-25T14:58:15.597173Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-25T14:58:15.597291Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-25T14:58:15.597401Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-06-25T14:58:15.597509Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046644480 ShardLocalIdx: 6, at schemeshard: 72057594046644480 2025-06-25T14:58:15.597650Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 5] was 1 2025-06-25T14:58:15.597750Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, 
message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T14:58:15.597873Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-25T14:58:15.597991Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 3 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:58:15.598018Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 5], at schemeshard: 72057594046644480 2025-06-25T14:58:15.598067Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-25T14:58:15.598101Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-06-25T14:58:15.598119Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:58:15.598171Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-25T14:58:15.598191Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:58:15.598370Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037892 reason = ReasonStop 2025-06-25T14:58:15.598402Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037888 reason = ReasonStop 2025-06-25T14:58:15.598417Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-06-25T14:58:15.598426Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-25T14:58:15.598431Z node 3 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037893 reason = ReasonStop 2025-06-25T14:58:15.598444Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-06-25T14:58:15.598619Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-25T14:58:15.598682Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 2025-06-25T14:58:15.599804Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3635: Client pipe to tablet 72075186224037890 from 72075186224037888 is reset 2025-06-25T14:58:15.599836Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:7519901451638618253:2388], serverId# [3:7519901451638618254:2389], sessionId# [0:0:0] 2025-06-25T14:58:15.599916Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-25T14:58:15.599935Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 
72057594046644480:2 tabletId 72075186224037889 2025-06-25T14:58:15.599974Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-25T14:58:15.599981Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-25T14:58:15.600001Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-25T14:58:15.600009Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-25T14:58:15.600018Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-25T14:58:15.600026Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-25T14:58:15.600034Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-25T14:58:15.600052Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:6 2025-06-25T14:58:15.600059Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:6 tabletId 72075186224037893 2025-06-25T14:58:15.600061Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-25T14:58:15.600161Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037893 not found 2025-06-25T14:58:15.600174Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-25T14:58:15.600186Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-25T14:58:15.600199Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-25T14:58:15.600387Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T14:58:15.600402Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T14:58:15.600444Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 3 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:58:15.601736Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037893 2025-06-25T14:58:15.601801Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037893 2025-06-25T14:58:15.603090Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-06-25T14:58:15.603138Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-25T14:58:15.604386Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-25T14:58:15.604433Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-25T14:58:15.715272Z node 
3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Check that tablet 72075186224037893 was deleted Check that tablet 72075186224037888 was deleted Check that tablet 72075186224037889 was deleted 2025-06-25T14:58:15.891121Z node 3 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037892) 2025-06-25T14:58:15.891690Z node 3 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037893) 2025-06-25T14:58:15.892020Z node 3 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) 2025-06-25T14:58:15.892360Z node 3 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037889) Check that tablet 72075186224037890 was deleted 2025-06-25T14:58:15.892665Z node 3 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037890) Check that tablet 72075186224037891 was deleted 2025-06-25T14:58:15.893182Z node 3 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037891) >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix-Nullable-Covered [GOOD] >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix-Nullable+Covered >> TFlatTest::Ls [GOOD] >> TFlatTest::LsPathId >> TCancelTx::CrossShardReadOnly [GOOD] >> TCancelTx::CrossShardReadOnlyWithReadSets >> TFlatTest::ShardUnfreezeNonFrozen [GOOD] >> TFlatTest::ShardFreezeUnfreezeRejectScheme >> TLocksTest::Range_IncorrectNullDot1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineLevel1-Nullable+UseSimilarity [GOOD] Test command err: Trying to start YDB, gRPC: 26049, MsgBus: 17569 2025-06-25T14:57:14.257347Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901190718344548:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:14.273657Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b5f/r3tmp/tmpphWWkv/pdisk_1.dat 2025-06-25T14:57:14.756287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:14.756395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:14.762807Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:14.773074Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:14.773962Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901190718344511:2080] 1750863434218561 != 1750863434218564 TServer::EnableGrpc on GrpcPort 26049, node 1 2025-06-25T14:57:14.944428Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:14.944444Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:14.944450Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:14.944560Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17569 2025-06-25T14:57:15.276437Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:17569 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:15.619430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:15.652716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:15.662511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:15.836505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:16.023907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:16.105419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:17.757696Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901203603248048:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:17.757778Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:17.978461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.014140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.048997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.086879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.123687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.194067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.236272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.310049Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901207898216002:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:18.310094Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:18.310176Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901207898216007:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:18.314876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:18.330681Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901207898216009:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:18.387582Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901207898216060:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:19.264406Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901190718344548:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:19.264492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:19.363776Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901212193183631:3598], Recipient [1:7519901190718344841:2147]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:19.363814Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:50 ... _table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:22 data size 0 row count 0 2025-06-25T14:58:17.460675Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037909 maps to shardIdx: 72057594046644480:22 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:17.460684Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037909, followerId 0 2025-06-25T14:58:17.460712Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:22 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:17.460722Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037909 2025-06-25T14:58:17.460740Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:17 data size 0 row count 0 2025-06-25T14:58:17.460766Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037904 maps to shardIdx: 72057594046644480:17 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:17.460775Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037904, followerId 0 2025-06-25T14:58:17.460800Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:17 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:17.460809Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037904 2025-06-25T14:58:17.460823Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:18 data size 0 row count 0 2025-06-25T14:58:17.460847Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037905 maps to shardIdx: 72057594046644480:18 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:17.460855Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037905, followerId 0 2025-06-25T14:58:17.460879Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:18 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:17.460902Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037905 2025-06-25T14:58:17.460919Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:23 data size 0 row count 0 2025-06-25T14:58:17.460944Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037910 maps to shardIdx: 72057594046644480:23 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:17.460954Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037910, followerId 0 2025-06-25T14:58:17.460979Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:23 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:17.460987Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037910 2025-06-25T14:58:17.461002Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:20 data size 0 row count 0 2025-06-25T14:58:17.461030Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037907 maps to shardIdx: 72057594046644480:20 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:17.461039Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037907, followerId 0 2025-06-25T14:58:17.461061Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:20 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:17.461071Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037907 2025-06-25T14:58:17.461088Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for 
pathId 5 shard idx 72057594046644480:14 data size 0 row count 0 2025-06-25T14:58:17.461114Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037901 maps to shardIdx: 72057594046644480:14 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:17.461121Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037901, followerId 0 2025-06-25T14:58:17.461142Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:14 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:17.461151Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037901 2025-06-25T14:58:17.461166Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:21 data size 0 row count 0 2025-06-25T14:58:17.461188Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037908 maps to shardIdx: 72057594046644480:21 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:17.461196Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037908, followerId 0 2025-06-25T14:58:17.461216Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:21 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:17.461225Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037908 2025-06-25T14:58:17.461242Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:15 data size 0 row count 0 2025-06-25T14:58:17.461267Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037902 maps to shardIdx: 72057594046644480:15 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:17.461275Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037902, followerId 0 2025-06-25T14:58:17.461298Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:15 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:17.461308Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037902 2025-06-25T14:58:17.461321Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:19 data size 0 row 
count 0 2025-06-25T14:58:17.461346Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037906 maps to shardIdx: 72057594046644480:19 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:17.461354Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037906, followerId 0 2025-06-25T14:58:17.461377Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:19 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:17.461385Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037906 2025-06-25T14:58:17.461442Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:58:17.461572Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519901328231964304:2148]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:17.461587Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:17.461598Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:58:18.410002Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519901328231964304:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:18.410062Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:18.410114Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:7519901328231964304:2148], Recipient [2:7519901328231964304:2148]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:18.410133Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TFlatTest::ShardFreezeRejectBadProtobuf >> TFlatTest::SplitEmptyAndWrite [GOOD] >> TFlatTest::SplitBoundaryRead >> TFlatTest::MiniKQLRanges [GOOD] >> TFlatTest::MergeEmptyAndWrite >> TFlatTest::SelectRangeReverse [GOOD] >> TFlatTest::SelectRangeReverseExcludeKeys >> TObjectStorageListingTest::Listing >> KqpKv::BulkUpsert [GOOD] >> KqpKv::ReadRows_ExternalBlobs+UseExtBlobsPrecharge >> TLocksTest::GoodLock >> TFlatTest::LargeDatashardReply [GOOD] >> TFlatTest::CopyTableDropOriginalAndReturnPartAfterCompaction [GOOD] >> KqpVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity [GOOD] >> GenericFederatedQuery::ClickHouseFilterPushdown [GOOD] >> TFlatTest::SplitEmptyToMany >> TLocksFatTest::PointSetNotBreak [GOOD] >> TLocksFatTest::PointSetRemove >> TFlatTest::LsPathId [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::LargeDatashardReply [GOOD] Test command err: 2025-06-25T14:58:07.796534Z node 1 :METADATA_PROVIDER 
WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901420270653572:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.801451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002086/r3tmp/tmp8d8kxD/pdisk_1.dat 2025-06-25T14:58:08.251633Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:08.290910Z node 1 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [1:7519901420270653683:2102] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-06-25T14:58:08.291116Z node 1 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Execute 2025-06-25T14:58:08.292447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.292495Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-25T14:58:08.292516Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:58:08.292534Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-25T14:58:08.292550Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:58:08.292642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.292808Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-25T14:58:08.292848Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-25T14:58:08.292886Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-25T14:58:08.292904Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-25T14:58:08.293111Z node 1 :HIVE DEBUG: hive_impl.cpp:808: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 1 Location DataCenter: "1" Module: "1" Rack: "1" Unit: "1" 2025-06-25T14:58:08.295161Z node 1 :HIVE DEBUG: tx__register_node.cpp:95: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Complete 2025-06-25T14:58:08.295209Z node 1 :HIVE DEBUG: node_info.cpp:373: HIVE#72057594037968897 Node(1) Ping([1:7519901420270653683:2102]) 2025-06-25T14:58:08.295252Z node 1 :HIVE DEBUG: 
tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-25T14:58:08.295619Z node 1 :HIVE DEBUG: hive_impl.cpp:737: HIVE#72057594037968897 THive::Handle::TEvSyncTablets 2025-06-25T14:58:08.295650Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:41: HIVE#72057594037968897 THive::TTxSyncTablets([1:7519901420270653683:2102])::Execute 2025-06-25T14:58:08.295671Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:58:08.295703Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:130: HIVE#72057594037968897 THive::TTxSyncTablets([1:7519901420270653683:2102])::Complete 2025-06-25T14:58:08.295788Z node 1 :HIVE DEBUG: hive_impl.cpp:731: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 1: Status: 0 StartTime: 1750863487805493 ResourceMaximum { Memory: 270443331584 } 2025-06-25T14:58:08.295821Z node 1 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(1)::Execute 2025-06-25T14:58:08.295845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:08.295936Z node 1 :HIVE DEBUG: hive_impl.cpp:2791: HIVE#72057594037968897 AddRegisteredDataCentersNode(1, 1) 2025-06-25T14:58:08.295972Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-25T14:58:08.295983Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:58:08.296079Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-25T14:58:08.296091Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-25T14:58:08.296099Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-25T14:58:08.296109Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-25T14:58:08.296373Z node 1 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(1)::Complete 2025-06-25T14:58:08.296386Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete TClient is connected to server localhost:5790 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-25T14:58:08.497033Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519901420270653702:2105] Handle TEvNavigate describe path dc-1 2025-06-25T14:58:08.530840Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519901424565621276:2258] HANDLE EvNavigateScheme dc-1 2025-06-25T14:58:08.531936Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519901424565621276:2258] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:58:08.576870Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519901424565621276:2258] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-25T14:58:08.622258Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519901424565621276:2258] Handle TEvDescribeSchemeResult Forward to# [1:7519901424565621275:2257] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:08.664403Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519901420270653702:2105] Handle TEvProposeTransaction 2025-06-25T14:58:08.664443Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519901420270653702:2105] TxId# 281474976710657 ProcessProposeTransaction 2025-06-25T14:58:08.664562Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519901420270653702:2105] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7519901424565621289:2264] 2025-06-25T14:58:08.800429Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:08.831636Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519901424565621289:2264] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "" PeerName: "" 2025-06-25T14:58:08.832501Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519901424565621289:2264] txid# 281474976710657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:58:08.832575Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519901424565621289:2264] txid# 281474976710657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:58:08.832866Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519901424565621289:2264] txid# 281474976710657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:58:08.833017Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519901424565621289:2264] HANDLE EvNavigateKeySetResult, txid# 281474976710657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-25T14:58:08.833066Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519901424565621289:2264] txid# 281474976710657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710657 TabletId# 72057594046644480} 2025-06-25T14:58:08.833193Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519901424565621289:2264] txid# 28147 ... 
remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 2 2025-06-25T14:58:09.874029Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710674 datashard 72075186224037899 state PreOffline 2025-06-25T14:58:09.874065Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037899 Got TEvSchemaChangedResult from SS at 72075186224037899 2025-06-25T14:58:09.875696Z node 1 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037899 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:58:09.875761Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037899 Initiating switch from PreOffline to Offline state 2025-06-25T14:58:09.883342Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037899 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:58:09.883911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901428860589204 RawX2: 4503603922340087 } TabletId: 72075186224037899 State: 4 2025-06-25T14:58:09.883961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037899, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:09.884430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:12 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:09.884800Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037899 state Offline 2025-06-25T14:58:09.896494Z node 1 :HIVE DEBUG: tx__delete_tablet.cpp:74: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() ShardOwnerId: 72057594046644480 ShardLocalIdx: 12 TxId_Deprecated: 12 TabletID: 72075186224037899 2025-06-25T14:58:09.896545Z node 1 :HIVE DEBUG: tx__delete_tablet.cpp:19: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute Tablet 72075186224037899 2025-06-25T14:58:09.896635Z node 1 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037899.Leader.1) VolatileState: Running -> Stopped (Node 1) 2025-06-25T14:58:09.896709Z node 1 :HIVE DEBUG: tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(DataShard.72075186224037899.Leader.1 gen 1) to node 1 2025-06-25T14:58:09.896797Z node 1 :HIVE DEBUG: tx__delete_tablet.cpp:67: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() result Status: OK Origin: 72057594037968897 TxId_Deprecated: 12 ShardOwnerId: 72057594046644480 ShardLocalIdx: 12 2025-06-25T14:58:09.901057Z node 1 :HIVE DEBUG: tx__delete_tablet.cpp:136: HIVE#72057594037968897 THive::TTxDeleteTablet::Complete() SideEffects: {Notifications: 0x10080003 [1:7519901420270653683:2102] NKikimrLocal.TEvStopTablet TabletId: 72075186224037899 FollowerId: 0 Generation: 1,0x10040206 [1:7519901424565621076:2137] NKikimrHive.TEvDeleteTabletReply Status: OK Origin: 72057594037968897 TxId_Deprecated: 12 ShardOwnerId: 72057594046644480 ShardLocalIdx: 12 Actions: NKikimr::TTabletReqBlockBlobStorage} 2025-06-25T14:58:09.901160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 12 ShardOwnerId: 72057594046644480 ShardLocalIdx: 12, at 
schemeshard: 72057594046644480 2025-06-25T14:58:09.901396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 1 2025-06-25T14:58:09.901538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:58:09.901556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 6], at schemeshard: 72057594046644480 2025-06-25T14:58:09.901595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-25T14:58:09.901793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:12 2025-06-25T14:58:09.901844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:12 tabletId 72075186224037899 2025-06-25T14:58:09.901867Z node 1 :HIVE DEBUG: tx__block_storage_result.cpp:23: HIVE#72057594037968897 THive::TTxBlockStorageResult::Execute(72075186224037899 OK) 2025-06-25T14:58:09.901915Z node 1 :HIVE DEBUG: tx__block_storage_result.cpp:65: HIVE#72057594037968897 THive::TTxBlockStorageResult::Complete(72075186224037899 OK) 2025-06-25T14:58:09.902011Z node 1 :HIVE DEBUG: hive_impl.cpp:892: HIVE#72057594037968897 THive::Handle::TEvInitiateDeleteStorage TabletId=72075186224037899 2025-06-25T14:58:09.902073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:58:09.902207Z node 1 :HIVE DEBUG: tx__delete_tablet_result.cpp:26: HIVE#72057594037968897 THive::TTxDeleteTabletResult::Execute(72075186224037899 OK) 2025-06-25T14:58:09.902331Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037899 reason = ReasonStop 2025-06-25T14:58:09.902357Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037899, clientId# [1:7519901428860589368:2760], serverId# [1:7519901428860589369:2761], sessionId# [0:0:0] 2025-06-25T14:58:09.902542Z node 1 :HIVE DEBUG: hive_impl.cpp:480: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus, TabletId: 72075186224037899 2025-06-25T14:58:09.902554Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037899 not found 2025-06-25T14:58:09.902750Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037899, clientId# [1:7519901428860589228:2683], serverId# [1:7519901428860589229:2684], sessionId# [0:0:0] 2025-06-25T14:58:09.902973Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037899 2025-06-25T14:58:09.903032Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037899 2025-06-25T14:58:09.903332Z node 1 :HIVE DEBUG: tx__delete_tablet_result.cpp:72: HIVE#72057594037968897 THive::TTxDeleteTabletResult(72075186224037899)::Complete SideEffects {} 2025-06-25T14:58:11.673052Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901434626988832:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:11.673721Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002086/r3tmp/tmpHbvoWf/pdisk_1.dat 2025-06-25T14:58:11.846524Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:11.869191Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:11.869266Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:11.873466Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23744 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:12.093768Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:12.107587Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:12.117739Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:12.684493Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:16.674169Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901434626988832:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:16.674927Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:21.603209Z node 2 :MINIKQL_ENGINE ERROR: datashard__engine_host.cpp:516: Shard %72075186224037888, txid %281474976716360, engine error: Error executing transaction (read-only: 1): Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) 2025-06-25T14:58:21.613780Z node 2 :TX_DATASHARD ERROR: execute_data_tx_unit.cpp:268: Datashard execution error for [0:281474976716360] at 72075186224037888: Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) 2025-06-25T14:58:21.615840Z node 2 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976716360 at tablet 72075186224037888 status: RESULT_UNAVAILABLE errors: REPLY_SIZE_EXCEEDED (Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648)) | 2025-06-25T14:58:21.632606Z node 2 :TX_PROXY ERROR: datareq.cpp:883: Actor# [2:7519901473281700822:5905] txid# 281474976716360 RESPONSE Status# ExecResultUnavailable marker# P13c DataShardErrors: [REPLY_SIZE_EXCEEDED] Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) proxy error code: ExecResultUnavailable >> TFlatTest::RejectByIncomingReadSetSize [GOOD] >> TFlatTest::ShardFreezeUnfreezeRejectScheme [GOOD] >> TLocksFatTest::ShardLocks [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::CopyTableDropOriginalAndReturnPartAfterCompaction [GOOD] Test command err: 2025-06-25T14:58:16.164506Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901455556105265:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:16.175311Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207f/r3tmp/tmpUfIdXf/pdisk_1.dat 2025-06-25T14:58:16.442994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:16.443058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:16.444896Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:16.459260Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:16.460376Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901455556105235:2080] 1750863496158917 != 1750863496158920 TClient is connected to server localhost:25144 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:16.719213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:16.751008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:16.921640Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-25T14:58:16.924861Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-25T14:58:16.946872Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-25T14:58:16.955061Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.005s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863496872 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... 
(TRUNCATED) Copy TableOld to Table 2025-06-25T14:58:17.081981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/dc-1/Dir" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table" PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 100000 InMemStepsToSnapshot: 2 InMemForceStepsToSnapshot: 3 InMemForceSizeToSnapshot: 1000000 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 200000 ReadAheadLoThreshold: 100000 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 10000 CountToCompact: 2 ForceCountToCompact: 2 ForceSizeToCompact: 20000 CompactionBrokerQueue: 1 KeepInCache: true } } ColumnFamilies { Id: 0 ColumnCache: ColumnCacheNone Storage: ColumnStorageTest_1_2_1k } } CopyFromTable: "/dc-1/Dir/TableOld" } } TxId: 281474976715676 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:58:17.082331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_copy_table.cpp:343: TCopyTable Propose, path: /dc-1/Dir/Table, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-06-25T14:58:17.082969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 2], parent name: Dir, child name: Table, child id: [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-06-25T14:58:17.083014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 0 2025-06-25T14:58:17.083029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction source path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T14:58:17.083047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976715676:0 type: TxCopyTable target path: [OwnerId: 72057594046644480, LocalPathId: 4] source path: [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-25T14:58:17.083072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-25T14:58:17.083102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-06-25T14:58:17.083256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 3 2025-06-25T14:58:17.083379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715676:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:58:17.084060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:58:17.084107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 4 2025-06-25T14:58:17.084586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715676, response: Status: StatusAccepted TxId: 281474976715676 
SchemeshardId: 72057594046644480 PathId: 4, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:17.084825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715676, database: /dc-1, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /dc-1/Dir/Table 2025-06-25T14:58:17.085005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:58:17.085019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715676, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T14:58:17.085108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715676, path id: [OwnerId: 72057594046644480, LocalPathId: 4] 2025-06-25T14:58:17.085157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:58:17.085170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519901455556105730:2245], at schemeshard: 72057594046644480, txId: 281474976715676, path id: 2 2025-06-25T14:58:17.085181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519901455556105730:2245], at schemeshard: 72057594046644480, txId: 281474976715676, path id: 4 2025-06-25T14:58:17.085207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715676:0, at schemeshard: 72057594046644480 2025-06-25T14:58:17.085253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715676:0 ProgressState, operation type: TxCopyTable, at tablet# 72057594046644480 2025-06-25T14:58:17.085619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976715676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 3 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-25T14:58:17.085732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976715676:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 4 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 4 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } Allowed ... 
D DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 3 2025-06-25T14:58:20.823859Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715784 datashard 72075186224037891 state PreOffline 2025-06-25T14:58:20.823893Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037891 Got TEvSchemaChangedResult from SS at 72075186224037891 2025-06-25T14:58:20.824025Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:128: 72075186224037891 parts [ [72075186224037889:1:16:1:12288:306:0] [72075186224037889:1:23:1:12288:253:0] ] return ack processed 2025-06-25T14:58:20.824080Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037891 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:58:20.824162Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037891 Initiating switch from PreOffline to Offline state 2025-06-25T14:58:20.825771Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715784 datashard 72075186224037890 state PreOffline 2025-06-25T14:58:20.825803Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2025-06-25T14:58:20.826050Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:58:20.826157Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037891 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:58:20.826222Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [2:7519901474114783091:2650], serverId# [2:7519901474114783094:3434], sessionId# [0:0:0] 2025-06-25T14:58:20.826894Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901469819814083 RawX2: 4503608217307347 } TabletId: 72075186224037889 State: 4 2025-06-25T14:58:20.826941Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:20.827137Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901469819814376 RawX2: 4503608217307403 } TabletId: 72075186224037891 State: 4 2025-06-25T14:58:20.827166Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:20.827479Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:20.827521Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:20.827714Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-25T14:58:20.827787Z node 2 
:TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037890 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:58:20.827871Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037890 Initiating switch from PreOffline to Offline state 2025-06-25T14:58:20.828824Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-25T14:58:20.829049Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-25T14:58:20.829235Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-25T14:58:20.829333Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-06-25T14:58:20.829359Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-25T14:58:20.829375Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 2 2025-06-25T14:58:20.829386Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-25T14:58:20.829465Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:58:20.829483Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-25T14:58:20.829517Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T14:58:20.830289Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-25T14:58:20.830313Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-25T14:58:20.830366Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-06-25T14:58:20.830390Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-25T14:58:20.830585Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-25T14:58:20.830685Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-25T14:58:20.830698Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-25T14:58:20.830703Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 
2025-06-25T14:58:20.830750Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 Check that tablet 72075186224037888 was deleted 2025-06-25T14:58:20.832283Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-25T14:58:20.832327Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) 2025-06-25T14:58:20.832378Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 Check that tablet 72075186224037889 was deleted 2025-06-25T14:58:20.833413Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037889) Check that tablet 72075186224037890 was deleted 2025-06-25T14:58:20.834774Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:58:20.835582Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901469819814374 RawX2: 4503608217307402 } TabletId: 72075186224037890 State: 4 2025-06-25T14:58:20.835641Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:20.836148Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:20.836747Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-25T14:58:20.837943Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T14:58:20.838137Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-25T14:58:20.838270Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:58:20.838284Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 4], at schemeshard: 72057594046644480 2025-06-25T14:58:20.838456Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:58:20.839274Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T14:58:20.839297Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T14:58:20.839398Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-25T14:58:20.839436Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 
72075186224037890, clientId# [2:7519901469819814558:2626], serverId# [2:7519901469819814559:2627], sessionId# [0:0:0] 2025-06-25T14:58:20.839455Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [2:7519901469819814446:2547], serverId# [2:7519901469819814447:2548], sessionId# [0:0:0] 2025-06-25T14:58:20.841098Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-25T14:58:20.841121Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-25T14:58:20.841203Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-25T14:58:20.841930Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:58:21.140202Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037890) Check that tablet 72075186224037891 was deleted 2025-06-25T14:58:21.141332Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037891) >> TFlatTest::ShardFreezeRejectBadProtobuf [GOOD] >> TFlatTest::SelectRangeSkipNullKeys >> TLocksTest::Range_GoodLock0 [GOOD] >> TLocksTest::Range_GoodLock1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineLevel2-Nullable+UseSimilarity [GOOD] Test command err: Trying to start YDB, gRPC: 4272, MsgBus: 25433 2025-06-25T14:57:01.344708Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901136900714971:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:01.344745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001c01/r3tmp/tmpQrEMHo/pdisk_1.dat 2025-06-25T14:57:01.735684Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4272, node 1 2025-06-25T14:57:01.769944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:01.770037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:01.781387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:01.913168Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:01.913199Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:01.913227Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:01.913384Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25433 2025-06-25T14:57:02.332665Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:25433 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:02.535198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:02.577047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:02.594885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:02.778909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:02.931666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:03.015888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:04.742510Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901149785618265:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:04.742586Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:05.098733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.131358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.165293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.201611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.263847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.302708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.386795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:05.473889Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901154080586220:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:05.473979Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:05.479163Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901154080586225:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:05.484172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:05.499408Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901154080586227:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:05.596174Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901154080586278:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:06.348423Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901136900714971:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:06.348545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:06.605156Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901158375553849:3599], Recipient [1:7519901136900715093:2157]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:06.605203Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:06.605218Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 720575940466444 ... :58:22.313510Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037890 maps to shardIdx: 72057594046644480:3 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], pathId map=EightShard, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:22.313518Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037890, followerId 0 2025-06-25T14:58:22.313538Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:3 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:22.313546Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037890 2025-06-25T14:58:22.313561Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046644480:6 data size 800 row count 3 2025-06-25T14:58:22.313587Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037893 maps to shardIdx: 72057594046644480:6 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], pathId map=EightShard, is column=0, is olap=0, RowCount 3, DataSize 800 2025-06-25T14:58:22.313594Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037893, followerId 0 2025-06-25T14:58:22.313616Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:6 with partCount# 0, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:22.313626Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037893 
2025-06-25T14:58:22.313641Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046644480:7 data size 800 row count 3 2025-06-25T14:58:22.313669Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037894 maps to shardIdx: 72057594046644480:7 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], pathId map=EightShard, is column=0, is olap=0, RowCount 3, DataSize 800 2025-06-25T14:58:22.313675Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037894, followerId 0 2025-06-25T14:58:22.313697Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:7 with partCount# 0, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:22.313707Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037894 2025-06-25T14:58:22.313726Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046644480:12 data size 648 row count 1 2025-06-25T14:58:22.313754Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037899 maps to shardIdx: 72057594046644480:12 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 4], pathId map=Logs, is column=0, is olap=0, RowCount 1, DataSize 648 2025-06-25T14:58:22.313760Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037899, followerId 0 2025-06-25T14:58:22.313785Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:12 with partCount# 0, rowCount# 1, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:22.313795Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037899 2025-06-25T14:58:22.313810Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046644480:11 data size 0 row count 0 2025-06-25T14:58:22.313835Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037898 maps to shardIdx: 72057594046644480:11 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 4], pathId map=Logs, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:22.313842Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037898, followerId 0 2025-06-25T14:58:22.313866Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:11 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:22.313875Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037898 2025-06-25T14:58:22.313927Z node 2 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:58:22.314311Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [2:7519901312952437175:2310], Recipient [2:7519901308657469137:2153]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037900 TableLocalId: 4 Generation: 1 Round: 3 TableStats { DataSize: 1472 RowCount: 8 IndexSize: 0 InMemSize: 1472 LastAccessTime: 1750863466398 LastUpdateTime: 1750863466398 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 8 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 134 Memory: 137784 } ShardState: 2 UserTablePartOwners: 72075186224037900 NodeId: 2 StartTime: 1750863462248 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:58:22.314330Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:58:22.314359Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037900 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 4] state 'Ready' dataSize 1472 rowCount 8 cpuUsage 0.0134 2025-06-25T14:58:22.314433Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037900 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 4] raw table stats: DataSize: 1472 RowCount: 8 IndexSize: 0 InMemSize: 1472 LastAccessTime: 1750863466398 LastUpdateTime: 1750863466398 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 8 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:58:22.314478Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:7519901308657469137:2153], Recipient [2:7519901308657469137:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:22.314491Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:22.316750Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519901308657469137:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:22.316783Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:22.316828Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:7519901308657469137:2153], Recipient [2:7519901308657469137:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:22.316843Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event 
TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:22.316887Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519901308657469137:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:22.316904Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:22.316917Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-25T14:58:22.316952Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-25T14:58:22.316966Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.097480s, queue# 1 2025-06-25T14:58:22.317008Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046644480:13 data size 1472 row count 8 2025-06-25T14:58:22.317059Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037900 maps to shardIdx: 72057594046644480:13 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 4], pathId map=Logs, is column=0, is olap=0, RowCount 8, DataSize 1472 2025-06-25T14:58:22.317082Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037900, followerId 0 2025-06-25T14:58:22.317129Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:13 with partCount# 0, rowCount# 8, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:22.317163Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037900 2025-06-25T14:58:22.317213Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:58:22.415022Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519901308657469137:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:22.415069Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:22.415082Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::ClickHouseFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 5352, MsgBus: 19117 2025-06-25T14:57:34.393994Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901275222622436:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:34.394048Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009cf/r3tmp/tmpJNieQo/pdisk_1.dat 
2025-06-25T14:57:34.726420Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:34.727468Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901275222622419:2080] 1750863454392602 != 1750863454392605 TServer::EnableGrpc on GrpcPort 5352, node 1 2025-06-25T14:57:34.836205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:34.836277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:34.892960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:34.933014Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:34.933044Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:34.933057Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:34.933160Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19117 TClient is connected to server localhost:19117 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T14:57:35.401339Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:35.558400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:35.588116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:37.422523Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901288107524949:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:37.422683Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:37.689376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:37.816002Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901288107525072:2305], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:37.816099Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:37.816325Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901288107525077:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:37.819420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:37.830949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-25T14:57:37.832634Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901288107525080:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:57:37.924096Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901288107525120:2398] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:38.451091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:38.853517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.375213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:39.393755Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901275222622436:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:39.393816Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:39.856589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:40.331393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:57:40.892979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:40.938675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:57:43.734424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710713:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:57:43.766043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710716:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:43.767421Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710714:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:43.769546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose it ... ool default not found or you don't have access permissions } 2025-06-25T14:58:14.414243Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519901448445113080:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:14.418789Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:14.428680Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519901448445113082:2309], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:58:14.524408Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519901448445113122:2394] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:15.123647Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:15.536059Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519901431265243158:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:15.536144Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:15.687509Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:16.320785Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:16.953883Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710678:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:17.577647Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:18.200033Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:18.251182Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:58:21.992447Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710723:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Call DescribeTable. data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> TFlatTest::SplitBoundaryRead [GOOD] >> TFlatTest::SelectRangeReverseExcludeKeys [GOOD] >> TFlatTest::MergeEmptyAndWrite [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::LsPathId [GOOD] Test command err: 2025-06-25T14:58:16.959590Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901455460530341:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:16.959722Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207c/r3tmp/tmpLa6lxu/pdisk_1.dat 2025-06-25T14:58:17.371618Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:17.395254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:17.395371Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:17.396650Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28409 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:17.636727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:17.656677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 TClient::Ls request: / TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "/" PathId: 1 SchemeshardId: 0 PathType: EPathTypeDir CreateFinished: true } Children { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true } } Path: "/" TClient::Ls request: TClient::Ls response: Status: 128 StatusCode: ERROR Issues { message: "Default error" severity: 1 } SchemeStatus: 7 ErrorReason: "Invalid path" TClient::Ls request: // TClient::Ls response: Status: 128 StatusCode: ERROR Issues { message: "Default error" severity: 1 } SchemeStatus: 7 ErrorReason: "Invalid path" TClient::Ls request: / TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "/" PathId: 1 SchemeshardId: 0 PathType: EPathTypeDir CreateFinished: true } Children { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true } } Path: "/" TClient::Ls request: /dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750863497691 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePo... (TRUNCATED) TClient::Ls request: /dc-11 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Root not found" TClient::Ls request: /dc-2 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Root not found" waiting... 
2025-06-25T14:58:17.818945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 TClient::Ls request: / TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "/" PathId: 1 SchemeshardId: 0 PathType: EPathTypeDir CreateFinished: true } Children { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true } } Path: "/" TClient::Ls request: /dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750863497691 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Berkanavt" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750863497852 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depr... (TRUNCATED) TClient::Ls request: /dc-1/Berkanavt TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Berkanavt" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750863497852 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 1 PathsLimit: 10000 Shard... 
(TRUNCATED) 2025-06-25T14:58:17.889619Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901459755498141:2325] txid# 281474976710659, issues: { message: "Check failed: path: \'/dc-1/Berkanavt\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } Error 1: Check failed: path: '/dc-1/Berkanavt', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges) TClient::Ls request: /dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750863497691 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Berkanavt" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750863497852 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depr... (TRUNCATED) TClient::Ls request: /dc-1/arcadia TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" waiting... 2025-06-25T14:58:17.939521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 TClient::Ls request: /dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750863497691 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Berkanavt" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750863497852 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "arcadia" Path... 
(TRUNCATED) TClient::Ls request: /dc-1/arcadia 2025-06-25T14:58:17.969303Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "arcadia" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1750863497978 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsI... (TRUNCATED) 2025-06-25T14:58:20.188779Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901472773478683:2075];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207c/r3tmp/tmphrXYv0/pdisk_1.dat 2025-06-25T14:58:20.290518Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:20.361749Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:20.366571Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901472773478642:2080] 1750863500166194 != 1750863500166197 2025-06-25T14:58:20.393471Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:20.393558Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:20.395569Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28731 TClient::Ls request: / TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "/" PathId: 1 SchemeshardId: 0 PathType: EPathTypeDir CreateFinished: true } Children { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true } } Path: "/" WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:20.590702Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:20.596537Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:20.656665Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:20.747688Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 >> TLocksTest::CK_GoodLock [GOOD] >> TLocksTest::CK_BrokenLock >> TLocksTest::UpdateLockedKey [GOOD] >> TLocksTest::SetLockNothing |90.0%| [TA] $(B)/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/{meta.json ... results_accumulator.log} |90.0%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::RejectByIncomingReadSetSize [GOOD] Test command err: 2025-06-25T14:58:16.330581Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901459112100235:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:16.330693Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002080/r3tmp/tmpXj94UX/pdisk_1.dat 2025-06-25T14:58:16.754464Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:16.756047Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901459112100213:2080] 1750863496329027 != 1750863496329030 2025-06-25T14:58:16.879226Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:16.879368Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:16.882652Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21182 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:17.123406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:17.252389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/dc-1" OperationType: ESchemeOpMkDir MkDir { Name: "Dir1" } } TxId: 281474976710658 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:58:17.252592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /dc-1/Dir1, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:58:17.252757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: dc-1, child name: Dir1, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-25T14:58:17.252790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-25T14:58:17.252826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710658:0 type: TxMkDir target path: [OwnerId: 72057594046644480, LocalPathId: 2] source path: 2025-06-25T14:58:17.252910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:58:17.253113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T14:58:17.253177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:58:17.256549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710658, response: Status: StatusAccepted TxId: 281474976710658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-06-25T14:58:17.256756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710658, database: /dc-1, subject: , status: StatusAccepted, operation: CREATE DIRECTORY, path: /dc-1/Dir1 2025-06-25T14:58:17.256960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:58:17.256983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-25T14:58:17.257138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710658, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T14:58:17.257235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:58:17.257250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519901459112100853:2372], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 1 2025-06-25T14:58:17.257265Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7519901459112100853:2372], at schemeshard: 72057594046644480, txId: 281474976710658, path id: 2 2025-06-25T14:58:17.257302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:58:17.257328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:63: MkDir::TPropose operationId# 281474976710658:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:58:17.257370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976710658 ready parts: 1/1 waiting... 2025-06-25T14:58:17.261093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710658 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:58:17.262868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-25T14:58:17.262958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-25T14:58:17.262968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710658 2025-06-25T14:58:17.262984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710658, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], version: 4 2025-06-25T14:58:17.263009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-25T14:58:17.263246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-25T14:58:17.263297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-25T14:58:17.263309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710658 2025-06-25T14:58:17.263320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710658, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 2 2025-06-25T14:58:17.263329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 
2] was 2 2025-06-25T14:58:17.263371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710658, ready parts: 0/1, is published: true 2025-06-25T14:58:17.263490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:17.263509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710658, ready parts: 0/1, is published: true 2025-06-25T14:58:17.263549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:17.264669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710658:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710658 msg type: 269090816 2025-06-25T14:58:17.264751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710658, partId: 4294967295, tablet: 72057594046316545 2025-06-25T14:58:17.265331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710658 2025-06-25T14:58:17.265375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710658 2025-06-25T14:58:17.268667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750863497313, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:58:17.268813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, messa ... 
OperationProgress} hope 1 -> done Change{36, redo 162b alter 0b annex 0, ~{ 42, 4 } -{ }, 0 gb} 2025-06-25T14:58:17.595844Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:16} Tx{27, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T14:58:17.596513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 4 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046644480, cookie: 281474976710661 2025-06-25T14:58:17.596602Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:17} Tx{28, NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard 2025-06-25T14:58:17.596635Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:17} Tx{28, NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T14:58:17.596713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 4 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046644480, cookie: 281474976710661 2025-06-25T14:58:17.596726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046644480, txId: 281474976710661 2025-06-25T14:58:17.596740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710661, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], version: 9 2025-06-25T14:58:17.596763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 4 2025-06-25T14:58:17.596864Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:17} Tx{28, NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard} hope 1 -> done Change{37, redo 166b alter 0b annex 0, ~{ 48, 59 } -{ }, 0 gb} 2025-06-25T14:58:17.596895Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:17} Tx{28, NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T14:58:17.597034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 4 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976710661 2025-06-25T14:58:17.597101Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:18} Tx{29, NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard 2025-06-25T14:58:17.597117Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:18} Tx{29, NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T14:58:17.597155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 4 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976710661 2025-06-25T14:58:17.597162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: 
Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976710661 2025-06-25T14:58:17.597173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710661, pathId: [OwnerId: 72057594046644480, LocalPathId: 4], version: 3 2025-06-25T14:58:17.597191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 4] was 1 2025-06-25T14:58:17.597288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710661, subscribers: 1 2025-06-25T14:58:17.597308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [1:7519901463407068538:2263] 2025-06-25T14:58:17.597360Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:18} Tx{29, NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard} hope 1 -> done Change{38, redo 166b alter 0b annex 0, ~{ 48, 59 } -{ }, 0 gb} 2025-06-25T14:58:17.597397Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:18} Tx{29, NKikimr::NSchemeShard::TSchemeShard::TTxAckPublishToSchemeBoard} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T14:58:17.598453Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:4:17:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:17.598477Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:4:17:1:24576:118:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:17.598490Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:15:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:17.598512Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:4:16:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:17.598520Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:15:1:24576:109:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:17.598535Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:4:16:1:24576:121:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:17.598571Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:4:18:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:17.598590Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:4:18:1:24576:131:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:17.598620Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:19} commited cookie 1 for step 16 2025-06-25T14:58:17.598637Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:16} commited cookie 1 for step 15 2025-06-25T14:58:17.598703Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:19} commited cookie 1 for step 17 2025-06-25T14:58:17.598720Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710661 2025-06-25T14:58:17.598742Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:4:19} commited cookie 1 for step 18 2025-06-25T14:58:17.598749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710661 2025-06-25T14:58:17.598867Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:397: TClient[72057594046644480] received poison pill [1:7519901463407068539:2263] 2025-06-25T14:58:17.598896Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594046644480] notify reset [1:7519901463407068539:2263] 2025-06-25T14:58:17.598930Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:182: [72057594046644480] Got PeerClosed from# [1:7519901463407068539:2263] 2025-06-25T14:58:19.660501Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901470791103554:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:19.660562Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002080/r3tmp/tmpnx6Ir0/pdisk_1.dat 2025-06-25T14:58:19.782088Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:19.783833Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901470791103535:2080] 1750863499659860 != 1750863499659863 2025-06-25T14:58:19.825619Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:19.825692Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:19.828413Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12591 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 
2025-06-25T14:58:19.973571Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:19.993235Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:20.743059Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:23.301271Z node 2 :TX_PROXY ERROR: datareq.cpp:2829: Actor# [2:7519901487970973813:2602] txid# 281474976715700 FailProposedRequest: Transaction incoming read set size 1000084 for tablet 72075186224037889 exceeded limit 1000 Status# ExecError 2025-06-25T14:58:23.301350Z node 2 :TX_PROXY ERROR: datareq.cpp:883: Actor# [2:7519901487970973813:2602] txid# 281474976715700 RESPONSE Status# ExecError marker# P13c >> TLocksTest::Range_BrokenLock0 [GOOD] >> TLocksTest::Range_BrokenLock1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::ShardFreezeUnfreezeRejectScheme [GOOD] Test command err: 2025-06-25T14:58:17.453779Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901461293647319:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:17.456901Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207a/r3tmp/tmpDYeO99/pdisk_1.dat 2025-06-25T14:58:17.844467Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901461293647123:2080] 1750863497435672 != 1750863497435675 2025-06-25T14:58:17.856896Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:17.886538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:17.886651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:17.888954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22858 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:18.154252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:18.172842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:58:18.176899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:18.337457Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901465588615088:2364] txid# 281474976715659, issues: { message: "Requested freeze state already set" severity: 1 } Error 1: Requested freeze state already set 2025-06-25T14:58:20.594280Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901474443900219:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:20.602461Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207a/r3tmp/tmpjlnqyJ/pdisk_1.dat 2025-06-25T14:58:20.764553Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901474443900182:2080] 1750863500575495 != 1750863500575498 2025-06-25T14:58:20.766206Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:20.784725Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:20.784788Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:20.786642Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14805 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:21.010194Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:21.017522Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:58:21.021097Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:21.109039Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) waiting... 2025-06-25T14:58:21.125267Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519901478738868182:2394] txid# 281474976715660, issues: { message: "Table is frozen. Only unfreeze alter is allowed" severity: 1 } Error 128: Table is frozen. Only unfreeze alter is allowed 2025-06-25T14:58:21.127025Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) waiting... 2025-06-25T14:58:21.143678Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) waiting... ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksFatTest::ShardLocks [GOOD] Test command err: 2025-06-25T14:58:07.809804Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901419978028475:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.809896Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002091/r3tmp/tmpIlznws/pdisk_1.dat 2025-06-25T14:58:08.234971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.235077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.241112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:08.265312Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:29616 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:08.695381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:08.712402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:08.728773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:08.735117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.806250Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:08.888972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.972987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:12.855779Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901440093345834:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:12.855838Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002091/r3tmp/tmp8fCQJN/pdisk_1.dat 2025-06-25T14:58:12.952983Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:12.953335Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901440093345816:2080] 1750863492855384 != 1750863492855387 2025-06-25T14:58:12.965616Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:12.965696Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:12.967946Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27507 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:13.158324Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:13.162924Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:13.173177Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:13.223311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:13.263407Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:13.867734Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:16.786437Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901459164095412:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:16.786494Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002091/r3tmp/tmpHHSxyO/pdisk_1.dat 2025-06-25T14:58:16.875446Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:16.880375Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901459164095393:2080] 1750863496786081 != 1750863496786084 2025-06-25T14:58:16.919567Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:16.919649Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:16.923062Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10231 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:17.078399Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:17.088700Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:17.096739Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:17.100274Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:17.171381Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:17.218551Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:20.442139Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519901472680406148:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:20.442209Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002091/r3tmp/tmpBzg7i8/pdisk_1.dat 2025-06-25T14:58:20.576090Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:20.579631Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519901472680406127:2080] 1750863500441677 != 1750863500441680 2025-06-25T14:58:20.591907Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:20.592004Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:20.593394Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20140 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:20.836413Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:20.841665Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:20.854659Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:20.903260Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:58:20.949915Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpIndexes::UpdateOnReadColumns [GOOD] >> TLocksTest::Range_Pinhole [GOOD] >> TLocksTest::SetBreakSetEraseBreak >> TLocksTest::BrokenLockErase ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SplitBoundaryRead [GOOD] Test command err: 2025-06-25T14:58:18.281107Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901466228890318:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:18.281357Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002077/r3tmp/tmpD5Ndnc/pdisk_1.dat 2025-06-25T14:58:18.748296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:18.748415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:18.758380Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901466228890119:2080] 1750863498224132 != 1750863498224135 2025-06-25T14:58:18.763876Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:18.769740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62586 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:19.073885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:19.088644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:19.108748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:19.228801Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:19.321428Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-25T14:58:19.332842Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.003s,wait=0.001s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-25T14:58:19.384671Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.013s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-25T14:58:19.391746Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.004s,wait=0.001s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863499231 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) waiting... 
2025-06-25T14:58:19.513176Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.23, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-25T14:58:19.513566Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.22, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-25T14:58:19.513822Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.25, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-25T14:58:19.514734Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.24, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.002s,wait=0.000s,interrupts=0} 2025-06-25T14:58:19.515012Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.26, eph 3} end=Done, 4 blobs 2r (max 2), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 0 +0, (1907 1533 0)b }, ecr=1.000 2025-06-25T14:58:19.523646Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.32, eph 3} end=Done, 4 blobs 8r (max 8), put Spent{time=0.005s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (3250 2180 6413)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863499231 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) waiting... 
TClient::Ls request: /dc-1/Dir/TableOld 2025-06-25T14:58:19.672878Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T14:58:19.672909Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-25T14:58:19.672920Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-25T14:58:19.672930Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-25T14:58:19.672940Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" 2025-06-25T14:58:21.589721Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901477318955401:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:21.589828Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002077/r3tmp/tmp7vPLaZ/pdisk_1.dat 2025-06-25T14:58:21.726455Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:21.727844Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901477318955383:2080] 1750863501589286 != 1750863501589289 2025-06-25T14:58:21.764558Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:21.764638Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:21.767049Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29872 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:21.957947Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:21.962382Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompleti ... kie: 281474976715678 TabletId: 72075186224037891 2025-06-25T14:58:22.400201Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:38: TSplitMerge TConfigureDestination operationId# 281474976715678:0 HandleReply TEvInitSplitMergeDestinationAck, operationId: 281474976715678:0, at schemeshard: 72057594046644480 message# OperationCookie: 281474976715678 TabletId: 72075186224037891 2025-06-25T14:58:22.400570Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-25T14:58:22.403445Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976715678:0, at schemeshard: 72057594046644480, message: OperationCookie: 281474976715678 TabletId: 72075186224037890 2025-06-25T14:58:22.403482Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:38: TSplitMerge TConfigureDestination operationId# 281474976715678:0 HandleReply TEvInitSplitMergeDestinationAck, operationId: 281474976715678:0, at schemeshard: 72057594046644480 message# OperationCookie: 281474976715678 TabletId: 72075186224037890 2025-06-25T14:58:22.403507Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715678:0 3 -> 131 2025-06-25T14:58:22.403803Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-25T14:58:22.403870Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-25T14:58:22.403896Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:334: TSplitMerge TTransferData operationId# 281474976715678:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:58:22.403924Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_split_merge.cpp:353: TSplitMerge TTransferData operationId# 281474976715678:0 Starting split on src datashard 72075186224037888 splitOpId# 281474976715678:0 at tablet 72057594046644480 2025-06-25T14:58:22.404137Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976715678:0 from tablet: 72057594046644480 to tablet: 72075186224037888 cookie: 72057594046644480:1 msg type: 269553154 2025-06-25T14:58:22.404199Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976715678, partId: 0, tablet: 72075186224037888 2025-06-25T14:58:22.407334Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.27, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-25T14:58:22.407619Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.28, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 
2025-06-25T14:58:22.407820Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.29, eph -9223372036854775808} end=Done, 0 blobs 0r (max 0), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-25T14:58:22.408067Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.25, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-25T14:58:22.408238Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.26, eph 1} end=Done, 0 blobs 0r (max 1), put Spent{time=0.000s,wait=0.000s,interrupts=0} 2025-06-25T14:58:22.419867Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976715678:0, at schemeshard: 72057594046644480, message: OperationCookie: 281474976715678 TabletId: 72075186224037888 2025-06-25T14:58:22.419927Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:207: TSplitMerge TTransferData operationId# 281474976715678:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976715678 TabletId: 72075186224037888 2025-06-25T14:58:22.420176Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715678:0 131 -> 132 2025-06-25T14:58:22.420254Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 6 2025-06-25T14:58:22.420582Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-25T14:58:22.420677Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T14:58:22.420703Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715678, path id: [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-25T14:58:22.420898Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T14:58:22.420913Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:7519901477318955863:2237], at schemeshard: 72057594046644480, txId: 281474976715678, path id: 3 2025-06-25T14:58:22.420942Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-25T14:58:22.420966Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:437: TSplitMerge TNotifySrc, operationId: 281474976715678:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:58:22.420986Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_split_merge.cpp:468: Notify src datashard 72075186224037888 on partitioning changed splitOp# 281474976715678 at tablet 72057594046644480 2025-06-25T14:58:22.423909Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715678 2025-06-25T14:58:22.423986Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: 
Owner: 72057594046644480 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715678 2025-06-25T14:58:22.423995Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976715678 2025-06-25T14:58:22.424012Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715678, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], version: 4 2025-06-25T14:58:22.424027Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 7 2025-06-25T14:58:22.424076Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715678, ready parts: 0/1, is published: true 2025-06-25T14:58:22.424201Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976715678:0 from tablet: 72057594046644480 to tablet: 72075186224037888 cookie: 72057594046644480:1 msg type: 269553158 2025-06-25T14:58:22.426249Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976715678:0, at schemeshard: 72057594046644480, message: OperationCookie: 281474976715678 TabletId: 72075186224037888 2025-06-25T14:58:22.426280Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:392: TSplitMerge TNotifySrc, operationId: 281474976715678:0 HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037888, at schemeshard: 72057594046644480 2025-06-25T14:58:22.426328Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715678:0 progress is 1/1 2025-06-25T14:58:22.426343Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715678 ready parts: 1/1 2025-06-25T14:58:22.426366Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715678:0 progress is 1/1 2025-06-25T14:58:22.426375Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715678 ready parts: 1/1 2025-06-25T14:58:22.426392Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715678, ready parts: 1/1, is published: true 2025-06-25T14:58:22.426428Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:7519901481613923599:2320] message: TxId: 281474976715678 2025-06-25T14:58:22.426455Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715678 ready parts: 1/1 2025-06-25T14:58:22.426471Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715678:0 2025-06-25T14:58:22.426482Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715678:0 2025-06-25T14:58:22.426633Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 6 2025-06-25T14:58:22.427331Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715678 2025-06-25T14:58:22.427395Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-25T14:58:22.427409Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:261: Unable to activate 281474976715678:0 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863502094 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) >> TObjectStorageListingTest::MaxKeysAndSharding ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::MergeEmptyAndWrite [GOOD] Test command err: 2025-06-25T14:58:18.556641Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901464779683573:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:18.556816Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002073/r3tmp/tmpfya5kf/pdisk_1.dat 2025-06-25T14:58:18.988083Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:19.007536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:19.007622Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:19.017442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17013 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:19.324788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:19.349074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:19.357567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:19.361989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:19.562621Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:21.735859Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901479655176910:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:21.763378Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002073/r3tmp/tmpzTi8l1/pdisk_1.dat 2025-06-25T14:58:22.034839Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:22.034925Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:22.037390Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:22.038206Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5617 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:22.236886Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:22.262297Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:22.363174Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.002s,wait=0.001s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-25T14:58:22.371613Z node 2 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-25T14:58:22.416129Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.003s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-25T14:58:22.425733Z node 2 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.003s,wait=0.001s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863502367 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... 
(TRUNCATED) 2025-06-25T14:58:22.477714Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:22.479522Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-25T14:58:22.479692Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:22.480860Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-25T14:58:22.483488Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:22.484026Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037888 restored its data 2025-06-25T14:58:22.484819Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-25T14:58:22.484940Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:22.485322Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037889 restored its data 2025-06-25T14:58:22.485921Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-25T14:58:22.486525Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:22.486896Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037888 restored its data 2025-06-25T14:58:22.487517Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-25T14:58:22.487606Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:22.487995Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037889 restored its data 2025-06-25T14:58:22.488618Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-25T14:58:22.489057Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:22.489398Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037888 restored its data 2025-06-25T14:58:22.490013Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-25T14:58:22.490099Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:22.490425Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976715676 at 72075186224037889 restored its data 2025-06-25T14:58:22.491000Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976715676 released its data 2025-06-25T14:58:22.491309Z node 2 :TX_DATASHARD DEBUG: datashard__ ... 
e: 281474976715687 2025-06-25T14:58:22.870754Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:58:22.870800Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750863502913 : 281474976715687] from 72075186224037890 at tablet 72075186224037890 send result to client [2:7519901479655177186:2140], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T14:58:22.870829Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715687 state PreOffline TxInFly 0 2025-06-25T14:58:22.870867Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:58:22.870935Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6369: Handle TEvProposeTransactionResult, at schemeshard: 72057594046644480, message: TxKind: TX_KIND_SCHEME Origin: 72075186224037890 Status: COMPLETE TxId: 281474976715687 Step: 1750863502913 OrderId: 281474976715687 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037890 CpuTimeUsec: 723 } } CommitVersion { Step: 1750863502913 TxId: 281474976715687 } 2025-06-25T14:58:22.870949Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 281474976715687, tablet: 72075186224037890, partId: 0 2025-06-25T14:58:22.871058Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976715687:0, at schemeshard: 72057594046644480, message: TxKind: TX_KIND_SCHEME Origin: 72075186224037890 Status: COMPLETE TxId: 281474976715687 Step: 1750863502913 OrderId: 281474976715687 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037890 CpuTimeUsec: 723 } } CommitVersion { Step: 1750863502913 TxId: 281474976715687 } 2025-06-25T14:58:22.871151Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:109: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046644480, ev# TxKind: TX_KIND_SCHEME Origin: 72075186224037890 Status: COMPLETE TxId: 281474976715687 Step: 1750863502913 OrderId: 281474976715687 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037890 CpuTimeUsec: 723 } } CommitVersion { Step: 1750863502913 TxId: 281474976715687 } 2025-06-25T14:58:22.871390Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901483950145076 RawX2: 4503608217307405 } Origin: 72075186224037890 State: 5 TxId: 281474976715687 Step: 0 Generation: 1 2025-06-25T14:58:22.871418Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 281474976715687, tablet: 72075186224037890, partId: 0 2025-06-25T14:58:22.871512Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976715687:0, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901483950145076 RawX2: 4503608217307405 } Origin: 72075186224037890 State: 5 TxId: 281474976715687 Step: 0 Generation: 1 2025-06-25T14:58:22.871542Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 
281474976715687:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-25T14:58:22.871608Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976715687:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7519901483950145076 RawX2: 4503608217307405 } Origin: 72075186224037890 State: 5 TxId: 281474976715687 Step: 0 Generation: 1 2025-06-25T14:58:22.871648Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715687:0, shardIdx: 72057594046644480:3, shard: 72075186224037890, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 TClient::Ls request: /dc-1/Dir/TableOld 2025-06-25T14:58:22.871667Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-25T14:58:22.871683Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715687:0, datashard: 72075186224037890, at schemeshard: 72057594046644480 2025-06-25T14:58:22.871698Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715687:0 129 -> 240 2025-06-25T14:58:22.871893Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-25T14:58:22.871957Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-25T14:58:22.872003Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715687 datashard 72075186224037890 state PreOffline 2025-06-25T14:58:22.872015Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-25T14:58:22.872041Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2025-06-25T14:58:22.872058Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 281474976715687:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:58:22.872394Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T14:58:22.872524Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715687:0 progress is 1/1 2025-06-25T14:58:22.872537Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715687 ready parts: 1/1 2025-06-25T14:58:22.872554Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715687:0 progress is 1/1 2025-06-25T14:58:22.872567Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715687 ready parts: 1/1 2025-06-25T14:58:22.872581Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, 
TxId: 281474976715687, ready parts: 1/1, is published: true 2025-06-25T14:58:22.872622Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:7519901483950145302:2354] message: TxId: 281474976715687 2025-06-25T14:58:22.872638Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715687 ready parts: 1/1 2025-06-25T14:58:22.872653Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715687:0 2025-06-25T14:58:22.872661Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715687:0 2025-06-25T14:58:22.872726Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-25T14:58:22.873393Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037890 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:58:22.873451Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037890 Initiating switch from PreOffline to Offline state 2025-06-25T14:58:22.874396Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:58:22.876551Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901483950145076 RawX2: 4503608217307405 } TabletId: 72075186224037890 State: 4 2025-06-25T14:58:22.876614Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:22.877054Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-25T14:58:22.877174Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:22.878920Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T14:58:22.879118Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-25T14:58:22.879262Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:58:22.879278Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-25T14:58:22.879327Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:58:22.879659Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = 
ReasonStop 2025-06-25T14:58:22.880136Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-25T14:58:22.880331Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-25T14:58:22.880400Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-25T14:58:22.880734Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T14:58:22.880756Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T14:58:22.880794Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeReverseExcludeKeys [GOOD] Test command err: 2025-06-25T14:58:18.478458Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901466906102699:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:18.486457Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002074/r3tmp/tmpCzGUvO/pdisk_1.dat 2025-06-25T14:58:18.899342Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:18.960073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:18.960204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:18.961396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14035 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:19.231492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:19.252628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:19.280612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:19.297221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:19.486955Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:21.810701Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901480011710721:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:21.810773Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002074/r3tmp/tmpiGLv6L/pdisk_1.dat 2025-06-25T14:58:22.060486Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901480011710705:2080] 1750863501810242 != 1750863501810245 2025-06-25T14:58:22.079649Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:22.086073Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:22.086159Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:22.089075Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23574 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:22.351539Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:22.358269Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:22.371458Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:22.793041Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TFlatTest::RejectByPerShardReadSize >> TFlatTest::LargeProxyReply >> TCancelTx::CrossShardReadOnlyWithReadSets [GOOD] >> TCancelTx::ImmediateReadOnly >> TFlatTest::SelectRangeBytesLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UpdateOnReadColumns [GOOD] Test command err: Trying to start YDB, gRPC: 16474, MsgBus: 8109 2025-06-25T14:57:16.941636Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901199596926416:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:16.952965Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ada/r3tmp/tmpoXsOqE/pdisk_1.dat 2025-06-25T14:57:17.430785Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:17.437830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:17.437907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:17.440477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16474, node 1 2025-06-25T14:57:17.579171Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:17.579195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:17.579201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:17.579308Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8109 2025-06-25T14:57:17.936961Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8109 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:18.170945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:18.213515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:57:18.337697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:18.474853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:18.554540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:20.266414Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901216776797120:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.266517Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.544802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.582478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.616912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.646734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.672567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.714683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.760967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.851133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901216776797775:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.851197Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.851558Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901216776797780:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.855605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:20.870659Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901216776797782:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:20.954278Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901216776797833:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:21.930327Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901199596926416:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:21.930398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:22.024350Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901225366732700:3597], Recipient [1:7519901203891893911:2140]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:22.024389Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:22.024403Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:57:22.024440Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519901225366732696:3594], Recipient [1:751990120 ... ubscription [7:7519901454946412155:2080] 1750863495114797 != 1750863495114800 2025-06-25T14:58:15.288421Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:15.296340Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:15.296452Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:15.298166Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6512, node 7 2025-06-25T14:58:15.349406Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:15.349436Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:15.349447Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:15.349602Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28029 TClient is connected to server localhost:28029 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T14:58:16.124447Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:16.136169Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:16.163229Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:16.237933Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:16.453334Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:16.541299Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:19.651278Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519901472126282980:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:19.651389Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:19.742345Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:19.795509Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:19.838553Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:19.878142Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:19.927058Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:19.975969Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:20.022077Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:20.116163Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519901454946412177:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:20.116272Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:20.130196Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[7:7519901476421250935:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:20.130312Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:20.130751Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519901476421250940:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:20.135900Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:20.151687Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519901476421250942:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:58:20.231805Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519901476421250996:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:21.869593Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:22.062339Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:22.120495Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TFlatTest::SelectRangeSkipNullKeys [GOOD] >> TFlatTest::CopyTableAndCompareColumnsSchema >> TFlatTest::SelectRangeNullArgs3 >> TFlatTest::ShardFreezeUnfreezeAlreadySet >> TObjectStorageListingTest::Listing [GOOD] >> TObjectStorageListingTest::ManyDeletes >> TObjectStorageListingTest::CornerCases >> TFlatTest::LargeDatashardReplyDistributed >> TLocksTest::CK_Range_BrokenLock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeSkipNullKeys [GOOD] Test command err: 2025-06-25T14:58:21.438377Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901480583971172:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:21.438669Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002070/r3tmp/tmpxNqU9r/pdisk_1.dat 2025-06-25T14:58:21.892196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:21.892289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:21.895477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:21.927140Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:21.929145Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901480583971012:2080] 1750863501401274 != 1750863501401277 TClient is connected to server 
localhost:16286 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:22.207572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:22.226858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:58:22.230387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:22.360731Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901484878938968:2362] txid# 281474976710659, issues: { message: "Mix freeze cmd with other options is forbidden" severity: 1 } Error 128: Mix freeze cmd with other options is forbidden 2025-06-25T14:58:22.362696Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901484878938983:2370] txid# 281474976710660, issues: { message: "Unexpected freeze state" severity: 1 } Error 128: Unexpected freeze state 2025-06-25T14:58:22.364987Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901484878938989:2375] txid# 281474976710661, issues: { message: "Mix freeze cmd with other options is forbidden" severity: 1 } Error 128: Mix freeze cmd with other options is forbidden 2025-06-25T14:58:22.369699Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901484878938995:2380] txid# 281474976710662, issues: { message: "Mix freeze cmd with other options is forbidden" severity: 1 } Error 128: Mix freeze cmd with other options is forbidden 2025-06-25T14:58:22.453354Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:24.470662Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901490422767217:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:24.470716Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002070/r3tmp/tmpn25CDR/pdisk_1.dat 2025-06-25T14:58:24.651235Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:24.651305Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:24.652285Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:24.653682Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901490422767190:2080] 1750863504469625 != 1750863504469628 2025-06-25T14:58:24.665045Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19519 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:24.902237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:24.921341Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:24.924626Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> TFlatTest::WriteSplitKillRead >> TFlatTest::SelectRangeBytesLimit [GOOD] >> TFlatTest::SelectRangeForbidNullArgs1 >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover+Nullable [GOOD] >> TCancelTx::ImmediateReadOnly [GOOD] >> TFlatTest::SelectRangeNullArgs3 [GOOD] >> TFlatTest::SelectRangeNullArgs4 >> TFlatTest::ShardFreezeUnfreezeAlreadySet [GOOD] >> TFlatTest::ShardFreezeUnfreeze >> KqpKv::ReadRows_ExternalBlobs+UseExtBlobsPrecharge [GOOD] >> KqpKv::ReadRows_ExternalBlobs-UseExtBlobsPrecharge >> TLocksFatTest::RangeSetBreak >> TLocksFatTest::PointSetRemove [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover+Nullable [GOOD] Test command err: Trying to start YDB, gRPC: 30182, MsgBus: 31646 2025-06-25T14:57:11.676182Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901177628585407:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:11.676225Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001bc4/r3tmp/tmp3W704i/pdisk_1.dat 2025-06-25T14:57:12.204697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:12.204786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:12.213670Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:12.257475Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:12.260612Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901177628585386:2080] 1750863431672365 != 1750863431672368 TServer::EnableGrpc on GrpcPort 30182, node 1 2025-06-25T14:57:12.404374Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:12.404394Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:12.404400Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:12.404502Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31646 2025-06-25T14:57:12.713636Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31646 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:13.149349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:13.165727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:13.175172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:13.281696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:13.410435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:13.491173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:15.078217Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901194808456202:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.078340Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.382346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.416956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.451587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.485485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.527624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.608327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.640381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.707426Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901194808456861:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.707511Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.707739Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901194808456866:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.711134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:15.725927Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901194808456868:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:15.824115Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901194808456921:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:16.680419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901177628585407:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:16.680481Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:16.817271Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901199103424493:3597], Recipient [1:7519901181923552985:2136]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:16.817319Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:50 ... AT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037896 maps to shardIdx: 72057594046644480:9 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], pathId map=EightShard, is column=0, is olap=0, RowCount 3, DataSize 800 2025-06-25T14:58:29.453616Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037896, followerId 0 2025-06-25T14:58:29.453642Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:9 with partCount# 0, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:29.453654Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037896 2025-06-25T14:58:29.453681Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046644480:8 data size 800 row count 3 2025-06-25T14:58:29.453706Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037895 maps to shardIdx: 72057594046644480:8 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], pathId map=EightShard, is column=0, is olap=0, RowCount 3, DataSize 800 2025-06-25T14:58:29.453713Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037895, followerId 0 2025-06-25T14:58:29.453735Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:8 with partCount# 0, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:29.453745Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037895 2025-06-25T14:58:29.453803Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:58:29.456630Z node 2 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519901338645086948:2143]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:29.456668Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:29.456681Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:58:29.576547Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7519901338645086948:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:29.576589Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:29.576649Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [2:7519901338645086948:2143], Recipient [2:7519901338645086948:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:29.576671Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:29.714456Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [2:7519901342940055163:2319], Recipient [2:7519901338645086948:2143]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037903 TableLocalId: 5 Generation: 1 Round: 3 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 104 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186224037903 NodeId: 2 StartTime: 1750863469565 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:58:29.714499Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:58:29.714533Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037903 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0104 2025-06-25T14:58:29.714627Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037903 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:58:29.714651Z node 2 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099995s, queue# 1 2025-06-25T14:58:29.714817Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [2:7519901342940055166:2322], Recipient [2:7519901338645086948:2143]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037904 TableLocalId: 5 Generation: 1 Round: 3 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 111 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186224037904 NodeId: 2 StartTime: 1750863469565 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:58:29.714834Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:58:29.714853Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037904 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0111 2025-06-25T14:58:29.714928Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037904 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:58:29.814368Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519901338645086948:2143]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:29.814417Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:29.814429Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 2 2025-06-25T14:58:29.814488Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 2 2025-06-25T14:58:29.814504Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000142s, queue# 2 2025-06-25T14:58:29.814560Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:16 data size 0 row count 0 2025-06-25T14:58:29.814615Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037903 maps to shardIdx: 72057594046644480:16 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId 
map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:29.814627Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037903, followerId 0 2025-06-25T14:58:29.814684Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:16 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:29.814729Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037903 2025-06-25T14:58:29.814754Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:17 data size 0 row count 0 2025-06-25T14:58:29.814785Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037904 maps to shardIdx: 72057594046644480:17 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:29.814796Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037904, followerId 0 2025-06-25T14:58:29.814822Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:17 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:29.814832Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037904 2025-06-25T14:58:29.814881Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:58:29.815221Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7519901338645086948:2143]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:29.815238Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:29.815248Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 >> TObjectStorageListingTest::CornerCases [GOOD] >> TObjectStorageListingTest::Decimal ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TCancelTx::ImmediateReadOnly [GOOD] Test command err: 2025-06-25T14:58:17.325055Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901462191241473:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:17.325274Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207b/r3tmp/tmp58WBvE/pdisk_1.dat 2025-06-25T14:58:17.610883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:17.611003Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:17.617550Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:17.618818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:17.619603Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901462191241289:2080] 1750863497290036 != 1750863497290039 TClient is connected to server localhost:27836 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:17.937843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:17.954117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:58:17.957915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient is connected to server localhost:27836 2025-06-25T14:58:18.317800Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:18.329160Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519901466486209288:2380] txid# 281474976710660 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037888 marker# P12 2025-06-25T14:58:18.329240Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519901466486209288:2380] txid# 281474976710660 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:18.339270Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519901466486209310:2392] txid# 281474976710661 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037888 marker# P12 2025-06-25T14:58:18.339361Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519901466486209310:2392] txid# 281474976710661 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:18.354444Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519901466486209323:2402] txid# 281474976710662 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037888 marker# P12 2025-06-25T14:58:18.354523Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519901466486209323:2402] txid# 281474976710662 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:18.384198Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519901466486209349:2422] txid# 281474976710664 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037889 marker# P12 2025-06-25T14:58:18.384278Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519901466486209349:2422] txid# 281474976710664 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:18.396357Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519901466486209362:2432] txid# 281474976710665 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037889 marker# P12 2025-06-25T14:58:18.396420Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519901466486209362:2432] txid# 281474976710665 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:18.412115Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519901466486209375:2442] txid# 281474976710666 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037889 marker# P12 2025-06-25T14:58:18.412178Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519901466486209375:2442] txid# 281474976710666 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:20.516752Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901474580053236:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:20.516800Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207b/r3tmp/tmpyYyl0V/pdisk_1.dat 2025-06-25T14:58:20.755499Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:20.755568Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:20.759189Z node 2 :IMPORT 
WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:20.760082Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:20.760632Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901474580053210:2080] 1750863500509108 != 1750863500509111 TClient is connected to server localhost:14525 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:20.990216Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:20.997310Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:58:21.001221Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient is connected to server localhost:14525 2025-06-25T14:58:23.995488Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901488811746198:2150];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207b/r3tmp/tmpmKPXg1/pdisk_1.dat 2025-06-25T14:58:24.063966Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:24.135654Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:24.137116Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901488811746083:2080] 1750863503968727 != 1750863503968730 2025-06-25T14:58:24.157036Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:24.157124Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:24.158496Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14697 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:24.350733Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:24.360735Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:58:24.365743Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient is connected to server localhost:14697 2025-06-25T14:58:24.673469Z node 3 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [3:7519901493106714085:2381] txid# 281474976715660 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037888 marker# P12 2025-06-25T14:58:24.673557Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519901493106714085:2381] txid# 281474976715660 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:24.690303Z node 3 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [3:7519901493106714101:2394] txid# 281474976715661 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037888 marker# P12 2025-06-25T14:58:24.690367Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519901493106714101:2394] txid# 281474976715661 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:24.701474Z node 3 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [3:7519901493106714115:2405] txid# 281474976715662 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037888 marker# P12 2025-06-25T14:58:24.701534Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519901493106714115:2405] txid# 281474976715662 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:24.730263Z node 3 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [3:7519901493106714143:2427] txid# 281474976715664 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037889 marker# P12 2025-06-25T14:58:24.730323Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519901493106714143:2427] txid# 281474976715664 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:24.756897Z node 3 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [3:7519901493106714157:2438] txid# 281474976715665 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037889 marker# P12 2025-06-25T14:58:24.756958Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519901493106714157:2438] txid# 281474976715665 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:24.782954Z node 3 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [3:7519901493106714171:2449] txid# 281474976715666 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# CANCELLED shard id 72075186224037889 marker# P12 2025-06-25T14:58:24.783024Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519901493106714171:2449] txid# 281474976715666 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:27.457778Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519901504894595940:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:27.457869Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207b/r3tmp/tmpceDV31/pdisk_1.dat 2025-06-25T14:58:27.635338Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:27.637460Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519901504894595911:2080] 1750863507456837 != 1750863507456840 2025-06-25T14:58:27.658963Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:27.659043Z node 4 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:27.659836Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11039 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:27.967632Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:27.975439Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
TClient is connected to server localhost:11039 2025-06-25T14:58:28.319479Z node 4 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715660 at tablet 72075186224037888 status: CANCELLED errors: EXECUTION_CANCELLED (Tx was cancelled) | 2025-06-25T14:58:28.321086Z node 4 :TX_PROXY ERROR: datareq.cpp:883: Actor# [4:7519901509189563911:2383] txid# 281474976715660 RESPONSE Status# ExecCancelled marker# P13c 2025-06-25T14:58:28.333871Z node 4 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715662 at tablet 72075186224037889 status: CANCELLED errors: EXECUTION_CANCELLED (Tx was cancelled) | 2025-06-25T14:58:28.334397Z node 4 :TX_PROXY ERROR: datareq.cpp:883: Actor# [4:7519901509189563925:2391] txid# 281474976715662 RESPONSE Status# ExecCancelled marker# P13c >> TFlatTest::WriteSplitKillRead [GOOD] >> TFlatTest::WriteSplitWriteSplit >> TLocksTest::SetLockNothing [GOOD] >> TFlatTest::LargeProxyReply [GOOD] >> TFlatTest::LargeProxyReplyRW >> TLocksTest::Range_IncorrectDot1 [GOOD] >> TLocksTest::Range_IncorrectDot2 >> TFlatTest::SelectRangeForbidNullArgs1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksFatTest::PointSetRemove [GOOD] Test command err: 2025-06-25T14:58:15.827229Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901452211596273:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:15.827339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002081/r3tmp/tmpKWMvtK/pdisk_1.dat 2025-06-25T14:58:16.164047Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:16.235613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:16.235716Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:16.237439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64917 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:16.390639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:16.422674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:16.525150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:16.569412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:16.841617Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:20.828455Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901452211596273:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:20.828626Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:23.495544Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901487682883747:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:23.495588Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002081/r3tmp/tmp0N8Onk/pdisk_1.dat 2025-06-25T14:58:23.627987Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:23.646197Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:23.646280Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:23.648297Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22546 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:23.834834Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:23.848738Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:23.855814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:23.860345Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:23.935266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:24.005885Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:24.515113Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:27.942825Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901506456365429:2234];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002081/r3tmp/tmp1WW8WI/pdisk_1.dat 2025-06-25T14:58:27.965994Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:28.043911Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:28.068565Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:28.068646Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:28.069994Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17658 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:28.260969Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:28.265717Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-25T14:58:28.279990Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:28.330467Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:28.378462Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:28.924586Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TLocksTest::SetBreakSetEraseBreak [GOOD] >> TLocksTest::GoodDupLock [GOOD] >> TLocksTest::CK_Range_GoodLock >> TFlatTest::SelectRangeNullArgs4 [GOOD] >> TFlatTest::ShardFreezeUnfreeze [GOOD] >> TObjectStorageListingTest::Split >> TLocksTest::GoodSameKeyLock [GOOD] >> TLocksTest::GoodSameShardLock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::SetLockNothing [GOOD] Test command err: 2025-06-25T14:58:07.812786Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901418474034457:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.844603Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208d/r3tmp/tmpPkvNJZ/pdisk_1.dat 2025-06-25T14:58:08.351695Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:08.356453Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901418474034345:2080] 1750863487770381 != 1750863487770384 2025-06-25T14:58:08.371210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.371332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.373282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4437 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:08.658075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:08.671931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:08.688760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.834755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.840650Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:08.894783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:11.063662Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901435452485130:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:11.063717Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208d/r3tmp/tmpDkg9vl/pdisk_1.dat 2025-06-25T14:58:11.265822Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:11.265894Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:11.268083Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:11.271102Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:11.276247Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901435452485103:2080] 1750863491063218 != 1750863491063221 TClient is connected to server localhost:18470 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:11.553734Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-25T14:58:11.583517Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:11.587436Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:11.703415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:11.784674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:14.583993Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901448700524973:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:14.584067Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208d/r3tmp/tmpNhGxg4/pdisk_1.dat 2025-06-25T14:58:14.697597Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901448700524955:2080] 1750863494583344 != 1750863494583347 2025-06-25T14:58:14.705142Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:14.738828Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:14.738901Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:14.740017Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4980 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:14.889423Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:14.910463Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is und ... 4037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:21.837960Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:21.843907Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8286 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:22.115770Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:22.132548Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:22.152629Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:22.156549Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:22.231102Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:22.316810Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:25.312495Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519901495754249991:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:25.324930Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208d/r3tmp/tmpLGYdoO/pdisk_1.dat 2025-06-25T14:58:25.511786Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:25.511881Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:25.513270Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:25.514695Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20990 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:25.773861Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:25.794998Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:58:25.871633Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:25.951287Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:29.349983Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519901513418453383:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:29.350046Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208d/r3tmp/tmpMGj3V1/pdisk_1.dat 2025-06-25T14:58:29.534692Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:29.661898Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:29.662012Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:29.669485Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2001 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:29.846494Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:29.854900Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:29.865348Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:29.872610Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:29.964616Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:30.043216Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
>> TLocksTest::BrokenLockUpdate [GOOD] >> TLocksTest::BrokenNullLock >> TObjectStorageListingTest::TestFilter >> TLocksTest::BrokenSameKeyLock [GOOD] >> TLocksTest::BrokenSameShardLock ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeForbidNullArgs1 [GOOD] Test command err: 2025-06-25T14:58:27.653186Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901506469029179:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:27.656651Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002068/r3tmp/tmp3BBOwg/pdisk_1.dat 2025-06-25T14:58:28.107367Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901506469029023:2080] 1750863507620544 != 1750863507620547 2025-06-25T14:58:28.119101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:28.119191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:28.121013Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:28.132597Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:4017 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:28.526405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:28.544782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:28.560974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:28.578318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:28.671660Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002068/r3tmp/tmptdHyrp/pdisk_1.dat 2025-06-25T14:58:31.260469Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:31.293752Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:31.300269Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:31.300436Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:31.302262Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14285 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:31.494768Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:31.500777Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-25T14:58:31.525428Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TLocksTest::Range_CorrectNullDot [GOOD] >> TLocksTest::Range_EmptyKey >> TObjectStorageListingTest::Decimal [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::SetBreakSetEraseBreak [GOOD] Test command err: 2025-06-25T14:58:08.845639Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901422276350125:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:08.845680Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002083/r3tmp/tmpIs0BvM/pdisk_1.dat 2025-06-25T14:58:09.329773Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:09.396851Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:09.396978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:09.399212Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14113 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:09.724133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:09.734006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:09.743828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:09.865333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:09.890291Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:09.919545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:12.150950Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901438723070628:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:12.150986Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002083/r3tmp/tmp7Kl6ea/pdisk_1.dat 2025-06-25T14:58:12.297187Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901438723070612:2080] 1750863492139321 != 1750863492139324 2025-06-25T14:58:12.307284Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:12.327086Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:12.327162Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:12.328804Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25489 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:12.489496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:12.512129Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:12.582354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:58:12.622989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:15.361080Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901454997191294:2245];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:15.361327Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002083/r3tmp/tmpLOP4Ni/pdisk_1.dat 2025-06-25T14:58:15.475679Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:15.491725Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:15.491802Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:15.494227Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22842 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:15.666237Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:15.675707Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:15.695158Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:15.804185Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, b ... hardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:22.846473Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:22.856799Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:22.867624Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:22.872032Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:22.951656Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:23.004022Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:26.213884Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7519901498662537918:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:26.213989Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002083/r3tmp/tmpKWf3OQ/pdisk_1.dat 2025-06-25T14:58:26.367600Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:26.368915Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7519901498662537899:2080] 1750863506210179 != 1750863506210182 2025-06-25T14:58:26.387936Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:26.388035Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:26.394075Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23829 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:26.622336Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:26.629376Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:26.645707Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:26.705544Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:26.760993Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:30.351847Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519901517694949320:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:30.351959Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002083/r3tmp/tmpHH2zFv/pdisk_1.dat 2025-06-25T14:58:30.570935Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:30.574841Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7519901517694949297:2080] 1750863510350893 != 1750863510350896 2025-06-25T14:58:30.589419Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:30.589514Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:30.591512Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22145 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:30.854645Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:30.862552Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:30.877589Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:30.882784Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:30.978157Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:58:31.056839Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeNullArgs4 [GOOD] Test command err: 2025-06-25T14:58:28.307982Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901507738555260:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:28.308067Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002065/r3tmp/tmpoCYDiK/pdisk_1.dat 2025-06-25T14:58:28.793833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:28.793931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:28.801205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:28.801430Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:9657 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:29.080921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:29.099883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:29.123708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:29.316488Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:31.679704Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901521765867207:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:31.687002Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002065/r3tmp/tmpochRyX/pdisk_1.dat 2025-06-25T14:58:31.871070Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:31.971708Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:31.971794Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:31.973480Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19335 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:32.118405Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:32.126049Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:32.134224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:32.138337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
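Note on the recurring schemeshard warning: "Operation part proposed ok, but propose itself is undo unsafe" shows up in nearly every test block above, for ESchemeOpAlterSubDomain, ESchemeOpCreateTable and ESchemeOpAlterTable suboperations. A minimal sketch for tallying these occurrences by suboperation type from a saved copy of this stdout (the file name test_stdout.log is a hypothetical placeholder, not a path produced by this run):

# tally_undo_unsafe.py -- minimal sketch; assumes the captured stdout above is available as a plain-text file
import re
from collections import Counter

LOG_PATH = "test_stdout.log"  # hypothetical placeholder; substitute the actual capture of this output

# Matches the warning as printed above, e.g.
#   ... propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, ...
PATTERN = re.compile(r"propose itself is undo unsafe, suboperation type: (ESchemeOp\w+), opId: (\d+:\d+)")

counts = Counter()
with open(LOG_PATH, encoding="utf-8", errors="replace") as fh:
    for line in fh:
        # several log records can share one physical line in this capture, so scan with findall
        for op_type, _op_id in PATTERN.findall(line):
            counts[op_type] += 1

for op_type, n in counts.most_common():
    print(f"{op_type}\t{n}")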
>> TFlatTest::WriteSplitWriteSplit [GOOD] >> TLocksFatTest::PointSetBreak ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::ShardFreezeUnfreeze [GOOD] Test command err: 2025-06-25T14:58:28.304443Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901509000904216:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:28.305099Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002066/r3tmp/tmpm2SfhS/pdisk_1.dat 2025-06-25T14:58:28.747888Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:28.753184Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901509000904183:2080] 1750863508291935 != 1750863508291938 2025-06-25T14:58:28.846241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:28.846356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:28.849951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17928 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:29.157127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:29.186368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:58:29.193194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:29.328585Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:29.370135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) waiting... 2025-06-25T14:58:29.405660Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901513295872195:2395] txid# 281474976710660, issues: { message: "Requested freeze state already set" severity: 1 } Error 1: Requested freeze state already set 2025-06-25T14:58:29.413493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) waiting... 2025-06-25T14:58:29.434132Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901513295872235:2429] txid# 281474976710662, issues: { message: "Requested freeze state already set" severity: 1 } Error 1: Requested freeze state already set 2025-06-25T14:58:31.778553Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901520853189838:2149];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002066/r3tmp/tmpPAk50Y/pdisk_1.dat 2025-06-25T14:58:31.860708Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:31.937846Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:31.940406Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901520853189723:2080] 1750863511715058 != 1750863511715061 2025-06-25T14:58:31.963669Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:31.963742Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:31.965500Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28225 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:32.171426Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:32.176911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:58:32.179391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:32.251980Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) waiting... 
2025-06-25T14:58:32.298778Z node 2 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976710660: 2025-06-25T14:58:32.299160Z node 2 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [2:7519901525148157732:2394] txid# 281474976710660 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-06-25T14:58:32.299227Z node 2 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [2:7519901525148157732:2394] txid# 281474976710660 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-06-25T14:58:32.299248Z node 2 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [2:7519901525148157732:2394] txid# 281474976710660 invalidateDistCache: 0 DIE TDataReq MarkShardError TabletsLeft# 1 2025-06-25T14:58:32.302141Z node 2 :TX_DATASHARD ERROR: datashard_pipeline.cpp:1570: Shard 72075186224037888 cannot parse tx 281474976710661: 2025-06-25T14:58:32.302371Z node 2 :TX_PROXY ERROR: datareq.cpp:1873: Actor# [2:7519901525148157740:2399] txid# 281474976710661 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# ERROR shard id 72075186224037888 read size 0 out readset size 0 marker# P6 2025-06-25T14:58:32.302423Z node 2 :TX_PROXY ERROR: datareq.cpp:2071: Actor# [2:7519901525148157740:2399] txid# 281474976710661 HANDLE PrepareErrors TEvProposeTransactionResult TDataReq TabletStatus# StatusWait shard id 72075186224037888 2025-06-25T14:58:32.302435Z node 2 :TX_PROXY ERROR: datareq.cpp:1274: Actor# [2:7519901525148157740:2399] txid# 281474976710661 invalidateDistCache: 0 DIE TDataReq MarkShardError TabletsLeft# 1 2025-06-25T14:58:32.309382Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) waiting... 
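The ">> Suite::Test [GOOD]" markers interleaved with the command output above carry the per-test verdicts. A minimal sketch for pulling them out of the same capture (only [GOOD] appears in this excerpt; accepting any bracketed status is an assumption):

# list_verdicts.py -- minimal sketch; extracts ">> Suite::Test [STATUS]" markers from the captured output
import re
import sys

VERDICT = re.compile(r">>\s+(\S+::\S+)\s+\[(\w+)\]")

def verdicts(path):
    seen = {}
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            for name, status in VERDICT.findall(line):
                seen[name] = status  # keep the last verdict printed for a test
    return seen

if __name__ == "__main__":
    for name, status in sorted(verdicts(sys.argv[1]).items()):
        print(f"{status:8} {name}")

Markers that only announce a starting test (e.g. ">> TLocksFatTest::PointSetBreak", with no bracketed status) simply do not match the pattern and are skipped.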
>> TLocksTest::Range_BrokenLock2 [GOOD] >> TLocksTest::Range_BrokenLock3 >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix+Nullable+Covered [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TObjectStorageListingTest::Decimal [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002064/r3tmp/tmpe5QddL/pdisk_1.dat 2025-06-25T14:58:29.508353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:29.551408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:29.551510Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:29.565019Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901509114142159:2080] 1750863508969915 != 1750863508969918 2025-06-25T14:58:29.575749Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:29.580602Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30174, node 1 2025-06-25T14:58:29.733044Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:29.733087Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:29.733100Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:29.733227Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9904 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-25T14:58:30.070190Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:30.239177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:30.261959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:30.281049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:30.307868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:32.895352Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901524483200464:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:32.912154Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002064/r3tmp/tmppg2e8s/pdisk_1.dat 2025-06-25T14:58:33.064377Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:33.066779Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901524483200437:2080] 1750863512888813 != 1750863512888816 TServer::EnableGrpc on GrpcPort 11988, node 2 2025-06-25T14:58:33.110369Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:33.110447Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:33.112400Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:33.194188Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:33.194210Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:33.194217Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:33.194327Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24654 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:33.432956Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:33.437769Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:33.463659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
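The HIVE warnings above record each node's VolatileState path (Unknown -> Disconnected -> Connecting -> Connected). A minimal sketch for reconstructing that chain per node id from the same capture; note that node ids restart at 1 for every test in this run, so grouping by id alone mixes separate test instances:

# hive_states.py -- minimal sketch; groups the "VolatileState: X -> Y" HIVE records by node id
import re
import sys
from collections import defaultdict

STATE = re.compile(r"HIVE#\d+ Node\((\d+),.*?VolatileState: (\w+) -> (\w+)")

def transitions(path):
    per_node = defaultdict(list)
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            # non-greedy pattern plus findall, because several records share one physical line here
            for node, old, new in STATE.findall(line):
                per_node[int(node)].append((old, new))
    return per_node

if __name__ == "__main__":
    for node, steps in sorted(transitions(sys.argv[1]).items()):
        chain = " -> ".join([steps[0][0]] + [new for _, new in steps])
        print(f"node {node}: {chain}")

For the excerpt above this prints chains like "node 1: Unknown -> Disconnected -> Connecting -> Connected", assuming the three transition records of a node appear in order.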
>> TLocksTest::Range_BrokenLockMax >> TLocksTest::Range_IncorrectNullDot1 [GOOD] >> TLocksTest::Range_IncorrectNullDot2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::WriteSplitWriteSplit [GOOD] Test command err: 2025-06-25T14:58:30.148721Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901519101641661:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:30.148778Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002061/r3tmp/tmphUweQv/pdisk_1.dat 2025-06-25T14:58:30.568228Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901519101641473:2080] 1750863510055439 != 1750863510055442 2025-06-25T14:58:30.578857Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:30.580071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:30.580159Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:30.591003Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15684 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:30.832417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:30.844758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:30.864735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:31.038607Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-25T14:58:31.073139Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.012s,wait=0.005s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-25T14:58:31.133741Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:31.137155Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-25T14:58:31.146013Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 2025-06-25T14:58:31.202910Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.21, eph 3} end=Done, 4 blobs 8r (max 9), put Spent{time=0.014s,wait=0.001s,interrupts=1} Part{ 2 pk, lobs 5 +0, (3250 2180 6413)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750863510977 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) 2025-06-25T14:58:31.332150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { OperationType: ESchemeOpSplitMergeTablePartitions SplitMergeTablePartitions { TablePath: "/dc-1/Dir/TableOld" SourceTabletId: 72075186224037888 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 100 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 200 } } } } } } TxId: 281474976710680 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:58:31.332462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:804: TSplitMerge Propose, tableStr: /dc-1/Dir/TableOld, tableId: , opId: 281474976710680:0, at schemeshard: 72057594046644480, request: TablePath: "/dc-1/Dir/TableOld" SourceTabletId: 72075186224037888 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 100 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 200 } } } } waiting... 
2025-06-25T14:58:31.332711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T14:58:31.332765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-25T14:58:31.332779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 5 2025-06-25T14:58:31.333004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 6 2025-06-25T14:58:31.333030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976710680:0 type: TxSplitTablePartition target path: [OwnerId: 72057594046644480, LocalPathId: 3] source path: 2025-06-25T14:58:31.333262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1083: TSplitMerge Propose accepted, tableStr: /dc-1/Dir/TableOld, tableId: , opId: 281474976710680:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\001\000\000\000\000\200" KeyRangeEnd: "\001\000\004\000\000\000\377\377\377\177" TabletID: 72075186224037888 ShardIdx: 1 } DestinationRanges { KeyRangeBegin: "\001\000\000\000\000\200" KeyRangeEnd: "\001\000\004\000\000\000d\000\000\000" ShardIdx: 3 } DestinationRanges { KeyRangeBegin: "\001\000\004\000\000\000d\000\000\000" KeyRangeEnd: "\001\000\004\000\000\000\310\000\000\000" ShardIdx: 4 } DestinationRanges { KeyRangeBegin: "\001\000\004\000\000\000\310\000\000\000" KeyRangeEnd: "\001\000\004\000\000\000\377\377\377\177" ShardIdx: 5 }, request: TablePath: "/dc-1/Dir/TableOld" SourceTabletId: 72075186224037888 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 100 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 200 } } } } 2025-06-25T14:58:31.333296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976710680:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:58:31.334154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976710680, response: Status: StatusAccepted TxId: 281474976710680 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T14:58:31.334263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710680, subject: , status: StatusAccepted, operation: ALTER TABLE PARTITIONS, path: /dc-1/Dir/TableOld 2025-06-25T14:58:31.335549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710680:0, at schemeshard: 72057594046644480 2025-06-25T14:58:31.335576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976710680:0 ProgressState, operation type: TxSplitTablePartition, at tablet# 72057594046644480 2025-06-25T14:58:31.335884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976710680:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 3 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: 
"/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-25T14:58:31.336017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976710680:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 4 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-25T14:58:31.336143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 281474976710680:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 5 TabletType: DataShard FollowerCount: 0 ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 3 BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } BindedChannels { StoragePoolName: "/dc-1:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 } 2025-06-25T14:58:31.336336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710680, at schemeshard: 72057594046644480 2025-06-25T14:58:31.336355Z node 1 :FLAT_TX_SCHEMESHA ... _changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037894, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:34.636617Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901533265532043 RawX2: 4503608217307443 } TabletId: 72075186224037894 State: 4 2025-06-25T14:58:34.636632Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037894, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:34.637990Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:34.638205Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:34.638246Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:34.638273Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:7 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:34.638300Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:7 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:34.644991Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-25T14:58:34.645180Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 7 2025-06-25T14:58:34.645317Z 
node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-25T14:58:34.645401Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046644480 ShardLocalIdx: 6, at schemeshard: 72057594046644480 2025-06-25T14:58:34.645495Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 6 2025-06-25T14:58:34.645582Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-25T14:58:34.645671Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 5 2025-06-25T14:58:34.645734Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T14:58:34.645871Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-25T14:58:34.645947Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T14:58:34.646047Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 7 ShardOwnerId: 72057594046644480 ShardLocalIdx: 7, at schemeshard: 72057594046644480 2025-06-25T14:58:34.646145Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T14:58:34.646238Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 7 ShardOwnerId: 72057594046644480 ShardLocalIdx: 7, at schemeshard: 72057594046644480 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" 2025-06-25T14:58:34.647594Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-25T14:58:34.647618Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-25T14:58:34.647648Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-25T14:58:34.648610Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:6 2025-06-25T14:58:34.648627Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:6 tabletId 72075186224037893 
2025-06-25T14:58:34.648658Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-25T14:58:34.648665Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-25T14:58:34.648681Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T14:58:34.648686Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T14:58:34.648706Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T14:58:34.648721Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:7 2025-06-25T14:58:34.648731Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:7 tabletId 72075186224037894 2025-06-25T14:58:34.648745Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:7 2025-06-25T14:58:34.648921Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037892 not found 2025-06-25T14:58:34.648936Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-25T14:58:34.648946Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-25T14:58:34.648955Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037894 not found 2025-06-25T14:58:34.648976Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037893 not found 2025-06-25T14:58:34.653287Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901533265531502 RawX2: 4503608217307347 } TabletId: 72075186224037888 State: 4 2025-06-25T14:58:34.653337Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037888, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:34.653516Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901533265531839 RawX2: 4503608217307413 } TabletId: 72075186224037891 State: 4 2025-06-25T14:58:34.653537Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:34.653924Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:34.653988Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:34.656165Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-25T14:58:34.656374Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-25T14:58:34.656527Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-25T14:58:34.656631Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-25T14:58:34.656731Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:58:34.656748Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-25T14:58:34.656777Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:58:34.658516Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-25T14:58:34.658543Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-06-25T14:58:34.658739Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-25T14:58:34.658750Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-25T14:58:34.658776Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-25T14:58:34.658786Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-25T14:58:34.659046Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:58:34.662610Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TFlatTest::PathSorting ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix+Nullable+Covered [GOOD] Test command err: Trying to start YDB, gRPC: 14427, MsgBus: 19771 2025-06-25T14:57:16.513084Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901200026113340:2173];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:16.513299Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001af1/r3tmp/tmpWOYHXD/pdisk_1.dat 2025-06-25T14:57:17.072552Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:17.072860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:17.072926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:17.074551Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901200026113191:2080] 1750863436493892 != 1750863436493895 2025-06-25T14:57:17.082748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14427, node 1 2025-06-25T14:57:17.272822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:17.272844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:17.272849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:17.272933Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:57:17.524619Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19771 TClient is connected to server localhost:19771 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:17.948694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:57:17.972025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:17.994644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:18.136920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:18.283846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:18.347384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:19.891173Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901212911016705:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.891282Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.178513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.215108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.249569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.284362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.318755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.396394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.471107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:20.548646Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901217205984664:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.548750Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.548792Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901217205984669:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:20.553446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:20.566479Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901217205984671:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:20.631695Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901217205984722:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:21.510728Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901200026113340:2173];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:21.510785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:21.718902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... ata size 107 row count 3 2025-06-25T14:58:34.217719Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037925 maps to shardIdx: 72057594046644480:40 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 21], pathId map=indexImplPrefixTable, is column=0, is olap=0, RowCount 3, DataSize 107 2025-06-25T14:58:34.217731Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037925, followerId 0 2025-06-25T14:58:34.217790Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:40 with partCount# 1, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:34.217850Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037925 2025-06-25T14:58:34.217895Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 19 shard idx 72057594046644480:38 data size 842 row count 18 2025-06-25T14:58:34.217927Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037926 maps to shardIdx: 72057594046644480:38 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 19], pathId map=indexImplLevelTable, is column=0, is olap=0, RowCount 18, DataSize 842 2025-06-25T14:58:34.217936Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037926, followerId 0 2025-06-25T14:58:34.217972Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:38 with partCount# 1, rowCount# 18, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:34.217991Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037926 2025-06-25T14:58:34.218011Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 20 shard idx 
72057594046644480:39 data size 1820 row count 30 2025-06-25T14:58:34.218043Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037927 maps to shardIdx: 72057594046644480:39 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 20], pathId map=indexImplPostingTable, is column=0, is olap=0, RowCount 30, DataSize 1820 2025-06-25T14:58:34.218053Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037927, followerId 0 2025-06-25T14:58:34.218086Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:39 with partCount# 1, rowCount# 30, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:34.218101Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037927 2025-06-25T14:58:34.218121Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046644480:11 data size 0 row count 0 2025-06-25T14:58:34.218152Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037898 maps to shardIdx: 72057594046644480:11 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 4], pathId map=Logs, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:34.218160Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037898, followerId 0 2025-06-25T14:58:34.218188Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:11 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:34.218202Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037898 2025-06-25T14:58:34.218265Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:58:34.219884Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901377651501063:2166]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:34.219904Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:34.219914Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:58:34.345808Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519901386241436592:2327], Recipient [3:7519901377651501063:2166]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037908 TableLocalId: 5 Generation: 1 Round: 2 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 
RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 116 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186224037908 NodeId: 3 StartTime: 1750863479176 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:58:34.345851Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:58:34.345879Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037908 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0116 2025-06-25T14:58:34.345969Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037908 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:58:34.345990Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099996s, queue# 1 2025-06-25T14:58:34.448499Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901377651501063:2166]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:34.448540Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:34.448552Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-25T14:58:34.448596Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-25T14:58:34.448610Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-25T14:58:34.448657Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:21 data size 0 row count 0 2025-06-25T14:58:34.448707Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037908 maps to shardIdx: 72057594046644480:21 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:58:34.448718Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037908, followerId 0 2025-06-25T14:58:34.448771Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:21 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 
2025-06-25T14:58:34.448807Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037908 2025-06-25T14:58:34.448861Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:58:34.448954Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901377651501063:2166]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:34.448967Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:34.448977Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:58:35.088097Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901377651501063:2166]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:35.088138Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:35.088184Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901377651501063:2166], Recipient [3:7519901377651501063:2166]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:35.088199Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:36.091745Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901377651501063:2166]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:36.091792Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:36.091841Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901377651501063:2166], Recipient [3:7519901377651501063:2166]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:36.091857Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TFlatTest::SelectBigRangePerf >> TObjectStorageListingTest::Split [GOOD] >> TObjectStorageListingTest::SuffixColumns >> TObjectStorageListingTest::TestFilter [GOOD] >> TObjectStorageListingTest::TestSkipShards >> TLocksTest::GoodLock [GOOD] >> TLocksTest::GoodNullLock >> TFlatTest::SplitInvalidPath >> TLocksFatTest::RangeSetBreak [GOOD] >> TLocksFatTest::RangeSetNotBreak >> TFlatTest::LargeProxyReplyRW [GOOD] >> TFlatTest::LargeDatashardReplyDistributed [GOOD] >> TFlatTest::LargeDatashardReplyRW >> TAsyncIndexTests::SplitBothWithReboots[TabletReboots] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::LargeProxyReplyRW [GOOD] Test command err: 2025-06-25T14:58:27.291804Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901502520880084:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:27.291857Z node 1 
:METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206a/r3tmp/tmpaxIsyy/pdisk_1.dat 2025-06-25T14:58:27.730909Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:27.744847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:27.744967Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:27.749313Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:32406 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:28.051107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:28.072781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:28.103132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:28.306752Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:32.294809Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901502520880084:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:32.294858Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:33.613460Z node 1 :TX_PROXY ERROR: datareq.cpp:2703: Actor# [1:7519901528290687392:4137] txid# 281474976711010 MergeResult Result too large TDataReq marker# P18 2025-06-25T14:58:33.613528Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519901528290687392:4137] txid# 281474976711010 RESPONSE Status# ExecResultUnavailable marker# P13c MiniKQLErrors: Query result size limit exceeded. (71692241 > 50331648) proxy error code: ExecResultUnavailable 2025-06-25T14:58:34.274079Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901533416552061:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:34.285676Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206a/r3tmp/tmpoIDaEf/pdisk_1.dat 2025-06-25T14:58:34.457424Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:34.461478Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901533416551839:2080] 1750863514187940 != 1750863514187943 2025-06-25T14:58:34.476207Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:34.476282Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:34.480373Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11835 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:34.711524Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:34.720599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:34.731674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:34.735745Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:35.260175Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:39.258736Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901533416552061:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:39.258802Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:39.716643Z node 2 :TX_PROXY ERROR: datareq.cpp:2703: Actor# [2:7519901554891391869:4135] txid# 281474976716011 MergeResult Result too large TDataReq marker# P18 2025-06-25T14:58:39.716704Z node 2 :TX_PROXY ERROR: datareq.cpp:883: Actor# [2:7519901554891391869:4135] txid# 281474976716011 RESPONSE Status# ExecResultUnavailable marker# P13c MiniKQLErrors: Query result size limit exceeded. 
(71692241 > 50331648) proxy error code: ExecResultUnavailable >> TFlatTest::PathSorting [GOOD] >> TFlatTest::PartBloomFilter >> KqpKv::ReadRows_ExternalBlobs-UseExtBlobsPrecharge [GOOD] >> KqpKv::ReadRows_Decimal >> TFlatTest::SelectBigRangePerf [GOOD] >> TFlatTest::SelectRangeBothLimit >> TObjectStorageListingTest::SuffixColumns [GOOD] >> TObjectStorageListingTest::TestSkipShards [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::SplitBothWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:17.950210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:17.950306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:17.950346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:17.950389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:17.950436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:17.950467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:17.950518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:17.950597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:17.951325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:17.951635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:18.016659Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:18.016724Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:18.017433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:18.027096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:18.029519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:18.029647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:18.035620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:18.035768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:18.036247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.036463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:18.038699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.038857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:18.039670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:18.039724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:18.039796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:18.039831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:18.039877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:18.039994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:18.045547Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:18.155907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:18.156074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.156322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:18.156360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:18.156532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:18.156596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:18.158341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.158468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:18.158593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.158643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:18.158678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:18.158703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:18.160080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.160149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:18.160180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:18.161354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:18.161400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-25T14:51:18.161441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:18.161485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:18.164402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:18.165604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:18.165762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:18.166552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:18.166705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
ionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\001\000\004\000\000\0002\000\000\000" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:58:40.790096Z node 177 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409550][177:1025:2809] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:58:40.790205Z node 177 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409548:2][72075186233409551][177:1026:2809] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:58:40.790280Z node 177 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][177:970:2809] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409550 } 2025-06-25T14:58:40.790356Z node 177 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][177:970:2809] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-25T14:58:40.790476Z node 177 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409550][177:1025:2809] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750863520761650 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750863520761650 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:58:40.790648Z node 177 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409548:2][72075186233409551][177:1026:2809] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 3 Group: 1750863520761650 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:58:40.801155Z node 177 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409550][177:1025:2809] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 2 2025-06-25T14:58:40.801390Z node 177 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][177:970:2809] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409550 } 2025-06-25T14:58:40.802379Z node 177 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409548:2][72075186233409551][177:1026:2809] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-25T14:58:40.802459Z node 177 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409548:2][177:970:2809] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-25T14:58:41.010181Z node 177 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:58:41.010541Z node 177 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 386us result status StatusSuccess 2025-06-25T14:58:41.011516Z node 177 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: 
EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\004\000\000\0002\000\000\000\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TFlatTest::SplitInvalidPath [GOOD] >> TFlatTest::SplitThenMerge >> TFlatTest::RejectByPerShardReadSize [GOOD] >> TFlatTest::RejectByPerRequestSize ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TObjectStorageListingTest::SuffixColumns [GOOD] Test command err: 2025-06-25T14:58:35.538609Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901539165921253:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:35.538660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205f/r3tmp/tmpgI234S/pdisk_1.dat 2025-06-25T14:58:35.896534Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:35.902218Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901539165921235:2080] 1750863515536906 != 1750863515536909 TServer::EnableGrpc on GrpcPort 8147, node 1 2025-06-25T14:58:35.936259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:35.936415Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:35.938134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:35.997496Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:35.997518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:35.997524Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:35.997636Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:1281 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:36.398565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:36.413601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:36.436968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:36.447943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:36.574042Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls request: /dc-1/Dir/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863516640 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "Hash" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Path" ... (TRUNCATED) waiting... 
TClient::Ls request: /dc-1/Dir/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863516640 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "Hash" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Path" ... (TRUNCATED) 2025-06-25T14:58:39.041983Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901554972101860:2081];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205f/r3tmp/tmpDSQPlP/pdisk_1.dat 2025-06-25T14:58:39.117892Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:39.250847Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:39.256546Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901554972101815:2080] 1750863519013607 != 1750863519013610 2025-06-25T14:58:39.266221Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:39.266293Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:39.268303Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28782, node 2 2025-06-25T14:58:39.360745Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:39.360766Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:39.360775Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:39.360882Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14960 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:39.606447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:39.611109Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:39.631771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:40.052858Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:40.151669Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553163, Sender [2:7519901559267070418:2444], Recipient [2:7519901554972102519:2275]: NKikimrTxDataShard.TEvObjectStorageListingRequest TableId: 3 SerializedKeyPrefix: "\002\000\010\000\000\0002\000\000\000\000\000\000\000\010\000\000\000Bucket50" PathColumnPrefix: "Music/AC DC/" PathColumnDelimiter: "/" SerializedStartAfterKeySuffix: "\002\000\037\000\000\000Music/AC DC/Shoot to Thrill.mp3\010\000\000\000B\000\000\000\000\000\000\000" ColumnsToReturn: 3 ColumnsToReturn: 4 ColumnsToReturn: 6 MaxKeys: 10 2025-06-25T14:58:40.151704Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvDataShard::TEvObjectStorageListingRequest 2025-06-25T14:58:40.151917Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:4, value:"2\0\0\0\0\0\0\0") (type:4608, value:"Bucket50") (type:4608, value:"Music/AC DC/Shoot to Thrill.mp3") (type:4, value:"B\0\0\0\0\0\0\0")), end at key ((type:4, value:"2\0\0\0\0\0\0\0") (type:4608, value:"Bucket50") (type:4608, value:"Music/AC DC0") (type:0)) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-25T14:58:40.152085Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Shoot to Thrill.mp3" -> (Utf8 : Music/AC DC/Shoot to Thrill.mp3, Uint64 : 77, String : ) 2025-06-25T14:58:40.152123Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Shoot to Thrill.mp3" -> (Utf8 : Music/AC DC/Shoot to Thrill.mp3, Uint64 : 88, String : ) 2025-06-25T14:58:40.152153Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Shoot to Thrill.mp3" -> (Utf8 : Music/AC DC/Shoot to Thrill.mp3, Uint64 : 666, String : ) 2025-06-25T14:58:40.152178Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Thunderstruck.mp3" -> (Utf8 : Music/AC DC/Thunderstruck.mp3, Uint64 : 1, String : ) 2025-06-25T14:58:40.152204Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Thunderstruck.mp3" -> (Utf8 : Music/AC DC/Thunderstruck.mp3, Uint64 : 66, String : ) 2025-06-25T14:58:40.152272Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 5 common prefixes: 0 2025-06-25T14:58:40.166335Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553163, Sender [2:7519901559267070422:2445], Recipient [2:7519901554972102519:2275]: NKikimrTxDataShard.TEvObjectStorageListingRequest TableId: 3 SerializedKeyPrefix: "\002\000\010\000\000\0002\000\000\000\000\000\000\000\010\000\000\000Bucket50" PathColumnPrefix: "Music/AC DC/" PathColumnDelimiter: "/" SerializedStartAfterKeySuffix: "\001\000\037\000\000\000Music/AC DC/Shoot to Thrill.mp3" ColumnsToReturn: 3 ColumnsToReturn: 4 ColumnsToReturn: 5 MaxKeys: 10 2025-06-25T14:58:40.166361Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvDataShard::TEvObjectStorageListingRequest 2025-06-25T14:58:40.166484Z node 2 :TX_DATASHARD DEBUG: 
datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:4, value:"2\0\0\0\0\0\0\0") (type:4608, value:"Bucket50") (type:4608, value:"Music/AC DC/Shoot to Thrill.mp3")), end at key ((type:4, value:"2\0\0\0\0\0\0\0") (type:4608, value:"Bucket50") (type:4608, value:"Music/AC DC0") (type:0)) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-25T14:58:40.166615Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Thunderstruck.mp3" -> (Utf8 : Music/AC DC/Thunderstruck.mp3, Uint64 : 1, Uint64 : 10) 2025-06-25T14:58:40.166653Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Thunderstruck.mp3" -> (Utf8 : Music/AC DC/Thunderstruck.mp3, Uint64 : 66, Uint64 : 10) 2025-06-25T14:58:40.166714Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 2 common prefixes: 0 >> TFlatTest::CopyTableAndCompareColumnsSchema [GOOD] >> TFlatTest::CopyTableAndDropCopy >> KikimrIcGateway::TestLoadExternalTable >> KikimrProvider::TestFillAuthPropertiesNone [GOOD] >> KikimrIcGateway::TestCreateExternalTable >> KikimrProvider::TestFillAuthPropertiesServiceAccount [GOOD] >> KikimrProvider::TestFillAuthPropertiesMdbBasic [GOOD] >> TLocksTest::BrokenLockErase [GOOD] >> TLocksTest::BrokenDupLock >> ReadAttributesUtils::AttributesGatheringEmpry [GOOD] >> ReadAttributesUtils::AttributesGatheringFilter [GOOD] >> ReadAttributesUtils::AttributesGatheringRecursive [GOOD] >> KikimrProvider::TestFillAuthPropertiesBasic [GOOD] >> KikimrProvider::TestFillAuthPropertiesAws [GOOD] >> KikimrProvider::AlterTableAddIndexWithTableSettings [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TObjectStorageListingTest::TestSkipShards [GOOD] Test command err: 2025-06-25T14:58:35.861914Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901539160782867:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:35.865306Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205e/r3tmp/tmpOMWmMQ/pdisk_1.dat 2025-06-25T14:58:36.341229Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:36.342066Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901539160782767:2080] 1750863515849606 != 1750863515849609 TServer::EnableGrpc on GrpcPort 16216, node 1 2025-06-25T14:58:36.347424Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:36.347509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:36.350095Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:36.512807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:36.512826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-06-25T14:58:36.512833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:36.512925Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15076 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:36.832289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:36.866336Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:58:36.880450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:36.907589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:36.923608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205e/r3tmp/tmpUVgYm9/pdisk_1.dat TServer::EnableGrpc on GrpcPort 62108, node 2 TClient is connected to server localhost:12108 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... waiting... waiting... >> TLocksFatTest::PointSetBreak [GOOD] >> TLocksFatTest::LocksLimit |90.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrProvider::TestFillAuthPropertiesMdbBasic [GOOD] >> KikimrIcGateway::TestCreateSameExternalTable |90.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrProvider::AlterTableAddIndexWithTableSettings [GOOD] |90.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> ReadAttributesUtils::AttributesGatheringRecursive [GOOD] >> TFlatTest::PartBloomFilter [GOOD] >> TFlatTest::SelectRangeBothLimit [GOOD] >> KikimrIcGateway::TestLoadTableMetadata >> TFlatTest::SplitThenMerge [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::PartBloomFilter [GOOD] Test command err: 2025-06-25T14:58:38.612851Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901551807606179:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:38.624751Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205a/r3tmp/tmpkYncno/pdisk_1.dat 2025-06-25T14:58:39.160412Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901551807606136:2080] 1750863518598091 != 1750863518598094 2025-06-25T14:58:39.161366Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:39.163656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:39.163748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:39.165377Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29378 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:39.533713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 2025-06-25T14:58:39.564880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-25T14:58:39.589194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 waiting... waiting... TClient::Ls request: /dc-1 2025-06-25T14:58:39.625237Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750863519587 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 15 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 15 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 13 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "A" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976710663 CreateStep: 1750863519657 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "B" PathId: 4 Sche... 
(TRUNCATED) 2025-06-25T14:58:41.961498Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901566124366439:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:41.961538Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205a/r3tmp/tmpdVAUPA/pdisk_1.dat 2025-06-25T14:58:42.108870Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:42.116783Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:42.116844Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:42.120963Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11582 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:42.302345Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:42.309316Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:58:42.316084Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:42.773821Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715719:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) waiting... 2025-06-25T14:58:42.987604Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SelectRangeBothLimit [GOOD] Test command err: 2025-06-25T14:58:39.047537Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901555298133365:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:39.047615Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002059/r3tmp/tmpHdsfqL/pdisk_1.dat 2025-06-25T14:58:39.607695Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:39.641695Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:39.641818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:39.644742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8691 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:39.942601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:39.956735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:39.973140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:39.984649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-25T14:58:39.989599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:40.058342Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; insert finished 10867 usec 13810 usec 13706 usec 10335 usec 10383 usec 10121 usec 9798 usec 9346 usec 9597 usec 9601 usec test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002059/r3tmp/tmp1g0Jkg/pdisk_1.dat 2025-06-25T14:58:42.640505Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:42.641080Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:42.641151Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:42.645878Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:42.648834Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:9871 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:42.852207Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:42.860453Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:42.877611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:43.400433Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TLocksTest::Range_GoodLock1 [GOOD] >> KikimrIcGateway::TestCreateExternalTable [GOOD] >> KikimrIcGateway::TestCreateResourcePool >> TLocksTest::CK_Range_BrokenLock [GOOD] >> TLocksTest::CK_Range_BrokenLockInf >> TLocksFatTest::RangeSetNotBreak [GOOD] >> TFlatTest::CopyTableAndDropCopy [GOOD] >> TLocksTest::Range_BrokenLock1 [GOOD] >> TObjectStorageListingTest::ManyDeletes [GOOD] >> TLocksTest::CK_BrokenLock [GOOD] >> test_select.py::TestDML::test_select[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> KikimrIcGateway::TestCreateSameExternalTable [GOOD] >> KikimrIcGateway::TestDropExternalTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SplitThenMerge [GOOD] Test command err: 2025-06-25T14:58:40.112398Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901560584580396:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:40.112449Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002058/r3tmp/tmpp9HKRp/pdisk_1.dat 2025-06-25T14:58:40.531979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:40.532096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:40.537450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:40.572392Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:40.576704Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901560584580299:2080] 1750863520107423 != 1750863520107426 TClient is connected to server localhost:2914 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:40.886030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:40.899787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:40.918183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:40.928834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation_split_merge.cpp:816: TSplitMerge Propose failed StatusNameConflict Check failed: path: '/dc-1/Dir1', error: path is not a table (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges), tableStr: /dc-1/Dir1, tableId: , opId: 281474976715659:0, at schemeshard: 72057594046644480, request: TablePath: "/dc-1/Dir1" SourceTabletId: 100500 SplitBoundary { KeyPrefix { Tuple { Optional { Uint32: 42 } } } } 2025-06-25T14:58:40.931810Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901560584580863:2298] txid# 281474976715659, issues: { message: "Check failed: path: \'/dc-1/Dir1\', error: path is not a table (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } Error 128: Check failed: path: '/dc-1/Dir1', error: path is not a table (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges) 2025-06-25T14:58:43.351209Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901573751881521:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:43.351281Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002058/r3tmp/tmpIs9Gih/pdisk_1.dat 2025-06-25T14:58:43.637550Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:43.649444Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: 
Unknown -> Disconnected 2025-06-25T14:58:43.649514Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:43.653168Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29606 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:43.921457Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:43.929375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:43.936401Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:43.943961Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:44.213370Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-25T14:58:44.221881Z node 2 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-25T14:58:44.271426Z node 2 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-25T14:58:44.279661Z node 2 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750863524088 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) 2025-06-25T14:58:44.343062Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:44.344733Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710676 released its data 2025-06-25T14:58:44.344898Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:44.346103Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710676 released its data 2025-06-25T14:58:44.349496Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:44.350130Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710676 at 72075186224037888 restored its data 2025-06-25T14:58:44.350940Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710676 released its data 2025-06-25T14:58:44.351071Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:44.351486Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710676 at 72075186224037889 restored its data 2025-06-25T14:58:44.352134Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976710676 released its data 2025-06-25T14:58:44.353606Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:44.354018Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710676 at 72075186224037888 restored its data 2025-06-25T14:58:44.354659Z node 2 :TX_DATASHARD DEBUG: 
datashard_active_transaction.cpp:561: tx 281474976710676 released its data 2025-06-25T14:58:44.354760Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:44.355120Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976710676 at 72075186224037889 restored its data 2025-06-25T14:58:44.355734Z node 2 :T ... -25T14:58:45.051600Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976710693:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7519901578046850021 RawX2: 4503608217307439 } Origin: 72075186224037894 State: 5 TxId: 281474976710693 Step: 0 Generation: 1 2025-06-25T14:58:45.051616Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710693:0, shardIdx: 72057594046644480:7, shard: 72075186224037894, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-25T14:58:45.051629Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976710693:0, at schemeshard: 72057594046644480 2025-06-25T14:58:45.051645Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710693:0, datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-06-25T14:58:45.051656Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710693:0, datashard: 72075186224037894, at schemeshard: 72057594046644480 2025-06-25T14:58:45.051669Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710693:0 129 -> 240 2025-06-25T14:58:45.051978Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710693:0, at schemeshard: 72057594046644480 2025-06-25T14:58:45.052027Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710693:0, at schemeshard: 72057594046644480 2025-06-25T14:58:45.052084Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710693:0, at schemeshard: 72057594046644480 2025-06-25T14:58:45.052103Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 281474976710693:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T14:58:45.052410Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-25T14:58:45.052527Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710693:0 progress is 1/1 2025-06-25T14:58:45.052537Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710693 ready parts: 1/1 2025-06-25T14:58:45.052553Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710693:0 progress is 1/1 2025-06-25T14:58:45.052564Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation 
IsReadyToDone TxId: 281474976710693 ready parts: 1/1 2025-06-25T14:58:45.052576Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710693, ready parts: 1/1, is published: true 2025-06-25T14:58:45.052616Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:7519901582341817536:2388] message: TxId: 281474976710693 2025-06-25T14:58:45.052633Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710693 ready parts: 1/1 2025-06-25T14:58:45.052653Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710693:0 2025-06-25T14:58:45.052665Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710693:0 2025-06-25T14:58:45.052761Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T14:58:45.053206Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710693 datashard 72075186224037889 state PreOffline 2025-06-25T14:58:45.053244Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-25T14:58:45.053337Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710693 datashard 72075186224037894 state PreOffline 2025-06-25T14:58:45.053357Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037894 Got TEvSchemaChangedResult from SS at 72075186224037894 TClient::Ls request: /dc-1/Dir/TableOld 2025-06-25T14:58:45.060795Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:58:45.060876Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-06-25T14:58:45.062013Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037894 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:58:45.062063Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037894 Initiating switch from PreOffline to Offline state 2025-06-25T14:58:45.063492Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:58:45.063561Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037894 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:58:45.063932Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901573751882110 RawX2: 4503608217307347 } TabletId: 72075186224037889 State: 4 2025-06-25T14:58:45.063971Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:45.064097Z node 2 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519901578046850021 RawX2: 4503608217307439 } TabletId: 72075186224037894 State: 4 2025-06-25T14:58:45.064111Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037894, state: Offline, at schemeshard: 72057594046644480 2025-06-25T14:58:45.064396Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:45.064443Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:7 hive 72057594037968897 at ss 72057594046644480 2025-06-25T14:58:45.065184Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-25T14:58:45.065209Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037894 state Offline 2025-06-25T14:58:45.068711Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-25T14:58:45.068953Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-25T14:58:45.069129Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 7 ShardOwnerId: 72057594046644480 ShardLocalIdx: 7, at schemeshard: 72057594046644480 2025-06-25T14:58:45.069256Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-25T14:58:45.069381Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T14:58:45.069401Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-25T14:58:45.069438Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T14:58:45.070558Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-25T14:58:45.070572Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-25T14:58:45.070603Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:7 2025-06-25T14:58:45.070615Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:7 tabletId 72075186224037894 2025-06-25T14:58:45.070645Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T14:58:45.072371Z node 2 :TX_DATASHARD 
INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-25T14:58:45.072411Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037889, clientId# [2:7519901578046849513:2392], serverId# [2:7519901578046849514:2393], sessionId# [0:0:0] 2025-06-25T14:58:45.072432Z node 2 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037894 reason = ReasonStop 2025-06-25T14:58:45.072448Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:7519901578046850133:2791], serverId# [2:7519901578046850134:2792], sessionId# [0:0:0] 2025-06-25T14:58:45.073473Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-25T14:58:45.073501Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037894 not found 2025-06-25T14:58:45.075096Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-25T14:58:45.075156Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 2025-06-25T14:58:45.076281Z node 2 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037894 2025-06-25T14:58:45.076353Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037894 TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found"
>> TFlatTest::RejectByPerRequestSize [GOOD]
>> TTxLocatorTest::TestAllocateAllByPieces
>> TTxLocatorTest::TestAllocateAll
>> TTxLocatorTest::TestZeroRange
>> TTxLocatorTest::TestImposibleSize
>> TTxLocatorTest::Boot
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_GoodLock1 [GOOD]
Test command err: 2025-06-25T14:58:07.784097Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901419349235551:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.784153Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208b/r3tmp/tmpzU0VT6/pdisk_1.dat 2025-06-25T14:58:08.290087Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:08.291552Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901419349235436:2080] 1750863487766827 != 1750863487766830 2025-06-25T14:58:08.301916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.302030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.311411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31452 WaitRootIsUp 'dc-1'...
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:08.723539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:08.765139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:08.775453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:08.780002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.842494Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:08.927705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.991477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:11.117756Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901434554550548:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:11.117793Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208b/r3tmp/tmpoPI3fl/pdisk_1.dat 2025-06-25T14:58:11.248373Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901434554550527:2080] 1750863491108664 != 1750863491108667 2025-06-25T14:58:11.254514Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:11.279358Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:11.279439Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:11.280894Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9619 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:11.453789Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:11.476396Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:11.533520Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:11.575386Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:14.137712Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901446978219587:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:14.137765Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208b/r3tmp/tmpNntiN7/pdisk_1.dat 2025-06-25T14:58:14.229311Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901446978219568:2080] 1750863494137396 != 1750863494137399 2025-06-25T14:58:14.263444Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:14.285703Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:14.285798Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:14.287230Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61633 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 
2025-06-25T14:58:14.445699Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:14.466243Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is un ... VolatileState: Unknown -> Disconnected 2025-06-25T14:58:33.597355Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:33.599928Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15773 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:33.920137Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:33.943902Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:34.020379Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:34.077629Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:38.023850Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901553467156795:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:38.027576Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208b/r3tmp/tmpRCrbSy/pdisk_1.dat 2025-06-25T14:58:38.245766Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:38.262379Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519901553467156777:2080] 1750863518018950 != 1750863518018953 2025-06-25T14:58:38.262537Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:38.262626Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:38.266008Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7287 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:38.608522Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:38.615546Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:38.629394Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:38.634488Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:38.719418Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:38.784041Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:42.757283Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901570176282471:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:42.757407Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208b/r3tmp/tmpywuQeY/pdisk_1.dat 2025-06-25T14:58:42.903959Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:42.921799Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:42.921904Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:42.925387Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19652 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:43.220445Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:43.225831Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:43.239800Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:43.301739Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:43.425220Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting...
>> TTxLocatorTest::TestZeroRange [GOOD]
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::CopyTableAndDropCopy [GOOD]
Test command err: 2025-06-25T14:58:27.940126Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901502665744196:2128];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:27.940183Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002067/r3tmp/tmp8Duu0A/pdisk_1.dat 2025-06-25T14:58:28.438504Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:28.440146Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901502665744108:2080] 1750863507935598 != 1750863507935601 2025-06-25T14:58:28.451769Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:28.451864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:28.482651Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7785 WaitRootIsUp 'dc-1'...
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:28.745647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:28.780641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:28.784797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... TClient::Ls request: /dc-1/Dir/Table_1 2025-06-25T14:58:28.969730Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750863508898 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_1" Columns { Name: "col_0" Type: "Int32" TypeId: 1 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "col_0" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) TClient::Ls request: /dc-1/Dir/Table_1_Copy TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_1_Copy" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1750863508996 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_1_Copy" Columns { Name: "col_0" Type: "Int32" TypeId: 1 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "col_0" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot... (TRUNCATED) 2025-06-25T14:58:29.024783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... TClient::Ls request: /dc-1/Dir/Table_2 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_2" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710661 CreateStep: 1750863509101 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_2" Columns { Name: "col_0" Type: "Int32" TypeId: 1 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "col_1" Type: "Int32" TypeId: 1 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "col_0" ... (TRUNCATED) TClient::Ls request: /dc-1/Dir/Table_2_Copy TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_2_Copy" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710662 CreateStep: 1750863509171 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_2_Copy" Columns { Name: "col_0" Type: "Int32" TypeId: 1 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "col_1" Type: "Int32" TypeId: 1 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: ... (TRUNCATED) 2025-06-25T14:58:29.196384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
TClient::Ls request: /dc-1/Dir/Table_3 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_3" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710663 CreateStep: 1750863509318 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_3" Columns { Name: "col_0" Type: "Int32" TypeId: 1 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "col_1" Type: "Int32" TypeId: 1 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "col... (TRUNCATED) TClient::Ls request: /dc-1/Dir/Table_3_Copy TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_3_Copy" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710664 CreateStep: 1750863509367 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table_3_Copy" Columns { Name: "col_0" Type: "Int32" TypeId: 1 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "col_1" Type: "Int32" TypeId: 1 Id: 2 NotNull: false IsBuildInProgress: false } Columns { ... (TRUNCATED) 2025-06-25T14:58:29.379685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... TClient::Ls request: /dc-1/Dir/Table_4 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table_4" PathId: 9 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710665 CreateStep: 1750863509465 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Versi ... 
athId [OwnerId: 72057594046644480, LocalPathId: 6] was 4 2025-06-25T14:58:45.802829Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710686 datashard 72075186224037895 state Ready 2025-06-25T14:58:45.802871Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037895 Got TEvSchemaChangedResult from SS at 72075186224037895 2025-06-25T14:58:45.802988Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976710686 datashard 72075186224037894 state Ready 2025-06-25T14:58:45.803018Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037894 Got TEvSchemaChangedResult from SS at 72075186224037894 2025-06-25T14:58:45.809525Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:7519901582087425883:3005], serverId# [2:7519901582087425884:3006], sessionId# [0:0:0] 2025-06-25T14:58:45.809624Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-25T14:58:45.810999Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-25T14:58:45.811051Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-25T14:58:45.817728Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037895, clientId# [2:7519901582087425893:3012], serverId# [2:7519901582087425894:3013], sessionId# [0:0:0] 2025-06-25T14:58:45.817863Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-25T14:58:45.819287Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-25T14:58:45.819339Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-25T14:58:45.822264Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-25T14:58:45.823919Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-25T14:58:45.823967Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-25T14:58:45.826882Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-25T14:58:45.831036Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-25T14:58:45.831107Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-25T14:58:45.838810Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-25T14:58:45.840436Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-25T14:58:45.840489Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-25T14:58:45.841732Z node 2 :OPS_COMPACT INFO: Compact{72075186224037894.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put 
Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-25T14:58:45.842270Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037894, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-25T14:58:45.842305Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037894, table# 1001, finished edge# 0, front# 0 2025-06-25T14:58:45.844302Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-25T14:58:45.845729Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-25T14:58:45.845777Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-25T14:58:45.848997Z node 2 :OPS_COMPACT INFO: Compact{72075186224037895.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.001s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-25T14:58:45.849002Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-25T14:58:45.850101Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037895, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-25T14:58:45.850147Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037895, table# 1001, finished edge# 0, front# 0 2025-06-25T14:58:45.850760Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-25T14:58:45.850819Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-25T14:58:45.854028Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-25T14:58:45.855104Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-25T14:58:45.855148Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-25T14:58:45.858022Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-25T14:58:45.859120Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-25T14:58:45.859160Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-25T14:58:45.862034Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-25T14:58:45.863178Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-25T14:58:45.863220Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-25T14:58:45.866062Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-25T14:58:45.867904Z node 2 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-25T14:58:45.867953Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-25T14:58:45.870604Z node 2 :OPS_COMPACT INFO: Compact{72075186224037894.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.003s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-25T14:58:45.871057Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037894, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-25T14:58:45.871078Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037894, table# 1001, finished edge# 0, front# 0 2025-06-25T14:58:45.871256Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-25T14:58:45.872646Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-25T14:58:45.872689Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-25T14:58:45.875314Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-25T14:58:45.877524Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-25T14:58:45.877569Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-25T14:58:45.881733Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-25T14:58:45.883492Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-25T14:58:45.883534Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 2025-06-25T14:58:45.886101Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-25T14:58:45.887414Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-25T14:58:45.887455Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-25T14:58:45.890018Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037895 2025-06-25T14:58:45.891560Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037895 2025-06-25T14:58:45.891605Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037895 Check that tablet 72075186224037892 was deleted 2025-06-25T14:58:45.892473Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037892) Check that tablet 72075186224037893 was deleted Check that tablet 72075186224037888 was deleted 2025-06-25T14:58:45.893026Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037893) 
2025-06-25T14:58:45.893439Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) Check that tablet 72075186224037889 was deleted Check that tablet 72075186224037890 was deleted Check that tablet 72075186224037891 was deleted 2025-06-25T14:58:45.893718Z node 2 :OPS_COMPACT INFO: Compact{72075186224037895.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 2025-06-25T14:58:45.893930Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037889) 2025-06-25T14:58:45.894304Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037890) 2025-06-25T14:58:45.894728Z node 2 :HIVE WARN: hive_impl.cpp:1955: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037891) 2025-06-25T14:58:45.895400Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037895, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-25T14:58:45.895417Z node 2 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037895, table# 1001, finished edge# 0, front# 0
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksFatTest::RangeSetNotBreak [GOOD]
Test command err: 2025-06-25T14:58:32.145727Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901525448801556:2183];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:32.145805Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002060/r3tmp/tmpkXqsPE/pdisk_1.dat 2025-06-25T14:58:32.551747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:32.551849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:32.555230Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:32.603937Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:32.608513Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901525448801410:2080] 1750863512098355 != 1750863512098358 TClient is connected to server localhost:15363 WaitRootIsUp 'dc-1'...
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:32.926999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:32.952463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:32.967762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:32.971964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:33.147910Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:33.166611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:33.217810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:37.134943Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901525448801556:2183];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:37.135166Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:40.099119Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901560407494068:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:40.110171Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002060/r3tmp/tmpCiMiZo/pdisk_1.dat 2025-06-25T14:58:40.367804Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:40.367891Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:40.368702Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:40.369974Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901560407493884:2080] 1750863520093206 != 1750863520093209 2025-06-25T14:58:40.380776Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30246 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:40.597543Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:40.613769Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:40.617428Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:40.699092Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:40.759579Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:41.088415Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:45.100500Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901560407494068:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:45.100575Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; >> TTxLocatorTest::TestImposibleSize [GOOD] >> TTxLocatorTest::Boot [GOOD] >> TTxLocatorTest::TestAllocateAllByPieces [GOOD] >> TTxLocatorTest::TestAllocateAll [GOOD] |90.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TObjectStorageListingTest::ManyDeletes [GOOD] Test command err: 2025-06-25T14:58:21.928746Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901480195310781:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:21.928942Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206f/r3tmp/tmpojzlOz/pdisk_1.dat 2025-06-25T14:58:22.420856Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901480195310599:2080] 1750863501855218 != 1750863501855221 2025-06-25T14:58:22.425088Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:22.434284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:22.434386Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:22.435948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7999, node 1 2025-06-25T14:58:22.668945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:22.668969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:22.668982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:22.669090Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:22.848695Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24353 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:23.260349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:23.303718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:26.916496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901480195310781:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:26.916546Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206f/r3tmp/tmpJ7T6g6/pdisk_1.dat 2025-06-25T14:58:28.813073Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901506992657445:2080] 1750863508629370 != 1750863508629373 2025-06-25T14:58:28.813129Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:28.825532Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:28.828552Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:28.828634Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:28.833688Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17667, node 2 2025-06-25T14:58:28.896815Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:28.896833Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:28.896841Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:28.896950Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14115 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:29.209469Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:29.214542Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:29.232299Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:29.685779Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; .. 2025-06-25T14:58:39.296185Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037890 2025-06-25T14:58:39.297050Z node 2 :TX_DATASHARD DEBUG: check_data_tx_unit.cpp:313: Prepared DataTx transaction txId 281474976716500 at tablet 72075186224037890 2025-06-25T14:58:39.297337Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037891 2025-06-25T14:58:39.297948Z node 2 :TX_DATASHARD DEBUG: check_data_tx_unit.cpp:313: Prepared DataTx transaction txId 281474976716500 at tablet 72075186224037891 2025-06-25T14:58:39.298171Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037892 2025-06-25T14:58:39.298766Z node 2 :TX_DATASHARD DEBUG: check_data_tx_unit.cpp:313: Prepared DataTx transaction txId 281474976716500 at tablet 72075186224037892 2025-06-25T14:58:39.298961Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:39.299697Z node 2 :TX_DATASHARD DEBUG: check_data_tx_unit.cpp:313: Prepared DataTx transaction txId 281474976716500 at tablet 72075186224037889 2025-06-25T14:58:39.300644Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037890 2025-06-25T14:58:39.300746Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037891 2025-06-25T14:58:39.300811Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037892 2025-06-25T14:58:39.300843Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-25T14:58:39.309561Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976716500 at step 1750863519349 at tablet 72075186224037892 { Transactions { TxId: 281474976716500 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750863519349 MediatorID: 72057594046382081 TabletID: 72075186224037892 } 2025-06-25T14:58:39.309572Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976716500 at step 1750863519349 at tablet 72075186224037889 { Transactions { TxId: 281474976716500 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750863519349 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-06-25T14:58:39.309621Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:58:39.309771Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at 
tablet# 72075186224037892 2025-06-25T14:58:39.309794Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:58:39.309814Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:58:39.309842Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1750863519349:281474976716500] in PlanQueue unit at 72075186224037889 2025-06-25T14:58:39.309881Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:595: LoadTxDetails at 72075186224037889 got data tx from cache 17508635193 ... 224037889 2025-06-25T14:58:47.472154Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:58:47.472330Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037891 step# 1750863527518} 2025-06-25T14:58:47.472359Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-06-25T14:58:47.472392Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750863527518 : 281474976716911] from 72075186224037891 at tablet 72075186224037891 send result to client [2:7519901588597050232:4040], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T14:58:47.472407Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-25T14:58:47.472600Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 1750863527518} 2025-06-25T14:58:47.472645Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-25T14:58:47.472765Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037889 restored its data 2025-06-25T14:58:47.473236Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037890 restored its data 2025-06-25T14:58:47.473489Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976716911 released its data 2025-06-25T14:58:47.473515Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:58:47.473704Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037892 step# 1750863527518} 2025-06-25T14:58:47.473749Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-25T14:58:47.474040Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976716911 released its data 2025-06-25T14:58:47.474071Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:58:47.474345Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037892 restored its data 2025-06-25T14:58:47.474856Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:58:47.475044Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976716911 released its data 
2025-06-25T14:58:47.475071Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:58:47.475202Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-25T14:58:47.475504Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037889 restored its data 2025-06-25T14:58:47.475754Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037890 restored its data 2025-06-25T14:58:47.476341Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:58:47.476524Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-25T14:58:47.476971Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 281474976716911 released its data 2025-06-25T14:58:47.476993Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:58:47.477045Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037892 restored its data 2025-06-25T14:58:47.477650Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:58:47.478281Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T14:58:47.478329Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750863527518 : 281474976716911] from 72075186224037890 at tablet 72075186224037890 send result to client [2:7519901588597050232:4040], exec latency: 4 ms, propose latency: 7 ms 2025-06-25T14:58:47.478355Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T14:58:47.479639Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-25T14:58:47.479696Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750863527518 : 281474976716911] from 72075186224037892 at tablet 72075186224037892 send result to client [2:7519901588597050232:4040], exec latency: 6 ms, propose latency: 8 ms 2025-06-25T14:58:47.479724Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-25T14:58:47.488691Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:58:47.489529Z node 2 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 281474976716911 at 72075186224037889 restored its data 2025-06-25T14:58:47.494461Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:58:47.497399Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:58:47.497462Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1750863527518 : 281474976716911] from 72075186224037889 at tablet 72075186224037889 send result to client [2:7519901588597050232:4040], exec latency: 24 ms, 
propose latency: 27 ms 2025-06-25T14:58:47.497499Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:58:47.514743Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037889 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-25T14:58:47.515122Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037889 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 1 last path: "" contents: 0 common prefixes: 0 2025-06-25T14:58:47.515326Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037889 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 2 last path: "" contents: 0 common prefixes: 0 2025-06-25T14:58:47.515521Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037889 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 3 last path: "" contents: 0 common prefixes: 0 2025-06-25T14:58:47.515607Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037889 S3 Listing: finished status: 0 description: "" contents: 0 common prefixes: 1 2025-06-25T14:58:47.515792Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037891 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-25T14:58:47.515859Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037891 S3 Listing: finished status: 0 description: "" contents: 0 common prefixes: 0 2025-06-25T14:58:47.515989Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037892 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-25T14:58:47.516341Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037892 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 1 last path: "" contents: 0 common prefixes: 0 
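The S3 Listing entries in this trace show a delimiter-style listing over a key range: each pass resumes from the last path it returned ("restarted: N last path: ..."), keys in the range either become contents or are collapsed into a common prefix at the first delimiter past the requested prefix, and the counts accumulate across restarts until the final "finished" entry. The C++ below is only a rough sketch of that pattern, with invented names and a made-up page size; it is not the datashard__object_storage_listing.cpp implementation. Only the example paths (Godfather.avi, House of Cards/Season 1/..., Terminator 2.avi) are taken from the log.

// Illustrative sketch of delimiter-based listing with a resume point
// ("last path"), as seen in the S3 Listing entries above.
// All names here are assumptions for illustration only.
#include <set>
#include <string>
#include <vector>
#include <iostream>

struct ListingResult {
    std::vector<std::string> Contents;        // full object paths returned this pass
    std::set<std::string>    CommonPrefixes;  // "directories" collapsed at the delimiter
    std::string              LastPath;        // resume point for the next pass
    bool                     Finished = false;
};

ListingResult ListPage(const std::vector<std::string>& sortedKeys,
                       const std::string& prefix,
                       char delimiter,
                       const std::string& startAfter,
                       size_t pageSize) {
    ListingResult res;
    for (const auto& key : sortedKeys) {
        if (key.compare(0, prefix.size(), prefix) != 0) continue;  // outside the range
        if (!startAfter.empty() && key <= startAfter) continue;    // already returned earlier
        auto pos = key.find(delimiter, prefix.size());
        if (pos != std::string::npos) {
            res.CommonPrefixes.insert(key.substr(0, pos + 1));     // collapse under the delimiter
        } else {
            res.Contents.push_back(key);
        }
        res.LastPath = key;
        if (res.Contents.size() + res.CommonPrefixes.size() >= pageSize) {
            return res;  // caller restarts from LastPath ("restarted: N" in the log)
        }
    }
    res.Finished = true;
    return res;
}

int main() {
    // Paths borrowed from the trace; sorted order is required for the resume check.
    std::vector<std::string> keys = {
        "/Videos/Godfather.avi",
        "/Videos/House of Cards/Season 1/Chapter 1.avi",
        "/Videos/House of Cards/Season 1/Chapter 2.avi",
        "/Videos/Terminator 2.avi",
    };
    std::string last;
    size_t contents = 0;
    std::set<std::string> prefixes;
    bool finished = false;
    while (!finished) {
        ListingResult page = ListPage(keys, "/Videos/", '/', last, 2);
        contents += page.Contents.size();
        prefixes.insert(page.CommonPrefixes.begin(), page.CommonPrefixes.end());
        last = page.LastPath;
        finished = page.Finished;
    }
    std::cout << "contents: " << contents
              << " common prefixes: " << prefixes.size() << "\n";
    return 0;
}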
2025-06-25T14:58:47.516691Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037892 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 2 last path: "" contents: 0 common prefixes: 0 2025-06-25T14:58:47.517035Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037892 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/Godfather.avi") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 3 last path: "/Videos/Godfather.avi" contents: 2 common prefixes: 0 2025-06-25T14:58:47.517357Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037892 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/House of Cards/Season 1/Chapter 1.avi") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 4 last path: "/Videos/House of Cards/Season 1/Chapter 1.avi" contents: 3 common prefixes: 1 2025-06-25T14:58:47.519128Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037892 S3 Listing: start at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos/Terminator 2.avi") (type:0)), end at key ((type:4, value:"d\0\0\0\0\0\0\0") (type:4608, value:"Bucket100") (type:4608, value:"/Videos0") (type:0)) restarted: 5 last path: "/Videos/Terminator 2.avi" contents: 4 common prefixes: 1 2025-06-25T14:58:47.519271Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037892 S3 Listing: finished status: 0 description: "" contents: 4 common prefixes: 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::RejectByPerRequestSize [GOOD] Test command err: 2025-06-25T14:58:27.122112Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901504969147441:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:27.122157Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002069/r3tmp/tmp47ccJU/pdisk_1.dat 2025-06-25T14:58:27.483217Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:27.484561Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901504969147420:2080] 1750863507120540 != 1750863507120543 2025-06-25T14:58:27.496637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:27.496747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:27.502157Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1868 WaitRootIsUp 
'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:27.752195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:27.769869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:27.785643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:28.140416Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:32.132373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901504969147441:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:32.136390Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:35.420074Z node 1 :TX_DATASHARD ERROR: check_data_tx_unit.cpp:133: Transaction read size 51002277 exceeds limit 10000 at tablet 72075186224037888 txId 281474976710760 2025-06-25T14:58:35.420190Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976710760 at tablet 72075186224037888 status: BAD_REQUEST errors: READ_SIZE_EXECEEDED (Transaction read size 51002277 exceeds limit 10000 at tablet 72075186224037888 txId 281474976710760) | 2025-06-25T14:58:35.420289Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519901539328887413:2927] txid# 281474976710760 RESPONSE Status# WrongRequest marker# P13c 2025-06-25T14:58:36.191767Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901544592543486:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:36.191818Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002069/r3tmp/tmpfhYHvQ/pdisk_1.dat 2025-06-25T14:58:36.333659Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:36.345285Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:36.345336Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:36.349458Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19906 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:36.553317Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:36.563304Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:36.572376Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:36.577791Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:37.208449Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:41.192873Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901544592543486:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:41.192938Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:43.412989Z node 2 :TX_DATASHARD ERROR: check_data_tx_unit.cpp:133: Transaction read size 51002501 exceeds limit 10000 at tablet 72075186224037888 txId 281474976715760 2025-06-25T14:58:43.413077Z node 2 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715760 at tablet 72075186224037888 status: BAD_REQUEST errors: READ_SIZE_EXECEEDED (Transaction read size 51002501 exceeds limit 10000 at tablet 72075186224037888 txId 281474976715760) | 2025-06-25T14:58:43.413182Z node 2 :TX_PROXY ERROR: datareq.cpp:883: Actor# [2:7519901574657316126:2922] txid# 281474976715760 RESPONSE Status# WrongRequest marker# P13c 2025-06-25T14:58:44.196119Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901575889548668:2231];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002069/r3tmp/tmpm3qRA0/pdisk_1.dat 2025-06-25T14:58:44.312325Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:44.400792Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901575889548472:2080] 1750863524178438 != 1750863524178441 2025-06-25T14:58:44.402156Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:44.419808Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:44.419886Z 
node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:44.422429Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8750 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:44.665717Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:44.672712Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:44.693120Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:44.702426Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:45.180459Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:47.770131Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [3:7519901575889548503:2088] Handle TEvProposeTransaction 2025-06-25T14:58:47.770175Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [3:7519901575889548503:2088] TxId# 281474976715700 ProcessProposeTransaction 2025-06-25T14:58:47.770219Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:273: actor# [3:7519901575889548503:2088] Cookie# 0 userReqId# "" txid# 281474976715700 SEND to# [3:7519901588774451461:2605] DataReq marker# P0 2025-06-25T14:58:47.770289Z node 3 :TX_PROXY DEBUG: datareq.cpp:1330: Actor# [3:7519901588774451461:2605] Cookie# 0 txid# 281474976715700 HANDLE TDataReq marker# P1 2025-06-25T14:58:47.770911Z node 3 :TX_PROXY DEBUG: datareq.cpp:1245: Actor [3:7519901588774451461:2605] txid 281474976715700 disallow followers cause of operation 2 read target mode 0 2025-06-25T14:58:47.770933Z node 3 :TX_PROXY DEBUG: datareq.cpp:1245: Actor [3:7519901588774451461:2605] txid 281474976715700 disallow followers cause of operation 2 read target mode 0 2025-06-25T14:58:47.770968Z node 3 :TX_PROXY DEBUG: datareq.cpp:1453: Actor# [3:7519901588774451461:2605] txid# 281474976715700 SEND to# [3:7519901575889548712:2114] TSchemeCache with 2 scheme entries. DataReq marker# P2 2025-06-25T14:58:47.771103Z node 3 :TX_PROXY DEBUG: datareq.cpp:1620: Actor# [3:7519901588774451461:2605] txid# 281474976715700 HANDLE EvResolveKeySetResult TDataReq marker# P3 ErrorCount# 0 2025-06-25T14:58:47.772610Z node 3 :TX_PROXY DEBUG: datareq.cpp:1115: Actor# [3:7519901588774451461:2605] txid# 281474976715700 SEND TEvProposeTransaction to datashard 72075186224037888 with 734 bytes program affected shards 2 followers disallowed marker# P4 2025-06-25T14:58:47.772918Z node 3 :TX_PROXY DEBUG: datareq.cpp:1115: Actor# [3:7519901588774451461:2605] txid# 281474976715700 SEND TEvProposeTransaction to datashard 72075186224037889 with 734 bytes program affected shards 2 followers disallowed marker# P4 2025-06-25T14:58:47.773226Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T14:58:47.773230Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T14:58:47.774573Z node 3 :TX_DATASHARD DEBUG: check_data_tx_unit.cpp:313: Prepared DataTx transaction txId 281474976715700 at tablet 72075186224037888 2025-06-25T14:58:47.774698Z node 3 :TX_DATASHARD DEBUG: check_data_tx_unit.cpp:313: Prepared DataTx transaction txId 281474976715700 at tablet 72075186224037889 2025-06-25T14:58:47.775710Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T14:58:47.775878Z node 3 :TX_PROXY DEBUG: datareq.cpp:1873: Actor# [3:7519901588774451461:2605] txid# 281474976715700 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# PREPARED shard id 72075186224037888 read size 17000919 out readset size 0 marker# P6 2025-06-25T14:58:47.776160Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-25T14:58:47.776258Z node 3 :TX_PROXY DEBUG: datareq.cpp:1873: Actor# [3:7519901588774451461:2605] txid# 281474976715700 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# 
StatusWait GetStatus# PREPARED shard id 72075186224037889 read size 9000495 out readset size 0 marker# P6 2025-06-25T14:58:47.776321Z node 3 :TX_PROXY ERROR: datareq.cpp:2829: Actor# [3:7519901588774451461:2605] txid# 281474976715700 FailProposedRequest: Transaction total read size 26001414 exceeded limit 10000 Status# ExecError 2025-06-25T14:58:47.776380Z node 3 :TX_PROXY ERROR: datareq.cpp:883: Actor# [3:7519901588774451461:2605] txid# 281474976715700 RESPONSE Status# ExecError marker# P13c 2025-06-25T14:58:47.776504Z node 3 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:73: Got TEvDataShard::TEvCancelTransactionProposal 72075186224037888 txId 281474976715700 2025-06-25T14:58:47.776526Z node 3 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:73: Got TEvDataShard::TEvCancelTransactionProposal 72075186224037889 txId 281474976715700 2025-06-25T14:58:47.776565Z node 3 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:44: Start TTxCancelTransactionProposal at tablet 72075186224037889 txId 281474976715700 2025-06-25T14:58:47.776566Z node 3 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:44: Start TTxCancelTransactionProposal at tablet 72075186224037888 txId 281474976715700 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_BrokenLock1 [GOOD] Test command err: 2025-06-25T14:58:07.782122Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901420311246560:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.782246Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002089/r3tmp/tmp4WxKp3/pdisk_1.dat 2025-06-25T14:58:08.360870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.360982Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.377212Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:08.400712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8967 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
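The RejectByPerRequestSize output above reflects two separate guards: a datashard-side check that rejects a prepared data transaction whose read size exceeds a per-shard limit (READ_SIZE_EXECEEDED, status BAD_REQUEST), and a proxy-side check that sums the prepared read sizes across the participating shards and fails the whole request when the total exceeds the per-request limit (ExecError), cancelling the proposal on every shard. The C++ below is a hedged sketch of those two checks with invented names, not the actual datashard/proxy code; only the shard read sizes (17000919 and 9000495 bytes) and the 10000-byte per-request limit are taken from the log, while the per-shard limit value is made up for the sketch.

// Illustrative sketch only -- names, structure, and the per-shard limit
// are assumptions, not the real YDB datashard/proxy implementation.
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct ShardReadInfo {
    uint64_t TabletId;
    uint64_t ReadSize;   // bytes the prepared tx would read on this shard
};

// Per-shard guard: mirrors the datashard-side rejection
// ("Transaction read size N exceeds limit L at tablet T").
bool CheckShardReadSize(const ShardReadInfo& shard, uint64_t perShardLimit) {
    if (shard.ReadSize > perShardLimit) {
        std::cerr << "Transaction read size " << shard.ReadSize
                  << " exceeds limit " << perShardLimit
                  << " at tablet " << shard.TabletId << "\n";
        return false;  // the shard would answer BAD_REQUEST
    }
    return true;
}

// Proxy-level guard: mirrors the total-read-size rejection
// ("Transaction total read size N exceeded limit L").
bool CheckTotalReadSize(const std::vector<ShardReadInfo>& shards, uint64_t perRequestLimit) {
    uint64_t total = 0;
    for (const auto& s : shards) total += s.ReadSize;
    if (total > perRequestLimit) {
        std::cerr << "Transaction total read size " << total
                  << " exceeded limit " << perRequestLimit << "\n";
        return false;  // the proxy would cancel the proposal on every shard and reply ExecError
    }
    return true;
}

int main() {
    // Shard read sizes and the 10000-byte per-request limit come from the log above;
    // the 50 MB per-shard limit is an arbitrary value for the sketch.
    std::vector<ShardReadInfo> shards = {
        {72075186224037888ull, 17000919ull},
        {72075186224037889ull, 9000495ull},
    };
    const uint64_t perShardLimit   = 50000000;
    const uint64_t perRequestLimit = 10000;
    bool ok = true;
    for (const auto& s : shards) ok = CheckShardReadSize(s, perShardLimit) && ok;
    ok = CheckTotalReadSize(shards, perRequestLimit) && ok;
    return ok ? 0 : 1;
}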
2025-06-25T14:58:08.744274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:08.769870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:08.793478Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:58:08.807238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:08.976162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:09.055653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:11.292350Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901436962235954:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:11.292438Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002089/r3tmp/tmpPwnw8s/pdisk_1.dat 2025-06-25T14:58:11.524162Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:11.527045Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:11.527109Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:11.530823Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901436962235925:2080] 1750863491290152 != 1750863491290155 2025-06-25T14:58:11.535916Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25118 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:11.775072Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:11.788688Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:11.804616Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:11.808679Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:11.885595Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:11.937898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:14.650159Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901448581553243:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:14.650230Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002089/r3tmp/tmpedDxY4/pdisk_1.dat 2025-06-25T14:58:14.772450Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:14.773667Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901448581553209:2080] 1750863494649395 != 1750863494649398 2025-06-25T14:58:14.793826Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:14.793895Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:14.796397Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21485 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:14.955951Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:14.973002Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, sub ... 5-06-25T14:58:34.371661Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12917 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:34.693807Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:34.703342Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:34.715222Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:34.788010Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:34.859441Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:38.555407Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901550671830692:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:38.555474Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002089/r3tmp/tmpTpeIRA/pdisk_1.dat 2025-06-25T14:58:38.773520Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:38.792464Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519901550671830673:2080] 1750863518554859 != 1750863518554862 2025-06-25T14:58:38.797851Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:38.797957Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:38.802031Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14682 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:39.121871Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:39.128701Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:39.148064Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:39.218924Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:39.281083Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:43.316158Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901572777128883:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:43.316221Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002089/r3tmp/tmpWblhZw/pdisk_1.dat 2025-06-25T14:58:43.500330Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:43.518879Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:43.518982Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:43.521716Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31469 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:43.839008Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:43.848821Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:43.858093Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:43.863642Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:43.940868Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:44.028372Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::CK_BrokenLock [GOOD] Test command err: 2025-06-25T14:58:07.806957Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901416687294656:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:07.807013Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208e/r3tmp/tmpFNhdV3/pdisk_1.dat 2025-06-25T14:58:08.337140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:08.337239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:08.342248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:08.369049Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901416687294639:2080] 1750863487806353 != 1750863487806356 2025-06-25T14:58:08.385956Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:4716 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:08.718766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:08.741267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:08.751031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:08.757160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.841029Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:08.920503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:08.994895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:11.195845Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901435488137103:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:11.214574Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208e/r3tmp/tmp4f8wNv/pdisk_1.dat 2025-06-25T14:58:11.479943Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:11.516858Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:11.516926Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:11.536785Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10864 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:11.757813Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:11.764627Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-25T14:58:11.778173Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:11.831939Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:11.907254Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:14.446189Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901449249677511:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:14.446254Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208e/r3tmp/tmp5lHM6i/pdisk_1.dat 2025-06-25T14:58:14.590858Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:14.604484Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901449249677481:2080] 1750863494445233 != 1750863494445236 2025-06-25T14:58:14.605496Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:14.605559Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:14.611220Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31012 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:14.807847Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:14.814039Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: ... 37968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:34.018905Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21903 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:34.295591Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:34.303566Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:34.318576Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:34.399236Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:34.459378Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:38.583050Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901549668018049:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:38.583138Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208e/r3tmp/tmpazRjob/pdisk_1.dat 2025-06-25T14:58:38.815030Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:38.905713Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:38.905830Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:38.907901Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7189 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:39.199762Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:39.206820Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:39.229862Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:39.322886Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:39.411110Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:39.604441Z node 9 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00208e/r3tmp/tmpvRNvpb/pdisk_1.dat 2025-06-25T14:58:43.528498Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:43.591549Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:43.597923Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519901574311366421:2080] 1750863523387841 != 1750863523387844 2025-06-25T14:58:43.610481Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:43.610582Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:43.611967Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21770 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:44.037326Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:44.044445Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:44.060764Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:44.220871Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:44.303738Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:44.431241Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestImposibleSize [GOOD] Test command err: 2025-06-25T14:58:49.440441Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-25T14:58:49.441119Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-25T14:58:49.443360Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-25T14:58:49.457422Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.461595Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-25T14:58:49.477308Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477422Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477553Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-25T14:58:49.477672Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477807Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477895Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-25T14:58:49.478021Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-25T14:58:49.479669Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#281474976710656 2025-06-25T14:58:49.480860Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 0 Reserved to# 0 2025-06-25T14:58:49.484544Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE 2025-06-25T14:58:49.485059Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:77:2110] requested range size#123456 2025-06-25T14:58:49.485596Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.485694Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.485810Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 123456 2025-06-25T14:58:49.485855Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:77:2110] TEvAllocateResult from# 0 to# 123456 expected SUCCESS 2025-06-25T14:58:49.486247Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:81:2114] requested range size#281474976587200 2025-06-25T14:58:49.486967Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved 
from# 123456 Reserved to# 0 2025-06-25T14:58:49.487020Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:81:2114] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE 2025-06-25T14:58:49.487434Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:84:2117] requested range size#246912 2025-06-25T14:58:49.488786Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.488856Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.488945Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 123456 Reserved to# 370368 2025-06-25T14:58:49.488984Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:84:2117] TEvAllocateResult from# 123456 to# 370368 expected SUCCESS 2025-06-25T14:58:49.489419Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:88:2121] requested range size#281474976340288 2025-06-25T14:58:49.489531Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 370368 Reserved to# 0 2025-06-25T14:58:49.489572Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:88:2121] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::Boot [GOOD] Test command err: 2025-06-25T14:58:49.438558Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-25T14:58:49.441152Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-25T14:58:49.443353Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-25T14:58:49.457628Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.461592Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-25T14:58:49.477395Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477538Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477712Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-25T14:58:49.477829Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477986Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.478092Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-25T14:58:49.478219Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestAllocateAll [GOOD] Test command err: 2025-06-25T14:58:49.440210Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-25T14:58:49.442012Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-25T14:58:49.444018Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. 
logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-25T14:58:49.457468Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.461676Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-25T14:58:49.477162Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477285Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477427Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-25T14:58:49.477540Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477684Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477773Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-25T14:58:49.477889Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-25T14:58:49.481603Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#281474976710655 2025-06-25T14:58:49.482092Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.482167Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.482252Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 281474976710655 2025-06-25T14:58:49.482293Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 0 to# 281474976710655 expected SUCCESS 2025-06-25T14:58:49.486058Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:78:2111] requested range size#1 2025-06-25T14:58:49.486967Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 281474976710655 Reserved to# 0 2025-06-25T14:58:49.487043Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:78:2111] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestAllocateAllByPieces [GOOD] Test command err: 2025-06-25T14:58:49.440538Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 
2025-06-25T14:58:49.441103Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-25T14:58:49.443986Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-25T14:58:49.457436Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.461610Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-25T14:58:49.477044Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477170Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477356Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-25T14:58:49.477475Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477644Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477743Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-25T14:58:49.477868Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! 
Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-25T14:58:49.479669Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#8796093022207 2025-06-25T14:58:49.482020Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.482122Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.482217Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 8796093022207 2025-06-25T14:58:49.482274Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 0 to# 8796093022207 expected SUCCESS 2025-06-25T14:58:49.488089Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:78:2111] requested range size#8796093022207 2025-06-25T14:58:49.490500Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.490577Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.490675Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 8796093022207 Reserved to# 17592186044414 2025-06-25T14:58:49.490736Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:78:2111] TEvAllocateResult from# 8796093022207 to# 17592186044414 expected SUCCESS 2025-06-25T14:58:49.491119Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:82:2115] requested range size#8796093022207 2025-06-25T14:58:49.491590Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.491658Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:5:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.491739Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 17592186044414 Reserved to# 26388279066621 2025-06-25T14:58:49.491783Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:82:2115] TEvAllocateResult from# 17592186044414 to# 26388279066621 expected SUCCESS 2025-06-25T14:58:49.492204Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:86:2119] requested range size#8796093022207 2025-06-25T14:58:49.492583Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.492672Z node 1 :TABLET_MAIN DEBUG: 
tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:6:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.492763Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 26388279066621 Reserved to# 35184372088828 2025-06-25T14:58:49.492816Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:86:2119] TEvAllocateResult from# 26388279066621 to# 35184372088828 expected SUCCESS 2025-06-25T14:58:49.493213Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:90:2123] requested range size#8796093022207 2025-06-25T14:58:49.493554Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.493658Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:7:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.493742Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 35184372088828 Reserved to# 43980465111035 2025-06-25T14:58:49.493793Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:90:2123] TEvAllocateResult from# 35184372088828 to# 43980465111035 expected SUCCESS 2025-06-25T14:58:49.494208Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:94:2127] requested range size#8796093022207 2025-06-25T14:58:49.494509Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.494594Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:8:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.494682Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 43980465111035 Reserved to# 52776558133242 2025-06-25T14:58:49.494716Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:94:2127] TEvAllocateResult from# 43980465111035 to# 52776558133242 expected SUCCESS 2025-06-25T14:58:49.495189Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:98:2131] requested range size#8796093022207 2025-06-25T14:58:49.495515Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:1:24576:74:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.495605Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:9:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.495694Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 52776558133242 Reserved to# 61572651155449 2025-06-25T14:58:49.495738Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:98:2131] 
TEvAllocateResult from# 52776558133242 to# 61572651155449 expected SUCCESS 2025-06-25T14:58:49.496174Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:102:2135] requested range size#8796093022207 2025-06-25T14:58:49.496885Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.496945Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:10:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.497043Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 61572651155449 Reserved to# 70368744177656 2025-06-25T14:58:49.497079Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:102:2135] TEvAllocateResult from# 61572651155449 to# 70368744177656 expected SUCCESS 2025-06-25T14:58:49.497479Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:106:2139] requested range size#8796093022207 2025-06-25T14:58:49.497813Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.497870Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:11:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.497938Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 70368744177656 Reserved to# 79164837199863 2025-06-25T14:58:49.497970Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:106:2139] TEvAllocateResult from# 70368744177656 to# 79164837199863 expected SUCCESS 2025-06-25T14:58:49.498510Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:110:2143] requested range size#8796093022207 2025-06-25T14:58:49.498833Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.498927Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:12:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.499004Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Suc ... 
node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:158:2191] TEvAllocateResult from# 184717953466347 to# 193514046488554 expected SUCCESS 2025-06-25T14:58:49.513938Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:162:2195] requested range size#8796093022207 2025-06-25T14:58:49.514252Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:25:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.514371Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:25:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.514483Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 193514046488554 Reserved to# 202310139510761 2025-06-25T14:58:49.514529Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:162:2195] TEvAllocateResult from# 193514046488554 to# 202310139510761 expected SUCCESS 2025-06-25T14:58:49.515125Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:166:2199] requested range size#8796093022207 2025-06-25T14:58:49.515419Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:26:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.515480Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:26:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.515550Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 202310139510761 Reserved to# 211106232532968 2025-06-25T14:58:49.515620Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:166:2199] TEvAllocateResult from# 202310139510761 to# 211106232532968 expected SUCCESS 2025-06-25T14:58:49.516242Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:170:2203] requested range size#8796093022207 2025-06-25T14:58:49.516562Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:27:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.516626Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:27:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.516712Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 211106232532968 Reserved to# 219902325555175 2025-06-25T14:58:49.516750Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:170:2203] TEvAllocateResult from# 211106232532968 to# 219902325555175 expected SUCCESS 2025-06-25T14:58:49.517424Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:174:2207] requested range size#8796093022207 2025-06-25T14:58:49.517767Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# 
[72057594046447617:2:28:1:24576:75:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.517835Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:28:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.517914Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 219902325555175 Reserved to# 228698418577382 2025-06-25T14:58:49.517975Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:174:2207] TEvAllocateResult from# 219902325555175 to# 228698418577382 expected SUCCESS 2025-06-25T14:58:49.518651Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:178:2211] requested range size#8796093022207 2025-06-25T14:58:49.518940Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:29:1:24576:73:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.519015Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:29:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.519097Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 228698418577382 Reserved to# 237494511599589 2025-06-25T14:58:49.519151Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:178:2211] TEvAllocateResult from# 228698418577382 to# 237494511599589 expected SUCCESS 2025-06-25T14:58:49.519850Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:182:2215] requested range size#8796093022207 2025-06-25T14:58:49.520124Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:30:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.520189Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:30:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.520283Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 237494511599589 Reserved to# 246290604621796 2025-06-25T14:58:49.520355Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:182:2215] TEvAllocateResult from# 237494511599589 to# 246290604621796 expected SUCCESS 2025-06-25T14:58:49.521060Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:186:2219] requested range size#8796093022207 2025-06-25T14:58:49.521382Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:31:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.521444Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:31:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.521521Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 
246290604621796 Reserved to# 255086697644003 2025-06-25T14:58:49.521598Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:186:2219] TEvAllocateResult from# 246290604621796 to# 255086697644003 expected SUCCESS 2025-06-25T14:58:49.522343Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:190:2223] requested range size#8796093022207 2025-06-25T14:58:49.522623Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:32:1:24576:75:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.522708Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:32:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.522805Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 255086697644003 Reserved to# 263882790666210 2025-06-25T14:58:49.522843Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:190:2223] TEvAllocateResult from# 255086697644003 to# 263882790666210 expected SUCCESS 2025-06-25T14:58:49.523813Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:194:2227] requested range size#8796093022207 2025-06-25T14:58:49.524124Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:33:1:24576:77:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.524173Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:33:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.524253Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 263882790666210 Reserved to# 272678883688417 2025-06-25T14:58:49.524289Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:194:2227] TEvAllocateResult from# 263882790666210 to# 272678883688417 expected SUCCESS 2025-06-25T14:58:49.525167Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:198:2231] requested range size#8796093022207 2025-06-25T14:58:49.525464Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:34:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.525538Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:34:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.525663Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 272678883688417 Reserved to# 281474976710624 2025-06-25T14:58:49.525707Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:198:2231] TEvAllocateResult from# 272678883688417 to# 281474976710624 expected SUCCESS 2025-06-25T14:58:49.526584Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:202:2235] requested range size#31 2025-06-25T14:58:49.526902Z node 1 :TABLET_MAIN 
DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:35:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.526965Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:35:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.527078Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 281474976710624 Reserved to# 281474976710655 2025-06-25T14:58:49.527122Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:202:2235] TEvAllocateResult from# 281474976710624 to# 281474976710655 expected SUCCESS 2025-06-25T14:58:49.527936Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:206:2239] requested range size#1 2025-06-25T14:58:49.528061Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 281474976710655 Reserved to# 0 2025-06-25T14:58:49.528119Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:206:2239] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestZeroRange [GOOD] Test command err: 2025-06-25T14:58:49.441044Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-25T14:58:49.441687Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-25T14:58:49.443351Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-25T14:58:49.457440Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.461800Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-25T14:58:49.477471Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477605Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477743Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-25T14:58:49.477835Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.477952Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.478039Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-25T14:58:49.478149Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! 
Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-25T14:58:49.479698Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:73:2107] requested range size#0 2025-06-25T14:58:49.481920Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.482008Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T14:58:49.482101Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 0 2025-06-25T14:58:49.482143Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:73:2107] TEvAllocateResult from# 0 to# 0 expected SUCCESS |90.2%| [TA] $(B)/ydb/core/tx/tx_allocator/ut/test-results/unittest/{meta.json ... results_accumulator.log} |90.2%| [TA] {RESULT} $(B)/ydb/core/tx/tx_allocator/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KikimrIcGateway::TestLoadExternalTable [GOOD] >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata >> KqpKv::ReadRows_Decimal [GOOD] >> KikimrIcGateway::TestLoadTableMetadata [GOOD] >> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata >> KikimrIcGateway::TestCreateResourcePool [GOOD] >> KikimrIcGateway::TestALterResourcePool >> TLocksFatTest::LocksLimit [GOOD] >> KikimrIcGateway::TestDropExternalTable [GOOD] >> KikimrIcGateway::TestDropExternalDataSource >> TFlatTest::LargeDatashardReplyRW [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpKv::ReadRows_Decimal [GOOD] Test command err: Trying to start YDB, gRPC: 64539, MsgBus: 23421 2025-06-25T14:54:16.612244Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900427921329159:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:16.613142Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009b6/r3tmp/tmpWLIMwd/pdisk_1.dat 2025-06-25T14:54:17.083735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:54:17.083825Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:54:17.126462Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:54:17.143137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64539, node 1 2025-06-25T14:54:17.296627Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:54:17.296796Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:54:17.296815Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
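The TX_ALLOCATOR sequence above hands out fixed ranges of 8796093022207 txids (2^43 − 1) from a total keyspace of 281474976710655 (2^48 − 1); after 32 full ranges only 31 ids remain, which is why the final size-31 request still succeeds (Reserved to# 281474976710655) and the following size-1 request is rejected with IMPOSIBLE. A minimal sketch of that arithmetic, using only the constants visible in the log (the program below is purely illustrative, not YDB code):

```cpp
#include <cstdint>
#include <cassert>
#include <iostream>

int main() {
    // Constants observed in the TX_ALLOCATOR log above.
    const uint64_t kTotal     = 281474976710655ULL;  // 2^48 - 1, full txid keyspace
    const uint64_t kRangeSize = 8796093022207ULL;    // 2^43 - 1, size of each requested range

    const uint64_t fullRanges = kTotal / kRangeSize;  // 32 full allocations
    const uint64_t leftover   = kTotal % kRangeSize;  // 31 ids left afterwards
    assert(fullRanges == 32 && leftover == 31);

    // The 32nd allocation ends at 32 * (2^43 - 1) = 281474976710624, matching
    // "Reserved to# 281474976710624" in the log; the next size-31 request
    // consumes the tail up to 281474976710655, and a further size-1 request
    // has nothing left, hence status IMPOSIBLE.
    std::cout << fullRanges << " " << leftover << " "
              << fullRanges * kRangeSize << "\n";  // prints: 32 31 281474976710624
    return 0;
}
```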
2025-06-25T14:54:17.298120Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23421 2025-06-25T14:54:17.615315Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23421 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:54:17.997868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:54:18.020621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:54:18.037136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:18.275486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:18.531562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:54:18.652089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:54:20.293093Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900445101199797:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:20.293200Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:20.546972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:20.578128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:20.611117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:20.645548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:20.688167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:20.758615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:20.795747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:20.862935Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900445101200458:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:20.863012Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:20.863248Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900445101200463:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:54:20.866488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:54:20.879441Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900445101200465:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:54:20.983289Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900445101200516:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:54:21.605492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900427921329159:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:54:21.605543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:54:21.961929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:54:21.991122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok ... ersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:29.596360Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:29.682196Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 11135, MsgBus: 15497 2025-06-25T14:58:38.133849Z node 22 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [22:275:2318], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:58:38.134428Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:38.134711Z node 22 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009b6/r3tmp/tmpZAZCBM/pdisk_1.dat 2025-06-25T14:58:38.589863Z node 22 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 22 Type# 268639257 TServer::EnableGrpc on GrpcPort 11135, node 22 2025-06-25T14:58:38.957910Z node 22 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:38.959662Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:38.959764Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:38.959856Z node 22 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:38.960687Z node 22 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:38.961242Z node 22 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [22:33:2080] 1750863512145108 != 1750863512145111 2025-06-25T14:58:39.040214Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:39.050444Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:39.065928Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(22, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:39.286690Z node 22 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15497 TClient is connected to server localhost:15497 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:58:40.091351Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:40.108907Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:58:40.187068Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 27858, MsgBus: 10549 2025-06-25T14:58:42.460630Z node 23 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[23:7519901566797176492:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:42.460713Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009b6/r3tmp/tmplyZAeP/pdisk_1.dat 2025-06-25T14:58:42.676497Z node 23 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [23:7519901566797176400:2080] 1750863522449289 != 1750863522449292 2025-06-25T14:58:42.690924Z node 23 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:42.695637Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:42.695790Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:42.706040Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27858, node 23 2025-06-25T14:58:42.803302Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:42.803334Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:42.803350Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:42.803564Z node 23 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10549 2025-06-25T14:58:43.469538Z node 23 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10549 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:44.222437Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:44.234906Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:58:47.464339Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[23:7519901566797176492:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:47.464467Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:49.527875Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7519901596861948125:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:49.528056Z node 23 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:49.569261Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:49.775984Z node 23 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: Type mismatch, got type Uint64 for column Key22, but expected Decimal(22,9) 2025-06-25T14:58:49.787510Z node 23 :RPC_REQUEST ERROR: rpc_read_rows.cpp:777: TReadRowsRPC ReplyWithError: Type mismatch, got type Decimal(35,10) for column Key22, but expected Decimal(22,9) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksFatTest::LocksLimit [GOOD] Test command err: 2025-06-25T14:58:37.068391Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901546257135334:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:37.068450Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205c/r3tmp/tmpp1LuHB/pdisk_1.dat 2025-06-25T14:58:37.559343Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:37.575902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:37.576007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:37.578023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4528 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:37.823515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
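The two RPC_REQUEST errors at the end of the KqpKv::ReadRows_Decimal output are the expected outcome of the test: keys typed as Uint64 and as Decimal(35,10) are sent against a column declared Decimal(22,9), and TReadRowsRPC rejects both because the provided type, including its (precision, scale) pair, must match the declared column type exactly. A hypothetical check of that shape (not the actual YDB implementation, just an illustration of the rule the errors describe):

```cpp
#include <cstdint>
#include <iostream>

// Purely illustrative: a declared decimal column accepts an incoming decimal
// value only if precision and scale are identical; any other type mismatches.
struct DecimalType { uint32_t precision; uint32_t scale; };

bool MatchesDeclared(const DecimalType& declared, const DecimalType& provided) {
    return declared.precision == provided.precision &&
           declared.scale == provided.scale;
}

int main() {
    const DecimalType key22{22, 9};                          // column declared Decimal(22,9)
    std::cout << MatchesDeclared(key22, {22, 9})  << "\n";   // 1: accepted
    std::cout << MatchesDeclared(key22, {35, 10}) << "\n";   // 0: "got type Decimal(35,10)"
    return 0;
}
```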
2025-06-25T14:58:37.862270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:37.983249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:38.033758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:38.094290Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:42.068916Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901546257135334:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:42.068963Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:44.669034Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901578121011094:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:44.669136Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205c/r3tmp/tmpnj6q4E/pdisk_1.dat 2025-06-25T14:58:44.908523Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:44.912381Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901578121010975:2080] 1750863524644599 != 1750863524644602 2025-06-25T14:58:44.920908Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:44.920985Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:44.923953Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20139 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:45.242202Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:45.260723Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:45.289113Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:45.295987Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:45.415252Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:45.491224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:45.692816Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:48.135536Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901595313020729:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:48.136509Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205c/r3tmp/tmpAxYuOF/pdisk_1.dat 2025-06-25T14:58:48.271439Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:48.273255Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901595313020702:2080] 1750863528133427 != 1750863528133430 2025-06-25T14:58:48.300506Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:48.300594Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:48.304609Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25005 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:48.509687Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:48.520960Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:48.529155Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:48.533882Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:48.590752Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:48.642806Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:49.152780Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> KqpPrefixedVectorIndexes::OrderByCosineLevel2+Nullable+UseSimilarity [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::LargeDatashardReplyRW [GOOD] Test command err: 2025-06-25T14:58:29.188133Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901514360918248:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:29.188215Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002063/r3tmp/tmpNLnL4J/pdisk_1.dat 2025-06-25T14:58:29.775717Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:29.777417Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901514360918221:2080] 1750863509158111 != 1750863509158114 2025-06-25T14:58:29.795675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:29.795766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:29.800147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13826 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:30.230514Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:30.239581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:30.276780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:30.300454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:30.315141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:34.192518Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901514360918248:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:34.192610Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:40.317119Z node 1 :MINIKQL_ENGINE ERROR: datashard__engine_host.cpp:516: Shard %72075186224037889, txid %281474976711360, engine error: Error executing transaction (read-only: 1): Datashard 72075186224037889: reply size limit exceeded. (61442990 > 50331648) 2025-06-25T14:58:40.331032Z node 1 :TX_DATASHARD ERROR: execute_data_tx_unit.cpp:268: Datashard execution error for [1750863519818:281474976711360] at 72075186224037889: Datashard 72075186224037889: reply size limit exceeded. (61442990 > 50331648) DataShardErrors: [REPLY_SIZE_EXCEEDED] Datashard 72075186224037889: reply size limit exceeded. 
(61442990 > 50331648) proxy error code: ExecResultUnavailable 2025-06-25T14:58:40.336457Z node 1 :TX_PROXY ERROR: datareq.cpp:2286: Actor# [1:7519901557310597625:5937] txid# 281474976711360 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# RESULT_UNAVAILABLE shard id 72075186224037889 marker# P12 2025-06-25T14:58:40.336546Z node 1 :TX_PROXY ERROR: datareq.cpp:883: Actor# [1:7519901557310597625:5937] txid# 281474976711360 RESPONSE Status# ExecResultUnavailable marker# P13c 2025-06-25T14:58:40.986317Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901558317755933:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:40.986358Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002063/r3tmp/tmpUOxYqw/pdisk_1.dat 2025-06-25T14:58:41.180238Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:41.192209Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:41.192278Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:41.194445Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4653 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:41.373571Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:41.379138Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:41.390037Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:41.395144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:42.015218Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:45.987687Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901558317755933:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:45.987752Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:51.234961Z node 2 :MINIKQL_ENGINE ERROR: datashard__engine_host.cpp:516: Shard %72075186224037888, txid %281474976716361, engine error: Error executing transaction (read-only: 0): Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) 2025-06-25T14:58:51.246173Z node 2 :TX_DATASHARD ERROR: execute_data_tx_unit.cpp:268: Datashard execution error for [0:281474976716361] at 72075186224037888: Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648) 2025-06-25T14:58:51.249223Z node 2 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976716361 at tablet 72075186224037888 status: RESULT_UNAVAILABLE errors: REPLY_SIZE_EXCEEDED (Datashard 72075186224037888: reply size limit exceeded. (71580986 > 50331648)) | 2025-06-25T14:58:51.249421Z node 2 :TX_PROXY ERROR: datareq.cpp:883: Actor# [2:7519901601267435267:5912] txid# 281474976716361 RESPONSE Status# ExecResultUnavailable marker# P13c DataShardErrors: [REPLY_SIZE_EXCEEDED] Datashard 72075186224037888: reply size limit exceeded. 
(71580986 > 50331648) proxy error code: ExecResultUnavailable |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::DecimalKeys >> LocalTableWriter::DataAlongWithHeartbeat >> LocalTableWriter::SupportedTypes >> LocalTableWriter::ConsistentWrite >> KikimrIcGateway::TestALterResourcePool [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::OrderByCosineLevel2+Nullable+UseSimilarity [GOOD] Test command err: Trying to start YDB, gRPC: 5004, MsgBus: 3010 2025-06-25T14:57:15.843891Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901194126710709:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:15.843945Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b3d/r3tmp/tmp2w0pgt/pdisk_1.dat 2025-06-25T14:57:16.363527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:16.363606Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:16.374910Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:16.381413Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:16.382939Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901194126710688:2080] 1750863435840339 != 1750863435840342 TServer::EnableGrpc on GrpcPort 5004, node 1 2025-06-25T14:57:16.493009Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:16.493036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:16.493048Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:16.493150Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3010 2025-06-25T14:57:16.900435Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3010 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:17.115947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:17.134680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:17.151146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:17.358553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:17.532396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:57:17.639789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.130961Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901211306581525:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-25T14:57:19.131056Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:19.470691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.499092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.531781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.576727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.609389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.655606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.688628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:19.748205Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901211306582183:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-25T14:57:19.748289Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-25T14:57:19.748375Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901211306582188:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-25T14:57:19.751742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179)
2025-06-25T14:57:19.761460Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901211306582190:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:19.836758Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901211306582241:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:20.844232Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901194126710709:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:20.844295Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:20.968014Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901215601549813:3603], Recipient [1:7519901198421678324:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:20.968059Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: S ... e: 1750863497788 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:58:47.824843Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:58:47.824886Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037927 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 20] state 'Ready' dataSize 710 rowCount 30 cpuUsage 0.1086 2025-06-25T14:58:47.825006Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037927 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 20] raw table stats: DataSize: 710 RowCount: 30 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750863526741 LastUpdateTime: 1750863498255 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 30 RowDeletes: 0 RowReads: 0 RangeReads: 20 PartCount: 1 RangeReadRows: 70 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 710 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:58:47.825038Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.099996s, queue# 1 2025-06-25T14:58:47.825232Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519901462002005142:2499], Recipient [3:7519901440527166092:2146]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037925 TableLocalId: 21 Generation: 1 Round: 2 TableStats { DataSize: 107 RowCount: 3 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750863526734 LastUpdateTime: 1750863498141 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 10 PartCount: 1 RangeReadRows: 10 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 107 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } 
TabletMetrics { CPU: 425 Memory: 119579 Storage: 228 } ShardState: 2 UserTablePartOwners: 72075186224037925 NodeId: 3 StartTime: 1750863497783 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:58:47.825259Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:58:47.825279Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037925 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 21] state 'Ready' dataSize 107 rowCount 3 cpuUsage 0.0425 2025-06-25T14:58:47.825378Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037925 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 21] raw table stats: DataSize: 107 RowCount: 3 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750863526734 LastUpdateTime: 1750863498141 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 10 PartCount: 1 RangeReadRows: 10 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 107 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:58:47.931104Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901440527166092:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:47.931155Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:47.931169Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 2 2025-06-25T14:58:47.931226Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 2 2025-06-25T14:58:47.931243Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 2 2025-06-25T14:58:47.931307Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 20 shard idx 72057594046644480:39 data size 710 row count 30 2025-06-25T14:58:47.931375Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037927 maps to shardIdx: 72057594046644480:39 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 20], pathId map=indexImplPostingTable, is column=0, is olap=0, RowCount 30, DataSize 710 2025-06-25T14:58:47.931388Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037927, followerId 0 2025-06-25T14:58:47.931454Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:39 with partCount# 1, rowCount# 30, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:47.931507Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037927 2025-06-25T14:58:47.931547Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 21 shard idx 72057594046644480:40 data size 107 row count 3 2025-06-25T14:58:47.931583Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037925 maps to shardIdx: 72057594046644480:40 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 21], pathId map=indexImplPrefixTable, is column=0, is olap=0, RowCount 3, DataSize 107 2025-06-25T14:58:47.931595Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037925, followerId 0 2025-06-25T14:58:47.931632Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:40 with partCount# 1, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:47.931651Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037925 2025-06-25T14:58:47.931701Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:58:47.931805Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901440527166092:2146]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:47.931822Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:47.931832Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:58:48.228080Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901440527166092:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:48.228137Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:48.228184Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901440527166092:2146], Recipient [3:7519901440527166092:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:48.228200Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:49.227752Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901440527166092:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:49.227800Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:49.227861Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901440527166092:2146], Recipient [3:7519901440527166092:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:49.227879Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event 
TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:50.228244Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901440527166092:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:50.228321Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:50.228379Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901440527166092:2146], Recipient [3:7519901440527166092:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:50.228411Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:51.232497Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901440527166092:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:51.232547Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:51.232594Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901440527166092:2146], Recipient [3:7519901440527166092:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:51.232611Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:52.234312Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901440527166092:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:52.234359Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:52.234401Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901440527166092:2146], Recipient [3:7519901440527166092:2146]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:52.234417Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TPQTest::TestPartitionTotalQuota >> TPQTabletTests::DropTablet >> TPartitionTests::Batching >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_NewSourceId_Test >> TPartitionTests::ConflictingSrcIdForTxInDifferentBatches >> TPQTabletTests::Multiple_PQTablets_1 >> TPQTabletTests::UpdateConfig_1 >> TTypeCodecsTest::TestFixedLenCodec [GOOD] >> TTypeCodecsTest::TestVarLenCodec [GOOD] >> TTypeCodecsTest::TestVarIntCodec [GOOD] >> TTypeCodecsTest::TestZigZagCodec [GOOD] >> TTypeCodecsTest::TestDeltaZigZagCodec [GOOD] >> TPartitionTests::WriteSubDomainOutOfSpace_DisableExpiration >> TPQTest::TestCmdReadWithLastOffset >> TPQTestInternal::TestBatchPacking [GOOD] >> TPQTestInternal::TestKeyRange [GOOD] >> TPQTestInternal::TestAsInt [GOOD] >> TPQTestInternal::TestAsIntWide [GOOD] >> TPQTestInternal::StoreKeys [GOOD] >> TPQTestInternal::RestoreKeys [GOOD] >> TFlatTest::AutoMergeBySize [GOOD] >> 
TFlatTest::AutoSplitMergeQueue >> TLocksTest::Range_BrokenLockMax [GOOD] >> TLocksTest::Range_CorrectDot >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotBoundary_Test >> KikimrIcGateway::TestDropExternalDataSource [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TTypeCodecsTest::TestDeltaZigZagCodec [GOOD] Test command err: Size: 8002 Create chunk: 0.000190s Read by index: 0.000012s Iterate: 0.000093s Size: 8256 Create chunk: 0.000179s Read by index: 0.000019s Iterate: 0.000052s Size: 8532 Create chunk: 0.000070s Read by index: 0.000048s Iterate: 0.000031s Size: 7769 Create chunk: 0.000067s Read by index: 0.000029s Iterate: 0.000023s Size: 2853 Create chunk: 0.000077s Read by index: 0.000054s Iterate: 0.000023s Size: 2419 Create chunk: 0.000073s Read by index: 0.000058s Iterate: 0.000031s Size: 2929 Create chunk: 0.000090s Read by index: 0.000085s Iterate: 0.000033s Size: 2472 Create chunk: 0.000072s Read by index: 0.000056s Iterate: 0.000032s Size: 2407 Create chunk: 0.000082s Read by index: 0.000059s Iterate: 0.000025s Size: 2061 Create chunk: 0.000068s Read by index: 0.000079s Iterate: 0.000044s >> TPartitionTests::WriteSubDomainOutOfSpace_DisableExpiration [GOOD] >> TPQTabletTests::DropTablet [GOOD] |90.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTestInternal::RestoreKeys [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestALterResourcePool [GOOD] Test command err: Trying to start YDB, gRPC: 5272, MsgBus: 29375 2025-06-25T14:58:44.554246Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901577482111724:2189];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:44.554419Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001397/r3tmp/tmprAylpu/pdisk_1.dat 2025-06-25T14:58:45.162383Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:45.180425Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901577482111572:2080] 1750863524546337 != 1750863524546340 2025-06-25T14:58:45.182697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:45.187301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:45.188845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5272, node 1 2025-06-25T14:58:45.498425Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:45.498451Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:45.498456Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:45.498550Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:45.550954Z 
node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29375 TClient is connected to server localhost:29375 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:46.291014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:46.337788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:58:46.362880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:351) 2025-06-25T14:58:46.369142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 5241, MsgBus: 12355 2025-06-25T14:58:47.742419Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901590435013013:2141];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001397/r3tmp/tmp98NsWt/pdisk_1.dat 2025-06-25T14:58:47.785615Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; TServer::EnableGrpc on GrpcPort 5241, node 2 2025-06-25T14:58:47.857197Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:47.858226Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for 
subscription [2:7519901590435012900:2080] 1750863527729408 != 1750863527729411 2025-06-25T14:58:47.892293Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:47.892398Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:47.905260Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:47.924849Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:47.924874Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:47.924896Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:47.925016Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12355 TClient is connected to server localhost:12355 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:48.425751Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:48.440966Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:58:48.468571Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) Trying to start YDB, gRPC: 30565, MsgBus: 14232 2025-06-25T14:58:51.158685Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901606614122524:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:51.158766Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001397/r3tmp/tmpu1lAwY/pdisk_1.dat 2025-06-25T14:58:51.309530Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:51.310714Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901606614122502:2080] 1750863531154445 != 1750863531154448 2025-06-25T14:58:51.332047Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:51.332135Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 30565, node 3 2025-06-25T14:58:51.338071Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:51.361920Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:51.361950Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:51.361955Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:51.362070Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14232 TClient is connected to server localhost:14232 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:51.799745Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:51.819630Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:51.834834Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterResourcePool, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_resource_pool.cpp:159) >> TPQTabletTests::Multiple_PQTablets_1 [GOOD] >> TPQTabletTests::UpdateConfig_1 [GOOD] >> TPartitionTests::Batching [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_There_Are_More_Senders_Than_Recipients >> TPQTabletTests::Multiple_PQTablets_2 >> TPQTabletTests::DropTablet_And_PlannedConfigTransaction >> TPQTabletTests::UpdateConfig_2 >> TPartitionTests::WriteSubDomainOutOfSpace_IgnoreQuotaDeadline >> TPartitionTests::CommitOffsetRanges >> TPartitionTests::WriteSubDomainOutOfSpace_IgnoreQuotaDeadline [GOOD] >> TObjectStorageListingTest::MaxKeysAndSharding [GOOD] >> TObjectStorageListingTest::SchemaChecks >> TPQTabletTests::Multiple_PQTablets_2 [GOOD] >> TPQTabletTests::DropTablet_And_PlannedConfigTransaction [GOOD] >> TPQTabletTests::UpdateConfig_2 [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_There_Are_More_Senders_Than_Recipients [GOOD] >> TPQTabletTests::Cancel_Tx >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Senders >> TQuotaTracker::TestSmallMessages [GOOD] >> TQuotaTracker::TestBigMessages [GOOD] >> TSourceIdTests::ExpensiveCleanup >> TPQTabletTests::One_Tablet_For_All_Partitions >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_There_Are_Fewer_Senders_Than_Recipients >> TPQTabletTests::Cancel_Tx [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Senders [GOOD] >> TPQTabletTests::One_Tablet_For_All_Partitions [GOOD] >> TPQTest::DirectReadBadSessionOrPipe >> TPQTabletTests::One_New_Partition_In_Another_Tablet >> TPQTabletTests::Config_TEvTxCommit_After_Restart >> TPQTabletTests::Single_PQTablet_And_Multiple_Partitions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestDropExternalDataSource [GOOD] Test command err: Trying to start YDB, gRPC: 22194, MsgBus: 5495 2025-06-25T14:58:45.128245Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901581898923714:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:45.137317Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00136a/r3tmp/tmpTV5Mae/pdisk_1.dat 2025-06-25T14:58:45.548957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:45.549062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:45.553913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:45.558509Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:45.566393Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901581898923554:2080] 1750863525047829 != 1750863525047832 TServer::EnableGrpc on GrpcPort 22194, node 1 2025-06-25T14:58:45.659610Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:45.659636Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:45.659647Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:45.659757Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5495 2025-06-25T14:58:46.136731Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5495 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:46.334809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:46.354201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:58:46.376586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:58:46.397168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:351) 2025-06-25T14:58:46.418010Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901586193891522:2342] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/f1/f2/external_table\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeExternalTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:46.418268Z node 1 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715660, ProxyStatus: ExecComplete, SchemeShardReason: Check failed: path: '/Root/f1/f2/external_table', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeExternalTable, state: EPathStateNoChanges)
: Error: Scheme operation failed, status: ExecComplete, reason: Check failed: path: '/Root/f1/f2/external_table', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeExternalTable, state: EPathStateNoChanges) Trying to start YDB, gRPC: 2590, MsgBus: 27469 2025-06-25T14:58:48.433382Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901596495422882:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:48.433422Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00136a/r3tmp/tmpmezGrI/pdisk_1.dat 2025-06-25T14:58:48.601436Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:48.601515Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:48.604627Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:48.605160Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2590, node 2 2025-06-25T14:58:48.753099Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:48.753125Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:48.753134Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:48.753306Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27469 TClient is connected to server localhost:27469 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:49.243333Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:49.268990Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) 2025-06-25T14:58:49.285045Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:351) Trying to start YDB, gRPC: 24164, MsgBus: 2565 2025-06-25T14:58:51.923601Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901607163471584:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:51.923669Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00136a/r3tmp/tmpdJ3QaU/pdisk_1.dat 2025-06-25T14:58:52.044553Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:52.048402Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901607163471564:2080] 1750863531923287 != 1750863531923290 TServer::EnableGrpc on GrpcPort 24164, node 3 2025-06-25T14:58:52.084874Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:52.084933Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:52.086561Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:52.094263Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:52.094281Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:52.094287Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:52.094391Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2565 TClient is connected to server localhost:2565 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:52.469049Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:52.485388Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) >> TPartitionTests::CommitOffsetRanges [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_There_Are_Fewer_Senders_Than_Recipients [GOOD] >> TLocksTest::Range_IncorrectDot2 [GOOD] >> TPartitionTests::ChangeConfig >> TLocksTest::CK_Range_GoodLock [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_The_Number_Of_Senders_And_Recipients_Match >> TPQTabletTests::Config_TEvTxCommit_After_Restart [GOOD] >> TSourceIdTests::ExpensiveCleanup [GOOD] >> TPQTabletTests::One_New_Partition_In_Another_Tablet [GOOD] >> TPQTabletTests::Single_PQTablet_And_Multiple_Partitions [GOOD] >> LocalTableWriter::SupportedTypes [GOOD] >> TPQTabletTests::Limit_On_The_Number_Of_Transactons >> TPQTabletTests::Test_Waiting_For_TEvReadSet_When_The_Number_Of_Senders_And_Recipients_Match [GOOD] >> LocalTableWriter::DecimalKeys [GOOD] >> TPQTabletTests::All_New_Partitions_In_Another_Tablet >> LocalTableWriter::DataAlongWithHeartbeat [GOOD] >> TLocksTest::GoodSameShardLock [GOOD] >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Recipients >> TPartitionTests::TabletConfig_Is_Newer_That_PartitionConfig >> LocalTableWriter::ConsistentWrite [GOOD] >> TPQTabletTests::ProposeTx_Unknown_Partition_1 >> TPartitionTests::ChangeConfig [GOOD] >> TPQTestInternal::TestPartitionedBlobSimpleTest [GOOD] >> TPQTestInternal::TestPartitionedBigTest >> TPartitionTests::ConflictingActsInSeveralBatches >> TPQTabletTests::Test_Waiting_For_TEvReadSet_Without_Recipients [GOOD] >> TPQTabletTests::All_New_Partitions_In_Another_Tablet [GOOD] >> TPQTabletTests::ProposeTx_Unknown_Partition_1 [GOOD] >> TLocksTest::BrokenNullLock [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TSourceIdTests::ExpensiveCleanup [GOOD] Test command err: 
2025-06-25T14:58:55.190407Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.190490Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:55.714186Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.714252Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info processed_blobs=41800 quoted_time=9.980000s Iteration 0 Iteration 1 Iteration 2 Iteration 3 Iteration 4 Iteration 5 Iteration 6 Iteration 7 Iteration 8 Iteration 9 Iteration 10 Iteration 11 Iteration 12 Iteration 13 Iteration 14 Iteration 15 Iteration 16 Iteration 17 Iteration 18 Iteration 19 Iteration 20 Iteration 21 Iteration 22 Iteration 23 Iteration 24 Iteration 25 Iteration 26 Iteration 27 Iteration 28 Iteration 29 Iteration 30 Iteration 31 Iteration 32 Iteration 33 Iteration 34 Iteration 35 Iteration 36 Iteration 37 Iteration 38 Iteration 39 Iteration 40 Iteration 41 Iteration 42 Iteration 43 Iteration 44 Iteration 45 Iteration 46 Iteration 47 Iteration 48 Iteration 49 Iteration 50 Iteration 51 Iteration 52 Iteration 53 Iteration 54 Iteration 55 Iteration 56 Iteration 57 Iteration 58 Iteration 59 Iteration 60 Iteration 61 Iteration 62 Iteration 63 Iteration 64 Iteration 65 Iteration 66 Iteration 67 Iteration 68 Iteration 69 Iteration 70 Iteration 71 Iteration 72 Iteration 73 Iteration 74 Iteration 75 Iteration 76 Iteration 77 Iteration 78 Iteration 79 Iteration 80 Iteration 81 Iteration 82 Iteration 83 Iteration 84 Iteration 85 Iteration 86 Iteration 87 Iteration 88 Iteration 89 Iteration 90 Iteration 91 Iteration 92 Iteration 93 Iteration 94 Iteration 95 Iteration 96 Iteration 97 Iteration 98 Iteration 99 Iteration 100 Iteration 101 Iteration 102 Iteration 103 Iteration 104 Iteration 105 Iteration 106 Iteration 107 Iteration 108 Iteration 109 Iteration 110 Iteration 111 Iteration 112 Iteration 113 Iteration 114 Iteration 115 Iteration 116 Iteration 117 Iteration 118 Iteration 119 Iteration 120 Iteration 121 Iteration 122 Iteration 123 Iteration 124 Iteration 125 Iteration 126 Iteration 127 Iteration 128 Iteration 129 Iteration 130 Iteration 131 Iteration 132 Iteration 133 Iteration 134 Iteration 135 Iteration 136 Iteration 137 Iteration 138 Iteration 139 Iteration 140 Iteration 141 Iteration 142 Iteration 143 Iteration 144 Iteration 145 Iteration 146 Iteration 147 Iteration 148 Iteration 149 Iteration 150 Iteration 151 Iteration 152 Iteration 153 Iteration 154 Iteration 155 Iteration 156 Iteration 157 Iteration 158 Iteration 159 Iteration 160 Iteration 161 Iteration 162 Iteration 163 Iteration 164 Iteration 165 Iteration 166 Iteration 167 Iteration 168 Iteration 169 Iteration 170 Iteration 171 Iteration 172 Iteration 173 Iteration 174 Iteration 175 Iteration 176 Iteration 177 Iteration 178 Iteration 179 Iteration 180 Iteration 181 Iteration 182 Iteration 183 Iteration 184 Iteration 185 Iteration 186 Iteration 187 Iteration 188 Iteration 189 Iteration 190 Iteration 191 Iteration 192 Iteration 193 Iteration 194 Iteration 195 Iteration 196 Iteration 197 Iteration 198 Iteration 199 Iteration 200 Iteration 201 Iteration 202 Iteration 203 Iteration 204 Iteration 205 Iteration 206 Iteration 207 Iteration 208 Iteration 209 Iteration 210 Iteration 211 Iteration 212 Iteration 213 Iteration 214 Iteration 215 Iteration 216 
Iteration 217 Iteration 218 ... Iteration 975 Iteration 976 Iteration
977 Iteration 978 Iteration 979 Iteration 980 Iteration 981 Iteration 982 Iteration 983 Iteration 984 Iteration 985 Iteration 986 Iteration 987 Iteration 988 Iteration 989 Iteration 990 Iteration 991 Iteration 992 Iteration 993 Iteration 994 Iteration 995 Iteration 996 Iteration 997 Iteration 998 Iteration 999 >> TPQTabletTests::TEvReadSet_comes_before_TEvPlanStep >> TPQTabletTests::ProposeTx_Unknown_WriteId >> TLocksTest::Range_EmptyKey [GOOD] >> TPQTabletTests::After_Restarting_The_Tablet_Sends_A_TEvReadSet_For_Transactions_In_The_EXECUTED_State >> TPQTabletTests::ProposeTx_Unknown_WriteId [GOOD] >> TPQTabletTests::TEvReadSet_comes_before_TEvPlanStep [GOOD] >> TPartitionTests::TabletConfig_Is_Newer_That_PartitionConfig [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::SupportedTypes [GOOD] Test command err: 2025-06-25T14:58:53.878582Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901615666041400:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:53.879001Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016b3/r3tmp/tmppLriJ2/pdisk_1.dat 2025-06-25T14:58:54.302417Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:54.314773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:54.314891Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:54.319095Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7789 TServer::EnableGrpc on GrpcPort 23798, node 1 2025-06-25T14:58:54.602335Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:54.602359Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:54.602370Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:54.602540Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:54.878762Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7789 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:55.145886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:55.173923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750863535302 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "int32_value" Type: "Int32" TypeId: 1 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "ui... 
(TRUNCATED) 2025-06-25T14:58:55.342757Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901624255976565:2353] Handshake: worker# [1:7519901624255976472:2292] 2025-06-25T14:58:55.343047Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901624255976565:2353] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:58:55.343338Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901624255976565:2353] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:58:55.343389Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901624255976565:2353] Send handshake: worker# [1:7519901624255976472:2292] 2025-06-25T14:58:55.346350Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901624255976565:2353] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 45b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 4 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 41b Offset: 5 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 41b Offset: 6 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 7 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 44b Offset: 8 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 66b Offset: 9 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 71b Offset: 10 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 
72b Offset: 11 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 12 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 13 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 51b Offset: 14 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 58b Offset: 15 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 51b Offset: 16 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 54b Offset: 17 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 18 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 76b Offset: 19 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 20 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 54b Offset: 21 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 61b Offset: 22 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 51b Offset: 23 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 24 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 46b Offset: 25 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 47b Offset: 26 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 50b Offset: 27 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 28 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 72b Offset: 29 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 30 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 64b Offset: 31 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-25T14:58:55.347292Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901624255976565:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 45 },{ Order: 2 BodySize: 45 },{ Order: 3 BodySize: 45 },{ Order: 4 BodySize: 45 },{ Order: 5 BodySize: 41 },{ Order: 6 BodySize: 41 },{ Order: 7 BodySize: 45 },{ Order: 8 BodySize: 44 },{ Order: 9 BodySize: 66 },{ Order: 10 BodySize: 71 },{ Order: 11 BodySize: 72 },{ Order: 12 BodySize: 49 },{ Order: 13 BodySize: 48 },{ Order: 14 BodySize: 51 },{ Order: 15 BodySize: 58 },{ Order: 16 BodySize: 51 },{ Order: 17 BodySize: 54 },{ Order: 18 BodySize: 57 },{ Order: 19 BodySize: 76 },{ Order: 20 BodySize: 45 },{ Order: 21 BodySize: 54 },{ Order: 22 BodySize: 61 },{ Order: 23 BodySize: 51 },{ Order: 24 BodySize: 45 },{ Order: 25 BodySize: 46 },{ Order: 26 BodySize: 47 },{ Order: 27 BodySize: 50 },{ Order: 28 BodySize: 49 },{ Order: 29 BodySize: 72 },{ Order: 30 BodySize: 57 },{ Order: 31 BodySize: 64 }] } 2025-06-25T14:58:55.347640Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: 
[TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901624255976568:2353] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-25T14:58:55.347679Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901624255976565:2353] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:58:55.347998Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901624255976568:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 3 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 4 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 5 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 41b },{ Order: 6 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 41b },{ Order: 7 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 8 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 44b },{ Order: 9 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 66b },{ Order: 10 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 71b },{ Order: 11 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 72b },{ Order: 12 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 13 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b },{ Order: 14 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 51b },{ Order: 15 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 58b },{ Order: 16 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 51b },{ Order: 17 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 54b },{ Order: 18 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 19 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 76b },{ Order: 20 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 21 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 54b },{ Order: 22 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 61b },{ Order: 23 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 51b },{ Order: 24 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 25 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 46b },{ Order: 26 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 47b },{ Order: 27 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 50b },{ Order: 28 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 29 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 72b },{ Order: 30 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 31 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 64b }] } 2025-06-25T14:58:55.430140Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: 
[TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901624255976568:2353] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:58:55.430217Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901624255976565:2353] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:58:55.430289Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901624255976565:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31] } >> TLocksTest::BrokenSameShardLock [GOOD] >> TPQTabletTests::TEvReadSet_Is_Not_Sent_Ahead_Of_Time >> TPQTabletTests::ProposeTx_Unknown_Partition_2 >> TPartitionTests::ShadowPartitionCountersRestore >> TPQTabletTests::After_Restarting_The_Tablet_Sends_A_TEvReadSet_For_Transactions_In_The_EXECUTED_State [GOOD] >> TPartitionTests::SetOffset ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::DataAlongWithHeartbeat [GOOD] Test command err: 2025-06-25T14:58:53.868847Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901617516293729:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:53.868929Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016b1/r3tmp/tmpmDZE6m/pdisk_1.dat 2025-06-25T14:58:54.272953Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:54.276479Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901617516293705:2080] 1750863533865244 != 1750863533865247 2025-06-25T14:58:54.313274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:54.313404Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:54.316635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19076 TServer::EnableGrpc on GrpcPort 32109, node 1 2025-06-25T14:58:54.604302Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:54.604380Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:54.604391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:54.604540Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:54.883343Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19076 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:55.145601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:55.177916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750863535295 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-25T14:58:55.339427Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626106228986:2353] Handshake: worker# [1:7519901626106228987:2354] 2025-06-25T14:58:55.339721Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626106228986:2353] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:58:55.339951Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626106228986:2353] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:58:55.340000Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626106228986:2353] Send handshake: worker# [1:7519901626106228987:2354] 2025-06-25T14:58:55.344773Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626106228986:2353] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 19b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-25T14:58:55.350178Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626106228986:2353] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-25T14:58:55.350298Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626106228986:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 }] } 2025-06-25T14:58:55.350435Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901626106228990:2353] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-25T14:58:55.350478Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: 
[LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626106228986:2353] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:58:55.350586Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901626106228990:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-25T14:58:55.354057Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901626106228990:2353] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:58:55.354148Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626106228986:2353] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:58:55.354237Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626106228986:2353] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::DecimalKeys [GOOD] Test command err: 2025-06-25T14:58:53.868846Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901615140554859:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:53.868929Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016cb/r3tmp/tmpPxG4el/pdisk_1.dat 2025-06-25T14:58:54.252679Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:54.321961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:54.322057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:54.324204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10538 TServer::EnableGrpc on GrpcPort 61387, node 1 2025-06-25T14:58:54.601833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:54.601858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:54.601865Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:54.601965Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:54.882674Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10538 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:55.176293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:55.196330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:58:55.201852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750863535316 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Decimal(1,0)" TypeId: 4865 Id: 1 NotNull: false TypeInfo { DecimalPrecision: 1 DecimalScale: 0 } IsBuildInProgress: false } Columns { Name: "value" Type: "Decimal(35,10)" TypeId: 4865 I... 
(TRUNCATED) 2025-06-25T14:58:55.347533Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901623730490102:2352] Handshake: worker# [1:7519901623730490010:2292] 2025-06-25T14:58:55.347837Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901623730490102:2352] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:58:55.348088Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901623730490102:2352] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Decimal(1,0) : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:58:55.348128Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901623730490102:2352] Send handshake: worker# [1:7519901623730490010:2292] 2025-06-25T14:58:55.348624Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901623730490102:2352] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 57b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-25T14:58:55.348848Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901623730490102:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 57 },{ Order: 2 BodySize: 57 },{ Order: 3 BodySize: 57 }] } 2025-06-25T14:58:55.348993Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901623730490105:2352] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-25T14:58:55.349051Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901623730490102:2352] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:58:55.349166Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901623730490105:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 3 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b }] } 2025-06-25T14:58:55.353331Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901623730490105:2352] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:58:55.353405Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901623730490102:2352] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:58:55.353461Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901623730490102:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3] } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::ConsistentWrite [GOOD] Test command err: 2025-06-25T14:58:53.890310Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901617574162717:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:53.890355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016bb/r3tmp/tmp5R9i1f/pdisk_1.dat 2025-06-25T14:58:54.268232Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901617574162691:2080] 1750863533889881 != 1750863533889884 2025-06-25T14:58:54.268245Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:54.300111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:54.300209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:54.303831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18201 TServer::EnableGrpc on GrpcPort 6362, node 1 2025-06-25T14:58:54.604181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:54.604204Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:54.604211Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:54.604371Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2025-06-25T14:58:54.899403Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18201 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:55.160823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:55.177901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750863535295 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-25T14:58:55.318566Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handshake: worker# [1:7519901626164097875:2291] 2025-06-25T14:58:55.318935Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:58:55.319214Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-25T14:58:55.319243Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Send handshake: worker# [1:7519901626164097875:2291] 2025-06-25T14:58:55.319557Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-25T14:58:55.323521Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-25T14:58:55.323660Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 },{ Order: 2 BodySize: 48 },{ Order: 3 BodySize: 48 }] } 2025-06-25T14:58:55.326429Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901626164097971:2352] 
Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-25T14:58:55.326474Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:58:55.326631Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901626164097971:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b },{ Order: 2 Group: 0 Step: 2 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b },{ Order: 3 Group: 0 Step: 3 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-25T14:58:55.329888Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901626164097971:2352] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:58:55.329947Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:58:55.329992Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3] } 2025-06-25T14:58:55.331446Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 19b Offset: 4 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-25T14:58:55.331873Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 49b Offset: 5 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 6 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 7 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 8 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-25T14:58:55.332259Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 20 TxId: 0 } TxId: 2 } VersionTxIds { Version { Step: 30 TxId: 0 } TxId: 3 } 2025-06-25T14:58:55.332354Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 5 BodySize: 49 },{ Order: 6 BodySize: 49 },{ Order: 7 BodySize: 49 },{ Order: 8 BodySize: 49 }] } 2025-06-25T14:58:55.332501Z node 1 :REPLICATION_SERVICE DEBUG: 
base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901626164097971:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 5 Group: 0 Step: 11 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 6 Group: 0 Step: 12 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 7 Group: 0 Step: 21 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 8 Group: 0 Step: 22 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b }] } 2025-06-25T14:58:55.335055Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901626164097971:2352] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:58:55.335094Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:58:55.335155Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [5,6,7,8] } 2025-06-25T14:58:55.335622Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 49b Offset: 9 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 10 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-25T14:58:55.335799Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 9 BodySize: 49 },{ Order: 10 BodySize: 49 }] } 2025-06-25T14:58:55.335919Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901626164097971:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 9 Group: 0 Step: 13 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 10 Group: 0 Step: 23 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b }] } 2025-06-25T14:58:55.341162Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7519901626164097971:2352] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-25T14:58:55.341215Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-25T14:58:55.341257Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [9,10] } 2025-06-25T14:58:55.341706Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 
72057594046644480, LocalPathId: 2][1:7519901626164097968:2352] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 19b Offset: 11 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } >> TPQTabletTests::Limit_On_The_Number_Of_Transactons [GOOD] >> TPQTest::DirectReadBadSessionOrPipe [GOOD] >> TPQTest::DirectReadOldPipe >> TPQTabletTests::ProposeTx_Unknown_Partition_2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_IncorrectDot2 [GOOD] Test command err: 2025-06-25T14:58:16.512501Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901455135955486:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:16.512597Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207e/r3tmp/tmpAlfx26/pdisk_1.dat 2025-06-25T14:58:16.800129Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:16.812492Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901455135955464:2080] 1750863496510973 != 1750863496510976 2025-06-25T14:58:16.899099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:16.899202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:16.900928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12001 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:17.119821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:17.136675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:17.151663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:17.279702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:17.369282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:17.531170Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:19.599008Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901470970521139:2217];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:19.599080Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207e/r3tmp/tmptztlDb/pdisk_1.dat 2025-06-25T14:58:19.737187Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:19.748584Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901470970520962:2080] 1750863499590572 != 1750863499590575 2025-06-25T14:58:19.772862Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:19.772956Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:19.775043Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3361 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:19.954276Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:19.971836Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:20.046127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:20.094360Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:23.067056Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901487116816655:2169];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207e/r3tmp/tmpo3NJXH/pdisk_1.dat 2025-06-25T14:58:23.092002Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:23.143122Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:23.153440Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901487116816511:2080] 1750863503009926 != 1750863503009929 2025-06-25T14:58:23.181361Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:23.181457Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:23.186884Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2605 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:23.325422Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 2025-06-25T14:58:23.346518Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/action ... 
T WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:43.135361Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519901567883825855:2080] 1750863522981386 != 1750863522981389 2025-06-25T14:58:43.155243Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:43.155336Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:43.157184Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3453 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:43.437917Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:43.463462Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:43.470655Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:43.571272Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:43.651594Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:47.488769Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901591432732355:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:47.488827Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207e/r3tmp/tmpNBH3NZ/pdisk_1.dat 2025-06-25T14:58:47.660714Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:47.675947Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:47.676056Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:47.681294Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15748 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:47.993333Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:48.016174Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:48.099435Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:58:48.158952Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:51.983025Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901608844233055:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:51.983111Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207e/r3tmp/tmpWvakIa/pdisk_1.dat 2025-06-25T14:58:52.101358Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:52.102637Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519901608844233036:2080] 1750863531982550 != 1750863531982553 2025-06-25T14:58:52.122240Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:52.122328Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:52.123672Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18579 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:52.401675Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:52.425871Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:52.487044Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:52.547037Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> TPQTabletTests::Read_TEvTxCommit_After_Restart >> TPQTabletTests::Non_Kafka_Transaction_Supportive_Partitions_Should_Not_Be_Deleted_After_Timeout >> TPQTestInternal::TestPartitionedBigTest [GOOD] >> TPQTestInternal::TestToHex [GOOD] >> TPQUserInfoTest::UserDataDeprecatedSerializaion [GOOD] >> TPQUtilsTest::TLastCounter [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::CK_Range_GoodLock [GOOD] Test command err: 2025-06-25T14:58:17.063114Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901462657035997:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:17.066184Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207d/r3tmp/tmp9lcSsy/pdisk_1.dat 2025-06-25T14:58:17.415346Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:17.467721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:17.467811Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:17.469561Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3399 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:17.727399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:17.745429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:17.760274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:17.902749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:17.977517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:18.062947Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:20.272414Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901473872608464:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:20.276475Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207d/r3tmp/tmpZSFHB2/pdisk_1.dat 2025-06-25T14:58:20.451303Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:20.456471Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901473872608445:2080] 1750863500263832 != 1750863500263835 2025-06-25T14:58:20.484112Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:20.484197Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:20.485473Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7997 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:20.717504Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:20.730693Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:20.744101Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:20.747658Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:20.828056Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:58:20.873311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:23.567561Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901488856730404:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:23.567600Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207d/r3tmp/tmpvd4N17/pdisk_1.dat 2025-06-25T14:58:23.698827Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:23.703369Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901488856730381:2080] 1750863503566713 != 1750863503566716 2025-06-25T14:58:23.729784Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:23.729867Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:23.731565Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1957 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:23.904457Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:23.908559Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72 ... 5T14:58:43.264304Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519901574603111069:2080] 1750863523111216 != 1750863523111219 2025-06-25T14:58:43.277170Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61927 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:43.645744Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-25T14:58:43.668455Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:43.756161Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:43.840592Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:44.125308Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:47.703503Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901589604666962:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:47.703560Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207d/r3tmp/tmpIEruep/pdisk_1.dat 2025-06-25T14:58:47.874984Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:47.892215Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:47.892337Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:47.895519Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519901589604666940:2080] 1750863527702501 != 1750863527702504 2025-06-25T14:58:47.897831Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20945 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:48.235370Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:48.243501Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:48.254399Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:48.341684Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:48.416702Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:52.175867Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901611862321345:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:52.175934Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00207d/r3tmp/tmpF3sX69/pdisk_1.dat 2025-06-25T14:58:52.315090Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:52.317067Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519901611862321323:2080] 1750863532174696 != 1750863532174699 2025-06-25T14:58:52.335062Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:52.335178Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:52.337840Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15210 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:52.625774Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:52.650315Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:52.723196Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:52.795096Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> TPartitionTests::ShadowPartitionCountersRestore [GOOD] |90.2%| [TA] $(B)/ydb/core/tx/replication/service/ut_table_writer/test-results/unittest/{meta.json ... results_accumulator.log} |90.3%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_table_writer/test-results/unittest/{meta.json ... results_accumulator.log} >> TPQTabletTests::Read_TEvTxCommit_After_Restart [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTabletTests::After_Restarting_The_Tablet_Sends_A_TEvReadSet_For_Transactions_In_The_EXECUTED_State [GOOD] Test command err: 2025-06-25T14:58:55.177461Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:58:55.193032Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037927937] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:58:55.193317Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037927937] doesn't have tx info 2025-06-25T14:58:55.193373Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:58:55.193429Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-25T14:58:55.193468Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:58:55.193527Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.193602Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:55.215083Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:180:2193], now have 1 active actors on pipe 2025-06-25T14:58:55.215244Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:58:55.242234Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: 
METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:58:55.245622Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:58:55.245778Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.246761Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:58:55.246870Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:55.247266Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:55.247642Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-25T14:58:55.249208Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-25T14:58:55.249280Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:188:2199] 2025-06-25T14:58:55.249329Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:58:55.249802Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-25T14:58:55.249935Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-25T14:58:55.249995Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-25T14:58:55.250176Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:58:55.250346Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:58:55.250677Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:55.252945Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:58:55.253015Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:55.253233Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:195:2204], now have 1 active actors on pipe 2025-06-25T14:58:55.254698Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:198:2206], now have 1 active actors on pipe 2025-06-25T14:58:55.254786Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1721: [PQ: 72057594037927937] Handle TEvPersQueue::TEvDropTablet 2025-06-25T14:58:55.674782Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:58:55.677954Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:58:55.678180Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037927937] doesn't have tx info 2025-06-25T14:58:55.678216Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:58:55.678244Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-25T14:58:55.678272Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:58:55.678307Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.678347Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:55.690185Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [2:182:2195], now have 1 active actors on pipe 2025-06-25T14:58:55.690284Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:58:55.690485Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037927937] Config update version 2(current 0) received from actor [2:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-25T14:58:55.692045Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-25T14:58:55.692129Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.692979Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 2 actor [2:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: 
"/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-25T14:58:55.693049Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:55.693095Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:55.693418Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:55.693607Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:190:2201] 2025-06-25T14:58:55.694231Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-25T14:58:55.694272Z node 2 :PERSQUEUE ... 090Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:57.936138Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-06-25T14:58:57.936170Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67891, State CALCULATED 2025-06-25T14:58:57.936213Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67891 State CALCULATED FrontTxId 67891 2025-06-25T14:58:57.936254Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67891, NewState WAIT_RS 2025-06-25T14:58:57.936301Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67891 moved from CALCULATED to WAIT_RS 2025-06-25T14:58:57.936366Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4027: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 2025-06-25T14:58:57.936402Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4037: [PQ: 72057594037927937] Send TEvReadSet to tablet 22222 2025-06-25T14:58:57.936457Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4521: [PQ: 72057594037927937] HaveParticipantsDecision 0 2025-06-25T14:58:57.940335Z node 6 :PERSQUEUE DEBUG: pqtablet_mock.cpp:87: Client pipe to tablet 72057594037927937 from 22222 is reset 2025-06-25T14:58:57.963421Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:58:57.965363Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037927937] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:58:57.966407Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:742: [PQ: 72057594037927937] has a tx info 2025-06-25T14:58:57.966458Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037927937] PlanStep 110, PlanTxId 67891, ExecStep 110, ExecTxId 67891 2025-06-25T14:58:57.966577Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1007: [PQ: 72057594037927937] ReadRange pair. Key tx_00000000000000067890, Status 0 2025-06-25T14:58:57.966654Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1016: [PQ: 72057594037927937] Restore Tx. TxId: 67890, Step: 100, State: EXECUTED, WriteId: 2025-06-25T14:58:57.966715Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1007: [PQ: 72057594037927937] ReadRange pair. 
Key tx_00000000000000067891, Status 0 2025-06-25T14:58:57.966744Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1016: [PQ: 72057594037927937] Restore Tx. TxId: 67891, Step: 110, State: CALCULATED, WriteId: 2025-06-25T14:58:57.966762Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1019: [PQ: 72057594037927937] Fix tx state 2025-06-25T14:58:57.966807Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037927937] Txs.size=2, PlannedTxs.size=2 2025-06-25T14:58:57.966847Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4957: [PQ: 72057594037927937] top tx queue (100, 67890) 2025-06-25T14:58:57.966891Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4977: [PQ: 72057594037927937] TxsOrder: 67890 EXECUTED 0 2025-06-25T14:58:57.966929Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4977: [PQ: 72057594037927937] TxsOrder: 67891 PLANNED 0 2025-06-25T14:58:57.967463Z node 6 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:57.967500Z node 6 :PERSQUEUE INFO: pq_impl.cpp:788: [PQ: 72057594037927937] has a tx writes info 2025-06-25T14:58:57.967591Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:57.967840Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:57.968074Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [6:364:2340] 2025-06-25T14:58:57.968904Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitDiskStatusStep 2025-06-25T14:58:57.969854Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitMetaStep 2025-06-25T14:58:57.970137Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInfoRangeStep 2025-06-25T14:58:57.970738Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitDataRangeStep 2025-06-25T14:58:57.970964Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:58:57.971011Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:58:57.971060Z node 6 :PERSQUEUE INFO: partition_init.cpp:895: [topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:58:57.971099Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-25T14:58:57.971147Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 3 [6:364:2340] 2025-06-25T14:58:57.971206Z node 6 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:58:57.971254Z node 6 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-25T14:58:57.971322Z node 6 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 6 2025-06-25T14:58:57.971466Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:57.971540Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4027: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 2025-06-25T14:58:57.971579Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4037: [PQ: 72057594037927937] Send TEvReadSet to tablet 22222 2025-06-25T14:58:57.971660Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state EXECUTED 2025-06-25T14:58:57.971691Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State EXECUTED 2025-06-25T14:58:57.971722Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State EXECUTED FrontTxId 67890 2025-06-25T14:58:57.971755Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72057594037927937] TPersQueue::SendEvReadSetAckToSenders 2025-06-25T14:58:57.971790Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS_ACKS 2025-06-25T14:58:57.971862Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from EXECUTED to WAIT_RS_ACKS 2025-06-25T14:58:57.971908Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 0/1 2025-06-25T14:58:57.971939Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72057594037927937] HaveAllRecipientsReceive 0, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-25T14:58:57.971966Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 0/1 2025-06-25T14:58:57.972004Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PLANNED 2025-06-25T14:58:57.972030Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67891, State PLANNED 2025-06-25T14:58:57.972055Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67891 State PLANNED FrontTxId 67891 2025-06-25T14:58:57.972083Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4466: [PQ: 72057594037927937] TxQueue.size 1 2025-06-25T14:58:57.972121Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:837: [PQ: 72057594037927937] New ExecStep 110, ExecTxId 67891 2025-06-25T14:58:57.972187Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67891, NewState CALCULATING 2025-06-25T14:58:57.972231Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67891 moved from PLANNED to CALCULATING 2025-06-25T14:58:57.972341Z node 6 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 110, TxId 67891 2025-06-25T14:58:57.972778Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3537: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCalcPredicateResult Step 110, TxId 67891, Partition 0, Predicate 1 2025-06-25T14:58:57.972809Z node 6 :PERSQUEUE DEBUG: transaction.cpp:218: [TxId: 67891] Handle TEvTxCalcPredicateResult 2025-06-25T14:58:57.972838Z node 6 :PERSQUEUE DEBUG: transaction.cpp:267: [TxId: 67891] Partition responses 1/1 2025-06-25T14:58:57.972868Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state CALCULATING 
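The partition initializer traced above runs a fixed sequence of steps before reporting "Initializing completed." (TInitConfigStep, TInitInternalFieldsStep, TInitDiskStatusStep, TInitMetaStep, TInitInfoRangeStep, TInitDataRangeStep, TInitDataStep, TInitEndWriteTimestampStep). A minimal sketch of such a sequential step pipeline follows; the step names are the ones logged above, but the driver code is an illustration only and not the actual partition_init.cpp implementation.

    // Illustrative sketch of a sequential initializer pipeline, assuming each
    // step is a callable that may fail; step names are taken from the log.
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    struct TInitStep {
        std::string Name;
        std::function<bool()> Run;   // returns false on failure
    };

    bool RunInitializer(const std::vector<TInitStep>& steps) {
        for (const auto& step : steps) {
            std::cout << "Start initializing step " << step.Name << "\n";
            if (!step.Run()) {
                return false;        // abort on the first failing step
            }
        }
        std::cout << "Initializing completed.\n";
        return true;
    }

    int main() {
        const std::vector<TInitStep> steps = {
            {"TInitConfigStep",            [] { return true; }},
            {"TInitInternalFieldsStep",    [] { return true; }},
            {"TInitDiskStatusStep",        [] { return true; }},
            {"TInitMetaStep",              [] { return true; }},
            {"TInitInfoRangeStep",         [] { return true; }},
            {"TInitDataRangeStep",         [] { return true; }},
            {"TInitDataStep",              [] { return true; }},
            {"TInitEndWriteTimestampStep", [] { return true; }},
        };
        return RunInitializer(steps) ? 0 : 1;
    }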
2025-06-25T14:58:57.972897Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67891, State CALCULATING 2025-06-25T14:58:57.972936Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67891 State CALCULATING FrontTxId 67891 2025-06-25T14:58:57.972971Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72057594037927937] Received 1, Expected 1 2025-06-25T14:58:57.973008Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67891, NewState CALCULATED 2025-06-25T14:58:57.973046Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67891 moved from CALCULATING to CALCULATED 2025-06-25T14:58:57.973091Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67891 2025-06-25T14:58:57.973274Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67891] save tx TxId: 67891 State: CALCULATED MinStep: 152 MaxStep: 30152 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 110 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 25769805968 } Partitions { } 2025-06-25T14:58:57.973379Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:57.973630Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2931: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-25T14:58:57.973672Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2936: [PQ: 72057594037927937] Connected to tablet 22222 2025-06-25T14:58:57.976393Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:57.976437Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-06-25T14:58:57.976469Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67891, State CALCULATED 2025-06-25T14:58:57.976511Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67891 State CALCULATED FrontTxId 67891 2025-06-25T14:58:57.976615Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67891, NewState WAIT_RS 2025-06-25T14:58:57.976665Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67891 moved from CALCULATED to WAIT_RS 2025-06-25T14:58:57.976705Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4027: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 
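Taken together, the "moved from X to Y" entries above trace a PersQueue transaction through PLANNED -> CALCULATING -> CALCULATED -> WAIT_RS and, once committed, EXECUTED -> WAIT_RS_ACKS; this is also the order in which the restarted tablet replays TxId 67890 (restored in EXECUTED) and TxId 67891 (restored in CALCULATED). The sketch below only encodes that observed ordering as a transition check, assuming the linear state order implied by the log; it is reconstructed from this output and is not the real state handling in pq_impl.cpp.

    // Sketch of the transaction state order observed in this log; an
    // illustration only, not the actual PersQueue transaction code.
    #include <array>

    enum class ETxState { PLANNED, CALCULATING, CALCULATED, WAIT_RS, EXECUTED, WAIT_RS_ACKS };

    // True if 'next' directly follows 'current' in the progression seen in
    // the "TxId ... moved from X to Y" log entries.
    bool IsExpectedTransition(ETxState current, ETxState next) {
        return static_cast<int>(next) == static_cast<int>(current) + 1;
    }

    int main() {
        // Transitions logged for TxId 67891 after the tablet restart.
        const std::array<ETxState, 4> seen = {
            ETxState::PLANNED, ETxState::CALCULATING, ETxState::CALCULATED, ETxState::WAIT_RS,
        };
        for (std::size_t i = 0; i + 1 < seen.size(); ++i) {
            if (!IsExpectedTransition(seen[i], seen[i + 1])) {
                return 1;   // unexpected ordering
            }
        }
        return 0;
    }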
2025-06-25T14:58:57.976736Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4037: [PQ: 72057594037927937] Send TEvReadSet to tablet 22222 2025-06-25T14:58:57.976789Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4521: [PQ: 72057594037927937] HaveParticipantsDecision 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::GoodSameShardLock [GOOD] Test command err: 2025-06-25T14:58:17.889304Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901462827426526:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:17.889696Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002079/r3tmp/tmpY3uDnW/pdisk_1.dat 2025-06-25T14:58:18.180853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:18.180963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:18.183285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:18.219773Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:18.224403Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901462827426391:2080] 1750863497862544 != 1750863497862547 TClient is connected to server localhost:5930 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:18.540418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:18.570710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:18.707402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:18.768303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:18.890290Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:21.110176Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901477425061388:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:21.110268Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002079/r3tmp/tmpPQk3Z0/pdisk_1.dat 2025-06-25T14:58:21.289847Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:21.303832Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:21.304359Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:21.306019Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12555 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:21.549802Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:21.557218Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:21.566869Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:21.572163Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:21.653195Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:21.709107Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002079/r3tmp/tmpedWzRo/pdisk_1.dat 2025-06-25T14:58:24.573288Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:24.633456Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:24.653402Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:24.653474Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:24.654666Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8769 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 
2025-06-25T14:58:24.861436Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:24.866826Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-25T14:58:24.882474Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:24.954751Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsa ... ecting 2025-06-25T14:58:44.104513Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5054 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:44.413907Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:44.434003Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:44.441935Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:44.519456Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:44.583463Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:44.838294Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:48.102978Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901594641404803:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:48.128759Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002079/r3tmp/tmpvZo74h/pdisk_1.dat 2025-06-25T14:58:48.267787Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:48.269001Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519901594641404784:2080] 1750863528085041 != 1750863528085044 2025-06-25T14:58:48.286896Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:48.287002Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:48.289232Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62032 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:48.557942Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:48.564282Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:48.587053Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:48.660774Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:48.724037Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:52.586887Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901610198872341:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:52.586939Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002079/r3tmp/tmpbBRdJX/pdisk_1.dat 2025-06-25T14:58:52.705677Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:52.706815Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519901610198872309:2080] 1750863532586174 != 1750863532586177 2025-06-25T14:58:52.735532Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:52.735625Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:52.736921Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4600 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:52.981548Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:52.997640Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:53.068587Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:53.145276Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
>> TPartitionTests::ConflictingSrcIdForTxInDifferentBatches [GOOD] >> TPartitionTests::TestNonConflictingActsBatchOk >> TPartitionTests::SetOffset [GOOD] >> TPartitionTests::ConflictingSrcIdForTxWithHead >> TPQTabletTests::TEvReadSet_Is_Not_Sent_Ahead_Of_Time [GOOD] >> TPQTabletTests::TEvReadSet_For_A_Non_Existent_Tablet >> TPartitionTests::OldPlanStep |90.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQUtilsTest::TLastCounter [GOOD] >> TPQTabletTests::Non_Kafka_Transaction_Supportive_Partitions_Should_Not_Be_Deleted_After_Timeout [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::BrokenNullLock [GOOD] Test command err: 2025-06-25T14:58:18.320778Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901464926185549:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:18.320831Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002078/r3tmp/tmptS1F1D/pdisk_1.dat 2025-06-25T14:58:18.749733Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:18.773877Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:18.773971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:18.775520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63909 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:19.095040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:19.109322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:19.117650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:19.123287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:19.269871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:19.314248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:19.336266Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002078/r3tmp/tmpD2Orkh/pdisk_1.dat 2025-06-25T14:58:21.586169Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:21.666582Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:21.668228Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901478485908168:2080] 1750863501431148 != 1750863501431151 2025-06-25T14:58:21.693186Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:21.693274Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:21.696124Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28426 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:21.902274Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:21.924554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:21.942302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:21.948375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:22.031122Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:22.078026Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:24.718432Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901492670667275:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:24.721362Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002078/r3tmp/tmpY6zz3E/pdisk_1.dat 2025-06-25T14:58:24.899394Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:24.899460Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:24.904409Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901492670667165:2080] 1750863504697124 != 1750863504697127 2025-06-25T14:58:24.908469Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:24.909679Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7395 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:25.164636Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:25.190656Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCre ... 25-06-25T14:58:44.428666Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19982 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:44.721891Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:44.732914Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:44.764713Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:44.776126Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:44.861644Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:44.927375Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:48.647245Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901593847479108:2244];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002078/r3tmp/tmpCUGpKm/pdisk_1.dat 2025-06-25T14:58:48.647584Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:48.825406Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:48.919840Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:48.919967Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:48.921661Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27199 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:49.170415Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:49.180570Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:49.189788Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:49.193950Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:49.275938Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:49.332151Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:53.198758Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901614629302872:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:53.198839Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002078/r3tmp/tmpZBeSuB/pdisk_1.dat 2025-06-25T14:58:53.342875Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:53.343039Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519901614629302853:2080] 1750863533198327 != 1750863533198330 2025-06-25T14:58:53.361672Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:53.361793Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:53.367979Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6481 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:53.639467Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:53.656711Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:53.731684Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:58:53.784532Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_EmptyKey [GOOD] Test command err: 2025-06-25T14:58:18.135783Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901467424247452:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:18.135847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002076/r3tmp/tmpP6aM94/pdisk_1.dat 2025-06-25T14:58:18.619702Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901467424247433:2080] 1750863498134477 != 1750863498134480 2025-06-25T14:58:18.629522Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:18.638329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:18.638481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:18.639455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20576 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:18.893410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:18.905187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:18.918840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:19.064739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:19.147765Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:19.161735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:21.579608Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901478996760200:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:21.579950Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002076/r3tmp/tmpKoPkeb/pdisk_1.dat 2025-06-25T14:58:21.769863Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:21.769932Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:21.777693Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:21.784798Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:21.786479Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901478996760031:2080] 1750863501495172 != 1750863501495175 TClient is connected to server localhost:24690 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:22.049642Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:22.056071Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:22.072720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:22.146444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:22.191703Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:24.964622Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901490864465006:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:24.964669Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002076/r3tmp/tmpuxvVHa/pdisk_1.dat 2025-06-25T14:58:25.152102Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:25.162656Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:25.162739Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:25.167898Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12838 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:25.360915Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:25.366417Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:25.388070Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, su ... Unknown -> Disconnected 2025-06-25T14:58:44.714871Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:44.719539Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15572 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:45.077979Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:45.093194Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:45.124259Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:45.139728Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:45.226068Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:58:45.308304Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:48.976800Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901593851928035:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:48.976872Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002076/r3tmp/tmpZYuzOf/pdisk_1.dat 2025-06-25T14:58:49.138265Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:49.139874Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519901593851928014:2080] 1750863528970532 != 1750863528970535 2025-06-25T14:58:49.156165Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:49.156268Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:49.159039Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24043 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:49.495839Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-25T14:58:49.520153Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:49.583020Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:49.655344Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:53.370753Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901614744575636:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:53.370884Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002076/r3tmp/tmpWheZxK/pdisk_1.dat 2025-06-25T14:58:53.504704Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:53.507357Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519901614744575617:2080] 1750863533370319 != 1750863533370322 2025-06-25T14:58:53.523548Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:53.523662Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:53.525709Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8673 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:53.803185Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:53.825012Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:53.876420Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:53.953919Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> TPQTabletTests::TEvReadSet_For_A_Non_Existent_Tablet [GOOD] >> TLocksTest::Range_BrokenLock3 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::BrokenSameShardLock [GOOD] Test command err: 2025-06-25T14:58:18.446415Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901466334864260:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:18.664973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002075/r3tmp/tmplfIvgr/pdisk_1.dat 2025-06-25T14:58:18.880396Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901466334864041:2080] 1750863498398299 != 1750863498398302 2025-06-25T14:58:18.897616Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:18.919932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:18.920034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:18.925130Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4554 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:19.223776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:19.245430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:19.257422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:19.263210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:19.415735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:19.448048Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:19.489131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002075/r3tmp/tmpUQJXIc/pdisk_1.dat 2025-06-25T14:58:21.828545Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:21.834103Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:21.834177Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:21.835444Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:21.846714Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21215 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:22.133465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:22.143319Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:22.161189Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:22.167001Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:22.213691Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:22.286842Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:24.921538Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901491192949754:2241];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002075/r3tmp/tmp8cCXTj/pdisk_1.dat 2025-06-25T14:58:24.981223Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:25.056809Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901491192949528:2080] 1750863504812115 != 1750863504812118 2025-06-25T14:58:25.056886Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:25.077892Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:25.077980Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:25.081275Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28369 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:25.325368Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:25.344504Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... ... 
4379389 2025-06-25T14:58:44.592854Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:44.622610Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:44.622726Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:44.623583Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14215 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:44.922234Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:44.946276Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:45.035533Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:45.117590Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:45.436695Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002075/r3tmp/tmp9wnbnz/pdisk_1.dat 2025-06-25T14:58:49.252616Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:49.255237Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:49.255322Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:49.259005Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519901593757403376:2080] 1750863528999167 != 1750863528999170 2025-06-25T14:58:49.259772Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:49.279437Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31168 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:49.589827Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:49.595331Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:49.612195Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:49.674532Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:49.755260Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:53.563469Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901615209626759:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:53.563550Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002075/r3tmp/tmp8Uyf3s/pdisk_1.dat 2025-06-25T14:58:53.706600Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519901615209626738:2080] 1750863533562700 != 1750863533562703 2025-06-25T14:58:53.721921Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:53.728207Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:53.728420Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:53.732446Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12574 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:53.999020Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:54.022952Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:54.084689Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:54.179551Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTabletTests::TEvReadSet_Is_Not_Sent_Ahead_Of_Time [GOOD] Test command err: 2025-06-25T14:58:55.709640Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:58:55.713172Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:58:55.713446Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037927937] doesn't have tx info 2025-06-25T14:58:55.713509Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:58:55.713559Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-25T14:58:55.713594Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:58:55.713637Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.713684Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:55.778657Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:348:2307], now have 1 active actors on pipe 2025-06-25T14:58:55.778774Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:58:55.797347Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:58:55.800304Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:58:55.800458Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.801338Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: 
"federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:58:55.801473Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:55.801850Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:55.802214Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:356:2313] 2025-06-25T14:58:55.803109Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-25T14:58:55.803170Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:356:2313] 2025-06-25T14:58:55.803217Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:58:55.803822Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:58:55.803942Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-25T14:58:55.803983Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-25T14:58:55.804157Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:58:55.804303Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-25T14:58:55.804525Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:55.806830Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:58:55.806917Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:55.807243Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:363:2318], now have 1 active actors on pipe 2025-06-25T14:58:55.807819Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:366:2320], now have 1 active actors on pipe 2025-06-25T14:58:55.811145Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 179 RawX2: 4294969488 } TxId: 67890 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } SendingShards: 22222 SendingShards: 22223 SendingShards: 22224 SendingShards: 22225 ReceivingShards: 33333 ReceivingShards: 33334 Immediate: false } 2025-06-25T14:58:55.811207Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3431: [PQ: 72057594037927937] distributed transaction 2025-06-25T14:58:55.811285Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3745: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-25T14:58:55.811374Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-25T14:58:55.811407Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-06-25T14:58:55.811508Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3979: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-25T14:58:55.811543Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-25T14:58:55.811589Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-25T14:58:55.811787Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 153 MaxStep: 30153 PredicatesReceived { TabletId: 22225 } PredicatesReceived { TabletId: 22222 } PredicatesReceived { TabletId: 22223 } PredicatesReceived { TabletId: 22224 } PredicateRecipients: 33334 PredicateRecipients: 33333 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 4294969488 } Partitions { } 2025-06-25T14:58:55.811884Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:55.815568Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:55.815649Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-25T14:58:55.815691Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PREPARING 2025-06-25T14:58:55.815726Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARED 2025-06-25T14:58:55.819353Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3457: [PQ: 
72057594037927937] Handle TEvTxProcessing::TEvPlanStep Transactions { TxId: 67890 AckTo { RawX1: 179 RawX2: 4294969488 } } Step: 100 2025-06-25T14:58:55.819427Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PREPARED 2025-06-25T14:58:55.819473Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PREPARED 2025-06-25T14:58:55.819514Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PLANNING 2025-06-25T14:58:55.819575Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3846: [PQ: 72057594037927937] PlanStep 100, PlanTxId 67890 2025-06-25T14:58:55.819629Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-25T14:58:55.819780Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PLANNED MinStep: 153 MaxStep: 30153 PredicatesReceived { TabletId: 22225 } PredicatesReceived { TabletId: 22222 } PredicatesReceived { TabletId: 22223 } PredicatesReceived { TabletId: 22224 } PredicateRecipients: 33334 PredicateRecipients: 33333 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 4294969488 } Partitions { } 2025-06-25T14:58:55.819879Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:55.823141Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:55.823202Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PLANNING 2025-06-25T14:58:55.823235Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PLANNING 2025-06-25T14:58:55.823271Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PLANNED 2025-06-25T14:58:55.823320Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from PLANNING to PLANNED 2025-06-25T14:58:55.823370Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4466: [PQ: 72057594037927937] TxQueue.size 1 2025-06-25T14:58:55.823463Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:837: [PQ: 72057594037927937] New ExecStep 100, ExecTxId 67890 2025-06-25T14:58:55.823518Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState CALCULATING 2025-06-25T14:58:55.823555Z node 1 :PERSQUEUE DEBUG ... 
andle TEvTxProcessing::TEvPlanStep Transactions { TxId: 67890 AckTo { RawX1: 179 RawX2: 25769805968 } } Step: 100 2025-06-25T14:58:58.173708Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PREPARED 2025-06-25T14:58:58.173753Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PREPARED 2025-06-25T14:58:58.173791Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PLANNING 2025-06-25T14:58:58.173828Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3846: [PQ: 72057594037927937] PlanStep 100, PlanTxId 67890 2025-06-25T14:58:58.173874Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-25T14:58:58.173994Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PLANNED MinStep: 133 MaxStep: 30133 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 25769805968 } Partitions { } 2025-06-25T14:58:58.174062Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:58.177443Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:58.177538Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PLANNING 2025-06-25T14:58:58.177582Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PLANNING 2025-06-25T14:58:58.177622Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PLANNED 2025-06-25T14:58:58.177671Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from PLANNING to PLANNED 2025-06-25T14:58:58.177710Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4466: [PQ: 72057594037927937] TxQueue.size 1 2025-06-25T14:58:58.177763Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:837: [PQ: 72057594037927937] New ExecStep 100, ExecTxId 67890 2025-06-25T14:58:58.177832Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState CALCULATING 2025-06-25T14:58:58.177868Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from PLANNED to CALCULATING 2025-06-25T14:58:58.177928Z node 6 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 100, TxId 67890 2025-06-25T14:58:58.178071Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3537: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCalcPredicateResult Step 100, TxId 67890, Partition 0, Predicate 1 2025-06-25T14:58:58.178101Z node 6 :PERSQUEUE DEBUG: transaction.cpp:218: [TxId: 67890] Handle TEvTxCalcPredicateResult 2025-06-25T14:58:58.178129Z node 6 :PERSQUEUE DEBUG: transaction.cpp:267: [TxId: 67890] Partition responses 1/1 2025-06-25T14:58:58.178160Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state CALCULATING 2025-06-25T14:58:58.178192Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State CALCULATING 2025-06-25T14:58:58.178225Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State CALCULATING FrontTxId 67890 2025-06-25T14:58:58.178255Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72057594037927937] Received 1, 
Expected 1 2025-06-25T14:58:58.178285Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState CALCULATED 2025-06-25T14:58:58.178320Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from CALCULATING to CALCULATED 2025-06-25T14:58:58.178355Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-25T14:58:58.178497Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: CALCULATED MinStep: 133 MaxStep: 30133 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 25769805968 } Partitions { } 2025-06-25T14:58:58.178571Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:58.181300Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [6:244:2239], now have 1 active actors on pipe 2025-06-25T14:58:58.181388Z node 6 :PERSQUEUE DEBUG: pqtablet_mock.cpp:72: Connected to tablet 72057594037927937 from tablet 22222 2025-06-25T14:58:58.181558Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3471: [PQ: 72057594037927937] Handle TEvTxProcessing::TEvReadSet Step: 100 TxId: 67890 TabletSource: 22222 TabletDest: 72057594037927937 TabletProducer: 22222 ReadSet: "\010\001" Seqno: 0 2025-06-25T14:58:58.181592Z node 6 :PERSQUEUE DEBUG: transaction.cpp:274: [TxId: 67890] Handle TEvReadSet 2025-06-25T14:58:58.181629Z node 6 :PERSQUEUE DEBUG: transaction.cpp:291: [TxId: 67890] Predicates 1/1 2025-06-25T14:58:58.184915Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:58.184962Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-06-25T14:58:58.184994Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State CALCULATED 2025-06-25T14:58:58.185028Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State CALCULATED FrontTxId 67890 2025-06-25T14:58:58.185061Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS 2025-06-25T14:58:58.185096Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from CALCULATED to WAIT_RS 2025-06-25T14:58:58.185136Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4027: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 
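[editor note] The trace above walks TxId 67890 through the PQ tablet's transaction states (UNKNOWN, PREPARING, PREPARED, PLANNING, PLANNED, CALCULATING, CALCULATED, WAIT_RS, EXECUTING, and further below EXECUTED, WAIT_RS_ACKS, DELETING), writing the tx key (WRITE_TX_COOKIE) before each persisted transition and exchanging TEvReadSet/TEvReadSetAck with the participant tablet 22222. The standalone sketch below only replays that observed ordering to make the trace easier to follow; the enum values and the IsNextState helper are illustrative and are not the actual NKikimr/PQ types.

// Illustrative sketch only: replays the state sequence observed for TxId 67890
// in the trace above. Names mirror the log output, not real YDB headers.
#include <cstdio>
#include <vector>

enum class ETxState {
    Unknown, Preparing, Prepared, Planning, Planned,
    Calculating, Calculated, WaitRs, Executing, Executed,
    WaitRsAcks, Deleting
};

static const char* Name(ETxState s) {
    static const char* names[] = {
        "UNKNOWN", "PREPARING", "PREPARED", "PLANNING", "PLANNED",
        "CALCULATING", "CALCULATED", "WAIT_RS", "EXECUTING", "EXECUTED",
        "WAIT_RS_ACKS", "DELETING"
    };
    return names[static_cast<int>(s)];
}

// In the trace the state advances strictly one step at a time, with a KV
// round-trip (WRITE_TX_COOKIE) before each persisted transition.
static bool IsNextState(ETxState from, ETxState to) {
    return static_cast<int>(to) == static_cast<int>(from) + 1;
}

int main() {
    std::vector<ETxState> observed = {
        ETxState::Unknown, ETxState::Preparing, ETxState::Prepared,
        ETxState::Planning, ETxState::Planned, ETxState::Calculating,
        ETxState::Calculated, ETxState::WaitRs, ETxState::Executing,
        ETxState::Executed, ETxState::WaitRsAcks, ETxState::Deleting
    };
    for (size_t i = 1; i < observed.size(); ++i) {
        std::printf("TxId 67890 moved from %s to %s\n",
                    Name(observed[i - 1]), Name(observed[i]));
        if (!IsNextState(observed[i - 1], observed[i])) {
            std::printf("unexpected transition\n");
            return 1;
        }
    }
    return 0;
}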
2025-06-25T14:58:58.185178Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4037: [PQ: 72057594037927937] Send TEvReadSet to tablet 22222 2025-06-25T14:58:58.185264Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4521: [PQ: 72057594037927937] HaveParticipantsDecision 1 2025-06-25T14:58:58.185320Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState EXECUTING 2025-06-25T14:58:58.185350Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from WAIT_RS to EXECUTING 2025-06-25T14:58:58.185377Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72057594037927937] Received 0, Expected 1 2025-06-25T14:58:58.185439Z node 6 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 100, TxId 67890 2025-06-25T14:58:58.185492Z node 6 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 67890 2025-06-25T14:58:58.185725Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:58:58.186876Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2931: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-25T14:58:58.186936Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2936: [PQ: 72057594037927937] Connected to tablet 22222 2025-06-25T14:58:58.189033Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:58:58.189119Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:58.189184Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3583: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCommitDone Step 100, TxId 67890, Partition 0 2025-06-25T14:58:58.189233Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state EXECUTING 2025-06-25T14:58:58.189276Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State EXECUTING 2025-06-25T14:58:58.189327Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State EXECUTING FrontTxId 67890 2025-06-25T14:58:58.189364Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72057594037927937] Received 1, Expected 1 2025-06-25T14:58:58.189399Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4224: [PQ: 72057594037927937] TxId: 67890 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-25T14:58:58.189441Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4555: [PQ: 72057594037927937] complete TxId 67890 2025-06-25T14:58:58.189491Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4573: [PQ: 72057594037927937] delete partitions for TxId 67890 2025-06-25T14:58:58.189524Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState EXECUTED 2025-06-25T14:58:58.189560Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from EXECUTING to EXECUTED 2025-06-25T14:58:58.189596Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-25T14:58:58.189739Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: EXECUTED MinStep: 133 MaxStep: 30133 PredicatesReceived { TabletId: 22222 Predicate: true } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Predicate: 
true Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 25769805968 } Partitions { } 2025-06-25T14:58:58.189822Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:58.190123Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3471: [PQ: 72057594037927937] Handle TEvTxProcessing::TEvReadSet Step: 100 TxId: 67890 TabletSource: 22222 TabletDest: 72057594037927937 TabletProducer: 22222 ReadSet: "\010\001" Seqno: 0 2025-06-25T14:58:58.190156Z node 6 :PERSQUEUE DEBUG: transaction.cpp:274: [TxId: 67890] Handle TEvReadSet 2025-06-25T14:58:58.807833Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:58.807909Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state EXECUTED 2025-06-25T14:58:58.807955Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State EXECUTED 2025-06-25T14:58:58.808001Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State EXECUTED FrontTxId 67890 2025-06-25T14:58:58.808043Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72057594037927937] TPersQueue::SendEvReadSetAckToSenders 2025-06-25T14:58:58.808103Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4048: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSetAck {TEvReadSet step# 100 txid# 67890 TabletSource# 22222 TabletDest# 72057594037927937 SetTabletConsumer# 72057594037927937 Flags# 0 Seqno# 0} 2025-06-25T14:58:58.808152Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS_ACKS 2025-06-25T14:58:58.808197Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from EXECUTED to WAIT_RS_ACKS 2025-06-25T14:58:58.808238Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 0/1 2025-06-25T14:58:58.808275Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72057594037927937] HaveAllRecipientsReceive 0, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-25T14:58:58.808327Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 0/1 >> TPartitionTests::OldPlanStep [GOOD] >> TPQTest::DirectReadOldPipe [GOOD] >> TPQTest::PQ_Tablet_Does_Not_Remove_The_Blob_Until_The_Reading_Is_Complete >> TObjectStorageListingTest::SchemaChecks [GOOD] >> TLocksTest::Range_IncorrectNullDot2 [GOOD] >> TPartitionTests::ReserveSubDomainOutOfSpace ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTabletTests::TEvReadSet_For_A_Non_Existent_Tablet [GOOD] Test command err: 2025-06-25T14:58:56.715762Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:58:56.719201Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:58:56.719500Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037927937] doesn't have tx info 2025-06-25T14:58:56.719555Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:58:56.719593Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-25T14:58:56.719634Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:58:56.719687Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:56.719769Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:56.734848Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:180:2193], now have 1 active actors on pipe 2025-06-25T14:58:56.734971Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:58:56.755984Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:58:56.758726Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:58:56.758854Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:56.760426Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: 
"/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:58:56.760553Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:56.760632Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:56.761090Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:56.761459Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-25T14:58:56.762422Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-25T14:58:56.762494Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:188:2199] 2025-06-25T14:58:56.762545Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:58:56.763035Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:58:56.763151Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-25T14:58:56.763192Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-25T14:58:56.763374Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:58:56.763571Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:56.763855Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-25T14:58:56.764556Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:1:Initializer] Initializing completed. 2025-06-25T14:58:56.764593Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 2 [1:190:2201] 2025-06-25T14:58:56.764633Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 1, State: StateInit] SYNC INIT topic topic partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:58:56.764939Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Process pending events. 
Count 0 2025-06-25T14:58:56.765006Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit request with generation 1 2025-06-25T14:58:56.765036Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit with generation 1 done 2025-06-25T14:58:56.765147Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:58:56.765230Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:58:56.765545Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:56.765633Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 1, State: StateIdle] no data for compaction 2025-06-25T14:58:56.765655Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:58:56.769356Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:58:56.769445Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:56.769848Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:58:56.769893Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 1, State: StateIdle] no data for compaction 2025-06-25T14:58:56.770216Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:203:2210], now have 1 active actors on pipe 2025-06-25T14:58:56.770813Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:206:2212], now have 1 active actors on pipe 2025-06-25T14:58:56.771584Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 179 RawX2: 4294969488 } TxId: 67890 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Operations { PartitionId: 1 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Immediate: false } 2025-06-25T14:58:56.771656Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3431: [PQ: 72057594037927937] distributed transaction 2025-06-25T14:58:56.771722Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3745: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-25T14:58:56.771763Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-25T14:58:56.771795Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-06-25T14:58:56.771837Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3979: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-25T14:58:56.771886Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-25T14:58:56.771933Z node 1 :PERSQUEUE DEBUG: 
pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-25T14:58:56.772080Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 230 MaxStep: 30230 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Operations { PartitionId: 1 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 4294969488 } Partitions { } 2025-06-25T14:58:56.772162Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:56.775010Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:56.775064Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-25T14:58:56.775099Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PREPARING 2025-06-25T14:58:56.775136Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARED 2025-0 ... EvTxProcessing::TEvReadSet Step: 100 TxId: 67890 TabletSource: 72057594037950158 TabletDest: 72057594037927937 TabletProducer: 72057594037950158 ReadSet: "\010\001" Seqno: 0 2025-06-25T14:58:59.190716Z node 6 :PERSQUEUE DEBUG: transaction.cpp:274: [TxId: 67890] Handle TEvReadSet 2025-06-25T14:58:59.190751Z node 6 :PERSQUEUE DEBUG: transaction.cpp:291: [TxId: 67890] Predicates 1/1 2025-06-25T14:58:59.191851Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:59.191897Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PLANNING 2025-06-25T14:58:59.191927Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PLANNING 2025-06-25T14:58:59.191960Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PLANNED 2025-06-25T14:58:59.191993Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from PLANNING to PLANNED 2025-06-25T14:58:59.192021Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4466: [PQ: 72057594037927937] TxQueue.size 1 2025-06-25T14:58:59.192052Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:837: [PQ: 72057594037927937] New ExecStep 100, ExecTxId 67890 2025-06-25T14:58:59.192104Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState CALCULATING 2025-06-25T14:58:59.192134Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from PLANNED to CALCULATING 2025-06-25T14:58:59.192181Z node 6 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 100, TxId 67890 2025-06-25T14:58:59.192302Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3537: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCalcPredicateResult Step 100, TxId 67890, Partition 0, Predicate 1 2025-06-25T14:58:59.192354Z node 6 :PERSQUEUE DEBUG: transaction.cpp:218: [TxId: 67890] Handle TEvTxCalcPredicateResult 2025-06-25T14:58:59.192383Z node 6 :PERSQUEUE DEBUG: transaction.cpp:267: [TxId: 67890] Partition responses 1/1 2025-06-25T14:58:59.192410Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state CALCULATING 2025-06-25T14:58:59.192448Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] 
TxId 67890, State CALCULATING 2025-06-25T14:58:59.192489Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State CALCULATING FrontTxId 67890 2025-06-25T14:58:59.192517Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4482: [PQ: 72057594037927937] Received 1, Expected 1 2025-06-25T14:58:59.192549Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState CALCULATED 2025-06-25T14:58:59.192581Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from CALCULATING to CALCULATED 2025-06-25T14:58:59.192610Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-25T14:58:59.192722Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: CALCULATED MinStep: 133 MaxStep: 30133 PredicatesReceived { TabletId: 72057594037950158 Predicate: true } PredicateRecipients: 72057594037950158 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 25769805968 } Partitions { } 2025-06-25T14:58:59.192782Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:59.197746Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:59.197800Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-06-25T14:58:59.197832Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State CALCULATED 2025-06-25T14:58:59.197870Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State CALCULATED FrontTxId 67890 2025-06-25T14:58:59.197915Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS 2025-06-25T14:58:59.197959Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from CALCULATED to WAIT_RS 2025-06-25T14:58:59.198028Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4027: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 2025-06-25T14:58:59.198075Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4037: [PQ: 72057594037927937] Send TEvReadSet to tablet 72057594037950158 2025-06-25T14:58:59.198192Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4521: [PQ: 72057594037927937] HaveParticipantsDecision 1 2025-06-25T14:58:59.198244Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState EXECUTING 2025-06-25T14:58:59.198290Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from WAIT_RS to EXECUTING 2025-06-25T14:58:59.198328Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72057594037927937] Received 0, Expected 1 2025-06-25T14:58:59.198501Z node 6 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 100, TxId 67890 2025-06-25T14:58:59.198556Z node 6 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 67890 2025-06-25T14:58:59.198740Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-25T14:58:59.199904Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2931: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-25T14:58:59.199956Z node 6 :PERSQUEUE DEBUG: transaction.cpp:324: [TxId: 67890] Predicate acks 1/1 2025-06-25T14:58:59.199997Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state EXECUTING 2025-06-25T14:58:59.200027Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State EXECUTING 2025-06-25T14:58:59.200060Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State EXECUTING FrontTxId 67890 2025-06-25T14:58:59.200087Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72057594037927937] Received 0, Expected 1 2025-06-25T14:58:59.200132Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4366: [PQ: 72057594037927937] TxId 67890 status has not changed 2025-06-25T14:58:59.200244Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2931: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-25T14:58:59.200292Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2936: [PQ: 72057594037927937] Connected to tablet 72057594037950158 2025-06-25T14:58:59.202862Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:58:59.202944Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:59.203005Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3583: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCommitDone Step 100, TxId 67890, Partition 0 2025-06-25T14:58:59.203051Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state EXECUTING 2025-06-25T14:58:59.203097Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State EXECUTING 2025-06-25T14:58:59.203143Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State EXECUTING FrontTxId 67890 2025-06-25T14:58:59.203182Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4551: [PQ: 72057594037927937] Received 1, Expected 1 2025-06-25T14:58:59.203223Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4224: [PQ: 72057594037927937] TxId: 67890 send TEvPersQueue::TEvProposeTransactionResult(COMPLETE) 2025-06-25T14:58:59.203292Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4555: [PQ: 72057594037927937] complete TxId 67890 2025-06-25T14:58:59.203329Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4573: [PQ: 72057594037927937] delete partitions for TxId 67890 2025-06-25T14:58:59.203362Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState EXECUTED 2025-06-25T14:58:59.203396Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from EXECUTING to EXECUTED 2025-06-25T14:58:59.203434Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-25T14:58:59.203603Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: EXECUTED MinStep: 133 MaxStep: 30133 PredicatesReceived { TabletId: 72057594037950158 Predicate: true } PredicateRecipients: 72057594037950158 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 100 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 25769805968 } Partitions { } 2025-06-25T14:58:59.203681Z node 6 :PERSQUEUE DEBUG: 
pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:59.206782Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:59.206832Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state EXECUTED 2025-06-25T14:58:59.206887Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State EXECUTED 2025-06-25T14:58:59.206925Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4345: [PQ: 72057594037927937] TxId 67890 State EXECUTED FrontTxId 67890 2025-06-25T14:58:59.206967Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4046: [PQ: 72057594037927937] TPersQueue::SendEvReadSetAckToSenders 2025-06-25T14:58:59.207029Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4048: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSetAck {TEvReadSet step# 100 txid# 67890 TabletSource# 72057594037950158 TabletDest# 72057594037927937 SetTabletConsumer# 72057594037927937 Flags# 0 Seqno# 0} 2025-06-25T14:58:59.207097Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS_ACKS 2025-06-25T14:58:59.207146Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4323: [PQ: 72057594037927937] TxId 67890 moved from EXECUTED to WAIT_RS_ACKS 2025-06-25T14:58:59.207197Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 1/1 2025-06-25T14:58:59.207234Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4599: [PQ: 72057594037927937] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-25T14:58:59.207268Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 1/1 2025-06-25T14:58:59.207313Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4660: [PQ: 72057594037927937] add an TxId 67890 to the list for deletion 2025-06-25T14:58:59.207360Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState DELETING 2025-06-25T14:58:59.207414Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3882: [PQ: 72057594037927937] delete key for TxId 67890 2025-06-25T14:58:59.207489Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:59.211693Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:59.211748Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state DELETING 2025-06-25T14:58:59.211781Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State DELETING 2025-06-25T14:58:59.211816Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4610: [PQ: 72057594037927937] delete TxId 67890 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTabletTests::Non_Kafka_Transaction_Supportive_Partitions_Should_Not_Be_Deleted_After_Timeout [GOOD] Test command err: 2025-06-25T14:58:55.177304Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:58:55.188286Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:58:55.188698Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037927937] doesn't have tx info 2025-06-25T14:58:55.188757Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:58:55.188790Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-25T14:58:55.188845Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:58:55.188892Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.189058Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:55.215478Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:208:2212], now have 1 active actors on pipe 2025-06-25T14:58:55.215618Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:58:55.237120Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-06-25T14:58:55.239987Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-06-25T14:58:55.240144Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.241165Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "consumer" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 
PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "consumer" Generation: 1 Important: true } 2025-06-25T14:58:55.241325Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:55.241742Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:55.242126Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:216:2218] 2025-06-25T14:58:55.243122Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-25T14:58:55.243195Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:216:2218] 2025-06-25T14:58:55.243266Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:58:55.244066Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:58:55.244180Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-25T14:58:55.244236Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-25T14:58:55.244291Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer reinit request with generation 1 2025-06-25T14:58:55.244333Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer reinit with generation 1 done 2025-06-25T14:58:55.244540Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:58:55.244580Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:58:55.244705Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-25T14:58:55.244912Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:55.251606Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:58:55.251821Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:55.252237Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:223:2223], now have 1 active actors on pipe 2025-06-25T14:58:55.256321Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:226:2225], now have 1 active actors on pipe 2025-06-25T14:58:55.257283Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 179 RawX2: 4294969488 } TxId: 67890 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-06-25T14:58:55.257351Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3431: [PQ: 72057594037927937] distributed transaction 2025-06-25T14:58:55.257477Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3745: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-25T14:58:55.257587Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-25T14:58:55.257655Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-06-25T14:58:55.257701Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3979: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-25T14:58:55.257744Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-25T14:58:55.257808Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-25T14:58:55.258101Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 133 MaxStep: 30133 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 4294969488 } Partitions { } 2025-06-25T14:58:55.258213Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:55.261834Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:55.261915Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state PREPARING 2025-06-25T14:58:55.261957Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State PREPARING 2025-06-25T14:58:55.261994Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARED 2025-06-25T14:58:55.262353Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 179 RawX2: 4294969488 } TxId: 67891 Data { Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } 
SendingShards: 22222 ReceivingShards: 22222 Immediate: false } 2025-06-25T14:58:55.262399Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3431: [PQ: 72057594037927937] distributed transaction 2025-06-25T14:58:55.262469Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3745: [PQ: 72057594037927937] Propose TxId 67891, WriteId (empty maybe) 2025-06-25T14:58:55.262506Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-25T14:58:55.262546Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67891, State UNKNOWN 2025-06-25T14:58:55.262589Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3979: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-25T14:58:55.262627Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67891, NewState PREPARING 2025-06-25T14:58:55.262676Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67891 2025-06-25T14:58:55.262821Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67891] save tx TxId: 67891 State: PREPARED MinStep: 135 MaxStep: 30135 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "consumer" Path: "/topic" } Kind: KIND_DATA SourceActor { RawX1: 179 RawX2: 4294969488 } Partitions { } 2025-06-25T14:58:55.262942Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE ... tion 0 user user reinit request with generation 6 2025-06-25T14:58:58.541484Z node 6 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 6 done 2025-06-25T14:58:58.541646Z node 6 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:58:58.541767Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-25T14:58:58.541943Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:58.544203Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:58:58.544278Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:58.544610Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [6:195:2204], now have 1 active actors on pipe 2025-06-25T14:58:58.545194Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [6:198:2206], now have 1 active actors on pipe 2025-06-25T14:58:58.545298Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:58:58.545343Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-25T14:58:58.545392Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2729: [PQ: 72057594037927937] partition {0, {0, 3}, 100000} for WriteId {0, 3} 2025-06-25T14:58:58.545594Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3630: [PQ: 72057594037927937] send TEvSubscribeLock for WriteId {0, 3} 2025-06-25T14:58:58.545685Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:58.547598Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:58.547960Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:58.548198Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:58.548414Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateInit] bootstrapping {0, {0, 3}, 100000} [6:204:2211] 2025-06-25T14:58:58.549101Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitDiskStatusStep 2025-06-25T14:58:58.549961Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitMetaStep 2025-06-25T14:58:58.550131Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitInfoRangeStep 2025-06-25T14:58:58.550343Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitDataRangeStep 2025-06-25T14:58:58.550481Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitDataStep 2025-06-25T14:58:58.550511Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 3}, 100000}:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:58:58.550545Z node 6 :PERSQUEUE INFO: partition_init.cpp:895: [topic:{0, {0, 3}, 100000}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:58:58.550574Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:{0, {0, 3}, 100000}:Initializer] Initializing completed. 
2025-06-25T14:58:58.550616Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateInit] init complete for topic 'topic' partition {0, {0, 3}, 100000} generation 2 [6:204:2211] 2025-06-25T14:58:58.550660Z node 6 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateInit] SYNC INIT topic topic partitition {0, {0, 3}, 100000} so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:58:58.550699Z node 6 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] Process pending events. Count 0 2025-06-25T14:58:58.550891Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] no data for compaction 2025-06-25T14:58:58.551042Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie -=[ 0wn3r ]=-|c675e17a-11c3d543-f9f48cc4-d464ecf9_0 generated for partition {0, {0, 3}, 100000} topic 'topic' owner -=[ 0wn3r ]=- 2025-06-25T14:58:58.551148Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] TPartition::ReplyOwnerOk. Partition: {0, {0, 3}, 100000} 2025-06-25T14:58:58.551232Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 4 2025-06-25T14:58:58.551473Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037927937] server disconnected, pipe [6:198:2206] destroyed 2025-06-25T14:58:58.551518Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:58:58.551677Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [6:216:2218], now have 1 active actors on pipe 2025-06-25T14:58:58.551929Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:58:58.551961Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-25T14:58:58.552001Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2729: [PQ: 72057594037927937] partition {0, {0, 0}, 100001} for WriteId {0, 0} 2025-06-25T14:58:58.552158Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:58.554017Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:58.554632Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:58.554928Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:58.555161Z node 6 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateInit] bootstrapping {0, {0, 0}, 100001} [6:223:2223] 2025-06-25T14:58:58.555968Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitDiskStatusStep 2025-06-25T14:58:58.556934Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitMetaStep 2025-06-25T14:58:58.557167Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 
100001}:Initializer] Start initializing step TInitInfoRangeStep 2025-06-25T14:58:58.557472Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitDataRangeStep 2025-06-25T14:58:58.557675Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitDataStep 2025-06-25T14:58:58.557721Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:{0, {0, 0}, 100001}:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:58:58.557767Z node 6 :PERSQUEUE INFO: partition_init.cpp:895: [topic:{0, {0, 0}, 100001}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T14:58:58.557813Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:{0, {0, 0}, 100001}:Initializer] Initializing completed. 2025-06-25T14:58:58.557865Z node 6 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateInit] init complete for topic 'topic' partition {0, {0, 0}, 100001} generation 2 [6:223:2223] 2025-06-25T14:58:58.557928Z node 6 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateInit] SYNC INIT topic topic partitition {0, {0, 0}, 100001} so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:58:58.557980Z node 6 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] Process pending events. Count 0 2025-06-25T14:58:58.558212Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] no data for compaction 2025-06-25T14:58:58.558296Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie -=[ 0wn3r ]=-|7f92660-8b4f6dbd-4712994c-935e15d4_0 generated for partition {0, {0, 0}, 100001} topic 'topic' owner -=[ 0wn3r ]=- 2025-06-25T14:58:58.558402Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] TPartition::ReplyOwnerOk. Partition: {0, {0, 0}, 100001} 2025-06-25T14:58:58.558592Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 4 2025-06-25T14:58:58.559190Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:5262: [PQ: 72057594037927937] send TEvPQ::TEvDeletePartition to partition {0, {0, 0}, 100001} 2025-06-25T14:58:58.559302Z node 6 :PERSQUEUE DEBUG: partition.cpp:3863: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] Handle TEvPQ::TEvDeletePartition 2025-06-25T14:58:58.559858Z node 6 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:58:58.559907Z node 6 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from D0000100001(+) to D0000100002(-) 2025-06-25T14:58:58.562463Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] no data for compaction 2025-06-25T14:58:58.562542Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 0}, 100001}, State: StateIdle] no data for compaction 2025-06-25T14:58:58.562900Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1382: [PQ: 72057594037927937] Topic 'topic' counters. 
CacheSize 0 CachedBlobs 0 2025-06-25T14:58:58.563099Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:5196: [PQ: 72057594037927937] Handle TEvPQ::TEvDeletePartitionDone {0, {0, 0}, 100001} 2025-06-25T14:58:58.563183Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4647: [PQ: 72057594037927937] delete WriteId {0, 0} 2025-06-25T14:58:58.563261Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3683: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-25T14:58:58.565706Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1241: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-25T14:58:58.742307Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:58.752841Z node 6 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: {0, {0, 3}, 100000}, State: StateIdle] no data for compaction >> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata >> TPartitionTests::ReserveSubDomainOutOfSpace [GOOD] >> TPartitionTests::ShadowPartitionCounters >> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestSecretsExistingValidation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_BrokenLock3 [GOOD] Test command err: 2025-06-25T14:58:19.316518Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901471159709224:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:19.317036Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002072/r3tmp/tmp94iBTy/pdisk_1.dat 2025-06-25T14:58:19.679453Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901471159709003:2080] 1750863499273462 != 1750863499273465 2025-06-25T14:58:19.690501Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:19.717509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:19.717724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:19.725537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23170 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:19.981682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:19.994337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:58:20.009519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:20.129286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:20.202849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:20.310544Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:22.507881Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901482392119542:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:22.510055Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002072/r3tmp/tmp3JEoCQ/pdisk_1.dat 2025-06-25T14:58:22.651079Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:22.653846Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901482392119523:2080] 1750863502499580 != 1750863502499583 2025-06-25T14:58:22.697419Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:22.697513Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:22.698441Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7067 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:22.902751Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:22.908486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:22.915582Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:22.923486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:22.999792Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:23.035768Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:25.759408Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901497127971780:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:25.763646Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002072/r3tmp/tmpqCTwxq/pdisk_1.dat 2025-06-25T14:58:25.888150Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901497127971676:2080] 1750863505739924 != 1750863505739927 2025-06-25T14:58:25.888432Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:25.920651Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:25.920721Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:25.927931Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10082 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:26.109276Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alte ... ie mismatch for subscription [8:7519901580901764008:2080] 1750863525743178 != 1750863525743181 2025-06-25T14:58:46.013675Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:46.013772Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:46.015058Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26914 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:46.340648Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:46.347539Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T14:58:46.364835Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:46.427861Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:46.481920Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:50.160123Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901604576384455:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:50.160227Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002072/r3tmp/tmpiYDwW8/pdisk_1.dat 2025-06-25T14:58:50.327449Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:50.348265Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:50.348478Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:50.352777Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2908 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:50.620865Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:50.638825Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:50.710943Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:50.783431Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:54.598096Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901621795929926:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:54.598178Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002072/r3tmp/tmpAxiiHn/pdisk_1.dat 2025-06-25T14:58:54.757125Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:54.778509Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:54.778615Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:54.781603Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20325 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:55.066058Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:55.073994Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:55.082355Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:55.087654Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:55.158545Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:55.226361Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
>> TSchemeShardSubDomainTest::CreateForceDropSolomon >> TSchemeShardSubDomainTest::RestartAtInFly >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomain >> TSchemeShardSubDomainTest::SimultaneousCreateDelete >> TSchemeShardSubDomainTest::CreateSubDomainsInSeparateDir >> TSchemeShardSubDomainTest::CreateWithoutTimeCastBuckets >> TSchemeShardSubDomainTest::Redefine >> TSchemeShardSubDomainTest::SchemeLimitsRejectsWithIndexedTables >> TSchemeShardSubDomainTest::SimultaneousDeclareAndDefine >> TSchemeShardSubDomainTest::RmDir >> TPartitionTests::ConflictingActsInSeveralBatches [GOOD] >> TSchemeShardSubDomainTest::SimultaneousDefineAndCreateTable >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenMkDir >> TSchemeShardSubDomainTest::Restart >> TLocksTest::GoodNullLock [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTable >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomainWithStoragePools >> TPartitionTests::After_TEvGetWriteInfoError_Comes_TEvTxCalcPredicateResult >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix-Nullable+Covered [GOOD] >> TPartitionTests::After_TEvGetWriteInfoError_Comes_TEvTxCalcPredicateResult [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_IncorrectNullDot2 [GOOD] Test command err: 2025-06-25T14:58:20.722478Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901473160699802:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:20.722536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002071/r3tmp/tmpj1AmOQ/pdisk_1.dat 2025-06-25T14:58:21.100733Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:21.102483Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901473160699774:2080] 1750863500718245 != 1750863500718248 2025-06-25T14:58:21.171338Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:21.171440Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:21.173197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25965 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:21.372853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:21.416948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:21.431791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:21.565860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:21.644114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:21.725602Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:24.051266Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901491007908993:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:24.051324Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002071/r3tmp/tmpHoXMth/pdisk_1.dat 2025-06-25T14:58:24.249876Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:24.249942Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:24.253744Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:24.254619Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62538 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:24.487847Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:24.496919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:24.516423Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:24.594653Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:24.642526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:27.316294Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901504517506381:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:27.316727Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002071/r3tmp/tmp244byv/pdisk_1.dat 2025-06-25T14:58:27.454279Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:27.464715Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901504517506362:2080] 1750863507315927 != 1750863507315930 2025-06-25T14:58:27.491550Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:27.491628Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:27.496882Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3274 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:27.668856Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:27.676267Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:27.688339Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at sc ... c-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:46.967187Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:46.975124Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:46.988075Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:58:47.070723Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:47.133435Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:50.793737Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901601913095264:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:50.793894Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002071/r3tmp/tmphNgrV4/pdisk_1.dat 2025-06-25T14:58:50.913855Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:50.929625Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519901601913095245:2080] 1750863530793254 != 1750863530793257 2025-06-25T14:58:50.934664Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:50.934767Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:50.936138Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16872 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:51.190237Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:51.196164Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:51.215966Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:51.287840Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:51.343686Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:55.258753Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901623597456825:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:55.258831Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002071/r3tmp/tmpFJ21GC/pdisk_1.dat 2025-06-25T14:58:55.446475Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:55.448254Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519901623597456805:2080] 1750863535255909 != 1750863535255912 2025-06-25T14:58:55.473839Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:55.473940Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:55.477112Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25376 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:55.747548Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:55.755095Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:55.768652Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:55.775081Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:55.867059Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:55.934079Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TObjectStorageListingTest::SchemaChecks [GOOD] Test command err: 2025-06-25T14:58:26.654280Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901500302684859:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:26.654336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206b/r3tmp/tmptrwajU/pdisk_1.dat 2025-06-25T14:58:26.963034Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901500302684836:2080] 1750863506652796 != 1750863506652799 2025-06-25T14:58:26.991453Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:26.994152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:26.994262Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:26.999177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28236, node 1 2025-06-25T14:58:27.104873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:27.104894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:27.104901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:27.105008Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10313 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:27.423220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:27.441209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:27.457184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:27.478482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:27.662616Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:31.656400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901500302684859:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:31.656459Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:41.988519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:58:41.988548Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:55.877321Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901625909910030:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:55.889724Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206b/r3tmp/tmpebMNZD/pdisk_1.dat 2025-06-25T14:58:56.054174Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:56.070119Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:56.070209Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:56.072188Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31372, node 2 2025-06-25T14:58:56.145051Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:56.145075Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:56.145084Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:56.145221Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16864 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:56.446302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:56.453269Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:56.471502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:56.892884Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-true >> TSchemeShardSubDomainTest::CreateDropNbs >> TAsyncIndexTests::CdcAndMergeWithReboots[TabletReboots] [GOOD] >> TPartitionTests::ConflictingSrcIdForTxWithHead [GOOD] >> TSchemeShardSubDomainTest::CreateWithoutTimeCastBuckets [GOOD] >> TSchemeShardSubDomainTest::SimultaneousDeclareAndDefine [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::After_TEvGetWriteInfoError_Comes_TEvTxCalcPredicateResult [GOOD] Test command err: 2025-06-25T14:58:55.190483Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.190558Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:55.216072Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:181:2194] 2025-06-25T14:58:55.217783Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 1 generation 0 [1:181:2194] Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient-1" Value: "\010\000\020\002\030\003\"\014session-id-1(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient-1" Value: "\000\000\000\000\000\000\000\000\002\000\000\000\003\000\000\000session-id-1" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient-2" Value: "\010\000\020\004\030\005\"\014session-id-2(\0000\003@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient-2" Value: "\000\000\000\000\000\000\000\000\004\000\000\000\005\000\000\000session-id-2" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient-3" Value: "\010\000\020\006\030\007\"\014session-id-3(\0000\004@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient-3" Value: "\000\000\000\000\000\000\000\000\006\000\000\000\007\000\000\000session-id-3" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000001" Value: "\030\000(\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001cclient-1" Value: "\010\000\020\010\030\t\"\014session-id-2(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000001uclient-1" Value: "\000\000\000\000\000\000\000\000\010\000\000\000\t\000\000\000session-id-2" StorageChannel: INLINE } 2025-06-25T14:58:55.895317Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.895379Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:55.910618Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:183:2196] 2025-06-25T14:58:55.912336Z node 2 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-25T14:58:55.000000Z 2025-06-25T14:58:55.912408Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [2:183:2196] Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\230\267\345\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\230\267\345\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\002\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\002\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\230\267\345\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\004\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\004\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-25T14:58:56.727009Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:56.727087Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:56.742157Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [3:183:2196] 2025-06-25T14:58:56.744553Z node 3 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-25T14:58:56.000000Z 2025-06-25T14:58:56.744615Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [3:183:2196] Send change config Wait cmd write (initial) Got cmd write: CmdWrite { Key: "i0000000003" Value: "\030\000(\200\277\345\274\3722" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\262\222\004" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-1" Value: "\010\002\020\000\030\000\"\tsession-1(\0000\000@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-1" Value: "\002\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000session-1" StorageChannel: INLINE } Wait commit 1 done Wait cmd write (change config) Got cmd write: CmdDeleteRange { Range { From: "m0000000003cclient-2" IncludeFrom: true To: "m0000000003cclient-2" IncludeTo: true } } CmdDeleteRange { Range { From: "m0000000003uclient-2" IncludeFrom: true To: "m0000000003uclient-2" IncludeTo: true } } CmdWrite { Key: "i0000000003" Value: "\030\000(\200\277\345\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-1" Value: "\010\002\020\000\030\000\"\tsession-1(\0000\000@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-1" Value: "\002\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000session-1" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-3" Value: "\010\000\020\000\030\000\"\000(\0000\007@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-3" Value: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" StorageChannel: INLINE } CmdWrite { Key: "_config_3" Value: "\022\t\030\200\243\0058\200\200\200\005\030\000\"\027rt3.dc1--account--topic(\0020\001\272\001 /Root/PQ/rt3.dc1--account--topic\352\001\000\372\001\002\010\000\212\002\007account\220\002\001\242\002\002\010\000\252\002\016\n\010client-1@\000H\000\252\002\016\n\010client-3@\007H\000" StorageChannel: INLINE } Wait config changed 2025-06-25T14:58:57.604372Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:57.604443Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:57.619098Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-25T14:58:57.619344Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:57.619606Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [4:182:2195] 2025-06-25T14:58:57.620682Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-25T14:58:57.620876Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-25T14:58:57.621049Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:58:57.621247Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start 
initializing step TInitDataRangeStep Got KV request 2025-06-25T14:58:57.621520Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-25T14:58:57.621714Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 1 size 684 so 0 eo 1 d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-25T14:58:57.621857Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:58:57.621895Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:58:57.621949Z node 4 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:58:57.000000Z 2025-06-25T14:58:57.622032Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-25T14:58:57.622087Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [4:182:2195] 2025-06-25T14:58:57.622141Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-25T14:58:57.622192Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:58:57.622450Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:57.977805Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src1|6b84ea22-31e5362c-c40bff68-3ecd86b6_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src1 2025-06-25T14:58:57.977968Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-25T14:58:57.978280Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src4|f282dcf8-a7c2d7f1-f7e92d33-49877388_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src4 2025-06-25T14:58:57.978360Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-25T14:58:58.984125Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create distr tx with id = 2 and act no: 3 Create distr tx with id = 4 and act no: 5 Create distr tx with id = 8 and act no: 9 Create immediate tx with id = 11 and act no: 12 2025-06-25T14:58:58.984635Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-25T14:58:58.984720Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 ... 
8Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 62 is stored on disk 2025-06-25T14:59:00.747848Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:59:00.747882Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 63 is stored on disk 2025-06-25T14:59:00.747920Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:59:00.747956Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 11, partNo: 0, Offset: 64 is stored on disk 2025-06-25T14:59:00.747984Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:59:00.748015Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src1', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 12, partNo: 0, Offset: 65 is stored on disk 2025-06-25T14:59:00.748042Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:59:00.748077Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 70 is stored on disk 2025-06-25T14:59:00.748102Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:59:00.748137Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 71 is stored on disk 2025-06-25T14:59:00.748444Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=332, count=2, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Got batch complete: 1 Wait tx committed for tx 4 Wait batch completion Wait batch completion 2025-06-25T14:59:00.748656Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 8 2025-06-25T14:59:00.748705Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 8 2025-06-25T14:59:00.748764Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 72 PartNo 0 PackedSize 0 count 0 nextOffset 72 batches 0, NewHead=Offset 72 PartNo 0 PackedSize 0 count 0 nextOffset 72 batches 0 2025-06-25T14:59:00.748914Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 7 partNo 0 2025-06-25T14:59:00.749627Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 7 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 84 count 1 nextOffset 101 batches 1 2025-06-25T14:59:00.749706Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 8 partNo 0 2025-06-25T14:59:00.749750Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 8 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 136 count 2 nextOffset 102 batches 1 2025-06-25T14:59:00.749784Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 9 partNo 0 2025-06-25T14:59:00.749819Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 9 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 188 count 3 nextOffset 103 batches 1 2025-06-25T14:59:00.749856Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 10 partNo 0 2025-06-25T14:59:00.749902Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 10 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 240 count 4 nextOffset 104 batches 1 2025-06-25T14:59:00.749942Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 11 partNo 0 2025-06-25T14:59:00.749983Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' 
seqNo 11 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 292 count 5 nextOffset 105 batches 1 2025-06-25T14:59:00.750022Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 12 partNo 0 2025-06-25T14:59:00.750054Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 12 partNo 0 FormedBlobsCount 0 NewHead: Offset 100 PartNo 0 PackedSize 344 count 6 nextOffset 106 batches 1 2025-06-25T14:59:00.750095Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:59:00.750143Z node 4 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 72 PartNo 0 PackedSize 0 count 0 nextOffset 72 batches 0, NewHead=Offset 100 PartNo 0 PackedSize 344 count 6 nextOffset 106 batches 1 2025-06-25T14:59:00.771423Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-25T14:59:00.771967Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'Root/PQ/rt3.dc1--account--topic' partition 0 compactOffset 100,6 HeadOffset 72 endOffset 72 curOffset 106 d0000000000_00000000000000000100_00000_0000000006_00000? size 211 WTime 12141 Got KV request Got batch complete: 7 Got KV request Got KV request Wait kv request Wait tx committed for tx 8 2025-06-25T14:59:00.793012Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 102 WriteNewSizeFromSupportivePartitions# 2 2025-06-25T14:59:00.793081Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:59:00.793145Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 72 is already written 2025-06-25T14:59:00.793182Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:59:00.793210Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 72 is already written 2025-06-25T14:59:00.793233Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:59:00.793263Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 72 is already written 2025-06-25T14:59:00.793284Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-25T14:59:00.793311Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 72 is already written 2025-06-25T14:59:00.793331Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:59:00.793380Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 11, partNo: 0, Offset: 72 is already written 2025-06-25T14:59:00.793412Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:59:00.793456Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'src4', Topic: 'Root/PQ/rt3.dc1--account--topic', Partition: 0, SeqNo: 12, partNo: 0, Offset: 72 is already written 2025-06-25T14:59:00.793646Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=543, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 Wait immediate tx complete 11 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 11 2025-06-25T14:59:01.240615Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:59:01.240684Z node 5 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:59:01.256054Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [5:183:2196] 2025-06-25T14:59:01.257026Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 1 generation 0 [5:183:2196] >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenMkDir [GOOD] >> TSchemeShardSubDomainTest::RestartAtInFly [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateDelete [GOOD] >> TSchemeShardSubDomainTest::Restart [GOOD] >> TSchemeShardSubDomainTest::Redefine [GOOD] >> TSchemeShardSubDomainTest::RmDir [GOOD] >> TPartitionTests::ConflictingCommitsInSeveralBatches >> TSchemeShardSubDomainTest::SimultaneousDefineAndCreateTable [GOOD] >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-true [GOOD] >> TSchemeShardSubDomainTest::CopyRejects >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomain [GOOD] >> TSchemeShardSubDomainTest::CreateSubDomainsInSeparateDir [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateWithoutTimeCastBuckets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.395417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.476151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.476205Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.491212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.491510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.491640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.496628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.496941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.497479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.497729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.501115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.501269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 
2025-06-25T14:59:01.506405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.506658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.506698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.506783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.515032Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.673736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.673951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.674129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.674164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.674365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.674456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.676419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.676614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.676780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:59:01.676819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.676872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.676910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.678650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.678705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.678740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.680403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.680462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.680517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.680558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.683923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.685712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.685966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.686833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.686964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.687017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.687265Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.687322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.687511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.687587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.689651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.689697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.689857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.689895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:59:01.690194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.690239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:59:01.690319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:59:01.690347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.690387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:59:01.690416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.690452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:59:01.690481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.690509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:59:01.690531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:59:01.690574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:01.690600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:59:01.690623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:59:01.691923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:59:01.692017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:59:01.692053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:59:01.692085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:59:01.692141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.692249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:59:01.695507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:59:01.696009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 100 2025-06-25T14:59:01.696717Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:274:2263] Bootstrap 2025-06-25T14:59:01.713998Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:59:01.716093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_0" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 100 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.716320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_0, opId: 100:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.716405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 100:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: TimeCastBucketsPerMediator is 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.717321Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:59:01.720823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 100, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: TimeCastBucketsPerMediator is 0" TxId: 100 SchemeshardId: 72057594046678944, at schemeshard: 
72057594046678944 2025-06-25T14:59:01.721053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 100, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: TimeCastBucketsPerMediator is 0, operation: CREATE DATABASE, path: /MyRoot/USER_0 2025-06-25T14:59:01.721521Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-25T14:59:01.721735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-25T14:59:01.721772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-25T14:59:01.722208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:59:01.722297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:01.722334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:289:2278] TestWaitNotification: OK eventTxId 100 2025-06-25T14:59:01.722848Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.723066Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 246us result status StatusPathDoesNotExist 2025-06-25T14:59:01.724478Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDeclareAndDefine [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.395511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.396979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.465944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.466001Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.480030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.480374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.480528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.492140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.492495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.493234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.493511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.499259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.500497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.506409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506483Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.506696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.506748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.506843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.512981Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.639340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.639583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.639789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.639837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.640027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.640094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.642071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.642235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.642379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.642416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-06-25T14:59:01.642488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.642522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.643971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.644023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.644054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.649161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.649221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.649293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.649353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.651874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.653579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.653756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.655357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.655505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.655579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.657046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.657112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.657318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.657400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.659444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.659508Z node 1 :FLAT_TX_SCHEMESHARD ... ode 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-06-25T14:59:01.716475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 100:0 128 -> 240 2025-06-25T14:59:01.716527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-06-25T14:59:01.716693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.716749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:01.716810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-06-25T14:59:01.718497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.718544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.718697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:01.718849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.718887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-25T14:59:01.718921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-25T14:59:01.719197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.719241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: 
[72057594046678944] TDone opId# 100:0 ProgressState 2025-06-25T14:59:01.719337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:01.719393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:01.719441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:01.719488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:01.719522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-25T14:59:01.719559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:01.719609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 100:0 2025-06-25T14:59:01.719638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 100:0 2025-06-25T14:59:01.719697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:01.719731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-25T14:59:01.719759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:59:01.719785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T14:59:01.720368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:01.720445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:01.720480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:01.720535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:59:01.720576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:01.721182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:01.721243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:01.721262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:01.721282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:59:01.721300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:01.721341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-25T14:59:01.724624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-25T14:59:01.725170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestModificationResults wait txId: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-25T14:59:01.725521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-25T14:59:01.725583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-25T14:59:01.725679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:01.725700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:59:01.726140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:59:01.726245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:01.726285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:318:2307] 2025-06-25T14:59:01.726410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:01.726507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:01.726528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:318:2307] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-25T14:59:01.726958Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false 
ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.727132Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 172us result status StatusSuccess 2025-06-25T14:59:01.727617Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::GoodNullLock [GOOD] Test command err: 2025-06-25T14:58:22.300270Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901481753357676:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:22.312213Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206e/r3tmp/tmpqErH93/pdisk_1.dat 2025-06-25T14:58:22.727006Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:22.730211Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901481753357646:2080] 1750863502263960 != 1750863502263963 2025-06-25T14:58:22.784997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:22.785108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:22.789470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17434 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:23.057362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... 2025-06-25T14:58:23.088795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:23.210247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:23.294012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:23.322265Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:25.413733Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901495812635694:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:25.413758Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206e/r3tmp/tmpVthN9T/pdisk_1.dat 2025-06-25T14:58:25.629420Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:25.632387Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901495812635674:2080] 1750863505408046 != 1750863505408049 2025-06-25T14:58:25.650520Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:25.650596Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:25.653506Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12659 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:25.840014Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:25.860253Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:25.931341Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:26.002449Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:28.720268Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901508420300625:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:28.720348Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206e/r3tmp/tmp4vz0Z7/pdisk_1.dat 2025-06-25T14:58:28.891829Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:28.896127Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901508420300607:2080] 1750863508719364 != 1750863508719367 2025-06-25T14:58:28.917323Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:28.917410Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:28.919047Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12660 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:29.093181Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:29.108814Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:29.120743Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:29.127953Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is u ... node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:48.126119Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519901591581654417:2080] 1750863527976056 != 1750863527976059 2025-06-25T14:58:48.159786Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:48.159856Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:48.161238Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61274 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:48.461993Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:58:48.481585Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:48.486868Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:58:48.569094Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:48.643140Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:52.212813Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901612597246446:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:52.212877Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206e/r3tmp/tmpcvuhHg/pdisk_1.dat 2025-06-25T14:58:52.339687Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:52.339790Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:52.341735Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:52.341942Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:25657 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:52.599631Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:52.622100Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:58:52.679180Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:52.776027Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:56.828673Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901631116266428:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:56.828731Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206e/r3tmp/tmpQH3AXF/pdisk_1.dat 2025-06-25T14:58:57.018858Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:57.018959Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:57.020865Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:57.045831Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TClient is connected to server localhost:65380 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:57.319379Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:57.336643Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:57.341941Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:57.399719Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:57.479115Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
>> TSchemeShardSubDomainTest::SchemeLimitsRejectsWithIndexedTables [GOOD] >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomainWithStoragePools [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::CosineDistanceWithPkPrefix-Nullable+Covered [GOOD] Test command err: Trying to start YDB, gRPC: 64408, MsgBus: 64081 2025-06-25T14:57:08.616908Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901165719089051:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:08.634540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001bd4/r3tmp/tmpAEPw6L/pdisk_1.dat 2025-06-25T14:57:09.150743Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:09.150840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:57:09.154282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:09.189592Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:09.190364Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901165719089015:2080] 1750863428596541 != 1750863428596544 TServer::EnableGrpc on GrpcPort 64408, node 1 2025-06-25T14:57:09.325036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:09.325060Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:09.325068Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:09.325192Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64081 2025-06-25T14:57:09.634625Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64081 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:57:09.957172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:09.970240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:57:09.986931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:10.133988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:10.307935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:10.393908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:12.121385Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901182898959833:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:12.121506Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:12.401873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:12.440427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:12.510415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:12.551677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:12.645523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:12.694954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:12.728504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:12.832598Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901182898960500:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:12.832696Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:12.836473Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901182898960505:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:12.840628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:12.852473Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901182898960507:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:57:12.936836Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901182898960558:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:13.611151Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901165719089051:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:13.611202Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:13.967606Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901187193928128:3598], Recipient [1:7519901165719089333:2147]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:13.967648Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:50 ... event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:58:56.685152Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037927 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 19] state 'Ready' dataSize 842 rowCount 18 cpuUsage 0.059 2025-06-25T14:58:56.685236Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037927 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 19] raw table stats: DataSize: 842 RowCount: 18 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750863534691 LastUpdateTime: 1750863507033 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 18 RowDeletes: 0 RowReads: 0 RangeReads: 30 PartCount: 1 RangeReadRows: 60 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 842 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:58:56.723943Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269553162, Sender [3:7519901499241552368:2503], Recipient [3:7519901473471745995:2143]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037926 TableLocalId: 21 Generation: 1 Round: 2 TableStats { DataSize: 107 RowCount: 3 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750863534686 LastUpdateTime: 1750863506923 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 10 PartCount: 1 RangeReadRows: 10 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 107 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 547 Memory: 119579 Storage: 228 } ShardState: 2 UserTablePartOwners: 72075186224037926 NodeId: 3 StartTime: 1750863506638 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:58:56.723995Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event 
TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:58:56.724031Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037926 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 21] state 'Ready' dataSize 107 rowCount 3 cpuUsage 0.0547 2025-06-25T14:58:56.724148Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037926 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 21] raw table stats: DataSize: 107 RowCount: 3 IndexSize: 0 InMemSize: 0 LastAccessTime: 1750863534686 LastUpdateTime: 1750863506923 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 3 RowDeletes: 0 RowReads: 0 RangeReads: 10 PartCount: 1 RangeReadRows: 10 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 107 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:58:56.788594Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901473471745995:2143]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:56.788648Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:56.788673Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 3 2025-06-25T14:58:56.788729Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 3 2025-06-25T14:58:56.788749Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 3 2025-06-25T14:58:56.788816Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 20 shard idx 72057594046644480:39 data size 1820 row count 30 2025-06-25T14:58:56.788880Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037925 maps to shardIdx: 72057594046644480:39 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 20], pathId map=indexImplPostingTable, is column=0, is olap=0, RowCount 30, DataSize 1820 2025-06-25T14:58:56.788896Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037925, followerId 0 2025-06-25T14:58:56.788970Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:39 with partCount# 1, rowCount# 30, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:56.789024Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037925 2025-06-25T14:58:56.789066Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 19 shard idx 72057594046644480:38 data size 842 row count 18 2025-06-25T14:58:56.789104Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037927 maps to 
shardIdx: 72057594046644480:38 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 19], pathId map=indexImplLevelTable, is column=0, is olap=0, RowCount 18, DataSize 842 2025-06-25T14:58:56.789113Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037927, followerId 0 2025-06-25T14:58:56.789151Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:38 with partCount# 1, rowCount# 18, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:56.789169Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037927 2025-06-25T14:58:56.789190Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 21 shard idx 72057594046644480:40 data size 107 row count 3 2025-06-25T14:58:56.789225Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037926 maps to shardIdx: 72057594046644480:40 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 21], pathId map=indexImplPrefixTable, is column=0, is olap=0, RowCount 3, DataSize 107 2025-06-25T14:58:56.789234Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037926, followerId 0 2025-06-25T14:58:56.789272Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:40 with partCount# 1, rowCount# 3, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:58:56.789290Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037926 2025-06-25T14:58:56.789348Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:58:56.792573Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901473471745995:2143]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:56.792616Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:58:56.792630Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:58:57.259516Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901473471745995:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:57.259572Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:57.259626Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901473471745995:2143], Recipient [3:7519901473471745995:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:57.259646Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event 
TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:58.259972Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901473471745995:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:58.260022Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:58.260081Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901473471745995:2143], Recipient [3:7519901473471745995:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:58.260103Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:59.260440Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901473471745995:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:59.260484Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:58:59.260539Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901473471745995:2143], Recipient [3:7519901473471745995:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:58:59.260558Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:00.260834Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901473471745995:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:00.260889Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:00.260957Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901473471745995:2143], Recipient [3:7519901473471745995:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:00.260979Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TSchemeShardSubDomainTest::CreateDropNbs [GOOD] >> TSchemeShardSubDomainTest::CreateForceDropSolomon [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDefineAndCreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 
2025-06-25T14:59:01.393698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.395584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.476633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.476684Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.491731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.492068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.492196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.497263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.497580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.498157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.498394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.501546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.501688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.506368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506449Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.506632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.506690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.506771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.512996Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.642585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.642801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.642963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.643005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.643227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.643297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.645424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.645593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.645741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.645782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.645848Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.645890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.647771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.647825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.647860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.649575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.649629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.649689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.649737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.653133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.654840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.655063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.655842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.655957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.656003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.657056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.657113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, 
operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.657264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.657335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.659477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.659517Z node 1 :FLAT_TX_SCHEMESHARD ... 7594046678944, message: Source { RawX1: 505 RawX2: 4294969753 } Origin: 72075186233409549 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:59:01.969793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:59:01.969852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 505 RawX2: 4294969753 } Origin: 72075186233409549 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-25T14:59:01.969900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.969944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.969998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 102:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-25T14:59:01.970032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 129 -> 240 2025-06-25T14:59:01.970462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:01.970526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:01.970551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:59:01.970576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-25T14:59:01.970602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:59:01.970669Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-25T14:59:01.973046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.973956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:59:01.974069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.974324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.974380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:59:01.974469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:59:01.974510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:59:01.974547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:59:01.974576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:59:01.974610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T14:59:01.974660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:315:2304] message: TxId: 102 2025-06-25T14:59:01.974706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:59:01.974739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:59:01.974764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:59:01.974857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:59:01.975157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:59:01.976358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:59:01.976391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:533:2477] TestWaitNotification: OK eventTxId 102 2025-06-25T14:59:01.976852Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.977019Z node 1 
:SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 145us result status StatusSuccess 2025-06-25T14:59:01.977407Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 2 SecurityStateVersion: 0 } } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 130 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 10 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.977898Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.978035Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 147us result status StatusSuccess 2025-06-25T14:59:01.978351Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 130 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } 
Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 10 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenMkDir [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-25T14:59:01.394797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.395699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.480525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.480590Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.497113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.497472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.497622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.502944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.503219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.503685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.503848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.506751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.508070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.508128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.508267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.508352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.508402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.508478Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.514900Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.681653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.681861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.682033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.682088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.682349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.682427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.684461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.684639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.684804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.684855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.684914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.684956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.686761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.686868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.686916Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.688428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.688486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.688534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.688580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.692042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.693696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.693861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.694795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.694948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.695003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.695215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.695257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.695425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.695505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.697595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-25T14:59:01.697644Z node 1 :FLAT_TX_SCHEMESHARD ... e target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:59:01.780966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T14:59:01.781001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-25T14:59:01.781052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-25T14:59:01.782080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:01.782167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:01.782203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:01.782246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T14:59:01.782290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:01.782977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:01.783103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:01.783157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:01.783190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-25T14:59:01.783225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:59:01.783301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:59:01.787015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:01.787324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:59:01.787540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:01.787585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:59:01.787987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:01.788085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:01.788123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:342:2331] TestWaitNotification: OK eventTxId 101 2025-06-25T14:59:01.788613Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.788826Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 218us result status StatusSuccess 2025-06-25T14:59:01.789337Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 
MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.789902Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.790084Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 171us result status StatusSuccess 2025-06-25T14:59:01.790455Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "MyDir" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.790898Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/MyDir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.791090Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/MyDir" took 194us result status StatusSuccess 2025-06-25T14:59:01.791371Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/MyDir" PathDescription { Self { Name: "MyDir" PathId: 3 SchemeshardId: 72057594046678944 PathType: 
EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::Restart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.395776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.475267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.475337Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.492812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.493210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.493368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.500584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.500928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.501650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.501910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.505665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.505837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.506935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.507002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.507153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.507203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.507250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.507325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.515576Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.668921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" 
Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.669142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.669317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.669362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.669610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.669702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.672764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.673014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.673189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.673241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.673296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.673344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.675245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.675304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.675351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.676903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.676963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.677016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.677069Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.680521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.682510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.682705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.683631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.683777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.683829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.684124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.684197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.684406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.684486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.686467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.686516Z node 1 :FLAT_TX_SCHEMESHARD ... 
shard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.860303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:01.860404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_0, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:01.860499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.860595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.860811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-25T14:59:01.861101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.861196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:59:01.861449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.861540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.861666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:01.861704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:01.861733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:01.861767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:01.861871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.861951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.862188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-06-25T14:59:01.862560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.862697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.863124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at 
schemeshard: 72057594046678944 2025-06-25T14:59:01.863201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.863448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.863548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.863649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.863851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.863958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.864130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.864456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.864538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.864672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.864742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.864800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.869302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.871907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.871967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.872366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.872430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.872480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.872596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:465:2414] sender: [1:524:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.936224Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: 
false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.936523Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 271us result status StatusSuccess 2025-06-25T14:59:01.936957Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.937666Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.937827Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 177us result status StatusSuccess 2025-06-25T14:59:01.938218Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 
5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::Redefine [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.395600Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.475398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.475450Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.491101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.491521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.491739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.497152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.497463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.498017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.498253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.503250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.503378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.506346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.506606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.506687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.506762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.515859Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.663082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T14:59:01.663310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.663488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.663525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.663726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.663789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.665620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.665804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.665928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.665984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.666043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.666082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.667556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.667614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.667655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.669314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.669363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.669435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.669484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.672799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.674290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.674454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.675301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.675428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.675474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.675712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.675753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.675898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.675966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.677792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.677836Z node 1 :FLAT_TX_SCHEMESHARD ... 
SHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:01.866364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:59:01.866477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:59:01.866510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:59:01.866544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:59:01.866575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:01.866659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-25T14:59:01.869437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:01.869494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:01.869518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:01.871283Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-25T14:59:01.872582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 Forgetting tablet 72075186233409546 2025-06-25T14:59:01.873307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.873574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:59:01.874119Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-25T14:59:01.874519Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-25T14:59:01.874791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK 
Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:01.874959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:01.875727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:01.875864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409548 Forgetting tablet 72075186233409547 2025-06-25T14:59:01.876491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:01.876546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:01.876670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:01.876828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:59:01.876996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:01.877031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:01.877086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.878721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:01.878763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:01.878835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:01.878854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:59:01.880469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:01.880531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:01.880652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:01.880767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-25T14:59:01.881000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-25T14:59:01.881034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-25T14:59:01.881448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-25T14:59:01.881526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:59:01.881554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:579:2531] TestWaitNotification: OK eventTxId 104 2025-06-25T14:59:01.882108Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.882284Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 144us result status StatusPathDoesNotExist 2025-06-25T14:59:01.882432Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:01.882855Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.882978Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 123us result status StatusSuccess 2025-06-25T14:59:01.883335Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 
1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateDelete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.395650Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.485430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.485495Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.503218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.503611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.503772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.509572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.509874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.510568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.510821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.519266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.519514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.520727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.520790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.520951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.521002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.521053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.521157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.527720Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.645186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T14:59:01.645456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.645640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.645686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.645922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.645999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.648265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.648495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.648673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.648719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.648789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.648828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.650688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.650742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.650778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.652303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.652372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.652442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.652482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.664649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.666572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.666747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.667662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.667795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.667845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.668160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.668217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.668405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.668490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.670350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.670391Z node 1 :FLAT_TX_SCHEMESHARD ... 
8944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-25T14:59:01.870011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-06-25T14:59:01.870277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.870367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.870409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-25T14:59:01.870660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 128 -> 240 2025-06-25T14:59:01.870695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-25T14:59:01.870823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.870871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-25T14:59:01.870910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:59:01.872340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.872391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.872542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:01.872625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.872667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-25T14:59:01.872701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-25T14:59:01.873003Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.873040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:59:01.873118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:01.873147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:01.873180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:01.873203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:01.873235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:59:01.873275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:01.873331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:59:01.873365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:59:01.873586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-25T14:59:01.873625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 1 2025-06-25T14:59:01.873657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:59:01.873684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T14:59:01.874234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:01.874302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:01.874337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:01.874363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:59:01.874407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:01.874949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at 
schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:01.875007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:01.875034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:01.875057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:59:01.875094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-25T14:59:01.875158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 1 2025-06-25T14:59:01.875198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:572:2479] 2025-06-25T14:59:01.878072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:01.878301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:01.878367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:01.878393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:573:2480] TestWaitNotification: OK eventTxId 101 2025-06-25T14:59:01.878749Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.878963Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 167us result status StatusSuccess 2025-06-25T14:59:01.879437Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 
CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RmDir [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.395770Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.479757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.479803Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.495708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.496065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.496216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.503654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.503990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.504740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.505011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.508598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.508789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.509877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.509930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.510063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.510102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.510139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.510198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.515841Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.649056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T14:59:01.649297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.649508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.649552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.649794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.649879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.651970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.652155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.652352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.652421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.652472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.652510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.654615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.654672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.654714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.656814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.656883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.656944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.656993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.660921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.662779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.662976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.663963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.664107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.664161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.664539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.664603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.664780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.664865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.666922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.666979Z node 1 :FLAT_TX_SCHEMESHARD ... 
lPathId: 1] was 1 2025-06-25T14:59:01.871117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-25T14:59:01.871167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-06-25T14:59:01.873076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.873123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.873302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:01.873453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.873488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-25T14:59:01.873521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-25T14:59:01.873578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.873624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-25T14:59:01.873763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:01.873789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:01.873814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:01.873837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:01.873884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-25T14:59:01.873930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:01.873985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 100:0 2025-06-25T14:59:01.874023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 100:0 2025-06-25T14:59:01.874247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 
2025-06-25T14:59:01.874292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 100, publications: 2, subscribers: 1 2025-06-25T14:59:01.874320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:59:01.874341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T14:59:01.875111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:01.875187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:01.875215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:01.875241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:59:01.875275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:01.875858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:01.875941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:01.875978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:01.876004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:59:01.876027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-25T14:59:01.876089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 1 2025-06-25T14:59:01.876129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:572:2479] 2025-06-25T14:59:01.878567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 
2025-06-25T14:59:01.879276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-25T14:59:01.879344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:01.879364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:573:2480] TestWaitNotification: OK eventTxId 100 2025-06-25T14:59:01.879736Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.879877Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 156us result status StatusSuccess 2025-06-25T14:59:01.880201Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T14:59:01.882367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "USER_0" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.882472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:28: TRmDir 
Propose, path: /MyRoot/USER_0, pathId: 0, opId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.882573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusPathIsNotDirectory, reason: Check failed: path: '/MyRoot/USER_0', error: path is not a directory (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeSubDomain, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-25T14:59:01.884174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusPathIsNotDirectory Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path is not a directory (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeSubDomain, state: EPathStateNoChanges)" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.884348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPathIsNotDirectory, reason: Check failed: path: '/MyRoot/USER_0', error: path is not a directory (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeSubDomain, state: EPathStateNoChanges), operation: DROP DIRECTORY, path: /MyRoot/USER_0 TestModificationResult got TxId: 101, wait until txId: 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.808146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.808215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.808245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.808377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.808414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.808442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.808498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.808557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.809150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.809396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.872544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.872608Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.885918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.886304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.886470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.892427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.892776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.893374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.893594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.896536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.896720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.897906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.897965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.898106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.898155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.898211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.898304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.904366Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:02.024577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" 
Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:02.024796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.025023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:02.025076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:02.025295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:02.025370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:02.027024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:02.027175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:02.027303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.027353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:02.027413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:02.027448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:02.029049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.029090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:02.029121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:02.030281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.030326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.030366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:02.030402Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:02.042932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:02.045088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:02.045249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:02.045939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:02.046048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.046096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:02.046312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:02.046365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:02.046505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:02.046562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:02.048234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:02.048267Z node 1 :FLAT_TX_SCHEMESHARD ... 
meBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:02.092150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:02.092757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:02.092795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:02.092935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:02.093006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:02.093037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-25T14:59:02.093071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:59:02.093346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.093411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:59:02.093593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:02.093633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:02.093673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:02.093705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:02.093740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:59:02.093783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:02.093814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:59:02.093863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:59:02.093926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:02.093962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T14:59:02.093997Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:59:02.094054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T14:59:02.094725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:02.094839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:02.094883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:02.094917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:59:02.094953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:02.095510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:02.095587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:02.095643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:02.095673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:59:02.095699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:02.095767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:59:02.098761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:02.099177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-25T14:59:02.103240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: 
ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "SomeDatabase" TimeCastBucketsPerMediator: 2 ExternalSchemeShard: true DatabaseQuotas { storage_quotas { unit_kind: "nonexistent_storage_kind" data_size_hard_quota: 1 } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:02.103465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 102:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "SomeDatabase" TimeCastBucketsPerMediator: 2 ExternalSchemeShard: true DatabaseQuotas { storage_quotas { unit_kind: "nonexistent_storage_kind" data_size_hard_quota: 1 } } } 2025-06-25T14:59:02.103524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 102:0, path /MyRoot/SomeDatabase 2025-06-25T14:59:02.103674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 102:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. Existing storage kinds are: , at schemeshard: 72057594046678944 2025-06-25T14:59:02.103747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. Existing storage kinds are: , at schemeshard: 72057594046678944 2025-06-25T14:59:02.105775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. Existing storage kinds are: " TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.106001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. 
Existing storage kinds are: , operation: ALTER DATABASE, path: /MyRoot/SomeDatabase TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-06-25T14:59:02.106305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:02.106345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-25T14:59:02.106443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:59:02.106462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:59:02.106904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:02.107027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:02.107061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:314:2303] 2025-06-25T14:59:02.107192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:59:02.107270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:59:02.107291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:314:2303] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RestartAtInFly [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.394349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.394408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.394467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 
2025-06-25T14:59:01.394826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.395646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.475311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.475353Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.491490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.491855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.491996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.497366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.497663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.498268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.498481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.501596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.501744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.506377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.506618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.506673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.506752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.515749Z node 1 :HIVE 
INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.642795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.642940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.643109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.643155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.643408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.643487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.645310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.645499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.645648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.645693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.645742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.645773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.647121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.647170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.647230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.649001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress 
Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.649057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.649118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.649165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.653102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.654874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.655050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.655700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.655794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.655832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.657029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.657071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.657231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.657307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.659341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.659382Z node 1 :FLAT_TX_SCHEMESHARD ... 
Data, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.824402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-25T14:59:01.824668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.824769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:59:01.824955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.825012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.825097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:01.825150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:01.825190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:01.825212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:01.825294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.825356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.825580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-06-25T14:59:01.825905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.826037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.826422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.826495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.826758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.826867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.826971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.827148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 
72057594046678944 2025-06-25T14:59:01.827228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.827424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.827667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.827740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.827862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.827909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.827982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.832542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.834065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.834139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.834251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.834287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.834326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.834537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 100 2025-06-25T14:59:01.898522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-25T14:59:01.898595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 Leader for TabletID 72057594046678944 is [1:459:2408] sender: [1:520:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.899272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:59:01.899349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:01.899375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:518:2453] TestWaitNotification: OK eventTxId 100 2025-06-25T14:59:01.899737Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false 
ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.899905Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 177us result status StatusSuccess 2025-06-25T14:59:01.900362Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.900726Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.900880Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 117us result status StatusSuccess 2025-06-25T14:59:01.901323Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true 
CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::SimultaneousCreateTableForceDrop ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.395606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.472521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.472577Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.489041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.489397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.489557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.494966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.495262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.495836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.496105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.500453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.500616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.506346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.506606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.506683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.506760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.514376Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.641013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.641261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.641477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.641520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.641798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.641871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.644142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.644345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.644505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.644546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.644602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.644641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.646376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.646436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.646471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.647966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.648017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.648082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T14:59:01.648124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.657351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.659156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.659358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.660222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.660375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.660426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.660676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.660718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.660845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.660894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.665424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.665470Z node 1 :FLAT_TX_SCHEMESHARD ... 
7594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-25T14:59:02.018406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:59:02.018482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-25T14:59:02.019418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 631 RawX2: 4294969834 } Origin: 72075186233409552 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:59:02.019534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409552, partId: 0 2025-06-25T14:59:02.019681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 631 RawX2: 4294969834 } Origin: 72075186233409552 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:59:02.019792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:59:02.019897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 631 RawX2: 4294969834 } Origin: 72075186233409552 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-25T14:59:02.019976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:7, shard: 72075186233409552, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:02.020019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.020067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 101:0, datashard: 72075186233409552, at schemeshard: 72057594046678944 2025-06-25T14:59:02.020113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 129 -> 240 2025-06-25T14:59:02.023404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:02.023598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.024163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:02.024619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.024906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: 
TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.024951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:59:02.025100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:02.025146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:02.025195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:02.025246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:02.025285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-25T14:59:02.025351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:280:2269] message: TxId: 101 2025-06-25T14:59:02.025413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:02.025457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:59:02.025493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:59:02.025647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:59:02.027287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:02.027342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:281:2270] TestWaitNotification: OK eventTxId 101 2025-06-25T14:59:02.027834Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.028083Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 228us result status StatusSuccess 2025-06-25T14:59:02.028668Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 140 ParentPathId: 2 PathState: 
EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 10 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.029285Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.029523Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 239us result status StatusSuccess 2025-06-25T14:59:02.029960Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 140 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 10 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainsInSeparateDir [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.395577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: 
HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.396997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.462357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.462406Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.477629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.478011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.478172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.484983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.487636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.489786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.491134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.501015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.501196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.506390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.506717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.506773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.506875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.514077Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.667530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.667762Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.667972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.668019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.668335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.668411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.670452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.670664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.670817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.670865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.670921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.670987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.673044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.673108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.673148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.674741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.674793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.674850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.674897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.691191Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.693153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.693354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.694337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.694513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.694571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.694935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.694998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.695175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.695260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.697700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.697748Z node 1 :FLAT_TX_SCHEMESHARD ... 
s 9 2025-06-25T14:59:02.075588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 1 2025-06-25T14:59:02.075620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 7 2025-06-25T14:59:02.075649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 4], 3 2025-06-25T14:59:02.076587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:02.076699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:02.076741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:59:02.076846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-06-25T14:59:02.076891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:02.077353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:02.077426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:02.077453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:59:02.077478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-25T14:59:02.077515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 8 2025-06-25T14:59:02.077585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 1 2025-06-25T14:59:02.077619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:923:2754] 2025-06-25T14:59:02.079928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 
2025-06-25T14:59:02.081171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:59:02.081254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:59:02.081288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:924:2755] TestWaitNotification: OK eventTxId 102 2025-06-25T14:59:02.081761Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomains/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.081975Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomains/USER_0" took 202us result status StatusSuccess 2025-06-25T14:59:02.082445Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomains/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.082897Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomains/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.083046Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomains/USER_1" took 156us result status StatusSuccess 2025-06-25T14:59:02.083397Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomains/USER_1" PathDescription { Self { Name: "USER_1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409552 Coordinators: 72075186233409553 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 Mediators: 72075186233409556 Mediators: 72075186233409557 } DomainKey { SchemeShard: 72057594046678944 PathId: 4 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 4 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.083857Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomains" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.084015Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomains" took 140us result status StatusSuccess 2025-06-25T14:59:02.084363Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomains" PathDescription { Self { Name: "SubDomains" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: 
"root@builtin" ACL: "" } Children { Name: "USER_1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TPQTest::PQ_Tablet_Does_Not_Remove_The_Blob_Until_The_Reading_Is_Complete [GOOD] >> TSchemeShardSubDomainTest::CreateAndWait ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.395711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.464804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.464875Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.482773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.483206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.483387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.488813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.489186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.489842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.491125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.499485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.500494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.506351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.506646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.506694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.506788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.518693Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.642794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { 
Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.643022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.643212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.643263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.643478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.643561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.645660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.645837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.646014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.646065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.646126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.646172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.647996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.648055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.648096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.649826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.649882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.649947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.650022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.653597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.655377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.655604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.656551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.656679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.656733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.657025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.657081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.657239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.657315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.659378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.659423Z node 1 :FLAT_TX_SCHEMESHARD ... 
9: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:488:2441] TestWaitNotification: OK eventTxId 103 2025-06-25T14:59:02.160790Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.161033Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 242us result status StatusSuccess 2025-06-25T14:59:02.161595Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.162165Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.162355Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 196us 
result status StatusSuccess 2025-06-25T14:59:02.162785Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.163289Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.163442Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0" took 162us result status StatusSuccess 2025-06-25T14:59:02.163848Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0" PathDescription { Self { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 
102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.164283Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.164503Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0/table_1" took 199us result status StatusSuccess 2025-06-25T14:59:02.164856Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0/table_1" PathDescription { Self { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_1" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 
0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomainWithStoragePools [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:59:01.395758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.478711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.478770Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.494414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.494808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.494954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.501849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.502123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.502689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.502882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.506131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.506303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.507495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.507560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.507696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.507745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.507806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.507884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.514494Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.681074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.681305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.681530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.681582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.681828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.681903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.683990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.684204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.684398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.684451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.684515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.684551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.686285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.686340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.686379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.688052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.688109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.688191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.688238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.691952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.694002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.694296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.695333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.695483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.695545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.695811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.695865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.696053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.696142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.698504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.698551Z node 1 :FLAT_TX_SCHEMESHARD ... 
06-25T14:59:02.212766Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.213015Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 262us result status StatusSuccess 2025-06-25T14:59:02.213637Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.214281Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.214552Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 242us 
result status StatusSuccess 2025-06-25T14:59:02.215052Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.215717Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.215905Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0" took 249us result status StatusSuccess 2025-06-25T14:59:02.216267Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0" PathDescription { Self { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 
102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.216850Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.217107Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0/table_1" took 258us result status StatusSuccess 2025-06-25T14:59:02.217541Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0/table_1" PathDescription { Self { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_1" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 
0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeLimitsRejectsWithIndexedTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:59:01.395705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.472568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.472631Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.489515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.489883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.490025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.495675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.495954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.496658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.496901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.500383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.500564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.508617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.508698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.508862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.508919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.508975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.509097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.519632Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.655528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.655762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.655958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.656000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.656233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.656331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.659238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.659661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.659833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.659888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.659926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.659952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.661755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.661812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.661849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.663523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.663580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.663635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.663676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.667214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.668759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.668927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.669611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.669715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.669761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.669968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.670003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.670121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.670201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.671887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.671939Z node 1 :FLAT_TX_SCHEMESHARD ... 
shard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 612 RawX2: 4294969845 } Origin: 72075186233409549 State: 2 TxId: 107 Step: 0 Generation: 2 2025-06-25T14:59:02.320039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 107, tablet: 72075186233409549, partId: 2 2025-06-25T14:59:02.320133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 107:2, at schemeshard: 72057594046678944, message: Source { RawX1: 612 RawX2: 4294969845 } Origin: 72075186233409549 State: 2 TxId: 107 Step: 0 Generation: 2 2025-06-25T14:59:02.320174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 107:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 2025-06-25T14:59:02.320235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 107:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 612 RawX2: 4294969845 } Origin: 72075186233409549 State: 2 TxId: 107 Step: 0 Generation: 2 2025-06-25T14:59:02.320280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 107:2, shardIdx: 72057594046678944:4, shard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:02.320324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 107:2, at schemeshard: 72057594046678944 2025-06-25T14:59:02.320351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 107:2, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-25T14:59:02.320393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 107:2 129 -> 240 2025-06-25T14:59:02.324224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-25T14:59:02.324299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-25T14:59:02.328214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-25T14:59:02.328445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.328555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 107:2, at schemeshard: 72057594046678944 2025-06-25T14:59:02.328612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-25T14:59:02.328764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.329074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.329127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 107:0 ProgressState 2025-06-25T14:59:02.329252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#107:0 progress is 2/3 2025-06-25T14:59:02.329703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 107 ready parts: 2/3 2025-06-25T14:59:02.329751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#107:0 progress is 2/3 2025-06-25T14:59:02.329786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 107 ready parts: 2/3 2025-06-25T14:59:02.329835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 107, ready parts: 2/3, is published: true 2025-06-25T14:59:02.330100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 107:2, at schemeshard: 72057594046678944 2025-06-25T14:59:02.330359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 107:2, at schemeshard: 72057594046678944 2025-06-25T14:59:02.330395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 107:2 ProgressState 2025-06-25T14:59:02.330463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#107:2 progress is 3/3 2025-06-25T14:59:02.330491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 107 ready parts: 3/3 2025-06-25T14:59:02.330521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#107:2 progress is 3/3 2025-06-25T14:59:02.330543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 107 ready parts: 3/3 2025-06-25T14:59:02.330592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 107, ready parts: 3/3, is published: true 2025-06-25T14:59:02.330667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:487:2434] message: TxId: 107 2025-06-25T14:59:02.330715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 107 ready parts: 3/3 2025-06-25T14:59:02.330772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 107:0 2025-06-25T14:59:02.330805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 107:0 2025-06-25T14:59:02.330935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:59:02.330981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 107:1 2025-06-25T14:59:02.331003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 107:1 2025-06-25T14:59:02.331032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: 
DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-25T14:59:02.331050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 107:2 2025-06-25T14:59:02.331071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 107:2 2025-06-25T14:59:02.331147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-25T14:59:02.333514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-25T14:59:02.333554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:543:2490] TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-06-25T14:59:02.337165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "Table7" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value0" Type: "Utf8" } Columns { Name: "Value1" Type: "Utf8" } Columns { Name: "Value2" Type: "Utf8" } Columns { Name: "Value3" Type: "Utf8" } Columns { Name: "Value4" Type: "Utf8" } KeyColumnNames: "RowId" } IndexDescription { Name: "UserDefinedIndexByValue0" KeyColumnNames: "Value0" } IndexDescription { Name: "UserDefinedIndexByValue1" KeyColumnNames: "Value1" } IndexDescription { Name: "UserDefinedIndexByValue2" KeyColumnNames: "Value2" } IndexDescription { Name: "UserDefinedIndexByValue3" KeyColumnNames: "Value3" } IndexDescription { Name: "UserDefinedIndexByValue4" KeyColumnNames: "Value4" } } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:02.337693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:100: TCreateTableIndex construct operation table path: /MyRoot/USER_0/Table7 domain path id: [OwnerId: 72057594046678944, LocalPathId: 2] domain path: /MyRoot/USER_0 shardsToCreate: 6 GetShardsInside: 4 MaxShards: 7 2025-06-25T14:59:02.337788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 108:0, explain: indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5, at schemeshard: 72057594046678944 2025-06-25T14:59:02.337830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 108:1, propose status:StatusResourceExhausted, reason: indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5, at schemeshard: 72057594046678944 2025-06-25T14:59:02.339993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 108, response: Status: StatusResourceExhausted Reason: "indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5" TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.340258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot/USER_0, subject: , status: 
StatusResourceExhausted, reason: indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/USER_0/Table7 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-25T14:59:02.340726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-25T14:59:02.340781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-25T14:59:02.341269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-25T14:59:02.341362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-25T14:59:02.341424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:737:2652] TestWaitNotification: OK eventTxId 108 >> TPQTest::TestCmdReadWithLastOffset [GOOD] >> TPQTest::TestDirectReadHappyWay ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateDropNbs [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.858021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.858114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.858154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.858193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.858235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.858265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.858332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.858409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:01.859136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources# 2025-06-25T14:59:01.859445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.937117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.937185Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.953873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.954309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.954491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.960410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.960766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.961494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.961775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.965428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.965631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.966881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.966947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.967111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.967172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.967228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.967333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.974128Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:02.094307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:02.094529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.094712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:02.094757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:02.094980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:02.095077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:02.097075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:02.097265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:02.097467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.097575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:02.097638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:02.097684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:02.099415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.099466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:02.099523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:02.101068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.101128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.101183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:02.101244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:02.104583Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:02.106259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:02.106439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:02.107313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:02.107458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.107507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:02.107795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:02.107854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:02.108025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:02.108104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:02.110100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:02.110149Z node 1 :FLAT_TX_SCHEMESHARD ... 
schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:02.389480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:59:02.390844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:02.390895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:02.390919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:02.390942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:02.391741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:59:02.392905Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-06-25T14:59:02.394028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:02.394353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:59:02.394884Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-25T14:59:02.395368Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-25T14:59:02.395518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:02.395763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:59:02.396285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:02.396492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409548 2025-06-25T14:59:02.397749Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-25T14:59:02.397939Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 Forgetting tablet 72075186233409547 2025-06-25T14:59:02.399288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:59:02.399511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409549 2025-06-25T14:59:02.400125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:02.400178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:59:02.400249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:02.400767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:02.400825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:02.400940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:02.401250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:59:02.404643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:02.404704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:02.405311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:02.405342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:59:02.405434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:02.405450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:02.405509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:59:02.405543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-25T14:59:02.405999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done 
PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:02.406089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:02.406123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:02.406189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:02.406359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:02.407585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:59:02.407790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:59:02.407853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:59:02.408366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:59:02.408468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:59:02.408506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:538:2490] TestWaitNotification: OK eventTxId 102 2025-06-25T14:59:02.424194Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.424430Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/BSVolume" took 254us result status StatusPathDoesNotExist 2025-06-25T14:59:02.424615Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 
2025-06-25T14:59:02.425326Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.425551Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 188us result status StatusPathDoesNotExist 2025-06-25T14:59:02.425705Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateForceDropSolomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:01.393569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:01.393649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.393689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:01.393726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:01.394666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:01.394694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:01.394834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:01.394907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-25T14:59:01.395639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:01.397021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:01.476720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:01.476773Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:01.493832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:01.494224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:01.494362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:01.502537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:01.502862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:01.503503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.503755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:01.507213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.507338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:01.508134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.508184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:01.508295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:01.508353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:01.508399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:01.508468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.519213Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:01.625784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:01.627182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.628740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:01.628831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:01.630153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:01.630260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:01.637690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.638803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:01.639043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.639171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:01.639235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:01.639279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:01.642505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.642564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:01.642627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:01.645017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.645077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:01.645133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.645195Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:01.650071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:01.653225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:01.653482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:01.655379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:01.655514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.655576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.657010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:01.657075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:01.657249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:01.657310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:01.659309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:01.659354Z node 1 :FLAT_TX_SCHEMESHARD ... 
blet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:38 tabletId 72075186233409583 2025-06-25T14:59:02.499075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:8 2025-06-25T14:59:02.499108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:8 tabletId 72075186233409553 2025-06-25T14:59:02.499192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:12 2025-06-25T14:59:02.499214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:12 tabletId 72075186233409557 2025-06-25T14:59:02.499272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-25T14:59:02.499303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-06-25T14:59:02.499367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:16 2025-06-25T14:59:02.499385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:16 tabletId 72075186233409561 2025-06-25T14:59:02.500278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:25 2025-06-25T14:59:02.500333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:25 tabletId 72075186233409570 2025-06-25T14:59:02.500526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:20 2025-06-25T14:59:02.500560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:20 tabletId 72075186233409565 2025-06-25T14:59:02.501123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:29 2025-06-25T14:59:02.501152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:29 tabletId 72075186233409574 2025-06-25T14:59:02.503314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:33 2025-06-25T14:59:02.503348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:33 tabletId 72075186233409578 2025-06-25T14:59:02.503509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:02.503533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:02.503943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:37 2025-06-25T14:59:02.503972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:37 tabletId 72075186233409582 2025-06-25T14:59:02.504042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:42 2025-06-25T14:59:02.504054Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:42 tabletId 72075186233409587 2025-06-25T14:59:02.504916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-25T14:59:02.504943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-25T14:59:02.505010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:11 2025-06-25T14:59:02.505047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:11 tabletId 72075186233409556 2025-06-25T14:59:02.505118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:15 2025-06-25T14:59:02.505137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:15 tabletId 72075186233409560 2025-06-25T14:59:02.505200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:19 2025-06-25T14:59:02.505211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:19 tabletId 72075186233409564 2025-06-25T14:59:02.505857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-06-25T14:59:02.505878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:24 tabletId 72075186233409569 2025-06-25T14:59:02.505929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 2025-06-25T14:59:02.505941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:23 tabletId 72075186233409568 2025-06-25T14:59:02.505963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:28 2025-06-25T14:59:02.505976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:28 tabletId 72075186233409573 2025-06-25T14:59:02.506040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:32 2025-06-25T14:59:02.506079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:32 tabletId 72075186233409577 2025-06-25T14:59:02.506124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:02.506135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:02.506182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:36 2025-06-25T14:59:02.506204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:36 tabletId 72075186233409581 2025-06-25T14:59:02.506351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done 
PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:02.506419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:02.506447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:02.506509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:02.508442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:02.509743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:59:02.509992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:59:02.510049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:59:02.510482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:59:02.510595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:59:02.510638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:2073:3671] TestWaitNotification: OK eventTxId 103 2025-06-25T14:59:02.511154Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/Solomon" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.511341Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/Solomon" took 237us result status StatusPathDoesNotExist 2025-06-25T14:59:02.511499Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/Solomon\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/Solomon" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:02.512091Z 
node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:02.512346Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 211us result status StatusPathDoesNotExist 2025-06-25T14:59:02.512473Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::PQ_Tablet_Does_Not_Remove_The_Blob_Until_The_Reading_Is_Complete [GOOD] Test command err: 2025-06-25T14:58:55.177487Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:58:55.193401Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:58:55.193773Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037927937] doesn't have tx info 2025-06-25T14:58:55.193836Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:58:55.193875Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-25T14:58:55.193923Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:58:55.193978Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.194062Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:55.215628Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:180:2193], now have 1 active actors on pipe 2025-06-25T14:58:55.215756Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:58:55.238132Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:58:55.241412Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:58:55.241578Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.243539Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:179:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: 
"/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-25T14:58:55.243717Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:55.243869Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:55.244504Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:55.244894Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:188:2199] 2025-06-25T14:58:55.245948Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-25T14:58:55.246022Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:188:2199] 2025-06-25T14:58:55.246103Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:58:55.246711Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:58:55.246839Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-25T14:58:55.246897Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-25T14:58:55.247147Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:58:55.247372Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:55.247683Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:190:2201] 2025-06-25T14:58:55.248610Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:1:Initializer] Initializing completed. 2025-06-25T14:58:55.248664Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'topic' partition 1 generation 2 [1:190:2201] 2025-06-25T14:58:55.248715Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 1, State: StateInit] SYNC INIT topic topic partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:58:55.249078Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Process pending events. 
Count 0 2025-06-25T14:58:55.249153Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit request with generation 1 2025-06-25T14:58:55.249195Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user reinit with generation 1 done 2025-06-25T14:58:55.249354Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic 'topic' partition 1 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:58:55.249460Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:58:55.249801Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:55.249925Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 1, State: StateIdle] no data for compaction 2025-06-25T14:58:55.249960Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:58:55.265210Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:58:55.265321Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:55.265925Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 1, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:58:55.265977Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 1, State: StateIdle] no data for compaction 2025-06-25T14:58:55.266295Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:203:2210], now have 1 active actors on pipe 2025-06-25T14:58:55.267123Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:206:2212], now have 1 active actors on pipe 2025-06-25T14:58:55.268093Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3255: [PQ: 72057594037927937] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 179 RawX2: 4294969488 } TxId: 67890 Config { TabletConfig { PartitionConfig { LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 10485760 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--account--topic" Version: 2 LocalDC: true TopicPath: "/Root/PQ/rt3.dc1--account--topic" YdbDatabasePath: "" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } FederationAccount: "account" MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "client-1" Generation: 0 Important: false } Consumers { Name: "client-3" Generation: 7 Important: false } } BootstrapConfig { } } 2025-06-25T14:58:55.269270Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3745: [PQ: 72057594037927937] Propose TxId 67890, WriteId (empty maybe) 2025-06-25T14:58:55.269344Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4353: [PQ: 72057594037927937] Try execute txs with state UNKNOWN 2025-06-25T14:58:55.269424Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4398: [PQ: 72057594037927937] TxId 67890, State UNKNOWN 2025-06-25T14:58:55.269474Z node 1 :PERSQUEUE DEBUG: 
pq_impl.cpp:3979: [PQ: 72057594037927937] schedule TEvProposeTransactionResult(PREPARED) 2025-06-25T14:58:55.269536Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4288: [PQ: 72057594037927937] TxId 67890, NewState PREPARING 2025-06-25T14:58:55.269612Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3866: [PQ: 72057594037927937] write key for TxId 67890 2025-06-25T14:58:55.269871Z node 1 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67890] save tx TxId: 67890 State: PREPARED MinStep: 230 MaxStep: 18446744073709551615 Kind: KIND_CONFIG TabletConfig { PartitionConfig { LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 10485760 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--account--topic" Version: 2 LocalDC: true TopicPath: "/Root/PQ/rt3.dc1--account--topic" YdbDatabasePath: "" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 ReadRuleGenerations: 2 FederationAccount: "account" MeteringMode: METERING_MODE_REQUEST_UNITS AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "client-1" Generation: 2 Important: false } Consumers { Name: "client-3" Generation: 2 Important: false } } BootstrapConfig { } SourceActor { RawX1: 179 ... unt 1 count 0 size 512005 from pos 0 cbcount 1 2025-06-25T14:59:02.486161Z node 10 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 0 size 512005 from pos 0 cbcount 1 2025-06-25T14:59:02.487163Z node 10 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 0 size 512005 from pos 0 cbcount 1 2025-06-25T14:59:02.488174Z node 10 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 0 size 512005 from pos 0 cbcount 1 2025-06-25T14:59:02.489460Z node 10 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 0 size 512005 from pos 0 cbcount 1 2025-06-25T14:59:02.489924Z node 10 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 3 totakecount 1 count 1 size 172682 from pos 0 cbcount 1 2025-06-25T14:59:02.492490Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 123 2025-06-25T14:59:02.526105Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [10:628:2574], now have 1 active actors on pipe 2025-06-25T14:59:02.526234Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:59:02.526281Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-25T14:59:02.526334Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: topic partition: 0 SourceId: 'sourceid1' SeqNo: 15 partNo : 0 messageNo: 1 size 102400 offset: 14 2025-06-25T14:59:02.526405Z node 10 :PERSQUEUE DEBUG: event_helpers.cpp:40: tablet 72057594037927937 topic 'topic' partition 0 error: new GetOwnership request needed for owner 2025-06-25T14:59:02.526554Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:1434: [PQ: 72057594037927937] Handle TEvPQ::TEvError Cookie 45, Error new GetOwnership request needed for owner 2025-06-25T14:59:02.526592Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:402: Answer error topic: 'topic' partition: 0 messageNo: 1 requestId: error: new GetOwnership request needed for owner 2025-06-25T14:59:02.526645Z node 10 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-25T14:59:02.526714Z node 10 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000002_00000_0000000001_00014(+) to d0000000000_00000000000000000002_00000_0000000001_00014(+) 2025-06-25T14:59:02.526742Z node 10 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000003_00000_0000000001_00014(+) to d0000000000_00000000000000000003_00000_0000000001_00014(+) 2025-06-25T14:59:02.529225Z node 10 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:02.529305Z node 10 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:59:02.537796Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [10:648:2593], now have 1 active actors on pipe 2025-06-25T14:59:02.537964Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:59:02.538033Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-25T14:59:02.538189Z node 10 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|5eaf9dd6-4ebe9489-fa53b2a6-4e1683fc_14 generated for partition 0 topic 'topic' owner default 2025-06-25T14:59:02.538319Z node 10 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-25T14:59:02.538583Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:59:02.539088Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [10:650:2595], now have 1 active actors on pipe 2025-06-25T14:59:02.539153Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'topic' requestId: 2025-06-25T14:59:02.539190Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'topic' partition 0 2025-06-25T14:59:02.539245Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: topic partition: 0 SourceId: 'sourceid1' SeqNo: 15 partNo : 0 messageNo: 0 size 102400 offset: 14 2025-06-25T14:59:02.539362Z node 10 :PERSQUEUE DEBUG: partition_write.cpp:1843: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Send write quota request. Topic: "topic". Partition: 0. Amount: 102409. Cookie: 15 2025-06-25T14:59:02.539476Z node 10 :PERSQUEUE DEBUG: partition.cpp:3720: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Got quota. Topic: "topic". 
Partition: 0: Cookie: 15 2025-06-25T14:59:02.539646Z node 10 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 part blob processing sourceId 'sourceid1' seqNo 15 partNo 0 2025-06-25T14:59:02.540678Z node 10 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 part blob complete sourceId 'sourceid1' seqNo 15 partNo 0 FormedBlobsCount 0 NewHead: Offset 14 PartNo 0 PackedSize 102472 count 1 nextOffset 15 batches 1 2025-06-25T14:59:02.541600Z node 10 :PERSQUEUE DEBUG: partition_write.cpp:1762: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'topic' partition 0 compactOffset 14,1 HeadOffset 14 endOffset 14 curOffset 15 d0000000000_00000000000000000014_00000_0000000001_00000? size 102462 WTime 1199 2025-06-25T14:59:02.541930Z node 10 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:59:02.542064Z node 10 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 14 partNo 0 count 1 size 102462 2025-06-25T14:59:02.546240Z node 10 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 14 count 1 size 102462 actorID [10:138:2162] 2025-06-25T14:59:02.546383Z node 10 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 102409 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:02.546447Z node 10 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T14:59:02.546538Z node 10 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Answering for message sourceid: 'sourceid1', Topic: 'topic', Partition: 0, SeqNo: 15, partNo: 0, Offset: 14 is stored on disk 2025-06-25T14:59:02.546773Z node 10 :PERSQUEUE DEBUG: partition_compaction.cpp:179: [PQ: 72057594037927937, Partition: 0, State: StateIdle] need run compaction for 102462 bytes in 1 blobs 2025-06-25T14:59:02.546906Z node 10 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72057594037927937' partition 0 offset 14 partno 0 count 1 parts 0 suffix '63' size 102462 2025-06-25T14:59:02.546998Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:59:02.547092Z node 10 :PERSQUEUE DEBUG: partition_compaction.cpp:191: [PQ: 72057594037927937, Partition: 0, State: StateIdle] begin compaction for 102462 bytes in 1 blobs 2025-06-25T14:59:02.547209Z node 10 :PERSQUEUE DEBUG: partition_compaction.cpp:230: [PQ: 72057594037927937, Partition: 0, State: StateIdle] request key d0000000000_00000000000000000014_00000_0000000001_00000?, size 102462 2025-06-25T14:59:02.547270Z node 10 :PERSQUEUE DEBUG: partition_compaction.cpp:238: [PQ: 72057594037927937, Partition: 0, State: StateIdle] request 1 blobs for compaction 2025-06-25T14:59:02.547354Z node 10 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 14 partno 0 count 1 parts_count 0 source 1 size 102462 accessed 0 times before, last time 1970-01-01T00:00:01.000000Z 2025-06-25T14:59:02.547409Z node 10 :PERSQUEUE DEBUG: read.h:121: Reading cookie 0. All 1 blobs are from cache. 
2025-06-25T14:59:02.547485Z node 10 :PERSQUEUE DEBUG: partition_compaction.cpp:245: [PQ: 72057594037927937, Partition: 0, State: StateIdle] continue compaction 2025-06-25T14:59:02.547765Z node 10 :PERSQUEUE DEBUG: partition_compaction.cpp:57: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 part blob processing sourceId 'sourceid1' seqNo 15 partNo 0 2025-06-25T14:59:02.548726Z node 10 :PERSQUEUE DEBUG: partition_compaction.cpp:135: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 part blob complete sourceId 'sourceid1' seqNo 15 partNo 0 FormedBlobsCount 0 NewHead: Offset 14 PartNo 0 PackedSize 102472 count 1 nextOffset 15 batches 1 2025-06-25T14:59:02.549532Z node 10 :PERSQUEUE DEBUG: partition_compaction.cpp:401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Add new write blob: topic 'topic' partition 0 compactOffset 14,1 HeadOffset 14 endOffset 14 curOffset 15 d0000000000_00000000000000000014_00000_0000000001_00000| size 102462 WTime 1201 2025-06-25T14:59:02.549858Z node 10 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72057594037927937' partition 0 offset 14 partno 0 count 1 parts 0 suffix '63' 2025-06-25T14:59:02.549941Z node 10 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:59:02.550023Z node 10 :PERSQUEUE DEBUG: read.h:310: CacheProxy. Passthrough blob. Partition 0 offset 14 partNo 0 count 1 size 102462 2025-06-25T14:59:02.553864Z node 10 :PERSQUEUE DEBUG: cache_eviction.h:319: Caching head blob in L1. Partition 0 offset 14 count 1 size 102462 actorID [10:138:2162] 2025-06-25T14:59:02.553972Z node 10 :PERSQUEUE DEBUG: partition_compaction.cpp:323: [PQ: 72057594037927937, Partition: 0, State: StateIdle] compaction completed 2025-06-25T14:59:02.554345Z node 10 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:59:02.554459Z node 10 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72057594037927937' partition 0 offset 14 partno 0 count 1 parts 0 suffix '124' size 102462 2025-06-25T14:59:02.554513Z node 10 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:59:02.554559Z node 10 :PERSQUEUE DEBUG: read.h:348: CacheProxy. Delete blobs from d0000000000_00000000000000000014_00000_0000000001_00000?(+) to d0000000000_00000000000000000014_00000_0000000001_00000?(+) 2025-06-25T14:59:02.556826Z node 10 :PERSQUEUE DEBUG: cache_eviction.h:369: Deleting head blob in L1. Partition 0 offset 14 count 1 actorID [10:138:2162] 2025-06-25T14:59:02.556977Z node 10 :PERSQUEUE DEBUG: pq_l2_cache.cpp:146: PQ Cache (L2). Removed. 
Tablet '72057594037927937' partition 0 offset 14 partno 0 count 1 parts 0 suffix '63' size 102462 2025-06-25T14:59:02.557111Z node 10 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:02.557175Z node 10 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:59:02.565893Z node 10 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [10:673:2615], now have 1 active actors on pipe ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::CdcAndMergeWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:14.799642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:14.799742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:14.799789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:14.799826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:14.799877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:14.799905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:14.799956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:14.800035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:14.800785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: 
ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:14.801110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:14.875853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:14.875918Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:14.876767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:14.899086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:14.902792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:14.902942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:51:14.925420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:14.925646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:14.926331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:14.926594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:14.929196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:14.929381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:14.930433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:14.930517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:14.930626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:14.930669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:14.930708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:14.930827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:14.937614Z node 1 :HIVE INFO: 
tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:15.060325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:15.060503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.060660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:15.060699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:15.060933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:15.061006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:15.063385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.063600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:15.063815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.063888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:15.063931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:15.063966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:15.065790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.065847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:15.065884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:15.067291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, 
operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.067333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:15.067389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:15.067425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:15.069893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:15.071354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:15.071533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:15.072493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:15.072632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... 
ionId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 2 IsBackup: false CdcStreams { Name: "Stream" Mode: ECdcStreamModeKeysOnly PathId { OwnerId: 72057594046678944 LocalId: 6 } State: ECdcStreamStateReady SchemaVersion: 1 Format: ECdcStreamFormatProto VirtualTimestamps: false AwsRegion: "" ResolvedTimestampsIntervalMs: 0 SchemaChanges: false } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 
100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:01.340462Z node 164 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409551:2][72075186233409546][164:1166:2945] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T14:59:01.340592Z node 164 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409551:2][164:1136:2945] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:59:01.340754Z node 164 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409551:2][72075186233409546][164:1166:2945] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750863541285715 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750863541285715 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 },{ Order: 5 Group: 1750863541285715 Step: 5000004 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 2 LockId: 0 LockOffset: 0 }] } 2025-06-25T14:59:01.347007Z node 164 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409551:2][72075186233409546][164:1166:2945] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 5 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 5 2025-06-25T14:59:01.347123Z node 164 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409551:2][164:1136:2945] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-25T14:59:01.631130Z node 164 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:59:01.631481Z node 164 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 386us result status StatusSuccess 2025-06-25T14:59:01.632477Z node 164 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: 
"/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 
RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::SchemeLimitsCreatePq >> TSchemeShardSubDomainTest::CopyRejects [GOOD] >> TSchemeShardSubDomainTest::ConsistentCopyRejects >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel4 [GOOD] >> TSchemeShardSubDomainTest::CreateAndWait [GOOD] >> TPartitionTests::ShadowPartitionCounters [GOOD] >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenForceDrop >> TLocksTest::BrokenDupLock [GOOD] >> TSchemeShardSubDomainTest::SimultaneousDeclareAndCreateTable >> TSchemeShardSubDomainTest::Create >> TSchemeShardSubDomainTest::SchemeDatabaseQuotaRejects >> TSchemeShardSubDomainTest::DeclareDefineAndDelete >> TSchemeShardSubDomainTest::LS >> TPartitionTests::ShadowPartitionCountersFirstClass >> TSchemeShardSubDomainTest::SimultaneousCreateTableForceDrop [GOOD] >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-false >> TSchemeShardSubDomainTest::SimultaneousCreateForceDrop >> TSchemeShardSubDomainTest::SchemeQuotas >> TSchemeShardSubDomainTest::SchemeLimitsRejects >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenDrop >> TSchemeShardSubDomainTest::DeclareAndForbidTableInside >> TSchemeShardSubDomainTest::CreateSubDomainWithoutSomeTablets >> TPartitionTests::TestNonConflictingActsBatchOk [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateAndWait [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:03.248407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:03.248529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 
15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:03.248573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:03.248615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:03.248664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:03.248697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:03.248779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:03.248876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:03.249645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:03.250018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:03.332227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:03.332294Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:03.350563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:03.351015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:03.351210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:03.357672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:03.358016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:03.358540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:03.358781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:03.361623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:03.361818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:03.363032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:03.363079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:03.363221Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:03.363270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:03.363314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:03.363398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.373301Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:03.517744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:03.518009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.518214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:03.518271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:03.518507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:03.518576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:03.520830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:03.521039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:03.521231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.521282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:03.521339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 
2025-06-25T14:59:03.521391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:03.523273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.523326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:03.523365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:03.524898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.524951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.525010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:03.525056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:03.528756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:03.530427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:03.530600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:03.531431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:03.531558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:03.531634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:03.531886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:03.531934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:03.532100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:03.532178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:03.534176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:03.534228Z node 1 :FLAT_TX_SCHEMESHARD ... operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.601528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:59:03.601600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:03.601620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:03.601643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:03.601661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:03.601689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:59:03.601713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:03.601736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:59:03.601754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:59:03.601796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:59:03.601816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T14:59:03.601850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-25T14:59:03.601874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-25T14:59:03.602427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:03.602488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:03.602512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 
2025-06-25T14:59:03.602533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T14:59:03.602566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:03.602984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:03.603031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:03.603051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:03.603070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-25T14:59:03.603089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:59:03.603131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:59:03.605087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:03.605768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 100, wait until txId: 101 TestModificationResults wait txId: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-25T14:59:03.605971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-25T14:59:03.606012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-25T14:59:03.606103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:03.606121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:59:03.606416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:59:03.606527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:03.606561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:336:2325] 2025-06-25T14:59:03.606752Z 
node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:03.606786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:03.606801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:336:2325] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-25T14:59:03.607108Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:03.607274Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/USER_0" took 184us result status StatusSuccess 2025-06-25T14:59:03.607689Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:03.608122Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:03.608252Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir" took 132us 
result status StatusSuccess 2025-06-25T14:59:03.608527Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir" PathDescription { Self { Name: "dir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::CreateWithoutPlanResolution >> TSchemeShardSubDomainTest::ConcurrentCreateSubDomainAndDescribe >> TPartitionTests::TestBatchingWithChangeConfig >> TPQTest::TestDirectReadHappyWay [GOOD] >> TPQTest::TestDescribeBalancer >> TSchemeShardSubDomainTest::ConsistentCopyRejects [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTableForceDrop >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTablets >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTableForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:03.184708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:03.184817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:03.184875Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:03.184936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:03.184981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:03.185014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:03.185077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:03.185166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:03.185987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:03.186428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:03.272797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:03.272871Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:03.290895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:03.291311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:03.291495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:03.297702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:03.297992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:03.298599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:03.298859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:03.307581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:03.307784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:03.308916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:03.308982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:03.309143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:03.309185Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:03.309221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:03.309290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.315181Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:03.448231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:03.448515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.448742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:03.448792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:03.449043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:03.449125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:03.451662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:03.451872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:03.452076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.452129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:03.452199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:03.452244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:03.456510Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.456584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:03.456626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:03.458703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.458767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:03.458836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:03.458887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:03.462684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:03.464859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:03.465052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:03.466156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:03.466322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:03.466379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:03.466683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:03.466746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:03.466942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:03.467027Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:03.469435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:03.469487Z node 1 :FLAT_TX_SCHEMESHARD ... ssage: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:59:03.862432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409547 2025-06-25T14:59:03.862892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 Forgetting tablet 72075186233409549 2025-06-25T14:59:03.866170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:03.866238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:03.866377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:03.867431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-25T14:59:03.867482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-25T14:59:03.867595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-25T14:59:03.867620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-06-25T14:59:03.867778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5823: Failed to connect, to tablet: 72075186233409552, at schemeshard: 72057594046678944 2025-06-25T14:59:03.870876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:03.870918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:03.871008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:03.871116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:03.871159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:03.871245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:03.871425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:03.871454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:59:03.871872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-25T14:59:03.871901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-25T14:59:03.871960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:03.871982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:03.872201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:59:03.872265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-25T14:59:03.873268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:03.874545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-06-25T14:59:03.874745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:03.874783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-25T14:59:03.874851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:59:03.874869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:59:03.875315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:03.875431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:59:03.875474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:03.875506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:735:2621] 2025-06-25T14:59:03.875562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:59:03.875599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests 
-- TTxNotificationSubscriber for txId 102: satisfy waiter [1:735:2621] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-06-25T14:59:03.876103Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:03.876296Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 180us result status StatusPathDoesNotExist 2025-06-25T14:59:03.876482Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:03.876891Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:03.877081Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 200us result status StatusPathDoesNotExist 2025-06-25T14:59:03.877220Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/table_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/table_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:03.877659Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:03.877816Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 174us result status StatusSuccess 2025-06-25T14:59:03.878190Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::DeleteAdd >> TSchemeShardSubDomainTest::SimultaneousDeclareAndCreateTable [GOOD] >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenForceDrop [GOOD] >> TSchemeShardSubDomainTest::Create [GOOD] >> TSchemeShardSubDomainTest::CreateAlterNbsChannels ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/indexes/unittest >> KqpPrefixedVectorIndexes::OrderByCosineDistanceNotNullableLevel4 [GOOD] Test command err: Trying to start YDB, gRPC: 12932, MsgBus: 16694 2025-06-25T14:57:11.931631Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901177011028503:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:11.932085Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ba6/r3tmp/tmprtUjGx/pdisk_1.dat 2025-06-25T14:57:12.293278Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:57:12.293838Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901177011028322:2080] 1750863431900470 != 1750863431900473 TServer::EnableGrpc on GrpcPort 12932, node 1 2025-06-25T14:57:12.352460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:57:12.352565Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-25T14:57:12.374010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:57:12.392012Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:57:12.392035Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:57:12.392042Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:57:12.392130Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16694 2025-06-25T14:57:12.926319Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16694 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:57:13.190725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:57:13.212303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:57:13.230589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:13.362518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:57:13.526678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:13.638983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:57:15.290186Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901194190899154:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.290296Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.617461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.649077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.676115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.708425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.741795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.811499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.853565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:57:15.943371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901194190899822:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.943459Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.943648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901194190899827:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:57:15.947305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:57:15.968341Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901194190899829:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:57:16.068540Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901198485867178:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:57:16.929305Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901177011028503:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:57:16.929354Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:57:17.058258Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901202780834747:3596], Recipient [1:7519901181305995962:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:57:17.058295Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:50 ... chemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 38 Memory: 119352 } ShardState: 2 UserTablePartOwners: 72075186224037909 NodeId: 3 StartTime: 1750863491464 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T14:59:01.651453Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4992: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-25T14:59:01.651468Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037909 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0038 2025-06-25T14:59:01.651542Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037909 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 5] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-25T14:59:01.749151Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901431697106291:2144]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:59:01.749195Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:59:01.749206Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 7 2025-06-25T14:59:01.749246Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 7 2025-06-25T14:59:01.749258Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000212s, queue# 7 2025-06-25T14:59:01.749301Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:15 data size 0 row count 0 2025-06-25T14:59:01.749343Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037902 maps to shardIdx: 72057594046644480:15 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:59:01.749354Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037902, followerId 0 2025-06-25T14:59:01.749393Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:15 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:59:01.749432Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037902 2025-06-25T14:59:01.749454Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:16 data size 0 row count 0 2025-06-25T14:59:01.749472Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037903 maps to shardIdx: 72057594046644480:16 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:59:01.749479Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037903, followerId 0 2025-06-25T14:59:01.749502Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:16 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:59:01.749514Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037903 2025-06-25T14:59:01.749531Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:14 data size 0 row count 0 2025-06-25T14:59:01.749557Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037901 maps to shardIdx: 72057594046644480:14 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:59:01.749565Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037901, followerId 0 2025-06-25T14:59:01.749591Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:14 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:59:01.749600Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037901 2025-06-25T14:59:01.749612Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for 
pathId 5 shard idx 72057594046644480:21 data size 0 row count 0 2025-06-25T14:59:01.749629Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037908 maps to shardIdx: 72057594046644480:21 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:59:01.749634Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037908, followerId 0 2025-06-25T14:59:01.749652Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:21 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:59:01.749658Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037908 2025-06-25T14:59:01.749668Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:19 data size 0 row count 0 2025-06-25T14:59:01.749687Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037906 maps to shardIdx: 72057594046644480:19 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:59:01.749695Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037906, followerId 0 2025-06-25T14:59:01.749720Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:19 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:59:01.749729Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037906 2025-06-25T14:59:01.749741Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:18 data size 0 row count 0 2025-06-25T14:59:01.749767Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037905 maps to shardIdx: 72057594046644480:18 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:59:01.749775Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037905, followerId 0 2025-06-25T14:59:01.749797Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:18 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:59:01.749805Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037905 2025-06-25T14:59:01.749816Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 5 shard idx 72057594046644480:22 data size 0 row 
count 0 2025-06-25T14:59:01.749834Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037909 maps to shardIdx: 72057594046644480:22 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 5], pathId map=BatchUpload, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-25T14:59:01.749839Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037909, followerId 0 2025-06-25T14:59:01.749855Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:22 with partCount# 0, rowCount# 0, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-25T14:59:01.749862Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037909 2025-06-25T14:59:01.749898Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T14:59:01.750146Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:7519901431697106291:2144]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-25T14:59:01.750174Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5131: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-25T14:59:01.750184Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:59:02.316938Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7519901431697106291:2144]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:02.316973Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:02.317007Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [3:7519901431697106291:2144], Recipient [3:7519901431697106291:2144]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:02.317020Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TSchemeShardSubDomainTest::DeclareDefineAndDelete [GOOD] >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenDrop [GOOD] >> TSchemeShardSubDomainTest::ForceDropTwice >> TSchemeShardSubDomainTest::DiskSpaceUsage >> TSchemeShardSubDomainTest::LS [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateForceDrop [GOOD] >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-false [GOOD] >> TSchemeShardSubDomainTest::CreateSubDomainWithoutSomeTablets [GOOD] >> TSchemeShardSubDomainTest::SchemeLimitsCreatePq [GOOD] >> TSchemeShardSubDomainTest::CreateWithoutPlanResolution [GOOD] >> TSchemeShardSubDomainTest::DeclareAndForbidTableInside [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::ConsistentCopyRejects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 
is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:02.632060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:02.632142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:02.632189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:02.632236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:02.632276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:02.632301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:02.632368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:02.632438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:02.633117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:02.633463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:02.699504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:02.699562Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:02.714994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:02.715452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:02.715631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:02.722420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:02.722808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:02.723415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:02.723763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:02.727404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:02.727616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:02.728803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:02.728859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:02.729023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:02.729067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:02.729102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:02.729209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.735244Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:02.827826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:02.828088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.828298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:02.828360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:02.828621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:02.828692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:02.831288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:02.831483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: 
StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:02.831664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.831726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:02.831766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:02.831800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:02.834423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.834485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:02.834525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:02.836450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.836507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:02.836568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:02.836613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:02.840172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:02.843256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:02.843459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:02.844485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:02.844638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:02.844686Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:02.844968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:02.845017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:02.845198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:02.845278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:02.848820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:02.848879Z node 1 :FLAT_TX_SCHEMESHARD ... r { TxId: 106 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-06-25T14:59:04.597851Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 106:0 240 -> 240 2025-06-25T14:59:04.599290Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.599338Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 106:0 ProgressState 2025-06-25T14:59:04.599433Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T14:59:04.599465Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:04.599493Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T14:59:04.599516Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:04.599542Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true 2025-06-25T14:59:04.599597Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:642:2562] message: TxId: 106 2025-06-25T14:59:04.599664Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:04.599701Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:0 2025-06-25T14:59:04.599742Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 106:0 2025-06-25T14:59:04.599842Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-25T14:59:04.599868Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, 
LocalPathId: 3] was 3 2025-06-25T14:59:04.601192Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T14:59:04.601241Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:819:2714] TestWaitNotification: OK eventTxId 106 2025-06-25T14:59:04.601903Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:04.602125Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table" took 251us result status StatusSuccess 2025-06-25T14:59:04.602500Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table" PathDescription { Self { Name: "table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 
72057594046678944 2025-06-25T14:59:04.603113Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dst" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:04.603271Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dst" took 177us result status StatusSuccess 2025-06-25T14:59:04.603731Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dst" PathDescription { Self { Name: "dst" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "dst" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.604428Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 
2025-06-25T14:59:04.604583Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 168us result status StatusSuccess 2025-06-25T14:59:04.604944Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "dst" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCopying Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::ConcurrentCreateSubDomainAndDescribe [GOOD] >> TSchemeShardSubDomainTest::ColumnSchemeLimitsRejects >> TSchemeShardSubDomainTest::SchemeDatabaseQuotaRejects [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeclareDefineAndDelete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.484681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 
600.000000s, MaxRate# 1 2025-06-25T14:59:04.484747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.484774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.484806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.484838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.484867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.484907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.484974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:04.485614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.485999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.551899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.551963Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.566036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.566397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.566546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.572095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.572425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.573051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.573328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.576427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.576612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.577796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.577853Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.578005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.578062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.578104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.578183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.584820Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.711203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.711444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.711633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.711675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.711912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.711986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.714136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.714346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.714520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.714584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.714646Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.714687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.716582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.716653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:04.716692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.718421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.718492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.718559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.718606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:04.727631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:04.729150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:04.729323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:04.730122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.730237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.730283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.730516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:04.730568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, 
operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.730700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.730788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:04.732331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.732372Z node 1 :FLAT_TX_SCHEMESHARD ... y parts: 1/1 2025-06-25T14:59:04.881992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-25T14:59:04.882016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:59:04.882039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:59:04.882102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:59:04.882205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:59:04.882234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-25T14:59:04.882264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:59:04.882300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-25T14:59:04.882753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:04.882816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:04.882840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:59:04.882868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:59:04.882896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:04.883474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 
PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:04.883551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:04.883598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:59:04.883621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:59:04.883640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:04.883686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T14:59:04.886215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:04.886259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:04.886287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:04.886610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:59:04.886780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:59:04.886854Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-25T14:59:04.887026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.887244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-06-25T14:59:04.887826Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-25T14:59:04.888015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:04.888139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:04.888841Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: 
[72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409548 Forgetting tablet 72075186233409547 2025-06-25T14:59:04.890015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:04.890198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:04.890680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:04.890722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:04.890849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:04.891540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:04.891591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:04.891659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.892762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:04.892822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:04.893885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:04.893933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:59:04.894399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:04.894443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:04.895938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:04.896041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T14:59:04.896266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 
102: send EvNotifyTxCompletion 2025-06-25T14:59:04.896320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:59:04.896739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:59:04.896844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:59:04.896882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:527:2479] TestWaitNotification: OK eventTxId 102 2025-06-25T14:59:04.897458Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:04.897632Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 181us result status StatusPathDoesNotExist 2025-06-25T14:59:04.897813Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.475682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.475843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.475874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.475908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.475947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.475967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.476015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.476074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:04.476734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.476990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.552777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.552834Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.568639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.568994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.569155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.574216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.574510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.575150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.575402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.578824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.578985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.580087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.580142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.580282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.580353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.580400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: 
TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.580475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.586687Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.727401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.727641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.727852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.727902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.728128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.728202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.730142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.730337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.730502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.730551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.730607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.730652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.732423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.732472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 
2025-06-25T14:59:04.732514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.734006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.734064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.734124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.734169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:04.737523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:04.739165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:04.739331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:04.740266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.740420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.740480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.740763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:04.740818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.740984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.741060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:04.742908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.742954Z node 1 :FLAT_TX_SCHEMESHARD ... 4, txId: 101, path id: 1 2025-06-25T14:59:04.827987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-25T14:59:04.828059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.828122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:418: [72057594046678944] TDeleteParts opId# 101:0 ProgressState 2025-06-25T14:59:04.828227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:04.828274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:04.828339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:04.828374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:04.828411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:59:04.828447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:04.828483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:59:04.828516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:59:04.828572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:04.828603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T14:59:04.828635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:59:04.828662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-25T14:59:04.829353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:04.829440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:04.829469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:04.829511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 
72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:59:04.829559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:04.829910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:04.829976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:04.830004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:04.830030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:59:04.830053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:04.830119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:59:04.830325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:04.830357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:04.830420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:04.831526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:04.831581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:04.831646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.833635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:04.835241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:04.835331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at 
schemeshard: 72057594046678944 2025-06-25T14:59:04.835414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:59:04.835606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:04.835661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:59:04.836055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:04.836137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:04.836189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:348:2337] TestWaitNotification: OK eventTxId 101 2025-06-25T14:59:04.836670Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:04.836855Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 197us result status StatusPathDoesNotExist 2025-06-25T14:59:04.837041Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:04.837574Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:04.837743Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 151us result status StatusSuccess 2025-06-25T14:59:04.838146Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: 
EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::BrokenDupLock [GOOD] Test command err: 2025-06-25T14:58:26.260809Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901499139836513:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:26.261830Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206c/r3tmp/tmpOTBGXl/pdisk_1.dat 2025-06-25T14:58:26.600475Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:26.602099Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901499139836493:2080] 1750863506255892 != 1750863506255895 2025-06-25T14:58:26.644884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:26.645017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:26.647338Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26466 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:26.845014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:26.871897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:26.888972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:27.025383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:58:27.083128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206c/r3tmp/tmp9hSq5Y/pdisk_1.dat 2025-06-25T14:58:29.693943Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:29.706642Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:29.706721Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:29.708599Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:29.711997Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:29.714501Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901512126971724:2080] 1750863509416977 != 1750863509416980 TClient is connected to server localhost:24289 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:29.954685Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:29.964897Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:29.986112Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:30.067604Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:30.138933Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206c/r3tmp/tmpyFwgqu/pdisk_1.dat 2025-06-25T14:58:32.943060Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901525670278775:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:32.999397Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:33.086525Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:33.088450Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901525670278568:2080] 1750863512835786 != 1750863512835789 2025-06-25T14:58:33.110125Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:33.110193Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:33.115497Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2872 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:58:33.386627Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:33.397149Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:33.404923Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:33.409041Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo uns ... RT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:52.244547Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519901612512990915:2080] 1750863532135170 != 1750863532135173 2025-06-25T14:58:52.280274Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:52.280387Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:52.281953Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4204 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:52.525447Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-25T14:58:52.541055Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T14:58:52.546906Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:52.607720Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:52.678851Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:55.806325Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901623981524433:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:55.806825Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206c/r3tmp/tmpYVY7VN/pdisk_1.dat 2025-06-25T14:58:55.941019Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:55.958820Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:55.958917Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:55.964113Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1640 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:56.210626Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:56.233722Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:56.311895Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:56.395559Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:59.728834Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901641299465465:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:59.728906Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206c/r3tmp/tmpt72rl0/pdisk_1.dat 2025-06-25T14:58:59.878022Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:59.879973Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:7519901641299465446:2080] 1750863539728225 != 1750863539728228 2025-06-25T14:58:59.898325Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:59.898437Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:59.901288Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24005 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:59:00.159846Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:59:00.180044Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:00.252656Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:00.325789Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.744104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.744168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.744202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.744232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.744272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.744292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.744354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.744407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:04.744905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.745144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.809936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.809983Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.824118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.824402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.824547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.828903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.829148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.829731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at 
schemeshard: 72057594046678944 2025-06-25T14:59:04.829912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.832258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.832422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.833260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.833317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.833465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.833512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.833549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.833626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.839426Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.927888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.928071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.928265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.928324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.928510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.928566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.930144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.930291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.930434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.930475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.930523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.930560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.932181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.932230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:04.932264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.933623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.933695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.933747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.933793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:04.936078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:04.937274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:04.937407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:04.937981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.938078Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.938110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.938350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:04.938409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.938528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.938574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:04.939840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.939869Z node 1 :FLAT_TX_SCHEMESHARD ... 4, txId: 101, path id: 1 2025-06-25T14:59:05.013991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-25T14:59:05.014062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.014098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:418: [72057594046678944] TDeleteParts opId# 101:0 ProgressState 2025-06-25T14:59:05.014157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:05.014193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:05.014228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:05.014254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:05.014289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:59:05.014336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:05.014378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:59:05.014417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:59:05.014475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 
2025-06-25T14:59:05.014504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T14:59:05.014533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:59:05.014562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-25T14:59:05.015421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.015497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.015527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:05.015572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:59:05.015611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:05.016143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.016220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.016248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:05.016273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:59:05.016298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:05.016391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:59:05.016691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:05.016737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 
72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:05.016817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:05.017456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:05.017513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:05.017580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.019317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:05.021186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:05.021337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:05.021436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:59:05.021648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:05.021682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:59:05.022087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:05.022171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.022201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:348:2337] TestWaitNotification: OK eventTxId 101 2025-06-25T14:59:05.022619Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.022788Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 193us result status StatusPathDoesNotExist 2025-06-25T14:59:05.022953Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" 
PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:05.023452Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.023674Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 169us result status StatusSuccess 2025-06-25T14:59:05.024113Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.663330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact 
single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.663417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.663458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.663523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.663565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.663598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.663692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.663766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:04.664496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.664797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.741576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.741639Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.757569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.757914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.758087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.763374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.763663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.764288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.764556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.767612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.767791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.768948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.769004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.769148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.769194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.769236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.769330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.780752Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.932129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.932350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.932539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.932571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.932769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.932841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.935784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.935996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.936183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.936228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: 
TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.936280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.936338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.937956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.938014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:04.938048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.939472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.939528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.939583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.939629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:04.949382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:04.952045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:04.952214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:04.953162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.953314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.953388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.953705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:04.953769Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.953956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.954036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:04.956164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.956215Z node 1 :FLAT_TX_SCHEMESHARD ... ons { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.003067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-25T14:59:05.003275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 128 -> 240 2025-06-25T14:59:05.003326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-25T14:59:05.003499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.003800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:05.003847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:05.005766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.005805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:05.005902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:05.005973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.006006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-25T14:59:05.006056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 
FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:59:05.006329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.006369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:59:05.006462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:05.006502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:05.006545Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:05.006573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:05.006607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:59:05.006642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:05.006676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:59:05.006708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:59:05.006789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:05.006825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T14:59:05.006856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:59:05.006883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T14:59:05.007613Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.007717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.007747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:05.007784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:59:05.007825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:05.008450Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.008512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.008535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:05.008560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:59:05.008589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:05.008647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:59:05.011368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:05.011777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-25T14:59:05.015082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "SomeDatabase" TimeCastBucketsPerMediator: 2 DatabaseQuotas { storage_quotas { unit_kind: "nonexistent_storage_kind" data_size_hard_quota: 1 } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.015450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: /MyRoot/SomeDatabase, opId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.015658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain /MyRoot/SomeDatabase has the specified kinds. Existing storage kinds are: pool-kind-1, pool-kind-2, at schemeshard: 72057594046678944 2025-06-25T14:59:05.017955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain /MyRoot/SomeDatabase has the specified kinds. 
Existing storage kinds are: pool-kind-1, pool-kind-2" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:05.018180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain /MyRoot/SomeDatabase has the specified kinds. Existing storage kinds are: pool-kind-1, pool-kind-2, operation: ALTER DATABASE, path: /MyRoot/SomeDatabase TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-06-25T14:59:05.018456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:05.018505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-25T14:59:05.018633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:59:05.018673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:59:05.019094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:05.019181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:59:05.019234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.019273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:320:2309] 2025-06-25T14:59:05.019342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.019362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:320:2309] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDeclareAndCreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.484806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.484904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.484938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: 
StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.484973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.485017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.485042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.485088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.485153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:04.486019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.486338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.543342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.543387Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.554754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.555046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.555183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.560048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.560398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.561074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.561378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.564735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.564920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.566068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.566132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.566271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.566322Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.566361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.566455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.574891Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.708470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.708703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.708917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.708964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.709228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.709314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.711210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.711413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.711598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.711649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.711712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.711772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.713587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.713636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:04.713667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.715281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.715330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.715396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.715444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:04.718335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:04.719939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:04.720104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:04.721220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.721389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.721445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.721727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:04.721781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.721984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.722065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:04.724103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.724158Z node 1 :FLAT_TX_SCHEMESHARD ... SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.781078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:04.781165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.781212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-25T14:59:04.781259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-25T14:59:04.781559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.781603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-25T14:59:04.781694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:04.781744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:04.781788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:04.781841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:04.781887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-25T14:59:04.781926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:04.781961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 100:0 2025-06-25T14:59:04.781998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 100:0 2025-06-25T14:59:04.782062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:04.782120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-25T14:59:04.782153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: 
Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:59:04.782182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T14:59:04.782858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:04.782947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:04.782982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:04.783029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:59:04.783084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:04.783810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:04.783883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:04.783910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:04.783950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:59:04.783983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:04.784059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-25T14:59:04.787158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-25T14:59:04.787629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestModificationResults wait txId: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-25T14:59:04.787906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 
2025-06-25T14:59:04.787948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-25T14:59:04.788058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:04.788089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:59:04.788541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:59:04.788652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:04.788697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:318:2307] 2025-06-25T14:59:04.788844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:04.788963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:04.788987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:318:2307] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-25T14:59:04.789380Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:04.789541Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 205us result status StatusSuccess 2025-06-25T14:59:04.789977Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 
MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.790418Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:04.790602Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 189us result status StatusPathDoesNotExist 2025-06-25T14:59:04.790740Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/table_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/USER_0\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/USER_0/table_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/USER_0" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTableForceDrop [GOOD] >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTablets [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.624148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.624233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.624277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.624342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.624381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.624406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: 
type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.624455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.624520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:04.625204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.625582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.697685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.697747Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.712817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.713218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.713404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.719366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.719677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.720374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.720630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.723868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.724023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.724941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.724995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.725143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.725187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.725227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.725304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 
2025-06-25T14:59:04.731477Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.837337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.837546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.837688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.837715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.837889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.837944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.839571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.839706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.839814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.839876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.839928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.839958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.841438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.841481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:04.841654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.842758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.842793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.842835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.842866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:04.845037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:04.846229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:04.846382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:04.847022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.847131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.847166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.847352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:04.847383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.847502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.847545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:04.849292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.849334Z node 1 :FLAT_TX_SCHEMESHARD ... 
37968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:05.012567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:05.012988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-25T14:59:05.013131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:59:05.013841Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409548 Forgetting tablet 72075186233409551 2025-06-25T14:59:05.015589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:05.015745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:05.016342Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-25T14:59:05.016558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:59:05.016705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409547 2025-06-25T14:59:05.017261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:05.017308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:05.017451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 Forgetting tablet 72075186233409549 2025-06-25T14:59:05.018722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:05.018780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:05.018871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.020072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-25T14:59:05.020121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-25T14:59:05.020369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5823: Failed to connect, to tablet: 72075186233409550, at schemeshard: 72057594046678944 2025-06-25T14:59:05.022264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:05.022304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:05.022378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:05.022398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:59:05.022478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-25T14:59:05.022498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-25T14:59:05.022581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5823: Failed to connect, to tablet: 72075186233409551, at schemeshard: 72057594046678944 2025-06-25T14:59:05.024647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:05.024682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:05.024748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:59:05.024790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-25T14:59:05.024975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5823: Failed to connect, to tablet: 72075186233409549, at schemeshard: 72057594046678944 2025-06-25T14:59:05.025025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:05.025097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-25T14:59:05.025310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-25T14:59:05.025346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-25T14:59:05.025455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:05.025477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- 
TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:59:05.025856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:59:05.025969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.026042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:616:2520] 2025-06-25T14:59:05.026322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:05.026399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.026433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:616:2520] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-25T14:59:05.026819Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.026980Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 184us result status StatusPathDoesNotExist 2025-06-25T14:59:05.027158Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:05.027500Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.027676Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 171us result status StatusSuccess 2025-06-25T14:59:05.028076Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::LS [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.607412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.607502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.607542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.607571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.607604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.607625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.607662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.607723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:59:04.608543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.608818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.683756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.683814Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.700157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.700524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.700694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.706134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.706437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.707087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.707357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.710596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.710770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.711881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.711937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.712086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.712134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.712184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.712273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.718711Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.863915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.864173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.864380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.864421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.864651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.864718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.866571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.866771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.866935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.866989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.867049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.867097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.869052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.869111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:04.869151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.870706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.870761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.870824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.870874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:04.874377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:04.875981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:04.876159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:04.876948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.877089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.877148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.877421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:04.877470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.877625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.877709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:04.879536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.879597Z node 1 :FLAT_TX_SCHEMESHARD ... 
chemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:04.996010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.996041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-25T14:59:04.996082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-25T14:59:04.996131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.996169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-25T14:59:04.996262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:04.996325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:04.996438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:04.996470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:04.996505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-25T14:59:04.996553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:04.996596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 100:0 2025-06-25T14:59:04.996627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 100:0 2025-06-25T14:59:04.996781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:59:04.996824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-25T14:59:04.996859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:59:04.996886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T14:59:04.997707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:04.997795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 
72057594046678944, cookie: 100 2025-06-25T14:59:04.997831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:04.997870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:59:04.997919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:04.998440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:04.998517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:04.998544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:04.998584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:59:04.998613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:04.998677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-25T14:59:05.002487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-25T14:59:05.002611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-25T14:59:05.002871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-25T14:59:05.002925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-25T14:59:05.003317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:59:05.003400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.003436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:454:2407] TestWaitNotification: OK eventTxId 100 2025-06-25T14:59:05.003969Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false 
ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.004369Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 385us result status StatusSuccess 2025-06-25T14:59:05.004941Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.005578Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.005769Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 194us result status StatusSuccess 2025-06-25T14:59:05.006115Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true 
CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeLimitsCreatePq [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.011803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.011892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.011925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.011963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.012001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.012026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.012089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.012163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:59:04.012880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.013175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.086142Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.086202Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.101616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.101972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.102122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.108137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.108450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.109032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.109269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.112440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.112611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.113749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.113801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.113943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.113998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.114035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.114107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.125737Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.223390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.223600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.223804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.223847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.224061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.224128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.226427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.226648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.226825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.226869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.226903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.226934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.228412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.228460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:04.228494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.230012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.230060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.230112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.230154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:04.233088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:04.234577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:04.234729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:04.235457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.235577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.235626Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.235857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:04.235907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.236066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.236142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:04.237634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.237676Z node 1 :FLAT_TX_SCHEMESHARD ... 
rationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.858399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.858494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:661: NPQState::TPropose operationId# 104:0 HandleReply TEvOperationPlan, step: 5000003, at tablet: 72057594046678944 2025-06-25T14:59:04.858588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:753: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 2 2025-06-25T14:59:04.860840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.862092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 FAKE_COORDINATOR: Erasing txId 104 2025-06-25T14:59:04.936005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409551, partId: 0 2025-06-25T14:59:04.936175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409551 Status: COMPLETE TxId: 104 Step: 5000003 2025-06-25T14:59:04.936258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:623: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409551 Status: COMPLETE TxId: 104 Step: 5000003 2025-06-25T14:59:04.936330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:264: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:6, shard: 72075186233409551, left await: 1, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.936369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:628: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: false 2025-06-25T14:59:04.936407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:753: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-25T14:59:04.937173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409550, partId: 0 2025-06-25T14:59:04.937277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409550 Status: COMPLETE TxId: 104 Step: 5000003 2025-06-25T14:59:04.937333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:623: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 
72057594046678944 message# Origin: 72075186233409550 Status: COMPLETE TxId: 104 Step: 5000003 2025-06-25T14:59:04.937384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:264: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:5, shard: 72075186233409550, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.937408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:628: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-25T14:59:04.937570Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 128 -> 240 2025-06-25T14:59:04.937725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:04.937838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T14:59:04.940266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.940852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.941129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.941163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.941304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:59:04.941496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.941530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:341:2315], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-06-25T14:59:04.941561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:341:2315], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-25T14:59:04.941982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.942023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-25T14:59:04.942106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:59:04.942139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:04.942180Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:59:04.942206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:04.942235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-25T14:59:04.942272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:04.942323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:59:04.942363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:59:04.942509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-25T14:59:04.942547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-25T14:59:04.942578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:59:04.942603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-25T14:59:04.943297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:59:04.943373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:59:04.943432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:59:04.943480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:59:04.943532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:59:04.943956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:59:04.944019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:59:04.944046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:59:04.944070Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T14:59:04.944093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T14:59:04.944146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-25T14:59:04.951638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:59:04.951711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateWithoutPlanResolution [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.876955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.877021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.877048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.877073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.877103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.877123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.877156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.877247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:04.877846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.878080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.958490Z 
node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.958545Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.975450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.975805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.975983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.981665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.981898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.982388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.982610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.985488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.985623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.986584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.986641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.986780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.986830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.986891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.986971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.992047Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:05.121819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.121992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.122137Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:05.122164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:05.122358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:05.122440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:05.124140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.124296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:05.124459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.124494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:05.124528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:05.124555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:05.125962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.125998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:05.126026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:05.127667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.127749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.127815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.127872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.131543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 
Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:05.133477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:05.133699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:05.134680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.134832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.134899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.135204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:05.135264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.135466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.135554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:05.137662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.137700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:05.137878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.137911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:59:05.138195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.138248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:59:05.138335Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:59:05.138362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.138392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:59:05.138436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.138473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:59:05.138517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.138557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:59:05.138582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:59:05.138635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:05.138664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:59:05.138693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:59:05.140120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:59:05.140225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:59:05.140266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:59:05.140305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:59:05.140377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.140478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:59:05.144396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:59:05.144867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 100 2025-06-25T14:59:05.145535Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# 
[1:274:2263] Bootstrap 2025-06-25T14:59:05.164189Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:59:05.166574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { Coordinators: 1 Mediators: 1 Name: "USER_0" TimeCastBucketsPerMediator: 2 StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 100 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.166798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_0, opId: 100:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.166886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 100:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: plan resolution is 0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.167821Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:59:05.170113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 100, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: plan resolution is 0" TxId: 100 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.170295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 100, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: plan resolution is 0, operation: CREATE DATABASE, path: /MyRoot/USER_0 2025-06-25T14:59:05.170684Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-25T14:59:05.170887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-25T14:59:05.170922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-25T14:59:05.171295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:59:05.171372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.171402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:289:2278] TestWaitNotification: OK eventTxId 100 2025-06-25T14:59:05.171852Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.172008Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 164us result status StatusPathDoesNotExist 2025-06-25T14:59:05.172174Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutSomeTablets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.887097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.887158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.887182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.887205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.887236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.887270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.887336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.887386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:04.887856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.888078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.942061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.942101Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.956693Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.957038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.957193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.962086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.962349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.962878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.963087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.965950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.966102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.967021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.967063Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.967162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.967197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.967230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.967313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.972335Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:05.091296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.091510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.091691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:05.091748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:05.091970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:05.092043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:05.093934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.094115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:05.094267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.094320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:05.094389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:05.094428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:05.096026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.096080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:05.096113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:05.097673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.097726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.097783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.097825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.101339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:05.102902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:05.103080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:05.103838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.103967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.104028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.104293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:05.104358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.104507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.104583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:05.106404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.106449Z node 1 :FLAT_TX_SCHEMESHARD ... 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:59:05.109320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.109421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:59:05.111912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:59:05.112365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 100 2025-06-25T14:59:05.113023Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:274:2263] Bootstrap 2025-06-25T14:59:05.130940Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:274:2263] Become StateWork (SchemeCache [1:279:2268]) 2025-06-25T14:59:05.133371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Name: "USER_1" TimeCastBucketsPerMediator: 2 StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 100 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.133613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_1, opId: 100:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.133694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 100:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with coordinators, but no mediators, at schemeshard: 72057594046678944 2025-06-25T14:59:05.134388Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:59:05.136273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 100, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: cant create subdomain with coordinators, but no mediators" TxId: 100 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.136431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 100, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with coordinators, but no mediators, operation: CREATE DATABASE, path: /MyRoot/USER_1 2025-06-25T14:59:05.136713Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 100, wait until txId: 100 TestModificationResults wait txId: 101 2025-06-25T14:59:05.138580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: 
ESchemeOpCreateSubDomain SubDomain { PlanResolution: 50 Mediators: 1 Name: "USER_2" TimeCastBucketsPerMediator: 2 StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.138743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_2, opId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.138831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with mediators, but no coordinators, at schemeshard: 72057594046678944 2025-06-25T14:59:05.140377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: cant create subdomain with mediators, but no coordinators" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.140554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with mediators, but no coordinators, operation: CREATE DATABASE, path: /MyRoot/USER_2 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-25T14:59:05.140735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-25T14:59:05.140761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-25T14:59:05.140838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:05.140861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:59:05.141135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:59:05.141223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.141245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:293:2282] 2025-06-25T14:59:05.141367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:05.141430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.141450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:293:2282] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-25T14:59:05.141731Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false 
ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.141892Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1" took 121us result status StatusPathDoesNotExist 2025-06-25T14:59:05.142030Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:05.142387Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.142523Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_2" took 152us result status StatusPathDoesNotExist 2025-06-25T14:59:05.142607Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_2\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_2" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:05.142961Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.143101Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 143us result status StatusSuccess 2025-06-25T14:59:05.143430Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeclareAndForbidTableInside [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.772563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.772648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.772683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.772719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.772765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.772793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.772862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.772933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:59:04.773654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.773957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.852567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.852635Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.867485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.867836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.867992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.873717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.874000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.874540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.874757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.877459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.877606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.878595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.878643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.878768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.878814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.878850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.878923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.884603Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.990713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.990930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.991105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.991147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.991355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.991443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.993342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.993531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.993677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.993732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.993784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.993819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.995363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.995427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:04.995467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.997010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.997085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.997155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.997200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.006647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:05.008610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:05.008808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:05.009705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.009835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.009886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.010133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:05.010185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.010347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.010439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:05.012414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.012461Z node 1 :FLAT_TX_SCHEMESHARD ... 
: 72057594046678944, cookie: 101 2025-06-25T14:59:05.072992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.073009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:05.073045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T14:59:05.073073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:59:05.073118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-25T14:59:05.074225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-25T14:59:05.074365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000003 2025-06-25T14:59:05.074689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.074773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.074828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000003, at schemeshard: 72057594046678944 2025-06-25T14:59:05.074921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 128 -> 240 2025-06-25T14:59:05.075024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:05.075069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:59:05.076051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:05.076470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:05.077615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.077657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:05.077758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:59:05.077808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.077828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-25T14:59:05.077854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 3 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:59:05.078051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.078083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:59:05.078170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:05.078197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:05.078221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:05.078245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:05.078274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:59:05.078322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:05.078352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:59:05.078397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:59:05.078451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:59:05.078474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T14:59:05.078495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-25T14:59:05.078520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-25T14:59:05.079005Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.079057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.079080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:05.079103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T14:59:05.079136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:05.079559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.079607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:05.079623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:05.079638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-25T14:59:05.079665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:59:05.079715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:59:05.082208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:05.082433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-25T14:59:05.085204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0/dir" OperationType: ESchemeOpCreateTable CreateTable { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } KeyColumnNames: "RowId" } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.085512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: 
/MyRoot/USER_0/dir/table_0, opId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.085610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:430: TCreateTable Propose, path: /MyRoot/USER_0/dir/table_0, opId: 102:0, schema: Name: "table_0" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } KeyColumnNames: "RowId", at schemeshard: 72057594046678944 2025-06-25T14:59:05.085739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusNameConflict, reason: Inclusive subDomain do not support shared transactions, at schemeshard: 72057594046678944 2025-06-25T14:59:05.087238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusNameConflict Reason: "Inclusive subDomain do not support shared transactions" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.087479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot/USER_0, subject: , status: StatusNameConflict, reason: Inclusive subDomain do not support shared transactions, operation: CREATE TABLE, path: /MyRoot/USER_0/dir/table_0 TestModificationResult got TxId: 102, wait until txId: 102 >> TPQTest::TestDescribeBalancer [GOOD] >> TPQTest::TestComactifiedWithRetention >> KikimrIcGateway::TestSecretsExistingValidation [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateTenantDirTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeDatabaseQuotaRejects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.515338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.515425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.515464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.515498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.515539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.515565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.515612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.515680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] 
Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:04.516573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.516899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.591740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.591798Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.607774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.608156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.608358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.614187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.614539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.615146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.615407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.618484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.618673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.619817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.619873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.620009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.620058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.620096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.620191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.626261Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.742470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" 
OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.742689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.742875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.742918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.743166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.743299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.745233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.745431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.745597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.745653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.745705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.745740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.747319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.747372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:04.747405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.748898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.748957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.749011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.749056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:04.752240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:04.753824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:04.753981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:04.754847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.754975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.755021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.755280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:04.755329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.755473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.755559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:04.757268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.757322Z node 1 :FLAT_TX_SCHEMESHARD ... 
6678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:05.276280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:05.276322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:05.277475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T14:59:05.278247Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5 TabletID: 72075186233409550 Forgetting tablet 72075186233409550 2025-06-25T14:59:05.279725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-25T14:59:05.280019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:59:05.280901Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-25T14:59:05.281108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.281317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-06-25T14:59:05.283400Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-25T14:59:05.283548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:05.283759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:59:05.284200Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-25T14:59:05.285975Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-25T14:59:05.286501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:05.286680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting 
tablet 72075186233409548 2025-06-25T14:59:05.287110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:59:05.287267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409547 Forgetting tablet 72075186233409549 2025-06-25T14:59:05.288254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:05.288302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:59:05.288399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:05.290014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:05.290063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:05.290180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:05.291179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T14:59:05.291336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T14:59:05.291887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-25T14:59:05.291950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-25T14:59:05.292037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:05.292059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:05.295236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:05.295296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:59:05.295411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:05.295435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:05.295475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:59:05.295510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-25T14:59:05.295831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:05.295941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:05.296001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:05.296041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:05.296128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.298117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-25T14:59:05.298439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-25T14:59:05.298490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-25T14:59:05.298969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-25T14:59:05.299057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.299092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:904:2802] TestWaitNotification: OK eventTxId 106 2025-06-25T14:59:05.299716Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.299945Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 236us result status StatusSuccess 2025-06-25T14:59:05.300361Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::CreateAlterNbsChannels [GOOD] >> TSchemeShardSubDomainTest::ForceDropTwice [GOOD] >> TPartitionTests::ConflictingCommitsInSeveralBatches [GOOD] |90.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTableForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:05.147344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:05.147435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:05.147471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:05.147505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:05.147545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:05.147573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:05.147658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue 
configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:05.147728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:05.148421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:05.148698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:05.212639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:05.212734Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:05.228491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:05.228873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:05.229048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:05.234520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:05.234868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:05.235465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.235729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:05.238897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.239073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:05.240180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.240251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.240405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:05.240453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:05.240489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:05.240573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.246735Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 
is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:05.368130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.368327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.368485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:05.368516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:05.368714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:05.368773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:05.370510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.370642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:05.370801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.370839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:05.370880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:05.370908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:05.372407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.372448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:05.372475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:05.375617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.375668Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.375709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.375738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.378104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:05.379587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:05.379805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:05.380549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.380654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.380690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.380868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:05.380898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.381037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.381113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:05.382637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.382669Z node 1 :FLAT_TX_SCHEMESHARD ... 
8944 2025-06-25T14:59:05.599240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:05.601801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-25T14:59:05.601862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-25T14:59:05.602049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5823: Failed to connect, to tablet: 72075186233409550, at schemeshard: 72057594046678944 2025-06-25T14:59:05.602143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-25T14:59:05.602214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:05.602929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:05.602980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:05.603060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.605266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:05.605311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:05.605400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:05.605424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:59:05.605486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-25T14:59:05.605507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-25T14:59:05.605633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5823: Failed to connect, to tablet: 72075186233409551, at schemeshard: 72057594046678944 2025-06-25T14:59:05.605707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:05.605744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:05.605804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:59:05.605840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-25T14:59:05.606347Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:5823: Failed to connect, to tablet: 72075186233409549, at schemeshard: 72057594046678944 2025-06-25T14:59:05.607817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:05.607910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 100 2025-06-25T14:59:05.608176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-25T14:59:05.608218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-25T14:59:05.608353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:05.608389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-25T14:59:05.608456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:59:05.608479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T14:59:05.609024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:59:05.609159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.609204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:637:2536] 2025-06-25T14:59:05.609339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:05.609459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.609482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:637:2536] 2025-06-25T14:59:05.609538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:59:05.609643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.609668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:637:2536] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-06-25T14:59:05.610105Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, 
at schemeshard: 72057594046678944 2025-06-25T14:59:05.610307Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 181us result status StatusPathDoesNotExist 2025-06-25T14:59:05.610564Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:05.611075Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.611232Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 176us result status StatusPathDoesNotExist 2025-06-25T14:59:05.611358Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/table_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/table_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:05.611820Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.612006Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 186us result status StatusSuccess 2025-06-25T14:59:05.616579Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 
0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTablets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:05.282401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:05.282480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:05.282512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:05.282545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:05.282581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:05.282619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:05.282686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:05.282756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:59:05.283503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:05.283858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:05.360575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:05.360640Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:05.380643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:05.381067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:05.381251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:05.386800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:05.387154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:05.387777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.388028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:05.391408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.391610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:05.392856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.392923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.393075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:05.393124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:05.393181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:05.393276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.400145Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:05.515901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.516115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.516290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:05.516351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:05.516580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:05.516642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:05.521223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.521437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:05.521602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.521645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:05.521697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:05.521731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:05.525016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.525075Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:05.525109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:05.529085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.529154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.529227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.529271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.532402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:05.537031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:05.537246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:05.538178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.538313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.538367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.538632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:05.538679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.538838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.538901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:05.540667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.540701Z node 1 :FLAT_TX_SCHEMESHARD ... 
oard.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:05.579534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.579555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-25T14:59:05.579578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 2 FAKE_COORDINATOR: Erasing txId 100 2025-06-25T14:59:05.579786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.579840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-25T14:59:05.579941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:05.579970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:05.580001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:05.580027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:05.580057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-25T14:59:05.580094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:05.580122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 100:0 2025-06-25T14:59:05.580148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 100:0 2025-06-25T14:59:05.580197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:05.580233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-25T14:59:05.580263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:59:05.580296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T14:59:05.581011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:05.581080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: 
Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:05.581109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:05.581144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:59:05.581193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:05.581800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:05.581864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:05.581892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:05.581916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:59:05.581939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:05.582009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-25T14:59:05.584675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-25T14:59:05.584923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-25T14:59:05.585117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-25T14:59:05.585153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-25T14:59:05.585467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:59:05.585524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.585551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:316:2305] TestWaitNotification: OK eventTxId 100 2025-06-25T14:59:05.585865Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, 
record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.585987Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 140us result status StatusSuccess 2025-06-25T14:59:05.586279Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.586581Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.586698Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 125us result status StatusSuccess 2025-06-25T14:59:05.586950Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 
5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::DeleteAdd [GOOD] >> TSchemeShardSubDomainTest::SimultaneousDeclare >> TPartitionTests::ConflictingCommitFails |90.5%| [TA] $(B)/ydb/core/kqp/ut/indexes/test-results/unittest/{meta.json ... results_accumulator.log} >> TStoragePoolsQuotasTest::DifferentQuotasInteraction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::ForceDropTwice [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:05.610318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:05.610399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:05.610439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:05.610490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:05.610531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:05.610552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:05.610602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:05.610669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:05.611369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:05.611687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:05.674856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:05.674915Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:05.687660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:05.688013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:05.688176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:05.692497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:05.692766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:05.693231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.693445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:05.696513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.696706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:05.697820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.697874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.698044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:05.698093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:05.698132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:05.698205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.704371Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:05.789471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.789682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.789831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:05.789862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:05.790050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:05.790108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:05.791698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.791897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:05.792044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.792091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:05.792160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:05.792196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:05.793881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.793930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:05.793981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:05.795391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.795442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-25T14:59:05.795497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.795542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.804168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:05.805946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:05.806111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:05.807012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.807137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.807185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.807428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:05.807469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.807626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.807703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:05.809548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.809583Z node 1 :FLAT_TX_SCHEMESHARD ... 
node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 Forgetting tablet 72075186233409546 2025-06-25T14:59:06.046985Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186233409551 2025-06-25T14:59:06.047800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:06.047979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:06.048540Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-25T14:59:06.048963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-25T14:59:06.049169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409548 2025-06-25T14:59:06.050842Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-25T14:59:06.051338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:06.051486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409551 Forgetting tablet 72075186233409547 2025-06-25T14:59:06.052829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:59:06.053006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409549 2025-06-25T14:59:06.054403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:06.054456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:06.054588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:06.055295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 
paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:06.055340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:06.055399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:06.055751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-25T14:59:06.055798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-25T14:59:06.056097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:06.056129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:06.058531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:06.058566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:59:06.058656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-25T14:59:06.058677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-25T14:59:06.058744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:06.058764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:06.060819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:59:06.060865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-25T14:59:06.061089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:06.061166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 102 2025-06-25T14:59:06.061454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T14:59:06.061493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-25T14:59:06.061599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:59:06.061624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, 
SendToSchemeshard, txId 103 2025-06-25T14:59:06.062008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T14:59:06.062113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:59:06.062174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:59:06.062219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:679:2580] 2025-06-25T14:59:06.062338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:59:06.062362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:679:2580] TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-25T14:59:06.062907Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:06.063139Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 193us result status StatusPathDoesNotExist 2025-06-25T14:59:06.063321Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:06.063720Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:06.063890Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 165us result status StatusSuccess 2025-06-25T14:59:06.064404Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateAlterNbsChannels [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.517456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.517567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.517604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.517639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.517698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.517723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.517777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.517855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:59:04.518556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.518884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.589599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.589651Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.604697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.605040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.605218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.610815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.611101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.611668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.611945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.615330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.615527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.616671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.616727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.616864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.616907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.616946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.617029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.623302Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.731425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.731634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.731805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.731848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.732059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.732121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.733878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.734053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.734190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.734252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.734288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.734319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.736287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.736351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:04.736386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.737807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.737845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.737897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.737941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:04.741038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:04.742478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:04.742620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:04.743283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.743390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.743436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.743665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:04.743706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.743863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.743929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:04.745714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.745755Z node 1 :FLAT_TX_SCHEMESHARD ... 
schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:05.960668Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-25T14:59:05.961687Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:05.961743Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:05.961769Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:05.961792Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:05.962553Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:59:05.963339Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-25T14:59:05.964274Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-25T14:59:05.964721Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.964972Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:59:05.965433Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:05.967553Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 Forgetting tablet 72075186233409546 2025-06-25T14:59:05.968284Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409548 2025-06-25T14:59:05.968673Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:05.968831Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:05.968976Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-25T14:59:05.969101Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 Forgetting tablet 72075186233409547 2025-06-25T14:59:05.970449Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:59:05.970602Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409549 2025-06-25T14:59:05.971367Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:05.971411Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:59:05.971474Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:05.971782Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:05.971828Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:05.971947Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:05.972142Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:59:05.974275Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:05.974322Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:05.974678Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:05.974708Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:59:05.974757Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:05.974776Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:05.974871Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:59:05.974910Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-25T14:59:05.975988Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done 
PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:05.976092Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:05.976129Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:05.976195Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.976374Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:05.977470Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-25T14:59:05.977711Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-25T14:59:05.977751Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-25T14:59:05.978134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-25T14:59:05.978207Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T14:59:05.978243Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:654:2603] TestWaitNotification: OK eventTxId 105 2025-06-25T14:59:05.978725Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.978877Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/BSVolume" took 178us result status StatusPathDoesNotExist 2025-06-25T14:59:05.979025Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 
2025-06-25T14:59:05.979493Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:05.979622Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 138us result status StatusPathDoesNotExist 2025-06-25T14:59:05.979731Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::ColumnSchemeLimitsRejects [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeleteAdd [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:05.381989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:05.382078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:05.382114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:05.382153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:05.382196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:05.382223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:05.382272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:05.382338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:05.383051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:05.383361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:05.463486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:05.463552Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:05.480808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:05.481210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:05.481394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:05.488669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:05.488970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:05.489579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.489814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:05.496672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.496861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:05.498174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.498241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.498392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:05.498438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:05.498478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:05.498562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.506099Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:05.647219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } 
StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.647432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.647619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:05.647669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:05.647881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:05.647955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:05.650084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.650338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:05.650511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.650560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:05.650615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:05.650658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:05.652446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.652498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:05.652541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:05.654232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.654289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.654347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T14:59:05.654407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.657976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:05.659805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:05.659978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:05.660918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.661032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.661084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.661327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:05.661393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.661556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.661623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:05.663513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.663557Z node 1 :FLAT_TX_SCHEMESHARD ... 
meshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:06.137364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:59:06.137465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:06.137503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-25T14:59:06.137587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 3 2025-06-25T14:59:06.137876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.137925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T14:59:06.138011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:59:06.138043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:59:06.138077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T14:59:06.138102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:59:06.138151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-25T14:59:06.138194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T14:59:06.138235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T14:59:06.138265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T14:59:06.138447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 9 2025-06-25T14:59:06.138497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 1 2025-06-25T14:59:06.138567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-25T14:59:06.138610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-25T14:59:06.139286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:06.139375Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:06.139410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:59:06.139448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-25T14:59:06.139484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:06.140360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:06.140454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T14:59:06.140494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T14:59:06.140524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-25T14:59:06.140551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 8 2025-06-25T14:59:06.140620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 1 2025-06-25T14:59:06.140691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:572:2479] 2025-06-25T14:59:06.142926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:59:06.144121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:59:06.144212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:59:06.144250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:966:2785] TestWaitNotification: OK eventTxId 102 2025-06-25T14:59:06.144809Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 
72057594046678944 2025-06-25T14:59:06.145031Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 241us result status StatusSuccess 2025-06-25T14:59:06.145446Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409552 Coordinators: 72075186233409553 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 Mediators: 72075186233409556 Mediators: 72075186233409557 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:06.146027Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:06.146201Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 187us result status StatusSuccess 2025-06-25T14:59:06.146608Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 
5000004 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestSecretsExistingValidation [GOOD] Test command err: Trying to start YDB, gRPC: 21704, MsgBus: 20710 2025-06-25T14:58:46.997433Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901585816783061:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:46.997584Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001364/r3tmp/tmpxxuBhz/pdisk_1.dat 2025-06-25T14:58:47.300841Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901585816783041:2080] 1750863526996547 != 1750863526996550 2025-06-25T14:58:47.303852Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21704, node 1 2025-06-25T14:58:47.379806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:47.379994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:47.381953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:47.392843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:47.392868Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:47.392877Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:47.392990Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20710 TClient is connected to server localhost:20710 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:47.942514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:47.955154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:58:48.006699Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:49.799034Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901598701685611:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:49.799148Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:50.036009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:50.147040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:50.176571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:50.206743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:50.246562Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901602996653223:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:50.246639Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:50.246753Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901602996653228:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:50.250240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:50.261523Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901602996653230:2330], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710664 completed, doublechecking } 2025-06-25T14:58:50.320493Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901602996653281:2563] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 14425, MsgBus: 3869 2025-06-25T14:58:51.056202Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901608322506304:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:51.056230Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001364/r3tmp/tmp1ZBiWk/pdisk_1.dat 2025-06-25T14:58:51.169487Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:51.170562Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901608322506278:2080] 1750863531054982 != 1750863531054985 TServer::EnableGrpc on GrpcPort 14425, node 2 2025-06-25T14:58:51.198269Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:51.198355Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:51.199227Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:51.218774Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:51.218793Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:51.218800Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:51.218906Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3869 TClient is connected to server localhost:3869 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:58:51.613108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:51.625597Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is un ... e/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:58.155151Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:58:59.850479Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715716:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) Trying to start YDB, gRPC: 17081, MsgBus: 10779 2025-06-25T14:59:00.697002Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901647992236873:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:00.697046Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001364/r3tmp/tmpdvqSpg/pdisk_1.dat 2025-06-25T14:59:00.796561Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:00.797720Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901647992236853:2080] 1750863540696577 != 1750863540696580 TServer::EnableGrpc on GrpcPort 17081, node 3 2025-06-25T14:59:00.833694Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:00.833794Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:00.835429Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:00.843931Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:00.843977Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:00.843986Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:00.844128Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10779 TClient is connected to server 
localhost:10779 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:01.307553Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:59:01.323151Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:01.394043Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:01.535954Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:59:01.609457Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:01.726267Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:03.939535Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901660877140396:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:03.939665Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:03.992758Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:04.035537Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:04.064232Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:04.091730Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:04.119698Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:04.148718Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:04.214924Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:04.267060Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901665172108352:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:04.267118Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:04.267173Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901665172108357:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:04.270861Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:04.279724Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901665172108359:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:59:04.334079Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901665172108410:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain-EnableSeparateQuotas >> TSchemeShardSubDomainTest::CreateDropSolomon >> TSchemeShardSubDomainTest::SchemeQuotas [GOOD] >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-false >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-EnableSeparateQuotas >> TSchemeShardSubDomainTest::SetSchemeLimits >> TSchemeShardSubDomainTest::SimultaneousCreateTenantDirTable [GOOD] >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain >> TSchemeShardSubDomainTest::SimultaneousDeclare [GOOD] |90.5%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/indexes/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardSubDomainTest::SimultaneousDefine >> TLocksTest::CK_Range_BrokenLockInf [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateForceDropTwice >> TSchemeShardSubDomainTest::TopicDiskSpaceQuotas >> TSchemeShardSubDomainTest::Delete >> TSchemeShardSubDomainTest::TableDiskSpaceQuotas >> TPartitionTests::ShadowPartitionCountersFirstClass [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::ColumnSchemeLimitsRejects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.905850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.905945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.905981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.906017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.906067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.906090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.906164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.906235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, 
InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:04.907025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.907287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.969436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.969502Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.988054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.988445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.988625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.994411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.994714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.995164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.995358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.998355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.998504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.999346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.999387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.999512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.999548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.999578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.999679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.004795Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:05.119052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.119290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.119475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:05.119513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:05.119743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:05.119813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:05.121823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.122012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:05.122148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.122190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:05.122223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:05.122247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:05.123846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.123883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:05.123909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:05.125318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.125373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.125431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.125472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.128704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:05.130321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:05.130508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:05.131366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.131491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.131535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.131784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:05.131830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.132012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.132074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:05.133810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.133848Z node 1 :FLAT_TX_SCHEMESHARD ... 
: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 108 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 8589936750 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:06.785494Z node 2 :FLAT_TX_SCHEMESHARD INFO: alter_store.cpp:199: TAlterOlapStore TPropose operationId# 108:0 HandleReply TEvOperationPlan at tablet: 72057594046678944, stepId: 5000004 2025-06-25T14:59:06.785648Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 108:0 128 -> 129 2025-06-25T14:59:06.785810Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:06.785871Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-25T14:59:06.786498Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186233409549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=108;fline=tx_controller.cpp:215;event=finished_tx;tx_id=108; FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000004 2025-06-25T14:59:06.787953Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:06.787997Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 108, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:06.788187Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 108, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-25T14:59:06.788399Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:06.788438Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:340:2314], at schemeshard: 72057594046678944, txId: 108, path id: 1 2025-06-25T14:59:06.788482Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:340:2314], at schemeshard: 72057594046678944, txId: 108, path id: 5 2025-06-25T14:59:06.788967Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.789077Z node 2 :FLAT_TX_SCHEMESHARD INFO: alter_store.cpp:305: TAlterOlapStore TProposedWaitParts operationId# 108:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:59:06.789140Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: alter_store.cpp:332: TAlterOlapStore TProposedWaitParts operationId# 108:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409549 2025-06-25T14:59:06.789774Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-06-25T14:59:06.789868Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 
72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-06-25T14:59:06.789910Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-06-25T14:59:06.789947Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-25T14:59:06.789989Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T14:59:06.790933Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 5 Version: 5 PathOwnerId: 72057594046678944, cookie: 108 2025-06-25T14:59:06.791012Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 5 Version: 5 PathOwnerId: 72057594046678944, cookie: 108 2025-06-25T14:59:06.791051Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-06-25T14:59:06.791078Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 5 2025-06-25T14:59:06.791104Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-25T14:59:06.791163Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 108, ready parts: 0/1, is published: true 2025-06-25T14:59:06.793395Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 108:0 from tablet: 72057594046678944 to tablet: 72075186233409549 cookie: 72057594046678944:4 msg type: 275382275 2025-06-25T14:59:06.794791Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-06-25T14:59:06.795189Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-06-25T14:59:06.806859Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6230: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409549 TxId: 108 2025-06-25T14:59:06.806908Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 108, tablet: 72075186233409549, partId: 0 2025-06-25T14:59:06.807003Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 108:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409549 TxId: 108 2025-06-25T14:59:06.807043Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 108:0 129 -> 240 FAKE_COORDINATOR: Erasing txId 108 2025-06-25T14:59:06.808559Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.808688Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.808717Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 108:0 ProgressState 2025-06-25T14:59:06.808803Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#108:0 progress is 1/1 2025-06-25T14:59:06.808830Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-25T14:59:06.808857Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#108:0 progress is 1/1 2025-06-25T14:59:06.808890Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-25T14:59:06.808922Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 108, ready parts: 1/1, is published: true 2025-06-25T14:59:06.808972Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:501:2447] message: TxId: 108 2025-06-25T14:59:06.809009Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-25T14:59:06.809087Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 108:0 2025-06-25T14:59:06.809111Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 108:0 2025-06-25T14:59:06.809226Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-25T14:59:06.810739Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-25T14:59:06.810779Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [2:894:2801] TestWaitNotification: OK eventTxId 108 TestModificationResults wait txId: 109 2025-06-25T14:59:06.813100Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnStore AlterColumnStore { Name: "OlapStore1" AlterSchemaPresets { Name: "default" AlterSchema { AddColumns { Name: "comment2" Type: "Utf8" } } } } } TxId: 109 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:06.813251Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: alter_store.cpp:465: TAlterOlapStore Propose, path: /MyRoot/OlapStore1, opId: 109:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.813545Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 109:1, propose status:StatusSchemeError, reason: Too many columns. new: 4. Limit: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:06.815204Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 109, response: Status: StatusSchemeError Reason: "Too many columns. new: 4. 
Limit: 3" TxId: 109 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:06.815374Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 109, database: /MyRoot, subject: , status: StatusSchemeError, reason: Too many columns. new: 4. Limit: 3, operation: ALTER COLUMN STORE, path: /MyRoot/OlapStore1 TestModificationResult got TxId: 109, wait until txId: 109 TestWaitNotification wait txId: 109 2025-06-25T14:59:06.815679Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 109: send EvNotifyTxCompletion 2025-06-25T14:59:06.815708Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 109 2025-06-25T14:59:06.816144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 109, at schemeshard: 72057594046678944 2025-06-25T14:59:06.816223Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 109: got EvNotifyTxCompletionResult 2025-06-25T14:59:06.816247Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 109: satisfy waiter [2:929:2836] TestWaitNotification: OK eventTxId 109 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.653193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.653280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.653309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.653338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.653381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.653403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.653440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.653491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:04.654063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: 
ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.654357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.717211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.717271Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.733728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.734089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.734254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.739619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.739910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.740564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.740813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.744084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.744253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.745421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.745482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.745620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.745666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.745709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.745786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.751974Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.883274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.883461Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.883616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.883649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.883834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.883887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.885798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.885977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.886129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.886183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.886235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.886271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.887855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.887921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:04.887959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.889703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.889764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.889814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.889848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 
2025-06-25T14:59:04.892872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:04.894213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:04.894348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:04.894996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.895089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.895131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.895325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:04.895377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.895521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.895591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:04.898459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.898524Z node 1 :FLAT_TX_SCHEMESHARD ... 
137:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:06.758870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 11 2025-06-25T14:59:06.758935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 3 2025-06-25T14:59:06.761159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 137, response: Status: StatusAccepted TxId: 137 SchemeshardId: 72057594046678944 PathId: 10, at schemeshard: 72057594046678944 2025-06-25T14:59:06.761400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 137, database: /MyRoot/USER_0, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /MyRoot/USER_0/Table11 2025-06-25T14:59:06.761670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:06.761709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 137, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:06.761931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 137, path id: [OwnerId: 72057594046678944, LocalPathId: 10] 2025-06-25T14:59:06.762020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:06.762055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:1031:2889], at schemeshard: 72057594046678944, txId: 137, path id: 2 2025-06-25T14:59:06.762094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:1031:2889], at schemeshard: 72057594046678944, txId: 137, path id: 10 2025-06-25T14:59:06.762671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 137:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.762749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 137:0 ProgressState, operation type: TxCreateTable, at tablet# 72057594046678944 2025-06-25T14:59:06.762961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:359: TCreateParts opId# 137:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046678944 OwnerIdx: 10 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 10 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-25T14:59:06.763659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 18 PathOwnerId: 72057594046678944, cookie: 137 2025-06-25T14:59:06.763749Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 18 PathOwnerId: 72057594046678944, cookie: 137 2025-06-25T14:59:06.763787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 137 2025-06-25T14:59:06.763824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 137, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18 2025-06-25T14:59:06.763862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 12 2025-06-25T14:59:06.764808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 10 Version: 1 PathOwnerId: 72057594046678944, cookie: 137 2025-06-25T14:59:06.764880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 10 Version: 1 PathOwnerId: 72057594046678944, cookie: 137 2025-06-25T14:59:06.764903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 137 2025-06-25T14:59:06.764927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 137, pathId: [OwnerId: 72057594046678944, LocalPathId: 10], version: 1 2025-06-25T14:59:06.764955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 4 2025-06-25T14:59:06.765018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 137, ready parts: 0/1, is published: true 2025-06-25T14:59:06.767621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 137:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:10 msg type: 268697601 2025-06-25T14:59:06.767789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 137, partId: 0, tablet: 72057594037968897 2025-06-25T14:59:06.767847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1806: TOperation RegisterRelationByShardIdx, TxId: 137, shardIdx: 72057594046678944:10, partId: 0 2025-06-25T14:59:06.768748Z node 1 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72057594046678944 OwnerIdx: 10 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 10 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-25T14:59:06.768948Z node 1 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72057594046678944, OwnerIdx 10, type DataShard, boot OK, tablet id 72075186233409555 2025-06-25T14:59:06.769350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5951: Handle TEvCreateTabletReply at schemeshard: 72057594046678944 message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-06-25T14:59:06.769416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1820: TOperation FindRelatedPartByShardIdx, TxId: 137, shardIdx: 72057594046678944:10, partId: 0 2025-06-25T14:59:06.769532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 137:0, at schemeshard: 72057594046678944, message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-06-25T14:59:06.769578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:177: TCreateParts opId# 137:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046678944 2025-06-25T14:59:06.769668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:180: TCreateParts opId# 137:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-06-25T14:59:06.769766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 137:0 2 -> 3 2025-06-25T14:59:06.770935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 137 2025-06-25T14:59:06.773156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 137 2025-06-25T14:59:06.774153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 137:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.774308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 137:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.774355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_table.cpp:197: TCreateTable TConfigureParts operationId# 137:0 ProgressState at tabletId# 72057594046678944 2025-06-25T14:59:06.774442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:217: TCreateTable TConfigureParts operationId# 137:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409555 seqNo: 4:5 2025-06-25T14:59:06.774798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:233: TCreateTable TConfigureParts operationId# 137:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409555 message: TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 979 RawX2: 4294970144 } TxBody: "\n\236\004\n\007Table11\020\n\032\r\n\003key\030\002 \001(\000@\000\032\020\n\005Value\030\200$ \002(\000@\000(\001:\262\003\022\253\003\010\200\200\200\002\020\254\002\030\364\003 \200\200\200\010(\0000\200\200\200 8\200\200\200\010@\2008H\000RX\010\000\020\000\030\010 \010(\200\200\200@0\377\377\377\377\0178\001B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen1P\nX\200\200\001`nh\000p\000Rb\010\001\020\200\200\200\024\030\005 \020(\200\200\200\200\0020\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen2P\nX\200\200\001`nh\200\200\200\004p\200\200\200\004Rc\010\002\020\200\200\200\310\001\030\005 
\020(\200\200\200\200@0\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen3P\nX\200\200\001`nh\200\200\200(p\200\200\200(X\001`\005j$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionr\017compaction_gen0z\017compaction_gen0\202\001\004scan\210\001\200\200\200\010\220\001\364\003\230\0012\270\001\2008\300\001\006R\002\020\001J\026/MyRoot/USER_0/Table11\242\001\006\001\000\000\000\000\200\252\001\000\260\001\001\270\001\000\210\002\001\222\002\013\t\240\207\205\000\000\000\000\001\020\n:\004\010\004\020\005" TxId: 137 ExecLevel: 0 Flags: 0 SchemeShardId: 72057594046678944 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } SubDomainPathId: 2 2025-06-25T14:59:06.778408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 137:0 from tablet: 72057594046678944 to tablet: 72075186233409555 cookie: 72057594046678944:10 msg type: 269549568 2025-06-25T14:59:06.778580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 137, partId: 0, tablet: 72075186233409555 TestModificationResult got TxId: 137, wait until txId: 137 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDeclare [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:06.926356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:06.926437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:06.926462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:06.926486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:06.926517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:06.926535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:06.926589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:06.926657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:06.927287Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:06.927562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:07.002483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:07.002550Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:07.014893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:07.015272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:07.015423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:07.020195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:07.020436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:07.020872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.021066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:07.023749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.023893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:07.024812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.024857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.024989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:07.025022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:07.025052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:07.025112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.030383Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:07.154085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T14:59:07.154331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.154538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:07.154584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:07.154820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:07.154889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:07.157161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.157368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:07.157557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.157604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:07.157669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:07.157703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:07.159593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.159663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:07.159703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:07.161311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.161387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.161465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.161511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:07.165148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:07.166941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:07.167161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:07.168070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.168201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:07.168249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.168524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:07.168578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.168738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:07.168810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:07.171510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.171553Z node 1 :FLAT_TX_SCHEMESHARD ... 
kSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:07.222747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 100:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:100 msg type: 269090816 2025-06-25T14:59:07.222867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 100, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 100 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000002 2025-06-25T14:59:07.223268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.223372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 100 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:07.223410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-06-25T14:59:07.223592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 100:0 128 -> 240 2025-06-25T14:59:07.223640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-06-25T14:59:07.223807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:07.223863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:07.223907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-06-25T14:59:07.225285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.225310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:07.225463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:07.225544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.225595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 1 
2025-06-25T14:59:07.225637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-25T14:59:07.225918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.225955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-25T14:59:07.226027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:07.226058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:07.226084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:07.226104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:07.226133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-25T14:59:07.226163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:07.226189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 100:0 2025-06-25T14:59:07.226212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 100:0 2025-06-25T14:59:07.226254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:07.226277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 100, publications: 2, subscribers: 1 2025-06-25T14:59:07.226312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:59:07.226333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T14:59:07.226815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:07.226884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:07.226910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:07.226937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:59:07.226975Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:07.227382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:07.227435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:07.227455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:07.227482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:59:07.227499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:07.227540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 1 2025-06-25T14:59:07.227565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:280:2269] 2025-06-25T14:59:07.230173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-25T14:59:07.230827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-25T14:59:07.230928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:07.230957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:281:2270] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 100 2025-06-25T14:59:07.231322Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:07.231517Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 168us result status StatusSuccess 2025-06-25T14:59:07.231830Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: 
EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTenantDirTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:06.501380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:06.501460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:06.501493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:06.501527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:06.501566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:06.501591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:06.501635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:06.501696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-25T14:59:06.502345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:06.502631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:06.568562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:06.568632Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:06.584620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:06.585017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:06.585198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:06.590890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:06.591167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:06.591764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:06.592022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:06.594820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:06.594988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:06.596047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:06.596096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:06.596210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:06.596263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:06.596299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:06.596414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.602413Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:06.712615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:06.712824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.713001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:06.713038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:06.713243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:06.713314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:06.720977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:06.721124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:06.721250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.721284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:06.721345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:06.721384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:06.722985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.723039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:06.723089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:06.724480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.724532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:06.724584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:06.724638Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:06.727894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:06.729555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:06.729770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:06.730612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:06.730734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:06.730781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:06.731069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:06.731126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:06.731299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:06.731376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:06.733226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:06.733266Z node 1 :FLAT_TX_SCHEMESHARD ... 
DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:59:07.182570Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409548 2025-06-25T14:59:07.183449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:07.183637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:07.184218Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-25T14:59:07.184640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-25T14:59:07.184775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409551 Forgetting tablet 72075186233409547 2025-06-25T14:59:07.185658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 Forgetting tablet 72075186233409549 2025-06-25T14:59:07.186127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:07.186232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:07.186415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:59:07.186803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:59:07.186897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:07.187730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:59:07.188627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:07.188677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:07.188782Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:07.189733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-25T14:59:07.189770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-25T14:59:07.189820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-25T14:59:07.189833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-06-25T14:59:07.189903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:07.189984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:07.190013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:07.190070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:07.192445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:07.192485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:07.192534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:07.192548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:59:07.192579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-25T14:59:07.192597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-25T14:59:07.192735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:07.192753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:07.192960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:59:07.192994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-25T14:59:07.193088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:07.194133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:59:07.194419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:59:07.194470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:59:07.194855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:59:07.194940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:59:07.194981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:781:2669] TestWaitNotification: OK eventTxId 103 2025-06-25T14:59:07.195419Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:07.195682Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 187us result status StatusPathDoesNotExist 2025-06-25T14:59:07.195860Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:07.196190Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:07.196334Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 124us result status StatusSuccess 2025-06-25T14:59:07.196668Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 
1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TPQTest::TestComactifiedWithRetention [GOOD] >> TPQTest::TestGetTimestamps >> TSchemeShardSubDomainTest::SchemeLimitsRejects [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateForceDropTwice [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::ShadowPartitionCountersFirstClass [GOOD] Test command err: 2025-06-25T14:58:58.317650Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:58.317755Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:58.334180Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:181:2194] 2025-06-25T14:58:58.336059Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-25T14:58:58.000000Z 2025-06-25T14:58:58.336136Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [1:181:2194] Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\320\316\345\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\000\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\000\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\320\316\345\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\005\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\005\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\320\316\345\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\005\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\005\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-25T14:58:58.745774Z node 1 :PERSQUEUE WARN: partition.cpp:3248: [PQ: 72057594037927937, Partition: 0, State: StateIdle] commit to future - topic Root/PQ/rt3.dc1--account--topic partition 0 client client EndOffset 10 offset 13 Got cmd write: CmdWrite { Key: "i0000000000" Value: "\030\000(\320\316\345\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000000cclient" Value: "\010\n\020\001\030\001\"\007session(\0000\001@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000000uclient" Value: "\n\000\000\000\000\000\000\000\001\000\000\000\001\000\000\000session" StorageChannel: INLINE } 2025-06-25T14:58:59.093820Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:59.093894Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:59.109764Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [2:183:2196] 2025-06-25T14:58:59.111544Z node 2 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-25T14:58:59.000000Z 2025-06-25T14:58:59.111606Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [2:183:2196] 2025-06-25T14:58:59.864778Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:59.864844Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:59.878899Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [3:183:2196] 2025-06-25T14:58:59.879789Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 1 generation 0 [3:183:2196] 2025-06-25T14:58:59.880409Z node 3 :PERSQUEUE INFO: partition.cpp:3800: [PQ: 72057594037927937, Partition: 1, State: StateIdle] SubDomainOutOfSpace was changed. Topic: "Root/PQ/rt3.dc1--account--topic". Partition: 1. SubDomainOutOfSpace: 1 2025-06-25T14:58:59.880496Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|2162fa15-421af225-31d27108-1ea4d119_0 generated for partition 1 topic 'Root/PQ/rt3.dc1--account--topic' owner owner1 Send disk status response with cookie: 0 2025-06-25T14:59:00.221542Z node 3 :PERSQUEUE INFO: partition.cpp:3800: [PQ: 72057594037927937, Partition: 1, State: StateIdle] SubDomainOutOfSpace was changed. Topic: "Root/PQ/rt3.dc1--account--topic". Partition: 1. SubDomainOutOfSpace: 0 2025-06-25T14:59:00.733635Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:59:00.733713Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:59:00.750571Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] bootstrapping {0, {0, 1111}, 123} [4:183:2196] 2025-06-25T14:59:00.755185Z node 4 :PERSQUEUE INFO: partition_init.cpp:911: [rt3.dc1--account--topic:{0, {0, 1111}, 123}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-25T14:59:00.000000Z 2025-06-25T14:59:00.755277Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] init complete for topic 'rt3.dc1--account--topic' partition {0, {0, 1111}, 123} generation 0 [4:183:2196] 2025-06-25T14:59:01.098853Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|88e3ea48-6a1fe86-fa3aac89-681caf7_0 generated for partition {0, {0, 1111}, 123} topic 'rt3.dc1--account--topic' owner owner1 Send write: 0 Send write: 1 Send write: 2 Send write: 3 Send write: 4 Send write: 5 Send write: 6 Send write: 7 Send write: 8 Send write: 9 2025-06-25T14:59:04.344941Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:59:04.345016Z node 5 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:59:04.359752Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] bootstrapping {0, {0, 1111}, 123} [5:183:2196] 2025-06-25T14:59:04.361217Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:{0, {0, 1111}, 123}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:59:04.000000Z 2025-06-25T14:59:04.361268Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition {0, {0, 1111}, 123} generation 0 [5:183:2196] 2025-06-25T14:59:04.703423Z node 5 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie owner1|3fa8c3eb-2ad9e14f-ef47c287-b6d68483_0 generated for partition {0, {0, 1111}, 123} topic 'Root/PQ/rt3.dc1--account--topic' owner owner1 Send write: 0 Send write: 1 Send write: 2 Send write: 3 Send write: 4 Send write: 5 Send write: 6 Send write: 7 Send write: 8 Send write: 9 >> TSchemeShardSubDomainTest::SetSchemeLimits [GOOD] >> TSchemeShardSubDomainTest::SimultaneousDefine [GOOD] >> TSchemeShardSubDomainTest::Delete [GOOD] >> TPartitionTests::TestBatchingWithChangeConfig [GOOD] >> TSchemeShardSubDomainTest::RedefineErrors ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateForceDropTwice [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:07.883034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:07.883151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.883205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:07.883261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: 
OperationsProcessing config: using default configuration 2025-06-25T14:59:07.883312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:07.883342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:07.883403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.883483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:07.884247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:07.884612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:07.956029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:07.956076Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:07.969865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:07.970230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:07.970432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:07.976287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:07.976603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:07.977291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.977554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:07.980923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.981115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:07.982226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.982284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.982429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:07.982474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 
72057594046678944, LocalPathId: 1] 2025-06-25T14:59:07.982514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:07.982598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.989284Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:08.087236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:08.087436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.087588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:08.087629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:08.087838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:08.087917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:08.089733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.089930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:08.090061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.090099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:08.090141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:08.090171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:08.091723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.091777Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:08.091814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:08.093418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.093463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.093507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.093551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:08.096443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:08.098015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:08.098149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:08.098849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.099053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:08.099095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.099297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:08.099350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.099516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:08.099577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing 
txId 1 2025-06-25T14:59:08.101064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:08.101102Z node 1 :FLAT_TX_SCHEMESHARD ... LAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:5 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:08.160454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:08.160476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:08.160519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:6 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:08.160542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:08.160567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:08.161070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:59:08.162641Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5 2025-06-25T14:59:08.162846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-25T14:59:08.163142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-25T14:59:08.163523Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 2025-06-25T14:59:08.163643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.163835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:59:08.163989Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 2025-06-25T14:59:08.164141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:08.164279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:08.164575Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 
2025-06-25T14:59:08.164715Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 2025-06-25T14:59:08.164853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-25T14:59:08.165024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:59:08.165290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:08.165458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:08.165626Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 2025-06-25T14:59:08.165751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T14:59:08.165901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:59:08.166050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:08.166239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T14:59:08.166274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:282:2271] 2025-06-25T14:59:08.166512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:08.166580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:08.166739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:08.167113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:08.167160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:08.167237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:08.169602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-25T14:59:08.169773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:08.169971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:08.172519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-25T14:59:08.172618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:08.172712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:59:08.172903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:08.173090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-06-25T14:59:08.173662Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:08.173858Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 171us result status StatusPathDoesNotExist 2025-06-25T14:59:08.174064Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:08.174462Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:08.174643Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 202us result status StatusSuccess 2025-06-25T14:59:08.175082Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: 
EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeLimitsRejects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:04.739009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:04.739109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.739150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:04.739194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:04.739244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:04.739302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:04.739373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:04.739478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:04.740422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:04.740773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:04.832600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:04.832671Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:04.848962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:04.849316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:04.849469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:04.856781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:04.857126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:04.857855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.858129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:04.865062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.865262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:04.866474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.866536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:04.866680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:04.866729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:04.866771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:04.866846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.875583Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:04.979634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:04.979832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.980003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:04.980042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:04.980273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:04.980391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:04.982269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.982437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:04.982571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.982618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:04.982657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:04.982684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:04.984055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.984100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:04.984130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:04.985618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:04.985681Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-25T14:59:04.985746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.985793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:04.988790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:04.990379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:04.990577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:04.991476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:04.991652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:04.991717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.992005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:04.992058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:04.992217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:04.992290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:04.994295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:04.994341Z node 1 :FLAT_TX_SCHEMESHARD ... 
p:46: Free shard 72057594046678944:14 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:07.835172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:07.835202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:16 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:07.835695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 139 2025-06-25T14:59:07.836590Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-25T14:59:07.837341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.837608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-06-25T14:59:07.838811Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 15 TxId_Deprecated: 15 TabletID: 72075186233409556 2025-06-25T14:59:07.839247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 15 ShardOwnerId: 72057594046678944 ShardLocalIdx: 15, at schemeshard: 72057594046678944 2025-06-25T14:59:07.839442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 3 2025-06-25T14:59:07.839652Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 14 TxId_Deprecated: 14 TabletID: 72075186233409555 2025-06-25T14:59:07.841480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 14 ShardOwnerId: 72057594046678944 ShardLocalIdx: 14, at schemeshard: 72057594046678944 2025-06-25T14:59:07.841679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 2 2025-06-25T14:59:07.842198Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409556 Forgetting tablet 72075186233409555 2025-06-25T14:59:07.843687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:07.843857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:07.844113Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 
ShardLocalIdx: 16 TxId_Deprecated: 16 TabletID: 72075186233409557 Forgetting tablet 72075186233409547 Forgetting tablet 72075186233409557 2025-06-25T14:59:07.845705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 16 ShardOwnerId: 72057594046678944 ShardLocalIdx: 16, at schemeshard: 72057594046678944 2025-06-25T14:59:07.845869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 1 2025-06-25T14:59:07.846147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 139 2025-06-25T14:59:07.846310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 139 2025-06-25T14:59:07.846764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:07.846814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 16], at schemeshard: 72057594046678944 2025-06-25T14:59:07.846894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:07.847394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:07.847462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:07.847573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:07.849249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:07.849302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:07.849853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:15 2025-06-25T14:59:07.849884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:15 tabletId 72075186233409556 2025-06-25T14:59:07.851496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:14 2025-06-25T14:59:07.851531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:14 tabletId 72075186233409555 2025-06-25T14:59:07.851613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:07.851641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:07.851681Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:16 2025-06-25T14:59:07.851710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:16 tabletId 72075186233409557 2025-06-25T14:59:07.852877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:07.852961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:07.852991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:07.853057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:07.853216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:07.854259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 139, wait until txId: 139 TestWaitNotification wait txId: 139 2025-06-25T14:59:07.854889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 139: send EvNotifyTxCompletion 2025-06-25T14:59:07.854925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 139 2025-06-25T14:59:07.855556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 139, at schemeshard: 72057594046678944 2025-06-25T14:59:07.855644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 139: got EvNotifyTxCompletionResult 2025-06-25T14:59:07.855679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 139: satisfy waiter [1:2282:4053] TestWaitNotification: OK eventTxId 139 2025-06-25T14:59:07.856790Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:07.856925Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 170us result status StatusSuccess 2025-06-25T14:59:07.857258Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { 
GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 5 ShardsInside: 0 ShardsLimit: 6 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 20 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 4 MaxPaths: 5 MaxChildrenInDir: 4 MaxAclBytesSize: 25 MaxTableColumns: 3 MaxTableColumnNameLength: 10 MaxTableKeyColumns: 1 MaxTableIndices: 20 MaxShards: 6 MaxShardsInPath: 4 MaxConsistentCopyTargets: 1 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"$%&\'()*+,-.:;<=>?@[]^_`{|}~" MaxPQPartitions: 20 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SetSchemeLimits [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:07.727317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:07.727403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.727437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:07.727472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:07.727513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:07.727538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:07.727610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.727682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:59:07.728403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:07.728702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:07.807550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:07.807601Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:07.822863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:07.823208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:07.823381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:07.828622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:07.828916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:07.829547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.829777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:07.833852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.833987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:07.834941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.834982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.835088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:07.835123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:07.835160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:07.835217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.841171Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:07.964833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:07.965053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.965238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:07.965285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:07.965570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:07.965640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:07.967590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.967771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:07.967933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.968001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:07.968054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:07.968088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:07.969835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.969888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:07.969924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:07.971271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.971334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.971376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.971409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:07.973928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:07.975236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:07.975364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:07.976089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.976179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:07.976222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.976473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:07.976528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.976682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:07.976751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:07.978516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.978550Z node 1 :FLAT_TX_SCHEMESHARD ... 
at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:08.171196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:08.171231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:341:2315], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-25T14:59:08.171277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:341:2315], at schemeshard: 72057594046678944, txId: 100, path id: 2 FAKE_COORDINATOR: Erasing txId 100 2025-06-25T14:59:08.171576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.171623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-25T14:59:08.171725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:08.171755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:08.171788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#100:0 progress is 1/1 2025-06-25T14:59:08.171817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:08.171888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-25T14:59:08.171920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-25T14:59:08.171954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 100:0 2025-06-25T14:59:08.171983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 100:0 2025-06-25T14:59:08.172137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:08.172179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-25T14:59:08.172210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T14:59:08.172270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T14:59:08.172853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:08.172937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 
1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:08.172971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:08.173002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T14:59:08.173036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:08.173812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:08.173884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-25T14:59:08.173915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-25T14:59:08.173938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T14:59:08.173961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:59:08.174023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-25T14:59:08.176352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-25T14:59:08.177178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-25T14:59:08.177426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-25T14:59:08.177467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-25T14:59:08.177791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-25T14:59:08.177868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-25T14:59:08.177912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:488:2435] TestWaitNotification: OK eventTxId 100 2025-06-25T14:59:08.178355Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { 
ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:08.178524Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 176us result status StatusSuccess 2025-06-25T14:59:08.178908Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 3 ShardsInside: 2 ShardsLimit: 3 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 300 DatabaseQuotas { data_stream_shards_quota: 3 } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 3 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 3 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"$%&\'()*+,-.:;<=>?@[]^_`{|}~" MaxPQPartitions: 300 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:08.179408Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:08.179564Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 165us result status StatusSuccess 2025-06-25T14:59:08.179934Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain 
CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 3 ShardsInside: 0 ShardsLimit: 3 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 300 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 3 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 3 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"$%&\'()*+,-.:;<=>?@[]^_`{|}~" MaxPQPartitions: 300 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::Delete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:07.893511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:07.893594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.893638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:07.893684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:07.893727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:07.893758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:07.893808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.893881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:07.894636Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:07.894959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:07.965770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:07.965846Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:07.981729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:07.982078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:07.982241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:07.987477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:07.987789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:07.988471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.988751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:07.992058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.992239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:07.993468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.993529Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.993673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:07.993720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:07.993764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:07.993842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.000385Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:08.119256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T14:59:08.119488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.119659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:08.119700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:08.119935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:08.119992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:08.121649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.121889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:08.122023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.122062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:08.122100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:08.122149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:08.123399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.123441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:08.123469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:08.124807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.124895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.124954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.125013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:08.133261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:08.134934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:08.135086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:08.135771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.135881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:08.135920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.136148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:08.136188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.136326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:08.136383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:08.137989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:08.138036Z node 1 :FLAT_TX_SCHEMESHARD ... 
ers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-25T14:59:08.255913Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409548 2025-06-25T14:59:08.256735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:08.256914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409547 2025-06-25T14:59:08.257477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:08.257600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:08.258475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:08.258520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:08.258603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:08.258805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:08.258951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:08.258988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:08.259035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:08.259387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:08.259436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:08.261256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:08.261294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-25T14:59:08.261369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 
2025-06-25T14:59:08.261403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:08.261929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:08.262255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T14:59:08.262404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T14:59:08.262439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T14:59:08.262707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T14:59:08.262775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:08.262804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:494:2447] TestWaitNotification: OK eventTxId 101 2025-06-25T14:59:08.263127Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:08.263264Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 166us result status StatusPathDoesNotExist 2025-06-25T14:59:08.263411Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:08.263827Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:08.263970Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 150us result status StatusSuccess 2025-06-25T14:59:08.264255Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted 2025-06-25T14:59:08.264678Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-25T14:59:08.264750Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-25T14:59:08.264780Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 2025-06-25T14:59:08.265095Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:08.265242Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 151us result status StatusSuccess 2025-06-25T14:59:08.265559Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDefine [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:07.807927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:07.808008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.808046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:07.808079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:07.808116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:07.808172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:07.808221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.808287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:59:07.808959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:07.809275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:07.879902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:07.879955Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:07.895517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:07.895903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:07.896059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:07.902312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:07.902603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:07.903169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.903421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:07.906651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.906825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:07.908002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.908057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.908201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:07.908245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:07.908282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:07.908385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.914461Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:08.026630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:08.026880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.027072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:08.027118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:08.027383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:08.027477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:08.029388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.029583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:08.029727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.029771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:08.029828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:08.029863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:08.031561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.031628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:08.031670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:08.033226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.033309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.033385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.033435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:08.036594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:08.038285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:08.038480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:08.039391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.039522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:08.039569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.039838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:08.039892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.040051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:08.040146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:08.042278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:08.042311Z node 1 :FLAT_TX_SCHEMESHARD ... 
:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Status: SUCCESS OnTabletId: 72075186233409548 2025-06-25T14:59:08.162628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:84: NSubDomainState::TConfigureParts operationId# 101:0 HandleReply TEvConfigureStatus operationId:101:0 at schemeshard:72057594046678944 2025-06-25T14:59:08.162672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:120: NSubDomainState::TConfigureParts operationId# 101:0 Got OK TEvConfigureStatus from tablet# 72075186233409548 shardIdx# 72057594046678944:3 at schemeshard# 72057594046678944 2025-06-25T14:59:08.162705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 3 -> 128 2025-06-25T14:59:08.164556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.164660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.164683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.164713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 101:0, at tablet# 72057594046678944 2025-06-25T14:59:08.164748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 101 ready parts: 1/1 2025-06-25T14:59:08.164857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 101 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:08.166068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-25T14:59:08.166139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000003 2025-06-25T14:59:08.166355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.166440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:08.166487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-25T14:59:08.166778Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 101:0 128 -> 240 2025-06-25T14:59:08.166816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-25T14:59:08.166951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:08.167018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T14:59:08.168688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:08.168714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:08.168824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:08.168850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-25T14:59:08.168922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.168958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T14:59:08.169047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:08.169086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:08.169137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:08.169161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:08.169191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:59:08.169215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:08.169243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:59:08.169272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T14:59:08.169409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:59:08.169449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 1, subscribers: 1 2025-06-25T14:59:08.169481Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 4 2025-06-25T14:59:08.170344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:08.170417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:08.170444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:08.170474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-25T14:59:08.170526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:08.170595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 1 2025-06-25T14:59:08.170625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:315:2304] 2025-06-25T14:59:08.172641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T14:59:08.172706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T14:59:08.172724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:322:2311] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-25T14:59:08.173069Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:08.173248Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 185us result status StatusSuccess 2025-06-25T14:59:08.173562Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 2 SecurityStateVersion: 0 } } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::CK_Range_BrokenLockInf [GOOD] Test command err: 2025-06-25T14:58:29.284939Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901513617124560:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:29.294913Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002062/r3tmp/tmpGvFyBn/pdisk_1.dat 2025-06-25T14:58:29.777121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:29.777214Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:29.781300Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:29.819867Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:29.824493Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901513617124526:2080] 1750863509238714 != 1750863509238717 TClient is connected to server localhost:22593 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:30.135268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:30.156666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:30.180536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:30.195844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:30.300460Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:30.367740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:30.446396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:32.757967Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901527471547579:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:32.758062Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002062/r3tmp/tmpCPcxTJ/pdisk_1.dat 2025-06-25T14:58:33.040865Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:33.040944Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:33.048814Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:33.051058Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901527471547555:2080] 1750863512750876 != 1750863512750879 2025-06-25T14:58:33.058160Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16498 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:33.311085Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:33.320629Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:33.344465Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:33.351180Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:58:33.429921Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:33.493310Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:36.348630Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901545071097373:2133];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002062/r3tmp/tmp0RHhNo/pdisk_1.dat 2025-06-25T14:58:36.482157Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:36.561245Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:36.576161Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:36.576384Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901545071097278:2080] 1750863516341047 != 1750863516341050 2025-06-25T14:58:36.576545Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:36.581422Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25500 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:36.833622Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 28147 ... 
rations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002062/r3tmp/tmpfRmiJ0/pdisk_1.dat 2025-06-25T14:58:55.189634Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:55.190795Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519901626739069942:2080] 1750863535034566 != 1750863535034569 2025-06-25T14:58:55.206114Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:55.206214Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:55.209364Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19147 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. waiting... 2025-06-25T14:58:55.538876Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:55.572509Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:55.627198Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:55.684253Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:59.372739Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901641901631987:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:59.372852Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002062/r3tmp/tmpUNUpZl/pdisk_1.dat 2025-06-25T14:58:59.491415Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:59.505270Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519901641901631968:2080] 1750863539372216 != 1750863539372219 2025-06-25T14:58:59.526998Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:59.527098Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:59.529094Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24181 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:59.763445Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-25T14:58:59.782162Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:59.838108Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:59.909479Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:03.415277Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901657830876333:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:03.415358Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002062/r3tmp/tmp9SsreY/pdisk_1.dat 2025-06-25T14:59:03.542498Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:03.564127Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:03.564231Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:03.570547Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28076 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-25T14:59:03.846888Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:59:03.869953Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:03.933517Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:03.990552Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> TSchemeShardSubDomainTest::CreateDropSolomon [GOOD] >> TPartitionTests::TestBatchingWithProposeConfig ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateDropSolomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:07.609699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:07.609783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.609815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:07.609848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:07.609888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:07.609911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:07.609975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.610048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:07.610736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:07.611015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:07.682997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:07.683058Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:07.698554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:07.698923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:07.699084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:07.704710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:07.704997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:07.705607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.705839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:07.708910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.709082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:07.710225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.710294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.710435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:07.710477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:07.710511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:07.710584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.716425Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary 
subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:07.833846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:07.834073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.834262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:07.834301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:07.834499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:07.834568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:07.836528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.836715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:07.836886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.836929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:07.837028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:07.837071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:07.841597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.841659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:07.841710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:07.843274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T14:59:07.843313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.843357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.843401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:07.846253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:07.847664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:07.847800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:07.848479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.848587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:07.848623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.848871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:07.848922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.849078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:07.849149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:07.850837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.850875Z node 1 :FLAT_TX_SCHEMESHARD ... 
de 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:08.701558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:59:08.701591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:59:08.701724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:08.701760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-25T14:59:08.701789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-25T14:59:08.701823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-25T14:59:08.702430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:59:08.702502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:59:08.702533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:59:08.702572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-25T14:59:08.702605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:08.703308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:59:08.703385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T14:59:08.703414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-25T14:59:08.703439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:59:08.703467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, 
LocalPathId: 2] was 4 2025-06-25T14:59:08.703526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-25T14:59:08.705496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:08.705543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T14:59:08.706443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:59:08.706802Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-06-25T14:59:08.707932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.708163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T14:59:08.708420Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409547 2025-06-25T14:59:08.709110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:08.709271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:08.709462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T14:59:08.709882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:08.709934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:08.710027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:08.710376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T14:59:08.710415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:08.710473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:08.712748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-25T14:59:08.712802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-25T14:59:08.712926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T14:59:08.712960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T14:59:08.713489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T14:59:08.713586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-25T14:59:08.713868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-25T14:59:08.713912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-25T14:59:08.714372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-25T14:59:08.714453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:59:08.714502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:2111:3712] TestWaitNotification: OK eventTxId 104 2025-06-25T14:59:08.721271Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/Solomon" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:08.721470Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/Solomon" took 242us result status StatusPathDoesNotExist 2025-06-25T14:59:08.721620Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/Solomon\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0/Solomon" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T14:59:08.722251Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:08.722400Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 145us result status StatusPathDoesNotExist 2025-06-25T14:59:08.722509Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> THDRRQuoterResourceTreeRuntimeTest::TestCreateInactiveSession [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestDeleteResourceSessions [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestDistributeResourcesBetweenConsumers [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestEffectiveProps [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestDeleteResourceWithActiveChildren [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestUpdateResourceSessions [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestStopConsuming [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestUpdateConsumptionState [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestUpdateConsumptionStateAfterAllResourceAllocated [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestVeryBigWeights [GOOD] >> TKesusTest::TestAttachOutOfSequence >> TKesusTest::TestRegisterProxy >> TKesusTest::TestKesusConfig >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_NewSourceId_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_BadSourceId_Test >> TKesusTest::TestUnregisterProxy >> TSchemeShardSubDomainTest::RedefineErrors [GOOD] >> TKesusTest::TestSessionDetach >> TKesusTest::TestAcquireSemaphoreTimeout >> THDRRQuoterResourceTreeRuntimeTest::TestAllocateResource [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestAllocationGranularity [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestAmountIsLessThanEpsilon [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestActiveSessionDisconnectsAndThenConnectsAgain [GOOD] >> THDRRQuoterResourceTreeRuntimeTest::TestActiveMultiresourceSessionDisconnectsAndThenConnectsAgain [GOOD] >> TKesusTest::TestQuoterAccountResourcesBurst >> TKesusTest::TestQuoterHDRRParametersValidation >> TKesusTest::TestReleaseLockFailure >> TPQTest::TestGetTimestamps [GOOD] >> TPQTest::TestManyConsumers |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> THDRRQuoterResourceTreeRuntimeTest::TestDeleteResourceWithActiveChildren [GOOD] |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> THDRRQuoterResourceTreeRuntimeTest::TestVeryBigWeights [GOOD] >> 
TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-true [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RedefineErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:09.213133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:09.213205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:09.213230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:09.213253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:09.213277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:09.213306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:09.213337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:09.213393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:09.213843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:09.214070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:09.261001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:09.261045Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:09.271413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:09.271717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:09.271839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:09.275316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:09.275518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:09.275965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: 
TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:09.276198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:09.278404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:09.278530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:09.279318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:09.279354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:09.279446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:09.279477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:09.279503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:09.279560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:09.284031Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:09.373609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:09.373849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:09.374033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:09.374072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:09.374289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:09.374357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 
2025-06-25T14:59:09.376407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:09.376661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:09.376839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:09.376884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:09.376950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:09.376987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:09.378775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:09.378834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:09.378879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:09.380363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:09.380416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:09.380489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:09.380544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:09.384138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:09.386161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:09.386347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:09.387220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at 
schemeshard: 72057594046678944 2025-06-25T14:59:09.387348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:09.387393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:09.387661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:09.387725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:09.387881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:09.387947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:09.390010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:09.390053Z node 1 :FLAT_TX_SCHEMESHARD ... o create, do next state 2025-06-25T14:59:09.585146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 108:0 2 -> 3 2025-06-25T14:59:09.586374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:59:09.586433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 108:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:09.586465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 108:0 3 -> 128 2025-06-25T14:59:09.587580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:59:09.587623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:59:09.587654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 108:0, at tablet# 72057594046678944 2025-06-25T14:59:09.587683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 108 ready parts: 1/1 2025-06-25T14:59:09.587779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 108 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:09.589045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 108:4294967295 from 
tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:108 msg type: 269090816 2025-06-25T14:59:09.589155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 108, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 108 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 108 at step: 5000007 2025-06-25T14:59:09.589429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:09.589553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 108 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:09.589602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 108:0, at tablet# 72057594046678944 2025-06-25T14:59:09.589872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 108:0 128 -> 240 2025-06-25T14:59:09.589924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 108:0, at tablet# 72057594046678944 2025-06-25T14:59:09.590067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:09.590141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 108 2025-06-25T14:59:09.591430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:09.591521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 108, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:09.591696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:09.591728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 108, path id: 2 2025-06-25T14:59:09.592011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-25T14:59:09.592058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 108:0 ProgressState 2025-06-25T14:59:09.592138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#108:0 progress is 1/1 2025-06-25T14:59:09.592166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-25T14:59:09.592200Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#108:0 progress is 1/1 2025-06-25T14:59:09.592225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-25T14:59:09.592261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 108, ready parts: 1/1, is published: false 2025-06-25T14:59:09.592302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-25T14:59:09.592350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 108:0 2025-06-25T14:59:09.592383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 108:0 2025-06-25T14:59:09.592437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T14:59:09.592469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 108, publications: 1, subscribers: 0 2025-06-25T14:59:09.592503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 108, [OwnerId: 72057594046678944, LocalPathId: 2], 8 2025-06-25T14:59:09.592811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-06-25T14:59:09.592878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-06-25T14:59:09.592908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 108 2025-06-25T14:59:09.592946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-25T14:59:09.592973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:09.593037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 108, subscribers: 0 2025-06-25T14:59:09.594850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-25T14:59:09.595058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-25T14:59:09.595082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-25T14:59:09.595394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 
108, at schemeshard: 72057594046678944 2025-06-25T14:59:09.595465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-25T14:59:09.595509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:595:2548] TestWaitNotification: OK eventTxId 108 2025-06-25T14:59:09.595897Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:09.596064Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 148us result status StatusSuccess 2025-06-25T14:59:09.596395Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 6 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 6 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } StoragePools { Name: "pool-hdd-1" Kind: "hdd-1" } StoragePools { Name: "pool-hdd-2" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |90.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> THDRRQuoterResourceTreeRuntimeTest::TestActiveMultiresourceSessionDisconnectsAndThenConnectsAgain [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotBoundary_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_Active_Test >> TKesusTest::TestUnregisterProxy [GOOD] >> TKesusTest::TestUnregisterProxyBadGeneration >> TKesusTest::TestSessionDetach [GOOD] >> TKesusTest::TestSessionDetachFutureId >> 
TKesusTest::TestAttachOutOfSequence [GOOD] >> TKesusTest::TestAttachOutOfSequenceInTx >> TKesusTest::TestQuoterHDRRParametersValidation [GOOD] >> TKesusTest::TestReleaseLockFailure [GOOD] >> TKesusTest::TestRegisterProxy [GOOD] >> TKesusTest::TestQuoterAccountResourcesOnDemand >> TKesusTest::TestReleaseSemaphore >> TKesusTest::TestRegisterProxyBadGeneration >> TKesusTest::TestKesusConfig [GOOD] >> TKesusTest::TestLockNotFound >> TKesusTest::TestAttachOutOfSequenceInTx [GOOD] >> TKesusTest::TestAttachThenReRegister ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:05.178420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:05.178498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:05.178538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:05.178581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:05.178625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:05.178654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:05.178722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:05.178801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:05.179528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:05.179835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:05.236612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:05.236677Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:05.251946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:05.252292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:05.252502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:05.257974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:05.258267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:05.258902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.259374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:05.263594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.263762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:05.264882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.264956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.265094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:05.265143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:05.265186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:05.265280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.272712Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:05.408824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.409026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.409194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:05.409230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:05.409421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, 
reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:05.409595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:05.411517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.411688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:05.411826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.411864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:05.411905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:05.411941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:05.413623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.413678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:05.413722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:05.415426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.415489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.415548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.415617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.425062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:05.427142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:05.427300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation 
RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:05.428025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.428135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.428178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.428419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:05.428473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.428608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.428677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:05.431129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.431181Z node 1 :FLAT_TX_SCHEMESHARD D ... 
ply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:10.038389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:10.039410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-25T14:59:10.039461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-25T14:59:10.039631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 2] 2025-06-25T14:59:10.039762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-25T14:59:10.039807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:452:2402], at schemeshard: 72075186233409546, txId: 104, path id: 1 2025-06-25T14:59:10.039845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:452:2402], at schemeshard: 72075186233409546, txId: 104, path id: 2 2025-06-25T14:59:10.039890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:10.039926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72075186233409546 2025-06-25T14:59:10.040011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:10.040043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 104:0, datashard: 72075186233409549, at schemeshard: 72075186233409546 2025-06-25T14:59:10.040073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 129 -> 240 2025-06-25T14:59:10.041207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-06-25T14:59:10.041308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-06-25T14:59:10.041341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-25T14:59:10.041371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 9 2025-06-25T14:59:10.041405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for 
pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-06-25T14:59:10.042228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-25T14:59:10.042294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-25T14:59:10.042315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-25T14:59:10.042335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:59:10.042355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-25T14:59:10.042400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-25T14:59:10.043456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:10.043497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:0 ProgressState, at schemeshard: 72075186233409546 2025-06-25T14:59:10.043787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-25T14:59:10.043934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:59:10.043981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:10.044015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:59:10.044039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:10.044073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-25T14:59:10.044121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:557:2494] message: TxId: 104 2025-06-25T14:59:10.044158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:10.044183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:59:10.044208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:59:10.044293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove 
txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-06-25T14:59:10.044740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-25T14:59:10.044769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-25T14:59:10.045453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-25T14:59:10.046551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-25T14:59:10.046678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-25T14:59:10.046709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:452:2402], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-25T14:59:10.046772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:59:10.046796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:750:2664] 2025-06-25T14:59:10.047330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72075186233409546, cookie: 0 TestWaitNotification: OK eventTxId 104 2025-06-25T14:59:10.047997Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-25T14:59:10.048152Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/SomeDatabase" took 205us result status StatusSuccess 2025-06-25T14:59:10.048477Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "MyRoot/SomeDatabase" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 10 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 10 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 2 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "quoted_storage_pool" Kind: "quoted_storage_pool_kind" } StoragePools { Name: 
"unquoted_storage_pool" Kind: "unquoted_storage_pool_kind" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "unquoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "quoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { storage_quotas { unit_kind: "quoted_storage_pool_kind" data_size_hard_quota: 1 } } SecurityState { Audience: "/MyRoot/SomeDatabase" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 >> TKesusTest::TestUnregisterProxyBadGeneration [GOOD] >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW+VolatileTxs >> TKesusTest::TestSessionTimeoutAfterUnregister >> TStoragePoolsQuotasTest::DifferentQuotasInteraction [GOOD] >> TKesusTest::TestSessionDetachFutureId [GOOD] >> TKesusTest::TestSessionDestroy >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW+VolatileTxs >> TKesusTest::TestRegisterProxyBadGeneration [GOOD] >> TKesusTest::TestReleaseSemaphore [GOOD] >> TKesusTest::TestSemaphoreData >> TKesusTest::TestRegisterProxyFromDeadActor >> TKesusTest::TestLockNotFound [GOOD] >> TKesusTest::TestDeleteSemaphore >> TKesusTest::TestAttachNewSessions >> TKesusTest::TestAttachThenReRegister [GOOD] >> TKesusTest::TestAttachTimeoutTooBig >> TKesusTest::TestSessionDestroy [GOOD] >> TKesusTest::TestSessionStealing >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain [GOOD] >> TKesusTest::TestRegisterProxyFromDeadActor [GOOD] >> TKesusTest::TestRegisterProxyLinkFailure >> TKesusTest::TestDeleteSemaphore [GOOD] >> TKesusTest::TestDescribeSemaphoreWatches ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:07.180109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:07.180199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.180254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 
0.010000s 2025-06-25T14:59:07.180288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:07.180350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:07.180386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:07.180438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.180495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:07.181031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:07.181307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:07.243306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:07.243381Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:07.259508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:07.259805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:07.259938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:07.265337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:07.265678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:07.266347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.266621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:07.269628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.269813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:07.270952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.271025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.271169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:07.271225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a 
bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:07.271280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:07.271369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.276654Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:07.396529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:07.396773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.396989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:07.397030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:07.397273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:07.397371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:07.399708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.399911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:07.400101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.400147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:07.400202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:07.400239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:07.401968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-25T14:59:07.402009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:07.402034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:07.403344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.403381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.403434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.403469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:07.406126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:07.407685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:07.407858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:07.408636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.408763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:07.408822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.409072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:07.409189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.409377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:07.409442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 
72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:07.411345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.411382Z node 1 :FLAT_TX_SCHEMESHARD D ... 4046678944 2025-06-25T14:59:10.514201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:10.514441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:10.514480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:10.514682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:59:10.514827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:10.514878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-25T14:59:10.514925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-25T14:59:10.515361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:10.515416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:59:10.515523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:10.515562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 103:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-25T14:59:10.515603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 103:0 129 -> 240 2025-06-25T14:59:10.516437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:10.516539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:10.516575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:59:10.516618Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-25T14:59:10.516664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:10.517694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:10.517772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:10.517802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:59:10.517846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-25T14:59:10.517875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:59:10.517937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-25T14:59:10.520338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:10.520409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:10.520828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:59:10.521009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:59:10.521095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:59:10.521144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:59:10.521178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:59:10.521229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-25T14:59:10.521302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:410:2374] message: TxId: 103 2025-06-25T14:59:10.521344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:59:10.521381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:59:10.521421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:59:10.521524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:59:10.521884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:10.521924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:10.522914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:59:10.523963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:59:10.524892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:10.524943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-06-25T14:59:10.525013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:59:10.525045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:730:2661] 2025-06-25T14:59:10.525786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification: OK eventTxId 103 2025-06-25T14:59:10.527157Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:10.527375Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 222us result status StatusSuccess 2025-06-25T14:59:10.527837Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 
ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TKesusTest::TestSemaphoreData [GOOD] >> TKesusTest::TestSemaphoreReleaseReacquire >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata [GOOD] >> TKesusTest::TestAttachNewSessions [GOOD] >> TKesusTest::TestAttachMissingSession >> TKesusTest::TestAttachTimeoutTooBig [GOOD] >> TKesusTest::TestCreateSemaphore >> TKesusTest::TestSessionStealing [GOOD] >> TKesusTest::TestSessionStealingAnyKey >> TKesusTest::TestRegisterProxyLinkFailure [GOOD] >> TKesusTest::TestRegisterProxyLinkFailureRace ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:07.721813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:07.721892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.721929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:07.721970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: 
using default configuration 2025-06-25T14:59:07.722037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:07.722073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:07.722128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.722202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:07.722891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:07.723155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:07.784302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:07.784381Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:07.797709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:07.798078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:07.798219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:07.802562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:07.802791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:07.803236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.803420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:07.806153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.806297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:07.807141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.807193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.807308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:07.807349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-06-25T14:59:07.807382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:07.807442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.812711Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:07.918517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:07.918750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.918988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:07.919049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:07.919304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:07.919383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:07.921356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.921557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:07.921733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.921782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:07.921850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:07.921892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:07.924276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.924355Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:07.924397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:07.926015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.926070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.926138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.926191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:07.929547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:07.931183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:07.931364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:07.932247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.932411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:07.932465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.932785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:07.932844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.933012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:07.933083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing 
txId 1 2025-06-25T14:59:07.934924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.934969Z node 1 :FLAT_TX_SCHEMESHARD D ... tionId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:11.067884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-25T14:59:11.067938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-25T14:59:11.068115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 2] 2025-06-25T14:59:11.068371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:11.068428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-25T14:59:11.068484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:452:2402], at schemeshard: 72075186233409546, txId: 104, path id: 1 2025-06-25T14:59:11.068533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:452:2402], at schemeshard: 72075186233409546, txId: 104, path id: 2 2025-06-25T14:59:11.068813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:11.068856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72075186233409546 2025-06-25T14:59:11.068936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:11.068970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 104:0, datashard: 72075186233409549, at schemeshard: 72075186233409546 2025-06-25T14:59:11.069015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 129 -> 240 2025-06-25T14:59:11.069976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-06-25T14:59:11.070074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-06-25T14:59:11.070112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-25T14:59:11.070176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 
72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 9 2025-06-25T14:59:11.070212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-06-25T14:59:11.070933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-25T14:59:11.071009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-25T14:59:11.071032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-25T14:59:11.071080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:59:11.071112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-25T14:59:11.071168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-25T14:59:11.075534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:11.075587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:0 ProgressState, at schemeshard: 72075186233409546 2025-06-25T14:59:11.075950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-25T14:59:11.076123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:59:11.076176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:11.076241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:59:11.076269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:11.076320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-25T14:59:11.076416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:557:2494] message: TxId: 104 2025-06-25T14:59:11.076457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:11.076492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 
2025-06-25T14:59:11.076520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:59:11.076612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-06-25T14:59:11.077206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-25T14:59:11.077244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-25T14:59:11.077553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-25T14:59:11.077852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-25T14:59:11.079250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-25T14:59:11.079287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:452:2402], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-25T14:59:11.079358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:59:11.079391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:818:2733] 2025-06-25T14:59:11.080128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72075186233409546, cookie: 0 TestWaitNotification: OK eventTxId 104 2025-06-25T14:59:11.081544Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-25T14:59:11.081736Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/SomeDatabase" took 206us result status StatusSuccess 2025-06-25T14:59:11.082159Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "MyRoot/SomeDatabase" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 10 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 10 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 2 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 
TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { Audience: "/MyRoot/SomeDatabase" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 >> TKesusTest::TestAttachMissingSession [GOOD] >> TKesusTest::TestAttachOldGeneration >> TKesusTest::TestSessionStealingAnyKey [GOOD] >> TKesusTest::TestCreateSemaphore [GOOD] >> TKesusTest::TestQuoterAccountResourcesBurst [GOOD] >> TKesusTest::TestQuoterAccountResourcesAggregateClients >> TKesusTest::TestSemaphoreReleaseReacquire [GOOD] >> TKesusTest::TestSemaphoreSessionFailures >> TKesusTest::TestAttachOldGeneration [GOOD] >> TKesusTest::TestAttachFastPath >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-false [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestSessionStealingAnyKey [GOOD] Test command err: 2025-06-25T14:59:10.243160Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.243283Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.264164Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.264277Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.289100Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.289538Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=174939294128343387, session=0, seqNo=0) 2025-06-25T14:59:10.289691Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:10.302016Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=174939294128343387, session=1) 2025-06-25T14:59:10.303617Z node 1 :KESUS_TABLET DEBUG: tx_session_detach.cpp:28: [72057594037927937] TTxSessionDetach::Execute (sender=[1:136:2160], cookie=5981291312516879123, 
session=2) 2025-06-25T14:59:10.303703Z node 1 :KESUS_TABLET DEBUG: tx_session_detach.cpp:59: [72057594037927937] TTxSessionDetach::Complete (sender=[1:136:2160], cookie=5981291312516879123) 2025-06-25T14:59:10.304198Z node 1 :KESUS_TABLET DEBUG: tx_session_detach.cpp:100: [72057594037927937] Fast-path detach session=1 from sender=[1:136:2160], cookie=1852257443200374041 2025-06-25T14:59:10.304910Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=18060744905503225284, session=1, seqNo=0) 2025-06-25T14:59:10.316537Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=18060744905503225284, session=1) 2025-06-25T14:59:10.317099Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:59:10.318637Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:59:10.318742Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:59:10.318912Z node 1 :KESUS_TABLET DEBUG: tx_session_detach.cpp:28: [72057594037927937] TTxSessionDetach::Execute (sender=[1:136:2160], cookie=12772656299250949878, session=1) 2025-06-25T14:59:10.329188Z node 1 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:27: [72057594037927937] TTxSessionTimeout::Execute (session=1) 2025-06-25T14:59:10.329241Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-25T14:59:10.329304Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Lock1" owner link 2025-06-25T14:59:10.341162Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=111) 2025-06-25T14:59:10.341246Z node 1 :KESUS_TABLET DEBUG: tx_session_detach.cpp:59: [72057594037927937] TTxSessionDetach::Complete (sender=[1:136:2160], cookie=12772656299250949878) 2025-06-25T14:59:10.341314Z node 1 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:56: [72057594037927937] TTxSessionTimeout::Complete (session=1) 2025-06-25T14:59:10.740632Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.740705Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.759556Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.760933Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.785533Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.785817Z node 2 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[2:136:2160], cookie=393075103008685757, path="") 2025-06-25T14:59:10.797716Z node 2 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[2:136:2160], cookie=393075103008685757, status=SUCCESS) 2025-06-25T14:59:10.798233Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:145:2167], cookie=111, session=0, seqNo=0) 2025-06-25T14:59:10.798319Z node 2 :KESUS_TABLET 
DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:10.798444Z node 2 :KESUS_TABLET DEBUG: tx_session_detach.cpp:28: [72057594037927937] TTxSessionDetach::Execute (sender=[2:145:2167], cookie=9599348893877619285, session=1) 2025-06-25T14:59:10.808719Z node 2 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:27: [72057594037927937] TTxSessionTimeout::Execute (session=1) 2025-06-25T14:59:10.808774Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-25T14:59:10.820378Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:145:2167], cookie=111, session=1) 2025-06-25T14:59:10.820452Z node 2 :KESUS_TABLET DEBUG: tx_session_detach.cpp:59: [72057594037927937] TTxSessionDetach::Complete (sender=[2:145:2167], cookie=9599348893877619285) 2025-06-25T14:59:10.820494Z node 2 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:56: [72057594037927937] TTxSessionTimeout::Complete (session=1) 2025-06-25T14:59:11.129813Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.129907Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.147839Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.148387Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.171857Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.172274Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:136:2160], cookie=657621149320437921, session=0, seqNo=0) 2025-06-25T14:59:11.172422Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:11.184086Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:136:2160], cookie=657621149320437921, session=1) 2025-06-25T14:59:11.184726Z node 3 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:37: [72057594037927937] TTxSessionDestroy::Execute (sender=[3:136:2160], cookie=4120812846726059890, session=1) 2025-06-25T14:59:11.184811Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-25T14:59:11.196624Z node 3 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:75: [72057594037927937] TTxSessionDestroy::Complete (sender=[3:136:2160], cookie=4120812846726059890) 2025-06-25T14:59:11.197446Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[3:153:2175], cookie=13042422426013999108) 2025-06-25T14:59:11.197529Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[3:153:2175], cookie=13042422426013999108) 2025-06-25T14:59:11.198039Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:156:2178], cookie=7485051791778166763, session=0, seqNo=0) 2025-06-25T14:59:11.198149Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:59:11.209756Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:156:2178], cookie=7485051791778166763, session=2) 2025-06-25T14:59:11.210509Z node 3 :KESUS_TABLET DEBUG: 
tx_session_destroy.cpp:37: [72057594037927937] TTxSessionDestroy::Execute (sender=[3:136:2160], cookie=9774072675325771551, session=2) 2025-06-25T14:59:11.210570Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 2 2025-06-25T14:59:11.222001Z node 3 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:75: [72057594037927937] TTxSessionDestroy::Complete (sender=[3:136:2160], cookie=9774072675325771551) 2025-06-25T14:59:11.542472Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.542577Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.553968Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.554071Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.567546Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.568094Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:134:2158], cookie=12345, session=0, seqNo=0) 2025-06-25T14:59:11.568188Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:11.590532Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:134:2158], cookie=12345, session=1) 2025-06-25T14:59:11.591238Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:140:2163], cookie=23456, session=1, seqNo=0) 2025-06-25T14:59:11.602847Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:140:2163], cookie=23456, session=1) 2025-06-25T14:59:11.918155Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.918217Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.932843Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.932955Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.956505Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.957288Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2160], cookie=12345, session=0, seqNo=0) 2025-06-25T14:59:11.957441Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:11.969412Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2160], cookie=12345, session=1) 2025-06-25T14:59:11.970110Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:143:2165], cookie=23456, session=1, seqNo=0) 2025-06-25T14:59:11.981807Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:143:2165], cookie=23456, session=1) >> TKesusTest::TestQuoterAccountResourcesOnDemand [GOOD] >> TKesusTest::TestQuoterAccountResourcesPaced ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestCreateSemaphore [GOOD] Test command err: 
2025-06-25T14:59:10.243182Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.243297Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.262608Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.262758Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.289272Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.290050Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=14671124424809573260, session=0, seqNo=222) 2025-06-25T14:59:10.290207Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:10.301978Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=14671124424809573260, session=1) 2025-06-25T14:59:10.302246Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:137:2161], cookie=4033320401385693476, session=1, seqNo=111) 2025-06-25T14:59:10.314109Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:137:2161], cookie=4033320401385693476, session=1) 2025-06-25T14:59:10.716137Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.716234Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.733919Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.734449Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.757836Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.758270Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:136:2160], cookie=111, session=0, seqNo=42) 2025-06-25T14:59:10.758410Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:10.758570Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:136:2160], cookie=222, session=1, seqNo=41) 2025-06-25T14:59:10.770380Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:136:2160], cookie=111, session=1) 2025-06-25T14:59:10.770466Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:136:2160], cookie=222, session=1) 2025-06-25T14:59:11.100167Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.100262Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.114246Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.114751Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.138272Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.138714Z node 3 :KESUS_TABLET DEBUG: 
tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:136:2160], cookie=11887158133510143754, session=0, seqNo=0) 2025-06-25T14:59:11.138856Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:11.150231Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:136:2160], cookie=11887158133510143754, session=1) 2025-06-25T14:59:11.151137Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[3:153:2175], cookie=506179124341886452) 2025-06-25T14:59:11.151200Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[3:153:2175], cookie=506179124341886452) 2025-06-25T14:59:11.463623Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.463714Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.479846Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.479959Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.493579Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.860871Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.860977Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.874124Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.874234Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.897620Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.898054Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2160], cookie=2423839770645423816, session=0, seqNo=0) 2025-06-25T14:59:11.898218Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:11.910027Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2160], cookie=2423839770645423816, session=1) 2025-06-25T14:59:11.910322Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:59:11.910458Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:59:11.910554Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:59:11.922265Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=111) 2025-06-25T14:59:11.923042Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:150:2172], cookie=11650886154054434335, name="Sem1", limit=42) 2025-06-25T14:59:11.923160Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 2 "Sem1" 
2025-06-25T14:59:11.935027Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:150:2172], cookie=11650886154054434335) 2025-06-25T14:59:11.935552Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:155:2177], cookie=14421741970803353839, name="Sem1", limit=42) 2025-06-25T14:59:11.947906Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:155:2177], cookie=14421741970803353839) 2025-06-25T14:59:11.948548Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:160:2182], cookie=11437506860898063506, name="Sem1", limit=51) 2025-06-25T14:59:11.960822Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:160:2182], cookie=11437506860898063506) 2025-06-25T14:59:11.961426Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:165:2187], cookie=14913389877920250465, name="Lock1", limit=42) 2025-06-25T14:59:11.973720Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:165:2187], cookie=14913389877920250465) 2025-06-25T14:59:11.974280Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:170:2192], cookie=6553636595440794206, name="Lock1", limit=18446744073709551615) 2025-06-25T14:59:11.986537Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:170:2192], cookie=6553636595440794206) 2025-06-25T14:59:11.987172Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:175:2197], cookie=17250795536986821548, name="Sem1") 2025-06-25T14:59:11.987275Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:175:2197], cookie=17250795536986821548) 2025-06-25T14:59:11.987811Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:178:2200], cookie=16639474119095146962, name="Sem2") 2025-06-25T14:59:11.987890Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:178:2200], cookie=16639474119095146962) 2025-06-25T14:59:12.002238Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:12.002338Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:12.002846Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:12.003470Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:12.061180Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:12.061347Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:59:12.061694Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:218:2230], cookie=836209726670940034, name="Sem1") 2025-06-25T14:59:12.061780Z node 5 :KESUS_TABLET DEBUG: 
tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:218:2230], cookie=836209726670940034) 2025-06-25T14:59:12.062423Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:225:2236], cookie=8076995130036778194, name="Sem2") 2025-06-25T14:59:12.062499Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:225:2236], cookie=8076995130036778194) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata [GOOD] Test command err: Trying to start YDB, gRPC: 5092, MsgBus: 21013 2025-06-25T14:58:44.566400Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901579454508046:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:44.572704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00137f/r3tmp/tmpR05Qoa/pdisk_1.dat 2025-06-25T14:58:45.124446Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901579454508019:2080] 1750863524562716 != 1750863524562719 2025-06-25T14:58:45.149582Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:45.197569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:45.197704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:45.207754Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5092, node 1 2025-06-25T14:58:45.499465Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:45.499484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:45.499503Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:45.499602Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:45.588234Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21013 TClient is connected to server localhost:21013 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:46.291015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:46.329665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:46.506789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:46.651146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:46.715986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:47.723505Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901592339411536:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:47.723617Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:48.374763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:48.417360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:48.446415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:48.476283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:48.506570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:48.604460Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:48.680025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:48.759393Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901596634379498:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:48.759477Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:48.759915Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901596634379503:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:48.765939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:48.781352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-25T14:58:48.781863Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901596634379505:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:58:48.886500Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901596634379556:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:49.568501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519901579454508046:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:49.582898Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:58:50.187707Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/acti ... is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:59:00.864941Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T14:59:00.908535Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:01.046976Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:01.151850Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:59:01.271763Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:03.123993Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901659965531285:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:03.124114Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:03.157677Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:03.185452Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:03.212355Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:03.238594Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:03.273100Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:03.300053Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:03.368722Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:03.428045Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901659965531948:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:03.428167Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:03.428366Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901659965531953:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:03.433025Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:03.444550Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901659965531955:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:59:03.505489Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901659965532006:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:04.481105Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:05.041944Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:59:05.237534Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901647080627789:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:05.237621Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:59:05.562608Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:06.041858Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715683:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:06.494949Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715688:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:59:06.944275Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715693:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:59:07.441836Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:07.474674Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:59:10.455599Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715729:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) >> TKesusTest::TestAttachFastPath [GOOD] >> TKesusTest::TestAttachFastPathBlocked >> TKesusTest::TestSemaphoreSessionFailures [GOOD] |90.6%| [TA] $(B)/ydb/core/kqp/provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} |90.6%| [TA] {RESULT} $(B)/ydb/core/kqp/provider/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TPartitionTests::TestBatchingWithProposeConfig [GOOD] >> TKesusTest::TestRegisterProxyLinkFailureRace [GOOD] >> TKesusTest::TestDescribeSemaphoreWatches [GOOD] >> TKesusTest::TestGetQuoterResourceCounters >> TPartitionTests::ConflictingCommitFails [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:07.594433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:07.594509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.594545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:07.594579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:07.594618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:07.594644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:07.594696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: 
Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.594778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:07.595422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:07.595708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:07.660532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:07.660582Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:07.673673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:07.674057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:07.674231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:07.679742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:07.679982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:07.680656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.680891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:07.683729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.683891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:07.684837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.684886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.685028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:07.685067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:07.685101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:07.685180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.690177Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is 
[1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:07.796628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:07.796826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.796990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:07.797022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:07.797222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:07.797289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:07.798794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.798948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:07.799067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.799099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:07.799133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:07.799158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:07.800487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.800520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:07.800544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:07.801724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.801763Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.801801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.801851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:07.804096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:07.805212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:07.805348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:07.805974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.806075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:07.806108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.806292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:07.806331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.806442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:07.806491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:07.807862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.807892Z node 1 :FLAT_TX_SCHEMESHARD D ... 
ransactionResult> complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:12.301749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:12.301973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:12.302042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:12.302195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:59:12.302314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:12.302355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-25T14:59:12.302402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-25T14:59:12.302467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:12.302506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:59:12.302571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:12.302679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 103:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-25T14:59:12.302715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 103:0 129 -> 240 2025-06-25T14:59:12.303753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:12.303846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:12.303877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:59:12.303913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-25T14:59:12.303968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove 
publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:12.304908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:12.304965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:12.304992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:59:12.305017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-25T14:59:12.305043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:59:12.305087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-25T14:59:12.308244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:12.308329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:12.308803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:59:12.309016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:59:12.309060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:59:12.309102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:59:12.309153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:59:12.309190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-25T14:59:12.309275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:410:2374] message: TxId: 103 2025-06-25T14:59:12.309330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:59:12.309394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:59:12.309432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:59:12.309551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount 
reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:59:12.310193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:12.310232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:12.311325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:59:12.311749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:59:12.313329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:12.313382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-06-25T14:59:12.313470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:59:12.313510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:661:2591] 2025-06-25T14:59:12.314262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification: OK eventTxId 103 2025-06-25T14:59:12.315278Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:12.315540Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 237us result status StatusSuccess 2025-06-25T14:59:12.316072Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "unquoted_storage_pool" Kind: "unquoted_storage_pool_kind" } StoragePools { Name: "quoted_storage_pool" Kind: 
"quoted_storage_pool_kind" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "unquoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "quoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { storage_quotas { unit_kind: "quoted_storage_pool_kind" data_size_hard_quota: 1 } } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TKesusTest::TestAttachFastPathBlocked [GOOD] >> TPartitionTests::ConflictingCommitProccesAfterRollback ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestSemaphoreSessionFailures [GOOD] Test command err: 2025-06-25T14:59:10.243264Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.243399Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.263262Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.263381Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.289054Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.289547Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=15768445443914950974, session=0, seqNo=0) 2025-06-25T14:59:10.289684Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:10.301965Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=15768445443914950974, session=1) 2025-06-25T14:59:10.302280Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=13751686660624320874, session=0, seqNo=0) 2025-06-25T14:59:10.302372Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:59:10.314011Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=13751686660624320874, session=2) 2025-06-25T14:59:10.314344Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[1:136:2160], cookie=111, name="Lock1") 2025-06-25T14:59:10.325907Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[1:136:2160], cookie=111) 2025-06-25T14:59:10.326139Z 
node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=222, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:59:10.326249Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:59:10.326313Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:59:10.338178Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=222) 2025-06-25T14:59:10.338460Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[1:136:2160], cookie=333, name="Lock1") 2025-06-25T14:59:10.350477Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[1:136:2160], cookie=333) 2025-06-25T14:59:10.776578Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.776682Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.793914Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.794431Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.817707Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.818079Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:136:2160], cookie=14973766454636825628, session=0, seqNo=0) 2025-06-25T14:59:10.818196Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:10.829936Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:136:2160], cookie=14973766454636825628, session=1) 2025-06-25T14:59:10.830185Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:136:2160], cookie=15556961108135123497, session=0, seqNo=0) 2025-06-25T14:59:10.830296Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:59:10.842041Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:136:2160], cookie=15556961108135123497, session=2) 2025-06-25T14:59:10.842520Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[2:147:2169], cookie=16627109261153143390, name="Sem1", limit=1) 2025-06-25T14:59:10.842633Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-25T14:59:10.854612Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[2:147:2169], cookie=16627109261153143390) 2025-06-25T14:59:10.854916Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=111, session=1, semaphore="Sem1" count=1) 2025-06-25T14:59:10.855047Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2025-06-25T14:59:10.855208Z node 2 
:KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[2:136:2160], cookie=222, session=2, semaphore="Sem1" count=1) 2025-06-25T14:59:10.867225Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=111) 2025-06-25T14:59:10.867326Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[2:136:2160], cookie=222) 2025-06-25T14:59:10.867849Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:155:2177], cookie=11979943197467418853, name="Sem1") 2025-06-25T14:59:10.867933Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:155:2177], cookie=11979943197467418853) 2025-06-25T14:59:10.868366Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:158:2180], cookie=13782960261488359994, name="Sem1") 2025-06-25T14:59:10.868445Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:158:2180], cookie=13782960261488359994) 2025-06-25T14:59:10.868693Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[2:136:2160], cookie=333, name="Sem1") 2025-06-25T14:59:10.868786Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 1 "Sem1" waiter link 2025-06-25T14:59:10.880800Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[2:136:2160], cookie=333) 2025-06-25T14:59:10.881420Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:163:2185], cookie=333220844709353744, name="Sem1") 2025-06-25T14:59:10.881510Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:163:2185], cookie=333220844709353744) 2025-06-25T14:59:10.881975Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:166:2188], cookie=4623436201125788326, name="Sem1") 2025-06-25T14:59:10.882045Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:166:2188], cookie=4623436201125788326) 2025-06-25T14:59:10.882314Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[2:136:2160], cookie=444, name="Sem1") 2025-06-25T14:59:10.882413Z node 2 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Sem1" owner link 2025-06-25T14:59:10.894420Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[2:136:2160], cookie=444) 2025-06-25T14:59:10.895057Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:171:2193], cookie=4380255412101703000, name="Sem1") 2025-06-25T14:59:10.895154Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:171:2193], cookie=4380255412101703000) 2025-06-25T14:59:10.895759Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] 
TTxSemaphoreDescribe::Execute (sender=[2:174:2196], cookie=3787773399699095350, name="Sem1") 2025-06-25T14:59:10.895835Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:174:2196], cookie=3787773399699095350) 2025-06-25T14:59:11.188642Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.188748Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.204759Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.205352Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.229366Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.229746Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[3:136:2160], cookie=988677993860825375, name="Sem1", limit=1) 2025-06-25T14:59:11.229917Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-25T14:59:11.241964Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[3:136:2160], cookie=988677993860825375) 2025-06-25T14:59:11.242536Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[3:145:2167], cookie=10866620665306524992, name="Sem2", limit=1) 2025-06-25T14:59:11.242686Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 2 "Sem2" 2025-06-25T14:59:11.254783Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[3:145:2167], cookie=10866620665306524992) 2025-06-25T14:59:11.255340Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:150:2172], cookie=11040048882418067506, name="Sem1") 2025-06-25T14:59:11.255430Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:150:2172], cookie=11040048882418067506) 2025-06-25T14:59:11.255872Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[3:153:2175], cookie=11388796366110614004, name="Sem2") 2025-06-25T14:59:11.255940Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[3:153:2175], cookie=11388796366110614004) 2025-06-25T14:59:11.268137Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.268255Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema: ... 
TTxSemaphoreCreate::Complete (sender=[4:246:2267], cookie=4505176411237038146) 2025-06-25T14:59:12.112132Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:134:2158], cookie=111, session=1, semaphore="Sem1" count=1) 2025-06-25T14:59:12.112242Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 11 "Sem1" queue: next order #1 session 1 2025-06-25T14:59:12.124156Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:134:2158], cookie=111) 2025-06-25T14:59:12.124717Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:134:2158], cookie=222, session=2, semaphore="Sem1" count=1) 2025-06-25T14:59:12.136842Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:134:2158], cookie=222) 2025-06-25T14:59:12.137455Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[4:134:2158], cookie=333, name="Sem1") 2025-06-25T14:59:12.137581Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:124: [72057594037927937] Deleting session 2 / semaphore 11 "Sem1" waiter link 2025-06-25T14:59:12.159551Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[4:134:2158], cookie=333) 2025-06-25T14:59:12.160037Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:134:2158], cookie=444, session=2, semaphore="Sem1" count=1) 2025-06-25T14:59:12.171963Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:134:2158], cookie=444) 2025-06-25T14:59:12.172491Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[4:134:2158], cookie=555, name="Sem1") 2025-06-25T14:59:12.172584Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 11 "Sem1" owner link 2025-06-25T14:59:12.172654Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 11 "Sem1" queue: next order #3 session 2 2025-06-25T14:59:12.184365Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[4:134:2158], cookie=555) 2025-06-25T14:59:12.599194Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:12.599274Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:12.613832Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:12.613963Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:12.637594Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:12.638107Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2160], cookie=6087319967839065647, session=0, seqNo=0) 2025-06-25T14:59:12.638279Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:12.651323Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete 
(sender=[5:136:2160], cookie=6087319967839065647, session=1) 2025-06-25T14:59:12.651592Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:136:2160], cookie=112, name="Sem1", limit=5) 2025-06-25T14:59:12.651710Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-25T14:59:12.664171Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:136:2160], cookie=112) 2025-06-25T14:59:12.664455Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:28: [72057594037927937] TTxSemaphoreUpdate::Execute (sender=[5:136:2160], cookie=113, name="Sem1") 2025-06-25T14:59:12.676788Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:84: [72057594037927937] TTxSemaphoreUpdate::Complete (sender=[5:136:2160], cookie=113) 2025-06-25T14:59:12.677116Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:136:2160], cookie=114, name="Sem1", force=0) 2025-06-25T14:59:12.677208Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:58: [72057594037927937] Deleting semaphore 1 "Sem1" 2025-06-25T14:59:12.689242Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:136:2160], cookie=114) 2025-06-25T14:59:12.689515Z node 5 :KESUS_TABLET DEBUG: tx_session_detach.cpp:100: [72057594037927937] Fast-path detach session=1 from sender=[5:136:2160], cookie=16730803564224034105 2025-06-25T14:59:12.689726Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:136:2160], cookie=115, name="Sem1", limit=5) 2025-06-25T14:59:12.701696Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:136:2160], cookie=115) 2025-06-25T14:59:12.702062Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:28: [72057594037927937] TTxSemaphoreUpdate::Execute (sender=[5:136:2160], cookie=116, name="Sem1") 2025-06-25T14:59:12.714272Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:84: [72057594037927937] TTxSemaphoreUpdate::Complete (sender=[5:136:2160], cookie=116) 2025-06-25T14:59:12.714645Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:136:2160], cookie=117, name="Sem1", force=0) 2025-06-25T14:59:12.727801Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:136:2160], cookie=117) 2025-06-25T14:59:12.728198Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=118, session=1, semaphore="Sem1" count=1) 2025-06-25T14:59:12.740859Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=118) 2025-06-25T14:59:12.741224Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[5:136:2160], cookie=119, name="Sem1") 2025-06-25T14:59:12.753334Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[5:136:2160], cookie=119) 2025-06-25T14:59:12.753678Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:136:2160], cookie=120, name="Sem1") 2025-06-25T14:59:12.753775Z 
node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:136:2160], cookie=120) 2025-06-25T14:59:12.753956Z node 5 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:37: [72057594037927937] TTxSessionDestroy::Execute (sender=[5:136:2160], cookie=5317261359481688416, session=1) 2025-06-25T14:59:12.754032Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-25T14:59:12.766211Z node 5 :KESUS_TABLET DEBUG: tx_session_destroy.cpp:75: [72057594037927937] TTxSessionDestroy::Complete (sender=[5:136:2160], cookie=5317261359481688416) 2025-06-25T14:59:12.766575Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:136:2160], cookie=121, name="Sem1", limit=5) 2025-06-25T14:59:12.781416Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:136:2160], cookie=121) 2025-06-25T14:59:12.781879Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:28: [72057594037927937] TTxSemaphoreUpdate::Execute (sender=[5:136:2160], cookie=122, name="Sem1") 2025-06-25T14:59:12.794694Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:84: [72057594037927937] TTxSemaphoreUpdate::Complete (sender=[5:136:2160], cookie=122) 2025-06-25T14:59:12.795037Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:136:2160], cookie=123, name="Sem1", force=0) 2025-06-25T14:59:12.808621Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:136:2160], cookie=123) 2025-06-25T14:59:12.808969Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=124, session=1, semaphore="Sem1" count=1) 2025-06-25T14:59:12.821102Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=124) 2025-06-25T14:59:12.821448Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[5:136:2160], cookie=125, name="Sem1") 2025-06-25T14:59:12.844964Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[5:136:2160], cookie=125) 2025-06-25T14:59:12.845347Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:136:2160], cookie=126, name="Sem1") 2025-06-25T14:59:12.845441Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:136:2160], cookie=126) 2025-06-25T14:59:12.846077Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:136:2160], cookie=127, name="Sem1", limit=5) 2025-06-25T14:59:12.846156Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:136:2160], cookie=127) 2025-06-25T14:59:12.846413Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:28: [72057594037927937] TTxSemaphoreUpdate::Execute (sender=[5:136:2160], cookie=128, name="Sem1") 2025-06-25T14:59:12.846479Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_update.cpp:84: [72057594037927937] TTxSemaphoreUpdate::Complete (sender=[5:136:2160], cookie=128) 2025-06-25T14:59:12.846697Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: 
[72057594037927937] TTxSemaphoreDelete::Execute (sender=[5:136:2160], cookie=129, name="Sem1", force=0) 2025-06-25T14:59:12.846761Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[5:136:2160], cookie=129) 2025-06-25T14:59:12.846992Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=130, session=1, semaphore="Sem1" count=1) 2025-06-25T14:59:12.847060Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=130) 2025-06-25T14:59:12.847255Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:37: [72057594037927937] TTxSemaphoreRelease::Execute (sender=[5:136:2160], cookie=131, name="Sem1") 2025-06-25T14:59:12.847315Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_release.cpp:93: [72057594037927937] TTxSemaphoreRelease::Complete (sender=[5:136:2160], cookie=131) 2025-06-25T14:59:12.847529Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:136:2160], cookie=132, name="Sem1") 2025-06-25T14:59:12.847602Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:136:2160], cookie=132) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestRegisterProxyLinkFailureRace [GOOD] Test command err: 2025-06-25T14:59:10.243184Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.243298Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.272415Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.272554Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.296870Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.741565Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.741655Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.759359Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.759848Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.783599Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.148755Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.148823Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.162602Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.163033Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.185946Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.550478Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.550576Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.567744Z node 4 :KESUS_TABLET DEBUG: 
tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.567975Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.592479Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.594541Z node 4 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037927937] NodeDisconnected NodeId# 5 2025-06-25T14:59:11.595019Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:318: Got TEvServerDisconnected([4:194:2161]) 2025-06-25T14:59:12.143573Z node 6 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:12.143652Z node 6 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:12.156682Z node 6 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:12.157389Z node 6 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute ... waiting for register request 2025-06-25T14:59:12.181469Z node 6 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete ... blocking NKikimr::NKesus::TEvKesus::TEvRegisterProxy from TEST_ACTOR_RUNTIME to KESUS_TABLET_ACTOR cookie 5953666351740882900 ... waiting for register request (done) ... unblocking NKikimr::NKesus::TEvKesus::TEvRegisterProxy from TEST_ACTOR_RUNTIME to KESUS_TABLET_ACTOR 2025-06-25T14:59:12.182104Z node 6 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037927937] NodeDisconnected NodeId# 7 2025-06-25T14:59:12.182499Z node 6 :KESUS_TABLET TRACE: quoter_runtime.cpp:318: Got TEvServerDisconnected([6:194:2161]) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAttachFastPathBlocked [GOOD] Test command err: 2025-06-25T14:59:11.437507Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.437617Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.455858Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.456016Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.480329Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.480858Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=15423669696932716228, session=0, seqNo=0) 2025-06-25T14:59:11.481018Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:11.496129Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=15423669696932716228, session=1) 2025-06-25T14:59:11.496505Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=812566429596757341, session=0, seqNo=0) 2025-06-25T14:59:11.496626Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:59:11.508597Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=812566429596757341, session=2) 2025-06-25T14:59:11.811954Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 
2025-06-25T14:59:11.812050Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.825786Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.826335Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.850004Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.850461Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[2:136:2160], cookie=13803924928608851057, session=1, seqNo=0) 2025-06-25T14:59:11.862153Z node 2 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[2:136:2160], cookie=13803924928608851057, session=1) 2025-06-25T14:59:12.206332Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:12.206417Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:12.220290Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:12.220854Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:12.244625Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:12.245456Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:136:2160], cookie=13268604181507153338, session=0, seqNo=0) 2025-06-25T14:59:12.245607Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:12.257622Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:136:2160], cookie=13268604181507153338, session=1) 2025-06-25T14:59:12.592814Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:12.592920Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:12.607016Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:12.607154Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:12.624698Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:12.624987Z node 4 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[4:134:2158], cookie=11357811336986013647, path="") 2025-06-25T14:59:12.649683Z node 4 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[4:134:2158], cookie=11357811336986013647, status=SUCCESS) 2025-06-25T14:59:12.650425Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:143:2165], cookie=13272234089296574165, session=0, seqNo=0) 2025-06-25T14:59:12.650523Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:12.663415Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:143:2165], cookie=13272234089296574165, session=1) 2025-06-25T14:59:12.664198Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute 
(sender=[4:144:2166], cookie=111, session=0, seqNo=0) 2025-06-25T14:59:12.664345Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:59:12.664532Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:262: [72057594037927937] Fast-path attach session=1 to sender=[4:144:2166], cookie=222, seqNo=0 2025-06-25T14:59:12.678600Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:144:2166], cookie=111, session=2) 2025-06-25T14:59:13.042878Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:13.042979Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:13.063406Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:13.063542Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:13.087250Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:13.087578Z node 5 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[5:136:2160], cookie=14700296016755554897, path="") 2025-06-25T14:59:13.099609Z node 5 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[5:136:2160], cookie=14700296016755554897, status=SUCCESS) 2025-06-25T14:59:13.100638Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:145:2167], cookie=13235501018442131006, session=0, seqNo=0) 2025-06-25T14:59:13.100767Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:13.113088Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:145:2167], cookie=13235501018442131006, session=1) 2025-06-25T14:59:13.113885Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:145:2167], cookie=123, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:59:13.114044Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:59:13.114141Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 2025-06-25T14:59:13.115394Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:146:2168], cookie=111, session=0, seqNo=0) 2025-06-25T14:59:13.115484Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:59:13.115621Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:146:2168], cookie=222, session=1, seqNo=0) 2025-06-25T14:59:13.128780Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:145:2167], cookie=123) 2025-06-25T14:59:13.128869Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:146:2168], cookie=111, session=2) 2025-06-25T14:59:13.128919Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:146:2168], cookie=222, session=1) ------- [TM] {asan, 
default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::TestBatchingWithProposeConfig [GOOD] Test command err: 2025-06-25T14:58:57.429139Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:57.429231Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:57.446694Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [1:181:2194] 2025-06-25T14:58:57.449215Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:58:57.000000Z 2025-06-25T14:58:57.449291Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [1:181:2194] Got cmd write: CmdDeleteRange { Range { From: "m0000000003cclient-1" IncludeFrom: true To: "m0000000003cclient-1" IncludeTo: true } } CmdDeleteRange { Range { From: "m0000000003uclient-1" IncludeFrom: true To: "m0000000003uclient-1" IncludeTo: true } } CmdWrite { Key: "i0000000003" Value: "\030\000(\350\306\345\274\3722" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient-2" Value: "\010\000\020\000\030\000\"\000(\0000\000@\000" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient-2" Value: "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" StorageChannel: INLINE } CmdWrite { Key: "_config_3" Value: "\022\t\030\200\243\0058\200\200\200\005\030\000\"\027rt3.dc1--account--topic(\0020\001\272\001 /Root/PQ/rt3.dc1--account--topic\352\001\000\372\001\002\010\000\212\002\007account\220\002\001\242\002\002\010\000\252\002\016\n\010client-2@\000H\000" StorageChannel: INLINE } 2025-06-25T14:58:58.226933Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:58.227019Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:58.243504Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] bootstrapping {0, {0, 1111}, 123} [2:183:2196] 2025-06-25T14:58:58.247643Z node 2 :PERSQUEUE INFO: partition_init.cpp:911: [rt3.dc1--account--topic:{0, {0, 1111}, 123}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-25T14:58:58.000000Z 2025-06-25T14:58:58.247722Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: {0, {0, 1111}, 123}, State: StateInit] init complete for topic 'rt3.dc1--account--topic' partition {0, {0, 1111}, 123} generation 0 [2:183:2196] 2025-06-25T14:58:59.013697Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:59.013755Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:59.025929Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-25T14:58:59.026194Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:59.026462Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:182:2195] 2025-06-25T14:58:59.027485Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-25T14:58:59.027659Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-25T14:58:59.027831Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:58:59.028035Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-25T14:58:59.028289Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:58:59.028497Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:58:59.028632Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:58:59.028681Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:58:59.028733Z node 3 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:58:59.000000Z 2025-06-25T14:58:59.028775Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-25T14:58:59.028821Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [3:182:2195] 2025-06-25T14:58:59.028879Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:58:59.028927Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-25T14:58:59.029166Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:59.372439Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src3|f0217fa5-980478bf-b775c4b1-cb661770_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src3 2025-06-25T14:58:59.372607Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-25T14:58:59.372815Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src4|58cac9eb-d5546304-e6a80f9e-8973f55d_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src4 2025-06-25T14:58:59.372869Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-25T14:59:00.345541Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:59:01.692649Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create immediate tx with id = 3 and act no: 4 Create immediate tx with id = 6 and act no: 7 2025-06-25T14:59:01.693052Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 Create distr tx with id = 8 and act no: 9 Create distr tx with id = 10 and act no: 11 2025-06-25T14:59:02.180686Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 8 2025-06-25T14:59:02.180800Z node 3 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 10 2025-06-25T14:59:02.180860Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:59:02.180913Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:59:02.971745Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:59:02.971869Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:59:04.240742Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:59:04.240902Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:59:04.241109Z node 3 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 17 Wait batch completion 2025-06-25T14:59:04.241294Z node 3 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 10 Wait kv request 2025-06-25T14:59:04.480834Z node 3 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: 
StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-06-25T14:59:04.480930Z node 3 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-06-25T14:59:04.480999Z node 3 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:59:04.481084Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:59:04.481118Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:59:04.481230Z node 3 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:59:04.481279Z node 3 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:59:04.499425Z node 3 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-25T14:59:04.499550Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1257: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Already written message. Topic: 'Root/PQ/rt3.dc1--account--topic' Partition: 0 SourceId: 'src4'. Message seqNo: 7. Committed seqNo: (NULL). Writing seqNo: 7. EndOffset: 50. CurOffset: 50. Offset: 50 2025-06-25T14:59:04.499650Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1364: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob processing sourceId 'src4' seqNo 8 partNo 0 2025-06-25T14:59:04.500531Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1468: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 part blob complete sourceId 'src4' seqNo 8 partNo 0 FormedBlobsCount 0 NewHead: Offset 51 PartNo 0 PackedSize ... 
37927937 Status: COMPLETE TxId: 2 Wait batch completion Send disk status response with cookie: 0 2025-06-25T14:59:08.840278Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:08.840377Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:59:08.840414Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-25T14:59:08.840526Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got batch complete: 1 Got KV request Got KV request Got KV request Got KV request Got KV request Got KV request Got KV request Wait batch completion Send disk status response with cookie: 0 Wait immediate tx complete 3 2025-06-25T14:59:08.871918Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:08.872015Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 3 2025-06-25T14:59:09.270551Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:59:09.270606Z node 5 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:59:09.281045Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-25T14:59:09.281250Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:59:09.281447Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [5:182:2195] 2025-06-25T14:59:09.282051Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-25T14:59:09.282150Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-25T14:59:09.282259Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:59:09.282756Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-25T14:59:09.282941Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:59:09.283077Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:59:09.283167Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: 
[Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:59:09.283194Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:59:09.283226Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:59:09.000000Z 2025-06-25T14:59:09.283253Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-25T14:59:09.283282Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [5:182:2195] 2025-06-25T14:59:09.283317Z node 5 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:59:09.283355Z node 5 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:59:09.283403Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:59:09.283443Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-25T14:59:09.283480Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-25T14:59:09.283620Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:59:09.283703Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 offset 0 count 1 size 1024000 endOffset 50 max time lag 0ms effective offset 0 2025-06-25T14:59:09.283838Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 684 count 50 last offset 0, current partition end offset: 50 2025-06-25T14:59:09.283870Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 
2025-06-25T14:59:10.606036Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 2025-06-25T14:59:10.606321Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-25T14:59:10.606432Z node 5 :PERSQUEUE DEBUG: partition.cpp:1079: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvProposePartitionConfig Step 1, TxId 3 2025-06-25T14:59:11.876752Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait batch completion 2025-06-25T14:59:11.876928Z node 5 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:59:11.877033Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:59:11.877073Z node 5 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= Got KV request Got batch complete: 2 Got KV request Got KV request 2025-06-25T14:59:13.113628Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Send disk status response with cookie: 0 Wait immediate tx complete 2 2025-06-25T14:59:13.113914Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:13.114061Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got batch complete: 1 Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 2 Wait batch completion 2025-06-25T14:59:13.114338Z node 5 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 3 2025-06-25T14:59:13.114480Z node 5 :PERSQUEUE DEBUG: partition.cpp:3297: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 drop done Got KV request Send disk status response with cookie: 0 2025-06-25T14:59:13.126209Z node 5 :PERSQUEUE DEBUG: event_helpers.cpp:40: tablet 72057594037927937 topic 'Root/PQ/rt3.dc1--account--topic' partition 0 error: cannot finish read request. 
Consumer client-1 is gone from partition 2025-06-25T14:59:13.126391Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:13.126487Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:59:13.126535Z node 5 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-25T14:59:13.126698Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got KV request 2025-06-25T14:59:13.126906Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 readTimeStamp for offset 5 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:59:13.126964Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 send read request for offset 5 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 Got KV request Got batch complete: 1 Got KV request Got KV request Got KV request 2025-06-25T14:59:13.127441Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 3 Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 offset 5 count 1 size 1024000 endOffset 50 max time lag 0ms effective offset 5 2025-06-25T14:59:13.127660Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 3 added 1 blobs, size 0 count 45 last offset 0, current partition end offset: 50 2025-06-25T14:59:13.127711Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Reading cookie 3. Send blob request. 
Got KV request Got KV request Wait batch completion Send disk status response with cookie: 0 Wait immediate tx complete 4 2025-06-25T14:59:13.158881Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:13.158997Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 4 >> TKesusTest::TestGetQuoterResourceCounters [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestGetQuoterResourceCounters [GOOD] Test command err: 2025-06-25T14:59:10.243184Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.243319Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.262599Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.262714Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.289218Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.289536Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[1:136:2160], cookie=3564757409929399205, path="/foo/bar/baz") 2025-06-25T14:59:10.305466Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[1:136:2160], cookie=3564757409929399205, status=SUCCESS) 2025-06-25T14:59:10.305964Z node 1 :KESUS_TABLET DEBUG: tx_config_get.cpp:23: [72057594037927937] TTxConfigGet::Execute (sender=[1:145:2167], cookie=11947199988888158813) 2025-06-25T14:59:10.320865Z node 1 :KESUS_TABLET DEBUG: tx_config_get.cpp:44: [72057594037927937] TTxConfigGet::Complete (sender=[1:145:2167], cookie=11947199988888158813) 2025-06-25T14:59:10.321396Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[1:150:2172], cookie=18034757769509707367, path="/foo/bar/baz") 2025-06-25T14:59:10.333470Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[1:150:2172], cookie=18034757769509707367, status=SUCCESS) 2025-06-25T14:59:10.333974Z node 1 :KESUS_TABLET DEBUG: tx_config_get.cpp:23: [72057594037927937] TTxConfigGet::Execute (sender=[1:155:2177], cookie=13809424924330690580) 2025-06-25T14:59:10.346147Z node 1 :KESUS_TABLET DEBUG: tx_config_get.cpp:44: [72057594037927937] TTxConfigGet::Complete (sender=[1:155:2177], cookie=13809424924330690580) 2025-06-25T14:59:10.359992Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.360099Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.360633Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.361223Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.378991Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.379321Z node 1 :KESUS_TABLET DEBUG: tx_config_get.cpp:23: [72057594037927937] TTxConfigGet::Execute (sender=[1:197:2209], cookie=16860401591422224106) 
2025-06-25T14:59:10.391601Z node 1 :KESUS_TABLET DEBUG: tx_config_get.cpp:44: [72057594037927937] TTxConfigGet::Complete (sender=[1:197:2209], cookie=16860401591422224106) 2025-06-25T14:59:10.392281Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[1:205:2216], cookie=3021991504992638612, path="/foo/bar/baz") 2025-06-25T14:59:10.404670Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[1:205:2216], cookie=3021991504992638612, status=SUCCESS) 2025-06-25T14:59:10.405309Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:28: [72057594037927937] TTxConfigSet::Execute (sender=[1:210:2221], cookie=4875749265975526691, path="/foo/bar/baz") 2025-06-25T14:59:10.405407Z node 1 :KESUS_TABLET DEBUG: tx_config_set.cpp:94: [72057594037927937] TTxConfigSet::Complete (sender=[1:210:2221], cookie=4875749265975526691, status=PRECONDITION_FAILED) 2025-06-25T14:59:10.723513Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.723605Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.738141Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.738498Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.761748Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.762071Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[2:136:2160], cookie=9194531121732345078, name="Lock1") 2025-06-25T14:59:10.762140Z node 2 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[2:136:2160], cookie=9194531121732345078) 2025-06-25T14:59:11.131890Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.131981Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.150277Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.150740Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.173983Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.174398Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:136:2160], cookie=17957519031104767169, session=0, seqNo=0) 2025-06-25T14:59:11.174519Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:11.185783Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:136:2160], cookie=17957519031104767169, session=1) 2025-06-25T14:59:11.186047Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[3:136:2160], cookie=111, session=1, semaphore="Lock1" count=18446744073709551615) 2025-06-25T14:59:11.186170Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:126: [72057594037927937] Created new ephemeral semaphore 1 "Lock1" 2025-06-25T14:59:11.186260Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Lock1" queue: next order #1 session 1 
2025-06-25T14:59:11.197742Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[3:136:2160], cookie=111) 2025-06-25T14:59:11.198112Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[3:147:2169], cookie=14609050174302703289, name="Lock1", force=0) 2025-06-25T14:59:11.209770Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[3:147:2169], cookie=14609050174302703289) 2025-06-25T14:59:11.210127Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[3:152:2174], cookie=5590868872530076595, name="Sem1", force=0) 2025-06-25T14:59:11.221982Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[3:152:2174], cookie=5590868872530076595) 2025-06-25T14:59:11.222512Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[3:157:2179], cookie=16648700103203563478, name="Sem1", limit=42) 2025-06-25T14:59:11.222640Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 2 "Sem1" 2025-06-25T14:59:11.234654Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[3:157:2179], cookie=16648700103203563478) 2025-06-25T14:59:11.235187Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[3:162:2184], cookie=1319160590403044118, name="Sem1", force=0) 2025-06-25T14:59:11.235280Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:58: [72057594037927937] Deleting semaphore 2 "Sem1" 2025-06-25T14:59:11.247323Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[3:162:2184], cookie=1319160590403044118) 2025-06-25T14:59:11.247863Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:28: [72057594037927937] TTxSemaphoreDelete::Execute (sender=[3:167:2189], cookie=15742421289873486794, name="Sem1", force=0) 2025-06-25T14:59:11.259886Z node 3 :KESUS_TABLET DEBUG: tx_semaphore_delete.cpp:95: [72057594037927937] TTxSemaphoreDelete::Complete (sender=[3:167:2189], cookie=15742421289873486794) 2025-06-25T14:59:11.519811Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.519882Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.532533Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.532631Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.546123Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.546525Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:134:2158], cookie=15910453362903895092, session=0, seqNo=0) 2025-06-25T14:59:11.546651Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:11.568530Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:134:2158], cookie=15910453362903895092, session=1) 2025-06-25T14:59:11.568710Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: 
[72057594037927937] TTxSessionAttach::Execute (sender=[4:134:2158], cookie=9796498362838534510, session=0, seqNo=0) 2025-06-25T14:59:11.568787Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:59:11.580274Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[4:134:2158], cookie=9796498362838534510, session=2) 2025-06-25T14:59:11.580486Z node 4 :KESUS_TABLET DEBUG: tx_session_detach.cpp:100: [72057594037927937] Fast-path detach session=2 from sender=[4:134:2158], cookie=6200686820856726776 2025-06-25T14:59:11.580802Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[4:146:2168], cookie=536389439238054582, name="Sem1", limit=3) 2025-06-25T14:59:11.580913Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-25T14:59:11.592327Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[4:146:2168], cookie=536389439238054582) 2025-06-25T14:59:11.592539Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:134:2158], cookie=112, name="Sem1") 2025-06-25T14:59:11.592590Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:134:2158], cookie=112) 2025-06-25T14:59:11.592745Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:134:2158], cookie=113, name="Sem1") 2025-06-25T14:59:11.592781Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:134:2158], cookie=113) 2025-06-25T14:59:11.592930Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[4:134:2158], cookie=15416243443742816252, session=2, seqNo=0) 2025-06-25T14:59:11.604533Z node 4 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TT ... 
4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:13.009672Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:13.020368Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:134:2158], cookie=129, session=1, semaphore="Sem2" count=2) 2025-06-25T14:59:13.034644Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:134:2158], cookie=129) 2025-06-25T14:59:13.035080Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:134:2158], cookie=130, name="Sem2") 2025-06-25T14:59:13.035190Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:134:2158], cookie=130) 2025-06-25T14:59:13.035492Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[4:134:2158], cookie=131, session=1, semaphore="Sem2" count=1) 2025-06-25T14:59:13.048531Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[4:134:2158], cookie=131) 2025-06-25T14:59:13.049014Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:134:2158], cookie=132, name="Sem2") 2025-06-25T14:59:13.049120Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:134:2158], cookie=132) 2025-06-25T14:59:13.049442Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:134:2158], cookie=133, name="Sem2") 2025-06-25T14:59:13.049539Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:134:2158], cookie=133) 2025-06-25T14:59:13.397209Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:13.397340Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:13.413967Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:13.414101Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:13.438406Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:13.442720Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:136:2160], cookie=9648644762405392261, path="/Root1", config={ MaxUnitsPerSecond: 1000 }) 2025-06-25T14:59:13.442915Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root1" 2025-06-25T14:59:13.454969Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:136:2160], cookie=9648644762405392261) 2025-06-25T14:59:13.455568Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:145:2167], cookie=5623989343813793820, path="/Root1/Res", config={ }) 2025-06-25T14:59:13.455797Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root1/Res" 
2025-06-25T14:59:13.467824Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:145:2167], cookie=5623989343813793820) 2025-06-25T14:59:13.468444Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:150:2172], cookie=17016657386226654064, path="/Root2", config={ MaxUnitsPerSecond: 1000 }) 2025-06-25T14:59:13.468628Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 3 "Root2" 2025-06-25T14:59:13.481015Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:150:2172], cookie=17016657386226654064) 2025-06-25T14:59:13.481651Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:155:2177], cookie=8691255280221219147, path="/Root2/Res", config={ }) 2025-06-25T14:59:13.481872Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 4 "Root2/Res" 2025-06-25T14:59:13.495100Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:155:2177], cookie=8691255280221219147) 2025-06-25T14:59:13.495782Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:160:2182], cookie=16056192297858051477, path="/Root2/Res/Subres", config={ }) 2025-06-25T14:59:13.496029Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 5 "Root2/Res/Subres" 2025-06-25T14:59:13.508625Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:160:2182], cookie=16056192297858051477) 2025-06-25T14:59:13.509900Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:165:2187]. Cookie: 10765414009893755078. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root1/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 1000 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:13.509961Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:165:2187], cookie=10765414009893755078) 2025-06-25T14:59:13.554193Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:165:2187]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 100 StateNotification { Status: SUCCESS } } } 2025-06-25T14:59:13.609497Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:165:2187]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 100 StateNotification { Status: SUCCESS } } } 2025-06-25T14:59:13.641339Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:165:2187]. Cookie: 0. 
Data: { ResourcesInfo { ResourceId: 2 Amount: 100 StateNotification { Status: SUCCESS } } } 2025-06-25T14:59:13.642055Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:313: [72057594037927937] Send TEvGetQuoterResourceCountersResult to [5:173:2191]. Cookie: 2984709473593125215. Data: { ResourceCounters { ResourcePath: "Root2/Res" } ResourceCounters { ResourcePath: "Root2/Res/Subres" } ResourceCounters { ResourcePath: "Root2" } ResourceCounters { ResourcePath: "Root1/Res" Allocated: 300 } ResourceCounters { ResourcePath: "Root1" Allocated: 300 } } 2025-06-25T14:59:13.642960Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:176:2194]. Cookie: 16779914931738588776. Data: { Results { ResourceId: 5 Error { Status: SUCCESS } EffectiveProps { ResourceId: 5 ResourcePath: "Root2/Res/Subres" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 1000 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:13.643020Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:176:2194], cookie=16779914931738588776) 2025-06-25T14:59:13.685149Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:176:2194]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 5 Amount: 100 StateNotification { Status: SUCCESS } } } 2025-06-25T14:59:13.727057Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:176:2194]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 5 Amount: 100 StateNotification { Status: SUCCESS } } } 2025-06-25T14:59:13.727778Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:313: [72057594037927937] Send TEvGetQuoterResourceCountersResult to [5:182:2198]. Cookie: 6737421561991955378. Data: { ResourceCounters { ResourcePath: "Root2/Res" Allocated: 200 } ResourceCounters { ResourcePath: "Root2/Res/Subres" Allocated: 200 } ResourceCounters { ResourcePath: "Root2" Allocated: 200 } ResourceCounters { ResourcePath: "Root1/Res" Allocated: 300 } ResourceCounters { ResourcePath: "Root1" Allocated: 300 } } 2025-06-25T14:59:13.728515Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:165:2187]. Cookie: 8901474229506428075. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root1/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 1000 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:13.728571Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:165:2187], cookie=8901474229506428075) 2025-06-25T14:59:13.729310Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:176:2194]. Cookie: 13549793625125906053. 
Data: { Results { ResourceId: 5 Error { Status: SUCCESS } EffectiveProps { ResourceId: 5 ResourcePath: "Root2/Res/Subres" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 1000 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:13.729367Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:176:2194], cookie=13549793625125906053) 2025-06-25T14:59:13.760669Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:176:2194]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 5 Amount: 50 StateNotification { Status: SUCCESS } } } 2025-06-25T14:59:13.760786Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:165:2187]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 2 Amount: 20 StateNotification { Status: SUCCESS } } } 2025-06-25T14:59:13.761519Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:313: [72057594037927937] Send TEvGetQuoterResourceCountersResult to [5:189:2205]. Cookie: 17240056814982233611. Data: { ResourceCounters { ResourcePath: "Root2/Res" Allocated: 250 } ResourceCounters { ResourcePath: "Root2/Res/Subres" Allocated: 250 } ResourceCounters { ResourcePath: "Root2" Allocated: 250 } ResourceCounters { ResourcePath: "Root1/Res" Allocated: 320 } ResourceCounters { ResourcePath: "Root1" Allocated: 320 } } >> TKesusTest::TestQuoterAccountResourcesAggregateClients [GOOD] >> TKesusTest::TestQuoterAccountResourcesAggregateResources >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-false >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-false >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-true >> TSchemeShardLoginTest::UserLogin >> TWebLoginService::AuditLogEmptySIDsLoginSuccess >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-false >> TSchemeShardLoginTest::BanUnbanUser >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-false >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-false >> TKesusTest::TestQuoterAccountResourcesPaced [GOOD] >> TKesusTest::TestQuoterAccountResourcesDeduplicateClient >> TLocksTest::Range_CorrectDot [GOOD] >> TPartitionTests::ConflictingCommitProccesAfterRollback [GOOD] >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-true >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-true >> TSchemeShardLoginTest::BanUnbanUser [GOOD] >> TSchemeShardLoginTest::BanUserWithWaiting >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-true >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-true >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::TestExternalLogin >> TWebLoginService::AuditLogLoginSuccess >> TSchemeShardLoginTest::UserLogin [GOOD] >> TSchemeShardLoginTest::UserStayLockedOutIfEnterValidPassword >> TWebLoginService::AuditLogEmptySIDsLoginSuccess [GOOD] >> TWebLoginService::AuditLogLdapLoginBadPassword >> 
TSchemeShardLoginTest::RemoveUser-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionTests::ConflictingCommitProccesAfterRollback [GOOD] Test command err: 2025-06-25T14:58:55.188783Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.189054Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:55.213488Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-25T14:58:55.215120Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:55.215685Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:180:2193] 2025-06-25T14:58:55.217881Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-25T14:58:55.218156Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-25T14:58:55.218461Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:58:55.218701Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-25T14:58:55.218974Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:58:55.219198Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:58:55.219414Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:58:55.219460Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:58:55.219510Z node 1 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:58:55.000000Z 2025-06-25T14:58:55.219586Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-25T14:58:55.219650Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [1:180:2193] 2025-06-25T14:58:55.219708Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:58:55.219776Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-25T14:58:55.220016Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:55.576787Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src1|ef5c9177-8369f1e3-f951ccd3-c5542035_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src1 2025-06-25T14:58:55.576927Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-25T14:58:56.644833Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 Create distr tx with id = 2 and act no: 3 Create distr tx with id = 4 and act no: 5 Create distr tx with id = 6 and act no: 7 2025-06-25T14:58:56.645224Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-25T14:58:56.645355Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 2 2025-06-25T14:58:56.645415Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 4 2025-06-25T14:58:56.645447Z node 1 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 6 2025-06-25T14:58:57.967595Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:57.967778Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:58:57.967868Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:58:57.967917Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-25T14:58:57.967966Z node 1 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 1 Wait batch of 1 completion Wait batch completion Expect no KV request 2025-06-25T14:58:57.968126Z node 1 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 0 2025-06-25T14:58:57.968176Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 0 2025-06-25T14:58:57.968248Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 Got batch complete: 1 Waif or tx 3 predicate failure 2025-06-25T14:58:58.242452Z node 1 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 2 2025-06-25T14:58:58.242547Z node 1 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 2 
2025-06-25T14:58:58.242603Z node 1 :PERSQUEUE DEBUG: partition.cpp:2528: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Head=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0, NewHead=Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 Got batch complete: 2 Waif or tx 4 predicate failure Wait batch of 3 completion Wait batch completion Expect no KV request Wait batch completion Wait for no tx committed Got KV request Wait kv request Wait for commits Wait tx committed for tx 0 2025-06-25T14:58:58.700123Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 2 2025-06-25T14:58:58.700183Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait tx committed for tx 2 2025-06-25T14:58:59.100738Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:59.100796Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:58:59.112080Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-25T14:58:59.112249Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:59.112481Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:182:2195] 2025-06-25T14:58:59.113061Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-25T14:58:59.113165Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request Got KV request 2025-06-25T14:58:59.113275Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:58:59.113521Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-25T14:58:59.113725Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-25T14:58:59.113917Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 1 size 684 so 0 eo 1 d0000000000_00000000000000000000_00000_0000000001_00000 2025-06-25T14:58:59.114009Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:58:59.114046Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:58:59.114087Z node 2 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:58:59.000000Z 2025-06-25T14:58:59.114124Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 
2025-06-25T14:58:59.114173Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [2:182:2195] 2025-06-25T14:58:59.114219Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 1 endOffset 1 Head Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-25T14:58:59.114259Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:58:59.114432Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:59.456564Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src1|23ce0bb5-23424ecf-888a2d54-c64cec6a_0 generated for partition 0 topic 'Root/PQ/rt3.dc1--account--topic' owner src1 2025-06-25T14:58:59.456714Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 Got batch complete: 1 2025-06-25T14:59:00.460915Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 0 and act no: 1 2025-06-25T14:59:00.461145Z node 2 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-25T14:59:01.799683Z node 2 :PERSQUEUE DEBUG ... ode 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:59:09.469549Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 2 Wait batch completion 2025-06-25T14:59:09.469760Z node 4 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 2 2025-06-25T14:59:09.469794Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 2 2025-06-25T14:59:09.469846Z node 4 :PERSQUEUE DEBUG: partition.cpp:2461: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Partition 0 Consumer 'client-0' Bad request (gap) Offset 5 Begin 0 Got batch complete: 1 Wait kv request Got KV request Wait tx committed for tx 2 2025-06-25T14:59:09.490665Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:09.490742Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait for no tx committed 2025-06-25T14:59:10.743186Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Create distr tx with id = 4 and act no: 5 Created Tx with id 7 as act# 7 2025-06-25T14:59:10.743456Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 4 2025-06-25T14:59:10.743560Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: 
StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 7 2025-06-25T14:59:11.986605Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:59:11.986810Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 2 2025-06-25T14:59:11.987073Z node 4 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 offset is set to 3 (startOffset 50) session session-client-1 Got KV request Wait batch completion Wait kv request 2025-06-25T14:59:12.225629Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:12.225707Z node 4 :PERSQUEUE DEBUG: partition.cpp:2461: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Partition 0 Consumer 'client-1' Bad request (gap) Offset 3 Begin 0 2025-06-25T14:59:12.225762Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got batch complete: 1 Wait batch completion Wait kv request Got KV request Create distr tx with id = 8 and act no: 9 2025-06-25T14:59:12.226072Z node 4 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 8 2025-06-25T14:59:12.246730Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:12.246823Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:59:13.247289Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:59:13.247557Z node 4 :PERSQUEUE DEBUG: partition.cpp:1401: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse Got batch complete: 3 Wait kv request 2025-06-25T14:59:13.247934Z node 4 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-25T14:59:13.248010Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-25T14:59:13.248146Z node 4 :PERSQUEUE DEBUG: partition.cpp:3403: [PQ: 72057594037927937, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(ABORTED), reason=incorrect offset range (gap) Got KV request Wait immediate tx complete 10 2025-06-25T14:59:13.259350Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:13.259466Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Got propose resutl: Origin: 72057594037927937 Status: COMPLETE TxId: 10 Wait immediate tx complete 11 Got propose resutl: Origin: 72057594037927937 Status: ABORTED TxId: 11 
Errors { Kind: BAD_REQUEST Reason: "incorrect offset range (gap)" } 2025-06-25T14:59:13.684433Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:59:13.684512Z node 5 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-25T14:59:13.700945Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitConfigStep Got KV request 2025-06-25T14:59:13.701295Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:59:13.701576Z node 5 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [5:182:2195] 2025-06-25T14:59:13.702610Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDiskStatusStep Got KV request 2025-06-25T14:59:13.702789Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitMetaStep Got KV request 2025-06-25T14:59:13.702956Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitInfoRangeStep Got KV request 2025-06-25T14:59:13.703665Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataRangeStep Got KV request 2025-06-25T14:59:13.703946Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:524: key[0]: d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:59:13.704137Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:624: [Root/PQ/rt3.dc1--account--topic:0:TInitDataRangeStep] Got data offset 0 count 50 size 684 so 0 eo 50 d0000000000_00000000000000000000_00000_0000000050_00000 2025-06-25T14:59:13.704323Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitDataStep 2025-06-25T14:59:13.704364Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:75: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T14:59:13.704414Z node 5 :PERSQUEUE INFO: partition_init.cpp:911: [Root/PQ/rt3.dc1--account--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-25T14:59:13.000000Z 2025-06-25T14:59:13.704455Z node 5 :PERSQUEUE DEBUG: partition_init.cpp:55: [Root/PQ/rt3.dc1--account--topic:0:Initializer] Initializing completed. 2025-06-25T14:59:13.704505Z node 5 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 0 generation 0 [5:182:2195] 2025-06-25T14:59:13.704565Z node 5 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic Root/PQ/rt3.dc1--account--topic partitition 0 so 50 endOffset 50 Head Offset 50 PartNo 0 PackedSize 0 count 0 nextOffset 50 batches 0 2025-06-25T14:59:13.704620Z node 5 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-25T14:59:13.704696Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:59:13.704741Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-25T14:59:13.704787Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-0 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-25T14:59:13.705016Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:59:13.705157Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 Topic 'Root/PQ/rt3.dc1--account--topic' partition 0 user client-1 offset 0 count 1 size 1024000 endOffset 50 max time lag 0ms effective offset 0 2025-06-25T14:59:13.705381Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72057594037927937, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 684 count 50 last offset 0, current partition end offset: 50 2025-06-25T14:59:13.705432Z node 5 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-25T14:59:15.084947Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Created Tx with id 0 as act# 0 Created Tx with id 1 as act# 1 2025-06-25T14:59:15.085233Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 0 2025-06-25T14:59:15.085386Z node 5 :PERSQUEUE DEBUG: partition.cpp:1170: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 1, TxId 1 Got batch complete: 1 Wait batch completion Got batch complete: 1 Wait batch completion Wait kv request 2025-06-25T14:59:15.333147Z node 5 :PERSQUEUE DEBUG: partition.cpp:1216: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCommit Step 1, TxId 1 2025-06-25T14:59:15.333252Z node 5 :PERSQUEUE DEBUG: partition.cpp:2502: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: 1 Got KV request Wait tx committed for tx 1 2025-06-25T14:59:15.354331Z node 5 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:15.354450Z node 5 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction Wait for no tx committed >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::DisableBuiltinAuthMechanism >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-false >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-true [GOOD] >> 
TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-false >> TSchemeShardLoginTest::RemoveUser-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveUser_Acl-StrictAclCheck-false >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-false >> TSchemeShardLoginTest::TestExternalLogin [GOOD] >> TSchemeShardLoginTest::TestExternalLoginWithIncorrectLdapDomain >> TSchemeShardLoginTest::UserStayLockedOutIfEnterValidPassword [GOOD] >> TWebLoginService::AuditLogAdminLoginSuccess >> TWebLoginService::AuditLogLdapLoginBadPassword [GOOD] >> TWebLoginService::AuditLogLdapLoginBadUser >> TKesusTest::TestQuoterAccountResourcesAggregateResources [GOOD] >> TKesusTest::TestQuoterAccountLabels ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TLocksTest::Range_CorrectDot [GOOD] Test command err: 2025-06-25T14:58:38.155133Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901553052326219:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:38.156619Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205b/r3tmp/tmpHPaD1p/pdisk_1.dat 2025-06-25T14:58:38.562995Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901553052326175:2080] 1750863518126614 != 1750863518126617 2025-06-25T14:58:38.572099Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:38.573824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:38.573934Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:38.575865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9768 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
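Interleaved with the tablet records, the runner streams suite verdicts in the form `>> <TestName> [GOOD]`, immediately followed by the next queued test. A minimal sketch, assuming the same captured text, for collecting which tests have already reported a verdict; the helper names are invented for illustration.

```python
import re

# Matches runner verdict markers such as:
#   >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-true [GOOD]
VERDICT_RE = re.compile(r">> (?P<name>[\w:.+-]+) \[(?P<verdict>[A-Z]+)\]")

def verdicts(log_text: str) -> dict:
    """Map test name -> last reported verdict (e.g. 'GOOD')."""
    return {m.group("name"): m.group("verdict") for m in VERDICT_RE.finditer(log_text)}

def not_yet_finished(log_text: str, expected_tests) -> list:
    """Tests from an expected list that have no verdict in the capture yet."""
    seen = verdicts(log_text)
    return [t for t in expected_tests if t not in seen]
```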
2025-06-25T14:58:38.808871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:38.822578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:38.843264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:38.851648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:39.007341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:39.093596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:39.177148Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:41.467359Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901565300395685:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:41.467401Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205b/r3tmp/tmp8Bo1Kj/pdisk_1.dat 2025-06-25T14:58:41.646716Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:41.649998Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519901565300395664:2080] 1750863521466324 != 1750863521466327 2025-06-25T14:58:41.680909Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:41.680973Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:41.689280Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61323 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:41.867970Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:41.873139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:41.889030Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
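The TLocksTest fragment repeats one FLAT_TX_SCHEMESHARD warning per scheme operation ("Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ..., opId: ..."), differing only in the suboperation type and opId. The sketch below collapses that repetition into per-type counts; again just a convenience for scanning this capture, under the same assumptions as above.

```python
import re
from collections import Counter

# Matches the recurring schemeshard warning seen in this log.
UNDO_UNSAFE_RE = re.compile(
    r"propose itself is undo unsafe, suboperation type: (?P<type>\w+), "
    r"opId: (?P<op_id>\d+:\d+)"
)

def undo_unsafe_counts(log_text: str) -> Counter:
    """Count 'undo unsafe' warnings per suboperation type (ESchemeOpCreateTable, ...)."""
    return Counter(m.group("type") for m in UNDO_UNSAFE_RE.finditer(log_text))

def undo_unsafe_op_ids(log_text: str) -> list:
    """The opIds those warnings were emitted for, in order of appearance."""
    return [m.group("op_id") for m in UNDO_UNSAFE_RE.finditer(log_text)]
```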
2025-06-25T14:58:41.946260Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:41.981127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:58:44.833362Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901576236975830:2150];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:44.840508Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205b/r3tmp/tmpO7SVl7/pdisk_1.dat 2025-06-25T14:58:45.039278Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:45.039348Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:45.041891Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:45.053811Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:45.055604Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519901576236975697:2080] 1750863524805678 != 1750863524805681 TClient is connected to server localhost:7405 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:45.253203Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter ... 
grations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205b/r3tmp/tmpeYgoFM/pdisk_1.dat 2025-06-25T14:59:02.906637Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:02.908266Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7519901653199541813:2080] 1750863542789815 != 1750863542789818 2025-06-25T14:59:02.927422Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:02.927511Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:02.928827Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19402 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:59:03.158648Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:59:03.176195Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:03.226373Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:59:03.298220Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:06.625277Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7519901673089430724:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:06.625356Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205b/r3tmp/tmpCgPRcc/pdisk_1.dat 2025-06-25T14:59:06.759557Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:7519901673089430705:2080] 1750863546624743 != 1750863546624746 2025-06-25T14:59:06.763332Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:06.779504Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:06.779607Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:06.782941Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30463 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:59:07.024279Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
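Every tablet record in this output has the same shape: an ISO-8601 timestamp, `node <id>`, a `:COMPONENT SEVERITY:` pair, a `file.cpp:line:` source location, and the message. A sketch that uses that shape to tally records per node, component, and severity, and to pull out the ERROR-level ones for triage; the pattern is inferred only from the lines in this capture, so records without a `file:line` prefix are simply skipped.

```python
import re
from collections import Counter

RECORD_RE = re.compile(
    r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z) "
    r"node (?P<node>\d+) "
    r":(?P<component>[A-Z_]+) (?P<severity>[A-Z]+): "
    r"(?P<location>[\w./]+:\d+):"
)

def tally(log_text: str) -> Counter:
    """Record counts keyed by (node, component, severity)."""
    return Counter(
        (int(m.group("node")), m.group("component"), m.group("severity"))
        for m in RECORD_RE.finditer(log_text)
    )

def error_records(log_text: str) -> list:
    """(timestamp, component, source location) for every ERROR-level record."""
    return [
        (m.group("ts"), m.group("component"), m.group("location"))
        for m in RECORD_RE.finditer(log_text)
        if m.group("severity") == "ERROR"
    ]
```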
2025-06-25T14:59:07.040370Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:07.116269Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:07.175711Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:10.909999Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901689680461205:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:10.910082Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00205b/r3tmp/tmpCv6Yq7/pdisk_1.dat 2025-06-25T14:59:11.064719Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:11.072574Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:11.072682Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:11.075726Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3222 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
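Each TLocksTest iteration in this report brings up a fresh server over a new pdisk file under r3tmp/ and then prints `TClient is connected to server localhost:<port>` before waiting for 'dc-1'. A last small sketch listing those endpoints in the order the iterations opened them; as before, the helper is illustrative and not part of the test client.

```python
import re

ENDPOINT_RE = re.compile(r"TClient is connected to server (?P<endpoint>\S+:\d+)")

def client_endpoints(log_text: str) -> list:
    """Client endpoints (e.g. 'localhost:9768') in order of appearance."""
    return [m.group("endpoint") for m in ENDPOINT_RE.finditer(log_text)]
```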
2025-06-25T14:59:11.376302Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:59:11.394775Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:11.466702Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:11.524016Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... >> TWebLoginService::AuditLogLoginSuccess [GOOD] >> TWebLoginService::AuditLogLoginBadPassword >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-true >> TSchemeShardLoginTest::DisableBuiltinAuthMechanism [GOOD] >> TSchemeShardLoginTest::FailedLoginUserUnderNameOfGroup >> TSchemeShardSubDomainTest::TopicDiskSpaceQuotas [GOOD] >> TKesusTest::TestQuoterAccountResourcesDeduplicateClient [GOOD] >> TKesusTest::TestQuoterAccountResourcesForgetClient >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW+VolatileTxs [GOOD] >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW-VolatileTxs >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-false >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-true >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW+VolatileTxs [GOOD] >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs >> TWebLoginService::AuditLogAdminLoginSuccess [GOOD] >> TWebLoginService::AuditLogCreateModifyUser >> TSchemeShardLoginTest::TestExternalLoginWithIncorrectLdapDomain [GOOD] >> TSchemeShardLoginTest::ResetFailedAttemptCount >> TWebLoginService::AuditLogLdapLoginBadUser [GOOD] >> TWebLoginService::AuditLogLdapLoginBadBind >> TSchemeShardLoginTest::RemoveUser_Acl-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser_Acl-StrictAclCheck-true >> TSchemeShardSubDomainTest::DiskSpaceUsage [GOOD] >> TWebLoginService::AuditLogLoginBadPassword [GOOD] >> TWebLoginService::AuditLogLdapLoginSuccess >> TWebLoginService::AuditLogLdapLoginBadBind [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-false [GOOD] >> 
TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true >> TSchemeShardLoginTest::FailedLoginUserUnderNameOfGroup [GOOD] >> TSchemeShardLoginTest::FailedLoginWithInvalidUser >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::AccountLockoutAndAutomaticallyUnlock >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-false >> TWebLoginService::AuditLogCreateModifyUser [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::TopicDiskSpaceQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:07.860338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:07.860421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.860459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:07.860497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:07.860539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:07.860561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:07.860630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.860707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:07.861424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:07.861719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:07.940641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:07.940698Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:07.958134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:07.958527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:07.958700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState 
as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:07.964474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:07.964822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:07.965585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.965877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:07.969446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.969631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:07.970889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.970949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.971099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:07.971149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:07.971193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:07.971280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.977958Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:08.098819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:08.099070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.099276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:08.099320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:08.099601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 
2025-06-25T14:59:08.099674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:08.101674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.101870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:08.102051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.102101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:08.102156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:08.102198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:08.104063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.104115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:08.104160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:08.105688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.105756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.105806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.105859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:08.109190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:08.111021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:08.111182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 
72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:08.112085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.112215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:08.112276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.112557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:08.112610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.112761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:08.112859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:08.114711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:08.114782Z node 1 :FLAT_TX_SCHEMESHARD D ... 
046678944, LocalPathId: 2] 2025-06-25T14:59:16.334043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:59:16.334153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:16.334183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-25T14:59:16.334232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-25T14:59:16.334259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-25T14:59:16.334556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:16.334591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-25T14:59:16.334706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:59:16.334738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:59:16.334775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:59:16.334805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:59:16.334848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-25T14:59:16.334890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:59:16.334927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:59:16.334974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:59:16.335102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:59:16.335142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 103, publications: 2, subscribers: 0 2025-06-25T14:59:16.335173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 9 2025-06-25T14:59:16.335203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-25T14:59:16.335892Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: 
ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 2025-06-25T14:59:16.336270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:16.336667Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 2025-06-25T14:59:16.336831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:16.336911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:16.336946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:59:16.336983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-25T14:59:16.337021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:59:16.337435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-25T14:59:16.337805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:16.337896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:16.337927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:59:16.337953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 9 2025-06-25T14:59:16.337980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:16.338034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-25T14:59:16.338890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 
72057594046678944 2025-06-25T14:59:16.338940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:59:16.339010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T14:59:16.341313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-25T14:59:16.343299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:59:16.343412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T14:59:16.343647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:59:16.343734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T14:59:16.344042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T14:59:16.344105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T14:59:16.344553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T14:59:16.344642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:59:16.344675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:809:2715] TestWaitNotification: OK eventTxId 103 2025-06-25T14:59:16.820323Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:16.820572Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1" took 314us result status StatusSuccess 2025-06-25T14:59:16.821075Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1" PathDescription { Self { Name: "USER_1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 
1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 1 } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardLoginTest::RemoveUser_Acl-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DiskSpaceUsage [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:05.604932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:05.605014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:05.605047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:05.605080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:05.605119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:05.605145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:05.605194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:05.605262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:05.605952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:05.606254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:05.679203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:05.679261Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:05.694979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:05.695317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:05.695479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:05.703755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:05.704077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:05.704703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.704966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:05.708052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.708219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:05.709334Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.709404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:05.709548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:05.709588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:05.709627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:05.709701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.715470Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:05.823512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: 
"pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:05.823735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.823928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:05.823967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:05.824198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:05.824279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:05.826407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.826589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:05.826763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.826814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:05.826861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:05.826897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:05.828591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.828639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:05.828672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:05.830241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.830299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:05.830351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 
1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.830388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:05.833646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:05.835275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:05.835437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:05.836260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:05.836402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:05.836453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.836693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:05.836742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:05.836905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:05.836991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:05.838770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:05.838818Z node 1 :FLAT_TX_SCHEMESHARD D ... 
ablePartition, limit 10000 2025-06-25T14:59:17.360823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:17.360874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:17.360935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:17.361754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:17.362136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:17.376720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:17.378299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:17.378494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:17.378711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:17.378767Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:17.378874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:17.379715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:17.379820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: Table1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-25T14:59:17.379876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: Table2, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T14:59:17.379950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.380039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.380509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:17.380635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-25T14:59:17.380688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-25T14:59:17.380759Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:59:17.381041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 4, at schemeshard: 72057594046678944 2025-06-25T14:59:17.381207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.381320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:17.381365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T14:59:17.381413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:59:17.381441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:59:17.381605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:17.381800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.382071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-06-25T14:59:17.382427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.382541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.382976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.383076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.383280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.383378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.383485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.383666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.383753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.383903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.384145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, 
at schemeshard: 72057594046678944 2025-06-25T14:59:17.384221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.384361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.384427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.384478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.389401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:17.392151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:17.392227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:17.393223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:17.393294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:17.393342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:17.395350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:761:2674] sender: [1:821:2058] recipient: [1:15:2062] 2025-06-25T14:59:17.427486Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:17.427783Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 296us result status StatusSuccess 2025-06-25T14:59:17.428351Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Table2" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 
5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1752 DataSize: 1752 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TWebLoginService::AuditLogLdapLoginSuccess [GOOD] >> TWebLoginService::AuditLogLogout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TWebLoginService::AuditLogLdapLoginBadBind [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:15.327582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:15.327657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.327698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:15.327747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:15.327795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:15.327824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:15.327880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.327941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:15.328724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:15.329052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:15.407186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:15.407248Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:15.423304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:15.423645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:15.423809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:15.428892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:15.429168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:15.429784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.430008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:15.434965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.435667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:15.442699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:15.443006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:15.443050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:15.443133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.455678Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:15.606886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:15.607091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.607297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:15.607335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:15.607533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:15.607601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:15.609806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.609992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:15.610146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.610218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:15.610295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:15.610340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:15.612189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.612238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:15.612276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:15.613935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.613979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.614018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T14:59:15.614062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:15.617770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:15.619548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:15.619793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:15.620702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.620846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:15.620899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.621251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:15.621307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.621475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:15.621542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:15.623487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.623542Z node 1 :FLAT_TX_SCHEMESHARD ... 
k/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:17.740460Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:17.740575Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:17.740687Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.740727Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:17.740757Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:17.740795Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:17.742042Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.742091Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:17.742125Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:17.743120Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.743151Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.743190Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:17.743229Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:17.743342Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:17.744291Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:17.744430Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:17.744995Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: 
TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:17.745077Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 17179871342 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:17.745117Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:17.745305Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:17.745348Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:17.745487Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:17.745538Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:17.746691Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:17.746728Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:17.746860Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:17.746903Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T14:59:17.747125Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:17.747159Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T14:59:17.747290Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:59:17.747321Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:59:17.747352Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T14:59:17.747385Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:59:17.747415Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T14:59:17.747446Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T14:59:17.747469Z node 4 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:59:17.747494Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T14:59:17.747545Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:17.747575Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:59:17.747601Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:59:17.747959Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:59:17.748049Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:59:17.748082Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:59:17.748110Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:59:17.748142Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:17.748205Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:59:17.750080Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:59:17.750391Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:17.751302Z node 4 :HTTP WARN: login_page.cpp:102: 127.0.0.1:0 POST /login 2025-06-25T14:59:17.751432Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [4:272:2261] Bootstrap 2025-06-25T14:59:17.767027Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [4:272:2261] Become StateWork (SchemeCache [4:281:2270]) 2025-06-25T14:59:17.767124Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:3780, port: 3780 2025-06-25T14:59:17.767196Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-25T14:59:17.769819Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:3780. 
Invalid credentials 2025-06-25T14:59:17.770268Z node 4 :HTTP ERROR: login_page.cpp:209: Login fail for user1@ldap: Could not login via LDAP 2025-06-25T14:59:17.770548Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [4:272:2261] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:59:17.772016Z node 4 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 AUDIT LOG buffer(2): 2025-06-25T14:59:17.740544Z: component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-06-25T14:59:17.770045Z: component=grpc-login, remote_address=localhost, database=/MyRoot, operation=LOGIN, status=ERROR, detailed_status=UNAUTHORIZED, reason=Could not login via LDAP: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:3780. Invalid credentials, login_user=user1@ldap, sanitized_token={none} AUDIT LOG checked line: 2025-06-25T14:59:17.770045Z: component=grpc-login, remote_address=localhost, database=/MyRoot, operation=LOGIN, status=ERROR, detailed_status=UNAUTHORIZED, reason=Could not login via LDAP: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:3780. Invalid credentials, login_user=user1@ldap, sanitized_token={none} >> TSchemeShardLoginTest::FailedLoginWithInvalidUser [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TWebLoginService::AuditLogCreateModifyUser [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:15.326398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:15.326472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.326507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:15.326538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:15.326568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:15.326602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:15.326647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.326694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] 
Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:15.327235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:15.328492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:15.410401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:15.410466Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:15.425932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:15.426263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:15.426417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:15.431207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:15.431485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:15.432109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.432346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:15.435063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.435687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:15.442608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:15.442862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:15.442962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:15.443041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.448449Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:15.553777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" 
OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:15.555032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.555221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:15.555265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:15.556454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:15.556536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:15.559394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.560221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:15.560479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.560627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:15.560683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:15.560723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:15.562678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.562731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:15.562770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:15.564452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.564493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.564552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.564609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:15.568788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:15.570538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:15.571662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:15.572650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.572795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:15.572872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.574314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:15.574420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.574589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:15.574725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:15.576652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.576694Z node 1 :FLAT_TX_SCHEMESHARD ... 
peration: MODIFY USER, path: /MyRoot 2025-06-25T14:59:17.786345Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:17.786385Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:17.786540Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:17.786594Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 105, path id: 1 2025-06-25T14:59:17.787313Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:59:17.787415Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:59:17.787463Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:59:17.787511Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-25T14:59:17.787559Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:17.787650Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-25T14:59:17.789094Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 AUDIT LOG buffer(6): 2025-06-25T14:59:17.701557Z: component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-06-25T14:59:17.722345Z: component=schemeshard, tx_id=101, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=CREATE USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1 2025-06-25T14:59:17.757841Z: component=schemeshard, tx_id=102, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-06-25T14:59:17.766718Z: component=schemeshard, tx_id=103, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, 
login_user_change=[blocking] 2025-06-25T14:59:17.775059Z: component=schemeshard, tx_id=104, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[unblocking] 2025-06-25T14:59:17.783555Z: component=schemeshard, tx_id=105, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] AUDIT LOG checked line: 2025-06-25T14:59:17.783555Z: component=schemeshard, tx_id=105, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-06-25T14:59:17.791882Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { ModifyUser { User: "user1" Password: "password1" CanLogin: false } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:17.798722Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:17.798841Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T14:59:17.798880Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:17.798923Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T14:59:17.798967Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:17.799025Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:17.799083Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-06-25T14:59:17.799147Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:17.799187Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:0 2025-06-25T14:59:17.799227Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-06-25T14:59:17.799262Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-25T14:59:17.801511Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:17.801620Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: 
/MyRoot, subject: , status: StatusSuccess, operation: MODIFY USER, path: /MyRoot 2025-06-25T14:59:17.801795Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:17.801837Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:17.801989Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:17.802044Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-06-25T14:59:17.802541Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T14:59:17.802650Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T14:59:17.802690Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-06-25T14:59:17.802728Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-25T14:59:17.802989Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:17.803074Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-06-25T14:59:17.804640Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 AUDIT LOG buffer(7): 2025-06-25T14:59:17.701557Z: component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-06-25T14:59:17.722345Z: component=schemeshard, tx_id=101, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=CREATE USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1 2025-06-25T14:59:17.757841Z: component=schemeshard, tx_id=102, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-06-25T14:59:17.766718Z: component=schemeshard, tx_id=103, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, 
login_user_level=admin, login_user=user1, login_user_change=[blocking] 2025-06-25T14:59:17.775059Z: component=schemeshard, tx_id=104, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[unblocking] 2025-06-25T14:59:17.783555Z: component=schemeshard, tx_id=105, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-06-25T14:59:17.798614Z: component=schemeshard, tx_id=106, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password, blocking] AUDIT LOG checked line: 2025-06-25T14:59:17.798614Z: component=schemeshard, tx_id=106, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password, blocking] >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsert >> UpsertLoad::ShouldWriteKqpUpsert2 >> ReadLoad::ShouldReadIterate >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckNewGeneration >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration >> TPQTest::TestManyConsumers [GOOD] >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 >> TFqYdbTest::ShouldStatusToIssuesProcessExceptions [GOOD] >> TKesusTest::TestQuoterAccountLabels [GOOD] >> TKesusTest::TestPassesUpdatedPropsToSession >> TWebLoginService::AuditLogLogout [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::FailedLoginWithInvalidUser [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:15.326395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:15.326486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.326532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:15.326576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:15.326620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:15.326650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:15.326710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.326771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:15.327583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:15.328474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:15.410381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:15.410439Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:15.426064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:15.426396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:15.426552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:15.431705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:15.431975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:15.432572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.432783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:15.435840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.435995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:15.442654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:15.442916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:15.442963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: 
TTxServerlessStorageBilling.Complete 2025-06-25T14:59:15.443035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.449650Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:15.583910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:15.584147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.584339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:15.584383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:15.584611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:15.584684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:15.587547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.587715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:15.587854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.587915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:15.587980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:15.588025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:15.590566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.590638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 
2025-06-25T14:59:15.590682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:15.594690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.594743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.594792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.594846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:15.597783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:15.600015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:15.600204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:15.601078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.601222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:15.601276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.601526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:15.601578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.601718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:15.601850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:15.603625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.603666Z node 1 :FLAT_TX_SCHEMESHARD ... T14:59:18.172170Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:18.172215Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T14:59:18.172257Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T14:59:18.173179Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:59:18.173281Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T14:59:18.173332Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T14:59:18.173370Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T14:59:18.173413Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:18.173506Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T14:59:18.175619Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T14:59:18.176055Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:18.176451Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [5:274:2263] Bootstrap 2025-06-25T14:59:18.198200Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [5:274:2263] Become StateWork (SchemeCache [5:279:2268]) 2025-06-25T14:59:18.198690Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:18.198897Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 235us result status StatusSuccess 2025-06-25T14:59:18.199317Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: 
"root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:18.199928Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [5:274:2263] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:59:18.201860Z node 5 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } 
PathId: 1 PathOwnerId: 72057594046678944 2025-06-25T14:59:18.202586Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:18.202630Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:47: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-25T14:59:18.273344Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Error: "Cannot find user: user1", at schemeshard: 72057594046678944 2025-06-25T14:59:18.273459Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:18.273492Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.273658Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:18.273698Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:211:2211], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-25T14:59:18.274114Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 0 2025-06-25T14:59:18.274457Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:18.274648Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 215us result status StatusSuccess 2025-06-25T14:59:18.275067Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA683GQlw4swkK1qSrNV6U\nW0wewOyBhEr1Paq5L6oc/+teq7jrFzSGuN+48F8p88Q24zqDBDJ0bJrJzx55Exry\nQuAVzaIl2/kNwzwdm3oaJ0H7xN1tlHNsfzBMbv1bL0+lOnKrduUOX1HdoNgnNaXq\ndwjsZRrhRKImd6jpVJxXNx4prTyo/BgugaMpzLxYjDy1ZoptZ/QJf+wNxKRIH+zv\nrZsCpAeGDr46KrI8yT7QH/n+ZWfUUggMkX/VYjWuwJUPj5+5D0UIv4xh2wFBFZai\nf0wVR6AcqXXkJRTlhfX+Xbcq3yqOQmyG5CRHWqxJq8mIqFZ2yX2//16X2/EXz9n8\nJwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750949958271 } Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/join/unittest >> KqpJoinOrder::TPCDSEveryQueryWorks+ColumnStore 2025-06-25 14:59:16,196 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 14:59:16,455 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 440437 47.6M 46.5M 24.2M test_tool run_ut @/home/runner/.ya/build/build_root/yft8/000d6d/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk172/testing_out_stuff/test_tool.args 440490 3.1G 3.1G 2.7G └─ ydb-core-kqp-ut-join --trace-path-append /home/runner/.ya/build/build_root/yft8/000d6d/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk172/ytest.report Test command err: Trying to start YDB, gRPC: 16255, MsgBus: 16335 2025-06-25T14:49:18.458111Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519899146345083939:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:18.458410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d6d/r3tmp/tmpjSVskF/pdisk_1.dat 2025-06-25T14:49:19.263249Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519899146345083801:2080] 1750862958429105 != 1750862958429108 2025-06-25T14:49:19.275208Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:49:19.284983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:49:19.285067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:49:19.294177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16255, node 1 2025-06-25T14:49:19.456372Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:49:19.572548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, 
will use file: (empty maybe) 2025-06-25T14:49:19.572565Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:49:19.572572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:49:19.572666Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16335 TClient is connected to server localhost:16335 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:49:20.383064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:49:20.418211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T14:49:22.776804Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899163524953639:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:22.776805Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519899163524953628:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:22.776913Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:49:22.780391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:49:22.790363Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519899163524953642:2298], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:49:22.860945Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519899163524953693:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:49:23.452421Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519899146345083939:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:49:23.452504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:49:23.837944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:49:24.186675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899167819921381:2331];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:24.186893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899167819921381:2331];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:24.187195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899167819921381:2331];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:24.187319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899167819921381:2331];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:24.187409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899167819921381:2331];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:24.187530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899167819921381:2331];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:24.187644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899167819921381:2331];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:24.187735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899167819921381:2331];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:49:24.187831Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899167819921381:2331];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:49:24.187961Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899167819921381:2331];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:49:24.188075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037900;self_id=[1:7519899167819921381:2331];tablet_id=72075186224037900;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:49:24.191395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899172114888710:2338];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:49:24.191440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899172114888710:2338];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:49:24.191632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899172114888710:2338];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:49:24.191727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899172114888710:2338];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:49:24.191831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899172114888710:2338];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:49:24.191949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899172114888710:2338];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:49:24.192059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899172114888710:2338];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:49:24.192161Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519899172114888710:2338];tabl ... 
class_id\n and this_year.i_category_id = last_year.i_category_id\n order by ty_channel, ty_brand, ty_class, ty_category\n limit 100;\n\n-- end query 1 in stream 0 using template query14.tpl\n", parameters: 0b 2025-06-25T14:57:25.686668Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyksffmyabdrgrvdkkzkbvvf", SessionId: ydb://session/3?node_id=1&id=YjJhOGU1ZjAtMjBjNDhhMzgtNjU2MzUyN2YtYmFmMDMzM2M=, Slow query, duration: 16.408095s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- NB: Subquerys\n$orders_with_several_warehouses = (\n select cs_order_number\n from `/Root/test/ds/catalog_sales` as catalog_sales\n group by cs_order_number\n having count(distinct cs_warehouse_sk) > 1\n);\n\n-- start query 1 in stream 0 using template query16.tpl and seed 171719422\nselect\n count(distinct cs1.cs_order_number) as `order count`\n ,sum(cs_ext_ship_cost) as `total shipping cost`\n ,sum(cs_net_profit) as `total net profit`\nfrom\n `/Root/test/ds/catalog_sales` cs1\n cross join `/Root/test/ds/date_dim` as date_dim\n cross join `/Root/test/ds/customer_address` as customer_address\n cross join `/Root/test/ds/call_center` as call_center\n left semi join $orders_with_several_warehouses cs2 on cs1.cs_order_number = cs2.cs_order_number\n left only join `/Root/test/ds/catalog_returns` cr1 on cs1.cs_order_number = cr1.cr_order_number\nwhere\n cast(d_date as date) between cast('2002-2-01' as date) and\n (cast('2002-2-01' as date) + DateTime::IntervalFromDays(60))\nand cs1.cs_ship_date_sk = d_date_sk\nand cs1.cs_ship_addr_sk = ca_address_sk\nand ca_state = 'GA'\nand cs1.cs_call_center_sk = cc_call_center_sk\nand cc_county in ('Williamson County','Williamson County','Williamson County','Williamson County',\n 'Williamson County'\n)\norder by `order count`\nlimit 100;\n\n-- end query 1 in stream 0 using template query16.tpl\n", parameters: 0b 2025-06-25T14:57:44.635533Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyksfzpc0rs30jfm16z0drsr", SessionId: ydb://session/3?node_id=1&id=YjJhOGU1ZjAtMjBjNDhhMzgtNjU2MzUyN2YtYmFmMDMzM2M=, Slow query, duration: 18.926198s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query17.tpl and seed 1819994127\n$nantonull = ($n) -> {\n return case when Math::IsNaN($n)\n then null\n else $n\n 
end;\n};\n\nselect item.i_item_id\n ,item.i_item_desc\n ,store.s_state\n ,count(ss_quantity) as store_sales_quantitycount\n ,avg(ss_quantity) as store_sales_quantityave\n ,$nantonull(stddev_samp(ss_quantity)) as store_sales_quantitystdev\n ,$nantonull(stddev_samp(ss_quantity)/avg(ss_quantity)) as store_sales_quantitycov\n ,count(sr_return_quantity) as store_returns_quantitycount\n ,avg(sr_return_quantity) as store_returns_quantityave\n ,$nantonull(stddev_samp(sr_return_quantity)) as store_returns_quantitystdev\n ,$nantonull(stddev_samp(sr_return_quantity)/avg(sr_return_quantity)) as store_returns_quantitycov\n ,count(cs_quantity) as catalog_sales_quantitycount ,avg(cs_quantity) as catalog_sales_quantityave\n ,$nantonull(stddev_samp(cs_quantity)) as catalog_sales_quantitystdev\n ,$nantonull(stddev_samp(cs_quantity)/avg(cs_quantity)) as catalog_sales_quantitycov\n from `/Root/test/ds/store_sales` as store_sales\n cross join `/Root/test/ds/store_returns` as store_returns\n cross join `/Root/test/ds/catalog_sales` as catalog_sales\n cross join `/Root/test/ds/date_dim` d1\n cross join `/Root/test/ds/date_dim` d2\n cross join `/Root/test/ds/date_dim` d3\n cross join `/Root/test/ds/store` as store\n cross join `/Root/test/ds/item` as item\n where d1.d_quarter_name = '2001Q1'\n and d1.d_date_sk = ss_sold_date_sk\n and i_item_sk = ss_item_sk\n and s_store_sk = ss_store_sk\n and ss_customer_sk = sr_customer_sk\n and ss_item_sk = sr_item_sk\n and ss_ticket_number = sr_ticket_number\n and sr_returned_date_sk = d2.d_date_sk\n and d2.d_quarter_name in ('2001Q1','2001Q2','2001Q3')\n and sr_customer_sk = cs_bill_customer_sk\n and sr_item_sk = cs_item_sk\n and cs_sold_date_sk = d3.d_date_sk\n and d3.d_quarter_name in ('2001Q1','2001Q2','2001Q3')\n group by item.i_item_id\n ,item.i_item_desc\n ,store.s_state\n order by item.i_item_id\n ,item.i_item_desc\n ,store.s_state\nlimit 100;\n\n-- end query 1 in stream 0 using template query17.tpl\n", parameters: 0b 2025-06-25T14:58:37.403734Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyksgj6c8z5rd0x9vp0n7fhh", SessionId: ydb://session/3?node_id=1&id=YjJhOGU1ZjAtMjBjNDhhMzgtNjU2MzUyN2YtYmFmMDMzM2M=, Slow query, duration: 52.749896s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- TODO this commit should be reverted upon proper fix for https://github.com/ydb-platform/ydb/issues/7565\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query18.tpl and seed 1978355063\nselect item.i_item_id i_item_id,\n customer_address.ca_country ca_country,\n customer_address.ca_state ca_state,\n customer_address.ca_county ca_county,\n avg( cast(cs_quantity as float)) agg1,\n avg( cast(cs_list_price as float)) agg2,\n avg( cast(cs_coupon_amt as float)) agg3,\n avg( cast(cs_sales_price as float)) agg4,\n avg( cast(cs_net_profit as float)) agg5,\n avg( cast(c_birth_year as float)) agg6,\n avg( cast(cd1.cd_dep_count as float)) agg7\n from 
`/Root/test/ds/catalog_sales` as catalog_sales\n cross join `/Root/test/ds/customer_demographics` cd1\n cross join `/Root/test/ds/date_dim` as date_dim\n cross join `/Root/test/ds/customer` as customer\n cross join `/Root/test/ds/customer_demographics` cd2\n cross join `/Root/test/ds/customer_address` as customer_address\n cross join `/Root/test/ds/item` as item\n where cs_sold_date_sk = d_date_sk and\n cs_item_sk = i_item_sk and\n cs_bill_cdemo_sk = cd1.cd_demo_sk and\n cs_bill_customer_sk = c_customer_sk and\n cd1.cd_gender = 'F' and\n cd1.cd_education_status = 'Unknown' and\n c_current_cdemo_sk = cd2.cd_demo_sk and\n c_current_addr_sk = ca_address_sk and\n c_birth_month in (1,6,8,9,12,2) and\n d_year = 1998 and\n ca_state in ('MS','IN','ND'\n ,'OK','NM','VA','MS')\n group by rollup (item.i_item_id, customer_address.ca_country, customer_address.ca_state, customer_address.ca_county)\n order by ca_country,\n ca_state,\n ca_county,\n\ti_item_id, agg6\n limit 100;\n\n-- end query 1 in stream 0 using template query18.tpl\n", parameters: 0b 2025-06-25T14:58:49.080531Z node 1 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyksj5rhcmzm4a18xx6kdak2", SessionId: ydb://session/3?node_id=1&id=YjJhOGU1ZjAtMjBjNDhhMzgtNjU2MzUyN2YtYmFmMDMzM2M=, Slow query, duration: 11.599147s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "pragma warning(\"disable\", \"4527\");\n\n$z0 = 0;\n$z1_2 = 1.2;\n$z1_3 = 1.3;\n$z0_9 = 0.9;\n$z0_99 = 0.99;\n$z1_49 = 1.49;\n\n$z0_35 = 0;\n$z0_1_35 = 0.1;\n$z1_2_35 = 1.2;\n$z0_05_35 = 0.05;\n$z0_9_35 = 0.9;\n$z1_1_35 = 1.1;\n$z0_5_35 = 0.5;\n$z100_35 = 100.;\n$z0_0001_35 = 0.0001;\n$z7_35 = 7.;\n\n$z0_12 = 0.;\n$z1_12 = 1;\n$z0_0100001_12 = 0.0100001;\n$z0_06_12 = 0.06;\n$z0_2_12 = 0.2;\n\n$scale_factor = 1;\n\n$round = ($x, $y) -> { return Math::Round($x, $y); };\n$upscale = ($x) -> { return $x; };\n\n$todecimal = ($x, $p, $s) -> { return cast($x as double); };\n\n\n\n-- NB: Subquerys\n-- start query 1 in stream 0 using template query19.tpl and seed 1930872976\nselect item.i_brand_id brand_id, item.i_brand brand, item.i_manufact_id, item.i_manufact,\n \tsum(ss_ext_sales_price) ext_price\n from `/Root/test/ds/date_dim` as date_dim \n cross join `/Root/test/ds/store_sales` as store_sales \n cross join `/Root/test/ds/item` as item\n cross join `/Root/test/ds/customer` as customer\n cross join `/Root/test/ds/customer_address` as customer_address\n cross join `/Root/test/ds/store` as store\n where d_date_sk = ss_sold_date_sk\n and ss_item_sk = i_item_sk\n and i_manager_id=8\n and d_moy=11\n and d_year=1998\n and ss_customer_sk = c_customer_sk\n and c_current_addr_sk = ca_address_sk\n and substring(cast(ca_zip as string),0,5) <> substring(cast(s_zip as string),0,5)\n and ss_store_sk = s_store_sk\n group by item.i_brand\n ,item.i_brand_id\n ,item.i_manufact_id\n ,item.i_manufact\n order by ext_price desc\n ,brand_id\n ,brand\n ,item.i_manufact_id\n ,item.i_manufact\nlimit 100 ;\n\n-- end query 1 in stream 0 using template query19.tpl\n", parameters: 0b Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', 
'@/home/runner/.ya/build/build_root/yft8/000d6d/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk172/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/000d6d/ydb/core/kqp/ut/join/test-results/unittest/testing_out_stuff/chunk172/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:15.337334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:15.337409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.337444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:15.337490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:15.337533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:15.337552Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:15.337596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.337669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:15.338337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:15.338631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:15.406453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 
2025-06-25T14:59:15.406527Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:15.421609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:15.422005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:15.422203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:15.428198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:15.428520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:15.429177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.429429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:15.435022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.435653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:15.442665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:15.442933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:15.442983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:15.443062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.449607Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:15.572817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:15.573069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.573295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId 
[OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:15.573336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:15.573544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:15.573596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:15.575432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.575583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:15.575744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.575822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:15.575862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:15.575919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:15.577572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.577617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:15.577651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:15.579153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.579193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.579231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.579278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:15.582313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } 
CoordinatorID: 72057594046316545 2025-06-25T14:59:15.588058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:15.588224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:15.588986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.589102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:15.589151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.589437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:15.589491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.589615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:15.589716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:15.593518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.593572Z node 1 :FLAT_TX_SCHEMESHARD ... 
: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-25T14:59:18.350745Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:18.350819Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-25T14:59:18.353181Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:59:18.353336Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 2025-06-25T14:59:18.353764Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dir1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:18.353919Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dir1" took 186us result status StatusSuccess 2025-06-25T14:59:18.354256Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dir1" PathDescription { Self { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 2 EffectiveACLVersion: 2 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 106 2025-06-25T14:59:18.357057Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveGroup { Group: 
"group1" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:18.357252Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5302: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 1] name: MyRoot type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.357291Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.357337Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5302: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 2] name: Dir1 type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.357370Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:18.357721Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:18.357817Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T14:59:18.357855Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:18.357900Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T14:59:18.357937Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:18.358085Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:18.358146Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-06-25T14:59:18.358184Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:18.358220Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:0 2025-06-25T14:59:18.358261Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-06-25T14:59:18.358301Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-25T14:59:18.360050Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:18.360148Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: REMOVE GROUP, path: /MyRoot 2025-06-25T14:59:18.360346Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:18.360391Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.360547Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:18.360594Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:211:2211], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-06-25T14:59:18.361062Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T14:59:18.361160Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T14:59:18.361224Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-06-25T14:59:18.361265Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-25T14:59:18.361310Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:18.361425Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-06-25T14:59:18.362951Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 TestModificationResult got TxId: 106, wait until txId: 106 2025-06-25T14:59:18.363476Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:18.363644Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 197us result status StatusSuccess 2025-06-25T14:59:18.364069Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: true } Children { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:15.327890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:15.327965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.328015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:15.328069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:15.328108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:15.328139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:15.328194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.328250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:59:15.329045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:15.329360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:15.415373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:15.415435Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:15.430732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:15.431055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:15.431202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:15.436297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:15.436612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:15.437221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.437470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:15.440402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.440577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:15.442645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:15.442908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:15.442952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:15.443025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.449409Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:15.582322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:15.582534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.582724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:15.582759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:15.582954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:15.583093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:15.585107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.585292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:15.585434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.585500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:15.585554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:15.585596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:15.587523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.587576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:15.587623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:15.589219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.589260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.589299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.589347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:15.592918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:15.594629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:15.594812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:15.595675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.595801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:15.595856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.596121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:15.596175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.596340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:15.596477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:15.598299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.598343Z node 1 :FLAT_TX_SCHEMESHARD ... 
: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-25T14:59:18.487289Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:18.487393Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-25T14:59:18.490539Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:59:18.490760Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 2025-06-25T14:59:18.491334Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dir1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:18.491553Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dir1" took 260us result status StatusSuccess 2025-06-25T14:59:18.491996Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dir1" PathDescription { Self { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 106 2025-06-25T14:59:18.495378Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveGroup { Group: 
"group1" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:18.495595Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5302: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 1] name: MyRoot type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.495641Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.495696Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5302: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 2] name: Dir1 type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.495734Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5318: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:18.496058Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:18.496205Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T14:59:18.496254Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:18.496301Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T14:59:18.496365Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:18.496433Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:18.496503Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-06-25T14:59:18.496557Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:18.496606Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:0 2025-06-25T14:59:18.496652Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-06-25T14:59:18.496702Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-25T14:59:18.499301Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:18.499428Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: REMOVE GROUP, path: /MyRoot 2025-06-25T14:59:18.499651Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:18.499711Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.499904Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:18.499965Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:211:2211], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-06-25T14:59:18.500596Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T14:59:18.500722Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T14:59:18.500772Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-06-25T14:59:18.500822Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-25T14:59:18.500872Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:18.501003Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-06-25T14:59:18.502994Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 TestModificationResult got TxId: 106, wait until txId: 106 2025-06-25T14:59:18.503599Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:18.503805Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 235us result status StatusSuccess 2025-06-25T14:59:18.504341Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: true } Children { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:15.327642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:15.327754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.327807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:15.327859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:15.327908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:15.327938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:15.328004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.328081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-25T14:59:15.328916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:15.329270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:15.415624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:15.415692Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:15.432904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:15.433296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:15.433460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:15.439420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:15.439732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:15.440448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.440710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:15.444177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.444386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:15.445514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.445588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.445752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:15.445802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:15.445867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:15.445971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.452391Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:15.564522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:15.564766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.564986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:15.565035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:15.565350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:15.565451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:15.567751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.567937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:15.568091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.568144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:15.568188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:15.568224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:15.569662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.569709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:15.569742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:15.571075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.571108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.571138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.571176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:15.573993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:15.575813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:15.576031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:15.576962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.577068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:15.577117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.577324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:15.577385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.577495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:15.577630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:15.579217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.579251Z node 1 :FLAT_TX_SCHEMESHARD ... 
06-25T14:59:18.524401Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:18.524437Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:59:18.524558Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:18.524646Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:18.524686Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:211:2211], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-06-25T14:59:18.524727Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:211:2211], at schemeshard: 72057594046678944, txId: 105, path id: 2 2025-06-25T14:59:18.525310Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:59:18.525416Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:59:18.525459Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:59:18.525504Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 5 2025-06-25T14:59:18.525557Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T14:59:18.525934Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:59:18.526008Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T14:59:18.526039Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-25T14:59:18.526067Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-06-25T14:59:18.526097Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove 
publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T14:59:18.526157Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-25T14:59:18.528683Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T14:59:18.528982Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 TestModificationResults wait txId: 106 2025-06-25T14:59:18.531873Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "user1" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:18.532263Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:18.532381Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T14:59:18.532433Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:18.532486Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T14:59:18.532526Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:18.532577Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:18.532631Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-06-25T14:59:18.532664Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T14:59:18.532697Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:0 2025-06-25T14:59:18.532732Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-06-25T14:59:18.532765Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 10 2025-06-25T14:59:18.534524Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:18.534616Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: REMOVE USER, path: /MyRoot 2025-06-25T14:59:18.534766Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 
72057594046678944 2025-06-25T14:59:18.534802Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.534951Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:18.534999Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:211:2211], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-06-25T14:59:18.535422Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T14:59:18.535509Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T14:59:18.535546Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-06-25T14:59:18.535580Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 10 2025-06-25T14:59:18.535616Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T14:59:18.535694Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-06-25T14:59:18.537089Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 TestModificationResult got TxId: 106, wait until txId: 106 2025-06-25T14:59:18.537527Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dir1/DirSub1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:18.537702Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dir1/DirSub1" took 187us result status StatusSuccess 2025-06-25T14:59:18.538035Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dir1/DirSub1" PathDescription { Self { Name: "DirSub1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 103 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "user2" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 
ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:18.538564Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:18.538667Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Error: "Cannot find user: user1", at schemeshard: 72057594046678944 >> TKesusTest::TestPassesUpdatedPropsToSession [GOOD] >> TFqYdbTest::ShouldStatusToIssuesProcessEmptyIssues [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TWebLoginService::AuditLogLogout [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:16.450206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:16.450282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:16.450320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:16.450358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:16.450398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:16.450437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:16.450511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:16.450569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:16.451117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:16.451396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:16.513220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:16.513296Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:16.527647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:16.527988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:16.528123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:16.532543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:16.532806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:16.533228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:16.533450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:16.536037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:16.536211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:16.537041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:16.537097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:16.537236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:16.537270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:16.537299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:16.537369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:16.542874Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:16.651309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:16.651475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:16.651624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:16.651664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:16.651875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:16.651948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:16.654047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:16.654190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:16.654319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:16.654361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:16.654395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:16.654424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:16.655887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:16.655927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:16.655950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:16.657282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:16.657313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:16.657342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T14:59:16.657370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:16.659974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:16.661240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:16.661395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:16.661956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:16.662046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:16.662088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:16.662395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:16.662431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:16.662531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:16.662571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:16.663840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:16.663870Z node 1 :FLAT_TX_SCHEMESHARD ... 
node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:18.725520Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T14:59:18.725552Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:18.725597Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:18.725646Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T14:59:18.725678Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T14:59:18.725711Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T14:59:18.725746Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 1, subscribers: 0 2025-06-25T14:59:18.725775Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4 2025-06-25T14:59:18.726517Z node 4 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [4:272:2261] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T14:59:18.729054Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusSuccess TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:18.729150Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSuccess, operation: CREATE USER, path: /MyRoot 2025-06-25T14:59:18.729319Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:18.729363Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.729509Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:18.729551Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-25T14:59:18.730007Z node 4 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T14:59:18.730112Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:18.730192Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 
72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:18.730230Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:18.730268Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-25T14:59:18.730305Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:18.730383Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:59:18.731474Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 2025-06-25T14:59:18.731805Z node 4 :HTTP WARN: login_page.cpp:102: 127.0.0.1:0 POST /login 2025-06-25T14:59:18.733171Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:18.733209Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:47: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-25T14:59:18.793384Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwOTA2NzU4LCJpYXQiOjE3NTA4NjM1NTgsInN1YiI6InVzZXIxIn0.TiwxMTuQzts7oVK0WRj_6cDZh-z__oQu4hnFhuUoQ3m8z3yaT6O9taf81rDm8zkz_p7NFoKhcsZHJBToCkfSiAB_U7hYpQhqzxIvnPnO6vL-zGmQuWlfpOkQJr7b4SjBO2tWhNHVc2585YWBlUXOniNabQGEszbqjMvzlsW2KI4pDdx2KGKgHVCwKAiXOufpmJmxUGgM9VuYeOOAjY9T9WxnkWPvND6G8oMMhZyJp8BqiozEq8kf8Brke7OsiaiM0eoQwKk8NnhF5kOo0Cml0xRxczIy03JodZw6dIbjkAjD8SMszWishN33_AtdU3tQfBpJI6TgSsbu25fCbdFbQQ" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwOTA2NzU4LCJpYXQiOjE3NTA4NjM1NTgsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-06-25T14:59:18.793665Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:18.793702Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.793862Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:18.793904Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:209:2209], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-25T14:59:18.794791Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 0 2025-06-25T14:59:18.795353Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, 
record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:18.795503Z node 4 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 163us result status StatusSuccess 2025-06-25T14:59:18.795869Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA7LcwsrFEsxDvqYhOKNh7\n0FZ8XspNwR26JoTA0G32IaB8IqhIkNAMVee0ZGNFBehdnuPsoM0sqRgQhxEYwqjE\nrh0LPDl4XvmsPXaiOzvG1tIWj/bOTvySIKsYV4aHyYxVeaJ744hoqhlWDvLsPrx9\ncu9Oyf92ZRB+fvJdCba+ArmXVSoSe31j4N4LBkhG1+CGi2CaX2p6fpcWafuBWOOn\nY/UL5nxX0kmLttIiNkBuKnk4FoB6rprXsn26KKgZI0Sk3qmHAOrYWPQegnU00Lp1\n8wOcJ1nISeytF0g/c+tYCguly9df6ZxQFfj3HH4SMj9DyreIiJhxeocR+xsssKsn\n8wIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750949958785 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:18.797357Z node 4 :HTTP WARN: login_page.cpp:248: 127.0.0.1:0 POST /logout 2025-06-25T14:59:18.797418Z node 4 :HTTP ERROR: login_page.cpp:326: Logout: No ydb_session_id cookie 2025-06-25T14:59:18.797790Z node 4 :HTTP WARN: login_page.cpp:248: 127.0.0.1:0 POST /logout 2025-06-25T14:59:18.805494Z node 4 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (589A015B): Token is not in correct format 2025-06-25T14:59:18.805615Z node 4 :HTTP ERROR: login_page.cpp:326: Logout: Token is not in correct format 2025-06-25T14:59:18.806142Z node 4 :HTTP WARN: login_page.cpp:248: 127.0.0.1:0 POST /logout AUDIT LOG buffer(4): 2025-06-25T14:59:18.685546Z: component=schemeshard, tx_id=1, 
remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-06-25T14:59:18.725214Z: component=schemeshard, tx_id=101, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=CREATE USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1 2025-06-25T14:59:18.793532Z: component=grpc-login, remote_address=localhost, database=/MyRoot, operation=LOGIN, status=SUCCESS, login_user=user1, sanitized_token=eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwOTA2NzU4LCJpYXQiOjE3NTA4NjM1NTgsInN1YiI6InVzZXIxIn0.**, login_user_level=admin 2025-06-25T14:59:18.807541Z: component=web-login, remote_address=127.0.0.1, subject=user1, sanitized_token=eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwOTA2NzU4LCJpYXQiOjE3NTA4NjM1NTgsInN1YiI6InVzZXIxIn0.**, operation=LOGOUT, status=SUCCESS AUDIT LOG checked line: 2025-06-25T14:59:18.807541Z: component=web-login, remote_address=127.0.0.1, subject=user1, sanitized_token=eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwOTA2NzU4LCJpYXQiOjE3NTA4NjM1NTgsInN1YiI6InVzZXIxIn0.**, operation=LOGOUT, status=SUCCESS >> KqpStreamLookup::ReadTableDuringSplit >> TRegisterCheckTest::ShouldRegisterCheckNewGeneration [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration [GOOD] >> KqpStreamLookup::ReadTableWithIndexDuringSplit >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 [GOOD] >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails [GOOD] >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestPassesUpdatedPropsToSession [GOOD] Test command err: 2025-06-25T14:59:10.243206Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.243312Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.263162Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.263265Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.289056Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.299450Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:136:2160], cookie=4420764214714422942, path="/Root", config={ MaxUnitsPerSecond: 300 PrefetchCoefficient: 1 }) 2025-06-25T14:59:10.300261Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-25T14:59:10.312144Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:136:2160], cookie=4420764214714422942) 2025-06-25T14:59:10.312723Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:146:2168], cookie=2987639832497281305, path="/Root/Res", config={ }) 2025-06-25T14:59:10.312932Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res" 2025-06-25T14:59:10.324796Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] 
TTxQuoterResourceAdd::Complete (sender=[1:146:2168], cookie=2987639832497281305) 2025-06-25T14:59:10.329050Z node 1 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [1:151:2173]. Cookie: 12604777436692232365. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { BillingPeriodSec: 2 } OnDemand { BillingPeriodSec: 2 } Overshoot { BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:10.329127Z node 1 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[1:151:2173], cookie=12604777436692232365) 2025-06-25T14:59:10.329698Z node 1 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [1:151:2173]. Cookie: 9277700536425516158. Data: { ResourcesInfo { ResourceId: 2 AcceptedUs: 29000 } } 2025-06-25T14:59:10.329767Z node 1 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[1:151:2173], cookie=9277700536425516158) 2025-06-25T14:59:12.539311Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:12.539399Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:12.554841Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:12.555353Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:12.578776Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:12.579146Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[2:136:2160], cookie=9848478966398711084, path="/Root", config={ MaxUnitsPerSecond: 300 PrefetchCoefficient: 1 }) 2025-06-25T14:59:12.579418Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-25T14:59:12.591130Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[2:136:2160], cookie=9848478966398711084) 2025-06-25T14:59:12.591908Z node 2 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [2:146:2168]. Cookie: 11929409861186345678. 
Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedUnitsPerSecond: 100 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { Enabled: true BillingPeriodSec: 2 } OnDemand { Enabled: true BillingPeriodSec: 2 } Overshoot { Enabled: true BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:12.591962Z node 2 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[2:146:2168], cookie=11929409861186345678) 2025-06-25T14:59:12.592487Z node 2 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [2:146:2168]. Cookie: 6527285580630285981. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedUnitsPerSecond: 100 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { Enabled: true BillingPeriodSec: 2 } OnDemand { Enabled: true BillingPeriodSec: 2 } Overshoot { Enabled: true BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:12.592556Z node 2 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[2:146:2168], cookie=6527285580630285981) 2025-06-25T14:59:12.593009Z node 2 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [2:146:2168]. Cookie: 13998047445171656892. Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 1017500 } } 2025-06-25T14:59:12.593051Z node 2 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[2:146:2168], cookie=13998047445171656892) 2025-06-25T14:59:12.593464Z node 2 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [2:146:2168]. Cookie: 10862653805537265566. 
Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 1017500 } } 2025-06-25T14:59:12.593510Z node 2 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[2:146:2168], cookie=10862653805537265566) 2025-06-25T14:59:14.803172Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:14.803301Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:14.820446Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:14.821026Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:14.844809Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:14.845207Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[3:136:2160], cookie=4400591820939432874, path="/Root", config={ MaxUnitsPerSecond: 300 PrefetchCoefficient: 1 }) 2025-06-25T14:59:14.845531Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-25T14:59:14.857340Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[3:136:2160], cookie=4400591820939432874) 2025-06-25T14:59:14.857923Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[3:146:2168], cookie=12337430964763118669, path="/Root/Res1", config={ }) 2025-06-25T14:59:14.858129Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res1" 2025-06-25T14:59:14.870102Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[3:146:2168], cookie=12337430964763118669) 2025-06-25T14:59:14.870650Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[3:151:2173], cookie=11836072575865714241, path="/Root/Res2", config={ }) 2025-06-25T14:59:14.870860Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 3 "Root/Res2" 2025-06-25T14:59:14.886261Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[3:151:2173], cookie=11836072575865714241) 2025-06-25T14:59:14.886993Z node 3 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [3:156:2178]. Cookie: 16841572113155396048. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res1" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { BillingPeriodSec: 2 } OnDemand { BillingPeriodSec: 2 } Overshoot { BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:14.887044Z node 3 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[3:156:2178], cookie=16841572113155396048) 2025-06-25T14:59:14.887575Z node 3 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [3:156:2178]. 
Cookie: 17283951981371914587. Data: { Results { ResourceId: 3 Error { Status: SUCCESS } EffectiveProps { ResourceId: 3 ResourcePath: "Root/Res2" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { BillingPeriodSec: 2 } OnDemand { BillingPeriodSec: 2 } Overshoot { BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:14.887621Z node 3 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[3:156:2178], cookie=17283951981371914587) 2025-06-25T14:59:14.888053Z node 3 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [3:156:2178]. Cookie: 12884724621490653887. Data: { ResourcesInfo { ResourceId: 2 AcceptedUs: 1020500 } ResourcesInfo { ResourceId: 3 AcceptedUs: 1020500 } } 2025-06-25T14:59:14.888096Z node 3 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[3:156:2178], cookie=12884724621490653887) 2025-06-25T14:59:16.985192Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:16.985278Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:17.000418Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:17.000532Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:17.014518Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:17.014933Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[4:134:2158], cookie=4685625724518454566, path="/Root", config={ MaxUnitsPerSecond: 100 PrefetchCoefficient: 300 }) 2025-06-25T14:59:17.015278Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-25T14:59:17.037568Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[4:134:2158], cookie=4685625724518454566) 2025-06-25T14:59:17.038590Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [4:144:2166]. Cookie: 16739046046382556015. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 300 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { Enabled: true BillingPeriodSec: 2 Labels { key: "k1" value: "v1" } Labels { key: "k2" value: "v2" } } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:17.038655Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[4:144:2166], cookie=16739046046382556015) 2025-06-25T14:59:17.039115Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [4:144:2166]. Cookie: 878632701959991951. 
Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 26500 } } 2025-06-25T14:59:17.039162Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[4:144:2166], cookie=878632701959991951) 2025-06-25T14:59:19.198168Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:19.198281Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:19.218369Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:19.218515Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:19.242800Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:19.243224Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:136:2160], cookie=14846872588899127628, path="/Root", config={ MaxUnitsPerSecond: 100 }) 2025-06-25T14:59:19.243430Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-25T14:59:19.255475Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:136:2160], cookie=14846872588899127628) 2025-06-25T14:59:19.256066Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:145:2167], cookie=18109629999163427151, path="/Root/Res", config={ }) 2025-06-25T14:59:19.256339Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res" 2025-06-25T14:59:19.268441Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:145:2167], cookie=18109629999163427151) 2025-06-25T14:59:19.269139Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:150:2172]. Cookie: 15462398175172141782. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:19.269199Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:150:2172], cookie=15462398175172141782) 2025-06-25T14:59:19.269624Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_update.cpp:34: [72057594037927937] TTxQuoterResourceUpdate::Execute (sender=[5:154:2176], cookie=7246346355020076741, id=0, path="/Root", config={ MaxUnitsPerSecond: 150 }) 2025-06-25T14:59:19.269781Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_update.cpp:61: [72057594037927937] Updated quoter resource 1 "Root" 2025-06-25T14:59:19.269923Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [5:150:2172]. Cookie: 0. 
Data: { ResourcesInfo { ResourceId: 2 EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 150 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } StateNotification { Status: SUCCESS } } } 2025-06-25T14:59:19.281752Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_update.cpp:75: [72057594037927937] TTxQuoterResourceUpdate::Complete (sender=[5:154:2176], cookie=7246346355020076741) 2025-06-25T14:59:19.282208Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:193: [72057594037927937] Send TEvUpdateConsumptionStateAck to [5:150:2172]. Cookie: 1203129111489888997. Data: { } 2025-06-25T14:59:19.282252Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:198: [72057594037927937] Update quoter resources consumption state (sender=[5:150:2172], cookie=1203129111489888997) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestManyConsumers [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] 2025-06-25T14:58:55.177305Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T14:58:55.191425Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037927937] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T14:58:55.191648Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037927937] doesn't have tx info 2025-06-25T14:58:55.191704Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T14:58:55.191739Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-25T14:58:55.191769Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-25T14:58:55.191826Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.191866Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927938 is [1:157:2176] sender: [1:158:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:184:2057] recipient: [1:14:2061] 2025-06-25T14:58:55.212151Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:183:2196], now have 1 active actors on pipe 2025-06-25T14:58:55.212288Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T14:58:55.232367Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 
MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-25T14:58:55.235273Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-25T14:58:55.235443Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.236341Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "user1" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "user1" Generation: 1 Important: true } 2025-06-25T14:58:55.236446Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T14:58:55.236867Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--asdfgs--topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T14:58:55.237222Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:190:2201] 2025-06-25T14:58:55.239634Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--asdfgs--topic:0:Initializer] Initializing completed. 
2025-06-25T14:58:55.239713Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:190:2201] 2025-06-25T14:58:55.239771Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--asdfgs--topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T14:58:55.242030Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T14:58:55.242171Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user reinit request with generation 1 2025-06-25T14:58:55.242231Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user reinit with generation 1 done 2025-06-25T14:58:55.242268Z node 1 :PERSQUEUE DEBUG: partition.cpp:3232: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user1 reinit request with generation 1 2025-06-25T14:58:55.242289Z node 1 :PERSQUEUE DEBUG: partition.cpp:3302: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user1 reinit with generation 1 done 2025-06-25T14:58:55.242504Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:58:55.242544Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'rt3.dc1--asdfgs--topic' partition 0 user user1 readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:58:55.242711Z node 1 :PERSQUEUE DEBUG: read.h:272: CacheProxy. 
Passthrough write request to KV 2025-06-25T14:58:55.242898Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:55.245421Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:58:55.245508Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037927937, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T14:58:55.245870Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:197:2206], now have 1 active actors on pipe 2025-06-25T14:58:55.255775Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [1:200:2208], now have 1 active actors on pipe 2025-06-25T14:58:55.255861Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--asdfgs--topic' requestId: 2025-06-25T14:58:55.255926Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72057594037927937] got client message batch for topic 'rt3.dc1--asdfgs--topic' partition 0 2025-06-25T14:58:55.256203Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 1 partNo : 0 messageNo: 0 size 102400 offset: 0 2025-06-25T14:58:55.256249Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 2 partNo : 0 messageNo: 0 size 102400 offset: 1 2025-06-25T14:58:55.256300Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 3 partNo : 0 messageNo: 0 size 102400 offset: 2 2025-06-25T14:58:55.256359Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 4 partNo : 0 messageNo: 0 size 102400 offset: 3 2025-06-25T14:58:55.256388Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 5 partNo : 0 messageNo: 0 size 102400 offset: 4 2025-06-25T14:58:55.256414Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 6 partNo : 0 messageNo: 0 size 102400 offset: 5 2025-06-25T14:58:55.256440Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 7 partNo : 0 messageNo: 0 size 102400 offset: 6 2025-06-25T14:58:55.256466Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 8 partNo : 0 messageNo: 0 size 102400 offset: 7 2025-06-25T14:58:55.256508Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 9 partNo : 0 messageNo: 0 size 102400 offset: 8 2025-06-25T14:58:55.256557Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 10 partNo : 0 messageNo: 0 size 102400 offset: 9 
2025-06-25T14:58:55.256593Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 11 partNo : 0 messageNo: 0 size 102400 offset: 10 2025-06-25T14:58:55.256620Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 12 partNo : 0 messageNo: 0 size 102400 offset: 11 2025-06-25T14:58:55.256646Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2209: [PQ: 72057594037927937] got client message topic: rt3.dc1--asdfgs--topic partition: 0 SourceId: 'sourceid0' SeqNo: 13 partNo : 0 messageNo: 0 size 102400 offset: 12 2025-06-25T14:58:55.256686Z node 1 :PERSQUEUE DEB ... UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.525743Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.558455Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [16:1006:3000], now have 1 active actors on pipe 2025-06-25T14:59:17.560400Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.576641Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.610377Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [16:1009:3003], now have 1 active actors on pipe 2025-06-25T14:59:17.612283Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.626551Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.654002Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [16:1012:3006], now have 1 active actors on pipe 2025-06-25T14:59:17.655698Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus 
PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.670308Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.712288Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [16:1015:3009], now have 1 active actors on pipe 2025-06-25T14:59:17.714226Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.726510Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.751391Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [16:1018:3012], now have 1 active actors on pipe 2025-06-25T14:59:17.753086Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.764058Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.786168Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [16:1021:3015], now have 1 active actors on pipe 2025-06-25T14:59:17.787800Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.798593Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.820565Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [16:1024:3018], 
now have 1 active actors on pipe 2025-06-25T14:59:17.822344Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.834059Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.876655Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [16:1027:3021], now have 1 active actors on pipe 2025-06-25T14:59:17.878286Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.891043Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.913487Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [16:1030:3024], now have 1 active actors on pipe 2025-06-25T14:59:17.915173Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.926670Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.950496Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [16:1033:3027], now have 1 active actors on pipe 2025-06-25T14:59:17.952593Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.964731Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 
SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:17.988403Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [16:1036:3030], now have 1 active actors on pipe 2025-06-25T14:59:17.990259Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:18.002264Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:18.043957Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037927937] server connected, pipe [16:1039:3033], now have 1 active actors on pipe 2025-06-25T14:59:18.045671Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:18.058169Z node 16 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037927937, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T14:59:18.088742Z node 16 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72057594037927938][rt3.dc1--topic] pipe [16:1042:3036] connected; active server actors: 1 >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-EnableSeparateQuotas [GOOD] >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain-EnableSeparateQuotas [GOOD] >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 [GOOD] >> TSchemeShardLoginTest::BanUserWithWaiting [GOOD] >> TSchemeShardLoginTest::ChangeAcceptablePasswordParameters ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-EnableSeparateQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:07.715788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:07.715877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue 
configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.715915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:07.715950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:07.715992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:07.716036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:07.716100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.716171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:07.716885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:07.717293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:07.794698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:07.794752Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:07.809343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:07.809740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:07.809937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:07.815174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:07.815426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:07.816026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.816254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:07.819040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.819171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:07.820053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.820110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.820242Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:07.820290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:07.820352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:07.820414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.825348Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:07.932967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:07.933196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.933392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:07.933542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:07.933764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:07.933835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:07.935685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.935867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:07.936012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.936061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:07.936115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 
2025-06-25T14:59:07.936150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:07.937911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.937961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:07.937992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:07.939653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.939710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.939762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.939808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:07.942748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:07.944292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:07.944488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:07.945274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.945399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:07.945480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.945720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:07.945764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.945984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:07.946048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:07.947831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.947874Z node 1 :FLAT_TX_SCHEMESHARD D ... 944 2025-06-25T14:59:19.790189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:19.791609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:19.791659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:19.791803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T14:59:19.791919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:19.791958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-25T14:59:19.791990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-25T14:59:19.792330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:19.792374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:59:19.792432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:19.792457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 103:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-25T14:59:19.792489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 103:0 129 -> 240 2025-06-25T14:59:19.793060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 10 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:19.793139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 10 PathOwnerId: 72057594046678944, 
cookie: 103 2025-06-25T14:59:19.793183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:59:19.793208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 10 2025-06-25T14:59:19.793240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:19.794000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:19.794074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T14:59:19.794101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-25T14:59:19.794129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-25T14:59:19.794154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T14:59:19.794216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-25T14:59:19.796968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T14:59:19.797017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:19.797360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T14:59:19.797535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:59:19.797575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:59:19.797610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T14:59:19.797642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:59:19.797678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-25T14:59:19.797736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:410:2374] message: TxId: 103 
2025-06-25T14:59:19.797777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T14:59:19.797813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T14:59:19.797839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T14:59:19.797925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T14:59:19.798409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:19.798445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:19.799125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:59:19.800145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:59:19.801430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:19.801477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-06-25T14:59:19.801793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T14:59:19.801836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:1336:3258] 2025-06-25T14:59:19.802483Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 11 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification: OK eventTxId 103 2025-06-25T14:59:19.805678Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:19.805846Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 185us result status StatusSuccess 2025-06-25T14:59:19.806131Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TPQTest::TestPartitionTotalQuota [GOOD] >> TPQTest::TestPartitionPerConsumerQuota >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain-EnableSeparateQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:07.527669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:07.527777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.527824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:07.527873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:07.527919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:07.527948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:07.528001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.528072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:07.528843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:07.529189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:07.606311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:07.606366Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:07.623121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:07.623503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:07.623696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:07.629188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:07.629503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:07.630173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.630423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:07.633798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.633996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:07.635238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.635312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:07.635473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:07.635522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:07.635566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: 
TTxServerlessStorageBilling.Complete 2025-06-25T14:59:07.635660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.645250Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:07.750132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:07.750405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.750626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:07.750677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:07.750912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:07.750986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:07.753033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.753222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:07.753443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.753500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:07.753538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:07.753574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:07.755345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.755397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 
2025-06-25T14:59:07.755438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:07.756943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.756986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:07.757044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.757097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:07.766679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:07.768569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:07.768742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:07.769663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:07.769785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:07.769834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.770109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:07.770165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:07.770330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:07.770454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:07.772388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard 
DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:07.772433Z node 1 :FLAT_TX_SCHEMESHARD D ... Id: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:19.912862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:19.913029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-25T14:59:19.913057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-25T14:59:19.913219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 2] 2025-06-25T14:59:19.913344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-25T14:59:19.913385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:452:2402], at schemeshard: 72075186233409546, txId: 104, path id: 1 2025-06-25T14:59:19.913415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:452:2402], at schemeshard: 72075186233409546, txId: 104, path id: 2 2025-06-25T14:59:19.913778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:19.913826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72075186233409546 2025-06-25T14:59:19.913890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:19.913920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 104:0, datashard: 72075186233409549, at schemeshard: 72075186233409546 2025-06-25T14:59:19.913947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 129 -> 240 2025-06-25T14:59:19.914590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72075186233409546, cookie: 104 2025-06-25T14:59:19.914690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72075186233409546, cookie: 104 2025-06-25T14:59:19.914719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-25T14:59:19.914748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 11 2025-06-25T14:59:19.914781Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-06-25T14:59:19.915648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-25T14:59:19.915718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-25T14:59:19.915738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-25T14:59:19.915757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 18446744073709551615 2025-06-25T14:59:19.915777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-25T14:59:19.915825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-25T14:59:19.918335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-25T14:59:19.918373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:0 ProgressState, at schemeshard: 72075186233409546 2025-06-25T14:59:19.918636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-25T14:59:19.918799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:59:19.918840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:19.918877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T14:59:19.918908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:19.918946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-25T14:59:19.918996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:557:2494] message: TxId: 104 2025-06-25T14:59:19.919030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T14:59:19.919063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T14:59:19.919092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T14:59:19.919165Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-06-25T14:59:19.919621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-25T14:59:19.919648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-25T14:59:19.919887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-25T14:59:19.921124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-25T14:59:19.922439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-25T14:59:19.922491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:452:2402], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-25T14:59:19.922918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T14:59:19.922966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:1427:3333] 2025-06-25T14:59:19.923403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72075186233409546, cookie: 0 TestWaitNotification: OK eventTxId 104 2025-06-25T14:59:19.927341Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-25T14:59:19.927574Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/SomeDatabase" took 265us result status StatusSuccess 2025-06-25T14:59:19.928087Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "MyRoot/SomeDatabase" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 12 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 12 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 2 SubDomainStateVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: 
"fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { Audience: "/MyRoot/SomeDatabase" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact [GOOD] >> TSchemeShardSubDomainTest::TableDiskSpaceQuotas [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_BadSourceId_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_NewSourceId_Test >> TSchemeShardLoginTest::ChangeAcceptablePasswordParameters [GOOD] >> TSchemeShardLoginTest::ChangeAccountLockoutParameters ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::TableDiskSpaceQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:07.936293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:07.936389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.936420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:07.936458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:07.936501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:07.936522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:07.936564Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:07.936625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:07.937382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:07.937700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:08.005069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:08.005116Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:08.020390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:08.020729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:08.020886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:08.026267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:08.026568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:08.027256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.027484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:08.030658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:08.030836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:08.032022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:08.032069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:08.032172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:08.032207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:08.032235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:08.032290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.037258Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: 
[72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:08.146890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:08.147112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.147296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:08.147339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:08.147610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:08.147683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:08.149623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.149815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:08.149973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.150029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:08.150079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:08.150122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:08.151802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.151857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:08.151903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:08.153615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-25T14:59:08.153665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:08.153714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.153749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:08.156160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:08.157452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:08.157594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:08.158175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:08.158285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:08.158339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.158534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:08.158569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:08.158679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:08.158744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:08.160081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:08.160122Z node 1 :FLAT_TX_SCHEMESHARD D ... 
lt> complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-25T14:59:20.744957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-25T14:59:20.745879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:20.745919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 107, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:20.746031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 107, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-25T14:59:20.746115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:20.746140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 107, path id: 2 2025-06-25T14:59:20.746176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 107, path id: 4 2025-06-25T14:59:20.746484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-25T14:59:20.746515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 107:0 ProgressState at tablet: 72057594046678944 2025-06-25T14:59:20.746580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-25T14:59:20.746613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 107:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-25T14:59:20.746648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 107:0 129 -> 240 2025-06-25T14:59:20.747300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 14 PathOwnerId: 72057594046678944, cookie: 107 2025-06-25T14:59:20.747387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 14 PathOwnerId: 72057594046678944, cookie: 107 2025-06-25T14:59:20.747436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 107 2025-06-25T14:59:20.747474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 107, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 14 2025-06-25T14:59:20.747512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing 
for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-25T14:59:20.747951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 107 2025-06-25T14:59:20.748014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 107 2025-06-25T14:59:20.748035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 107 2025-06-25T14:59:20.748053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 107, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-25T14:59:20.748078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-25T14:59:20.748119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 107, ready parts: 0/1, is published: true 2025-06-25T14:59:20.749957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-25T14:59:20.750009Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 107:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:20.750327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T14:59:20.750459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#107:0 progress is 1/1 2025-06-25T14:59:20.750496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-25T14:59:20.750534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#107:0 progress is 1/1 2025-06-25T14:59:20.750561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-25T14:59:20.750607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 107, ready parts: 1/1, is published: true 2025-06-25T14:59:20.750652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-25T14:59:20.750690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 107:0 2025-06-25T14:59:20.750726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 107:0 2025-06-25T14:59:20.750798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T14:59:20.751055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:20.751079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T14:59:20.751580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-25T14:59:20.752881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-25T14:59:20.753558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:20.753591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-06-25T14:59:20.754099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 15 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification wait txId: 107 2025-06-25T14:59:20.754474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-06-25T14:59:20.754526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-06-25T14:59:20.754894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-06-25T14:59:20.754951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-25T14:59:20.754974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:983:2906] TestWaitNotification: OK eventTxId 107 2025-06-25T14:59:20.755389Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:20.755549Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 168us result status StatusSuccess 2025-06-25T14:59:20.755834Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 15 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 15 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 9 SubDomainVersion: 1 SubDomainStateVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 
PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 1 } SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> ReadSessionImplTest::ForcefulDestroyPartitionStream [GOOD] >> ReadSessionImplTest::DestroyPartitionStreamRequest [GOOD] >> ReadSessionImplTest::DecompressZstdEmptyMessage >> Compression::WriteRAW >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression >> ReadSessionImplTest::UsesOnRetryStateDuringRetries [GOOD] >> RetryPolicy::TWriteSession_TestPolicy >> ApplyClusterEndpointTest::NoPorts [GOOD] >> ApplyClusterEndpointTest::PortFromCds [GOOD] >> ApplyClusterEndpointTest::PortFromDriver [GOOD] >> BasicUsage::MaxByteSizeEqualZero >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_Active_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveConfig_Test >> ReadSessionImplTest::ReconnectOnTmpError [GOOD] >> ReadSessionImplTest::ProperlyOrdersDecompressedData [GOOD] >> ReadSessionImplTest::ReconnectOnTmpErrorAndThenTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeoutAndThenCreate [GOOD] >> ReadSessionImplTest::ReconnectsAfterFailure [GOOD] >> ReadSessionImplTest::SimpleDataHandlers >> ReadSessionImplTest::PacksBatches_ExactlyTwoMessagesInBatch [GOOD] >> ReadSessionImplTest::PacksBatches_OneMessageInEveryBatch [GOOD] >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions >> ReadSessionImplTest::DecompressRaw [GOOD] >> ReadSessionImplTest::DecompressGzip [GOOD] >> ReadSessionImplTest::DecompressZstd [GOOD] >> ReadSessionImplTest::DecompressRawEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressGzipEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressWithSynchronousExecutor [GOOD] >> ReadSessionImplTest::DataReceivedCallbackReal >> ReadSessionImplTest::SuccessfulInit [GOOD] >> ReadSessionImplTest::SuccessfulInitAndThenTimeoutCallback [GOOD] >> ReadSessionImplTest::StopsRetryAfterFailedAttempt [GOOD] >> ReadSessionImplTest::StopsRetryAfterTimeout [GOOD] >> ReadSessionImplTest::UnpackBigBatchWithTwoPartitions [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease >> ReadSessionImplTest::DecompressZstdEmptyMessage [GOOD] >> 
ReadSessionImplTest::PacksBatches_BatchABitBiggerThanLimit [GOOD] >> ReadSessionImplTest::PacksBatches_BatchesEqualToServerBatches [GOOD] >> ReadSessionImplTest::HoleBetweenOffsets [GOOD] >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] >> ReadSessionImplTest::SimpleDataHandlers [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithCommit >> TSchemeShardLoginTest::ResetFailedAttemptCount [GOOD] >> TSchemeShardLoginTest::ResetFailedAttemptCountAfterModifyUser >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW-VolatileTxs [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] |90.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TFqYdbTest::ShouldStatusToIssuesProcessExceptions [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] Test command err: 2025-06-25T14:59:21.761469Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.761519Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.761586Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.762582Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.764077Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.776817Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.778134Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.782070Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.782088Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.782104Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.782407Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.782827Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.783098Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.783270Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.783560Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-25T14:59:21.785163Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.785187Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.785219Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.785480Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-25T14:59:21.786232Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.786318Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.786494Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.792117Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.795429Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:59:21.795572Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.795637Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-25T14:59:21.796498Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.796516Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.796536Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.796866Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.797267Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.797430Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.797611Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 11 Compressed message data size: 31 2025-06-25T14:59:21.799439Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:59:21.799682Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-25T14:59:21.799962Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-25T14:59:21.800122Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-25T14:59:21.800222Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.800268Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:59:21.800298Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:59:21.800483Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). 
Partition stream id: 1 Getting new event 2025-06-25T14:59:21.800512Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T14:59:21.800530Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-25T14:59:21.800546Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:59:21.800668Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 Getting new event 2025-06-25T14:59:21.800734Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-25T14:59:21.800770Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-25T14:59:21.800791Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:59:21.800867Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 Getting new event 2025-06-25T14:59:21.800891Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-25T14:59:21.800919Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-25T14:59:21.800936Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:59:21.801026Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). 
Partition stream id: 1 2025-06-25T14:59:21.801937Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.801961Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.801982Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.802251Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.802672Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.802808Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.803033Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-25T14:59:21.803982Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:59:21.804183Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-25T14:59:21.804478Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-25T14:59:21.804619Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-25T14:59:21.804711Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.804742Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:59:21.804773Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T14:59:21.804787Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-25T14:59:21.804816Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:59:21.805041Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 5). 
Partition stream id: 1 Getting new event 2025-06-25T14:59:21.805123Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-25T14:59:21.805136Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-25T14:59:21.805151Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-25T14:59:21.805161Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-25T14:59:21.805175Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:59:21.805282Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 9). Partition stream id: 1 2025-06-25T14:59:21.806222Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.806248Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.806263Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.806529Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.806786Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.806877Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.807041Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.807945Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:59:21.808516Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:59:21.808722Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (10-11) 2025-06-25T14:59:21.808816Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-25T14:59:21.808879Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2025-06-25T14:59:21.808901Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:59:21.808915Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (10-10) 2025-06-25T14:59:21.808927Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (11-11) 2025-06-25T14:59:21.808950Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes 2025-06-25T14:59:21.808964Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes got data event: DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 11 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-25T14:59:21.809098Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Got commit req { cookies { assign_id: 1 partition_cookie: 1 } } 2025-06-25T14:59:21.809193Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [10, 12). Partition stream id: 1 Got commit req { cookies { assign_id: 1 partition_cookie: 2 } } |90.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TFqYdbTest::ShouldStatusToIssuesProcessEmptyIssues [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] |90.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_subdomain/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] Test command err: 2025-06-25T14:59:21.760680Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.760724Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.760746Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.762642Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. 
Description: 2025-06-25T14:59:21.762691Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.762719Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.763926Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.005518s 2025-06-25T14:59:21.764499Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.764962Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-25T14:59:21.765053Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.767709Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.767729Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.767746Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.768060Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-25T14:59:21.768095Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.768120Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.768170Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.005505s 2025-06-25T14:59:21.768559Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.768896Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-25T14:59:21.768982Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.769593Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.769608Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.769623Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.769950Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-25T14:59:21.769986Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.770003Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.770051Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.178339s 2025-06-25T14:59:21.770519Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.770889Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-25T14:59:21.770963Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.771666Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.771696Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.771723Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.772001Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-25T14:59:21.772031Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.772082Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.772131Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.183876s 2025-06-25T14:59:21.772625Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.772967Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-25T14:59:21.773022Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.773697Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.773749Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.773774Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.774022Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.774401Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.781777Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.782077Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TRANSPORT_UNAVAILABLE. Description:
: Error: GRpc error: (14): 2025-06-25T14:59:21.782105Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.782126Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.782169Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.155939s 2025-06-25T14:59:21.782492Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-25T14:59:21.783648Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.783673Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.783690Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.783950Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.784239Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.784346Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.784794Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.885615Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.885833Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-25T14:59:21.885925Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.885987Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-25T14:59:21.886062Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-25T14:59:21.986350Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-25T14:59:21.986517Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-25T14:59:21.987441Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.987458Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.987495Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.987747Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.988104Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.988560Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.988979Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:22.091386Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:22.091570Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-25T14:59:22.091636Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:22.091681Z :DEBUG: Take Data. Partition 1. 
Read: {1, 0} (2-2) 2025-06-25T14:59:22.091752Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-06-25T14:59:22.091847Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-25T14:59:22.091946Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-25T14:59:22.092078Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-25T14:59:22.092680Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> TSchemeShardLoginTest::AccountLockoutAndAutomaticallyUnlock [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsert [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsert2 ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] Test command err: 2025-06-25T14:59:21.804142Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.804161Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.804185Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.804557Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.804916Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-25T14:59:21.804979Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.805849Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.805864Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.805877Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.806081Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.806279Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-25T14:59:21.806314Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.806862Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.806879Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.806893Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.807306Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-25T14:59:21.807346Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.807372Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.807485Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: INTERNAL_ERROR Issues: "
: Error: Failed to establish connection to server "" ( cluster cluster). Attempts done: 1 " } 2025-06-25T14:59:21.808106Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.808123Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.808146Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.808420Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-25T14:59:21.808449Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.808460Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.808509Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: TIMEOUT Issues: "
: Error: Failed to establish connection to server. Attempts done: 1 " } 2025-06-25T14:59:21.809096Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-25T14:59:21.809128Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-25T14:59:21.809152Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.809354Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.809708Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.819453Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-25T14:59:21.819744Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.820011Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 2. Cluster: "TestCluster". Topic: "TestTopic". Partition: 2. Read offset: (NULL) 2025-06-25T14:59:21.822574Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-50) 2025-06-25T14:59:21.822742Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.822768Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:59:21.822781Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T14:59:21.822791Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-25T14:59:21.822806Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-25T14:59:21.822815Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-25T14:59:21.822824Z :DEBUG: Take Data. Partition 1. Read: {0, 6} (7-7) 2025-06-25T14:59:21.822839Z :DEBUG: Take Data. Partition 1. Read: {0, 7} (8-8) 2025-06-25T14:59:21.822866Z :DEBUG: Take Data. Partition 1. Read: {0, 8} (9-9) 2025-06-25T14:59:21.822878Z :DEBUG: Take Data. Partition 1. Read: {0, 9} (10-10) 2025-06-25T14:59:21.822892Z :DEBUG: Take Data. Partition 1. Read: {0, 10} (11-11) 2025-06-25T14:59:21.822907Z :DEBUG: Take Data. Partition 1. Read: {0, 11} (12-12) 2025-06-25T14:59:21.822920Z :DEBUG: Take Data. Partition 1. Read: {0, 12} (13-13) 2025-06-25T14:59:21.822934Z :DEBUG: Take Data. Partition 1. Read: {0, 13} (14-14) 2025-06-25T14:59:21.822948Z :DEBUG: Take Data. Partition 1. Read: {0, 14} (15-15) 2025-06-25T14:59:21.822965Z :DEBUG: Take Data. Partition 1. Read: {0, 15} (16-16) 2025-06-25T14:59:21.823004Z :DEBUG: Take Data. Partition 1. Read: {0, 16} (17-17) 2025-06-25T14:59:21.823029Z :DEBUG: Take Data. Partition 1. Read: {0, 17} (18-18) 2025-06-25T14:59:21.823053Z :DEBUG: Take Data. Partition 1. Read: {0, 18} (19-19) 2025-06-25T14:59:21.823073Z :DEBUG: Take Data. Partition 1. Read: {0, 19} (20-20) 2025-06-25T14:59:21.823092Z :DEBUG: Take Data. Partition 1. Read: {0, 20} (21-21) 2025-06-25T14:59:21.823108Z :DEBUG: Take Data. Partition 1. Read: {0, 21} (22-22) 2025-06-25T14:59:21.823123Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (23-23) 2025-06-25T14:59:21.823136Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (24-24) 2025-06-25T14:59:21.823166Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (25-25) 2025-06-25T14:59:21.823184Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (26-26) 2025-06-25T14:59:21.823200Z :DEBUG: Take Data. Partition 1. Read: {1, 4} (27-27) 2025-06-25T14:59:21.823214Z :DEBUG: Take Data. Partition 1. 
Read: {1, 5} (28-28) 2025-06-25T14:59:21.823228Z :DEBUG: Take Data. Partition 1. Read: {1, 6} (29-29) 2025-06-25T14:59:21.823242Z :DEBUG: Take Data. Partition 1. Read: {1, 7} (30-30) 2025-06-25T14:59:21.823256Z :DEBUG: Take Data. Partition 1. Read: {1, 8} (31-31) 2025-06-25T14:59:21.823270Z :DEBUG: Take Data. Partition 1. Read: {1, 9} (32-32) 2025-06-25T14:59:21.823335Z :DEBUG: Take Data. Partition 1. Read: {1, 10} (33-33) 2025-06-25T14:59:21.823347Z :DEBUG: Take Data. Partition 1. Read: {1, 11} (34-34) 2025-06-25T14:59:21.823356Z :DEBUG: Take Data. Partition 1. Read: {1, 12} (35-35) 2025-06-25T14:59:21.823365Z :DEBUG: Take Data. Partition 1. Read: {1, 13} (36-36) 2025-06-25T14:59:21.823380Z :DEBUG: Take Data. Partition 1. Read: {1, 14} (37-37) 2025-06-25T14:59:21.823390Z :DEBUG: Take Data. Partition 1. Read: {1, 15} (38-38) 2025-06-25T14:59:21.823399Z :DEBUG: Take Data. Partition 1. Read: {1, 16} (39-39) 2025-06-25T14:59:21.823407Z :DEBUG: Take Data. Partition 1. Read: {1, 17} (40-40) 2025-06-25T14:59:21.823415Z :DEBUG: Take Data. Partition 1. Read: {1, 18} (41-41) 2025-06-25T14:59:21.823423Z :DEBUG: Take Data. Partition 1. Read: {1, 19} (42-42) 2025-06-25T14:59:21.823443Z :DEBUG: Take Data. Partition 1. Read: {1, 20} (43-43) 2025-06-25T14:59:21.823451Z :DEBUG: Take Data. Partition 1. Read: {1, 21} (44-44) 2025-06-25T14:59:21.823460Z :DEBUG: Take Data. Partition 1. Read: {1, 22} (45-45) 2025-06-25T14:59:21.823468Z :DEBUG: Take Data. Partition 1. Read: {1, 23} (46-46) 2025-06-25T14:59:21.823476Z :DEBUG: Take Data. Partition 1. Read: {1, 24} (47-47) 2025-06-25T14:59:21.823485Z :DEBUG: Take Data. Partition 1. Read: {1, 25} (48-48) 2025-06-25T14:59:21.823502Z :DEBUG: Take Data. Partition 1. Read: {1, 26} (49-49) 2025-06-25T14:59:21.823522Z :DEBUG: Take Data. Partition 1. Read: {1, 27} (50-50) 2025-06-25T14:59:21.823559Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-25T14:59:21.825328Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 2 (51-100) 2025-06-25T14:59:21.825488Z :DEBUG: Take Data. Partition 2. Read: {0, 0} (51-51) 2025-06-25T14:59:21.825520Z :DEBUG: Take Data. Partition 2. Read: {0, 1} (52-52) 2025-06-25T14:59:21.825543Z :DEBUG: Take Data. Partition 2. Read: {0, 2} (53-53) 2025-06-25T14:59:21.825559Z :DEBUG: Take Data. Partition 2. Read: {0, 3} (54-54) 2025-06-25T14:59:21.825581Z :DEBUG: Take Data. Partition 2. Read: {0, 4} (55-55) 2025-06-25T14:59:21.825597Z :DEBUG: Take Data. Partition 2. Read: {0, 5} (56-56) 2025-06-25T14:59:21.825611Z :DEBUG: Take Data. Partition 2. Read: {0, 6} (57-57) 2025-06-25T14:59:21.825626Z :DEBUG: Take Data. Partition 2. Read: {0, 7} (58-58) 2025-06-25T14:59:21.825660Z :DEBUG: Take Data. Partition 2. Read: {0, 8} (59-59) 2025-06-25T14:59:21.825677Z :DEBUG: Take Data. Partition 2. Read: {0, 9} (60-60) 2025-06-25T14:59:21.825694Z :DEBUG: Take Data. Partition 2. Read: {0, 10} (61-61) 2025-06-25T14:59:21.825720Z :DEBUG: Take Data. Partition 2. Read: {0, 11} (62-62) 2025-06-25T14:59:21.825747Z :DEBUG: Take Data. Partition 2. Read: {0, 12} (63-63) 2025-06-25T14:59:21.825770Z :DEBUG: Take Data. Partition 2. Read: {0, 13} (64-64) 2025-06-25T14:59:21.825796Z :DEBUG: Take Data. Partition 2. Read: {0, 14} (65-65) 2025-06-25T14:59:21.825811Z :DEBUG: Take Data. Partition 2. Read: {0, 15} (66-66) 2025-06-25T14:59:21.825846Z :DEBUG: Take Data. Partition 2. Read: {0, 16} (67-67) 2025-06-25T14:59:21.825861Z :DEBUG: Take Data. Partition 2. 
Read: {0, 17} (68-68) 2025-06-25T14:59:21.825877Z :DEBUG: Take Data. Partition 2. Read: {0, 18} (69-69) 2025-06-25T14:59:21.825892Z :DEBUG: Take Data. Partition 2. Read: {0, 19} (70-70) 2025-06-25T14:59:21.825914Z :DEBUG: Take Data. Partition 2. Read: {0, 20} (71-71) 2025-06-25T14:59:21.825934Z :DEBUG: Take Data. Partition 2. Read: {0, 21} (72-72) 2025-06-25T14:59:21.825949Z :DEBUG: Take Data. Partition 2. Read: {1, 0} (73-73) 2025-06-25T14:59:21.825963Z :DEBUG: Take Data. Partition 2. Read: {1, 1} (74-74) 2025-06-25T14:59:21.825977Z :DEBUG: Take Data. Partition 2. Read: {1, 2} (75-75) 2025-06-25T14:59:21.825991Z :DEBUG: Take Data. Partition 2. Read: {1, 3} (76-76) 2025-06-25T14:59:21.826008Z :DEBUG: Take Data. Partition 2. Read: {1, 4} (77-77) 2025-06-25T14:59:21.826026Z :DEBUG: Take Data. Partition 2. Read: {1, 5} (78-78) 2025-06-25T14:59:21.826045Z :DEBUG: Take Data. Partition 2. Read: {1, 6} (79-79) 2025-06-25T14:59:21.826067Z :DEBUG: Take Data. Partition 2. Read: {1, 7} (80-80) 2025-06-25T14:59:21.826098Z :DEBUG: Take Data. Partition 2. Read: {1, 8} (81-81) 2025-06-25T14:59:21.826114Z :DEBUG: Take Data. Partition 2. Read: {1, 9} (82-82) 2025-06-25T14:59:21.826168Z :DEBUG: Take Data. Partition 2. Read: {1, 10} (83-83) 2025-06-25T14:59:21.826187Z :DEBUG: Take Data. Partition 2. Read: {1, 11} (84-84) 2025-06-25T14:59:21.826207Z :DEBUG: Take Data. Partition 2. Read: {1, 12} (85-85) 2025-06-25T14:59:21.826223Z :DEBUG: Take Data. Partition 2. Read: {1, 13} (86-86) 2025-06-25T14:59:21.826237Z :DEBUG: Take Data. Partition 2. Read: {1, 14} (87-87) 2025-06-25T14:59:21.826251Z :DEBUG: Take Data. Partition 2. Read: {1, 15} (88-88) 2025-06-25T14:59:21.826265Z :DEBUG: Take Data. Partition 2. Read: {1, 16} (89-89) 2025-06-25T14:59:21.826278Z :DEBUG: Take Data. Partition 2. Read: {1, 17} (90-90) 2025-06-25T14:59:21.826292Z :DEBUG: Take Data. Partition 2. Read: {1, 18} (91-91) 2025-06-25T14:59:21.826308Z :DEBUG: Take Data. Partition 2. Read: {1, 19} (92-92) 2025-06-25T14:59:21.826324Z :DEBUG: Take Data. Partition 2. Read: {1, 20} (93-93) 2025-06-25T14:59:21.826383Z :DEBUG: Take Data. Partition 2. Read: {1, 21} (94-94) 2025-06-25T14:59:21.826404Z :DEBUG: Take Data. Partition 2. Read: {1, 22} (95-95) 2025-06-25T14:59:21.826428Z :DEBUG: Take Data. Partition 2. Read: {1, 23} (96-96) 2025-06-25T14:59:21.826455Z :DEBUG: Take Data. Partition 2. Read: {1, 24} (97-97) 2025-06-25T14:59:21.826470Z :DEBUG: Take Data. Partition 2. Read: {1, 25} (98-98) 2025-06-25T14:59:21.826487Z :DEBUG: Take Data. Partition 2. Read: {1, 26} (99-99) 2025-06-25T14:59:21.826501Z :DEBUG: Take Data. Partition 2. Read: {1, 27} (100-100) 2025-06-25T14:59:21.826542Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-25T14:59:21.826652Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-25T14:59:21.827672Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.827690Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.827733Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.828015Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-25T14:59:21.828295Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.828397Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.828729Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.929466Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.929733Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-25T14:59:21.929819Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.929852Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-25T14:59:21.929916Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-25T14:59:22.130217Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-06-25T14:59:22.230630Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-25T14:59:22.230830Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-25T14:59:22.231019Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-25T14:59:22.231992Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:22.232006Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:22.232019Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:22.232276Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:22.232625Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:22.232741Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:22.233094Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:22.333956Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:22.334133Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-25T14:59:22.334199Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:22.334242Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-25T14:59:22.334317Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-06-25T14:59:22.334411Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-25T14:59:22.334536Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-25T14:59:22.334608Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 2025-06-25T14:59:22.334707Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster |90.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNewGeneration [GOOD] |90.7%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 [GOOD] |90.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain/test-results/unittest/{meta.json ... results_accumulator.log} |90.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minstep/unittest >> TDataShardMinStepTest::TestDropTableCompletesQuicklyRW-VolatileTxs [GOOD] Test command err: 2025-06-25T14:59:13.775588Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:59:13.775795Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:59:13.775849Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0010db/r3tmp/tmpQCMR5j/pdisk_1.dat 2025-06-25T14:59:14.169277Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:59:14.173938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:59:14.189212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:59:14.190853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:14.191680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:59:14.193728Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 1 HANDLE EvProposeTransaction marker# C0 2025-06-25T14:59:14.193789Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 1 step# 500 Status# 16 SEND to# [1:373:2367] Proxy marker# C1 2025-06-25T14:59:14.220416Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:14.220497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:14.230834Z node 1 :HIVE DEBUG: hive_impl.cpp:2275: HIVE#72057594037968897 Merged config: { } 2025-06-25T14:59:14.231155Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863551187351 != 1750863551187355 2025-06-25T14:59:14.283925Z node 1 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [1:296:2336] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } 
TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-06-25T14:59:14.284057Z node 1 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Execute 2025-06-25T14:59:14.284185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:14.284228Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-25T14:59:14.284264Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:59:14.284304Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-25T14:59:14.284375Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:59:14.284457Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:14.284650Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-25T14:59:14.284702Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-25T14:59:14.284753Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-25T14:59:14.284793Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-25T14:59:14.284918Z node 1 :HIVE DEBUG: hive_impl.cpp:808: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 1 Location DataCenter: "1" Module: "1" Rack: "1" Unit: "1" 2025-06-25T14:59:14.295563Z node 1 :HIVE DEBUG: tx__register_node.cpp:95: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Complete 2025-06-25T14:59:14.296563Z node 1 :HIVE DEBUG: node_info.cpp:373: HIVE#72057594037968897 Node(1) Ping([1:296:2336]) 2025-06-25T14:59:14.296696Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-25T14:59:14.297245Z node 1 :HIVE DEBUG: hive_impl.cpp:737: HIVE#72057594037968897 THive::Handle::TEvSyncTablets 2025-06-25T14:59:14.297331Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:41: HIVE#72057594037968897 THive::TTxSyncTablets([1:296:2336])::Execute 2025-06-25T14:59:14.297384Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:59:14.297473Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:130: HIVE#72057594037968897 THive::TTxSyncTablets([1:296:2336])::Complete 2025-06-25T14:59:14.297653Z node 1 :HIVE DEBUG: hive_impl.cpp:731: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 1: Status: 0 StartTime: 0 ResourceMaximum { Memory: 270443331584 } 2025-06-25T14:59:14.297722Z node 1 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(1)::Execute 2025-06-25T14:59:14.297775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:14.297929Z node 1 :HIVE DEBUG: hive_impl.cpp:2791: HIVE#72057594037968897 AddRegisteredDataCentersNode(1, 1) 2025-06-25T14:59:14.297988Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-25T14:59:14.298037Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:59:14.298173Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: 
HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-25T14:59:14.298222Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-25T14:59:14.298265Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-25T14:59:14.298303Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-25T14:59:14.309029Z node 1 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(1)::Complete 2025-06-25T14:59:14.309110Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-25T14:59:14.372091Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 1 has been planned 2025-06-25T14:59:14.372206Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 1 for mediator 72057594046382081 tablet 72057594046644480 2025-06-25T14:59:14.372552Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 1000 in 0.500000s at 0.950000s 2025-06-25T14:59:14.372892Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 500, txid# 1 marker# C2 2025-06-25T14:59:14.372967Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 1 stepId# 500 Status# 17 SEND EvProposeTransactionStatus to# [1:373:2367] Proxy 2025-06-25T14:59:14.373873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:59:14.378022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:59:14.378114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2025-06-25T14:59:14.378697Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-25T14:59:14.378794Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-06-25T14:59:14.378832Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 acknowledged 2025-06-25T14:59:14.378865Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:6] persistent tx 1 acknowledged 2025-06-25T14:59:14.379661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 1, subscribers: 1 2025-06-25T14:59:14.389748Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /Root/table-1, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:59:14.394958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:59:14.395072Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:14.396060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/table-1 2025-06-25T14:59:14.401741Z node 1 :HIVE DEBUG: hive_impl.cpp:34: HIVE#72057594037968897 Handle TEvHive::TEvCreateTablet(DataShard(72057594046644480,1)) 2025-06-25T14:59:14.416295Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:200: HIVE#72057594037968897 THive::TTxCreateTablet::Execute Owner: 72057594046644480 OwnerIdx: 1 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 2 BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 ... s {} 2025-06-25T14:59:21.545595Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 281474976715665 has been planned 2025-06-25T14:59:21.545677Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 2025-06-25T14:59:21.545708Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 2025-06-25T14:59:21.545891Z node 2 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 4000 in 0.500000s at 3.950000s 2025-06-25T14:59:21.546234Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 3500, txid# 281474976715665 marker# C2 2025-06-25T14:59:21.546293Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 281474976715665 stepId# 3500 Status# 17 SEND EvProposeTransactionStatus to# [2:373:2367] Proxy 2025-06-25T14:59:21.546604Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 3500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:59:21.547207Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715665 at step 3500 at tablet 72075186224037889 { Transactions { TxId: 281474976715665 AckTo { RawX1: 0 RawX2: 0 } } Step: 3500 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-06-25T14:59:21.547245Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:59:21.547396Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:59:21.547432Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:59:21.547466Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [3500:281474976715665] in PlanQueue unit at 72075186224037889 2025-06-25T14:59:21.547626Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 
3500:281474976715665 keys extracted: 0 2025-06-25T14:59:21.547734Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:59:21.547832Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:59:21.547890Z node 2 :TX_DATASHARD INFO: drop_table_unit.cpp:72: Trying to DROP TABLE at 72075186224037889 2025-06-25T14:59:21.548201Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:59:21.549605Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-06-25T14:59:21.549669Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:59:21.549877Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-25T14:59:21.549954Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:20] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-06-25T14:59:21.549989Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:20] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 removed=1 2025-06-25T14:59:21.550015Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:20] persistent tx 281474976715665 for mediator 72057594046382081 acknowledged 2025-06-25T14:59:21.550047Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:20] persistent tx 281474976715665 acknowledged 2025-06-25T14:59:21.550317Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:59:21.550384Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715665] from 72075186224037889 at tablet 72075186224037889 send result to client [2:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:59:21.550446Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715665 state PreOffline TxInFly 0 2025-06-25T14:59:21.550534Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:59:21.550715Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 281474976715665, done: 0, blocked: 1 2025-06-25T14:59:21.553003Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715665 datashard 72075186224037889 state PreOffline 2025-06-25T14:59:21.553093Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-25T14:59:21.553649Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715665:0 2025-06-25T14:59:21.553746Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 
281474976715665, publications: 1, subscribers: 1 2025-06-25T14:59:21.554536Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715665, subscribers: 1 2025-06-25T14:59:21.554760Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T14:59:21.555696Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=Y2JjYTQ0ZGItYTBjMDE4OWQtYmYwOTQ0ZGUtMWNiNjllOGE= 2025-06-25 14:59:21.555 INFO ydb-core-tx-datashard-ut_minstep(pid=663320, tid=0x00007F5B71EC4D40) [core exec] yql_execution.cpp:133: Completed async execution for node #42 2025-06-25T14:59:21.555805Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=Y2JjYTQ0ZGItYTBjMDE4OWQtYmYwOTQ0ZGUtMWNiNjllOGE= 2025-06-25 14:59:21.555 INFO ydb-core-tx-datashard-ut_minstep(pid=663320, tid=0x00007F5B71EC4D40) [core exec] yql_execution.cpp:153: State is ExecutionComplete after apply async changes for node #42 2025-06-25T14:59:21.555879Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=Y2JjYTQ0ZGItYTBjMDE4OWQtYmYwOTQ0ZGUtMWNiNjllOGE= 2025-06-25 14:59:21.555 INFO ydb-core-tx-datashard-ut_minstep(pid=663320, tid=0x00007F5B71EC4D40) [core exec] yql_execution.cpp:59: Begin, root #43 2025-06-25T14:59:21.555929Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=Y2JjYTQ0ZGItYTBjMDE4OWQtYmYwOTQ0ZGUtMWNiNjllOGE= 2025-06-25 14:59:21.555 INFO ydb-core-tx-datashard-ut_minstep(pid=663320, tid=0x00007F5B71EC4D40) [core exec] yql_execution.cpp:72: Collect unused nodes for root #43, status: Ok 2025-06-25T14:59:21.555978Z node 2 :KQP_YQL TRACE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=Y2JjYTQ0ZGItYTBjMDE4OWQtYmYwOTQ0ZGUtMWNiNjllOGE= 2025-06-25 14:59:21.555 TRACE ydb-core-tx-datashard-ut_minstep(pid=663320, tid=0x00007F5B71EC4D40) [core exec] yql_execution.cpp:387: {0}, callable #43 2025-06-25T14:59:21.556042Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=Y2JjYTQ0ZGItYTBjMDE4OWQtYmYwOTQ0ZGUtMWNiNjllOGE= 2025-06-25 14:59:21.556 INFO ydb-core-tx-datashard-ut_minstep(pid=663320, tid=0x00007F5B71EC4D40) [core exec] yql_execution.cpp:577: Node #43 finished execution 2025-06-25T14:59:21.556108Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=Y2JjYTQ0ZGItYTBjMDE4OWQtYmYwOTQ0ZGUtMWNiNjllOGE= 2025-06-25 14:59:21.556 INFO ydb-core-tx-datashard-ut_minstep(pid=663320, tid=0x00007F5B71EC4D40) [core exec] yql_execution.cpp:594: Node #43 created 0 trackable nodes: 2025-06-25T14:59:21.556156Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=Y2JjYTQ0ZGItYTBjMDE4OWQtYmYwOTQ0ZGUtMWNiNjllOGE= 2025-06-25 14:59:21.556 INFO ydb-core-tx-datashard-ut_minstep(pid=663320, tid=0x00007F5B71EC4D40) [core exec] yql_execution.cpp:87: Finish, output #43, status: Ok 2025-06-25T14:59:21.556211Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=Y2JjYTQ0ZGItYTBjMDE4OWQtYmYwOTQ0ZGUtMWNiNjllOGE= 2025-06-25 14:59:21.556 INFO ydb-core-tx-datashard-ut_minstep(pid=663320, tid=0x00007F5B71EC4D40) [core exec] yql_execution.cpp:93: Creating finalizing transformer, output #43 2025-06-25T14:59:21.556367Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=Y2JjYTQ0ZGItYTBjMDE4OWQtYmYwOTQ0ZGUtMWNiNjllOGE= 2025-06-25 14:59:21.556 NOTE 
ydb-core-tx-datashard-ut_minstep(pid=663320, tid=0x00007F5B71EC4D40) [common provider] yql_provider_gateway.cpp:21:
: Info: Execution, code: 1060 2025-06-25T14:59:21.556431Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=Y2JjYTQ0ZGItYTBjMDE4OWQtYmYwOTQ0ZGUtMWNiNjllOGE= 2025-06-25 14:59:21.556 NOTE ydb-core-tx-datashard-ut_minstep(pid=663320, tid=0x00007F5B71EC4D40) [common provider] yql_provider_gateway.cpp:21:
:1:12: Info: Executing DROP TABLE 2025-06-25T14:59:21.556475Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=Y2JjYTQ0ZGItYTBjMDE4OWQtYmYwOTQ0ZGUtMWNiNjllOGE= 2025-06-25 14:59:21.556 NOTE ydb-core-tx-datashard-ut_minstep(pid=663320, tid=0x00007F5B71EC4D40) [common provider] yql_provider_gateway.cpp:21:
: Info: Success, code: 4 2025-06-25T14:59:21.570121Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:59:21.570367Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-06-25T14:59:21.572080Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:59:21.572961Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-25T14:59:21.573380Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:74: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186224037889 2025-06-25T14:59:21.573434Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:19: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute Tablet 72075186224037889 2025-06-25T14:59:21.573533Z node 2 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037889.Leader.1) VolatileState: Running -> Stopped (Node 2) 2025-06-25T14:59:21.573674Z node 2 :HIVE DEBUG: tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(DataShard.72075186224037889.Leader.1 gen 1) to node 2 2025-06-25T14:59:21.573782Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:67: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() result Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 >> TSchemeShardLoginTest::ResetFailedAttemptCountAfterModifyUser [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::AccountLockoutAndAutomaticallyUnlock [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:15.326391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:15.326484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.326532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:15.326597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:15.326639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:15.326680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:15.326755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: 
BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.326832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:15.327568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:15.332653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:15.423593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:15.423655Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:15.441720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:15.442140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:15.442330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:15.449482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:15.449817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:15.450573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.450886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:15.454402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.454617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:15.455847Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.455915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.456123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:15.456192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:15.456261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:15.456388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.464341Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for 
TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:15.633737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:15.634005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.634256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:15.634314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:15.634584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:15.634683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:15.637626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.637840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:15.638032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.638117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:15.638189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:15.638246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:15.641506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.641589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:15.641635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:15.643547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.643601Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.643648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.643709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:15.647760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:15.649915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:15.650141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:15.651217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.651377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:15.651453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.651754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:15.651819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.652039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:15.652259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:15.654555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.654610Z node 1 :FLAT_TX_SCHEMESHARD ... 
HARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:18.414791Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T14:59:18.414829Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T14:59:18.414868Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-25T14:59:18.414912Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:18.415002Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T14:59:18.416340Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 2025-06-25T14:59:18.416672Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:18.416711Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:47: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-25T14:59:18.495927Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-25T14:59:18.496039Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:18.496077Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:18.496228Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:18.496277Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:211:2211], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-25T14:59:18.496754Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 0 2025-06-25T14:59:18.497038Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:18.502511Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-25T14:59:18.502817Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at 
schemeshard: 72057594046678944 2025-06-25T14:59:18.508277Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-25T14:59:18.508646Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:18.517928Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-25T14:59:18.518301Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:18.518396Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Error: "User user1 login denied: too many failed password attempts", at schemeshard: 72057594046678944 2025-06-25T14:59:18.518731Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:18.518813Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Error: "User user1 login denied: too many failed password attempts", at schemeshard: 72057594046678944 2025-06-25T14:59:18.519182Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:18.519381Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 226us result status StatusSuccess 2025-06-25T14:59:18.519879Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuiY5hs+9WUY7JSRGFa9m\n1mx3Di4QRwJefyDQTNJFL+vglDHb0cr5BTNahlSkVcCyazdULvIbGjjdN/Yx5Xv+\n4GKE37KfsAjP3eFAacISwQyUAWKv4SeiOCrPScQpo8iNbM4n4ZA68ztwX7u4J2qK\nQv35Jk36TzcPN9uGF5Q9JQk+jlz72iQjFRxifyB3V/ynSgPK4dBGzdgK8b0wL5fe\nCV5+JL4jErcOvs8L6epjB2HrAdIrj9Ie9R7vFtbxG+bUrNgDVp9T6kD13us70Maf\nhpfI7LyRFkSTdVvUMgy1c/lq2ggzRyarvv3ie9RupUkw1YrPPoIwTFEnSKLShmzO\n7wIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750949958489 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:22.520686Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:22.528626Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-25T14:59:22.529265Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:22.539354Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwOTA2NzYyLCJpYXQiOjE3NTA4NjM1NjIsInN1YiI6InVzZXIxIn0.PmTiACGOl8Zga5Xj3mvTici5UFSF_eB9mSP__324QQromPjX-lrNrnlwSwfP-dbaVezbewGtzXat3Yue9Wh0EQ-3NzLxdyABMC2F1IdwFUH6XKVjYL_kI_sFsovC9DGC2WYHlNd9xOY-zOkbFJUxfMropm_BjxAOFf3yKr2oOoRanpsAljLUIc8hNhffqpC-YMJ7xyF3suH6NlzjfGTDfBfvL6vXMjFsFCkkXGzZj7PuNMGAe-M8_tDkepXIvo5IkWKttO6ooX-KyJOUToBg4Qn5O_qcOOM8Jbuj7-V_IGYsLOt60NSWvSmB-icj1OavmAu2jhLD39DgY0OqDYX2yw" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwOTA2NzYyLCJpYXQiOjE3NTA4NjM1NjIsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-06-25T14:59:22.540091Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:22.540355Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 267us result status StatusSuccess 2025-06-25T14:59:22.540895Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 
PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuiY5hs+9WUY7JSRGFa9m\n1mx3Di4QRwJefyDQTNJFL+vglDHb0cr5BTNahlSkVcCyazdULvIbGjjdN/Yx5Xv+\n4GKE37KfsAjP3eFAacISwQyUAWKv4SeiOCrPScQpo8iNbM4n4ZA68ztwX7u4J2qK\nQv35Jk36TzcPN9uGF5Q9JQk+jlz72iQjFRxifyB3V/ynSgPK4dBGzdgK8b0wL5fe\nCV5+JL4jErcOvs8L6epjB2HrAdIrj9Ie9R7vFtbxG+bUrNgDVp9T6kD13us70Maf\nhpfI7LyRFkSTdVvUMgy1c/lq2ggzRyarvv3ie9RupUkw1YrPPoIwTFEnSKLShmzO\n7wIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750949958489 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> ReadLoad::ShouldReadIterate [GOOD] >> ReadLoad::ShouldReadIterateMoreThanRows >> TFlatTest::AutoSplitMergeQueue [GOOD] >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks [GOOD] >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime |90.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 [GOOD] >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime [GOOD] >> ReadSessionImplTest::PartitionStreamStatus [GOOD] >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::ResetFailedAttemptCountAfterModifyUser [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:15.326387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:15.326458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.326499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 
2025-06-25T14:59:15.326529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:15.326562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:15.326588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:15.326649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.326704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:15.327297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:15.328532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:15.410194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:15.410257Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:15.425929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:15.426281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:15.426415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:15.431540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:15.431791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:15.432391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.432577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:15.435845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.435976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:15.442695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:15.442937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain 
is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:15.442988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:15.443089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.452767Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:15.578360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:15.578566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.578842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:15.578891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:15.579070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:15.579135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:15.581149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.581330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:15.581477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.581548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:15.581605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:15.581660Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:15.583438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-25T14:59:15.583484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:15.583527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:15.585035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.585076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.585128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.585188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:15.588545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:15.590365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:15.590529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:15.591396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.591522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:15.591589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.591851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:15.591897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.592041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:15.592152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 
72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:15.593950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.593990Z node 1 :FLAT_TX_SCHEMESHARD ... 9:22.848896Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-25T14:59:22.848945Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:22.849043Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-25T14:59:22.850842Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T14:59:22.852170Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [5:315:2300] sender: [5:408:2058] recipient: [5:106:2139] Leader for TabletID 72057594046678944 is [5:315:2300] sender: [5:411:2058] recipient: [5:15:2062] Leader for TabletID 72057594046678944 is [5:315:2300] sender: [5:412:2058] recipient: [5:410:2379] Leader for TabletID 72057594046678944 is [5:413:2380] sender: [5:414:2058] recipient: [5:410:2379] 2025-06-25T14:59:22.891466Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:22.891576Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:22.891623Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:22.891661Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:22.891700Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:22.891751Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:22.891818Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:22.891889Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:22.892851Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources# 2025-06-25T14:59:22.893247Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:22.909462Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:22.910951Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:22.911147Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:22.911326Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:22.911359Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:22.911468Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:22.912262Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1393: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:22.912405Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1467: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.912505Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.912949Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.913056Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:59:22.913331Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.913427Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.913533Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.913648Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.913733Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.913886Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.914170Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.914311Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.914701Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.914798Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.914988Z node 5 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.915090Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.915174Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.915440Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.915536Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.915662Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.915900Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.915978Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.916114Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.916176Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.916239Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:22.921689Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:22.923751Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:22.923820Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:22.924602Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:22.924666Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:22.924740Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:22.925017Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [5:413:2380] sender: [5:469:2058] recipient: [5:15:2062] 2025-06-25T14:59:22.984389Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:22.984454Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:47: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-25T14:59:23.046019Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Token: 
"eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwOTA2NzYzLCJpYXQiOjE3NTA4NjM1NjMsInN1YiI6InVzZXIxIn0.N5RLNJgJSNxqlehmnfvK-IyacUmbsys7WcBIIYkN5ICjQE2lKmkfgxh1Ed4ivssLUKGswgWIBYPE__-Rh66WdTPUM1rktzB7Z9w2I3WGci7myvDaSWleTuR8Jya7g3e5j6tszTOWzT7Aki0ob0FGltl6kZHB3VRjtsZmmYFtFXXKW-9KSfsWwlLCWbDPEazXvxMNv3YBx2AM1e6Ywd19uThgxyj9H9HggahZCQoggA4V381OJuQC8uEPH51T3nSr1xrgTodLZt3hwbtynco89FThOZPweOTZ9P7VRjVktGpcqvTvJ-Qlc75GkuqT6k808qsN3NXeA8tZrwohO48C8g" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwOTA2NzYzLCJpYXQiOjE3NTA4NjM1NjMsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-06-25T14:59:23.046151Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:23.046186Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:23.046358Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:23.046402Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:462:2418], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-25T14:59:23.046870Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 0 >> UpsertLoad::ShouldWriteKqpUpsert2 [GOOD] |90.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration [GOOD] |90.8%| [TS] {asan, default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact [GOOD] >> TKesusTest::TestQuoterAccountResourcesForgetClient [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] Test command err: 2025-06-25T14:59:21.796013Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.796094Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.796125Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.796559Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.797125Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.805999Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.806460Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) 2025-06-25T14:59:21.807294Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:59:21.807724Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:59:21.807965Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-25T14:59:21.808089Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:59:21.808190Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.808229Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-06-25T14:59:21.808262Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-25T14:59:21.808285Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-25T14:59:21.809516Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.809541Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.809560Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.809841Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.810273Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.810367Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.810537Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-25T14:59:21.811301Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:59:21.811486Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-25T14:59:21.811724Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-25T14:59:21.811877Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-25T14:59:21.811965Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.811991Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:59:21.812044Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:59:21.812160Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Getting new event 2025-06-25T14:59:21.812187Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T14:59:21.812204Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-25T14:59:21.812225Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:59:21.812346Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 Getting new event 2025-06-25T14:59:21.812417Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-25T14:59:21.812434Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-25T14:59:21.812448Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:59:21.812521Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 Getting new event 2025-06-25T14:59:21.812542Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-25T14:59:21.812559Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-25T14:59:21.812578Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:59:21.812665Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 2025-06-25T14:59:21.813600Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.813635Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.813664Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.813942Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.814368Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.814492Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.814698Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 100 Compressed message data size: 91 2025-06-25T14:59:21.815604Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:59:21.815845Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-25T14:59:21.816121Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-25T14:59:21.816365Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-25T14:59:21.816469Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.816506Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:59:21.816608Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 2). Partition stream id: 1 Getting new event 2025-06-25T14:59:21.816641Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:59:21.816667Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:59:21.816729Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [2, 3). Partition stream id: 1 Getting new event 2025-06-25T14:59:21.816754Z :DEBUG: Take Data. Partition 1. 
Read: {0, 2} (3-3) 2025-06-25T14:59:21.816770Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-25T14:59:21.816821Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 4). Partition stream id: 1 Getting new event 2025-06-25T14:59:21.816841Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-25T14:59:21.816866Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStream ... tream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 190 SeqNo: 231 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 191 SeqNo: 232 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 192 SeqNo: 233 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 193 SeqNo: 234 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 194 SeqNo: 235 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 195 SeqNo: 236 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 196 SeqNo: 237 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 197 SeqNo: 238 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 198 SeqNo: 239 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 199 SeqNo: 240 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 200 SeqNo: 241 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:59:23.700639Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 201). Partition stream id: 1 2025-06-25T14:59:23.762133Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-25T14:59:23.762193Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-25T14:59:23.762261Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:23.762637Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:23.763160Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:23.763431Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-25T14:59:23.763859Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 1000000 Compressed message data size: 3028 Post function Getting new event 2025-06-25T14:59:23.855763Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-10) 2025-06-25T14:59:23.856696Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:23.862313Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T14:59:23.865178Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T14:59:23.866025Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-25T14:59:23.870463Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-25T14:59:23.871133Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-25T14:59:23.871905Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (7-7) 2025-06-25T14:59:23.872695Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (8-8) 2025-06-25T14:59:23.881776Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (9-9) 2025-06-25T14:59:23.882757Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (10-10) 2025-06-25T14:59:23.882806Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 10, size 10000000 bytes 2025-06-25T14:59:23.883093Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 9 SeqNo: 50 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 51 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-25T14:59:23.887473Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 11). Partition stream id: 1 2025-06-25T14:59:23.895285Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:23.895319Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:23.895374Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:23.895663Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:23.896090Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:23.896241Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:23.896533Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:23.897111Z :DEBUG: [db] [sessionid] [cluster] Requesting status for partition stream id: 1 2025-06-25T14:59:23.898134Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:23.898157Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:23.898185Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:23.898642Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:23.898943Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:23.899013Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:23.899496Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:23.899608Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:59:23.899678Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:23.899708Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-25T14:59:23.899831Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchSize >> TSchemeshardStatsBatchingTest::ShouldNotBatchWhenDisabled >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchTimeout >> TSchemeshardStatsBatchingTest::PeriodicTopicStatsReload ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestQuoterAccountResourcesForgetClient [GOOD] Test command err: 2025-06-25T14:59:10.243282Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.243425Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.263603Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.263721Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.289055Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.299092Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:136:2160], cookie=17448043411617787901, path="/Res", config={ MaxUnitsPerSecond: -100 }) 2025-06-25T14:59:10.299267Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:136:2160], cookie=17448043411617787901) 2025-06-25T14:59:10.299778Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:143:2165], cookie=3617373690690147092, path="/ResWithoutMaxUnitsPerSecond", config={ }) 2025-06-25T14:59:10.299896Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:143:2165], cookie=3617373690690147092) 2025-06-25T14:59:10.300349Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:146:2168], cookie=4190638812873834760, path="/ResWithMaxUnitsPerSecond", config={ MaxUnitsPerSecond: 1 }) 2025-06-25T14:59:10.300544Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "ResWithMaxUnitsPerSecond" 2025-06-25T14:59:10.312549Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:146:2168], cookie=4190638812873834760) 2025-06-25T14:59:10.313074Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[1:151:2173], cookie=5850419955370993863, path="/ResWithMaxUnitsPerSecond/ChildWithoutMaxUnitsPerSecond", config={ }) 2025-06-25T14:59:10.313328Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "ResWithMaxUnitsPerSecond/ChildWithoutMaxUnitsPerSecond" 2025-06-25T14:59:10.325442Z node 1 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[1:151:2173], cookie=5850419955370993863) 2025-06-25T14:59:10.740075Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.740179Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.756554Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.757100Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] 
TTxInit::Execute 2025-06-25T14:59:10.780332Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.780673Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[2:136:2160], cookie=11466818149604904607, path="/Root", config={ MaxUnitsPerSecond: 100 PrefetchCoefficient: 300 }) 2025-06-25T14:59:10.780947Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-25T14:59:10.792595Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[2:136:2160], cookie=11466818149604904607) 2025-06-25T14:59:10.793207Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[2:146:2168], cookie=1593215272341265924, path="/Root/Res", config={ }) 2025-06-25T14:59:10.793453Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res" 2025-06-25T14:59:10.805492Z node 2 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[2:146:2168], cookie=1593215272341265924) 2025-06-25T14:59:10.807385Z node 2 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [2:151:2173]. Cookie: 18409153023478025035. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 300 } AccountingConfig { ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 2 Version: "version" Schema: "schema" CloudId: "cloud" FolderId: "folder" ResourceId: "resource" SourceId: "source" Tags { key: "key" value: "value" } } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:10.807465Z node 2 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[2:151:2173], cookie=18409153023478025035) 2025-06-25T14:59:10.808020Z node 2 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [2:151:2173]. Cookie: 7136367838666575273. 
Data: { ResourcesInfo { ResourceId: 2 AcceptedUs: 29000 } } 2025-06-25T14:59:10.808074Z node 2 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[2:151:2173], cookie=7136367838666575273) 2025-06-25T14:59:12.959574Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:12.959684Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:12.980026Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:12.980633Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:13.004519Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:13.004942Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[3:136:2160], cookie=926164825218861828, path="/Root", config={ MaxUnitsPerSecond: 300 PrefetchCoefficient: 1 }) 2025-06-25T14:59:13.005227Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-25T14:59:13.017061Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[3:136:2160], cookie=926164825218861828) 2025-06-25T14:59:13.017632Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[3:146:2168], cookie=13431119320018031047, path="/Root/Res", config={ }) 2025-06-25T14:59:13.017835Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 2 "Root/Res" 2025-06-25T14:59:13.030452Z node 3 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[3:146:2168], cookie=13431119320018031047) 2025-06-25T14:59:13.031290Z node 3 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [3:151:2173]. Cookie: 773062608851252329. Data: { Results { ResourceId: 2 Error { Status: SUCCESS } EffectiveProps { ResourceId: 2 ResourcePath: "Root/Res" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { BillingPeriodSec: 2 } OnDemand { BillingPeriodSec: 2 } Overshoot { BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:13.031356Z node 3 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[3:151:2173], cookie=773062608851252329) 2025-06-25T14:59:13.031916Z node 3 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [3:151:2173]. Cookie: 14220467227779256588. 
Data: { ResourcesInfo { ResourceId: 2 AcceptedUs: 1019000 } } 2025-06-25T14:59:13.031970Z node 3 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[3:151:2173], cookie=14220467227779256588) 2025-06-25T14:59:15.223724Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:15.223836Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:15.240867Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:15.240985Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:15.255164Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:15.255613Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[4:134:2158], cookie=14666431790646371141, path="/Root", config={ MaxUnitsPerSecond: 300 PrefetchCoefficient: 1 }) 2025-06-25T14:59:15.255910Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-25T14:59:15.281087Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[4:134:2158], cookie=14666431790646371141) 2025-06-25T14:59:15.281898Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [4:144:2166]. Cookie: 12092764712774978228. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedUnitsPerSecond: 100 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { Enabled: true BillingPeriodSec: 2 } OnDemand { Enabled: true BillingPeriodSec: 2 } Overshoot { Enabled: true BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:15.281957Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[4:144:2166], cookie=12092764712774978228) 2025-06-25T14:59:15.282488Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [4:144:2166]. Cookie: 17409517582884564699. Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 1016500 } } 2025-06-25T14:59:15.282552Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[4:144:2166], cookie=17409517582884564699) 2025-06-25T14:59:15.282925Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [4:144:2166]. Cookie: 3436404063791298386. 
Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 1016500 } } 2025-06-25T14:59:15.282966Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[4:144:2166], cookie=3436404063791298386) 2025-06-25T14:59:17.447388Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:17.447505Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:17.467010Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:17.467152Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:17.491380Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:17.491834Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[5:136:2160], cookie=8632970714881946560, path="/Root", config={ MaxUnitsPerSecond: 300 PrefetchCoefficient: 1 }) 2025-06-25T14:59:17.492158Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-25T14:59:17.504227Z node 5 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[5:136:2160], cookie=8632970714881946560) 2025-06-25T14:59:17.505039Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:146:2168]. Cookie: 4867948897931877116. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedUnitsPerSecond: 100 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { Enabled: true BillingPeriodSec: 2 } OnDemand { Enabled: true BillingPeriodSec: 2 } Overshoot { Enabled: true BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:17.505096Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:146:2168], cookie=4867948897931877116) 2025-06-25T14:59:17.505525Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [5:146:2168]. Cookie: 9394379486571144125. Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 3000000 } } 2025-06-25T14:59:17.505568Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[5:146:2168], cookie=9394379486571144125) 2025-06-25T14:59:19.936595Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:185:2192]. Cookie: 16289193704237874259. 
Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedUnitsPerSecond: 100 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { Enabled: true BillingPeriodSec: 2 } OnDemand { Enabled: true BillingPeriodSec: 2 } Overshoot { Enabled: true BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:19.936670Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:185:2192], cookie=16289193704237874259) 2025-06-25T14:59:19.937079Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [5:185:2192]. Cookie: 10458251773876224030. Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 9000000 } } 2025-06-25T14:59:19.937120Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[5:185:2192], cookie=10458251773876224030) 2025-06-25T14:59:21.995424Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [5:218:2218]. Cookie: 18197925261976275238. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 300 MaxBurstSizeCoefficient: 1 Weight: 1 PrefetchCoefficient: 1 } AccountingConfig { Enabled: true ReportPeriodMs: 1000 AccountPeriodMs: 1000 CollectPeriodSec: 2 ProvisionedUnitsPerSecond: 100 ProvisionedCoefficient: 1 OvershootCoefficient: 1 Provisioned { Enabled: true BillingPeriodSec: 2 } OnDemand { Enabled: true BillingPeriodSec: 2 } Overshoot { Enabled: true BillingPeriodSec: 2 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:21.995466Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[5:218:2218], cookie=18197925261976275238) 2025-06-25T14:59:21.995847Z node 5 :KESUS_TABLET TRACE: quoter_runtime.cpp:224: [72057594037927937] Send TEvAccountResourcesAck to [5:218:2218]. Cookie: 16934191868462261843. Data: { ResourcesInfo { ResourceId: 1 AcceptedUs: 15000000 } } 2025-06-25T14:59:21.995882Z node 5 :KESUS_TABLET DEBUG: quoter_runtime.cpp:229: [72057594037927937] Account quoter resources (sender=[5:218:2218], cookie=16934191868462261843) >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteKqpUpsert2 [GOOD] Test command err: 2025-06-25T14:59:21.153292Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:59:21.153414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:59:21.153466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a2b/r3tmp/tmpM7Rw6A/pdisk_1.dat 2025-06-25T14:59:21.524507Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:59:21.532729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:21.586819Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:21.598429Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863559006505 != 1750863559006509 2025-06-25T14:59:21.646057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:21.646182Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:21.659197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:21.752712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:22.102317Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertKqpStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "JustTable" } UpsertKqpStart { RowCount: 20 Inflight: 5 } 2025-06-25T14:59:22.102466Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:298: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 2} Bootstrap called: RowCount: 20 Inflight: 5 2025-06-25T14:59:22.119926Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:361: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 2} started# 5 actors each with inflight# 4 2025-06-25T14:59:22.120023Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 1} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-25T14:59:22.120094Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 2} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-25T14:59:22.120123Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: 
[1:696:2578], subTag: 3} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-25T14:59:22.120149Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 4} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-25T14:59:22.120212Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 5} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-25T14:59:22.128421Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 1} session: ydb://session/3?node_id=1&id=ODAxMjk4NWUtZWVjYTk5My0zYzljMzZlMi0yZGFhNTVkNA== 2025-06-25T14:59:22.132213Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 2} session: ydb://session/3?node_id=1&id=OTY4MDI2NS0yMWM4MzZjNi01NmI2OTc1Yi02N2NiY2UxZA== 2025-06-25T14:59:22.132283Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 3} session: ydb://session/3?node_id=1&id=NmVkMWEyZTctNzYyMzUyMmEtYWFmNGM1ZGUtNDFlNGRiMTk= 2025-06-25T14:59:22.133797Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 4} session: ydb://session/3?node_id=1&id=NjE5MWYzYTQtZjdmNTU4MTctZmZiOWI5ZTgtZWM4ZTE1NTk= 2025-06-25T14:59:22.135477Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 5} session: ydb://session/3?node_id=1&id=Y2NhZTlmY2YtZmMzYjAwZDEtZDdhYjI3OGItNjZhZDlmYjc= 2025-06-25T14:59:22.142272Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:709:2591], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:22.142411Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:733:2609], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:22.142504Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:734:2610], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:22.142577Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:735:2611], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:22.142623Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:736:2612], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:22.142666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:738:2614], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:22.142758Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:22.157561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:22.211248Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:754:2630] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:59:22.212138Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:755:2631] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:59:22.215167Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:756:2632] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:59:22.216429Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:757:2633] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T14:59:22.260571Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:22.391365Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:59:22.391479Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:746:2622], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:59:22.391531Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:747:2623], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:59:22.391638Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:748:2624], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:59:22.391699Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:749:2625], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:59:22.433304Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:851:2692] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:23.473593Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 5} finished in 1750863563.473542s, errors=0 2025-06-25T14:59:23.474009Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 5 { Tag: 5 DurationMs: 1750863563473 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:59:23.488140Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:924:2730] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:23.552654Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 4} finished in 1750863563.552608s, errors=0 2025-06-25T14:59:23.552970Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 4 { Tag: 4 DurationMs: 1750863563552 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:59:23.566718Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:975:2752] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:23.630122Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 3} finished in 1750863563.630077s, errors=0 2025-06-25T14:59:23.630432Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 3 { Tag: 3 DurationMs: 1750863563630 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:59:23.644142Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1026:2774] txid# 281474976715678, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:23.709853Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:696:2578], subTag: 2} finished in 1750863563.709803s, errors=0 2025-06-25T14:59:23.710009Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 2 { Tag: 2 DurationMs: 1750863563709 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:59:23.723868Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1077:2796] txid# 281474976715683, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:23.787533Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: 
[1:696:2578], subTag: 1} finished in 1750863563.787493s, errors=0 2025-06-25T14:59:23.787930Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:695:2577], subTag: 2} finished: 1 { Tag: 1 DurationMs: 1750863563787 OperationsOK: 4 OperationsError: 0 } 2025-06-25T14:59:23.788008Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:395: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:695:2577], subTag: 2} finished in 1.668328s, oks# 20, errors# 0 2025-06-25T14:59:23.788182Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:696:2578] with tag# 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::AutoSplitMergeQueue [GOOD] Test command err: 2025-06-25T14:58:08.581473Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901424178895824:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:08.581533Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002084/r3tmp/tmpI1nW0j/pdisk_1.dat 2025-06-25T14:58:09.037744Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:09.040788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:09.040860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:09.055634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18759 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:09.312356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-25T14:58:09.354540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1750863489466 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "String" TypeId: 4097 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... (TRUNCATED) A-0 2025-06-25T14:58:09.601774Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:09.862858Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.9, eph 1} end=Done, 2 blobs 1r (max 1), put Spent{time=0.022s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (6291598 0 0)b }, ecr=1.000 B-0 2025-06-25T14:58:09.896465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 6291502 rowCount 1 cpuUsage 0 2025-06-25T14:58:09.912778Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 2 blobs 1r (max 1), put Spent{time=0.024s,wait=0.001s,interrupts=1} Part{ 1 pk, lobs 0 +0, (6291598 0 0)b }, ecr=1.000 2025-06-25T14:58:09.917026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 6291502 rowCount 1 cpuUsage 0 2025-06-25T14:58:09.997430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-25T14:58:09.997562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046644480:1 data size 6291502 row count 1 2025-06-25T14:58:09.997670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037888 maps to shardIdx: 72057594046644480:1 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], pathId map=T1, is column=0, is olap=0, RowCount 1, DataSize 6291502 2025-06-25T14:58:09.997813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037888 2025-06-25T14:58:09.998445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 A-1 2025-06-25T14:58:10.291672Z 
node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.14, eph 2} end=Done, 2 blobs 1r (max 1), put Spent{time=0.043s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (6291598 0 0)b }, ecr=1.000 2025-06-25T14:58:10.307585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 12583004 rowCount 2 cpuUsage 0 2025-06-25T14:58:10.327188Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 3 blobs 2r (max 2), put Spent{time=0.024s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (12583142 0 0)b }, ecr=1.000 2025-06-25T14:58:10.330535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 12583020 rowCount 2 cpuUsage 0 2025-06-25T14:58:10.412401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-25T14:58:10.412507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046644480:1 data size 12583020 row count 2 2025-06-25T14:58:10.412556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037888 maps to shardIdx: 72057594046644480:1 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], pathId map=T1, is column=0, is olap=0, RowCount 2, DataSize 12583020 2025-06-25T14:58:10.412628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037888 2025-06-25T14:58:10.419738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 B-1 2025-06-25T14:58:10.629932Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.19, eph 3} end=Done, 2 blobs 1r (max 1), put Spent{time=0.033s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (6291598 0 0)b }, ecr=1.000 2025-06-25T14:58:10.658964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 18874522 rowCount 3 cpuUsage 0 2025-06-25T14:58:10.700989Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.21, eph 3} end=Done, 4 blobs 3r (max 3), put Spent{time=0.065s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (18874688 0 0)b }, ecr=1.000 2025-06-25T14:58:10.760383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-25T14:58:10.760485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046644480:1 data size 18874522 row count 3 2025-06-25T14:58:10.760531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037888 maps to shardIdx: 72057594046644480:1 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], pathId map=T1, is column=0, is olap=0, RowCount 3, DataSize 18874522 2025-06-25T14:58:10.760657Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__background_compaction.cpp:31: [BackgroundCompaction] [Start] Compacting for pathId# [OwnerId: 72057594046644480, LocalPathId: 3], datashard# 72075186224037888, compactionInfo# {72057594046644480:1, SH# 2, Rows# 3, Deletes# 0, Compaction# 1970-01-01T00:00:00.000000Z}, next wakeup in# 0.000000s, rate# 5.787037037e-06, in queue# 1 shards, waiting after compaction# 0 shards, running# 0 shards at schemeshard 72057594046644480 2025-06-25T14:58:10.764428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:84: Operation queue set wakeup after delta# 599 seconds 2025-06-25T14:58:10.764493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037888 2025-06-25T14:58:10.764618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-25T14:58:10.792430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_compaction.cpp:112: [BackgroundCompaction] [Finished] Compaction completed for pathId# [OwnerId: 72057594046644480, LocalPathId: 3], datashard# 72075186224037888, shardIdx# 72057594046644480:1 in# 31 ms, with status# 1, next wakeup in# 599.968199s, rate# 5.787037037e-06, in queue# 1 shards, waiting after compaction# 1 shards, running# 0 shards at schemeshard 72057594046644480 2025-06-25T14:58:10.792479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037888 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 18874538 rowCount 3 cpuUsage 0 2025-06-25T14:58:10.892411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-25T14:58:10.892547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shar ... @builtin" ACL: "" EffectiveACL: "" PathVersion: 28 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 28 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 26 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... 
(TRUNCATED) 2025-06-25T14:59:17.607447Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037928 not found 2025-06-25T14:59:17.607709Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037927 not found TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863535463 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 29 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 29 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 27 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... (TRUNCATED) TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863535463 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 29 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 29 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 27 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... (TRUNCATED) TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863535463 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 29 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 29 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 27 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... 
(TRUNCATED) 2025-06-25T14:59:20.234583Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037921 not found TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863535463 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 30 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 30 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 28 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... (TRUNCATED) 2025-06-25T14:59:21.310086Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037932 not found 2025-06-25T14:59:21.310135Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037931 not found 2025-06-25T14:59:21.310156Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037933 not found 2025-06-25T14:59:21.310824Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037924 not found 2025-06-25T14:59:21.387118Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037934 not found 2025-06-25T14:59:21.387653Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037935 not found 2025-06-25T14:59:21.650648Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037929 not found 2025-06-25T14:59:21.657261Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037930 not found 2025-06-25T14:59:21.746077Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037940 not found TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863535463 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 36 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 36 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 34 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... 
(TRUNCATED) 2025-06-25T14:59:22.691868Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037936 not found 2025-06-25T14:59:22.691912Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037937 not found 2025-06-25T14:59:22.728698Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037939 not found 2025-06-25T14:59:22.728800Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037938 not found 2025-06-25T14:59:22.821578Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037943 not found 2025-06-25T14:59:22.821613Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037944 not found 2025-06-25T14:59:22.848276Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037941 not found 2025-06-25T14:59:22.848323Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037942 not found TClient::Ls request: /dc-1/Dir/T1 2025-06-25T14:59:22.959043Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037946 not found 2025-06-25T14:59:22.959082Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037945 not found TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863535463 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 41 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 41 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 39 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... (TRUNCATED) TClient::Ls request: /dc-1/Dir/T1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "T1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863535463 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 41 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 41 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 39 } ChildrenExist: false } Table { Name: "T1" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyCol... 
(TRUNCATED) >> TSchemeshardStatsBatchingTest::PeriodicTopicStatsReload [GOOD] >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsert2 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::PeriodicTopicStatsReload [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:25.236889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:25.236983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:25.237020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:25.237057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:25.238652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:25.238696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:25.238800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:25.238873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:25.239627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:25.241597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:25.316150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:25.316194Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:25.330390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:25.330749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:25.330890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:25.336592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:25.336865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: 
Clear TempDirsState with owners number: 0 2025-06-25T14:59:25.337444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.337693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:25.343878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:25.349200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:25.356917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:25.356994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:25.357161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:25.357217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:25.357274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:25.357363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.362717Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:25.477349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:25.478420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.479564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:25.479623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:25.480479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:25.480535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called 
at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:25.482897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.483551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:25.483738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.483804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:25.483846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:25.483892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:25.485638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.485694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:25.485776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:25.487100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.487145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.487218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.487290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:25.491303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:25.493053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:25.493219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:25.494131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.494251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:25.494298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.494901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:25.494956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.495114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:25.495203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:25.496914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:25.496946Z node 1 :FLAT_TX_SCHEMESHARD ... : TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.902261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.902394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.902466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.902518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.902653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.902715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.902814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.902972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.903019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.903109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.903150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: 
SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.903194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.903348Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:59:25.907000Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:59:25.907109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:25.907805Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435083, Sender [1:572:2500], Recipient [1:572:2500]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-25T14:59:25.907838Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5018: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-25T14:59:25.908815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:25.908872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:25.908985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:25.909015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:25.909039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:25.909072Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:59:25.909203Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274399233, Sender [1:608:2500], Recipient [1:572:2500]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-25T14:59:25.909251Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5109: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-25T14:59:25.909276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:572:2500] sender: [1:628:2058] recipient: [1:15:2062] 2025-06-25T14:59:25.972293Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:627:2542], Recipient [1:572:2500]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-25T14:59:25.972374Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:59:25.972482Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: 
false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:25.972721Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 185us result status StatusSuccess 2025-06-25T14:59:25.973148Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 1 WriteSpeedInBytesPerSecond: 7 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 7 AccountSize: 17 DataSize: 17 UsedReserveSize: 7 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:25.973735Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271188001, Sender [1:629:2543], Recipient [1:572:2500]: NKikimrPQ.TEvPeriodicTopicStats PathId: 2 Generation: 1 Round: 96 DataSize: 19 UsedReserveSize: 7 2025-06-25T14:59:25.973786Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4993: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-25T14:59:25.973831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 19 UsedReserveSize 7 2025-06-25T14:59:25.973872Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-06-25T14:59:25.973929Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.000000s, queue# 1 2025-06-25T14:59:25.974144Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:630:2544], Recipient 
[1:572:2500]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-25T14:59:25.974176Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:59:25.974258Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:25.974419Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 165us result status StatusSuccess 2025-06-25T14:59:25.974786Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 1 WriteSpeedInBytesPerSecond: 7 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 7 AccountSize: 17 DataSize: 17 UsedReserveSize: 7 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeInvalid [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTable >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedExport >> BackupRestoreS3::TestAllPrimitiveTypes-PRIMITIVE_TYPE_ID_UNSPECIFIED [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT8 >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeInvalid [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeDir [GOOD] >> 
BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypePersQueueGroup [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeRtmrVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeBlockStoreVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeKesus [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeExtSubDomain [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeFileStore [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeColumnStore [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeColumnTable [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeCdcStream >> TCmsTest::CollectInfo >> BackupPathTest::ExportWholeDatabase >> BackupRestore::TestAllIndexTypes-EIndexTypeInvalid [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree >> TMaintenanceApiTest::SingleCompositeActionGroup >> TCmsTest::ActionIssuePartialPermissions >> BackupPathTest::RecursiveDirectoryPlusExplicitTable >> TCmsTenatsTest::TestTenantRatioLimit >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTable >> TCmsTest::StateStorageNodesFromOneRing >> TCmsTest::ManagePermissions >> TMaintenanceApiTest::ManyActionGroupsWithSingleAction >> TCmsTest::VDisksEvictionShouldFailWhileSentinelIsDisabled >> TCmsTest::TestForceRestartMode >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeDir >> BackupRestoreS3::RestoreTablePartitioningSettings ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minstep/unittest >> TDataShardMinStepTest::TestDropTablePlanComesNotTooEarlyRW-VolatileTxs [GOOD] Test command err: 2025-06-25T14:59:13.775877Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:59:13.776054Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:59:13.776110Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00114c/r3tmp/tmpGf5AnZ/pdisk_1.dat 2025-06-25T14:59:14.169432Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:59:14.175117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-25T14:59:14.182564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:59:14.184026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:14.186391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T14:59:14.191643Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 1 HANDLE EvProposeTransaction marker# C0 2025-06-25T14:59:14.191700Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 1 step# 500 Status# 16 SEND to# [1:373:2367] Proxy marker# C1 2025-06-25T14:59:14.220068Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:14.220145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:14.230833Z node 1 :HIVE DEBUG: hive_impl.cpp:2275: HIVE#72057594037968897 Merged config: { } 2025-06-25T14:59:14.231155Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863551187351 != 1750863551187355 2025-06-25T14:59:14.283748Z node 1 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037968897 Handle TEvLocal::TEvRegisterNode from [1:296:2336] HiveId: 72057594037968897 ServicedDomains { SchemeShard: 72057594046644480 PathId: 1 } TabletAvailability { Type: Mediator Priority: 0 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: KeyValue Priority: 0 } TabletAvailability { Type: Coordinator Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } TabletAvailability { Type: SchemeShard Priority: 0 } TabletAvailability { Type: DataShard Priority: 0 } TabletAvailability { Type: PersQueue Priority: 0 } TabletAvailability { Type: PersQueueReadBalancer Priority: 0 } TabletAvailability { Type: Kesus Priority: 0 } TabletAvailability { Type: SysViewProcessor Priority: 0 } TabletAvailability { Type: ColumnShard Priority: 0 } 
TabletAvailability { Type: SequenceShard Priority: 0 } TabletAvailability { Type: ReplicationController Priority: 0 } TabletAvailability { Type: StatisticsAggregator Priority: 0 } 2025-06-25T14:59:14.283879Z node 1 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Execute 2025-06-25T14:59:14.283990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:14.284030Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-25T14:59:14.284061Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:59:14.284092Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-25T14:59:14.284118Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:59:14.284186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:14.284381Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-25T14:59:14.284416Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-25T14:59:14.284460Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-25T14:59:14.284492Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-25T14:59:14.284611Z node 1 :HIVE DEBUG: hive_impl.cpp:808: HIVE#72057594037968897 TEvInterconnect::TEvNodeInfo NodeId 1 Location DataCenter: "1" Module: "1" Rack: "1" Unit: "1" 2025-06-25T14:59:14.295352Z node 1 :HIVE DEBUG: tx__register_node.cpp:95: HIVE#72057594037968897 THive::TTxRegisterNode(1)::Complete 2025-06-25T14:59:14.296558Z node 1 :HIVE DEBUG: node_info.cpp:373: HIVE#72057594037968897 Node(1) Ping([1:296:2336]) 2025-06-25T14:59:14.296687Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-25T14:59:14.297243Z node 1 :HIVE DEBUG: hive_impl.cpp:737: HIVE#72057594037968897 THive::Handle::TEvSyncTablets 2025-06-25T14:59:14.297346Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:41: HIVE#72057594037968897 THive::TTxSyncTablets([1:296:2336])::Execute 2025-06-25T14:59:14.297406Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:59:14.297484Z node 1 :HIVE DEBUG: tx__sync_tablets.cpp:130: HIVE#72057594037968897 THive::TTxSyncTablets([1:296:2336])::Complete 2025-06-25T14:59:14.297654Z node 1 :HIVE DEBUG: hive_impl.cpp:731: HIVE#72057594037968897 Handle TEvLocal::TEvStatus for Node 1: Status: 0 StartTime: 0 ResourceMaximum { Memory: 270443331584 } 2025-06-25T14:59:14.297716Z node 1 :HIVE DEBUG: tx__status.cpp:22: HIVE#72057594037968897 THive::TTxStatus(1)::Execute 2025-06-25T14:59:14.297763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:14.297916Z node 1 :HIVE DEBUG: hive_impl.cpp:2791: HIVE#72057594037968897 AddRegisteredDataCentersNode(1, 1) 2025-06-25T14:59:14.297974Z node 1 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037968897 ProcessWaitQueue (0) 2025-06-25T14:59:14.298012Z node 1 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037968897 ProcessBootQueue (0) 2025-06-25T14:59:14.298172Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:18: 
HIVE#72057594037968897 THive::TTxProcessBootQueue()::Execute 2025-06-25T14:59:14.298206Z node 1 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037968897 Handle ProcessBootQueue (size: 0) 2025-06-25T14:59:14.298263Z node 1 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037968897 Handle ProcessWaitQueue (size: 0) 2025-06-25T14:59:14.298299Z node 1 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037968897 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-25T14:59:14.309860Z node 1 :HIVE DEBUG: tx__status.cpp:65: HIVE#72057594037968897 THive::TTxStatus(1)::Complete 2025-06-25T14:59:14.309928Z node 1 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037968897 THive::TTxProcessBootQueue()::Complete 2025-06-25T14:59:14.372104Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 1 has been planned 2025-06-25T14:59:14.372202Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 1 for mediator 72057594046382081 tablet 72057594046644480 2025-06-25T14:59:14.372558Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 1000 in 0.500000s at 0.950000s 2025-06-25T14:59:14.372905Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 500, txid# 1 marker# C2 2025-06-25T14:59:14.372968Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 1 stepId# 500 Status# 17 SEND EvProposeTransactionStatus to# [1:373:2367] Proxy 2025-06-25T14:59:14.373852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:59:14.378032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T14:59:14.378118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2025-06-25T14:59:14.378700Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-25T14:59:14.378800Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-06-25T14:59:14.378848Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 acknowledged 2025-06-25T14:59:14.378885Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:6] persistent tx 1 acknowledged 2025-06-25T14:59:14.379709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 1, subscribers: 1 2025-06-25T14:59:14.389469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:423: TCreateTable Propose, path: /Root/table-1, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T14:59:14.394119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T14:59:14.394255Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:14.395173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/table-1 2025-06-25T14:59:14.401568Z node 1 :HIVE DEBUG: hive_impl.cpp:34: HIVE#72057594037968897 Handle TEvHive::TEvCreateTablet(DataShard(72057594046644480,1)) 2025-06-25T14:59:14.414690Z node 1 :HIVE DEBUG: tx__create_tablet.cpp:200: HIVE#72057594037968897 THive::TTxCreateTablet::Execute Owner: 72057594046644480 OwnerIdx: 1 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 2 BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } BindedChannels { StoragePoolName: "/Root:test" } AllowedDomains { SchemeShard: 72057594046644480 PathId: 1 ... 16 SEND to# [2:373:2367] Proxy marker# C1 2025-06-25T14:59:26.122057Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 281474976715665 has been planned 2025-06-25T14:59:26.122156Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 2025-06-25T14:59:26.122196Z node 2 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 2025-06-25T14:59:26.122428Z node 2 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 33500 in 0.500000s at 33.450000s 2025-06-25T14:59:26.122737Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 33000, txid# 281474976715665 marker# C2 2025-06-25T14:59:26.122791Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 281474976715665 stepId# 33000 Status# 17 SEND EvProposeTransactionStatus to# [2:373:2367] Proxy 2025-06-25T14:59:26.123112Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 33000, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T14:59:26.123551Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715665 at step 33000 at tablet 72075186224037889 { Transactions { TxId: 281474976715665 AckTo { RawX1: 0 RawX2: 0 } } Step: 33000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-06-25T14:59:26.123590Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:59:26.123750Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:59:26.123779Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 1 2025-06-25T14:59:26.123814Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [33000:281474976715665] in PlanQueue unit at 72075186224037889 2025-06-25T14:59:26.123959Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 
72075186224037889 loaded tx from db 33000:281474976715665 keys extracted: 0 2025-06-25T14:59:26.124060Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T14:59:26.124229Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T14:59:26.124278Z node 2 :TX_DATASHARD INFO: drop_table_unit.cpp:72: Trying to DROP TABLE at 72075186224037889 2025-06-25T14:59:26.124604Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T14:59:26.125884Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 33000} 2025-06-25T14:59:26.125934Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:59:26.126070Z node 2 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-25T14:59:26.126139Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-06-25T14:59:26.126176Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 for mediator 72057594046382081 tablet 72075186224037889 removed=1 2025-06-25T14:59:26.126208Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 for mediator 72057594046382081 acknowledged 2025-06-25T14:59:26.126239Z node 2 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:49] persistent tx 281474976715665 acknowledged 2025-06-25T14:59:26.126568Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T14:59:26.126619Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [33000 : 281474976715665] from 72075186224037889 at tablet 72075186224037889 send result to client [2:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T14:59:26.126663Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715665 state PreOffline TxInFly 0 2025-06-25T14:59:26.126752Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T14:59:26.127401Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1105: All parts have reached barrier, tx: 281474976715665, done: 0, blocked: 1 2025-06-25T14:59:26.130082Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715665:0 2025-06-25T14:59:26.130191Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976715665, publications: 2, subscribers: 1 2025-06-25T14:59:26.130582Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715665 datashard 72075186224037889 state PreOffline 2025-06-25T14:59:26.130629Z node 2 :TX_DATASHARD DEBUG: 
datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-25T14:59:26.131193Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715665, subscribers: 1 2025-06-25T14:59:26.131593Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T14:59:26.132271Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MTVjYzg1MjEtMTU4ZjIwYTctMzQzMWY4LTI5MTVmMjkz 2025-06-25 14:59:26.132 INFO ydb-core-tx-datashard-ut_minstep(pid=663319, tid=0x00007F49C8653D40) [core exec] yql_execution.cpp:133: Completed async execution for node #42 2025-06-25T14:59:26.132428Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MTVjYzg1MjEtMTU4ZjIwYTctMzQzMWY4LTI5MTVmMjkz 2025-06-25 14:59:26.132 INFO ydb-core-tx-datashard-ut_minstep(pid=663319, tid=0x00007F49C8653D40) [core exec] yql_execution.cpp:153: State is ExecutionComplete after apply async changes for node #42 2025-06-25T14:59:26.132538Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MTVjYzg1MjEtMTU4ZjIwYTctMzQzMWY4LTI5MTVmMjkz 2025-06-25 14:59:26.132 INFO ydb-core-tx-datashard-ut_minstep(pid=663319, tid=0x00007F49C8653D40) [core exec] yql_execution.cpp:59: Begin, root #43 2025-06-25T14:59:26.132591Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MTVjYzg1MjEtMTU4ZjIwYTctMzQzMWY4LTI5MTVmMjkz 2025-06-25 14:59:26.132 INFO ydb-core-tx-datashard-ut_minstep(pid=663319, tid=0x00007F49C8653D40) [core exec] yql_execution.cpp:72: Collect unused nodes for root #43, status: Ok 2025-06-25T14:59:26.132645Z node 2 :KQP_YQL TRACE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MTVjYzg1MjEtMTU4ZjIwYTctMzQzMWY4LTI5MTVmMjkz 2025-06-25 14:59:26.132 TRACE ydb-core-tx-datashard-ut_minstep(pid=663319, tid=0x00007F49C8653D40) [core exec] yql_execution.cpp:387: {0}, callable #43 2025-06-25T14:59:26.132737Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MTVjYzg1MjEtMTU4ZjIwYTctMzQzMWY4LTI5MTVmMjkz 2025-06-25 14:59:26.132 INFO ydb-core-tx-datashard-ut_minstep(pid=663319, tid=0x00007F49C8653D40) [core exec] yql_execution.cpp:577: Node #43 finished execution 2025-06-25T14:59:26.132800Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MTVjYzg1MjEtMTU4ZjIwYTctMzQzMWY4LTI5MTVmMjkz 2025-06-25 14:59:26.132 INFO ydb-core-tx-datashard-ut_minstep(pid=663319, tid=0x00007F49C8653D40) [core exec] yql_execution.cpp:594: Node #43 created 0 trackable nodes: 2025-06-25T14:59:26.132846Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MTVjYzg1MjEtMTU4ZjIwYTctMzQzMWY4LTI5MTVmMjkz 2025-06-25 14:59:26.132 INFO ydb-core-tx-datashard-ut_minstep(pid=663319, tid=0x00007F49C8653D40) [core exec] yql_execution.cpp:87: Finish, output #43, status: Ok 2025-06-25T14:59:26.132891Z node 2 :KQP_YQL INFO: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MTVjYzg1MjEtMTU4ZjIwYTctMzQzMWY4LTI5MTVmMjkz 2025-06-25 14:59:26.132 INFO ydb-core-tx-datashard-ut_minstep(pid=663319, tid=0x00007F49C8653D40) [core exec] yql_execution.cpp:93: Creating finalizing transformer, output #43 2025-06-25T14:59:26.133033Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MTVjYzg1MjEtMTU4ZjIwYTctMzQzMWY4LTI5MTVmMjkz 2025-06-25 
14:59:26.132 NOTE ydb-core-tx-datashard-ut_minstep(pid=663319, tid=0x00007F49C8653D40) [common provider] yql_provider_gateway.cpp:21:
: Info: Execution, code: 1060 2025-06-25T14:59:26.133096Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MTVjYzg1MjEtMTU4ZjIwYTctMzQzMWY4LTI5MTVmMjkz 2025-06-25 14:59:26.133 NOTE ydb-core-tx-datashard-ut_minstep(pid=663319, tid=0x00007F49C8653D40) [common provider] yql_provider_gateway.cpp:21:
:1:12: Info: Executing DROP TABLE 2025-06-25T14:59:26.133137Z node 2 :KQP_YQL NOTICE: log.cpp:64: SessionId: ydb://session/3?node_id=2&id=MTVjYzg1MjEtMTU4ZjIwYTctMzQzMWY4LTI5MTVmMjkz 2025-06-25 14:59:26.133 NOTE ydb-core-tx-datashard-ut_minstep(pid=663319, tid=0x00007F49C8653D40) [common provider] yql_provider_gateway.cpp:21:
: Info: Success, code: 4 2025-06-25T14:59:26.145979Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T14:59:26.146232Z node 2 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-06-25T14:59:26.164787Z node 2 :TX_DATASHARD INFO: datashard_impl.h:3310: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-25T14:59:26.165554Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037889 state Offline 2025-06-25T14:59:26.165878Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:74: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186224037889 2025-06-25T14:59:26.165923Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:19: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute Tablet 72075186224037889 2025-06-25T14:59:26.166008Z node 2 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037968897 Tablet(DataShard.72075186224037889.Leader.1) VolatileState: Running -> Stopped (Node 2) 2025-06-25T14:59:26.166102Z node 2 :HIVE DEBUG: tablet_info.cpp:523: HIVE#72057594037968897 Sending TEvStopTablet(DataShard.72075186224037889.Leader.1 gen 1) to node 2 2025-06-25T14:59:26.166202Z node 2 :HIVE DEBUG: tx__delete_tablet.cpp:67: HIVE#72057594037968897 THive::TTxDeleteTablet::Execute() result Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsert2 [GOOD] Test command err: 2025-06-25T14:59:21.396203Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:59:21.396390Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:59:21.396506Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a3c/r3tmp/tmp1Tidu4/pdisk_1.dat 2025-06-25T14:59:21.679858Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:59:21.682690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:21.717895Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:21.725331Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863559008703 != 1750863559008707 2025-06-25T14:59:21.770568Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:21.770696Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:21.781789Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:21.860332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:22.172471Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-25T14:59:22.172607Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:695:2577], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-25T14:59:22.241393Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:695:2577], subTag: 2} TUpsertActor finished in 0.068365s, errors=0 2025-06-25T14:59:22.241512Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:696:2578] with tag# 2 2025-06-25T14:59:25.373695Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:59:25.373815Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:59:25.373903Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a3c/r3tmp/tmpOhmpY2/pdisk_1.dat 2025-06-25T14:59:25.580325Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T14:59:25.581624Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:25.604891Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:25.606681Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750863562818950 != 1750863562818954 2025-06-25T14:59:25.651662Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:25.651781Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:25.663016Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:25.742221Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:25.993605Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-25T14:59:25.993723Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:695:2577], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" 2025-06-25T14:59:26.059818Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:695:2577], subTag: 2} TUpsertActor finished in 0.065724s, errors=0 2025-06-25T14:59:26.059899Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:696:2578] with tag# 2 >> ReadLoad::ShouldReadIterateMoreThanRows [GOOD] |90.8%| [TA] $(B)/ydb/core/tx/datashard/ut_minstep/test-results/unittest/{meta.json ... results_accumulator.log} |90.8%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_minstep/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TCmsTenatsTest::TestTenantLimit >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeRequest [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut_ycsb/unittest >> ReadLoad::ShouldReadIterateMoreThanRows [GOOD] Test command err: 2025-06-25T14:59:21.315238Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:59:21.315484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:59:21.315553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a4b/r3tmp/tmpSeqx40/pdisk_1.dat 2025-06-25T14:59:21.589105Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:59:21.595308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:21.635180Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:21.640359Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863559006490 != 1750863559006494 2025-06-25T14:59:21.686274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:21.686414Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:21.697788Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:21.778019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:22.149992Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# usertable in dir# /Root with rows# 1000 2025-06-25T14:59:22.151573Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:695:2577], subTag: 1} TUpsertActor Bootstrap called: RowCount: 1000 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" 2025-06-25T14:59:22.211177Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:695:2577], subTag: 1} TUpsertActor finished in 0.059236s, errors=0 2025-06-25T14:59:22.211721Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kReadIteratorStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "usertable" } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } ReadIteratorStart { RowCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 } 2025-06-25T14:59:22.211843Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:334: ReadIteratorLoadScenario# [1:704:2586] 
with id# {Tag: 0, parent: [1:695:2577], subTag: 3} Bootstrap called: RowCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 2025-06-25T14:59:22.229294Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:396: ReadIteratorLoadScenario# {Tag: 0, parent: [1:695:2577], subTag: 3} will work with tablet# 72075186224037888 with ownerId# 72057594046644480 with tableId# 2 resolved for path# /Root/usertable with columnsCount# 11, keyColumnCount# 1 2025-06-25T14:59:22.231746Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:707:2589] 2025-06-25T14:59:22.231932Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 1} Bootstrap called, sample# 0 2025-06-25T14:59:22.232001Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 1} Connect to# 72075186224037888 called 2025-06-25T14:59:22.232271Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 1} Handle TEvClientConnected called, Status# OK 2025-06-25T14:59:22.241777Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 1} finished in 0.009445s, read# 1000 2025-06-25T14:59:22.242187Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [1:707:2589] with chunkSize# 0 finished: 0 { DurationMs: 9 OperationsOK: 1000 OperationsError: 0 } 2025-06-25T14:59:22.242320Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:710:2592] 2025-06-25T14:59:22.242377Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 2} Bootstrap called, sample# 0 2025-06-25T14:59:22.242407Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 2} Connect to# 72075186224037888 called 2025-06-25T14:59:22.242690Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 2} Handle TEvClientConnected called, Status# OK 2025-06-25T14:59:22.464221Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 2} finished in 0.221476s, read# 1000 2025-06-25T14:59:22.464405Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [1:710:2592] with chunkSize# 1 finished: 0 { DurationMs: 221 OperationsOK: 1000 OperationsError: 0 } 2025-06-25T14:59:22.464519Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:713:2595] 2025-06-25T14:59:22.464557Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 3} Bootstrap called, sample# 0 2025-06-25T14:59:22.464586Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 3} Connect to# 72075186224037888 called 2025-06-25T14:59:22.464838Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 3} Handle TEvClientConnected called, Status# OK 2025-06-25T14:59:22.533516Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 3} finished in 0.068623s, read# 1000 2025-06-25T14:59:22.533676Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [1:713:2595] with chunkSize# 10 finished: 0 { DurationMs: 68 OperationsOK: 1000 OperationsError: 0 } 2025-06-25T14:59:22.533807Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan 
actor# [1:716:2598] 2025-06-25T14:59:22.533867Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 4} Bootstrap called, sample# 1000 2025-06-25T14:59:22.533907Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 4} Connect to# 72075186224037888 called 2025-06-25T14:59:22.534160Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 4} Handle TEvClientConnected called, Status# OK 2025-06-25T14:59:22.537034Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:137: ReadIteratorScan# {Tag: 0, parent: [1:704:2586], subTag: 4} finished in 0.002263s, sampled# 1000, iter finished# 1, oks# 1000 2025-06-25T14:59:22.537169Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:506: ReadIteratorLoadScenario# {Tag: 0, parent: [1:695:2577], subTag: 3} received keyCount# 1000 2025-06-25T14:59:22.537375Z node 1 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:551: ReadIteratorLoadScenario# {Tag: 0, parent: [1:695:2577], subTag: 3} started read actor with id# [1:719:2601] 2025-06-25T14:59:22.537792Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:79: TReadIteratorPoints# {Tag: 0, parent: [1:704:2586], subTag: 5} Bootstrap called, will read keys# 1000 2025-06-25T14:59:22.939662Z node 1 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:559: ReadIteratorLoadScenario# {Tag: 0, parent: [1:695:2577], subTag: 3} received point times# 1000, Inflight left# 0 2025-06-25T14:59:22.939866Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:482: headread with inflight# 1 finished: 0 { DurationMs: 402 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 1\n99.9%: 42\n" } 2025-06-25T14:59:22.940044Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:616: ReadIteratorLoadScenario# {Tag: 0, parent: [1:695:2577], subTag: 3} finished in 0.728026s with report: { DurationMs: 9 OperationsOK: 1000 OperationsError: 0 PrefixInfo: "Test run# 1, type# FullScan with chunk# inf" } { DurationMs: 221 OperationsOK: 1000 OperationsError: 0 PrefixInfo: "Test run# 2, type# FullScan with chunk# 1" } { DurationMs: 68 OperationsOK: 1000 OperationsError: 0 PrefixInfo: "Test run# 3, type# FullScan with chunk# 10" } { DurationMs: 402 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 1\n99.9%: 42\n" PrefixInfo: "Test run# 4, type# ReadHeadPoints with inflight# 1" } 2025-06-25T14:59:22.940450Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:704:2586] with tag# 3 2025-06-25T14:59:26.128533Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:59:26.128653Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:59:26.128739Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001a4b/r3tmp/tmpuhE2b6/pdisk_1.dat 2025-06-25T14:59:26.363646Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T14:59:26.365004Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:26.387540Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:26.389167Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750863563471545 != 1750863563471549 2025-06-25T14:59:26.434698Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:26.434821Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:26.445967Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:26.527118Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:26.834361Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# usertable in dir# /Root with rows# 10 2025-06-25T14:59:26.834640Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:695:2577], subTag: 1} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" 2025-06-25T14:59:26.856677Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:695:2577], subTag: 1} TUpsertActor finished in 0.021824s, errors=0 2025-06-25T14:59:26.857112Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kReadIteratorStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "usertable" } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } ReadIteratorStart { RowCount: 10 ReadCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 } 2025-06-25T14:59:26.857211Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:334: ReadIteratorLoadScenario# 
[2:704:2586] with id# {Tag: 0, parent: [2:695:2577], subTag: 3} Bootstrap called: RowCount: 10 ReadCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 2025-06-25T14:59:26.870902Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:396: ReadIteratorLoadScenario# {Tag: 0, parent: [2:695:2577], subTag: 3} will work with tablet# 72075186224037888 with ownerId# 72057594046644480 with tableId# 2 resolved for path# /Root/usertable with columnsCount# 11, keyColumnCount# 1 2025-06-25T14:59:26.871007Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:707:2589] 2025-06-25T14:59:26.871072Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 1} Bootstrap called, sample# 0 2025-06-25T14:59:26.871098Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 1} Connect to# 72075186224037888 called 2025-06-25T14:59:26.871280Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 1} Handle TEvClientConnected called, Status# OK 2025-06-25T14:59:26.871824Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 1} finished in 0.000505s, read# 10 2025-06-25T14:59:26.871944Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [2:707:2589] with chunkSize# 0 finished: 0 { DurationMs: 0 OperationsOK: 10 OperationsError: 0 } 2025-06-25T14:59:26.872016Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:710:2592] 2025-06-25T14:59:26.872047Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 2} Bootstrap called, sample# 0 2025-06-25T14:59:26.872073Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 2} Connect to# 72075186224037888 called 2025-06-25T14:59:26.872216Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 2} Handle TEvClientConnected called, Status# OK 2025-06-25T14:59:26.873614Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 2} finished in 0.001374s, read# 10 2025-06-25T14:59:26.873687Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [2:710:2592] with chunkSize# 1 finished: 0 { DurationMs: 1 OperationsOK: 10 OperationsError: 0 } 2025-06-25T14:59:26.873736Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:713:2595] 2025-06-25T14:59:26.873759Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 3} Bootstrap called, sample# 0 2025-06-25T14:59:26.873775Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 3} Connect to# 72075186224037888 called 2025-06-25T14:59:26.873927Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 3} Handle TEvClientConnected called, Status# OK 2025-06-25T14:59:26.874300Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 3} finished in 0.000347s, read# 10 2025-06-25T14:59:26.874359Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [2:713:2595] with chunkSize# 10 finished: 0 { DurationMs: 0 OperationsOK: 10 OperationsError: 0 } 2025-06-25T14:59:26.874420Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: 
started fullscan actor# [2:716:2598] 2025-06-25T14:59:26.874450Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 4} Bootstrap called, sample# 10 2025-06-25T14:59:26.874473Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 4} Connect to# 72075186224037888 called 2025-06-25T14:59:26.874629Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 4} Handle TEvClientConnected called, Status# OK 2025-06-25T14:59:26.874879Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:137: ReadIteratorScan# {Tag: 0, parent: [2:704:2586], subTag: 4} finished in 0.000212s, sampled# 10, iter finished# 1, oks# 10 2025-06-25T14:59:26.874939Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:506: ReadIteratorLoadScenario# {Tag: 0, parent: [2:695:2577], subTag: 3} received keyCount# 10 2025-06-25T14:59:26.875036Z node 2 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:551: ReadIteratorLoadScenario# {Tag: 0, parent: [2:695:2577], subTag: 3} started read actor with id# [2:719:2601] 2025-06-25T14:59:26.875078Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:79: TReadIteratorPoints# {Tag: 0, parent: [2:704:2586], subTag: 5} Bootstrap called, will read keys# 10 2025-06-25T14:59:27.174638Z node 2 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:559: ReadIteratorLoadScenario# {Tag: 0, parent: [2:695:2577], subTag: 3} received point times# 1000, Inflight left# 0 2025-06-25T14:59:27.174848Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:482: headread with inflight# 1 finished: 0 { DurationMs: 299 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 1\n99.9%: 23\n" } 2025-06-25T14:59:27.175024Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:616: ReadIteratorLoadScenario# {Tag: 0, parent: [2:695:2577], subTag: 3} finished in 0.317651s with report: { DurationMs: 0 OperationsOK: 10 OperationsError: 0 PrefixInfo: "Test run# 1, type# FullScan with chunk# inf" } { DurationMs: 1 OperationsOK: 10 OperationsError: 0 PrefixInfo: "Test run# 2, type# FullScan with chunk# 1" } { DurationMs: 0 OperationsOK: 10 OperationsError: 0 PrefixInfo: "Test run# 3, type# FullScan with chunk# 10" } { DurationMs: 299 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 1\n99.9%: 23\n" PrefixInfo: "Test run# 4, type# ReadHeadPoints with inflight# 1" } 2025-06-25T14:59:27.175141Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:704:2586] with tag# 3 >> TCmsTest::StateStorageNodesFromOneRing [GOOD] >> TCmsTest::StateStorageAvailabilityMode >> TCmsTest::RequestRestartServicesReject >> TCmsTenatsTest::TestNoneTenantPolicy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeRequest [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:25.236916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, 
MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:25.237050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:25.237107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:25.237158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:25.238662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:25.238708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:25.238774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:25.238905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:25.239625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:25.241578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:25.311089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:25.311149Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:25.327015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:25.327422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:25.327611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:25.333558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:25.333872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:25.336180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.337225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:25.344441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:25.349234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:25.356859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:25.356932Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:25.357123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:25.357174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:25.357223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:25.357317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.362933Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:25.479155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:25.479332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.479575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:25.479620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:25.480628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:25.480692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:25.482747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.483512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:25.483684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.483737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 
2025-06-25T14:59:25.483782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:25.483839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:25.485382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.485423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:25.485489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:25.486655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.486694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.486747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.486810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:25.490190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:25.491558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:25.491680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:25.493516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.493617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:25.493654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.494853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:25.494892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose 
HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.495012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:25.495060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:25.496790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:25.496834Z node 1 :FLAT_TX_SCHEMESHARD ... 6233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 16975298 DataSize: 16975298 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:27.730402Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186233409547][Topic1] TPersQueueReadBalancer::HandleWakeup 2025-06-25T14:59:27.730474Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186233409547][Topic1] Send TEvPersQueue::TEvStatus TabletId: 72075186233409546 Cookie: 3 2025-06-25T14:59:27.730968Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186233409547][Topic1] Send TEvPeriodicTopicStats PathId: 2 Generation: 2 StatsReportRound: 4 DataSize: 16975298 UsedReserveSize: 0 2025-06-25T14:59:27.731052Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186233409547][Topic1] ProcessPendingStats. 
PendingUpdates size 0 2025-06-25T14:59:27.731242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 16975298 UsedReserveSize 0 2025-06-25T14:59:27.745155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T14:59:27.756702Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:27.756887Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 203us result status StatusSuccess 2025-06-25T14:59:27.757375Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 11 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 16975298 DataSize: 16975298 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:27.811546Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:27.811757Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 247us result status StatusSuccess 2025-06-25T14:59:27.812115Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 11 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 16975298 DataSize: 16975298 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:27.812846Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186233409547][Topic1] pipe [1:675:2589] connected; active server actors: 1 2025-06-25T14:59:27.828823Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186233409547][Topic1] BALANCER INIT DONE for Topic1: (0, 72075186233409546) (1, 72075186233409546) (2, 72075186233409546) 2025-06-25T14:59:27.829482Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186233409547][Topic1] Discovered subdomain [OwnerId: 72057594046678944, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186233409547 2025-06-25T14:59:27.830721Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:27.830913Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 194us result status StatusSuccess 2025-06-25T14:59:27.831364Z node 1 :SCHEMESHARD_DESCRIBE 
DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 11 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 16975298 DataSize: 16975298 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:27.831695Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186233409547][Topic1] TEvClientConnected TabletId 72057594046678944, NodeId 1, Generation 3 2025-06-25T14:59:27.832169Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186233409547][Topic1] TEvClientConnected TabletId 72075186233409546, NodeId 1, Generation 2 2025-06-25T14:59:27.878347Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186233409547][Topic1] pipe [1:722:2624] connected; active server actors: 1 |90.8%| [TA] $(B)/ydb/core/load_test/ut_ycsb/test-results/unittest/{meta.json ... results_accumulator.log} |90.8%| [TA] {RESULT} $(B)/ydb/core/load_test/ut_ycsb/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TCmsTest::CollectInfo [GOOD] >> TCmsTest::DynamicConfig >> TCmsTest::ActionIssuePartialPermissions [GOOD] >> TCmsTest::ActionWithZeroDuration >> TMaintenanceApiTest::ManyActionGroupsWithSingleAction [GOOD] >> TMaintenanceApiTest::CreateTime >> TCmsTest::VDisksEvictionShouldFailWhileSentinelIsDisabled [GOOD] >> TCmsTest::VDisksEvictionShouldFailOnUnsupportedAction >> TCmsTest::TestForceRestartMode [GOOD] >> TCmsTest::StateStorageTwoRings >> TCmsTest::ManagePermissions [GOOD] >> TCmsTest::ManagePermissionWrongRequest >> TCmsTenatsTest::TestTenantRatioLimit [GOOD] >> TCmsTenatsTest::TestTenantRatioLimitForceRestartMode >> TCmsTest::DynamicConfig [GOOD] >> TCmsTest::DisabledEvictVDisks >> TClusterInfoTest::DeviceId [GOOD] >> TClusterInfoTest::FillInfo [GOOD] >> TCmsTenatsTest::CollectInfo >> TDowntimeTest::AddDowntime [GOOD] >> TDowntimeTest::HasUpcomingDowntime [GOOD] >> TDowntimeTest::CleanupOldSegments [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedExport [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeDir [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeBlockStoreVolume [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExtSubDomain [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeColumnStore [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeColumnTable [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeCdcStream >> TPQTest::TestPartitionPerConsumerQuota [GOOD] >> TPQTest::TestPartitionWriteQuota |90.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TDowntimeTest::CleanupOldSegments [GOOD] >> TCmsTest::StateStorageTwoRings [GOOD] >> TCmsTest::StateStorageTwoBrokenRings >> TCmsTenatsTest::TestTenantLimit [GOOD] >> TCmsTenatsTest::TestScheduledPermissionWithNonePolicy >> KqpStreamLookup::ReadTableDuringSplit [GOOD] >> TCmsTest::RequestRestartServicesReject [GOOD] >> TCmsTest::RequestRestartServicesRejectSecond >> TMaintenanceApiTest::SingleCompositeActionGroup [GOOD] >> TMaintenanceApiTest::SimplifiedMirror3DC >> TCmsTest::VDisksEvictionShouldFailOnUnsupportedAction [GOOD] >> TCmsTest::VDisksEvictionShouldFailOnMultipleActions >> BackupPathTest::ExportWholeDatabase [GOOD] >> BackupPathTest::RecursiveDirectoryPlusExplicitTable [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePathSpecified >> TCmsTenatsTest::TestNoneTenantPolicy [GOOD] >> TCmsTenatsTest::TestDefaultTenantPolicyWithSingleTenantHost >> TCmsTest::StateStorageAvailabilityMode [GOOD] >> TCmsTest::StateStorageRollingRestart >> TMaintenanceApiTest::CreateTime [GOOD] >> TMaintenanceApiTest::LastRefreshTime >> TCmsTest::ActionWithZeroDuration [GOOD] >> TCmsTest::CheckUnreplicatedDiskPreventsRestart >> TCmsTest::ManagePermissionWrongRequest [GOOD] >> TCmsTest::ManageRequests ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp/unittest >> KqpStreamLookup::ReadTableDuringSplit [GOOD] Test command err: 2025-06-25T14:59:22.535933Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:59:22.536133Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:59:22.536191Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012c8/r3tmp/tmp6sApPo/pdisk_1.dat 2025-06-25T14:59:22.970448Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:59:22.981511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:23.028659Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:23.032765Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863559984610 != 1750863559984614 2025-06-25T14:59:23.078301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:23.078458Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:23.090972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:23.183696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:23.598470Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:696:2578], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:23.598590Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:705:2583], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:23.598661Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:23.609560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:23.662564Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:23.779040Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:710:2586], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:59:23.862691Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:780:2625] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:29.544483Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykskjta90fadx8nank9ke5b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODgzYjg5MmUtZWRhNzQyZjEtOGY0MzU1NGUtOGY5ZmEwZGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:59:30.015507Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykskrpj9kbjj842znr8gtxx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDI5OWZhMC0xYTRiY2ZmYi05MGNjNjE3Yy05NmU3OGZj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Captured TEvDataShard::TEvRead from KQP_SOURCE_READ_ACTOR to TX_DATASHARD_ACTOR 2025-06-25T14:59:30.035661Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykskrpj9kbjj842znr8gtxx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDI5OWZhMC0xYTRiY2ZmYi05MGNjNjE3Yy05NmU3OGZj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Captured TEvDataShard::TEvRead from KQP_STREAM_LOOKUP_ACTOR to TX_DATASHARD_ACTOR --- split started --- --- split finished --- Captured TEvDataShard::TEvRead from KQP_STREAM_LOOKUP_ACTOR to TX_DATASHARD_ACTOR Captured TEvDataShard::TEvRead from KQP_STREAM_LOOKUP_ACTOR to TX_DATASHARD_ACTOR >> BackupRestoreS3::RestoreTablePartitioningSettings [GOOD] >> BackupRestoreS3::RestoreIndexTablePartitioningSettings >> TCmsTenatsTest::CollectInfo [GOOD] >> TCmsTenatsTest::RequestRestartServices >> TMaintenanceApiTest::SimplifiedMirror3DC [GOOD] >> TMaintenanceApiTest::RequestReplaceDevicePDisk >> TCmsTest::RequestRestartServicesOk >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree [GOOD] >> BackupRestore::TestAllPrimitiveTypes-PRIMITIVE_TYPE_ID_UNSPECIFIED [GOOD] >> BackupRestore::TestAllPrimitiveTypes-BOOL >> TCmsTenatsTest::TestTenantRatioLimitForceRestartMode [GOOD] >> TCmsTenatsTest::TestTenantRatioLimitForceRestartModeScheduled >> KqpStreamLookup::ReadTableWithIndexDuringSplit [GOOD] >> BackupPathTest::ExportWholeDatabaseWithEncryption >> BackupRestore::RestoreTablePartitioningSettings >> BackupRestoreS3::TestAllPrimitiveTypes-INT8 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT8 >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTable [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypePersQueueGroup >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTable [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSubDomain [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSolomonVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTableIndex >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeCdcStream [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeReplication [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeBlobDepot [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeExternalTable [GOOD] >> 
BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeExternalDataSource [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeResourcePool [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeBackupCollection [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UTF8 >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveConfig_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveActor_Test >> TCmsTenatsTest::TestScheduledPermissionWithNonePolicy [GOOD] >> TCmsTenatsTest::TestTenantLimitForceRestartMode >> TCmsTest::RequestRestartServicesRejectSecond [GOOD] >> TCmsTest::RequestRestartServicesWrongHost >> TCmsTest::RequestReplaceBrokenDevices >> TKesusTest::TestAcquireSemaphoreTimeout [GOOD] >> TKesusTest::TestAcquireSemaphoreTimeoutTooBig >> TCmsTest::VDisksEvictionShouldFailOnMultipleActions [GOOD] >> TCmsTest::StateStorageTwoBrokenRings [GOOD] >> TCmsTest::SysTabletsNode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp/unittest >> KqpStreamLookup::ReadTableWithIndexDuringSplit [GOOD] Test command err: 2025-06-25T14:59:22.509186Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:59:22.509382Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:59:22.509443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0012eb/r3tmp/tmpRSkFRM/pdisk_1.dat 2025-06-25T14:59:22.970408Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:59:22.984546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:23.022302Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:23.029603Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863559984565 != 1750863559984569 2025-06-25T14:59:23.078279Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:23.078425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:23.090972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:23.185284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:23.599006Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:744:2615], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:23.599102Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:754:2620], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:23.599164Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:23.609503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:23.662675Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:23.779932Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:758:2623], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:59:23.862693Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:829:2663] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:31.491964Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykskjtc3xza3z9q9wv99bt0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzVhODU2ZTAtMzgzMGZlYWYtNDg4YWJhOC0xMzRlNmYxMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:59:31.532373Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykskjtc3xza3z9q9wv99bt0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzVhODU2ZTAtMzgzMGZlYWYtNDg4YWJhOC0xMzRlNmYxMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:59:31.850497Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyksktnf58ap16t513rfra9q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTYyZWIzNjgtYTE0MTQyZS0xMzg1ZjY4MC0xNzFjMWNmZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Captured TEvDataShard::TEvRead from KQP_SOURCE_READ_ACTOR to TX_DATASHARD_ACTOR >> TCmsTenatsTest::TestDefaultTenantPolicyWithSingleTenantHost [GOOD] >> TCmsTenatsTest::TestLimitsWithDownNode >> TMaintenanceApiTest::LastRefreshTime [GOOD] >> TCmsTest::CheckUnreplicatedDiskPreventsRestart [GOOD] >> TCmsTest::AllVDisksEvictionInRack >> TKesusTest::TestAcquireSemaphoreTimeoutTooBig [GOOD] >> TKesusTest::TestAcquireSemaphoreTimeoutInfinite >> TSchemeshardStatsBatchingTest::ShouldNotBatchWhenDisabled [GOOD] |90.9%| [TA] $(B)/ydb/core/tx/datashard/ut_kqp/test-results/unittest/{meta.json ... results_accumulator.log} |90.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp/test-results/unittest/{meta.json ... 
results_accumulator.log} |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::VDisksEvictionShouldFailOnMultipleActions [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions [GOOD] >> PersQueueSdkReadSessionTest::SettingsValidation >> TCmsTest::ManageRequests [GOOD] >> TCmsTest::EnableCMSRequestPrioritiesFeatureFlag >> TKesusTest::TestSessionTimeoutAfterUnregister [GOOD] >> TKesusTest::TestStopResourceAllocationWhenPipeDestroyed |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::LastRefreshTime [GOOD] >> TKesusTest::TestAcquireSemaphoreTimeoutInfinite [GOOD] >> TKesusTest::TestAcquireSemaphoreRebootTimeout >> TCmsTest::RequestRestartServicesOk [GOOD] >> TCmsTest::RequestRestartServicesPartial ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::ShouldNotBatchWhenDisabled [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:25.236880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:25.236992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:25.237032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:25.237083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:25.238632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:25.238698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:25.238776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:25.238849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:25.239634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:25.241560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:25.310094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:25.310159Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:25.323675Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:25.324060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:25.324275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:25.331932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:25.333352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:25.336152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.337198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:25.343796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:25.349203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:25.356875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:25.356963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:25.357112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:25.357158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:25.357221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:25.357300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.363776Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:25.491261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:25.491447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.491621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:25.491652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:25.491837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:25.491896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:25.493628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.493757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:25.493892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.493949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:25.493978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:25.494013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:25.495462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.495511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:25.495541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:25.496844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.496884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.496949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.497007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:25.499934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:25.501143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:25.501264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:25.501920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.502021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:25.502057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.502291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:25.502339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.502459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:25.502523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:25.503887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:25.503928Z node 1 :FLAT_TX_SCHEMESHARD ... 
_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:33.520324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.520572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:33.520888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.521049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.521453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.521523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.521756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.521838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.521917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.522154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.522236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.522359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.522619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.522697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.522818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.522871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.522926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:33.523164Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:59:33.532148Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:59:33.532333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:33.533859Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 
2146435083, Sender [1:1018:2959], Recipient [1:1018:2959]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-25T14:59:33.533907Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5018: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-25T14:59:33.534917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:33.534974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:33.535508Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:1018:2959], Recipient [1:1018:2959]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:33.535541Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:33.535631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:33.535679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:33.535711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:33.535734Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:59:33.536704Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274399233, Sender [1:1054:2959], Recipient [1:1018:2959]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-25T14:59:33.536749Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5109: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-25T14:59:33.536775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:1018:2959] sender: [1:1074:2058] recipient: [1:15:2062] 2025-06-25T14:59:33.578079Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:1073:3003], Recipient [1:1018:2959]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-25T14:59:33.578152Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:59:33.578281Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:59:33.578594Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Simple" took 297us result status StatusSuccess 2025-06-25T14:59:33.579447Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Simple" PathDescription { Self { Name: "Simple" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1001 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Simple" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 MaxPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 13984 RowCount: 100 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 82488 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 13984 DataSize: 13984 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePathSpecified [GOOD] >> TKesusTest::TestStopResourceAllocationWhenPipeDestroyed [GOOD] >> TCmsTest::RequestReplaceDevices >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression >> TCmsTenatsTest::TestTenantRatioLimitForceRestartModeScheduled [GOOD] >> TCmsTest::ActionIssue >> Compression::WriteRAW [GOOD] >> Compression::WriteGZIP >> BasicUsage::MaxByteSizeEqualZero [GOOD] >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage >> TCmsTest::RequestRestartServicesWrongHost [GOOD] >> TCmsTest::RestartNodeInDownState ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestStopResourceAllocationWhenPipeDestroyed [GOOD] Test command err: 2025-06-25T14:59:10.243207Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.243320Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.273677Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.273787Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.297737Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.736005Z node 2 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.736082Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.755117Z node 2 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.756016Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.779041Z node 2 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.126410Z node 3 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:11.126481Z node 3 :KESUS_TABLET DEBUG: 
tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:11.139677Z node 3 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:11.140239Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:11.163370Z node 3 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:11.163718Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[3:136:2160], cookie=12603239518825294044, session=0, seqNo=0) 2025-06-25T14:59:11.163858Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:11.175174Z node 3 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[3:136:2160], cookie=12603239518825294044, session=1) 2025-06-25T14:59:11.175548Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[3:145:2167], cookie=5115134292466120510) 2025-06-25T14:59:11.175613Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[3:145:2167], cookie=5115134292466120510) 2025-06-25T14:59:11.609131Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:11.620957Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:11.969988Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:11.981776Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:12.330755Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:12.342834Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:12.691549Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:12.704891Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:13.089816Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:13.103881Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:13.473360Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:13.485593Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:13.840153Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:13.852060Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:14.216126Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:14.228139Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:14.583133Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:14.595492Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:14.995721Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: 
[72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:15.008934Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:15.386800Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:15.399064Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:15.764425Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:15.776557Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:16.143163Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:16.155974Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:16.518000Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:16.530077Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:16.922794Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:16.934939Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:17.306413Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:17.318709Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:17.680046Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:17.692254Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:18.053569Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:18.065789Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:18.427287Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:18.439154Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:18.821358Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:18.833535Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:19.195746Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:19.207945Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:19.569452Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:19.581249Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:19.941979Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:19.953965Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:20.314326Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:20.326366Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 
2025-06-25T14:59:20.697300Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:20.709461Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:21.079966Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:21.091614Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:21.451086Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:21.463140Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:21.825712Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:21.837669Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:22.190982Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:22.205159Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:22.623121Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:22.641407Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:23.026362Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:23.038589Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:23.424696Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:23.438191Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:23.789717Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:23.802011Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:24.163642Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:24.181161Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:24.554038Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:24.566273Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:24.927144Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:24.939201Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:25.313834Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:25.325970Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:25.676427Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:25.688323Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:26.048980Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:26.061009Z node 3 :KESUS_TABLET DEBUG: 
tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:26.443875Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:26.455810Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:26.808761Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:26.820725Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:27.192170Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:27.204112Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:27.564532Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:27.577131Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:27.940146Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:27.955970Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:28.359180Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:28.371370Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:28.733333Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:28.745715Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:29.119715Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:29.131831Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:29.494930Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:29.507159Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:29.868244Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:29.880227Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:30.253793Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:30.266067Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:30.620793Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:30.632970Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:31.000085Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:31.012327Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:31.366263Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:31.378306Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:31.744650Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] 
TTxSelfCheck::Execute 2025-06-25T14:59:31.757721Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:32.128793Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:32.141593Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:32.499956Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:32.511858Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:32.853080Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:32.865347Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:33.251649Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:33.264181Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:33.629984Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:33.642387Z node 3 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:34.056996Z node 3 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:27: [72057594037927937] TTxSessionTimeout::Execute (session=1) 2025-06-25T14:59:34.057098Z node 3 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-25T14:59:34.069358Z node 3 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:56: [72057594037927937] TTxSessionTimeout::Complete (session=1) 2025-06-25T14:59:34.080508Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:23: [72057594037927937] TTxSessionsDescribe::Execute (sender=[3:534:2481], cookie=4769304707089612163) 2025-06-25T14:59:34.080609Z node 3 :KESUS_TABLET DEBUG: tx_sessions_describe.cpp:48: [72057594037927937] TTxSessionsDescribe::Complete (sender=[3:534:2481], cookie=4769304707089612163) 2025-06-25T14:59:34.480153Z node 4 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:34.480262Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:34.499654Z node 4 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:34.499781Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:34.516300Z node 4 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:34.527280Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:36: [72057594037927937] TTxQuoterResourceAdd::Execute (sender=[4:134:2158], cookie=13668039004648137056, path="Root", config={ MaxUnitsPerSecond: 100 }) 2025-06-25T14:59:34.527615Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:76: [72057594037927937] Created new quoter resource 1 "Root" 2025-06-25T14:59:34.550816Z node 4 :KESUS_TABLET DEBUG: tx_quoter_resource_add.cpp:85: [72057594037927937] TTxQuoterResourceAdd::Complete (sender=[4:134:2158], cookie=13668039004648137056) 2025-06-25T14:59:34.552696Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [4:145:2167]. Cookie: 0. 
Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:34.552774Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[4:145:2167], cookie=0) 2025-06-25T14:59:34.553076Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:145: [72057594037927937] Send TEvSubscribeOnResourcesResult to [4:143:2165]. Cookie: 0. Data: { Results { ResourceId: 1 Error { Status: SUCCESS } EffectiveProps { ResourceId: 1 ResourcePath: "Root" HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 MaxBurstSizeCoefficient: 1 Weight: 1 } AccountingConfig { ReportPeriodMs: 5000 AccountPeriodMs: 1000 CollectPeriodSec: 30 ProvisionedCoefficient: 60 OvershootCoefficient: 1.1 Provisioned { BillingPeriodSec: 60 } OnDemand { BillingPeriodSec: 60 } Overshoot { BillingPeriodSec: 60 } } } } ProtocolVersion: 1 } 2025-06-25T14:59:34.553129Z node 4 :KESUS_TABLET DEBUG: quoter_runtime.cpp:150: [72057594037927937] Subscribe on quoter resources (sender=[4:143:2165], cookie=0) 2025-06-25T14:59:34.595595Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [4:145:2167]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 5 StateNotification { Status: SUCCESS } } } 2025-06-25T14:59:34.595706Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [4:143:2165]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 5 StateNotification { Status: SUCCESS } } } 2025-06-25T14:59:34.595983Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:318: Got TEvServerDisconnected([4:147:2169]) 2025-06-25T14:59:34.596132Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:37: [72057594037927937] Send TEvResourcesAllocated to [4:145:2167]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 StateNotification { Status: SESSION_EXPIRED Issues { message: "Disconected." } } } } 2025-06-25T14:59:34.648533Z node 4 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72057594037927937] Send TEvResourcesAllocated to [4:143:2165]. Cookie: 0. 
Data: { ResourcesInfo { ResourceId: 1 Amount: 10 StateNotification { Status: SUCCESS } } } >> TCmsTest::SysTabletsNode [GOOD] >> TCmsTenatsTest::TestTenantLimitForceRestartMode [GOOD] >> TCmsTenatsTest::TestTenantLimitForceRestartModeScheduled >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPrefixSpecified >> TCmsTest::RequestReplaceBrokenDevices [GOOD] >> TCmsTest::PermissionDuration >> TCmsTenatsTest::TestLimitsWithDownNode [GOOD] >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchSize [GOOD] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::SysTabletsNode [GOOD] >> TCmsTest::RequestRestartServicesDryRun >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeCdcStream [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeBlobDepot [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalDataSource >> TCmsTest::EnableCMSRequestPrioritiesFeatureFlag [GOOD] >> TCmsTenatsTest::RequestRestartServices [GOOD] >> TMaintenanceApiTest::RequestReplaceDevicePDisk [GOOD] >> TCmsTest::RequestRestartServicesPartial [GOOD] >> TCmsTest::RequestRestartServicesMultipleNodes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchSize [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:25.236913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:25.237021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:25.237079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:25.237121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:25.238687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:25.238758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:25.238831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:25.238914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:25.239717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources# 2025-06-25T14:59:25.241607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:25.311252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:25.311321Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:25.328224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:25.328619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:25.328790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:25.334734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:25.335041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:25.336172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.337174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:25.343887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:25.349207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:25.356946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:25.357017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:25.357134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:25.357172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:25.357221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:25.357284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.363224Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:25.477346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:25.478388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.479568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:25.479626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:25.480479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:25.480555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:25.482742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.483518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:25.483678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.483772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:25.483813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:25.483855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:25.485345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.485381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:25.485408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:25.486622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.486662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.486722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.486761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:25.490113Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:25.491375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:25.491518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:25.493588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.493706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:25.493752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.494851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:25.494890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.495020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:25.495077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:25.496584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:25.496616Z node 1 :FLAT_TX_SCHEMESHARD ... 
chemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 2, at schemeshard: 72057594046678944 2025-06-25T14:59:35.777562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.777801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 6, at schemeshard: 72057594046678944 2025-06-25T14:59:35.778115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.778227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.778570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.778649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.778876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.778978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.779058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.779268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.779363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.779511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.779758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.779838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.779960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.780002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.780057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:35.780298Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:59:35.787856Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:59:35.788016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:35.789351Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435083, Sender 
[1:1135:3064], Recipient [1:1135:3064]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-25T14:59:35.789422Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5018: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-25T14:59:35.790725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:35.790793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:35.791229Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:1135:3064], Recipient [1:1135:3064]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:35.791275Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:35.792053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:35.792110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:35.792158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:35.792191Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:59:35.794236Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274399233, Sender [1:1171:3064], Recipient [1:1135:3064]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-25T14:59:35.794280Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5109: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-25T14:59:35.794312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:1135:3064] sender: [1:1191:2058] recipient: [1:15:2062] 2025-06-25T14:59:35.831571Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:1190:3108], Recipient [1:1135:3064]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-25T14:59:35.831670Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:59:35.831787Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:59:35.832089Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Simple" took 290us result status StatusSuccess 2025-06-25T14:59:35.832922Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Simple" PathDescription { Self { Name: "Simple" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1001 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Simple" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 1 MinPartitionsCount: 20 MaxPartitionsCount: 20 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 13984 RowCount: 100 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 
StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 18961 Memory: 141368 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 13984 DataSize: 13984 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> BackupPathTest::ExportWholeDatabaseWithEncryption [GOOD] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::EnableCMSRequestPrioritiesFeatureFlag [GOOD] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::RequestRestartServices [GOOD] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::RequestReplaceDevicePDisk [GOOD] >> TCmsTest::StateStorageRollingRestart [GOOD] >> TCmsTest::StateStorageLockedNodes >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_NewSourceId_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_RegisteredSourceId_Test >> BackupRestore::RestoreTablePartitioningSettings [GOOD] >> BackupRestore::RestoreIndexTablePartitioningSettings >> BackupRestore::TestAllSchemeObjectTypes-EPathTypePersQueueGroup [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSubDomain [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeRtmrVolume [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeKesus >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTableIndex [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSequence >> BackupRestoreS3::RestoreIndexTablePartitioningSettings [GOOD] >> BackupRestoreS3::RestoreIndexTableReadReplicasSettings >> BackupRestore::TestAllPrimitiveTypes-BOOL [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT8 >> TCmsTest::RestartNodeInDownState [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UTF8 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-YSON >> BackupRestoreS3::TestAllPrimitiveTypes-UINT8 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT16 >> TCmsTest::RequestReplaceDevices [GOOD] >> TCmsTest::RequestReplaceDevicePDisk |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest >> TCmsTest::PermissionDuration [GOOD] >> TCmsTest::RacyStartCollecting >> 
BackupPathTest::ExportWithCommonSourcePath >> TBlobStorageWardenTest::TestCreatePDiskAndEncryptedGroup |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::RestartNodeInDownState [GOOD] >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy [GOOD] >> TCmsTenatsTest::TestTenantLimitForceRestartModeScheduled [GOOD] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest >> TSchemeShardLoginTest::ChangeAccountLockoutParameters [GOOD] >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb >> TCmsTest::RequestRestartServicesDryRun [GOOD] >> TCmsTest::RequestReplacePDiskDoesntBreakGroup >> TCmsTest::RequestRestartServicesMultipleNodes [GOOD] >> TCmsTest::RequestRestartServicesNoUser >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPrefixSpecified [GOOD] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy [GOOD] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestTenantLimitForceRestartModeScheduled [GOOD] >> TCmsTest::ActionIssue [GOOD] |90.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::ActionIssue [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedImport |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest >> TCmsTest::StateStorageLockedNodes [GOOD] >> TCmsTest::RequestReplaceDevicePDisk [GOOD] >> TCmsTest::RequestReplaceDevicePDiskByPath >> TCmsTest::RacyStartCollecting [GOOD] >> TCmsTest::PriorityRange |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::StateStorageLockedNodes [GOOD] >> TCmsTest::RequestReplacePDiskDoesntBreakGroup [GOOD] >> TCmsTest::RequestReplacePDiskConsecutiveWithDone |91.0%| [TA] $(B)/ydb/core/kqp/ut/join/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TBlobStorageWardenTest::TestCreatePDiskAndEncryptedGroup [GOOD] >> TCmsTest::RequestRestartServicesNoUser [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeKesus [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSolomonVolume [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTableIndex >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalDataSource [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeBackupCollection [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT8 >> TCmsTest::AllVDisksEvictionInRack [GOOD] |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::RequestRestartServicesNoUser [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut_fat/unittest >> TBlobStorageWardenTest::TestCreatePDiskAndEncryptedGroup [GOOD] Test command err: 2025-06-25T14:59:38.773465Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:0:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:1:0] targetVDisk# [3e000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:59:38.773615Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:2:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:1:0] targetVDisk# [3e000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:59:38.776156Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:1:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:2:0] targetVDisk# [3e000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:59:38.776926Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:0:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:2:0] targetVDisk# [3e000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:59:38.785803Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:0:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:59:38.786526Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:1:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-25T14:59:38.786676Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[3e000000:_:0:2:0]: (1040187392) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [3e000000:1:0:3:0] targetVDisk# [3e000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 Sending TEvPut Sending TEvGet Sending TEvVGet Sending TEvPut 2025-06-25T14:59:40.132787Z node 1 :BS_CONTROLLER ERROR: {BSCTXPGK04@propose_group_key.cpp:47} Group LifeCyclePhase does not match ELCP_INITIAL GroupId.GetRawId()# 3187671040 LifeCyclePhase# 3 2025-06-25T14:59:40.133824Z node 1 :BS_CONTROLLER ERROR: {BSCTXPGK10@propose_group_key.cpp:108} TTxProposeGroupKey error GroupId# 3187671040 Status# ERROR Request# {NodeId: 2 GroupId: 3187671040 LifeCyclePhase: 1 MainKeyId: "/home/runner/.ya/build/build_root/yft8/001939/r3tmp/tmp92FGau//key.txt" EncryptedGroupKey: "a\005\302l\274\232wu\234\326\371\025\021mO\325$\020%\244\324\305o\305\3110D\272\311qPV\344Q\227\277" MainKeyVersion: 1 GroupKeyNonce: 
3187671040 } Sending TEvGet |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::CreateTable-Reboots-GenerateInternalPathId >> TColumnShardTestSchema::DropWriteRace >> TColumnShardTestSchema::CreateTable+Reboots+GenerateInternalPathId >> MoveTable::WithUncomittedData ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::AllVDisksEvictionInRack [GOOD] >> TColumnShardTestSchema::ColdCompactionSmoke >> TColumnShardTestSchema::HotTiersTtl Test command err: 2025-06-25T14:59:34.268000Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-25T14:59:34.268115Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-25T14:59:34.268263Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-25T14:59:34.270158Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 25 InterconnectPort: 12001 Location { Rack: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-26-26" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 26 InterconnectPort: 12002 Location { Rack: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-27-27" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 27 InterconnectPort: 12003 Location { Rack: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-28-28" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 28 InterconnectPort: 12004 Location { Rack: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: 
"vdisk-1-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-29-29" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 29 InterconnectPort: 12005 Location { Rack: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-30-30" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 30 InterconnectPort: 12006 Location { Rack: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-31-31" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 31 InterconnectPort: 12007 Location { Rack: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-32-32" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 32 InterconnectPort: 12008 Location { Rack: "4" } StartTimeSeconds: 0 } Timestamp: 120029000 } } 2025-06-25T14:59:34.270936Z node 25 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-25-25" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 25 InterconnectPort: 12001 Location { Rack: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-26-26" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 26 InterconnectPort: 12002 Location { Rack: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: 
"vdisk-3-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-27-27" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 27 InterconnectPort: 12003 Location { Rack: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-28-28" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 28 InterconnectPort: 12004 Location { Rack: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-29-29" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 29 InterconnectPort: 12005 Location { Rack: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-30-30" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 30 InterconnectPort: 12006 Location { Rack: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-31-31" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 31 InterconnectPort: 12007 Location { Rack: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-32-32" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 32 InterconnectPort: 12008 Location { Rack: "4" } StartTimeSeconds: 0 } Timestamp: 120029000 } 2025-06-25T14:59:34.271178Z node 25 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 120.003000s 2025-06-25T14:59:34.271240Z node 25 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-25T14:59:34.271417Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true 2025-06-25T14:59:34.271493Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 
2025-06-25T14:59:34.271563Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: VDisks eviction from host 25 has not yet been completed) 2025-06-25T14:59:34.271747Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-25T14:59:34.271997Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 25 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-25T14:59:34.272052Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Add host marker: host# 25, marker# MARKER_DISK_FAULTY 2025-06-25T14:59:34.272398Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 25, wbId# [25:8388350642965737326:1634689637] 2025-06-25T14:59:34.272475Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 26, wbId# [26:8388350642965737326:1634689637] 2025-06-25T14:59:34.272510Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 27, wbId# [27:8388350642965737326:1634689637] 2025-06-25T14:59:34.272539Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 28, wbId# [28:8388350642965737326:1634689637] 2025-06-25T14:59:34.272570Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 29, wbId# [29:8388350642965737326:1634689637] 2025-06-25T14:59:34.272599Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 30, wbId# [30:8388350642965737326:1634689637] 2025-06-25T14:59:34.272630Z node 25 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] R ... 
dle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 31, response# PDiskStateInfo { PDiskId: 31 CreateTime: 0 ChangeTime: 0 Path: "/31/pdisk-31.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180029 2025-06-25T14:59:39.528289Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 32, response# PDiskStateInfo { PDiskId: 32 CreateTime: 0 ChangeTime: 0 Path: "/32/pdisk-32.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180029 2025-06-25T14:59:39.528380Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 26, response# PDiskStateInfo { PDiskId: 26 CreateTime: 0 ChangeTime: 0 Path: "/26/pdisk-26.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180029 2025-06-25T14:59:39.528466Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 27, response# PDiskStateInfo { PDiskId: 27 CreateTime: 0 ChangeTime: 0 Path: "/27/pdisk-27.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180029 2025-06-25T14:59:39.528554Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 28, response# PDiskStateInfo { PDiskId: 28 CreateTime: 0 ChangeTime: 0 Path: "/28/pdisk-28.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180029 2025-06-25T14:59:39.528634Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 29, response# PDiskStateInfo { PDiskId: 29 CreateTime: 0 ChangeTime: 0 Path: "/29/pdisk-29.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 180029 2025-06-25T14:59:39.528691Z node 25 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-25T14:59:39.528953Z node 25 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 26:26, status# ACTIVE, required status# FAULTY, reason# Forced status, dry run# 0 2025-06-25T14:59:39.529012Z node 25 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 25:25, status# ACTIVE, required status# FAULTY, reason# Forced status, dry run# 0 2025-06-25T14:59:39.529059Z node 25 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 2 2025-06-25T14:59:39.529205Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-06-25T14:59:39.529478Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-06-25T14:59:39.529631Z node 25 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Status { Success: true } Success: true, cookie# 1 2025-06-25T14:59:39.529698Z node 25 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 25:25 2025-06-25T14:59:39.529746Z node 25 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 26:26 2025-06-25T14:59:39.542510Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:27: TTxLogAndSend Complete 2025-06-25T14:59:39.542603Z node 25 :CMS DEBUG: cms_tx_log_and_send.cpp:27: TTxLogAndSend Complete 2025-06-25T14:59:39.558020Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes 
Execute 2025-06-25T14:59:39.558119Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-25T14:59:39.558186Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:03:00Z 2025-06-25T14:59:39.558994Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 25 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-25T14:59:39.559092Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 25 has not yet been completed" } 2025-06-25T14:59:39.559150Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 25, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-25T14:59:39.559213Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Ok 2025-06-25T14:59:39.559244Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-25T14:59:39.559286Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-25T14:59:39.559315Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-25T14:59:39.559470Z node 25 :CMS DEBUG: cms.cpp:1042: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-25T14:59:39.559536Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:13:00Z) 2025-06-25T14:59:39.559617Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-25T14:59:39.559819Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:13:00.129000Z, action# Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 2025-06-25T14:59:39.559935Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-25T14:59:39.572032Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-25T14:59:39.572336Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 600000000 } Deadline: 780129000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 25 InterconnectPort: 12001 } } } } 2025-06-25T14:59:39.572397Z node 25 :CMS DEBUG: cms.cpp:1070: Schedule cleanup at 1970-01-01T00:33:00.129000Z 2025-06-25T14:59:39.588175Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:13:00Z) 2025-06-25T14:59:39.588549Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-25T14:59:39.588632Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-25T14:59:39.588697Z node 25 :CMS DEBUG: 
cluster_info.cpp:968: Timestamp: 1970-01-01T00:03:00Z 2025-06-25T14:59:39.589569Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 26 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-25T14:59:39.589674Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 26 has not yet been completed" } 2025-06-25T14:59:39.589730Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-25T14:59:39.589792Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-25T14:59:39.589980Z node 25 :CMS DEBUG: cms.cpp:1042: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-06-25T14:59:39.590052Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (26) (permission user-p-2 until 1970-01-01T00:13:00Z) 2025-06-25T14:59:39.590150Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-25T14:59:39.590307Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:13:00.230512Z, action# Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 2025-06-25T14:59:39.590405Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-2, owner# user, order# 2, priority# 0, body# User: "user" PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-25T14:59:39.602621Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-25T14:59:39.602895Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-2" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-2" Permissions { Id: "user-p-2" Action { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 600000000 } Deadline: 780230512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } 2025-06-25T14:59:39.603323Z node 25 :CMS INFO: cms.cpp:1332: User user is done with permissions user-p-1 2025-06-25T14:59:39.603364Z node 25 :CMS DEBUG: cms.cpp:1355: Resulting status: OK 2025-06-25T14:59:39.603428Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-25T14:59:39.603504Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reset host markers: host# 25 2025-06-25T14:59:39.603580Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, reason# permission user-p-1 was removed 2025-06-25T14:59:39.603628Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-1, reason# explicit remove 2025-06-25T14:59:39.615638Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-25T14:59:39.615829Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# 
NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-1" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-25T14:59:39.616379Z node 25 :CMS INFO: cms.cpp:1332: User user is done with permissions user-p-2 2025-06-25T14:59:39.616446Z node 25 :CMS DEBUG: cms.cpp:1355: Resulting status: OK 2025-06-25T14:59:39.616523Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-25T14:59:39.616617Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reset host markers: host# 26 2025-06-25T14:59:39.616708Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-2, reason# permission user-p-2 was removed 2025-06-25T14:59:39.616749Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-2, reason# explicit remove 2025-06-25T14:59:39.628726Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-25T14:59:39.628907Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } >> TColumnShardTestSchema::RebootHotTiersAfterTtl >> BackupPathTest::ExportWithCommonSourcePath [GOOD] >> TColumnShardTestSchema::TTL+Reboot+Internal-FirstPkColumn >> TColumnShardTestSchema::EnableColdTiersAfterTtl >> TCmsTest::PriorityRange [GOOD] >> TColumnShardTestSchema::OneColdTier >> TCmsTest::RequestReplaceDevicePDiskByPath [GOOD] >> TCmsTest::RequestReplaceDeviceTwiceWithNoVDisks >> BackupRestoreS3::RestoreIndexTableReadReplicasSettings [GOOD] >> BackupRestoreS3::RestoreTableSplitBoundaries >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSequence [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeView >> BackupRestore::RestoreIndexTablePartitioningSettings [GOOD] >> BackupRestore::RestoreIndexTableReadReplicasSettings ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::PriorityRange [GOOD] Test command err: 2025-06-25T14:59:38.547146Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 17:17 2025-06-25T14:59:38.547218Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 18:18 2025-06-25T14:59:38.547244Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 19:19 2025-06-25T14:59:38.547268Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 20:20 2025-06-25T14:59:38.547292Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 21:21 2025-06-25T14:59:38.547318Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 22:22 2025-06-25T14:59:38.547344Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 23:23 2025-06-25T14:59:38.547368Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 24:24 2025-06-25T14:59:38.553401Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 17:17 2025-06-25T14:59:38.553478Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 18:18 2025-06-25T14:59:38.553508Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 19:19 2025-06-25T14:59:38.553538Z node 17 :CMS ERROR: 
cluster_info.cpp:489: Cannot update state for unknown PDisk 20:20 2025-06-25T14:59:38.553560Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 21:21 2025-06-25T14:59:38.553582Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 22:22 2025-06-25T14:59:38.553604Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 23:23 2025-06-25T14:59:38.553624Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 24:24 2025-06-25T14:59:38.583821Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 17:17 2025-06-25T14:59:38.583889Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 18:18 2025-06-25T14:59:38.583915Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 19:19 2025-06-25T14:59:38.583941Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 20:20 2025-06-25T14:59:38.583963Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 21:21 2025-06-25T14:59:38.583985Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 22:22 2025-06-25T14:59:38.584006Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 23:23 2025-06-25T14:59:38.584028Z node 17 :CMS ERROR: cluster_info.cpp:489: Cannot update state for unknown PDisk 24:24 >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedImport [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT8 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT16 >> BackupRestoreS3::TestAllPrimitiveTypes-INT16 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT16 >> BackupPathTest::ExportWithCommonSourcePathAndExplicitTableInside >> BackupRestoreS3::TestAllPrimitiveTypes-YSON [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UUID |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::WithData >> TCmsTest::RequestReplacePDiskConsecutiveWithDone [GOOD] >> TCmsTest::RequestReplaceManyDevicesOnOneNode >> MoveTable::WithUncomittedData [GOOD] >> TColumnShardTestSchema::RebootExportAfterFail >> TColumnShardTestSchema::ForgetWithLostAnswer >> TColumnShardTestSchema::DropWriteRace [GOOD] >> TColumnShardTestSchema::Drop+Reboots+GenerateInternalPathId >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb [GOOD] >> TColumnShardTestSchema::CreateTable-Reboots-GenerateInternalPathId [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePrefixSpecified >> TCmsTest::RequestReplaceDeviceTwiceWithNoVDisks [GOOD] >> TColumnShardTestSchema::CreateTable+Reboots+GenerateInternalPathId [GOOD] >> MoveTable::RenameAbsentTable_Negative >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink+UseDataQuery [GOOD] >> MoveTable::WithData [GOOD] >> MoveTable::RenameAbsentTable_Negative [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveActor_Test [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTableIndex [GOOD] >> TCmsTest::DisabledEvictVDisks [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeFileStore [GOOD] >> BackupPathTest::ExportWithCommonSourcePathAndExplicitTableInside [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT8 [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSequence >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_OtherPartition_Test >> 
BackupRestore::TestAllPrimitiveTypes-UINT16 >> TColumnShardTestSchema::TTL-Reboot+Internal+FirstPkColumn >> TCmsTest::EmergencyDuringRollingRestart |91.0%| [TA] $(B)/ydb/core/mind/ut_fat/test-results/unittest/{meta.json ... results_accumulator.log} |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::RequestReplaceDeviceTwiceWithNoVDisks [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::RenameAbsentTable_Negative [GOOD] Test command err: 2025-06-25T14:59:44.593493Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:44.614752Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:44.614985Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:44.620609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:44.620810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:44.621051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:44.621173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:44.621287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:44.621400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:44.621505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:44.621624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:44.621734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:44.621854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.621983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:44.642212Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:44.642352Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:44.642419Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:44.642611Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:44.642743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:44.642838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:44.642881Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:44.642965Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:44.643020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:44.643061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:44.643086Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:44.643259Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:44.643319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:44.643356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:44.643382Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:44.643465Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:44.643524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:44.643580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:44.643614Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:44.643690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:44.643724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:44.643756Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:44.643940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:44.643978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:44.644007Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:44.644221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:44.644270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:44.644297Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:44.644406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:44.644449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.644476Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.644528Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:44.644571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:44.644598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:44.644631Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:44.644894Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=58; 2025-06-25T14:59:44.644990Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=49; 2025-06-25T14:59:44.645062Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=32; 2025-06-25T14:59:44.645151Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=42; 2025-06-25T14:59:44.645253Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:59:44.645345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:59:44.645393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:59:44.645441Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
ollerLoadingTime=59; 2025-06-25T14:59:44.907620Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=49; 2025-06-25T14:59:44.907709Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=54; 2025-06-25T14:59:44.907846Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=98; 2025-06-25T14:59:44.908162Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=276; 2025-06-25T14:59:44.908229Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=6; 2025-06-25T14:59:44.908267Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=6; 2025-06-25T14:59:44.908301Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=5; 2025-06-25T14:59:44.908409Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=46; 2025-06-25T14:59:44.908461Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-25T14:59:44.908615Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=57; 2025-06-25T14:59:44.908666Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=5; 2025-06-25T14:59:44.908734Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=37; 2025-06-25T14:59:44.908796Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=32; 2025-06-25T14:59:44.908853Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=27; 2025-06-25T14:59:44.908895Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=2654; 2025-06-25T14:59:44.909043Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T14:59:44.909100Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T14:59:44.909194Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T14:59:44.909520Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T14:59:44.909573Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:448;problem=Background activities cannot be started: no index at tablet; 2025-06-25T14:59:44.909879Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T14:59:44.909969Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T14:59:44.910000Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T14:59:44.910027Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T14:59:44.910073Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T14:59:44.910110Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:448;problem=Background activities cannot be started: no index at tablet; 2025-06-25T14:59:45.201647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923004793280;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750863585567;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T14:59:45.201727Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923004793280;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750863585567;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;fline=schema.h:38;event=sync_schema; 2025-06-25T14:59:45.213885Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750863585567;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;this=88923004793280;op_tx=10:TX_KIND_SCHEMA;min=1750863585567;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750863585567;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_this=89129165578496;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T14:59:45.213993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750863585567;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;this=88923004793280;op_tx=10:TX_KIND_SCHEMA;min=1750863585567;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750863585567;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_this=89129165578496;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:156:2178]; 2025-06-25T14:59:45.214045Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750863585567;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;this=88923004793280;op_tx=10:TX_KIND_SCHEMA;min=1750863585567;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750863585567;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_this=89129165578496;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=10; 2025-06-25T14:59:45.214424Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T14:59:45.214545Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863585567 at tablet 9437184, mediator 0 2025-06-25T14:59:45.214596Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] execute at tablet 9437184 2025-06-25T14:59:45.214957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T14:59:45.215046Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000001, ss: 1} ttl settings: { Version: 1 } at tablet 9437184 2025-06-25T14:59:45.219648Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T14:59:45.219789Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000001; 2025-06-25T14:59:45.219860Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000001; 2025-06-25T14:59:45.226497Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000001; 2025-06-25T14:59:45.226670Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tx_controller.cpp:215;event=finished_tx;tx_id=10; 2025-06-25T14:59:45.250327Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] complete at tablet 9437184 2025-06-25T14:59:45.251126Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923004831808;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750863585571;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=schema.cpp:134;propose_execute=move_table;src=111;dst=2; 2025-06-25T14:59:45.251172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923004831808;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750863585571;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=111;result=not_found; 2025-06-25T14:59:45.251216Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923004831808;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750863585571;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=tx_controller.cpp:364;error=problem on start;message=No such table; 2025-06-25T14:59:45.263096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750863585571;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;this=88923004831808;op_tx=11:TX_KIND_SCHEMA;min=1750863585571;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:103:2136]; 2025-06-25T14:59:45.263164Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750863585571;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;this=88923004831808;op_tx=11:TX_KIND_SCHEMA;min=1750863585571;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=propose_tx.cpp:23;message=No such table;tablet_id=9437184;tx_id=11; |91.0%| [TA] {RESULT} $(B)/ydb/core/mind/ut_fat/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::WithData [GOOD] Test command err: 2025-06-25T14:59:43.364405Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:43.394818Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:43.395108Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:43.402525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:43.402776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:43.403054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:43.403208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:43.403350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 
2025-06-25T14:59:43.403470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:43.403572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:43.403701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:43.403829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:43.403934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:43.404088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:43.430982Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:43.431121Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:43.431167Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:43.431351Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:43.431498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:43.431596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:43.431640Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:43.431725Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:43.431784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:43.431825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:43.431857Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:43.432030Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:43.432100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:43.432140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:43.432169Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:43.432255Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:43.432352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:43.432401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:43.432433Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:43.432495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:43.432533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:43.432565Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:43.432768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:43.432846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:43.432878Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:43.433111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:43.433174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:43.433203Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:43.433324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:43.433368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:43.433392Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:43.433454Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:43.433511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:43.433546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:43.433586Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:43.433807Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=48; 2025-06-25T14:59:43.433906Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=52; 2025-06-25T14:59:43.433977Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-25T14:59:43.434057Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=41; 2025-06-25T14:59:43.434140Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:59:43.434231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:59:43.434261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:59:43.434301Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
44.186688Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:272:2284];bytes=8850;rows=100;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string _yql_plan_step: uint64 _yql_tx_id: uint64 _yql_write_id: uint64 _yql_delete_flag: bool; 2025-06-25T14:59:44.186838Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T14:59:44.187008Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T14:59:44.187170Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T14:59:44.188370Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T14:59:44.188533Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T14:59:44.188688Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T14:59:44.188752Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:278:2290] finished for tablet 9437184 2025-06-25T14:59:44.189242Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:272:2284];stats={"p":[{"events":["f_bootstrap"],"t":0.001},{"events":["f_ProduceResults"],"t":0.005},{"events":["l_bootstrap"],"t":0.009},{"events":["f_processing","f_task_result"],"t":0.013},{"events":["f_ack","l_task_result"],"t":0.023},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.026}],"full":{"a":1750863584162570,"name":"_full_task","f":1750863584162570,"d_finished":0,"c":0,"l":1750863584188807,"d":26237},"events":[{"name":"bootstrap","f":1750863584163981,"d_finished":8075,"c":1,"l":1750863584172056,"d":8075},{"a":1750863584188341,"name":"ack","f":1750863584186014,"d_finished":1185,"c":1,"l":1750863584187199,"d":1651},{"a":1750863584188322,"name":"processing","f":1750863584175584,"d_finished":6930,"c":9,"l":1750863584187201,"d":7415},{"name":"ProduceResults","f":1750863584168538,"d_finished":2712,"c":12,"l":1750863584188730,"d":2712},{"a":1750863584188734,"name":"Finish","f":1750863584188734,"d_finished":0,"c":0,"l":1750863584188807,"d":73},{"name":"task_result","f":1750863584175598,"d_finished":5638,"c":8,"l":1750863584185933,"d":5638}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T14:59:44.189337Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:272:2284];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T14:59:44.189757Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:272:2284];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0.001},{"events":["f_ProduceResults"],"t":0.005},{"events":["l_bootstrap"],"t":0.009},{"events":["f_processing","f_task_result"],"t":0.013},{"events":["f_ack","l_task_result"],"t":0.023},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.026}],"full":{"a":1750863584162570,"name":"_full_task","f":1750863584162570,"d_finished":0,"c":0,"l":1750863584189387,"d":26817},"events":[{"name":"bootstrap","f":1750863584163981,"d_finished":8075,"c":1,"l":1750863584172056,"d":8075},{"a":1750863584188341,"name":"ack","f":1750863584186014,"d_finished":1185,"c":1,"l":1750863584187199,"d":2231},{"a":1750863584188322,"name":"processing","f":1750863584175584,"d_finished":6930,"c":9,"l":1750863584187201,"d":7995},{"name":"ProduceResults","f":1750863584168538,"d_finished":2712,"c":12,"l":1750863584188730,"d":2712},{"a":1750863584188734,"name":"Finish","f":1750863584188734,"d_finished":0,"c":0,"l":1750863584189387,"d":653},{"name":"task_result","f":1750863584175598,"d_finished":5638,"c":8,"l":1750863584185933,"d":5638}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T14:59:44.189848Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T14:59:44.161827Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=11;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=8392;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=8392;selected_rows=0; 2025-06-25T14:59:44.189882Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T14:59:44.190200Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=12;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=timestamp;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043;column_names=_yql_delete_flag,_yql_plan_step,_yql_tx_id,_yql_write_id,ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; 2025-06-25T14:59:44.190939Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 1 at tablet 9437184 2025-06-25T14:59:44.191045Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 12 scanId: 0 version: {1750863584339:12} readable: {1750863584339:max} at tablet 9437184 2025-06-25T14:59:44.191081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::CreateTable+Reboots+GenerateInternalPathId [GOOD] Test command err: 2025-06-25T14:59:42.226062Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:42.247399Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:42.248404Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:42.258890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:42.259133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:42.259408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:42.259567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:42.259700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:42.259824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:42.259935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:42.260050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:42.260174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:42.260302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.260487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:42.287648Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:42.287817Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:42.287881Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:42.288046Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:42.288195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:42.288295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:42.288367Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:42.288458Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:42.288519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:42.288561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:42.288593Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:42.288776Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:42.288861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:42.288906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:42.288937Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:42.289030Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:42.289103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:42.289146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:42.289180Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:42.289253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:42.289307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:42.289344Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:42.289536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:42.289577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:42.289604Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:42.289849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:42.289913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:42.289945Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:42.290067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:42.290112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.290145Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.290217Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:42.290292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:42.290341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:42.290380Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:42.290590Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=44; 2025-06-25T14:59:42.290667Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=35; 2025-06-25T14:59:42.290736Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=32; 2025-06-25T14:59:42.290816Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=41; 2025-06-25T14:59:42.290895Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:59:42.290979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:59:42.291041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:59:42.291091Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
2;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750863583278;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-25T14:59:43.628573Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:320:2328];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=119;this=88923004928352;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750863583278;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;fline=schema.h:38;event=sync_schema; 2025-06-25T14:59:43.640784Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750863583278;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;this=88923004928352;op_tx=119:TX_KIND_SCHEMA;min=1750863583278;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750863583278;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_this=89129166061696;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T14:59:43.640882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750863583278;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;this=88923004928352;op_tx=119:TX_KIND_SCHEMA;min=1750863583278;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750863583278;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_this=89129166061696;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T14:59:43.640933Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750863583278;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;this=88923004928352;op_tx=119:TX_KIND_SCHEMA;min=1750863583278;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750863583278;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_this=89129166061696;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=119; 2025-06-25T14:59:43.641275Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T14:59:43.641404Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863583278 at tablet 9437184, mediator 0 2025-06-25T14:59:43.641446Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[4] execute at tablet 9437184 2025-06-25T14:59:43.641720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-25T14:59:43.641793Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000018, ss: 20} ttl settings: { Version: 1 } at tablet 9437184 2025-06-25T14:59:43.641858Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000018; 2025-06-25T14:59:43.641916Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000018; 2025-06-25T14:59:43.642449Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000018; 2025-06-25T14:59:43.642553Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tx_controller.cpp:215;event=finished_tx;tx_id=119; 2025-06-25T14:59:43.654629Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[4] complete at tablet 9437184 CreateTable: { SeqNo { Generation: 20 } EnsureTables { Tables { PathId: 21 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4609 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-25T14:59:43.656332Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:320:2328];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=120;this=88923004931040;method=TTxController::StartProposeOnExecute;tx_info=120:TX_KIND_SCHEMA;min=1750863583281;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-25T14:59:43.669140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750863583281;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;this=88923004931040;op_tx=120:TX_KIND_SCHEMA;min=1750863583281;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T14:59:43.669218Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750863583281;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;this=88923004931040;op_tx=120:TX_KIND_SCHEMA;min=1750863583281;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=120; CreateTable: { SeqNo { Generation: 21 } EnsureTables { Tables { PathId: 22 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4610 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: 
"resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-25T14:59:43.670577Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:320:2328];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=121;this=88923004932832;method=TTxController::StartProposeOnExecute;tx_info=121:TX_KIND_SCHEMA;min=1750863583282;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-25T14:59:43.682676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750863583282;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;this=88923004932832;op_tx=121:TX_KIND_SCHEMA;min=1750863583282;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T14:59:43.682806Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750863583282;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;this=88923004932832;op_tx=121:TX_KIND_SCHEMA;min=1750863583282;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=121; CreateTable: { SeqNo { Generation: 22 } EnsureTables { Tables { PathId: 23 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4612 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } 
} TtlSettings { Version: 1 } } } } 2025-06-25T14:59:43.684174Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:320:2328];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=122;this=88923004934624;method=TTxController::StartProposeOnExecute;tx_info=122:TX_KIND_SCHEMA;min=1750863583284;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-25T14:59:43.696576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750863583284;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;this=88923004934624;op_tx=122:TX_KIND_SCHEMA;min=1750863583284;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T14:59:43.696650Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750863583284;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;this=88923004934624;op_tx=122:TX_KIND_SCHEMA;min=1750863583284;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=122; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::CreateTable-Reboots-GenerateInternalPathId [GOOD] Test command err: 2025-06-25T14:59:42.226003Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:42.254062Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:42.254289Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:42.260740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:42.260967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:42.261181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:42.261306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:42.261408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:42.261522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:42.261626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:42.261727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:42.261833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:42.261953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.262059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:42.286547Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:42.286692Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:42.286745Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:42.286892Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:42.287141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:42.287188Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:42.287285Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:42.287379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:42.287408Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:42.287576Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:42.287674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:42.287698Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:42.287796Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:42.287901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:42.287934Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:42.287991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:42.288025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:42.288070Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:42.288280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:42.288351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:42.288390Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:42.288635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:42.288683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:42.288711Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:42.288852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:42.288893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.288920Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.288988Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:42.289044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:42.289100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:42.289144Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:42.289348Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=37; 2025-06-25T14:59:42.289435Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=40; 2025-06-25T14:59:42.289508Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=31; 2025-06-25T14:59:42.289582Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=34; 2025-06-25T14:59:42.290438Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:59:42.290526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:59:42.290571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:59:42.290618Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
et_id=9437184;tx_id=119;this=88923004880192;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750863583223;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-25T14:59:43.442100Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=119;this=88923004880192;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750863583223;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;fline=schema.h:38;event=sync_schema; 2025-06-25T14:59:43.455074Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750863583223;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;this=88923004880192;op_tx=119:TX_KIND_SCHEMA;min=1750863583223;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750863583223;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_this=89129165838976;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T14:59:43.455148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750863583223;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;this=88923004880192;op_tx=119:TX_KIND_SCHEMA;min=1750863583223;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750863583223;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_this=89129165838976;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T14:59:43.455208Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750863583223;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;this=88923004880192;op_tx=119:TX_KIND_SCHEMA;min=1750863583223;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750863583223;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_this=89129165838976;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=119; 2025-06-25T14:59:43.455591Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T14:59:43.455751Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863583223 at tablet 9437184, mediator 0 2025-06-25T14:59:43.455800Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[36] execute at tablet 9437184 2025-06-25T14:59:43.456097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-25T14:59:43.456173Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 20, ss: 20} ttl settings: { Version: 1 } at tablet 9437184 2025-06-25T14:59:43.456257Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:304;method=RegisterTable;path_id=20; 2025-06-25T14:59:43.456354Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine.h:144;event=RegisterTable;path_id=20; 2025-06-25T14:59:43.456825Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=20; 2025-06-25T14:59:43.456953Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tx_controller.cpp:215;event=finished_tx;tx_id=119; 2025-06-25T14:59:43.469420Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[36] complete at tablet 9437184 CreateTable: { SeqNo { Generation: 20 } EnsureTables { Tables { PathId: 21 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4609 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-25T14:59:43.471064Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=120;this=88923004882880;method=TTxController::StartProposeOnExecute;tx_info=120:TX_KIND_SCHEMA;min=1750863583226;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-25T14:59:43.483256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750863583226;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;this=88923004882880;op_tx=120:TX_KIND_SCHEMA;min=1750863583226;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T14:59:43.483360Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750863583226;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;this=88923004882880;op_tx=120:TX_KIND_SCHEMA;min=1750863583226;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=120; CreateTable: { SeqNo { Generation: 21 } EnsureTables { Tables { PathId: 22 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4610 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 
4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-25T14:59:43.484777Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=121;this=88923004884672;method=TTxController::StartProposeOnExecute;tx_info=121:TX_KIND_SCHEMA;min=1750863583227;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-25T14:59:43.497263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750863583227;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;this=88923004884672;op_tx=121:TX_KIND_SCHEMA;min=1750863583227;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T14:59:43.497359Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750863583227;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;this=88923004884672;op_tx=121:TX_KIND_SCHEMA;min=1750863583227;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=121; CreateTable: { SeqNo { Generation: 22 } EnsureTables { Tables { PathId: 23 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4612 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { 
Version: 1 } } } } 2025-06-25T14:59:43.498806Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=122;this=88923004886464;method=TTxController::StartProposeOnExecute;tx_info=122:TX_KIND_SCHEMA;min=1750863583229;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-25T14:59:43.511128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750863583229;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;this=88923004886464;op_tx=122:TX_KIND_SCHEMA;min=1750863583229;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T14:59:43.511200Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750863583229;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;this=88923004886464;op_tx=122:TX_KIND_SCHEMA;min=1750863583229;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=122; >> PersQueueSdkReadSessionTest::SettingsValidation [GOOD] >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePrefixSpecified [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::WithUncomittedData [GOOD] Test command err: 2025-06-25T14:59:42.226040Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:42.251377Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:42.251591Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:42.258833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:42.259026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:42.259223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:42.259371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:42.259455Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:42.259521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:42.259578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:42.259665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:42.259752Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:42.259836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.259904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:42.285306Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:42.285450Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:42.285506Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:42.285670Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:42.287188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:42.287240Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:42.287329Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:42.287421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:42.287452Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:42.287611Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:42.287699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:42.287729Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:42.287826Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:42.287942Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:42.287974Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:42.288024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:42.288069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:42.288100Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:42.288355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:42.288397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:42.288422Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:42.288596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:42.288641Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:42.288666Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:42.288766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:42.288802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.288841Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.288922Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:42.288985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:42.289032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:42.289069Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:42.289284Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=44; 2025-06-25T14:59:42.289358Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=37; 2025-06-25T14:59:42.289423Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=27; 2025-06-25T14:59:42.289510Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=46; 2025-06-25T14:59:42.290432Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:59:42.290526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:59:42.290566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:59:42.290612Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
4;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:448;problem=Background activities cannot be started: no index at tablet; 2025-06-25T14:59:42.870638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923004793280;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T14:59:42.870709Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923004793280;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;fline=schema.h:38;event=sync_schema; 2025-06-25T14:59:42.885079Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;this=88923004793280;op_tx=10:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_this=89129165578496;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T14:59:42.885176Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;this=88923004793280;op_tx=10:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_this=89129165578496;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:156:2178]; 2025-06-25T14:59:42.885226Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;this=88923004793280;op_tx=10:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_this=89129165578496;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=10; 2025-06-25T14:59:42.885556Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T14:59:42.885666Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863583168 at tablet 9437184, mediator 0 2025-06-25T14:59:42.885711Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] execute at tablet 9437184 2025-06-25T14:59:42.886002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T14:59:42.886102Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000001, ss: 1} ttl settings: { Version: 1 } at tablet 9437184 2025-06-25T14:59:42.891318Z node 1 
:TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T14:59:42.891443Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000001; 2025-06-25T14:59:42.891508Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000001; 2025-06-25T14:59:42.897428Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000001; 2025-06-25T14:59:42.897571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tx_controller.cpp:215;event=finished_tx;tx_id=10; 2025-06-25T14:59:42.921382Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] complete at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6120;columns=10; 2025-06-25T14:59:42.925744Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:210;event=register_operation;operation_id=1;last=1; 2025-06-25T14:59:42.925804Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=write_queue.cpp:14;writing_size=6120;operation_id=69c0378-51d511f0-a627b4e8-458824df;in_flight=1;size_in_flight=6120; 2025-06-25T14:59:42.983596Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:128:2158];write_id=1;path_id={internal: 9438184000001, ss: 1};fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=8392;count=1;actions=__DEFAULT,;waiting=1;; 2025-06-25T14:59:42.990313Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6120;event=data_write_finished;writing_id=69c0378-51d511f0-a627b4e8-458824df; 2025-06-25T14:59:42.990601Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=60;data_size=20;sum=60;count=1; 2025-06-25T14:59:42.990702Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=212;data_size=188;sum=212;count=2;size_of_meta=136; 2025-06-25T14:59:42.990808Z node 1 :TX_COLUMNSHARD DEBUG: 
log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=284;data_size=260;sum=284;count=1;size_of_portion=208; 2025-06-25T14:59:42.991636Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-06-25T14:59:42.991818Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=2;operation_id=1; 2025-06-25T14:59:43.004576Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-06-25T14:59:43.005901Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923005132864;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750863583174;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=schema.cpp:134;propose_execute=move_table;src=1;dst=2; 2025-06-25T14:59:43.006052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923005132864;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750863583174;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:59:43.021356Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750863583174;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;this=88923005132864;op_tx=11:TX_KIND_SCHEMA;min=1750863583174;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;int_op_tx=11:TX_KIND_SCHEMA;min=1750863583174;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;int_this=89129165632256;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T14:59:43.021463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750863583174;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;this=88923005132864;op_tx=11:TX_KIND_SCHEMA;min=1750863583174;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;int_op_tx=11:TX_KIND_SCHEMA;min=1750863583174;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;int_this=89129165632256;method=TTxController::FinishProposeOnComplete;tx_id=11;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:103:2136]; 2025-06-25T14:59:43.021543Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750863583174;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;this=88923005132864;op_tx=11:TX_KIND_SCHEMA;min=1750863583174;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;int_op_tx=11:TX_KIND_SCHEMA;min=1750863583174;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;int_this=89129165632256;method=TTxController::FinishProposeOnComplete;tx_id=11;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=11; 2025-06-25T14:59:43.021942Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T14:59:43.022112Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863583174 at tablet 9437184, mediator 0 2025-06-25T14:59:43.022173Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[5] execute at tablet 9437184 2025-06-25T14:59:43.022495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=11;event=move_table_progress;old_path_id=1;new_path_id=2;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T14:59:43.022569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=11;event=move_table_progress;old_path_id=1;new_path_id=2;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:59:43.022812Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=11;fline=tx_controller.cpp:215;event=finished_tx;tx_id=11; 2025-06-25T14:59:43.035148Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[5] complete at tablet 9437184 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::DropWriteRace [GOOD] Test command err: 2025-06-25T14:59:42.226071Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:42.253359Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:42.253598Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:42.259784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:42.259978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:42.260193Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:42.260330Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:42.260441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:42.260549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:42.260648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:42.260749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:42.260866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:42.260962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.261065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:42.285940Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:42.286066Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:42.286124Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:42.286278Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:42.287190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:42.287234Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:42.287338Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:42.287437Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:42.287466Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:42.287642Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:42.287735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:42.287761Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:42.287854Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:42.287962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:42.287993Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:42.288041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:42.288081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:42.288113Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:42.288361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:42.288424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:42.288454Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:42.288639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:42.288687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:42.288720Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:42.288876Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:42.288920Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.288948Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.289015Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:42.289072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:42.289108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:42.289146Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:42.289351Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=40; 2025-06-25T14:59:42.289435Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=40; 2025-06-25T14:59:42.289512Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=38; 2025-06-25T14:59:42.289609Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=35; 2025-06-25T14:59:42.290443Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:59:42.290532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:59:42.290570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:59:42.290613Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
cannot be started: no index at tablet; 2025-06-25T14:59:42.874244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=101;this=88923004793280;method=TTxController::StartProposeOnExecute;tx_info=101:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T14:59:42.874305Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=101;this=88923004793280;method=TTxController::StartProposeOnExecute;tx_info=101:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;fline=schema.h:38;event=sync_schema; 2025-06-25T14:59:42.893176Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=101:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;this=88923004793280;op_tx=101:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;int_op_tx=101:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;int_this=89129165578176;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T14:59:42.893282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=101:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;this=88923004793280;op_tx=101:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;int_op_tx=101:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;int_this=89129165578176;method=TTxController::FinishProposeOnComplete;tx_id=101;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:103:2136]; 2025-06-25T14:59:42.893336Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=101:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;this=88923004793280;op_tx=101:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;int_op_tx=101:TX_KIND_SCHEMA;min=1750863583168;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;int_this=89129165578176;method=TTxController::FinishProposeOnComplete;tx_id=101;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=101; 2025-06-25T14:59:42.894048Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T14:59:42.894206Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863583168 at tablet 9437184, mediator 0 2025-06-25T14:59:42.894254Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] execute at tablet 9437184 2025-06-25T14:59:42.894573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T14:59:42.894654Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000001, ss: 1} ttl settings: { Version: 1 } at tablet 9437184 2025-06-25T14:59:42.899691Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T14:59:42.899818Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000001; 2025-06-25T14:59:42.899873Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000001; 2025-06-25T14:59:42.905820Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000001; 2025-06-25T14:59:42.905947Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=tx_controller.cpp:215;event=finished_tx;tx_id=101; 2025-06-25T14:59:42.930742Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] complete at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6120;columns=10; 2025-06-25T14:59:42.935020Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:210;event=register_operation;operation_id=1;last=1; 2025-06-25T14:59:42.935083Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=write_queue.cpp:14;writing_size=6120;operation_id=69d6d94-51d511f0-8a98dd01-a3d57b67;in_flight=1;size_in_flight=6120; 2025-06-25T14:59:42.983595Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:128:2158];write_id=1;path_id={internal: 9438184000001, ss: 1};fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=8392;count=1;actions=__DEFAULT,;waiting=1;; 2025-06-25T14:59:42.985526Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6120;event=data_write_finished;writing_id=69d6d94-51d511f0-8a98dd01-a3d57b67; 2025-06-25T14:59:42.985775Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=78;data_size=59;sum=78;count=1; 2025-06-25T14:59:42.986611Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=230;data_size=227;sum=230;count=2;size_of_meta=136; 2025-06-25T14:59:42.986781Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=302;data_size=299;sum=302;count=1;size_of_portion=208; 2025-06-25T14:59:42.989282Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-06-25T14:59:42.989484Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=2;operation_id=1; 2025-06-25T14:59:43.009291Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-06-25T14:59:43.045798Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=103:TX_KIND_SCHEMA;min=1750863583176;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;this=88923005133312;op_tx=103:TX_KIND_SCHEMA;min=1750863583176;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_op_tx=103:TX_KIND_SCHEMA;min=1750863583176;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_this=89129165638336;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T14:59:43.045900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=103:TX_KIND_SCHEMA;min=1750863583176;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;this=88923005133312;op_tx=103:TX_KIND_SCHEMA;min=1750863583176;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_op_tx=103:TX_KIND_SCHEMA;min=1750863583176;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_this=89129165638336;method=TTxController::FinishProposeOnComplete;tx_id=103;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:103:2136]; 2025-06-25T14:59:43.045950Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=103:TX_KIND_SCHEMA;min=1750863583176;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;this=88923005133312;op_tx=103:TX_KIND_SCHEMA;min=1750863583176;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_op_tx=103:TX_KIND_SCHEMA;min=1750863583176;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_this=89129165638336;method=TTxController::FinishProposeOnComplete;tx_id=103;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=103; 2025-06-25T14:59:43.046249Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T14:59:43.046359Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863583176 at tablet 9437184, mediator 0 2025-06-25T14:59:43.046410Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] execute at tablet 9437184 2025-06-25T14:59:43.046838Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: DropTable for pathId: {internal: 9438184000001, ss: 1} at tablet 9437184 2025-06-25T14:59:43.046913Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=103;fline=tx_controller.cpp:215;event=finished_tx;tx_id=103; 2025-06-25T14:59:43.061090Z node 1 
:TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] complete at tablet 9437184 2025-06-25T14:59:43.061453Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863583177 at tablet 9437184, mediator 0 2025-06-25T14:59:43.061532Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[8] execute at tablet 9437184 2025-06-25T14:59:43.061793Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=102;fline=abstract.h:83;progress_tx_id=102;lock_id=1;broken=0; 2025-06-25T14:59:43.062257Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=102;fline=tx_controller.cpp:215;event=finished_tx;tx_id=102; 2025-06-25T14:59:43.081602Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[8] complete at tablet 9437184 2025-06-25T14:59:43.081715Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=abstract.h:93;progress_tx_id=102;lock_id=1;broken=0; 2025-06-25T14:59:43.081949Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;commit_tx_id=102;commit_lock_id=1;fline=manager.cpp:177;event=remove_by_insert_id;id=2;operation_id=1; 2025-06-25T14:59:43.082002Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;commit_tx_id=102;commit_lock_id=1;fline=manager.cpp:180;event=remove_operation;operation_id=1; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:15.326365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:15.326455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.326492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:15.326524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:15.326557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:15.326582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:15.326692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:15.326749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:15.327327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:15.328854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:15.404330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:15.404385Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:15.418957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:15.419248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:15.419416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:15.424877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:15.425089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:15.426983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.427243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:15.434688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.435637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:15.442567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:15.442926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:15.442997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:15.443047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:15.443145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.448371Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:15.573529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { 
Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:15.573744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.573933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:15.573973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:15.574181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:15.574252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:15.577070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.577262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:15.577424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.577505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:15.577553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:15.577594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:15.579188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.579234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:15.579267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:15.580749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.580785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:15.580823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.580868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:15.584211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:15.585777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:15.585954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:15.586779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:15.586905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:15.586959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.587204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:15.587263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:15.587403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:15.587528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:15.589591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:15.589631Z node 1 :FLAT_TX_SCHEMESHARD ... 
utes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.048149Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1493: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.048658Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1795: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.048741Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-25T14:59:41.049032Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2043: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.049126Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2103: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.049221Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2161: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.049320Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.049387Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.049519Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.049814Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.049933Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.050327Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.050434Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.050622Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.050745Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.050852Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.051094Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.051162Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.051278Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.051516Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.051599Z node 5 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.051721Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.051787Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.051853Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:41.057813Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:41.059596Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:41.059669Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:41.060347Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:41.060405Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:41.060472Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:41.061202Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [5:377:2344] sender: [5:435:2058] recipient: [5:15:2062] 2025-06-25T14:59:41.114345Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:41.114410Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:47: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-25T14:59:41.336847Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Error: "User user1 login denied: too many failed password attempts", at schemeshard: 72057594046678944 2025-06-25T14:59:41.337032Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:41.337091Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:41.337345Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:41.337433Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:426:2382], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-25T14:59:41.338127Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 0 2025-06-25T14:59:43.340882Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:44: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-25T14:59:43.363715Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:86: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwOTA2NzgzLCJpYXQiOjE3NTA4NjM1ODMsInN1YiI6InVzZXIxIn0.GD5mM_DEv4VZeKnF1s6fBPTaYT_axfTiEn7ZZH6eqbO9wDmGkvXuCBw7qzD8qfxHJbVx9ZOV1euYebwmjGtrOe397IDNbJrPUQhNF-ZISvoAk_9tA1P7Y9r0RDZVX1JHK3N_YoDguoKGriE7jaomwFfFXlexep5GLu8aNFeGViMLBV2tH212r1xKlSuXw1evax5jFUTq4BX0uxGJjXYLZd1SA1K38SzW4zjqIPDjjwbxqIrTu1RVTD4XA7Iuw4wtuE1_Di9ZlKzBOMAF5OGbugP3f2W_k9RWDTcxEn69WZyjwvE-NuDUwVhnMzoXSZSNSgjTdBFoaUCWChpUaLuXXQ" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzUwOTA2NzgzLCJpYXQiOjE3NTA4NjM1ODMsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-06-25T14:59:43.364352Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T14:59:43.364580Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 269us result status StatusSuccess 2025-06-25T14:59:43.365153Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvcqD4wlS7FshV6+8Nbwv\nELv9RvGWMzJfhOJ758IaRCCJsB7VW8WCf/D2zB16r6KORqnp5QQ+NVfrtWmrRCmQ\nVbjhbHsGpGXkC1aLt1Ly/i+fjNXkx0Ag+lBnpqO3j/W0YAvqNiE3hs4T2AYEdEw1\nL1YJrVRKqjnrVpHSqn5WiaAe+1tsNWjKYYd3W2suLaTCXQxiDnjA26zimr/+5Nda\nVYHhMpPPxblMugmHxgGniIsOn/dNYiqae7BLV/rgWB59f3KaJZyAhZXXHD12viW7\ndPdl9Ai10YeROdN2WzundOgcdbHLHo01OBzzOzpVTubHacONIjq3zXJtNlcYoIJp\nMwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750949978812 } PublicKeys { KeyId: 2 KeyDataPEM: "-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApkhf8Wb14SqchRvcip6d\nm+lhxvCE90OS0rpsx3W5DdvPGdd79gQ2R71vB8zycw3fPtHi9K5fQJ4rh7yIj15w\nKv7z/S2a24yBdm6iz+42FVRaouyiOcX6kwPVjT5FJguCaS/upZ6BTAO2TxuuLzBv\nxk2G4E5qU/zHTIVMrr9qfo74/0vYF3nxA4ccS95GA3oWCtut/o3pA1hi8CgL5+bo\nesgz5nyJhdtFiPYCG1+MB1R6ZIb+OSRq9RmouZj6DhXohZ58nzjcqg+rCCuFbrqF\nWTY14FAR/m1fKuZSjDAyirsBM2z3qARI+9yOGUDgx7DQi0G30VGxahN1dJGSnRie\nNwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750949978984 } PublicKeys { KeyId: 3 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4ZbkfIemo0NijR7OZGPE\nqa1MLbZTj5dunaR3DvK69JAC5PsS39qPcxRrnAQEU7SSItQMpF+C/5DHBl3Xqevb\nua21YoM7CjXIpZP1VXTLjtHoLI9V6UHi6A6sY7/pNMDANtniofIxoDxB8gsfH30S\nT0nle0YZPcL/s4Ruwso7a3Nn3UxIGC05RckvaBkYN/9+zMTBXCNlPiuHmfDD6N+j\naMK9NLjPIywimAMhSWKtDXNnbmKOEULsXMmW29oJzaP4OE2oyCou4T8YHCRs51ek\n7RXEVwIIiirOmZG/n8khNJk/Ikumf9R2RJDSW9OT5BtthqlniXNnnnBbOSAxBS97\nwwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1750949981333 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandomUpsertRevert+UseSink+UseDataQuery [GOOD] Test command err: Trying to start YDB, gRPC: 2719, MsgBus: 11780 2025-06-25T14:52:47.309094Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900044329944900:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:47.309163Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a6c/r3tmp/tmpbp86cz/pdisk_1.dat 2025-06-25T14:52:47.799735Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2719, node 1 2025-06-25T14:52:47.809272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:52:47.809419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:52:47.811254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:52:47.846755Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:52:47.846777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:52:47.846784Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:52:47.846942Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11780 TClient 
is connected to server localhost:11780 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T14:52:48.349475Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:52:48.391131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:52:48.416015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:48.554905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:48.708635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:48.784890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:52:50.333504Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900057214848376:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:50.333622Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:50.618992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.651312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.725268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.785604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.826751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.904196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:50.981833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:52:51.081960Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900061509816340:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:51.082034Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:51.082248Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900061509816345:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:52:51.086171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:52:51.108599Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900061509816347:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:52:51.211883Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900061509816398:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:52:52.316413Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900044329944900:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:52.316475Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; [[[1u];["One"]];[[2u];["Two"]]] [[[1u];["One"]];[[2u];["Two"]]] Trying to start YDB, gRPC: 31545, MsgBus: 25119 2025-06-25T14:52:53.344781Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900071726662104:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:52:53.344825Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a6c/r3tmp/tmpp0F3Ot/pdisk_1.dat 2025-06-25T14:52:53.486683Z node 2 :IMPORT WARN: schemeshard_i ... 58228066:2080] 1750863574823201 != 1750863574823204 2025-06-25T14:59:35.002811Z node 42 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:35.002931Z node 42 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:35.004776Z node 42 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62518, node 42 2025-06-25T14:59:35.040209Z node 42 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:35.040234Z node 42 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:35.040249Z node 42 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:35.040428Z node 42 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10214 TClient is connected to server localhost:10214 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:35.496772Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:59:35.510808Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:35.595840Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:35.819757Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:59:35.830070Z node 42 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:35.982427Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:59:39.824085Z node 42 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[42:7519901792558228085:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:39.824194Z node 42 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:59:39.998720Z node 42 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [42:7519901814033066174:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:39.998865Z node 42 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:40.041731Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:40.076342Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:40.110788Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:40.148513Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:40.184563Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:40.256593Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:40.294324Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:40.380430Z node 42 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [42:7519901818328034137:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:40.380553Z node 42 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:40.380662Z node 42 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [42:7519901818328034142:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:40.385650Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:40.396961Z node 42 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [42:7519901818328034144:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:59:40.452225Z node 42 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [42:7519901818328034195:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:42.015607Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:42.060799Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:42.096956Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) [[["a6a881f1-92b4-4510-8692-222fbe9c2feb"]]] [[["a6a881f1-92b4-4510-8692-222fbe9c2feb"]]] >> TCmsTest::RequestReplaceManyDevicesOnOneNode [GOOD] >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage [GOOD] >> BasicUsage::TWriteSession_AutoBatching [GOOD] >> BasicUsage::TWriteSession_BatchingProducesContinueTokens [GOOD] >> BasicUsage::BrokenCredentialsProvider >> BackupRestoreS3::TestAllPrimitiveTypes-UINT16 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT32 |91.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::RequestReplaceManyDevicesOnOneNode [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT16 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT32 >> BackupRestore::RestoreIndexTableReadReplicasSettings [GOOD] >> BackupRestore::RestoreTableSplitBoundaries >> Compression::WriteGZIP [GOOD] >> Compression::WriteZSTD >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression >> BackupPathTest::EmptyDirectoryIsOk >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeView [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTransfer [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSysView [GOOD] >> CommonEncryptionRequirementsTest::CommonEncryptionRequirements >> BackupRestoreS3::TestAllPrimitiveTypes-UUID [GOOD] |91.0%| [TA] $(B)/ydb/core/tx/schemeshard/ut_login/test-results/unittest/{meta.json ... 
results_accumulator.log} >> BackupRestoreS3::RestoreTableSplitBoundaries [GOOD] >> BackupRestoreS3::RestoreIndexTableSplitBoundaries >> EncryptedBackupParamsValidationTestFeatureDisabled::SrcPrefixAndSrcPathSpecified |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ExportAfterFail >> TColumnShardTestSchema::TTL+Reboot+Internal+FirstPkColumn >> KqpQueryService::TableSink_ReplaceFromSelectLargeOlap [GOOD] >> KqpQueryService::TableSink_ReplaceDuplicatesOlap >> TColumnShardTestSchema::TTL-Reboot+Internal-FirstPkColumn >> TColumnShardTestSchema::Drop-Reboots-GenerateInternalPathId >> MoveTable::EmptyTable >> TCmsTest::EmergencyDuringRollingRestart [GOOD] >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchTimeout [GOOD] >> ReadSessionImplTest::DataReceivedCallbackReal [GOOD] >> TColumnShardTestSchema::ExportWithLostAnswer >> MoveTable::EmptyTable [GOOD] >> ReadSessionImplTest::DataReceivedCallback >> TKesusTest::TestAcquireSemaphoreRebootTimeout [GOOD] >> BackupPathTest::EmptyDirectoryIsOk [GOOD] >> TColumnShardTestSchema::TTL-Reboot-Internal-FirstPkColumn >> TColumnShardTestSchema::Drop+Reboots+GenerateInternalPathId [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::SrcPrefixAndSrcPathSpecified [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT16 [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSequence [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_RegisteredSourceId_Test [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT32 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT32 >> TKesusTest::TestAcquireSemaphoreViaDecrease >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeReplication >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_PreferedPartition_Test >> BackupRestore::TestAllPrimitiveTypes-UINT32 >> TKesusTest::TestAcquireSemaphoreViaDecrease [GOOD] >> ReadSessionImplTest::DataReceivedCallback [GOOD] >> TColumnShardTestSchema::Drop-Reboots-GenerateInternalPathId [GOOD] >> BackupPathTest::CommonPrefixButExplicitImportItems |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut/unittest >> TCmsTest::EmergencyDuringRollingRestart [GOOD] Test command err: 2025-06-25T14:59:30.127168Z node 10 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvGetConfigRequest { }, response# NKikimr::NCms::TEvCms::TEvGetConfigResponse { Status { Code: OK } Config { DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: true UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } } } 2025-06-25T14:59:30.127523Z node 10 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-25T14:59:30.167021Z node 10 :CMS DEBUG: 
cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-25T14:59:30.167206Z node 10 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-25T14:59:30.168985Z node 10 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-17-17" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 17 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-10-10" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 10 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-11-11" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 11 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-12-12" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 12 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-13-13" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 13 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-4-0" State: 
UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-14-14" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 14 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-15-15" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 15 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-16-16" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 16 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Timestamp: 120026512 } } 2025-06-25T14:59:30.170339Z node 10 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-17-17" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 17 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-10-10" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 10 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-11-11" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 11 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: 
"vdisk-1-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-12-12" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 12 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-13-13" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 13 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-14-14" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 14 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-15-15" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 15 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-16-16" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 16 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Timestamp: 120026512 } 2025-06-25T14:59:30.170608Z node 10 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 120.002512s 2025-06-25T14:59:30.170667Z node 10 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-25T14:59:30.170777Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 10, wbId# [10:8388350642965737326:1634689637] 2025-06-25T14:59:30.170824Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 11, wbId# [11:8388350642965737326:1634689637] 2025-06-25T14:59:30.170851Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 12, wbId# [12:8388350642965737326:1634689637] 2025-06-25T14:59:30.170876Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 
13, wbId# [13:8388350642965737326:1634689637] 2025-06-25T14:59:30.170900Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 14, wbId# [14:8388350642965737326:1634689637] 2025-06-25T14:59:30.170930Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 15, wbId# ... de 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 17, wbId# [17:8388350642965737326:1634689637] 2025-06-25T14:59:40.300330Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 10, response# PDiskStateInfo { PDiskId: 10 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-10.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-25T14:59:40.300978Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 15, response# PDiskStateInfo { PDiskId: 15 CreateTime: 0 ChangeTime: 0 Path: "/15/pdisk-15.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-25T14:59:40.301084Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 16, response# PDiskStateInfo { PDiskId: 16 CreateTime: 0 ChangeTime: 0 Path: "/16/pdisk-16.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-25T14:59:40.301208Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 17, response# PDiskStateInfo { PDiskId: 17 CreateTime: 0 ChangeTime: 0 Path: "/17/pdisk-17.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-25T14:59:40.301250Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 11, response# PDiskStateInfo { PDiskId: 11 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-11.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-25T14:59:40.301282Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 12, response# PDiskStateInfo { PDiskId: 12 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-12.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-25T14:59:40.301309Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 13, response# PDiskStateInfo { PDiskId: 13 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-13.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-25T14:59:40.301338Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 14, response# PDiskStateInfo { PDiskId: 14 CreateTime: 0 ChangeTime: 0 Path: "/14/pdisk-14.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 240026 2025-06-25T14:59:40.301372Z node 10 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-25T14:59:40.301594Z node 10 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 10:10, status# FAULTY, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 3 StateLimit# 1, dry 
run# 0 2025-06-25T14:59:40.301659Z node 10 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 1 2025-06-25T14:59:40.301825Z node 10 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-06-25T14:59:40.302011Z node 10 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Success: true, cookie# 2 2025-06-25T14:59:40.302062Z node 10 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 10:10 2025-06-25T14:59:40.314629Z node 10 :CMS DEBUG: cms_tx_log_and_send.cpp:27: TTxLogAndSend Complete 2025-06-25T14:59:40.340588Z node 10 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-25T14:59:40.340682Z node 10 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-25T14:59:40.340746Z node 10 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:04:00Z 2025-06-25T14:59:40.341640Z node 10 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "10" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 10 has not yet been completed" } } PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-25T14:59:40.341736Z node 10 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "10" Services: "storage" Duration: 600000000 Issue { Type: GENERIC Message: "VDisks eviction from host 10 has not yet been completed" } 2025-06-25T14:59:40.341784Z node 10 :CMS DEBUG: cms.cpp:398: Result: ERROR (reason: Evict vdisks is disabled in Sentinel (self heal)) 2025-06-25T14:59:40.341908Z node 10 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-25T14:59:40.342045Z node 10 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" PartialPermissionAllowed: false Schedule: false Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: true 2025-06-25T14:59:40.353988Z node 10 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-25T14:59:40.354194Z node 10 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ERROR Reason: "Evict vdisks is disabled in Sentinel (self heal)" } RequestId: "user-r-1" } 2025-06-25T14:59:40.354674Z node 10 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-25T14:59:40.366716Z node 10 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-25T14:59:40.366923Z node 10 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: true UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 1 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 
FaultyPDisksThresholdPerNode: 0 } 2025-06-25T14:59:45.333789Z node 10 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-25T14:59:45.333884Z node 10 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-25T14:59:45.334163Z node 10 :CMS DEBUG: cms.cpp:1153: Running CleanupWalleTasks 2025-06-25T14:59:45.334476Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 10, wbId# [10:8388350642965737326:1634689637] 2025-06-25T14:59:45.334547Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 11, wbId# [11:8388350642965737326:1634689637] 2025-06-25T14:59:45.334582Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 12, wbId# [12:8388350642965737326:1634689637] 2025-06-25T14:59:45.334626Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 13, wbId# [13:8388350642965737326:1634689637] 2025-06-25T14:59:45.334668Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 14, wbId# [14:8388350642965737326:1634689637] 2025-06-25T14:59:45.334713Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 15, wbId# [15:8388350642965737326:1634689637] 2025-06-25T14:59:45.334743Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 16, wbId# [16:8388350642965737326:1634689637] 2025-06-25T14:59:45.334771Z node 10 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 17, wbId# [17:8388350642965737326:1634689637] 2025-06-25T14:59:45.335031Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 10, response# PDiskStateInfo { PDiskId: 10 CreateTime: 0 ChangeTime: 0 Path: "/10/pdisk-10.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-25T14:59:45.335764Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 13, response# PDiskStateInfo { PDiskId: 13 CreateTime: 0 ChangeTime: 0 Path: "/13/pdisk-13.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-25T14:59:45.335899Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 14, response# PDiskStateInfo { PDiskId: 14 CreateTime: 0 ChangeTime: 0 Path: "/14/pdisk-14.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-25T14:59:45.335964Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 15, response# PDiskStateInfo { PDiskId: 15 CreateTime: 0 ChangeTime: 0 Path: "/15/pdisk-15.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-25T14:59:45.336042Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 16, response# PDiskStateInfo { PDiskId: 16 CreateTime: 0 ChangeTime: 0 Path: "/16/pdisk-16.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-25T14:59:45.336108Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 17, response# PDiskStateInfo { PDiskId: 17 
CreateTime: 0 ChangeTime: 0 Path: "/17/pdisk-17.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-25T14:59:45.336166Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 11, response# PDiskStateInfo { PDiskId: 11 CreateTime: 0 ChangeTime: 0 Path: "/11/pdisk-11.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-25T14:59:45.336221Z node 10 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 12, response# PDiskStateInfo { PDiskId: 12 CreateTime: 0 ChangeTime: 0 Path: "/12/pdisk-12.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 300026 2025-06-25T14:59:45.336280Z node 10 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-25T14:59:45.336597Z node 10 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 10:10, status# ACTIVE, required status# FAULTY, reason# Forced status, dry run# 0 2025-06-25T14:59:45.336693Z node 10 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 1 2025-06-25T14:59:45.336892Z node 10 :CMS DEBUG: cms_tx_log_and_send.cpp:19: TTxLogAndSend Execute 2025-06-25T14:59:45.337208Z node 10 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Success: true, cookie# 3 2025-06-25T14:59:45.337267Z node 10 :CMS NOTICE: sentinel.cpp:1226: [Sentinel] [Main] PDisk status has been changed: pdiskId# 10:10 |91.1%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::Drop+Reboots+GenerateInternalPathId [GOOD] Test command err: 2025-06-25T14:59:43.776828Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:43.805713Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:43.806002Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:43.813075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:43.813304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:43.813535Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:43.813697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:43.813813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:43.813932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:43.814042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:43.814150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:43.814263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:43.814388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:43.814507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:43.841656Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:43.841853Z node 1 :TX_COLUMNSHARD 
INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:43.841911Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:43.842114Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:43.842272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:43.842362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:43.842429Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:43.842522Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:43.842587Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:43.842634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:43.842667Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:43.842842Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:43.842979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:43.843024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:43.843055Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:43.843190Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:43.843275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:43.843335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 
2025-06-25T14:59:43.843372Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:43.843422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:43.843464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:43.843494Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:43.843710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:43.843757Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:43.843788Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:43.890932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:43.891062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:43.891104Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:43.891236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:43.891284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:43.891314Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:43.891395Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:43.891460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:43.891502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:43.891543Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:43.891766Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=37; 2025-06-25T14:59:43.891832Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=33; 2025-06-25T14:59:43.891924Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=41; 2025-06-25T14:59:43.892051Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=67; 2025-06-25T14:59:43.892144Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:59:43.892224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:59:43.892257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:59:43.892324Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T14:59:52.066568Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T14:59:52.066602Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T14:59:52.066634Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T14:59:52.066671Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T14:59:52.066745Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=1; 2025-06-25T14:59:52.066804Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863285029;tx_id=18446744073709551615;;current_snapshot_ts=1750863584858; 2025-06-25T14:59:52.066854Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=1;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T14:59:52.066909Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T14:59:52.066943Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T14:59:52.067018Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=1.000000s; 2025-06-25T14:59:52.067066Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T14:59:52.159733Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863585029:max} readable: {1750863585029:max} at tablet 9437184 2025-06-25T14:59:52.159882Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T14:59:52.161725Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863585029:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T14:59:52.161817Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863585029:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T14:59:52.162392Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863585029:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T14:59:52.162528Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863585029:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T14:59:52.165664Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863585029:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:825:2811];trace_detailed=; 2025-06-25T14:59:52.166764Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T14:59:52.166963Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T14:59:52.167287Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T14:59:52.167409Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T14:59:52.167535Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T14:59:52.167584Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:825:2811] finished for tablet 9437184 2025-06-25T14:59:52.167971Z 
node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:818:2805];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750863592165594,"name":"_full_task","f":1750863592165594,"d_finished":0,"c":0,"l":1750863592167627,"d":2033},"events":[{"name":"bootstrap","f":1750863592165942,"d_finished":1169,"c":1,"l":1750863592167111,"d":1169},{"a":1750863592167266,"name":"ack","f":1750863592167266,"d_finished":0,"c":0,"l":1750863592167627,"d":361},{"a":1750863592167251,"name":"processing","f":1750863592167251,"d_finished":0,"c":0,"l":1750863592167627,"d":376},{"name":"ProduceResults","f":1750863592167093,"d_finished":276,"c":2,"l":1750863592167569,"d":276},{"a":1750863592167573,"name":"Finish","f":1750863592167573,"d_finished":0,"c":0,"l":1750863592167627,"d":54}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T14:59:52.168039Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:818:2805];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T14:59:52.168417Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:818:2805];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750863592165594,"name":"_full_task","f":1750863592165594,"d_finished":0,"c":0,"l":1750863592168093,"d":2499},"events":[{"name":"bootstrap","f":1750863592165942,"d_finished":1169,"c":1,"l":1750863592167111,"d":1169},{"a":1750863592167266,"name":"ack","f":1750863592167266,"d_finished":0,"c":0,"l":1750863592168093,"d":827},{"a":1750863592167251,"name":"processing","f":1750863592167251,"d_finished":0,"c":0,"l":1750863592168093,"d":842},{"name":"ProduceResults","f":1750863592167093,"d_finished":276,"c":2,"l":1750863592167569,"d":276},{"a":1750863592167573,"name":"Finish","f":1750863592167573,"d_finished":0,"c":0,"l":1750863592168093,"d":520}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T14:59:52.168494Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T14:59:52.162490Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T14:59:52.168556Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T14:59:52.168670Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchTimeout [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T14:59:25.237000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:59:25.237153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:25.237215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:59:25.237280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:59:25.238677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:59:25.238740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:59:25.238826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:59:25.238916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:59:25.239838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:59:25.241585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:59:25.328957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T14:59:25.329020Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:25.347142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:59:25.347579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:59:25.347753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T14:59:25.353903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:59:25.354263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:59:25.354950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.355259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:59:25.358927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:25.359116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:25.360458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:25.360558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:25.360705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:25.360759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:25.360814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:25.360902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.367671Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T14:59:25.491941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:59:25.492104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at 
schemeshard: 72057594046678944 2025-06-25T14:59:25.492250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:59:25.492288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:59:25.492472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:59:25.492524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:59:25.494217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.494340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:59:25.494468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.494506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:59:25.494534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:59:25.494580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:59:25.496134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.496175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:59:25.496203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:59:25.497354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.497385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:59:25.497432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.497469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:59:25.499890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to 
coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:59:25.501197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:59:25.501321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:59:25.501957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:25.502056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T14:59:25.502092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.502348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T14:59:25.502400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:59:25.502525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T14:59:25.502614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T14:59:25.503919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:25.503950Z node 1 :FLAT_TX_SCHEMESHARD ... 
MESHARD NOTICE: schemeshard__init.cpp:2247: TTxInit for TablePartitions, read records: 1, at schemeshard: 72057594046678944 2025-06-25T14:59:49.367608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2313: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.367814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2463: TTxInit for ChannelsBinding, read records: 3, at schemeshard: 72057594046678944 2025-06-25T14:59:49.368117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2842: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.368230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2921: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.368599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3422: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.368678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3458: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.368901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3684: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.368996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3829: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.369087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3846: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.369272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4006: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.369397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4022: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.369548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4307: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.369790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4646: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.369880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4702: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.370007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4791: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.370057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4818: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.370105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4845: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-25T14:59:49.370344Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-25T14:59:49.380857Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:59:49.381048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:59:49.383172Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435083, 
Sender [1:1754:3674], Recipient [1:1754:3674]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-25T14:59:49.383233Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5018: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-25T14:59:49.384823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:59:49.384916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:59:49.386033Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:1754:3674], Recipient [1:1754:3674]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:49.386098Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:49.386561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:59:49.386629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:59:49.386695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:59:49.386739Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-25T14:59:49.388764Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 274399233, Sender [1:1792:3674], Recipient [1:1754:3674]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-25T14:59:49.388840Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5109: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-25T14:59:49.388886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:1754:3674] sender: [1:1812:2058] recipient: [1:15:2062] 2025-06-25T14:59:49.442823Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122945, Sender [1:1811:3720], Recipient [1:1754:3674]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-25T14:59:49.442914Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4967: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-25T14:59:49.443054Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T14:59:49.443358Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Simple" took 295us result status StatusSuccess 2025-06-25T14:59:49.444824Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Simple" PathDescription { Self { Name: "Simple" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1001 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Simple" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 MaxPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 27456 RowCount: 200 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } 
ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 16717 Memory: 156728 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 27456 DataSize: 27456 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> EncryptedExportTest::EncryptedExportAndImport >> BackupRestore::TestAllPrimitiveTypes-INT32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT64 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::EmptyTable [GOOD] Test command err: 2025-06-25T14:59:49.415894Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:49.444179Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:49.444482Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:49.451459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:49.451673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:49.451904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:49.452023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:49.452140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:49.452249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:49.452357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:49.452472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:49.452576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:49.452684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:49.452817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:49.485294Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:49.485456Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:49.485522Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:49.485698Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:49.485853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:49.485952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:49.485998Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:49.486083Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:49.486140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:49.486182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:49.486220Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:49.486396Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:49.486458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:49.486497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:49.486527Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:49.486614Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:49.486669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:49.486733Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:49.486791Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:49.486842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:49.486886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:49.486921Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:49.487139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:49.487182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:49.487213Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:49.487417Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:49.487480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:49.487513Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:49.487628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:49.487677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:49.487713Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:49.487833Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:49.487906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:49.487947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:49.488006Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:49.488226Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=42; 2025-06-25T14:59:49.488336Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=49; 2025-06-25T14:59:49.488414Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=37; 2025-06-25T14:59:49.488499Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=43; 2025-06-25T14:59:49.488593Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:59:49.488688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:59:49.488742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:59:49.488798Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
sLoadingTime=41; 2025-06-25T14:59:49.742142Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-25T14:59:49.742269Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=49; 2025-06-25T14:59:49.742318Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=10; 2025-06-25T14:59:49.742378Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=27; 2025-06-25T14:59:49.742429Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=21; 2025-06-25T14:59:49.742480Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=25; 2025-06-25T14:59:49.742518Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=2890; 2025-06-25T14:59:49.742657Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T14:59:49.742711Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T14:59:49.742811Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T14:59:49.743103Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T14:59:49.743154Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:448;problem=Background activities cannot be started: no index at tablet; 2025-06-25T14:59:49.743498Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T14:59:49.743581Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T14:59:49.743608Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T14:59:49.743632Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T14:59:49.743682Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T14:59:49.743725Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:448;problem=Background activities cannot be started: no index at tablet; 2025-06-25T14:59:50.033659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923004793280;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750863590383;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T14:59:50.033744Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923004793280;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750863590383;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;fline=schema.h:38;event=sync_schema; 2025-06-25T14:59:50.046245Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750863590383;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;this=88923004793280;op_tx=10:TX_KIND_SCHEMA;min=1750863590383;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750863590383;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_this=89129165578496;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T14:59:50.046360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750863590383;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;this=88923004793280;op_tx=10:TX_KIND_SCHEMA;min=1750863590383;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750863590383;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_this=89129165578496;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:156:2178]; 2025-06-25T14:59:50.046418Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750863590383;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;this=88923004793280;op_tx=10:TX_KIND_SCHEMA;min=1750863590383;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750863590383;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_this=89129165578496;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=10; 2025-06-25T14:59:50.046794Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T14:59:50.046910Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863590383 at tablet 9437184, mediator 0 2025-06-25T14:59:50.046955Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] execute at 
tablet 9437184 2025-06-25T14:59:50.047270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T14:59:50.047410Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000001, ss: 1} ttl settings: { Version: 1 } at tablet 9437184 2025-06-25T14:59:50.052919Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T14:59:50.053080Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000001; 2025-06-25T14:59:50.053154Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000001; 2025-06-25T14:59:50.059545Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000001; 2025-06-25T14:59:50.059717Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tx_controller.cpp:215;event=finished_tx;tx_id=10; 2025-06-25T14:59:50.085298Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] complete at tablet 9437184 2025-06-25T14:59:50.086247Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923004831808;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750863590386;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=schema.cpp:134;propose_execute=move_table;src=1;dst=2; 2025-06-25T14:59:50.086328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923004831808;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750863590386;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:59:50.098456Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750863590386;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;this=88923004831808;op_tx=11:TX_KIND_SCHEMA;min=1750863590386;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;int_op_tx=11:TX_KIND_SCHEMA;min=1750863590386;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;int_this=89129165595456;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T14:59:50.098527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750863590386;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;this=88923004831808;op_tx=11:TX_KIND_SCHEMA;min=1750863590386;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;int_op_tx=11:TX_KIND_SCHEMA;min=1750863590386;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;int_this=89129165595456;method=TTxController::FinishProposeOnComplete;tx_id=11;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:103:2136]; 2025-06-25T14:59:50.098563Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750863590386;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;this=88923004831808;op_tx=11:TX_KIND_SCHEMA;min=1750863590386;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;int_op_tx=11:TX_KIND_SCHEMA;min=1750863590386;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;int_this=89129165595456;method=TTxController::FinishProposeOnComplete;tx_id=11;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=11; 2025-06-25T14:59:50.098917Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T14:59:50.099080Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863590386 at tablet 9437184, mediator 0 2025-06-25T14:59:50.099158Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[4] execute at tablet 9437184 2025-06-25T14:59:50.099398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=11;event=move_table_progress;old_path_id=1;new_path_id=2;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T14:59:50.099440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=11;event=move_table_progress;old_path_id=1;new_path_id=2;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:59:50.099536Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=11;fline=tx_controller.cpp:215;event=finished_tx;tx_id=11; 2025-06-25T14:59:50.111438Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[4] complete at tablet 9437184 |91.1%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/join/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kesus/tablet/ut/unittest >> TKesusTest::TestAcquireSemaphoreViaDecrease [GOOD] Test command err: 2025-06-25T14:59:10.243160Z node 1 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:10.243286Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:10.262586Z node 1 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:10.262729Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:10.289073Z node 1 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:10.289526Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=14222926413456063486, session=0, seqNo=0) 2025-06-25T14:59:10.289691Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:10.301914Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=14222926413456063486, session=1) 2025-06-25T14:59:10.302236Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[1:136:2160], cookie=3085503424479364465, session=0, seqNo=0) 2025-06-25T14:59:10.302329Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:59:10.313817Z node 1 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[1:136:2160], cookie=3085503424479364465, session=2) 2025-06-25T14:59:10.314563Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[1:147:2169], cookie=5470904328884335903, name="Sem1", limit=1) 2025-06-25T14:59:10.314760Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-25T14:59:10.327428Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[1:147:2169], cookie=5470904328884335903) 2025-06-25T14:59:10.327751Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=111, session=1, semaphore="Sem1" count=1) 2025-06-25T14:59:10.327906Z node 1 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2025-06-25T14:59:10.328079Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[1:136:2160], cookie=222, session=2, semaphore="Sem1" count=1) 2025-06-25T14:59:10.340013Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=111) 2025-06-25T14:59:10.340089Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[1:136:2160], cookie=222) 2025-06-25T14:59:10.340623Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:155:2177], cookie=14932349272488359649, name="Sem1") 2025-06-25T14:59:10.340715Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] 
TTxSemaphoreDescribe::Complete (sender=[1:155:2177], cookie=14932349272488359649) 2025-06-25T14:59:10.342568Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[1:158:2180], cookie=12004645903142362801, name="Sem1") 2025-06-25T14:59:10.342660Z node 1 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[1:158:2180], cookie=12004645903142362801) 2025-06-25T14:59:10.778728Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:10.792051Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:11.142726Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:11.154451Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:11.517682Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:11.535276Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:11.885474Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:11.897314Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:12.246273Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:12.257698Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:12.607359Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:12.622215Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:12.978599Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:12.991105Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:13.346948Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:13.358933Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:13.727139Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:13.741146Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:14.142282Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:14.161468Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:14.542233Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:14.565023Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:14.946104Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:14.958190Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:15.327952Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:15.340542Z node 1 :KESUS_TABLET DEBUG: 
tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:15.701597Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:15.713926Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:16.129114Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:16.141379Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:16.503451Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:16.515595Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:16.877512Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:16.889560Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:17.251061Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:17.263458Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:17.629962Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:17.642026Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:18.025842Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:18.038217Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:18.399785Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:18.411876Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:18.773899Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:18.785995Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:19.147362Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:19.159193Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:19.509785Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:19.521812Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:19.894132Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:19.906291Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:20.267450Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:20.279672Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:20.640955Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:20.652800Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:21.013945Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] 
TTxSelfCheck::Execute 2025-06-25T14:59:21.026185Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:21.388334Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:21.400436Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:21.794473Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:21.806350Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:22.172439Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:22.184866Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:22.559739Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:22.577561Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:22.943684Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:22.958176Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:23.332611Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:23.345023Z node 1 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck: ... BUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:45.056915Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:45.409439Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:45.421569Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:45.799043Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:45.811080Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:46.242101Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:46.254346Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:46.630204Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:46.642235Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:47.009874Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:47.025297Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:47.416645Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:47.434450Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:47.799407Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:47.811573Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:48.208430Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: 
[72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:48.221394Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:48.624682Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:48.638241Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:49.021000Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:49.045074Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:49.431030Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:49.445458Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:49.830574Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:22: [72057594037927937] TTxSelfCheck::Execute 2025-06-25T14:59:49.842718Z node 4 :KESUS_TABLET DEBUG: tx_self_check.cpp:31: [72057594037927937] TTxSelfCheck::Complete 2025-06-25T14:59:50.197857Z node 4 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:27: [72057594037927937] TTxSessionTimeout::Execute (session=1) 2025-06-25T14:59:50.197938Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:32: [72057594037927937] Deleting session 1 2025-06-25T14:59:50.197988Z node 4 :KESUS_TABLET DEBUG: tablet_db.cpp:98: [72057594037927937] Deleting session 1 / semaphore 1 "Sem1" owner link 2025-06-25T14:59:50.210376Z node 4 :KESUS_TABLET DEBUG: tx_session_timeout.cpp:56: [72057594037927937] TTxSessionTimeout::Complete (session=1) 2025-06-25T14:59:50.221352Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[4:452:2411], cookie=8605880611777585814, name="Sem1") 2025-06-25T14:59:50.221467Z node 4 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[4:452:2411], cookie=8605880611777585814) 2025-06-25T14:59:50.669321Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:50.669433Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:50.689642Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:50.689775Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:50.713939Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:50.714433Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2160], cookie=17182065626038421088, session=0, seqNo=0) 2025-06-25T14:59:50.714582Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 1 2025-06-25T14:59:50.727236Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2160], cookie=17182065626038421088, session=1) 2025-06-25T14:59:50.727576Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2160], cookie=14694636520545662267, session=0, seqNo=0) 2025-06-25T14:59:50.727708Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 2 2025-06-25T14:59:50.739645Z node 5 :KESUS_TABLET DEBUG: 
tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2160], cookie=14694636520545662267, session=2) 2025-06-25T14:59:50.739927Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:35: [72057594037927937] TTxSessionAttach::Execute (sender=[5:136:2160], cookie=8932073202424584913, session=0, seqNo=0) 2025-06-25T14:59:50.740045Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:118: [72057594037927937] Created new session 3 2025-06-25T14:59:50.752020Z node 5 :KESUS_TABLET DEBUG: tx_session_attach.cpp:140: [72057594037927937] TTxSessionAttach::Complete (sender=[5:136:2160], cookie=8932073202424584913, session=3) 2025-06-25T14:59:50.752598Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:32: [72057594037927937] TTxSemaphoreCreate::Execute (sender=[5:149:2171], cookie=16329385413390682682, name="Sem1", limit=3) 2025-06-25T14:59:50.752756Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:104: [72057594037927937] Created new semaphore 1 "Sem1" 2025-06-25T14:59:50.764829Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_create.cpp:112: [72057594037927937] TTxSemaphoreCreate::Complete (sender=[5:149:2171], cookie=16329385413390682682) 2025-06-25T14:59:50.765144Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=111, session=1, semaphore="Sem1" count=2) 2025-06-25T14:59:50.765289Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2025-06-25T14:59:50.765485Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=222, session=2, semaphore="Sem1" count=1) 2025-06-25T14:59:50.765556Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #2 session 2 2025-06-25T14:59:50.765623Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], cookie=333, session=3, semaphore="Sem1" count=1) 2025-06-25T14:59:50.777695Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=111) 2025-06-25T14:59:50.777775Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=222) 2025-06-25T14:59:50.777804Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=333) 2025-06-25T14:59:50.778388Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:157:2179], cookie=15257644963419615533, name="Sem1") 2025-06-25T14:59:50.778480Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:157:2179], cookie=15257644963419615533) 2025-06-25T14:59:50.778907Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:160:2182], cookie=6135609379190958715, name="Sem1") 2025-06-25T14:59:50.778983Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:160:2182], cookie=6135609379190958715) 2025-06-25T14:59:50.779224Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:48: [72057594037927937] TTxSemaphoreAcquire::Execute (sender=[5:136:2160], 
cookie=444, session=1, semaphore="Sem1" count=1) 2025-06-25T14:59:50.779339Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #3 session 3 2025-06-25T14:59:50.791308Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_acquire.cpp:263: [72057594037927937] TTxSemaphoreAcquire::Complete (sender=[5:136:2160], cookie=444) 2025-06-25T14:59:50.791891Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:165:2187], cookie=1052567867856800059, name="Sem1") 2025-06-25T14:59:50.791983Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:165:2187], cookie=1052567867856800059) 2025-06-25T14:59:50.792453Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:168:2190], cookie=5190022095666343092, name="Sem1") 2025-06-25T14:59:50.792524Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:168:2190], cookie=5190022095666343092) 2025-06-25T14:59:50.805229Z node 5 :KESUS_TABLET INFO: tablet_impl.cpp:71: OnActivateExecutor: 72057594037927937 2025-06-25T14:59:50.805331Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:14: [72057594037927937] TTxInitSchema::Execute 2025-06-25T14:59:50.805802Z node 5 :KESUS_TABLET DEBUG: tx_init_schema.cpp:21: [72057594037927937] TTxInitSchema::Complete 2025-06-25T14:59:50.806380Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:30: [72057594037927937] TTxInit::Execute 2025-06-25T14:59:50.843352Z node 5 :KESUS_TABLET DEBUG: tx_init.cpp:242: [72057594037927937] TTxInit::Complete 2025-06-25T14:59:50.843537Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #1 session 1 2025-06-25T14:59:50.843591Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #2 session 2 2025-06-25T14:59:50.843618Z node 5 :KESUS_TABLET DEBUG: tablet_db.cpp:152: [72057594037927937] Processing semaphore 1 "Sem1" queue: next order #3 session 3 2025-06-25T14:59:50.843942Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:208:2220], cookie=1238312105511191835, name="Sem1") 2025-06-25T14:59:50.844035Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:208:2220], cookie=1238312105511191835) 2025-06-25T14:59:50.844618Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:29: [72057594037927937] TTxSemaphoreDescribe::Execute (sender=[5:217:2228], cookie=1435900281464609797, name="Sem1") 2025-06-25T14:59:50.844708Z node 5 :KESUS_TABLET DEBUG: tx_semaphore_describe.cpp:134: [72057594037927937] TTxSemaphoreDescribe::Complete (sender=[5:217:2228], cookie=1435900281464609797) ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::DataReceivedCallback [GOOD] Test command err: 2025-06-25T14:59:21.817860Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.817883Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.817910Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.818220Z :DEBUG: 
[db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.827826Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.827999Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.828358Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.828790Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.828880Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:59:21.828951Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.828983Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-25T14:59:21.829538Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.829561Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.829584Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.829785Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.830213Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.830297Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.830459Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.830768Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.830855Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:59:21.830932Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.830963Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-25T14:59:21.831704Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.831732Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.831760Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.831959Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.832426Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.832538Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.832724Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.833393Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.833520Z :DEBUG: Decompression task done. 
Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:59:21.833588Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.833620Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-25T14:59:21.834366Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.834386Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.834414Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.834666Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.835847Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.835955Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.836107Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.837572Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.838018Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:59:21.838101Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.838137Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-25T14:59:21.838871Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.838890Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.838911Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.839142Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.839509Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.839589Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.839739Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.840017Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.840086Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:59:21.840154Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.840189Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-25T14:59:21.840655Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.840676Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.840701Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.840910Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-25T14:59:21.841264Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.841585Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.841743Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.842023Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.842095Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:59:21.842155Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.842191Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-25T14:59:21.842898Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.842916Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.842946Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.843176Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.843611Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.843698Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.843838Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.844475Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.844585Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:59:21.844657Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:21.844701Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-25T14:59:21.845548Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.845576Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.845613Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.845840Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T14:59:21.846293Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:21.846381Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.846523Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:21.847826Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.848184Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:59:21.848249Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2025-06-25T14:59:21.848274Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-25T14:59:21.852793Z :ReadSession INFO: Random seed for debugging is 1750863561852770 2025-06-25T14:59:22.194467Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901739068664287:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:22.195053Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:59:22.272711Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901742556292396:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:22.272769Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;p ... signId:1) 2025-06-25T14:59:38.749366Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:192: session cookie 1 consumer shared/user session shared/user_1_1_10749573281037835208_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) committing to position 3 prev 2 end 3 by cookie 3 2025-06-25T14:59:38.749744Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:59:38.749782Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:59:38.749928Z node 2 :PERSQUEUE DEBUG: partition.cpp:3346: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user offset is set to 3 (startOffset 0) session shared/user_1_1_10749573281037835208_v1 2025-06-25T14:59:38.750086Z node 2 :PERSQUEUE DEBUG: read.h:272: CacheProxy. Passthrough write request to KV 2025-06-25T14:59:38.752593Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 3 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T14:59:38.752653Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:580: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-25T14:59:38.752709Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. 
cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:59:38.752728Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 3 2025-06-25T14:59:38.752970Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/user session shared/user_1_1_10749573281037835208_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 3 } 2025-06-25T14:59:38.753029Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer shared/user session shared/user_1_1_10749573281037835208_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) commit done to position 3 endOffset 3 with cookie 3 2025-06-25T14:59:38.753070Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer shared/user session shared/user_1_1_10749573281037835208_v1 replying for commits: assignId# 1, from# 3, to# 3, offset# 3 2025-06-25T14:59:38.753568Z :DEBUG: [/Root] [/Root] [f130f229-54b62ac9-1263eb0b-36d402cd] [dc1] Committed response: cookies { assign_id: 1 partition_cookie: 3 } 2025-06-25T14:59:38.827134Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|4e7eda36-9e2f5c83-26ff6e55-62830443_0] Write session will now close 2025-06-25T14:59:38.827193Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|4e7eda36-9e2f5c83-26ff6e55-62830443_0] Write session: aborting 2025-06-25T14:59:38.828005Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|4e7eda36-9e2f5c83-26ff6e55-62830443_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:59:38.828036Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|4e7eda36-9e2f5c83-26ff6e55-62830443_0] Write session is aborting and will not restart 2025-06-25T14:59:38.828133Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|4e7eda36-9e2f5c83-26ff6e55-62830443_0] Write session: destroy 2025-06-25T14:59:38.828206Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message-group-id|4e7eda36-9e2f5c83-26ff6e55-62830443_0 grpc read done: success: 0 data: 2025-06-25T14:59:38.828240Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message-group-id|4e7eda36-9e2f5c83-26ff6e55-62830443_0 grpc read failed 2025-06-25T14:59:38.828286Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message-group-id|4e7eda36-9e2f5c83-26ff6e55-62830443_0 grpc closed 2025-06-25T14:59:38.828303Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message-group-id|4e7eda36-9e2f5c83-26ff6e55-62830443_0 is DEAD 2025-06-25T14:59:38.829088Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:59:38.829562Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [1:7519901807788143700:2550] destroyed 2025-06-25T14:59:38.829598Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
2025-06-25T14:59:41.441673Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/user session shared/user_1_1_10749573281037835208_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 5 from offset 3 2025-06-25T14:59:42.079823Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:59:47.081331Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=468, count=3, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T14:59:48.739786Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/user session shared/user_1_1_10749573281037835208_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 6 from offset 3 2025-06-25T14:59:48.832381Z :INFO: [/Root] [/Root] [f130f229-54b62ac9-1263eb0b-36d402cd] Closing read session. Close timeout: 0.000000s 2025-06-25T14:59:48.832470Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:3 2025-06-25T14:59:48.832527Z :INFO: [/Root] [/Root] [f130f229-54b62ac9-1263eb0b-36d402cd] Counters: { Errors: 0 CurrentSessionLifetimeMs: 16415 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:59:48.832660Z :NOTICE: [/Root] [/Root] [f130f229-54b62ac9-1263eb0b-36d402cd] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T14:59:48.832708Z :DEBUG: [/Root] [/Root] [f130f229-54b62ac9-1263eb0b-36d402cd] [dc1] Abort session to cluster 2025-06-25T14:59:48.833317Z :NOTICE: [/Root] [/Root] [f130f229-54b62ac9-1263eb0b-36d402cd] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:59:48.844579Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_10749573281037835208_v1 grpc read done: success# 0, data# { } 2025-06-25T14:59:48.844618Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_1_1_10749573281037835208_v1 grpc read failed 2025-06-25T14:59:48.848375Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_1_1_10749573281037835208_v1 grpc closed 2025-06-25T14:59:48.848446Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_1_1_10749573281037835208_v1 is DEAD 2025-06-25T14:59:48.850481Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [1:7519901782018339459:2469] disconnected; active server actors: 1 2025-06-25T14:59:48.850550Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [1:7519901782018339459:2469] client user disconnected session shared/user_1_1_10749573281037835208_v1 2025-06-25T14:59:48.851969Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_1_1_10749573281037835208_v1 2025-06-25T14:59:48.852023Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [1:7519901782018339462:2472] destroyed 2025-06-25T14:59:48.852078Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_10749573281037835208_v1 2025-06-25T14:59:49.261518Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710721, task: 1, CA Id [1:7519901855032784444:2645]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-25T14:59:49.295388Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710721, task: 1, CA Id [1:7519901855032784444:2645]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:59:49.344405Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710721, task: 1, CA Id [1:7519901855032784444:2645]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:59:49.402481Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710721, task: 1, CA Id [1:7519901855032784444:2645]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:59:49.500666Z node 1 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710721, task: 1, CA Id [1:7519901855032784444:2645]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T14:59:50.203094Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:50.203166Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:50.203196Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:50.203462Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-25T14:59:50.203927Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T14:59:50.204093Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:50.204335Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T14:59:50.205018Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:59:50.205419Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T14:59:50.205611Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-25T14:59:50.205700Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T14:59:50.205757Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T14:59:50.205798Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-06-25T14:59:50.205964Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-25T14:59:50.206003Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-UUID [GOOD] Test command err: 2025-06-25T14:59:27.287250Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901763695796234:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:27.287730Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001495/r3tmp/tmpCGHYzm/pdisk_1.dat 2025-06-25T14:59:27.653667Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:27.670493Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:27.670591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:27.675557Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27619, node 1 2025-06-25T14:59:27.859231Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:27.859252Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:27.859258Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:27.859379Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4055 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:28.269586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:59:28.288349Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:29.764203Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901772285731731:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.764289Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.156095Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519901763695796350:2143] Handle TEvProposeTransaction 2025-06-25T14:59:30.156122Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519901763695796350:2143] TxId# 281474976715658 ProcessProposeTransaction 2025-06-25T14:59:30.156166Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519901763695796350:2143] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519901776580699051:2626] 2025-06-25T14:59:30.210503Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519901776580699051:2626] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-06-25T14:59:30.210582Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519901776580699051:2626] txid# 281474976715658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:59:30.210910Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519901776580699051:2626] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:59:30.210975Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519901776580699051:2626] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:59:30.211306Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519901776580699051:2626] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:59:30.211421Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519901776580699051:2626] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:59:30.211459Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519901776580699051:2626] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:59:30.211580Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519901776580699051:2626] txid# 281474976715658 HANDLE EvClientConnected 2025-06-25T14:59:30.212905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:30.214463Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519901776580699051:2626] txid# 281474976715658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715658} 2025-06-25T14:59:30.214509Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519901776580699051:2626] txid# 281474976715658 SEND to# [1:7519901776580699050:2303] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 53} 2025-06-25T14:59:30.332367Z node 1 
:KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901776580699199:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.332436Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.421821Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519901763695796350:2143] Handle TEvProposeTransaction 2025-06-25T14:59:30.421851Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519901763695796350:2143] TxId# 281474976715659 ProcessProposeTransaction 2025-06-25T14:59:30.421884Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519901763695796350:2143] Cookie# 0 userReqId# "" txid# 281474976715659 SEND to# [1:7519901776580699211:2746] 2025-06-25T14:59:30.424090Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519901776580699211:2746] txid# 281474976715659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateCdcStream CreateCdcStream { TableName: "table" StreamDescription { Name: "a" Mode: ECdcStreamModeUpdate Format: ECdcStreamFormatJson VirtualTimestamps: false AwsRegion: "" SchemaChanges: false } } } } UserToken: "" DatabaseName: "" PeerName: "" 2025-06-25T14:59:30.424127Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519901776580699211:2746] txid# 281474976715659 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:59:30.424183Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519901776580699211:2746] txid# 281474976715659 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:59:30.424462Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519901776580699211:2746] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:59:30.424570Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519901776580699211:2746] HANDLE EvNavigateKeySetResult, txid# 281474976715659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:59:30.424606Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519901776580699211:2746] txid# 281474976715659 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715659 TabletId# 72057594046644480} 2025-06-25T14:59:30.424759Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519901776580699211:2746] txid# 281474976715659 HANDLE EvClientConnected 2025-06-25T14:59:30.432152Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519901776580699211:2746] txid# 281474976715659 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-25T14:59:30.432192Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519901776580699211:2746] txid# 281474976715659 SEND to# [1:7519901776580699210:2316] Source {TEvProposeTransactionStatus txid# 281474976715659 Status# 53} 2025-06-25T14:59:30.530791Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][1:7519901776580699399:2324] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:4:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-25T14:59:30.583331Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901776580699492:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, 
NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.583401Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access ... 835858Z node 10 :IMPORT DEBUG: schemeshard_import_getters.cpp:346: HandleScheme TEvExternalStorage::TEvHeadObjectResponse: self# [10:7519901843480391424:2210], result# HeadObjectResult { ETag: 625d2681cf599ca2d3b1f18a7a5a3ae6 ContentLength: 356 } REQUEST: GET /test_bucket/UuidTable/scheme.pb HTTP/1.1 HEADERS: Host: localhost:21534 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 0C59921B-63FE-488C-8CDB-53A6A63123DD amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250625/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=0680074bb1de21e0129ee4d9debe1016309abe7916208ea2f2cf3687d18d6551 content-type: application/xml range: bytes=0-355 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250625T145946Z S3_MOCK::HttpServeRead: /test_bucket/UuidTable/scheme.pb / 356 2025-06-25T14:59:46.838951Z node 10 :IMPORT DEBUG: schemeshard_import_getters.cpp:475: HandleScheme TEvExternalStorage::TEvGetObjectResponse: self# [10:7519901843480391424:2210], result# 625d2681cf599ca2d3b1f18a7a5a3ae6 2025-06-25T14:59:46.839357Z node 10 :IMPORT INFO: schemeshard_import_getters.cpp:692: Reply: self# [10:7519901843480391424:2210], success# 1, error# 2025-06-25T14:59:46.839444Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T14:59:46.839463Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:990: TImport::TTxProgress: OnSchemeResult: id# 281474976715665, itemIdx# 0, success# 1 2025-06-25T14:59:46.839715Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:630: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-25T14:59:46.846425Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T14:59:46.846551Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T14:59:46.846565Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1219: TImport::TTxProgress: OnAllocateResult: txId# 281474976710760, id# 281474976715665 2025-06-25T14:59:46.846613Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:420: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710760 2025-06-25T14:59:46.846745Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T14:59:46.847990Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710760:0, at schemeshard: 72057594046644480, first GetDB called at: 
(GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:46.850855Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T14:59:46.850877Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1315: TImport::TTxProgress: OnModifyResult: txId# 281474976710760, status# StatusAccepted 2025-06-25T14:59:46.851067Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:644: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976710760 Issue: '' } 2025-06-25T14:59:46.854424Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T14:59:46.901148Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T14:59:46.901181Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1473: TImport::TTxProgress: OnNotifyResult: txId# 281474976710760 2025-06-25T14:59:46.901274Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:630: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-25T14:59:46.902494Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T14:59:46.902574Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T14:59:46.902595Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1219: TImport::TTxProgress: OnAllocateResult: txId# 281474976710761, id# 281474976715665 2025-06-25T14:59:46.902636Z node 10 :IMPORT INFO: schemeshard_import__create.cpp:521: TImport::TTxProgress: Restore propose: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710761 2025-06-25T14:59:46.903297Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T14:59:46.903777Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976710761:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h:563) 2025-06-25T14:59:46.908456Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T14:59:46.908487Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1315: TImport::TTxProgress: OnModifyResult: txId# 281474976710761, status# StatusAccepted 2025-06-25T14:59:46.908600Z node 10 :IMPORT INFO: 
schemeshard_import__create.cpp:644: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/UuidTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976710761 Issue: '' } 2025-06-25T14:59:46.910966Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete REQUEST: HEAD /test_bucket/UuidTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:21534 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: FE1E9DE2-9441-478E-B697-DE265C7087B1 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250625/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=cde00114998c49e8a44a687dc699526eb8c56f6739fcdd85be3c834fea4580b6 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250625T145946Z S3_MOCK::HttpServeRead: /test_bucket/UuidTable/data_00.csv / 39 REQUEST: GET /test_bucket/UuidTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:21534 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7E41D8A1-B04F-4748-81F0-5E5B0A40FC01 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250625/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=54ff4ba6502423baf9c363ef4b04d4f49e426c72582ca9b0a58a606a8c4ae9df content-type: application/xml range: bytes=0-38 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250625T145946Z S3_MOCK::HttpServeRead: /test_bucket/UuidTable/data_00.csv / 39 2025-06-25T14:59:46.973846Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T14:59:46.973877Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:1473: TImport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-25T14:59:46.975247Z node 10 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T14:59:46.996552Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [10:7519901843480391614:2357] [0] Resolve database: name# /Root 2025-06-25T14:59:46.999740Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [10:7519901843480391614:2357] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 
2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T14:59:46.999774Z node 10 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [10:7519901843480391614:2357] [0] Send request: schemeShardId# 72057594046644480 2025-06-25T14:59:47.000299Z node 10 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [10:7519901843480391614:2357] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715665 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:21534" scheme: HTTP bucket: "test_bucket" items { source_prefix: "UuidTable" destination_path: "/Root/UuidTable" } } StartTime { seconds: 1750863586 } EndTime { seconds: 1750863586 } } 2025-06-25T14:59:47.093574Z node 10 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [10:7519901830595487907:2140] Handle TEvExecuteKqpTransaction 2025-06-25T14:59:47.093607Z node 10 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [10:7519901830595487907:2140] TxId# 281474976715666 ProcessProposeKqpTransaction 2025-06-25T14:59:47.096519Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyksm9nxb437a7aa0gvem9hz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=Y2RlZjFlZTctYzc2NDRhNDAtZGRiZDUzNTUtY2EyMDY5MTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::Drop-Reboots-GenerateInternalPathId [GOOD] Test command err: 2025-06-25T14:59:49.513734Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:49.535285Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:49.535601Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:49.541256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:49.541406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:49.541596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:49.541682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:49.541770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:49.541850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:49.541918Z 
node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:49.541972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:49.542029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:49.542098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:49.542164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:49.562048Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:49.562223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:49.562277Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:49.562443Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:49.562621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:49.562709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:49.562750Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:49.562828Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:49.562889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:49.562934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:49.562962Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:49.563120Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:49.563171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:49.563204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:49.563231Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:49.563356Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:49.563409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:49.563446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:49.563466Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:49.563509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:49.563554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:49.563592Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:49.563733Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:49.563764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:49.563796Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:49.563922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:49.563954Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:49.563971Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:49.564054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:49.564084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:49.564109Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:49.564162Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:49.564198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:49.564222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:49.564248Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:49.564466Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=37; 2025-06-25T14:59:49.564550Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=46; 2025-06-25T14:59:49.564616Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=30; 2025-06-25T14:59:49.564682Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=35; 2025-06-25T14:59:49.564754Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:59:49.564861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:59:49.564900Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:59:49.564939Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
00s; 2025-06-25T14:59:52.955903Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T14:59:52.968461Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=104:TX_KIND_SCHEMA;min=1750863590706;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;this=88923022687072;op_tx=104:TX_KIND_SCHEMA;min=1750863590706;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1750863590706;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_this=89129165168576;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T14:59:52.968544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=104:TX_KIND_SCHEMA;min=1750863590706;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;this=88923022687072;op_tx=104:TX_KIND_SCHEMA;min=1750863590706;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1750863590706;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_this=89129165168576;method=TTxController::FinishProposeOnComplete;tx_id=104;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:103:2136]; 2025-06-25T14:59:52.968594Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=104:TX_KIND_SCHEMA;min=1750863590706;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;this=88923022687072;op_tx=104:TX_KIND_SCHEMA;min=1750863590706;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1750863590706;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_this=89129165168576;method=TTxController::FinishProposeOnComplete;tx_id=104;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=104; 2025-06-25T14:59:52.968955Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T14:59:52.969092Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863590706 at tablet 9437184, mediator 0 2025-06-25T14:59:52.969152Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[14] execute at tablet 9437184 2025-06-25T14:59:52.969405Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: DropTable for pathId: {internal: 9438184000001, ss: 1} at tablet 9437184 2025-06-25T14:59:52.969500Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=104;fline=tx_controller.cpp:215;event=finished_tx;tx_id=104; 2025-06-25T14:59:52.982160Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[14] complete at tablet 9437184 2025-06-25T14:59:52.982859Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863590706:max} readable: {1750863590706:max} at tablet 9437184 2025-06-25T14:59:52.983052Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T14:59:52.985378Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863590706:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T14:59:52.985471Z 
node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863590706:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T14:59:52.986223Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863590706:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T14:59:52.986351Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863590706:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T14:59:52.987327Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863590706:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:716:2728];trace_detailed=; 2025-06-25T14:59:52.988553Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T14:59:52.988850Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T14:59:52.989261Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T14:59:52.989392Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T14:59:52.989498Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T14:59:52.989542Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:716:2728] finished for tablet 9437184 2025-06-25T14:59:52.989970Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:710:2722];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0.001},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.002}],"full":{"a":1750863592987279,"name":"_full_task","f":1750863592987279,"d_finished":0,"c":0,"l":1750863592989607,"d":2328},"events":[{"name":"bootstrap","f":1750863592987510,"d_finished":1496,"c":1,"l":1750863592989006,"d":1496},{"a":1750863592989234,"name":"ack","f":1750863592989234,"d_finished":0,"c":0,"l":1750863592989607,"d":373},{"a":1750863592989211,"name":"processing","f":1750863592989211,"d_finished":0,"c":0,"l":1750863592989607,"d":396},{"name":"ProduceResults","f":1750863592988989,"d_finished":247,"c":2,"l":1750863592989526,"d":247},{"a":1750863592989529,"name":"Finish","f":1750863592989529,"d_finished":0,"c":0,"l":1750863592989607,"d":78}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T14:59:52.990081Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:710:2722];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T14:59:52.990452Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:710:2722];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0.001},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.002}],"full":{"a":1750863592987279,"name":"_full_task","f":1750863592987279,"d_finished":0,"c":0,"l":1750863592990140,"d":2861},"events":[{"name":"bootstrap","f":1750863592987510,"d_finished":1496,"c":1,"l":1750863592989006,"d":1496},{"a":1750863592989234,"name":"ack","f":1750863592989234,"d_finished":0,"c":0,"l":1750863592990140,"d":906},{"a":1750863592989211,"name":"processing","f":1750863592989211,"d_finished":0,"c":0,"l":1750863592990140,"d":929},{"name":"ProduceResults","f":1750863592988989,"d_finished":247,"c":2,"l":1750863592989526,"d":247},{"a":1750863592989529,"name":"Finish","f":1750863592989529,"d_finished":0,"c":0,"l":1750863592990140,"d":611}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T14:59:52.990531Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T14:59:52.986313Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T14:59:52.990576Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T14:59:52.990670Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> BackupRestore::RestoreTableSplitBoundaries [GOOD] >> BackupRestore::ImportDataShouldHandleErrors |91.1%| [TA] $(B)/ydb/core/kesus/tablet/ut/test-results/unittest/{meta.json ... results_accumulator.log} |91.1%| [TA] $(B)/ydb/core/cms/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> BackupRestoreS3::RestoreIndexTableSplitBoundaries [GOOD] >> BackupRestoreS3::RestoreViewQueryText |91.1%| [TA] {RESULT} $(B)/ydb/core/kesus/tablet/ut/test-results/unittest/{meta.json ... results_accumulator.log} |91.1%| [TA] {RESULT} $(B)/ydb/core/cms/ut/test-results/unittest/{meta.json ... results_accumulator.log} |91.1%| [TA] $(B)/ydb/core/kqp/ut/opt/test-results/unittest/{meta.json ... results_accumulator.log} |91.1%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/opt/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TColumnShardTestSchema::ForgetAfterFail >> TColumnShardTestSchema::ColdTiers >> TColumnShardTestSchema::RebootHotTiers >> TColumnShardTestSchema::TTL+Reboot-Internal+FirstPkColumn >> TColumnShardTestSchema::RebootEnableColdTiersAfterNoEviction >> KqpQueryService::TableSink_ReplaceDuplicatesOlap [GOOD] >> KqpQueryService::TableSink_Oltp_Replace-UseSink >> TColumnShardTestSchema::HotTiers >> TColumnShardTestSchema::RebootHotTiersTtl >> TColumnShardTestSchema::RebootEnableColdTiersAfterTtl >> TColumnShardTestSchema::Drop+Reboots-GenerateInternalPathId >> TBlobStorageProxyTest::TestProxyLongTailDiscover >> BackupPathTest::CommonPrefixButExplicitImportItems [GOOD] >> EncryptedExportTest::EncryptedExportAndImport [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT32 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT64 >> BackupRestore::TestAllPrimitiveTypes-UINT32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UINT64 >> BackupPathTest::ExportDirectoryWithEncryption >> BackupRestore::TestAllPrimitiveTypes-INT64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-FLOAT >> EncryptedExportTest::EncryptionAndCompression >> TBlobStorageProxyTest::TestProxyLongTailDiscover [GOOD] >> TBlobStorageProxyTest::TestProxyLongTailDiscoverMaxi >> BackupRestoreS3::RestoreViewQueryText [GOOD] >> BackupRestoreS3::RestoreViewReferenceTable >> TColumnShardTestSchema::Drop+Reboots-GenerateInternalPathId [GOOD] >> BackupRestore::ImportDataShouldHandleErrors [GOOD] >> BackupRestore::BackupUuid >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly [GOOD] >> PersQueueSdkReadSessionTest::StopResumeReadingData ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::Drop+Reboots-GenerateInternalPathId [GOOD] Test command err: 2025-06-25T14:59:55.876298Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:55.898772Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:55.899012Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:55.904771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:55.904933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:55.905105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:55.905200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:55.905301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:55.905391Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:55.905463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:55.905528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:55.905605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:55.905678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:55.905766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:55.929423Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:55.929614Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:55.929675Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:55.929855Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:55.930034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:55.930139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:55.930190Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:55.930313Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:55.930380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:55.930426Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:55.930466Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:55.930645Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:55.930712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:55.930753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:55.930783Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:55.930894Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:55.930960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:55.931006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:55.931055Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:55.931109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:55.931145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:55.931181Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:55.931371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:55.931427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:55.931476Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:55.931686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:55.931735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:55.931768Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:55.931930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:55.931979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:55.932009Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:55.932083Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:55.932148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:55.932187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:55.932233Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:55.932496Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=55; 2025-06-25T14:59:55.932597Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=51; 2025-06-25T14:59:55.932691Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=37; 2025-06-25T14:59:55.932785Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=47; 2025-06-25T14:59:55.932899Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:59:55.933003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:59:55.933048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:59:55.933106Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
EvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:00:00.259331Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:00:00.259371Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:00:00.259411Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:00:00.259483Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:00:00.259575Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=1; 2025-06-25T15:00:00.259675Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863297138;tx_id=18446744073709551615;;current_snapshot_ts=1750863596967; 2025-06-25T15:00:00.259727Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=1;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:00:00.259792Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:00.259843Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:00.259946Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.999000s; 2025-06-25T15:00:00.260013Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:00:00.355661Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863597138:max} readable: {1750863597138:max} at tablet 9437184 2025-06-25T15:00:00.355875Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:00:00.360980Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863597138:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:00.361090Z node 1 :TX_COLUMNSHARD DEBUG: 
log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863597138:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:00.361750Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863597138:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T15:00:00.361887Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863597138:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:00:00.362833Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:769:2764];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863597138:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:825:2811];trace_detailed=; 2025-06-25T15:00:00.370038Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T15:00:00.370370Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:00.370883Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:00.371058Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:00.371188Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:00.371242Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:825:2811] finished for tablet 9437184 2025-06-25T15:00:00.375055Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:818:2805];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ProduceResults"],"t":0.007},{"events":["f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.008}],"full":{"a":1750863600362782,"name":"_full_task","f":1750863600362782,"d_finished":0,"c":0,"l":1750863600371307,"d":8525},"events":[{"name":"bootstrap","f":1750863600363035,"d_finished":7517,"c":1,"l":1750863600370552,"d":7517},{"a":1750863600370849,"name":"ack","f":1750863600370849,"d_finished":0,"c":0,"l":1750863600371307,"d":458},{"a":1750863600370827,"name":"processing","f":1750863600370827,"d_finished":0,"c":0,"l":1750863600371307,"d":480},{"name":"ProduceResults","f":1750863600370531,"d_finished":324,"c":2,"l":1750863600371226,"d":324},{"a":1750863600371229,"name":"Finish","f":1750863600371229,"d_finished":0,"c":0,"l":1750863600371307,"d":78}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:00.375243Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:818:2805];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:00.375747Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:818:2805];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ProduceResults"],"t":0.007},{"events":["f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.008},{"events":["l_ack","l_processing","l_Finish"],"t":0.012}],"full":{"a":1750863600362782,"name":"_full_task","f":1750863600362782,"d_finished":0,"c":0,"l":1750863600375307,"d":12525},"events":[{"name":"bootstrap","f":1750863600363035,"d_finished":7517,"c":1,"l":1750863600370552,"d":7517},{"a":1750863600370849,"name":"ack","f":1750863600370849,"d_finished":0,"c":0,"l":1750863600375307,"d":4458},{"a":1750863600370827,"name":"processing","f":1750863600370827,"d_finished":0,"c":0,"l":1750863600375307,"d":4480},{"name":"ProduceResults","f":1750863600370531,"d_finished":324,"c":2,"l":1750863600371226,"d":324},{"a":1750863600371229,"name":"Finish","f":1750863600371229,"d_finished":0,"c":0,"l":1750863600375307,"d":4078}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:00.375847Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:00.361851Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:00:00.375899Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:00.376010Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:825:2811];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> BasicUsage::BrokenCredentialsProvider [GOOD] >> Compression::WriteZSTD [GOOD] >> Compression::WriteWithMixedCodecs >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> BasicUsage::BrokenCredentialsProvider [GOOD] Test command err: 2025-06-25T14:59:21.769357Z :MaxByteSizeEqualZero INFO: Random seed for debugging is 1750863561769332 2025-06-25T14:59:22.116461Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901742160328516:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:22.122684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot 
detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:59:22.184377Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901742181742922:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:22.184416Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:59:22.374171Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:59:22.386915Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00108a/r3tmp/tmptQ7rZo/pdisk_1.dat 2025-06-25T14:59:22.645080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:22.645184Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:22.661249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:22.661338Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:22.668582Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:22.679774Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:59:22.701261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:22.722824Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24721, node 1 2025-06-25T14:59:22.750939Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:59:22.750958Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:59:22.880452Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/00108a/r3tmp/yandex158siL.tmp 2025-06-25T14:59:22.880994Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/00108a/r3tmp/yandex158siL.tmp 2025-06-25T14:59:22.882743Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/00108a/r3tmp/yandex158siL.tmp 2025-06-25T14:59:22.882891Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:59:23.109034Z INFO: TTestServer started on Port 29980 GrpcPort 24721 2025-06-25T14:59:23.130077Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:23.215513Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29980 PQClient connected to localhost:24721 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:23.424178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:59:25.287737Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901755066645089:2270], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.287738Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901755066645100:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.287832Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.293114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:25.311244Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519901755066645103:2274], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-25T14:59:25.555082Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519901755066645131:2132] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:25.575726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:25.577090Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901755045231450:2304], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:59:25.577285Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NzcxMDcwZjAtYmY1NTcxNzctOTM5M2JhNzctZDllNzNjYg==, ActorId: [1:7519901755045231424:2297], ActorState: ExecuteState, TraceId: 01jykskmgm72nq3qaywt19r4m8, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:59:25.578113Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519901755066645146:2278], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:59:25.578333Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZTMwNTk2MTgtY2QwNjdkYTQtYWQ0Yjg5MTAtODMwZTRjZjU=, ActorId: [2:7519901755066645087:2269], ActorState: ExecuteState, TraceId: 01jykskmf61p4nwxh54g6mygra, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:59:25.611184Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:59:25.611196Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:59:25.686616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:25.800502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:24721", true, true, 1000); ... 
G: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-25T14:59:59.455029Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [5:7519901899080106037:2457] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-25T14:59:59.457742Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [5:7519901899080106037:2457] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-25T14:59:59.613234Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [5:7519901899080106037:2457] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-25T14:59:59.613700Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [5:7519901899080106083:2457] connected; active server actors: 1 2025-06-25T14:59:59.616463Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [5:7519901899080106037:2457] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-25T14:59:59.616495Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [5:7519901899080106037:2457] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-25T14:59:59.617169Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [5:7519901899080106083:2457] disconnected; active server actors: 1 2025-06-25T14:59:59.617197Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [5:7519901899080106083:2457] disconnected no session 2025-06-25T14:59:59.742807Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [5:7519901899080106037:2457] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-25T14:59:59.742846Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [5:7519901899080106037:2457] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-25T14:59:59.742864Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [5:7519901899080106037:2457] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-25T14:59:59.742891Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T14:59:59.743603Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [5:7519901899080106107:2457], now have 1 active actors on pipe 2025-06-25T14:59:59.743770Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 6, Generation: 1 2025-06-25T14:59:59.743913Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:59:59.743941Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:59:59.744023Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|a0fb5d36-5e5776fb-5b7be707-fc989e5e_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-25T14:59:59.744115Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-25T14:59:59.744168Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:59:59.744548Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T14:59:59.744581Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T14:59:59.744640Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T14:59:59.744892Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|a0fb5d36-5e5776fb-5b7be707-fc989e5e_0 2025-06-25T14:59:59.748576Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750863599748 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:59:59.748721Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|a0fb5d36-5e5776fb-5b7be707-fc989e5e_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-25T14:59:59.752415Z :INFO: [] MessageGroupId [src] SessionId [src|a0fb5d36-5e5776fb-5b7be707-fc989e5e_0] Write session: close. 
Timeout = 0 ms 2025-06-25T14:59:59.752477Z :INFO: [] MessageGroupId [src] SessionId [src|a0fb5d36-5e5776fb-5b7be707-fc989e5e_0] Write session will now close 2025-06-25T14:59:59.752532Z :DEBUG: [] MessageGroupId [src] SessionId [src|a0fb5d36-5e5776fb-5b7be707-fc989e5e_0] Write session: aborting 2025-06-25T14:59:59.753127Z :INFO: [] MessageGroupId [src] SessionId [src|a0fb5d36-5e5776fb-5b7be707-fc989e5e_0] Write session: gracefully shut down, all writes complete 2025-06-25T14:59:59.753188Z :DEBUG: [] MessageGroupId [src] SessionId [src|a0fb5d36-5e5776fb-5b7be707-fc989e5e_0] Write session: destroy 2025-06-25T14:59:59.753969Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|a0fb5d36-5e5776fb-5b7be707-fc989e5e_0 grpc read done: success: 0 data: 2025-06-25T14:59:59.753997Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|a0fb5d36-5e5776fb-5b7be707-fc989e5e_0 grpc read failed 2025-06-25T14:59:59.754042Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|a0fb5d36-5e5776fb-5b7be707-fc989e5e_0 grpc closed 2025-06-25T14:59:59.754059Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|a0fb5d36-5e5776fb-5b7be707-fc989e5e_0 is DEAD 2025-06-25T14:59:59.755146Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T14:59:59.755481Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [5:7519901899080106107:2457] destroyed 2025-06-25T14:59:59.755535Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T14:59:59.778958Z :INFO: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] Starting read session 2025-06-25T14:59:59.779021Z :DEBUG: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] Starting session to cluster null (localhost:30907) 2025-06-25T14:59:59.784859Z :DEBUG: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:59.784906Z :DEBUG: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:59.785045Z :DEBUG: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] [null] Reconnecting session to cluster null in 0.000000s 2025-06-25T14:59:59.786440Z :ERROR: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] [null] Got error. Status: CLIENT_UNAUTHENTICATED. Description:
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation 2025-06-25T14:59:59.786499Z :DEBUG: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:59.786530Z :DEBUG: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:59.786639Z :INFO: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] [null] Closing session to cluster: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " } Get event on client 2025-06-25T14:59:59.786797Z :NOTICE: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T14:59:59.786831Z :DEBUG: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] [null] Abort session to cluster Got close event: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " }2025-06-25T14:59:59.786909Z :INFO: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] Closing read session. Close timeout: 0.000000s 2025-06-25T14:59:59.786948Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-25T14:59:59.786987Z :INFO: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] Counters: { Errors: 1 CurrentSessionLifetimeMs: 8 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T14:59:59.787076Z :NOTICE: [/Root] [/Root] [360abe85-fee2103a-16befffd-96c3513f] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T15:00:00.196996Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710687, task: 1, CA Id [5:7519901903375073431:2473]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-25T15:00:00.232951Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710687, task: 1, CA Id [5:7519901903375073431:2473]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T15:00:00.282561Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710687, task: 1, CA Id [5:7519901903375073431:2473]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T15:00:00.346469Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710687, task: 1, CA Id [5:7519901903375073431:2473]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T15:00:00.440220Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710687, task: 1, CA Id [5:7519901903375073431:2473]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T15:00:00.632350Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976710687, task: 1, CA Id [5:7519901903375073431:2473]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 |91.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_OtherPartition_Test [GOOD] >> BackupPathTest::ExportDirectoryWithEncryption [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INT64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-UINT64 >> KqpQueryService::TableSink_Oltp_Replace-UseSink [GOOD] >> EncryptedExportTest::EncryptionAndCompression [GOOD] >> TColumnShardTestSchema::CreateTable+Reboots-GenerateInternalPathId >> YdbYqlClient::DiscoveryLocationOverride ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_OtherPartition_Test [GOOD] Test command err: 2025-06-25T14:58:55.376458Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901626599205344:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:55.376518Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:55.416930Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901623336184203:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:55.417020Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:55.594673Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000a38/r3tmp/tmpQoSzaa/pdisk_1.dat 2025-06-25T14:58:55.638037Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:58:55.899310Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:55.918601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:55.918716Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:55.919744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:55.919791Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:55.924825Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:58:55.925134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:55.925563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17333, node 1 2025-06-25T14:58:55.950413Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T14:58:56.108260Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000a38/r3tmp/yandexkxT2pL.tmp 2025-06-25T14:58:56.108299Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000a38/r3tmp/yandexkxT2pL.tmp 2025-06-25T14:58:56.109911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000a38/r3tmp/yandexkxT2pL.tmp 2025-06-25T14:58:56.112996Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:56.394849Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:56.433253Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:56.466886Z INFO: TTestServer started on Port 25372 GrpcPort 17333 TClient is connected to server localhost:25372 PQClient connected to localhost:17333 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:58:56.770365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T14:58:56.842021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-25T14:58:58.809471Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901636221086441:2274], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:58.809471Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901636221086452:2277], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:58.809574Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:58.814360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:58.821190Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901639484108269:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:58.821614Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:58.833058Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519901636221086455:2278], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-25T14:58:59.049436Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519901636221086482:2175] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:59.072773Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901639484108331:2305], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:58:59.072983Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OGJlYmRhMmEtNWU1ODVkLWUxYTRlYTMxLTEwMjU3NWI3, ActorId: [1:7519901639484108266:2298], ActorState: ExecuteState, TraceId: 01jyksjtkt78tpn4r81bag8hd0, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:58:59.073261Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519901640516053792:2282], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:58:59.074797Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZDI5NTExY2QtMTU0MWE1MmQtOGM4YzdjZWEtMWRmMTA0YTI=, ActorId: [2:7519901636221086439:2273], ActorState: ExecuteState, TraceId: 01jyksjtkq7cyqnsg95mhrtw09, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:58:59.075155Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:58:59.075176Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:58:59.075677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:59.154458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose ... meout;self_id=[10:7519901840784481297:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:50.254004Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-25T14:59:56.516594Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:57.390440Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:59:58.098800Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715690:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:58.902241Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715695:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T14:59:59.624795Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715699:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T15:00:00.339691Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715704:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:00:00.356007Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:00:00.356038Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Run query: --!syntax_v1 UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES (16261273835729377752, "Root", "00415F536F757263655F3130", 1750863601000, 1750863601000, 0, 13); 2025-06-25T15:00:01.125110Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715710. Ctx: { TraceId: 01jyksmqc0fjr1xpkxj0ns7jfh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=OTM4YjBiYjgtN2Q1NjY2ODQtYmVkYzA1MGEtNzJjNWY2MWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:00:01.147353Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-25T15:00:01.147382Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T15:00:01.147393Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-25T15:00:01.147416Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__sm_chooser_actor.h:116: TPartitionChooser [9:7519901908382196587:3879] (SourceId=A_Source_10, PreferedPartition=1) GetOwnershipFast Partition=1 TabletId=1001 2025-06-25T15:00:01.147556Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [9:7519901908382196588:3879], Recipient [9:7519901886907359061:3290]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [9:7519901908382196587:3879] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-25T15:00:01.147664Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [9:7519901908382196587:3879], Recipient [9:7519901886907359061:3290]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 SourceId: "A_Source_10" 2025-06-25T15:00:01.147736Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:139: StateOwnershipFast, received event# 271188558, Sender [9:7519901886907359061:3290], Recipient [9:7519901908382196587:3879]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-25T15:00:01.147769Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:88: TPartitionChooser [9:7519901908382196587:3879] (SourceId=A_Source_10, PreferedPartition=1) InitTable: SourceId=A_Source_10 TopicsAreFirstClassCitizen=1 UseSrcIdMetaMappingInFirstClass=1 2025-06-25T15:00:01.147881Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [9:7519901908382196587:3879], Recipient [9:7519901886907359061:3290]: NActors::TEvents::TEvPoison 2025-06-25T15:00:01.147972Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:101: StateInitTable, received event# 277020685, Sender [9:7519901839662716762:2070], Recipient [9:7519901908382196587:3879]: NKikimr::NMetadata::NProvider::TEvManagerPrepared 2025-06-25T15:00:01.147996Z node 9 :PQ_PARTITION_CHOOSER DEBUG: 
partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [9:7519901908382196587:3879] (SourceId=A_Source_10, PreferedPartition=1) StartKqpSession 2025-06-25T15:00:01.150913Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:132: StateCreateKqpSession, received event# 271646728, Sender [9:7519901839662716885:2177], Recipient [9:7519901908382196587:3879]: NKikimrKqp.TEvCreateSessionResponse Error: "" Response { SessionId: "ydb://session/3?node_id=9&id=NzE4YTAyMDYtOTZkNzA5YWItZDk3M2U1Mi05MDIzZDJmNA==" NodeId: 9 } YdbStatus: SUCCESS ResourceExhausted: false 2025-06-25T15:00:01.150954Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [9:7519901908382196587:3879] (SourceId=A_Source_10, PreferedPartition=1) Select from the table Received TEvChooseError: MessageGroupId A_Source_10 is already bound to PartitionGroupId 1, but client provided 2. MessageGroupId->PartitionGroupId binding cannot be changed, either use another MessageGroupId, specify PartitionGroupId 1, or do not specify PartitionGroupId at all. 2025-06-25T15:00:01.383274Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:163: StateSelect, received event# 271646721, Sender [9:7519901839662716885:2177], Recipient [9:7519901908382196587:3879]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=9&id=NzE4YTAyMDYtOTZkNzA5YWItZDk3M2U1Mi05MDIzZDJmNA==" PreparedQuery: "e2dda67f-10883dd6-9be80ae0-541144a2" QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { id: "01jyksmqpzes06rs0pt1s1xe13" } YdbResults { columns { name: "Partition" type { optional_type { item { type_id: UINT32 } } } } columns { name: "CreateTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "AccessTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "SeqNo" type { optional_type { item { type_id: UINT64 } } } } rows { items { uint32_value: 0 } items { uint64_value: 1750863601000 } items { uint64_value: 1750863601000 } items { uint64_value: 13 } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 143 2025-06-25T15:00:01.383484Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:151: TPartitionChooser [9:7519901908382196587:3879] (SourceId=A_Source_10, PreferedPartition=1) Selected from table PartitionId=0 SeqNo=13 2025-06-25T15:00:01.383513Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:209: TPartitionChooser [9:7519901908382196587:3879] (SourceId=A_Source_10, PreferedPartition=1) OnPartitionChosen 2025-06-25T15:00:01.383559Z node 9 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [9:7519901908382196587:3879] (SourceId=A_Source_10, PreferedPartition=1) ReplyError: MessageGroupId A_Source_10 is already bound to PartitionGroupId 1, but client provided 2. MessageGroupId->PartitionGroupId binding cannot be changed, either use another MessageGroupId, specify PartitionGroupId 1, or do not specify PartitionGroupId at all. 
Run query: --!syntax_v1 SELECT Partition, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash = 16261273835729377752 AND Topic = "Root" AND ProducerId = "00415F536F757263655F3130" 2025-06-25T15:00:01.590523Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715713. Ctx: { TraceId: 01jyksmqr5b1r98f1fj7xe84cj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=N2NmNTljNmUtMTIyOWNiMTAtYjc2ZDI3ZmItMWFjODliMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:00:02.153925Z node 9 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1985: ActorId: [9:7519901912677164015:2674] TxId: 281474976715715. Ctx: { TraceId: 01jyksmra1djdnhny3kn8sdg5b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=MWY0Zjg2ZDItMzE5ZGRhZDAtYTc2NGRiYTEtZTEwZDUwNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 10 2025-06-25T15:00:02.154089Z node 9 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [9:7519901912677164022:2674], TxId: 281474976715715, task: 2. Ctx: { SessionId : ydb://session/3?node_id=9&id=MWY0Zjg2ZDItMzE5ZGRhZDAtYTc2NGRiYTEtZTEwZDUwNDg=. CustomerSuppliedId : . TraceId : 01jyksmra1djdnhny3kn8sdg5b. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [9:7519901912677164015:2674], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> BackupPathTest::EncryptedExportWithExplicitDestinationPath >> BackupRestore::TestAllPrimitiveTypes-UINT64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TZ_DATE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TZ_DATETIME [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TZ_TIMESTAMP [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP64 >> TPQTest::TestPartitionWriteQuota [GOOD] >> TPQTest::TestPQRead [GOOD] >> TPQTest::TestPQSmallRead >> BackupRestore::TestAllPrimitiveTypes-FLOAT [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DOUBLE >> TStateStorageTest::ShouldDeleteNoCheckpoints >> EncryptedExportTest::EncryptionAndChecksum >> TColumnShardTestSchema::EnableColdTiersAfterNoEviction >> TColumnShardTestSchema::CreateTable+Reboots-GenerateInternalPathId [GOOD] >> BackupRestoreS3::RestoreViewReferenceTable [GOOD] >> BackupRestoreS3::RestoreViewDependentOnAnotherView >> TStateStorageTest::ShouldDeleteNoCheckpoints [GOOD] >> TStateStorageTest::ShouldDeleteNoCheckpoints2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::CreateTable+Reboots-GenerateInternalPathId [GOOD] Test command err: 2025-06-25T15:00:05.111818Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:00:05.135762Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:00:05.136065Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:00:05.142633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:00:05.142795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:00:05.143027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:00:05.143104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:00:05.143186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:00:05.143260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:00:05.143329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:00:05.143421Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:00:05.143522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:00:05.143607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:00:05.143678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:00:05.163830Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:00:05.163986Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:00:05.164034Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:00:05.164216Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:05.164391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:00:05.164478Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:00:05.164542Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:00:05.164641Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:00:05.164702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:00:05.164743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:00:05.164782Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:00:05.164942Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:05.165004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:00:05.165039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:00:05.165076Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:00:05.165169Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:00:05.165234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:00:05.165275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:00:05.165308Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:00:05.165354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:00:05.165393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:00:05.165417Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:00:05.165613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:00:05.165652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:00:05.165670Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:00:05.165797Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:00:05.165829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:00:05.165848Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:00:05.165998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:00:05.166037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:00:05.166060Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:00:05.166135Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:00:05.166199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:00:05.166252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:00:05.166281Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:00:05.166472Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=40; 2025-06-25T15:00:05.166549Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=39; 2025-06-25T15:00:05.166623Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=23; 2025-06-25T15:00:05.166701Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=42; 2025-06-25T15:00:05.166771Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:00:05.166859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:00:05.166905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:00:05.166950Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
blet_id=9437184;tx_id=119;this=88923004928352;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750863606195;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-25T15:00:06.778884Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:320:2328];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=119;this=88923004928352;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750863606195;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;fline=schema.h:38;event=sync_schema; 2025-06-25T15:00:06.790633Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750863606195;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;this=88923004928352;op_tx=119:TX_KIND_SCHEMA;min=1750863606195;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750863606195;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_this=89129166072256;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T15:00:06.790689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750863606195;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;this=88923004928352;op_tx=119:TX_KIND_SCHEMA;min=1750863606195;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750863606195;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_this=89129166072256;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T15:00:06.790725Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750863606195;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;this=88923004928352;op_tx=119:TX_KIND_SCHEMA;min=1750863606195;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750863606195;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_this=89129166072256;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=119; 2025-06-25T15:00:06.791022Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T15:00:06.791110Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863606195 at tablet 9437184, mediator 0 2025-06-25T15:00:06.791140Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[4] execute at tablet 9437184 2025-06-25T15:00:06.791354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-25T15:00:06.791421Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 20, ss: 20} ttl settings: { Version: 1 } at tablet 9437184 2025-06-25T15:00:06.791491Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:304;method=RegisterTable;path_id=20; 2025-06-25T15:00:06.791536Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine.h:144;event=RegisterTable;path_id=20; 2025-06-25T15:00:06.791883Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=20; 2025-06-25T15:00:06.791958Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tx_controller.cpp:215;event=finished_tx;tx_id=119; 2025-06-25T15:00:06.803765Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[4] complete at tablet 9437184 CreateTable: { SeqNo { Generation: 20 } EnsureTables { Tables { PathId: 21 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4609 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-25T15:00:06.805212Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:320:2328];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=120;this=88923004931040;method=TTxController::StartProposeOnExecute;tx_info=120:TX_KIND_SCHEMA;min=1750863606198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-25T15:00:06.817020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750863606198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;this=88923004931040;op_tx=120:TX_KIND_SCHEMA;min=1750863606198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T15:00:06.817075Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750863606198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;this=88923004931040;op_tx=120:TX_KIND_SCHEMA;min=1750863606198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=120; CreateTable: { SeqNo { Generation: 21 } EnsureTables { Tables { PathId: 22 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4610 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 
4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-25T15:00:06.818318Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:320:2328];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=121;this=88923004932832;method=TTxController::StartProposeOnExecute;tx_info=121:TX_KIND_SCHEMA;min=1750863606199;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-25T15:00:06.830109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750863606199;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;this=88923004932832;op_tx=121:TX_KIND_SCHEMA;min=1750863606199;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T15:00:06.830189Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750863606199;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;this=88923004932832;op_tx=121:TX_KIND_SCHEMA;min=1750863606199;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=121; CreateTable: { SeqNo { Generation: 22 } EnsureTables { Tables { PathId: 23 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4612 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { 
Version: 1 } } } } 2025-06-25T15:00:06.831281Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:320:2328];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=122;this=88923004934624;method=TTxController::StartProposeOnExecute;tx_info=122:TX_KIND_SCHEMA;min=1750863606201;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-25T15:00:06.843040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750863606201;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;this=88923004934624;op_tx=122:TX_KIND_SCHEMA;min=1750863606201;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T15:00:06.843106Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750863606201;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;this=88923004934624;op_tx=122:TX_KIND_SCHEMA;min=1750863606201;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=122; >> TBlobStorageProxyTest::TestProxyLongTailDiscoverMaxi [GOOD] >> YdbYqlClient::DiscoveryLocationOverride [GOOD] >> YdbYqlClient::DeleteTableWithDeletedIndex >> TStateStorageTest::ShouldDeleteNoCheckpoints2 [GOOD] >> TStateStorageTest::ShouldDeleteCheckpoints >> BackupRestore::BackupUuid [GOOD] >> BackupRestore::RestoreViewQueryText |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxyLongTailDiscoverMaxi [GOOD] |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest |91.2%| [TA] $(B)/ydb/core/blobstorage/dsproxy/ut_fat/test-results/unittest/{meta.json ... results_accumulator.log} |91.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/test-results/unittest/{meta.json ... 
results_accumulator.log} >> BackupRestoreS3::TestAllPrimitiveTypes-UINT64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-FLOAT >> BackupPathTest::EncryptedExportWithExplicitDestinationPath [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_PreferedPartition_Test [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_Oltp_Replace-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 62231, MsgBus: 21447 2025-06-25T14:55:12.730575Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900665101332470:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:12.731161Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001862/r3tmp/tmp0IfcJm/pdisk_1.dat 2025-06-25T14:55:13.239867Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:13.254514Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900665101332339:2080] 1750863312711913 != 1750863312711916 2025-06-25T14:55:13.257633Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:13.257709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:13.266479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62231, node 1 2025-06-25T14:55:13.556805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:13.556825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:13.556831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:13.556932Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:55:13.754881Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21447 TClient is connected to server localhost:21447 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:14.349902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:55:14.370732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 CREATE TABLE `/Root/ColumnShard1` (Col1 Int64 NOT NULL, Col2 Int32 NOT NULL, PRIMARY KEY (Col1)) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1000); 2025-06-25T14:55:15.877660Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900677986234878:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:15.877755Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:16.707127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T14:55:17.727876Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519900665101332470:2162];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:17.727967Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:21.315677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038864;self_id=[1:7519900699461075765:2328];tablet_id=72075186224038864;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:55:21.315883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038864;self_id=[1:7519900699461075765:2328];tablet_id=72075186224038864;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:55:21.315895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900699461075706:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:55:21.315973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900699461075706:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:55:21.316323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900699461075706:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:55:21.316455Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900699461075706:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:55:21.316569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900699461075706:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:55:21.316698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900699461075706:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:55:21.316806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900699461075706:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:55:21.316915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7519900699461075706:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:55:21.317025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900699461075706:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:55:21.317177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900699461075706:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:55:21.317199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038864;self_id=[1:7519900699461075765:2328];tablet_id=72075186224038864;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:55:21.317532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038864;self_id=[1:7519900699461075765:2328];tablet_id=72075186224038864;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:55:21.317533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7519900699461075706:2323];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:55:21.317703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038864;self_id=[1:7519900699461075765:2328];tablet_id=72075186224038864;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:55:21.317807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038864;self_id=[1:7519900699461075765:2328];tablet_id=72075186224038864;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:55:21.317911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038864;self_id=[1:7519900699461075765:2328];tablet_id=72075186224038864;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:55:21.317999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038864;self_id=[1:7519900699461075765:2328];tablet_id=72075186224038864;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:55:21.318081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038864;self_id=[1:7519900699461075765:2328];tablet_id=72075186224038864;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:55:21.318173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038864;self_id=[1:7519900699461075765:2328];tablet_id=72075186224038864;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:55:21.318255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224038864;self_id=[1:7519900699461075765:2328];tablet_id=72075186224038864;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:55:21.358317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224038884;self_id=[1:7519900699461075712:2324];tablet_id=72075186224038884;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;l ... e=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:59:53.805426Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:59:53.806232Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:59:53.806258Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:59:53.807326Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=2;result=not_found; 2025-06-25T14:59:53.813035Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:59:53.813529Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710658;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710658; 2025-06-25T14:59:53.828949Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901873138768781:2352], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:53.829041Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:53.829078Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901873138768786:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:53.833075Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:53.844925Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519901873138768788:2356], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:59:53.940408Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519901873138768839:2577] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:54.080180Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976710662;tx_id=281474976710662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710662; 2025-06-25T14:59:54.104106Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976710664;tx_id=281474976710664;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710664; 2025-06-25T14:59:54.168405Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519901855958898514:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:54.168487Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 15977, MsgBus: 27511 2025-06-25T14:59:55.683002Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901883919490657:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:55.683056Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001862/r3tmp/tmp3KoSB8/pdisk_1.dat 2025-06-25T14:59:55.813673Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:55.813783Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:55.817611Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15977, node 3 2025-06-25T14:59:55.881648Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:55.883869Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:55.883892Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:55.883908Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:55.884062Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27511 TClient is connected to server localhost:27511 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:56.537446Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:59:56.699385Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:59.943197Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901901099360453:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:59.943299Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:59.952761Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:00.284843Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:00.620135Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901905394329088:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:00.620255Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:00.620371Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901905394329093:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:00.624599Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:00.637132Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901905394329095:2407], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T15:00:00.683210Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519901883919490657:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:00.683297Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:00:00.727479Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901905394329149:3205] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TStateStorageTest::ShouldDeleteCheckpoints [GOOD] >> TStateStorageTest::ShouldDeleteGraph |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TStateStorageTest::ShouldDeleteGraph [GOOD] >> TStateStorageTest::ShouldGetMultipleStates >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UTF8 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeDisabled_PreferedPartition_Test [GOOD] Test command err: 2025-06-25T14:58:55.093228Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901626535066023:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:55.093288Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:55.176665Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901626313825213:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:55.176709Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:58:55.431807Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000aad/r3tmp/tmpUlTB68/pdisk_1.dat 2025-06-25T14:58:55.452553Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:58:55.711090Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:55.722476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:55.722581Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:55.724982Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:55.725037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:55.739637Z node 1 :HIVE WARN: hive_impl.cpp:781: 
HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:58:55.739803Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:55.745177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15809, node 1 2025-06-25T14:58:56.128409Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:56.164702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000aad/r3tmp/yandexRKClqm.tmp 2025-06-25T14:58:56.164725Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000aad/r3tmp/yandexRKClqm.tmp 2025-06-25T14:58:56.164887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000aad/r3tmp/yandexRKClqm.tmp 2025-06-25T14:58:56.164999Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:56.214886Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:58:56.452961Z INFO: TTestServer started on Port 16540 GrpcPort 15809 TClient is connected to server localhost:16540 PQClient connected to localhost:15809 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:58:56.770630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 
2025-06-25T14:58:56.829731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T14:58:56.844884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... waiting... 2025-06-25T14:58:58.554293Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901639419968952:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:58.554409Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:58.554500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901639419968964:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:58.560605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:58.582650Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901639419968966:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T14:58:58.671791Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901639419969050:2751] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:59.020557Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519901639198727465:2279], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:58:59.020833Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=OGUwOWM4OTktZDlkMjA3OC0xZjE4MmY0YS0xNGQ2YmEzMg==, ActorId: [2:7519901639198727440:2273], ActorState: ExecuteState, TraceId: 01jyksjtd6a2sw8s2cwn82n584, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:58:59.021753Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901639419969067:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:58:59.024913Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZjYxZGQ4ZjUtNGM3MWY2YS1mNmI2NzMtYTE3YmNkNDc=, ActorId: [1:7519901639419968934:2298], ActorState: ExecuteState, TraceId: 01jyksjtb0db1c9y6crfm2n0x7, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:58:59.024452Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:58:59.025458Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:58:59.066841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:59.145635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:59.227827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: ( ... 
a { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:59:57.644466Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:57.734487Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:57.885603Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T14:59:58.143677Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyksmmd6d1h7aykzmqb895zx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=MTVjZWE3ZmEtNjQ5Y2JiZmQtODM4MGNmNmItODRjYjM3M2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [9:7519901893332673541:3067] 2025-06-25T14:59:58.318943Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[9:7519901871857835491:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:58.319031Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:59:58.328085Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519901873347900575:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:58.328221Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-25T15:00:03.302621Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-25T15:00:03.302651Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T15:00:03.302661Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-25T15:00:03.302681Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:88: TPartitionChooser [9:7519901914807510349:3222] (SourceId=A_Source, PreferedPartition=0) InitTable: SourceId=A_Source TopicsAreFirstClassCitizen=1 UseSrcIdMetaMappingInFirstClass=1 2025-06-25T15:00:03.307842Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:04.101709Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715679:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:00:04.874872Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715685:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:05.555620Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715688:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T15:00:06.304654Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterTable, opId: 281474976715694:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:171) 2025-06-25T15:00:06.991172Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715697:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:00:07.669173Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:101: StateInitTable, received event# 277020685, Sender [9:7519901871857835488:2069], Recipient [9:7519901914807510349:3222]: NKikimr::NMetadata::NProvider::TEvManagerPrepared 2025-06-25T15:00:07.669207Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [9:7519901914807510349:3222] (SourceId=A_Source, PreferedPartition=0) StartKqpSession 2025-06-25T15:00:07.672216Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:132: StateCreateKqpSession, received event# 271646728, Sender [9:7519901871857835610:2174], Recipient [9:7519901914807510349:3222]: NKikimrKqp.TEvCreateSessionResponse Error: "" Response { SessionId: "ydb://session/3?node_id=9&id=ZGE1N2IzYWYtNDliNzI5MmItYWMyNTJjYTMtNWU3Y2RmYWM=" NodeId: 9 } YdbStatus: SUCCESS ResourceExhausted: false 2025-06-25T15:00:07.672253Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [9:7519901914807510349:3222] (SourceId=A_Source, PreferedPartition=0) Select from the table 2025-06-25T15:00:07.856108Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:163: StateSelect, received event# 271646721, Sender [9:7519901871857835610:2174], Recipient [9:7519901914807510349:3222]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=9&id=ZGE1N2IzYWYtNDliNzI5MmItYWMyNTJjYTMtNWU3Y2RmYWM=" PreparedQuery: "8fcd9013-e18ac28a-8f7df13f-c15c7c0a" QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { id: "01jyksmy19b3gk655tcmw38mr0" } YdbResults { columns { name: "Partition" type { optional_type { item { type_id: UINT32 } } } } columns { name: "CreateTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "AccessTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "SeqNo" type { optional_type { item { type_id: UINT64 } } } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 116 2025-06-25T15:00:07.856252Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:151: TPartitionChooser [9:7519901914807510349:3222] (SourceId=A_Source, PreferedPartition=0) Selected from table PartitionId=(NULL) SeqNo=(NULL) 2025-06-25T15:00:07.856281Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__old_chooser_actor.h:113: TPartitionChooser [9:7519901914807510349:3222] (SourceId=A_Source, PreferedPartition=0) OnPartitionChosen 2025-06-25T15:00:07.856295Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser 
[9:7519901914807510349:3222] (SourceId=A_Source, PreferedPartition=0) Update the table Received TEvChooseResult: 0 2025-06-25T15:00:07.974918Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:212: StateUpdate, received event# 271646721, Sender [9:7519901871857835610:2174], Recipient [9:7519901914807510349:3222]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=9&id=ZGE1N2IzYWYtNDliNzI5MmItYWMyNTJjYTMtNWU3Y2RmYWM=" PreparedQuery: "b7b28a27-aac82248-af3ca3fa-bb2ff518" QueryParameters { Name: "$AccessTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$CreateTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Partition" Type { Kind: Data Data { Scheme: 2 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SeqNo" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 71 2025-06-25T15:00:07.974959Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [9:7519901914807510349:3222] (SourceId=A_Source, PreferedPartition=0) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-25T15:00:07.974988Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [9:7519901914807510349:3222] (SourceId=A_Source, PreferedPartition=0) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-25T15:00:07.975011Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [9:7519901914807510349:3222] (SourceId=A_Source, PreferedPartition=0) Start idle 2025-06-25T15:00:08.434936Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:00:08.434964Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded >> BackupPathTest::EncryptedExportWithExplicitObjectList >> BackupRestore::TestAllPrimitiveTypes-DOUBLE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATE >> TColumnShardTestSchema::TTL-Reboot-Internal+FirstPkColumn >> TColumnShardTestSchema::RebootExportWithLostAnswer >> TColumnShardTestSchema::HotTiersAfterTtl >> YdbYqlClient::DeleteTableWithDeletedIndex [GOOD] >> YdbYqlClient::CreateTableWithUniformPartitions >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeReplication [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalTable >> KqpQueryService::ReturnAndCloseSameTime [GOOD] >> KqpQueryService::ReplaceIntoWithDefaultValue >> TStateStorageTest::ShouldGetMultipleStates [GOOD] >> YdbScripting::MultiResults >> EncryptedExportTest::EncryptionAndChecksum [GOOD] >> BackupRestoreS3::RestoreViewDependentOnAnotherView [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeInvalid [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobal >> BackupRestore::RestoreViewQueryText [GOOD] >> BackupRestore::RestoreViewReferenceTable >> BackupRestoreS3::TestAllPrimitiveTypes-FLOAT [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP >> EncryptedExportTest::EncryptionChecksumAndCompression >> BackupPathTest::EncryptedExportWithExplicitObjectList [GOOD] >> PersQueueSdkReadSessionTest::StopResumeReadingData [GOOD] >> ReadSessionImplTest::CreatePartitionStream [GOOD] >> ReadSessionImplTest::BrokenCompressedData [GOOD] >> 
ReadSessionImplTest::CommitOffsetTwiceIsError >> ReadSessionImplTest::CommitOffsetTwiceIsError [GOOD] >> ReadSessionImplTest::CommonHandler [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery >> Compression::WriteWithMixedCodecs [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithAbort |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldGetMultipleStates [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UTF8 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-YSON >> BackupRestore::TestAllPrimitiveTypes-DATE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATETIME >> BackupPathTest::ExportCommonSourcePathImportExplicitly >> YdbYqlClient::CreateTableWithUniformPartitions [GOOD] >> YdbYqlClient::CreateTableWithUniformPartitionsAndAutoPartitioning |91.2%| [TA] $(B)/ydb/core/fq/libs/checkpoint_storage/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::CommonHandler [GOOD] Test command err: 2025-06-25T14:59:21.822528Z :ReadSession INFO: Random seed for debugging is 1750863561822505 2025-06-25T14:59:22.112726Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901740614806043:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:22.112864Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:59:22.198152Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901740499536049:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:22.199900Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:59:22.389793Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0010d6/r3tmp/tmpNvpo31/pdisk_1.dat 2025-06-25T14:59:22.417134Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:59:22.777717Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:22.792401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:22.792513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:22.795257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:22.795330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:22.801584Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:22.810048Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 
2025-06-25T14:59:22.812293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27937, node 1 2025-06-25T14:59:22.912474Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0010d6/r3tmp/yandex7xukfI.tmp 2025-06-25T14:59:22.912504Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0010d6/r3tmp/yandex7xukfI.tmp 2025-06-25T14:59:22.912725Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0010d6/r3tmp/yandex7xukfI.tmp 2025-06-25T14:59:22.912892Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:59:23.108951Z INFO: TTestServer started on Port 8277 GrpcPort 27937 2025-06-25T14:59:23.127840Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:23.204636Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8277 PQClient connected to localhost:27937 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:23.413416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:59:25.065252Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901753384438188:2269], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.065318Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519901753384438195:2272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.065442Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.069848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:25.087490Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519901753384438217:2273], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-25T14:59:25.186447Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519901753384438245:2131] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:25.471164Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519901753384438260:2277], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:59:25.471441Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NzIyYjc5YmItMThlYjZlNGUtNTU1NDBiNzYtZjI4ZWMzZGM=, ActorId: [2:7519901753384438186:2268], ActorState: ExecuteState, TraceId: 01jykskm86etvr909jve19y9d1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:59:25.474662Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:59:25.475360Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901753499708966:2304], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:59:25.477164Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NzMxYTU2YzAtNDFmNGMwNGMtOGQyMTk4ZjctYWFkNTlmM2M=, ActorId: [1:7519901753499708936:2297], ActorState: ExecuteState, TraceId: 01jykskmbs2cq4k30v25nmav8g, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:59:25.477482Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:59:25.525852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:25.628911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:25.740838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:27937", true, true, 1000); 2025-06-25T14:59:26.033484Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykskn0e5fh1g6ys54nk9jet, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTE2MzEwNjQtM2FkNThiNzYtYj ... 
partition_actor.cpp:890: session cookie 1 consumer shared/user session shared/user_7_1_1181820582412582005_v1 after read state TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid 170bb9cd-c2275d4-a44c8e30-c0417837 has messages 1 2025-06-25T15:00:14.237573Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/user session shared/user_7_1_1181820582412582005_v1 read done: guid# 170bb9cd-c2275d4-a44c8e30-c0417837, partition# TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1), size# 220 2025-06-25T15:00:14.237619Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/user session shared/user_7_1_1181820582412582005_v1 response to read: guid# 170bb9cd-c2275d4-a44c8e30-c0417837 2025-06-25T15:00:14.237787Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/user session shared/user_7_1_1181820582412582005_v1 Process answer. Aval parts: 0 2025-06-25T15:00:14.239453Z :DEBUG: [/Root] [/Root] [21fe2c8a-e4dfc92f-54f0211d-6dc970f8] [dc1] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:14.239634Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_7_1_1181820582412582005_v1 grpc read done: success# 1, data# { read { } } 2025-06-25T15:00:14.239675Z :DEBUG: [/Root] Decompression task done. Partition/PartitionSessionId: 0 (2-2) 2025-06-25T15:00:14.239782Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/user session shared/user_7_1_1181820582412582005_v1 got read request: guid# faab9d0a-a315612b-9f6f8625-a702a286 2025-06-25T15:00:14.239817Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 0} (2-2) 2025-06-25T15:00:14.239884Z :DEBUG: [/Root] [/Root] [21fe2c8a-e4dfc92f-54f0211d-6dc970f8] [dc1] The application data is transferred to the client. Number of messages 1, size 8 bytes DataReceived { PartitionStreamId: 1 PartitionId: 0 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "dc1". Topic: "test-topic" Partition: 0 PartitionKey: "" Information: { Offset: 2 SeqNo: 3 MessageGroupId: "test-message-group-id" CreateTime: 2025-06-25T15:00:13.122000Z WriteTime: 2025-06-25T15:00:13.123000Z Ip: "ipv6:[::1]:39204" UncompressedSize: 8 Meta: { "logtype": "unknown", "ident": "unknown", "server": "ipv6:[::1]:39204" } } } } 2025-06-25T15:00:14.240099Z :INFO: [/Root] [/Root] [21fe2c8a-e4dfc92f-54f0211d-6dc970f8] Closing read session. Close timeout: 3.000000s 2025-06-25T15:00:14.240153Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-06-25T15:00:14.240197Z :INFO: [/Root] [/Root] [21fe2c8a-e4dfc92f-54f0211d-6dc970f8] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1327 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T15:00:14.240829Z :INFO: [/Root] [/Root] [21fe2c8a-e4dfc92f-54f0211d-6dc970f8] Closing read session. 
Close timeout: 0.000000s 2025-06-25T15:00:14.240884Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-06-25T15:00:14.240930Z :INFO: [/Root] [/Root] [21fe2c8a-e4dfc92f-54f0211d-6dc970f8] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1328 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T15:00:14.241074Z :NOTICE: [/Root] [/Root] [21fe2c8a-e4dfc92f-54f0211d-6dc970f8] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T15:00:14.241226Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_7_1_1181820582412582005_v1 grpc read done: success# 0, data# { } 2025-06-25T15:00:14.241253Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_7_1_1181820582412582005_v1 grpc read failed 2025-06-25T15:00:14.241283Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 1 consumer shared/user session shared/user_7_1_1181820582412582005_v1 closed 2025-06-25T15:00:14.241742Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_7_1_1181820582412582005_v1 is DEAD 2025-06-25T15:00:14.241971Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2452: [PQ: 72075186224037892] Destroy direct read session shared/user_7_1_1181820582412582005_v1 2025-06-25T15:00:14.242011Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [7:7519901953963083013:2501] destroyed 2025-06-25T15:00:14.242060Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_7_1_1181820582412582005_v1 2025-06-25T15:00:14.242409Z node 8 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [7:7519901953963083011:2498] disconnected; active server actors: 1 2025-06-25T15:00:14.242500Z node 8 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [7:7519901953963083011:2498] client user disconnected session shared/user_7_1_1181820582412582005_v1 2025-06-25T15:00:15.955284Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.955323Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.955372Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T15:00:15.955680Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T15:00:15.956130Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T15:00:15.956426Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.956867Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: 13. Commit offset: 31 2025-06-25T15:00:15.958475Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.958519Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.958561Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T15:00:15.958860Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T15:00:15.959237Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T15:00:15.959350Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.959650Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) 2025-06-25T15:00:15.960827Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-25T15:00:15.961372Z :INFO: Error decompressing data: (TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check) 2025-06-25T15:00:15.961504Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-3) 2025-06-25T15:00:15.961678Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T15:00:15.961752Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-25T15:00:15.961788Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-25T15:00:15.961842Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 3, size 16 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { DataDecompressionError: "(TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check)" Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-25T15:00:15.964178Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.964228Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.964275Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T15:00:15.964613Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T15:00:15.965004Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T15:00:15.965134Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.965388Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-25T15:00:15.966275Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.966485Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T15:00:15.966640Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T15:00:15.966721Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-25T15:00:15.966822Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 2). 
Partition stream id: 1 2025-06-25T15:00:15.968273Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.968342Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.968380Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T15:00:15.968679Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-25T15:00:15.969002Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-25T15:00:15.969169Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.969841Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:00:15.969989Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-25T15:00:15.970052Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-25T15:00:15.970142Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes |91.2%| [TA] {RESULT} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpQueryService::ReplaceIntoWithDefaultValue [GOOD] |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> YdbScripting::MultiResults [GOOD] >> YdbScripting::Params ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ReplaceIntoWithDefaultValue [GOOD] Test command err: Trying to start YDB, gRPC: 15104, MsgBus: 13732 2025-06-25T14:55:19.978446Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900698513964485:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:19.979334Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183b/r3tmp/tmpgsh2jW/pdisk_1.dat 2025-06-25T14:55:20.354527Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:20.359090Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519900698513964445:2080] 1750863319971559 != 1750863319971562 TServer::EnableGrpc on GrpcPort 15104, node 1 2025-06-25T14:55:20.422385Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:20.422481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:20.424726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:20.431369Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:20.431400Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:20.431408Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:20.431550Z 
node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13732 TClient is connected to server localhost:13732 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:21.000078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:55:21.000452Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T14:55:21.019188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T14:55:21.026170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:21.182530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T14:55:21.337570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:21.403322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:22.944857Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900711398867958:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:22.944971Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:23.281446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:23.317723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:23.349273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:23.376772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:23.409248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:23.447843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:23.478045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:23.541063Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900715693835914:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:23.541122Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900715693835919:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:23.541123Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:23.543853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:23.554895Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900715693835921:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:23.613797Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900715693835972:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 1431, MsgBus: 30936 2025-06-25T14:55:25.212622Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519900724683561107:2229];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183b/r3tmp/tmpCbpXJ2/pdisk_1.dat 2025-06-25T14:55:25.254018Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:55:25.369142Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:55:25.369381Z node 2 :HIVE WARN: nod ... r: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.239685Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.297123Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.328139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.368731Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.442486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.479978Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.529277Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.610347Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:28.711637Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900737568465093:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.711778Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.711982Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519900737568465098:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:28.715367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:28.729931Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519900737568465100:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T14:55:28.825902Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519900737568465151:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:30.194338Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519900724683561107:2229];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:30.194643Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T14:55:40.334302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:55:40.334330Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Trying to start YDB, gRPC: 32045, MsgBus: 11145 2025-06-25T15:00:13.280996Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519901961139353417:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:13.281064Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00183b/r3tmp/tmp8D3iq3/pdisk_1.dat 2025-06-25T15:00:13.419078Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:13.435781Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:13.435872Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 32045, node 3 2025-06-25T15:00:13.439857Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:13.479364Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:13.479394Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:13.479403Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:13.479572Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11145 TClient is connected to server localhost:11145 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:14.027938Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:14.288771Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:16.499034Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901974024255891:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:16.499115Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:16.499195Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519901974024255908:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:16.502553Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:16.511246Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519901974024255918:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T15:00:16.583966Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519901974024255969:2332] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:16.621697Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> TColumnShardTestSchema::RebootForgetWithLostAnswer >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeExternalTable [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeView >> TColumnShardTestSchema::Drop-Reboots+GenerateInternalPathId >> TColumnShardTestSchema::TTL-Reboot+Internal+FirstPkColumn [GOOD] >> TColumnShardTestSchema::TTL+Reboot+Internal+FirstPkColumn [GOOD] >> TColumnShardTestSchema::RebootOneColdTier >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL-Reboot+Internal+FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-25T14:59:47.130707Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:59:47.138710Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:59:47.139149Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:47.167739Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:47.168007Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:47.176998Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:47.177210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:47.177451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:47.177575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:47.177693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:47.177801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:47.177910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:47.178004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:47.178112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:47.178212Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:47.178305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:47.180977Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:59:47.204045Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:47.204209Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:47.204264Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:47.204467Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:47.204642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:47.204727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:47.204797Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:47.204889Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:47.204961Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:47.205001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:47.205031Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:47.205219Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:47.205281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:47.205327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:47.205361Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:47.205457Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:47.205513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:47.205573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:47.205603Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:47.205653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:47.205693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:47.205729Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:47.205911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:47.205958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:47.205995Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:47.206228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:47.206277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:47.206310Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:47.206415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:47.206456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:47.206488Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:47.206571Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:47.206652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:47.206694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:47.206720Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:47.206947Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=38; 2025-06-25T14:59:47.207045Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=55; 2025-06-25T14:59:47.207116Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=32; 2025-06-25T14:59:47.207221Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=62; 2025-06-25T14:59:47.207322Z node 1 :T ... 
received;interval_idx=0;intervalId=170; 2025-06-25T15:00:19.307042Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=170; 2025-06-25T15:00:19.307087Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:00:19.307154Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.307177Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-25T15:00:19.307203Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:00:19.307453Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:19.307590Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=timestamp: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.307627Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:00:19.307738Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=1000; 2025-06-25T15:00:19.307791Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:254;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=timestamp; 2025-06-25T15:00:19.308030Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:505:2508];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=timestamp: uint64; 2025-06-25T15:00:19.308162Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.308256Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.308396Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.308552Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:19.308662Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.308764Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.308800Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan 
[5:506:2509] finished for tablet 9437184 2025-06-25T15:00:19.309245Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:505:2508];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.013},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.015}],"full":{"a":1750863619293748,"name":"_full_task","f":1750863619293748,"d_finished":0,"c":0,"l":1750863619308850,"d":15102},"events":[{"name":"bootstrap","f":1750863619293992,"d_finished":3384,"c":1,"l":1750863619297376,"d":3384},{"a":1750863619308531,"name":"ack","f":1750863619307427,"d_finished":992,"c":1,"l":1750863619308419,"d":1311},{"a":1750863619308513,"name":"processing","f":1750863619298649,"d_finished":5896,"c":8,"l":1750863619308421,"d":6233},{"name":"ProduceResults","f":1750863619295863,"d_finished":2400,"c":11,"l":1750863619308786,"d":2400},{"a":1750863619308789,"name":"Finish","f":1750863619308789,"d_finished":0,"c":0,"l":1750863619308850,"d":61},{"name":"task_result","f":1750863619298669,"d_finished":4772,"c":7,"l":1750863619307276,"d":4772}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.309364Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:505:2508];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:19.309823Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:505:2508];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.013},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.015}],"full":{"a":1750863619293748,"name":"_full_task","f":1750863619293748,"d_finished":0,"c":0,"l":1750863619309407,"d":15659},"events":[{"name":"bootstrap","f":1750863619293992,"d_finished":3384,"c":1,"l":1750863619297376,"d":3384},{"a":1750863619308531,"name":"ack","f":1750863619307427,"d_finished":992,"c":1,"l":1750863619308419,"d":1868},{"a":1750863619308513,"name":"processing","f":1750863619298649,"d_finished":5896,"c":8,"l":1750863619308421,"d":6790},{"name":"ProduceResults","f":1750863619295863,"d_finished":2400,"c":11,"l":1750863619308786,"d":2400},{"a":1750863619308789,"name":"Finish","f":1750863619308789,"d_finished":0,"c":0,"l":1750863619309407,"d":618},{"name":"task_result","f":1750863619298669,"d_finished":4772,"c":7,"l":1750863619307276,"d":4772}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.309898Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:19.291822Z;index_granules=0;index_portions=1;index_batches=10;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59184;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59184;selected_rows=0; 2025-06-25T15:00:19.309940Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:19.310199Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:506:2509];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL+Reboot+Internal+FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-25T14:59:49.241023Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:59:49.253073Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:59:49.253512Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:49.288414Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:49.288663Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:49.295093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:49.295284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:49.295483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:49.295590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:49.295691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:49.295796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:49.295904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:49.296017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:49.296113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:49.296207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:49.296291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:49.298876Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:59:49.321503Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:49.321653Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:49.321718Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:49.321891Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:49.322039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:49.322110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:49.322162Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:49.322261Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:49.322341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:49.322386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:49.322411Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:49.322575Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:49.322635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:49.322677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:49.322705Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:49.322844Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:49.322898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:49.322938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 
2025-06-25T14:59:49.322963Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:49.323021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:49.323059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:49.323091Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:49.323267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:49.323303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:49.323345Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:49.323527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:49.323581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:49.323610Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:49.323715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:49.323750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:49.323778Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:49.323849Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:49.323923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:49.323968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:49.323995Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:49.324206Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=41; 2025-06-25T14:59:49.324303Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=41; 2025-06-25T14:59:49.324403Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-25T14:59:49.324484Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=45; 2025-06-25T14:59:49.324579Z node 1 :T ... tervalId=170; 2025-06-25T15:00:19.474702Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=170; 2025-06-25T15:00:19.474764Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:00:19.474869Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.474933Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-25T15:00:19.474980Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:00:19.475374Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:19.475578Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=timestamp: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.475642Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:00:19.475843Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=1000; 2025-06-25T15:00:19.475928Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=timestamp; 2025-06-25T15:00:19.476294Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:611:2588];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=timestamp: uint64; 2025-06-25T15:00:19.476528Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.476665Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.476843Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.477136Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:19.477352Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.477524Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.477635Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [5:612:2589] finished for tablet 9437184 2025-06-25T15:00:19.478266Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:611:2588];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.017},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.019}],"full":{"a":1750863619457987,"name":"_full_task","f":1750863619457987,"d_finished":0,"c":0,"l":1750863619477727,"d":19740},"events":[{"name":"bootstrap","f":1750863619458260,"d_finished":3793,"c":1,"l":1750863619462053,"d":3793},{"a":1750863619477098,"name":"ack","f":1750863619475334,"d_finished":1570,"c":1,"l":1750863619476904,"d":2199},{"a":1750863619477073,"name":"processing","f":1750863619463510,"d_finished":9228,"c":8,"l":1750863619476907,"d":9882},{"name":"ProduceResults","f":1750863619460383,"d_finished":3346,"c":11,"l":1750863619477598,"d":3346},{"a":1750863619477607,"name":"Finish","f":1750863619477607,"d_finished":0,"c":0,"l":1750863619477727,"d":120},{"name":"task_result","f":1750863619463544,"d_finished":7489,"c":7,"l":1750863619475099,"d":7489}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.478372Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:611:2588];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:19.478924Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:611:2588];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.017},{"events":["l_ProduceResults","f_Finish"],"t":0.019},{"events":["l_ack","l_processing","l_Finish"],"t":0.02}],"full":{"a":1750863619457987,"name":"_full_task","f":1750863619457987,"d_finished":0,"c":0,"l":1750863619478456,"d":20469},"events":[{"name":"bootstrap","f":1750863619458260,"d_finished":3793,"c":1,"l":1750863619462053,"d":3793},{"a":1750863619477098,"name":"ack","f":1750863619475334,"d_finished":1570,"c":1,"l":1750863619476904,"d":2928},{"a":1750863619477073,"name":"processing","f":1750863619463510,"d_finished":9228,"c":8,"l":1750863619476907,"d":10611},{"name":"ProduceResults","f":1750863619460383,"d_finished":3346,"c":11,"l":1750863619477598,"d":3346},{"a":1750863619477607,"name":"Finish","f":1750863619477607,"d_finished":0,"c":0,"l":1750863619478456,"d":849},{"name":"task_result","f":1750863619463544,"d_finished":7489,"c":7,"l":1750863619475099,"d":7489}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:19.479027Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:19.455939Z;index_granules=0;index_portions=1;index_batches=10;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59184;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59184;selected_rows=0; 2025-06-25T15:00:19.479085Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:19.479452Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:612:2589];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobal [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalAsync >> TColumnShardTestSchema::TTL+Reboot+Internal-FirstPkColumn [GOOD] >> TPQTest::TestPQSmallRead [GOOD] >> TPQTest::TestPQReadAhead [GOOD] >> YdbYqlClient::CreateTableWithUniformPartitionsAndAutoPartitioning [GOOD] >> YdbYqlClient::CreateTableWithPartitionAtKeysAndAutoPartitioning >> TColumnShardTestSchema::HotTiersTtl [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/persqueue/ut/unittest >> TPQTest::TestPQReadAhead [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: 
[1:107:2057] recipient: [1:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:107:2057] recipient: [1:105:2137] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:112:2057] recipient: [1:105:2137] 2025-06-25T14:58:55.190224Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.190298Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:153:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927938 is [1:157:2176] sender: [1:158:2057] recipient: [1:151:2172] Leader for TabletID 72057594037927937 is [1:111:2141] sender: [1:183:2057] recipient: [1:14:2061] 2025-06-25T14:58:55.212856Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:58:55.233325Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 1 actor [1:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "important_user" Generation: 1 Important: true } 2025-06-25T14:58:55.235434Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:189:2200] 2025-06-25T14:58:55.237879Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:189:2200] 2025-06-25T14:58:55.251922Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|2f8b651-4ad18363-3ff1383a-6a1cd012_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user1" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [1:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user2" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [1:181:2194] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:107:2057] recipient: [2:105:2137] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:112:2057] recipient: [2:105:2137] 2025-06-25T14:59:03.727979Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:59:03.728066Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: 
[2:153:2057] recipient: [2:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:153:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927938 is [2:157:2176] sender: [2:158:2057] recipient: [2:151:2172] Leader for TabletID 72057594037927937 is [2:111:2141] sender: [2:183:2057] recipient: [2:14:2061] 2025-06-25T14:59:03.743358Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:59:03.743987Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 2 actor [2:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 2 ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } Consumers { Name: "important_user" Generation: 2 Important: true } 2025-06-25T14:59:03.744576Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:189:2200] 2025-06-25T14:59:03.747108Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:189:2200] 2025-06-25T14:59:03.758012Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3a124a1f-e36f1519-66afd8d0-fb7c2cc5_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user1" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [2:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user2" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [2:181:2194] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:107:2057] recipient: [3:105:2137] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:112:2057] recipient: [3:105:2137] 2025-06-25T14:59:12.339987Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:59:12.340068Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:153:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927938 is [3:157:2176] sender: [3:158:2057] recipient: [3:151:2172] Leader for TabletID 72057594037927937 is [3:111:2141] sender: [3:183:2057] recipient: [3:14:2061] 2025-06-25T14:59:12.357943Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:59:12.358575Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 
72057594037927937] Config applied version 3 actor [3:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 3 ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } Consumers { Name: "important_user" Generation: 3 Important: true } 2025-06-25T14:59:12.359179Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:189:2200] 2025-06-25T14:59:12.361639Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [3:189:2200] 2025-06-25T14:59:12.372146Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b595ebd0-d38d296b-230eff2a-5805679a_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user1" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [3:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user2" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [3:181:2194] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:107:2057] recipient: [4:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:107:2057] recipient: [4:105:2137] Leader for TabletID 72057594037927937 is [4:111:2141] sender: [4:112:2057] recipient: [4:105:2137] 2025-06-25T14:59:21.038122Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:59:21.038216Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:153:2057] recipient: [4:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [4:153:2057] recipient: [4:151:2172] Leader for TabletID 72057594037927938 is [4:157:2176] sender: [4:158:2057] recipient: [4:151:2172] Leader for TabletID 72057594037927937 is [4:111:2141] sender: [4:183:2057] recipient: [4:14:2061] 2025-06-25T14:59:21.056852Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T14:59:21.057569Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 4 actor [4:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "important_user" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 WriteSpeedInBytesPerSecond: 102400 BurstSize: 102400 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--asdfgs--topic" Version: 4 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } ReadRuleGenerations: 4 ReadRuleGenerations: 4 
MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 4 Important: false } Consumers { Name: "important_user" Generation: 4 Important: true } 2025-06-25T14:59:21.058177Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [4:189:2200] 2025-06-25T14:59:21.060736Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [4:189:2200] 2025-06-25T14:59:21.071089Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1c510161-eab44b88-be4eaf3a-58eaf88a_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user1" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie ... 6322Z node 26 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [26:263:2258] 2025-06-25T15:00:18.528521Z node 26 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T15:00:18.528571Z node 26 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [26:264:2259] !Reboot 72057594037927937 (actor [26:111:2141]) rebooted! !Reboot 72057594037927937 (actor [26:111:2141]) tablet resolver refreshed! new actor is[26:210:2213] Leader for TabletID 72057594037927937 is [26:210:2213] sender: [26:314:2057] recipient: [26:14:2061] 2025-06-25T15:00:19.778598Z node 26 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b4166a60-9b3993ef-1ede4441-ca6c6325_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T15:00:19.784776Z node 26 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|aa683cc8-d3e2cd5e-f64f5f7-f187e250_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T15:00:19.791020Z node 26 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|75d88e32-b1805baa-9e439240-20839382_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T15:00:19.795763Z node 26 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b15fb2a3-c6bec736-b4d1950d-2144ed9c_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T15:00:19.800690Z node 26 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d58d1d9b-ba2a1cf4-7cf07430-41e73a19_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 5 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [26:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [26:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 3 Bytes: 104857600 } Cookie: 123 } via pipe: [26:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 3 Count: 1000 Bytes: 1024 } Cookie: 123 } via pipe: [26:181:2194] Leader for TabletID 72057594037927937 is 
[0:0:0] sender: [27:107:2057] recipient: [27:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:107:2057] recipient: [27:105:2137] Leader for TabletID 72057594037927937 is [27:111:2141] sender: [27:112:2057] recipient: [27:105:2137] 2025-06-25T15:00:20.333625Z node 27 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:00:20.333702Z node 27 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [27:153:2057] recipient: [27:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [27:153:2057] recipient: [27:151:2172] Leader for TabletID 72057594037927938 is [27:157:2176] sender: [27:158:2057] recipient: [27:151:2172] Leader for TabletID 72057594037927937 is [27:111:2141] sender: [27:183:2057] recipient: [27:14:2061] 2025-06-25T15:00:20.353431Z node 27 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:00:20.354194Z node 27 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 27 actor [27:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 27 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 27 ReadRuleGenerations: 27 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 27 Important: false } Consumers { Name: "aaa" Generation: 27 Important: true } 2025-06-25T15:00:20.354798Z node 27 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [27:189:2200] 2025-06-25T15:00:20.357458Z node 27 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [27:189:2200] 2025-06-25T15:00:20.360646Z node 27 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [27:190:2201] 2025-06-25T15:00:20.362733Z node 27 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [27:190:2201] 2025-06-25T15:00:20.372321Z node 27 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b89ee5e8-480e6609-2771b567-e7038465_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T15:00:20.377871Z node 27 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|499492c5-c2c1fb24-f82d656b-6b33eab0_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T15:00:20.385654Z node 27 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|fe6738b5-bd271179-6abc6c62-be0e1f9b_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T15:00:20.390204Z node 27 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|be36036e-bd12d0c0-dcaf2990-a055a94f_3 generated for partition 0 topic 
'rt3.dc1--asdfgs--topic' owner default 2025-06-25T15:00:20.394538Z node 27 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|fbe86186-4c27289e-ea0ee36a-39ab9fb4_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 5 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [27:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [27:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 3 Bytes: 104857600 } Cookie: 123 } via pipe: [27:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 3 Count: 1000 Bytes: 1024 } Cookie: 123 } via pipe: [27:181:2194] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:107:2057] recipient: [28:105:2137] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:107:2057] recipient: [28:105:2137] Leader for TabletID 72057594037927937 is [28:111:2141] sender: [28:112:2057] recipient: [28:105:2137] 2025-06-25T15:00:20.752064Z node 28 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:00:20.752141Z node 28 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [28:153:2057] recipient: [28:151:2172] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [28:153:2057] recipient: [28:151:2172] Leader for TabletID 72057594037927938 is [28:157:2176] sender: [28:158:2057] recipient: [28:151:2172] Leader for TabletID 72057594037927937 is [28:111:2141] sender: [28:183:2057] recipient: [28:14:2061] 2025-06-25T15:00:20.768573Z node 28 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:00:20.769350Z node 28 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037927937] Config applied version 28 actor [28:181:2194] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 28 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 28 ReadRuleGenerations: 28 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 28 Important: false } Consumers { Name: "aaa" Generation: 28 Important: true } 2025-06-25T15:00:20.770009Z node 28 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [28:189:2200] 2025-06-25T15:00:20.772547Z node 28 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [28:189:2200] 2025-06-25T15:00:20.775671Z node 28 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [28:190:2201] 
2025-06-25T15:00:20.777455Z node 28 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [28:190:2201] 2025-06-25T15:00:20.785460Z node 28 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|26cccd93-176d2148-a8ed56ef-9af91085_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T15:00:20.789626Z node 28 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3b14acb3-a8e8fa98-edc14ffd-ebc31255_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T15:00:20.795427Z node 28 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|9e2ed353-47bee641-4a90148f-df39c272_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T15:00:20.799378Z node 28 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|daf26d72-e9bd0c88-eee8efae-577f90a6_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-25T15:00:20.803187Z node 28 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c4c6f2c7-e5f0ca91-e3be8e7b-b1eface0_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 5 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [28:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 2147483647 Bytes: 2147483647 } Cookie: 123 } via pipe: [28:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 3 Bytes: 104857600 } Cookie: 123 } via pipe: [28:181:2194] Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 3 Count: 1000 Bytes: 1024 } Cookie: 123 } via pipe: [28:181:2194] >> BackupPathTest::ExportCommonSourcePathImportExplicitly [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL+Reboot+Internal-FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-25T14:59:42.209677Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:59:42.222732Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:59:42.226013Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:42.248341Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:42.248565Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:42.258843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:42.259028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:42.259206Z 
node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:42.259295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:42.259398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:42.259479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:42.259585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:42.259652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:42.259724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:42.259798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.259867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:42.263397Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:59:42.287124Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:42.287266Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:42.287315Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:42.287489Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:42.287707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:42.287753Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:42.287839Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:42.287977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:42.288022Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:42.288203Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:42.288264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:42.288326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:42.288358Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:42.288460Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:42.288533Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:42.288576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:42.288604Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:42.288663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:42.288702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:42.288747Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-25T14:59:42.288965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:42.289016Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:42.289052Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:42.289292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:42.289349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:42.289379Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:42.289488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:42.289526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.289557Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.289641Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:42.289731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:42.289771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:42.289803Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:42.290005Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=42; 2025-06-25T14:59:42.290087Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=34; 2025-06-25T14:59:42.290175Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=45; 2025-06-25T14:59:42.290274Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=48; 
2025-06-25T14:59:42.290404Z node 1 :T ... ;event=interval_result_received;interval_idx=0;intervalId=295; 2025-06-25T15:00:20.565077Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=295; 2025-06-25T15:00:20.565128Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:00:20.565195Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:20.565227Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-25T15:00:20.565266Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:00:20.565642Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:20.565848Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:20.565903Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:00:20.566043Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;);columns=1;rows=1000; 2025-06-25T15:00:20.566109Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=saved_at; 2025-06-25T15:00:20.566405Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:786:2763];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=saved_at: uint64; 2025-06-25T15:00:20.566598Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:20.566711Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:20.566852Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:20.567067Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:20.567229Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:20.567371Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:20.567418Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [5:787:2764] finished for tablet 9437184 2025-06-25T15:00:20.567844Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:786:2763];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.014},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.016}],"full":{"a":1750863620551285,"name":"_full_task","f":1750863620551285,"d_finished":0,"c":0,"l":1750863620567472,"d":16187},"events":[{"name":"bootstrap","f":1750863620551582,"d_finished":2963,"c":1,"l":1750863620554545,"d":2963},{"a":1750863620567036,"name":"ack","f":1750863620565605,"d_finished":1279,"c":1,"l":1750863620566884,"d":1715},{"a":1750863620567018,"name":"processing","f":1750863620555695,"d_finished":7342,"c":9,"l":1750863620566890,"d":7796},{"name":"ProduceResults","f":1750863620553229,"d_finished":2819,"c":12,"l":1750863620567402,"d":2819},{"a":1750863620567405,"name":"Finish","f":1750863620567405,"d_finished":0,"c":0,"l":1750863620567472,"d":67},{"name":"task_result","f":1750863620555714,"d_finished":5907,"c":8,"l":1750863620565369,"d":5907}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:20.567931Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:786:2763];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:20.568385Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:786:2763];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.014},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.016}],"full":{"a":1750863620551285,"name":"_full_task","f":1750863620551285,"d_finished":0,"c":0,"l":1750863620567980,"d":16695},"events":[{"name":"bootstrap","f":1750863620551582,"d_finished":2963,"c":1,"l":1750863620554545,"d":2963},{"a":1750863620567036,"name":"ack","f":1750863620565605,"d_finished":1279,"c":1,"l":1750863620566884,"d":2223},{"a":1750863620567018,"name":"processing","f":1750863620555695,"d_finished":7342,"c":9,"l":1750863620566890,"d":8304},{"name":"ProduceResults","f":1750863620553229,"d_finished":2819,"c":12,"l":1750863620567402,"d":2819},{"a":1750863620567405,"name":"Finish","f":1750863620567405,"d_finished":0,"c":0,"l":1750863620567980,"d":575},{"name":"task_result","f":1750863620555714,"d_finished":5907,"c":8,"l":1750863620565369,"d":5907}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:20.568455Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:20.548180Z;index_granules=0;index_portions=1;index_batches=10;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59288;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59288;selected_rows=0; 2025-06-25T15:00:20.568499Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:20.568857Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:787:2764];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::HotTiersTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864184.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864184.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150864184.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864184.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864184.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130864184.000000s;Name=;Codec=}; WaitEmptyAfter=1;Tiers={{Column=timestamp;EvictAfter=150864184.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864184.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130862984.000000s;Name=;Codec=}; 2025-06-25T14:59:44.712675Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:44.739765Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:44.740033Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:44.747051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:44.747298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:44.747541Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:44.747659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:44.747759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:44.747867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:44.747963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:44.748064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:44.748160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:44.748266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.748414Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:44.774586Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:44.774766Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:44.774839Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:44.775011Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:44.775173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:44.775256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:44.775296Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:44.775392Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:44.775447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:44.775486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:44.775522Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:44.775717Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:44.775792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:44.775839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:44.775869Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:44.775963Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:44.776011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:44.776051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:44.776077Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:44.776137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:44.776172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:44.776200Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:44.776451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:44.776499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:44.776531Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:44.776722Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:44.776771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:44.776822Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:44.776940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:44.776984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.777010Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.777103Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:44.777171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:44.777213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.c ... -25T15:00:20.886123Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:00:20.886180Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:20.886213Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:20.886294Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:00:20.886417Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863621036:max} readable: {1750863621036:max} at tablet 9437184 2025-06-25T15:00:20.886528Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:00:20.886647Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863621036:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:20.886701Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863621036:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:20.887071Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863621036:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T15:00:20.887141Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863621036:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:00:20.887496Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863621036:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:1321:3290];trace_detailed=; 2025-06-25T15:00:20.887790Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T15:00:20.887953Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:20.888081Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:20.888174Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:20.888413Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1321:3290];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:20.888490Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1321:3290];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:20.888557Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1321:3290];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:20.888606Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1321:3290] finished for tablet 9437184 2025-06-25T15:00:20.888898Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:1321:3290];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1320:3289];stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863620887449,"name":"_full_task","f":1750863620887449,"d_finished":0,"c":0,"l":1750863620888653,"d":1204},"events":[{"name":"bootstrap","f":1750863620887584,"d_finished":623,"c":1,"l":1750863620888207,"d":623},{"a":1750863620888395,"name":"ack","f":1750863620888395,"d_finished":0,"c":0,"l":1750863620888653,"d":258},{"a":1750863620888383,"name":"processing","f":1750863620888383,"d_finished":0,"c":0,"l":1750863620888653,"d":270},{"name":"ProduceResults","f":1750863620888017,"d_finished":346,"c":2,"l":1750863620888592,"d":346},{"a":1750863620888595,"name":"Finish","f":1750863620888595,"d_finished":0,"c":0,"l":1750863620888653,"d":58}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:20.888952Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1321:3290];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1320:3289];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:20.889235Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=3;SelfId=[1:1321:3290];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1320:3289];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863620887449,"name":"_full_task","f":1750863620887449,"d_finished":0,"c":0,"l":1750863620888985,"d":1536},"events":[{"name":"bootstrap","f":1750863620887584,"d_finished":623,"c":1,"l":1750863620888207,"d":623},{"a":1750863620888395,"name":"ack","f":1750863620888395,"d_finished":0,"c":0,"l":1750863620888985,"d":590},{"a":1750863620888383,"name":"processing","f":1750863620888383,"d_finished":0,"c":0,"l":1750863620888985,"d":602},{"name":"ProduceResults","f":1750863620888017,"d_finished":346,"c":2,"l":1750863620888592,"d":346},{"a":1750863620888595,"name":"Finish","f":1750863620888595,"d_finished":0,"c":0,"l":1750863620888985,"d":390}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1321:3290]->[1:1320:3289] 2025-06-25T15:00:20.889297Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1321:3290];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:20.887119Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:00:20.889330Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1321:3290];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:20.889404Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:1321:3290];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 80000/4886744 0/0 >> TColumnShardTestSchema::CreateTable-Reboots+GenerateInternalPathId >> TColumnShardTestSchema::RebootColdTiers >> BackupRestore::TestAllPrimitiveTypes-DATETIME [GOOD] >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP >> BackupRestore::RestoreViewReferenceTable [GOOD] >> 
BackupRestore::RestoreViewToDifferentDatabase >> EncryptedExportTest::EncryptionChecksumAndCompression [GOOD] >> BackupRestore::TestAllPrimitiveTypes-YSON [GOOD] >> BackupRestore::TestAllPrimitiveTypes-UUID >> YdbScripting::Params [GOOD] >> YdbTableBulkUpsert::DataValidation >> BackupPathTest::ImportFilterByPrefix |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest |91.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest |91.2%| [TA] $(B)/ydb/core/persqueue/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> Secret::Simple [GOOD] >> TColumnShardTestSchema::Drop-Reboots+GenerateInternalPathId [GOOD] >> TColumnShardTestSchema::CreateTable-Reboots+GenerateInternalPathId [GOOD] >> EncryptedExportTest::ChangefeedEncryption |91.3%| [TA] {RESULT} $(B)/ydb/core/persqueue/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::CreateTable-Reboots+GenerateInternalPathId [GOOD] Test command err: 2025-06-25T15:00:22.165213Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:00:22.182771Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:00:22.182977Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:00:22.187766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:00:22.187893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:00:22.188065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:00:22.188136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:00:22.188220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:00:22.188301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:00:22.188399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:00:22.188460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 
2025-06-25T15:00:22.188527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:00:22.188602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:00:22.188666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:00:22.204625Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:00:22.204743Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:00:22.204794Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:00:22.204901Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:22.205015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:00:22.205090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:00:22.205131Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:00:22.205185Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:00:22.205221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:00:22.205243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:00:22.205270Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:00:22.205384Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:22.205425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:00:22.205453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:00:22.205476Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:00:22.205562Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:00:22.205608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:00:22.205642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:00:22.205661Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:00:22.205689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:00:22.205730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:00:22.205752Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:00:22.205888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:00:22.205913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:00:22.205930Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:00:22.206059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:00:22.206107Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:00:22.206126Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:00:22.206205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:00:22.206230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:00:22.206246Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:00:22.206295Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:00:22.206339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:00:22.206373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:00:22.206397Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:00:22.206544Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=28; 2025-06-25T15:00:22.206669Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=89; 2025-06-25T15:00:22.206722Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=22; 2025-06-25T15:00:22.206780Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=31; 2025-06-25T15:00:22.206837Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:00:22.206891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:00:22.206915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:00:22.206947Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750863623198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-25T15:00:23.228780Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=119;this=88923004880192;method=TTxController::StartProposeOnExecute;tx_info=119:TX_KIND_SCHEMA;min=1750863623198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;fline=schema.h:38;event=sync_schema; 2025-06-25T15:00:23.240839Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750863623198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;this=88923004880192;op_tx=119:TX_KIND_SCHEMA;min=1750863623198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750863623198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_this=89129165828096;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T15:00:23.240908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750863623198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;this=88923004880192;op_tx=119:TX_KIND_SCHEMA;min=1750863623198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750863623198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_this=89129165828096;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T15:00:23.240943Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=119:TX_KIND_SCHEMA;min=1750863623198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;this=88923004880192;op_tx=119:TX_KIND_SCHEMA;min=1750863623198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_op_tx=119:TX_KIND_SCHEMA;min=1750863623198;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=019:0;;int_this=89129165828096;method=TTxController::FinishProposeOnComplete;tx_id=119;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=119; 2025-06-25T15:00:23.241254Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T15:00:23.241367Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863623198 at tablet 9437184, mediator 0 2025-06-25T15:00:23.241401Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[36] execute at tablet 9437184 2025-06-25T15:00:23.241615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=20;result=not_found; 2025-06-25T15:00:23.241671Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000018, ss: 20} ttl settings: { Version: 1 } at tablet 9437184 2025-06-25T15:00:23.241728Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000018; 2025-06-25T15:00:23.241787Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000018; 2025-06-25T15:00:23.242186Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000018; 2025-06-25T15:00:23.242282Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=119;fline=tx_controller.cpp:215;event=finished_tx;tx_id=119; 2025-06-25T15:00:23.254041Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[36] complete at tablet 9437184 CreateTable: { SeqNo { Generation: 20 } EnsureTables { Tables { PathId: 21 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4609 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-25T15:00:23.255343Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=120;this=88923004882880;method=TTxController::StartProposeOnExecute;tx_info=120:TX_KIND_SCHEMA;min=1750863623201;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-25T15:00:23.267567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750863623201;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;this=88923004882880;op_tx=120:TX_KIND_SCHEMA;min=1750863623201;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T15:00:23.267639Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=120:TX_KIND_SCHEMA;min=1750863623201;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;this=88923004882880;op_tx=120:TX_KIND_SCHEMA;min=1750863623201;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=020:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=120; CreateTable: { SeqNo { Generation: 21 } EnsureTables { Tables { PathId: 22 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4610 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: 
"resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } } TtlSettings { Version: 1 } } } } 2025-06-25T15:00:23.268974Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=121;this=88923004884672;method=TTxController::StartProposeOnExecute;tx_info=121:TX_KIND_SCHEMA;min=1750863623203;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-25T15:00:23.280874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750863623203;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;this=88923004884672;op_tx=121:TX_KIND_SCHEMA;min=1750863623203;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T15:00:23.280939Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=121:TX_KIND_SCHEMA;min=1750863623203;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;this=88923004884672;op_tx=121:TX_KIND_SCHEMA;min=1750863623203;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=021:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=121; CreateTable: { SeqNo { Generation: 22 } EnsureTables { Tables { PathId: 23 SchemaPreset { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "k0" TypeId: 4612 } Columns { Id: 2 Name: "resource_type" TypeId: 4608 } Columns { Id: 3 Name: "resource_id" TypeId: 4608 DataAccessorConstructor { ClassName: "SPARSED" } } Columns { Id: 4 Name: "uid" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 5 Name: "level" TypeId: 1 } Columns { Id: 6 Name: "message" TypeId: 4608 StorageId: "__MEMORY" } Columns { Id: 7 Name: "json_payload" TypeId: 4610 } Columns { Id: 8 Name: "ingested_at" TypeId: 50 } Columns { Id: 9 Name: "saved_at" TypeId: 50 } Columns { Id: 10 Name: "request_id" TypeId: 4608 } KeyColumnNames: "k0" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" Indexes { Id: 1004 Name: "MAX::INDEX::level" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 5 } } Indexes { Id: 1007 Name: "MAX::INDEX::ingested_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 8 } } Indexes { Id: 1008 Name: "MAX::INDEX::saved_at" StorageId: "__LOCAL_METADATA" ClassName: "MAX" MaxIndex { ColumnId: 9 } } } 
} TtlSettings { Version: 1 } } } } 2025-06-25T15:00:23.282091Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=122;this=88923004886464;method=TTxController::StartProposeOnExecute;tx_info=122:TX_KIND_SCHEMA;min=1750863623204;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;fline=tx_controller.cpp:364;error=problem on start;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription; 2025-06-25T15:00:23.293874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750863623204;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;this=88923004886464;op_tx=122:TX_KIND_SCHEMA;min=1750863623204;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:155:2177]; 2025-06-25T15:00:23.293933Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=122:TX_KIND_SCHEMA;min=1750863623204;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;this=88923004886464;op_tx=122:TX_KIND_SCHEMA;min=1750863623204;max=18446744073709551615;plan=0;src=[1:155:2177];cookie=022:0;;fline=propose_tx.cpp:23;message=Invalid schema: Column errors: key column k0 has unsupported type NKikimrSchemeOp.TOlapColumnDescription;tablet_id=9437184;tx_id=122; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::Drop-Reboots+GenerateInternalPathId [GOOD] Test command err: 2025-06-25T15:00:19.785113Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:00:19.811984Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:00:19.812266Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:00:19.818992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:00:19.819190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:00:19.819400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:00:19.819552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:00:19.819654Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:00:19.819765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:00:19.819870Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:00:19.819966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:00:19.820062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:00:19.820172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:00:19.820297Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:00:19.846563Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:00:19.846701Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:00:19.846751Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:00:19.846927Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:19.847076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:00:19.847175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:00:19.847224Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:00:19.847310Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:00:19.847369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:00:19.847405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:00:19.847435Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:00:19.847595Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:19.847652Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:00:19.847687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:00:19.847713Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:00:19.847803Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:00:19.847869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:00:19.847925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:00:19.847960Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:00:19.848003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:00:19.848037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:00:19.848081Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:00:19.848328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:00:19.848375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:00:19.848406Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:00:19.848589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:00:19.848637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:00:19.848663Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:00:19.848781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:00:19.848823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:00:19.848852Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:00:19.848918Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:00:19.848978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:00:19.849015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:00:19.849051Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:00:19.849265Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=49; 2025-06-25T15:00:19.849360Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=48; 2025-06-25T15:00:19.849436Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=34; 2025-06-25T15:00:19.849532Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=43; 2025-06-25T15:00:19.849648Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:00:19.849732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:00:19.849775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:00:19.849820Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
pp:168;event=skip_actualization;waiting=0.887500s; 2025-06-25T15:00:22.926600Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:00:22.938943Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=104:TX_KIND_SCHEMA;min=1750863620984;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;this=88923022688640;op_tx=104:TX_KIND_SCHEMA;min=1750863620984;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1750863620984;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_this=89129165168576;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T15:00:22.939035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=104:TX_KIND_SCHEMA;min=1750863620984;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;this=88923022688640;op_tx=104:TX_KIND_SCHEMA;min=1750863620984;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1750863620984;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_this=89129165168576;method=TTxController::FinishProposeOnComplete;tx_id=104;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:103:2136]; 2025-06-25T15:00:22.939081Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=104:TX_KIND_SCHEMA;min=1750863620984;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;this=88923022688640;op_tx=104:TX_KIND_SCHEMA;min=1750863620984;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_op_tx=104:TX_KIND_SCHEMA;min=1750863620984;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:2;;int_this=89129165168576;method=TTxController::FinishProposeOnComplete;tx_id=104;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=104; 2025-06-25T15:00:22.939398Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T15:00:22.939529Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863620984 at tablet 9437184, mediator 0 2025-06-25T15:00:22.939588Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[14] execute at tablet 9437184 2025-06-25T15:00:22.939829Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: DropTable for pathId: {internal: 9438184000001, ss: 1} at tablet 9437184 2025-06-25T15:00:22.939914Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=104;fline=tx_controller.cpp:215;event=finished_tx;tx_id=104; 2025-06-25T15:00:22.952512Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[14] complete at tablet 9437184 2025-06-25T15:00:22.953161Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863620984:max} readable: {1750863620984:max} at tablet 9437184 2025-06-25T15:00:22.953295Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:00:22.954905Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863620984:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { 
Id: 1 } } } ; 2025-06-25T15:00:22.954973Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863620984:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:22.955520Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863620984:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T15:00:22.955601Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863620984:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:00:22.956443Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863620984:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:716:2728];trace_detailed=; 2025-06-25T15:00:22.957310Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T15:00:22.957502Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:22.957790Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:22.957892Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:22.957966Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:22.958000Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:716:2728] finished for tablet 9437184 2025-06-25T15:00:22.958262Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:710:2722];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","f_ProduceResults","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863622956400,"name":"_full_task","f":1750863622956400,"d_finished":0,"c":0,"l":1750863622958043,"d":1643},"events":[{"name":"bootstrap","f":1750863622956590,"d_finished":1021,"c":1,"l":1750863622957611,"d":1021},{"a":1750863622957769,"name":"ack","f":1750863622957769,"d_finished":0,"c":0,"l":1750863622958043,"d":274},{"a":1750863622957752,"name":"processing","f":1750863622957752,"d_finished":0,"c":0,"l":1750863622958043,"d":291},{"name":"ProduceResults","f":1750863622957597,"d_finished":177,"c":2,"l":1750863622957986,"d":177},{"a":1750863622957989,"name":"Finish","f":1750863622957989,"d_finished":0,"c":0,"l":1750863622958043,"d":54}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:22.958325Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:710:2722];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:22.958555Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:710:2722];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","f_ProduceResults","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863622956400,"name":"_full_task","f":1750863622956400,"d_finished":0,"c":0,"l":1750863622958379,"d":1979},"events":[{"name":"bootstrap","f":1750863622956590,"d_finished":1021,"c":1,"l":1750863622957611,"d":1021},{"a":1750863622957769,"name":"ack","f":1750863622957769,"d_finished":0,"c":0,"l":1750863622958379,"d":610},{"a":1750863622957752,"name":"processing","f":1750863622957752,"d_finished":0,"c":0,"l":1750863622958379,"d":627},{"name":"ProduceResults","f":1750863622957597,"d_finished":177,"c":2,"l":1750863622957986,"d":177},{"a":1750863622957989,"name":"Finish","f":1750863622957989,"d_finished":0,"c":0,"l":1750863622958379,"d":390}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:22.958605Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:22.955578Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:00:22.958634Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:22.958696Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:716:2728];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::Simple [GOOD] Test command err: 2025-06-25T14:58:05.722756Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:58:05.722889Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:05.722935Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d31/r3tmp/tmp7fllRZ/pdisk_1.dat 2025-06-25T14:58:06.023708Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 3847, node 1 TClient is connected to server localhost:32025 2025-06-25T14:58:06.248753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:06.288862Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:06.293061Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:06.293141Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:06.293184Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:06.293723Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:06.294011Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863483023989 != 1750863483023993 2025-06-25T14:58:06.339613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:06.339740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:06.351155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected snapshot->GetSecrets().size() incorrect: SECRETS:ACCESS: 2025-06-25T14:58:06.561094Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 2025-06-25T14:58:18.553770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:769:2635], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:18.553917Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:18.569160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:18.849441Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:886:2713], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:18.849597Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:18.849888Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:891:2718], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:18.854468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:18.989983Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:893:2720], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:58:19.792303Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:987:2785] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:20.386093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:20.897501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:21.666184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:22.502018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:22.989436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:24.410067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) snapshot->GetSecrets().size() incorrect: SECRETS:ACCESS: 2025-06-25T14:58:24.904713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE 
SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 snapshot->GetAccess().size() incorrect: SECRETS:root@builtin:secret1:100;ACCESS: REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1:100;root@builtin:secret1_1:100;ACCESS: 2025-06-25T14:58:41.475334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:58:41.475412Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded FINISHED_REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1:100;root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;EXPECTATION=1;WAITING=1 2025-06-25T14:59:04.818395Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715719. Ctx: { TraceId: 01jyksk04g2c1ja0dcxd2vvwkd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGNmMWUwZjAtYzU4MTgzNGItYThhN2YwZTQtN2U5YzhhYjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-25T14:59:27.494258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715736:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:28.566887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715743:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:29.974451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715754:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:59:30.506195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715757:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called 
at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 snapshot->GetAccess().size() incorrect (zero expects): SECRETS:root@builtin:secret1:abcde;root@builtin:secret1_1:200;ACCESS:root@builtin:secret1:test@test1; FINISHED_REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-25T14:59:43.397451Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715769. Ctx: { TraceId: 01jyksm5stfzqndjw1prdq1ks5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Nzk3OWM4OTItMWI0YjViYy01MmI3ODlhMC1mM2ZhYWIyZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 snapshot->GetAccess().size() incorrect: SECRETS:root@builtin:secret1:abcde;root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=1;WAITING=1 2025-06-25T15:00:21.102371Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715809. Ctx: { TraceId: 01jyksnaw466qackhvvtb8fn1d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzU4YjczMDUtM2U3ZmNhMWMtZTU5ZWU0MzItM2NhYzg0Yjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 >> TColumnShardTestSchema::TTL-Reboot-Internal-FirstPkColumn [GOOD] >> Secret::SimpleQueryService [GOOD] >> YdbYqlClient::CreateTableWithPartitionAtKeysAndAutoPartitioning [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL-Reboot-Internal-FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-25T14:59:50.901762Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:59:50.905962Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:59:50.906325Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:50.934122Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:50.934364Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:50.941300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:50.941500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:50.941711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:50.941843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:50.941955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:50.942054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:50.942179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:50.942277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:50.942378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:50.942492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:50.942611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:50.945224Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:59:50.968035Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:50.968170Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:50.968221Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:50.968409Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:50.968546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:50.968618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:50.968655Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:50.968754Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:50.968815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:50.968868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:50.968918Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:50.969080Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:50.969146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:50.969193Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:50.969224Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:50.969317Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:50.969367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:50.969405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:50.969431Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:50.969476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:50.969511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:50.969545Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:50.969768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:50.969817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:50.969846Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:50.970019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:50.970069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:50.970100Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:50.970202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:50.970237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:50.970271Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:50.970338Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:50.970411Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:50.970448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:50.970473Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:50.970680Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=47; 2025-06-25T14:59:50.970757Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=34; 2025-06-25T14:59:50.970820Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=27; 2025-06-25T14:59:50.970914Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=52; 2025-06-25T14:59:50.971009Z node 1 :T ... 
OLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:1;records_count:31;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:24.219616Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:31;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:24.219646Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:00:24.219675Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:00:24.219831Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:24.219928Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:31;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:24.219963Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:00:24.220039Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;);columns=1;rows=31; 2025-06-25T15:00:24.220075Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:254;stage=data_format;batch_size=248;num_rows=31;batch_columns=saved_at; 2025-06-25T15:00:24.220292Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:629:2633];bytes=248;rows=31;faults=0;finished=0;fault=0;schema=saved_at: uint64; 2025-06-25T15:00:24.220409Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:24.220529Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:24.220660Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:24.220800Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:24.220890Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:24.220955Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:24.220987Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [5:630:2634] finished for 
tablet 9437184 2025-06-25T15:00:24.221521Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:629:2633];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.007},{"events":["f_processing","f_task_result"],"t":0.008},{"events":["f_ack","l_task_result"],"t":0.034},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.039}],"full":{"a":1750863624181832,"name":"_full_task","f":1750863624181832,"d_finished":0,"c":0,"l":1750863624221050,"d":39218},"events":[{"name":"bootstrap","f":1750863624182094,"d_finished":7355,"c":1,"l":1750863624189449,"d":7355},{"a":1750863624220785,"name":"ack","f":1750863624216339,"d_finished":3975,"c":4,"l":1750863624220695,"d":4240},{"a":1750863624220773,"name":"processing","f":1750863624190729,"d_finished":18551,"c":36,"l":1750863624220697,"d":18828},{"name":"ProduceResults","f":1750863624185504,"d_finished":8469,"c":42,"l":1750863624220972,"d":8469},{"a":1750863624220976,"name":"Finish","f":1750863624220976,"d_finished":0,"c":0,"l":1750863624221050,"d":74},{"name":"task_result","f":1750863624190759,"d_finished":14036,"c":32,"l":1750863624216097,"d":14036}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:24.221607Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:629:2633];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:24.222107Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:629:2633];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.007},{"events":["f_processing","f_task_result"],"t":0.008},{"events":["f_ack","l_task_result"],"t":0.034},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.039}],"full":{"a":1750863624181832,"name":"_full_task","f":1750863624181832,"d_finished":0,"c":0,"l":1750863624221660,"d":39828},"events":[{"name":"bootstrap","f":1750863624182094,"d_finished":7355,"c":1,"l":1750863624189449,"d":7355},{"a":1750863624220785,"name":"ack","f":1750863624216339,"d_finished":3975,"c":4,"l":1750863624220695,"d":4850},{"a":1750863624220773,"name":"processing","f":1750863624190729,"d_finished":18551,"c":36,"l":1750863624220697,"d":19438},{"name":"ProduceResults","f":1750863624185504,"d_finished":8469,"c":42,"l":1750863624220972,"d":8469},{"a":1750863624220976,"name":"Finish","f":1750863624220976,"d_finished":0,"c":0,"l":1750863624221660,"d":684},{"name":"task_result","f":1750863624190759,"d_finished":14036,"c":32,"l":1750863624216097,"d":14036}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:24.222197Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:24.179162Z;index_granules=0;index_portions=4;index_batches=13;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=71800;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=71800;selected_rows=0; 2025-06-25T15:00:24.222255Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:24.222630Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:630:2634];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;; >> TNodeBrokerTest::NodeNameReuseRestartWithHostChanges >> TNodeBrokerTest::RegistrationPipeliningNodeName >> TNodeBrokerTest::TestListNodes >> TNodeBrokerTest::NodeNameWithDifferentTenants >> TColumnShardTestSchema::TTL+Reboot-Internal+FirstPkColumn [GOOD] >> TNodeBrokerTest::RegistrationPipelining >> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TZ_DATE [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TZ_DATETIME [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TZ_TIMESTAMP [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP64 >> TNodeBrokerTest::NodesMigrationExtendLeaseThenExpire >> 
BackupRestore::TestAllSchemeObjectTypes-EPathTypeView [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeResourcePool [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeTransfer [GOOD] >> BackupRestore::TestAllSchemeObjectTypes-EPathTypeSysView [GOOD] >> BackupRestore::TestReplaceRestoreOption >> TColumnShardTestSchema::TTL-Reboot+Internal-FirstPkColumn [GOOD] >> TColumnShardTestSchema::RebootExportAfterFail [GOOD] >> TAsyncIndexTests::MergeBothWithReboots[TabletReboots] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL+Reboot-Internal+FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-25T14:59:55.765928Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:59:55.770680Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:59:55.771120Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:55.801098Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:55.801369Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:55.808654Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:55.808879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:55.809122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:55.809250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:55.809369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:55.809524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:55.809644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:55.809745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 
2025-06-25T14:59:55.809857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:55.809974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:55.810067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:55.812718Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:59:55.837495Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:55.837645Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:55.837701Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:55.837879Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:55.838039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:55.838127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:55.838187Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:55.838292Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:55.838372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:55.838416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:55.838446Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:55.838620Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:55.838682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:55.838748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:55.838783Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:55.838903Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:55.838963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:55.839004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:55.839033Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:55.839085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:55.839129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:55.839158Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:55.839350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:55.839394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:55.839445Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:55.839683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:55.839732Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:55.839761Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:55.839895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:55.839941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:55.839971Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:55.840043Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:55.840132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:55.840177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:55.840205Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:55.840532Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=45; 2025-06-25T14:59:55.840660Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=76; 2025-06-25T14:59:55.840745Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-25T14:59:55.840834Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=48; 2025-06-25T14:59:55.840943Z node 1 :T ... 
ult_received;interval_idx=0;intervalId=90; 2025-06-25T15:00:24.996993Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=90; 2025-06-25T15:00:24.997055Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:00:24.997150Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:24.997186Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-25T15:00:24.997225Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:00:24.997488Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:24.997650Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=timestamp: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:24.997695Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:00:24.997806Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=1000; 2025-06-25T15:00:24.997855Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:254;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=timestamp; 2025-06-25T15:00:24.998077Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:585:2562];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=timestamp: uint64; 2025-06-25T15:00:24.998217Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:24.998304Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:24.998403Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:24.998549Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:24.998677Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:24.998803Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:24.998843Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan 
[5:586:2563] finished for tablet 9437184 2025-06-25T15:00:24.999228Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:585:2562];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.01},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.012}],"full":{"a":1750863624986781,"name":"_full_task","f":1750863624986781,"d_finished":0,"c":0,"l":1750863624998891,"d":12110},"events":[{"name":"bootstrap","f":1750863624987004,"d_finished":2361,"c":1,"l":1750863624989365,"d":2361},{"a":1750863624998532,"name":"ack","f":1750863624997460,"d_finished":964,"c":1,"l":1750863624998424,"d":1323},{"a":1750863624998506,"name":"processing","f":1750863624990287,"d_finished":5520,"c":8,"l":1750863624998425,"d":5905},{"name":"ProduceResults","f":1750863624988342,"d_finished":2122,"c":11,"l":1750863624998830,"d":2122},{"a":1750863624998833,"name":"Finish","f":1750863624998833,"d_finished":0,"c":0,"l":1750863624998891,"d":58},{"name":"task_result","f":1750863624990308,"d_finished":4462,"c":7,"l":1750863624997319,"d":4462}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:24.999290Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:585:2562];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:24.999631Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:585:2562];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.01},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.012}],"full":{"a":1750863624986781,"name":"_full_task","f":1750863624986781,"d_finished":0,"c":0,"l":1750863624999331,"d":12550},"events":[{"name":"bootstrap","f":1750863624987004,"d_finished":2361,"c":1,"l":1750863624989365,"d":2361},{"a":1750863624998532,"name":"ack","f":1750863624997460,"d_finished":964,"c":1,"l":1750863624998424,"d":1763},{"a":1750863624998506,"name":"processing","f":1750863624990287,"d_finished":5520,"c":8,"l":1750863624998425,"d":6345},{"name":"ProduceResults","f":1750863624988342,"d_finished":2122,"c":11,"l":1750863624998830,"d":2122},{"a":1750863624998833,"name":"Finish","f":1750863624998833,"d_finished":0,"c":0,"l":1750863624999331,"d":498},{"name":"task_result","f":1750863624990308,"d_finished":4462,"c":7,"l":1750863624997319,"d":4462}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:24.999709Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:24.985432Z;index_granules=0;index_portions=1;index_batches=10;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59184;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59184;selected_rows=0; 2025-06-25T15:00:24.999759Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:25.000000Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:586:2563];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::SimpleQueryService [GOOD] Test command err: 2025-06-25T14:58:06.483419Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:58:06.483541Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:06.483585Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000d28/r3tmp/tmp0dBZFW/pdisk_1.dat 2025-06-25T14:58:06.748829Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 25070, node 1 TClient is connected to server localhost:17619 2025-06-25T14:58:06.962700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:06.997379Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:07.001637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:07.001711Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:07.001755Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:07.002381Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:07.002718Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863484002484 != 1750863484002488 2025-06-25T14:58:07.048916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:07.049097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:07.060724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected snapshot->GetSecrets().size() incorrect: SECRETS:ACCESS: 2025-06-25T14:58:07.267238Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 2025-06-25T14:58:18.960782Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:771:2639], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:18.960932Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:781:2644], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:18.961003Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:18.965295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:18.982723Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:785:2647], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-25T14:58:19.014121Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:836:2679] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:19.294917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:20.395768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:20.887593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:21.750340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:22.644815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:23.120455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:24.379484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) snapshot->GetSecrets().size() incorrect: SECRETS:ACCESS: 2025-06-25T14:58:24.903150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 snapshot->GetAccess().size() incorrect: SECRETS:root@builtin:secret1:100;ACCESS: REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1:100;root@builtin:secret1_1:100;ACCESS: 2025-06-25T14:58:40.353569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:58:40.353633Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded FINISHED_REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;EXPECTATION=1;WAITING=1 REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1:100;root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=UPSERT OBJECT secret1_1 (TYPE SECRET) WITH value = `200`;EXPECTATION=1;WAITING=1 2025-06-25T14:59:04.578833Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715721. Ctx: { TraceId: 01jyksjzwd8sd5zcnp45e6tv7c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjI3ZDI5M2UtMTRhODJkMjUtNDJmY2YwMTYtMWZjOTQ1ZjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-25T14:59:27.654599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715742:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:28.833346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715751:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:30.945551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715764:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:59:31.457307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715767:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 snapshot->GetAccess().size() incorrect (zero expects): SECRETS:root@builtin:secret1:abcde;root@builtin:secret1_1:200;ACCESS:root@builtin:secret1:test@test1; FINISHED_REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-25T14:59:44.351559Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715779. Ctx: { TraceId: 01jyksm6qpchvgejawgdp1kstz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDY5NjE0ODItOGFiMDhjNzctZTRhNDg5NmUtN2Q2OTU5MTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 snapshot->GetAccess().size() incorrect: SECRETS:root@builtin:secret1:abcde;root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=DROP OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);RESULT=;EXPECTATION=1 snapshot->GetSecrets().size() incorrect: SECRETS:root@builtin:secret1_1:200;ACCESS: FINISHED_REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=1;WAITING=1 2025-06-25T15:00:22.668340Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715825. Ctx: { TraceId: 01jyksncdpfkfmrz7y80sk3vqk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTUzMmU3LTFjYTY5ZTgzLWMyNWE4NzU5LTY4OTY4NjRk, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT * FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbYqlClient::CreateTableWithPartitionAtKeysAndAutoPartitioning [GOOD] Test command err: 2025-06-25T15:00:05.344532Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901923385464856:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:05.344604Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001892/r3tmp/tmp3tqAhf/pdisk_1.dat 2025-06-25T15:00:05.689270Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:05.741823Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:05.741913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:05.744643Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:05.746626Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 26608, node 1 2025-06-25T15:00:05.780408Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:05.780430Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:05.780437Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:05.780553Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10206 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:06.094092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:06.378964Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:08.995238Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519901939856215856:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:08.995303Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001892/r3tmp/tmpNxh4Bs/pdisk_1.dat 2025-06-25T15:00:09.095057Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:09.111574Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:09.111656Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:09.117847Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15791, node 4 2025-06-25T15:00:09.198983Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:09.199005Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:09.199017Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:09.199144Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30211 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T15:00:09.388874Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:00:10.001351Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:11.336496Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:11.521628Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037889 not found 2025-06-25T15:00:11.548925Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037888 not found 2025-06-25T15:00:12.943702Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519901953659291787:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:12.943748Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001892/r3tmp/tmpi57SHP/pdisk_1.dat 2025-06-25T15:00:13.053747Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:13.060900Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:13.060980Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:13.064151Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7322, node 7 2025-06-25T15:00:13.108255Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or 
outdated, will use file: (empty maybe) 2025-06-25T15:00:13.108282Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:13.108290Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:13.108458Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17272 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:13.347029Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:00:13.951981Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:15.758876Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:17.305404Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519901975618308452:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:17.305560Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001892/r3tmp/tmpVDceoT/pdisk_1.dat 2025-06-25T15:00:17.450580Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:17.469558Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:17.469639Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:17.473448Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15073, node 10 2025-06-25T15:00:17.536372Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:17.536397Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:17.536404Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:17.536543Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10288 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:00:17.807488Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:18.314112Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:20.101576Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:21.453105Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519901994292618118:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:21.453199Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001892/r3tmp/tmp3L5yWY/pdisk_1.dat 2025-06-25T15:00:21.610836Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:21.627610Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:21.627680Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:21.632990Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12350, node 13 2025-06-25T15:00:21.681918Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:21.681948Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:21.681956Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:21.682095Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27782 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:21.911394Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:22.480787Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:24.020703Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalAsync [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalUnique [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree >> YdbTableBulkUpsert::DataValidation [GOOD] >> YdbTableBulkUpsert::AsyncIndexShouldFail >> TNodeBrokerTest::TestListNodesEpochDeltas ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootExportAfterFail [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864183.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864183.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862983.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; 2025-06-25T14:59:44.866249Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:44.892293Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:44.892562Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:44.898883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:44.899107Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:44.899331Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:44.899449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:44.899542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:44.899636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:44.899737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:44.899827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:44.899921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:44.900017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.900133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:44.924164Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:44.924292Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:44.924364Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:44.924511Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:44.924687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:44.924761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:44.924820Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:44.924899Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:44.924950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:44.924989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:44.925023Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:44.925174Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:44.925240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:44.925285Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:44.925309Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:44.925387Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:44.925433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:44.925466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:44.925490Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:44.925528Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:44.925560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:44.925586Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:44.925740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:44.925772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:44.925809Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:44.925964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:44.926001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:44.926031Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:44.926148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:44.926188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.926213Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.926282Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:44.926332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:44.926366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:44.926389Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:44.926573Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=42; 2025-06-25T14:59:44.926673Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=56; 2025-06-25T14:59:44.926744Z node 1 :TX_COLUMNSHA ... s;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_meta.cpp:71;memory_size=16934;data_size=16908;sum=336728;count=40;size_of_meta=136; 2025-06-25T15:00:25.563679Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=17006;data_size=16980;sum=338168;count=20;size_of_portion=208; 2025-06-25T15:00:25.564134Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=2638; 2025-06-25T15:00:25.564188Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=8; 2025-06-25T15:00:25.564717Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=473; 2025-06-25T15:00:25.564757Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=3364; 2025-06-25T15:00:25.564788Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=3466; 2025-06-25T15:00:25.564836Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=9; 2025-06-25T15:00:25.564930Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=58; 2025-06-25T15:00:25.564959Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=3945; 2025-06-25T15:00:25.565059Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=60; 2025-06-25T15:00:25.565148Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=53; 2025-06-25T15:00:25.565243Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=58; 2025-06-25T15:00:25.565333Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=52; 2025-06-25T15:00:25.567409Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2029; 2025-06-25T15:00:25.569029Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=1565; 2025-06-25T15:00:25.569079Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=7; 2025-06-25T15:00:25.569115Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=7; 2025-06-25T15:00:25.569146Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=5; 2025-06-25T15:00:25.569203Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=29; 2025-06-25T15:00:25.569237Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=4; 2025-06-25T15:00:25.569301Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=38; 2025-06-25T15:00:25.569333Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=5; 2025-06-25T15:00:25.569383Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=23; 2025-06-25T15:00:25.569443Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=31; 2025-06-25T15:00:25.569610Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=136; 
2025-06-25T15:00:25.569640Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=14563; 2025-06-25T15:00:25.569738Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=9739224;raw_bytes=13544452;count=2;records=160000} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=0;raw_bytes=0;count=0;records=0} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:00:25.569822Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1283:3142];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:00:25.569861Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1283:3142];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:00:25.569912Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1283:3142];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:00:25.577695Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1283:3142];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-25T15:00:25.577824Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:00:25.577897Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-25T15:00:25.577948Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:00:25.577983Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:25.578010Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:25.578074Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:00:25.579685Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1283:3142];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:00:25.579784Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1283:3142];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:00:25.579809Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:00:25.579831Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:00:25.579863Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1283:3142];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:00:25.579927Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1283:3142];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-25T15:00:25.579973Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1283:3142];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:00:25.580008Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1283:3142];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:25.580037Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1283:3142];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:25.580102Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1283:3142];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 160000/9739224 160000/9739224 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL-Reboot+Internal-FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-25T14:59:49.609469Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvBoot 2025-06-25T14:59:49.613916Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvRestored 2025-06-25T14:59:49.614332Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:49.647799Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:49.648059Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:49.655177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:49.655403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:49.655615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:49.655754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:49.655896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:49.656006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:49.656138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:49.656247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:49.656376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:49.656507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:49.656614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:49.659398Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T14:59:49.684383Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:49.684549Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:49.684618Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:49.684807Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:49.684982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:49.685065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:49.685109Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:49.685192Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:49.685250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:49.685303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:49.685347Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:49.685542Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:49.685610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:49.685670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:49.685713Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:49.685812Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:49.685867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:49.685955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:49.685986Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:49.686038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:49.686072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-25T14:59:49.686108Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:49.686309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:49.686351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:49.686383Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:49.686566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:49.686626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:49.686665Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:49.686790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:49.686833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:49.686866Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:49.686933Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:49.687019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:49.687060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:49.687089Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:49.687320Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=48; 2025-06-25T14:59:49.687407Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=40; 2025-06-25T14:59:49.687516Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=65; 2025-06-25T14:59:49.687609Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=49; 2025-06-25T14:59:49.687697Z node 1 :T ... canGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=295; 2025-06-25T15:00:25.463929Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=295; 2025-06-25T15:00:25.463975Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:00:25.464043Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:25.464066Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-25T15:00:25.464095Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:00:25.464428Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:25.464585Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:25.464624Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:00:25.464738Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready 
result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;);columns=1;rows=1000; 2025-06-25T15:00:25.464792Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=saved_at; 2025-06-25T15:00:25.465033Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:682:2685];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=saved_at: uint64; 2025-06-25T15:00:25.465191Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:25.465311Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:25.465440Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:25.465618Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:25.465745Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:25.465848Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:25.465894Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [5:683:2686] finished for tablet 9437184 2025-06-25T15:00:25.466324Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:682:2685];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.002},{"events":["f_ack","l_task_result"],"t":0.012},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.014}],"full":{"a":1750863625451483,"name":"_full_task","f":1750863625451483,"d_finished":0,"c":0,"l":1750863625465943,"d":14460},"events":[{"name":"bootstrap","f":1750863625451708,"d_finished":2648,"c":1,"l":1750863625454356,"d":2648},{"a":1750863625465595,"name":"ack","f":1750863625464402,"d_finished":1063,"c":1,"l":1750863625465465,"d":1411},{"a":1750863625465581,"name":"processing","f":1750863625454444,"d_finished":5891,"c":9,"l":1750863625465467,"d":6253},{"name":"ProduceResults","f":1750863625453257,"d_finished":2496,"c":12,"l":1750863625465879,"d":2496},{"a":1750863625465883,"name":"Finish","f":1750863625465883,"d_finished":0,"c":0,"l":1750863625465943,"d":60},{"name":"task_result","f":1750863625454471,"d_finished":4678,"c":8,"l":1750863625464176,"d":4678}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:25.466399Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:682:2685];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:25.466759Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:682:2685];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.002},{"events":["f_ack","l_task_result"],"t":0.012},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.014}],"full":{"a":1750863625451483,"name":"_full_task","f":1750863625451483,"d_finished":0,"c":0,"l":1750863625466439,"d":14956},"events":[{"name":"bootstrap","f":1750863625451708,"d_finished":2648,"c":1,"l":1750863625454356,"d":2648},{"a":1750863625465595,"name":"ack","f":1750863625464402,"d_finished":1063,"c":1,"l":1750863625465465,"d":1907},{"a":1750863625465581,"name":"processing","f":1750863625454444,"d_finished":5891,"c":9,"l":1750863625465467,"d":6749},{"name":"ProduceResults","f":1750863625453257,"d_finished":2496,"c":12,"l":1750863625465879,"d":2496},{"a":1750863625465883,"name":"Finish","f":1750863625465883,"d_finished":0,"c":0,"l":1750863625466439,"d":556},{"name":"task_result","f":1750863625454471,"d_finished":4678,"c":8,"l":1750863625464176,"d":4678}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:00:25.466822Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:25.448473Z;index_granules=0;index_portions=1;index_batches=10;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59288;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59288;selected_rows=0; 2025-06-25T15:00:25.466866Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:25.467189Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:683:2686];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;; >> BackupPathTest::ImportFilterByPrefix [GOOD] >> TColumnShardTestSchema::ExportAfterFail [GOOD] >> TNodeBrokerTest::UpdateNodesLog >> TNodeBrokerTest::NodesMigrationManyNodesInterrupted >> TTenantPoolTests::TestSensorsConfigForStaticSlot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeBothWithReboots[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [0:0:0] sender: 
[1:119:2058] recipient: [1:113:2143] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:119:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:120:2058] recipient: [1:115:2144] Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:130:2058] recipient: [1:112:2142] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:133:2058] recipient: [1:113:2143] Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:138:2058] recipient: [1:115:2144] 2025-06-25T14:51:16.588939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T14:51:16.589026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:16.589067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-25T14:51:16.589101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T14:51:16.589147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T14:51:16.589168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T14:51:16.589214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T14:51:16.589278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T14:51:16.589914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T14:51:16.590228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T14:51:16.656842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7732: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-25T14:51:16.656889Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:51:16.657570Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:178:2058] recipient: [1:15:2062] 2025-06-25T14:51:16.668760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T14:51:16.671875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T14:51:16.672041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, 
schemeshardId: 72057594046678944 2025-06-25T14:51:16.678818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T14:51:16.678969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T14:51:16.679553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.679806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T14:51:16.682436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:16.682617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T14:51:16.683671Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T14:51:16.683746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T14:51:16.683846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T14:51:16.683890Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T14:51:16.683926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T14:51:16.684050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:217:2058] recipient: [1:215:2214] Leader for TabletID 72057594037968897 is [1:221:2218] sender: [1:222:2058] recipient: [1:215:2214] 2025-06-25T14:51:16.690212Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:128:2152] sender: [1:242:2058] recipient: [1:15:2062] 2025-06-25T14:51:16.799804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T14:51:16.800004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.800193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T14:51:16.800233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain 
target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T14:51:16.800453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T14:51:16.800531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:51:16.802719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.802892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T14:51:16.803063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.803132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T14:51:16.803175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T14:51:16.803203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T14:51:16.804959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.805008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T14:51:16.805044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T14:51:16.806526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.806572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T14:51:16.806625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T14:51:16.806667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T14:51:16.809974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T14:51:16.811622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 
1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T14:51:16.811781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:136:2157] sender: [1:257:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T14:51:16.812722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T14:51:16.812848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969453 } } Step: 5000001 Media ... criptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409550 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 
ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:00:25.534653Z node 202 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409550:2][72075186233409551][202:1108:2883] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-25T15:00:25.534781Z node 202 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][202:1067:2883] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-25T15:00:25.534955Z node 202 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409550:2][72075186233409551][202:1108:2883] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1750863625509415 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1750863625509415 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1750863625509415 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-25T15:00:25.537860Z node 202 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409550:2][72075186233409551][202:1108:2883] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK 
Reason: REASON_NONE } LastRecordOrder: 3 2025-06-25T15:00:25.537973Z node 202 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409550:2][202:1067:2883] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409551 } 2025-06-25T15:00:25.719512Z node 202 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-25T15:00:25.719843Z node 202 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 371us result status StatusSuccess 2025-06-25T15:00:25.720765Z node 202 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 
ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ExportAfterFail [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864188.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864188.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; 
WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862988.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; 2025-06-25T14:59:50.465452Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:50.495968Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:50.496243Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:50.503643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:50.503879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:50.504130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:50.504253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:50.504380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:50.504485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:50.504599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:50.504716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:50.504817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:50.504921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:50.505055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:50.532491Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:50.532640Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:50.532690Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:50.532887Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:50.533056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:50.533145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:50.533197Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:50.533286Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:50.533348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:50.533389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:50.533428Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:50.533613Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:50.533691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:50.533742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:50.533773Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:50.533878Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:50.533935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:50.533976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 
2025-06-25T14:59:50.534005Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:50.534054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:50.534092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:50.534129Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:50.534355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:50.534403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:50.534439Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:50.534631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:50.534677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:50.534712Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:50.534850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:50.534901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:50.534932Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:50.535013Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:50.535074Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:50.535115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:50.535147Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:50.535387Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=65; 2025-06-25T14:59:50.535496Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=55; 2025-06-25T14:59:50.535626Z node 1 :TX_COLUMNSHA ... ce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:80000;schema=timestamp: timestamp[us];);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:26.882389Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:00:26.882432Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:00:26.882555Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:26.882650Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:80000;schema=timestamp: timestamp[us];);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:26.882683Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:00:26.882759Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=80000; 2025-06-25T15:00:26.882805Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:254;stage=data_format;batch_size=640000;num_rows=80000;batch_columns=timestamp; 2025-06-25T15:00:26.883005Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:825:2794];bytes=640000;rows=80000;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us]; Got TEvKqpCompute::TEvScanData [1:826:2795]->[1:825:2794] 2025-06-25T15:00:26.883098Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:26.883184Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:26.883259Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:26.883361Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:26.883424Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:26.883480Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:26.883508Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:826:2795] finished for tablet 9437184 2025-06-25T15:00:26.883898Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:825:2794];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.446},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.448}],"full":{"a":1750863626435368,"name":"_full_task","f":1750863626435368,"d_finished":0,"c":0,"l":1750863626883549,"d":448181},"events":[{"name":"bootstrap","f":1750863626435525,"d_finished":3493,"c":1,"l":1750863626439018,"d":3493},{"a":1750863626883346,"name":"ack","f":1750863626881589,"d_finished":1603,"c":2,"l":1750863626883279,"d":1806},{"a":1750863626883336,"name":"processing","f":1750863626440172,"d_finished":298820,"c":16,"l":1750863626883281,"d":299033},{"name":"ProduceResults","f":1750863626437186,"d_finished":4053,"c":20,"l":1750863626883495,"d":4053},{"a":1750863626883497,"name":"Finish","f":1750863626883497,"d_finished":0,"c":0,"l":1750863626883549,"d":52},{"name":"task_result","f":1750863626440199,"d_finished":296871,"c":14,"l":1750863626881420,"d":296871}],"id":"9437184::7"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:26.883952Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:825:2794];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:26.884293Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:825:2794];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.446},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.448}],"full":{"a":1750863626435368,"name":"_full_task","f":1750863626435368,"d_finished":0,"c":0,"l":1750863626883982,"d":448614},"events":[{"name":"bootstrap","f":1750863626435525,"d_finished":3493,"c":1,"l":1750863626439018,"d":3493},{"a":1750863626883346,"name":"ack","f":1750863626881589,"d_finished":1603,"c":2,"l":1750863626883279,"d":2239},{"a":1750863626883336,"name":"processing","f":1750863626440172,"d_finished":298820,"c":16,"l":1750863626883281,"d":299466},{"name":"ProduceResults","f":1750863626437186,"d_finished":4053,"c":20,"l":1750863626883495,"d":4053},{"a":1750863626883497,"name":"Finish","f":1750863626883497,"d_finished":0,"c":0,"l":1750863626883982,"d":485},{"name":"task_result","f":1750863626440199,"d_finished":296871,"c":14,"l":1750863626881420,"d":296871}],"id":"9437184::7"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:826:2795]->[1:825:2794] 2025-06-25T15:00:26.884370Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:26.434955Z;index_granules=0;index_portions=2;index_batches=1038;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=9739224;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=9739224;selected_rows=0; 2025-06-25T15:00:26.884405Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:26.884625Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:826:2795];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 160000/9739224 160000/9739224 >> BackupRestore::TestAllPrimitiveTypes-TIMESTAMP [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INTERVAL >> TNodeBrokerTest::NodesMigration1000Nodes >> TNodeBrokerTest::RegistrationPipeliningNodeName [GOOD] >> TNodeBrokerTest::RegistrationPipelining [GOOD] >> 
TNodeBrokerTest::NodeNameWithDifferentTenants [GOOD] >> TNodeBrokerTest::NodesMigrationExpiredChanged >> BackupRestore::TestAllPrimitiveTypes-UUID [GOOD] >> BackupPathTest::ImportFilterByYdbObjectPath >> TNodeBrokerTest::NodesMigrationExtendLeaseThenExpire [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::RegistrationPipeliningNodeName [GOOD] Test command err: 2025-06-25T15:00:26.107643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:26.107708Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:26.407364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T15:00:26.428536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 102 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000002 FAKE_COORDINATOR: Erasing txId 102 ... waiting for commit ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... waiting for commit (done) ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::RegistrationPipelining [GOOD] Test command err: 2025-06-25T15:00:26.108020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:26.108087Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for commit ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... waiting for commit (done) ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodeNameWithDifferentTenants [GOOD] Test command err: 2025-06-25T15:00:26.110236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:26.110293Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-25T15:00:26.407202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T15:00:26.433613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 102 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000002 FAKE_COORDINATOR: Erasing txId 102 >> EncryptedExportTest::ChangefeedEncryption [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExtendLeaseThenExpire [GOOD] Test command err: 2025-06-25T15:00:26.107792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:26.107841Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TNodeBrokerTest::NodesMigrationRemovedChanged >> TTenantPoolTests::TestSensorsConfigForStaticSlot [GOOD] >> TNodeBrokerTest::ResolveScopeIdForServerless ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestore::TestAllPrimitiveTypes-UUID [GOOD] Test command err: 2025-06-25T14:59:27.311177Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901760191846397:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:27.311268Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpmeonW6/pdisk_1.dat 2025-06-25T14:59:27.716463Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:27.749275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:27.749349Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:27.751772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64951, node 1 2025-06-25T14:59:27.857369Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:27.857397Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:27.857410Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:27.857552Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17223 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:28.240476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:59:28.322747Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Backup "/Root" to "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpNJQFvI/"Create temporary directory "/Root/~backup_20250625T145928" in databaseProcess "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpNJQFvI/dir"Create directory "/Root/~backup_20250625T145928/dir" in databaseWrite ACL into "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpNJQFvI/dir/permissions.pb"Remove directory "/Root/~backup_20250625T145928/dir"2025-06-25T14:59:28.502231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Remove temporary directory "/Root/~backup_20250625T145928" in database2025-06-25T14:59:28.544825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Backup completed successfully2025-06-25T14:59:28.561897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Restore "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpNJQFvI/" to "/Root"Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpNJQFvI/"},{"type":"Directory","path":"/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpNJQFvI/dir"}]Process "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpNJQFvI/dir"Restore empty directory "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpNJQFvI/dir" to "/Root/dir"Restore ACL "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpNJQFvI/dir" to "/Root/dir"Read ACL from "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpNJQFvI/dir/permissions.pb"2025-06-25T14:59:28.642160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully 2025-06-25T14:59:30.772322Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519901775800603123:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:30.772395Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpIDvmrK/pdisk_1.dat 2025-06-25T14:59:30.876529Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:30.896439Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:30.896514Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:30.899108Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11895, node 4 2025-06-25T14:59:30.939136Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:30.939158Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:30.939164Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:30.939305Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28619 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:31.158491Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:59:31.785161Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:33.676027Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519901788685505977:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:33.676136Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:33.867736Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:33.982443Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519901788685506157:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:33.982561Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:34.110062Z node 4 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][4:7519901792980473634:2320] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:4:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-25T14:59:34.155883Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:75199 ... ACL "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpNN0D7o/YsonTable" to "/Root/YsonTable"Read ACL from "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpNN0D7o/YsonTable/permissions.pb"2025-06-25T15:00:21.168913Z node 28 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully2025-06-25T15:00:21.299676Z node 28 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jyksnb1w4vrgafg8bwcw61pd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=Yzk3ZmQ3NDQtNTE3YzBhMGYtZTE4ODQzYzQtMjNkNzdjZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:00:22.597682Z node 31 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[31:7519901997680557907:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:22.597735Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpr1Ci3F/pdisk_1.dat 2025-06-25T15:00:22.706503Z node 31 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:22.723762Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:22.723841Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:22.730017Z node 31 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(31, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15702, node 31 2025-06-25T15:00:22.774352Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:22.774370Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:22.774377Z node 31 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:22.774518Z node 31 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11518 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:22.967524Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:23.602955Z node 31 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:25.763973Z node 31 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [31:7519902010565460774:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:25.763973Z node 31 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [31:7519902010565460784:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:25.764072Z node 31 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:25.769769Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:25.789483Z node 31 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [31:7519902010565460794:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T15:00:25.846608Z node 31 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [31:7519902010565460874:2675] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:25.869469Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:26.047425Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyksnfqt1jw370s12cnqyh6c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=Nzc5OTExZTItYTU5ZjRmZDAtMTllZWI0NjMtNDc2ZWUwNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:00:26.184117Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyksnfta7jngnd229tj75nh6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=Nzc5OTExZTItYTU5ZjRmZDAtMTllZWI0NjMtNDc2ZWUwNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/"Create temporary directory "/Root/~backup_20250625T150026" in databaseProcess "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/UuidTable"Copy tables: { src: "/Root/UuidTable", dst: "/Root/~backup_20250625T150026/UuidTable" }Describe table "/Root/UuidTable"Describe table "/Root/~backup_20250625T150026/UuidTable"Backup table "/Root/~backup_20250625T150026/UuidTable" to "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/UuidTable"Write scheme into "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/UuidTable/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/UuidTable/permissions.pb"Read table "/Root/~backup_20250625T150026/UuidTable"Write data into "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/UuidTable/data_00.csv"Drop table "/Root/~backup_20250625T150026/UuidTable"Remove temporary directory "/Root/~backup_20250625T150026" in database2025-06-25T15:00:26.529886Z node 31 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 31, TabletId: 72075186224037889 not found 2025-06-25T15:00:26.617752Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Backup completed successfullyRestore "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/" to "/Root"2025-06-25T15:00:26.699032Z node 31 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 31, TabletId: 72075186224037888 not found Resolved db base path: "/Root"List of entries in the backup: 
[{"type":"Directory","path":"/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/UuidTable"}]Process "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/UuidTable"Read scheme from "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/UuidTable/scheme.pb"Restore table "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/UuidTable" to "/Root/UuidTable"2025-06-25T15:00:26.765037Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Created "/Root/UuidTable"Read data from "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/UuidTable/data_00.csv"2025-06-25T15:00:26.880079Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jyksngj7avf9v0yr4rm6p6ac, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=MTFlYTQwNzItNmM4ZDY2OGMtNDc1OTU2YWItOWU4OWFmMjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/UuidTable" to "/Root/UuidTable"Read ACL from "/home/runner/.ya/build/build_root/yft8/0014a3/r3tmp/tmpl4Cpf7/UuidTable/permissions.pb"2025-06-25T15:00:26.915843Z node 31 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully2025-06-25T15:00:27.091198Z node 31 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715673. Ctx: { TraceId: 01jyksngne37r26g2qg704zaqm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=31&id=Nzc5OTExZTItYTU5ZjRmZDAtMTllZWI0NjMtNDc2ZWUwNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TTenantPoolTests::TestSensorsConfigForStaticSlot [GOOD] Test command err: 2025-06-25T15:00:27.837984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:27.838047Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:27.891196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) >> TDynamicNameserverTest::BasicFunctionality-EnableNodeBrokerDeltaProtocol-false >> TNodeBrokerTest::ExtendLeaseSetLocationInOneRegistration >> TNodeBrokerTest::ShiftIdRangeRemoveNew >> EncryptedExportTest::TopicEncryption >> TNodeBrokerTest::NodeNameReuseRestartWithHostChanges [GOOD] >> TNodeBrokerTest::ResolveScopeIdForServerless [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery [GOOD] >> BasicUsage::TWriteSession_WriteEncoded >> TNodeBrokerTest::ExtendLeasePipelining >> YdbTableBulkUpsert::AsyncIndexShouldFail [GOOD] >> YdbTableBulkUpsert::AsyncIndexShouldSucceed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodeNameReuseRestartWithHostChanges [GOOD] Test command err: 2025-06-25T15:00:26.112102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:26.112169Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:26.389605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ResolveScopeIdForServerless [GOOD] Test command err: 2025-06-25T15:00:29.206754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:29.206807Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-25T15:00:29.334246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T15:00:29.376599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 102:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 102 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000002 FAKE_COORDINATOR: Erasing txId 102 >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-false >> BackupRestore::RestoreViewToDifferentDatabase [GOOD] >> BackupRestore::RestoreViewDependentOnAnotherView >> TNodeBrokerTest::TestListNodesEpochDeltas [GOOD] >> TNodeBrokerTest::NodesMigrationExtendLeaseThenRemove >> PersQueueSdkReadSessionTest::ReadSessionWithAbort [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithClose >> BackupRestoreS3::TestAllPrimitiveTypes-TIMESTAMP64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL64 >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::TestListNodesEpochDeltas [GOOD] Test command err: 2025-06-25T15:00:27.207965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:27.208027Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TColumnShardTestSchema::OneColdTier [GOOD] >> TDynamicNameserverTest::TestCacheUsage >> TNodeBrokerTest::ExtendLeaseBumpVersion >> TNodeBrokerTest::TestListNodes [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TNodeBrokerTest::NodesMigrationExpiredChanged [GOOD] >> TNodeBrokerTest::UpdateNodesLog [GOOD] >> TNodeBrokerTest::NodesMigrationManyNodesInterrupted [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::OneColdTier [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864182.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=150864182.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864182.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130864182.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862982.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130862982.000000s;Name=;Codec=}; 2025-06-25T14:59:43.991284Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:44.023243Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:44.023530Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:44.031130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:44.031423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:44.031689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:44.031833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:44.031959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:44.032102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:44.032231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:44.032369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:44.032498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:44.032630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.032785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:44.062682Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:44.062843Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:44.062913Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:44.063109Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:44.063286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:44.063391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:44.063437Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:44.063541Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:44.063607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:44.063657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:44.063715Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:44.063901Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:44.063989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:44.064045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:44.064081Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:44.064185Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:44.064243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:44.064291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:44.064350Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:44.064420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:44.064464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:44.064496Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:44.064706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:44.064752Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:44.064803Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:44.065024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:44.065079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:44.065118Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:44.065272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:44.065323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.065355Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.065445Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:44.065537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:44.065579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:44.065610Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:44.065868Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=76; 2025-06-25T14:59:44.065978Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=54; 2025-06-25T14:59:44.0660 ... 
anData [1:869:2827]->[1:868:2826] 2025-06-25T15:00:30.984749Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:869:2827];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:30.786007Z;index_granules=0;index_portions=1;index_batches=522;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=4873744;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=4873744;selected_rows=0; 2025-06-25T15:00:30.984789Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:869:2827];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:30.984963Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:869:2827];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:30.985540Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 7 at tablet 9437184 2025-06-25T15:00:30.985725Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863620139:max} readable: {1750863620139:max} at tablet 9437184 2025-06-25T15:00:30.985840Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:00:30.985965Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863620139:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:30.986009Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863620139:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:30.986392Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863620139:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T15:00:30.986459Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863620139:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:00:30.986811Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863620139:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:876:2834];trace_detailed=; 2025-06-25T15:00:30.987121Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T15:00:30.987284Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:30.987412Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:30.987503Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:30.987668Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:30.987748Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:30.987830Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 
2025-06-25T15:00:30.987876Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:876:2834] finished for tablet 9437184 2025-06-25T15:00:30.988192Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:875:2833];stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863630986759,"name":"_full_task","f":1750863630986759,"d_finished":0,"c":0,"l":1750863630987927,"d":1168},"events":[{"name":"bootstrap","f":1750863630986900,"d_finished":623,"c":1,"l":1750863630987523,"d":623},{"a":1750863630987646,"name":"ack","f":1750863630987646,"d_finished":0,"c":0,"l":1750863630987927,"d":281},{"a":1750863630987630,"name":"processing","f":1750863630987630,"d_finished":0,"c":0,"l":1750863630987927,"d":297},{"name":"ProduceResults","f":1750863630987347,"d_finished":340,"c":2,"l":1750863630987853,"d":340},{"a":1750863630987857,"name":"Finish","f":1750863630987857,"d_finished":0,"c":0,"l":1750863630987927,"d":70}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:30.988246Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:875:2833];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:30.988549Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:875:2833];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ack","f_processing","f_ProduceResults"],"t":0},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863630986759,"name":"_full_task","f":1750863630986759,"d_finished":0,"c":0,"l":1750863630988287,"d":1528},"events":[{"name":"bootstrap","f":1750863630986900,"d_finished":623,"c":1,"l":1750863630987523,"d":623},{"a":1750863630987646,"name":"ack","f":1750863630987646,"d_finished":0,"c":0,"l":1750863630988287,"d":641},{"a":1750863630987630,"name":"processing","f":1750863630987630,"d_finished":0,"c":0,"l":1750863630988287,"d":657},{"name":"ProduceResults","f":1750863630987347,"d_finished":340,"c":2,"l":1750863630987853,"d":340},{"a":1750863630987857,"name":"Finish","f":1750863630987857,"d_finished":0,"c":0,"l":1750863630988287,"d":430}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:876:2834]->[1:875:2833] 2025-06-25T15:00:30.988617Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:30.986438Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:00:30.988648Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:30.988730Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 80000/4873744 0/0 >> TNodeBrokerTest::NodesMigration1000Nodes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::TestListNodes [GOOD] Test command err: 2025-06-25T15:00:26.107819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:26.107873Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-25T15:00:30.659390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:30.659447Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from to NODE_BROKER_ACTOR 2025-06-25T15:00:31.256534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:31.256580Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... 
waiting for cache miss (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 >> TColumnShardTestSchema::ForgetWithLostAnswer [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExpiredChanged [GOOD] Test command err: 2025-06-25T15:00:28.578778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:28.578839Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::UpdateNodesLog [GOOD] Test command err: 2025-06-25T15:00:27.923642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:27.923684Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR 2025-06-25T15:00:30.257802Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1026 not in range (1023, 1024] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationManyNodesInterrupted [GOOD] Test command err: 2025-06-25T15:00:27.941641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:27.941681Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for first batch is committed ... blocking NKikimr::TEvTablet::TEvCommitResult from TABLET_ACTOR to FLAT_EXECUTOR cookie 2 ... blocking NKikimr::TEvTablet::TEvCommitResult from TABLET_ACTOR to FLAT_EXECUTOR cookie 1 ... waiting for first batch is committed (done) >> TNodeBrokerTest::NodesMigrationExtendLease ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigration1000Nodes [GOOD] Test command err: 2025-06-25T15:00:28.405644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:28.405688Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ForgetWithLostAnswer [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864183.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=150864183.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864183.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130864183.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862983.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130862983.000000s;Name=;Codec=}; 2025-06-25T14:59:45.088754Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:45.108474Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:45.108660Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:45.114133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:45.114345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:45.114548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:45.114635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:45.114705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:45.114766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:45.114833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:45.114891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:45.114944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:45.115002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:45.115092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:45.134121Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:45.134227Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:45.134260Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:45.134382Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:45.134490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:45.134544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:45.134581Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:45.134638Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:45.134676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:45.134700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:45.134724Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:45.134842Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:45.134891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:45.134937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:45.134961Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:45.135012Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:45.135043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:45.135068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:45.135083Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:45.135109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:45.135157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:45.135174Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:45.135346Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:45.135383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:45.176087Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:45.176385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:45.176456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:45.176486Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:45.176590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:45.176616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:45.176635Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:45.176696Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:45.176763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:45.176810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:45.176837Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:45.177076Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=92; 2025-06-25T14:59:45.177149Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=38; 2025-06-25T14:59:45.1772 ... 
] 2025-06-25T15:00:31.680800Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:869:2827];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:31.201996Z;index_granules=0;index_portions=1;index_batches=522;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=4873744;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=4873744;selected_rows=0; 2025-06-25T15:00:31.680841Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:869:2827];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:31.681047Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:869:2827];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:31.681809Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 7 at tablet 9437184 2025-06-25T15:00:31.682022Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863621234:max} readable: {1750863621234:max} at tablet 9437184 2025-06-25T15:00:31.682133Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:00:31.682267Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863621234:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:31.682315Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863621234:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:31.682785Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863621234:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T15:00:31.682858Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863621234:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:00:31.683231Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863621234:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:876:2834];trace_detailed=; 2025-06-25T15:00:31.781124Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T15:00:31.781375Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:31.781538Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:31.781650Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:31.781910Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:31.782018Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:31.782084Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 
2025-06-25T15:00:31.782121Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:876:2834] finished for tablet 9437184 2025-06-25T15:00:31.782486Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:875:2833];stats={"p":[{"events":["f_bootstrap"],"t":0.097},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","f_ProduceResults","l_ProduceResults","f_Finish","l_Finish"],"t":0.098}],"full":{"a":1750863631683185,"name":"_full_task","f":1750863631683185,"d_finished":0,"c":0,"l":1750863631782177,"d":98992},"events":[{"name":"bootstrap","f":1750863631780770,"d_finished":908,"c":1,"l":1750863631781678,"d":908},{"a":1750863631781892,"name":"ack","f":1750863631781892,"d_finished":0,"c":0,"l":1750863631782177,"d":285},{"a":1750863631781879,"name":"processing","f":1750863631781879,"d_finished":0,"c":0,"l":1750863631782177,"d":298},{"name":"ProduceResults","f":1750863631781464,"d_finished":380,"c":2,"l":1750863631782102,"d":380},{"a":1750863631782105,"name":"Finish","f":1750863631782105,"d_finished":0,"c":0,"l":1750863631782177,"d":72}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:31.782572Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:875:2833];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:31.782974Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:875:2833];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0.097},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults","l_ProduceResults","f_Finish"],"t":0.098},{"events":["l_ack","l_processing","l_Finish"],"t":0.099}],"full":{"a":1750863631683185,"name":"_full_task","f":1750863631683185,"d_finished":0,"c":0,"l":1750863631782616,"d":99431},"events":[{"name":"bootstrap","f":1750863631780770,"d_finished":908,"c":1,"l":1750863631781678,"d":908},{"a":1750863631781892,"name":"ack","f":1750863631781892,"d_finished":0,"c":0,"l":1750863631782616,"d":724},{"a":1750863631781879,"name":"processing","f":1750863631781879,"d_finished":0,"c":0,"l":1750863631782616,"d":737},{"name":"ProduceResults","f":1750863631781464,"d_finished":380,"c":2,"l":1750863631782102,"d":380},{"a":1750863631782105,"name":"Finish","f":1750863631782105,"d_finished":0,"c":0,"l":1750863631782616,"d":511}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:876:2834]->[1:875:2833] 2025-06-25T15:00:31.783094Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:31.682835Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:00:31.783157Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:31.881291Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 80000/4873744 0/0 >> BackupPathTest::ImportFilterByYdbObjectPath [GOOD] >> TDynamicNameserverTest::TestCacheUsage [GOOD] >> TDynamicNameserverTest::ListNodesCacheWhenNoChanges-EnableNodeBrokerDeltaProtocol-true >> TNodeBrokerTest::ExtendLeaseSetLocationInOneRegistration [GOOD] >> TNodeBrokerTest::NodesMigrationRemovedChanged [GOOD] >> TNodeBrokerTest::ShiftIdRangeRemoveNew [GOOD] >> TDynamicNameserverTest::ListNodesCacheWhenNoChanges-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TDynamicNameserverTest::BasicFunctionality-EnableNodeBrokerDeltaProtocol-false [GOOD] >> GracefulShutdown::TTxGracefulShutdown >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-BOOL >> TNodeBrokerTest::NodesMigrationExtendLeaseThenRemove [GOOD] >> TNodeBrokerTest::ExtendLeasePipelining [GOOD] >> TNodeBrokerTest::NodesMigrationNewExpiredNode ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ExtendLeaseSetLocationInOneRegistration [GOOD] Test command err: 2025-06-25T15:00:30.143427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:30.143477Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationRemovedChanged [GOOD] Test command err: 2025-06-25T15:00:29.335056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:29.335105Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TNodeBrokerTest::NodesMigrationReuseID >> TNodeBrokerTest::MinDynamicNodeIdShifted >> TNodeBrokerTest::SyncNodes >> TNodeBrokerTest::NodesMigrationRemoveExpired ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ShiftIdRangeRemoveNew [GOOD] Test command err: 2025-06-25T15:00:30.232973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:30.233027Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:31.481206Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1026 not in range (1023, 1025] >> BackupRestore::TestAllPrimitiveTypes-INTERVAL [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATE32 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::ListNodesCacheWhenNoChanges-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-25T15:00:31.807399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:31.807462Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:32.998111Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:32.998159Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExtendLeaseThenRemove [GOOD] Test command err: 2025-06-25T15:00:31.241066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:31.241120Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) |91.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/executer_actor/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ExtendLeasePipelining [GOOD] Test command err: 2025-06-25T15:00:30.450616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:30.450659Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR >> TNodeBrokerTest::NodesMigration999Nodes >> TEnumerationTest::TestPublish [GOOD] >> TLocalTests::TestAddTenant >> BackupPathTest::EncryptedImportWithoutCommonPrefix |91.4%| [TA] $(B)/ydb/core/kqp/executer_actor/ut/test-results/unittest/{meta.json ... results_accumulator.log} |91.4%| [TA] {RESULT} $(B)/ydb/core/kqp/executer_actor/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> EncryptedExportTest::TopicEncryption [GOOD] >> TNodeBrokerTest::NodesV2BackMigrationShiftIdRange >> TNodeBrokerTest::ExtendLeaseRestartRace >> TLocalTests::TestAddTenant [GOOD] >> TNodeBrokerTest::ExtendLeaseBumpVersion [GOOD] >> TNodeBrokerTest::EpochCacheUpdate >> TLocalTests::TestRemoveTenantWhileResolving >> TNodeBrokerTest::ShiftIdRangeRemoveReusedID >> TNodeBrokerTest::UpdateNodesLogEmptyEpoch >> TTenantPoolTests::TestForcedSensorLabelsForStaticConfig ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TLocalTests::TestAddTenant [GOOD] Test command err: 2025-06-25T15:00:34.355207Z node 1 :LOCAL ERROR: local.cpp:1293: TDomainLocal(dc-1): Receive TEvDescribeSchemeResult with bad status StatusPathDoesNotExist reason is <> while resolving subdomain dc-1 2025-06-25T15:00:34.355405Z node 1 :LOCAL ERROR: local.cpp:1543: Unknown domain dc-3 >> TNodeBrokerTest::NodesMigrationExtendLease [GOOD] >> TNodeBrokerTest::NodesMigrationNewActiveNode >> EncryptedExportTest::ViewEncryption >> YdbTableBulkUpsert::AsyncIndexShouldSucceed [GOOD] >> TLocalTests::TestRemoveTenantWhileResolving [GOOD] >> TNodeBrokerTest::BasicFunctionality ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExtendLease [GOOD] Test command err: 2025-06-25T15:00:32.994308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:32.994384Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> GracefulShutdown::TTxGracefulShutdown [GOOD] >> TNodeBrokerTest::NodesMigrationReuseID [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> GracefulShutdown::TTxGracefulShutdown [GOOD] Test command err: 2025-06-25T15:00:30.155456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:30.155496Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:30.431527Z node 1 :NODE_BROKER ERROR: node_broker.cpp:797: [Dirty] Configured lease duration (10.000000s) is too small. Using min. value: 300.000000s 2025-06-25T15:00:30.443484Z node 1 :NODE_BROKER ERROR: node_broker.cpp:797: [Committed] Configured lease duration (10.000000s) is too small. Using min. value: 300.000000s 2025-06-25T15:00:33.583463Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:33.583504Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TNodeBrokerTest::NodesMigrationNewExpiredNode [GOOD] >> TNodeBrokerTest::SyncNodes [GOOD] >> TTenantPoolTests::TestForcedSensorLabelsForStaticConfig [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-INTERVAL64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-STRING ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/ut/unittest >> YdbTableBulkUpsert::AsyncIndexShouldSucceed [GOOD] Test command err: 2025-06-25T15:00:13.821480Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901957629335709:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:13.821575Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185d/r3tmp/tmpok3pVp/pdisk_1.dat 2025-06-25T15:00:14.122919Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19228, node 1 2025-06-25T15:00:14.196071Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:14.196090Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:14.196098Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:14.196229Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:00:14.214269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:14.214362Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:14.216594Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27370 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:00:14.462807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:14.833230Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:16.050362Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901970514238547:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:16.050422Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:16.553528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:16.656789Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901970514238729:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:16.656851Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:16.656859Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901970514238734:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:16.659360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:16.672524Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901970514238736:2311], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T15:00:16.771650Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901970514238807:2783] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:16.823007Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyksn4ht3qj2q4pa5cg664na, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjEzOGU3NjQtNDczMzRlNGMtYzg1MGRmYzItYjI3MjM3OTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:00:16.958192Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyksn4ht3qj2q4pa5cg664na, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWZiNzEyYmItNWEyYWJhOTEtZWQwN2JiNDgtYTEwYTA0MzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:00:16.996005Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyksn4ht3qj2q4pa5cg664na, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTk1NDEzYmItY2RjNWYyMjEtNjRmNTUzNi02ZmQ4YTQxMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:00:17.101874Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jyksn4ht3qj2q4pa5cg664na, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2VmYjdlNzAtYjIxZTZjMGEtNTY3MTUzMmMtN2I2NmZiNGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:00:18.339615Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519901983073524426:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:18.339677Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185d/r3tmp/tmpDNdLP9/pdisk_1.dat 2025-06-25T15:00:18.452045Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:18.469395Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:18.469456Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:18.476500Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24209, node 4 2025-06-25T15:00:18.527435Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:18.527459Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:18.527467Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:18.527621Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61830 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:18.736524Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:19.348100Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:21.075579Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519901995958427294:2296], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:21.075645Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't h ... ssifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:22.987995Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:22.988002Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:22.988161Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25464 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:23.201495Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:23.841921Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:25.183476Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Decimal(22,9) value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Date value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Datetime value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Timestamp value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Interval value CLIENT_INTERNAL_ERROR
: Error: GRpc error: (13): Unable to parse request
: Error: Grpc error response on endpoint localhost:3190 BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Yson value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid Json value BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid JSON for JsonDocument provided: TAPE_ERROR: The JSON document has an improper structure: missing or superfluous commas, braces, missing keys, etc. This is a fatal and unrecoverable error. BAD_REQUEST
: Error: Bulk upsert to table '/Root/TestInvalidData' Invalid DyNumber string representation 2025-06-25T15:00:26.591106Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519902015992152808:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:26.591184Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185d/r3tmp/tmpt1Tqjd/pdisk_1.dat 2025-06-25T15:00:26.723716Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:26.740543Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:26.740639Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:26.748053Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3255, node 10 2025-06-25T15:00:26.815386Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:26.815411Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:26.815418Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:26.815581Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3596 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:27.074294Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:00:27.599704Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:29.011827Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664)
: Error: Bulk upsert to table '/Root/ui8' Only async-indexed tables are supported by BulkUpsert
: Error: Bulk upsert to table '/Root/ui8/Value_index/indexImplTable' unknown table 2025-06-25T15:00:30.456231Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7519902034098015445:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:30.456331Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00185d/r3tmp/tmpzqRFUc/pdisk_1.dat 2025-06-25T15:00:30.547185Z node 13 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:30.566942Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:30.567029Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:30.571299Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23240, node 13 2025-06-25T15:00:30.610016Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:30.610035Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:30.610040Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:30.610154Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12661 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:30.773027Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:00:31.465098Z node 13 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:33.050493Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:34.182924Z node 13 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill
: Error: Bulk upsert to table '/Root/ui8/Value_index/indexImplTable' unknown table ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationNewExpiredNode [GOOD] Test command err: 2025-06-25T15:00:33.824961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:33.825016Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-false ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::SyncNodes [GOOD] Test command err: 2025-06-25T15:00:34.115652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:34.115725Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationReuseID [GOOD] Test command err: 2025-06-25T15:00:33.797621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:33.797670Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TTenantPoolTests::TestForcedSensorLabelsForStaticConfig [GOOD] Test command err: 2025-06-25T15:00:35.406743Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T15:00:35.407106Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/001b84/r3tmp/tmpxNuOSx/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T15:00:35.407512Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/001b84/r3tmp/tmpxNuOSx/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001b84/r3tmp/tmpxNuOSx/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 10594892197723042938 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T15:00:35.411743Z node 3 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T15:00:35.412167Z node 3 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/001b84/r3tmp/tmpxNuOSx/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T15:00:35.412366Z node 3 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/001b84/r3tmp/tmpxNuOSx/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/001b84/r3tmp/tmpxNuOSx/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 10260371048740874338 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 >> TNodeBrokerTest::Test1000NodesSubscribers >> TNodeBrokerTest::MinDynamicNodeIdShifted [GOOD] >> BackupRestore::RestoreViewDependentOnAnotherView [GOOD] >> BackupRestore::RestoreKesusResources >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-true >> TDynamicNameserverTest::CacheMissSimpleDeadline-EnableNodeBrokerDeltaProtocol-false >> TNodeBrokerTest::NodesMigrationRemoveExpired [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::MinDynamicNodeIdShifted [GOOD] Test command err: 2025-06-25T15:00:33.929160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:33.929216Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::TestRandomActions >> TDynamicNameserverTest::BasicFunctionality-EnableNodeBrokerDeltaProtocol-true >> BackupPathTest::EncryptedImportWithoutCommonPrefix [GOOD] >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TNodeBrokerTest::NodesMigrationNewActiveNode [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationRemoveExpired [GOOD] Test command err: 2025-06-25T15:00:34.109290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:34.109361Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TNodeBrokerTest::NodesMigration2000Nodes >> TSlotIndexesPoolTest::Ranges [GOOD] >> TDynamicNameserverTest::CacheMissSimpleDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-true >> TNodeBrokerTest::NodesMigrationReuseIDThenExtendLease >> TColumnShardTestSchema::ExportWithLostAnswer [GOOD] >> TNodeBrokerTest::NodesMigration999Nodes [GOOD] >> TNodeBrokerTest::EpochCacheUpdate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-25T15:00:36.443832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:36.443879Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) 2025-06-25T15:00:37.045906Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:37.045966Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationNewActiveNode [GOOD] Test command err: 2025-06-25T15:00:35.534619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:35.534683Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::UpdateNodesLogEmptyEpoch [GOOD] |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TSlotIndexesPoolTest::Ranges [GOOD] >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TNodeBrokerTest::NodesV2BackMigrationShiftIdRange [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::EpochCacheUpdate [GOOD] Test command err: 2025-06-25T15:00:32.053651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:32.053709Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-25T15:00:35.012155Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:35.012202Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ExportWithLostAnswer [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864189.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=150864189.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864189.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130864189.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862989.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130862989.000000s;Name=;Codec=}; 2025-06-25T14:59:51.702236Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:51.729217Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:51.729479Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:51.736211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:51.736451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:51.736698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:51.736833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:51.736943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:51.737047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:51.737163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:51.737266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:51.737390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:51.737497Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:51.737625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:51.765364Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:51.765504Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:51.765557Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:51.765714Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:51.765899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:51.765985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:51.766031Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:51.766109Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:51.766163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:51.766202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:51.766244Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:51.766390Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:51.766465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:51.766516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:51.766542Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:51.766620Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:51.766672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:51.766710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:51.766736Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:51.766781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:51.766815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:51.766839Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:51.767016Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:51.767054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:51.767088Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:51.767256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:51.767302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:51.767333Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:51.767461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:51.767507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:51.767535Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:51.767601Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:51.767655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:51.767688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:51.767714Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:51.767942Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=51; 2025-06-25T14:59:51.768032Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=43; 2025-06-25T14:59:51.7680 ... 
Got TEvKqpCompute::TEvScanData [1:869:2827]->[1:868:2826] 2025-06-25T15:00:37.606338Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:869:2827];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:37.409952Z;index_granules=0;index_portions=1;index_batches=522;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=4873744;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=4873744;selected_rows=0; 2025-06-25T15:00:37.606377Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:869:2827];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:37.606534Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:869:2827];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:37.607016Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 7 at tablet 9437184 2025-06-25T15:00:37.607149Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863627851:max} readable: {1750863627851:max} at tablet 9437184 2025-06-25T15:00:37.607234Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:00:37.607334Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863627851:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:37.607372Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863627851:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:37.607730Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863627851:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T15:00:37.607807Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863627851:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:00:37.608144Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863627851:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:876:2834];trace_detailed=; 2025-06-25T15:00:37.608407Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T15:00:37.608533Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:37.608628Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:37.608712Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:37.608847Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:37.608901Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:37.608951Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 
2025-06-25T15:00:37.608973Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:876:2834] finished for tablet 9437184 2025-06-25T15:00:37.609257Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:875:2833];stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ack","l_ack","f_processing","l_processing","f_ProduceResults","l_ProduceResults","f_Finish","l_Finish"],"t":0}],"full":{"a":1750863637608102,"name":"_full_task","f":1750863637608102,"d_finished":0,"c":0,"l":1750863637609017,"d":915},"events":[{"name":"bootstrap","f":1750863637608212,"d_finished":520,"c":1,"l":1750863637608732,"d":520},{"a":1750863637608834,"name":"ack","f":1750863637608834,"d_finished":0,"c":0,"l":1750863637609017,"d":183},{"a":1750863637608824,"name":"processing","f":1750863637608824,"d_finished":0,"c":0,"l":1750863637609017,"d":193},{"name":"ProduceResults","f":1750863637608587,"d_finished":246,"c":2,"l":1750863637608964,"d":246},{"a":1750863637608965,"name":"Finish","f":1750863637608965,"d_finished":0,"c":0,"l":1750863637609017,"d":52}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:37.609334Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:875:2833];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:37.609596Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:875:2833];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ack","f_processing","f_ProduceResults","l_ProduceResults","f_Finish"],"t":0},{"events":["l_ack","l_processing","l_Finish"],"t":0.001}],"full":{"a":1750863637608102,"name":"_full_task","f":1750863637608102,"d_finished":0,"c":0,"l":1750863637609368,"d":1266},"events":[{"name":"bootstrap","f":1750863637608212,"d_finished":520,"c":1,"l":1750863637608732,"d":520},{"a":1750863637608834,"name":"ack","f":1750863637608834,"d_finished":0,"c":0,"l":1750863637609368,"d":534},{"a":1750863637608824,"name":"processing","f":1750863637608824,"d_finished":0,"c":0,"l":1750863637609368,"d":544},{"name":"ProduceResults","f":1750863637608587,"d_finished":246,"c":2,"l":1750863637608964,"d":246},{"a":1750863637608965,"name":"Finish","f":1750863637608965,"d_finished":0,"c":0,"l":1750863637609368,"d":403}],"id":"9437184::8"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:876:2834]->[1:875:2833] 2025-06-25T15:00:37.609655Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:37.607786Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:00:37.609683Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:37.609747Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:876:2834];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 80000/4873744 0/0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigration999Nodes [GOOD] Test command err: 2025-06-25T15:00:34.411668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:34.411758Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::NodeNameReuseRestart ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::UpdateNodesLogEmptyEpoch [GOOD] Test command err: 2025-06-25T15:00:35.355446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:35.355489Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-25T15:00:37.374382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:37.374434Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) 2025-06-25T15:00:37.931058Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:37.931115Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... 
waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 >> TNodeBrokerTest::ShiftIdRangeRemoveActive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesV2BackMigrationShiftIdRange [GOOD] Test command err: 2025-06-25T15:00:34.940676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:34.940718Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:36.866119Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1150: [DB] Removing node with wrong ID 1025 not in range (1023, 1024] ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR >> BackupRestore::TestAllPrimitiveTypes-DATE32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATETIME64 >> BackupPathTest::ExplicitDuplicatedItems >> TNodeBrokerTest::ShiftIdRangeRemoveReusedID [GOOD] >> TNodeBrokerTest::ExtendLeaseRestartRace [GOOD] >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-true >> BackupRestoreS3::TestAllPrimitiveTypes-BOOL [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DOUBLE >> TColumnShardTestSchema::RebootHotTiersTtl [GOOD] >> TLocalTests::TestAlterTenant >> TNodeBrokerTest::NoEffectBeforeCommit >> EncryptedExportTest::ViewEncryption [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ShiftIdRangeRemoveReusedID [GOOD] Test command err: 2025-06-25T15:00:35.416014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:35.416085Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:37.437185Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1026 not in range (1023, 1025] >> TNodeBrokerTest::NodesMigrationExpireRemoved >> TSlotIndexesPoolTest::Expansion [GOOD] >> TNodeBrokerTest::NodesAlreadyMigrated ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ExtendLeaseRestartRace [GOOD] Test command err: 2025-06-25T15:00:34.965983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:34.966027Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... rebooting node broker ... OnActivateExecutor tabletId# 72057594037936129 ... captured cache request ... sending extend lease request ... captured cache request ... captured cache request ... waiting for response ... 
waiting for epoch update ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootHotTiersTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864198.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864198.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864198.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864198.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130864198.000000s;Name=;Codec=}; WaitEmptyAfter=1;Tiers={{Column=timestamp;EvictAfter=150864198.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864198.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130862998.000000s;Name=;Codec=}; 2025-06-25T14:59:59.130217Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:59.154658Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:59.154908Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:59.161004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:59.161209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:59.161424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:59.161538Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:59.161628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 
2025-06-25T14:59:59.161726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:59.161825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:59.161929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:59.162028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:59.162120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:59.162221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:59.181956Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:59.182107Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:59.182172Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:59.182330Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:59.182464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:59.182540Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:59.182581Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:59.182657Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:59.182716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:59.182754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:59.182791Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:59.182946Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:59.183012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:59.183055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:59.183082Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:59.183165Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:59.183213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:59.183252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:59.183278Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:59.183339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:59.183375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:59.183408Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:59.183599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:59.183644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:59.183672Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:59.183848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:59.183905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:59.183942Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:59.184056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:59.184103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:59.184135Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:59.184225Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:59.184295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:59.184360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.c ... ECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=4435; 2025-06-25T15:00:38.871154Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=8; 2025-06-25T15:00:38.871302Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=113; 2025-06-25T15:00:38.871329Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=4748; 2025-06-25T15:00:38.871360Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=4831; 2025-06-25T15:00:38.871405Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=8; 2025-06-25T15:00:38.871503Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=66; 2025-06-25T15:00:38.871528Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=5284; 2025-06-25T15:00:38.871614Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=50; 2025-06-25T15:00:38.871690Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=44; 2025-06-25T15:00:38.871781Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=59; 2025-06-25T15:00:38.871868Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=58; 2025-06-25T15:00:38.873979Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2065; 2025-06-25T15:00:38.875930Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=1899; 2025-06-25T15:00:38.875987Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=10; 2025-06-25T15:00:38.876033Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=7; 2025-06-25T15:00:38.876064Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=5; 2025-06-25T15:00:38.876119Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=29; 2025-06-25T15:00:38.876154Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=5; 2025-06-25T15:00:38.876211Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=37; 2025-06-25T15:00:38.876239Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=4; 2025-06-25T15:00:38.876283Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=22; 2025-06-25T15:00:38.876375Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=65; 2025-06-25T15:00:38.876573Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=168; 2025-06-25T15:00:38.876599Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=15221; 2025-06-25T15:00:38.876697Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=24365192;raw_bytes=35131129;count=5;records=400000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:00:38.876776Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:00:38.876813Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:00:38.876858Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:00:38.888947Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-25T15:00:38.889057Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:00:38.889134Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-25T15:00:38.889186Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863336856;tx_id=18446744073709551615;;current_snapshot_ts=1750863600447; 2025-06-25T15:00:38.889219Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:00:38.889254Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:38.889279Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:38.889344Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:00:38.891076Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:00:38.891328Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:00:38.891352Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:00:38.891370Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:00:38.891403Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:00:38.891472Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-25T15:00:38.891519Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863336856;tx_id=18446744073709551615;;current_snapshot_ts=1750863600447; 2025-06-25T15:00:38.891555Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:00:38.891590Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:38.891621Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:38.891682Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1800:3660];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 80000/4886744 0/0 >> TNodeBrokerTest::BasicFunctionality [GOOD] >> TNodeBrokerTest::Test1001NodesSubscribers >> TLocalTests::TestAlterTenant [GOOD] >> TLocalTests::TestAddTenantWhileResolving >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-true [GOOD] >> 
TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-false >> TNodeBrokerTest::DoNotReuseDynnodeIdsBelowMinDynamicNodeId |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TSlotIndexesPoolTest::Expansion [GOOD] >> TNodeBrokerTest::NodesMigrationReuseIDThenExtendLease [GOOD] >> TNodeBrokerTest::FixedNodeId ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::BasicFunctionality [GOOD] Test command err: 2025-06-25T15:00:35.778220Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:35.778288Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:37.153330Z node 2 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host1:1001: WRONG_REQUEST: Another location is registered for host1:1001 2025-06-25T15:00:37.166879Z node 2 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host4:1001: ERROR_TEMP: No free node IDs 2025-06-25T15:00:37.167278Z node 2 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Node has expired 2025-06-25T15:00:37.167690Z node 2 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Node has expired >> TLocalTests::TestAddTenantWhileResolving [GOOD] >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationReuseIDThenExtendLease [GOOD] Test command err: 2025-06-25T15:00:38.252051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:38.252100Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> ImportBigEncryptedFileTest::ImportBigEncryptedFile >> TNodeBrokerTest::ShiftIdRangeRemoveExpired >> TNodeBrokerTest::NodeNameReuseRestart [GOOD] >> TDynamicNameserverTest::BasicFunctionality-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-false >> TNodeBrokerTest::NodesMigrationRemoveActive |91.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TLocalTests::TestAddTenantWhileResolving [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissSameDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] Test command err: 2025-06-25T15:00:39.612396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:39.612452Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... 
waiting for cache miss (done) 2025-06-25T15:00:40.243139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:40.243215Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) >> TNodeBrokerTest::NodesMigrationExpireActive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodeNameReuseRestart [GOOD] Test command err: 2025-06-25T15:00:39.075092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:39.075145Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:39.365467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 >> TTenantPoolTests::TestStateStatic >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TNodeBrokerTest::NoEffectBeforeCommit [GOOD] >> TNodeBrokerTest::NodesV2BackMigrationManyNodesInterrupted |91.5%| [TA] $(B)/ydb/services/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> BackupRestoreS3::TestAllPrimitiveTypes-STRING [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-JSON ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-false [GOOD] Test command err: 2025-06-25T15:00:37.910205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:37.910254Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:38.187968Z node 1 :NODE_BROKER ERROR: node_broker.cpp:797: [Dirty] Configured lease duration (10.000000s) is too small. Using min. value: 300.000000s 2025-06-25T15:00:38.199867Z node 1 :NODE_BROKER ERROR: node_broker.cpp:797: [Committed] Configured lease duration (10.000000s) is too small. Using min. value: 300.000000s 2025-06-25T15:00:41.201162Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:41.201229Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for cache miss ... 
blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) >> TTenantPoolTests::TestStateStatic [GOOD] >> TNodeBrokerTest::NodesMigrationReuseExpiredID ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NoEffectBeforeCommit [GOOD] Test command err: 2025-06-25T15:00:39.839315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:39.839372Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for commit ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... waiting for commit (done) ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR >> TNodeBrokerTest::ShiftIdRangeRemoveActive [GOOD] >> TSlotIndexesPoolTest::Basic [GOOD] >> TNodeBrokerTest::ListNodesEpochDeltasPersistance >> TNodeBrokerTest::FixedNodeId [GOOD] >> TNodeBrokerTest::NodeNameExpiration >> BackupRestore::RestoreKesusResources [GOOD] >> TFlatTest::SplitEmptyToMany [GOOD] >> BackupRestore::RestoreReplicationWithoutSecret >> BackupPathTest::ExplicitDuplicatedItems [GOOD] >> TNodeBrokerTest::NodesMigration2000Nodes [GOOD] >> TFlatTest::SplitEmptyTwice >> PersQueueSdkReadSessionTest::ReadSessionWithClose [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted >> TColumnShardTestSchema::TTL-Reboot-Internal+FirstPkColumn [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TTenantPoolTests::TestStateStatic [GOOD] |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TSlotIndexesPoolTest::Basic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ShiftIdRangeRemoveActive [GOOD] Test command err: 2025-06-25T15:00:39.455475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:39.455540Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:40.643203Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1026 not in range (1023, 1025] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::FixedNodeId [GOOD] Test command err: 2025-06-25T15:00:40.752642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:40.752702Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TNodeBrokerTest::NodesMigrationExpireRemoved [GOOD] >> TNodeBrokerTest::NodesMigrationRemoveActive [GOOD] >> TNodeBrokerTest::NodesMigrationSetLocation >> TColumnShardTestSchema::EnableColdTiersAfterTtl [GOOD] >> TNodeBrokerTest::SubscribeToNodes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigration2000Nodes [GOOD] Test command err: 2025-06-25T15:00:38.198192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:38.198286Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::NodesAlreadyMigrated [GOOD] >> TNodeBrokerTest::NodesMigrationExpireActive [GOOD] >> BackupPathTest::ExportUnexistingExplicitPath >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-true >> TNodeBrokerTest::ShiftIdRangeRemoveExpired [GOOD] >> TNodeBrokerTest::NodesMigration1001Nodes >> TNodeBrokerTest::NodesMigrationNodeName >> TNodeBrokerTest::UpdateEpochPipelining ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationRemoveActive [GOOD] Test command err: 2025-06-25T15:00:41.590840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:41.590898Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL-Reboot-Internal+FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-25T15:00:11.732656Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:00:11.737080Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:00:11.737499Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:00:11.764849Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:00:11.765123Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:00:11.772530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:00:11.772751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:00:11.772991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:00:11.773121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:00:11.773240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:00:11.773351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:00:11.773463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:00:11.773558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:00:11.773668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:00:11.773772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:00:11.773865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:00:11.776645Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:00:11.799015Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:00:11.799162Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:00:11.799215Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:00:11.799384Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:11.799567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:00:11.799651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:00:11.799693Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:00:11.799803Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:00:11.799887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:00:11.799942Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:00:11.799971Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:00:11.800143Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:11.800208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:00:11.800254Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:00:11.800284Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:00:11.800411Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:00:11.800470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:00:11.800519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:00:11.800560Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:00:11.800615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:00:11.800647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:00:11.800685Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:00:11.800860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:00:11.800898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:00:11.800932Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:00:11.801131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:00:11.801195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:00:11.801225Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:00:11.801397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:00:11.801447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:00:11.801482Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:00:11.801548Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:00:11.801625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:00:11.801663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:00:11.801699Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:00:11.801921Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=45; 2025-06-25T15:00:11.802026Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=55; 2025-06-25T15:00:11.802103Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-25T15:00:11.802190Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=47; 2025-06-25T15:00:11.802286Z node 1 :T ... 
de 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=90; 2025-06-25T15:00:42.968380Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:00:42.968478Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:42.968528Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-25T15:00:42.968566Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:00:42.968920Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:42.969098Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=timestamp: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:42.969146Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:00:42.969305Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=1000; 2025-06-25T15:00:42.969380Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=timestamp; 
2025-06-25T15:00:42.969701Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:478:2482];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=timestamp: uint64; 2025-06-25T15:00:42.969856Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:42.969999Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:42.970119Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:42.970304Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:42.970451Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:42.970588Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:42.970637Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [5:479:2483] finished for tablet 9437184 2025-06-25T15:00:42.971239Z node 5 :TX_COLUMNSHARD_SCAN INFO: 
log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:478:2482];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["l_task_result"],"t":0.014},{"events":["f_ack"],"t":0.015},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.016}],"full":{"a":1750863642953885,"name":"_full_task","f":1750863642953885,"d_finished":0,"c":0,"l":1750863642970701,"d":16816},"events":[{"name":"bootstrap","f":1750863642954124,"d_finished":2978,"c":1,"l":1750863642957102,"d":2978},{"a":1750863642970283,"name":"ack","f":1750863642968890,"d_finished":1256,"c":1,"l":1750863642970146,"d":1674},{"a":1750863642970262,"name":"processing","f":1750863642958286,"d_finished":8269,"c":8,"l":1750863642970151,"d":8708},{"name":"ProduceResults","f":1750863642955778,"d_finished":2775,"c":11,"l":1750863642970619,"d":2775},{"a":1750863642970623,"name":"Finish","f":1750863642970623,"d_finished":0,"c":0,"l":1750863642970701,"d":78},{"name":"task_result","f":1750863642958317,"d_finished":6861,"c":7,"l":1750863642968693,"d":6861}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:42.971333Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:478:2482];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:42.971860Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:478:2482];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["l_task_result"],"t":0.014},{"events":["f_ack"],"t":0.015},{"events":["l_ProduceResults","f_Finish"],"t":0.016},{"events":["l_ack","l_processing","l_Finish"],"t":0.017}],"full":{"a":1750863642953885,"name":"_full_task","f":1750863642953885,"d_finished":0,"c":0,"l":1750863642971388,"d":17503},"events":[{"name":"bootstrap","f":1750863642954124,"d_finished":2978,"c":1,"l":1750863642957102,"d":2978},{"a":1750863642970283,"name":"ack","f":1750863642968890,"d_finished":1256,"c":1,"l":1750863642970146,"d":2361},{"a":1750863642970262,"name":"processing","f":1750863642958286,"d_finished":8269,"c":8,"l":1750863642970151,"d":9395},{"name":"ProduceResults","f":1750863642955778,"d_finished":2775,"c":11,"l":1750863642970619,"d":2775},{"a":1750863642970623,"name":"Finish","f":1750863642970623,"d_finished":0,"c":0,"l":1750863642971388,"d":765},{"name":"task_result","f":1750863642958317,"d_finished":6861,"c":7,"l":1750863642968693,"d":6861}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:42.971969Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:42.952182Z;index_granules=0;index_portions=1;index_batches=10;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59184;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59184;selected_rows=0; 2025-06-25T15:00:42.972021Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:42.972364Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:479:2483];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesAlreadyMigrated [GOOD] Test command err: 2025-06-25T15:00:40.239497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:40.239544Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... 
unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExpireRemoved [GOOD] Test command err: 2025-06-25T15:00:40.105543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:40.105589Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationExpireActive [GOOD] Test command err: 2025-06-25T15:00:41.960602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:41.960646Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::EnableColdTiersAfterTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=150864184.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864184.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864184.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150864184.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864184.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864184.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150864184.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862984.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130864184.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130864184.000000s;Name=;Codec=}; 
WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862984.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130862984.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130862984.000000s;Name=;Codec=}; 2025-06-25T14:59:44.759158Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:44.786529Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:44.786846Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:44.794267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:44.794554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:44.794806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:44.794923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:44.795022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:44.795132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:44.795238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:44.795341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:44.795444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:44.795556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.795682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:44.822527Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:44.822667Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:44.822737Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:44.822917Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:44.823065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:44.823159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:44.823201Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:44.823303Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:44.823362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:44.823403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:44.823442Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:44.823623Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:44.823690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:44.823731Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:44.823759Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:44.823854Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:44.823903Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:44.823941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:44.823966Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:44.824023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:44.824059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:44.824088Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:44.824277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:44.824342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:44.824384Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:44.824557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:44.824601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:44.824646Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:44.824766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:44.824818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.824847Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:44.824923Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:44.824989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cp ... 
HARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:00:43.430414Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:43.430460Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:43.430567Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:00:43.430782Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863633078:max} readable: {1750863633078:max} at tablet 9437184 2025-06-25T15:00:43.430917Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:00:43.431088Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863633078:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:43.431148Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863633078:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:43.431663Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863633078:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T15:00:43.431775Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863633078:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:00:43.432242Z node 1 :TX_COLUMNSHARD_SCAN 
DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863633078:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:1441:3385];trace_detailed=; 2025-06-25T15:00:43.432645Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T15:00:43.432856Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:43.433030Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:43.433153Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:43.433465Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:43.433587Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:43.433674Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:43.433710Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1441:3385] finished for tablet 9437184 2025-06-25T15:00:43.434080Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1440:3384];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863643432176,"name":"_full_task","f":1750863643432176,"d_finished":0,"c":0,"l":1750863643433770,"d":1594},"events":[{"name":"bootstrap","f":1750863643432384,"d_finished":805,"c":1,"l":1750863643433189,"d":805},{"a":1750863643433438,"name":"ack","f":1750863643433438,"d_finished":0,"c":0,"l":1750863643433770,"d":332},{"a":1750863643433416,"name":"processing","f":1750863643433416,"d_finished":0,"c":0,"l":1750863643433770,"d":354},{"name":"ProduceResults","f":1750863643432956,"d_finished":428,"c":2,"l":1750863643433694,"d":428},{"a":1750863643433697,"name":"Finish","f":1750863643433697,"d_finished":0,"c":0,"l":1750863643433770,"d":73}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:43.434159Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1440:3384];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:43.434565Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1440:3384];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750863643432176,"name":"_full_task","f":1750863643432176,"d_finished":0,"c":0,"l":1750863643434204,"d":2028},"events":[{"name":"bootstrap","f":1750863643432384,"d_finished":805,"c":1,"l":1750863643433189,"d":805},{"a":1750863643433438,"name":"ack","f":1750863643433438,"d_finished":0,"c":0,"l":1750863643434204,"d":766},{"a":1750863643433416,"name":"processing","f":1750863643433416,"d_finished":0,"c":0,"l":1750863643434204,"d":788},{"name":"ProduceResults","f":1750863643432956,"d_finished":428,"c":2,"l":1750863643433694,"d":428},{"a":1750863643433697,"name":"Finish","f":1750863643433697,"d_finished":0,"c":0,"l":1750863643434204,"d":507}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1441:3385]->[1:1440:3384] 2025-06-25T15:00:43.434689Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:43.431745Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:00:43.434730Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:43.434836Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 160000/9752224 160000/9752224 160000/9752224 80000/4886744 0/0 |91.6%| [TA] {RESULT} $(B)/ydb/services/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TNodeBrokerTest::DoNotReuseDynnodeIdsBelowMinDynamicNodeId [GOOD] >> TNodeBrokerTest::ConfigPipelining ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ShiftIdRangeRemoveExpired [GOOD] Test command err: 2025-06-25T15:00:41.554560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:41.554621Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-25T15:00:43.128048Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1026 not in range (1023, 1025] >> TDynamicNameserverTest::ListNodesCacheWhenNoChanges-EnableNodeBrokerDeltaProtocol-false >> BackupRestore::TestAllPrimitiveTypes-DATETIME64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INTERVAL64 >> TDynamicNameserverTest::CacheMissDifferentDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-false >> TNodeBrokerTest::NodesMigrationReuseExpiredID [GOOD] >> TNodeBrokerTest::ListNodesEpochDeltasPersistance [GOOD] >> TDynamicNameserverTest::ListNodesCacheWhenNoChanges-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TDynamicNameserverTest::CacheMissSimpleDeadline-EnableNodeBrokerDeltaProtocol-true >> TNodeBrokerTest::NodesMigrationSetLocation [GOOD] >> TSlotIndexesPoolTest::Init [GOOD] >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-false [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationReuseExpiredID [GOOD] Test command err: 2025-06-25T15:00:42.646873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:42.646950Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::SeveralNodesSubscribersPerPipe >> TNodeBrokerTest::Test999NodesSubscribers >> TNodeBrokerTest::SingleDomainModeBannedIds >> TNodeBrokerTest::LoadStateMoveEpoch >> TNodeBrokerTest::NodesSubscriberDisconnect ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ListNodesEpochDeltasPersistance [GOOD] Test command err: 2025-06-25T15:00:43.102685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:43.102745Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::NodesV2BackMigration ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationSetLocation [GOOD] Test command err: 2025-06-25T15:00:44.164640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:44.164697Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) |91.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TSlotIndexesPoolTest::Init [GOOD] >> TDynamicNameserverTest::CacheMissSimpleDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DOUBLE [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATE ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissDifferentDeadlineInverseOrder-EnableNodeBrokerDeltaProtocol-false [GOOD] Test command err: 2025-06-25T15:00:45.236798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:45.236869Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... 
waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 2025-06-25T15:00:45.996355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:45.996409Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode from NAMESERVICE to NODE_BROKER_ACTOR cookie 0 ... waiting for cache miss (done) >> TNodeBrokerTest::NodeNameExpiration [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissSimpleDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-25T15:00:45.709167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:45.709231Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:46.337116Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:46.337185Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ... waiting for cache miss ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodeNameExpiration [GOOD] Test command err: 2025-06-25T15:00:43.264382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:43.264426Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-25T15:00:43.546533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 101:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) FAKE_COORDINATOR: Add transaction: 101 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000001 FAKE_COORDINATOR: Erasing txId 101 >> TNodeBrokerTest::ConfigPipelining [GOOD] >> TFlatTest::SplitEmptyTwice [GOOD] >> TNodeBrokerTest::NodesMigrationNodeName [GOOD] >> TNodeBrokerTest::SingleDomainModeBannedIds [GOOD] >> TNodeBrokerTest::SubscribeToNodes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ConfigPipelining [GOOD] Test command err: 2025-06-25T15:00:40.722079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:40.722141Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:45.617317Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:45.617370Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:45.881775Z node 9 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host1:1001: ERROR_TEMP: No free node IDs ... waiting for commit ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... waiting for commit (done) ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationNodeName [GOOD] Test command err: 2025-06-25T15:00:45.516412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:45.516473Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> BackupPathTest::ExportUnexistingExplicitPath [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-JSON [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-JSON_DOCUMENT ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::SingleDomainModeBannedIds [GOOD] Test command err: 2025-06-25T15:00:46.761432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:46.761495Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-25T15:00:46.897558Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host3:1001: ERROR_TEMP: No free node IDs 2025-06-25T15:00:46.923783Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host4:1001: ERROR_TEMP: No free node IDs 2025-06-25T15:00:46.937911Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node ID is banned 2025-06-25T15:00:47.722762Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host4:1001: ERROR_TEMP: No free node IDs 2025-06-25T15:00:47.738767Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host4:1001: ERROR_TEMP: No free node IDs ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::SubscribeToNodes [GOOD] Test command err: 2025-06-25T15:00:44.355589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:44.355650Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:46.369183Z node 1 :NODE_BROKER ERROR: node_broker.cpp:1090: [DB] Removing node with wrong ID 1025 not in range (1023, 1024] >> TNodeBrokerTest::UpdateEpochPipelining [GOOD] >> TNodeBrokerTest::NodesV2BackMigrationManyNodesInterrupted [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::UpdateEpochPipelining [GOOD] Test command err: 2025-06-25T15:00:45.426734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:45.426789Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) 2025-06-25T15:00:46.818488Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host2:1001: ERROR_TEMP: No free node IDs ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 1 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR >> TColumnShardTestSchema::RebootHotTiersAfterTtl [GOOD] >> TNodeBrokerTest::NodesMigration1001Nodes [GOOD] >> TNodeBrokerTest::SeveralNodesSubscribersPerPipe [GOOD] >> TNodeBrokerTest::NodesSubscriberDisconnect [GOOD] >> BackupPathTest::ExportUnexistingCommonSourcePath ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesV2BackMigrationManyNodesInterrupted [GOOD] Test command err: 2025-06-25T15:00:42.266772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:42.266828Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for first batch is committed ... blocking NKikimr::TEvTablet::TEvCommitResult from TABLET_ACTOR to FLAT_EXECUTOR cookie 2 ... blocking NKikimr::TEvTablet::TEvCommitResult from TABLET_ACTOR to FLAT_EXECUTOR cookie 1 ... 
waiting for first batch is committed (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::SeveralNodesSubscribersPerPipe [GOOD] Test command err: 2025-06-25T15:00:46.953100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:46.953152Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... waiting for updates are sent ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to TEST_ACTOR_RUNTIME cookie 0 ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to TEST_ACTOR_RUNTIME cookie 0 ... waiting for updates are sent (done) ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to TEST_ACTOR_RUNTIME ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to TEST_ACTOR_RUNTIME ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigration1001Nodes [GOOD] Test command err: 2025-06-25T15:00:45.492249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:45.492322Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::LoadStateMoveEpoch [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesSubscriberDisconnect [GOOD] Test command err: 2025-06-25T15:00:47.082580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:47.082627Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootHotTiersAfterTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=150864184.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864184.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864184.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150864184.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864184.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864184.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150864184.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862984.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130864184.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130864184.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862984.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130862984.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130862984.000000s;Name=;Codec=}; 2025-06-25T14:59:45.171596Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:45.190142Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:45.190369Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:45.196058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:45.196236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:45.196423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:45.196508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:45.196589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:45.196674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:45.196736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:45.196843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:45.196932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:45.197008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:45.197124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:45.216559Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:45.216669Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:45.216708Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:45.216851Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:45.216956Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:45.217011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:45.217038Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:45.217097Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:45.217134Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:45.217159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:45.217185Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:45.217335Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:45.217404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:45.217440Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:45.217470Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:45.217555Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:45.217610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:45.217636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:45.217653Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:45.217688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:45.217709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:45.217726Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:45.217890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:45.217928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:45.217959Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:45.218133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:45.218182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:45.218215Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:45.218333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:45.218371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:45.218396Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:45.218470Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:45.218529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;f ... nit;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=6921; 2025-06-25T15:00:48.673602Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=9; 2025-06-25T15:00:48.673782Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=146; 2025-06-25T15:00:48.673813Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=7330; 2025-06-25T15:00:48.673876Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=7458; 2025-06-25T15:00:48.673946Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=11; 2025-06-25T15:00:48.674113Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=127; 2025-06-25T15:00:48.674143Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=8081; 2025-06-25T15:00:48.674258Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=72; 2025-06-25T15:00:48.674341Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=47; 2025-06-25T15:00:48.674458Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=79; 2025-06-25T15:00:48.674551Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=59; 2025-06-25T15:00:48.677424Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2820; 2025-06-25T15:00:48.679980Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=2483; 2025-06-25T15:00:48.680053Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=11; 2025-06-25T15:00:48.680092Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=10; 2025-06-25T15:00:48.680120Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=4; 2025-06-25T15:00:48.680186Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=41; 2025-06-25T15:00:48.680220Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=4; 2025-06-25T15:00:48.680300Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=50; 2025-06-25T15:00:48.680367Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-25T15:00:48.680424Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=30; 2025-06-25T15:00:48.680498Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=41; 2025-06-25T15:00:48.680811Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=285; 2025-06-25T15:00:48.680853Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=21588; 2025-06-25T15:00:48.680995Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=29251936;raw_bytes=43173354;count=6;records=480000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:00:48.681100Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:00:48.681141Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:00:48.681197Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:00:48.696499Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-25T15:00:48.696686Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:00:48.696777Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=3; 2025-06-25T15:00:48.696847Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863335006;tx_id=18446744073709551615;;current_snapshot_ts=1750863586484; 2025-06-25T15:00:48.696890Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:00:48.696929Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:48.696959Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:48.697026Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:00:48.698362Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:00:48.698450Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:00:48.698471Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:00:48.698490Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:00:48.698522Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:00:48.698580Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=3; 2025-06-25T15:00:48.698637Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863335006;tx_id=18446744073709551615;;current_snapshot_ts=1750863586484; 2025-06-25T15:00:48.698671Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:00:48.698709Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:48.698739Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:48.698808Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2015:3824];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 160000/9752224 160000/9752224 160000/9752224 80000/4886744 0/0 >> BasicUsage::TWriteSession_WriteEncoded [GOOD] >> CompressExecutor::TestReorderedExecutor ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::LoadStateMoveEpoch [GOOD] Test command err: 2025-06-25T15:00:46.949470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot 
subscribe to console configs 2025-06-25T15:00:46.949550Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) >> TNodeBrokerTest::NodesV2BackMigration [GOOD] >> KqpLimits::TooBigQuery+useSink >> KqpQuery::QueryCacheTtl >> KqpExplain::Explain >> KqpQuery::RewriteIfPresentToMap >> KqpParams::ExplicitSameParameterTypesQueryCacheCheck >> KqpQuery::ExecuteDataQueryCollectMeta >> KqpParams::RowsList >> KqpAnalyze::AnalyzeTable+ColumnStore >> KqpParams::MissingOptionalParameter+UseSink >> KqpLimits::TooBigQuery-useSink >> KqpQuery::RandomNumber >> KqpStats::MultiTxStatsFullExpYql ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesV2BackMigration [GOOD] Test command err: 2025-06-25T15:00:47.263617Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:47.263667Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ... blocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR cookie 2 ... unblocking NKikimr::TEvTablet::TEvCommit from FLAT_EXECUTOR to TABLET_ACTOR >> KqpParams::CheckQueryCacheForPreparedQuery >> KqpQuery::CurrentUtcTimestamp >> KqpQuery::Now >> KqpStats::RequestUnitForBadRequestExecute >> KqpQuery::CreateAsSelectBadTypes+IsOlap >> BackupRestore::TestAllPrimitiveTypes-INTERVAL64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-STRING ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/ut/unittest >> TFlatTest::SplitEmptyTwice [GOOD] Test command err: 2025-06-25T14:58:23.458901Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901487928419917:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:58:23.467311Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00206d/r3tmp/tmpglY5qk/pdisk_1.dat 2025-06-25T14:58:23.772635Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519901487928419884:2080] 1750863503455002 != 1750863503455005 2025-06-25T14:58:23.778149Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:23.874378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:23.874511Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:23.875983Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21158 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T14:58:24.091330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:58:24.120875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T14:58:24.151529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T14:58:24.340936Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1265 647 2154)b }, ecr=1.000 2025-06-25T14:58:24.350676Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.11, eph 1} end=Done, 4 blobs 3r (max 3), put Spent{time=0.006s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 2 +0, (1139 521 2626)b }, ecr=1.000 2025-06-25T14:58:24.379488Z node 1 :OPS_COMPACT INFO: Compact{72075186224037888.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.002s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 5 +0, (1573 647 6413)b }, ecr=1.000 2025-06-25T14:58:24.391886Z node 1 :OPS_COMPACT INFO: Compact{72075186224037889.1.16, eph 2} end=Done, 4 blobs 6r (max 6), put Spent{time=0.003s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 4 +0, (2326 1432 5183)b }, ecr=1.000 TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863504271 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) TClient::Ls request: /dc-1/Dir/TableOld TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableOld" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1750863504271 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableOld" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "... (TRUNCATED) 2025-06-25T14:58:24.469930Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-25T14:58:26.516015Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.87, eph 1} end=Done, 2 blobs 226r (max 226), put Spent{time=0.012s,wait=0.004s,interrupts=1} Part{ 1 pk, lobs 0 +0, (65560 0 0)b }, ecr=1.000 2025-06-25T14:58:26.558286Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.98, eph 1} end=Done, 2 blobs 756r (max 756), put Spent{time=0.015s,wait=0.004s,interrupts=1} Part{ 1 pk, lobs 0 +0, (49618 0 0)b }, ecr=1.000 2025-06-25T14:58:26.725363Z node 1 :OPS_COMPACT INFO: Compact{72057594046644480.2.525, eph 1} end=Done, 2 blobs 3r (max 3), put Spent{time=0.012s,wait=0.011s,interrupts=1} Part{ 1 pk, lobs 0 +0, (187 0 0)b }, ecr=1.000 2025-06-25T14:58:26.726918Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.175, eph 2} end=Done, 2 blobs 460r (max 469), put Spent{time=0.061s,wait=0.010s,interrupts=1} Part{ 1 pk, lobs 0 +0, (133168 0 0)b }, ecr=1.000 2025-06-25T14:58:26.779202Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.199, eph 1} end=Done, 2 blobs 2r (max 2), put Spent{time=0.006s,wait=0.005s,interrupts=1} Part{ 1 pk, lobs 0 +0, (181 0 0)b }, ecr=1.000 2025-06-25T14:58:26.787944Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.198, eph 1} end=Done, 2 blobs 2r (max 2), put Spent{time=0.015s,wait=0.008s,interrupts=1} Part{ 1 pk, lobs 0 +0, (252 0 0)b }, ecr=1.000 2025-06-25T14:58:26.847970Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.200, eph 1} end=Done, 2 blobs 510r (max 510), put Spent{time=0.074s,wait=0.060s,interrupts=1} Part{ 1 pk, lobs 0 +0, (32533 0 0)b }, ecr=1.000 2025-06-25T14:58:26.859853Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.202, eph 2} end=Done, 2 blobs 1530r (max 1533), put Spent{time=0.087s,wait=0.019s,interrupts=1} Part{ 1 pk, lobs 0 +0, (100360 0 0)b }, ecr=1.000 2025-06-25T14:58:26.861740Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.201, eph 1} end=Done, 2 blobs 1527r (max 1527), put Spent{time=0.021s,wait=0.013s,interrupts=1} Part{ 1 pk, lobs 0 +0, (104906 0 0)b }, ecr=1.000 2025-06-25T14:58:26.863988Z node 1 :OPS_COMPACT INFO: Compact{72057594046644480.2.553, eph 1} end=Done, 2 blobs 10001r (max 10001), put Spent{time=0.075s,wait=0.004s,interrupts=1} Part{ 1 pk, lobs 0 +0, (553660 0 0)b }, ecr=1.000 2025-06-25T14:58:26.956413Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.285, eph 3} end=Done, 2 blobs 713r (max 714), put Spent{time=0.028s,wait=0.006s,interrupts=1} Part{ 1 pk, lobs 0 +0, (206229 0 0)b }, ecr=1.000 2025-06-25T14:58:27.017432Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.300, eph 3} end=Done, 2 blobs 2283r (max 2286), put Spent{time=0.040s,wait=0.004s,interrupts=1} Part{ 1 pk, lobs 0 +0, (149629 0 0)b }, ecr=1.000 2025-06-25T14:58:27.145452Z node 1 :OPS_COMPACT INFO: Compact{72057594046644480.2.1042, eph 2} end=Done, 2 blobs 3r (max 5), put Spent{time=0.017s,wait=0.008s,interrupts=1} Part{ 1 pk, lobs 0 +0, (187 0 0)b }, ecr=1.000 2025-06-25T14:58:27.184220Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.366, eph 4} end=Done, 2 blobs 967r (max 968), put Spent{time=0.079s,wait=0.029s,interrupts=1} Part{ 1 pk, lobs 0 +0, (279622 0 0)b }, ecr=1.000 2025-06-25T14:58:27.219575Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.378, eph 2} end=Done, 2 blobs 3030r (max 3030), put Spent{time=0.087s,wait=0.044s,interrupts=1} Part{ 1 pk, lobs 0 +0, (207992 0 0)b }, ecr=1.000 2025-06-25T14:58:27.228300Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.379, eph 2} end=Done, 2 blobs 2r (max 3), put Spent{time=0.095s,wait=0.087s,interrupts=1} Part{ 1 pk, 
lobs 0 +0, (252 0 0)b }, ecr=1.000 2025-06-25T14:58:27.236797Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.380, eph 2} end=Done, 2 blobs 2r (max 3), put Spent{time=0.103s,wait=0.091s,interrupts=1} Part{ 1 pk, lobs 0 +0, (181 0 0)b }, ecr=1.000 2025-06-25T14:58:27.240740Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.383, eph 4} end=Done, 2 blobs 3036r (max 3039), put Spent{time=0.103s,wait=0.012s,interrupts=1} Part{ 1 pk, lobs 0 +0, (198952 0 0)b }, ecr=1.000 2025-06-25T14:58:27.242066Z node 1 :OPS_COMPACT INFO: Compact{72057594046644480.2.1060, eph 2} end=Done, 2 blobs 10001r (max 10502), put Spent{time=0.105s,wait=0.005s,interrupts=1} Part{ 1 pk, lobs 0 +0, (553660 0 0)b }, ecr=1.000 2025-06-25T14:58:27.259519Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.381, eph 2} end=Done, 2 blobs 1011r (max 1011), put Spent{time=0.126s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (64296 0 0)b }, ecr=1.000 2025-06-25T14:58:27.288797Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.462, eph 5} end=Done, 2 blobs 1218r (max 1219), put Spent{time=0.020s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (352109 0 0)b }, ecr=1.000 2025-06-25T14:58:27.310192Z node 1 :OPS_COMPACT INFO: Compact{72057594037968897.2.481, eph 5} end=Done, 2 blobs 3789r (max 3792), put Spent{time=0.021s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 0 +0, (248275 0 0)b }, ecr=1.000 2025-06-25T14:58:27.423770Z node 1 :OPS_COMPACT INFO: Compact{72057594046644480.2.1560, eph 3} end=Done, 2 blobs 3r (max 5), put Spent{time=0.007s,wait=0.002s,interrupts=1} Part{ 1 pk, lobs 0 +0, (187 0 0)b }, ecr=1.000 2025-06-25T14:58:27.437746Z node 1 :OPS_COMPACT INFO: Compact{720575 ... 0 2025-06-25T15:00:44.015127Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519902089722931852 RawX2: 4503608217307405 } TabletId: 72075186224037890 State: 4 2025-06-25T15:00:44.015166Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-25T15:00:44.015603Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-25T15:00:44.015716Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-25T15:00:44.017210Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-25T15:00:44.017549Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 7 2025-06-25T15:00:44.017794Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T15:00:44.017970Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 6 2025-06-25T15:00:44.020535Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-25T15:00:44.020565Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-25T15:00:44.020620Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T15:00:44.020638Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T15:00:44.025844Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037890 not found 2025-06-25T15:00:44.026108Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-25T15:00:44.029607Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519902089722931547 RawX2: 4503608217307347 } TabletId: 72075186224037889 State: 4 2025-06-25T15:00:44.029680Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-25T15:00:44.029906Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519902089722931853 RawX2: 4503608217307406 } TabletId: 72075186224037891 State: 4 2025-06-25T15:00:44.029946Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-25T15:00:44.030058Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519902089722932027 RawX2: 4503608217307424 } TabletId: 72075186224037893 State: 4 2025-06-25T15:00:44.030093Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037893, state: Offline, at schemeshard: 72057594046644480 2025-06-25T15:00:44.030209Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519902089722932033 RawX2: 4503608217307425 } TabletId: 72075186224037895 State: 4 2025-06-25T15:00:44.030243Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037895, state: Offline, at schemeshard: 72057594046644480 2025-06-25T15:00:44.030758Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-25T15:00:44.030993Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7519902089722932036 RawX2: 4503608217307426 } TabletId: 72075186224037894 State: 4 2025-06-25T15:00:44.031037Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged 
DoExecute, datashard informs about state changing, datashardId: 72075186224037894, state: Offline, at schemeshard: 72057594046644480 2025-06-25T15:00:44.031356Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-25T15:00:44.031427Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:6 hive 72057594037968897 at ss 72057594046644480 2025-06-25T15:00:44.031498Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:8 hive 72057594037968897 at ss 72057594046644480 2025-06-25T15:00:44.036897Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:7 hive 72057594037968897 at ss 72057594046644480 2025-06-25T15:00:44.037793Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-25T15:00:44.038113Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 5 2025-06-25T15:00:44.038359Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-25T15:00:44.038573Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-25T15:00:44.038772Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046644480 ShardLocalIdx: 6, at schemeshard: 72057594046644480 2025-06-25T15:00:44.039034Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T15:00:44.040098Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 8 ShardOwnerId: 72057594046644480 ShardLocalIdx: 8, at schemeshard: 72057594046644480 2025-06-25T15:00:44.040464Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-25T15:00:44.040632Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 7 ShardOwnerId: 72057594046644480 ShardLocalIdx: 7, at schemeshard: 72057594046644480 2025-06-25T15:00:44.040830Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-25T15:00:44.041027Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T15:00:44.041061Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-25T15:00:44.041124Z 
node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T15:00:44.042918Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037893 not found 2025-06-25T15:00:44.042947Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037889 not found 2025-06-25T15:00:44.042965Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037891 not found 2025-06-25T15:00:44.044172Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-25T15:00:44.044192Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-25T15:00:44.044231Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-25T15:00:44.044239Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-25T15:00:44.044254Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:6 2025-06-25T15:00:44.044260Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:6 tabletId 72075186224037893 2025-06-25T15:00:44.044750Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:8 2025-06-25T15:00:44.044771Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:8 tabletId 72075186224037895 2025-06-25T15:00:44.044802Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:7 2025-06-25T15:00:44.044815Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:7 tabletId 72075186224037894 2025-06-25T15:00:44.044859Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T15:00:44.045169Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037894 not found TClient::Ls response: Status: 128 StatusCode: PATH_NOT_EXIST Issues { message: "Path not exist" issue_code: 200200 severity: 1 } SchemeStatus: 2 ErrorReason: "Path not found" 2025-06-25T15:00:44.051220Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037895 not found >> KqpLimits::OutOfSpaceBulkUpsertFail >> BackupRestore::TestReplaceRestoreOption [GOOD] >> BackupRestore::TestReplaceRestoreOptionOnNonExistingSchemeObjects |91.7%| [TA] $(B)/ydb/core/client/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> Secret::Validation [GOOD] |91.7%| [TA] {RESULT} $(B)/ydb/core/client/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpTypes::UnsafeTimestampCastV0 >> Secret::ValidationQueryService [GOOD] >> KqpExplain::UpdateConditional+UseSink >> BackupPathTest::ExportUnexistingCommonSourcePath [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::Validation [GOOD] Test command err: 2025-06-25T14:58:00.858388Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:58:00.858550Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:00.858605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e79/r3tmp/tmpv7T9PL/pdisk_1.dat 2025-06-25T14:58:01.278626Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 12528, node 1 TClient is connected to server localhost:7863 2025-06-25T14:58:01.837211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:01.884266Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:01.901727Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:01.901810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:01.901867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:01.902559Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:01.902962Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863477997454 != 1750863477997458 2025-06-25T14:58:01.954809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:01.954952Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:01.969513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:02.186181Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-25T14:58:13.921476Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:709:2587], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:13.921605Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;RESULT=
:1:20: Error: mismatched input '-' expecting '(' ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=0;WAITING=1 2025-06-25T14:58:24.245900Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:734:2601], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:24.246058Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:24.251629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:24.399288Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:844:2678], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:24.399387Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:24.399630Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:849:2683], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:24.404070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:24.543622Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:851:2685], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T14:58:24.793777Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:944:2749] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:25.378456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:25.803655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:26.529206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:27.290519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:27.714682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:28.811545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:29.144155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=
: Error: Execution, code: 1060
:1:48: Error: Executing ALTER OBJECT SECRET
: Error: preparation problem: secret secret1 not found for alter ;EXPECTATION=0 2025-06-25T14:58:31.640083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:58:31.640168Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-25T14:59:07.084098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715711:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:07.998227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715716:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:09.633551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715725:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:59:10.134983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715728:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret2:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT `secret2:test@test1` (TYPE SECRET_ACCESS);RESULT=
: Error: Execution, code: 1060
:1:42: Error: Executing CREATE OBJECT SECRET_ACCESS
: Error: preparation problem: used in access secret secret2 not found ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT `secret2:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT IF NOT EXISTS `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-25T14:59:35.521223Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=17; 2025-06-25T14:59:35.521413Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 17 at tablet 72075186224037892 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:59:35.521610Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 17 at tablet 72075186224037892 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:59:35.521884Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [1:3550:4589], Table: `//Root/.metadata/secrets/access` ([72057594046644480:13:1]), SessionActorId: [1:3438:4589]Got CONSTRAINT VIOLATION for table `//Root/.metadata/secrets/access`. ShardID=72075186224037892, Sink=[1:3550:4589].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:59:35.522367Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:3543:4589], SessionActorId: [1:3438:4589], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `//Root/.metadata/secrets/access`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[1:3438:4589]. isRollback=0 2025-06-25T14:59:35.522803Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=Y2JkYjZjNWEtYzk2YWMxMjAtODcwMTU5NzUtODUxZDVmMzA=, ActorId: [1:3438:4589], ActorState: ExecuteState, TraceId: 01jykskycnbscxypgswz7428e0, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [1:3544:4589] from: [1:3543:4589] 2025-06-25T14:59:35.523379Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:3544:4589] TxId: 281474976715757. Ctx: { TraceId: 01jykskycnbscxypgswz7428e0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2JkYjZjNWEtYzk2YWMxMjAtODcwMTU5NzUtODUxZDVmMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `//Root/.metadata/secrets/access`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:59:35.523654Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=Y2JkYjZjNWEtYzk2YWMxMjAtODcwMTU5NzUtODUxZDVmMzA=, ActorId: [1:3438:4589], ActorState: ExecuteState, TraceId: 01jykskycnbscxypgswz7428e0, Create QueryResponse for error on request, msg: 2025-06-25T14:59:35.528441Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor.h:64;event=unexpected reply;error_message=operation { ready: true status: PRECONDITION_FAILED issues { message: "Constraint violated. Table: `//Root/.metadata/secrets/access`." issue_code: 2012 severity: 1 issues { message: "Conflict with existing key." issue_code: 2012 severity: 1 } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { id: "01jyksky5f5rqpaa39dywesx4v" } } } } ;request=session_id: "ydb://session/3?node_id=1&id=Y2JkYjZjNWEtYzk2YWMxMjAtODcwMTU5NzUtODUxZDVmMzA=" tx_control { tx_id: "01jyksky5f5rqpaa39dywesx4v" } query { yql_text: "DECLARE $objects AS List>;\nINSERT INTO `//Root/.metadata/secrets/access`\nSELECT ownerUserId,secretId,accessSID FROM AS_TABLE($objects)\n" } parameters { key: "$objects" value { type { list_type { item { struct_type { members { name: "ownerUserId" type { type_id: UTF8 } } members { name: "secretId" type { type_id: UTF8 } } members { name: "accessSID" type { type_id: UTF8 } } } } } } value { items { items { text_value: "root@builtin" } items { text_value: "secret1" } items { text_value: "test@test1" } } } } } ; REQUEST=CREATE OBJECT IF NOT EXISTS `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT IF NOT EXISTS `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=0;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);RESULT=
: Error: Execution, code: 1060
:1:29: Error: Executing DROP OBJECT SECRET
: Error: preparation problem: secret secret1 using in access for test@test1 ;EXPECTATION=0 FINISHED_REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=0;WAITING=1 2025-06-25T14:59:58.821190Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:4077:5045], for# root@builtin, access# DescribeSchema 2025-06-25T14:59:58.821274Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:4077:5045], for# root@builtin, access# DescribeSchema 2025-06-25T14:59:58.822898Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:4074:5042], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/secrets/values]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:59:58.824782Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=OWE5ZTFhMDktMTJmNjYwOGYtNzdkZDUyMGQtYzFkNDYxM2M=, ActorId: [1:4070:5039], ActorState: ExecuteState, TraceId: 01jyksmn6p5p0tm3jhn5d7j11t, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: REQUEST=SELECT * FROM `/Root/.metadata/secrets/values`;RESULT=
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/secrets/values]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 ;EXPECTATION=0 REQUEST=SELECT * FROM `/Root/.metadata/secrets/values`;EXPECTATION=0 REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-25T15:00:10.691598Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (51449FAE): Could not find correct token validator REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=
: Error: Execution, code: 1060
:1:50: Error: Executing CREATE OBJECT SECRET
: Error: cannot CREATE objects: Secret already exists: secret1 ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=UPSERT OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=UPSERT OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=
: Error: Execution, code: 1060
:1:50: Error: Executing UPSERT OBJECT SECRET
: Error: cannot UPSERT objects: Secret already exists: secret1 ;EXPECTATION=0 FINISHED_REQUEST=UPSERT OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT secret2 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT secret2 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret2 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 2025-06-25T15:00:50.161755Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715845. Ctx: { TraceId: 01jyksp70553aqf7tr8nnv88mt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTNhZGE3NTgtZjUyY2E4ZTEtMjc3NjkwNzUtZmNhM2Q1NTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/secret/ut/unittest >> Secret::ValidationQueryService [GOOD] Test command err: 2025-06-25T14:58:00.864944Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T14:58:00.865101Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T14:58:00.865169Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ea7/r3tmp/tmpLTTzel/pdisk_1.dat 2025-06-25T14:58:01.278625Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 14638, node 1 TClient is connected to server localhost:27281 2025-06-25T14:58:01.839944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:58:01.886743Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:58:01.895804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:58:01.895882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:58:01.895920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:58:01.897668Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:58:01.897988Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863477997419 != 1750863477997423 2025-06-25T14:58:01.951676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:58:01.951798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:58:01.964257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:58:02.184942Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Initialization finished REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-25T14:58:13.743289Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:711:2591], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:13.743384Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:721:2596], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:13.743442Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:58:13.756277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:58:13.783306Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:725:2599], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-25T14:58:13.843093Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:776:2631] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:58:13.910669Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:786:2640], status: GENERIC_ERROR, issues:
:1:20: Error: mismatched input '-' expecting '(' 2025-06-25T14:58:13.917969Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Y2Q3YzA0M2YtNjg4NmM2YTktNmVjNjQ3YTItMjBhNWUwNDI=, ActorId: [1:709:2589], ActorState: ExecuteState, TraceId: 01jykshejk2jec5yn6rg64keek, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;RESULT=
:1:20: Error: mismatched input '-' expecting '(' ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret-1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=0;WAITING=1 2025-06-25T14:58:24.361701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:25.226663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:25.707239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:26.436863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:27.329757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:27.804709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:58:28.393714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:58:29.257296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:58:31.623703Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NzIwYjVlNmQtMWZjZWE0YTgtNzE0OGRlYzYtNTBiMTQxMmU=, ActorId: [1:808:2654], ActorState: ExecuteState, TraceId: 01jykshrqr058tgdc1yxvd9aej, Create QueryResponse for error on request, msg: 2025-06-25T14:58:31.630220Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715689. Ctx: { TraceId: 01jykshrqr058tgdc1yxvd9aej, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzIwYjVlNmQtMWZjZWE0YTgtNzE0OGRlYzYtNTBiMTQxMmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=
: Error: preparation problem: secret secret1 not found for alter ;EXPECTATION=0 2025-06-25T14:58:32.967423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T14:58:32.967493Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=ALTER OBJECT secret1 (TYPE SECRET) SET value = `abcde`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-25T14:59:07.464667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715712:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:08.464338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715719:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:10.021926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose ... 4046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T14:59:10.670165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715731:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT `secret2:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=0;WAITING=1 2025-06-25T14:59:23.020273Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MTIxZjk4MWMtODM0Y2JkY2QtZGFkZjM1OGYtMWJhNDcwZGM=, ActorId: [1:3118:4361], ActorState: ExecuteState, TraceId: 01jykskhn4544fyqt34dk3bh5g, Create QueryResponse for error on request, msg: 2025-06-25T14:59:23.021698Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715744. Ctx: { TraceId: 01jykskhn4544fyqt34dk3bh5g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTIxZjk4MWMtODM0Y2JkY2QtZGFkZjM1OGYtMWJhNDcwZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root REQUEST=CREATE OBJECT `secret2:test@test1` (TYPE SECRET_ACCESS);RESULT=
: Error: preparation problem: used in access secret secret2 not found ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT `secret2:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT IF NOT EXISTS `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 2025-06-25T14:59:35.206724Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=15; 2025-06-25T14:59:35.207054Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 15 at tablet 72075186224037892 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:59:35.207319Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 15 at tablet 72075186224037892 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-25T14:59:35.207854Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [1:3494:4566], Table: `//Root/.metadata/secrets/access` ([72057594046644480:13:1]), SessionActorId: [1:3401:4566]Got CONSTRAINT VIOLATION for table `//Root/.metadata/secrets/access`. ShardID=72075186224037892, Sink=[1:3494:4566].{
: Error: Conflict with existing key., code: 2012 } 2025-06-25T14:59:35.208416Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:3487:4566], SessionActorId: [1:3401:4566], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `//Root/.metadata/secrets/access`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[1:3401:4566]. isRollback=0 2025-06-25T14:59:35.208854Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=NGE0ZTVhZDktNGE5OGI4NmQtNWIzYjMwM2MtMmM2ZjE5NDg=, ActorId: [1:3401:4566], ActorState: ExecuteState, TraceId: 01jyksky26bktqn4nfc0ss869k, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [1:3488:4566] from: [1:3487:4566] 2025-06-25T14:59:35.209063Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:3488:4566] TxId: 281474976715757. Ctx: { TraceId: 01jyksky26bktqn4nfc0ss869k, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGE0ZTVhZDktNGE5OGI4NmQtNWIzYjMwM2MtMmM2ZjE5NDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `//Root/.metadata/secrets/access`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-25T14:59:35.209610Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NGE0ZTVhZDktNGE5OGI4NmQtNWIzYjMwM2MtMmM2ZjE5NDg=, ActorId: [1:3401:4566], ActorState: ExecuteState, TraceId: 01jyksky26bktqn4nfc0ss869k, Create QueryResponse for error on request, msg: 2025-06-25T14:59:35.218523Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor.h:64;event=unexpected reply;error_message=operation { ready: true status: PRECONDITION_FAILED issues { message: "Constraint violated. Table: `//Root/.metadata/secrets/access`." issue_code: 2012 severity: 1 issues { message: "Conflict with existing key." issue_code: 2012 severity: 1 } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { id: "01jykskxtk35ateh6tr50rdqf7" } } } } ;request=session_id: "ydb://session/3?node_id=1&id=NGE0ZTVhZDktNGE5OGI4NmQtNWIzYjMwM2MtMmM2ZjE5NDg=" tx_control { tx_id: "01jykskxtk35ateh6tr50rdqf7" } query { yql_text: "DECLARE $objects AS List>;\nINSERT INTO `//Root/.metadata/secrets/access`\nSELECT ownerUserId,secretId,accessSID FROM AS_TABLE($objects)\n" } parameters { key: "$objects" value { type { list_type { item { struct_type { members { name: "ownerUserId" type { type_id: UTF8 } } members { name: "secretId" type { type_id: UTF8 } } members { name: "accessSID" type { type_id: UTF8 } } } } } } value { items { items { text_value: "root@builtin" } items { text_value: "secret1" } items { text_value: "test@test1" } } } } } ; REQUEST=CREATE OBJECT IF NOT EXISTS `secret1:test@test1` (TYPE SECRET_ACCESS);RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT IF NOT EXISTS `secret1:test@test1` (TYPE SECRET_ACCESS);EXPECTATION=1;WAITING=1 REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=0;WAITING=1 2025-06-25T14:59:47.606894Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZmRlMTY0MmEtZTU3YjljOTctNjQ1ZjY5NDItZTA2ZTEzYjA=, ActorId: [1:3726:4799], ActorState: ExecuteState, TraceId: 01jyksm9nk3yq1s24y3y8p566k, Create QueryResponse for error on request, msg: 2025-06-25T14:59:47.608173Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715771. Ctx: { TraceId: 01jyksm9nk3yq1s24y3y8p566k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmRlMTY0MmEtZTU3YjljOTctNjQ1ZjY5NDItZTA2ZTEzYjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=DROP OBJECT `secret1` (TYPE SECRET);RESULT=
: Error: preparation problem: secret secret1 using in access for test@test1 ;EXPECTATION=0 FINISHED_REQUEST=DROP OBJECT `secret1` (TYPE SECRET);EXPECTATION=0;WAITING=1 2025-06-25T14:59:59.228600Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:4070:5052], for# root@builtin, access# DescribeSchema 2025-06-25T14:59:59.228720Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:4070:5052], for# root@builtin, access# DescribeSchema 2025-06-25T14:59:59.230650Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:4067:5049], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/secrets/values]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:59:59.241694Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Mjc4MjFhNjAtZGNhYzc3NWYtYjc1NWE4OTAtNzQwZmYwOQ==, ActorId: [1:4063:5046], ActorState: ExecuteState, TraceId: 01jyksmnkbabj83wpnez0nh9s5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: REQUEST=SELECT * FROM `/Root/.metadata/secrets/values`;RESULT=
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/Root/.metadata/secrets/values]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 ;EXPECTATION=0 REQUEST=SELECT * FROM `/Root/.metadata/secrets/values`;EXPECTATION=0 REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-25T15:00:11.028756Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (51449FAE): Could not find correct token validator 2025-06-25T15:00:11.801214Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZjRjZTZjNDgtZWFlN2YwNTktMzhiNjlkMGYtNjM3Zjg5NmI=, ActorId: [1:4354:5258], ActorState: ExecuteState, TraceId: 01jyksn14mb93qngykqrz743nz, Create QueryResponse for error on request, msg: 2025-06-25T15:00:11.802314Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715799. Ctx: { TraceId: 01jyksn14mb93qngykqrz743nz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjRjZTZjNDgtZWFlN2YwNTktMzhiNjlkMGYtNjM3Zjg5NmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=
: Error: cannot CREATE objects: Secret already exists: secret1 ;EXPECTATION=0 FINISHED_REQUEST=CREATE OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=UPSERT OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 2025-06-25T15:00:24.226155Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NmFhNjMxMjItY2Y1M2VhYjItMmJkOTlkZGMtZWFhYTRkYmE=, ActorId: [1:4768:5560], ActorState: ExecuteState, TraceId: 01jyksndb5b9wa7vha3hnkne47, Create QueryResponse for error on request, msg: 2025-06-25T15:00:24.227003Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715817. Ctx: { TraceId: 01jyksndb5b9wa7vha3hnkne47, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmFhNjMxMjItY2Y1M2VhYjItMmJkOTlkZGMtZWFhYTRkYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=UPSERT OBJECT secret1 (TYPE SECRET) WITH value = `100`;RESULT=
: Error: cannot UPSERT objects: Secret already exists: secret1 ;EXPECTATION=0 FINISHED_REQUEST=UPSERT OBJECT secret1 (TYPE SECRET) WITH value = `100`;EXPECTATION=0;WAITING=1 REQUEST=CREATE OBJECT secret2 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 REQUEST=CREATE OBJECT secret2 (TYPE SECRET) WITH value = `100`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=CREATE OBJECT secret2 (TYPE SECRET) WITH value = `100`;EXPECTATION=1;WAITING=1 2025-06-25T15:00:50.505958Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715850. Ctx: { TraceId: 01jyksp7a46np78vdz4pak335k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzQ5YmJhYzAtMjEzZDJkMzAtM2ViOTAxYzgtMjZmY2M1NzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;RESULT=;EXPECTATION=1 REQUEST=SELECT COUNT(*) FROM `/Root/.metadata/initialization/migrations`;EXPECTATION=1 |91.7%| [TA] $(B)/ydb/services/metadata/secret/ut/test-results/unittest/{meta.json ... results_accumulator.log} |91.7%| [TA] {RESULT} $(B)/ydb/services/metadata/secret/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> BackupRestoreS3::TestAllPrimitiveTypes-DATE [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME >> BackupRestoreS3::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD] >> BackupPathTest::FilterByPathFailsWhenNoSchemaMapping >> TColumnShardTestSchema::ColdTiers [GOOD] >> KqpQuery::CreateAsSelectTypes-NotNull-IsOlap >> KqpQuery::CreateAsSelectBadTypes+IsOlap [GOOD] >> KqpQuery::CreateAsSelectBadTypes-IsOlap >> KqpExplain::SortStage >> TColumnShardTestSchema::HotTiers [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ColdTiers [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864196.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864196.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150864196.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864196.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864196.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150864196.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862996.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130864196.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130864196.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862996.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130862996.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130862996.000000s;Name=;Codec=}; 2025-06-25T14:59:57.317676Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:57.346895Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:57.347156Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:57.353755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:57.353973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:57.354194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:57.354310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:57.354416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:57.354528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:57.354638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:57.354743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:57.354848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:57.354953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:57.355098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:57.382376Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:57.382517Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:57.382580Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:57.382744Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:57.382885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:57.382962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:57.383005Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:57.383085Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:57.383139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:57.383182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:57.383223Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:57.383386Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:57.383458Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:57.383505Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:57.383533Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:57.383615Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:57.383662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:57.383703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:57.383730Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:57.383775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:57.383810Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:57.383837Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:57.384023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:57.384060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:57.384095Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:57.384288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:57.384362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:57.384401Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:57.384525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:57.384563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:57.384590Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:57.384686Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:57.384759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:57.384800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:57.384830Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:15 ... 
025-06-25T15:00:55.388434Z;index_granules=0;index_portions=1;index_batches=522;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=4873744;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=4873744;selected_rows=0; 2025-06-25T15:00:55.671848Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:946:2891];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:55.672096Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:946:2891];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:55.673024Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 9 at tablet 9437184 2025-06-25T15:00:55.673290Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863645467:max} readable: {1750863645467:max} at tablet 9437184 2025-06-25T15:00:55.673428Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:00:55.673598Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863645467:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:55.673670Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863645467:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:55.674206Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863645467:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T15:00:55.674313Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863645467:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:00:55.674790Z node 1 
:TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863645467:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:953:2898];trace_detailed=; 2025-06-25T15:00:55.675195Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T15:00:55.675435Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:55.675626Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:55.675771Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:55.676056Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:953:2898];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:55.676170Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:953:2898];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:55.676259Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:953:2898];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:55.676304Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:953:2898] finished for tablet 9437184 2025-06-25T15:00:55.676754Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=4;SelfId=[1:953:2898];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:952:2897];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863655674722,"name":"_full_task","f":1750863655674722,"d_finished":0,"c":0,"l":1750863655676400,"d":1678},"events":[{"name":"bootstrap","f":1750863655674911,"d_finished":908,"c":1,"l":1750863655675819,"d":908},{"a":1750863655676032,"name":"ack","f":1750863655676032,"d_finished":0,"c":0,"l":1750863655676400,"d":368},{"a":1750863655676012,"name":"processing","f":1750863655676012,"d_finished":0,"c":0,"l":1750863655676400,"d":388},{"name":"ProduceResults","f":1750863655675546,"d_finished":466,"c":2,"l":1750863655676285,"d":466},{"a":1750863655676290,"name":"Finish","f":1750863655676290,"d_finished":0,"c":0,"l":1750863655676400,"d":110}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:55.676842Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:953:2898];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:952:2897];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:55.677242Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:953:2898];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:952:2897];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750863655674722,"name":"_full_task","f":1750863655674722,"d_finished":0,"c":0,"l":1750863655676889,"d":2167},"events":[{"name":"bootstrap","f":1750863655674911,"d_finished":908,"c":1,"l":1750863655675819,"d":908},{"a":1750863655676032,"name":"ack","f":1750863655676032,"d_finished":0,"c":0,"l":1750863655676889,"d":857},{"a":1750863655676012,"name":"processing","f":1750863655676012,"d_finished":0,"c":0,"l":1750863655676889,"d":877},{"name":"ProduceResults","f":1750863655675546,"d_finished":466,"c":2,"l":1750863655676285,"d":466},{"a":1750863655676290,"name":"Finish","f":1750863655676290,"d_finished":0,"c":0,"l":1750863655676889,"d":599}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:953:2898]->[1:952:2897] 2025-06-25T15:00:55.677348Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=4;SelfId=[1:953:2898];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:55.674283Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:00:55.677418Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:953:2898];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:55.677540Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:953:2898];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 160000/9739224 160000/9739224 160000/9739224 80000/4873744 0/0 >> KqpQuery::RandomNumber [GOOD] >> KqpQuery::RandomUuid >> KqpParams::MissingOptionalParameter+UseSink [GOOD] >> KqpParams::ImplicitParameterTypes >> KqpParams::RowsList [GOOD] >> KqpParams::MissingParameter >> KqpQuery::ExecuteDataQueryCollectMeta [GOOD] >> KqpQuery::DeleteWhereInSubquery >> KqpQuery::RewriteIfPresentToMap [GOOD] >> KqpQuery::RowsLimit >> KqpParams::CheckQueryCacheForPreparedQuery [GOOD] >> KqpParams::CheckQueryCacheForExecuteAndPreparedQueries >> KqpParams::ExplicitSameParameterTypesQueryCacheCheck [GOOD] >> KqpParams::ImplicitDifferentParameterTypesQueryCacheCheck >> KqpStats::MultiTxStatsFullExpYql [GOOD] >> KqpStats::MultiTxStatsFullExpScan >> KqpStats::RequestUnitForBadRequestExecute [GOOD] >> KqpStats::RequestUnitForBadRequestExplicitPrepare >> KqpQuery::CurrentUtcTimestamp [GOOD] >> KqpQuery::DdlInDataQuery >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted [GOOD] >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds >> KqpExplain::Explain [GOOD] >> KqpExplain::ExplainDataQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::HotTiers [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864198.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864198.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864198.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864198.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862998.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130864198.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862998.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130862998.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130862998.000000s;Name=;Codec=}; 2025-06-25T14:59:59.001795Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:59.029275Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:59.029567Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:59.036570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:59.036847Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:59.037078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:59.037190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:59.037288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:59.037390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:59.037483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:59.037589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:59.037686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:59.037791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:59.037885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:59.063123Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:59.063268Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:59.063342Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:59.063507Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:59.063673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:59.063754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:59.063799Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:59.063884Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:59.063943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:59.063985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:59.064021Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:59.064177Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:59.064249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:59.064293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:59.064341Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:59.064429Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:59.064479Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:59.064518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:59.064545Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:59.064609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:59.064646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:59.064672Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:59.064857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:59.064899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:59.064932Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:59.065112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:59.065163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:59.065194Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:59.065299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:59.065339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:59.065367Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:59.065438Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:59.065498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abs ... ARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:00:56.402285Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:56.402339Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:00:56.402446Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:00:56.402655Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863647329:max} readable: {1750863647329:max} at tablet 9437184 2025-06-25T15:00:56.402789Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:00:56.402950Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863647329:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:56.403013Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863647329:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:00:56.403547Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863647329:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T15:00:56.403645Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863647329:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:00:56.404120Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863647329:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:1446:3390];trace_detailed=; 2025-06-25T15:00:56.404565Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T15:00:56.404796Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:00:56.404972Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:56.405094Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:56.405406Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:00:56.405522Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:56.405623Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:56.405662Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1446:3390] finished for tablet 9437184 2025-06-25T15:00:56.406091Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1445:3389];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863656404058,"name":"_full_task","f":1750863656404058,"d_finished":0,"c":0,"l":1750863656405732,"d":1674},"events":[{"name":"bootstrap","f":1750863656404268,"d_finished":858,"c":1,"l":1750863656405126,"d":858},{"a":1750863656405382,"name":"ack","f":1750863656405382,"d_finished":0,"c":0,"l":1750863656405732,"d":350},{"a":1750863656405361,"name":"processing","f":1750863656405361,"d_finished":0,"c":0,"l":1750863656405732,"d":371},{"name":"ProduceResults","f":1750863656404887,"d_finished":444,"c":2,"l":1750863656405644,"d":444},{"a":1750863656405647,"name":"Finish","f":1750863656405647,"d_finished":0,"c":0,"l":1750863656405732,"d":85}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:00:56.406170Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1445:3389];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:00:56.406575Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1445:3389];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750863656404058,"name":"_full_task","f":1750863656404058,"d_finished":0,"c":0,"l":1750863656406217,"d":2159},"events":[{"name":"bootstrap","f":1750863656404268,"d_finished":858,"c":1,"l":1750863656405126,"d":858},{"a":1750863656405382,"name":"ack","f":1750863656405382,"d_finished":0,"c":0,"l":1750863656406217,"d":835},{"a":1750863656405361,"name":"processing","f":1750863656405361,"d_finished":0,"c":0,"l":1750863656406217,"d":856},{"name":"ProduceResults","f":1750863656404887,"d_finished":444,"c":2,"l":1750863656405644,"d":444},{"a":1750863656405647,"name":"Finish","f":1750863656405647,"d_finished":0,"c":0,"l":1750863656406217,"d":570}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1446:3390]->[1:1445:3389] 2025-06-25T15:00:56.406670Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:00:56.403617Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:00:56.406719Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:00:56.406829Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 160000/9752224 80000/4886744 0/0 ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD] Test command err: 2025-06-25T14:59:27.296732Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901761136299969:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:27.300480Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001497/r3tmp/tmpvxTjx1/pdisk_1.dat 2025-06-25T14:59:27.733898Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:27.740044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:27.740119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:27.746595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12114, node 1 2025-06-25T14:59:27.860642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:27.860664Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:27.860675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:27.860798Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11764 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:28.240411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:59:28.306616Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:29.988332Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901769726235513:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.988332Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901769726235505:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.988397Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.988634Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519901761136300115:2143] Handle TEvProposeTransaction 2025-06-25T14:59:29.988659Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519901761136300115:2143] TxId# 281474976710658 ProcessProposeTransaction 2025-06-25T14:59:29.988708Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519901761136300115:2143] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519901769726235520:2632] 2025-06-25T14:59:30.055092Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519901769726235520:2632] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-25T14:59:30.055158Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519901769726235520:2632] txid# 281474976710658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:59:30.055172Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519901769726235520:2632] txid# 281474976710658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-25T14:59:30.056390Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519901769726235520:2632] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:59:30.056470Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519901769726235520:2632] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:59:30.056647Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519901769726235520:2632] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:59:30.056754Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519901769726235520:2632] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:59:30.056794Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519901769726235520:2632] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-25T14:59:30.056910Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519901769726235520:2632] txid# 281474976710658 HANDLE EvClientConnected 2025-06-25T14:59:30.058118Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:30.060714Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519901769726235520:2632] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-06-25T14:59:30.060799Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519901769726235520:2632] txid# 281474976710658 SEND to# [1:7519901769726235519:2304] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-06-25T14:59:30.076619Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901769726235519:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T14:59:30.166389Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519901761136300115:2143] Handle TEvProposeTransaction 2025-06-25T14:59:30.166420Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519901761136300115:2143] TxId# 281474976710659 ProcessProposeTransaction 2025-06-25T14:59:30.166463Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519901761136300115:2143] Cookie# 0 userReqId# "" txid# 281474976710659 SEND to# [1:7519901774021202886:2682] 2025-06-25T14:59:30.169319Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519901774021202886:2682] txid# 281474976710659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-25T14:59:30.169387Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519901774021202886:2682] txid# 281474976710659 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:59:30.169418Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519901774021202886:2682] txid# 281474976710659 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-25T14:59:30.169835Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519901774021202886:2682] txid# 281474976710659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:59:30.169918Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519901774021202886:2682] txid# 281474976710659 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:59:30.170179Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519901774021202886:2682] txid# 281474976710659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:59:30.170302Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519901774021202886:2682] HANDLE EvNavigateKeySetResult, txid# 281474976710659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:59:30.170345Z node 1 ... 
BUG: schemeshard_import__create.cpp:990: TImport::TTxProgress: OnSchemeResult: id# 281474976715667, itemIdx# 0, success# 1 2025-06-25T15:00:53.839204Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:630: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-25T15:00:53.856887Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:00:53.857030Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:00:53.857044Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1219: TImport::TTxProgress: OnAllocateResult: txId# 281474976710760, id# 281474976715667 2025-06-25T15:00:53.857095Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:420: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710760 2025-06-25T15:00:53.857256Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:00:53.858672Z node 46 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:53.866130Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:00:53.866158Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1315: TImport::TTxProgress: OnModifyResult: txId# 281474976710760, status# StatusAccepted 2025-06-25T15:00:53.866298Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:644: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976710760 Issue: '' } 2025-06-25T15:00:53.877460Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:00:53.944722Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:00:53.944754Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1473: TImport::TTxProgress: OnNotifyResult: txId# 281474976710760 2025-06-25T15:00:53.944835Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:630: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: 
AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-25T15:00:53.947029Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:00:53.947108Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:00:53.947125Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1219: TImport::TTxProgress: OnAllocateResult: txId# 281474976710761, id# 281474976715667 2025-06-25T15:00:53.947166Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:521: TImport::TTxProgress: Restore propose: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710761 2025-06-25T15:00:53.947875Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:00:53.948348Z node 46 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976710761:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h:563) 2025-06-25T15:00:53.949913Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:00:53.949936Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1315: TImport::TTxProgress: OnModifyResult: txId# 281474976710761, status# StatusAccepted 2025-06-25T15:00:53.950012Z node 46 :IMPORT INFO: schemeshard_import__create.cpp:644: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715667 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/JsonDocumentTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976710761 Issue: '' } 2025-06-25T15:00:53.952336Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:00:53.953193Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [46:7519902133543979247:2363] [0] Resolve database: name# /Root 2025-06-25T15:00:53.955067Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [46:7519902133543979247:2363] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T15:00:53.955101Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [46:7519902133543979247:2363] [0] Send request: schemeShardId# 72057594046644480 2025-06-25T15:00:53.957349Z node 46 :TX_PROXY DEBUG: 
rpc_get_operation.cpp:220: [GetImport] [46:7519902133543979247:2363] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715667 Status: SUCCESS Progress: PROGRESS_TRANSFER_DATA ImportFromS3Settings { endpoint: "localhost:9974" scheme: HTTP bucket: "test_bucket" items { source_prefix: "JsonDocumentTable" destination_path: "/Root/JsonDocumentTable" } } StartTime { seconds: 1750863653 } } REQUEST: HEAD /test_bucket/JsonDocumentTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:9974 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A867ED6A-8973-4DE8-B9C8-66381B11DB9C amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250625/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=ca276df644c715053fe079839a9ec57e1e8531902323990a26679cb16f19bb94 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250625T150054Z S3_MOCK::HttpServeRead: /test_bucket/JsonDocumentTable/data_00.csv / 32 REQUEST: GET /test_bucket/JsonDocumentTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:9974 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 14195F76-335B-47C2-ACA6-08BA4613B19C amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250625/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=aee9221c6a0e7db33c3c2a995d86d2677c7ee0c8417b1fa07035f50f368c273b content-type: application/xml range: bytes=0-31 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250625T150054Z S3_MOCK::HttpServeRead: /test_bucket/JsonDocumentTable/data_00.csv / 32 2025-06-25T15:00:54.026497Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:00:54.026522Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:1473: TImport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-25T15:00:54.028295Z node 46 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:00:54.365849Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [46:7519902137838946601:2368] [0] Resolve database: name# /Root 2025-06-25T15:00:54.366192Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [46:7519902137838946601:2368] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 
2025-06-25T15:00:54.366216Z node 46 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [46:7519902137838946601:2368] [0] Send request: schemeShardId# 72057594046644480 2025-06-25T15:00:54.366942Z node 46 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [46:7519902137838946601:2368] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715667 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:9974" scheme: HTTP bucket: "test_bucket" items { source_prefix: "JsonDocumentTable" destination_path: "/Root/JsonDocumentTable" } } StartTime { seconds: 1750863653 } EndTime { seconds: 1750863654 } } 2025-06-25T15:00:54.500700Z node 46 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [46:7519902112069141004:2135] Handle TEvExecuteKqpTransaction 2025-06-25T15:00:54.500738Z node 46 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [46:7519902112069141004:2135] TxId# 281474976715668 ProcessProposeKqpTransaction 2025-06-25T15:00:54.501847Z node 46 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. Ctx: { TraceId: 01jykspbf6fgmjc7ax1v95z1d8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=46&id=MzdlNzc5MmUtYWJmN2MzM2ItOTVmYzVmMWYtZGM1NzkzZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> KqpQuery::Now [GOOD] >> KqpQuery::GenericQueryNoRowsLimit >> KqpLimits::StreamWrite+Allowed >> KqpTypes::UnsafeTimestampCastV0 [GOOD] >> KqpTypes::UnsafeTimestampCastV1 >> KqpLimits::QSReplySizeEnsureMemoryLimits+useSink >> KqpExplain::ExplainStream >> BackupRestore::TestAllPrimitiveTypes-STRING [GOOD] >> BackupRestore::TestAllPrimitiveTypes-JSON >> KqpExplain::UpdateConditional+UseSink [GOOD] >> KqpExplain::UpdateConditional-UseSink >> KqpQuery::CreateAsSelectBadTypes-IsOlap [GOOD] >> KqpQuery::CreateAsSelectPath+UseTablePathPrefix >> KqpQuery::CreateAsSelectTypes-NotNull-IsOlap [GOOD] >> KqpQuery::CreateAsSelectTypes+NotNull-IsOlap >> KqpQuery::QueryCacheTtl [GOOD] >> KqpQuery::QueryCachePermissionsLoss >> BackupPathTest::FilterByPathFailsWhenNoSchemaMapping [GOOD] >> TColumnShardTestSchema::RebootExportWithLostAnswer [GOOD] >> KqpExplain::SortStage [GOOD] >> KqpExplain::SelfJoin3xSameLabels >> KqpParams::ImplicitParameterTypes [GOOD] >> KqpParams::ImplicitSameParameterTypesQueryCacheCheck >> KqpQuery::DdlInDataQuery [GOOD] >> KqpQuery::CreateAsSelect_BadCases >> KqpQuery::RandomUuid [GOOD] >> KqpQuery::ReadOverloaded+StreamLookup >> KqpQuery::DeleteWhereInSubquery [GOOD] >> KqpQuery::DictJoin >> KqpParams::MissingParameter [GOOD] >> KqpParams::MissingOptionalParameter-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootExportWithLostAnswer [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864211.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=150864211.000000s;Name=;Codec=}; 
WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864211.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130864211.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130863011.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130863011.000000s;Name=;Codec=}; 2025-06-25T15:00:13.254907Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:00:13.274492Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:00:13.274652Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:00:13.279711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:00:13.279877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:00:13.280075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:00:13.280147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:00:13.280204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:00:13.280273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:00:13.280376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:00:13.280471Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:00:13.280543Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:00:13.280621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:00:13.280740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:00:13.304165Z node 1 :TX_COLUMNSHARD DEBUG: 
ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:00:13.304298Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:00:13.304383Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:00:13.304545Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:13.304683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:00:13.304766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:00:13.304814Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:00:13.304892Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:00:13.304951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:00:13.304993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:00:13.305032Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:00:13.305195Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:13.305255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:00:13.305294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:00:13.305321Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:00:13.305402Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:00:13.305454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:00:13.305491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:00:13.305519Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:00:13.305561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:00:13.305596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:00:13.305630Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:00:13.305804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:00:13.305843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:00:13.305871Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:00:13.306041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:00:13.306103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:00:13.306146Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:00:13.306286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:00:13.306328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:00:13.306357Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:00:13.306425Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:00:13.306483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:00:13.306521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:00:13.306570Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:00:13.306777Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=45; 2025-06-25T15:00:13.306868Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=42; 2025-06-25T15:00:13.3069 ... p:40;memory_size=17006;data_size=16980;sum=338168;count=20;size_of_portion=208; 2025-06-25T15:01:01.757206Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=3654; 2025-06-25T15:01:01.757271Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=11; 2025-06-25T15:01:01.757443Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=125; 2025-06-25T15:01:01.757484Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=4077; 2025-06-25T15:01:01.757563Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=4221; 2025-06-25T15:01:01.757641Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=11; 2025-06-25T15:01:01.757769Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=69; 2025-06-25T15:01:01.757811Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=4947; 2025-06-25T15:01:01.757950Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=81; 2025-06-25T15:01:01.758075Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=73; 2025-06-25T15:01:01.758221Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=89; 2025-06-25T15:01:01.758351Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=79; 2025-06-25T15:01:01.760724Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2317; 2025-06-25T15:01:01.762784Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=1989; 2025-06-25T15:01:01.762867Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=11; 2025-06-25T15:01:01.762918Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=13; 2025-06-25T15:01:01.762959Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=5; 2025-06-25T15:01:01.763028Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=33; 2025-06-25T15:01:01.763069Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=5; 2025-06-25T15:01:01.763161Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=63; 2025-06-25T15:01:01.763205Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-25T15:01:01.763267Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=30; 2025-06-25T15:01:01.763373Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=56; 2025-06-25T15:01:01.763618Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=200; 2025-06-25T15:01:01.763661Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=18093; 2025-06-25T15:01:01.763810Z node 1 
:TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=9739224;raw_bytes=13544452;count=2;records=160000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:01:01.763918Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:01:01.763982Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:01:01.764054Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:01:01.773902Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-25T15:01:01.774121Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:01.774209Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-25T15:01:01.774279Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863350802;tx_id=18446744073709551615;;current_snapshot_ts=1750863638689; 2025-06-25T15:01:01.774321Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:01.774368Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:01.774416Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:01.774513Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:01:01.775620Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:01:01.776061Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:01:01.776105Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:01:01.776133Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:01:01.776179Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:01.776250Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-25T15:01:01.776347Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863350802;tx_id=18446744073709551615;;current_snapshot_ts=1750863638689; 2025-06-25T15:01:01.776395Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:01.776443Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:01.776481Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:01.776576Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 80000/4873744 0/0 >> KqpStats::RequestUnitForBadRequestExplicitPrepare [GOOD] >> KqpStats::OneShardNonLocalExec+UseSink >> KqpParams::ImplicitDifferentParameterTypesQueryCacheCheck [GOOD] >> KqpParams::DefaultParameterValue >> KqpParams::CheckQueryCacheForExecuteAndPreparedQueries [GOOD] >> KqpParams::CheckCacheByAst >> BackupPathTest::OnlyOneEmptyDirectory >> KqpStats::MultiTxStatsFullExpScan [GOOD] >> KqpStats::JoinStatsBasicYql+StreamLookupJoin >> TColumnShardTestSchema::RebootEnableColdTiersAfterNoEviction [GOOD] >> KqpQuery::RowsLimit [GOOD] >> KqpQuery::RowsLimitServiceOverride >> TColumnShardTestSchema::RebootHotTiers [GOOD] >> TColumnShardTestSchema::ForgetAfterFail [GOOD] >> KqpQuery::GenericQueryNoRowsLimit [GOOD] >> KqpQuery::GenericQueryNoRowsLimitLotsOfRows >> CommonEncryptionRequirementsTest::CommonEncryptionRequirements [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootEnableColdTiersAfterNoEviction [GOOD] Test command err: 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864198.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864198.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864198.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864198.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862998.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130864198.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862998.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130862998.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130862998.000000s;Name=;Codec=}; 2025-06-25T14:59:58.767620Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:58.794354Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:58.794652Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:58.801495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:58.801721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:58.801948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:58.802054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:58.802144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:58.802249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:58.802344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:58.802442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:58.802535Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:58.802638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:58.802741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:58.827230Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:58.827380Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:58.827437Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:58.827612Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:58.827760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:58.827845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:58.827905Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:58.827996Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:58.828063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:58.828109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:58.828147Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:58.828332Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:58.828412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:58.828459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:58.828488Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:58.828587Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:58.828639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:58.828680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:58.828709Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:58.828756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:58.828795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:58.828826Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:58.829028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:58.829071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:58.829100Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:58.829272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:58.829318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:58.829353Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:58.829494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:58.829542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:58.829573Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:58.829649Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:58.829714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;eve ... d_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=10738; 2025-06-25T15:01:02.638060Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=10; 2025-06-25T15:01:02.638300Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=195; 2025-06-25T15:01:02.638339Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=11239; 2025-06-25T15:01:02.638390Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=11353; 2025-06-25T15:01:02.638448Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=11; 2025-06-25T15:01:02.638634Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=145; 2025-06-25T15:01:02.638671Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=12084; 2025-06-25T15:01:02.638842Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=110; 2025-06-25T15:01:02.638973Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=79; 2025-06-25T15:01:02.639120Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=99; 2025-06-25T15:01:02.639244Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=79; 2025-06-25T15:01:02.644118Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=4812; 2025-06-25T15:01:02.649134Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=4923; 2025-06-25T15:01:02.649234Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=13; 2025-06-25T15:01:02.649289Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=10; 2025-06-25T15:01:02.649356Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T15:01:02.649442Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=45; 2025-06-25T15:01:02.649500Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-25T15:01:02.649591Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=57; 2025-06-25T15:01:02.649630Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=5; 2025-06-25T15:01:02.649697Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=31; 2025-06-25T15:01:02.649795Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=61; 2025-06-25T15:01:02.650163Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=321; 2025-06-25T15:01:02.650212Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=31394; 2025-06-25T15:01:02.650357Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=29251936;raw_bytes=43173354;count=6;records=480000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:01:02.650464Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:01:02.650530Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:01:02.650597Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:01:02.671437Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-25T15:01:02.671574Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:02.671669Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-25T15:01:02.671734Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863348600;tx_id=18446744073709551615;;current_snapshot_ts=1750863600078; 2025-06-25T15:01:02.671779Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:02.671827Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:02.671863Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:02.671981Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:01:02.673494Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:01:02.674150Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:01:02.674184Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:01:02.674211Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:01:02.674250Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:02.674353Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-25T15:01:02.674418Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863348600;tx_id=18446744073709551615;;current_snapshot_ts=1750863600078; 2025-06-25T15:01:02.674457Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:02.674506Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:02.674543Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:02.674625Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 160000/9752224 80000/4886744 0/0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootHotTiers [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864198.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864198.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864198.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864198.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862998.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130864198.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862998.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130862998.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130862998.000000s;Name=;Codec=}; 2025-06-25T14:59:58.798842Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:58.821776Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:58.822031Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:58.828153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:58.828501Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:58.828736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:58.828852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:58.828932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:58.829023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:58.829120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:58.829215Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:58.829302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:58.829395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:58.829489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:58.866800Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:58.866928Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:58.866984Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:58.867131Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:58.867279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:58.867355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:58.867397Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:58.867476Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:58.867531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:58.867569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:58.867605Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:58.867751Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:58.867817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:58.867856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:58.867882Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:58.867959Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:58.868003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:58.868034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:58.868057Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:58.868100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:58.868134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:58.868159Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:58.868360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:58.868416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:58.868458Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:58.868702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:58.868767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:58.868804Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:58.868915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:58.868971Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:58.869011Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:58.869114Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:58.869190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abs ... ad_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=9735; 2025-06-25T15:01:03.059005Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=11; 2025-06-25T15:01:03.059268Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=216; 2025-06-25T15:01:03.059307Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=10234; 2025-06-25T15:01:03.059355Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=10365; 2025-06-25T15:01:03.059418Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=10; 2025-06-25T15:01:03.059627Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=167; 2025-06-25T15:01:03.059673Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=11096; 2025-06-25T15:01:03.059828Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=102; 2025-06-25T15:01:03.059957Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=75; 2025-06-25T15:01:03.060102Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=96; 2025-06-25T15:01:03.060232Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=86; 2025-06-25T15:01:03.064116Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=3805; 2025-06-25T15:01:03.068041Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=3824; 2025-06-25T15:01:03.068137Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=14; 2025-06-25T15:01:03.068190Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=12; 2025-06-25T15:01:03.068230Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=5; 2025-06-25T15:01:03.068301Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=38; 2025-06-25T15:01:03.068359Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=5; 2025-06-25T15:01:03.068442Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=49; 2025-06-25T15:01:03.068489Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=5; 2025-06-25T15:01:03.068554Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=30; 2025-06-25T15:01:03.068648Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=58; 
2025-06-25T15:01:03.068979Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=292; 2025-06-25T15:01:03.069015Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=27523; 2025-06-25T15:01:03.069153Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=29251936;raw_bytes=43173354;count=6;records=480000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:01:03.069263Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:01:03.069310Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:01:03.069373Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:01:03.087956Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-25T15:01:03.088109Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:03.088193Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-25T15:01:03.088260Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863348636;tx_id=18446744073709551615;;current_snapshot_ts=1750863600114; 2025-06-25T15:01:03.088302Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:03.088367Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:03.088408Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:03.088496Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:01:03.089722Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:01:03.089816Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:01:03.089846Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:01:03.089873Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:01:03.089917Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:03.089986Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=4; 2025-06-25T15:01:03.090046Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863348636;tx_id=18446744073709551615;;current_snapshot_ts=1750863600114; 2025-06-25T15:01:03.090088Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:03.090133Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:03.090173Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:03.090268Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2039:3848];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 160000/9752224 80000/4886744 0/0 >> KqpTypes::UnsafeTimestampCastV1 [GOOD] >> KqpTypes::Time64Columns+EnableTableDatetime64 >> KqpQuery::CreateAsSelectPath+UseTablePathPrefix [GOOD] >> KqpQuery::CreateAsSelectPath-UseTablePathPrefix >> TColumnShardTestSchema::RebootEnableColdTiersAfterTtl [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ForgetAfterFail [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864195.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864195.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862995.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; 2025-06-25T14:59:57.337876Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:57.364390Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:57.364635Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:57.370986Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:57.371213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:57.371435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:57.371544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:57.371662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:57.371761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:57.371858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:57.371961Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:57.372060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 
2025-06-25T14:59:57.372158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:57.372283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:57.407528Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:57.407664Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:57.407742Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:57.407895Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:57.408029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:57.408104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:57.408145Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:57.408229Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:57.408275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:57.408329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:57.408366Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:57.408521Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:57.408577Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:57.408631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:57.408658Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:57.408734Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:57.408796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:57.408842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:57.408866Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:57.408926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:57.408964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:57.408991Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:57.409156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:57.409190Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:57.409215Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:57.409409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:57.409453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:57.409483Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:57.409606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:57.409648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:57.409679Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:57.409750Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:57.409806Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:57.409840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:57.409865Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:57.410059Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=46; 2025-06-25T14:59:57.410135Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=40; 2025-06-25T14:59:57.410222Z node 1 :TX_COLUMNSHA ... =produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:03.451542Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=160000;finished=1; 2025-06-25T15:01:03.451591Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:01:03.451929Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:01:03.452084Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:160000;schema=timestamp: timestamp[us];);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:03.452128Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:01:03.452231Z node 1 :TX_COLUMNSHARD_SCAN 
DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=160000; 2025-06-25T15:01:03.452288Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1280000;num_rows=160000;batch_columns=timestamp; 2025-06-25T15:01:03.452569Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1237:3172];bytes=1280000;rows=160000;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us]; Got TEvKqpCompute::TEvScanData [1:1238:3173]->[1:1237:3172] 2025-06-25T15:01:03.452709Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:03.452814Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:03.452910Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:03.453060Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:01:03.453156Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:03.453250Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:03.453289Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1238:3173] finished for tablet 9437184 2025-06-25T15:01:03.453736Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1237:3172];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.594},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.596}],"full":{"a":1750863662857145,"name":"_full_task","f":1750863662857145,"d_finished":0,"c":0,"l":1750863663453339,"d":596194},"events":[{"name":"bootstrap","f":1750863662857320,"d_finished":2583,"c":1,"l":1750863662859903,"d":2583},{"a":1750863663453038,"name":"ack","f":1750863663451900,"d_finished":1039,"c":1,"l":1750863663452939,"d":1340},{"a":1750863663453023,"name":"processing","f":1750863662862745,"d_finished":328226,"c":8,"l":1750863663452941,"d":328542},{"name":"ProduceResults","f":1750863662858621,"d_finished":2646,"c":11,"l":1750863663453274,"d":2646},{"a":1750863663453277,"name":"Finish","f":1750863663453277,"d_finished":0,"c":0,"l":1750863663453339,"d":62},{"name":"task_result","f":1750863662862765,"d_finished":327002,"c":7,"l":1750863663451714,"d":327002}],"id":"9437184::7"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:03.453794Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1237:3172];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:01:03.454174Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1237:3172];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_task_result"],"t":0.594},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.596}],"full":{"a":1750863662857145,"name":"_full_task","f":1750863662857145,"d_finished":0,"c":0,"l":1750863663453831,"d":596686},"events":[{"name":"bootstrap","f":1750863662857320,"d_finished":2583,"c":1,"l":1750863662859903,"d":2583},{"a":1750863663453038,"name":"ack","f":1750863663451900,"d_finished":1039,"c":1,"l":1750863663452939,"d":1832},{"a":1750863663453023,"name":"processing","f":1750863662862745,"d_finished":328226,"c":8,"l":1750863663452941,"d":329034},{"name":"ProduceResults","f":1750863662858621,"d_finished":2646,"c":11,"l":1750863663453274,"d":2646},{"a":1750863663453277,"name":"Finish","f":1750863663453277,"d_finished":0,"c":0,"l":1750863663453831,"d":554},{"name":"task_result","f":1750863662862765,"d_finished":327002,"c":7,"l":1750863663451714,"d":327002}],"id":"9437184::7"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1238:3173]->[1:1237:3172] 2025-06-25T15:01:03.454242Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:01:02.856743Z;index_granules=0;index_portions=1;index_batches=1146;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=10565848;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=10565848;selected_rows=0; 2025-06-25T15:01:03.454280Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:01:03.454509Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=3;SelfId=[1:1238:3173];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/10565848 160000/10565848 0/0 160000/10565848 >> KqpExplain::PrecomputeRange >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATE32 >> KqpExplain::ExplainDataQuery [GOOD] >> KqpExplain::ExplainDataQueryWithParams >> KqpQuery::CreateAsSelectTypes+NotNull-IsOlap [GOOD] 
>> KqpQuery::CreateAsSelectTypes-NotNull+IsOlap >> EncryptedBackupParamsValidationTest::BadSourcePath ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootEnableColdTiersAfterTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=150864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864198.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864198.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864198.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864198.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862998.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130864198.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130864198.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130862998.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130862998.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130862998.000000s;Name=;Codec=}; 2025-06-25T14:59:58.909235Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:58.934556Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:58.934835Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:58.941735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:58.941967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:58.942211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:58.942333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:58.942438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:58.942547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:58.942656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:58.942769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:58.942877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:58.942982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:58.943095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:58.967559Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:58.967703Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:58.967759Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:58.967923Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:58.968088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:58.968180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:58.968233Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:58.968339Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:58.968407Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:58.968457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:58.968513Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:58.968697Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:58.968766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:58.968813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:58.968843Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:58.968923Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:58.968974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:58.969014Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:58.969041Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:58.969096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:58.969135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:58.969165Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:58.969343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:58.969393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:58.969425Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:58.969598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:58.969648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:58.969701Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:58.969829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:58.969868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:58.969896Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:58.969969Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:58.970033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cp ... 
ad_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=10354; 2025-06-25T15:01:04.281200Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=11; 2025-06-25T15:01:04.281465Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=215; 2025-06-25T15:01:04.281507Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=10892; 2025-06-25T15:01:04.281585Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=11040; 2025-06-25T15:01:04.281653Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=13; 2025-06-25T15:01:04.281846Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=143; 2025-06-25T15:01:04.281885Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=11821; 2025-06-25T15:01:04.282035Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=96; 2025-06-25T15:01:04.282156Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=72; 2025-06-25T15:01:04.282309Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=104; 2025-06-25T15:01:04.282446Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=90; 2025-06-25T15:01:04.289440Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=6925; 2025-06-25T15:01:04.293994Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=4439; 2025-06-25T15:01:04.294106Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=14; 2025-06-25T15:01:04.294162Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=11; 2025-06-25T15:01:04.294204Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T15:01:04.294284Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=42; 2025-06-25T15:01:04.294328Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-25T15:01:04.294412Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=50; 2025-06-25T15:01:04.294476Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=7; 2025-06-25T15:01:04.294549Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=35; 2025-06-25T15:01:04.294639Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=53; 2025-06-25T15:01:04.295027Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=347; 2025-06-25T15:01:04.295078Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=33031; 2025-06-25T15:01:04.295217Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=29251936;raw_bytes=43173354;count=6;records=480000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:01:04.295336Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:01:04.295390Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:01:04.295457Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:01:04.314649Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-25T15:01:04.314811Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:04.314910Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=3; 2025-06-25T15:01:04.314987Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863348735;tx_id=18446744073709551615;;current_snapshot_ts=1750863600213; 2025-06-25T15:01:04.315032Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:04.315081Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:04.315120Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:04.315206Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:01:04.316600Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:01:04.320925Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:01:04.320983Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:01:04.321011Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:01:04.321058Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:04.321165Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=3; 2025-06-25T15:01:04.321233Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863348735;tx_id=18446744073709551615;;current_snapshot_ts=1750863600213; 2025-06-25T15:01:04.321276Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:04.321324Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:04.321361Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:04.321467Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:2010:3819];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 160000/9752224 160000/9752224 160000/9752224 80000/4886744 0/0 >> KqpLimits::KqpMkqlMemoryLimitException >> BackupRestore::RestoreReplicationWithoutSecret [GOOD] >> BackupRestore::RestoreExternalDataSourceWithoutSecret >> KqpExplain::UpdateConditional-UseSink [GOOD] >> KqpExplain::UpdateConditionalKey+UseSink >> KqpExplain::ExplainStream [GOOD] >> KqpExplain::ExplainScanQueryWithParams >> KqpLimits::ComputeActorMemoryAllocationFailure+useSink >> KqpLimits::QSReplySizeEnsureMemoryLimits+useSink [GOOD] >> KqpLimits::QSReplySizeEnsureMemoryLimits-useSink >> KqpLimits::ComputeActorMemoryAllocationFailure-useSink >> KqpLimits::QueryReplySize >> BackupRestore::TestAllPrimitiveTypes-JSON [GOOD] >> BackupRestore::TestAllPrimitiveTypes-JSON_DOCUMENT >> CompressExecutor::TestReorderedExecutor [GOOD] >> CompressExecutor::TestExecutorMemUsage >> TColumnShardTestSchema::EnableColdTiersAfterNoEviction [GOOD] >> 
TColumnShardTestSchema::RebootForgetWithLostAnswer [GOOD] >> KqpParams::DefaultParameterValue [GOOD] >> KqpParams::Decimal-QueryService-UseSink >> BackupPathTest::OnlyOneEmptyDirectory [GOOD] >> KqpExplain::SelfJoin3xSameLabels [GOOD] >> KqpExplain::SqlIn >> KqpParams::ImplicitSameParameterTypesQueryCacheCheck [GOOD] >> KqpParams::InvalidJson >> KqpTypes::Time64Columns+EnableTableDatetime64 [GOOD] >> KqpTypes::Time64Columns-EnableTableDatetime64 >> KqpQuery::QueryCachePermissionsLoss [GOOD] >> KqpQuery::QueryCancelWrite >> KqpParams::MissingOptionalParameter-UseSink [GOOD] >> KqpParams::ParameterTypes >> TColumnShardTestSchema::RebootOneColdTier [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::EnableColdTiersAfterNoEviction [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864209.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864209.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150864209.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864209.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864209.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150864209.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130863009.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130864209.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130864209.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130863009.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130863009.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130863009.000000s;Name=;Codec=}; 2025-06-25T15:00:09.522932Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:00:09.546919Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:00:09.547176Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:00:09.553825Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:00:09.554085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:00:09.554335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:00:09.554470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:00:09.554570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:00:09.554689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:00:09.554800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:00:09.554915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:00:09.555024Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:00:09.555136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:00:09.555236Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:00:09.577199Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:00:09.577346Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:00:09.577401Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:00:09.577529Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:09.577681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:00:09.577751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:00:09.577784Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:00:09.577845Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:00:09.577888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:00:09.577919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:00:09.577959Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:00:09.578088Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:09.578136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:00:09.578166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:00:09.578186Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:00:09.578247Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:00:09.578282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:00:09.578318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:00:09.578345Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:00:09.578398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:00:09.578436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:00:09.578466Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-25T15:00:09.578667Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:00:09.578717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:00:09.578756Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:00:09.578890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:00:09.578923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:00:09.578949Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:00:09.579052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:00:09.579087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:00:09.579110Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:00:09.579163Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:00:09.579206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;eve ... 
ARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=4;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:07.822595Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:07.822633Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:07.822716Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:01:07.822849Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863657848:max} readable: {1750863657848:max} at tablet 9437184 2025-06-25T15:01:07.822945Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:01:07.823054Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863657848:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:01:07.823093Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863657848:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:01:07.823438Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863657848:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T15:01:07.823502Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863657848:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:01:07.823920Z node 1 :TX_COLUMNSHARD_SCAN 
DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863657848:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:1446:3390];trace_detailed=; 2025-06-25T15:01:07.824323Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T15:01:07.824546Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:01:07.824746Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:07.824862Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:07.825142Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:01:07.825248Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:07.825333Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:07.825370Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1446:3390] finished for tablet 9437184 2025-06-25T15:01:07.825789Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1445:3389];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863667823858,"name":"_full_task","f":1750863667823858,"d_finished":0,"c":0,"l":1750863667825434,"d":1576},"events":[{"name":"bootstrap","f":1750863667824048,"d_finished":844,"c":1,"l":1750863667824892,"d":844},{"a":1750863667825118,"name":"ack","f":1750863667825118,"d_finished":0,"c":0,"l":1750863667825434,"d":316},{"a":1750863667825099,"name":"processing","f":1750863667825099,"d_finished":0,"c":0,"l":1750863667825434,"d":335},{"name":"ProduceResults","f":1750863667824656,"d_finished":416,"c":2,"l":1750863667825353,"d":416},{"a":1750863667825357,"name":"Finish","f":1750863667825357,"d_finished":0,"c":0,"l":1750863667825434,"d":77}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:07.825876Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1445:3389];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:01:07.826255Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1445:3389];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750863667823858,"name":"_full_task","f":1750863667823858,"d_finished":0,"c":0,"l":1750863667825929,"d":2071},"events":[{"name":"bootstrap","f":1750863667824048,"d_finished":844,"c":1,"l":1750863667824892,"d":844},{"a":1750863667825118,"name":"ack","f":1750863667825118,"d_finished":0,"c":0,"l":1750863667825929,"d":811},{"a":1750863667825099,"name":"processing","f":1750863667825099,"d_finished":0,"c":0,"l":1750863667825929,"d":830},{"name":"ProduceResults","f":1750863667824656,"d_finished":416,"c":2,"l":1750863667825353,"d":416},{"a":1750863667825357,"name":"Finish","f":1750863667825357,"d_finished":0,"c":0,"l":1750863667825929,"d":572}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1446:3390]->[1:1445:3389] 2025-06-25T15:01:07.826354Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:01:07.823481Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:01:07.826399Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:01:07.826510Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1446:3390];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 240000/14617704 160000/9752224 160000/9752224 80000/4886744 0/0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootForgetWithLostAnswer [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864218.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=150864218.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864218.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130864218.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130863018.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130863018.000000s;Name=;Codec=}; 2025-06-25T15:00:19.991065Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:00:20.015351Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:00:20.015621Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:00:20.022545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:00:20.022766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:00:20.023003Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:00:20.023111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:00:20.023216Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:00:20.023315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:00:20.023424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:00:20.023524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:00:20.023622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:00:20.023720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:00:20.023828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:00:20.047456Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:00:20.047571Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:00:20.047612Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:00:20.047742Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:20.047864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:00:20.047945Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:00:20.047981Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:00:20.048044Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:00:20.048089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:00:20.048128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:00:20.048167Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:00:20.048348Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:20.048423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:00:20.048454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:00:20.048478Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:00:20.048541Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:00:20.048579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:00:20.048607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:00:20.048627Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:00:20.048660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:00:20.048687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-25T15:00:20.048706Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:00:20.048835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:00:20.048861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:00:20.048881Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:00:20.049006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:00:20.049038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:00:20.049060Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:00:20.049167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:00:20.049216Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:00:20.049239Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:00:20.049295Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:00:20.049342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:00:20.049370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:00:20.049392Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:00:20.049564Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=43; 2025-06-25T15:00:20.049641Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; 2025-06-25T15:00:20.0497 ... 
p:40;memory_size=17006;data_size=16980;sum=338168;count=20;size_of_portion=208; 2025-06-25T15:01:08.011517Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=3554; 2025-06-25T15:01:08.011608Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=23; 2025-06-25T15:01:08.011785Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=121; 2025-06-25T15:01:08.011829Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=4002; 2025-06-25T15:01:08.011869Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=4114; 2025-06-25T15:01:08.011944Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=10; 2025-06-25T15:01:08.012049Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=63; 2025-06-25T15:01:08.012079Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=4783; 2025-06-25T15:01:08.012213Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=81; 2025-06-25T15:01:08.012347Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=80; 2025-06-25T15:01:08.012487Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=95; 2025-06-25T15:01:08.012597Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=69; 2025-06-25T15:01:08.014380Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=1730; 2025-06-25T15:01:08.016215Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=1768; 2025-06-25T15:01:08.016296Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=11; 2025-06-25T15:01:08.016369Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=10; 2025-06-25T15:01:08.016417Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=7; 2025-06-25T15:01:08.016501Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=40; 2025-06-25T15:01:08.016542Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=5; 2025-06-25T15:01:08.016629Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=54; 2025-06-25T15:01:08.016687Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=7; 2025-06-25T15:01:08.016778Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=55; 2025-06-25T15:01:08.016875Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=58; 2025-06-25T15:01:08.017113Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=193; 2025-06-25T15:01:08.017151Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=15817; 2025-06-25T15:01:08.017285Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=9739224;raw_bytes=13544452;count=2;records=160000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:01:08.017400Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:01:08.017465Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:01:08.017530Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:01:08.024050Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-25T15:01:08.024147Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:08.024211Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-25T15:01:08.024266Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863357530;tx_id=18446744073709551615;;current_snapshot_ts=1750863645417; 2025-06-25T15:01:08.024303Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:08.024388Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:08.024416Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:08.024475Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:01:08.026205Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:01:08.026318Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:01:08.026351Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:01:08.026376Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:01:08.026416Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:08.026480Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-25T15:01:08.026555Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863357530;tx_id=18446744073709551615;;current_snapshot_ts=1750863645417; 2025-06-25T15:01:08.026605Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:08.026660Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:08.026705Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:08.026796Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 80000/4873744 0/0 >> KqpQuery::DictJoin [GOOD] >> KqpQuery::ExecuteWriteQuery >> KqpQuery::RowsLimitServiceOverride [GOOD] >> KqpQuery::SelectCountAsteriskFromVar >> KqpQuery::CreateAsSelectPath-UseTablePathPrefix [GOOD] >> KqpStats::JoinStatsBasicYql+StreamLookupJoin [GOOD] >> KqpStats::JoinStatsBasicYql-StreamLookupJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootOneColdTier [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; 
WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864220.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=150864220.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864220.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130864220.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130863020.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=130863020.000000s;Name=;Codec=}; 2025-06-25T15:00:21.732594Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:00:21.750192Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:00:21.750370Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:00:21.755135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:00:21.755326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:00:21.755483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:00:21.755546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:00:21.755604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:00:21.755660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:00:21.755714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:00:21.755772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:00:21.755826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:00:21.755884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:00:21.755938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:00:21.772644Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:00:21.772737Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:00:21.772772Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:00:21.772877Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:21.772975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:00:21.773036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:00:21.773063Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:00:21.773115Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:00:21.773158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:00:21.773184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:00:21.773210Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:00:21.773317Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:21.773362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:00:21.773389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:00:21.773405Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:00:21.773456Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:00:21.773484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:00:21.773508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:00:21.773522Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:00:21.773549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:00:21.773570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:00:21.773585Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:00:21.773703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:00:21.773723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:00:21.773746Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:00:21.773914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:00:21.773990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:00:21.774019Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:00:21.774122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:00:21.774154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:00:21.774172Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:00:21.774215Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:00:21.774255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:00:21.774276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:00:21.774293Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:00:21.774426Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=33; 2025-06-25T15:00:21.774489Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=34; 2025-06-25T15:00:21.7745 ... .cpp:40;memory_size=17006;data_size=16980;sum=338168;count=20;size_of_portion=208; 2025-06-25T15:01:08.793582Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=3237; 2025-06-25T15:01:08.793647Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=8; 2025-06-25T15:01:08.793798Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=112; 2025-06-25T15:01:08.793835Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=3632; 2025-06-25T15:01:08.793875Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=3736; 2025-06-25T15:01:08.793924Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=9; 2025-06-25T15:01:08.794021Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=57; 2025-06-25T15:01:08.794057Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=4321; 2025-06-25T15:01:08.794158Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=62; 2025-06-25T15:01:08.794282Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=65; 2025-06-25T15:01:08.794401Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=78; 2025-06-25T15:01:08.794505Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=68; 2025-06-25T15:01:08.796599Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2025; 2025-06-25T15:01:08.798439Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=1775; 2025-06-25T15:01:08.798499Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=9; 2025-06-25T15:01:08.798544Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=8; 2025-06-25T15:01:08.798583Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T15:01:08.798653Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=41; 2025-06-25T15:01:08.798690Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=5; 2025-06-25T15:01:08.798770Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=46; 2025-06-25T15:01:08.798816Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=13; 2025-06-25T15:01:08.798888Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=34; 2025-06-25T15:01:08.798967Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=43; 2025-06-25T15:01:08.799184Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=183; 
2025-06-25T15:01:08.799225Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=15967; 2025-06-25T15:01:08.799348Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=9739224;raw_bytes=13544452;count=2;records=160000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:01:08.799443Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:01:08.799485Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:01:08.799554Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:01:08.808488Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-25T15:01:08.808622Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:08.808708Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-25T15:01:08.808773Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863359278;tx_id=18446744073709551615;;current_snapshot_ts=1750863647165; 2025-06-25T15:01:08.808808Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:08.808873Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:08.808913Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:08.808988Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:01:08.810444Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:01:08.811125Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:01:08.811158Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:01:08.811181Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:01:08.811219Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:08.811296Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-25T15:01:08.811348Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863359278;tx_id=18446744073709551615;;current_snapshot_ts=1750863647165; 2025-06-25T15:01:08.811395Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:08.811449Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:08.811503Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:08.811599Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1336:3185];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/9739224 160000/9739224 80000/4873744 0/0 >> BackupPathTest::ExportRecursiveWithoutDestinationPrefix >> KqpLimits::ComputeActorMemoryAllocationFailure-useSink [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService+useSink >> KqpLimits::ComputeActorMemoryAllocationFailure+useSink [GOOD] >> KqpLimits::CancelAfterRwTx+useSink >> KqpExplain::PrecomputeRange [GOOD] >> KqpExplain::PureExpr >> KqpParams::CheckQueryCacheForUnpreparedQuery >> EncryptedBackupParamsValidationTest::BadSourcePath [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::CreateAsSelectPath-UseTablePathPrefix [GOOD] Test command err: Trying to start YDB, gRPC: 27494, MsgBus: 13920 2025-06-25T15:00:51.947145Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902121336618912:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.947609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001422/r3tmp/tmp45vFn1/pdisk_1.dat 2025-06-25T15:00:52.385366Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:52.406745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:52.406848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:52.409367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27494, node 1 2025-06-25T15:00:52.522388Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:52.522441Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:52.522454Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:52.522562Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13920 2025-06-25T15:00:52.952684Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:13920 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:53.126806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:54.967639Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902134221521308:2289], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.967786Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.985422Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902134221521342:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.997015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.011661Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902134221521346:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T15:00:55.097618Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902138516488693:2337] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:55.532096Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519902138516488715:2301], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:6:62: Error: At function: KiCreateTable!
:6:20: Error: Invalid type for column: Value. Only YQL data types and PG types are currently supported, code: 2031 2025-06-25T15:00:55.532371Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YWQ0MDUxOTYtN2I3OTVjNWMtYTY5Y2U1ZmYtYWRjYTJhNTI=, ActorId: [1:7519902134221521303:2287], ActorState: ExecuteState, TraceId: 01jykspa9fd9tnjgn2j0t7yaxw, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T15:00:55.601232Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519902138516488739:2310], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:6:45: Error: At function: KiCreateTable!
:6:20: Error: Invalid type for column: Value. Only YQL data types and PG types are currently supported, code: 2031 2025-06-25T15:00:55.601452Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NDUwMWViMzktNmYzNzVjMmMtZTQ3NzE5NGMtZjBmZjg5NTY=, ActorId: [1:7519902138516488728:2304], ActorState: ExecuteState, TraceId: 01jykspcm51bhfh0reymwqm41r, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-25T15:00:55.648372Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519902138516488751:2316], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:6:43: Error: At function: KiCreateTable!
:6:20: Error: Invalid type for column: Value. Only YQL data types and PG types are currently supported, code: 2031 2025-06-25T15:00:55.649143Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZDcxNjdjNGUtMmY1YmFlNDgtZjI0ZDVhNDktMTI0MzkyZDc=, ActorId: [1:7519902138516488745:2313], ActorState: ExecuteState, TraceId: 01jykspcnv5p08r875kssty1j6, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: Trying to start YDB, gRPC: 27163, MsgBus: 2820 2025-06-25T15:00:56.455871Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902145224152949:2056];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.455952Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001422/r3tmp/tmpdGhw0Q/pdisk_1.dat 2025-06-25T15:00:56.635649Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:56.640603Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902145224152934:2080] 1750863656454741 != 1750863656454744 2025-06-25T15:00:56.652415Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:56.652522Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:56.654709Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27163, node 2 2025-06-25T15:00:56.744931Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:56.744963Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:56.744970Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:56.745077Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2820 TClient is connected to server localhost:2820 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:00:57.165305Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:57.170422Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:00:57.507201Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:59.379767Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902158109055438:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Err ... pose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:00.810527Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:01:00.833804Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-25T15:01:00.845816Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:01:01.237630Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:03.183659Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519902175092300907:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:03.183746Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:03.184136Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519902175092300927:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:03.188027Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:03.201998Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519902175092300929:2301], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-25T15:01:03.263498Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519902175092300980:2360] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:03.302023Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.653324Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 28168, MsgBus: 4623 2025-06-25T15:01:04.849919Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519902180565549873:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:04.854739Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001422/r3tmp/tmpKzITnT/pdisk_1.dat 2025-06-25T15:01:05.011751Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:05.011849Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:05.013155Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:05.027864Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28168, node 4 2025-06-25T15:01:05.078433Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:05.078457Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:05.078465Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:05.078589Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4623 TClient is connected to server localhost:4623 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:05.592826Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:05.604809Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:05.612197Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:01:05.652652Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-25T15:01:05.669062Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:01:05.849815Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:08.138040Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902197745419632:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:08.138043Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902197745419608:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:08.138128Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:08.142052Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:08.154251Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902197745419645:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T15:01:08.239206Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902197745419696:2361] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:08.268795Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:08.516230Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpStats::StreamLookupStats+StreamLookupJoin >> KqpExplain::ExplainDataQueryWithParams [GOOD] >> KqpExplain::CreateTableAs+Stats >> KqpQuery::GenericQueryNoRowsLimitLotsOfRows [GOOD] >> KqpQuery::NoEvaluate >> KqpExplain::ExplainScanQueryWithParams [GOOD] >> KqpExplain::FewEffects+UseSink >> KqpQuery::SelectWhereInSubquery >> EncryptedBackupParamsValidationTest::NoDestination >> KqpExplain::UpdateConditionalKey+UseSink [GOOD] >> KqpExplain::UpdateConditionalKey-UseSink >> KqpQuery::CreateAsSelectTypes-NotNull+IsOlap [GOOD] >> KqpQuery::CreateAsSelectTypes+NotNull+IsOlap >> KqpStats::OneShardNonLocalExec+UseSink [GOOD] >> KqpStats::OneShardNonLocalExec-UseSink >> KqpTypes::Time64Columns-EnableTableDatetime64 [GOOD] >> KqpQuery::OlapCreateAsSelect_Simple >> KqpLimits::QSReplySizeEnsureMemoryLimits-useSink [GOOD] >> KqpLimits::QSReplySize+useSink >> TColumnShardTestSchema::HotTiersAfterTtl [GOOD] >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATE32 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME64 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpTypes::Time64Columns-EnableTableDatetime64 [GOOD] Test command err: Trying to start YDB, gRPC: 29267, MsgBus: 17690 2025-06-25T15:00:53.446350Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902132497420630:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:53.453995Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00141a/r3tmp/tmpCJuUek/pdisk_1.dat 2025-06-25T15:00:53.933551Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:53.936611Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:53.936718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-25T15:00:53.950259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29267, node 1 2025-06-25T15:00:54.011671Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:54.011695Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:54.011703Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:54.011872Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17690 TClient is connected to server localhost:17690 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T15:00:54.417060Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:54.557529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:54.574442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:00:54.581549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.722495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:00:54.867418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.955043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:56.538033Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902145382323974:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:56.538114Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:56.819456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:56.892162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:56.918035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:56.945286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:56.995561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:57.022990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:57.089045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:57.171624Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902149677291936:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:57.171686Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:57.171729Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902149677291941:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:57.175028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:57.184661Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902149677291943:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:00:57.253701Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902149677291994:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:58.318345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:58.442691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902132497420630:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:58.442770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout;
: Warning: Optimization, code: 1070
:3:29: Warning: Unsafe conversion integral value to Timestamp, consi ... main>: Error: Type annotation, code: 1030
:6:25: Error: At function: AsList
:6:46: Error: At function: AsStruct, At tuple
:3:29: Error: At function: Just, At function: UnsafeTimestampCast
:3:29: Error: Unsafe timestamp cast restricted from SQL v1. Trying to start YDB, gRPC: 32727, MsgBus: 28899 2025-06-25T15:01:04.718814Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519902180732837172:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:04.718861Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00141a/r3tmp/tmpEHMxVK/pdisk_1.dat 2025-06-25T15:01:04.827438Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:04.827520Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:04.828776Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:04.841365Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 32727, node 3 2025-06-25T15:01:04.916853Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:04.916880Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:04.916889Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:04.917014Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28899 TClient is connected to server localhost:28899 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:05.367028Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:01:05.373915Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:01:05.728426Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:07.753613Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519902193617739653:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:07.753703Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:07.763561Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:07.808559Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519902193617739753:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:07.808632Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:07.808849Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519902193617739758:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:07.813826Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:07.829807Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519902193617739760:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:01:07.887166Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519902193617739811:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 9828, MsgBus: 6283 2025-06-25T15:01:09.024755Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519902198490704898:2196];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00141a/r3tmp/tmp7KasYM/pdisk_1.dat 2025-06-25T15:01:09.095943Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T15:01:09.145484Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:09.148288Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519902194195737428:2080] 1750863668989888 != 1750863668989891 TServer::EnableGrpc on GrpcPort 9828, node 4 2025-06-25T15:01:09.165710Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:09.165818Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:09.167322Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:09.229209Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:09.229241Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:09.229250Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:09.229415Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6283 TClient is connected to server localhost:6283 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:01:09.811256Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:09.817456Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:01:10.024775Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:12.816592Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902211375607244:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:12.827348Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:12.830804Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902211375607265:2302] txid# 281474976715658, issues: { message: "Type \'Datetime64\' specified for column \'DatetimePK\', but support for new date/time 64 types is disabled (EnableTableDatetime64 feature flag is off)" severity: 1 } >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService+useSink [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService-useSink >> KqpQuery::CreateAsSelect_BadCases [GOOD] >> KqpQuery::CreateAsSelectView >> KqpLimits::KqpMkqlMemoryLimitException [GOOD] >> KqpLimits::LargeParametersAndMkqlFailure ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::HotTiersAfterTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=150864214.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864214.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864214.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150864214.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864214.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864214.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=150864214.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130863014.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130864214.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130864214.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130863014.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130863014.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=130863014.000000s;Name=;Codec=}; 2025-06-25T15:00:15.243079Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:00:15.268220Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:00:15.268531Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:00:15.275597Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:00:15.275826Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:00:15.276057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:00:15.276165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:00:15.276264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:00:15.276392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:00:15.276494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:00:15.276595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:00:15.276693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:00:15.276792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:00:15.276891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:00:15.301044Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:00:15.301175Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:00:15.301232Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:00:15.301395Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:15.301538Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:00:15.301614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:00:15.301657Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:00:15.301749Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:00:15.301821Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:00:15.301866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:00:15.301912Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:00:15.302084Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:15.302159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:00:15.302204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:00:15.302233Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:00:15.302319Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:00:15.302369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:00:15.302406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:00:15.302433Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:00:15.302485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:00:15.302526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:00:15.302555Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-25T15:00:15.302742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:00:15.302784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:00:15.302855Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:00:15.303028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:00:15.303073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:00:15.303106Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:00:15.303213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:00:15.303252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:00:15.303277Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:00:15.303352Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:00:15.303403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;f ... 
HARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:13.842372Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:13.842417Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:13.842514Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:01:13.842704Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863663572:max} readable: {1750863663572:max} at tablet 9437184 2025-06-25T15:01:13.842843Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:01:13.842996Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863663572:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:01:13.843052Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863663572:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-25T15:01:13.843602Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863663572:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-25T15:01:13.843704Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863663572:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:01:13.844176Z node 1 :TX_COLUMNSHARD_SCAN 
DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863663572:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:1441:3385];trace_detailed=; 2025-06-25T15:01:13.844632Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-25T15:01:13.844846Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-25T15:01:13.845012Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:13.845129Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:13.845411Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:01:13.845528Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:13.845622Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:13.845663Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:1441:3385] finished for tablet 9437184 2025-06-25T15:01:13.846096Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:1440:3384];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863673844114,"name":"_full_task","f":1750863673844114,"d_finished":0,"c":0,"l":1750863673845722,"d":1608},"events":[{"name":"bootstrap","f":1750863673844352,"d_finished":805,"c":1,"l":1750863673845157,"d":805},{"a":1750863673845387,"name":"ack","f":1750863673845387,"d_finished":0,"c":0,"l":1750863673845722,"d":335},{"a":1750863673845367,"name":"processing","f":1750863673845367,"d_finished":0,"c":0,"l":1750863673845722,"d":355},{"name":"ProduceResults","f":1750863673844937,"d_finished":423,"c":2,"l":1750863673845643,"d":423},{"a":1750863673845646,"name":"Finish","f":1750863673845646,"d_finished":0,"c":0,"l":1750863673845722,"d":76}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:01:13.846175Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:1440:3384];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:01:13.846622Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:1440:3384];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750863673844114,"name":"_full_task","f":1750863673844114,"d_finished":0,"c":0,"l":1750863673846218,"d":2104},"events":[{"name":"bootstrap","f":1750863673844352,"d_finished":805,"c":1,"l":1750863673845157,"d":805},{"a":1750863673845387,"name":"ack","f":1750863673845387,"d_finished":0,"c":0,"l":1750863673846218,"d":831},{"a":1750863673845367,"name":"processing","f":1750863673845367,"d_finished":0,"c":0,"l":1750863673846218,"d":851},{"name":"ProduceResults","f":1750863673844937,"d_finished":423,"c":2,"l":1750863673845643,"d":423},{"a":1750863673845646,"name":"Finish","f":1750863673845646,"d_finished":0,"c":0,"l":1750863673846218,"d":572}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1441:3385]->[1:1440:3384] 2025-06-25T15:01:13.846721Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:01:13.843673Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:01:13.846764Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:01:13.846884Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1441:3385];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 160000/9752224 160000/9752224 160000/9752224 80000/4886744 0/0 >> BackupRestore::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER >> KqpQuery::ReadOverloaded+StreamLookup [GOOD] >> KqpQuery::ReadOverloaded-StreamLookup >> KqpParams::InvalidJson [GOOD] >> KqpQuery::QueryCancelWrite [GOOD] >> KqpQuery::QueryCancelWriteImmediate ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] Test command err: 2025-06-25T14:59:21.750825Z :WriteRAW INFO: Random seed for debugging is 1750863561750796 2025-06-25T14:59:22.158177Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901738591908154:2213];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:22.209282Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901741299049981:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:22.209398Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0010ff/r3tmp/tmpxsTbsM/pdisk_1.dat 2025-06-25T14:59:22.421888Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:59:22.410943Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:59:22.477612Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:59:22.672266Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:22.682697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:22.682793Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:22.683530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:22.683583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:22.690590Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:59:22.690798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:22.693333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64568, node 1 2025-06-25T14:59:22.880340Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0010ff/r3tmp/yandextZFKxH.tmp 2025-06-25T14:59:22.880363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0010ff/r3tmp/yandextZFKxH.tmp 2025-06-25T14:59:22.881663Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0010ff/r3tmp/yandextZFKxH.tmp 2025-06-25T14:59:22.881859Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:59:23.108912Z INFO: TTestServer started on Port 63646 GrpcPort 64568 2025-06-25T14:59:23.153012Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:23.217485Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63646 PQClient connected to localhost:64568 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T14:59:23.424725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:59:25.183079Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901751476810850:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.183176Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901751476810842:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.183550Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.186128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:25.198819Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901751476810857:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:59:25.425905Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901751476810940:2667] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:25.470653Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901751476810950:2308], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:59:25.470754Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519901754183952167:2275], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:59:25.471013Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=YjBlNTVmZmQtOTYyYjVhNzAtYzdlMDk3MjUtNWE5ZDNhZTA=, ActorId: [2:7519901754183952141:2269], ActorState: ExecuteState, TraceId: 01jykskmfw9dbm8w5hfy7td5qc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:59:25.474650Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:59:25.476681Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjM2N2UxNWMtMTM0YmRlZDEtMzc4ZThkZDItYTczMjEzYjc=, ActorId: [1:7519901751476810840:2296], ActorState: ExecuteState, TraceId: 01jykskmbx63f4emyef0egymap, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:59:25.476942Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." 
end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:59:25.525743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:25.631255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:25.741528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:64568", true, true, 1000); 2025-06-25T14:59:26.033526Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jykskn0gave3br9ypkpyhyex, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWI2Mjg3YzYtYTc2NWU0NjQtNDB ... 25-06-25T15:01:11.437727Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T15:01:11.437740Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-25T15:01:11.437765Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [15:7519902206706174471:2467] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-25T15:01:11.442353Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [15:7519902206706174471:2467] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-25T15:01:11.666414Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [15:7519902206706174471:2467] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-25T15:01:11.667206Z node 15 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: 
[72075186224037893][rt3.dc1--test-topic] pipe [15:7519902206706174512:2467] connected; active server actors: 1 2025-06-25T15:01:11.667360Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [15:7519902206706174471:2467] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-25T15:01:11.667385Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [15:7519902206706174471:2467] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-25T15:01:11.698798Z node 15 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [15:7519902206706174512:2467] disconnected; active server actors: 1 2025-06-25T15:01:11.698846Z node 15 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [15:7519902206706174512:2467] disconnected no session 2025-06-25T15:01:11.854060Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [15:7519902206706174537:2467], now have 1 active actors on pipe 2025-06-25T15:01:11.852859Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [15:7519902206706174471:2467] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-25T15:01:11.852910Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [15:7519902206706174471:2467] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-25T15:01:11.852933Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [15:7519902206706174471:2467] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-25T15:01:11.852966Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T15:01:11.854479Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 16, Generation: 1 2025-06-25T15:01:11.854772Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T15:01:11.854810Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T15:01:11.854916Z node 16 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|470acb16-6179e85e-97ec76ff-929c3d64_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-25T15:01:11.855035Z node 16 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-06-25T15:01:11.855101Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T15:01:11.855722Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T15:01:11.855745Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T15:01:11.855818Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T15:01:11.856542Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|470acb16-6179e85e-97ec76ff-929c3d64_0 2025-06-25T15:01:11.864476Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750863671864 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T15:01:11.864635Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|470acb16-6179e85e-97ec76ff-929c3d64_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-25T15:01:11.864917Z :INFO: [] MessageGroupId [src] SessionId [src|470acb16-6179e85e-97ec76ff-929c3d64_0] Write session: close. Timeout = 0 ms 2025-06-25T15:01:11.864963Z :INFO: [] MessageGroupId [src] SessionId [src|470acb16-6179e85e-97ec76ff-929c3d64_0] Write session will now close 2025-06-25T15:01:11.865023Z :DEBUG: [] MessageGroupId [src] SessionId [src|470acb16-6179e85e-97ec76ff-929c3d64_0] Write session: aborting 2025-06-25T15:01:11.865506Z :INFO: [] MessageGroupId [src] SessionId [src|470acb16-6179e85e-97ec76ff-929c3d64_0] Write session: gracefully shut down, all writes complete 2025-06-25T15:01:11.865557Z :DEBUG: [] MessageGroupId [src] SessionId [src|470acb16-6179e85e-97ec76ff-929c3d64_0] Write session: destroy 2025-06-25T15:01:11.866349Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|470acb16-6179e85e-97ec76ff-929c3d64_0 grpc read done: success: 0 data: 2025-06-25T15:01:11.866379Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|470acb16-6179e85e-97ec76ff-929c3d64_0 grpc read failed 2025-06-25T15:01:11.866412Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|470acb16-6179e85e-97ec76ff-929c3d64_0 grpc closed 2025-06-25T15:01:11.866431Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|470acb16-6179e85e-97ec76ff-929c3d64_0 is DEAD 2025-06-25T15:01:11.867230Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T15:01:11.867534Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [15:7519902206706174537:2467] destroyed 2025-06-25T15:01:11.867582Z node 16 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
2025-06-25T15:01:11.945470Z :INFO: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Starting read session 2025-06-25T15:01:11.945539Z :DEBUG: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Starting cluster discovery 2025-06-25T15:01:11.945823Z :INFO: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61147: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:61147
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:61147. " 2025-06-25T15:01:11.945875Z :DEBUG: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Restart cluster discovery in 0.009849s 2025-06-25T15:01:11.968473Z :DEBUG: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Starting cluster discovery 2025-06-25T15:01:11.980844Z :INFO: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61147: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:61147
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:61147. " 2025-06-25T15:01:11.980912Z :DEBUG: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Restart cluster discovery in 0.018407s 2025-06-25T15:01:12.004726Z :DEBUG: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Starting cluster discovery 2025-06-25T15:01:12.004948Z :INFO: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61147: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:61147
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:61147. " 2025-06-25T15:01:12.004978Z :DEBUG: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Restart cluster discovery in 0.035729s 2025-06-25T15:01:12.041513Z :DEBUG: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Starting cluster discovery 2025-06-25T15:01:12.041891Z :NOTICE: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Aborting read session. Description: SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61147: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:61147
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:61147. " } 2025-06-25T15:01:12.042418Z :NOTICE: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:61147: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:61147
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:61147. " } 2025-06-25T15:01:12.042577Z :INFO: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Closing read session. Close timeout: 0.000000s 2025-06-25T15:01:12.042710Z :NOTICE: [/Root] [/Root] [371cfa73-69a8ca43-baa946bb-95a741e] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } >> KqpQuery::SelectCountAsteriskFromVar [GOOD] >> KqpExplain::SqlIn [GOOD] >> KqpExplain::SsaProgramInJsonPlan >> KqpParams::ParameterTypes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::InvalidJson [GOOD] Test command err: Trying to start YDB, gRPC: 18851, MsgBus: 26416 2025-06-25T15:00:51.179894Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902123643374808:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.185736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00144b/r3tmp/tmps358oJ/pdisk_1.dat 2025-06-25T15:00:51.648790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:51.648889Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:51.652930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:51.697818Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902123643374684:2080] 1750863651170282 != 1750863651170285 2025-06-25T15:00:51.698825Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18851, node 1 2025-06-25T15:00:51.819538Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:51.819558Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:51.819566Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:51.819687Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:00:52.184443Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:26416 TClient is connected to server localhost:26416 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:00:52.819350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:52.848746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:00:52.869372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:00:53.017925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.238951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:53.308222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.596553Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902136528278219:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.596651Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.866550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.904426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.935430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.003049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.038223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.109567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.140018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.234443Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902140823246185:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.234506Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.234730Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902140823246190:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.239160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.250720Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902140823246192:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:00:55.314620Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902140823246243:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.176800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902123643374808:2161];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.176881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 22786, MsgBus: 21098 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00144b/r3tmp/tmpPNJftm/pdisk_1.dat 2025-06-25T15:00:57.325292Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902147622715273:2227];send_to ... tion=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00144b/r3tmp/tmp0Volo2/pdisk_1.dat 2025-06-25T15:01:09.161939Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519902193981374468:2080] 1750863668922189 != 1750863668922192 2025-06-25T15:01:09.177364Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:09.181660Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:09.181749Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:09.184861Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23607, node 4 2025-06-25T15:01:09.328935Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:09.328959Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:09.328968Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:09.329114Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65093 TClient is connected to server localhost:65093 2025-06-25T15:01:09.992442Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:10.106183Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:10.123484Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:10.194616Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:10.354114Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:10.421613Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:12.878253Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902211161245270:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:12.878372Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:12.932070Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:12.990056Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.047341Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.091334Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.126188Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.183472Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.274444Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.404504Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902215456213228:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.404593Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.405120Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902215456213233:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.414533Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:13.432043Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902215456213235:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:13.487321Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902215456213286:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:13.972454Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902193981374507:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:13.972526Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:14.723099Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:14.862652Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZmY2NzE4YjItMmYwYjMyODgtODNmNDMzZC01NzZkZDI2ZA==, ActorId: [4:7519902219751180853:2474], ActorState: ExecuteState, TraceId: 01jykspzd0bd6p00aqxpqhwm48, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1052: Invalid Json value
: Error: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1052: Invalid Json value >> KqpExplain::PureExpr [GOOD] >> KqpExplain::ReadTableRangesFullScan >> KqpLimits::QueryReplySize [GOOD] >> KqpLimits::ReadsetCountLimit >> KqpExplain::CreateTableAs+Stats [GOOD] >> KqpExplain::CreateTableAs-Stats ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::SelectCountAsteriskFromVar [GOOD] Test command err: Trying to start YDB, gRPC: 28121, MsgBus: 21455 2025-06-25T15:00:51.174960Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902124013096172:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.179491Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00143d/r3tmp/tmpgdfYsS/pdisk_1.dat 2025-06-25T15:00:51.713658Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902124013096010:2080] 1750863651165500 != 1750863651165503 2025-06-25T15:00:51.722062Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28121, node 1 2025-06-25T15:00:51.727135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:51.727232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:51.731538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:51.823378Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:51.823413Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:51.823423Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:51.823565Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:00:52.224477Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21455 TClient is connected to server localhost:21455 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:52.625256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:52.641042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:00:52.656822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:52.834651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:52.979578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:00:53.059468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.592261Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902136897999538:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.592924Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.870023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.902274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.934399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.971930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.056609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.097022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.176328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.233742Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902141192967499:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.233814Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.234163Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902141192967504:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.237656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.246422Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902141192967506:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:00:55.315313Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902141192967559:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.176693Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902124013096172:2194];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.176772Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 11272, MsgBus: 27803 2025-06-25T15:00:57.370451Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902146613475987:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:57.370509Z node 2 :METADATA_PROVIDER ERROR: log.cp ... 46644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:08.464418Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519902176497033646:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:08.464545Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 8005, MsgBus: 31554 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00143d/r3tmp/tmpnoFqjP/pdisk_1.dat 2025-06-25T15:01:09.490986Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519902202017681363:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:09.519390Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T15:01:09.709354Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:09.709441Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:09.725867Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:09.726134Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519902202017681265:2080] 1750863669484524 != 1750863669484527 2025-06-25T15:01:09.726304Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8005, node 4 2025-06-25T15:01:09.796909Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:09.796933Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:09.796943Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T15:01:09.797057Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31554 TClient is connected to server localhost:31554 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:10.328636Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:10.348020Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:10.432584Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:10.553226Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:10.668455Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:10.763936Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:13.092896Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902219197552092:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.092967Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.157977Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.209731Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.290062Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.319724Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.352546Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.435126Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.531918Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.636586Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902219197552765:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.636656Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.636889Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902219197552770:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.640299Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:13.649498Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902219197552772:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:13.705915Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902219197552823:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:14.488767Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902202017681363:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:14.488838Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpStats::JoinStatsBasicYql-StreamLookupJoin [GOOD] >> BackupPathTest::ExportRecursiveWithoutDestinationPrefix [GOOD] >> KqpParams::CheckQueryCacheForUnpreparedQuery [GOOD] >> KqpParams::Decimal+QueryService-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::ParameterTypes [GOOD] Test command err: Trying to start YDB, gRPC: 9513, MsgBus: 31609 2025-06-25T15:00:51.184813Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902123065885109:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.185871Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001449/r3tmp/tmpp8zFvN/pdisk_1.dat 2025-06-25T15:00:51.647252Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902123065885061:2080] 1750863651164102 != 1750863651164105 2025-06-25T15:00:51.662236Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9513, node 1 2025-06-25T15:00:51.673224Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:51.673358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:51.681138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:51.824904Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:51.824928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:51.824950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:51.825067Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:00:52.190388Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:31609 TClient is connected to server localhost:31609 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:52.653585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:52.672337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:00:52.695916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:52.829549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:52.998603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.077097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.523188Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902135950788579:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.523288Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.866503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.907683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.941619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.982792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.036126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.070803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.107705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.212168Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902140245756536:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.212229Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.212442Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902140245756541:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.216413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.227521Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902140245756543:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:00:55.320764Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902140245756596:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.188673Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902123065885109:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.188736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 5764, MsgBus: 23004 2025-06-25T15:00:57.382914Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902147166642485:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:57.383058Z node 2 :METADATA_PROVIDER ERROR: log.cpp:7 ... d: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:08.021869Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519902174416579717:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:08.021942Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 29522, MsgBus: 16373 2025-06-25T15:01:09.146311Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519902201341871726:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:09.154134Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001449/r3tmp/tmpbJXUgu/pdisk_1.dat 2025-06-25T15:01:09.359048Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:09.367916Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:09.368014Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:09.395333Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29522, node 4 2025-06-25T15:01:09.564892Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:09.564916Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:09.564925Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:09.565044Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16373 
2025-06-25T15:01:10.164963Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16373 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:10.276875Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:10.285418Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:01:10.294742Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:10.432853Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:10.584690Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:10.672340Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:13.213498Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902218521742511:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.213601Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.276342Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.316976Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.358785Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.437223Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.482389Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.568544Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.647007Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.729754Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902218521743171:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.729853Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.730286Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902218521743176:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.735064Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:13.753194Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902218521743178:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:13.843996Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902218521743229:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:14.151631Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902201341871726:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:14.151747Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpParams::Decimal-QueryService-UseSink [GOOD] >> KqpExplain::FewEffects+UseSink [GOOD] >> KqpExplain::FewEffects-UseSink >> KqpStats::StreamLookupStats+StreamLookupJoin [GOOD] >> KqpStats::StreamLookupStats-StreamLookupJoin >> KqpQuery::NoEvaluate [GOOD] >> KqpQuery::SelectWhereInSubquery [GOOD] >> KqpQuery::TableSink_ReplaceDataShardDataQuery+UseSink >> KqpLimits::StreamWrite+Allowed [GOOD] >> KqpLimits::StreamWrite-Allowed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::JoinStatsBasicYql-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 28172, MsgBus: 19282 2025-06-25T15:00:51.195782Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902123470726489:2170];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.195969Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001447/r3tmp/tmpLXXbC9/pdisk_1.dat 2025-06-25T15:00:51.619612Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:51.624368Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902123470726357:2080] 1750863651185745 != 1750863651185748 2025-06-25T15:00:51.634379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:51.634478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:51.641342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28172, node 1 2025-06-25T15:00:51.820930Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:51.820955Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:51.820961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:51.821073Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:00:52.238786Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19282 TClient is connected to server localhost:19282 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:52.747154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:52.778031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:52.958505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.134638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:00:53.245140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.531674Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902136355629877:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.531797Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.866833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.907674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.984497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.030640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.066436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.119719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.149169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.205816Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902140650597828:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.205942Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.206238Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902140650597833:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.209653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.224467Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902140650597835:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:00:55.311757Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902140650597888:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.196441Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902123470726489:2170];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.196504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:00:56.707510Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863656689, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 4161, MsgBus: 2086 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001447/r3tmp/tmpuV6Bjn/pdisk_1.dat 2025-06-25T15:00:57.654637Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:75199021477885211 ... 644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:08.350328Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519902174328199880:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:08.350396Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 14612, MsgBus: 64322 2025-06-25T15:01:10.175096Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519902202873045142:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:10.179180Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001447/r3tmp/tmphIsnsB/pdisk_1.dat 2025-06-25T15:01:10.496400Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:10.500505Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519902202873045099:2080] 1750863670168496 != 1750863670168499 2025-06-25T15:01:10.521471Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:10.521572Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:10.524018Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14612, node 4 2025-06-25T15:01:10.640932Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:10.640959Z node 4 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:10.640969Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:10.641098Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64322 TClient is connected to server localhost:64322 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T15:01:11.176818Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:11.227063Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:11.247902Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:11.329477Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:11.524654Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:11.616483Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:14.254948Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902220052915922:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:14.255070Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:14.316760Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:14.396180Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:14.444699Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:14.479126Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:14.519107Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:14.610374Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:14.648761Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:14.719164Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902220052916579:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:14.719279Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:14.719727Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902220052916584:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:14.723489Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:14.732736Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902220052916586:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:14.819740Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902220052916637:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:15.180442Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902202873045142:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:15.180530Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> EncryptedBackupParamsValidationTest::NoDestination [GOOD] >> KqpLimits::ComputeActorMemoryAllocationFailureQueryService-useSink [GOOD] >> KqpLimits::ComputeNodeMemoryLimit >> BackupPathTest::ParallelBackupWholeDatabase >> KqpExplain::UpdateConditionalKey-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::Decimal-QueryService-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 7369, MsgBus: 24422 2025-06-25T15:00:51.177378Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902122042514854:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.177669Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001434/r3tmp/tmpW113YZ/pdisk_1.dat 2025-06-25T15:00:51.608719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:51.608818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:51.612885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:51.636370Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:51.640338Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902122042514688:2080] 1750863651164074 != 1750863651164077 TServer::EnableGrpc on GrpcPort 7369, node 1 2025-06-25T15:00:51.819349Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:51.819375Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:51.819382Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:51.819518Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:00:52.179564Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24422 TClient is connected to server localhost:24422 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:52.619502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:52.644570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:00:52.670700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:52.830509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:52.963213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.054347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.367585Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902134927418218:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.367687Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.866990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.905149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.972488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.003050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.048324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.123067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.160256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.229005Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902139222386178:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.229076Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.229136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902139222386183:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.233322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.242630Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902139222386185:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:00:55.316620Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902139222386236:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.175796Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902122042514854:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.175858Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17192, MsgBus: 28550 2025-06-25T15:00:57.585398Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902148975863935:2154];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001 ... 72057594046644480 2025-06-25T15:01:09.534770Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:09.631512Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:09.766847Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:09.821373Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:09.919997Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:12.776443Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902214665678509:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:12.776549Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:12.866276Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:12.900702Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:12.944873Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:12.981125Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.047626Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.103364Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.192290Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.267259Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902218960646467:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.267340Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.267762Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902218960646472:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:13.271783Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:13.285837Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902218960646474:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:13.378561Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902218960646525:3410] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:13.756985Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902197485807747:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:13.757052Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:14.917896Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.616934Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519902231845548854:2512], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:4:17: Error: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At tuple, At function: SqlProjectItem, At lambda
:3:25: Error: At function: Parameter, At function: DataType
:3:25: Error: Invalid decimal precision: 99 2025-06-25T15:01:16.619010Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=ZjJiOTQwYzYtYTk2ZWZkMzEtODRjNmZiMTctYjE3ZGEzMzI=, ActorId: [4:7519902223255614063:2466], ActorState: ExecuteState, TraceId: 01jyksq154eb8277rzx19qhm18, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T15:01:16.754937Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZjJiOTQwYzYtYTk2ZWZkMzEtODRjNmZiMTctYjE3ZGEzMzI=, ActorId: [4:7519902223255614063:2466], ActorState: ExecuteState, TraceId: 01jyksq16r3mp3t5jhvcgq3384, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1374: ydb/core/kqp/query_data/kqp_query_data.cpp:271: Parameter $value22 type mismatch, expected: { Kind: Data Data { Scheme: 4865 DecimalParams { Precision: 22 Scale: 9 } } }, actual: Type (Data), schemeType: Decimal(35,10), schemeTypeId: 4865 2025-06-25T15:01:16.807843Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519902231845548872:2519], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:7:29: Error: At function: KiWriteTable!
:7:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:4:25: Error: Implicit decimal cast would lose precision
:7:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:7:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T15:01:16.810313Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=ZjJiOTQwYzYtYTk2ZWZkMzEtODRjNmZiMTctYjE3ZGEzMzI=, ActorId: [4:7519902223255614063:2466], ActorState: ExecuteState, TraceId: 01jyksq1b44a0nem7ehpghhahx, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T15:01:16.842329Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519902231845548881:2523], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:29: Error: At function: KiWriteTable!
:3:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:0:14: Error: Implicit decimal cast would lose precision
:3:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:3:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T15:01:16.844908Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=ZjJiOTQwYzYtYTk2ZWZkMzEtODRjNmZiMTctYjE3ZGEzMzI=, ActorId: [4:7519902223255614063:2466], ActorState: ExecuteState, TraceId: 01jyksq1ck5etme28pxszw8pka, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::NoEvaluate [GOOD] Test command err: Trying to start YDB, gRPC: 28956, MsgBus: 7189 2025-06-25T15:00:51.873452Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902122306404818:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.873502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001426/r3tmp/tmpSHL69N/pdisk_1.dat TServer::EnableGrpc on GrpcPort 28956, node 1 2025-06-25T15:00:52.359303Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:52.360547Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902122306404798:2080] 1750863651865666 != 1750863651865669 2025-06-25T15:00:52.362999Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:52.363253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:52.368918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:52.400908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:52.400934Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:52.400940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:52.401076Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7189 2025-06-25T15:00:52.888566Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7189 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:53.136676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:53.148847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:00:53.158172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:00:53.326838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.509130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:53.585701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:55.073747Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902139486275614:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.073826Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.369476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.405718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.475461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.548204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.573688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.644435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.679806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.754164Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902139486276281:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.754238Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.754407Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902139486276287:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.757266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.766267Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902139486276289:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:00:55.837003Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902139486276340:3426] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.874005Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902122306404818:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.874073Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 26941, MsgBus: 2377 2025-06-25T15:00:58.156676Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902152465826221:2056];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:58.156719Z node 2 :METADATA_PROVIDER ERROR: log.cpp:78 ... use file: (empty maybe) 2025-06-25T15:01:12.078849Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:12.078858Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:12.078987Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19390 TClient is connected to server localhost:19390 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:12.682542Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:12.697459Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-25T15:01:12.718103Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:12.785966Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T15:01:12.846361Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.022567Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:13.112979Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:15.820488Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902224112447149:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:15.820585Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:15.874660Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.930525Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.982060Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.042380Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.078630Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.127588Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.208685Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.309817Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902228407415112:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:16.309916Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:16.311997Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902228407415117:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:16.317147Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:16.332669Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902228407415119:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:16.419392Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902228407415170:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:16.731295Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902206932576510:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:16.731361Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:17.763666Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519902232702382744:2479], status: UNSUPPORTED, issues:
: Error: Default error
:7:24: Error: EVALUATE IF is not supported in YDB queries., code: 2030 2025-06-25T15:01:17.764517Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=YjFkMDhhZGItNjI2OGM1NmItOWU5NmI1ZGItYjQ3ODJhMTU=, ActorId: [4:7519902232702382736:2474], ActorState: ExecuteState, TraceId: 01jyksq292fnzf8mjmtkh7101y, ReplyQueryCompileError, status UNSUPPORTED remove tx with tx_id: 2025-06-25T15:01:17.808074Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519902232702382751:2482], status: UNSUPPORTED, issues:
: Error: Default error
:4:28: Error: EVALUATE is not supported in YDB queries., code: 2030 2025-06-25T15:01:17.810066Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=YjFkMDhhZGItNjI2OGM1NmItOWU5NmI1ZGItYjQ3ODJhMTU=, ActorId: [4:7519902232702382736:2474], ActorState: ExecuteState, TraceId: 01jyksq2asc3tacss7qvsew86s, ReplyQueryCompileError, status UNSUPPORTED remove tx with tx_id: 2025-06-25T15:01:17.966760Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519902232702382762:2487], status: UNSUPPORTED, issues:
: Error: Default error
:8:78: Error: ATOM evaluation is not supported in YDB queries., code: 2030 2025-06-25T15:01:17.968460Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=YjFkMDhhZGItNjI2OGM1NmItOWU5NmI1ZGItYjQ3ODJhMTU=, ActorId: [4:7519902232702382736:2474], ActorState: ExecuteState, TraceId: 01jyksq2fremx7nsy7dj0xsjw1, ReplyQueryCompileError, status UNSUPPORTED remove tx with tx_id: >> EncryptedBackupParamsValidationTest::NoItemDestination ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::UpdateConditionalKey-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 4223, MsgBus: 26577 2025-06-25T15:00:54.282181Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902135892468613:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:54.282258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001415/r3tmp/tmpvlmawi/pdisk_1.dat 2025-06-25T15:00:54.615784Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902135892468591:2080] 1750863654280846 != 1750863654280849 2025-06-25T15:00:54.627612Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4223, node 1 2025-06-25T15:00:54.664043Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:54.664548Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:54.666325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:54.708946Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:54.708974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:54.709004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:54.709114Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26577 TClient is connected to server localhost:26577 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:55.286619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:55.299254Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:55.304828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:55.454974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:55.652074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:55.719992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:57.331423Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902148777372115:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:57.331560Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:57.712112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:57.748303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:57.781095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:57.813083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:57.852896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:57.888690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:57.927129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:57.982343Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902148777372770:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:57.982430Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:57.985037Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902148777372775:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:57.988957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:58.001359Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902148777372777:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:00:58.082802Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902153072340124:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:59.282082Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902135892468613:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:59.282138Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"Tables":["EightShard"],"PlanNodeId":5,"Operators":[{"Inputs":[],"Path":"\/Root\/EightShard","Name":"Upsert","SinkType":"KqpTableSink","Table":"EightShard"}],"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Operators":[{"E-Rows":"0","Inputs":[{"ExternalPlanNodeId":1}],"Predicate":"item.Data \u003E 0","E-Cost":"0","E-Size":"0","Name":"Filter"}],"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"T ... 0818Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:13.340827Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:13.340941Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24118 TClient is connected to server localhost:24118 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:13.961548Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:13.977510Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 
2025-06-25T15:01:13.985018Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:14.101273Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:14.293418Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:14.380099Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:16.879025Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902228484072073:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:16.879117Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:16.994532Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:17.039579Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:17.081870Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:17.159045Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:17.212607Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:17.295033Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:17.366336Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:17.428115Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902232779040034:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:17.428233Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:17.428848Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902232779040039:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:17.437041Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:17.449785Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902232779040041:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:17.515283Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902232779040092:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:17.978849Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902211304201308:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:17.978934Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":8,"Plans":[{"Tables":["EightShard"],"PlanNodeId":7,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Path":"\/Root\/EightShard","Name":"Upsert","Table":"EightShard"},{"Inputs":[],"Iterator":"precompute_1_0","Name":"Iterator"}],"Node Type":"Upsert-ConstantExpr","CTE Name":"precompute_1_0"}],"Node Type":"Effect"},{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key [100, 100]","Key [200, 200]","Key [300, 300]"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"0","ReadRangesPointPrefixLen":"1","ReadRangesKeys":["Key"],"Table":"EightShard","ReadColumns":["Data","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Stage"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Subplan Name":"CTE precompute_1_0","Node Type":"Precompute_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","PlanNodeType":"Query","Stats":{"ResourcePoolId":"default"}},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/EightShard","reads":[{"columns":["Data","Key"],"scan_by":["Key [100, 100]","Key [200, 200]","Key [300, 300]"],"type":"Scan"}],"writes":[{"columns":["Data","Key"],"type":"MultiUpsert"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Operators":[{"Path":"\/Root\/EightShard","Name":"Upsert","Table":"EightShard"}],"Plans":[{"PlanNodeId":8,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key [100, 100]","Key [200, 200]","Key [300, 300]"],"Name":"TableRangeScan","Path":"\/Root\/EightShard","ReadRangesPointPrefixLen":"1","E-Rows":"0","ReadRangesKeys":["Key"],"Table":"EightShard","ReadColumns":["Data","Key"],"E-Cost":"0","ReadRangesExpectedSize":"3"}],"Node Type":"TableRangeScan"}],"Node Type":"Upsert"}],"Node Type":"Effect"}],"Node Type":"Query","PlanNodeType":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0}}} >> KqpLimits::LargeParametersAndMkqlFailure [GOOD] >> KqpLimits::ManyPartitions >> KqpQuery::CreateAsSelectTypes+NotNull+IsOlap [GOOD] >> TSchemeShardSysViewTest::AsyncCreateDirWithSysView |91.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::EmptyName >> TSchemeShardSysViewTest::AsyncCreateSameSysView >> TSchemeShardSysViewTest::AsyncCreateDifferentSysViews >> TSchemeShardSysViewTest::CreateExistingSysView >> KqpQuery::OlapCreateAsSelect_Simple [GOOD] >> KqpQuery::OltpCreateAsSelect_Simple >> 
TNebiusAccessServiceTest::PassRequestId >> TNebiusAccessServiceTest::PassRequestId [GOOD] >> TSchemeShardSysViewTest::ReadOnlyMode ------- [TM] {asan, default-linux-x86_64, release} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::PassRequestId [GOOD] Test command err: 2025-06-25T15:01:21.952515Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [517000003908]{reqId} Connect to grpc://localhost:15004 2025-06-25T15:01:21.980433Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [517000003908]{reqId} Request AuthenticateRequest { iam_token: "**** (717F937C)" } 2025-06-25T15:01:22.029265Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [517000003908]{reqId} Response AuthenticateResponse { account { user_account { id: "1234" } } } >> KqpQuery::QueryCancelWriteImmediate [GOOD] >> KqpQuery::CreateAsSelectView [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::CreateAsSelectTypes+NotNull+IsOlap [GOOD] Test command err: Trying to start YDB, gRPC: 4782, MsgBus: 16693 2025-06-25T15:00:56.302294Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902145018594880:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.302818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001413/r3tmp/tmpankP2y/pdisk_1.dat 2025-06-25T15:00:56.692176Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:56.716730Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902145018594862:2080] 1750863656301494 != 1750863656301497 2025-06-25T15:00:56.742509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:56.742656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:56.743526Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 4782, node 1 2025-06-25T15:00:56.852836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:56.852860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:56.852867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:56.852995Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16693 TClient is connected to server localhost:16693 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T15:00:57.316395Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:57.372402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:57.387536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:00:59.206448Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902157903497389:2290], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:59.206593Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:59.206945Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902157903497401:2293], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:59.210967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:59.225794Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902157903497403:2294], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T15:00:59.322570Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902157903497456:2334] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:59.578899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:00.030318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 61495, MsgBus: 26823 2025-06-25T15:01:01.125472Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902166034180497:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:01.125531Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001413/r3tmp/tmpYZHRKq/pdisk_1.dat 2025-06-25T15:01:01.261533Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:01.266578Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902166034180475:2080] 1750863661124694 != 1750863661124697 TServer::EnableGrpc on GrpcPort 61495, node 2 2025-06-25T15:01:01.288250Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:01.288352Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:01.296689Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:01.340807Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:01.340828Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:01.340834Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:01.340949Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26823 TClient is connected to server localhost:26823 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T15:01:01.766993Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:02.132621Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:04.188647Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902178919082982:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:04.189013Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:04.189733Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902178919083009:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:04.193246Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:04.204732Z node 2 :KQP_WORKLOAD ... 29927Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.030276Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037921;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.030452Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.035239Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037921;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.038003Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.039695Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.040238Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.043974Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.044652Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.045957Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.046449Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.049788Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.050316Z node 4 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.051948Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.052538Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.055375Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.055888Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.058072Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.058608Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.059677Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.060215Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.064115Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.064241Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.064837Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037917;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.064849Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.071182Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037917;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.071184Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.071801Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.071809Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037937;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.077872Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037937;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.078531Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.078734Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.079635Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.084531Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.084957Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.085104Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.085474Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.090565Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.091129Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.091816Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.092528Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.097143Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.097814Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.098555Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.099612Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.104178Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.105801Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; 2025-06-25T15:01:20.106377Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037915;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:01:20.112883Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037915;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; >> KqpExplain::CreateTableAs-Stats [GOOD] >> TSchemeShardSysViewTest::AsyncCreateDifferentSysViews [GOOD] >> TSchemeShardSysViewTest::AsyncCreateDirWithSysView [GOOD] >> TSchemeShardSysViewTest::AsyncCreateSameSysView [GOOD] >> TSchemeShardSysViewTest::EmptyName [GOOD] >> TSchemeShardSysViewTest::CreateExistingSysView [GOOD] |91.8%| [TA] $(B)/ydb/library/ncloud/impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |91.8%| [TA] {RESULT} $(B)/ydb/library/ncloud/impl/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpExplain::ReadTableRangesFullScan [GOOD] >> KqpExplain::ReadTableRanges ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::AsyncCreateSameSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:01:22.457377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:01:22.457485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:22.457533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:01:22.457583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:01:22.457632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:01:22.457664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:01:22.457732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:22.457830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:01:22.458848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:01:22.459205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:01:22.534234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:22.534299Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:22.555429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:01:22.555923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:01:22.556124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:01:22.564084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:01:22.564424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:01:22.565716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.566078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:01:22.577542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.577738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:01:22.589247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:22.589331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.589500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:01:22.589553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:01:22.589607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:01:22.589697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.600595Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:01:22.727214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:01:22.727477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.727683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:01:22.727731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:01:22.727975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:01:22.728066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:22.730971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.735487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:01:22.735761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.735844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:01:22.735901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:01:22.735975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:01:22.741796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.741877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:01:22.741941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:01:22.744030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.744085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.744160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.744226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:01:22.749428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:01:22.752715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:01:22.752938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:01:22.754069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.754236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:01:22.754295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.754642Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:01:22.754712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.754914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:01:22.755002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:01:22.757274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:22.757329Z node 1 :FLAT_TX_SCHEMESHARD ... Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:01:22.842415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_sysview.cpp:45: [72057594046678944] TCreateSysView::TPropose, opId: 102:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000003 2025-06-25T15:01:22.842551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:0 128 -> 240 2025-06-25T15:01:22.842727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T15:01:22.842805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T15:01:22.844046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T15:01:22.845472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:22.845519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T15:01:22.845631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T15:01:22.845768Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.845853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T15:01:22.845909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 3 2025-06-25T15:01:22.845990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.846040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T15:01:22.846141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T15:01:22.846190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T15:01:22.846230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T15:01:22.846273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T15:01:22.846329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-25T15:01:22.846370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T15:01:22.846436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T15:01:22.846475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T15:01:22.846544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T15:01:22.846584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-25T15:01:22.846619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 4 2025-06-25T15:01:22.846675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-25T15:01:22.847829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:22.847925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:22.847965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: 
Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T15:01:22.848025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-25T15:01:22.848075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T15:01:22.849262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:22.849341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:22.849372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T15:01:22.849401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T15:01:22.849438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T15:01:22.849510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T15:01:22.852100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:22.853214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 102 2025-06-25T15:01:22.853478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T15:01:22.853524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-25T15:01:22.853614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T15:01:22.853639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T15:01:22.854150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T15:01:22.854247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T15:01:22.854321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- 
TTxNotificationSubscriber for txId 102: satisfy waiter [1:339:2328] 2025-06-25T15:01:22.854594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T15:01:22.854675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T15:01:22.854728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:339:2328] TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-25T15:01:22.855257Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:01:22.855576Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 278us result status StatusSuccess 2025-06-25T15:01:22.856096Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::AsyncCreateDifferentSysViews [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 
2025-06-25T15:01:22.436159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:01:22.436258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:22.436297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:01:22.436351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:01:22.441890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:01:22.441961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:01:22.442062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:22.442147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:01:22.442997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:01:22.445455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:01:22.536585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:22.536649Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:22.553460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:01:22.553890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:01:22.554147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:01:22.562459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:01:22.562889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:01:22.566181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.566460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:01:22.582414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.582597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:01:22.586303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:22.586367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.586609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:01:22.586655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:01:22.586730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:01:22.586818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.593614Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:01:22.732747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:01:22.732945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.733120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:01:22.733168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:01:22.733429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:01:22.733503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:22.736149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.736362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:01:22.736570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.736624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:01:22.736691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:01:22.736765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:01:22.738597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.738687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:01:22.738756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:01:22.745525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.745583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.745645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.745712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:01:22.756160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:01:22.758136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:01:22.758327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:01:22.759215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.759360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:01:22.759425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T15:01:22.759713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:01:22.759762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.759940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:01:22.760005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:01:22.762358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:22.762401Z node 1 :FLAT_TX_SCHEMESHARD ... 5T15:01:22.856615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T15:01:22.856647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-25T15:01:22.856682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T15:01:22.856710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T15:01:22.856748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T15:01:22.856836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-25T15:01:22.856877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 103, publications: 2, subscribers: 0 2025-06-25T15:01:22.856904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-25T15:01:22.856928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 4], 2 2025-06-25T15:01:22.857427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:22.857478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.857508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-25T15:01:22.857567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 103, path id: 4 2025-06-25T15:01:22.858238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 
72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T15:01:22.858313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T15:01:22.858345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-25T15:01:22.858374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T15:01:22.858405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T15:01:22.858930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T15:01:22.859013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T15:01:22.859043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-25T15:01:22.859073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 2 2025-06-25T15:01:22.859111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-25T15:01:22.859167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-25T15:01:22.861273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T15:01:22.862252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 102, wait until txId: 102 TestModificationResults wait txId: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 102 2025-06-25T15:01:22.862495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T15:01:22.862533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-25T15:01:22.862605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T15:01:22.862630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- 
TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T15:01:22.863048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T15:01:22.863116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T15:01:22.863160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:352:2341] 2025-06-25T15:01:22.863330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T15:01:22.863394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T15:01:22.863416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:352:2341] TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-25T15:01:22.863883Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/sys_view_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:01:22.864088Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/sys_view_1" took 240us result status StatusSuccess 2025-06-25T15:01:22.864478Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/sys_view_1" PathDescription { Self { Name: "sys_view_1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "sys_view_1" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 
2025-06-25T15:01:22.864945Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/sys_view_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:01:22.865127Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/sys_view_2" took 169us result status StatusSuccess 2025-06-25T15:01:22.865401Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/sys_view_2" PathDescription { Self { Name: "sys_view_2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "sys_view_2" Type: ENodes SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::AsyncCreateDirWithSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:01:22.435847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:01:22.435943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:22.435983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, 
StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:01:22.436029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:01:22.440396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:01:22.440474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:01:22.440572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:22.440758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:01:22.441563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:01:22.444447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:01:22.532553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:22.532628Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:22.555222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:01:22.555655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:01:22.555795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:01:22.568052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:01:22.568386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:01:22.569089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.569322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:01:22.577687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.577873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:01:22.586537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:22.586612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.586755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:01:22.586798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: 
TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:01:22.586845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:01:22.586921Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.595595Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:01:22.723240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:01:22.724474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.725847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:01:22.725907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:01:22.727277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:01:22.727377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:22.733283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.733566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:01:22.733802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.733945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:01:22.734014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:01:22.734090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:01:22.736876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.736939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:01:22.736986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:01:22.738647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.738691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.738746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.738811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:01:22.750081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:01:22.753672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:01:22.753865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:01:22.754622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.754741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:01:22.754781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.755067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:01:22.755112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.755275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:01:22.755341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant 
no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:01:22.759143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:22.759183Z node 1 :FLAT_TX_SCHEMESHARD ... eration.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.835342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T15:01:22.835412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T15:01:22.835435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T15:01:22.835469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T15:01:22.835503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T15:01:22.835530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-25T15:01:22.835568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T15:01:22.835595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T15:01:22.835617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T15:01:22.835666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T15:01:22.835692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-25T15:01:22.835713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 4 2025-06-25T15:01:22.835752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-25T15:01:22.836527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:22.836604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:22.836631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T15:01:22.836658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, 
LocalPathId: 2], version: 4 2025-06-25T15:01:22.836686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T15:01:22.837996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:22.838065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:22.838093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T15:01:22.838118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T15:01:22.838143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T15:01:22.838199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T15:01:22.840786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:22.841184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-06-25T15:01:22.841399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T15:01:22.841437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-25T15:01:22.841528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T15:01:22.841546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T15:01:22.841931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T15:01:22.842032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T15:01:22.842071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:328:2317] 2025-06-25T15:01:22.842237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T15:01:22.842279Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T15:01:22.842306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:328:2317] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-06-25T15:01:22.842896Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:01:22.843050Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys" took 185us result status StatusSuccess 2025-06-25T15:01:22.844632Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys" PathDescription { Self { Name: ".sys" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 } ChildrenExist: true } Children { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:01:22.846105Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:01:22.846324Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 228us result status StatusSuccess 2025-06-25T15:01:22.846596Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess 
Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::CreateExistingSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:01:22.435838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:01:22.435926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:22.435965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:01:22.436010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:01:22.444943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:01:22.445018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:01:22.445121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 
1.000000s, InflightLimit# 10 2025-06-25T15:01:22.445213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:01:22.446083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:01:22.446441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:01:22.537152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:22.537207Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:22.558533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:01:22.558957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:01:22.559124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:01:22.564992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:01:22.565274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:01:22.565916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.566127Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:01:22.575990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.580473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:01:22.588679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:22.588771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.588953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:01:22.589008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:01:22.589059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:01:22.589148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.595803Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: 
[1:15:2062] 2025-06-25T15:01:22.742413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:01:22.742614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.742799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:01:22.742845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:01:22.743078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:01:22.743153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:22.748141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.748367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:01:22.748554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.748616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:01:22.748670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:01:22.748736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:01:22.750364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.750423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:01:22.750486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:01:22.751880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.751919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.751980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.752036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:01:22.755677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:01:22.757501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:01:22.757691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:01:22.758541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.758659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:01:22.758704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.758989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:01:22.759037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.759205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:01:22.759279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:01:22.760979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:22.761020Z node 1 :FLAT_TX_SCHEMESHARD ... 
22.845316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-25T15:01:22.845955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:22.846029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:22.846064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-25T15:01:22.846117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-25T15:01:22.846179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T15:01:22.846765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:22.846848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:22.846909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-25T15:01:22.846939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T15:01:22.846967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T15:01:22.847044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-25T15:01:22.849633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:22.849707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T15:01:22.849861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T15:01:22.849897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T15:01:22.850297Z node 1 :FLAT_TX_SCHEMESHARD 
WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T15:01:22.850404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T15:01:22.850441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:333:2322] TestWaitNotification: OK eventTxId 102 2025-06-25T15:01:22.850922Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:01:22.851125Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 215us result status StatusSuccess 2025-06-25T15:01:22.851543Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 2025-06-25T15:01:22.857657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView CreateSysView { Name: "new_sys_view" Type: ENodes } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:01:22.857829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_sysview.cpp:116: [72057594046678944] TCreateSysView Propose, path: /MyRoot/.sys/new_sys_view, opId: 103:0 2025-06-25T15:01:22.857896Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_create_sysview.cpp:122: [72057594046678944] TCreateSysView Propose, path: /MyRoot/.sys/new_sys_view, opId: 103:0, sysViewDescription: Name: "new_sys_view" Type: ENodes 2025-06-25T15:01:22.858041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 103:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/.sys/new_sys_view', error: path exist, request doesn't accept it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeSysView, state: EPathStateNoChanges), at schemeshard: 72057594046678944 2025-06-25T15:01:22.860272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 103, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/.sys/new_sys_view\', error: path exist, request doesn\'t accept it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeSysView, state: EPathStateNoChanges)" TxId: 103 SchemeshardId: 72057594046678944 PathId: 3 PathCreateTxId: 102, at schemeshard: 72057594046678944 2025-06-25T15:01:22.860567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/.sys/new_sys_view', error: path exist, request doesn't accept it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeSysView, state: EPathStateNoChanges), operation: CREATE SYSTEM VIEW, path: /MyRoot/.sys/new_sys_view TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T15:01:22.860874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T15:01:22.860916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T15:01:22.861337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T15:01:22.861439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T15:01:22.861475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:341:2330] TestWaitNotification: OK eventTxId 103 2025-06-25T15:01:22.861936Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:01:22.862155Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 202us result status StatusSuccess 2025-06-25T15:01:22.862713Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 
ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::EmptyName [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:01:22.435841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:01:22.435940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:22.435985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:01:22.436027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:01:22.441842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:01:22.441921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:01:22.442021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:22.442111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:01:22.442864Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:01:22.444216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:01:22.536836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:22.536917Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:22.559699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:01:22.560114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:01:22.560291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:01:22.566817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:01:22.567136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:01:22.567837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.568100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:01:22.581963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.582162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:01:22.586604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:22.586674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.586805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:01:22.586851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:01:22.586914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:01:22.586997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.594713Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:01:22.731187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T15:01:22.731407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.731594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:01:22.731636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:01:22.731866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:01:22.731938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:22.739227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.739433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:01:22.739615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.739679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:01:22.739724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:01:22.739797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:01:22.743001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.743066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:01:22.743117Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:01:22.744526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.744564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.744613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.744662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:01:22.747415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:01:22.749211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:01:22.750650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:01:22.751573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.751693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:01:22.751741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.753251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:01:22.753316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:22.753554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:01:22.753634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:01:22.755893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:22.755931Z node 1 :FLAT_TX_SCHEMESHARD ... 
Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-06-25T15:01:22.811426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:22.811555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:01:22.811621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000002, at schemeshard: 72057594046678944 2025-06-25T15:01:22.811738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 101:0 128 -> 240 2025-06-25T15:01:22.811926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:01:22.811989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T15:01:22.813812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T15:01:22.814754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T15:01:22.815299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:22.815347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:01:22.815515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T15:01:22.815654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:22.815692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-25T15:01:22.815731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T15:01:22.815994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T15:01:22.816040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T15:01:22.816130Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T15:01:22.816167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T15:01:22.816261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T15:01:22.816300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T15:01:22.816354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-25T15:01:22.816394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T15:01:22.816445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T15:01:22.816480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T15:01:22.816540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T15:01:22.816574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-25T15:01:22.816638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-25T15:01:22.816672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-25T15:01:22.817290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T15:01:22.817376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T15:01:22.817418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-25T15:01:22.817467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T15:01:22.817512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T15:01:22.818108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T15:01:22.818202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 
72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T15:01:22.818234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-25T15:01:22.818261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T15:01:22.818293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T15:01:22.818353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-25T15:01:22.822116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T15:01:22.822548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T15:01:22.823187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T15:01:22.823241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T15:01:22.823688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T15:01:22.823778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T15:01:22.823814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:310:2299] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-25T15:01:22.827721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/.sys" OperationType: ESchemeOpCreateSysView CreateSysView { Name: "" Type: EPartitionStats } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:01:22.827905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_sysview.cpp:116: [72057594046678944] TCreateSysView Propose, path: /MyRoot/.sys/, opId: 102:0 2025-06-25T15:01:22.827976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_sysview.cpp:122: [72057594046678944] TCreateSysView Propose, path: /MyRoot/.sys/, opId: 102:0, sysViewDescription: Name: "" Type: EPartitionStats 2025-06-25T15:01:22.828093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: Check failed: path: '/MyRoot/.sys/', error: path part shouldn't be empty, at schemeshard: 72057594046678944 2025-06-25T15:01:22.831092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: StatusSchemeError Reason: "Check failed: path: \'/MyRoot/.sys/\', error: 
path part shouldn\'t be empty" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:01:22.831341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: Check failed: path: '/MyRoot/.sys/', error: path part shouldn't be empty, operation: CREATE SYSTEM VIEW, path: /MyRoot/.sys/ TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T15:01:22.831629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T15:01:22.831669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T15:01:22.832008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T15:01:22.832101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T15:01:22.832164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:317:2306] TestWaitNotification: OK eventTxId 102 >> KqpParams::CheckCacheByAst [GOOD] >> KqpParams::CheckCacheWithRecompilationQuery >> KqpLimits::ReadsetCountLimit [GOOD] >> KqpLimits::QueryExecTimeoutCancel >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER [GOOD] >> TSchemeShardSysViewTest::ReadOnlyMode [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::QueryCancelWriteImmediate [GOOD] Test command err: Trying to start YDB, gRPC: 22682, MsgBus: 5847 2025-06-25T15:00:51.173800Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902123706650157:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.174926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001450/r3tmp/tmpo2W00b/pdisk_1.dat 2025-06-25T15:00:51.606714Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902123706649962:2080] 1750863651165784 != 1750863651165787 2025-06-25T15:00:51.619953Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:51.628827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:51.628933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:51.637660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22682, node 1 2025-06-25T15:00:51.828763Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:51.828787Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:51.828810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: 
(empty maybe) 2025-06-25T15:00:51.828902Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:00:52.175035Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5847 TClient is connected to server localhost:5847 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:52.650926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:52.669162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:00:52.680651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:52.870234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.016678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:00:53.082579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.506158Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902136591553486:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.506243Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.866901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.899548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.928250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.964289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.017556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.096259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.132413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.225167Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902140886521447:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.225246Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.225584Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902140886521452:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.228648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.236247Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902140886521456:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:00:55.332234Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902140886521507:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.176403Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902123706650157:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.176473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 5914, MsgBus: 11655 2025-06-25T15:01:01.664167Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902164567336953:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:01.664209Z node 2 :METADATA_PROVIDER ERROR: log.cpp:78 ... sts.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519902197769578536:2232];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:13.980752Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 2019, MsgBus: 15893 2025-06-25T15:01:15.886068Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519902227307844657:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:15.886143Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001450/r3tmp/tmpyw0M4c/pdisk_1.dat 2025-06-25T15:01:16.153725Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:16.154951Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519902227307844639:2080] 1750863675885200 != 1750863675885203 2025-06-25T15:01:16.181254Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:16.181350Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:16.183185Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2019, node 4 2025-06-25T15:01:16.368995Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:16.369020Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:16.369030Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:16.369162Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15893 
2025-06-25T15:01:16.913431Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15893 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:17.082332Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:17.092991Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:17.126452Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:17.227418Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:17.458050Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:17.547957Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:19.982066Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902244487715455:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:19.982185Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:20.064891Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:20.113010Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:20.190192Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:20.228485Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:20.303024Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:20.384021Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:20.461394Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:20.554815Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902248782683423:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:20.554905Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:20.555008Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902248782683428:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:20.558956Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:20.570085Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902248782683430:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:20.666450Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902248782683481:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:20.887542Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902227307844657:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:20.887607Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::CreateTableAs-Stats [GOOD] Test command err: Trying to start YDB, gRPC: 21267, MsgBus: 2594 2025-06-25T15:00:51.188439Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902121649807359:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.192229Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00143e/r3tmp/tmpf9iVrq/pdisk_1.dat 2025-06-25T15:00:51.613223Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:51.669890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:51.670013Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 21267, node 1 2025-06-25T15:00:51.677253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:51.822540Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:51.822559Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:51.822571Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:51.822654Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:00:52.180445Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2594 TClient is connected to server localhost:2594 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:52.721235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:52.744989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:00:52.754348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:52.886012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.050749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.127380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.584339Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902134534710703:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.584455Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.866854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.907917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.939960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.985237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.024333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.079154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.149579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.210945Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902138829678659:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.211026Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.211239Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902138829678664:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.214795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.225858Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902138829678666:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:00:55.306411Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902138829678719:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.181136Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902121649807359:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.181193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[],"Iterator":"precompute_0_0","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_0"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"},{"PlanNodeId":6,"Subplan Name":"CTE precompute_0_0","Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"Tables":["EightShard"],"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["KeyValue"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)" ... ce;path=//Root/.metadata/initialization/migrations;error=timeout; PLAN::{"Plan":{"Plans":[{"Tables":["test\/test2\/Destination3"],"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Source"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Col1 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Source","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Source","ReadColumns":["Col1","Col2"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage"}],"Node Type":"Map","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Operators":[{"Inputs":[],"Path":"\/Root\/test\/test2\/Destination3","Name":"FillTable","Table":"test\/test2\/Destination3","SinkType":"KqpTableSink"}],"Node Type":"Sink"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Source","reads":[{"columns":["Col1","Col2"],"scan_by":["Col1 (-∞, +∞)"],"type":"FullScan"}]},{"name":"\/Root\/test\/test2\/Destination3","writes":[{"columns":["Col1","Col2"],"type":"MultiReplace"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/test\/test2\/Destination3","Name":"FillTable","Table":"test\/test2\/Destination3","SinkType":"KqpTableSink"}],"Node Type":"FillTable"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 19607, MsgBus: 29620 2025-06-25T15:01:17.741087Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519902232809332689:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:17.741148Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00143e/r3tmp/tmph6wZ0E/pdisk_1.dat 2025-06-25T15:01:17.898583Z node 5 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:17.905131Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519902232809332666:2080] 1750863677723628 != 1750863677723631 2025-06-25T15:01:17.917639Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:17.917733Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:17.920564Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19607, node 5 2025-06-25T15:01:18.029096Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:18.029127Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:18.029138Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:18.029282Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29620 TClient is connected to server localhost:29620 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:18.725885Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:18.749800Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:01:18.756899Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:21.569545Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519902249989202461:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:21.569768Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:21.570184Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519902249989202496:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:21.574908Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:21.587869Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519902249989202498:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T15:01:21.651411Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519902249989202550:2333] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:21.683860Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) PLAN::{"Plan":{"Plans":[{"Tables":["Destination"],"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Source"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Col1 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Source","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Source","ReadColumns":["Col1","Col2"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage"}],"Node Type":"Map","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Operators":[{"Inputs":[],"Path":"\/Root\/Destination","Name":"FillTable","Table":"Destination","SinkType":"KqpTableSink"}],"Node Type":"Sink"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Destination","writes":[{"columns":["Col1","Col2"],"type":"MultiReplace"}]},{"name":"\/Root\/Source","reads":[{"columns":["Col1","Col2"],"scan_by":["Col1 (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/Destination","Name":"FillTable","Table":"Destination","SinkType":"KqpTableSink"}],"Node Type":"FillTable"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} PLAN::{"Plan":{"Plans":[{"Tables":["test\/Destination2"],"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Source"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Col1 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Source","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Source","ReadColumns":["Col1","Col2"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage"}],"Node Type":"Map","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Operators":[{"Inputs":[],"Path":"\/Root\/test\/Destination2","Name":"FillTable","Table":"test\/Destination2","SinkType":"KqpTableSink"}],"Node Type":"Sink"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Source","reads":[{"columns":["Col1","Col2"],"scan_by":["Col1 (-∞, +∞)"],"type":"FullScan"}]},{"name":"\/Root\/test\/Destination2","writes":[{"columns":["Col1","Col2"],"type":"MultiReplace"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/test\/Destination2","Name":"FillTable","Table":"test\/Destination2","SinkType":"KqpTableSink"}],"Node Type":"FillTable"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} 
PLAN::{"Plan":{"Plans":[{"Tables":["test\/test2\/Destination3"],"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Source"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Col1 (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Source","ReadRangesPointPrefixLen":"0","E-Rows":"0","Table":"Source","ReadColumns":["Col1","Col2"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Stage"}],"Node Type":"Map","PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Operators":[{"Inputs":[],"Path":"\/Root\/test\/test2\/Destination3","Name":"FillTable","Table":"test\/test2\/Destination3","SinkType":"KqpTableSink"}],"Node Type":"Sink"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Source","reads":[{"columns":["Col1","Col2"],"scan_by":["Col1 (-∞, +∞)"],"type":"FullScan"}]},{"name":"\/Root\/test\/test2\/Destination3","writes":[{"columns":["Col1","Col2"],"type":"MultiReplace"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Operators":[{"Path":"\/Root\/test\/test2\/Destination3","Name":"FillTable","Table":"test\/test2\/Destination3","SinkType":"KqpTableSink"}],"Node Type":"FillTable"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::CreateAsSelectView [GOOD] Test command err: Trying to start YDB, gRPC: 3205, MsgBus: 28854 2025-06-25T15:00:51.729312Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902124263329995:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.729357Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001430/r3tmp/tmpoxPUzt/pdisk_1.dat 2025-06-25T15:00:52.102200Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3205, node 1 2025-06-25T15:00:52.141055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:52.141177Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:52.142855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:52.200278Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:52.200300Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:52.200305Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:52.200437Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28854 2025-06-25T15:00:52.716496Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28854 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:52.991975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:53.011949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:00:53.017710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.218346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.400550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.486377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:55.192287Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902141443200610:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.192451Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.480481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.548461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.582641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.609096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.677369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.752402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.825774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.879670Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902141443201281:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.879794Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.880448Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902141443201286:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.883588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.893167Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902141443201288:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:00:55.953037Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902141443201339:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.729519Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902124263329995:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.729568Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 11345, MsgBus: 17245 2025-06-25T15:00:57.694152Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902147362890627:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:57.694776Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runne ... d=72075186224037906;self_id=[3:7519902194928714243:2647];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-25T15:01:12.271509Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037963;self_id=[3:7519902194928713885:2604];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-25T15:01:12.273231Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037942;self_id=[3:7519902194928713935:2632];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-25T15:01:12.274079Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037944;self_id=[3:7519902194928713923:2622];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-25T15:01:12.361813Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519902212108586298:5214] txid# 281474976715685, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:12.374078Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715687:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:12.521168Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519902212108586442:5304] txid# 281474976715689, issues: { message: "Check failed: path: \'/Root/RowSrc\', error: path exist, request doesn\'t accept it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeTable, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:12.521569Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=MTRjZTViYTItYjljMjY0ZDItOTYzZTg1MmItMTJlNDQyMg==, ActorId: [3:7519902212108586274:3275], 
ActorState: ExecuteState, TraceId: 01jykspx011ynkn21xekvhdxfj, Create QueryResponse for error on request, msg: 2025-06-25T15:01:12.750556Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519902212108586519:5337] txid# 281474976715691, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:12.760682Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.769188Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519902216403554177:5495] txid# 281474976715697, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:13.781667Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715699:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 63607, MsgBus: 13853 2025-06-25T15:01:15.518424Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519902225868889613:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:15.518556Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001430/r3tmp/tmpmixXwX/pdisk_1.dat 2025-06-25T15:01:15.740991Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63607, node 4 2025-06-25T15:01:15.818018Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:15.818048Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:15.818058Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:15.818196Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:15.851522Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:15.851645Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:15.856255Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13853 TClient is connected to server localhost:13853 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:16.420579Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:16.427592Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:01:16.505448Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:19.672734Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902243048759379:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:19.672884Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:19.676800Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902243048759399:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:19.685758Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:19.705635Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902243048759401:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T15:01:19.767248Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902243048759452:2334] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:19.820935Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:19.908432Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:20.327482Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:20.518591Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902225868889613:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:20.518675Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:20.848568Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902247343727392:2705] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:20.858852Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpStats::StreamLookupStats-StreamLookupJoin [GOOD] >> KqpStats::SysViewClientLost >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME64 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DYNUMBER ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 
2025-06-25T15:01:23.232886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:01:23.232978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:23.233020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:01:23.233060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:01:23.233102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:01:23.233135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:01:23.233189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:23.233264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:01:23.234065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:01:23.234418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:01:23.313682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:23.313769Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:23.341469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:01:23.341952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:01:23.342126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:01:23.353867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:01:23.354221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:01:23.354896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:23.355154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:01:23.358726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:23.358905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:01:23.360115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:23.360175Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:23.360334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:01:23.360386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:01:23.360429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:01:23.360516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:01:23.367550Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:01:23.500189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:01:23.500527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:23.500815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:01:23.500867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:01:23.501154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:01:23.501239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:23.503636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:23.503855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:01:23.504050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: 
TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:23.504113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:01:23.504160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:01:23.504230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:01:23.506329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:23.506384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:01:23.506434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:01:23.508194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:23.508239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:23.508297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:23.508385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:01:23.512160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:01:23.514284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:01:23.514505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:01:23.515423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:23.515558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:01:23.515604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T15:01:23.515922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:01:23.515978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:23.516169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:01:23.516246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:01:23.518620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:23.518690Z node 1 :FLAT_TX_SCHEMESHARD ... DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816 2025-06-25T15:01:23.998475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545 2025-06-25T15:01:23.998867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 FAKE_COORDINATOR: Add transaction: 103 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000003 2025-06-25T15:01:23.999430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:23.999531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:01:23.999575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_sysview.cpp:45: [72057594046678944] TCreateSysView::TPropose, opId: 103:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000003 2025-06-25T15:01:23.999722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 103:0 128 -> 240 2025-06-25T15:01:23.999872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-25T15:01:23.999930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 FAKE_COORDINATOR: Erasing txId 103 2025-06-25T15:01:24.005858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:24.005903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 
72057594046678944, LocalPathId: 2] 2025-06-25T15:01:24.006045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T15:01:24.006181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:24.006225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:440:2396], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-25T15:01:24.006258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:440:2396], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-25T15:01:24.006642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-25T15:01:24.006680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-25T15:01:24.006766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T15:01:24.006793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T15:01:24.006829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#103:0 progress is 1/1 2025-06-25T15:01:24.006856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T15:01:24.006890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-25T15:01:24.006932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-25T15:01:24.006972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 103:0 2025-06-25T15:01:24.006999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 103:0 2025-06-25T15:01:24.007079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T15:01:24.007120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 103, publications: 2, subscribers: 0 2025-06-25T15:01:24.007148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 4 2025-06-25T15:01:24.007172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-25T15:01:24.007750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T15:01:24.007822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T15:01:24.007854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-25T15:01:24.007892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-25T15:01:24.007929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-25T15:01:24.008815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T15:01:24.008877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-25T15:01:24.008900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-25T15:01:24.008935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-25T15:01:24.008964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T15:01:24.009026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-25T15:01:24.012883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-25T15:01:24.017160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T15:01:24.017466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T15:01:24.017508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T15:01:24.017928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T15:01:24.018030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T15:01:24.018068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:476:2429] TestWaitNotification: OK eventTxId 103 
2025-06-25T15:01:24.018569Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:01:24.018821Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 226us result status StatusSuccess 2025-06-25T15:01:24.019189Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 103 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats SourceObject { OwnerId: 72057594046678944 LocalId: 1 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |91.9%| [TA] $(B)/ydb/core/tx/schemeshard/ut_sysview/test-results/unittest/{meta.json ... results_accumulator.log} |91.9%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sysview/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER [GOOD] Test command err: 2025-06-25T14:59:27.260893Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901761697243029:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:27.260955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp45otiw/pdisk_1.dat 2025-06-25T14:59:27.646198Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:27.662063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:27.662134Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:27.675002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13992, node 1 2025-06-25T14:59:27.857055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:27.857094Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:27.857104Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:27.857204Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12000 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:28.260644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:59:28.278969Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:29.758720Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901770287178598:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.758812Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.161331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Backup "/Root" to "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/"Create temporary directory "/Root/~backup_20250625T145930" in databaseProcess "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/table"Copy tables: { src: "/Root/table", dst: "/Root/~backup_20250625T145930/table" }Describe table "/Root/table"Describe table "/Root/~backup_20250625T145930/table"Backup table "/Root/~backup_20250625T145930/table" to "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/table"Write scheme into "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/table/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/table/permissions.pb"Read table "/Root/~backup_20250625T145930/table"Write data into "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/table/data_00.csv"Drop table "/Root/~backup_20250625T145930/table"2025-06-25T14:59:30.727072Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found 2025-06-25T14:59:30.727727Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-25T14:59:30.728128Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found Remove temporary directory "/Root/~backup_20250625T145930" in database2025-06-25T14:59:30.754951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Backup completed successfully2025-06-25T14:59:30.769873Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901774582146966:2364], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.769953Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.819884Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-25T14:59:30.819918Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-25T14:59:30.820847Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found Restore "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/" to "/Root"Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/table"}]Process "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/table"Read scheme from "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/table/scheme.pb"Restore table "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/table" to "/Root/table"2025-06-25T14:59:30.879452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Created "/Root/table"Read data from "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/table/data_00.csv"Restore index "byValue" on "/Root/table"2025-06-25T14:59:30.962363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:31.013175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:31.057818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:31.194766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715762:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) 2025-06-25T14:59:31.257498Z node 1 :HIVE WARN: 
hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found 2025-06-25T14:59:31.260128Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found Restore ACL "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/table" to "/Root/table"Read ACL from "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpKsk4V1/table/permissions.pb"2025-06-25T14:59:31.289546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully 2025-06-25T14:59:32.543673Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519901785051437373:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:32.543739Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpZ9CV2B/pdisk_1.dat ... er/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully2025-06-25T15:01:14.025912Z node 52 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710675. Ctx: { TraceId: 01jykspygd8gbdmv6bz53vwgn2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=52&id=N2RkNTA2YzctMTQ2MjUyMjYtMjNhNGFlOTctMzFkNTEyMzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:01:16.024366Z node 55 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[55:7519902227528605022:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:16.073210Z node 55 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmpEdk740/pdisk_1.dat 2025-06-25T15:01:16.310681Z node 55 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(55, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:16.310777Z node 55 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(55, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:16.317318Z node 55 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:16.343502Z node 55 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(55, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23550, node 55 2025-06-25T15:01:16.565332Z node 55 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:16.565355Z node 55 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:16.565365Z node 55 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:16.565520Z node 55 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21042 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:16.810700Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:01:16.984470Z node 55 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:20.461678Z node 55 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [55:7519902249003442390:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:20.461717Z node 55 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [55:7519902249003442382:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:20.461804Z node 55 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:20.469038Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:20.493349Z node 55 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [55:7519902249003442396:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T15:01:20.549823Z node 55 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [55:7519902249003442477:2674] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:20.573143Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:20.774668Z node 55 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyksq55z1eqyw3s209r0k612, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=55&id=ZDVjYjA5ZC0yYjA0NDRiOC05Yzg3OGVlNi05MTc4NGRkMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:20.931257Z node 55 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyksq58s4ay0b8zpmqfnsrge, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=55&id=ZDVjYjA5ZC0yYjA0NDRiOC05Yzg3OGVlNi05MTc4NGRkMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/"Create temporary directory "/Root/~backup_20250625T150120" in database2025-06-25T15:01:20.988773Z node 55 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[55:7519902227528605022:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:20.988834Z node 55 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Process "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/DyNumberTable"Copy tables: { src: "/Root/DyNumberTable", dst: "/Root/~backup_20250625T150120/DyNumberTable" }Describe table "/Root/DyNumberTable"Describe table "/Root/~backup_20250625T150120/DyNumberTable"Backup table "/Root/~backup_20250625T150120/DyNumberTable" to "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/DyNumberTable"Write scheme into "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/DyNumberTable/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/DyNumberTable/permissions.pb"Read table "/Root/~backup_20250625T150120/DyNumberTable"Write data into "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/DyNumberTable/data_00.csv"Drop table "/Root/~backup_20250625T150120/DyNumberTable"Remove temporary directory "/Root/~backup_20250625T150120" in database2025-06-25T15:01:21.713369Z node 55 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 55, TabletId: 72075186224037889 not found 2025-06-25T15:01:21.754101Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Backup completed successfullyRestore "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/" to "/Root"2025-06-25T15:01:21.905153Z node 55 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 55, TabletId: 72075186224037888 not found Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/DyNumberTable"}]Process "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/DyNumberTable"Read scheme from "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/DyNumberTable/scheme.pb"Restore table "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/DyNumberTable" to "/Root/DyNumberTable"2025-06-25T15:01:21.980518Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Created "/Root/DyNumberTable"Read data from "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/DyNumberTable/data_00.csv"2025-06-25T15:01:22.193251Z node 55 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710671. Ctx: { TraceId: 01jyksq6h581nmr66qrcmgqjqn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=55&id=YTY2N2EzNGUtNzc1NDZmYjQtODBlYmJlZjctOTM5MmU3NDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/DyNumberTable" to "/Root/DyNumberTable"Read ACL from "/home/runner/.ya/build/build_root/yft8/001493/r3tmp/tmp1rfQFv/DyNumberTable/permissions.pb"2025-06-25T15:01:22.266470Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully2025-06-25T15:01:22.532804Z node 55 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { TraceId: 01jyksq6qj11987pk6jcbqajf4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=55&id=ZDVjYjA5ZC0yYjA0NDRiOC05Yzg3OGVlNi05MTc4NGRkMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> KqpStats::OneShardNonLocalExec-UseSink [GOOD] >> KqpQuery::TableSink_ReplaceDataShardDataQuery+UseSink [GOOD] >> KqpQuery::TableSink_ReplaceDataShardDataQuery-UseSink >> KqpParams::Decimal+QueryService-UseSink [GOOD] >> KqpParams::Decimal-QueryService+UseSink >> KqpExplain::FewEffects-UseSink [GOOD] >> KqpExplain::FullOuterJoin >> KqpExplain::SsaProgramInJsonPlan [GOOD] >> KqpLimits::ManyPartitions [GOOD] >> KqpLimits::ManyPartitionsSorting >> KqpQuery::OltpCreateAsSelect_Simple [GOOD] >> KqpQuery::OltpCreateAsSelect_Disable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::OneShardNonLocalExec-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 25320, MsgBus: 30010 2025-06-25T15:00:51.877312Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902124210226361:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.877382Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001427/r3tmp/tmpQEIShl/pdisk_1.dat 2025-06-25T15:00:52.294531Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902124210226340:2080] 1750863651876439 != 1750863651876442 2025-06-25T15:00:52.314190Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:52.338217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:52.338342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 25320, node 1 2025-06-25T15:00:52.340744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:52.418551Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:52.418571Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:52.418579Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:52.418697Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30010 TClient is connected to server localhost:30010 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T15:00:52.877285Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:53.113608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:53.141102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:00:53.150339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.314821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.511125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.586192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:00:55.195241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902141390097168:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.195366Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.510639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.538585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.569220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.601144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.642565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.669898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.699680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.791130Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902141390097828:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.791195Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902141390097833:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.791214Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.794960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.806027Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902141390097835:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:00:55.861916Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902141390097886:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.880427Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902124210226361:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.880521Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:00:56.880622Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519902145685065459:2478], status: GENERIC_ERROR, issues:
:2:12: Error: mismatched input 'INCORRECT_STMT' expecting {';', '(', '$', ALTER, ANALYZE, BACKUP, BATCH, COMMIT, CREATE, DECLARE, DEFINE, DELETE, DISCARD ... ations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001427/r3tmp/tmpA0SYVK/pdisk_1.dat 2025-06-25T15:01:14.367851Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:14.410240Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:14.410308Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:14.411845Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:14.411905Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:14.415199Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:14.418542Z node 5 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-06-25T15:01:14.420686Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14437, node 5 2025-06-25T15:01:14.544884Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:14.544909Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:14.544919Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:14.545050Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3753 2025-06-25T15:01:15.012913Z node 6 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:15.013322Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3753 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:01:15.288141Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:15.328558Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:15.464757Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:15.650251Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:15.795869Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:18.537405Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519902240135932418:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:18.537534Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:18.562198Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:18.640754Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:18.741464Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:18.810162Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:18.906892Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:18.989402Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519902218661093881:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:18.989468Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:19.007316Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7519902220327041648:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:19.007391Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:19.006884Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:19.078096Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose 
itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:19.194558Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519902244430900656:2386], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:19.194676Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:19.195003Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519902244430900661:2389], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:19.198234Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:19.223936Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519902244430900663:2390], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:19.297705Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519902244430900733:4343] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> ApplyClusterEndpointTest::NoPorts [GOOD] >> ApplyClusterEndpointTest::PortFromCds [GOOD] >> ApplyClusterEndpointTest::PortFromDriver [GOOD] >> BasicUsage::MaxByteSizeEqualZero >> CompressExecutor::TestReorderedExecutor >> EncryptedBackupParamsValidationTest::NoItemDestination [GOOD] >> TColumnShardTestSchema::RebootColdTiers [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::SsaProgramInJsonPlan [GOOD] Test command err: Trying to start YDB, gRPC: 2528, MsgBus: 31868 2025-06-25T15:00:56.921063Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902142641890535:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.921418Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00140e/r3tmp/tmpXB6CCX/pdisk_1.dat 2025-06-25T15:00:57.232479Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902142641890515:2080] 1750863656920204 != 1750863656920207 2025-06-25T15:00:57.240842Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:57.242394Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:57.242457Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:57.249038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2528, node 1 2025-06-25T15:00:57.367431Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:57.367456Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:57.367463Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:57.367568Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server 
localhost:31868 TClient is connected to server localhost:31868 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T15:00:57.963533Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:58.027091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:58.061224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:00:58.079560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:58.227200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:58.378491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:58.475731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:00:59.947786Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902155526794047:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:59.947930Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:00.206416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:00.242800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:00.281166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:00.313467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:00.348263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:00.407992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:00.488115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:00.570911Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902159821762011:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:00.571003Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:00.571209Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902159821762016:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:00.574910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:00.590529Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902159821762018:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:00.651763Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902159821762069:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"SortBy":"row.Text","Name":"Sort"},{"Scan":"Parallel","ReadRange":["Key [150, 266]"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/EightShard","E-Rows":"1","Table":"EightShard","ReadColumns":["Data","Key","Text"],"E-Cost":"0"}],"Node Type":"Sort-TableRangeScan"}],"Node Type":"Merge","SortColumns":["Text (Asc)"],"PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\ ... : tablet_id=72075186224037959;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.731634Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037977;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.732365Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037975;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.734723Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037959;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.735237Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.737997Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037975;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.738597Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.739503Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.739952Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037980;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.744642Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037980;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.745216Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037954;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.748688Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.749066Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037961;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.751081Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037954;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.751645Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.753756Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037961;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.754446Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.757958Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.758490Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.761854Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037969;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.762395Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037953;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.767487Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037953;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.768145Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037983;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.773691Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037983;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.774482Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.782886Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.783625Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037973;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.793193Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037973;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.793787Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037940;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.800250Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.800787Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037957;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.802752Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037940;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.803760Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037979;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.806754Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037957;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.807388Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.809005Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037979;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.809501Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:24.812274Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:24.814470Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["OlapTable"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"E-Rows":"0","Predicate":"Value \u003E 0","Pushdown":"True","Name":"Filter","E-Size":"0","E-Cost":"0"},{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/OlapTable","E-Rows":"0","Table":"OlapTable","ReadColumns":["Key","Value"],"SsaProgram":{"Command":[{"Assign":{"Constant":{"Int32":0},"Column":{"Id":3}}},{"Assign":{"Function":{"YqlOperationId":15,"KernelIdx":0,"FunctionType":2,"Arguments":[{"Id":2},{"Id":3}]},"Column":{"Id":4}}},{"Assign":{"Constant":{"Uint8":0},"Column":{"Id":5}}},{"Assign":{"Function":{"YqlOperationId":17,"KernelIdx":1,"FunctionType":2,"Arguments":[{"Id":4},{"Id":5}]},"Column":{"Id":6}}},{"Filter":{"Predicate":{"Id":6}}},{"Projection":{"Columns":[{"Id":1},{"Id":2}]}}]},"E-Cost":"0"}],"Node Type":"Filter-TableFullScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/OlapTable","reads":[{"columns":["Key","Value"],"scan_by":["Key (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/OlapTable","E-Rows":"0","Table":"OlapTable","ReadColumns":["Key","Value"],"SsaProgram":{"Command":[{"Assign":{"Constant":{"Int32":0},"Column":{"Id":3}}},{"Assign":{"Function":{"YqlOperationId":15,"KernelIdx":0,"FunctionType":2,"Arguments":[{"Id":2},{"Id":3}]},"Column":{"Id":4}}},{"Assign":{"Constant":{"Uint8":0},"Column":{"Id":5}}},{"Assign":{"Function":{"YqlOperationId":17,"KernelIdx":1,"FunctionType":2,"Arguments":[{"Id":4},{"Id":5}]},"Column":{"Id":6}}},{"Filter":{"Predicate":{"Id":6}}},{"Projection":{"Columns":[{"Id":1},{"Id":2}]}}]},"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Operators":[{"E-Rows":"0","Predicate":"Value \u003E 0","Pushdown":"True","Name":"Filter","E-Size":"0","E-Cost":"0"}],"Node Type":"Filter"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} >> KqpCost::IndexLookupAndTake+useSink >> KqpCost::ScanScriptingRangeFullScan-SourceRead >> KqpCost::ScanScriptingRangeFullScan+SourceRead >> KqpCost::IndexLookup-useSink >> KqpCost::IndexLookup+useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootColdTiers [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864223.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864223.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150864223.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864223.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=150864223.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=150864223.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130863023.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130864223.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130864223.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130863023.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=130863023.000000s;Name=tier1;Codec=};};TTL={Column=timestamp;EvictAfter=130863023.000000s;Name=;Codec=}; 2025-06-25T15:00:23.828980Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:00:23.854974Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:00:23.855187Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:00:23.859908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:00:23.860069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:00:23.860232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:00:23.860304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:00:23.860388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:00:23.860454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:00:23.860513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:00:23.860573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:00:23.860629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:00:23.860691Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:00:23.860751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:00:23.877018Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:00:23.877151Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:00:23.877191Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:00:23.877297Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:23.877422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:00:23.877490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:00:23.877536Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:00:23.877591Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:00:23.877628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:00:23.877653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:00:23.877679Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:00:23.877793Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:00:23.877843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:00:23.877871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:00:23.877889Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:00:23.877952Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:00:23.877987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:00:23.878023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:00:23.878043Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:00:23.878070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:00:23.878092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:00:23.878113Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:00:23.878245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:00:23.878270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:00:23.878291Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:00:23.878419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:00:23.878460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:00:23.878488Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:00:23.878590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:00:23.878623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:00:23.878640Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:00:23.878689Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:00:23.878728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:00:23.878751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:00:23.878768Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:15 ... init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=3713; 2025-06-25T15:01:27.120819Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=10; 2025-06-25T15:01:27.121005Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=123; 2025-06-25T15:01:27.121050Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=4168; 2025-06-25T15:01:27.121102Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=4310; 2025-06-25T15:01:27.121168Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=11; 2025-06-25T15:01:27.121280Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=61; 2025-06-25T15:01:27.121319Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=5060; 2025-06-25T15:01:27.121449Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=81; 2025-06-25T15:01:27.121590Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=85; 2025-06-25T15:01:27.121751Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=100; 2025-06-25T15:01:27.121872Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=72; 2025-06-25T15:01:27.124833Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2904; 2025-06-25T15:01:27.127904Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=2977; 2025-06-25T15:01:27.127983Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=11; 2025-06-25T15:01:27.128032Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=9; 2025-06-25T15:01:27.128067Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=5; 2025-06-25T15:01:27.128152Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=48; 2025-06-25T15:01:27.128198Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=5; 2025-06-25T15:01:27.128540Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=61; 2025-06-25T15:01:27.128626Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=7; 2025-06-25T15:01:27.128728Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=48; 2025-06-25T15:01:27.128847Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=66; 
2025-06-25T15:01:27.129186Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=295; 2025-06-25T15:01:27.129230Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=20949; 2025-06-25T15:01:27.129393Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=9739224;raw_bytes=13544452;count=2;records=160000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:01:27.129522Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:01:27.129577Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:01:27.129647Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:01:27.151378Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-25T15:01:27.151538Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:27.151659Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-25T15:01:27.151744Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863373486;tx_id=18446744073709551615;;current_snapshot_ts=1750863661373; 2025-06-25T15:01:27.151786Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:27.151834Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:27.151876Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:27.151984Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:01:27.154736Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:01:27.154927Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:01:27.154962Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:01:27.155008Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:01:27.155060Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:01:27.155161Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-25T15:01:27.155241Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863373486;tx_id=18446744073709551615;;current_snapshot_ts=1750863661373; 2025-06-25T15:01:27.155333Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:01:27.155407Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:27.155458Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:01:27.155575Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:1524:3333];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184 160000/9739224 160000/9739224 160000/9739224 80000/4873744 0/0 >> KqpCost::IndexLookupAtLeast8BytesInStorage-useSink >> KqpQuery::ReadOverloaded-StreamLookup [GOOD] |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> EncryptedBackupParamsValidationTest::NoCommonDestination |91.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |92.0%| 
[TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanQueryRangeFullScan-SourceRead >> KqpCost::Range >> CompressExecutor::TestExecutorMemUsage [GOOD] >> KqpLimits::QSReplySize+useSink [GOOD] >> KqpLimits::QSReplySize-useSink |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::ReadOverloaded-StreamLookup [GOOD] Test command err: Trying to start YDB, gRPC: 4054, MsgBus: 7735 2025-06-25T15:00:51.220937Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902124148643293:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.221027Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001454/r3tmp/tmpHfGCLi/pdisk_1.dat TServer::EnableGrpc on GrpcPort 4054, node 1 2025-06-25T15:00:51.675656Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:51.677035Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902124148643270:2080] 1750863651219545 != 1750863651219548 2025-06-25T15:00:51.679183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:51.679249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:51.685597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:51.821556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:51.821583Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:51.821613Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:51.821727Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7735 2025-06-25T15:00:52.237374Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:7735 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:52.669923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:52.700220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:00:52.709206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:52.850177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.040544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.131247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.453336Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902137033546803:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.453445Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.866502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.905216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.984270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.014007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.045229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.116188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.186457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.250284Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902141328514765:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.250348Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.250525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902141328514770:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.254686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.261997Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902141328514772:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:00:55.319794Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902141328514823:3422] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.223640Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902124148643293:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.223703Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 3696, MsgBus: 27919 2025-06-25T15:00:57.219239Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902150345640704:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:57.219298Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: ... alhost:17775 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:20.228650Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:20.303942Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:20.451860Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:20.637800Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:21.067526Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:21.358247Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:22.011289Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:1682:3280], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:22.011502Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:22.037246Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:22.275566Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:22.611222Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:22.894071Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:23.171732Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:23.540791Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:23.827252Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:24.213327Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:2352:3775], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:24.213462Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:24.213844Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:2357:3780], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:24.232500Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:24.402791Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:2359:3782], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:24.461584Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:2417:3821] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:25.756497Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:26.031677Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:26.393959Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:28.261785Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [4:3104:4334], TxId: 281474976715675, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=OGQwMWI4ODQtMWE5YjI0YzMtMTI3ZTRlYjQtNmYxYjU4MzE=. TraceId : 01jyksqb61aypgbhdhh13rsbwx. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Source[0] fatal error: {
: Error: Table '/Root/SecondaryKeys' retry limit exceeded. } 2025-06-25T15:01:28.261923Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [4:3104:4334], TxId: 281474976715675, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=OGQwMWI4ODQtMWE5YjI0YzMtMTI3ZTRlYjQtNmYxYjU4MzE=. TraceId : 01jyksqb61aypgbhdhh13rsbwx. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: OVERLOADED DEFAULT_ERROR: {
: Error: Table '/Root/SecondaryKeys' retry limit exceeded. }. 2025-06-25T15:01:28.263030Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:3105:4335], TxId: 281474976715675, task: 2. Ctx: { TraceId : 01jyksqb61aypgbhdhh13rsbwx. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=OGQwMWI4ODQtMWE5YjI0YzMtMTI3ZTRlYjQtNmYxYjU4MzE=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:3098:4025], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-06-25T15:01:28.263958Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OGQwMWI4ODQtMWE5YjI0YzMtMTI3ZTRlYjQtNmYxYjU4MzE=, ActorId: [4:2680:4025], ActorState: ExecuteState, TraceId: 01jyksqb61aypgbhdhh13rsbwx, Create QueryResponse for error on request, msg: >> BackupRestore::TestReplaceRestoreOptionOnNonExistingSchemeObjects [GOOD] >> BackupRestoreS3::RestoreIndexTableDecimalSplitBoundaries >> KqpExplain::ReadTableRanges [GOOD] >> KqpExplain::Predicates >> BackupRestore::RestoreExternalDataSourceWithoutSecret [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobal >> KqpCost::IndexLookupAndTake-useSink |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapRangeFullScan >> KqpCost::PointLookup >> KqpQuery::OltpCreateAsSelect_Disable [GOOD] >> KqpQuery::OlapCreateAsSelect_Complex >> KqpCost::IndexLookupAtLeast8BytesInStorage+useSink >> KqpParams::CheckCacheWithRecompilationQuery [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> CompressExecutor::TestExecutorMemUsage [GOOD] Test command err: 2025-06-25T14:59:21.750824Z :WriteAndReadSomeMessagesWithAsyncCompression INFO: Random seed for debugging is 1750863561750792 2025-06-25T14:59:22.116886Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901740343950371:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:22.118217Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:59:22.169866Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901741609872930:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:22.169895Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:59:22.311114Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0010fc/r3tmp/tmpDhUHii/pdisk_1.dat 2025-06-25T14:59:22.331554Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:59:22.631625Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:22.631729Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:22.640811Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:22.677424Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:22.698452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:22.698508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-25T14:59:22.706555Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:59:22.721816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1203, node 1 2025-06-25T14:59:22.726291Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:59:22.726312Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T14:59:22.881156Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0010fc/r3tmp/yandexJ7A54k.tmp 2025-06-25T14:59:22.881186Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0010fc/r3tmp/yandexJ7A54k.tmp 2025-06-25T14:59:22.881684Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0010fc/r3tmp/yandexJ7A54k.tmp 2025-06-25T14:59:22.881855Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:59:23.108900Z INFO: TTestServer started on Port 9092 GrpcPort 1203 2025-06-25T14:59:23.135000Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:23.179537Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9092 PQClient connected to localhost:1203 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:23.456160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:59:25.051084Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901753228853227:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.051234Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.051751Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901753228853240:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.059703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:25.075687Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901753228853242:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720661 completed, doublechecking } 2025-06-25T14:59:25.139410Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901753228853329:2683] txid# 281474976720662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:25.470976Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519901754494775112:2274], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:59:25.470975Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901753228853346:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:59:25.471225Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZjM5YzZlYzItN2VhMWRhZTItYzY0ZWU2ZTUtOGQzNmNmYTQ=, ActorId: [2:7519901754494775086:2268], ActorState: ExecuteState, TraceId: 01jykskm9v3wmazc0z88qexnk5, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:59:25.474648Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:59:25.476791Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NzczZTc5M2EtOGE1YTJhYjctYWUyNGQzZDItODMzZTBlMjQ=, ActorId: [1:7519901753228853225:2296], ActorState: ExecuteState, TraceId: 01jykskm7r4zy91q3a4y1w91pq, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:59:25.477114Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." 
end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:59:25.525904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:25.644176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:25.751728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:1203", t ... erver" ip=ipv6:[::1]:55784 proto=v1 topic=test-topic durationSec=0 2025-06-25T15:01:26.867012Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-25T15:01:26.868846Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 3 sessionId: describe result for acl check 2025-06-25T15:01:26.868998Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-25T15:01:26.869013Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T15:01:26.869024Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-25T15:01:26.869048Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [15:7519902273159180927:2548] (SourceId=test-message-group-id, PreferedPartition=(NULL)) StartKqpSession 2025-06-25T15:01:26.880895Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: 
TPartitionChooser [15:7519902273159180927:2548] (SourceId=test-message-group-id, PreferedPartition=(NULL)) Select from the table 2025-06-25T15:01:27.093832Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715698. Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-25T15:01:27.093947Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [15:7519902273159180939:2550] TxId: 281474976715698. Ctx: { TraceId: 01jyksqb711ahp672rjtkexdb5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=ODk4NjhlOTUtNzhiYTM5MTgtNjk2ZGQ3MjItYTk5ZTRlZGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-25T15:01:27.094148Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=15&id=ODk4NjhlOTUtNzhiYTM5MTgtNjk2ZGQ3MjItYTk5ZTRlZGY=, ActorId: [15:7519902273159180928:2550], ActorState: ExecuteState, TraceId: 01jyksqb711ahp672rjtkexdb5, Create QueryResponse for error on request, msg: 2025-06-25T15:01:27.095946Z node 15 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [15:7519902273159180927:2548] (SourceId=test-message-group-id, PreferedPartition=(NULL)) ReplyError: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=ODk4NjhlOTUtNzhiYTM5MTgtNjk2ZGQ3MjItYTk5ZTRlZGY=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jyksqb74az5pv8fhs987ax55" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 2025-06-25T15:01:27.096066Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 3 reason: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=ODk4NjhlOTUtNzhiYTM5MTgtNjk2ZGQ3MjItYTk5ZTRlZGY=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jyksqb74az5pv8fhs987ax55" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 sessionId: Test retry state: get retry delay 2025-06-25T15:01:27.096436Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: is DEAD 2025-06-25T15:01:27.097596Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5e507e69-2ce320b3-cd09540d-9c0a1523_0] Got error. Status: UNAVAILABLE, Description:
: Error: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=ODk4NjhlOTUtNzhiYTM5MTgtNjk2ZGQ3MjItYTk5ZTRlZGY=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jyksqb74az5pv8fhs987ax55" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 , code: 500001 2025-06-25T15:01:27.097633Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5e507e69-2ce320b3-cd09540d-9c0a1523_0] Write session will restart in 2.000000s 2025-06-25T15:01:27.097748Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5e507e69-2ce320b3-cd09540d-9c0a1523_0] Write session: Do CDS request 2025-06-25T15:01:27.097787Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5e507e69-2ce320b3-cd09540d-9c0a1523_0] Do schedule cds request after 2000 ms 2025-06-25T15:01:27.228351Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715699. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T15:01:27.228468Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [15:7519902277454148271:2552] TxId: 281474976715699. Ctx: { TraceId: 01jyksqbcc5sp18c3ndefjjvga, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=NWQ1M2YxMDYtNGI1OTg0YWUtZjNkM2QyMTYtZGIyMzQ2MDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T15:01:27.228662Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=15&id=NWQ1M2YxMDYtNGI1OTg0YWUtZjNkM2QyMTYtZGIyMzQ2MDU=, ActorId: [15:7519902277454148266:2552], ActorState: ExecuteState, TraceId: 01jyksqbcc5sp18c3ndefjjvga, Create QueryResponse for error on request, msg: 2025-06-25T15:01:27.229861Z node 15 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyksqbcc5sp18c3ndgy70ac1" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-25T15:01:27.456429Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715702. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T15:01:27.456566Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [15:7519902277454148327:2541] TxId: 281474976715702. Ctx: { TraceId: 01jyksqb51fbrftvpt27bvqet3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=YjYxZmQxNGItNDNkZDY1MWEtNjczYWQzZi0zOTgxM2JhZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T15:01:27.456774Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=15&id=YjYxZmQxNGItNDNkZDY1MWEtNjczYWQzZi0zOTgxM2JhZg==, ActorId: [15:7519902273159180911:2541], ActorState: ExecuteState, TraceId: 01jyksqb51fbrftvpt27bvqet3, Create QueryResponse for error on request, msg: 2025-06-25T15:01:27.457979Z node 15 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyksqbk88xras5dxqbfh8ss9" } } YdbStatus: UNAVAILABLE ConsumedRu: 299 } 2025-06-25T15:01:27.577739Z node 16 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720680. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T15:01:27.577882Z node 16 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [16:7519902275697846555:2405] TxId: 281474976720680. Ctx: { TraceId: 01jyksqbj5ddbwkpynwd860wf7, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=16&id=YmZiNmFhYmEtOGNjZGUwZTctMTVmMjAwZGEtZDg3OTg1NTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T15:01:27.578113Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=16&id=YmZiNmFhYmEtOGNjZGUwZTctMTVmMjAwZGEtZDg3OTg1NTE=, ActorId: [16:7519902275697846552:2405], ActorState: ExecuteState, TraceId: 01jyksqbj5ddbwkpynwd860wf7, Create QueryResponse for error on request, msg: 2025-06-25T15:01:27.579123Z node 16 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyksqbj6e4r7rxv8qmd8cd89" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-25T15:01:27.695320Z node 16 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720681. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T15:01:27.695462Z node 16 :KQP_EXECUTER WARN: kqp_executer_impl.h:265: ActorId: [16:7519902275697846587:2399] TxId: 281474976720681. Ctx: { TraceId: 01jyksqbbkexyakfgkmp6sjbj1, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=16&id=NWJhN2VkY2UtNzA2Y2FlZWEtNDNhNWIwYzMtYmFhZDM5YTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-25T15:01:27.695724Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=16&id=NWJhN2VkY2UtNzA2Y2FlZWEtNDNhNWIwYzMtYmFhZDM5YTc=, ActorId: [16:7519902275697846539:2399], ActorState: ExecuteState, TraceId: 01jyksqbbkexyakfgkmp6sjbj1, Create QueryResponse for error on request, msg: 2025-06-25T15:01:27.697215Z node 16 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jyksqbtc4y74px8ndeb17av8" } } YdbStatus: UNAVAILABLE ConsumedRu: 308 } 2025-06-25T15:01:27.867180Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5e507e69-2ce320b3-cd09540d-9c0a1523_0] Write session: close. Timeout = 0 ms 2025-06-25T15:01:27.867260Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5e507e69-2ce320b3-cd09540d-9c0a1523_0] Write session will now close 2025-06-25T15:01:27.867345Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5e507e69-2ce320b3-cd09540d-9c0a1523_0] Write session: aborting 2025-06-25T15:01:27.868137Z :WARNING: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5e507e69-2ce320b3-cd09540d-9c0a1523_0] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2025-06-25T15:01:27.868190Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|5e507e69-2ce320b3-cd09540d-9c0a1523_0] Write session: destroy >> KqpLimits::StreamWrite-Allowed [GOOD] >> KqpLimits::TooBigColumn+useSink >> KqpLimits::ManyPartitionsSorting [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanQueryRangeFullScan+SourceRead >> KqpQuery::TableSink_ReplaceDataShardDataQuery-UseSink [GOOD] >> KqpQuery::TableSinkWithSubquery |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapWriteRow ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::CheckCacheWithRecompilationQuery [GOOD] Test command err: Trying to start YDB, gRPC: 3084, MsgBus: 1400 2025-06-25T15:00:51.568633Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902123229065893:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.569633Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001432/r3tmp/tmpqYqAWl/pdisk_1.dat 2025-06-25T15:00:52.073135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:52.073247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:52.123618Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902123229065863:2080] 1750863651553298 != 1750863651553301 2025-06-25T15:00:52.143551Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:52.144727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3084, node 1 2025-06-25T15:00:52.219202Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:52.219228Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:52.219234Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:52.219338Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1400 2025-06-25T15:00:52.583057Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1400 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:52.761439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:52.792458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:00:52.803067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.014107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:00:53.172672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.256818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.862455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902136113969383:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.862545Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.159973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.185139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.224719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.255287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.277744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.344817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.377023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.467211Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902140408937343:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.467284Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.467487Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902140408937348:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.471214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.483799Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902140408937350:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:00:55.548862Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902140408937401:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.568765Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902123229065893:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.568836Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 4950, MsgBus: 61625 2025-06-25T15:00:57.469039Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902149304080636:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:57.469436Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: ... fa80] received request Name# ListDatabases ok# false data# peer# 2025-06-25T15:01:30.802154Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000134780] received request Name# RemoveDatabase ok# false data# peer# 2025-06-25T15:01:30.802165Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000135580] received request Name# DescribeDatabaseOptions ok# false data# peer# 2025-06-25T15:01:30.802340Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000134e80] received request Name# GetScaleRecommendation ok# false data# peer# 2025-06-25T15:01:30.802366Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0002be380] received request Name# ListEndpoints ok# false data# peer# 2025-06-25T15:01:30.802455Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000141980] received request Name# WhoAmI ok# false data# peer# 2025-06-25T15:01:30.802535Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000f3480] received request Name# NodeRegistration ok# false data# peer# 2025-06-25T15:01:30.802669Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001c1c80] received request Name# Scan ok# false data# peer# 2025-06-25T15:01:30.802771Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00016f180] received request Name# GetShardLocations ok# false data# peer# 2025-06-25T15:01:30.802882Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001c2a80] received request Name# DescribeTable ok# false data# peer# 2025-06-25T15:01:30.802990Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000032c80] received request Name# CreateSnapshot ok# false data# peer# 2025-06-25T15:01:30.803091Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000031780] received request Name# RefreshSnapshot ok# false data# peer# 2025-06-25T15:01:30.803188Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000030980] received request Name# DiscardSnapshot ok# false data# peer# 2025-06-25T15:01:30.803284Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000042f80] received request Name# List ok# false data# peer# 2025-06-25T15:01:30.803406Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000042880] received request Name# RateLimiter/CreateResource ok# false data# peer# 2025-06-25T15:01:30.803478Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000040c80] received request Name# RateLimiter/AlterResource ok# false data# peer# 2025-06-25T15:01:30.803597Z node 6 :GRPC_SERVER 
DEBUG: logger.cpp:36: [0x51b000040580] received request Name# RateLimiter/DropResource ok# false data# peer# 2025-06-25T15:01:30.803659Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000031e80] received request Name# RateLimiter/ListResources ok# false data# peer# 2025-06-25T15:01:30.803788Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000032580] received request Name# RateLimiter/DescribeResource ok# false data# peer# 2025-06-25T15:01:30.803843Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000b0c80] received request Name# RateLimiter/AcquireResource ok# false data# peer# 2025-06-25T15:01:30.803992Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001a6a80] received request Name# CreateStream ok# false data# peer# 2025-06-25T15:01:30.804021Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000d9080] received request Name# ListStreams ok# false data# peer# 2025-06-25T15:01:30.804177Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001ac580] received request Name# DeleteStream ok# false data# peer# 2025-06-25T15:01:30.804205Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000183380] received request Name# DescribeStream ok# false data# peer# 2025-06-25T15:01:30.804412Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000344180] received request Name# SetWriteQuota ok# false data# peer# 2025-06-25T15:01:30.804418Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000d5180] received request Name# ListShards ok# false data# peer# 2025-06-25T15:01:30.804606Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00031d280] received request Name# UpdateStream ok# false data# peer# 2025-06-25T15:01:30.804612Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001b9e80] received request Name# PutRecord ok# false data# peer# 2025-06-25T15:01:30.804807Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001bac80] received request Name# GetRecords ok# false data# peer# 2025-06-25T15:01:30.804808Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000d7b80] received request Name# PutRecords ok# false data# peer# 2025-06-25T15:01:30.805011Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001bba80] received request Name# SubscribeToShard ok# false data# peer# 2025-06-25T15:01:30.805011Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00001b980] received request Name# GetShardIterator ok# false data# peer# 2025-06-25T15:01:30.805213Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001bc880] received request Name# DescribeLimits ok# false data# peer# 2025-06-25T15:01:30.805214Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000460780] received request Name# DescribeStreamSummary ok# false data# peer# 2025-06-25T15:01:30.805398Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000466280] received request Name# IncreaseStreamRetentionPeriod ok# false data# peer# 2025-06-25T15:01:30.805399Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001bd680] received request Name# DecreaseStreamRetentionPeriod ok# false data# peer# 2025-06-25T15:01:30.805601Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0001be480] received request Name# UpdateShardCount ok# false data# peer# 2025-06-25T15:01:30.805603Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000b1380] received request Name# UpdateStreamMode ok# false data# peer# 2025-06-25T15:01:30.805784Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000b1a80] received request Name# RegisterStreamConsumer ok# false data# peer# 2025-06-25T15:01:30.805793Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000b2180] received request Name# DeregisterStreamConsumer ok# false 
data# peer# 2025-06-25T15:01:30.805983Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000411480] received request Name# DescribeStreamConsumer ok# false data# peer# 2025-06-25T15:01:30.806023Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000348780] received request Name# ListStreamConsumers ok# false data# peer# 2025-06-25T15:01:30.806188Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000349580] received request Name# AddTagsToStream ok# false data# peer# 2025-06-25T15:01:30.806219Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000347980] received request Name# DisableEnhancedMonitoring ok# false data# peer# 2025-06-25T15:01:30.806373Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00034b880] received request Name# EnableEnhancedMonitoring ok# false data# peer# 2025-06-25T15:01:30.806495Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00031fc80] received request Name# ListTagsForStream ok# false data# peer# 2025-06-25T15:01:30.806548Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000342c80] received request Name# MergeShards ok# false data# peer# 2025-06-25T15:01:30.806715Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000343a80] received request Name# RemoveTagsFromStream ok# false data# peer# 2025-06-25T15:01:30.806734Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00031bd80] received request Name# SplitShard ok# false data# peer# 2025-06-25T15:01:30.806916Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000346480] received request Name# StartStreamEncryption ok# false data# peer# 2025-06-25T15:01:30.806923Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000347280] received request Name# StopStreamEncryption ok# false data# peer# 2025-06-25T15:01:30.807106Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000341780] received request Name# SelfCheck ok# false data# peer# 2025-06-25T15:01:30.807117Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00031e780] received request Name# NodeCheck ok# false data# peer# 2025-06-25T15:01:30.807301Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0003aca80] received request Name# CreateSession ok# false data# peer# 2025-06-25T15:01:30.807359Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0003a1480] received request Name# DeleteSession ok# false data# peer# 2025-06-25T15:01:30.807500Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000354b80] received request Name# AttachSession ok# false data# peer# 2025-06-25T15:01:30.807568Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000cfd80] received request Name# BeginTransaction ok# false data# peer# 2025-06-25T15:01:30.807773Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0003a3e80] received request Name# CommitTransaction ok# false data# peer# 2025-06-25T15:01:30.807872Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0003a4c80] received request Name# RollbackTransaction ok# false data# peer# 2025-06-25T15:01:30.807998Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000346b80] received request Name# ExecuteQuery ok# false data# peer# 2025-06-25T15:01:30.808062Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0003aa780] received request Name# ExecuteScript ok# false data# peer# 2025-06-25T15:01:30.808215Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0003a8b80] received request Name# FetchScriptResults ok# false data# peer# 2025-06-25T15:01:30.808262Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00034d480] received request Name# ExecuteTabletMiniKQL ok# false data# peer# 2025-06-25T15:01:30.808429Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0003a7680] received request 
Name# ChangeTabletSchema ok# false data# peer# 2025-06-25T15:01:30.808643Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0003a8480] received request Name# RestartTablet ok# false data# peer# 2025-06-25T15:01:30.808676Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00034e980] received request Name# CreateLogStore ok# false data# peer# 2025-06-25T15:01:30.808841Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0003a5380] received request Name# DescribeLogStore ok# false data# peer# 2025-06-25T15:01:30.808863Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0003a2980] received request Name# DropLogStore ok# false data# peer# 2025-06-25T15:01:30.809049Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b00034fe80] received request Name# AlterLogStore ok# false data# peer# 2025-06-25T15:01:30.809068Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0003a7d80] received request Name# CreateLogTable ok# false data# peer# 2025-06-25T15:01:30.809243Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000273d80] received request Name# DescribeLogTable ok# false data# peer# 2025-06-25T15:01:30.809265Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000a2c80] received request Name# DropLogTable ok# false data# peer# 2025-06-25T15:01:30.809476Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0002ef380] received request Name# Login ok# false data# peer# 2025-06-25T15:01:30.809478Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000458280] received request Name# AlterLogTable ok# false data# peer# 2025-06-25T15:01:30.809677Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b0000a2580] received request Name# DescribeReplication ok# false data# peer# 2025-06-25T15:01:30.809682Z node 6 :GRPC_SERVER DEBUG: logger.cpp:36: [0x51b000053280] received request Name# DescribeView ok# false data# peer# ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::ManyPartitionsSorting [GOOD] Test command err: Trying to start YDB, gRPC: 20297, MsgBus: 24919 2025-06-25T15:01:06.198951Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902188683565471:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:06.199012Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013f2/r3tmp/tmpvKMn5F/pdisk_1.dat 2025-06-25T15:01:06.536829Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:06.537758Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902188683565449:2080] 1750863666197022 != 1750863666197025 TServer::EnableGrpc on GrpcPort 20297, node 1 2025-06-25T15:01:06.632534Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:06.632713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:06.638414Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:06.700807Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:06.700827Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:06.700840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:06.700962Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24919 TClient is connected to server localhost:24919 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T15:01:07.213942Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:07.307812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:07.354982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:07.501658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:07.647556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:07.710546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:09.297206Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902201568468987:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:09.297303Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:09.593762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:09.633028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:09.705956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:09.758376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:09.805356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:09.856138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:09.901493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:10.005728Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902205863436945:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:10.005820Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:10.006064Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902205863436950:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:10.010318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:10.027480Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902205863436952:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:10.096579Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902205863437003:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:11.199049Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902188683565471:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:11.199110Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:11.290904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:14.632902Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7 ... en\":\"0\",\"E-Rows\":\"0\",\"Table\":\"ManyShardsTable\",\"ReadColumns\":[\"Data\",\"Key\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TableFullScan\"}],\"Node Type\":\"Stage\",\"Stats\":{\"UseLlvm\":\"undefined\",\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":4,\"Sum\":100,\"Max\":25,\"Min\":25},\"Rows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"LastMessageMs\":{\"Count\":4,\"Sum\":447,\"Max\":129,\"Min\":104},\"ActiveMessageMs\":{\"Count\":4,\"Max\":129,\"Min\":4},\"FirstMessageMs\":{\"Count\":4,\"Sum\":18,\"Max\":5,\"Min\":4},\"Bytes\":{\"Count\":4,\"Sum\":8168,\"Max\":2075,\"Min\":2004,\"History\":[21,913,22,1848,43,3176,62,4004,67,4502,83,4967,89,5216,100,5548,104,5963,105,6349,106,6681,108,6923,119,7670,133,8168]},\"ActiveTimeUs\":{\"Count\":4,\"Sum\":429000,\"Max\":125000,\"Min\":99000}},\"Name\":\"4\",\"Push\":{\"Rows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"LastMessageMs\":{\"Count\":4,\"Sum\":447,\"Max\":129,\"Min\":104},\"Chunks\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"ResumeMessageMs\":{\"Count\":4,\"Sum\":446,\"Max\":128,\"Min\":104},\"FirstMessageMs\":{\"Count\":4,\"Sum\":18,\"Max\":5,\"Min\":4},\"ActiveMessageMs\":{\"Count\":4,\"Max\":129,\"Min\":4},\"PauseMessageMs\":{\"Count\":4,\"Sum\":1,\"Max\":1,\"Min\":0},\"ActiveTimeUs\":{\"Count\":4,\"Sum\":429000,\"Max\":125000,\"Min\":99000},\"WaitTimeUs\":{\"Count\":4,\"Sum\":431369,\"Max\":125412,\"Min\":100939,\"History\":[21,40448,22,81093,43,140224,62,178487,67,201613,83,243031,89,265439,100,343252,104,361882,105,383125,106,397704,108,403763,119,422473,133,431369]},\"WaitPeriods\":{\"Count\":4,\"Sum\":90,\"Max\":24,\"Min\":22}}}],\"DurationUs\":{\"Count\":4,\"Sum\":434000,\"Max\":128000,\"Min\":100000},\"MaxMemoryUsage\":{\"Count\":4,\"Sum\":4194304,\"Max\":1048576,\"Min\":1048576,\"History\":[0,3145728,1,4194304,133,4194304]},\"Tasks\":4,\"OutputRows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"FinishedTasks\":4,\"IngressRows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"PhysicalStageId\":0,\"StageDurationUs\":129000,\"Table\":[{\"Path\":\"\\/Root\\/ManyShardsTable\",\"ReadRows\":{\"Count\":4,\"Sum\":1100,\
"Max\":276,\"Min\":274},\"ReadBytes\":{\"Count\":4,\"Sum\":8800,\"Max\":2208,\"Min\":2192}}],\"BaseTimeMs\":1750863690964,\"WaitInputTimeUs\":{\"Count\":4,\"Sum\":404062,\"Max\":118368,\"Min\":93808,\"History\":[21,32110,22,62112,43,119900,62,157144,67,179748,83,220524,89,242679,100,320132,104,338312,105,359049,106,373281,108,379054,119,395755,133,404062]},\"OutputBytes\":{\"Count\":4,\"Sum\":8168,\"Max\":2075,\"Min\":2004},\"CpuTimeUs\":{\"Count\":4,\"Sum\":17491,\"Max\":5722,\"Min\":3238,\"History\":[0,1210,1,1263,21,2570,22,4668,43,7247,62,9662,67,10367,83,11206,89,11542,100,12046,104,13994,105,14646,106,15153,108,15520,119,16855,133,17491]},\"Ingress\":[{\"Pop\":{\"Chunks\":{\"Count\":4,\"Sum\":100,\"Max\":25,\"Min\":25},\"Rows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"LastMessageMs\":{\"Count\":4,\"Sum\":446,\"Max\":128,\"Min\":104},\"ActiveMessageMs\":{\"Count\":4,\"Max\":128,\"Min\":4},\"FirstMessageMs\":{\"Count\":4,\"Sum\":18,\"Max\":5,\"Min\":4},\"Bytes\":{\"Count\":4,\"Sum\":35200,\"Max\":8832,\"Min\":8768,\"History\":[21,3872,22,8128,43,13760,62,17280,67,19392,83,21504,89,22560,100,23968,104,25728,105,27488,106,28896,108,29920,119,33088,133,35200]},\"ActiveTimeUs\":{\"Count\":4,\"Sum\":428000,\"Max\":124000,\"Min\":99000}},\"External\":{},\"Name\":\"KqpReadRangesSource\",\"Ingress\":{},\"Push\":{\"Rows\":{\"Count\":4,\"Sum\":1100,\"Max\":276,\"Min\":274},\"LastMessageMs\":{\"Count\":4,\"Sum\":446,\"Max\":128,\"Min\":104},\"Chunks\":{\"Count\":4,\"Sum\":100,\"Max\":25,\"Min\":25},\"ResumeMessageMs\":{\"Count\":4,\"Sum\":446,\"Max\":128,\"Min\":104},\"FirstMessageMs\":{\"Count\":4,\"Sum\":17,\"Max\":5,\"Min\":3},\"ActiveMessageMs\":{\"Count\":4,\"Max\":128,\"Min\":3},\"Bytes\":{\"Count\":4,\"Sum\":35200,\"Max\":8832,\"Min\":8768,\"History\":[21,3872,22,8128,43,13760,62,17280,67,19392,83,21504,89,22560,100,23968,104,25728,105,27488,106,28896,108,29920,119,33088,133,35200]},\"PauseMessageMs\":{\"Count\":4,\"Sum\":1,\"Max\":1,\"Min\":0},\"ActiveTimeUs\":{\"Count\":4,\"Sum\":429000,\"Max\":124000,\"Min\":99000},\"WaitTimeUs\":{\"Count\":4,\"Sum\":431468,\"Max\":125568,\"Min\":100963,\"History\":[21,40323,22,80969,43,140136,62,178438,67,201572,83,242945,89,265353,100,343178,104,361816,105,383072,106,397643,108,403704,119,422452,133,431468]},\"WaitPeriods\":{\"Count\":4,\"Sum\":90,\"Max\":24,\"Min\":21}}}],\"UpdateTimeMs\":132}}],\"Node Type\":\"Merge\",\"SortColumns\":[\"Key (Asc)\"],\"PlanNodeType\":\"Connection\"}],\"Node 
Type\":\"Stage\",\"Stats\":{\"UseLlvm\":\"undefined\",\"OutputRows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"PhysicalStageId\":1,\"FinishedTasks\":1,\"InputBytes\":{\"Count\":1,\"Sum\":8168,\"Max\":8168,\"Min\":8168},\"DurationUs\":{\"Count\":1,\"Sum\":129000,\"Max\":129000,\"Min\":129000},\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[1,1048576,133,1048576]},\"BaseTimeMs\":1750863690964,\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":39,\"Max\":39,\"Min\":39},\"Rows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"LastMessageMs\":{\"Count\":1,\"Sum\":132,\"Max\":132,\"Min\":132},\"ActiveMessageMs\":{\"Count\":1,\"Max\":132,\"Min\":6},\"FirstMessageMs\":{\"Count\":1,\"Sum\":6,\"Max\":6,\"Min\":6},\"Bytes\":{\"Count\":1,\"Sum\":7804,\"Max\":7804,\"Min\":7804,\"History\":[21,362,43,935,67,1433,90,1682,110,2945,133,7804]},\"ActiveTimeUs\":{\"Count\":1,\"Sum\":126000,\"Max\":126000,\"Min\":126000}},\"Name\":\"RESULT\",\"Push\":{\"Rows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"LastMessageMs\":{\"Count\":1,\"Sum\":132,\"Max\":132,\"Min\":132},\"Chunks\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":129,\"Max\":129,\"Min\":129},\"FirstMessageMs\":{\"Count\":1,\"Sum\":6,\"Max\":6,\"Min\":6},\"ActiveMessageMs\":{\"Count\":1,\"Max\":132,\"Min\":6},\"PauseMessageMs\":{\"Count\":1,\"Sum\":6,\"Max\":6,\"Min\":6},\"ActiveTimeUs\":{\"Count\":1,\"Sum\":126000,\"Max\":126000,\"Min\":126000},\"WaitTimeUs\":{\"Count\":1,\"Sum\":122836,\"Max\":122836,\"Min\":122836,\"History\":[21,19821,43,40516,67,63607,90,86055,110,104387,133,122836]},\"WaitPeriods\":{\"Count\":1,\"Sum\":32,\"Max\":32,\"Min\":32},\"WaitMessageMs\":{\"Count\":1,\"Max\":129,\"Min\":6}}}],\"CpuTimeUs\":{\"Count\":1,\"Sum\":11810,\"Max\":11810,\"Min\":11810,\"History\":[1,404,21,1647,43,3476,67,4495,90,5002,110,7142,133,11810]},\"StageDurationUs\":129000,\"ResultRows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"ResultBytes\":{\"Count\":1,\"Sum\":7804,\"Max\":7804,\"Min\":7804},\"OutputBytes\":{\"Count\":1,\"Sum\":7804,\"Max\":7804,\"Min\":7804},\"Input\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":45,\"Max\":45,\"Min\":45},\"Rows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"LastMessageMs\":{\"Count\":1,\"Sum\":131,\"Max\":131,\"Min\":131},\"ActiveMessageMs\":{\"Count\":1,\"Max\":131,\"Min\":6},\"FirstMessageMs\":{\"Count\":1,\"Sum\":6,\"Max\":6,\"Min\":6},\"Bytes\":{\"Count\":1,\"Sum\":8168,\"Max\":8168,\"Min\":8168,\"History\":[21,694,43,1267,67,1765,90,2014,110,3259,133,8168]},\"ActiveTimeUs\":{\"Count\":1,\"Sum\":125000,\"Max\":125000,\"Min\":125000}},\"Name\":\"2\",\"Push\":{\"Rows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"LastMessageMs\":{\"Count\":1,\"Sum\":129,\"Max\":129,\"Min\":129},\"Chunks\":{\"Count\":1,\"Sum\":100,\"Max\":100,\"Min\":100},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":129,\"Max\":129,\"Min\":129},\"FirstMessageMs\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"ActiveMessageMs\":{\"Count\":1,\"Max\":129,\"Min\":4},\"Bytes\":{\"Count\":1,\"Sum\":8168,\"Max\":8168,\"Min\":8168,\"History\":[21,1690,43,3425,67,4906,90,5620,110,7089,133,8168]},\"PauseMessageMs\":{\"Count\":1,\"Sum\":4,\"Max\":4,\"Min\":4},\"ActiveTimeUs\":{\"Count\":1,\"Sum\":125000,\"Max\":125000,\"Min\":125000},\"WaitTimeUs\":{\"Count\":1,\"Sum\":29813,\"Max\":29813,\"Min\":29813,\"History\":[21,4795,43,9833,67,15473,90,21013,110,25457,133,29813]},\"WaitPeriods\":{\"Count\":1,\"Sum\":40
,\"Max\":40,\"Min\":40},\"WaitMessageMs\":{\"Count\":1,\"Max\":129,\"Min\":4}}}],\"UpdateTimeMs\":133,\"InputRows\":{\"Count\":1,\"Sum\":1100,\"Max\":1100,\"Min\":1100},\"Tasks\":1}}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"Compilation\":{\"FromCache\":false,\"DurationUs\":173403,\"CpuTimeUs\":169676},\"ProcessCpuTimeUs\":300,\"TotalDurationUs\":457026,\"ResourcePoolId\":\"default\",\"QueuedTimeUs\":130201},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Plans\":[{\"PlanNodeId\":5,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"ReadRanges\":[\"Key (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Path\":\"\\/Root\\/ManyShardsTable\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"0\",\"Table\":\"ManyShardsTable\",\"ReadColumns\":[\"Data\",\"Key\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TableFullScan\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"PlanNodeType\":\"Query\"}}" query_ast: "(\n(let $1 (KqpTable \'\"/Root/ManyShardsTable\" \'\"72057594046644480:2\" \'\"\" \'1))\n(let $2 (KqpRowsSourceSettings $1 \'(\'\"Data\" \'\"Key\") \'(\'(\'\"Sorted\")) (Void) \'()))\n(let $3 (StructType \'(\'\"Data\" (OptionalType (DataType \'Int32))) \'(\'\"Key\" (OptionalType (DataType \'Uint32)))))\n(let $4 \'(\'(\'\"_logical_id\" \'367) \'(\'\"_id\" \'\"1d5418c9-422fa7f5-cfa9b562-c27206ea\") \'(\'\"_wide_channels\" $3)))\n(let $5 (DqPhyStage \'((DqSource (DataSource \'\"KqpReadRangesSource\") $2)) (lambda \'($9) (block \'(\n (let $10 (lambda \'($11) (Member $11 \'\"Data\") (Member $11 \'\"Key\")))\n (return (FromFlow (ExpandMap (ToFlow $9) $10)))\n))) $4))\n(let $6 (DqCnMerge (TDqOutput $5 \'\"0\") \'(\'(\'1 \'\"Asc\"))))\n(let $7 (DqPhyStage \'($6) (lambda \'($12) (FromFlow (NarrowMap (ToFlow $12) (lambda \'($13 $14) (AsStruct \'(\'\"Data\" $13) \'(\'\"Key\" $14)))))) \'(\'(\'\"_logical_id\" \'379) \'(\'\"_id\" \'\"de1a813a-8810e52e-ae98115b-626c93c0\"))))\n(let $8 (DqCnResult (TDqOutput $7 \'\"0\") \'(\'\"Key\" \'\"Data\")))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($5 $7) \'($8) \'() \'(\'(\'\"type\" \'\"generic\")))) \'((KqpTxResultBinding (ListType $3) \'\"0\" \'\"0\")) \'(\'(\'\"type\" \'\"query\"))))\n)\n" total_duration_us: 457026 total_cpu_time_us: 303316 query_meta: 
"{\"query_database\":\"/Root\",\"query_parameter_types\":{},\"table_metadata\":[\"{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/ManyShardsTable\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":2},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"Data\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"Int32\\\",\\\"TypeId\\\":1,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Key\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"Uint32\\\",\\\"TypeId\\\":2,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"Key\\\"],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}\"],\"table_meta_serialization_type\":2,\"created_at\":\"1750863690\",\"query_type\":\"QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY\",\"query_syntax\":\"1\",\"query_cluster\":\"db\",\"query_id\":\"909cde24-7da19cae-4d17694b-8aeab7f5\",\"version\":\"1.0\"}" |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpParams::Decimal-QueryService+UseSink [GOOD] >> KqpParams::Decimal+QueryService+UseSink >> KqpExplain::FullOuterJoin [GOOD] |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-DYNUMBER [GOOD] >> KqpCost::OlapPointLookup >> KqpCost::QuerySeviceRangeFullScan ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::FullOuterJoin [GOOD] Test command err: Trying to start YDB, gRPC: 30401, MsgBus: 3458 2025-06-25T15:00:59.833411Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902159176136429:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:59.837657Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013fe/r3tmp/tmp4QZjha/pdisk_1.dat 2025-06-25T15:01:00.217899Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30401, node 1 2025-06-25T15:01:00.265009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:00.280367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:00.286235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:00.412018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:00.412043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:00.412049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:00.412152Z 
node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3458 TClient is connected to server localhost:3458 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T15:01:00.854462Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:00.966332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:00.993090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:01.164434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:01.312019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:01.399812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:03.200658Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902176356007203:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:03.200764Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:03.521820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.551898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.651702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.705385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.784225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.840875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.913030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.992676Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902176356007876:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:03.992764Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:03.993015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902176356007881:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:03.997060Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:04.011218Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902176356007883:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:04.107254Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902180650975230:3431] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:04.836440Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902159176136429:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:04.836504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Operators":[{"Inputs":[],"Iterator":"precompute_0_0","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_0_0"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"},{"PlanNodeId":6,"Subplan Name":"CTE precompute_0_0","Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"Tables":["EightShard"],"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["KeyValue"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Key (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/KeyValue","E-Rows":"0","Table":"KeyValue","ReadColumns":["Key"],"E-Cost":"0"}],"Node Type":"TableFullScan"}],"Node Type":"Broadcast","Pl ... Grpc on GrpcPort 13374, node 5 2025-06-25T15:01:25.921051Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:25.921171Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:25.928856Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:25.958696Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:25.958722Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:25.958732Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:25.958880Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27563 TClient is connected to server localhost:27563 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:26.580168Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:26.595458Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T15:01:26.615798Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:26.693640Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:26.780961Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:26.876996Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T15:01:26.953185Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:29.911871Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519902284299064418:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:29.911969Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:30.011433Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:30.094061Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:30.139629Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:30.187974Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:30.238921Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:30.289748Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:30.372862Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:30.463723Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519902288594032376:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:30.463857Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:30.464149Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519902288594032381:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:30.473335Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:30.489594Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519902288594032383:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:30.555509Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519902288594032434:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:30.772407Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519902267119193603:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:30.772478Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:31.883448Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:32.279427Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.342136Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) >> KqpCost::OltpWriteRow-isSink |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanScriptingRangeFullScan-SourceRead [GOOD] >> KqpCost::ScanScriptingRangeFullScan+SourceRead [GOOD] >> KqpCost::IndexLookupJoin+StreamLookupJoin |92.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> EncryptedBackupParamsValidationTest::NoCommonDestination [GOOD] >> KqpCost::IndexLookupAndTake+useSink [GOOD] >> KqpCost::IndexLookup-useSink [GOOD] >> KqpCost::IndexLookup+useSink [GOOD] >> KqpCost::IndexLookupAtLeast8BytesInStorage-useSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanScriptingRangeFullScan+SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 6606, MsgBus: 5459 2025-06-25T15:01:28.256941Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902279808378561:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:28.257004Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001685/r3tmp/tmpBHC9X7/pdisk_1.dat 2025-06-25T15:01:28.781437Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:28.781562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:28.806826Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:28.822593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6606, node 1 2025-06-25T15:01:29.021485Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:29.021509Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:29.021525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:29.021624Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:29.280231Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5459 TClient is connected to server localhost:5459 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:30.040868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:30.096152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:30.305934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:01:30.506212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:30.624238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:32.036185Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902296988249275:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.036268Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.505986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.549007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.588172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.628116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.671635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.727787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.795751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.873414Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902296988249934:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.873498Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.873792Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902296988249939:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.879193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:32.888960Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902296988249941:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:32.947747Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902296988249992:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:33.259962Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902279808378561:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:33.260033Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:34.756857Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863694769, txId: 281474976710672] shutting down >> KqpCost::Range [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanScriptingRangeFullScan-SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 1883, MsgBus: 64421 2025-06-25T15:01:28.258141Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902281604485121:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:28.288957Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00167f/r3tmp/tmpJIBJzi/pdisk_1.dat 2025-06-25T15:01:28.758271Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902281604485103:2080] 1750863688251267 != 1750863688251270 2025-06-25T15:01:28.764930Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:28.783323Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:28.783428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:28.786017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1883, node 1 2025-06-25T15:01:29.020837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:29.020859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:29.020867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:29.020972Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:29.300651Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64421 TClient is connected to server localhost:64421 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:29.996458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:30.037245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:30.056620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:30.253853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:01:30.467878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:30.542349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:31.737275Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902294489388639:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:31.737394Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.504090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.539160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.623569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.699993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.740980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.794037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.895509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.996262Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902298784356609:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.996366Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.996618Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902298784356614:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:33.000298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:33.011488Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902298784356616:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:33.069622Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902303079323963:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:33.259767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902281604485121:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:33.259852Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:34.786791Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863694769, txId: 281474976710672] shutting down >> KqpCost::ScanQueryRangeFullScan-SourceRead [GOOD] >> KqpCost::OlapRange ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-DYNUMBER [GOOD] Test command err: 2025-06-25T14:59:27.525139Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901760599952709:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:27.527839Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014c2/r3tmp/tmptUHJUR/pdisk_1.dat 2025-06-25T14:59:27.908743Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:27.949329Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:27.949420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:27.953528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3269, node 1 2025-06-25T14:59:28.025501Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:28.025537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:28.025547Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:28.025682Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64330 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:28.259088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:59:28.540575Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:29.834530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901769189888288:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.834605Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.156147Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519901760599952934:2143] Handle TEvProposeTransaction 2025-06-25T14:59:30.156181Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519901760599952934:2143] TxId# 281474976710658 ProcessProposeTransaction 2025-06-25T14:59:30.156242Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519901760599952934:2143] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519901773484855623:2614] 2025-06-25T14:59:30.235128Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519901773484855623:2614] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 10 SplitByLoadSettings { Enabled: true } } } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-06-25T14:59:30.235180Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519901773484855623:2614] txid# 281474976710658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:59:30.235528Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519901773484855623:2614] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:59:30.235614Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519901773484855623:2614] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:59:30.235820Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519901773484855623:2614] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:59:30.235956Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519901773484855623:2614] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:59:30.236009Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519901773484855623:2614] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-25T14:59:30.236185Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519901773484855623:2614] txid# 281474976710658 HANDLE EvClientConnected 2025-06-25T14:59:30.237546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:30.239258Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519901773484855623:2614] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-06-25T14:59:30.239345Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519901773484855623:2614] txid# 281474976710658 SEND to# [1:7519901773484855622:2303] Source 
{TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-06-25T14:59:30.363467Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7519901760599952934:2143] Handle TEvNavigate describe path /Root/table 2025-06-25T14:59:30.376886Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7519901773484855764:2727] HANDLE EvNavigateScheme /Root/table 2025-06-25T14:59:30.377226Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7519901773484855764:2727] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T14:59:30.377341Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7519901773484855764:2727] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/table" Options { ShowPrivateTable: false } 2025-06-25T14:59:30.378464Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7519901773484855764:2727] Handle TEvDescribeSchemeResult Forward to# [1:7519901773484855762:2309] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/table" PathDescription { Self { Name: "table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750863570351 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 
UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 10 SplitByLoadSettings { Enabled: true } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 ... s: DoExecute 2025-06-25T15:01:32.522723Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:990: TImport::TTxProgress: OnSchemeResult: id# 281474976715665, itemIdx# 0, success# 1 2025-06-25T15:01:32.523351Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:630: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-25T15:01:32.544991Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:01:32.545151Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:01:32.545176Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1219: TImport::TTxProgress: OnAllocateResult: txId# 281474976710760, id# 281474976715665 2025-06-25T15:01:32.545258Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:420: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710760 2025-06-25T15:01:32.545475Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:01:32.547378Z node 52 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710760:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.552357Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:01:32.552389Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1315: TImport::TTxProgress: OnModifyResult: txId# 281474976710760, status# StatusAccepted 2025-06-25T15:01:32.552573Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:644: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' 
DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976710760 Issue: '' } 2025-06-25T15:01:32.562577Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:01:32.657045Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [52:7519902297302300778:2364] [0] Resolve database: name# /Root 2025-06-25T15:01:32.657685Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [52:7519902297302300778:2364] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T15:01:32.657718Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [52:7519902297302300778:2364] [0] Send request: schemeShardId# 72057594046644480 2025-06-25T15:01:32.659867Z node 52 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [52:7519902297302300778:2364] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715665 Status: SUCCESS Progress: PROGRESS_PREPARING ImportFromS3Settings { endpoint: "localhost:12696" scheme: HTTP bucket: "test_bucket" items { source_prefix: "DyNumberTable" destination_path: "/Root/DyNumberTable" } } StartTime { seconds: 1750863692 } } 2025-06-25T15:01:32.700165Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:01:32.700208Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1473: TImport::TTxProgress: OnNotifyResult: txId# 281474976710760 2025-06-25T15:01:32.700353Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:630: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-25T15:01:32.701994Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:01:32.702130Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:01:32.702153Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1219: TImport::TTxProgress: OnAllocateResult: txId# 281474976710761, id# 281474976715665 2025-06-25T15:01:32.702223Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:521: TImport::TTxProgress: Restore propose: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710761 2025-06-25T15:01:32.703436Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:386: 
TImport::TTxProgress: DoComplete 2025-06-25T15:01:32.704103Z node 52 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976710761:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_backup_restore_common.h:563) 2025-06-25T15:01:32.709465Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:01:32.709506Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1315: TImport::TTxProgress: OnModifyResult: txId# 281474976710761, status# StatusAccepted 2025-06-25T15:01:32.709631Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:644: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/DyNumberTable' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976710761 Issue: '' } 2025-06-25T15:01:32.712385Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete REQUEST: HEAD /test_bucket/DyNumberTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:12696 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: EA09DF83-CBFF-4FC2-A810-4584F45918EA amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250625/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=95a1f12e38403efa3fa6342afee1a40c05e414f404f159cd094e3e413e34953a content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250625T150132Z S3_MOCK::HttpServeRead: /test_bucket/DyNumberTable/data_00.csv / 7 REQUEST: GET /test_bucket/DyNumberTable/data_00.csv HTTP/1.1 HEADERS: Host: localhost:12696 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: E916B5CE-6300-4AA2-8A56-9DD416E22110 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250625/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=de0a560c09ecc5f88faf683f5c9ec069c59b865ad75042072147ffec5049189e content-type: application/xml range: bytes=0-6 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250625T150132Z S3_MOCK::HttpServeRead: /test_bucket/DyNumberTable/data_00.csv / 7 2025-06-25T15:01:32.823036Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:01:32.823066Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1473: TImport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-25T15:01:32.831526Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:01:33.078715Z 
node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [52:7519902301597268187:2368] [0] Resolve database: name# /Root 2025-06-25T15:01:33.079396Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [52:7519902301597268187:2368] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T15:01:33.079436Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [52:7519902301597268187:2368] [0] Send request: schemeShardId# 72057594046644480 2025-06-25T15:01:33.080497Z node 52 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [52:7519902301597268187:2368] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715665 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:12696" scheme: HTTP bucket: "test_bucket" items { source_prefix: "DyNumberTable" destination_path: "/Root/DyNumberTable" } } StartTime { seconds: 1750863692 } EndTime { seconds: 1750863692 } } 2025-06-25T15:01:33.236331Z node 52 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [52:7519902267237527851:2116] Handle TEvExecuteKqpTransaction 2025-06-25T15:01:33.236366Z node 52 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [52:7519902267237527851:2116] TxId# 281474976715666 ProcessProposeKqpTransaction 2025-06-25T15:01:33.238661Z node 52 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyksqh9161nnydtkdygszna6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=52&id=ZjU0MTlhZWYtZGYyMWEzNTEtNjg0M2UwNTQtNDJkOTUzMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAndTake+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 12891, MsgBus: 9485 2025-06-25T15:01:28.331210Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902283634416751:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:28.331253Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001666/r3tmp/tmpRHOkz8/pdisk_1.dat 2025-06-25T15:01:28.767265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:28.767355Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:28.775398Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:28.780456Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902283634416530:2080] 1750863688261006 != 1750863688261009 2025-06-25T15:01:28.789584Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12891, node 1 2025-06-25T15:01:29.016234Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:29.016259Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:29.016274Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:29.018044Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:29.312523Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9485 TClient is connected to server localhost:9485 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:01:29.946242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:29.993673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:30.017493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:30.204422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:30.405138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:30.487830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:31.994509Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902296519320063:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:31.994623Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.502867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.534933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.568527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.614745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.665629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.733870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.817856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.922262Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902300814288022:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.922308Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.926434Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902300814288027:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.932910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:32.968531Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902300814288029:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:33.073071Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902305109255378:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:33.331623Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902283634416751:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:33.331764Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:34.358938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) /Root/SecondaryKeys/Index/indexImplTable 2 16 /Root/SecondaryKeys 1 8 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookup-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 30887, MsgBus: 9828 2025-06-25T15:01:28.264599Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902282889343389:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:28.264686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001688/r3tmp/tmpPlIbQc/pdisk_1.dat 2025-06-25T15:01:28.782531Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:28.783209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:28.783333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:28.810635Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30887, node 1 2025-06-25T15:01:29.015194Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:29.015220Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:29.015226Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:29.015368Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:29.268421Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9828 TClient is connected to server localhost:9828 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:29.999469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:30.047130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:30.218651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:01:30.404194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:30.508544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:31.878424Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902295774246728:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:31.878563Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.502821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.532359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.566251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.626763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.658355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.707076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.746301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.881424Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902300069214686:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.881560Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.882208Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902300069214691:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.886148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:32.901054Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902300069214693:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:32.980460Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902300069214744:3429] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:33.265992Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902282889343389:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:33.266066Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:34.426955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 >> KqpCost::IndexLookupJoin-StreamLookupJoin ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookup+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 23608, MsgBus: 14770 2025-06-25T15:01:28.258597Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902281270443085:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:28.260235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001674/r3tmp/tmpNmy2sE/pdisk_1.dat 2025-06-25T15:01:28.760857Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902281270443065:2080] 1750863688252664 != 1750863688252667 2025-06-25T15:01:28.772815Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:28.778941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:28.779076Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:28.781826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23608, node 1 2025-06-25T15:01:29.016928Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:29.016951Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:29.016962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:29.017087Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:29.291237Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14770 TClient is connected to server localhost:14770 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:29.948106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:29.992808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:30.016216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:30.208127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:30.428302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:30.513156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:31.733636Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902294155346611:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:31.733769Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.504162Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.542393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.583153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.637276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.679987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.766136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.863629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.918800Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902298450314574:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.918867Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.920537Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902298450314579:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.927999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:32.943664Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902298450314581:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:33.024243Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902302745281928:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:33.257692Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902281270443085:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:33.257804Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:34.331575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> EncryptedBackupParamsValidationTest::IncorrectKeyLengthExport ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAtLeast8BytesInStorage-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 29951, MsgBus: 18042 2025-06-25T15:01:28.929911Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902283446095102:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:28.929981Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001662/r3tmp/tmpie2xwo/pdisk_1.dat 2025-06-25T15:01:29.457387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:29.457474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:29.458762Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:29.484964Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29951, node 1 2025-06-25T15:01:29.668034Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:29.668056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:29.668063Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:29.668231Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18042 2025-06-25T15:01:29.936372Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event 
for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18042 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:30.283514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:30.304443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:30.322546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:30.529712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:30.713099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:30.799084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:32.550155Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902300625965868:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.550273Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.957895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:33.007320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:33.037081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:33.072587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:33.146792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:33.196739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:33.249262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:33.320649Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902304920933827:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:33.320732Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:33.321025Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902304920933832:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:33.325313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:33.337981Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902304920933834:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:33.429635Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902304920933885:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:33.930068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902283446095102:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:33.930158Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:34.798633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 >> BackupPathTest::ParallelBackupWholeDatabase [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::Range [GOOD] Test command err: Trying to start YDB, gRPC: 5677, MsgBus: 2308 2025-06-25T15:01:30.165403Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902289333393363:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:30.165796Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00163f/r3tmp/tmpTkyQaw/pdisk_1.dat 2025-06-25T15:01:30.740439Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902289333393180:2080] 1750863690114885 != 1750863690114888 2025-06-25T15:01:30.744990Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:30.752900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:30.753018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:30.756859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5677, node 1 2025-06-25T15:01:30.896502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:30.896524Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:30.896531Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:30.896660Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:31.164788Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2308 TClient is connected to server localhost:2308 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:31.685182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:31.704659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:31.720285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:31.877015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:32.050981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:32.179877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:33.877188Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902302218296724:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:33.877291Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:34.183130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.224732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.293977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.338013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.367123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.405600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.433304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.494413Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902306513264681:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:34.494473Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:34.494706Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902306513264686:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:34.502146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:34.512834Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902306513264688:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:34.588536Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902306513264739:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:35.142156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902289333393363:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:35.142228Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanQueryRangeFullScan-SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 61247, MsgBus: 2758 2025-06-25T15:01:30.008945Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902290653480476:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:30.008989Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001649/r3tmp/tmpY7uxt3/pdisk_1.dat 2025-06-25T15:01:30.636109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:30.636205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:30.643496Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:30.649743Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:30.654957Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902286358512990:2080] 1750863689946634 != 1750863689946637 TServer::EnableGrpc on GrpcPort 61247, node 1 2025-06-25T15:01:30.812421Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:30.812459Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:30.812472Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:30.812596Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:31.016849Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2758 TClient is connected to server localhost:2758 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:31.477402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:31.504289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:31.513722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:31.683484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:31.865733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:31.952124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:33.858512Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902303538383811:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:33.858637Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:34.175058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.213187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.291323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.328560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.366939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.424259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.500752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.567018Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902307833351773:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:34.567086Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902307833351778:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:34.567099Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:34.570662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:34.588560Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902307833351780:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:34.648157Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902307833351831:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:35.011391Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902290653480476:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:35.011467Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:35.946193Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:645: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T15:01:35.993785Z node 1 :KQP_GATEWAY DEBUG: kqp_metadata_loader.cpp:886: Load table metadata from cache by path, request Path: /Root/Test 20 ... UTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:184;event=stop_scanner; 2025-06-25T15:01:36.191654Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:42;event=scan_ack_on_finished;actor_id=[1:7519902316423286697:2050]; 2025-06-25T15:01:36.191667Z node 1 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:555: SelfId: [1:7519902316423286693:2477]. EVLOGKQP:0/0/3/3 2025-06-25T15:01:36.191685Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:383;event=scanner_finished;tablet_id=72075186224037914;stop_shard=1; 2025-06-25T15:01:36.191706Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:96;event=stop_scanner;actor_id=[1:7519902316423286697:2050];message=;final_flag=1; 2025-06-25T15:01:36.191794Z node 1 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:599: SelfId: [1:7519902316423286693:2477]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, pending resolve shards: 0, average read rows: 3, average read bytes: 0, 2025-06-25T15:01:36.191819Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:430;event=wait_all_scanner_finished;scans=0; 2025-06-25T15:01:36.191868Z node 1 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:699: SelfId: [1:7519902316423286693:2477]. EVLOGKQP(max_in_flight:1) InFlightScans:InFlightShards:;wScans=0;wShards=0; {SHARD(72075186224037914):CHUNKS=1;D=0.000000s;PacksCount=1;RowsCount=3;BytesCount=0;MinPackSize=3;MaxPackSize=3;CAVG=0.000000s;CMIN=0.000000s;CMAX=0.000000s;}; 2025-06-25T15:01:36.192068Z node 1 :KQP_COMPUTE DEBUG: log.h:466: kqp_scan_compute_actor.cpp:175 :TEvSendData: [1:7519902316423286693:2477]/[1:7519902316423286690:2475] 2025-06-25T15:01:36.192305Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519902316423286690:2475], TxId: 281474976710673, task: 1. Ctx: { TraceId : 01jyksqm29c88ctkzw7f2mnvxf. SessionId : ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. 
Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T15:01:36.192340Z node 1 :KQP_COMPUTE DEBUG: log.h:466: kqp_scan_compute_actor.cpp:205 :TEvFetcherFinished: [1:7519902316423286693:2477] 2025-06-25T15:01:36.192352Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519902316423286692:2476], TxId: 281474976710673, task: 2. Ctx: { TraceId : 01jyksqm29c88ctkzw7f2mnvxf. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646923 2025-06-25T15:01:36.192384Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976710673, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-25T15:01:36.192406Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976710673, task: 2. Finish input channelId: 1, from: [1:7519902316423286690:2475] 2025-06-25T15:01:36.192474Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519902316423286692:2476], TxId: 281474976710673, task: 2. Ctx: { TraceId : 01jyksqm29c88ctkzw7f2mnvxf. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T15:01:36.192814Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519902316423286692:2476], TxId: 281474976710673, task: 2. Ctx: { TraceId : 01jyksqm29c88ctkzw7f2mnvxf. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T15:01:36.192884Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710673, task: 1. Tasks execution finished 2025-06-25T15:01:36.192900Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519902316423286690:2475], TxId: 281474976710673, task: 1. Ctx: { TraceId : 01jyksqm29c88ctkzw7f2mnvxf. SessionId : ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-25T15:01:36.193017Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710673, task: 1. pass away 2025-06-25T15:01:36.193105Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710673;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T15:01:36.193213Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:357: ActorId: [1:7519902316423286686:2466] TxId: 281474976710673. Ctx: { TraceId: 01jyksqm29c88ctkzw7f2mnvxf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [1:7519902312128319361:2466], seqNo: 1, nRows: 1 2025-06-25T15:01:36.193355Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7519902316423286686:2466] TxId: 281474976710673. Ctx: { TraceId: 01jyksqm29c88ctkzw7f2mnvxf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7519902316423286690:2475], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 2929 Tasks { TaskId: 1 CpuTimeUs: 1007 FinishTimeMs: 1750863696192 OutputRows: 1 OutputBytes: 19 Tables { TablePath: "/Root/Test" ReadRows: 3 ReadBytes: 96 } ComputeCpuTimeUs: 134 BuildCpuTimeUs: 873 Sources { IngressName: "CS" Ingress { } } HostName: "ghrun-kqfvx6aroe" NodeId: 1 StartTimeMs: 1750863696192 CreateTimeMs: 1750863696178 UpdateTimeMs: 1750863696192 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:36.193415Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976710673. Ctx: { TraceId: 01jyksqm29c88ctkzw7f2mnvxf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7519902316423286690:2475] 2025-06-25T15:01:36.193436Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976710673, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-25T15:01:36.193460Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:7519902316423286686:2466] TxId: 281474976710673. Ctx: { TraceId: 01jyksqm29c88ctkzw7f2mnvxf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [1:7519902316423286692:2476], 2025-06-25T15:01:36.193506Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:893: Schedule publish at 2025-06-25T15:01:38.110623Z, after 1.918594s 2025-06-25T15:01:36.195304Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:423: TxId: 281474976710673, send ack to channelId: 2, seqNo: 1, enough: 0, freeSpace: 8388470, to: [1:7519902316423286694:2476] 2025-06-25T15:01:36.195357Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519902316423286692:2476], TxId: 281474976710673, task: 2. Ctx: { TraceId : 01jyksqm29c88ctkzw7f2mnvxf. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T15:01:36.195406Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976710673, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-25T15:01:36.195415Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976710673, task: 2. 
Tasks execution finished 2025-06-25T15:01:36.195431Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519902316423286692:2476], TxId: 281474976710673, task: 2. Ctx: { TraceId : 01jyksqm29c88ctkzw7f2mnvxf. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-06-25T15:01:36.195507Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976710673, task: 2. pass away 2025-06-25T15:01:36.195607Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976710673;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T15:01:36.195793Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976710673, taskId: 2. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-25T15:01:36.195918Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7519902316423286686:2466] TxId: 281474976710673. Ctx: { TraceId: 01jyksqm29c88ctkzw7f2mnvxf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7519902316423286692:2476], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 10628 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 1662 FinishTimeMs: 1750863696195 InputRows: 1 InputBytes: 19 OutputRows: 1 OutputBytes: 19 ResultRows: 1 ResultBytes: 19 ComputeCpuTimeUs: 982 BuildCpuTimeUs: 680 HostName: "ghrun-kqfvx6aroe" NodeId: 1 CreateTimeMs: 1750863696179 UpdateTimeMs: 1750863696195 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:36.195963Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976710673. Ctx: { TraceId: 01jyksqm29c88ctkzw7f2mnvxf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7519902316423286692:2476] 2025-06-25T15:01:36.196077Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [1:7519902316423286686:2466] TxId: 281474976710673. Ctx: { TraceId: 01jyksqm29c88ctkzw7f2mnvxf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:01:36.196119Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [1:7519902316423286686:2466] TxId: 281474976710673. Ctx: { TraceId: 01jyksqm29c88ctkzw7f2mnvxf, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2QwYzg2ODctNWNmMWZkYjEtZTE0Njg1NjAtOGM4OTY5ZjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.013557s ReadRows: 3 ReadBytes: 96 ru: 9 rate limiter was not found force flag: 1 2025-06-25T15:01:36.197055Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863696218, txId: 281474976710672] shutting down >> KqpCost::IndexLookupAndTake-useSink [GOOD] >> KqpCost::PointLookup [GOOD] >> KqpCost::AAARangeFullScan >> KqpQuery::TableSinkWithSubquery [GOOD] >> KqpCost::IndexLookupAtLeast8BytesInStorage+useSink [GOOD] >> KqpExplain::Predicates [GOOD] >> KqpLimits::TooBigColumn+useSink [GOOD] >> KqpLimits::ReplySizeExceeded >> KqpCost::ScanQueryRangeFullScan+SourceRead [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> BackupPathTest::ChecksumsForSchemaMappingFiles ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::PointLookup [GOOD] Test command err: Trying to start YDB, gRPC: 30114, MsgBus: 63432 2025-06-25T15:01:31.915411Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902292740910323:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:31.936140Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001617/r3tmp/tmp6bTxff/pdisk_1.dat 2025-06-25T15:01:32.440258Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:32.444464Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902292740910151:2080] 1750863691856139 != 1750863691856142 TServer::EnableGrpc on GrpcPort 30114, node 1 2025-06-25T15:01:32.456776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:32.456893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:32.460409Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:32.743654Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:32.743675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:32.743681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:32.743779Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:32.855564Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63432 TClient is connected to server localhost:63432 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:33.502856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:33.529038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:33.538755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:33.711127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:33.987485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:34.097757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:35.580648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902309920780980:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.580755Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.866992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.950088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.005196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.043125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.092029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.169998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.211954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.309064Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902314215748938:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:36.309143Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:36.309576Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902314215748943:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:36.313812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:36.364350Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902314215748945:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:36.452611Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902314215748996:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:36.908425Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902292740910323:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:36.908501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAndTake-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 26279, MsgBus: 8604 2025-06-25T15:01:31.562092Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902292708135344:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:31.562155Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001636/r3tmp/tmpub52Dm/pdisk_1.dat 2025-06-25T15:01:32.046136Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:32.046271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:32.049001Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:32.052254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26279, node 1 2025-06-25T15:01:32.216968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:32.216995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:32.217018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:32.217157Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8604 2025-06-25T15:01:32.581747Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8604 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:32.941595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:32.973231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:32.987260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:33.182825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:01:33.387826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:33.481416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:34.924066Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902305593038824:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:34.924144Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.211839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.255788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.324416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.371497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.421308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.458482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.498408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.563488Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902309888006780:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.563572Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.563701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902309888006785:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.566409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:35.575328Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902309888006787:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:35.632099Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902309888006838:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:36.563879Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902292708135344:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:36.563946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:36.739526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) /Root/SecondaryKeys/Index/indexImplTable 2 16 /Root/SecondaryKeys 1 8 >> KqpCost::OlapRangeFullScan [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAtLeast8BytesInStorage+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 15465, MsgBus: 29259 2025-06-25T15:01:31.800832Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902293802420346:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:31.807858Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001614/r3tmp/tmp3LdCaf/pdisk_1.dat 2025-06-25T15:01:32.329853Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902293802420315:2080] 1750863691796492 != 1750863691796495 2025-06-25T15:01:32.338178Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:32.340404Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:32.340498Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:32.342597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15465, node 1 2025-06-25T15:01:32.456269Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:32.456297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:32.456322Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:32.456413Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29259 
2025-06-25T15:01:32.803008Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:29259 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:33.093651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:33.129273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:01:33.148224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:33.387543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:33.592446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:33.696198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:35.377109Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902310982291136:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.377205Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.734376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.766862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.800577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.873068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.903379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.939192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.011202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.113472Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902315277259095:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:36.113552Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:36.113778Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902315277259100:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:36.117681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:36.130638Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902315277259102:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:36.213047Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902315277259153:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:36.800927Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902293802420346:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:36.800991Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:37.248154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::TableSinkWithSubquery [GOOD] Test command err: Trying to start YDB, gRPC: 19587, MsgBus: 20229 2025-06-25T15:01:12.275837Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902213217861162:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:12.276101Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013c6/r3tmp/tmp9BYyta/pdisk_1.dat 2025-06-25T15:01:12.705150Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:12.709574Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902213217860977:2080] 1750863672233915 != 1750863672233918 TServer::EnableGrpc on GrpcPort 19587, node 1 2025-06-25T15:01:12.746989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:12.747098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:12.760302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:12.867682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:12.867704Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:12.867711Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:12.867812Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20229 2025-06-25T15:01:13.263293Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: 
fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:20229 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:13.580113Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:13.606808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:13.745275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:13.922641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:13.989345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:15.783222Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902226102764498:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:15.783336Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:16.116839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.197688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.299485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.332947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.373196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.452503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.531352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.672761Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902230397732459:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:16.672832Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:16.673273Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902230397732464:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:16.677420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:16.689230Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902230397732466:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:16.784225Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902230397732519:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:17.268476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902213217861162:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:17.268543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 29104, MsgBus: 25779 2025-06-25T15:01:19.061083Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902241309233121:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:19.061132Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPa ... ecutor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:28.968941Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519902283991040041:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:28.969038Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:28.999705Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:29.219436Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:29.480092Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519902288286008670:2401], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:29.480185Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:29.480518Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7519902288286008675:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:29.485699Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:29.503023Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7519902288286008677:2405], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T15:01:29.599007Z node 3 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [3:7519902288286008728:3197] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:30.545040Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7519902271106137624:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:30.545128Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21639, MsgBus: 2712 2025-06-25T15:01:33.195933Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519902301926039211:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:33.262965Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013c6/r3tmp/tmpKrxNtI/pdisk_1.dat 2025-06-25T15:01:33.401669Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519902301926039089:2080] 1750863693176019 != 1750863693176022 2025-06-25T15:01:33.402108Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:33.423930Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:33.423997Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:33.425117Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21639, node 4 2025-06-25T15:01:33.494877Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:33.494900Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:33.494910Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:33.495034Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2712 TClient is connected to server localhost:2712 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:34.122628Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:34.132665Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:01:34.197916Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:37.137885Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902319105908910:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.137975Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.166598Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.250877Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.372121Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902319105909089:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.372230Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.372560Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902319105909094:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.377253Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:37.389923Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902319105909096:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T15:01:37.479165Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902319105909147:2440] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:38.024162Z node 4 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037889 Cancelled read: {[4:7519902323400876530:2338], 0} 2025-06-25T15:01:38.197359Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902301926039211:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:38.197618Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpQuery::OlapCreateAsSelect_Complex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::Predicates [GOOD] Test command err: Trying to start YDB, gRPC: 27312, MsgBus: 8651 2025-06-25T15:01:05.133866Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902182674856166:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:05.283029Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013f7/r3tmp/tmpFpbIUe/pdisk_1.dat 2025-06-25T15:01:05.448698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:05.448801Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:05.450425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:05.482021Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:05.482620Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902182674856053:2080] 1750863665092490 != 1750863665092493 TServer::EnableGrpc on GrpcPort 27312, node 1 2025-06-25T15:01:05.541871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:05.541891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:05.541914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:05.542031Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8651 TClient is connected to server localhost:8651 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:06.060304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:06.073543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:06.083359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:06.146731Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T15:01:06.231518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:06.389599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T15:01:06.472755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:08.159612Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902195559759582:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:08.159941Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:08.430304Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:08.470693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:08.507738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:08.537740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:08.575422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:08.649517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:08.676925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:08.775965Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902195559760240:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:08.776048Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:08.776283Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902195559760245:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:08.779872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:08.789488Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902195559760247:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:08.888243Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902195559760298:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:10.121869Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902182674856166:2151];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:10.121936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["EightShard"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"TopSort","Limit":"4","TopSortBy":"row.Data"},{"Scan":"Parallel","ReadRange":["Key [150, 266]"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"P ... tSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 23001, MsgBus: 27551 2025-06-25T15:01:30.954412Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7519902291587338730:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:30.954503Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013f7/r3tmp/tmpoBXShR/pdisk_1.dat 2025-06-25T15:01:31.159497Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:31.161822Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7519902291587338710:2080] 1750863690952598 != 1750863690952601 2025-06-25T15:01:31.167169Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:31.167516Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:31.177260Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23001, node 5 2025-06-25T15:01:31.260913Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:31.260939Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:31.260950Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:31.261099Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27551 2025-06-25T15:01:32.008546Z node 5 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27551 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:32.153613Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:32.171754Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:32.260957Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:32.472703Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:32.632299Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:35.125585Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519902313062176814:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.125695Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.213311Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.258924Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.347152Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.392260Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.439735Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.504933Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.583419Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.659462Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519902313062177475:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.659551Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.660050Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7519902313062177480:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.665458Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:35.678859Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7519902313062177482:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:35.775945Z node 5 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [5:7519902313062177533:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:35.954977Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7519902291587338730:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:35.955035Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:37.295752Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanQueryRangeFullScan+SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 20833, MsgBus: 27495 2025-06-25T15:01:32.748420Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902299991738955:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:32.748622Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00160e/r3tmp/tmpQeLt4B/pdisk_1.dat 2025-06-25T15:01:33.139368Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:33.142552Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902299991738823:2080] 1750863692710434 != 1750863692710437 TServer::EnableGrpc on GrpcPort 20833, node 1 2025-06-25T15:01:33.169576Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:33.169700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:33.192384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:33.424892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:33.424922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:33.424929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:33.425050Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:33.752856Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is 
connected to server localhost:27495 TClient is connected to server localhost:27495 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:34.263393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:34.307629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:34.464236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:34.620658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:34.704025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:36.401683Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902317171609648:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:36.401803Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:36.779690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.813344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.844477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.874257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.909775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.982250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.055275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.112011Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902321466577615:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.112113Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.112425Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902321466577620:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.116562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:37.132304Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902321466577622:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:37.204216Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902321466577673:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:37.744750Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902299991738955:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:37.744810Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:38.510081Z node 1 :KQP_GATEWAY DEBUG: kqp_metadata_loader.cpp:886: Load table metadata from cache by path, request Path: /Root/Test 2025-06-25T15:01:38.668588Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:37: Start KqpSnapshotManager at [1:7519902325761545216:2466] 2025-06-25T15:01:38.668625Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:58: KqpSnapshotManager: got snapshot request from [1:7519902325761545202:2466] 2025-06-25T15:01:38.675187Z node 1 :KQP_RESOURCE_MANAGER ... 34:2473], TxId: 281474976715673, task: 1, CA Id [1:7519902325761545231:2473]. returned async data processed rows 3 left freeSpace 8388548 received rows 3 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-25T15:01:38.686664Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519902325761545231:2473], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=. TraceId : 01jyksqpgyadea33x7b90wm095. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T15:01:38.686695Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519902325761545231:2473], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=. TraceId : 01jyksqpgyadea33x7b90wm095. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-25T15:01:38.686746Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715673, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-25T15:01:38.686765Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519902325761545232:2474], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=. CustomerSuppliedId : . TraceId : 01jyksqpgyadea33x7b90wm095. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646923 2025-06-25T15:01:38.686798Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715673, task: 2. Finish input channelId: 1, from: [1:7519902325761545231:2473] 2025-06-25T15:01:38.686835Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519902325761545232:2474], TxId: 281474976715673, task: 2. 
Ctx: { SessionId : ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=. CustomerSuppliedId : . TraceId : 01jyksqpgyadea33x7b90wm095. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T15:01:38.686902Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519902325761545231:2473], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=. TraceId : 01jyksqpgyadea33x7b90wm095. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646927 2025-06-25T15:01:38.686961Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519902325761545231:2473], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=. TraceId : 01jyksqpgyadea33x7b90wm095. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-25T15:01:38.686986Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715673, task: 1. Tasks execution finished 2025-06-25T15:01:38.686999Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519902325761545231:2473], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=. TraceId : 01jyksqpgyadea33x7b90wm095. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-25T15:01:38.687064Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7519902325761545232:2474], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=. CustomerSuppliedId : . TraceId : 01jyksqpgyadea33x7b90wm095. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-25T15:01:38.687108Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715673, task: 1. pass away 2025-06-25T15:01:38.687246Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715673;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T15:01:38.687258Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:357: ActorId: [1:7519902325761545227:2466] TxId: 281474976715673. Ctx: { TraceId: 01jyksqpgyadea33x7b90wm095, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [1:7519902325761545202:2466], seqNo: 1, nRows: 1 2025-06-25T15:01:38.687374Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715673, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-25T15:01:38.687401Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7519902325761545227:2466] TxId: 281474976715673. Ctx: { TraceId: 01jyksqpgyadea33x7b90wm095, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7519902325761545231:2473], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 8720 Tasks { TaskId: 1 CpuTimeUs: 1034 FinishTimeMs: 1750863698686 OutputRows: 1 OutputBytes: 19 Tables { TablePath: "/Root/Test" ReadRows: 1 ReadBytes: 20 AffectedPartitions: 1 } IngressRows: 3 ComputeCpuTimeUs: 175 BuildCpuTimeUs: 859 HostName: "ghrun-kqfvx6aroe" NodeId: 1 StartTimeMs: 1750863698686 CreateTimeMs: 1750863698677 UpdateTimeMs: 1750863698687 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:38.687460Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715673. Ctx: { TraceId: 01jyksqpgyadea33x7b90wm095, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7519902325761545231:2473] 2025-06-25T15:01:38.687515Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:7519902325761545227:2466] TxId: 281474976715673. Ctx: { TraceId: 01jyksqpgyadea33x7b90wm095, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [1:7519902325761545232:2474], 2025-06-25T15:01:38.689559Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:423: TxId: 281474976715673, send ack to channelId: 2, seqNo: 1, enough: 0, freeSpace: 8388470, to: [1:7519902325761545235:2474] 2025-06-25T15:01:38.689631Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [1:7519902325761545232:2474], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=. CustomerSuppliedId : . TraceId : 01jyksqpgyadea33x7b90wm095. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-25T15:01:38.689675Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715673, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-25T15:01:38.689684Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715673, task: 2. Tasks execution finished 2025-06-25T15:01:38.689694Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7519902325761545232:2474], TxId: 281474976715673, task: 2. 
Ctx: { SessionId : ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=. CustomerSuppliedId : . TraceId : 01jyksqpgyadea33x7b90wm095. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-06-25T15:01:38.689749Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715673, task: 2. pass away 2025-06-25T15:01:38.689813Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715673;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-25T15:01:38.689948Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715673, taskId: 2. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-25T15:01:38.690088Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:7519902325761545227:2466] TxId: 281474976715673. Ctx: { TraceId: 01jyksqpgyadea33x7b90wm095, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7519902325761545232:2474], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 5411 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 708 FinishTimeMs: 1750863698689 InputRows: 1 InputBytes: 19 OutputRows: 1 OutputBytes: 19 ResultRows: 1 ResultBytes: 19 ComputeCpuTimeUs: 207 BuildCpuTimeUs: 501 HostName: "ghrun-kqfvx6aroe" NodeId: 1 CreateTimeMs: 1750863698680 UpdateTimeMs: 1750863698689 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:38.690127Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715673. Ctx: { TraceId: 01jyksqpgyadea33x7b90wm095, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7519902325761545232:2474] 2025-06-25T15:01:38.690224Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [1:7519902325761545227:2466] TxId: 281474976715673. Ctx: { TraceId: 01jyksqpgyadea33x7b90wm095, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:01:38.690263Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [1:7519902325761545227:2466] TxId: 281474976715673. Ctx: { TraceId: 01jyksqpgyadea33x7b90wm095, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmRmN2FkZDQtYzJlNmY1YzYtZDU0YTAzNTgtMmYwNzQ2MDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.014131s ReadRows: 1 ReadBytes: 20 ru: 9 rate limiter was not found force flag: 1 2025-06-25T15:01:38.690944Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863698717, txId: 281474976715672] shutting down 2025-06-25T15:01:38.783386Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:929: Send to publish resource usage for reason: batching, payload: NodeId: 1 ResourceManagerActorId { RawX1: 7886758914357752171 RawX2: 9223372041149771361 } Timestamp: 1750863698 AvailableComputeActors: 10000 UsedMemory: 0 TotalMemory: 10737418240 Memory { Pool: 1 Available: 10737418240 } ExecutionUnits: 10000 KqpProxyNodeResources { NodeId: 1 DataCenterNumId: 49 ActiveWorkersCount: 0 DataCenterId: "1" } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapRangeFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 12189, MsgBus: 12611 2025-06-25T15:01:31.844779Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902292908608369:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:31.846251Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00162c/r3tmp/tmpyqmarb/pdisk_1.dat 2025-06-25T15:01:32.346331Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902292908608187:2080] 1750863691815521 != 1750863691815524 2025-06-25T15:01:32.379698Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:32.383048Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:32.383129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:32.386457Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12189, node 1 2025-06-25T15:01:32.517613Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:32.517632Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:32.517638Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:32.517767Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12611 2025-06-25T15:01:32.830032Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:12611 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:33.177469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:33.190822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:33.203655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:33.376568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:01:33.559695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:33.652710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:35.483735Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902310088479000:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.483862Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.826535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.870876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.903751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.947814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.979915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.048141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.133244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:36.203255Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902314383446961:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:36.203336Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:36.204345Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902314383446966:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:36.208136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:36.219085Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902314383446968:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:36.291652Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902314383447019:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:36.832398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902292908608369:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:36.832475Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:37.494937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_r ... ormalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:01:37.926778Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037929;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:01:37.926810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:01:37.926846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:01:37.927101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:01:37.927120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:01:37.930362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[1:7519902318678414715:2484];ev=NActors::IEventHandle;tablet_id=72075186224037926;tx_id=281474976710672;this=88923012910144;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863697929;max=18446744073709551615;plan=0;src=[1:7519902297203575867:2185];cookie=392:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.930922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7519902318678414685:2478];ev=NActors::IEventHandle;tablet_id=72075186224037925;tx_id=281474976710672;this=88923012913728;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863697930;max=18446744073709551615;plan=0;src=[1:7519902297203575867:2185];cookie=382:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.931648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037928;self_id=[1:7519902318678414707:2482];ev=NActors::IEventHandle;tablet_id=72075186224037928;tx_id=281474976710672;this=88923012924256;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863697931;max=18446744073709551615;plan=0;src=[1:7519902297203575867:2185];cookie=412:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.937782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;self_id=[1:7519902318678414693:2481];ev=NActors::IEventHandle;tablet_id=72075186224037924;tx_id=281474976710672;this=88923012917088;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863697937;max=18446744073709551615;plan=0;src=[1:7519902297203575867:2185];cookie=372:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.937783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7519902318678414681:2477];ev=NActors::IEventHandle;tablet_id=72075186224037930;tx_id=281474976710672;this=88923012614240;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863697937;max=18446744073709551615;plan=0;src=[1:7519902297203575867:2185];cookie=432:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.938341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7519902318678414687:2479];ev=NActors::IEventHandle;tablet_id=72075186224037922;tx_id=281474976710672;this=88923012614464;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863697938;max=18446744073709551615;plan=0;src=[1:7519902297203575867:2185];cookie=352:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.938644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519902318678414679:2475];ev=NActors::IEventHandle;tablet_id=72075186224037931;tx_id=281474976710672;this=88923012918208;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863697938;max=18446744073709551615;plan=0;src=[1:7519902297203575867:2185];cookie=442:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.938892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7519902318678414692:2480];ev=NActors::IEventHandle;tablet_id=72075186224037927;tx_id=281474976710672;this=88923012770144;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863697938;max=18446744073709551615;plan=0;src=[1:7519902297203575867:2185];cookie=402:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.939371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7519902318678414710:2483];ev=NActors::IEventHandle;tablet_id=72075186224037929;tx_id=281474976710672;this=88923012919104;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863697939;max=18446744073709551615;plan=0;src=[1:7519902297203575867:2185];cookie=422:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.946248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.947341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.962893Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:37.963353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.964355Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:37.964879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.967745Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:37.968498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.969176Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:37.969715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.972728Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:37.973317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.973837Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:37.974380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.977761Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:37.978355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.978514Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:37.979064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:37.983326Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:37.983495Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:38.232571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-25T15:01:38.232694Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-25T15:01:38.232889Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7519902318678414681:2477];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037930;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037926;receive=72075186224037931; 2025-06-25T15:01:38.233705Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; query_phases { duration_us: 273247 table_access { name: "/Root/TestTable" reads { rows: 2 bytes: 72 } } cpu_time_us: 135589 } compilation { duration_us: 359165 cpu_time_us: 354390 } process_cpu_time_us: 345 total_duration_us: 638877 total_cpu_time_us: 490324 >> KqpCost::QuerySeviceRangeFullScan [GOOD] >> TVectorIndexTests::CreateTablePrefixCovering ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::OlapCreateAsSelect_Complex [GOOD] Test command err: Trying to start YDB, gRPC: 23710, MsgBus: 6460 2025-06-25T15:01:14.114936Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902222898361988:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:14.134879Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/0013b5/r3tmp/tmpIMj7du/pdisk_1.dat 2025-06-25T15:01:14.596469Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902222898361954:2080] 1750863674089414 != 1750863674089417 2025-06-25T15:01:14.604802Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23710, node 1 2025-06-25T15:01:14.668906Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:14.671097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:14.683804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:14.705391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:14.705417Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:14.705424Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:14.705551Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6460 2025-06-25T15:01:15.116490Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6460 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:15.286229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:15.300014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:17.258949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902235783264483:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:17.259039Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:17.259344Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902235783264495:2294], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:17.263091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:17.280496Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902235783264497:2295], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-25T15:01:17.375877Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902235783264548:2336] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:17.639674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:01:17.802482Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:01:17.802596Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519902235783264715:2305];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:01:17.836271Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:01:17.836490Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037895 2025-06-25T15:01:17.838965Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519902235783264715:2305];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:01:17.839120Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 72075186224037892 2025-06-25T15:01:17.844703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:01:17.844704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519902235783264715:2305];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:01:17.844928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:01:17.845221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:01:17.845332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:01:17.845435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037892;self_id=[1:7519902235783264715:2305];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:01:17.845447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:01:17.845898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:01:17.846041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:01:17.846159Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:01:17.846280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:01:17.846397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:01:17.846513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7519902235783264714:2304];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:01:17.849641Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519902235783264715:2305];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:01:17.849788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519902235783264715:2305];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:01:17.849897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7519902235783264715:2305];tablet_id=72075186224037892;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:01:17.850001Z node 1 :TX_COL ... 
3Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037908;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:01:37.797623Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519902322223605613:2453];tablet_id=72075186224037910;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:01:37.797708Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519902322223605613:2453];tablet_id=72075186224037910;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:01:37.798011Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519902322223605613:2453];tablet_id=72075186224037910;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:01:37.798147Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519902322223605613:2453];tablet_id=72075186224037910;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:01:37.798281Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519902322223605613:2453];tablet_id=72075186224037910;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:01:37.798406Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519902322223605613:2453];tablet_id=72075186224037910;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:01:37.798878Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519902322223605613:2453];tablet_id=72075186224037910;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:01:37.799047Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519902322223605613:2453];tablet_id=72075186224037910;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:01:37.799211Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519902322223605613:2453];tablet_id=72075186224037910;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:01:37.799360Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519902322223605613:2453];tablet_id=72075186224037910;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:01:37.799475Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519902322223605613:2453];tablet_id=72075186224037910;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:01:37.806492Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:01:37.806572Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:01:37.806694Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:01:37.806726Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:01:37.806965Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:01:37.807001Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:01:37.807113Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:01:37.807180Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:01:37.807227Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:01:37.807257Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:01:37.807486Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:01:37.807518Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:01:37.807720Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:01:37.807761Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:01:37.807890Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:01:37.807925Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:01:37.808010Z node 4 :TX_COLUMNSHARD CRIT: log.cpp:784: 
tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:01:37.808068Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:01:37.808100Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:01:37.809008Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:01:37.809053Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:01:37.809987Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;self_id=[4:7519902322223605580:2452];ev=NActors::IEventHandle;tablet_id=72075186224037911;tx_id=281474976710666;this=88923042653760;method=TTxController::StartProposeOnExecute;tx_info=281474976710666:TX_KIND_SCHEMA;min=1750863697809;max=18446744073709551615;plan=0;src=[4:7519902296453800331:2159];cookie=242:3;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T15:01:37.814833Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037908;self_id=[4:7519902322223605578:2450];ev=NActors::IEventHandle;tablet_id=72075186224037908;tx_id=281474976710666;this=88923042655776;method=TTxController::StartProposeOnExecute;tx_info=281474976710666:TX_KIND_SCHEMA;min=1750863697814;max=18446744073709551615;plan=0;src=[4:7519902296453800331:2159];cookie=212:3;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T15:01:37.821093Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;self_id=[4:7519902322223605613:2453];ev=NActors::IEventHandle;tablet_id=72075186224037910;tx_id=281474976710666;this=88923042657568;method=TTxController::StartProposeOnExecute;tx_info=281474976710666:TX_KIND_SCHEMA;min=1750863697820;max=18446744073709551615;plan=0;src=[4:7519902296453800331:2159];cookie=232:3;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T15:01:37.828615Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037909;self_id=[4:7519902322223605579:2451];ev=NActors::IEventHandle;tablet_id=72075186224037909;tx_id=281474976710666;this=88923009210560;method=TTxController::StartProposeOnExecute;tx_info=281474976710666:TX_KIND_SCHEMA;min=1750863697828;max=18446744073709551615;plan=0;src=[4:7519902296453800331:2159];cookie=222:3;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T15:01:37.833042Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T15:01:37.833641Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T15:01:37.839830Z node 4 
:TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710666; 2025-06-25T15:01:37.839971Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710666; 2025-06-25T15:01:37.841004Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T15:01:37.841118Z node 4 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=8;result=not_found; 2025-06-25T15:01:37.845560Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710666; 2025-06-25T15:01:37.846967Z node 4 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710666;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710666; >> BackupRestoreS3::RestoreIndexTableDecimalSplitBoundaries [GOOD] >> BackupRestoreS3::PrefixedVectorIndex >> KqpCost::OlapWriteRow [GOOD] |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::QuerySeviceRangeFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 16861, MsgBus: 9708 2025-06-25T15:01:35.401771Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902311136643200:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:35.404527Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015fd/r3tmp/tmpgRcrgs/pdisk_1.dat 2025-06-25T15:01:35.773375Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16861, node 1 2025-06-25T15:01:35.832525Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:35.832893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:35.835318Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:35.867195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:35.867216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:35.867223Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:35.867312Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to 
server localhost:9708 TClient is connected to server localhost:9708 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T15:01:36.400544Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:36.505321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:36.528590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:01:36.541801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:36.684280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:36.807497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:36.903792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:38.455043Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902324021546500:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:38.455212Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:38.739200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.774143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.804329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.835642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.865723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.949901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.025006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.095034Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902328316514457:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:39.095116Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:39.095173Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902328316514462:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:39.099160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:39.111451Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902328316514464:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:39.207605Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902328316514515:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:40.398753Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902311136643200:2234];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:40.398958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpCost::OlapPointLookup [GOOD] >> KqpParams::Decimal+QueryService+UseSink [GOOD] >> KqpQuery::ExecuteWriteQuery [GOOD] >> BasicUsage::MaxByteSizeEqualZero [GOOD] >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage >> TVectorIndexTests::CreateTablePrefixCovering [GOOD] >> KqpCost::OltpWriteRow-isSink [GOOD] >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed+EvWrite >> DataShardOutOfOrder::TestUnprotectedReadsThenWriteVisibility >> DataShardOutOfOrder::TestImmediateQueueThenSplit+UseSink >> DataShardOutOfOrder::TestPlannedTimeoutSplit >> DataShardTxOrder::ImmediateBetweenOnline_Init_oo8 >> KqpCost::IndexLookupJoin+StreamLookupJoin [GOOD] >> DataShardTxOrder::ZigZag_oo >> DataShardTxOrder::ForceOnlineBetweenOnline >> DataShardOutOfOrder::TestOutOfOrderLockLost >> DataShardOutOfOrder::TestReadTableImmediateWriteBlock >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites+EvWrite >> DataShardTxOrder::ImmediateBetweenOnline_oo8 >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed+EvWrite >> DataShardTxOrder::ForceOnlineBetweenOnline_oo8 >> TxOrderInternals::OperationOrder [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapWriteRow [GOOD] Test command err: Trying to start YDB, gRPC: 27785, MsgBus: 1850 2025-06-25T15:01:33.545716Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902302429292880:2175];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:33.545762Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001608/r3tmp/tmpr4cIAJ/pdisk_1.dat 2025-06-25T15:01:34.062513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:34.062610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:34.081393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:34.087924Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:34.092618Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902302429292733:2080] 1750863693532170 != 1750863693532173 TServer::EnableGrpc on GrpcPort 27785, node 1 2025-06-25T15:01:34.256453Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:34.256477Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:34.256488Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:34.256603Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1850 2025-06-25T15:01:34.556676Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1850 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:34.925019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:34.956062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:35.112651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:35.262686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:35.359831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:36.976390Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902315314196272:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:36.976497Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.296907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.367686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.403955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.444213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.470641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.549659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.584811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.670215Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902319609164231:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.670295Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.670792Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902319609164236:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.674443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:37.690647Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902319609164238:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:37.780077Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902319609164289:3423] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:38.541958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902302429292880:2175];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:38.551235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:38.900216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:01:39.100268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;self_id=[1:7 ... 74976710682; query_phases { duration_us: 15574 table_access { name: "/Root/TestTable" updates { rows: 2 bytes: 744 } } cpu_time_us: 4702 affected_shards: 2 } query_phases { duration_us: 11026 cpu_time_us: 323 affected_shards: 2 } compilation { duration_us: 64310 cpu_time_us: 61602 } process_cpu_time_us: 747 total_duration_us: 94423 total_cpu_time_us: 67374 2025-06-25T15:01:40.059124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976710684;tx_id=281474976710684;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710684; query_phases { duration_us: 21221 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 368 } } cpu_time_us: 2150 affected_shards: 1 } query_phases { duration_us: 6014 cpu_time_us: 276 affected_shards: 1 } compilation { duration_us: 92716 cpu_time_us: 90317 } process_cpu_time_us: 677 total_duration_us: 121891 total_cpu_time_us: 93420 2025-06-25T15:01:40.221863Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710687;tx_id=281474976710687;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710687; 2025-06-25T15:01:40.221906Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=281474976710687;tx_id=281474976710687;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710687; query_phases { duration_us: 9768 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 368 } } cpu_time_us: 2077 affected_shards: 1 } query_phases { duration_us: 8917 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 368 } } cpu_time_us: 2014 affected_shards: 2 } query_phases { duration_us: 9496 cpu_time_us: 154 affected_shards: 2 } compilation { duration_us: 123980 cpu_time_us: 121625 } process_cpu_time_us: 778 total_duration_us: 155135 total_cpu_time_us: 126648 2025-06-25T15:01:40.291983Z node 1 :TX_COLUMNSHARD_RESTORE WARN: log.cpp:784: 
tablet_id=72075186224037930;tablet_actor_id=[1:7519902328199099312:2491];this=89129162631936;activity=1;task_id=4c8f17a8-51d511f0-a3efedab-a552f43e::4;fline=restore.cpp:28;event=merge_data_problems;write_id=4;tablet_id=72075186224037930;message=Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]}; 2025-06-25T15:01:40.292196Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7519902328199099312:2491];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteBlobsResult;tablet_id=72075186224037930;event=TEvWriteBlobsResult;fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]};tx_id=281474976710688; 2025-06-25T15:01:40.292796Z node 1 :TX_COLUMNSHARD_SCAN WARN: actor.cpp:133: Scan [1:7519902332494067339:2700] got AbortExecution txId: 281474976710688 scanId: 1 gen: 1 tablet: 72075186224037930 code: ABORTED reason: {
: Error: task finished: Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]} } 2025-06-25T15:01:40.313341Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:819: SelfId: [1:7519902332494067336:2698], Table: `/Root/TestTable` ([72057594046644480:17:1]), SessionActorId: [0:0:0]Got CONSTRAINT VIOLATION for table `/Root/TestTable`. ShardID=72075186224037930, Sink=[1:7519902332494067336:2698].{
: Error: Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]}, code: 2012 } 2025-06-25T15:01:40.313442Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1566: SelfId: [1:7519902332494067333:2698], TxId: 281474976710688, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyksqr851azncrsavzpzeqxq. SessionId : ydb://session/3?node_id=1&id=MzZhNjk3NWMtN2QxZTc1YjctYjBiNTY0NDAtNTFkNzA2NQ==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Sink[0] fatal error: {
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012 subissue: {
: Error: Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]}, code: 2012 } } 2025-06-25T15:01:40.313519Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519902332494067333:2698], TxId: 281474976710688, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jyksqr851azncrsavzpzeqxq. SessionId : ydb://session/3?node_id=1&id=MzZhNjk3NWMtN2QxZTc1YjctYjBiNTY0NDAtNTFkNzA2NQ==. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012 subissue: {
: Error: Conflict with existing key. {"sorting_columns":[{"name":"Group","value":"1"},{"name":"Name","value":"Anna"}],"fields":["Group: uint32","Name: binary"]}, code: 2012 } }. 2025-06-25T15:01:40.314331Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MzZhNjk3NWMtN2QxZTc1YjctYjBiNTY0NDAtNTFkNzA2NQ==, ActorId: [1:7519902323904131854:2474], ActorState: ExecuteState, TraceId: 01jyksqr851azncrsavzpzeqxq, Create QueryResponse for error on request, msg: query_phases { duration_us: 34933 cpu_time_us: 1588 } compilation { duration_us: 46439 cpu_time_us: 44327 } process_cpu_time_us: 546 total_duration_us: 84235 total_cpu_time_us: 46461 2025-06-25T15:01:40.422801Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710690;tx_id=281474976710690;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710690; query_phases { duration_us: 7736 cpu_time_us: 1830 affected_shards: 1 } query_phases { duration_us: 4972 cpu_time_us: 237 affected_shards: 1 } compilation { duration_us: 75733 cpu_time_us: 69392 } process_cpu_time_us: 801 total_duration_us: 90359 total_cpu_time_us: 72260 2025-06-25T15:01:40.523943Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710692;tx_id=281474976710692;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710692; query_phases { duration_us: 15758 table_access { name: "/Root/TestTable" deletes { rows: 1 } } cpu_time_us: 2139 affected_shards: 1 } query_phases { duration_us: 5183 cpu_time_us: 289 affected_shards: 1 } compilation { duration_us: 62159 cpu_time_us: 58545 } process_cpu_time_us: 770 total_duration_us: 86671 total_cpu_time_us: 61743 2025-06-25T15:01:40.596983Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976710694;tx_id=281474976710694;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710694; query_phases { duration_us: 6606 table_access { name: "/Root/TestTable" deletes { rows: 1 } } cpu_time_us: 1699 affected_shards: 1 } query_phases { duration_us: 5988 cpu_time_us: 253 affected_shards: 1 } compilation { duration_us: 52095 cpu_time_us: 45161 } process_cpu_time_us: 525 total_duration_us: 66328 total_cpu_time_us: 47638 2025-06-25T15:01:40.733470Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710697;tx_id=281474976710697;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710697; 2025-06-25T15:01:40.734203Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976710697;tx_id=281474976710697;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710697; query_phases { duration_us: 7489 table_access { name: "/Root/TestTable" deletes { rows: 1 } } cpu_time_us: 1743 affected_shards: 1 } query_phases { duration_us: 7791 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 368 } } cpu_time_us: 1868 affected_shards: 2 } query_phases { duration_us: 19586 cpu_time_us: 274 affected_shards: 2 } compilation { duration_us: 89607 cpu_time_us: 86613 } process_cpu_time_us: 764 total_duration_us: 130385 total_cpu_time_us: 91262 2025-06-25T15:01:41.233856Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-25T15:01:41.233896Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-25T15:01:41.234071Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-25T15:01:41.234215Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-25T15:01:41.234364Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-25T15:01:41.234603Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-25T15:01:41.234758Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-25T15:01:41.234963Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-25T15:01:41.235352Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-25T15:01:41.235451Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976710699;tx_id=281474976710699;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710699; 2025-06-25T15:01:41.235539Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037931;local_tx_no=39;method=complete;tx_info=;fline=secondary.h:126;event=duplication_tablet_broken_flag;txId=281474976710699; query_phases { duration_us: 720 cpu_time_us: 720 } query_phases { duration_us: 125943 table_access { name: "/Root/TestTable" reads { rows: 2 bytes: 40 } deletes { rows: 2 } } cpu_time_us: 20427 affected_shards: 10 } query_phases { duration_us: 12529 cpu_time_us: 653 affected_shards: 10 } compilation { duration_us: 347826 cpu_time_us: 338648 } process_cpu_time_us: 1534 total_duration_us: 491897 total_cpu_time_us: 361982 >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobal [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalAsync ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_index/unittest >> TVectorIndexTests::CreateTablePrefixCovering [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: 
[1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:01:41.666991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:01:41.667067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:41.667125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:01:41.667148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:01:41.667183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:01:41.667223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:01:41.667272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:01:41.667317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:01:41.667858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:01:41.668109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:01:41.732577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:41.732659Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:41.750181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:01:41.750645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:01:41.750814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:01:41.758015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:01:41.758311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:01:41.758950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:41.759209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:01:41.762269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at 
schemeshard: 72057594046678944 2025-06-25T15:01:41.762405Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:01:41.763259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:41.763301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:01:41.763397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:01:41.763446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:01:41.763489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:01:41.763579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:01:41.770336Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:01:41.898441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:01:41.898690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:41.898879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:01:41.898929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:01:41.899146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:01:41.899214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:41.901438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:41.901611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 
2025-06-25T15:01:41.901808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:41.901883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:01:41.901931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:01:41.901967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:01:41.904160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:41.904213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:01:41.904249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:01:41.906034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:41.906089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:01:41.906138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:41.906187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:01:41.909620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:01:41.911719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:01:41.911886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:01:41.912717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:01:41.912865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:01:41.912910Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:41.913183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:01:41.913236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:01:41.913392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:01:41.913474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:01:41.915722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:01:41.915769Z node 1 :FLAT_TX_SCHEMESHARD ... ementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-25T15:01:42.441939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:42.442000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:01:42.442030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T15:01:42.442057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 18446744073709551615 2025-06-25T15:01:42.442080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 4 2025-06-25T15:01:42.442134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/5, is published: true 2025-06-25T15:01:42.446154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:3, at schemeshard: 72057594046678944 2025-06-25T15:01:42.446217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:3 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:01:42.446598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-25T15:01:42.446740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:3 progress is 2/5 2025-06-25T15:01:42.446784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 
ready parts: 2/5 2025-06-25T15:01:42.446833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:3 progress is 2/5 2025-06-25T15:01:42.446865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 2/5 2025-06-25T15:01:42.446901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/5, is published: true 2025-06-25T15:01:42.448624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:4, at schemeshard: 72057594046678944 2025-06-25T15:01:42.448670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:4 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:01:42.448887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-25T15:01:42.448986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:4 progress is 3/5 2025-06-25T15:01:42.449013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 3/5 2025-06-25T15:01:42.449081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:4 progress is 3/5 2025-06-25T15:01:42.449107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 3/5 2025-06-25T15:01:42.449132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/5, is published: true 2025-06-25T15:01:42.449582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-25T15:01:42.449619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:2 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:01:42.449764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-25T15:01:42.449835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:2 progress is 4/5 2025-06-25T15:01:42.449857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 4/5 2025-06-25T15:01:42.449880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:2 progress is 4/5 2025-06-25T15:01:42.449902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 4/5 2025-06-25T15:01:42.449938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 4/5, is published: true 2025-06-25T15:01:42.450187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:42.450432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at 
schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:42.450534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:42.450575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:42.450970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T15:01:42.451018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:01:42.451191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T15:01:42.451275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 5/5 2025-06-25T15:01:42.451297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 5/5 2025-06-25T15:01:42.451325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 5/5 2025-06-25T15:01:42.451352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 5/5 2025-06-25T15:01:42.451382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 5/5, is published: true 2025-06-25T15:01:42.451456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:455:2399] message: TxId: 102 2025-06-25T15:01:42.451497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 5/5 2025-06-25T15:01:42.451533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T15:01:42.451580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T15:01:42.451685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T15:01:42.451720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:1 2025-06-25T15:01:42.451750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:1 2025-06-25T15:01:42.451779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T15:01:42.451799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:2 2025-06-25T15:01:42.451816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:2 2025-06-25T15:01:42.451859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 
72057594046678944, LocalPathId: 4] was 2 2025-06-25T15:01:42.451879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:3 2025-06-25T15:01:42.451895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:3 2025-06-25T15:01:42.451941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-25T15:01:42.451968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:4 2025-06-25T15:01:42.451985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:4 2025-06-25T15:01:42.452018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-25T15:01:42.452348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:42.452388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:42.452417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:42.452597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:42.452650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:42.452701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:01:42.457672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T15:01:42.457729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:603:2540] TestWaitNotification: OK eventTxId 102 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapPointLookup [GOOD] Test command err: Trying to start YDB, gRPC: 28083, MsgBus: 1440 2025-06-25T15:01:35.315699Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902310895760439:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:35.324152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015fe/r3tmp/tmpI4hAHB/pdisk_1.dat 2025-06-25T15:01:35.609499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:35.609638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-25T15:01:35.625670Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:35.627903Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902310895760257:2080] 1750863695270044 != 1750863695270047 2025-06-25T15:01:35.643701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28083, node 1 2025-06-25T15:01:35.704804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:35.704825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:35.704831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:35.704946Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1440 TClient is connected to server localhost:1440 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-25T15:01:36.272160Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:36.431868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:36.475626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:36.495680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:36.651754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:36.811574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:36.869207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:38.410437Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902323780663784:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:38.410531Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:38.714914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.749249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.785946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.823367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.891731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.937348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.989811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.069896Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902328075631737:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:39.069966Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:39.070132Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902328075631742:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:39.073621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:39.086241Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902328075631744:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:39.175520Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902328075631795:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:40.308886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902310895760439:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:40.309030Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:40.461729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runn ... s=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:01:40.812634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:01:40.812654Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:01:40.812778Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:01:40.812851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7519902332370599477:2481];ev=NActors::IEventHandle;tablet_id=72075186224037929;tx_id=281474976710672;this=88923023008736;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863700812;max=18446744073709551615;plan=0;src=[1:7519902310895760588:2150];cookie=422:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.812861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:01:40.812928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:01:40.812951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:01:40.812981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:01:40.813019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:01:40.813155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:01:40.813178Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:01:40.813306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:01:40.813334Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:01:40.813413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:01:40.813419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;self_id=[1:7519902332370599485:2483];ev=NActors::IEventHandle;tablet_id=72075186224037924;tx_id=281474976710672;this=88923023008512;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863700813;max=18446744073709551615;plan=0;src=[1:7519902310895760588:2150];cookie=372:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.813434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:01:40.813481Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:01:40.813511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:01:40.813531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:01:40.813770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:01:40.813792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:01:40.818119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037922;self_id=[1:7519902332370599465:2475];ev=NActors::IEventHandle;tablet_id=72075186224037922;tx_id=281474976710672;this=88923023032480;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863700817;max=18446744073709551615;plan=0;src=[1:7519902310895760588:2150];cookie=352:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.821638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.821687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.834434Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:40.834705Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:40.835029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.835082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.839571Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:40.839578Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:40.840114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.840171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.844648Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:40.844654Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:40.845156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.845222Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.849729Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:40.849733Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:40.850259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.850302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:40.854732Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:40.854732Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:40.971165Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-25T15:01:40.976813Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7519902332370599479:2482];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037930;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037931;receive=72075186224037926; 2025-06-25T15:01:40.977352Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-25T15:01:40.977977Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::Decimal+QueryService+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 26731, MsgBus: 24871 2025-06-25T15:01:11.312994Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902208648181238:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:11.313058Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013d9/r3tmp/tmpg6KsE3/pdisk_1.dat 2025-06-25T15:01:11.845770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:11.845862Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:11.856549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:11.870453Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:11.872060Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902208648181048:2080] 1750863671278083 != 1750863671278086 TServer::EnableGrpc on GrpcPort 26731, node 1 2025-06-25T15:01:12.095833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:12.095854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:12.095860Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:12.095954Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:12.312445Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:24871 TClient is connected to server localhost:24871 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:12.827680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:01:12.841895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:01:12.860188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:12.970536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:13.139466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:13.220607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:15.015630Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902225828051860:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:15.015975Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:15.378703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.417743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.476589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.511217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.541504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.618070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.676754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.747979Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902225828052521:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:15.748054Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:15.748331Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902225828052526:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:15.752489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:15.763647Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902225828052528:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:15.847358Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902225828052579:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:16.312543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902208648181238:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:16.312627Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 17862, MsgBus: 8574 2025-06-25T15:01:18.046209Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902237606005448:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:18.046346Z node 2 :METADATA_PROVIDER ERROR: log.cpp ... in.cpp:311) waiting... 2025-06-25T15:01:34.864624Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:34.951852Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:35.081353Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:35.198800Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:35.284678Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:37.778777Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902319469195436:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.778860Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:37.855619Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.897037Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.934311Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.011789Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.097783Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.185563Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.231522Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:38.299166Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902323764163399:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:38.299269Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:38.299540Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902323764163404:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:38.303330Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:38.320828Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902323764163406:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:38.385454Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902323764163457:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:38.972607Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902302289324630:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:38.972680Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:39.532112Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:40.709661Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519902332354098556:2525], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:4:17: Error: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At tuple, At function: SqlProjectItem, At lambda
:3:25: Error: At function: Parameter, At function: DataType
:3:25: Error: Invalid decimal precision: 99 2025-06-25T15:01:40.711280Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=NmIxNjIxNjgtZTFhMmM5NzMtZGIxZDk2ODQtMTZiNTg3M2E=, ActorId: [4:7519902332354098554:2524], ActorState: ExecuteState, TraceId: 01jyksqrpj70bq5skd96p00abv, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T15:01:40.877063Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Yjk3ZmU4OC0zNDQ3NDYxYi02MGJlNzk1LTYzZDA3ODFl, ActorId: [4:7519902332354098560:2527], ActorState: ExecuteState, TraceId: 01jyksqrqe7x57t2z6n6wkykdh, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1223: ydb/core/kqp/query_data/kqp_query_data.cpp:271: Parameter $value22 type mismatch, expected: { Kind: Data Data { Scheme: 4865 DecimalParams { Precision: 22 Scale: 9 } } }, actual: Type (Data), schemeType: Decimal(35,10), schemeTypeId: 4865 2025-06-25T15:01:40.913378Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519902332354098577:2534], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:7:29: Error: At function: KiWriteTable!
:7:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:4:25: Error: Implicit decimal cast would lose precision
:7:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:7:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T15:01:40.913623Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=NGQ4MDZjMDgtYmI5MWUwYmEtNTMyN2U4YmItNzE2OGY3YTg=, ActorId: [4:7519902332354098575:2533], ActorState: ExecuteState, TraceId: 01jyksqrwq7knssak48ds9pzsy, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T15:01:40.942503Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7519902332354098588:2539], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:3:29: Error: At function: KiWriteTable!
:3:50: Error: Failed to convert type: Struct<'Key':Int32,'Value22':Decimal(35,10),'Value35':Decimal(35,10)> to Struct<'Key':Int32?,'Value22':Decimal(22,9)?,'Value35':Decimal(35,10)?>
:0:14: Error: Implicit decimal cast would lose precision
:3:50: Error: Failed to convert 'Value22': Decimal(35,10) to Optional
:3:50: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-25T15:01:40.944587Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=4&id=MmZkYzU5ZWEtNWU1ZjUwMS1jY2UzMzdmMC1hZmJmZjJkNg==, ActorId: [4:7519902332354098586:2538], ActorState: ExecuteState, TraceId: 01jyksqrxs2b9maecsfdvj477y, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> DataShardTxOrder::RandomPoints_DelayRS_Reboot_Dirty |92.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> TxOrderInternals::OperationOrder [GOOD] >> KqpCost::OlapRange [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupJoin+StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 15143, MsgBus: 18114 2025-06-25T15:01:35.876270Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902311952364674:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:35.876586Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015ed/r3tmp/tmpT76ajN/pdisk_1.dat 2025-06-25T15:01:36.313772Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:36.315520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:36.315616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:36.321002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15143, node 1 2025-06-25T15:01:36.509564Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:36.509595Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:36.509602Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:36.509730Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18114 2025-06-25T15:01:36.872475Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18114 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:37.219288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:37.247831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:37.258579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:37.419037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:37.615890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:37.719261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:39.405015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902329132235296:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:39.405152Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:39.721443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.762066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.795158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.840738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.876401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.909220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.981807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:40.072335Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902333427203254:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:40.072438Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:40.072518Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902333427203259:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:40.076333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:40.089690Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902333427203261:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:40.166044Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902333427203312:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:40.871629Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902311952364674:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:40.871728Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:41.366747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:41.406408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:41.434220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) /Root/Join1_2 1 19 /Root/Join1_1 8 136 >> DataShardTxOrder::RandomPoints_DelayRS_Reboot ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OltpWriteRow-isSink [GOOD] Test command err: Trying to start YDB, gRPC: 27549, MsgBus: 18901 2025-06-25T15:01:35.568989Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902314045937472:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:35.569579Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015f9/r3tmp/tmpco1g8T/pdisk_1.dat 2025-06-25T15:01:35.977274Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902314045937358:2080] 1750863695547149 != 1750863695547152 2025-06-25T15:01:35.983844Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27549, node 1 2025-06-25T15:01:36.039470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:36.039850Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:36.052433Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:36.096898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:36.096935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:36.096942Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:36.097065Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18901 TClient is connected to server localhost:18901 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T15:01:36.569408Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:36.638744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:36.660632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:36.675243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:36.820202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T15:01:37.016982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:37.107796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:38.838230Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902326930840888:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:38.838327Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:39.152768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.185097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.223563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.252729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.324292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.401019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.437423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:39.510610Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902331225808843:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:39.510726Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:39.510816Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902331225808848:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:39.514719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:39.525491Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902331225808850:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:39.606216Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902331225808903:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:40.568415Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902314045937472:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:40.573135Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:40.894802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) query_phases { duration_us: 631 cpu_time_us: 631 } query_phases { duration_us: 2929 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 847 affected_shards: 1 } compilation { duration_us: 71806 cpu_time_us: 68018 } process_cpu_time_us: 930 total_duration_us: 77366 total_cpu_time_us: 70426 query_phases { duration_us: 532 cpu_time_us: 532 } query_phases { duration_us: 3559 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1014 affected_shards: 1 } compilation { duration_us: 66482 cpu_time_us: 63490 } process_cpu_time_us: 872 total_duration_us: 74006 total_cpu_time_us: 65908 2025-06-25T15:01:41.456130Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519902339815743884:2503], TxId: 281474976710677, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=MmMwYjFmY2YtOWIwMTFkNGUtYWQzZjBhNC01MmU1ODU5NQ==. TraceId : 01jyksqs6jcjcmp28n6tr8a7x3. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-25T15:01:41.456457Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519902339815743886:2504], TxId: 281474976710677, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=MmMwYjFmY2YtOWIwMTFkNGUtYWQzZjBhNC01MmU1ODU5NQ==. CustomerSuppliedId : . TraceId : 01jyksqs6jcjcmp28n6tr8a7x3. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7519902339815743881:2466], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-25T15:01:41.456767Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MmMwYjFmY2YtOWIwMTFkNGUtYWQzZjBhNC01MmU1ODU5NQ==, ActorId: [1:7519902335520776431:2466], ActorState: ExecuteState, TraceId: 01jyksqs6jcjcmp28n6tr8a7x3, Create QueryResponse for error on request, msg: query_phases { duration_us: 756 cpu_time_us: 756 } query_phases { duration_us: 6641 table_access { name: "/Root/TestTable" reads { rows: 1 bytes: 8 } partitions_count: 1 } cpu_time_us: 6539 affected_shards: 1 } query_phases { duration_us: 26288 cpu_time_us: 26889 } compilation { duration_us: 212518 cpu_time_us: 205240 } process_cpu_time_us: 1573 total_duration_us: 253970 total_cpu_time_us: 240997 query_phases { duration_us: 770 cpu_time_us: 770 } query_phases { duration_us: 3621 table_access { name: "/Root/TestTable" partitions_count: 1 } cpu_time_us: 3321 affected_shards: 1 } query_phases { duration_us: 1704 cpu_time_us: 1491 } query_phases { duration_us: 3006 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1091 affected_shards: 1 } compilation { duration_us: 196535 cpu_time_us: 192396 } process_cpu_time_us: 1633 total_duration_us: 211387 total_cpu_time_us: 200702 query_phases { duration_us: 702 cpu_time_us: 702 } query_phases { duration_us: 4218 table_access { name: "/Root/TestTable" partitions_count: 1 } cpu_time_us: 4687 affected_shards: 1 } query_phases { duration_us: 1042 cpu_time_us: 627 affected_shards: 1 } compilation { duration_us: 242420 cpu_time_us: 237758 } process_cpu_time_us: 1457 total_duration_us: 255349 total_cpu_time_us: 245231 query_phases { duration_us: 665 cpu_time_us: 665 } query_phases { duration_us: 8818 table_access { name: "/Root/TestTable" reads { rows: 1 bytes: 8 } partitions_count: 1 } cpu_time_us: 8143 affected_shards: 1 } query_phases { duration_us: 3381 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1305 affected_shards: 1 } compilation { duration_us: 186414 cpu_time_us: 182277 } process_cpu_time_us: 1494 total_duration_us: 204082 total_cpu_time_us: 193884 query_phases { duration_us: 543 cpu_time_us: 543 } query_phases { duration_us: 3343 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 1079 affected_shards: 1 } compilation { duration_us: 66884 cpu_time_us: 63383 } process_cpu_time_us: 921 total_duration_us: 72471 total_cpu_time_us: 65926 query_phases { duration_us: 597 cpu_time_us: 597 } query_phases { duration_us: 5057 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 1096 affected_shards: 1 } compilation { duration_us: 60701 cpu_time_us: 53882 } process_cpu_time_us: 1004 total_duration_us: 67901 total_cpu_time_us: 56579 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpQuery::ExecuteWriteQuery [GOOD] Test command err: Trying to start YDB, gRPC: 31825, MsgBus: 28797 2025-06-25T15:00:51.188449Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902121056390225:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.188611Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/001442/r3tmp/tmpfu4md2/pdisk_1.dat 2025-06-25T15:00:51.613627Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:51.618494Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902121056390116:2080] 1750863651168828 != 1750863651168831 2025-06-25T15:00:51.682241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:51.682368Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 31825, node 1 2025-06-25T15:00:51.685318Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:51.824886Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:51.824905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:51.824910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:51.825005Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:00:52.192263Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28797 TClient is connected to server localhost:28797 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:52.679058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:52.713666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:00:52.855973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:00:53.026926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:53.103139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.573300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902133941293662:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.573407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.869227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.906235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.937837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.969035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.000350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.040711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.117053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.202055Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902138236261616:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.202149Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.202438Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902138236261621:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.206915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.221827Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902138236261623:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:00:55.289677Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902138236261676:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.177454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902121056390225:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.177511Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:00:56.409868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 3991, MsgBus: 16970 2025-06-25T15:00:57.412498Z node 2 :METADATA_PROVIDER WARN: ... 0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.883131Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:13.936748Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:14.026167Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902220333713402:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:14.026266Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:14.026518Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902220333713407:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:14.031289Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:14.044078Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902220333713409:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:14.131060Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902220333713460:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:14.543741Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902198858874761:2150];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:14.543897Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:15.548224Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:20.204400Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7519902233218615841:2474], Table: `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-25T15:01:21.465573Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7519902233218615841:2474], Table: `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-25T15:01:24.612778Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:01:24.612811Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:24.652408Z node 4 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability;tx_id=0; 2025-06-25T15:01:24.653722Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [4:7519902233218615841:2474], Table: `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Got OVERLOADED for table `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300`. ShardID=72075186224037922, Sink=[4:7519902233218615841:2474]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability, code: 2006 } 2025-06-25T15:01:24.682921Z node 4 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability;tx_id=0; 2025-06-25T15:01:24.683908Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [4:7519902233218615841:2474], Table: `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Got OVERLOADED for table `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300`. ShardID=72075186224037922, Sink=[4:7519902233218615841:2474]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability, code: 2006 } 2025-06-25T15:01:25.781385Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7519902233218615841:2474], Table: `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-25T15:01:25.820854Z node 4 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability;tx_id=0; 2025-06-25T15:01:25.830461Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [4:7519902233218615841:2474], Table: `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Got OVERLOADED for table `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300`. ShardID=72075186224037922, Sink=[4:7519902233218615841:2474]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability, code: 2006 } 2025-06-25T15:01:25.832668Z node 4 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability;tx_id=0; 2025-06-25T15:01:25.836511Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [4:7519902233218615841:2474], Table: `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Got OVERLOADED for table `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300`. ShardID=72075186224037922, Sink=[4:7519902233218615841:2474]. Ignored this error.{
: Error: Rejecting data TxId 0 because datashard 72075186224037922: decided to reject due to given RejectProbability, code: 2006 } 2025-06-25T15:01:27.124404Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7519902233218615841:2474], Table: `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-25T15:01:27.124453Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1081: SelfId: [4:7519902233218615841:2474], Table: `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Retry failed: not found ShardID=72075186224037922 with Cookie=1 2025-06-25T15:01:27.383943Z node 4 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1097: SelfId: [4:7519902233218615841:2474], Table: `/Root/.tmp/sessions/MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=/Root/test_table_cas_11938098647351320300` ([72057594046644480:21:1]), SessionActorId: [0:0:0]Timeout shardID=72075186224037922 2025-06-25T15:01:28.845151Z node 4 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykspzyccnrmp61pj4twmqjt", SessionId: ydb://session/3?node_id=4&id=MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=, Slow query, duration: 13.501612s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n CREATE TABLE test_table (\n PRIMARY KEY (id)\n ) AS SELECT\n ROW_NUMBER() OVER w AS id, data\n FROM\n AS_TABLE(ListReplicate(<|data: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'|>, 500000))\n WINDOW\n w AS (ORDER BY data)", parameters: 0b 2025-06-25T15:01:29.153123Z node 4 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jykspzyccnrmp61pj4twmqjt", SessionId: ydb://session/3?node_id=4&id=MTU0ZWIwNmUtZDUzMDA3ZDktY2NmNTU1NDUtM2UyOTRlMDA=, Slow query, duration: 13.809571s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n CREATE TABLE test_table (\n PRIMARY KEY (id)\n ) AS SELECT\n ROW_NUMBER() OVER w AS id, data\n FROM\n AS_TABLE(ListReplicate(<|data: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'|>, 500000))\n WINDOW\n w AS (ORDER BY data)", parameters: 0b 2025-06-25T15:01:29.163165Z --------------- Start update --------------- 2025-06-25T15:01:40.338822Z node 4 :TX_DATASHARD ERROR: datashard__stats.cpp:649: CPU usage 61.3197 is higher than threshold of 60 in-flight Tx: 0 immediate Tx: 0 readIterators: 0 at datashard: 72075186224037922 table: [/Root/test_table] 2025-06-25T15:01:40.392225Z node 4 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyksqden85grwmpezrgtf7vt", SessionId: ydb://session/3?node_id=4&id=MWI5MjdmZmItMWYxNzgzYjAtZjQ1MzgyNi0zNGE4MzhkZg==, Slow query, duration: 11.215954s, status: STATUS_CODE_UNSPECIFIED, user: UNAUTHENTICATED, results: 0b, text: "\n UPDATE test_table SET data = \"a\"\n ", parameters: 0b >> DataShardOutOfOrder::TestOutOfOrderRestartLocksReorderedWithoutBarrier >> EncryptedBackupParamsValidationTest::IncorrectKeyLengthExport [GOOD] >> KqpCost::AAARangeFullScan [GOOD] >> KqpCost::IndexLookupJoin-StreamLookupJoin [GOOD] >> CompressExecutor::TestReorderedExecutor [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithAbort >> DataShardOutOfOrder::UncommittedReads ------- [TM] {asan, default-linux-x86_64, 
release} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapRange [GOOD] Test command err: Trying to start YDB, gRPC: 1351, MsgBus: 14411 2025-06-25T15:01:37.533874Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902320255436349:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:37.542741Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015e1/r3tmp/tmp8Cnxn7/pdisk_1.dat 2025-06-25T15:01:37.980287Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:37.984436Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902320255436251:2080] 1750863697527076 != 1750863697527079 2025-06-25T15:01:37.990509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:37.990639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 1351, node 1 2025-06-25T15:01:38.002335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:38.080872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:38.080896Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:38.080903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:38.081023Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14411 TClient is connected to server localhost:14411 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T15:01:38.544215Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:01:38.588364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:38.605164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:38.621086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:38.764109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:38.925290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:39.001681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:40.475179Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902333140339780:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:40.475261Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:40.748964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:40.783934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:40.850065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:40.879977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:40.915140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:40.986548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:41.015509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:41.067213Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902337435307740:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:41.067275Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:41.067339Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902337435307745:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:41.070271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:41.079563Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902337435307747:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:41.137373Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902337435307798:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:42.117473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:01:42.261795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7519902341730275457:2473];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:01:42.262101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=720751 ... TxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:01:42.447295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:01:42.447370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:01:42.447389Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:01:42.447438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:01:42.447452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:01:42.447481Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037925;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:01:42.447539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:01:42.447557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:01:42.447710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:01:42.447722Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037925;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:01:42.448064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7519902341730275471:2477];ev=NActors::IEventHandle;tablet_id=72075186224037931;tx_id=281474976710672;this=88923015274464;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863702447;max=18446744073709551615;plan=0;src=[1:7519902320255436578:2147];cookie=442:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.449322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7519902341730275482:2481];ev=NActors::IEventHandle;tablet_id=72075186224037930;tx_id=281474976710672;this=88923025279872;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863702449;max=18446744073709551615;plan=0;src=[1:7519902320255436578:2147];cookie=432:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.450235Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7519902341730275467:2476];ev=NActors::IEventHandle;tablet_id=72075186224037929;tx_id=281474976710672;this=88923025278976;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863702449;max=18446744073709551615;plan=0;src=[1:7519902320255436578:2147];cookie=422:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.452160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[1:7519902341730275458:2474];ev=NActors::IEventHandle;tablet_id=72075186224037923;tx_id=281474976710672;this=88923015279616;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863702451;max=18446744073709551615;plan=0;src=[1:7519902320255436578:2147];cookie=362:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.452462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;self_id=[1:7519902341730275476:2480];ev=NActors::IEventHandle;tablet_id=72075186224037924;tx_id=281474976710672;this=88923025277856;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863702450;max=18446744073709551615;plan=0;src=[1:7519902320255436578:2147];cookie=372:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.453054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[1:7519902341730275475:2479];ev=NActors::IEventHandle;tablet_id=72075186224037926;tx_id=281474976710672;this=88923014942272;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863702452;max=18446744073709551615;plan=0;src=[1:7519902320255436578:2147];cookie=392:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.455631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037925;self_id=[1:7519902341730275466:2475];ev=NActors::IEventHandle;tablet_id=72075186224037925;tx_id=281474976710672;this=88923015279392;method=TTxController::StartProposeOnExecute;tx_info=281474976710672:TX_KIND_SCHEMA;min=1750863702455;max=18446744073709551615;plan=0;src=[1:7519902320255436578:2147];cookie=382:12;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.459209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.460168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.472739Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:42.473131Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:42.473303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.473692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.478869Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:42.479572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.479877Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:42.481407Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.483689Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:42.484108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.485314Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:42.485622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.488091Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:42.488862Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:42.488965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.489298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=17;result=not_found; 2025-06-25T15:01:42.492734Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:42.492975Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710672; 2025-06-25T15:01:42.543886Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902320255436349:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:42.544149Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:42.625624Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-25T15:01:42.626110Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; 2025-06-25T15:01:42.626192Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976710674; >> DataShardTxOrder::ImmediateBetweenOnline_oo8_dirty >> DataShardTxOrder::ZigZag ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::AAARangeFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 7550, MsgBus: 17164 2025-06-25T15:01:38.852626Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902326412181920:2063];send_to=[0:7307199536658146131:7762515]; 
2025-06-25T15:01:38.852690Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015cf/r3tmp/tmpOP7Bt9/pdisk_1.dat 2025-06-25T15:01:39.324494Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:39.328042Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902326412181897:2080] 1750863698847820 != 1750863698847823 TServer::EnableGrpc on GrpcPort 7550, node 1 2025-06-25T15:01:39.358728Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:39.359270Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:39.361274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:39.391265Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:39.391286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:39.391292Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:39.391412Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17164 TClient is connected to server localhost:17164 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T15:01:39.854958Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:39.924943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:01:39.936626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:39.944757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:01:40.078779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:40.238397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:40.322535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:41.833792Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902339297085426:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:41.833869Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:42.156814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:42.191281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:42.221940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:42.260781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:42.291948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:42.328498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:42.400760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:42.450922Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902343592053383:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:42.451035Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:42.451222Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902343592053388:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:42.454410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:42.463817Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902343592053390:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:42.523650Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902343592053441:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:43.859797Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902326412181920:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:43.859877Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Test"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"0","ReadRanges":["Group (-∞, +∞)","Name (-∞, +∞)"],"Reverse":false,"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Test","ReadRangesPointPr ... Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","PlanNodeType":"Query"}} query_phases { duration_us: 5814 table_access { name: "/Root/Test" reads { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 5786 affected_shards: 1 } compilation { duration_us: 198392 cpu_time_us: 195280 } process_cpu_time_us: 276 query_plan: "{\"Plan\":{\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":3,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"Tables\":[\"Test\"],\"PlanNodeId\":1,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"ReadRanges\":[\"Group (-\342\210\236, +\342\210\236)\",\"Name (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/Test\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"0\",\"Table\":\"Test\",\"ReadColumns\":[\"Amount\",\"Comment\",\"Group\",\"Name\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TableFullScan\"}],\"Operators\":[{\"Inputs\":[{\"InternalOperatorId\":1}],\"Name\":\"Limit\",\"Limit\":\"1\"},{\"Inputs\":[{\"ExternalPlanNodeId\":1}],\"E-Rows\":\"0\",\"Predicate\":\"item.Amount \\u003C 5000\",\"Name\":\"Filter\",\"E-Size\":\"0\",\"E-Cost\":\"0\"}],\"Node 
Type\":\"Limit-Filter\",\"Stats\":{\"UseLlvm\":\"undefined\",\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"FirstMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[3,19]}},\"Name\":\"4\",\"Push\":{\"LastMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"FirstMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"PauseMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"WaitTimeUs\":{\"Count\":1,\"Sum\":841,\"Max\":841,\"Min\":841,\"History\":[3,841]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"WaitMessageMs\":{\"Count\":1,\"Max\":2,\"Min\":1}}}],\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[3,1048576]},\"Tasks\":1,\"OutputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FinishedTasks\":1,\"IngressRows\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"PhysicalStageId\":0,\"StageDurationUs\":0,\"Table\":[{\"Path\":\"\\/Root\\/Test\",\"ReadRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ReadBytes\":{\"Count\":1,\"Sum\":20,\"Max\":20,\"Min\":20}}],\"BaseTimeMs\":1750863703829,\"OutputBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"CpuTimeUs\":{\"Count\":1,\"Sum\":1947,\"Max\":1947,\"Min\":1947,\"History\":[3,1947]},\"Ingress\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"LastMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"FirstMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"Bytes\":{\"Count\":1,\"Sum\":192,\"Max\":192,\"Min\":192,\"History\":[3,192]}},\"External\":{},\"Name\":\"KqpReadRangesSource\",\"Ingress\":{},\"Push\":{\"LastMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"Rows\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"FirstMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"Bytes\":{\"Count\":1,\"Sum\":192,\"Max\":192,\"Min\":192,\"History\":[3,192]},\"PauseMessageMs\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"WaitTimeUs\":{\"Count\":1,\"Sum\":840,\"Max\":840,\"Min\":840,\"History\":[3,840]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"WaitMessageMs\":{\"Count\":1,\"Max\":2,\"Min\":1}}}],\"UpdateTimeMs\":3}}],\"Node Type\":\"Merge\",\"SortColumns\":[\"Group (Asc)\"],\"PlanNodeType\":\"Connection\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":3}],\"Name\":\"Limit\",\"Limit\":\"1\"}],\"Node 
Type\":\"Limit\",\"Stats\":{\"UseLlvm\":\"undefined\",\"OutputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"PhysicalStageId\":1,\"FinishedTasks\":1,\"InputBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"DurationUs\":{\"Count\":1,\"Sum\":1000,\"Max\":1000,\"Min\":1000},\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[3,1048576]},\"BaseTimeMs\":1750863703829,\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"FirstMessageMs\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[3,19]}},\"Name\":\"RESULT\",\"Push\":{\"WaitTimeUs\":{\"Count\":1,\"Sum\":2225,\"Max\":2225,\"Min\":2225,\"History\":[3,2225]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"FirstMessageMs\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3}}}],\"CpuTimeUs\":{\"Count\":1,\"Sum\":762,\"Max\":762,\"Min\":762,\"History\":[3,762]},\"StageDurationUs\":1000,\"ResultRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResultBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"OutputBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"Input\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"LastMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"FirstMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[3,19]}},\"Name\":\"2\",\"Push\":{\"LastMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ResumeMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"FirstMessageMs\":{\"Count\":1,\"Sum\":2,\"Max\":2,\"Min\":2},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[3,19]},\"WaitTimeUs\":{\"Count\":1,\"Sum\":2141,\"Max\":2141,\"Min\":2141,\"History\":[3,2141]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1}}}],\"UpdateTimeMs\":3,\"InputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Tasks\":1}}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"Compilation\":{\"FromCache\":false,\"DurationUs\":198392,\"CpuTimeUs\":195280},\"ProcessCpuTimeUs\":276,\"TotalDurationUs\":212376,\"ResourcePoolId\":\"default\",\"QueuedTimeUs\":558},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":6,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"0\",\"ReadRanges\":[\"Group (-\342\210\236, +\342\210\236)\",\"Name (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Path\":\"\\/Root\\/Test\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"0\",\"Table\":\"Test\",\"ReadColumns\":[\"Amount\",\"Comment\",\"Group\",\"Name\"],\"E-Cost\":\"0\"}],\"Node Type\":\"TableFullScan\"}],\"Operators\":[{\"E-Rows\":\"0\",\"Predicate\":\"item.Amount \\u003C 
5000\",\"Name\":\"Filter\",\"E-Size\":\"0\",\"E-Cost\":\"0\"}],\"Node Type\":\"Filter\"}],\"Operators\":[{\"A-Rows\":1,\"A-SelfCpu\":1.947,\"A-Cpu\":1.947,\"A-Size\":19,\"Name\":\"Limit\",\"Limit\":\"1\"}],\"Node Type\":\"Limit\"}],\"Operators\":[{\"A-Rows\":1,\"A-SelfCpu\":0.762,\"A-Cpu\":2.709,\"A-Size\":19,\"Name\":\"Limit\",\"Limit\":\"1\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"PlanNodeType\":\"Query\"}}" query_ast: "(\n(let $1 (KqpTable \'\"/Root/Test\" \'\"72057594046644480:9\" \'\"\" \'1))\n(let $2 \'(\'\"Amount\" \'\"Comment\" \'\"Group\" \'\"Name\"))\n(let $3 (KqpRowsSourceSettings $1 $2 \'(\'(\'\"Sorted\")) (Void) \'()))\n(let $4 (Uint64 \'1))\n(let $5 (OptionalType (DataType \'String)))\n(let $6 (StructType \'(\'\"Amount\" (OptionalType (DataType \'Uint64))) \'(\'\"Comment\" $5) \'(\'\"Group\" (OptionalType (DataType \'Uint32))) \'(\'\"Name\" $5)))\n(let $7 \'(\'(\'\"_logical_id\" \'559) \'(\'\"_id\" \'\"dbc9c25a-c4c0512f-adc3dc59-32d041d0\") \'(\'\"_wide_channels\" $6)))\n(let $8 (DqPhyStage \'((DqSource (DataSource \'\"KqpReadRangesSource\") $3)) (lambda \'($12) (block \'(\n (let $13 (lambda \'($16) (block \'(\n (let $17 (Member $16 \'\"Amount\"))\n (return $17 (Member $16 \'\"Comment\") (Member $16 \'\"Group\") (Member $16 \'\"Name\") (Coalesce (< $17 (Uint64 \'\"5000\")) (Bool \'false)))\n ))))\n (let $14 (WideFilter (ExpandMap (ToFlow $12) $13) (lambda \'($18 $19 $20 $21 $22) $22) $4))\n (let $15 (lambda \'($23 $24 $25 $26 $27) $23 $24 $25 $26))\n (return (FromFlow (WideMap $14 $15)))\n))) $7))\n(let $9 (DqCnMerge (TDqOutput $8 \'0) \'(\'(\'\"2\" \'\"Asc\"))))\n(let $10 (DqPhyStage \'($9) (lambda \'($28) (FromFlow (NarrowMap (Take (ToFlow $28) $4) (lambda \'($29 $30 $31 $32) (AsStruct \'(\'\"Amount\" $29) \'(\'\"Comment\" $30) \'(\'\"Group\" $31) \'(\'\"Name\" $32)))))) \'(\'(\'\"_logical_id\" \'572) \'(\'\"_id\" \'\"ede77ecc-6813a026-1d03ad54-232f0af2\"))))\n(let $11 (DqCnResult (TDqOutput $10 \'0) \'()))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($8 $10) \'($11) \'() \'(\'(\'\"type\" \'\"data\")))) \'((KqpTxResultBinding (ListType $6) \'0 \'0)) \'(\'(\'\"type\" \'\"data_query\"))))\n)\n" total_duration_us: 212376 total_cpu_time_us: 201342 query_meta: 
"{\"query_database\":\"/Root\",\"query_parameter_types\":{},\"table_metadata\":[\"{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/Test\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":9},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"Amount\\\",\\\"Id\\\":3,\\\"Type\\\":\\\"Uint64\\\",\\\"TypeId\\\":4,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Comment\\\",\\\"Id\\\":4,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Group\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"Uint32\\\",\\\"TypeId\\\":2,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Name\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"Group\\\",\\\"Name\\\"],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}\"],\"table_meta_serialization_type\":2,\"created_at\":\"1750863703\",\"query_type\":\"QUERY_TYPE_SQL_DML\",\"query_syntax\":\"1\",\"query_cluster\":\"db\",\"query_id\":\"222f9b4b-751033d3-dce378b9-488da085\",\"version\":\"1.0\"}" >> DataShardScan::ScanFollowedByUpdate >> DataShardOutOfOrder::TestSnapshotReadAfterStuckRW ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupJoin-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 24365, MsgBus: 23955 2025-06-25T15:01:37.540949Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902322475935056:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:37.551307Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0015db/r3tmp/tmpeqAnVJ/pdisk_1.dat 2025-06-25T15:01:38.038389Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:38.038519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:38.046619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:38.070291Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902322475935032:2080] 1750863697532791 != 1750863697532794 2025-06-25T15:01:38.073662Z 
node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24365, node 1 2025-06-25T15:01:38.178621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:38.178646Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:38.178659Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:38.178798Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23955 2025-06-25T15:01:38.550685Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:23955 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:38.727228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:38.744567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:38.758879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:38.983732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:39.174690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:39.268400Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:40.836855Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902335360838568:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:40.836978Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:41.179900Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:41.249475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:41.283471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:41.313948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:41.340883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:41.369138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:41.401247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:41.482948Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902339655806525:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:41.483018Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902339655806530:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:41.483022Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:41.486468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:41.497785Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902339655806532:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:41.554384Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902339655806583:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:42.537074Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902322475935056:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:42.537154Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:42.706544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:42.749261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:42.784380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) /Root/Join1_2 1 19 /Root/Join1_1 8 136 >> DataShardTxOrder::RandomPoints_ReproducerDelayRS1 >> DataShardOutOfOrder::TestShardSnapshotReadNoEarlyReply >> DataShardTxOrder::ImmediateBetweenOnline >> EncryptedBackupParamsValidationTest::NoSourcePrefix |92.2%| [TA] $(B)/ydb/core/kqp/ut/cost/test-results/unittest/{meta.json ... results_accumulator.log} |92.2%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/cost/test-results/unittest/{meta.json ... 
results_accumulator.log} >> RetryPolicy::TWriteSession_TestPolicy [GOOD] >> RetryPolicy::TWriteSession_TestBrokenPolicy >> DataShardOutOfOrder::TestReadTableWriteConflict >> DataShardTxOrder::RandomDotRanges_DelayRS >> DataShardTxOrder::ReadWriteReorder >> DataShardTxOrder::RandomPoints_ReproducerDelayRS1 [GOOD] >> DataShardScan::ScanFollowedByUpdate [GOOD] >> KqpLimits::QSReplySize-useSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPoints_ReproducerDelayRS1 [GOOD] Test command err: 2025-06-25T15:01:46.491546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:46.491597Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:46.493507Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:46.510627Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:46.511099Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:46.511344Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:46.520098Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:46.561332Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:46.561492Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:46.562963Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:46.563043Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:46.563092Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:46.563504Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:46.563578Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:46.563632Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:46.622699Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:46.655348Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:46.655539Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:46.655646Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:46.655692Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:46.655734Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:46.655774Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit 
heartbeats: at tablet# 9437184 2025-06-25T15:01:46.655982Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.656034Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.656234Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:46.656302Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:46.656357Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:46.656380Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:46.656415Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:46.656442Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:46.656474Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:46.656495Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:46.656523Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:46.656576Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.656599Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.656638Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:46.658490Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:46.658537Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:46.658597Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:46.658759Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:46.658830Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:46.658879Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:46.658928Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 
2025-06-25T15:01:46.658960Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:46.659000Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:46.659049Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:46.659319Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:46.659347Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:46.659393Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:46.659434Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:46.659491Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:46.659524Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:46.659564Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:46.659594Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:46.659617Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:46.672290Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:46.672351Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:46.672376Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:46.672414Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:46.672455Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:46.672787Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.672822Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.672859Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:46.672967Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:46.672990Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:46.673088Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to 
execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:46.673117Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:46.673139Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:46.673163Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:46.680747Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:46.680819Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:46.680997Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.681036Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.681084Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:46.681122Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:46.681153Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:46.681189Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:46.681220Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
01:47.729454Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:47.729599Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-25T15:01:47.729623Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-25T15:01:47.729642Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-25T15:01:47.729661Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:47.729687Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:7] at 9437184 on unit CompleteOperation 2025-06-25T15:01:47.729718Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 7] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T15:01:47.729755Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 7 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 4} 2025-06-25T15:01:47.729779Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:47.729879Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:47.729901Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:8] at 9437184 on unit CompleteOperation 2025-06-25T15:01:47.729932Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 8] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T15:01:47.729981Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 8 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-25T15:01:47.730017Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:47.730185Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:47.730208Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:9] at 9437184 on unit CompleteOperation 2025-06-25T15:01:47.730239Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 9] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T15:01:47.730289Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 9 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-25T15:01:47.730318Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:47.730480Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:47.730518Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:14] at 9437184 on unit FinishPropose 2025-06-25T15:01:47.730558Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 14 at tablet 9437184 send to client, exec latency: 0 ms, propose 
latency: 1 ms, status: COMPLETE 2025-06-25T15:01:47.730639Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:47.730798Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:47.730835Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:10] at 9437184 on unit CompleteOperation 2025-06-25T15:01:47.730870Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 10] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T15:01:47.730916Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 10 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-25T15:01:47.730940Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:47.731099Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:47.731125Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:12] at 9437184 on unit CompleteOperation 2025-06-25T15:01:47.731154Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 12] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T15:01:47.731190Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 12 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-25T15:01:47.731213Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:47.731350Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:47.731393Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:13] at 9437184 on unit CompleteOperation 2025-06-25T15:01:47.731440Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 13] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:01:47.731476Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:47.731579Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-25T15:01:47.731616Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:47.731639Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:5] at 9437184 on unit CompleteOperation 2025-06-25T15:01:47.731669Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 5] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T15:01:47.731725Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-25T15:01:47.731752Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 
2025-06-25T15:01:47.731967Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000004 txid# 4 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 1} 2025-06-25T15:01:47.732031Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:47.732082Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 4 2025-06-25T15:01:47.732177Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000004 txid# 6 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 3} 2025-06-25T15:01:47.732204Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:47.732241Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 6 2025-06-25T15:01:47.732304Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000004 txid# 7 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 4} 2025-06-25T15:01:47.732343Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:47.732364Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 7 2025-06-25T15:01:47.732420Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000004 txid# 8 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-25T15:01:47.732451Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:47.732480Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 8 2025-06-25T15:01:47.732541Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000004 txid# 9 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-25T15:01:47.732565Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:47.732587Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 9 2025-06-25T15:01:47.732646Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000004 txid# 10 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-25T15:01:47.732674Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:47.732708Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 10 2025-06-25T15:01:47.732761Z node 1 
:TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000004 txid# 12 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-25T15:01:47.732788Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:47.732810Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 12 2025-06-25T15:01:47.732872Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-25T15:01:47.732898Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:47.732920Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 5 expect 7 2 5 4 - 3 - - - - - - - - - - - - - - - - - - - - - - - - - - actual 7 2 5 4 - 3 - - - - - - - - - - - - - - - - - - - - - - - - - - interm - 2 5 4 - 3 - - - - - - - - - - - - - - - - - - - - - - - - - - >> DataShardOutOfOrder::TestReadTableImmediateWriteBlock [GOOD] >> DataShardOutOfOrder::TestReadTableSingleShardImmediate >> KqpLimits::ReplySizeExceeded [GOOD] >> DataShardTxOrder::ImmediateBetweenOnline_oo8 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardScan::ScanFollowedByUpdate [GOOD] Test command err: 2025-06-25T15:01:46.011959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:46.012002Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:46.013148Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:46.026612Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:46.027106Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:46.027374Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:46.037256Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:46.085049Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:46.085212Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:46.087022Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:46.087133Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:46.087191Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:46.087563Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:46.087649Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:46.087712Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:46.161014Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:46.201385Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:46.201583Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:46.201696Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:46.201747Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:46.201793Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:46.201834Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:46.202067Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.202125Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.202369Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:46.202488Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:46.202559Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:46.202600Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:46.202641Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:46.202681Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:46.202726Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:46.202761Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:46.202806Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:46.202886Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.202920Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.202971Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:46.205782Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME 
SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nK\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\n \000Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:46.205848Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:46.205922Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:46.206085Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:46.206154Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:46.206204Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:46.206255Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:46.206287Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:46.206333Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:46.206372Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:46.206711Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:46.206751Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:46.206788Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:46.206830Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:46.206900Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:46.206935Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:46.206973Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:46.207004Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:46.207047Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:46.222988Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:46.223097Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:46.223139Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:46.223195Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 
ms, status: PREPARED 2025-06-25T15:01:46.223262Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:46.223760Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.223817Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.223862Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:46.224013Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:46.224053Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:46.224233Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:46.224285Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:46.224345Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:46.224384Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:46.228090Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:46.228163Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:46.228632Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.228685Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.228742Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:46.228787Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:46.228822Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:46.228862Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:46.228897Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100 ... 
on::Execute at 9437185 2025-06-25T15:01:48.229144Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:01:48.229200Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000006:36] at 9437185 for ReadTableScan 2025-06-25T15:01:48.229241Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437185 on unit ReadTableScan 2025-06-25T15:01:48.229275Z node 1 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [1000006:36] at 9437185 error: , IsFatalError: 0 2025-06-25T15:01:48.229336Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437185 is Executed 2025-06-25T15:01:48.229409Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437185 executing on unit ReadTableScan 2025-06-25T15:01:48.229441Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437185 to execution unit CompleteOperation 2025-06-25T15:01:48.229474Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437185 on unit CompleteOperation 2025-06-25T15:01:48.229664Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437185 is DelayComplete 2025-06-25T15:01:48.229696Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437185 executing on unit CompleteOperation 2025-06-25T15:01:48.229734Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437185 to execution unit CompletedOperations 2025-06-25T15:01:48.229771Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437185 on unit CompletedOperations 2025-06-25T15:01:48.229825Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437185 is Executed 2025-06-25T15:01:48.229860Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437185 executing on unit CompletedOperations 2025-06-25T15:01:48.229887Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000006:36] at 9437185 has finished 2025-06-25T15:01:48.229927Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:48.229955Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-25T15:01:48.230040Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-25T15:01:48.230075Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-25T15:01:48.230236Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:461:2400], Recipient [1:461:2400]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.230268Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.230311Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437186 2025-06-25T15:01:48.230340Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 active 1 active planned 1 
immediate 0 planned 1 2025-06-25T15:01:48.230373Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000006:36] at 9437186 for ReadTableScan 2025-06-25T15:01:48.230407Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437186 on unit ReadTableScan 2025-06-25T15:01:48.230442Z node 1 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [1000006:36] at 9437186 error: , IsFatalError: 0 2025-06-25T15:01:48.230473Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437186 is Executed 2025-06-25T15:01:48.230499Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437186 executing on unit ReadTableScan 2025-06-25T15:01:48.230525Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437186 to execution unit CompleteOperation 2025-06-25T15:01:48.230565Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437186 on unit CompleteOperation 2025-06-25T15:01:48.230729Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437186 is DelayComplete 2025-06-25T15:01:48.230757Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437186 executing on unit CompleteOperation 2025-06-25T15:01:48.230782Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437186 to execution unit CompletedOperations 2025-06-25T15:01:48.230810Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437186 on unit CompletedOperations 2025-06-25T15:01:48.230843Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437186 is Executed 2025-06-25T15:01:48.230880Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437186 executing on unit CompletedOperations 2025-06-25T15:01:48.230906Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000006:36] at 9437186 has finished 2025-06-25T15:01:48.230932Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:48.230955Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-25T15:01:48.231025Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437186 has no attached operations 2025-06-25T15:01:48.231050Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-06-25T15:01:48.231245Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:239:2230], Recipient [1:239:2230]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.231297Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.231348Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:48.231374Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:01:48.231402Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000006:36] at 9437184 for 
ReadTableScan 2025-06-25T15:01:48.231426Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437184 on unit ReadTableScan 2025-06-25T15:01:48.231476Z node 1 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [1000006:36] at 9437184 error: , IsFatalError: 0 2025-06-25T15:01:48.231518Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437184 is Executed 2025-06-25T15:01:48.231545Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437184 executing on unit ReadTableScan 2025-06-25T15:01:48.231570Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437184 to execution unit CompleteOperation 2025-06-25T15:01:48.231597Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437184 on unit CompleteOperation 2025-06-25T15:01:48.231738Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437184 is DelayComplete 2025-06-25T15:01:48.231764Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437184 executing on unit CompleteOperation 2025-06-25T15:01:48.231791Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000006:36] at 9437184 to execution unit CompletedOperations 2025-06-25T15:01:48.231829Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000006:36] at 9437184 on unit CompletedOperations 2025-06-25T15:01:48.231868Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000006:36] at 9437184 is Executed 2025-06-25T15:01:48.231899Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000006:36] at 9437184 executing on unit CompletedOperations 2025-06-25T15:01:48.231929Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000006:36] at 9437184 has finished 2025-06-25T15:01:48.231952Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:48.231973Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:48.231995Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:48.232016Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:48.245020Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:48.245077Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:48.245110Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000006:36] at 9437186 on unit CompleteOperation 2025-06-25T15:01:48.245165Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000006 : 36] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 4 ms, propose latency: 5 ms 2025-06-25T15:01:48.245214Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:48.245413Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:48.245444Z node 1 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:48.245482Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000006:36] at 9437184 on unit CompleteOperation 2025-06-25T15:01:48.245526Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000006 : 36] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 4 ms, propose latency: 5 ms 2025-06-25T15:01:48.245560Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:48.245688Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-25T15:01:48.245711Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-25T15:01:48.245733Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000006:36] at 9437185 on unit CompleteOperation 2025-06-25T15:01:48.245761Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000006 : 36] from 9437185 at tablet 9437185 send result to client [1:103:2136], exec latency: 4 ms, propose latency: 5 ms 2025-06-25T15:01:48.245785Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 >> DataShardOutOfOrder::TestImmediateQueueThenSplit+UseSink [GOOD] >> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink >> DataShardTxOrder::ImmediateBetweenOnline_Init_oo8 [GOOD] >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed+EvWrite [GOOD] >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed-EvWrite >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed+EvWrite [GOOD] >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed-EvWrite >> DataShardOutOfOrder::TestPlannedTimeoutSplit [GOOD] >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::QSReplySize-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 25399, MsgBus: 24756 2025-06-25T15:00:59.739984Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902156753829297:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:59.740120Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001405/r3tmp/tmp0L3mg0/pdisk_1.dat 2025-06-25T15:01:00.118974Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:00.182527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:00.182675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:00.184787Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25399, node 1 2025-06-25T15:01:00.244861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:00.244882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize 
from file: (empty maybe) 2025-06-25T15:01:00.244887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:00.245014Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24756 TClient is connected to server localhost:24756 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T15:01:00.764423Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:00.835203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:00.864149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:00.878547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:01.054831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:01.219892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:01.290945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:02.865963Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902169638732792:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:02.866087Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:03.151135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.181426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.212875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.290387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.343921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.388541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.424756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:03.523337Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902173933700753:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:03.523412Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:03.523774Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902173933700758:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:03.527586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:03.544627Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902173933700760:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:03.623594Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902173933700811:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:04.577333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:04.744398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902156753829297:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:04.744727Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:05.723700Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:75199021825236361 ... :30.761228Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:30.762847Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519902290484621162:2080] 1750863690445393 != 1750863690445396 2025-06-25T15:01:30.781971Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 24442, node 4 2025-06-25T15:01:30.880989Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:30.881013Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:30.881023Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:30.881191Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15969 TClient is connected to server localhost:15969 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-25T15:01:31.505277Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:31.558500Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:31.568269Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:31.585229Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:31.677089Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:31.889533Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:32.010111Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:34.651089Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902307664491964:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:34.651175Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:34.683677Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.760625Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.838506Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.874119Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:34.945743Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.001783Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.046146Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:35.173668Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902311959459920:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.173771Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.174071Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902311959459925:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:35.178305Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:35.190833Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902311959459927:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:35.279553Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902311959459978:3413] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:35.506241Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902290484621373:2235];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:35.506335Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:36.837374Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:45.720626Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:01:45.720655Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:47.581680Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=OTdkNDJjN2QtYzMxYWI4MzktMjdiNzcyM2UtZjM4ODdhZWU=, ActorId: [4:7519902359204101390:2630], ActorState: ExecuteState, TraceId: 01jyksqxxndfyv7qcba199rpme, Create QueryResponse for error on request, msg:
: Error: Intermediate data materialization exceeded size limit (88240924 > 50331648). This usually happens when trying to write large amounts of data or to perform lookup by big collection of keys in single query. Consider using smaller batches of data., code: 2013 >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites+EvWrite [GOOD] >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites-EvWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline_oo8 [GOOD] Test command err: 2025-06-25T15:01:43.655650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:43.655704Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:43.660252Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:43.677839Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:43.678331Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:43.682956Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:43.699488Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:43.757571Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:43.757785Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:43.760891Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:43.760973Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:43.761032Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:43.762137Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:43.762249Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:43.762312Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:43.837652Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:43.880569Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:43.880764Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:43.880878Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:43.880932Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:43.880965Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:43.880995Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit 
heartbeats: at tablet# 9437184 2025-06-25T15:01:43.881242Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.881307Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.881595Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:43.881692Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:43.881745Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:43.881781Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:43.881821Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:43.881853Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:43.881894Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:43.881941Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:43.881979Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:43.882055Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.882090Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.882137Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:43.885577Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:43.885653Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:43.885730Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:43.885936Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:43.886004Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:43.886070Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:43.886123Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 
2025-06-25T15:01:43.886154Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:43.886190Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:43.886225Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:43.886599Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:43.886638Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:43.886681Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:43.886725Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:43.886800Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:43.886834Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:43.886866Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:43.886893Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:43.886924Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:43.901148Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:43.901233Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:43.901264Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:43.901309Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:43.901365Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:43.901789Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.901833Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.901887Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:43.902007Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:43.902034Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:43.902160Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to 
execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:43.902203Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:43.902234Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:43.902268Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:43.905586Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:43.905647Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:43.905852Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.905891Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.905937Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:43.905972Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:43.906005Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:43.906042Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:43.906075Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
RD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-25T15:01:48.950884Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:48.950979Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:48.951028Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:143] at 9437186 on unit CompleteOperation 2025-06-25T15:01:48.951064Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 143] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:01:48.951104Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-25T15:01:48.951125Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:48.951210Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:48.951230Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:146] at 9437186 on unit CompleteOperation 2025-06-25T15:01:48.951257Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 146] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:01:48.951318Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:48.951408Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:48.951439Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:149] at 9437186 on unit CompleteOperation 2025-06-25T15:01:48.951469Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 149] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:01:48.951493Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:48.951577Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:48.951597Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437186 on unit CompleteOperation 2025-06-25T15:01:48.951622Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:01:48.951647Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:48.951835Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 119 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 39} 2025-06-25T15:01:48.951868Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event 
TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.951907Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 119 2025-06-25T15:01:48.952006Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-06-25T15:01:48.952034Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.952056Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 151 2025-06-25T15:01:48.952135Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 122 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 40} 2025-06-25T15:01:48.952154Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.952173Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 122 2025-06-25T15:01:48.952213Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-25T15:01:48.952236Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.952256Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 152 2025-06-25T15:01:48.953954Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 125 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 41} 2025-06-25T15:01:48.953996Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.954030Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 125 2025-06-25T15:01:48.954106Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-25T15:01:48.954149Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.954174Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 2025-06-25T15:01:48.954292Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-25T15:01:48.954338Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event 
TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.954367Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-25T15:01:48.954482Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 128 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 42} 2025-06-25T15:01:48.954511Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.954538Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 128 2025-06-25T15:01:48.954608Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-25T15:01:48.954632Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.954652Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-25T15:01:48.954719Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-25T15:01:48.954762Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.954798Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 2025-06-25T15:01:48.954894Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 131 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 43} 2025-06-25T15:01:48.954917Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.954939Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 131 2025-06-25T15:01:48.955018Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2025-06-25T15:01:48.955042Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.955064Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2025-06-25T15:01:48.955126Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-25T15:01:48.955148Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event 
TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.955168Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 2025-06-25T15:01:48.955230Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-25T15:01:48.955273Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.955297Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-25T15:01:48.955409Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-25T15:01:48.955439Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:48.955468Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 >> DataShardOutOfOrder::TestOutOfOrderLockLost [GOOD] >> DataShardOutOfOrder::TestOutOfOrderNoBarrierRestartImmediateLongTail >> BackupPathTest::ChecksumsForSchemaMappingFiles [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::ReplySizeExceeded [GOOD] Test command err: Trying to start YDB, gRPC: 3241, MsgBus: 8341 2025-06-25T15:00:58.842588Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902151985998793:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:58.854206Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001409/r3tmp/tmp8eHfJc/pdisk_1.dat 2025-06-25T15:00:59.222810Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3241, node 1 2025-06-25T15:00:59.257597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:59.257952Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:59.267793Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:59.308935Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:59.308958Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:59.308965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:59.309108Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8341 TClient is connected to server localhost:8341 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:59.799294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:59.820410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:00:59.841856Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:59.843314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:03.842245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902151985998793:2141];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:03.842299Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:08.117116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902194935673199:2411], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:08.117143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902194935673207:2414], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:08.117188Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:08.120816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:08.133420Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902194935673213:2415], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T15:01:08.240666Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902194935673264:2969] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:08.612139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:14.160403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:01:14.160435Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Trying to start YDB, gRPC: 14321, MsgBus: 27714 2025-06-25T15:01:19.385077Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902242416790027:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:19.388744Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001409/r3tmp/tmp7dvPCX/pdisk_1.dat 2025-06-25T15:01:19.513863Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:19.513963Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:19.518510Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:19.519063Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14321, node 2 2025-06-25T15:01:19.621485Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:19.621511Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:19.621519Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:19.621623Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27714 TClient is connected to server localhost:27714 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:20.092079Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:20.108133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:20.396937Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:24.385359Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902242416790027:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:24.385423Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:29.430884Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902285366464479:2413], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissi ... 05706:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:39.592372Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001409/r3tmp/tmpOILjRu/pdisk_1.dat 2025-06-25T15:01:39.751256Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:39.751346Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:39.753191Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:39.753343Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:39.754362Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519902327365105676:2080] 1750863699591553 != 1750863699591556 TServer::EnableGrpc on GrpcPort 9983, node 4 2025-06-25T15:01:39.821823Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:39.821845Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:39.821856Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:39.821988Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1790 TClient is connected to server localhost:1790 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:40.362620Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:01:40.386377Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:40.475382Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:40.623143Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:40.675938Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:40.754133Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:43.278355Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902344544976502:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:43.278450Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:43.346445Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:43.393037Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:43.437663Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:43.477025Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:43.552577Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:43.645069Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:43.685636Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:43.755569Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902344544977172:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:43.755654Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:43.756064Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902344544977177:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:43.760228Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:43.780533Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902344544977179:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:43.883862Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902344544977230:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:44.593132Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902327365105706:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:44.593202Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:45.032225Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:48.312434Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=MmRhNWE4MzQtYTVhYmNjZmUtY2YxOGQyNmMtZDY2N2YyODY=, ActorId: [4:7519902348839944796:2474], ActorState: ExecuteState, TraceId: 01jyksqzsd94npk9kmd77hn5vx, Create QueryResponse for error on request, msg: >> DataShardOutOfOrder::TestShardRestartNoUndeterminedImmediate >> DataShardTxOrder::ForceOnlineBetweenOnline_oo8 [GOOD] |92.2%| [TA] $(B)/ydb/core/tx/schemeshard/ut_index/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline_Init_oo8 [GOOD] Test command err: 2025-06-25T15:01:43.664843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:43.664903Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:43.668585Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:43.680842Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:43.681335Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:43.681601Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:43.700023Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:43.758316Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:43.758528Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:43.760906Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:43.760993Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:43.761058Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:43.762160Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:43.762282Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:43.762360Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:43.847300Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:43.878553Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:43.878756Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:43.878865Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:43.878913Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:43.878963Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:43.878996Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:43.879234Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.879292Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event 
TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.879622Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:43.879721Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:43.879778Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:43.879808Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:43.879855Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:43.879889Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:43.879936Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:43.879969Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:43.880008Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:43.880104Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.880149Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.880199Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:43.884689Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:43.884747Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:43.884816Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:43.885104Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:43.885174Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:43.885221Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:43.885271Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:43.885300Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:43.885329Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:43.885362Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying 
to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:43.885668Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:43.885731Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:43.885763Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:43.885800Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:43.885861Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:43.885894Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:43.885930Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:43.885969Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:43.885992Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:43.898464Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:43.898532Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:43.898570Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:43.898622Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:43.898677Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:43.900867Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.900946Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.900994Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:43.901149Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:43.901183Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:43.901315Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:43.901353Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-25T15:01:43.901389Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:43.901421Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:43.910068Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:43.910150Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:43.910418Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.910472Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.910535Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:43.910575Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:43.910605Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:43.910645Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2:1] in PlanQueue unit at 9437184 2025-06-25T15:01:43.910680Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2:1] at 9437184 on unit PlanQueue 2025-06-25T15:01:43. ... 967Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:49.200036Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:49.200054Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:143] at 9437186 on unit CompleteOperation 2025-06-25T15:01:49.200083Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 143] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:01:49.200100Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:49.200156Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:49.200177Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:146] at 9437186 on unit CompleteOperation 2025-06-25T15:01:49.200201Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 146] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:01:49.200218Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:49.200274Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:49.200289Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:149] at 9437186 on unit CompleteOperation 2025-06-25T15:01:49.200337Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 149] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:01:49.200365Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at 
tablet# 9437186 2025-06-25T15:01:49.200441Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:49.200466Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:152] at 9437186 on unit CompleteOperation 2025-06-25T15:01:49.200497Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 152] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:01:49.200519Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:49.200720Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 110 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 36} 2025-06-25T15:01:49.200744Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.200763Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 110 2025-06-25T15:01:49.200800Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 6 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-25T15:01:49.200826Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.200847Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 2025-06-25T15:01:49.200889Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 113 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 37} 2025-06-25T15:01:49.200903Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.200925Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 113 2025-06-25T15:01:49.201002Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 116 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 38} 2025-06-25T15:01:49.201016Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.201030Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 116 2025-06-25T15:01:49.201065Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-25T15:01:49.201088Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.201104Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 
9437186 txId 137 2025-06-25T15:01:49.201153Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-25T15:01:49.201166Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.201178Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-25T15:01:49.201216Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 119 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 39} 2025-06-25T15:01:49.201229Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.201251Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 119 2025-06-25T15:01:49.201297Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-25T15:01:49.201321Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.201338Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-06-25T15:01:49.201385Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-25T15:01:49.201397Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.201409Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-25T15:01:49.201456Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-25T15:01:49.201474Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.201485Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-25T15:01:49.201531Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-25T15:01:49.201583Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.201614Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 2025-06-25T15:01:49.201650Z 
node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 122 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 40} 2025-06-25T15:01:49.201663Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.201676Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 122 2025-06-25T15:01:49.201728Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 125 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 41} 2025-06-25T15:01:49.201752Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.201765Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 125 2025-06-25T15:01:49.201809Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 128 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 42} 2025-06-25T15:01:49.201826Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.201844Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 128 2025-06-25T15:01:49.201899Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 131 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 43} 2025-06-25T15:01:49.201914Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.201936Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 131 2025-06-25T15:01:49.201982Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:460:2399], Recipient [1:239:2230]: {TEvReadSet step# 6 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2025-06-25T15:01:49.201995Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:49.202006Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 >> DataShardTxOrder::ForceOnlineBetweenOnline [GOOD] >> DataShardOutOfOrder::UncommittedReads [GOOD] >> DataShardTxOrder::RandomPointsAndRanges >> DataShardOutOfOrder::TestOutOfOrderRestartLocksReorderedWithoutBarrier [GOOD] >> DataShardTxOrder::ImmediateBetweenOnline_oo8_dirty [GOOD] >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit+UseSink >> DataShardOutOfOrder::UncommittedReadSetAck >> DataShardTxOrder::ImmediateBetweenOnline [GOOD] >> DataShardOutOfOrder::TestSnapshotReadAfterStuckRW [GOOD] >> DataShardTxOrder::ReadWriteReorder [GOOD] >> DataShardTxOrder::RandomPoints_DelayRS >> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop+UseSink 
>> BackupRestoreS3::PrefixedVectorIndex [GOOD] >> DataShardTxOrder::RandomPoints_ReproducerDelayData1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ForceOnlineBetweenOnline [GOOD] Test command err: 2025-06-25T15:01:43.657583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:43.657635Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:43.665053Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:43.685435Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:43.686009Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:43.686327Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:43.701748Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:43.757488Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:43.757703Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:43.760935Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:43.761007Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:43.761069Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:43.762035Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:43.762144Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:43.762212Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:43.837099Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:43.881179Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:43.881349Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:43.881467Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:43.881524Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:43.881566Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:43.881600Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:43.881811Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.881865Z node 1 :TX_DATASHARD 
TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.882110Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:43.882217Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:43.882282Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:43.882321Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:43.882362Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:43.882395Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:43.882445Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:43.882478Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:43.882517Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:43.882596Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.882633Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.882683Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:43.885858Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:43.885929Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:43.886002Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:43.886154Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:43.886215Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:43.886269Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:43.886329Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:43.886370Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:43.886406Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:43.886441Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:43.886762Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:43.886792Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:43.886819Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:43.886852Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:43.886899Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:43.886923Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:43.886955Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:43.886978Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:43.886997Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:43.899771Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:43.899850Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:43.899890Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:43.899968Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:43.900040Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:43.901094Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.901179Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.901222Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:43.901376Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:43.901406Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:43.901535Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:43.901585Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:43.901624Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on 
unit WaitForPlan 2025-06-25T15:01:43.901677Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:43.910621Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:43.910698Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:43.910907Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.910955Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.911009Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:43.911047Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:43.911098Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:43.911138Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:43.911172Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... eadset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-25T15:01:50.166086Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletProducer# 9437185 ReadSet.Size()# 7 Seqno# 100 Flags# 0} 2025-06-25T15:01:50.166110Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:50.166130Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437184 on unit StoreAndSendOutRS 2025-06-25T15:01:50.166155Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 50 at 9437184 from 9437184 to 9437186 txId 152 2025-06-25T15:01:50.166197Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:50.166222Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437184 on unit CompleteOperation 2025-06-25T15:01:50.166248Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:01:50.166284Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:50.166384Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:50.166403Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:153] at 9437184 on unit CompleteOperation 2025-06-25T15:01:50.166428Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 153] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:01:50.166451Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: 
[CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:50.166540Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:50.166567Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:154] at 9437184 on unit CompleteOperation 2025-06-25T15:01:50.166593Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 154] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:01:50.166634Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:50.166782Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:349:2314]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-25T15:01:50.166809Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.166836Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 149 2025-06-25T15:01:50.167002Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:349:2314]: {TEvReadSet step# 1000005 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-06-25T15:01:50.167028Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.167050Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 151 2025-06-25T15:01:50.167103Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [1:239:2230], Recipient [1:461:2400]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletProducer# 9437184 ReadSet.Size()# 7 Seqno# 50 Flags# 0} 2025-06-25T15:01:50.167132Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:01:50.167162Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 9437186 source 9437184 dest 9437186 producer 9437184 txId 152 2025-06-25T15:01:50.167232Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 9437186 got read set: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletProducer# 9437184 ReadSet.Size()# 7 Seqno# 50 Flags# 0} 2025-06-25T15:01:50.167297Z node 1 :TX_DATASHARD TRACE: operation.cpp:67: Filled readset for [1000005:152] from=9437184 to=9437186origin=9437184 2025-06-25T15:01:50.167354Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2025-06-25T15:01:50.167452Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:349:2314]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-25T15:01:50.167475Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.167494Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 
9437185 dest 9437184 consumer 9437184 txId 152 2025-06-25T15:01:50.167568Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:461:2400], Recipient [1:461:2400]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:50.167595Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:50.167629Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437186 2025-06-25T15:01:50.167660Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:01:50.167689Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000005:152] at 9437186 for LoadAndWaitInRS 2025-06-25T15:01:50.167716Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:152] at 9437186 on unit LoadAndWaitInRS 2025-06-25T15:01:50.167749Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:152] at 9437186 is Executed 2025-06-25T15:01:50.167776Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:152] at 9437186 executing on unit LoadAndWaitInRS 2025-06-25T15:01:50.167810Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:152] at 9437186 to execution unit ExecuteDataTx 2025-06-25T15:01:50.167840Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:152] at 9437186 on unit ExecuteDataTx 2025-06-25T15:01:50.168325Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000005:152] at tablet 9437186 with status COMPLETE 2025-06-25T15:01:50.168374Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000005:152] at 9437186: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 5, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T15:01:50.168422Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:152] at 9437186 is ExecutedNoMoreRestarts 2025-06-25T15:01:50.168464Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:152] at 9437186 executing on unit ExecuteDataTx 2025-06-25T15:01:50.168496Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:152] at 9437186 to execution unit CompleteOperation 2025-06-25T15:01:50.168524Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:152] at 9437186 on unit CompleteOperation 2025-06-25T15:01:50.168711Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:152] at 9437186 is DelayComplete 2025-06-25T15:01:50.168735Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:152] at 9437186 executing on unit CompleteOperation 2025-06-25T15:01:50.168769Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:152] at 9437186 to execution unit CompletedOperations 2025-06-25T15:01:50.168800Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:152] at 9437186 on unit CompletedOperations 2025-06-25T15:01:50.168828Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:152] at 9437186 
is Executed 2025-06-25T15:01:50.168846Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:152] at 9437186 executing on unit CompletedOperations 2025-06-25T15:01:50.168868Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:152] at 9437186 has finished 2025-06-25T15:01:50.168895Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:50.168916Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-25T15:01:50.168942Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437186 has no attached operations 2025-06-25T15:01:50.168984Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-06-25T15:01:50.169173Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:349:2314]: {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-25T15:01:50.169202Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.169231Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 2025-06-25T15:01:50.183882Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:50.183936Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437186 on unit CompleteOperation 2025-06-25T15:01:50.183986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T15:01:50.184042Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-25T15:01:50.184080Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:50.184366Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-25T15:01:50.184424Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.184458Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::UncommittedReads [GOOD] Test command err: 2025-06-25T15:01:47.824101Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:47.824247Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:47.824340Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f67/r3tmp/tmpx775un/pdisk_1.dat 2025-06-25T15:01:48.148705Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:48.152145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:48.197753Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:48.202877Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863705182468 != 1750863705182472 2025-06-25T15:01:48.254162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:48.254280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:48.265536Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:48.346519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:48.392974Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:48.393775Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:48.394148Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:01:48.394342Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:48.435060Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:48.435637Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:48.435743Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:48.437256Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:01:48.437349Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:48.437403Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:48.437736Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:48.437843Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:48.437903Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:01:48.438216Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:48.487410Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:48.487583Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:48.487653Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:01:48.487678Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:48.487717Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:48.487756Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:48.487955Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.488020Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.488440Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:01:48.488542Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:01:48.488597Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:48.488631Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:48.488678Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:01:48.488715Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:48.488756Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:48.488798Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:01:48.488852Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:48.489218Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:48.489258Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:48.489297Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:01:48.489368Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:01:48.489404Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:48.489488Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:01:48.489671Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:01:48.489725Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:01:48.489826Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:01:48.489885Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:48.489930Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:01:48.489966Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:01:48.490005Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:48.490257Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:48.490290Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:01:48.490344Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:01:48.490381Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:48.490430Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:01:48.490463Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:01:48.490501Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:01:48.490532Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:01:48.490556Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:01:48.491343Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:01:48.491388Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:48.491434Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:48.491471Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-25T15:01:48.491524Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:48.493774Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:01:48.493820Z ... arsing write transaction for 0 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxMode: MODE_IMMEDIATE 2025-06-25T15:01:49.912834Z node 1 :TX_DATASHARD TRACE: datashard_write_operation.cpp:213: Table /Root/table-1, shard: 72075186224037888, write point (Uint32 : 4) 2025-06-25T15:01:49.912893Z node 1 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint32 : 4) table: [72057594046644480:2:1] 2025-06-25T15:01:49.912973Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite 2025-06-25T15:01:49.913024Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T15:01:49.913063Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite 2025-06-25T15:01:49.913109Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:01:49.913143Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:01:49.913196Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2500/18446744073709551615 ImmediateWriteEdgeReplied# v2500/18446744073709551615 2025-06-25T15:01:49.913247Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-25T15:01:49.913297Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T15:01:49.913323Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:01:49.913343Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteWrite 2025-06-25T15:01:49.913372Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 
72075186224037888 on unit ExecuteWrite 2025-06-25T15:01:49.913405Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:6] at 72075186224037888 2025-06-25T15:01:49.913464Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2500/18446744073709551615 ImmediateWriteEdgeReplied# v2500/18446744073709551615 2025-06-25T15:01:49.913551Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:6] at 72075186224037888, row count=1 2025-06-25T15:01:49.913595Z node 1 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-25T15:01:49.913648Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:49.913679Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite 2025-06-25T15:01:49.913713Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-25T15:01:49.913745Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:01:49.913773Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is DelayComplete 2025-06-25T15:01:49.913799Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-25T15:01:49.913856Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:01:49.913906Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:01:49.913939Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T15:01:49.913960Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:01:49.913984Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished ... blocked commit for tablet 72075186224037888 2025-06-25T15:01:50.044556Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyksr1ptb2y9zb72nq1deys9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTdhMTdhNWUtZjg0OGFiYjMtY2ExOGMzZjgtYTM3ZTY5OTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:01:50.046317Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [1:930:2724], Recipient [1:627:2531]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-25T15:01:50.046453Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:01:50.046509Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2500/18446744073709551615 ImmediateWriteEdgeReplied# v2500/18446744073709551615 2025-06-25T15:01:50.046550Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v2500/18446744073709551615 2025-06-25T15:01:50.046604Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-25T15:01:50.046684Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:01:50.046718Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:01:50.046757Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:01:50.046817Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:01:50.046865Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-25T15:01:50.046903Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:01:50.046926Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:01:50.046947Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:01:50.046980Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:01:50.047075Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-25T15:01:50.047283Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is DelayComplete 2025-06-25T15:01:50.047318Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:01:50.047365Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:01:50.047395Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit 
CompletedOperations 2025-06-25T15:01:50.047442Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:01:50.047464Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:01:50.047491Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-25T15:01:50.047533Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:01:50.133067Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [1:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 1 LatestStep: 3000 2025-06-25T15:01:50.133192Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [1:25:2072] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 3000} 2025-06-25T15:01:50.280410Z node 1 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-25T15:01:50.280481Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:01:50.280538Z node 1 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 1000 ms, status: STATUS_COMPLETED 2025-06-25T15:01:50.280631Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:50.280762Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T15:01:50.280806Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:01:50.280857Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[1:930:2724], 0} after executionsCount# 1 2025-06-25T15:01:50.280918Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[1:930:2724], 0} sends rowCount# 4, bytes# 128, quota rows left# 997, quota bytes left# 5242752, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:01:50.281066Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[1:930:2724], 0} finished in read 2025-06-25T15:01:50.283704Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [1:930:2724], Recipient [1:627:2531]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:01:50.283795Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 3 } }, { items { uint32_value: 4 } items { uint32_value: 4 } } >> EncryptedBackupParamsValidationTest::NoSourcePrefix [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline_oo8_dirty [GOOD] Test command err: 2025-06-25T15:01:45.718760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to 
console configs 2025-06-25T15:01:45.718806Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:45.720839Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:45.734085Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:45.734420Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:45.734617Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:45.743631Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:45.788051Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:45.788231Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:45.789745Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:45.789845Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:45.789894Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:45.790432Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:45.790517Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:45.790575Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:45.852876Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:45.882356Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:45.882535Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:45.882643Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:45.882701Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:45.882734Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:45.882771Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:45.882950Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:45.883015Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:45.883264Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:45.883357Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 
9437184 2025-06-25T15:01:45.883409Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:45.883444Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:45.883483Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:45.883512Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:45.883552Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:45.883583Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:45.883616Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:45.883708Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:45.883740Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:45.883783Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:45.886358Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\001J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:45.886408Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:45.886474Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:45.886629Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:45.886696Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:45.886746Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:45.886798Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:45.886828Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:45.886864Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:45.886908Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:45.887239Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:45.887268Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan 
for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:45.887297Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:45.887333Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:45.887392Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:45.887422Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:45.887457Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:45.887484Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:45.887509Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:45.904661Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:45.904717Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:45.904748Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:45.904797Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:45.904849Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:45.905393Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:45.905451Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:45.905490Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:45.905633Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:45.905662Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:45.905764Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:45.905802Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:45.905833Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:45.905864Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:45.912283Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } 
Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:45.912364Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:45.912547Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:45.912590Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:45.912644Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:45.912679Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:45.912711Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:45.912748Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:45.912781Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 7Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:51.100288Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:143] at 9437186 on unit CompleteOperation 2025-06-25T15:01:51.100325Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 143] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:01:51.100358Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:51.100471Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:51.100494Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:146] at 9437186 on unit CompleteOperation 2025-06-25T15:01:51.100521Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 146] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:01:51.100550Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:51.100689Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:51.100714Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:149] at 9437186 on unit CompleteOperation 2025-06-25T15:01:51.100744Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 149] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:01:51.100767Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:51.100860Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:51.100881Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437186 on unit CompleteOperation 2025-06-25T15:01:51.100930Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437186 at tablet 
9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:01:51.100959Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:51.101164Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-25T15:01:51.101200Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.101230Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 152 2025-06-25T15:01:51.101330Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 113 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 37} 2025-06-25T15:01:51.101353Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.101380Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 113 2025-06-25T15:01:51.101434Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-25T15:01:51.101456Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.101478Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 2025-06-25T15:01:51.101537Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 116 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 38} 2025-06-25T15:01:51.101562Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.101603Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 116 2025-06-25T15:01:51.101694Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 119 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 39} 2025-06-25T15:01:51.101728Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.101755Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 119 2025-06-25T15:01:51.101822Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-25T15:01:51.101853Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: 
StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.101881Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-25T15:01:51.101944Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 122 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 40} 2025-06-25T15:01:51.101963Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.101984Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 122 2025-06-25T15:01:51.102057Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-25T15:01:51.102085Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.102117Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-06-25T15:01:51.102208Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-25T15:01:51.102244Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.102267Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-25T15:01:51.102317Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-25T15:01:51.102338Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.102358Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-25T15:01:51.102441Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-25T15:01:51.102464Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.102486Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 2025-06-25T15:01:51.102547Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 125 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 41} 2025-06-25T15:01:51.102568Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: 
StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.102602Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 125 2025-06-25T15:01:51.102691Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 128 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 42} 2025-06-25T15:01:51.102716Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.102738Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 128 2025-06-25T15:01:51.102820Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 131 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 43} 2025-06-25T15:01:51.102844Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.102864Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 131 2025-06-25T15:01:51.102909Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2025-06-25T15:01:51.102942Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.102977Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2025-06-25T15:01:51.103057Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-25T15:01:51.103079Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.103124Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ForceOnlineBetweenOnline_oo8 [GOOD] Test command err: 2025-06-25T15:01:43.654847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:43.654924Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:43.656872Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:43.681274Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:43.681741Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:43.681996Z node 1 
:TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:43.700078Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:43.768708Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:43.768890Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:43.770433Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:43.770497Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:43.770556Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:43.770855Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:43.770924Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:43.770985Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:43.845782Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:43.888324Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:43.888542Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:43.888694Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:43.888775Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:43.888816Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:43.888853Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:43.889115Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.889193Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.889485Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:43.889591Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:43.889652Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:43.889691Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:43.889733Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:43.889769Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:43.889815Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 
2025-06-25T15:01:43.889849Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:43.889886Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:43.889966Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.890002Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.890052Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:43.892907Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:43.892969Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:43.893053Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:43.893227Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:43.893288Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:43.893348Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:43.893412Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:43.893446Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:43.893492Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:43.893526Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:43.893871Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:43.893908Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:43.893954Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:43.894004Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:43.894066Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:43.894114Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:43.894154Z node 1 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:43.894185Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:43.894209Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:43.906976Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:43.907040Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:43.907093Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:43.907149Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:43.907207Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:43.907683Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.907738Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.907787Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:43.907915Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:43.907947Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:43.908069Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:43.908113Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:43.908151Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:43.908185Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:43.911902Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:43.911969Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:43.912142Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.912180Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.912232Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute 
at 9437184 2025-06-25T15:01:43.912269Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:43.912301Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:43.912373Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:43.912432Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... # 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-25T15:01:50.226838Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:50.227099Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-25T15:01:50.227141Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.227179Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 149 2025-06-25T15:01:50.227277Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:50.227303Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:137] at 9437186 on unit CompleteOperation 2025-06-25T15:01:50.227339Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 137] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 2 ms, propose latency: 3 ms 2025-06-25T15:01:50.227379Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-25T15:01:50.227405Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:50.227506Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2025-06-25T15:01:50.227572Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:50.227603Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:140] at 9437186 on unit CompleteOperation 2025-06-25T15:01:50.227654Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 140] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 2 ms, propose latency: 3 ms 2025-06-25T15:01:50.227729Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-25T15:01:50.227762Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:50.227876Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2025-06-25T15:01:50.227909Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2025-06-25T15:01:50.227930Z node 1 :TX_DATASHARD 
DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:50.227952Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:143] at 9437186 on unit CompleteOperation 2025-06-25T15:01:50.227982Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 143] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 2 ms, propose latency: 3 ms 2025-06-25T15:01:50.228026Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-25T15:01:50.228048Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:50.228148Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2025-06-25T15:01:50.228174Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2025-06-25T15:01:50.228215Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:50.228240Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:146] at 9437186 on unit CompleteOperation 2025-06-25T15:01:50.228272Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 146] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 2 ms, propose latency: 3 ms 2025-06-25T15:01:50.228326Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-25T15:01:50.228357Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:50.228490Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:50.228516Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:149] at 9437186 on unit CompleteOperation 2025-06-25T15:01:50.228550Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 149] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 2 ms, propose latency: 3 ms 2025-06-25T15:01:50.228590Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-25T15:01:50.228636Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:50.228763Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:50.228809Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437186 on unit CompleteOperation 2025-06-25T15:01:50.228844Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 2 ms, propose latency: 3 ms 2025-06-25T15:01:50.228887Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 
2025-06-25T15:01:50.228912Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:50.229113Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-25T15:01:50.229148Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.229178Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 2025-06-25T15:01:50.229286Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-25T15:01:50.229313Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.229374Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-25T15:01:50.229445Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 151 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 98} 2025-06-25T15:01:50.229473Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.229518Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 151 2025-06-25T15:01:50.229612Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-25T15:01:50.229638Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.229695Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-06-25T15:01:50.229746Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 99} 2025-06-25T15:01:50.229771Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.229792Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 152 2025-06-25T15:01:50.229866Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-25T15:01:50.229890Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.229912Z node 
1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-25T15:01:50.229970Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 154 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 100} 2025-06-25T15:01:50.230001Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.230025Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 154 2025-06-25T15:01:50.230131Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-25T15:01:50.230167Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.230191Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-25T15:01:50.230273Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:461:2400], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-25T15:01:50.230301Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:50.230325Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 >> DataShardTxOrder::ZigZag [GOOD] >> DataShardOutOfOrder::TestReadTableSingleShardImmediate [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalAsync [GOOD] >> DataShardOutOfOrder::TestUnprotectedReadsThenWriteVisibility [GOOD] >> DataShardOutOfOrder::TestReadTableWriteConflict [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalUnique [GOOD] >> BackupRestore::PrefixedVectorIndex >> DataShardOutOfOrder::TestSecondaryClearanceAfterShardRestartRace |92.2%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::PrefixedVectorIndex [GOOD] Test command err: 2025-06-25T14:59:27.290255Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901762296016949:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:27.290322Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpF36Mno/pdisk_1.dat 2025-06-25T14:59:27.725061Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:27.734755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:27.734835Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 8493, node 1 2025-06-25T14:59:27.746811Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:27.857102Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:27.857124Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:27.857131Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:27.857236Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10707 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:28.256600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:59:28.302283Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:29.905617Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901770885952532:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.905717Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.161442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:30.308683Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901775180920003:2308], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.308773Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.308914Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901775180920008:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.312442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:30.327196Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901775180920010:2312], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T14:59:30.387837Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901775180920085:2808] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:30.666959Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710661. Ctx: { TraceId: 01jyksksc32gvpaewan9v82ftq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNhZTVjMGEtMWFjNjViYmYtY2RhZDBiOTEtZjI2MzUyNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:59:30.908916Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710662. Ctx: { TraceId: 01jyksksrkah8qsr4ebrb6ak46, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNhZTVjMGEtMWFjNjViYmYtY2RhZDBiOTEtZjI2MzUyNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/"Create temporary directory "/Root/~backup_20250625T145930" in databaseProcess "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/table"Copy tables: { src: "/Root/table", dst: "/Root/~backup_20250625T145930/table" }Describe table "/Root/table"Describe table "/Root/~backup_20250625T145930/table"Backup table "/Root/~backup_20250625T145930/table" to "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/table"Write scheme into "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/table/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/table/permissions.pb"Read table "/Root/~backup_20250625T145930/table"Write data into "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/table/data_00.csv"Drop table "/Root/~backup_20250625T145930/table"Remove temporary directory "/Root/~backup_20250625T145930" in database2025-06-25T14:59:31.236919Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-25T14:59:31.267343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:66) Backup completed successfully2025-06-25T14:59:31.387056Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found Restore "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/" to "/Root"Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/table"}]Process "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/table"Read scheme from "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/table/scheme.pb"Restore table "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/table" to "/Root/table"2025-06-25T14:59:31.449744Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Created "/Root/table"Read data from "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/table/data_00.csv"2025-06-25T14:59:31.608608Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710671. Ctx: { TraceId: 01jykskthhe9d0z9x4zy91c7vs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTI3YjQxM2UtMTcyYTFkOWQtOWY5YmEzMmYtYTUzMjQ3MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/table" to "/Root/table"Read ACL from "/home/runner/.ya/build/build_root/yft8/001483/r3tmp/tmpdEoHem/table/permissions.pb"2025-06-25T14:59:31.675975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) Restore completed successfully2025-06-25T14:59:31.817461Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710673. Ctx: { TraceId: 01jyksktqk1j0sbc61r2vjz47a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNhZTVjMGEtMWFjNjViYmYtY2RhZDBiOTEtZjI2MzUyNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T14:59:32.951711Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519901785429059062:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:32.951763Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yf ... 
t@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "table" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Group" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 0 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } } TableIndexes { Name: "value_idx" LocalPathId: 10 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "Group" KeyColumnNames: "Value" SchemaVersion: 2 PathOwnerId: 72057594046644480 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 
CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 
ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: SIMILARITY_INNER_PRODUCT vector_type: VECTOR_TYPE_FLOAT vector_dimension: 768 } clusters: 80 levels: 2 } } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 
PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 9 PathOwnerId: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestSnapshotReadAfterStuckRW [GOOD] Test command err: 2025-06-25T15:01:48.296977Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:48.297106Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:48.297171Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f55/r3tmp/tmp12aGB3/pdisk_1.dat 2025-06-25T15:01:48.612783Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:48.616193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:48.674132Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:48.679454Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863705631373 != 1750863705631377 2025-06-25T15:01:48.728683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:48.728840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:48.740438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:48.824118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:48.860279Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:48.861260Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:48.861605Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:01:48.861778Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:48.894283Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:48.895223Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:48.895394Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:48.897185Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:01:48.897300Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:48.897360Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:48.897739Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:48.897868Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:48.897945Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:01:48.908926Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:48.951128Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:48.951336Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:48.951444Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:01:48.951480Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:48.951539Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:48.951588Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:48.951789Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.951844Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.952209Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:01:48.952304Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:01:48.952387Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:48.952424Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:48.952474Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:01:48.952527Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:48.952569Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:48.952618Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:01:48.952664Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:48.953056Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:48.953099Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:48.953154Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:01:48.953231Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:01:48.953272Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:48.953349Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:01:48.953533Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:01:48.953599Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:01:48.953692Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:01:48.953747Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:48.953799Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:01:48.953836Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:01:48.953876Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:48.954210Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:48.954262Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:01:48.954298Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:01:48.954340Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:48.954398Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:01:48.954447Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:01:48.954487Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:01:48.954524Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:01:48.954552Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:01:48.955651Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:01:48.955690Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:01:48.968246Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:01:48.968351Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:48.968391Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:48.968441Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... d_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715664 ... performing the first select 2025-06-25T15:01:51.470853Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyksr2w700q6pt9f5td0n7fa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTZlZDE0MmUtNWExOWJlMDUtOGNiNWVhZmItMTliY2JhMDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:51.474826Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [1:1036:2806], Recipient [1:627:2531]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715665 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC KeysSize: 1 2025-06-25T15:01:51.475031Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [1:1038:2807], Recipient [1:713:2591]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 3 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715665 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC KeysSize: 1 2025-06-25T15:01:51.475361Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:01:51.475439Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CheckRead 2025-06-25T15:01:51.475511Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-25T15:01:51.475548Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:01:51.475609Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:01:51.475639Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:01:51.475686Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037888 2025-06-25T15:01:51.475719Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-25T15:01:51.475747Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:01:51.475788Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:01:51.475808Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:01:51.475917Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715665 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-25T15:01:51.476158Z node 1 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715665, counter# 1 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T15:01:51.476214Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v4000/18446744073709551615 2025-06-25T15:01:51.476254Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[1:1036:2806], 0} after executionsCount# 1 2025-06-25T15:01:51.476297Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[1:1036:2806], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:01:51.476387Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[1:1036:2806], 0} finished in read 2025-06-25T15:01:51.476479Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-25T15:01:51.476511Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:01:51.476540Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:01:51.476580Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:01:51.476632Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-25T15:01:51.476655Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:01:51.476678Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-06-25T15:01:51.476717Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with 
status# Executed at tablet# 72075186224037888 2025-06-25T15:01:51.476812Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T15:01:51.476885Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-06-25T15:01:51.476923Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037889 on unit CheckRead 2025-06-25T15:01:51.476971Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037889 is Executed 2025-06-25T15:01:51.477001Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037889 executing on unit CheckRead 2025-06-25T15:01:51.477026Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-25T15:01:51.477047Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-25T15:01:51.477101Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:5] at 72075186224037889 2025-06-25T15:01:51.477127Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037889 is Executed 2025-06-25T15:01:51.477148Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-25T15:01:51.477167Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037889 to execution unit ExecuteRead 2025-06-25T15:01:51.477185Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037889 on unit ExecuteRead 2025-06-25T15:01:51.477277Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 3 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715665 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-25T15:01:51.477452Z node 1 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037889 Acquired lock# 281474976715665, counter# 1 for [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-25T15:01:51.477487Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037889 promoting UnprotectedReadEdge to v4000/18446744073709551615 2025-06-25T15:01:51.477522Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[1:1038:2807], 0} after executionsCount# 1 2025-06-25T15:01:51.477562Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[1:1038:2807], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:01:51.477611Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[1:1038:2807], 0} finished in read 2025-06-25T15:01:51.477651Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037889 is Executed 2025-06-25T15:01:51.477673Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037889 executing on unit ExecuteRead 2025-06-25T15:01:51.477708Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037889 to execution unit CompletedOperations 2025-06-25T15:01:51.477736Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037889 on unit CompletedOperations 2025-06-25T15:01:51.477767Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037889 is Executed 2025-06-25T15:01:51.477786Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037889 executing on unit CompletedOperations 2025-06-25T15:01:51.477804Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037889 has finished 2025-06-25T15:01:51.477845Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-25T15:01:51.477911Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-25T15:01:51.478104Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [1:63:2110], Recipient [1:627:2531]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715665 LockNode: 1 Status: STATUS_SUBSCRIBED 2025-06-25T15:01:51.478294Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [1:63:2110], Recipient [1:713:2591]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715665 LockNode: 1 Status: STATUS_SUBSCRIBED 2025-06-25T15:01:51.479319Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [1:1036:2806], Recipient [1:627:2531]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:01:51.479380Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-25T15:01:51.479436Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [1:1038:2807], Recipient [1:713:2591]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:01:51.479463Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline [GOOD] Test command err: 2025-06-25T15:01:46.495402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:46.495445Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:46.496408Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:46.507263Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:46.507722Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:46.507956Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 
2025-06-25T15:01:46.518176Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:46.560363Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:46.560538Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:46.561877Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:46.561936Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:46.561995Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:46.562284Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:46.562354Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:46.562404Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:46.622518Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:46.659354Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:46.659522Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:46.659617Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:46.659665Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:46.659710Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:46.659742Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:46.659928Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.659981Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.660213Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:46.660296Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:46.660372Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:46.660409Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:46.660450Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:46.660484Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:46.660527Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:46.660557Z node 1 :TX_DATASHARD INFO: 
datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:46.660592Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:46.660671Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.660704Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.660753Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:46.663564Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:46.663618Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:46.663681Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:46.663846Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:46.663912Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:46.663961Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:46.664011Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:46.664042Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:46.664089Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:46.664120Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:46.664414Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:46.664449Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:46.664495Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:46.664532Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:46.664587Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:46.664620Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:46.664658Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 
9437184 to execution unit WaitForPlan 2025-06-25T15:01:46.664685Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:46.664706Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:46.677362Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:46.677420Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:46.677458Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:46.677511Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:46.677565Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:46.678040Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.678089Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.678128Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:46.678267Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:46.678298Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:46.678399Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:46.678431Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:46.678464Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:46.678500Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:46.681806Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:46.681862Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:46.682022Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.682065Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.682114Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:46.682146Z node 1 
:TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:46.682174Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:46.682207Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:46.682236Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... [1000005:152] at 9437186 on unit CompletedOperations 2025-06-25T15:01:51.722012Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:152] at 9437186 is Executed 2025-06-25T15:01:51.722033Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:152] at 9437186 executing on unit CompletedOperations 2025-06-25T15:01:51.722056Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:152] at 9437186 has finished 2025-06-25T15:01:51.722085Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:51.722113Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-25T15:01:51.722139Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437186 has no attached operations 2025-06-25T15:01:51.722180Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-06-25T15:01:51.722397Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 107 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 35} 2025-06-25T15:01:51.722431Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.722455Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 107 2025-06-25T15:01:51.722520Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 97} 2025-06-25T15:01:51.722539Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.722561Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 149 2025-06-25T15:01:51.722647Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 110 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 36} 2025-06-25T15:01:51.722664Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.722678Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 110 2025-06-25T15:01:51.722737Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 
113 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 37} 2025-06-25T15:01:51.722762Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.722788Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 113 2025-06-25T15:01:51.722839Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 116 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 38} 2025-06-25T15:01:51.722860Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.722884Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 116 2025-06-25T15:01:51.722986Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 119 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 39} 2025-06-25T15:01:51.723004Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.723020Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 119 2025-06-25T15:01:51.723076Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 122 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 40} 2025-06-25T15:01:51.723093Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.723105Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 122 2025-06-25T15:01:51.723154Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 125 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 41} 2025-06-25T15:01:51.723178Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.723203Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 125 2025-06-25T15:01:51.723264Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 128 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 42} 2025-06-25T15:01:51.723278Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.723291Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 128 2025-06-25T15:01:51.723336Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 131 
TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 43} 2025-06-25T15:01:51.723354Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.723370Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 131 2025-06-25T15:01:51.723423Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2025-06-25T15:01:51.723437Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.723456Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2025-06-25T15:01:51.723504Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-25T15:01:51.723522Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.723548Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 2025-06-25T15:01:51.723630Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-25T15:01:51.723649Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.723666Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-25T15:01:51.723714Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-25T15:01:51.723738Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.723761Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-06-25T15:01:51.723823Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-25T15:01:51.723837Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.723850Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-25T15:01:51.723879Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 149 
TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-25T15:01:51.723898Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.723920Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-25T15:01:51.741170Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:01:51.741273Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437186 on unit CompleteOperation 2025-06-25T15:01:51.741342Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T15:01:51.741416Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-25T15:01:51.741457Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:01:51.741741Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-25T15:01:51.741781Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:01:51.741826Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ReadWriteReorder [GOOD] Test command err: 2025-06-25T15:01:48.068300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:48.068375Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:48.071630Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:48.085747Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:48.086328Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:48.086607Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:48.097156Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:48.141017Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:48.141206Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:48.143049Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:48.143122Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 
2025-06-25T15:01:48.143177Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:48.143520Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:48.143612Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:48.143692Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:48.218639Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:48.249958Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:48.250150Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:48.250258Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:48.250318Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:48.250355Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:48.250394Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:48.250631Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.250696Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.250915Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:48.251013Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:48.251075Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:48.251111Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:48.251155Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:48.251184Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:48.251220Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:48.251247Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:48.251277Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:48.251336Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:48.251362Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:48.251400Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# 
[1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:48.254292Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\n\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:48.254358Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:48.254438Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:48.254623Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:48.254686Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:48.254733Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:48.254772Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:48.254798Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:48.254831Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:48.254859Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:48.255145Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:48.255175Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:48.255199Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:48.255230Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:48.255277Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:48.255305Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:48.255336Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:48.255359Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:48.255379Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:48.273474Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:48.273548Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:48.273593Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:48.273648Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:48.273711Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:48.274219Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:48.274271Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:48.274316Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:48.274462Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:48.274495Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:48.274627Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:48.274673Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:48.291642Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:48.291701Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:48.294711Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:48.294786Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:48.295013Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.295061Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.295113Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:48.295159Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:48.295192Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:48.295233Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:48.295268Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000001: ... 
7185 executing on unit CompletedOperations 2025-06-25T15:01:51.777876Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:12] at 9437185 has finished 2025-06-25T15:01:51.777915Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:51.777953Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-25T15:01:51.778001Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-25T15:01:51.778043Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-25T15:01:51.778291Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:239:2230], Recipient [1:239:2230]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:51.778331Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:51.778432Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:51.778470Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:51.778497Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:51.778529Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000005:12] in PlanQueue unit at 9437184 2025-06-25T15:01:51.778569Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit PlanQueue 2025-06-25T15:01:51.778597Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-25T15:01:51.778623Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit PlanQueue 2025-06-25T15:01:51.778650Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit LoadTxDetails 2025-06-25T15:01:51.778686Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit LoadTxDetails 2025-06-25T15:01:51.779541Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437184 loaded tx from db 1000005:12 keys extracted: 3 2025-06-25T15:01:51.779592Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-25T15:01:51.779619Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit LoadTxDetails 2025-06-25T15:01:51.779649Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit FinalizeDataTxPlan 2025-06-25T15:01:51.779675Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit FinalizeDataTxPlan 2025-06-25T15:01:51.779709Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-25T15:01:51.779732Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit FinalizeDataTxPlan 2025-06-25T15:01:51.779752Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-25T15:01:51.779777Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit BuildAndWaitDependencies 2025-06-25T15:01:51.779836Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1000005:12] is the new logically complete end at 9437184 2025-06-25T15:01:51.779873Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1000005:12] is the new logically incomplete end at 9437184 2025-06-25T15:01:51.779913Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1000005:12] at 9437184 2025-06-25T15:01:51.779950Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-25T15:01:51.779972Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-25T15:01:51.779993Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit BuildDataTxOutRS 2025-06-25T15:01:51.780015Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit BuildDataTxOutRS 2025-06-25T15:01:51.780068Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-25T15:01:51.780087Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit BuildDataTxOutRS 2025-06-25T15:01:51.780108Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit StoreAndSendOutRS 2025-06-25T15:01:51.780147Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit StoreAndSendOutRS 2025-06-25T15:01:51.780174Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-25T15:01:51.780206Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit StoreAndSendOutRS 2025-06-25T15:01:51.780229Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit PrepareDataTxInRS 2025-06-25T15:01:51.780252Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit PrepareDataTxInRS 2025-06-25T15:01:51.780352Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-25T15:01:51.780380Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit PrepareDataTxInRS 2025-06-25T15:01:51.780401Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit LoadAndWaitInRS 2025-06-25T15:01:51.780426Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit LoadAndWaitInRS 2025-06-25T15:01:51.780448Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-25T15:01:51.879594Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit LoadAndWaitInRS 2025-06-25T15:01:51.879665Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit ExecuteDataTx 2025-06-25T15:01:51.879714Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit ExecuteDataTx 2025-06-25T15:01:51.880458Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000005:12] at tablet 9437184 with status COMPLETE 2025-06-25T15:01:51.880522Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000005:12] at 9437184: {NSelectRow: 3, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 3, SelectRowBytes: 24, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T15:01:51.880596Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-25T15:01:51.880629Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit ExecuteDataTx 2025-06-25T15:01:51.880667Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit CompleteOperation 2025-06-25T15:01:51.880698Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit CompleteOperation 2025-06-25T15:01:51.880894Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is DelayComplete 2025-06-25T15:01:51.880926Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit CompleteOperation 2025-06-25T15:01:51.880955Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:12] at 9437184 to execution unit CompletedOperations 2025-06-25T15:01:51.881009Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:12] at 9437184 on unit CompletedOperations 2025-06-25T15:01:51.881055Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:12] at 9437184 is Executed 2025-06-25T15:01:51.881077Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:12] at 9437184 executing on unit CompletedOperations 2025-06-25T15:01:51.881101Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:12] at 9437184 has finished 2025-06-25T15:01:51.881132Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:51.881400Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:51.881433Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:51.881463Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:51.895779Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437185 step# 1000005 txid# 12} 2025-06-25T15:01:51.895855Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437185 step# 1000005} 2025-06-25T15:01:51.895914Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-25T15:01:51.895959Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete 
execution for [1000005:12] at 9437185 on unit CompleteOperation 2025-06-25T15:01:51.896029Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 12] from 9437185 at tablet 9437185 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:01:51.896082Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-25T15:01:51.896394Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000005 txid# 12} 2025-06-25T15:01:51.896466Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000005} 2025-06-25T15:01:51.896505Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:51.896539Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:12] at 9437184 on unit CompleteOperation 2025-06-25T15:01:51.896585Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 12] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:01:51.896615Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ZigZag [GOOD] Test command err: 2025-06-25T15:01:45.805825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:45.805881Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:45.807205Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:45.819757Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:45.820211Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:45.820493Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:45.831425Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:45.874372Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:45.874558Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:45.876232Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:45.876341Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:45.876393Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:45.876774Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:45.876867Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:45.876933Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 
2025-06-25T15:01:45.940915Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:45.972860Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:45.973051Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:45.973165Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:45.973212Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:45.973248Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:45.973286Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:45.974000Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:45.974077Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:45.974333Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:45.974431Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:45.974496Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:45.974537Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:45.974581Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:45.974615Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:45.974663Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:45.974698Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:45.974737Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:45.974828Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:45.974863Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:45.974915Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:45.977671Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 
Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:45.977727Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:45.977796Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:45.977954Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:45.978018Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:45.978067Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:45.978121Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:45.978156Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:45.978188Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:45.978221Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:45.978513Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:45.978564Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:45.978610Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:45.978648Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:45.978707Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:45.978743Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:45.978783Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:45.978815Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:45.978838Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:45.994579Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:45.994682Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:45.994732Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:45.994797Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:45.994868Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:45.995521Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:45.995590Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:45.995670Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:45.995879Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:45.995932Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:45.996081Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:45.996141Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:45.996193Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:45.996250Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:46.001211Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:46.001294Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:46.001542Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.001595Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.001665Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:46.001754Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:46.001801Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:46.001854Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:46.001904Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
1000016:45] at 9437184 is Executed 2025-06-25T15:01:52.995066Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:52.995099Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:52.995242Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 45 at step 1000016 at tablet 9437184 { Transactions { TxId: 45 AckTo { RawX1: 103 RawX2: 8589936728 } } Step: 1000016 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:52.995271Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:52.995432Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:239:2230], Recipient [2:239:2230]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:52.995465Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:52.995493Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:52.995518Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:52.995536Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:52.995556Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000016:45] in PlanQueue unit at 9437184 2025-06-25T15:01:52.995580Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit PlanQueue 2025-06-25T15:01:52.995622Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-25T15:01:52.995641Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit PlanQueue 2025-06-25T15:01:52.995675Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit LoadTxDetails 2025-06-25T15:01:52.995711Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit LoadTxDetails 2025-06-25T15:01:52.996262Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437184 loaded tx from db 1000016:45 keys extracted: 2 2025-06-25T15:01:52.996293Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-25T15:01:52.996416Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit LoadTxDetails 2025-06-25T15:01:52.996443Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit FinalizeDataTxPlan 2025-06-25T15:01:52.996481Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit FinalizeDataTxPlan 2025-06-25T15:01:52.996518Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-25T15:01:52.996549Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit FinalizeDataTxPlan 2025-06-25T15:01:52.996580Z node 2 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-25T15:01:52.996604Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit BuildAndWaitDependencies 2025-06-25T15:01:52.996642Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1000016:45] is the new logically complete end at 9437184 2025-06-25T15:01:52.996667Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1000016:45] is the new logically incomplete end at 9437184 2025-06-25T15:01:52.996692Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1000016:45] at 9437184 2025-06-25T15:01:52.996722Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-25T15:01:52.996745Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-25T15:01:52.996766Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit BuildDataTxOutRS 2025-06-25T15:01:52.996793Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit BuildDataTxOutRS 2025-06-25T15:01:52.996833Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-25T15:01:52.996859Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit BuildDataTxOutRS 2025-06-25T15:01:52.996886Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit StoreAndSendOutRS 2025-06-25T15:01:52.996908Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit StoreAndSendOutRS 2025-06-25T15:01:52.996931Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-25T15:01:52.997002Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit StoreAndSendOutRS 2025-06-25T15:01:52.997024Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit PrepareDataTxInRS 2025-06-25T15:01:52.997045Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit PrepareDataTxInRS 2025-06-25T15:01:52.997069Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-25T15:01:52.997089Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit PrepareDataTxInRS 2025-06-25T15:01:52.997111Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit LoadAndWaitInRS 2025-06-25T15:01:52.997132Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit LoadAndWaitInRS 2025-06-25T15:01:52.997172Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-25T15:01:52.997197Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit LoadAndWaitInRS 2025-06-25T15:01:52.997219Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit ExecuteDataTx 2025-06-25T15:01:52.997240Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit ExecuteDataTx 2025-06-25T15:01:52.997518Z node 2 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000016:45] at tablet 9437184 with status COMPLETE 2025-06-25T15:01:52.997568Z node 2 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000016:45] at 9437184: {NSelectRow: 2, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 2, SelectRowBytes: 16, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T15:01:52.997605Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-25T15:01:52.997629Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit ExecuteDataTx 2025-06-25T15:01:52.997649Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit CompleteOperation 2025-06-25T15:01:52.997664Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit CompleteOperation 2025-06-25T15:01:52.997814Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is DelayComplete 2025-06-25T15:01:52.997836Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit CompleteOperation 2025-06-25T15:01:52.997858Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437184 to execution unit CompletedOperations 2025-06-25T15:01:52.997895Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437184 on unit CompletedOperations 2025-06-25T15:01:52.997923Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437184 is Executed 2025-06-25T15:01:52.997937Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437184 executing on unit CompletedOperations 2025-06-25T15:01:52.997956Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000016:45] at 9437184 has finished 2025-06-25T15:01:52.997984Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:52.998010Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:52.998082Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:52.998104Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:53.020534Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000016 txid# 45} 2025-06-25T15:01:53.020609Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000016} 2025-06-25T15:01:53.020674Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:53.020727Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete 
execution for [1000016:45] at 9437184 on unit CompleteOperation 2025-06-25T15:01:53.020808Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000016 : 45] from 9437184 at tablet 9437184 send result to client [2:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:01:53.020862Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:53.021338Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437185 step# 1000016 txid# 45} 2025-06-25T15:01:53.021378Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437185 step# 1000016} 2025-06-25T15:01:53.021414Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-25T15:01:53.021441Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000016:45] at 9437185 on unit CompleteOperation 2025-06-25T15:01:53.021491Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000016 : 45] from 9437185 at tablet 9437185 send result to client [2:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:01:53.021530Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 >> DataShardTxOrder::DelayData >> EncryptedBackupParamsValidationTest::EmptyImportItem >> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestReadTableSingleShardImmediate [GOOD] Test command err: 2025-06-25T15:01:46.158044Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:46.158230Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:46.158318Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0020b4/r3tmp/tmpofvXSm/pdisk_1.dat 2025-06-25T15:01:46.562166Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:46.569982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:46.628168Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:46.632957Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863703257724 != 1750863703257728 2025-06-25T15:01:46.677322Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T15:01:46.678288Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T15:01:46.678833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:46.678940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:46.690432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:46.773603Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T15:01:46.773662Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T15:01:46.774441Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T15:01:46.890525Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 2 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T15:01:46.890615Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T15:01:46.891193Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T15:01:46.891315Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T15:01:46.891619Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T15:01:46.891783Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T15:01:46.891916Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T15:01:46.892208Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T15:01:46.893697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:46.894729Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T15:01:46.894795Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T15:01:46.927029Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:46.928213Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:619:2526], Recipient [1:635:2536]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:46.929231Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:46.929626Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:633:2534] 2025-06-25T15:01:46.929857Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:46.972205Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:46.972291Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:619:2526], Recipient [1:635:2536]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:46.972753Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:635:2536] 2025-06-25T15:01:46.972962Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:46.980584Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:619:2526], Recipient [1:635:2536]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:46.981340Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:46.981507Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:46.983097Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:01:46.983183Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:46.983239Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:46.983595Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:46.983752Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:46.983828Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:666:2534] in generation 1 2025-06-25T15:01:46.984099Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:46.984176Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:46.985403Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T15:01:46.985464Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T15:01:46.985520Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T15:01:46.985767Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:46.985862Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:46.985922Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:667:2536] in generation 1 2025-06-25T15:01:46.996762Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:47.020075Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:47.020279Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:47.020401Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:670:2555] 2025-06-25T15:01:47.020439Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:47.020469Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:47.020502Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:47.020783Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:633:2534], Recipient [1:633:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:47.020840Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event 
TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:47.020939Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:47.020973Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T15:01:47.021019Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:47.021090Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:671:2556] 2025-06-25T15:01:47.021117Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T15:01:47.021139Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T15:01:47.021159Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:01:47.021439Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender ... ecutedNoMoreRestarts 2025-06-25T15:01:52.925589Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteWrite 2025-06-25T15:01:52.925625Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-25T15:01:52.925655Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:01:52.925742Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:52.925772Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-25T15:01:52.925804Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:01:52.925833Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:01:52.925900Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-25T15:01:52.925928Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:01:52.925955Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037888 has finished 2025-06-25T15:01:52.936641Z node 2 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-25T15:01:52.936704Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:01:52.936744Z node 2 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 2 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-25T15:01:52.936835Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:52.938653Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:315: 
actor# [2:61:2108] Handle TEvProposeTransaction 2025-06-25T15:01:52.938704Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [2:61:2108] TxId# 281474976715661 ProcessProposeTransaction 2025-06-25T15:01:52.938767Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:273: actor# [2:61:2108] Cookie# 0 userReqId# "" txid# 281474976715661 SEND to# [2:814:2644] DataReq marker# P0 2025-06-25T15:01:52.938923Z node 2 :TX_PROXY DEBUG: datareq.cpp:1330: Actor# [2:814:2644] Cookie# 0 txid# 281474976715661 HANDLE TDataReq marker# P1 2025-06-25T15:01:52.939125Z node 2 :TX_PROXY DEBUG: datareq.cpp:1467: Actor# [2:814:2644] txid# 281474976715661 HANDLE EvNavigateKeySetResult TDataReq marker# P3b ErrorCount# 0 2025-06-25T15:01:52.939297Z node 2 :TX_PROXY DEBUG: datareq.cpp:1620: Actor# [2:814:2644] txid# 281474976715661 HANDLE EvResolveKeySetResult TDataReq marker# P3 ErrorCount# 0 2025-06-25T15:01:52.939389Z node 2 :TX_PROXY DEBUG: datareq.cpp:1204: Actor# [2:814:2644] txid# 281474976715661 SEND TEvProposeTransaction to datashard 72075186224037888 with read table request affected shards 1 followers disallowed marker# P4b 2025-06-25T15:01:52.939675Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [2:814:2644], Recipient [2:627:2531]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCAN SourceDeprecated { RawX1: 814 RawX2: 8589937236 } TxBody: " \0018\001B8\n\014\010\200\202\224\204\200\200\200\200\001\020\002\022\t\010\001\022\003key\030\002\022\013\010\002\022\005value\030\002\032\016\n\006\001\000\000\000\000\200\022\000\030\001 \001 \001H\001R\022\t.\003\000\000\000\000\000\000\021T\n\000\000\002\000\000\000" TxId: 281474976715661 ExecLevel: 0 Flags: 8 2025-06-25T15:01:52.939751Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:52.939859Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:01:52.940049Z node 2 :TX_DATASHARD TRACE: key_validator.cpp:33: -- AddReadRange: [(Uint32 : NULL) ; ()] table: [72057594046644480:2:0] 2025-06-25T15:01:52.940133Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715661] at 72075186224037888 on unit CheckDataTx 2025-06-25T15:01:52.940181Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715661] at 72075186224037888 is Executed 2025-06-25T15:01:52.940220Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715661] at 72075186224037888 executing on unit CheckDataTx 2025-06-25T15:01:52.940254Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715661] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:01:52.940285Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715661] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:01:52.940348Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-25T15:01:52.940409Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:281474976715661] at 72075186224037888 2025-06-25T15:01:52.940453Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for 
[0:281474976715661] at 72075186224037888 is Executed 2025-06-25T15:01:52.940477Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715661] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:01:52.940497Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715661] at 72075186224037888 to execution unit MakeScanSnapshot 2025-06-25T15:01:52.940514Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715661] at 72075186224037888 on unit MakeScanSnapshot 2025-06-25T15:01:52.940537Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715661] at 72075186224037888 is Executed 2025-06-25T15:01:52.940559Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715661] at 72075186224037888 executing on unit MakeScanSnapshot 2025-06-25T15:01:52.940580Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715661] at 72075186224037888 to execution unit WaitForStreamClearance 2025-06-25T15:01:52.940600Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715661] at 72075186224037888 on unit WaitForStreamClearance 2025-06-25T15:01:52.940647Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:99: Requested stream clearance from [2:814:2644] for [0:281474976715661] at 72075186224037888 2025-06-25T15:01:52.940680Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715661] at 72075186224037888 is Continue 2025-06-25T15:01:52.940726Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:01:52.940801Z node 2 :TX_PROXY DEBUG: datareq.cpp:2504: Got clearance request, shard: 72075186224037888, txid: 281474976715661 2025-06-25T15:01:52.940883Z node 2 :TX_PROXY DEBUG: datareq.cpp:2513: Collected all clerance requests, txid: 281474976715661 2025-06-25T15:01:52.940921Z node 2 :TX_PROXY DEBUG: datareq.cpp:2968: Send stream clearance, shard: 72075186224037888, txid: 281474976715661, cleared: 1 2025-06-25T15:01:52.941048Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287942, Sender [2:814:2644], Recipient [2:627:2531]: NKikimrTx.TEvStreamClearancePending TxId: 281474976715661 2025-06-25T15:01:52.941102Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvTxProcessing::TEvStreamClearancePending 2025-06-25T15:01:52.941249Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:814:2644], Recipient [2:627:2531]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715661 Cleared: true 2025-06-25T15:01:52.941298Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-25T15:01:52.941370Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:627:2531], Recipient [2:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:52.941398Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:52.941454Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:52.941495Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: 
GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T15:01:52.941540Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715661] at 72075186224037888 for WaitForStreamClearance 2025-06-25T15:01:52.941575Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715661] at 72075186224037888 on unit WaitForStreamClearance 2025-06-25T15:01:52.941610Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [0:281474976715661] at 72075186224037888 2025-06-25T15:01:52.941639Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715661] at 72075186224037888 is Executed 2025-06-25T15:01:52.941679Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715661] at 72075186224037888 executing on unit WaitForStreamClearance 2025-06-25T15:01:52.941717Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715661] at 72075186224037888 to execution unit ReadTableScan 2025-06-25T15:01:52.941747Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715661] at 72075186224037888 on unit ReadTableScan 2025-06-25T15:01:52.941937Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715661] at 72075186224037888 is Continue 2025-06-25T15:01:52.941962Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T15:01:52.941996Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T15:01:52.942031Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:52.942062Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:52.942111Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:52.942517Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:820:2649], Recipient [2:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-25T15:01:52.942545Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupPathTest::ChecksumsForSchemaMappingFiles [GOOD] Test command err: 2025-06-25T14:59:27.278312Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901760912323323:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:27.278400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00149d/r3tmp/tmpspEcqZ/pdisk_1.dat 2025-06-25T14:59:27.690569Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:27.709509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-25T14:59:27.709586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:27.717883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2182, node 1 2025-06-25T14:59:27.858852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:27.858871Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:27.858878Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:27.858993Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10009 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:28.249998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T14:59:28.286016Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:28.714165Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519901760912323692:2221]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:28.714225Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:28.714276Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519901760912323692:2221], Recipient [1:7519901760912323692:2221]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:28.714295Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:29.671532Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901769502258904:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.671532Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901769502258913:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.671675Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.671882Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519901760912323549:2143] Handle TEvProposeTransaction 2025-06-25T14:59:29.671902Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519901760912323549:2143] TxId# 281474976715658 ProcessProposeTransaction 2025-06-25T14:59:29.671944Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519901760912323549:2143] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7519901769502258919:2617] 2025-06-25T14:59:29.714469Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519901760912323692:2221]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:29.714496Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:29.714561Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519901760912323692:2221], Recipient [1:7519901760912323692:2221]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:29.714572Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:29.744851Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519901769502258919:2617] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-25T14:59:29.745759Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519901769502258919:2617] txid# 281474976715658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:59:29.745810Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519901769502258919:2617] txid# 281474976715658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-25T14:59:29.746984Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519901769502258919:2617] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:59:29.747061Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519901769502258919:2617] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:59:29.747242Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519901769502258919:2617] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:59:29.747367Z node 1 :TX_PROXY 
DEBUG: schemereq.cpp:1530: Actor# [1:7519901769502258919:2617] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:59:29.747442Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519901769502258919:2617] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:59:29.747608Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519901769502258919:2617] txid# 281474976715658 HANDLE EvClientConnected 2025-06-25T14:59:29.747662Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901769502258944:2623], Recipient [1:7519901760912323692:2221]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:59:29.747686Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:59:29.747698Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:59:29.747733Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519901769502258919:2617], Recipient [1:7519901760912323692:2221]: {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-25T14:59:29.747747Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:59:29.750192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: ".metadata/workload_manager/pools/default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } TxId: 281474976715658 TabletId: 72057594046644480 Owner: "metadata@system" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:59:29.750575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-25T14:59:29.750692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: ... 
ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-25T15:01:50.359749Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976715765:0, at schemeshard: 72057594046644480 2025-06-25T15:01:50.359763Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976715765:0, datashard: 72075186224037894, at schemeshard: 72057594046644480 2025-06-25T15:01:50.359782Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715765:0 129 -> 240 2025-06-25T15:01:50.359932Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TRestore, opId# 281474976715765:0, reason# domain is not a serverless db, domain# /Root, domainPathId# [OwnerId: 72057594046644480, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046644480, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-25T15:01:50.360152Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T15:01:50.362610Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976715765:0, at schemeshard: 72057594046644480 2025-06-25T15:01:50.362638Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T15:01:50.362655Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976715765:0 2025-06-25T15:01:50.362812Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [61:7519902375073405689:2412] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715765 at schemeshard: 72057594046644480 2025-06-25T15:01:50.363110Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [61:7519902332123730234:2198], Recipient [61:7519902332123730234:2198]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T15:01:50.363164Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-25T15:01:50.363261Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715765:0, at schemeshard: 72057594046644480 2025-06-25T15:01:50.363318Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976715765:0 ProgressState 2025-06-25T15:01:50.363514Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T15:01:50.363559Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715765:0 progress is 1/1 2025-06-25T15:01:50.363591Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715765 ready parts: 1/1 2025-06-25T15:01:50.363656Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976715765:0 progress is 1/1 2025-06-25T15:01:50.363682Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715765 ready parts: 1/1 2025-06-25T15:01:50.363730Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715765, ready parts: 1/1, is published: true 2025-06-25T15:01:50.363818Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [61:7519902332123730234:2198] message: TxId: 281474976715765 2025-06-25T15:01:50.363886Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976715765 ready parts: 1/1 2025-06-25T15:01:50.363940Z node 61 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976715765:0 2025-06-25T15:01:50.363963Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976715765:0 2025-06-25T15:01:50.364208Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 20] was 3 2025-06-25T15:01:50.368379Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T15:01:50.368498Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [61:7519902332123730234:2198] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715765 at schemeshard: 72057594046644480 2025-06-25T15:01:50.368687Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124998, Sender [61:7519902332123730234:2198], Recipient [61:7519902332123730234:2198]: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715765 2025-06-25T15:01:50.368711Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5113: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletionResult 2025-06-25T15:01:50.368732Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6830: Handle: TEvNotifyTxCompletionResult: txId# 281474976715765 2025-06-25T15:01:50.368755Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6832: Message: TxId: 281474976715765 2025-06-25T15:01:50.368800Z node 61 :IMPORT DEBUG: schemeshard_import__create.cpp:362: TImport::TTxProgress: DoExecute 2025-06-25T15:01:50.368817Z node 61 :IMPORT DEBUG: schemeshard_import__create.cpp:1473: TImport::TTxProgress: OnNotifyResult: txId# 281474976715765 2025-06-25T15:01:50.368980Z node 61 :IMPORT NOTICE: schemeshard_import__create.cpp:754: TImport::TTxProgress: issues during restore, cancelling, info# { Id: 281474976710672 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Prefix_6/Table2' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 20] State: Transferring SubState: Subscribed WaitTxId: 0 Issue: 'shard: 72057594046644480:7, error: Checksum mismatch for Prefix/Table2/data_00.csv expected# f3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, got# e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' } 2025-06-25T15:01:50.369048Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_xxport__tx_base.h:63: SendNotifications: : id# 281474976710672, subscribers count# 0 2025-06-25T15:01:50.372185Z node 61 :IMPORT DEBUG: 
schemeshard_import__create.cpp:386: TImport::TTxProgress: DoComplete 2025-06-25T15:01:50.372296Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [61:7519902375073405829:4014], Recipient [61:7519902332123730234:2198]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:01:50.372346Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:01:50.372364Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T15:01:50.376772Z node 61 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [61:7519902375073405841:2417] [0] Resolve database: name# /Root 2025-06-25T15:01:50.377235Z node 61 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [61:7519902375073405841:2417] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-25T15:01:50.377262Z node 61 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [61:7519902375073405841:2417] [0] Send request: schemeShardId# 72057594046644480 2025-06-25T15:01:50.377708Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [61:7519902375073405844:4027], Recipient [61:7519902332123730234:2198]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:50.377769Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:50.377813Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T15:01:50.378073Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 275251202, Sender [61:7519902375073405841:2417], Recipient [61:7519902332123730234:2198]: NKikimrImport.TEvGetImportRequest Request { Id: 281474976710672 } DatabaseName: "/Root" 2025-06-25T15:01:50.378094Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5068: StateWork, processing event TEvImport::TEvGetImportRequest 2025-06-25T15:01:50.378467Z node 61 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [61:7519902375073405841:2417] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976710672 Status: CANCELLED Issues { message: "shard: 72057594046644480:7, error: Checksum mismatch for Prefix/Table2/data_00.csv expected# f3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, got# e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" severity: 1 } Progress: PROGRESS_CANCELLED ImportFromS3Settings { endpoint: "localhost:22232" scheme: HTTP bucket: "test_bucket" source_prefix: "Prefix" destination_path: "/Root/Prefix_6" } StartTime { seconds: 1750863709 } EndTime { seconds: 1750863710 } } 2025-06-25T15:01:50.379682Z node 61 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [61:7519902375073405844:4027], Recipient [61:7519902332123730234:2198]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:01:50.379717Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:01:50.379737Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T15:01:50.413026Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [61:7519902332123730234:2198]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T15:01:50.413068Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T15:01:50.413112Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [61:7519902332123730234:2198], Recipient [61:7519902332123730234:2198]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T15:01:50.413130Z node 61 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLock-EvWrite >> DataShardOutOfOrder::TestSnapshotReadPriority ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestUnprotectedReadsThenWriteVisibility [GOOD] Test command err: 2025-06-25T15:01:48.880923Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:48.881378Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:48.881513Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:48.881640Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:48.881675Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T15:01:48.881730Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fe4/r3tmp/tmp0xL9cA/pdisk_1.dat 2025-06-25T15:01:49.251953Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:49.421368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:49.560676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:49.560818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:49.564044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:49.564127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:49.580713Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:01:49.581195Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:49.581607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:49.879373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:49.969714Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [2:1178:2341], Recipient [2:1203:2353]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:49.974182Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [2:1178:2341], Recipient [2:1203:2353]: NKikimr::TEvTablet::TEvRestored 
2025-06-25T15:01:49.974604Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1203:2353] 2025-06-25T15:01:49.974809Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:49.983460Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [2:1178:2341], Recipient [2:1203:2353]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:50.023034Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:50.023265Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:50.025059Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:01:50.025159Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:50.025214Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:50.025607Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:50.026513Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:50.026616Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:1227:2353] in generation 1 2025-06-25T15:01:50.029645Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:50.060137Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:50.062750Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:50.062902Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:1231:2370] 2025-06-25T15:01:50.062944Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:50.063000Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:50.063042Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:50.063318Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:1203:2353], Recipient [2:1203:2353]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:50.063381Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:50.063847Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:01:50.063935Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:01:50.064018Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:50.064072Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:50.064124Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:01:50.064158Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:50.064192Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:50.064226Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:01:50.064270Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:50.119443Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1235:2371], Recipient [2:1203:2353]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:50.119519Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:50.119587Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1187:2732], serverId# [2:1235:2371], sessionId# [0:0:0] 2025-06-25T15:01:50.119884Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:763:2428], Recipient [2:1235:2371] 2025-06-25T15:01:50.119922Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:50.120014Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:01:50.120241Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:01:50.120298Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:01:50.120434Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:01:50.120492Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:50.120562Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:01:50.120612Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:01:50.120647Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:50.120962Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:50.120997Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:01:50.121035Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:01:50.121067Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 
2025-06-25T15:01:50.121118Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:01:50.121146Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:01:50.121177Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:01:50.121215Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:01:50.121245Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:50.124963Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152 ... e: 2 } } 2025-06-25T15:01:52.533189Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyksr44668nqsatb4rqr0k6j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjMyYThmMzQtNWViZTU4ZTMtN2M1MjRmLTY0MTU3NmQx, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:52.535597Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [2:1646:2412], Recipient [2:1203:2353]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2000 TxId: 18446744073709551615 } LockTxId: 281474976715662 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2025-06-25T15:01:52.535824Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:01:52.535901Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-25T15:01:52.535969Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:01:52.535998Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:01:52.536036Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:01:52.536069Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:01:52.536097Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-25T15:01:52.536124Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:01:52.536142Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:01:52.536161Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:01:52.536183Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:01:52.536277Z node 2 
:TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2000 TxId: 18446744073709551615 } LockTxId: 281474976715662 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-25T15:01:52.536497Z node 2 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715662, counter# 18446744073709551612 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T15:01:52.536546Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-25T15:01:52.536593Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[2:1646:2412], 0} after executionsCount# 1 2025-06-25T15:01:52.536639Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[2:1646:2412], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:01:52.536712Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[2:1646:2412], 0} finished in read 2025-06-25T15:01:52.536765Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:01:52.536792Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:01:52.536816Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:01:52.536839Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:01:52.536875Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:01:52.536890Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:01:52.536908Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-25T15:01:52.536935Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:01:52.537004Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T15:01:52.537986Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:1646:2412], Recipient [2:1203:2353]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:01:52.538036Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } } 2025-06-25T15:01:52.667602Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. 
Ctx: { TraceId: 01jyksr496539hxwtxm2d5c22k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDgwNDkwMGUtZjRmN2ZhMmYtNDU3NTlkZC1iOTRkODgwYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:52.669469Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [2:1670:2413], Recipient [2:1203:2353]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } LockTxId: 281474976715666 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2025-06-25T15:01:52.669672Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:01:52.669732Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CheckRead 2025-06-25T15:01:52.669799Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-25T15:01:52.669838Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:01:52.669875Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:01:52.669903Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:01:52.669941Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:8] at 72075186224037888 2025-06-25T15:01:52.669978Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-25T15:01:52.670004Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:01:52.670024Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:01:52.670043Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:01:52.670145Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 2500 TxId: 18446744073709551615 } LockTxId: 281474976715666 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-25T15:01:52.670374Z node 2 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715666, counter# 1 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T15:01:52.670412Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v2500/18446744073709551615 2025-06-25T15:01:52.670448Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[2:1670:2413], 0} after executionsCount# 1 2025-06-25T15:01:52.670484Z node 2 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[2:1670:2413], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:01:52.670539Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[2:1670:2413], 0} finished in read 2025-06-25T15:01:52.670591Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-25T15:01:52.670610Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:01:52.670625Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:01:52.670642Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:01:52.670668Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-25T15:01:52.670680Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:01:52.670698Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 72075186224037888 has finished 2025-06-25T15:01:52.670722Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:01:52.670795Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T15:01:52.671584Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:1670:2413], Recipient [2:1203:2353]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:01:52.671646Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-25T15:01:52.671912Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [2:247:2133], Recipient [2:1203:2353]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715666 LockNode: 1 Status: STATUS_SUBSCRIBED { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed-EvWrite [GOOD] >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed-EvWrite [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink [GOOD] Test command err: 2025-06-25T15:01:46.167664Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:46.167813Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:46.167873Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0020b8/r3tmp/tmpxX8utj/pdisk_1.dat 2025-06-25T15:01:46.561619Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:46.571264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:46.619847Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:46.623848Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863703245440 != 1750863703245444 2025-06-25T15:01:46.674143Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:46.674261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:46.689125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:46.785915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:46.830939Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:46.831970Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:46.832385Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:01:46.832607Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:46.879133Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:46.879866Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:46.879982Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:46.881701Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:01:46.881787Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:46.881854Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:46.882258Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:46.882394Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:46.882482Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:01:46.893235Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:46.924354Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:46.924560Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:46.924681Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:01:46.924717Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:46.924753Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:46.924790Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:46.924972Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.925033Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.925429Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:01:46.925527Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:01:46.925589Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:46.925626Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:46.925683Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:01:46.925718Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:46.925750Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:46.925780Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:01:46.925930Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:46.926380Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.926427Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.926480Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:01:46.926574Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:01:46.926609Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:46.926689Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:01:46.926877Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:01:46.926939Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:01:46.927051Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:01:46.927108Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:46.927145Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:01:46.927187Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:01:46.927223Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:46.927508Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:46.927541Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:01:46.927575Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:01:46.927610Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:46.927657Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:01:46.927688Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:01:46.927740Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:01:46.927774Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:01:46.927798Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:01:46.929146Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:01:46.929197Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:01:46.940873Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:01:46.940948Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:46.940990Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:46.941050Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... /Root, SessionId: ydb://session/3?node_id=2&id=ZTkxZDNhODktZjY4OTkzYzQtYjdjMDkxYWEtNzQ4NmRhNGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-25T15:01:54.179446Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1365: ActorId: [2:982:2688] TxId: 281474976715671. Ctx: { TraceId: 01jyksr53b38xnezmyrnwhp6db, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjgwNzhjMGUtNTc0ZWIwZC1lZWExZTkwYi0xMmE0YTY3OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got propose result, shard: 72075186224037888, status: ERROR, error: WRONG_SHARD_STATE (Rejecting data TxId 281474976715671 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state)) | 2025-06-25T15:01:54.179478Z node 2 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [2:982:2688] TxId: 281474976715671. Ctx: { TraceId: 01jyksr53b38xnezmyrnwhp6db, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjgwNzhjMGUtNTc0ZWIwZC1lZWExZTkwYi0xMmE0YTY3OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ERROR: [WRONG_SHARD_STATE] Rejecting data TxId 281474976715671 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state); 2025-06-25T15:01:54.179520Z node 2 :KQP_EXECUTER INFO: kqp_executer_impl.h:1951: ActorId: [2:982:2688] TxId: 281474976715671. Ctx: { TraceId: 01jyksr53b38xnezmyrnwhp6db, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjgwNzhjMGUtNTc0ZWIwZC1lZWExZTkwYi0xMmE0YTY3OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. task: 1, does not have the CA id yet or is already complete 2025-06-25T15:01:54.179545Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2877: ActorId: [2:982:2688] TxId: 281474976715671. Ctx: { TraceId: 01jyksr53b38xnezmyrnwhp6db, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjgwNzhjMGUtNTc0ZWIwZC1lZWExZTkwYi0xMmE0YTY3OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shutdown immediately - nothing to wait 2025-06-25T15:01:54.179593Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:982:2688] TxId: 281474976715671. 
Ctx: { TraceId: 01jyksr53b38xnezmyrnwhp6db, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjgwNzhjMGUtNTc0ZWIwZC1lZWExZTkwYi0xMmE0YTY3OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:01:54.179819Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YjgwNzhjMGUtNTc0ZWIwZC1lZWExZTkwYi0xMmE0YTY3OQ==, ActorId: [2:853:2688], ActorState: ExecuteState, TraceId: 01jyksr53b38xnezmyrnwhp6db, Create QueryResponse for error on request, msg: 2025-06-25T15:01:54.181210Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715678. Resolved key sets: 0 2025-06-25T15:01:54.181710Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. Ctx: { TraceId: 01jyksr5392r15v1fqsw135vm5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTUyMzNkY2EtY2E4MDlmY2EtYWZhMzBkYzctOWRmMjQzMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:54.181749Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715678. Ctx: { TraceId: 01jyksr5392r15v1fqsw135vm5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTUyMzNkY2EtY2E4MDlmY2EtYWZhMzBkYzctOWRmMjQzMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-25T15:01:54.181787Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1108:2675] TxId: 281474976715678. Ctx: { TraceId: 01jyksr5392r15v1fqsw135vm5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTUyMzNkY2EtY2E4MDlmY2EtYWZhMzBkYzctOWRmMjQzMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-25T15:01:54.181844Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1108:2675] TxId: 281474976715678. Ctx: { TraceId: 01jyksr5392r15v1fqsw135vm5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTUyMzNkY2EtY2E4MDlmY2EtYWZhMzBkYzctOWRmMjQzMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:01:54.181875Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1108:2675] TxId: 281474976715678. Ctx: { TraceId: 01jyksr5392r15v1fqsw135vm5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTUyMzNkY2EtY2E4MDlmY2EtYWZhMzBkYzctOWRmMjQzMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-25T15:01:54.181915Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715679. Resolved key sets: 0 2025-06-25T15:01:54.182108Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715680. Resolved key sets: 0 2025-06-25T15:01:54.182154Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jyksr53bakk65z23tj6x63ev, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc5NzY2NGQtZTgyODMyMWYtMzNiMWI1MWUtYjM4MDYwNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:54.182177Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715679. 
Ctx: { TraceId: 01jyksr53bakk65z23tj6x63ev, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc5NzY2NGQtZTgyODMyMWYtMzNiMWI1MWUtYjM4MDYwNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-25T15:01:54.182204Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1110:2683] TxId: 281474976715679. Ctx: { TraceId: 01jyksr53bakk65z23tj6x63ev, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc5NzY2NGQtZTgyODMyMWYtMzNiMWI1MWUtYjM4MDYwNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-25T15:01:54.182247Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1110:2683] TxId: 281474976715679. Ctx: { TraceId: 01jyksr53bakk65z23tj6x63ev, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc5NzY2NGQtZTgyODMyMWYtMzNiMWI1MWUtYjM4MDYwNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:01:54.182275Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1110:2683] TxId: 281474976715679. Ctx: { TraceId: 01jyksr53bakk65z23tj6x63ev, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTc5NzY2NGQtZTgyODMyMWYtMzNiMWI1MWUtYjM4MDYwNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-25T15:01:54.182371Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jyksr53ae27yhr6xsdz2fz9s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWI3MzVhMjYtM2EwZDIyYzMtZGEzNWQ1YzAtODQxMDAxZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:54.182397Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715680. Ctx: { TraceId: 01jyksr53ae27yhr6xsdz2fz9s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWI3MzVhMjYtM2EwZDIyYzMtZGEzNWQ1YzAtODQxMDAxZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-25T15:01:54.182428Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1113:2678] TxId: 281474976715680. Ctx: { TraceId: 01jyksr53ae27yhr6xsdz2fz9s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWI3MzVhMjYtM2EwZDIyYzMtZGEzNWQ1YzAtODQxMDAxZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-25T15:01:54.182462Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1113:2678] TxId: 281474976715680. Ctx: { TraceId: 01jyksr53ae27yhr6xsdz2fz9s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWI3MzVhMjYtM2EwZDIyYzMtZGEzNWQ1YzAtODQxMDAxZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:01:54.182505Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1113:2678] TxId: 281474976715680. 
Ctx: { TraceId: 01jyksr53ae27yhr6xsdz2fz9s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWI3MzVhMjYtM2EwZDIyYzMtZGEzNWQ1YzAtODQxMDAxZGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-25T15:01:54.182784Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715681. Resolved key sets: 0 2025-06-25T15:01:54.183230Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715681. Ctx: { TraceId: 01jyksr53b38xnezmyrnwhp6db, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjgwNzhjMGUtNTc0ZWIwZC1lZWExZTkwYi0xMmE0YTY3OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:54.183262Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715681. Ctx: { TraceId: 01jyksr53b38xnezmyrnwhp6db, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjgwNzhjMGUtNTc0ZWIwZC1lZWExZTkwYi0xMmE0YTY3OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-25T15:01:54.183294Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1118:2688] TxId: 281474976715681. Ctx: { TraceId: 01jyksr53b38xnezmyrnwhp6db, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjgwNzhjMGUtNTc0ZWIwZC1lZWExZTkwYi0xMmE0YTY3OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-25T15:01:54.183336Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1118:2688] TxId: 281474976715681. Ctx: { TraceId: 01jyksr53b38xnezmyrnwhp6db, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjgwNzhjMGUtNTc0ZWIwZC1lZWExZTkwYi0xMmE0YTY3OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:01:54.183372Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1118:2688] TxId: 281474976715681. Ctx: { TraceId: 01jyksr53b38xnezmyrnwhp6db, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjgwNzhjMGUtNTc0ZWIwZC1lZWExZTkwYi0xMmE0YTY3OQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit-UseSink [GOOD] >> DataShardTxOrder::ImmediateBetweenOnline_Init >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites-EvWrite [GOOD] >> DataShardOutOfOrder::TestShardRestartNoUndeterminedImmediate [GOOD] >> DataShardOutOfOrder::TestShardRestartDuringWaitingRead >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage [GOOD] >> BasicUsage::TWriteSession_AutoBatching [GOOD] >> BasicUsage::TWriteSession_BatchingProducesContinueTokens [GOOD] >> BasicUsage::BrokenCredentialsProvider ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestShardRestartPlannedCommitShouldSucceed-EvWrite [GOOD] Test command err: 2025-06-25T15:01:46.219117Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:46.219276Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:46.219334Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fcb/r3tmp/tmp8N1WmI/pdisk_1.dat 2025-06-25T15:01:46.561052Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:46.572204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:46.629141Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:46.633359Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863703245443 != 1750863703245447 2025-06-25T15:01:46.679072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:46.679202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:46.693013Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:46.780450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:47.146096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:47.268857Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ===== UPSERT initial rows 2025-06-25T15:01:47.450620Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:782:2634], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:47.450693Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:792:2639], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:47.450738Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:47.459753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:47.619487Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:796:2642], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:01:47.688037Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:852:2679] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:48.197759Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyksqz9s0nwkjt69md810e6f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTIxYTc2M2YtZDYyNjM3ODEtZGQwZWQ2MGUtOWRhZjNiMTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:48.301063Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyksr02g6n4y8nskjm9mj7ax, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWU1YzdhNGUtNmRjYWU4ZjQtYTJmNGMyNTgtYzNmZWM5NmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ===== Begin SELECT 2025-06-25T15:01:48.787350Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyksr05p915pp7p9zvzwwqdq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTNjZjdiZjEtM2I5ZDNhOWItZGU4Mzc2N2ItYWE5NzllYzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 1 } } ===== UPSERT and commit ... waiting for commit read sets 2025-06-25T15:01:48.912823Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyksr0n3589xfw5850e6h5yt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTNjZjdiZjEtM2I5ZDNhOWItZGU4Mzc2N2ItYWE5NzllYzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... captured readset ... captured readset ===== restarting tablet 2025-06-25T15:01:49.097245Z node 1 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1103: SelfId: [1:979:2720], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [1:916:2720]TEvDeliveryProblem was received from tablet: 72075186224037888 ===== Waiting for commit response ===== Last SELECT 2025-06-25T15:01:49.389743Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyksr11c586spmq4e295ewas, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjYzODZjNjUtYWI1MDI4ZTktYWEyZDliY2QtYmU4YTNmNWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 3 } items { uint32_value: 2 } } 2025-06-25T15:01:52.672168Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:52.672289Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:52.672427Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fcb/r3tmp/tmpUQPG9h/pdisk_1.dat 2025-06-25T15:01:52.917170Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T15:01:52.918800Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:52.944210Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:52.945112Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750863709909110 != 1750863709909114 2025-06-25T15:01:52.991594Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:52.991715Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:53.003952Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:53.091522Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:53.366707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:53.479741Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ===== UPSERT initial rows 2025-06-25T15:01:53.684587Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:782:2634], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:53.684670Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:792:2639], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:53.684737Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:53.689406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:53.843076Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:796:2642], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:01:53.881993Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:852:2679] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:53.958066Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyksr5cjb4j3n0snmhghcry3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjVkYTY0ZmEtYzYzZDQ5MGUtZGYyYmQ3ZTEtNGY0ZTc4MmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:54.048254Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyksr5nt6mz8bj5pjdh7tzbp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTljYjAwLTY5MDRjZTg2LTc4OTA5ZDkxLWE5OGE5Yzlk, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ===== Begin SELECT 2025-06-25T15:01:54.383700Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyksr5rm1hvn934pwfah7y6t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Y2MxZDVjOGQtYmNlNGRhMWEtMTMzNDZmYjItYTA3MzcwYzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 1 } } ===== UPSERT and commit ... waiting for commit read sets 2025-06-25T15:01:54.513635Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyksr62xeedzy2f9yaqgbm67, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Y2MxZDVjOGQtYmNlNGRhMWEtMTMzNDZmYjItYTA3MzcwYzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... captured readset ... captured readset ===== restarting tablet ===== Waiting for commit response ===== Last SELECT 2025-06-25T15:01:54.955575Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyksr6fx4q06bazxtqwyrf3m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTU4YjhkZGMtMTZkYzNlYTctZTM5YjM3OTctNDhkNmE4YTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 3 } items { uint32_value: 2 } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed-EvWrite [GOOD] Test command err: 2025-06-25T15:01:46.171416Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:46.171517Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:46.171550Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ff8/r3tmp/tmpSRAkBc/pdisk_1.dat 2025-06-25T15:01:46.566093Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:46.575387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:46.630791Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:46.635334Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863703245578 != 1750863703245582 2025-06-25T15:01:46.684097Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:46.684294Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:46.696880Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:46.785049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:46.831944Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:46.833010Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:46.833418Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:01:46.833666Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:46.876334Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:46.877147Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:46.877272Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:46.879135Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:01:46.879231Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:46.879301Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:46.879690Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:46.879831Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:46.879913Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:01:46.891215Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:46.933276Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:46.933504Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:46.933608Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:01:46.933644Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:46.933680Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:46.933728Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:46.934026Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.934097Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.934620Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:01:46.934715Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:01:46.934785Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:46.934824Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:46.934874Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:01:46.934912Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:46.934945Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:46.934980Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:01:46.935044Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:46.935532Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.935584Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.935641Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:01:46.935751Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:01:46.935789Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:46.935890Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:01:46.936110Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:01:46.936171Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:01:46.936260Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:01:46.936367Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:46.936412Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:01:46.936453Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:01:46.936497Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:46.936816Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:46.936861Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:01:46.936894Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:01:46.936931Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:46.936999Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:01:46.937031Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:01:46.937062Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:01:46.937109Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:01:46.937139Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:01:46.940228Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:01:46.940279Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:01:46.951860Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:01:46.951937Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:46.951982Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:46.952029Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... s: 1750863715142 CreateTimeMs: 1750863715092 UpdateTimeMs: 1750863715143 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:55.145074Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:993:2789] 2025-06-25T15:01:55.145126Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:996:2792], CA [2:997:2793], CA [2:994:2790], CA [2:998:2794], CA [2:995:2791], 2025-06-25T15:01:55.145165Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 5 compute actor(s) and 0 datashard(s): CA [2:996:2792], CA [2:997:2793], CA [2:994:2790], CA [2:998:2794], CA [2:995:2791], 2025-06-25T15:01:55.145609Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:994:2790], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 616 DurationUs: 1000 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 384 FinishTimeMs: 1750863715143 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 339 BuildCpuTimeUs: 45 HostName: "ghrun-kqfvx6aroe" NodeId: 2 StartTimeMs: 1750863715142 CreateTimeMs: 1750863715092 UpdateTimeMs: 1750863715143 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:55.145670Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. 
Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:994:2790] 2025-06-25T15:01:55.145712Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:996:2792], CA [2:997:2793], CA [2:998:2794], CA [2:995:2791], 2025-06-25T15:01:55.145748Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 4 compute actor(s) and 0 datashard(s): CA [2:996:2792], CA [2:997:2793], CA [2:998:2794], CA [2:995:2791], 2025-06-25T15:01:55.145978Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:995:2791], task: 4, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 424 DurationUs: 2000 Tasks { TaskId: 4 StageId: 3 CpuTimeUs: 230 FinishTimeMs: 1750863715144 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 201 BuildCpuTimeUs: 29 HostName: "ghrun-kqfvx6aroe" NodeId: 2 StartTimeMs: 1750863715142 CreateTimeMs: 1750863715092 UpdateTimeMs: 1750863715144 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:55.146037Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:995:2791] 2025-06-25T15:01:55.146076Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:996:2792], CA [2:997:2793], CA [2:998:2794], 2025-06-25T15:01:55.146109Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 3 compute actor(s) and 0 datashard(s): CA [2:996:2792], CA [2:997:2793], CA [2:998:2794], 2025-06-25T15:01:55.146210Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:986:2771] TxId: 281474976715665. 
Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:996:2792], task: 5, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 582 DurationUs: 2000 Tasks { TaskId: 5 StageId: 4 CpuTimeUs: 351 FinishTimeMs: 1750863715145 InputRows: 2 InputBytes: 10 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 300 BuildCpuTimeUs: 51 HostName: "ghrun-kqfvx6aroe" NodeId: 2 StartTimeMs: 1750863715143 CreateTimeMs: 1750863715092 UpdateTimeMs: 1750863715145 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:55.146262Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:996:2792] 2025-06-25T15:01:55.146293Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:997:2793], CA [2:998:2794], 2025-06-25T15:01:55.146327Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [2:997:2793], CA [2:998:2794], 2025-06-25T15:01:55.146538Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:997:2793], task: 6, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 396 Tasks { TaskId: 6 StageId: 5 CpuTimeUs: 174 FinishTimeMs: 1750863715145 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 106 BuildCpuTimeUs: 68 HostName: "ghrun-kqfvx6aroe" NodeId: 2 CreateTimeMs: 1750863715092 UpdateTimeMs: 1750863715145 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:55.146604Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:997:2793] 2025-06-25T15:01:55.146644Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Waiting for: CA [2:998:2794], 2025-06-25T15:01:55.146674Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:998:2794], 2025-06-25T15:01:55.146915Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:998:2794], task: 7, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 370 DurationUs: 1000 Tasks { TaskId: 7 StageId: 6 CpuTimeUs: 171 FinishTimeMs: 1750863715146 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ResultRows: 2 ResultBytes: 7 ComputeCpuTimeUs: 123 BuildCpuTimeUs: 48 HostName: "ghrun-kqfvx6aroe" NodeId: 2 StartTimeMs: 1750863715145 CreateTimeMs: 1750863715093 UpdateTimeMs: 1750863715146 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:55.146979Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:998:2794] 2025-06-25T15:01:55.147136Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:01:55.147194Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:986:2771] TxId: 281474976715665. Ctx: { TraceId: 01jyksr6cm7217feb49cqmzvqt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Yzk5M2QyOWYtOThmNzgyNTUtODdkNDhjYzMtZDM2OTM3ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.003797s ReadRows: 2 ReadBytes: 16 ru: 2 rate limiter was not found force flag: 1 { items { uint32_value: 3 } items { uint32_value: 2 } }, { items { uint32_value: 4 } items { uint32_value: 2 } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit-UseSink [GOOD] Test command err: 2025-06-25T15:01:46.214512Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:46.214680Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:46.214770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ff0/r3tmp/tmphlnTfa/pdisk_1.dat 2025-06-25T15:01:46.561945Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:46.570057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:46.619387Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:46.624153Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863703245528 != 1750863703245532 2025-06-25T15:01:46.674125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:46.674252Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:46.689146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:46.780447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:46.825016Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:46.826061Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:46.826492Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:01:46.826718Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:46.872364Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:46.873191Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:46.873348Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:46.875171Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:01:46.875253Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:46.875316Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:46.875712Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:46.875866Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:46.875946Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:01:46.886848Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:46.921857Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:46.922062Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:46.922162Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:01:46.922245Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:46.922296Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:46.922349Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:46.922572Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.922631Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.923028Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:01:46.923139Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:01:46.923204Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:46.923240Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:46.923291Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:01:46.923343Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:46.923380Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:46.923421Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:01:46.923470Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:46.923902Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.923950Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.923992Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:01:46.924088Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:01:46.924133Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:46.924226Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:01:46.924716Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:01:46.924792Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:01:46.924876Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:01:46.924946Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:46.924987Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:01:46.925049Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:01:46.925092Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:46.925373Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:46.925414Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:01:46.925446Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:01:46.925482Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:46.925551Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:01:46.925584Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:01:46.925619Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:01:46.925649Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:01:46.925684Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:01:46.927140Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:01:46.927199Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:01:46.937888Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:01:46.937971Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:46.938003Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:46.938042Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... HARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [2:25:2072], Recipient [2:974:2771]: {TEvRegisterTabletResult TabletId# 72075186224037892 Entry# 2000} 2025-06-25T15:01:54.691772Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-25T15:01:54.691814Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 2000 2025-06-25T15:01:54.691854Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-25T15:01:54.691992Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-25T15:01:54.692038Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:54.692078Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037892 2025-06-25T15:01:54.692116Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037892 has no attached operations 2025-06-25T15:01:54.692153Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037892 2025-06-25T15:01:54.692189Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2025-06-25T15:01:54.692237Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-25T15:01:54.692457Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [2:1130:2882], Recipient [2:974:2771]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:01:54.692495Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:01:54.692540Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [2:1128:2880], serverId# [2:1130:2882], sessionId# [0:0:0] 2025-06-25T15:01:54.692827Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [2:25:2072], Recipient [2:974:2771]: 
NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 2000 ReadStep# 2000 } 2025-06-25T15:01:54.692861Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-25T15:01:54.692903Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 0 next step 2000 2025-06-25T15:01:54.692964Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037892: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-25T15:01:54.693024Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037892 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-25T15:01:54.705058Z node 2 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037893 ack snapshot OpId 281474976715665 2025-06-25T15:01:54.705176Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037893 2025-06-25T15:01:54.705290Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037893 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T15:01:54.705363Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037893 2025-06-25T15:01:54.705412Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037893, actorId: [2:1137:2889] 2025-06-25T15:01:54.705439Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037893 2025-06-25T15:01:54.705471Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037893 2025-06-25T15:01:54.705496Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-25T15:01:54.705635Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:983:2774], Recipient [2:983:2774]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:54.705672Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:54.705849Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553157, Sender [2:983:2774], Recipient [2:713:2591]: NKikimrTxDataShard.TEvSplitTransferSnapshotAck TabletId: 72075186224037893 OperationCookie: 281474976715665 2025-06-25T15:01:54.705896Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 Received snapshot Ack from dst 72075186224037893 for split OpId 281474976715665 2025-06-25T15:01:54.706220Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:1129:2881], Recipient [2:713:2591]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037893 ClientId: [2:1129:2881] ServerId: [2:1131:2883] } 2025-06-25T15:01:54.706255Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T15:01:54.706448Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-25T15:01:54.706481Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 
active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:54.706508Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037893 2025-06-25T15:01:54.706536Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037893 has no attached operations 2025-06-25T15:01:54.706563Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037893 2025-06-25T15:01:54.706601Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037893 TxInFly 0 2025-06-25T15:01:54.706633Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-25T15:01:54.706874Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [2:25:2072], Recipient [2:983:2774]: {TEvRegisterTabletResult TabletId# 72075186224037893 Entry# 2000} 2025-06-25T15:01:54.706920Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-25T15:01:54.706949Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037893 time 2000 2025-06-25T15:01:54.706978Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-25T15:01:54.707131Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [2:1131:2883], Recipient [2:983:2774]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:01:54.707162Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:01:54.707194Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037893, clientId# [2:1129:2881], serverId# [2:1131:2883], sessionId# [0:0:0] 2025-06-25T15:01:54.707488Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [2:25:2072], Recipient [2:983:2774]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 2000 ReadStep# 2000 } 2025-06-25T15:01:54.707523Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-25T15:01:54.707552Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037893 coordinator 72057594046316545 last step 0 next step 2000 2025-06-25T15:01:54.707590Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037893: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-25T15:01:54.707635Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037893 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-25T15:01:54.718661Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037889 ack split to schemeshard 281474976715665 2025-06-25T15:01:54.722395Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553158, Sender [2:373:2367], Recipient [2:720:2595] 2025-06-25T15:01:54.722475Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715665, at datashard: 72075186224037889, state: SplitSrcWaitForPartitioningChanged 2025-06-25T15:01:54.724281Z 
node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037889 ack split partitioning changed to schemeshard 281474976715665 2025-06-25T15:01:54.724433Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T15:01:54.725055Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [2:705:2586], Recipient [2:713:2591]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-25T15:01:55.269781Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [2:925:2633], Recipient [2:627:2531]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 925 RawX2: 8589937225 } TxBody: " \0008\000`\200\200\200\005j\324\006\010\001\022\225\006\010\001\022\024\n\022\t\235\003\000\000\000\000\000\000\021I\n\000\000\002\000\000\000\032\256\002\010\240\215\006\022\207\002\037\002\022KqpEffect\005\205\006\213\000\205\002\206\205\004\207\203\004?\004\014key\024valueh%kqp%tx_result_binding_0_1\204\214\002\030Inputs(Parameters\034Program\013?\000)\251\000?\n\014Arg\000\002)\211\002?\016\204\214\002(KqpEffects\000)\211\010?\032\213\010\203\010\203\010\203\005@\203\010\204?\006\210\203\004\203\004\203\0144KqpUpsertRows\000\013?&\003?\036\177\000\001\205\000\000\000\000\001\003? \004\003?\"\000\003?$\002\017)\211\002?(?\010 Iterator\000)\211\004?\010?\n\203\004\030Member\000?\026\003?@\000\002\004\000\006\010\002?.\003\203\004\004\003\203\004\002\003\003?0\000\r\010\000\n\001/\032\0369\000\000\000\000\000\000\000@i\000\000\000\000\000\000\360?q\000\000\000\000\ 2025-06-25T15:01:55.269850Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:55.269952Z node 2 :TX_DATASHARD NOTICE: datashard.cpp:3097: Rejecting data TxId 281474976715663 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state) 2025-06-25T15:01:55.270385Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715664, at schemeshard: 72057594046644480 2025-06-25T15:01:55.270852Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715665, at schemeshard: 72057594046644480 >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit+UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestOutOfOrderNonConflictingWrites-EvWrite [GOOD] Test command err: 2025-06-25T15:01:46.153007Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:46.153166Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:46.153232Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0020b0/r3tmp/tmpzbHpN3/pdisk_1.dat 2025-06-25T15:01:46.560996Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:46.573396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:46.611803Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:46.621998Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863703245733 != 1750863703245737 2025-06-25T15:01:46.672674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:46.672797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:46.690149Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:46.780965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:46.825316Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:46.826362Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:46.826754Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:01:46.826996Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:46.866958Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:46.867519Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:46.867607Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:46.869173Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:01:46.869246Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:46.869309Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:46.869737Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:46.869873Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:46.869947Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:01:46.880824Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:46.908039Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:46.908274Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:46.908608Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:01:46.908652Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:46.908688Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:46.908732Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:46.908905Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.908948Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.909240Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:01:46.909304Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:01:46.909345Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:46.909395Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:46.909441Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:01:46.909469Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:46.909494Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:46.909516Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:01:46.909544Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:46.909918Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.909956Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.909995Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:01:46.910066Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:01:46.910093Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:46.910159Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:01:46.910304Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:01:46.910351Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:01:46.910413Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:01:46.910481Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:46.910522Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:01:46.910559Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:01:46.910589Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:46.910810Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:46.910836Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:01:46.910857Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:01:46.910883Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:46.910915Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:01:46.910935Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:01:46.910957Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:01:46.910981Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:01:46.911001Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:01:46.912008Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:01:46.912041Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:01:46.922970Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:01:46.923062Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:46.923113Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:46.923169Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 1750863715714 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:55.716188Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1031:2823] 2025-06-25T15:01:55.716238Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1036:2828], CA [2:1033:2825], CA [2:1034:2826], CA [2:1035:2827], CA [2:1032:2824], 2025-06-25T15:01:55.716277Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 5 compute actor(s) and 0 datashard(s): CA [2:1036:2828], CA [2:1033:2825], CA [2:1034:2826], CA [2:1035:2827], CA [2:1032:2824], 2025-06-25T15:01:55.716898Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1032:2824], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 500 DurationUs: 1000 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 258 FinishTimeMs: 1750863715714 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 214 BuildCpuTimeUs: 44 HostName: "ghrun-kqfvx6aroe" NodeId: 2 StartTimeMs: 1750863715713 CreateTimeMs: 1750863715706 UpdateTimeMs: 1750863715714 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:55.716976Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. 
Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1032:2824] 2025-06-25T15:01:55.717027Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1036:2828], CA [2:1033:2825], CA [2:1034:2826], CA [2:1035:2827], 2025-06-25T15:01:55.717067Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 4 compute actor(s) and 0 datashard(s): CA [2:1036:2828], CA [2:1033:2825], CA [2:1034:2826], CA [2:1035:2827], 2025-06-25T15:01:55.717664Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1033:2825], task: 4, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 419 DurationUs: 3000 Tasks { TaskId: 4 StageId: 3 CpuTimeUs: 170 FinishTimeMs: 1750863715716 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 137 BuildCpuTimeUs: 33 HostName: "ghrun-kqfvx6aroe" NodeId: 2 StartTimeMs: 1750863715713 CreateTimeMs: 1750863715706 UpdateTimeMs: 1750863715716 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:55.717735Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1033:2825] 2025-06-25T15:01:55.717779Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1036:2828], CA [2:1034:2826], CA [2:1035:2827], 2025-06-25T15:01:55.717815Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 3 compute actor(s) and 0 datashard(s): CA [2:1036:2828], CA [2:1034:2826], CA [2:1035:2827], 2025-06-25T15:01:55.717898Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1024:2805] TxId: 281474976715667. 
Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1034:2826], task: 5, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 630 DurationUs: 2000 Tasks { TaskId: 5 StageId: 4 CpuTimeUs: 378 FinishTimeMs: 1750863715717 InputRows: 2 InputBytes: 10 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 322 BuildCpuTimeUs: 56 HostName: "ghrun-kqfvx6aroe" NodeId: 2 StartTimeMs: 1750863715715 CreateTimeMs: 1750863715707 UpdateTimeMs: 1750863715717 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:55.717934Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1034:2826] 2025-06-25T15:01:55.717969Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1036:2828], CA [2:1035:2827], 2025-06-25T15:01:55.717998Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [2:1036:2828], CA [2:1035:2827], 2025-06-25T15:01:55.718143Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1035:2827], task: 6, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 429 Tasks { TaskId: 6 StageId: 5 CpuTimeUs: 194 FinishTimeMs: 1750863715717 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 137 BuildCpuTimeUs: 57 HostName: "ghrun-kqfvx6aroe" NodeId: 2 CreateTimeMs: 1750863715707 UpdateTimeMs: 1750863715717 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:55.718193Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1035:2827] 2025-06-25T15:01:55.718220Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Waiting for: CA [2:1036:2828], 2025-06-25T15:01:55.718249Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:1036:2828], 2025-06-25T15:01:55.718489Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1036:2828], task: 7, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 386 DurationUs: 1000 Tasks { TaskId: 7 StageId: 6 CpuTimeUs: 172 FinishTimeMs: 1750863715718 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ResultRows: 2 ResultBytes: 7 ComputeCpuTimeUs: 132 BuildCpuTimeUs: 40 HostName: "ghrun-kqfvx6aroe" NodeId: 2 StartTimeMs: 1750863715717 CreateTimeMs: 1750863715707 UpdateTimeMs: 1750863715718 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:55.718550Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1036:2828] 2025-06-25T15:01:55.718736Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:01:55.718814Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1024:2805] TxId: 281474976715667. Ctx: { TraceId: 01jyksr7079q590cvq8rravbqb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWUxNDhiZTEtZmNjODBkNDMtNDIxOWMxNDEtNGRmZjkzYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.003857s ReadRows: 2 ReadBytes: 16 ru: 2 rate limiter was not found force flag: 1 { items { uint32_value: 3 } items { uint32_value: 2 } }, { items { uint32_value: 4 } items { uint32_value: 2 } } >> DataShardOutOfOrder::TestOutOfOrderNoBarrierRestartImmediateLongTail [GOOD] >> DataShardOutOfOrder::TestSecondaryClearanceAfterShardRestartRace [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit+UseSink [GOOD] Test command err: 2025-06-25T15:01:47.108984Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:47.109137Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:47.109189Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f6b/r3tmp/tmpXmobph/pdisk_1.dat 2025-06-25T15:01:47.427420Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:47.434771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:47.467363Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:47.470759Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863704355629 != 1750863704355633 2025-06-25T15:01:47.518451Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:47.518587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:47.530070Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:47.613656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:47.658477Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:47.659646Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:47.660105Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:01:47.660377Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:47.708810Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:47.709558Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:47.709741Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:47.711412Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:01:47.711486Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:47.711549Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:47.711923Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:47.712058Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:47.712141Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:01:47.722895Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:47.754466Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:47.754654Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:47.754746Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:01:47.754781Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:47.754819Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:47.754851Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:47.755065Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:47.755122Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:47.755532Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:01:47.755617Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:01:47.755668Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:47.755702Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:47.755747Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:01:47.755781Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:47.755815Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:47.755846Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:01:47.755884Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:47.756287Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:47.759500Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:47.759574Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:01:47.759697Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:01:47.759737Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:47.759848Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:01:47.760094Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:01:47.760149Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:01:47.760230Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:01:47.760282Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:47.760340Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:01:47.760387Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:01:47.760425Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:47.760716Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:47.760747Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:01:47.760785Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:01:47.760815Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:47.760870Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:01:47.760896Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:01:47.760928Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:01:47.760959Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:01:47.760982Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:01:47.762390Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:01:47.762438Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:01:47.773167Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:01:47.773234Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:47.773295Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:47.773337Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... entDestroyed { TabletId: 72075186224037892 ClientId: [2:1141:2878] ServerId: [2:1143:2880] } 2025-06-25T15:01:56.193277Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T15:01:56.193645Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [2:25:2072], Recipient [2:990:2770]: {TEvRegisterTabletResult TabletId# 72075186224037892 Entry# 2000} 2025-06-25T15:01:56.193687Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-25T15:01:56.193724Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 2000 2025-06-25T15:01:56.193767Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-25T15:01:56.194028Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-25T15:01:56.194087Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:56.194130Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037892 2025-06-25T15:01:56.194171Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037892 has no attached operations 2025-06-25T15:01:56.194213Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037892 2025-06-25T15:01:56.194251Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2025-06-25T15:01:56.194305Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-25T15:01:56.194403Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [2:1143:2880], Recipient [2:990:2770]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:01:56.194448Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:01:56.194667Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 
72075186224037892, clientId# [2:1141:2878], serverId# [2:1143:2880], sessionId# [0:0:0] 2025-06-25T15:01:56.195278Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [2:25:2072], Recipient [2:990:2770]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 2000 ReadStep# 2000 } 2025-06-25T15:01:56.195335Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-25T15:01:56.195385Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 0 next step 2000 2025-06-25T15:01:56.195450Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037892: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-25T15:01:56.195533Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037892 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-25T15:01:56.206475Z node 2 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037893 ack snapshot OpId 281474976715665 2025-06-25T15:01:56.206586Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037893 2025-06-25T15:01:56.206669Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037893 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T15:01:56.206736Z node 2 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037893 2025-06-25T15:01:56.206781Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037893, actorId: [2:1150:2887] 2025-06-25T15:01:56.206806Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037893 2025-06-25T15:01:56.206837Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037893 2025-06-25T15:01:56.206862Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-25T15:01:56.207036Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:997:2772], Recipient [2:997:2772]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:56.207067Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:56.207196Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553157, Sender [2:997:2772], Recipient [2:713:2591]: NKikimrTxDataShard.TEvSplitTransferSnapshotAck TabletId: 72075186224037893 OperationCookie: 281474976715665 2025-06-25T15:01:56.207243Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 Received snapshot Ack from dst 72075186224037893 for split OpId 281474976715665 2025-06-25T15:01:56.207589Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [2:1142:2879], Recipient [2:713:2591]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037893 ClientId: [2:1142:2879] ServerId: [2:1144:2881] } 2025-06-25T15:01:56.207624Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 
2025-06-25T15:01:56.207893Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [2:25:2072], Recipient [2:997:2772]: {TEvRegisterTabletResult TabletId# 72075186224037893 Entry# 2000} 2025-06-25T15:01:56.207925Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-25T15:01:56.207951Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037893 time 2000 2025-06-25T15:01:56.207978Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-25T15:01:56.208078Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-25T15:01:56.208108Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:56.208134Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037893 2025-06-25T15:01:56.208158Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037893 has no attached operations 2025-06-25T15:01:56.208184Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037893 2025-06-25T15:01:56.208206Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037893 TxInFly 0 2025-06-25T15:01:56.208241Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-25T15:01:56.208382Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [2:1144:2881], Recipient [2:997:2772]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:01:56.208412Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:01:56.208463Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037893, clientId# [2:1142:2879], serverId# [2:1144:2881], sessionId# [0:0:0] 2025-06-25T15:01:56.208708Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [2:25:2072], Recipient [2:997:2772]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 2000 ReadStep# 2000 } 2025-06-25T15:01:56.208736Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-25T15:01:56.208760Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037893 coordinator 72057594046316545 last step 0 next step 2000 2025-06-25T15:01:56.208795Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037893: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-25T15:01:56.208836Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037893 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-25T15:01:56.219564Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037889 ack split to schemeshard 281474976715665 2025-06-25T15:01:56.222978Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553158, Sender [2:373:2367], Recipient 
[2:720:2595] 2025-06-25T15:01:56.223040Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715665, at datashard: 72075186224037889, state: SplitSrcWaitForPartitioningChanged 2025-06-25T15:01:56.224869Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037889 ack split partitioning changed to schemeshard 281474976715665 2025-06-25T15:01:56.224966Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T15:01:56.225632Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [2:705:2586], Recipient [2:713:2591]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-25T15:01:56.732086Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [2:942:2633], Recipient [2:627:2531]: NKikimrDataEvents.TEvWrite Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxId: 281474976715663 TxMode: MODE_VOLATILE_PREPARE Locks { SendingShards: 72075186224037888 SendingShards: 72075186224037889 ReceivingShards: 72075186224037888 ReceivingShards: 72075186224037889 Op: Commit } 2025-06-25T15:01:56.732155Z node 2 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-25T15:01:56.732273Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_WRONG_SHARD_STATE;details=Rejecting data TxId 281474976715663 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state);tx_id=281474976715663; 2025-06-25T15:01:56.732355Z node 2 :TX_DATASHARD NOTICE: datashard.cpp:3137: Rejecting data TxId 281474976715663 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state) 2025-06-25T15:01:56.732779Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715664, at schemeshard: 72057594046644480 2025-06-25T15:01:56.733234Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715665, at schemeshard: 72057594046644480 >> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop+UseSink [GOOD] >> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop-UseSink >> PersQueueSdkReadSessionTest::ReadSessionWithAbort [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithClose >> KqpLimits::QueryExecTimeoutCancel [GOOD] >> KqpLimits::QueryExecTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestOutOfOrderNoBarrierRestartImmediateLongTail [GOOD] Test command err: 2025-06-25T15:01:46.283253Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:46.283412Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:46.283465Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002003/r3tmp/tmpByPziV/pdisk_1.dat 2025-06-25T15:01:46.611593Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:46.616163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:46.653217Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:46.660721Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863703245409 != 1750863703245413 2025-06-25T15:01:46.707838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:46.707960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:46.719050Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:46.807391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:46.851100Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:46.852132Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:46.852551Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:01:46.852788Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:46.896932Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:46.897594Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:46.897748Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:46.899377Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:01:46.899451Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:46.899511Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:46.899878Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:46.900008Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:46.900081Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:01:46.911092Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:46.949081Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:46.949278Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:46.949380Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:01:46.949419Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:46.949450Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:46.949490Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:46.949702Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.949759Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:46.950124Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:01:46.950202Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:01:46.950264Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:46.950300Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:46.950355Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:01:46.950389Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:46.950437Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:46.950469Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:01:46.950512Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:46.950885Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.950925Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:46.950981Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:01:46.951073Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:01:46.951117Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:46.951205Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:01:46.951405Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:01:46.951487Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:01:46.951565Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:01:46.951660Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:46.951698Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:01:46.951737Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:01:46.951780Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:46.952057Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:46.952091Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:01:46.952121Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:01:46.952155Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:46.952203Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:01:46.952230Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:01:46.952259Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:01:46.952289Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:01:46.952334Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:01:46.953679Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:01:46.953727Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:01:46.965023Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:01:46.965096Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:46.965140Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:46.965196Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... ode 2: [72075186224037888] 2025-06-25T15:01:56.975875Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:56.975922Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: true, 1 scan tasks on 1 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-25T15:01:56.976191Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [2:1184:2900] 2025-06-25T15:01:56.976255Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [2:1184:2900], channels: 1 2025-06-25T15:01:56.976356Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:1180:2900] TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-25T15:01:56.976425Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1180:2900] TxId: 281474976715670. 
Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1184:2900], 2025-06-25T15:01:56.976490Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1180:2900] TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:1184:2900], 2025-06-25T15:01:56.976541Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [2:1180:2900] TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-25T15:01:56.977281Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1180:2900] TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1184:2900], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-25T15:01:56.977341Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:1180:2900] TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1184:2900], 2025-06-25T15:01:56.977390Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:1180:2900] TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:1184:2900], 2025-06-25T15:01:56.977820Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [2:1186:2900], Recipient [2:1120:2866]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false KeysSize: 1 2025-06-25T15:01:56.977977Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:01:56.978041Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v4033/281474976715667 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2000/18446744073709551615 ImmediateWriteEdgeReplied# v4033/18446744073709551615 2025-06-25T15:01:56.978083Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v5000/18446744073709551615 2025-06-25T15:01:56.978141Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2025-06-25T15:01:56.978223Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T15:01:56.978260Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:01:56.978302Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:01:56.978333Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:01:56.978377Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2025-06-25T15:01:56.978416Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T15:01:56.978440Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:01:56.978459Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:01:56.978479Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:01:56.978598Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false } 2025-06-25T15:01:56.978812Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[2:1186:2900], 0} after executionsCount# 1 2025-06-25T15:01:56.978904Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[2:1186:2900], 0} sends rowCount# 1, bytes# 32, quota rows left# 32766, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:01:56.978982Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read 
iterator# {[2:1186:2900], 0} finished in read 2025-06-25T15:01:56.979046Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T15:01:56.979072Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:01:56.979097Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:01:56.979119Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:01:56.979157Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T15:01:56.979175Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:01:56.979200Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037888 has finished 2025-06-25T15:01:56.979240Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:01:56.979864Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [2:1186:2900], Recipient [2:1120:2866]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:01:56.979917Z node 2 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-25T15:01:56.980542Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:1180:2900] TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1184:2900], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 850 DurationUs: 1000 Tasks { TaskId: 1 CpuTimeUs: 181 FinishTimeMs: 1750863716980 OutputRows: 1 OutputBytes: 5 Tables { TablePath: "/Root/table-1" ReadRows: 1 ReadBytes: 8 AffectedPartitions: 1 } IngressRows: 1 ResultRows: 1 ResultBytes: 5 ComputeCpuTimeUs: 81 BuildCpuTimeUs: 100 HostName: "ghrun-kqfvx6aroe" NodeId: 2 StartTimeMs: 1750863716979 CreateTimeMs: 1750863716976 UpdateTimeMs: 1750863716980 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:56.980691Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1184:2900] 2025-06-25T15:01:56.980864Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:1180:2900] TxId: 281474976715670. Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:01:56.980942Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:1180:2900] TxId: 281474976715670. 
Ctx: { TraceId: 01jyksr8gtctq0evaqvmkn2xqe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZmZiY2E3YjktNGU3Y2U4NjktZTdhOGRlZWEtZmU0NmVmYmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000850s ReadRows: 1 ReadBytes: 8 ru: 1 rate limiter was not found force flag: 1 { items { uint32_value: 7 } items { uint32_value: 4 } } >> DataShardReadIterator::ShouldReadRangeInclusiveEndsCellVec >> DataShardReadIterator::ShouldHandleReadAck >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey+EvWrite >> DataShardReadIterator::ShouldRangeReadReverseLeftInclusive >> DataShardReadIteratorSysTables::ShouldRead >> DataShardReadIteratorConsistency::LocalSnapshotReadWithPlanQueueRace >> DataShardReadIterator::ShouldStopWhenNodeDisconnected >> DataShardReadIterator::ShouldReadRangeCellVec >> DataShardReadIterator::ShouldReadKeyCellVec ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestSecondaryClearanceAfterShardRestartRace [GOOD] Test command err: 2025-06-25T15:01:49.767245Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:49.767374Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:49.767435Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f2e/r3tmp/tmpZFV2dY/pdisk_1.dat 2025-06-25T15:01:50.060483Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:50.063856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:50.113480Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:50.118595Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863707006410 != 1750863707006414 2025-06-25T15:01:50.164221Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T15:01:50.165286Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T15:01:50.165804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:50.165935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:50.177350Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:50.258720Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T15:01:50.258786Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T15:01:50.258986Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T15:01:50.405876Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 2 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T15:01:50.405976Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T15:01:50.406527Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T15:01:50.406618Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T15:01:50.406922Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T15:01:50.407117Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T15:01:50.407265Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T15:01:50.407584Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T15:01:50.409103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:50.410214Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T15:01:50.410283Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T15:01:50.442499Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:50.443803Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:619:2526], Recipient [1:635:2536]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:50.444468Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:50.444893Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:633:2534] 2025-06-25T15:01:50.445169Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:50.489610Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:633:2534]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:50.489710Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:619:2526], Recipient [1:635:2536]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:50.490202Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:635:2536] 2025-06-25T15:01:50.490424Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:50.496238Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:619:2526], Recipient [1:635:2536]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:50.497094Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:50.497288Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:50.499031Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:01:50.499119Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:50.499169Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:50.499597Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:50.499795Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:50.499886Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:666:2534] in generation 1 2025-06-25T15:01:50.500211Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:50.500358Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:50.501780Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T15:01:50.501844Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T15:01:50.501910Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T15:01:50.502185Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:50.502282Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:50.502363Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:667:2536] in generation 1 2025-06-25T15:01:50.513423Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:50.567339Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:50.567546Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:50.567646Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:670:2555] 2025-06-25T15:01:50.567689Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:50.567729Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:50.567769Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:50.568120Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:633:2534], Recipient [1:633:2534]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:50.568189Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event 
TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:50.568292Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:50.568349Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T15:01:50.568403Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:50.568465Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:671:2556] 2025-06-25T15:01:50.568496Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T15:01:50.568519Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T15:01:50.568540Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:01:50.568824Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender ... k7f9qtexnwjvx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzUwYTg4ZmItOTU2MTdkNGEtNTAzMmNhNjctYzJhYjQyNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:924:2712], 2025-06-25T15:01:57.559750Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [2:921:2712] TxId: 281474976715662. Ctx: { TraceId: 01jyksr941bmdk7f9qtexnwjvx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzUwYTg4ZmItOTU2MTdkNGEtNTAzMmNhNjctYzJhYjQyNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-25T15:01:57.560630Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:921:2712] TxId: 281474976715662. Ctx: { TraceId: 01jyksr941bmdk7f9qtexnwjvx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzUwYTg4ZmItOTU2MTdkNGEtNTAzMmNhNjctYzJhYjQyNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:924:2712], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-25T15:01:57.560692Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [2:921:2712] TxId: 281474976715662. Ctx: { TraceId: 01jyksr941bmdk7f9qtexnwjvx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzUwYTg4ZmItOTU2MTdkNGEtNTAzMmNhNjctYzJhYjQyNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:924:2712], 2025-06-25T15:01:57.560746Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [2:921:2712] TxId: 281474976715662. Ctx: { TraceId: 01jyksr941bmdk7f9qtexnwjvx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzUwYTg4ZmItOTU2MTdkNGEtNTAzMmNhNjctYzJhYjQyNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:924:2712], 2025-06-25T15:01:57.561448Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:921:2712] TxId: 281474976715662. 
Ctx: { TraceId: 01jyksr941bmdk7f9qtexnwjvx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzUwYTg4ZmItOTU2MTdkNGEtNTAzMmNhNjctYzJhYjQyNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:924:2712], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 773 Tasks { TaskId: 1 CpuTimeUs: 338 FinishTimeMs: 1750863717561 EgressBytes: 10 EgressRows: 1 ComputeCpuTimeUs: 22 BuildCpuTimeUs: 316 HostName: "ghrun-kqfvx6aroe" NodeId: 2 CreateTimeMs: 1750863717559 UpdateTimeMs: 1750863717561 } MaxMemoryUsage: 1048576 } 2025-06-25T15:01:57.561556Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715662. Ctx: { TraceId: 01jyksr941bmdk7f9qtexnwjvx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzUwYTg4ZmItOTU2MTdkNGEtNTAzMmNhNjctYzJhYjQyNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:924:2712] 2025-06-25T15:01:57.561605Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:276: ActorId: [2:921:2712] TxId: 281474976715662. Ctx: { TraceId: 01jyksr941bmdk7f9qtexnwjvx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzUwYTg4ZmItOTU2MTdkNGEtNTAzMmNhNjctYzJhYjQyNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send Commit to BufferActor=[2:920:2712] 2025-06-25T15:01:57.561658Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:921:2712] TxId: 281474976715662. Ctx: { TraceId: 01jyksr941bmdk7f9qtexnwjvx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzUwYTg4ZmItOTU2MTdkNGEtNTAzMmNhNjctYzJhYjQyNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000773s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-25T15:01:57.562085Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:929:2728], Recipient [2:879:2695]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:57.562125Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:57.562164Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:928:2727], serverId# [2:929:2728], sessionId# [0:0:0] 2025-06-25T15:01:57.562297Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [2:927:2712], Recipient [2:879:2695]: NKikimrDataEvents.TEvWrite Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxMode: MODE_IMMEDIATE 2025-06-25T15:01:57.562322Z node 2 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-25T15:01:57.562407Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [2:879:2695], Recipient [2:879:2695]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T15:01:57.562429Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T15:01:57.562505Z node 2 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-25T15:01:57.562592Z node 2 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: 
Parsing write transaction for 0 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxMode: MODE_IMMEDIATE 2025-06-25T15:01:57.562643Z node 2 :TX_DATASHARD TRACE: datashard_write_operation.cpp:213: Table /Root/table-1, shard: 72075186224037888, write point (Uint32 : 4) 2025-06-25T15:01:57.562680Z node 2 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint32 : 4) table: [72057594046644480:2:1] 2025-06-25T15:01:57.562748Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CheckWrite 2025-06-25T15:01:57.562786Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-25T15:01:57.562816Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CheckWrite 2025-06-25T15:01:57.562844Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:01:57.562886Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:01:57.562919Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2000/281474976715661 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-25T15:01:57.562964Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 72075186224037888 2025-06-25T15:01:57.562998Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-25T15:01:57.563022Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:01:57.563044Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit ExecuteWrite 2025-06-25T15:01:57.563064Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit ExecuteWrite 2025-06-25T15:01:57.563087Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037888 2025-06-25T15:01:57.563122Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2000/281474976715661 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-25T15:01:57.563212Z node 2 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=1 2025-06-25T15:01:57.563249Z node 2 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-25T15:01:57.563299Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:57.563319Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteWrite 2025-06-25T15:01:57.563345Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 
to execution unit FinishProposeWrite 2025-06-25T15:01:57.563370Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:01:57.563419Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:57.563442Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-25T15:01:57.563466Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:01:57.563491Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:01:57.563524Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-25T15:01:57.563550Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:01:57.563570Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037888 has finished 2025-06-25T15:01:57.576714Z node 2 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-25T15:01:57.576791Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:01:57.576845Z node 2 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 2 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-25T15:01:57.576946Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:57.577425Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:921:2712] TxId: 281474976715662. Ctx: { TraceId: 01jyksr941bmdk7f9qtexnwjvx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzUwYTg4ZmItOTU2MTdkNGEtNTAzMmNhNjctYzJhYjQyNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:01:57.577484Z node 2 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2202: ActorId: [2:921:2712] TxId: 281474976715662. Ctx: { TraceId: 01jyksr941bmdk7f9qtexnwjvx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzUwYTg4ZmItOTU2MTdkNGEtNTAzMmNhNjctYzJhYjQyNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Terminate, become ZombieState >> DataShardReadIterator::ShouldReverseReadMultipleKeys >> EncryptedBackupParamsValidationTest::EmptyImportItem [GOOD] >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLock-EvWrite [GOOD] >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLockOutOfOrder >> DataShardTxOrder::ImmediateBetweenOnline_Init [GOOD] >> DataShardOutOfOrder::TestShardRestartDuringWaitingRead [GOOD] >> EncryptedBackupParamsValidationTest::IncorrectKeyImport >> DataShardSnapshots::VolatileSnapshotSplit >> DataShardSnapshots::MvccSnapshotAndSplit >> DataShardSnapshots::LockedWriteReuseAfterCommit+UseSink >> DataShardSnapshots::UncommittedChangesRenameTable+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ImmediateBetweenOnline_Init [GOOD] Test command err: 2025-06-25T15:01:56.300749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:56.300800Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:56.304815Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:56.319123Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:56.319552Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:56.319789Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:56.329305Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:56.370150Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:56.370320Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:56.372290Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:56.372376Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:56.372440Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:56.372840Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:56.372942Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:56.372989Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:56.432237Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:56.468145Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:56.468345Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:56.468484Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:56.468532Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 
9437184 2025-06-25T15:01:56.468566Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:56.468594Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:56.468803Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:56.468865Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:56.469123Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:56.469228Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:56.469290Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:56.469323Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:56.469366Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:56.469398Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:56.469439Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:56.469469Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:56.469505Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:56.469611Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:56.469642Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:56.469686Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:56.472284Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:56.472362Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:56.472431Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:56.472595Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:56.472685Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:56.472733Z node 1 :TX_DATASHARD 
DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:56.472789Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:56.472815Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:56.472836Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:56.472857Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:56.473074Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:56.473096Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:56.473127Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:56.473165Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:56.473209Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:56.473235Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:56.473262Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:56.473284Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:56.473300Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:56.484961Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:56.485028Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:56.485062Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:56.485110Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:56.485155Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:56.485550Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:56.485600Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:56.485640Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:56.485746Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 
9437184} 2025-06-25T15:01:56.485777Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:56.485878Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:56.485915Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:56.485940Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:56.485965Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:56.495284Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:56.495358Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:56.495578Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:56.495621Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:56.495683Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:56.495720Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:56.495751Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:56.495790Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:56.495823Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
at tablet 9437186 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:02:01.171989Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:02:01.172046Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:02:01.172065Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437186 2025-06-25T15:02:01.172170Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 101 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 33} 2025-06-25T15:02:01.172199Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.172229Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 101 2025-06-25T15:02:01.172355Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 104 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 34} 2025-06-25T15:02:01.172386Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.172403Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 104 2025-06-25T15:02:01.172460Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 107 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 35} 2025-06-25T15:02:01.172475Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.172489Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 107 2025-06-25T15:02:01.172531Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 110 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 36} 2025-06-25T15:02:01.172545Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.172558Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 110 2025-06-25T15:02:01.172605Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 113 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 37} 2025-06-25T15:02:01.172622Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.172651Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 113 2025-06-25T15:02:01.172705Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 
269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 116 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 38} 2025-06-25T15:02:01.172720Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.172732Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 116 2025-06-25T15:02:01.172764Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 119 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 39} 2025-06-25T15:02:01.172784Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.172806Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 119 2025-06-25T15:02:01.172893Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 122 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 40} 2025-06-25T15:02:01.172910Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.172924Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 122 2025-06-25T15:02:01.172963Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 125 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 41} 2025-06-25T15:02:01.172983Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.173022Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 125 2025-06-25T15:02:01.173098Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 128 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 42} 2025-06-25T15:02:01.173143Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.173166Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 128 2025-06-25T15:02:01.173245Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 131 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 43} 2025-06-25T15:02:01.173269Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.173290Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 131 2025-06-25T15:02:01.173370Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 
269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 134 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 44} 2025-06-25T15:02:01.173397Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.173423Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 134 2025-06-25T15:02:01.173490Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 137 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 45} 2025-06-25T15:02:01.173515Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.173555Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 137 2025-06-25T15:02:01.173659Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 140 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 46} 2025-06-25T15:02:01.173688Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.173711Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 140 2025-06-25T15:02:01.173814Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 143 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 47} 2025-06-25T15:02:01.173842Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.173869Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 143 2025-06-25T15:02:01.173961Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 146 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 48} 2025-06-25T15:02:01.173994Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.174015Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 146 2025-06-25T15:02:01.174099Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 149 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 49} 2025-06-25T15:02:01.174125Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.174166Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 149 2025-06-25T15:02:01.188434Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: 
TTxProgressTransaction::Complete at 9437186 2025-06-25T15:02:01.188491Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:152] at 9437186 on unit CompleteOperation 2025-06-25T15:02:01.188554Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 152] from 9437186 at tablet 9437186 send result to client [1:103:2136], exec latency: 2 ms, propose latency: 4 ms 2025-06-25T15:02:01.188614Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-25T15:02:01.188647Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:02:01.188841Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:463:2402], Recipient [1:239:2230]: {TEvReadSet step# 1000005 txid# 152 TabletSource# 9437184 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 50} 2025-06-25T15:02:01.188870Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:01.188903Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437186 consumer 9437186 txId 152 >> DataShardOutOfOrder::TestSnapshotReadPriority [GOOD] |92.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/database/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestShardRestartDuringWaitingRead [GOOD] Test command err: 2025-06-25T15:01:53.270397Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:53.270532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:53.270576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f1b/r3tmp/tmp6tV7sM/pdisk_1.dat 2025-06-25T15:01:53.571740Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:53.574978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:53.606377Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:53.615070Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863710590665 != 1750863710590669 2025-06-25T15:01:53.660949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:53.661093Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:53.672685Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:53.756845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:54.104330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:54.220178Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:54.433347Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:882:2689], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:54.433477Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:872:2684], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:54.433808Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:54.439233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:54.613010Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:886:2692], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:01:54.685840Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:942:2729] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:55.000078Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyksr63yd03bd68nvv996vhk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGZjMmZkNjAtNDllNjQ4YjctM2JkNTZiNjYtOTZjY2IwMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:55.082321Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyksr6ph9cvhfkbqegern73t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzU1YTdmY2UtZmEwMTIyZjQtYWNkOWUyNzEtN2U5ZWMwZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:55.490863Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyksr6s94k6842rb9vr363t5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDMzMTBiMTEtZDc4OTAzNjUtYWJhMzdmMmQtMWNhNTI4MDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 1 } } ... waiting for commit read sets 2025-06-25T15:01:55.604185Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyksr7613ssrzcgh6hq26hfh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDMzMTBiMTEtZDc4OTAzNjUtYWJhMzdmMmQtMWNhNTI4MDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... sending immediate upsert ... waiting for immediate propose 2025-06-25T15:01:55.689905Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyksr797cv5x83zw4wt30y98, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDk1MGRjODEtZWExZTJkMjAtYmU2ZjM2ZWUtYTVjYzRmMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... immediate upsert is blocked 2025-06-25T15:01:55.701741Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_OVERLOADED;details=Rejecting immediate write tx 281474976715666 because datashard 72075186224037889 is restarting;tx_id=281474976715666; 2025-06-25T15:01:55.712831Z node 1 :KQP_COMPUTE WARN: kqp_write_actor.cpp:734: SelfId: [1:1140:2782], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [1:1019:2782]Got OVERLOADED for table `/Root/table-1`. ShardID=72075186224037889, Sink=[1:1140:2782]. Ignored this error.{
: Error: Rejecting immediate write tx 281474976715666 because datashard 72075186224037889 is restarting, code: 2006 } 2025-06-25T15:01:55.713557Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:1133:2782], SessionActorId: [1:1019:2782], statusCode=OVERLOADED. Issue=
: Error: Kikimr cluster or one of its subsystems is overloaded. Tablet 72075186224037889 is overloaded. Table `/Root/table-1`., code: 2006
: Error: Rejecting immediate write tx 281474976715666 because datashard 72075186224037889 is restarting, code: 2006 . sessionActorId=[1:1019:2782]. isRollback=0 2025-06-25T15:01:55.714517Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=ZDk1MGRjODEtZWExZTJkMjAtYmU2ZjM2ZWUtYTVjYzRmMWY=, ActorId: [1:1019:2782], ActorState: ExecuteState, TraceId: 01jyksr797cv5x83zw4wt30y98, got TEvKqpBuffer::TEvError in ExecuteState, status: OVERLOADED send to: [1:1134:2782] from: [1:1133:2782] 2025-06-25T15:01:55.714743Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:1134:2782] TxId: 281474976715665. Ctx: { TraceId: 01jyksr797cv5x83zw4wt30y98, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDk1MGRjODEtZWExZTJkMjAtYmU2ZjM2ZWUtYTVjYzRmMWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. OVERLOADED: {
: Error: Kikimr cluster or one of its subsystems is overloaded. Tablet 72075186224037889 is overloaded. Table `/Root/table-1`., code: 2006 subissue: {
: Error: Rejecting immediate write tx 281474976715666 because datashard 72075186224037889 is restarting, code: 2006 } } 2025-06-25T15:01:55.718727Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZDk1MGRjODEtZWExZTJkMjAtYmU2ZjM2ZWUtYTVjYzRmMWY=, ActorId: [1:1019:2782], ActorState: ExecuteState, TraceId: 01jyksr797cv5x83zw4wt30y98, Create QueryResponse for error on request, msg: 2025-06-25T15:01:55.719853Z node 1 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1103: SelfId: [1:1106:2784], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [1:1021:2784]TEvDeliveryProblem was received from tablet: 72075186224037889 2025-06-25T15:01:55.719961Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [1:1096:2784], SessionActorId: [1:1021:2784], statusCode=UNDETERMINED. Issue=
: Error: State of operation is unknown. Error writing to table `/Root/table-1`. Transaction state unknown for tablet 72075186224037889., code: 2026 . sessionActorId=[1:1021:2784]. isRollback=0 2025-06-25T15:01:55.720367Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=1&id=ZDMzMTBiMTEtZDc4OTAzNjUtYWJhMzdmMmQtMWNhNTI4MDc=, ActorId: [1:1021:2784], ActorState: ExecuteState, TraceId: 01jyksr7613ssrzcgh6hq26hfh, got TEvKqpBuffer::TEvError in ExecuteState, status: UNDETERMINED send to: [1:1097:2784] from: [1:1096:2784] 2025-06-25T15:01:55.720964Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:1097:2784] TxId: 281474976715664. Ctx: { TraceId: 01jyksr7613ssrzcgh6hq26hfh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDMzMTBiMTEtZDc4OTAzNjUtYWJhMzdmMmQtMWNhNTI4MDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNDETERMINED: {
: Error: State of operation is unknown. Error writing to table `/Root/table-1`. Transaction state unknown for tablet 72075186224037889., code: 2026 } 2025-06-25T15:01:55.721224Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZDMzMTBiMTEtZDc4OTAzNjUtYWJhMzdmMmQtMWNhNTI4MDc=, ActorId: [1:1021:2784], ActorState: ExecuteState, TraceId: 01jyksr7613ssrzcgh6hq26hfh, Create QueryResponse for error on request, msg: 2025-06-25T15:01:56.070215Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyksr7hs3hw091yrhq5sgsh9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDUxNjJkY2MtOTQzYzA4MTctOGY0ZTc2MzctNGMzMDM4Y2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } } 2025-06-25T15:01:59.474810Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:59.474989Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:59.475150Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f1b/r3tmp/tmp7qNdkB/pdisk_1.dat 2025-06-25T15:01:59.722329Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T15:01:59.724020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:59.747552Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:59.748466Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750863716585630 != 1750863716585634 2025-06-25T15:01:59.795379Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:59.795513Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:59.807235Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:59.888823Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:00.191820Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:00.306680Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:00.510661Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:784:2636], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:00.510769Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:795:2641], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:00.510864Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:00.516159Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:00.669180Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:798:2644], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:02:00.705005Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:854:2681] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:00.761098Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyksrc1w73ekbmf32zgmaw09, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWIwZmQ4OWYtNGExOWY1ODYtZDljZWYxYTItOWNlYjMzYWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:00.834806Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyksrcab9k0c10zebfwr279t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZThhYWNhZGItYTAyZTRiMzMtYmQ4ZjdlM2UtYjI0MTE1NWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... waiting for readsets 2025-06-25T15:02:01.330375Z node 2 :KQP_COMPUTE WARN: kqp_write_actor.cpp:1103: SelfId: [2:940:2720], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [2:916:2720]TEvDeliveryProblem was received from tablet: 72075186224037888 2025-06-25T15:02:01.330510Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [2:930:2720], SessionActorId: [2:916:2720], statusCode=UNDETERMINED. Issue=
: Error: State of operation is unknown. Error writing to table `/Root/table-1`. Transaction state unknown for tablet 72075186224037888., code: 2026 . sessionActorId=[2:916:2720]. isRollback=0 2025-06-25T15:02:01.330716Z node 2 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715664, task: 1, CA Id [2:966:2758]. Got EvDeliveryProblem, TabletId: 72075186224037888, NotDelivered: 0 2025-06-25T15:02:01.331135Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=2&id=ODZhZmYxMi1lYmI2NTc2YS0zOGIyZjExYy1mYjk4MTAzMg==, ActorId: [2:916:2720], ActorState: ExecuteState, TraceId: 01jyksrccma1x2cmwth7afemaw, got TEvKqpBuffer::TEvError in ExecuteState, status: UNDETERMINED send to: [2:931:2720] from: [2:930:2720] 2025-06-25T15:02:01.331763Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:931:2720] TxId: 281474976715663. Ctx: { TraceId: 01jyksrccma1x2cmwth7afemaw, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODZhZmYxMi1lYmI2NTc2YS0zOGIyZjExYy1mYjk4MTAzMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNDETERMINED: {
: Error: State of operation is unknown. Error writing to table `/Root/table-1`. Transaction state unknown for tablet 72075186224037888., code: 2026 } 2025-06-25T15:02:01.332116Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ODZhZmYxMi1lYmI2NTc2YS0zOGIyZjExYy1mYjk4MTAzMg==, ActorId: [2:916:2720], ActorState: ExecuteState, TraceId: 01jyksrccma1x2cmwth7afemaw, Create QueryResponse for error on request, msg: { items { uint32_value: 1 } items { uint32_value: 10 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } >> DataShardOutOfOrder::UncommittedReadSetAck [GOOD] >> DataShardTxOrder::ZigZag_oo [GOOD] >> DataShardOutOfOrder::TestShardSnapshotReadNoEarlyReply [GOOD] >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLock+EvWrite |92.3%| [TA] $(B)/ydb/core/statistics/database/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.3%| [TA] {RESULT} $(B)/ydb/core/statistics/database/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ZigZag_oo [GOOD] Test command err: 2025-06-25T15:01:43.654368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:43.654434Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:43.657542Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:43.681767Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:43.682248Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:43.682573Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:43.701487Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:43.757514Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:43.757708Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:43.760903Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:43.760987Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:43.761039Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:43.763160Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:43.763277Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:43.763351Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:43.840900Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:43.873341Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:43.876471Z node 1 
:TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:43.876688Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:43.876747Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:43.876787Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:43.876821Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:43.877122Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.878082Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.880502Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:43.880603Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:43.880652Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:43.880679Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:43.880711Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:43.880737Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:43.880772Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:43.880797Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:43.880825Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:43.880893Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.880920Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.880952Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:43.886683Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\004\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:43.886784Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:43.886857Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:43.887054Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:43.887143Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:43.887198Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:43.887260Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:43.887295Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:43.887327Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:43.887358Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:43.887738Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:43.887780Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:43.887834Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:43.887877Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:43.887941Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:43.887977Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:43.888019Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:43.888060Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:43.888085Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:43.902150Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:43.902220Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:43.902258Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:43.902319Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:43.902383Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:43.902840Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.902889Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event 
TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:43.902944Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:43.903096Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:43.903122Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:43.903240Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:43.903280Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:43.903307Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:43.903341Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:43.906642Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:43.906704Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:43.906875Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.906918Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:43.906966Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:43.907004Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:43.907039Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:43.907096Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:43.907131Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
000016:45] at 9437185 is Executed 2025-06-25T15:02:02.973820Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit WaitForPlan 2025-06-25T15:02:02.973852Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit PlanQueue 2025-06-25T15:02:02.973992Z node 6 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 45 at step 1000016 at tablet 9437185 { Transactions { TxId: 45 AckTo { RawX1: 103 RawX2: 25769805912 } } Step: 1000016 MediatorID: 0 TabletID: 9437185 } 2025-06-25T15:02:02.974023Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-25T15:02:02.974169Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [6:350:2315], Recipient [6:350:2315]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.974203Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.974242Z node 6 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437185 2025-06-25T15:02:02.974274Z node 6 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:02:02.974300Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-25T15:02:02.974331Z node 6 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000016:45] in PlanQueue unit at 9437185 2025-06-25T15:02:02.974360Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit PlanQueue 2025-06-25T15:02:02.974389Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:02:02.974416Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit PlanQueue 2025-06-25T15:02:02.974440Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit LoadTxDetails 2025-06-25T15:02:02.974467Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit LoadTxDetails 2025-06-25T15:02:02.975086Z node 6 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437185 loaded tx from db 1000016:45 keys extracted: 2 2025-06-25T15:02:02.975128Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:02:02.975157Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit LoadTxDetails 2025-06-25T15:02:02.975182Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit FinalizeDataTxPlan 2025-06-25T15:02:02.975211Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit FinalizeDataTxPlan 2025-06-25T15:02:02.975246Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:02:02.975269Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit FinalizeDataTxPlan 2025-06-25T15:02:02.975292Z node 6 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit BuildAndWaitDependencies 2025-06-25T15:02:02.975316Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit BuildAndWaitDependencies 2025-06-25T15:02:02.975359Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1000016:45] is the new logically complete end at 9437185 2025-06-25T15:02:02.975391Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1000016:45] is the new logically incomplete end at 9437185 2025-06-25T15:02:02.975417Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1000016:45] at 9437185 2025-06-25T15:02:02.975453Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:02:02.975477Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit BuildAndWaitDependencies 2025-06-25T15:02:02.975500Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit BuildDataTxOutRS 2025-06-25T15:02:02.975521Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit BuildDataTxOutRS 2025-06-25T15:02:02.975567Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:02:02.975592Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit BuildDataTxOutRS 2025-06-25T15:02:02.975615Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit StoreAndSendOutRS 2025-06-25T15:02:02.975640Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit StoreAndSendOutRS 2025-06-25T15:02:02.975665Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:02:02.975687Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit StoreAndSendOutRS 2025-06-25T15:02:02.975708Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit PrepareDataTxInRS 2025-06-25T15:02:02.975731Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit PrepareDataTxInRS 2025-06-25T15:02:02.975760Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:02:02.975782Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit PrepareDataTxInRS 2025-06-25T15:02:02.975805Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit LoadAndWaitInRS 2025-06-25T15:02:02.975830Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit LoadAndWaitInRS 2025-06-25T15:02:02.975854Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:02:02.975877Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit LoadAndWaitInRS 2025-06-25T15:02:02.975900Z node 6 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit ExecuteDataTx 2025-06-25T15:02:02.975921Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit ExecuteDataTx 2025-06-25T15:02:02.976273Z node 6 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000016:45] at tablet 9437185 with status COMPLETE 2025-06-25T15:02:02.976339Z node 6 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000016:45] at 9437185: {NSelectRow: 2, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 2, SelectRowBytes: 16, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T15:02:02.976387Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:02:02.976414Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit ExecuteDataTx 2025-06-25T15:02:02.976438Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit CompleteOperation 2025-06-25T15:02:02.976464Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit CompleteOperation 2025-06-25T15:02:02.976640Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is DelayComplete 2025-06-25T15:02:02.976670Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit CompleteOperation 2025-06-25T15:02:02.976711Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit CompletedOperations 2025-06-25T15:02:02.976739Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit CompletedOperations 2025-06-25T15:02:02.976771Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:02:02.976795Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit CompletedOperations 2025-06-25T15:02:02.976819Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000016:45] at 9437185 has finished 2025-06-25T15:02:02.976848Z node 6 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:02.976873Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-25T15:02:02.976899Z node 6 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-25T15:02:02.976926Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-25T15:02:02.990632Z node 6 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437185 step# 1000016 txid# 45} 2025-06-25T15:02:02.990712Z node 6 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437185 step# 1000016} 2025-06-25T15:02:02.990803Z node 6 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-25T15:02:02.990860Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete 
execution for [1000016:45] at 9437185 on unit CompleteOperation 2025-06-25T15:02:02.990938Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000016 : 45] from 9437185 at tablet 9437185 send result to client [6:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:02:02.990996Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-25T15:02:02.991511Z node 6 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000016 txid# 45} 2025-06-25T15:02:02.991551Z node 6 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000016} 2025-06-25T15:02:02.991594Z node 6 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:02.991625Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000016:45] at 9437184 on unit CompleteOperation 2025-06-25T15:02:02.991668Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000016 : 45] from 9437184 at tablet 9437184 send result to client [6:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:02:02.991705Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestSnapshotReadPriority [GOOD] Test command err: 2025-06-25T15:01:57.469334Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:57.469490Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:57.469558Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ed4/r3tmp/tmpGzPQNP/pdisk_1.dat 2025-06-25T15:01:57.791192Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:57.794490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:57.806592Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 1 HANDLE EvProposeTransaction marker# C0 2025-06-25T15:01:57.806668Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 1 step# 500 Status# 16 SEND to# [1:373:2367] Proxy marker# C1 2025-06-25T15:01:57.836266Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:57.846204Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863714907618 != 1750863714907622 2025-06-25T15:01:57.894436Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:57.894552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:57.906055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:57.980300Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 1 has been planned 2025-06-25T15:01:57.980409Z node 1 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 1 for mediator 72057594046382081 tablet 72057594046644480 2025-06-25T15:01:57.980634Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:268: Coordinator# 72057594046316545 scheduling step 1000 in 0.500000s at 0.950000s 2025-06-25T15:01:57.980952Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 500, txid# 1 marker# C2 2025-06-25T15:01:57.981020Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 1 stepId# 500 Status# 17 SEND EvProposeTransactionStatus to# [1:373:2367] Proxy 2025-06-25T15:01:57.983668Z node 1 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:397: tablet# 72057594046316545 HANDLE EvMediatorQueueConfirmations MediatorId# 72057594046382081 2025-06-25T15:01:57.983793Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:84: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 tablet 72057594046644480 removed=1 2025-06-25T15:01:57.983835Z node 1 
:TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:91: at tablet# 72057594046316545 [2:6] persistent tx 1 for mediator 72057594046382081 acknowledged 2025-06-25T15:01:57.983884Z node 1 :TX_COORDINATOR DEBUG: coordinator__mediators_confirmations.cpp:99: at tablet# 72057594046316545 [2:6] persistent tx 1 acknowledged 2025-06-25T15:01:57.988259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:58.025521Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:617:2525], Recipient [1:626:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:58.026561Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:617:2525], Recipient [1:626:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:58.026988Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:626:2531] 2025-06-25T15:01:58.027232Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:58.070762Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:617:2525], Recipient [1:626:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:58.071459Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:58.071553Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:58.073178Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:01:58.073251Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:58.073315Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:58.073660Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:58.073787Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:58.073866Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:642:2531] in generation 1 2025-06-25T15:01:58.074250Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:58.102120Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:58.102318Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:58.102423Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:644:2541] 2025-06-25T15:01:58.102469Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:58.102510Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:58.102543Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:58.102738Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:626:2531], Recipient [1:626:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:58.102791Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:58.103173Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:01:58.103268Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:01:58.103329Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:58.103365Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:58.103418Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:01:58.103466Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:58.103502Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:58.103534Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:01:58.103575Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:58.103956Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:633:2535], Recipient [1:626:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:58.103999Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:58.104064Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:622:2528], serverId# [1:633:2535], sessionId# [0:0:0] 2025-06-25T15:01:58.104141Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:633:2535] 2025-06-25T15:01:58.104181Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:58.104275Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:01:58.104513Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:01:58.104586Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:01:58.104665Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:01:58.104717Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:58.104753Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: 
Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:01:58.104785Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:01:58.104815Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:58.105091Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:58.105134Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:01:58.105166Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:01:58.105198Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:58.105265Z node 1 :TX_DATASHARD TRACE: datashard_ ... MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2025-06-25T15:02:02.604519Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:02:02.604574Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CheckRead 2025-06-25T15:02:02.604640Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-25T15:02:02.604672Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:02:02.604702Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:02:02.604730Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:02:02.604758Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 72075186224037888 2025-06-25T15:02:02.604786Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-25T15:02:02.604804Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:02:02.604818Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:02:02.604833Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:02:02.604913Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 3500 TxId: 18446744073709551615 } LockTxId: 281474976715682 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-25T15:02:02.605102Z node 1 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 
Acquired lock# 281474976715682, counter# 18446744073709551612 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T15:02:02.605203Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/18446744073709551615 2025-06-25T15:02:02.605237Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[1:1378:3030], 0} after executionsCount# 1 2025-06-25T15:02:02.605271Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[1:1378:3030], 0} sends rowCount# 5, bytes# 160, quota rows left# 996, quota bytes left# 5242720, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:02:02.605336Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[1:1378:3030], 0} finished in read 2025-06-25T15:02:02.605380Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-25T15:02:02.605399Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:02:02.605416Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:02:02.605433Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:02:02.605459Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-25T15:02:02.605474Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:02:02.605496Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037888 has finished 2025-06-25T15:02:02.605525Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:02:02.605592Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T15:02:02.606288Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [1:1378:3030], Recipient [1:1319:2995]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:02:02.606334Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 3 } items { uint32_value: 3 } }, { items { uint32_value: 5 } items { uint32_value: 5 } }, { items { uint32_value: 7 } items { uint32_value: 7 } }, { items { uint32_value: 9 } items { uint32_value: 9 } } 2025-06-25T15:02:02.718622Z node 1 :TX_COORDINATOR DEBUG: coordinator__acquire_read_step.cpp:97: tablet# 72057594046316545 HANDLE TEvAcquireReadStep 2025-06-25T15:02:02.718725Z node 1 :TX_COORDINATOR TRACE: coordinator_impl.cpp:293: Coordinator# 72057594046316545 scheduling step 4500 in 0.499900s at 4.450000s 2025-06-25T15:02:02.719988Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715685. 
Ctx: { TraceId: 01jyksre3j639f548zf2ytkcp2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzQwOThhODUtMmFkOGI4MDEtYTM0ZjZhNmMtOGI5ODI4YjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:02.721808Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [1:1402:3047], Recipient [1:1319:2995]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715685 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2025-06-25T15:02:02.722019Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:02:02.722094Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2025-06-25T15:02:02.722184Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T15:02:02.722226Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:02:02.722265Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:02:02.722300Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:02:02.722341Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2025-06-25T15:02:02.722387Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T15:02:02.722415Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:02:02.722442Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:02:02.722463Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:02:02.722575Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 4000 TxId: 18446744073709551615 } LockTxId: 281474976715685 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 1 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-25T15:02:02.722885Z node 1 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715685, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T15:02:02.722944Z node 1 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v4000/18446744073709551615 2025-06-25T15:02:02.722992Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[1:1402:3047], 0} after executionsCount# 1 2025-06-25T15:02:02.723046Z node 1 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[1:1402:3047], 0} sends rowCount# 6, bytes# 192, quota rows left# 995, quota bytes left# 5242688, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:02:02.723129Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[1:1402:3047], 0} finished in read 2025-06-25T15:02:02.723204Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T15:02:02.723232Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:02:02.723257Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:02:02.723282Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:02:02.723319Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T15:02:02.723341Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:02:02.723371Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037888 has finished 2025-06-25T15:02:02.723415Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:02:02.723514Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T15:02:02.723720Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [1:63:2110], Recipient [1:1319:2995]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715685 LockNode: 1 Status: STATUS_SUBSCRIBED 2025-06-25T15:02:02.724538Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [1:1402:3047], Recipient [1:1319:2995]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:02:02.724596Z node 1 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 3 } items { uint32_value: 3 } }, { items { uint32_value: 5 } items { uint32_value: 5 } }, { items { uint32_value: 7 } items { uint32_value: 7 } }, { items { uint32_value: 9 } items { uint32_value: 9 } }, { items { uint32_value: 11 } items { uint32_value: 11 } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::UncommittedReadSetAck [GOOD] Test command err: 2025-06-25T15:01:56.952742Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:56.953155Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:56.953390Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:56.953528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T15:01:56.953850Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:56.953888Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f0c/r3tmp/tmpjbQ4gw/pdisk_1.dat 2025-06-25T15:01:57.268513Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:57.408325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:57.535892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:57.535987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:57.541358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:57.541478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:57.556275Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:01:57.556928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:57.557296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:57.850501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:57.928437Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [2:1176:2341], Recipient [2:1202:2353]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:57.932446Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [2:1176:2341], Recipient [2:1202:2353]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:57.932949Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1202:2353] 2025-06-25T15:01:57.933177Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 
2025-06-25T15:01:57.983003Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [2:1176:2341], Recipient [2:1202:2353]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:57.987904Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:57.988022Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:57.989759Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:01:57.989846Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:57.989920Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:57.990252Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:57.991019Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:57.991130Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:1226:2353] in generation 1 2025-06-25T15:01:57.994207Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:58.028677Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:58.028906Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:58.029014Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:1230:2370] 2025-06-25T15:01:58.029051Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:58.029086Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:01:58.029122Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:58.029375Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:1202:2353], Recipient [2:1202:2353]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:58.029448Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:58.029812Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:01:58.029912Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:01:58.030041Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:58.030083Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:58.030156Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:01:58.030200Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:58.030234Z node 2 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:58.030264Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:01:58.030314Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:58.100626Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1234:2371], Recipient [2:1202:2353]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:58.100686Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:58.100757Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1186:2731], serverId# [2:1234:2371], sessionId# [0:0:0] 2025-06-25T15:01:58.101079Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:765:2430], Recipient [2:1234:2371] 2025-06-25T15:01:58.101126Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:58.101240Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:01:58.101505Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:01:58.101580Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:01:58.101685Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:01:58.101739Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:01:58.101782Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:01:58.101819Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:01:58.101879Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:01:58.102166Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:58.102221Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:01:58.102261Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:01:58.102298Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:01:58.102349Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:01:58.102381Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution 
plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:01:58.102417Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:01:58.102450Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:01:58.102474Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:58.106523Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, ... 224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-25T15:02:02.106258Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:02:02.106294Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715667 2025-06-25T15:02:02.106351Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2545 txid# 281474976715667 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-25T15:02:02.106720Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T15:02:02.107144Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [2:1979:2468], Recipient [2:2105:2498]: {TEvReadSet step# 2696 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-25T15:02:02.107180Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:02:02.107210Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715669 2025-06-25T15:02:02.107259Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2696 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-25T15:02:02.107378Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [2:1979:2468], Recipient [2:2105:2498]: {TEvReadSet step# 2696 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-25T15:02:02.107403Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:02:02.107427Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037888 dest 72075186224037890 producer 72075186224037888 txId 281474976715669 2025-06-25T15:02:02.107464Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2696 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 
ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-25T15:02:02.107780Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [1:2039:3230], Recipient [2:2233:2540] 2025-06-25T15:02:02.107816Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:02:02.107851Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715668 2025-06-25T15:02:02.107893Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2546 txid# 281474976715668 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-25T15:02:02.108547Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [1:2039:3230], Recipient [2:2233:2540] 2025-06-25T15:02:02.108580Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:02:02.108612Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715669 2025-06-25T15:02:02.108807Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T15:02:02.109083Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [1:2039:3230], Recipient [2:2233:2540] 2025-06-25T15:02:02.109116Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:02:02.109151Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715669 2025-06-25T15:02:02.109276Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2696 txid# 281474976715669 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-25T15:02:02.111989Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 2696 txid# 281474976715669 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-25T15:02:02.112211Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:755: Complete volatile write [2696 : 281474976715669] from 72075186224037890 at tablet 72075186224037890 send result to client [1:2189:3293] 2025-06-25T15:02:02.112873Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T15:02:02.116974Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-25T15:02:02.117203Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:2105:2498], Recipient [2:1979:2468]: {TEvReadSet step# 2545 txid# 281474976715667 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletConsumer# 72075186224037890 Flags# 0 Seqno# 1} 2025-06-25T15:02:02.117254Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3152: 
StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:02.117295Z node 2 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715667 2025-06-25T15:02:02.120739Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-25T15:02:02.120827Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Reply at 72075186224037890 {TEvReadSet step# 2696 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-25T15:02:02.120884Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-25T15:02:02.121040Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [2:2105:2498], Recipient [2:1979:2468]: {TEvReadSet step# 2696 txid# 281474976715669 TabletSource# 72075186224037890 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-25T15:02:02.121087Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:02:02.121128Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037888 source 72075186224037890 dest 72075186224037888 producer 72075186224037890 txId 281474976715669 2025-06-25T15:02:02.121212Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037888 got read set: {TEvReadSet step# 2696 txid# 281474976715669 TabletSource# 72075186224037890 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-25T15:02:02.121259Z node 2 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 2696:281474976715669 at 72075186224037888 2025-06-25T15:02:02.121326Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-25T15:02:02.121458Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-25T15:02:02.121743Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:2105:2498], Recipient [1:2039:3230] 2025-06-25T15:02:02.121776Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:02.121811Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715668 2025-06-25T15:02:02.122465Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-25T15:02:02.122524Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Reply at 72075186224037890 {TEvReadSet step# 2696 txid# 281474976715669 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-25T15:02:02.122681Z node 2 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-25T15:02:02.122859Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:2105:2498], Recipient [2:1979:2468]: {TEvReadSet step# 2696 txid# 281474976715669 TabletSource# 72075186224037888 TabletDest# 
72075186224037890 SetTabletConsumer# 72075186224037890 Flags# 0 Seqno# 3} 2025-06-25T15:02:02.122900Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:02.122934Z node 2 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037890 consumer 72075186224037890 txId 281474976715669 2025-06-25T15:02:02.123211Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [2:2105:2498], Recipient [1:2039:3230] 2025-06-25T15:02:02.123254Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:02:02.123299Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037890 dest 72075186224037889 producer 72075186224037890 txId 281474976715669 2025-06-25T15:02:02.123373Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 2696 txid# 281474976715669 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-25T15:02:02.123418Z node 1 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 2696:281474976715669 at 72075186224037889 2025-06-25T15:02:02.123483Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-25T15:02:02.123671Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [2:2105:2498], Recipient [1:2039:3230] 2025-06-25T15:02:02.123706Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:02.123742Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715669 >> DataShardTxOrder::RandomPoints_ReproducerDelayData1 [GOOD] >> RetryPolicy::TWriteSession_TestBrokenPolicy [GOOD] >> RetryPolicy::TWriteSession_RetryOnTargetCluster |92.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> DataShardReadIteratorConsistency::LocalSnapshotReadWithPlanQueueRace [GOOD] >> DataShardReadIteratorConsistency::LocalSnapshotReadHasRequiredDependencies >> DataShardReadIteratorSysTables::ShouldRead [GOOD] >> DataShardReadIteratorSysTables::ShouldNotReadUserTableUsingLocalTid >> DataShardReadIterator::ShouldReverseReadMultipleKeys [GOOD] >> DataShardReadIterator::ShouldReverseReadMultipleKeysOneByOne >> DataShardReadIterator::ShouldReadRangeCellVec [GOOD] >> DataShardReadIterator::ShouldReadRangeArrow >> BackupRestore::PrefixedVectorIndex [GOOD] >> BackupRestore::RestoreReplicationThatDoesNotUseSecret >> DataShardReadIterator::ShouldHandleReadAck [GOOD] >> DataShardReadIterator::ShouldHandleOutOfOrderReadAck >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey-EvWrite >> DataShardReadIterator::ShouldReadRangeInclusiveEndsCellVec [GOOD] >> 
DataShardReadIterator::ShouldReadRangeInclusiveEndsArrow >> DataShardReadIterator::ShouldRangeReadReverseLeftInclusive [GOOD] >> DataShardReadIterator::ShouldRangeReadReverseLeftNonInclusive ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestLateKqpScanAfterColumnDrop-UseSink [GOOD] Test command err: 2025-06-25T15:01:54.871558Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:54.871699Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:54.871759Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001eff/r3tmp/tmpsOUyhb/pdisk_1.dat 2025-06-25T15:01:55.184247Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:55.186496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:55.224009Z node 1 :KQP_RESOURCE_MANAGER INFO: kqp_rm_service.cpp:782: Updated table service config: ComputeActorsCount: 10000 ChannelBufferSize: 8388608 MkqlLightProgramMemoryLimit: 1048576 MkqlHeavyProgramMemoryLimit: 31457280 QueryMemoryLimit: 32212254720 PublishStatisticsIntervalSec: 2 MaxTotalChannelBuffersSize: 2147483648 MinChannelBufferSize: 2048 2025-06-25T15:01:55.224102Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T15:01:55.224158Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 7 2025-06-25T15:01:55.224755Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_resource_info_exchanger.cpp:382: Updated table service config. 
2025-06-25T15:01:55.224916Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:55.228841Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863712199235 != 1750863712199239 2025-06-25T15:01:55.274058Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T15:01:55.275105Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T15:01:55.275668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:55.275792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:55.287111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:55.365063Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T15:01:55.365115Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T15:01:55.365273Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:603:2511] 2025-06-25T15:01:55.486218Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:603:2511] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value1" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value2" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T15:01:55.486331Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:603:2511] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T15:01:55.487008Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:603:2511] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T15:01:55.487103Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:603:2511] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T15:01:55.487404Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:603:2511] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T15:01:55.487611Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:603:2511] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T15:01:55.487753Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:603:2511] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T15:01:55.489619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo 
unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:55.490111Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:603:2511] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T15:01:55.490812Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:603:2511] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T15:01:55.490877Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:603:2511] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T15:01:55.522544Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:619:2526], Recipient [1:628:2532]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:55.523775Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:619:2526], Recipient [1:628:2532]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:55.524251Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:628:2532] 2025-06-25T15:01:55.524520Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:55.570648Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:619:2526], Recipient [1:628:2532]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:55.571494Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:55.571644Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:55.573384Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:01:55.573473Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:01:55.573537Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:01:55.573911Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:55.574054Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:55.574151Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:644:2532] in generation 1 2025-06-25T15:01:55.584970Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:55.640374Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:01:55.640583Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:55.640687Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:646:2542] 2025-06-25T15:01:55.640720Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:01:55.640752Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: 
WaitScheme 2025-06-25T15:01:55.640784Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:01:55.641019Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:628:2532], Recipient [1:628:2532]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:55.641075Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:55.641391Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:01:55.641473Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:01:55.641570Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:01:55.641619Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:55.641680Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:01:55.641713Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:01:55.641743Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:01:55.641774Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:01:55.641812Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:01:55.641925Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:635:2536], Recipient [1:628:2532]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:55.641958Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:55.641997Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:624:2529], serverId# [1:635:2536], sessionId# [0:0:0] 2025-06-25T15:01:55.642061Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:635:2536] 2025-06-25T15:01:55.642116Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:55.642223Z node 1 :TX_DATAS ... node 2 :KQP_COMPUTE WARN: kqp_scan_fetcher_actor.cpp:170: SelfId: [2:843:2673]. Got EvScanError scan state: , status: ABORTED, reason:
: Error: Table '/Root/table-1' scheme changed., code: 2028 , tablet id: 72075186224037888, actor_id: [2:627:2532] 2025-06-25T15:02:03.479136Z node 2 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:674: SelfId: [2:843:2673]. Enqueue for resolve 72075186224037888 2025-06-25T15:02:03.479178Z node 2 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:383;event=scanner_finished;tablet_id=72075186224037888;stop_shard=1; 2025-06-25T15:02:03.479217Z node 2 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:96;event=stop_scanner;actor_id=NO_VALUE_OPTIONAL;message=;final_flag=1; 2025-06-25T15:02:03.479558Z node 2 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:666: SelfId: [2:843:2673]. Sending TEvResolveKeySet update for table '/Root/table-1', range: [(Uint32 : NULL) ; ()), attempt #1 2025-06-25T15:02:03.479706Z node 2 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:253: SelfId: [2:843:2673]. Received TEvResolveKeySetResult update for table '/Root/table-1' 2025-06-25T15:02:03.479737Z node 2 :KQP_COMPUTE ERROR: kqp_scan_fetcher_actor.cpp:257: SelfId: [2:843:2673]. Resolve request failed for table '/Root/table-1', ErrorCount# 1 2025-06-25T15:02:03.479819Z node 2 :KQP_COMPUTE DEBUG: log.h:466: kqp_scan_compute_actor.cpp:168 :TEvTerminateFromFetcher: [2:843:2673]/[2:841:2671] 2025-06-25T15:02:03.479900Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:841:2671], TxId: 281474976715662, task: 1. Ctx: { TraceId : 01jyksrdw7e36r8hfsea3nd9ez. SessionId : ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : . PoolId : default. }. InternalError: SCHEME_ERROR KIKIMR_SCHEME_MISMATCH: {
: Error: Table '/Root/table-1' scheme changed., code: 2028 }. 2025-06-25T15:02:03.480057Z node 2 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715662, task: 1. pass away 2025-06-25T15:02:03.480135Z node 2 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715662;task_id=1;success=0;message={
: Error: COMPUTE_STATE_FAILURE }; 2025-06-25T15:02:03.482191Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715662, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-25T15:02:03.482354Z node 2 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_fetcher_actor.cpp:106;event=TEvTerminateFromCompute;sender=[2:841:2671];info={
: Error: COMPUTE_STATE_FAILURE }; 2025-06-25T15:02:03.482431Z node 2 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:313;event=abort_all_scanners;error_message=Send abort execution from compute actor, message: {
: Error: COMPUTE_STATE_FAILURE }; 2025-06-25T15:02:03.482623Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:893: Schedule publish at 1970-01-01T00:00:04.000000Z, after 1.550000s 2025-06-25T15:02:03.482873Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [2:836:2645] TxId: 281474976715662. Ctx: { TraceId: 01jyksrdw7e36r8hfsea3nd9ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:841:2671], task: 1, state: COMPUTE_STATE_FAILURE, stats: { CpuTimeUs: 171886 Tasks { TaskId: 1 CpuTimeUs: 170561 Tables { TablePath: "/Root/table-1" } ComputeCpuTimeUs: 12 BuildCpuTimeUs: 170549 Sources { IngressName: "CS" Ingress { } } HostName: "ghrun-kqfvx6aroe" NodeId: 2 CreateTimeMs: 1750863722908 CurrentWaitInputTimeUs: 122782 UpdateTimeMs: 1750863723479 } MaxMemoryUsage: 1048576 } 2025-06-25T15:02:03.482978Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715662. Ctx: { TraceId: 01jyksrdw7e36r8hfsea3nd9ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:841:2671] 2025-06-25T15:02:03.483078Z node 2 :KQP_EXECUTER INFO: kqp_executer_impl.h:1951: ActorId: [2:836:2645] TxId: 281474976715662. Ctx: { TraceId: 01jyksrdw7e36r8hfsea3nd9ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. task: 1, does not have the CA id yet or is already complete 2025-06-25T15:02:03.483162Z node 2 :KQP_EXECUTER INFO: kqp_executer_impl.h:1946: ActorId: [2:836:2645] TxId: 281474976715662. Ctx: { TraceId: 01jyksrdw7e36r8hfsea3nd9ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. aborting compute actor execution, message: {
: Error: Terminate execution }, compute actor: [2:842:2672], task: 2 2025-06-25T15:02:03.483305Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:836:2645] TxId: 281474976715662. Ctx: { TraceId: 01jyksrdw7e36r8hfsea3nd9ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:02:03.483402Z node 2 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:149: SelfId: [2:842:2672], TxId: 281474976715662, task: 2. Ctx: { TraceId : 01jyksrdw7e36r8hfsea3nd9ez. SessionId : ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. CA StateFunc 271646735 2025-06-25T15:02:03.483477Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:842:2672], TxId: 281474976715662, task: 2. Ctx: { TraceId : 01jyksrdw7e36r8hfsea3nd9ez. SessionId : ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : . }. Handle abort execution event from: [2:836:2645], status: SCHEME_ERROR, reason: {
: Error: Terminate execution } 2025-06-25T15:02:03.483563Z node 2 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715662, task: 2. pass away 2025-06-25T15:02:03.483659Z node 2 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:67;problem=finish_compute_actor;tx_id=281474976715662;task_id=2;success=0;message={
: Error: COMPUTE_STATE_FAILURE }; 2025-06-25T15:02:03.486054Z node 2 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715662, taskId: 2. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-25T15:02:03.486274Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=, ActorId: [2:810:2645], ActorState: ExecuteState, TraceId: 01jyksrdw7e36r8hfsea3nd9ez, Create QueryResponse for error on request, msg: 2025-06-25T15:02:03.486680Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [2:61:2108] Handle TEvExecuteKqpTransaction 2025-06-25T15:02:03.486734Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [2:61:2108] TxId# 281474976715664 ProcessProposeKqpTransaction 2025-06-25T15:02:03.487234Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 2000, txId: 281474976715661] shutting down 2025-06-25T15:02:03.487322Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [2:61:2108] Handle TEvProposeTransaction 2025-06-25T15:02:03.487369Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [2:61:2108] TxId# 0 ProcessProposeTransaction 2025-06-25T15:02:03.487463Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:289: actor# [2:61:2108] Cookie# 0 userReqId# "" txid# 0 reqId# [2:878:2704] SnapshotReq marker# P0 2025-06-25T15:02:03.487947Z node 2 :TX_PROXY DEBUG: resolvereq.cpp:152: Actor# [2:881:2704] txid# 0 HANDLE EvNavigateKeySetResult TResolveTablesActor marker# P1 ErrorCount# 0 2025-06-25T15:02:03.488125Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715664. Resolved key sets: 0 2025-06-25T15:02:03.488304Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyksrdw7e36r8hfsea3nd9ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:03.488386Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715664. Ctx: { TraceId: 01jyksrdw7e36r8hfsea3nd9ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-25T15:02:03.488462Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [2:877:2645] TxId: 281474976715664. Ctx: { TraceId: 01jyksrdw7e36r8hfsea3nd9ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-25T15:02:03.488587Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [2:877:2645] TxId: 281474976715664. Ctx: { TraceId: 01jyksrdw7e36r8hfsea3nd9ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:02:03.488649Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [2:877:2645] TxId: 281474976715664. 
Ctx: { TraceId: 01jyksrdw7e36r8hfsea3nd9ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-25T15:02:03.488711Z node 2 :TX_PROXY DEBUG: resolvereq.cpp:272: Actor# [2:881:2704] txid# 0 HANDLE EvResolveKeySetResult TResolveTablesActor marker# P2 ErrorCount# 0 2025-06-25T15:02:03.488817Z node 2 :TX_PROXY DEBUG: snapshotreq.cpp:1453: Actor# [2:878:2704] SEND TEvDiscardVolatileSnapshotRequest to datashard 72075186224037888 marker# P3 2025-06-25T15:02:03.489126Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 3, sender: [2:555:2481], selfId: [2:59:2106], source: [2:810:2645] 2025-06-25T15:02:03.489268Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553190, Sender [2:878:2704], Recipient [2:627:2532]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 2025-06-25T15:02:03.490151Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=2&id=NzgwZDA1YzQtNmE2NWZkZWMtM2JiZjIxMjMtOGFlMTBmNWI=, workerId: [2:810:2645], local sessions count: 0 Response { QueryIssues { message: "Table \'/Root/table-1\' scheme changed." issue_code: 2028 severity: 1 } QueryIssues { message: "Query invalidated on scheme/internal error during Scan execution" issue_code: 2019 severity: 1 } TxMeta { } } YdbStatus: ABORTED ConsumedRu: 390 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPoints_ReproducerDelayData1 [GOOD] Test command err: 2025-06-25T15:01:52.795745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:52.795781Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:52.796905Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:52.805789Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:52.806140Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:52.806355Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:52.814170Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:52.850516Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:52.850666Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:52.852184Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:52.852247Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:52.852291Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:52.852698Z node 1 
:TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:52.852777Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:52.852836Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:52.907396Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:52.934973Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:52.935155Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:52.935246Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:52.935288Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:52.935328Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:52.935359Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:52.935542Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:52.935593Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:52.935823Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:52.935897Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:52.935973Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:52.936003Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:52.936042Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:52.936073Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:52.936112Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:52.936142Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:52.936172Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:52.936249Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:52.936280Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:52.936339Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:52.938661Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 
269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nK\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\n \000Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:52.938733Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:52.938794Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:52.938976Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:52.939041Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:52.939087Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:52.939129Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:52.939162Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:52.939205Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:52.939235Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:52.939479Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:52.939503Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:52.939543Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:52.939581Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:52.939652Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:52.939680Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:52.939721Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:52.939748Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:52.939767Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:52.952345Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:52.952400Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:52.952436Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:52.952487Z node 1 :TX_DATASHARD TRACE: 
finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:52.952535Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:52.953019Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:52.953067Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:52.953114Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:52.953239Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:52.953265Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:52.953385Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:52.953414Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:52.953436Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:52.953459Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:52.955820Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:52.955868Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:52.955994Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:52.956026Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:52.956079Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:52.956113Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:52.956138Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:52.956164Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:52.956185Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100 ... 
DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-25T15:02:03.901643Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:03.901674Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:508] at 9437184 on unit CompleteOperation 2025-06-25T15:02:03.901713Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 508] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:02:03.901752Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 508 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 3} 2025-06-25T15:02:03.901773Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:03.901869Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-25T15:02:03.901887Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-25T15:02:03.901903Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-25T15:02:03.901932Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:03.901954Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:509] at 9437184 on unit CompleteOperation 2025-06-25T15:02:03.901984Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 509] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:02:03.902034Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 509 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 4} 2025-06-25T15:02:03.902060Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:03.902166Z node 1 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-25T15:02:03.902197Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:03.902223Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:510] at 9437184 on unit CompleteOperation 2025-06-25T15:02:03.902251Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 510] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:02:03.902285Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 510 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-25T15:02:03.902306Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:03.902400Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:03.902428Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:511] at 9437184 on unit CompleteOperation 2025-06-25T15:02:03.902468Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 511] from 
9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:02:03.902506Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 511 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-25T15:02:03.902528Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:03.902665Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:03.902708Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:512] at 9437184 on unit CompleteOperation 2025-06-25T15:02:03.902741Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 512] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:02:03.902792Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 512 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-25T15:02:03.902814Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:03.902913Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:03.902941Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:516] at 9437184 on unit FinishPropose 2025-06-25T15:02:03.902984Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 516 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: COMPLETE 2025-06-25T15:02:03.903075Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:03.903205Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:03.903234Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:514] at 9437184 on unit CompleteOperation 2025-06-25T15:02:03.903292Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 514] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T15:02:03.903344Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 514 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-25T15:02:03.903369Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:03.903469Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:03.903490Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:515] at 9437184 on unit CompleteOperation 2025-06-25T15:02:03.903518Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 515] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:02:03.903541Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:03.903702Z node 1 
:TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 506 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 1} 2025-06-25T15:02:03.903745Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:03.903782Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 506 2025-06-25T15:02:03.904018Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 507 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-25T15:02:03.904088Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:03.904135Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 507 2025-06-25T15:02:03.904280Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 508 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 3} 2025-06-25T15:02:03.904337Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:03.904367Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 508 2025-06-25T15:02:03.904488Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 509 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 4} 2025-06-25T15:02:03.904514Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:03.904537Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 509 2025-06-25T15:02:03.904614Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 510 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-25T15:02:03.904632Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:03.904645Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 510 2025-06-25T15:02:03.904718Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 511 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-25T15:02:03.904734Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:03.904761Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 511 2025-06-25T15:02:03.904865Z node 1 :TX_DATASHARD 
TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 512 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-25T15:02:03.904904Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:03.904927Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 512 2025-06-25T15:02:03.905012Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [1:239:2230], Recipient [1:351:2316]: {TEvReadSet step# 1000005 txid# 514 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-25T15:02:03.905029Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:02:03.905044Z node 1 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 514 expect 5 6 - 6 6 7 - - - - - - - - - - - - - - - - - - - - - - - - - - actual 5 6 - 6 6 7 - - - - - - - - - - - - - - - - - - - - - - - - - - interm 5 6 - 6 6 - - - - - - - - - - - - - - - - - - - - - - - - - - - >> DataShardReadIterator::ShouldReadKeyCellVec [GOOD] >> DataShardReadIterator::ShouldReadKeyArrow |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::BrokenReadLock-UseSink >> DataShardTxOrder::DelayData [GOOD] |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert+LogEnabled+UseSink |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert+LogEnabled-UseSink |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap-useOltpSink >> KqpLimits::ComputeNodeMemoryLimit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::DelayData [GOOD] Test command err: 2025-06-25T15:01:54.901012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:54.901067Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:54.904012Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:54.918616Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:54.919198Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:54.919473Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:54.929058Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:54.971125Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:54.971319Z 
node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:54.973018Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:54.973090Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:54.973144Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:54.973476Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:54.973566Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:54.973610Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:55.046350Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:55.079536Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:55.079729Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:55.079843Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:55.079896Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:55.079945Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:55.079979Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:55.080191Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:55.080246Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:55.080594Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:55.080702Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:55.080780Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:55.080819Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:55.080864Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:55.080895Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:55.080960Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:55.080994Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:55.081033Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:55.081129Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient 
[1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:55.081165Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:55.081216Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:55.084044Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nK\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\n \000Z\006\010\002\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:55.084104Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:55.084172Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:55.084373Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:55.084442Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:55.084492Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:55.084538Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:55.084575Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:55.084623Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:55.084656Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:55.084966Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:55.085007Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:55.085036Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:55.085078Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:55.085146Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:55.085183Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:55.085219Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:55.085274Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:55.085297Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 
2025-06-25T15:01:55.098991Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:55.099068Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:55.099102Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:55.099151Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:55.099217Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:55.099716Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:55.099767Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:55.099808Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:55.099979Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:55.100011Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:55.100126Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:55.100171Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:55.100203Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:55.100235Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:55.104083Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:55.104151Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:55.104389Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:55.104441Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:55.104493Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:55.104532Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:55.104563Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:55.104602Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: 
Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:55.104635Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100 ... 184 to execution unit CompletedOperations 2025-06-25T15:02:05.786924Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:506] at 9437184 on unit CompletedOperations 2025-06-25T15:02:05.786951Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:506] at 9437184 is Executed 2025-06-25T15:02:05.786970Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:506] at 9437184 executing on unit CompletedOperations 2025-06-25T15:02:05.786997Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:506] at 9437184 has finished 2025-06-25T15:02:05.787027Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:02:05.787065Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:02:05.787105Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000005:507] in PlanQueue unit at 9437184 2025-06-25T15:02:05.787403Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:239:2230], Recipient [1:239:2230]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:05.787439Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:05.787494Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:02:05.787523Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:02:05.787568Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:282: Return cached ready operation [1000005:507] at 9437184 2025-06-25T15:02:05.787604Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit PlanQueue 2025-06-25T15:02:05.787632Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-25T15:02:05.787654Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit PlanQueue 2025-06-25T15:02:05.787679Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit LoadTxDetails 2025-06-25T15:02:05.787700Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit LoadTxDetails 2025-06-25T15:02:05.788166Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437184 loaded tx from db 1000005:507 keys extracted: 1 2025-06-25T15:02:05.788205Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-25T15:02:05.788247Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit LoadTxDetails 2025-06-25T15:02:05.788271Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit FinalizeDataTxPlan 2025-06-25T15:02:05.788294Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute 
[1000005:507] at 9437184 on unit FinalizeDataTxPlan 2025-06-25T15:02:05.788345Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-25T15:02:05.788365Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit FinalizeDataTxPlan 2025-06-25T15:02:05.788384Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-25T15:02:05.788402Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit BuildAndWaitDependencies 2025-06-25T15:02:05.788460Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1000005:507] is the new logically complete end at 9437184 2025-06-25T15:02:05.788486Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1000005:507] is the new logically incomplete end at 9437184 2025-06-25T15:02:05.788510Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1000005:507] at 9437184 2025-06-25T15:02:05.788540Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-25T15:02:05.788562Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-25T15:02:05.788595Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit BuildDataTxOutRS 2025-06-25T15:02:05.788619Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit BuildDataTxOutRS 2025-06-25T15:02:05.788662Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-25T15:02:05.788690Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit BuildDataTxOutRS 2025-06-25T15:02:05.788714Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit StoreAndSendOutRS 2025-06-25T15:02:05.788740Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit StoreAndSendOutRS 2025-06-25T15:02:05.788769Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-25T15:02:05.788790Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit StoreAndSendOutRS 2025-06-25T15:02:05.788809Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit PrepareDataTxInRS 2025-06-25T15:02:05.788839Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit PrepareDataTxInRS 2025-06-25T15:02:05.788868Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-25T15:02:05.788888Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit PrepareDataTxInRS 2025-06-25T15:02:05.788907Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit LoadAndWaitInRS 2025-06-25T15:02:05.788939Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute 
[1000005:507] at 9437184 on unit LoadAndWaitInRS 2025-06-25T15:02:05.788963Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-25T15:02:05.788983Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit LoadAndWaitInRS 2025-06-25T15:02:05.789012Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit ExecuteDataTx 2025-06-25T15:02:05.789036Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit ExecuteDataTx 2025-06-25T15:02:05.789308Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000005:507] at tablet 9437184 with status COMPLETE 2025-06-25T15:02:05.789371Z node 1 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000005:507] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 11, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T15:02:05.789416Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:02:05.789446Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit ExecuteDataTx 2025-06-25T15:02:05.789476Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit CompleteOperation 2025-06-25T15:02:05.789510Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit CompleteOperation 2025-06-25T15:02:05.789645Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is DelayComplete 2025-06-25T15:02:05.789685Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit CompleteOperation 2025-06-25T15:02:05.789713Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:507] at 9437184 to execution unit CompletedOperations 2025-06-25T15:02:05.789752Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:507] at 9437184 on unit CompletedOperations 2025-06-25T15:02:05.789782Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:507] at 9437184 is Executed 2025-06-25T15:02:05.789802Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:507] at 9437184 executing on unit CompletedOperations 2025-06-25T15:02:05.789824Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:507] at 9437184 has finished 2025-06-25T15:02:05.789849Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:05.789876Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:02:05.789902Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:02:05.789932Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:02:05.806085Z node 1 :TX_DATASHARD DEBUG: 
datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000005 txid# 506 txid# 507} 2025-06-25T15:02:05.806157Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000005} 2025-06-25T15:02:05.806217Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:05.806276Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:506] at 9437184 on unit CompleteOperation 2025-06-25T15:02:05.806351Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 506] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:02:05.806401Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:05.806584Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:05.806611Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:507] at 9437184 on unit CompleteOperation 2025-06-25T15:02:05.806657Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 507] from 9437184 at tablet 9437184 send result to client [1:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:02:05.806697Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLockOutOfOrder [GOOD] |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled+UseSink |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> DataShardSnapshots::VolatileSnapshotSplit [GOOD] >> DataShardSnapshots::VolatileSnapshotMerge >> DataShardSnapshots::LockedWriteReuseAfterCommit+UseSink [GOOD] >> DataShardSnapshots::LockedWriteReuseAfterCommit-UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLockOutOfOrder [GOOD] Test command err: 2025-06-25T15:01:57.327007Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:57.327110Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:57.327148Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ecb/r3tmp/tmpo4sM6l/pdisk_1.dat 2025-06-25T15:01:57.615019Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:57.617550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:57.655918Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:57.659775Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863714840088 != 1750863714840092 2025-06-25T15:01:57.705319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:57.705442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:57.716733Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:57.797249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:58.120418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:58.231500Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:58.406499Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:782:2634], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:58.406597Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:792:2639], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:58.406657Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:58.411918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:58.570644Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:796:2642], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:01:58.650725Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:852:2679] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:58.990774Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyksra04djd75m2c8mzge6a6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDNkMTc3OGEtNDllNDQyMzYtYjkyZDJjLTJjOTFmNDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:59.094956Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyksrakha0w3k45ak1zs6tsq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNjY2E3ZmQtYTQ1ZGQ4OTgtYTZkYjQwNDMtM2Y2NDc1Yjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:59.689351Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyksravza1fg7tsmxxhampc3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmI1MWM0YjctMjI3N2RkODQtNmY0NjIyMWItZmZmZGNiZGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } 2025-06-25T15:02:00.059208Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyksrbh80mv9p7vwcr7gv946, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Yjc5YTYwMzAtODcyM2ViZDktOTI0ZDk4NzgtYzE3NWMxNjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:00.181339Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyksrbmmagbwt863wt33138x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmI1MWM0YjctMjI3N2RkODQtNmY0NjIyMWItZmZmZGNiZGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:00.289063Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyksrbr40djmg8chbnmqyhrg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmI1MWM0YjctMjI3N2RkODQtNmY0NjIyMWItZmZmZGNiZGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:00.370056Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=ZmI1MWM0YjctMjI3N2RkODQtNmY0NjIyMWItZmZmZGNiZGE=, ActorId: [1:915:2729], ActorState: ExecuteState, TraceId: 01jyksrbv355v7nnqcxn205wh0, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken 2025-06-25T15:02:00.382042Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyksrbv355v7nnqcxn205wh0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmI1MWM0YjctMjI3N2RkODQtNmY0NjIyMWItZmZmZGNiZGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:03.739197Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:03.739328Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:03.739434Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ecb/r3tmp/tmpJEjE5d/pdisk_1.dat 2025-06-25T15:02:03.982829Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T15:02:03.984261Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:04.011608Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:04.013546Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750863720848414 != 1750863720848418 2025-06-25T15:02:04.061854Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:04.062000Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:04.073602Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:04.153777Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:04.420926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:04.534281Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:04.750647Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:782:2634], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:04.750748Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:792:2639], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:04.750840Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:04.755996Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:04.922011Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:796:2642], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:02:04.959987Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:852:2679] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:05.028149Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyksrg6d4pcypnbsytz0j522, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YjczZmIxYjYtNWIyMTJjMjgtNjYzNTg2ODEtMzZmZmUwYzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:05.105746Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyksrgfp5yzcwspfp48qem8m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzY5ZWRmMDQtYTM3NzJmZTYtY2MyMjU5YzgtZjIxYmI2N2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... performing the first select 2025-06-25T15:02:05.670688Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyksrgqt3532q8cg5gbfnfda, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWMxMjE2ZjQtZjQzMWYyZWQtZGE3MTdmY2ItY2ZiYjI1NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } 2025-06-25T15:02:06.028333Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyksrh3tcbz2r17qt64yr61t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzBkOTQyNjgtMTcwNzI4ZjktYjFkMmMwNzktOTE0Mzg0YjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } ... waiting for commit read sets 2025-06-25T15:02:06.122094Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyksrheq5a9ppybpw4q4q8sz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzBkOTQyNjgtMTcwNzI4ZjktYjFkMmMwNzktOTE0Mzg0YjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... captured readset ... captured readset ... performing an upsert 2025-06-25T15:02:06.521629Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyksrhvjb8j0dak1yv8warrb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTgyNDI5ZWYtN2RiNmMwYjUtNjc4YTFiMzUtM2I3NzI1ZWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... performing the second select 2025-06-25T15:02:06.648786Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyksrhyb2zfbmn4fymv6e222, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWMxMjE2ZjQtZjQzMWYyZWQtZGE3MTdmY2ItY2ZiYjI1NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... performing the third select 2025-06-25T15:02:06.755570Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jyksrj1w6k02k3e8dtp8bqx8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZWMxMjE2ZjQtZjQzMWYyZWQtZGE3MTdmY2ItY2ZiYjI1NDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... performing the last upsert and commit 2025-06-25T15:02:06.820575Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZWMxMjE2ZjQtZjQzMWYyZWQtZGE3MTdmY2ItY2ZiYjI1NDU=, ActorId: [2:923:2727], ActorState: ExecuteState, TraceId: 01jyksrj5781bppt6144brgy66, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> DataShardSnapshots::UncommittedChangesRenameTable+UseSink [GOOD] >> DataShardSnapshots::ShardRestartWholeShardLockBasic >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap+useOltpSink >> EncryptedBackupParamsValidationTest::IncorrectKeyImport [GOOD] >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLock+EvWrite [GOOD] >> DataShardSnapshots::MvccSnapshotAndSplit [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWrites+UseSink |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> BasicUsage::BrokenCredentialsProvider [GOOD] >> KqpDataIntegrityTrails::BrokenReadLock+UseSink |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> DataShardReadIteratorSysTables::ShouldNotReadUserTableUsingLocalTid [GOOD] >> DataShardReadIteratorSysTables::ShouldForbidSchemaVersion |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> DataShardReadIterator::ShouldReverseReadMultipleKeysOneByOne [GOOD] >> DataShardReadIterator::ShouldReverseReadMultipleRanges >> DataShardReadIterator::ShouldStopWhenNodeDisconnected [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared-Volatile-BreakLocks >> DataShardReadIterator::ShouldReadRangeArrow [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsKeysRequestCellVec ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestSnapshotReadAfterBrokenLock+EvWrite [GOOD] Test command err: 2025-06-25T15:01:48.761285Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:01:48.761382Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:48.761427Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f31/r3tmp/tmpvXpLyi/pdisk_1.dat 2025-06-25T15:01:49.056162Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:01:49.059085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:01:49.100824Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:49.105761Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863706241828 != 1750863706241832 2025-06-25T15:01:49.154170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:49.154298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:49.165653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:49.248038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:49.617239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:49.734533Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:49.928603Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:782:2634], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:49.928736Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:792:2639], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:49.928808Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:49.934144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:50.101616Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:796:2642], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:01:50.171149Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:852:2679] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:50.543464Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyksr1q6exta9c175mzwsydq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmZjMzM4MjQtZDNmMWVlYzktNzFiZWE0YWMtZjc2ODU3YTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:01:50.631983Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyksr2bm1s5c4m9wy56nmgj1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2ZlMjBlMi1hN2Y1ZjkyOS01YjI1NDAwMS0xYmViYjBmYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... waiting for at least 2 blocked commits 2025-06-25T15:01:53.173471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:01:53.173523Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... blocked commit for tablet 72075186224037888 ... blocked commit for tablet 72075186224037888 ... blocked commit for tablet 72075186224037889 ... blocked commit for tablet 72075186224037889 2025-06-25T15:02:02.376530Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyksrds7fqgd3z2xtnwdgkws, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWZjNmUzNC0xYjVmNGMxZS1mMDlmYjcwZS0xNjdjMjQ2OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:02.485702Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyksrdweffek89sykn7ww6nv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2RlZTVkMTQtZTNiZjIzMmEtM2M3NjY2YTctOTUxNGRmMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... shards are ready for read-only immediate transactions ... waiting for at least 2 blocked commits ... blocked commit for tablet 72075186224037888 ... blocked commit for tablet 72075186224037889 2025-06-25T15:02:06.010966Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:06.011125Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:06.011242Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f31/r3tmp/tmpSFAmps/pdisk_1.dat 2025-06-25T15:02:06.261795Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T15:02:06.263320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:06.288790Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:06.289811Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750863723306905 != 1750863723306909 2025-06-25T15:02:06.338236Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:06.338357Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:06.349644Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:06.436758Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:06.739939Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:06.857514Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:07.041356Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:782:2634], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:07.041456Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:792:2639], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:07.041532Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:07.046224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:07.206710Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:796:2642], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:02:07.245058Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:852:2679] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:07.316016Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyksrjdz5rtgyshjnk54984r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=M2M0YzQzMmItOGZiNDZjYWMtN2QwODY2NjktNTJjM2YzMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:07.398380Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyksrjq73k0qn6sayn0cfk50, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmM1NTcwYmItMmUxYTVmNy1mZGIzZTQzYy0zNmFiYjA1OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:08.001539Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jyksrjzkb91v81v7kgz8wv0q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmFlYjRjY2MtZTdiNjNkNWItYzA0OTkxMWItODdjYThmYzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } } 2025-06-25T15:02:08.367264Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyksrkn62jn10e52spy579g1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MjVkZDk3NTgtYTljZTc2MTgtNWU3MTMzMGUtZTIxZGMyMGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:08.489710Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyksrkrc21yftat0t9tz8wy5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmFlYjRjY2MtZTdiNjNkNWItYzA0OTkxMWItODdjYThmYzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:02:08.578250Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyksrkvd4xjs7zsjez1zdhm5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YmFlYjRjY2MtZTdiNjNkNWItYzA0OTkxMWItODdjYThmYzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:02:08.634237Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YmFlYjRjY2MtZTdiNjNkNWItYzA0OTkxMWItODdjYThmYzE=, ActorId: [2:923:2727], ActorState: ExecuteState, TraceId: 01jyksrky56s1kxg1fzv6vd23f, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken >> KqpDataIntegrityTrails::BrokenReadLockAbortedTx >> DataShardReadIterator::ShouldHandleOutOfOrderReadAck [GOOD] >> DataShardReadIterator::ShouldHandleReadAckWhenExhaustedRangeRead >> DataShardReadIterator::ShouldReadRangeInclusiveEndsArrow [GOOD] >> DataShardReadIterator::ShouldReadRangeReverse >> DataShardReadIterator::ShouldRangeReadReverseLeftNonInclusive [GOOD] >> DataShardReadIterator::ShouldNotReadAfterCancel >> DataShardReadIteratorConsistency::LocalSnapshotReadHasRequiredDependencies [GOOD] >> DataShardReadIteratorConsistency::LocalSnapshotReadNoUnnecessaryDependencies >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange+EvWrite >> EncryptedBackupParamsValidationTest::EncryptionSettingsWithoutKeyImport |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> BasicUsage::BrokenCredentialsProvider [GOOD] Test command err: 2025-06-25T15:01:27.135858Z :MaxByteSizeEqualZero INFO: Random seed for debugging is 1750863687135814 2025-06-25T15:01:27.662427Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902276032474625:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:27.662665Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T15:01:27.727218Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902277240563514:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:27.727310Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T15:01:28.002113Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017b3/r3tmp/tmpgST48Y/pdisk_1.dat 2025-06-25T15:01:28.035863Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T15:01:28.454693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:28.454814Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:28.457951Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:28.458037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:28.502616Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:28.508432Z node 1 :HIVE WARN: 
hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:01:28.508593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:28.509322Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61389, node 1 2025-06-25T15:01:28.695450Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:28.785250Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:28.901949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/0017b3/r3tmp/yandexRo2zWp.tmp 2025-06-25T15:01:28.901975Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/0017b3/r3tmp/yandexRo2zWp.tmp 2025-06-25T15:01:28.902135Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/0017b3/r3tmp/yandexRo2zWp.tmp 2025-06-25T15:01:28.902306Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:29.101305Z INFO: TTestServer started on Port 29638 GrpcPort 61389 TClient is connected to server localhost:29638 PQClient connected to localhost:61389 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:29.588786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T15:01:31.800848Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902293212344780:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:31.801213Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:31.801446Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902294420432978:2270], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:31.801637Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902294420432989:2273], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:31.801874Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:31.802122Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902293212344793:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:31.814477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:31.846391Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902294420432994:2126] txid# 281474976720657, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateCreate)" severity: 1 } 2025-06-25T15:01:31.855893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-25T15:01:31.858546Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902293212344795:2304], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-25T15:01:31.858785Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902294420432993:2274], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-25T15:01:31.926608Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902293212344885:2678] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:31.940696Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902294420433023:2133] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:32.444699Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519902293212344903:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T15:01:32.445049Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519902294420433030:2279], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T15:01:32.451980Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=Y2UwOTcwODEtMmI4MzgwNzktYmFmYzcyYTEtZDk0ZTg4N2Q=, ActorId: [1:7519902293212344778:2298], ActorState: ExecuteState, TraceId: 01jyksqg0c3t1rakss3c42bh60, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T15:01:32.448633Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZDQ3YjY5ZTQtOGExZjMxZWYtOTJiMTY4YzMtYzFlMzgxZjU=, ActorId: [2:7519902294420432962:2269], ActorState: ExecuteState, TraceId: 01jyksqg0d5348nartqye65bhq, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T15:01:32.466659Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T15:01:32.468542Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clu ... 4;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T15:02:07.337143Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-25T15:02:07.337161Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [5:7519902448674467084:2452] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-25T15:02:07.339984Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [5:7519902448674467084:2452] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-25T15:02:07.510080Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [5:7519902448674467084:2452] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-25T15:02:07.510889Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [5:7519902448674467124:2452] connected; active server actors: 1 2025-06-25T15:02:07.511057Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [5:7519902448674467084:2452] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-25T15:02:07.511078Z node 5 :PQ_PARTITION_CHOOSER DEBUG: 
partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [5:7519902448674467084:2452] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-25T15:02:07.511491Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [5:7519902448674467124:2452] disconnected; active server actors: 1 2025-06-25T15:02:07.511513Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [5:7519902448674467124:2452] disconnected no session 2025-06-25T15:02:07.631673Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [5:7519902448674467146:2452], now have 1 active actors on pipe 2025-06-25T15:02:07.629433Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [5:7519902448674467084:2452] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-25T15:02:07.629484Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [5:7519902448674467084:2452] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-25T15:02:07.629504Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [5:7519902448674467084:2452] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-25T15:02:07.629548Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T15:02:07.632010Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 6, Generation: 1 2025-06-25T15:02:07.632917Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T15:02:07.632955Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T15:02:07.633049Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|46554aa2-ba072c00-885d6a38-a9b4d271_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-25T15:02:07.633156Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-06-25T15:02:07.633258Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T15:02:07.633946Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T15:02:07.633974Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T15:02:07.634057Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T15:02:07.634477Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|46554aa2-ba072c00-885d6a38-a9b4d271_0 2025-06-25T15:02:07.635402Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750863727635 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T15:02:07.635536Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|46554aa2-ba072c00-885d6a38-a9b4d271_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-25T15:02:07.635748Z :INFO: [] MessageGroupId [src] SessionId [src|46554aa2-ba072c00-885d6a38-a9b4d271_0] Write session: close. Timeout = 0 ms 2025-06-25T15:02:07.635797Z :INFO: [] MessageGroupId [src] SessionId [src|46554aa2-ba072c00-885d6a38-a9b4d271_0] Write session will now close 2025-06-25T15:02:07.635842Z :DEBUG: [] MessageGroupId [src] SessionId [src|46554aa2-ba072c00-885d6a38-a9b4d271_0] Write session: aborting 2025-06-25T15:02:07.636982Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|46554aa2-ba072c00-885d6a38-a9b4d271_0 grpc read done: success: 0 data: 2025-06-25T15:02:07.637002Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|46554aa2-ba072c00-885d6a38-a9b4d271_0 grpc read failed 2025-06-25T15:02:07.637528Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 1 sessionId: src|46554aa2-ba072c00-885d6a38-a9b4d271_0 2025-06-25T15:02:07.637573Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|46554aa2-ba072c00-885d6a38-a9b4d271_0 is DEAD 2025-06-25T15:02:07.638077Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T15:02:07.638417Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [5:7519902448674467146:2452] destroyed 2025-06-25T15:02:07.638462Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
2025-06-25T15:02:07.640391Z :INFO: [] MessageGroupId [src] SessionId [src|46554aa2-ba072c00-885d6a38-a9b4d271_0] Write session: gracefully shut down, all writes complete 2025-06-25T15:02:07.641466Z :DEBUG: [] MessageGroupId [src] SessionId [src|46554aa2-ba072c00-885d6a38-a9b4d271_0] Write session is aborting and will not restart 2025-06-25T15:02:07.642133Z :DEBUG: [] MessageGroupId [src] SessionId [src|46554aa2-ba072c00-885d6a38-a9b4d271_0] Write session: destroy 2025-06-25T15:02:07.685083Z :INFO: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] Starting read session 2025-06-25T15:02:07.685160Z :DEBUG: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] Starting session to cluster null (localhost:21627) 2025-06-25T15:02:07.696409Z :DEBUG: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:02:07.696462Z :DEBUG: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:02:07.696507Z :DEBUG: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] [null] Reconnecting session to cluster null in 0.000000s 2025-06-25T15:02:07.698078Z :ERROR: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] [null] Got error. Status: CLIENT_UNAUTHENTICATED. Description:
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation 2025-06-25T15:02:07.698151Z :DEBUG: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:02:07.698211Z :DEBUG: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:02:07.698332Z :INFO: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] [null] Closing session to cluster: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " } Get event on client 2025-06-25T15:02:07.698529Z :NOTICE: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T15:02:07.698567Z :DEBUG: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] [null] Abort session to cluster Got close event: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " }2025-06-25T15:02:07.698656Z :INFO: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] Closing read session. Close timeout: 0.000000s 2025-06-25T15:02:07.698732Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-25T15:02:07.698778Z :INFO: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] Counters: { Errors: 1 CurrentSessionLifetimeMs: 13 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T15:02:07.698863Z :NOTICE: [/Root] [/Root] [e8abdb88-86242d73-5885d997-d5cb4456] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T15:02:08.212200Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715686, task: 1, CA Id [5:7519902452969434483:2468]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 0 2025-06-25T15:02:08.246338Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715686, task: 1, CA Id [5:7519902452969434483:2468]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T15:02:08.299749Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715686, task: 1, CA Id [5:7519902452969434483:2468]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 2025-06-25T15:02:08.373320Z node 5 :KQP_COMPUTE WARN: kqp_read_actor.cpp:1076: TxId: 281474976715686, task: 1, CA Id [5:7519902452969434483:2468]. Got EvDeliveryProblem, TabletId: 72075186224037890, NotDelivered: 1 >> DataShardReadIterator::ShouldReadKeyArrow [GOOD] >> DataShardReadIterator::ShouldReadKeyOnlyValueColumn |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.4%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::ComputeNodeMemoryLimit [GOOD] Test command err: Trying to start YDB, gRPC: 13413, MsgBus: 27757 2025-06-25T15:01:06.673058Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902189339819132:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:06.677238Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013e6/r3tmp/tmpfN3cS4/pdisk_1.dat 2025-06-25T15:01:07.097071Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:07.112623Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902189339819090:2080] 1750863666665500 != 1750863666665503 2025-06-25T15:01:07.162269Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:07.162370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 13413, node 1 2025-06-25T15:01:07.166193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:07.202031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:07.202050Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:07.202074Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:07.202191Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27757 TClient is connected to server localhost:27757 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T15:01:07.680430Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:07.731916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:07.749286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:07.763956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:09.553499Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902202224721974:2316], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:09.553616Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:09.554222Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902202224721986:2319], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:09.559105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:09.578261Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902202224721988:2320], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T15:01:09.672349Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902202224722039:2557] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:10.119311Z node 1 :KQP_COMPUTE WARN: log.cpp:784: fline=kqp_compute_actor_factory.cpp:41;problem=cannot_allocate_memory;tx_id=281474976710661;task_id=2;memory=1048576; 2025-06-25T15:01:10.119349Z node 1 :KQP_COMPUTE WARN: dq_compute_memory_quota.h:152: TxId: 281474976710661, task: 2. [Mem] memory 1048576 NOT granted 2025-06-25T15:01:10.148930Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519902206519689380:2330], TxId: 281474976710661, task: 2. Ctx: { TraceId : 01jykspt9e1f065qtsng3nvsag. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NjE4OTFiN2UtZDU0NTE5NjItNjdjNmQ4N2QtNjU4NDNlZmM=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: OVERLOADED KIKIMR_PRECONDITION_FAILED: {
: Error: Mkql memory limit exceeded, allocated by task 2: 10, host: ghrun-kqfvx6aroe, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976710661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-06-25T15:01:10.116585Z }, code: 2029 }. 2025-06-25T15:01:10.151806Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519902206519689379:2329], TxId: 281474976710661, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NjE4OTFiN2UtZDU0NTE5NjItNjdjNmQ4N2QtNjU4NDNlZmM=. TraceId : 01jykspt9e1f065qtsng3nvsag. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [1:7519902206519689368:2314], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-06-25T15:01:10.154573Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NjE4OTFiN2UtZDU0NTE5NjItNjdjNmQ4N2QtNjU4NDNlZmM=, ActorId: [1:7519902202224721948:2314], ActorState: ExecuteState, TraceId: 01jykspt9e1f065qtsng3nvsag, Create QueryResponse for error on request, msg:
: Error: Mkql memory limit exceeded, allocated by task 2: 10, host: ghrun-kqfvx6aroe, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976710661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-06-25T15:01:10.116585Z } , code: 2029 Trying to start YDB, gRPC: 8797, MsgBus: 20616 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013e6/r3tmp/tmpU2WT9S/pdisk_1.dat 2025-06-25T15:01:11.127837Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:01:11.128226Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:11.136478Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902202451135880:2080] 1750863670895117 != 1750863670895120 2025-06-25T15:01:11.143378Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:11.143454Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:11.144959Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8797, node 2 2025-06-25T15:01:11.215647Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:11.215670Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:11.215677Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:11.215774Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20616 TClient is connected to server localhost:20616 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. ... 
sifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:20.004943Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:20.005070Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62794 TClient is connected to server localhost:62794 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:20.582846Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:20.599104Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:20.678753Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:20.793192Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:20.830101Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:20.889885Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:23.298687Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902258311893015:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:23.298784Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:23.417110Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:23.478290Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:23.510725Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:23.547003Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:23.619368Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:23.659297Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:23.704067Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:23.798597Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902258311893674:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:23.798686Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:23.799029Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902258311893679:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:23.803661Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:23.822781Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902258311893681:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:23.900873Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902258311893734:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:24.763398Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902241132022212:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:24.763476Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:34.880962Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:01:34.880996Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:05.710404Z node 4 :KQP_EXECUTER WARN: kqp_literal_executer.cpp:103: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jyksq9fz4sbrwtwy9x22f3xn, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZjA4OTk5ODAtZTczYjI2OTQtNDgyZGJkYzQtNjY2Nzc3NzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, memory limit exceeded. 2025-06-25T15:02:05.711216Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZjA4OTk5ODAtZTczYjI2OTQtNDgyZGJkYzQtNjY2Nzc3NzE=, ActorId: [4:7519902266901828597:2474], ActorState: ExecuteState, TraceId: 01jyksq9fz4sbrwtwy9x22f3xn, Create QueryResponse for error on request, msg: 2025-06-25T15:02:05.711367Z node 4 :KQP_SLOW_LOG WARN: kqp_worker_common.cpp:132: TraceId: "01jyksq9fz4sbrwtwy9x22f3xn", SessionId: ydb://session/3?node_id=4&id=ZjA4OTk5ODAtZTczYjI2OTQtNDgyZGJkYzQtNjY2Nzc3NzE=, Slow query, duration: 40.590506s, status: PRECONDITION_FAILED, user: UNAUTHENTICATED, results: 0b, text: "\n SELECT ToDict(\n ListMap(\n ListFromRange(0ul, 5000000ul),\n ($x) -> { RETURN AsTuple($x, $x + 1); }\n )\n );\n ", parameters: 0b
: Warning: Type annotation, code: 1030
:2:13: Warning: At function: RemovePrefixMembers, At function: Unordered, At function: PersistableRepr, At function: OrderedSqlProject, At tuple, At function: SqlProjectItem, At lambda
:2:20: Warning: At function: ToDict
:3:17: Warning: At function: OrderedMap
:5:26: Warning: At lambda
:5:38: Warning: At tuple
:5:53: Warning: At function: +
:5:53: Warning: Integral type implicit bitcast: Uint64 and Int32, code: 1107
: Error: Memory limit exceeded, code: 2029 |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert+LogEnabled-UseSink [GOOD] >> KqpDataIntegrityTrails::Upsert+LogEnabled+UseSink [GOOD] |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> PersQueueSdkReadSessionTest::ReadSessionWithClose [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted >> KqpDataIntegrityTrails::BrokenReadLock-UseSink [GOOD] >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap-useOltpSink [GOOD] >> KqpDataIntegrityTrails::Ddl >> DataShardSnapshots::LockedWriteReuseAfterCommit-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitSuccess+UseSink >> KqpDataIntegrityTrails::Upsert-LogEnabled+UseSink [GOOD] >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap+useOltpSink ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert+LogEnabled-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 3307, MsgBus: 8279 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00176a/r3tmp/tmp1pY4gC/pdisk_1.dat TServer::EnableGrpc on GrpcPort 3307, node 1 TClient is connected to server localhost:8279 TClient is connected to server localhost:8279 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert+LogEnabled+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 3513, MsgBus: 8491 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00176c/r3tmp/tmp1Y7kkm/pdisk_1.dat TServer::EnableGrpc on GrpcPort 3513, node 1 TClient is connected to server localhost:8491 TClient is connected to server localhost:8491 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> DataShardSnapshots::ShardRestartWholeShardLockBasic [GOOD] >> DataShardSnapshots::ShardRestartLockUnrelatedUpsert |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::BrokenReadLock-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 11558, MsgBus: 9769 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00176f/r3tmp/tmpGrJuG0/pdisk_1.dat TServer::EnableGrpc on GrpcPort 11558, node 1 TClient is connected to server localhost:9769 TClient is connected to server localhost:9769 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap-useOltpSink [GOOD] Test command err: Trying to start YDB, gRPC: 5864, MsgBus: 5750 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001766/r3tmp/tmpojL8y0/pdisk_1.dat TServer::EnableGrpc on GrpcPort 5864, node 1 TClient is connected to server localhost:5750 TClient is connected to server localhost:5750 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> BackupRestore::RestoreReplicationThatDoesNotUseSecret [FAIL] >> BackupRestore::ReplicasAreNotBackedUp >> DataShardSnapshots::VolatileSnapshotMerge [GOOD] >> DataShardSnapshots::VolatileSnapshotAndLocalMKQLUpdate ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 20086, MsgBus: 11442 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001763/r3tmp/tmpTfgxmT/pdisk_1.dat TServer::EnableGrpc on GrpcPort 20086, node 1 TClient is connected to server localhost:11442 TClient is connected to server localhost:11442 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
>> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap+useOltpSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWrites+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWrites-UseSink |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> DataShardReadIteratorSysTables::ShouldForbidSchemaVersion [GOOD] >> DataShardReadIteratorSysTables::ShouldNotAllowArrow >> KqpDataIntegrityTrails::BrokenReadLock+UseSink [GOOD] >> DataShardReadIterator::ShouldReadRangeReverse [GOOD] >> DataShardReadIterator::ShouldReadRangeInclusiveEndsMissingLeftRight |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> DataShardReadIterator::ShouldReverseReadMultipleRanges [GOOD] >> DataShardReadIterator::ShouldReverseReadMultipleRangesOneByOneWithAcks >> KqpDataIntegrityTrails::Select ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap+useOltpSink [GOOD] Test command err: Trying to start YDB, gRPC: 5007, MsgBus: 61641 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00175b/r3tmp/tmpqItwNg/pdisk_1.dat TServer::EnableGrpc on GrpcPort 5007, node 1 TClient is connected to server localhost:61641 TClient is connected to server localhost:61641 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
>> DataShardReadIteratorConsistency::LocalSnapshotReadNoUnnecessaryDependencies [GOOD] >> DataShardReadIteratorConsistency::LocalSnapshotReadWithConcurrentWrites >> DataShardReadIterator::TryCommitLocksPrepared-Volatile-BreakLocks [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared+Volatile-BreakLocks >> DataShardReadIterator::ShouldHandleReadAckWhenExhaustedRangeRead [GOOD] >> DataShardReadIterator::ShouldHandleReadAckWhenExhaustedRangeReadReverse >> DataShardReadIterator::ShouldNotReadAfterCancel [GOOD] >> DataShardReadIterator::ShouldLimitReadRangeChunk1Limit100 |92.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::BrokenReadLockAbortedTx [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsKeysRequestCellVec [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsKeysRequestArrow >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange-EvWrite >> DataShardReadIterator::ShouldReadKeyOnlyValueColumn [GOOD] >> DataShardReadIterator::ShouldReadKeyValueColumnAndSomeKeyColumn ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::BrokenReadLock+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 30575, MsgBus: 20313 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001758/r3tmp/tmpA1JHGQ/pdisk_1.dat TServer::EnableGrpc on GrpcPort 30575, node 1 TClient is connected to server localhost:20313 TClient is connected to server localhost:20313 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap-useOltpSink ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::BrokenReadLockAbortedTx [GOOD] Test command err: Trying to start YDB, gRPC: 22678, MsgBus: 23183 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001753/r3tmp/tmp2Vsn88/pdisk_1.dat TServer::EnableGrpc on GrpcPort 22678, node 1 TClient is connected to server localhost:23183 TClient is connected to server localhost:23183 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> EncryptedBackupParamsValidationTest::EncryptionSettingsWithoutKeyImport [GOOD] |92.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> DataShardSnapshots::ShardRestartLockUnrelatedUpsert [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByConflict >> test_sql_streaming.py::test[suites-ReadTwoTopics-default.txt] >> test_sql_streaming.py::test[suites-GroupByHop-default.txt] >> KqpDataIntegrityTrails::Ddl [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitSuccess+UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitSuccess-UseSink >> test_sql_streaming.py::test[suites-GroupByHopTimeExtractorUnusedColumns-default.txt] >> EncryptedBackupParamsValidationTest::NoSourcePrefixEncrypted >> test_sql_streaming.py::test[suites-ReadTopicWithMetadata-default.txt] >> DataShardSnapshots::VolatileSnapshotAndLocalMKQLUpdate [GOOD] >> DataShardSnapshots::VolatileSnapshotReadTable ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Ddl [GOOD] Test command err: Trying to start YDB, gRPC: 29336, MsgBus: 10584 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001740/r3tmp/tmptmBMTl/pdisk_1.dat TServer::EnableGrpc on GrpcPort 29336, node 1 TClient is connected to server localhost:10584 TClient is connected to server localhost:10584 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
>> DataShardSnapshots::MvccSnapshotLockedWrites-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesRestart+UseSink >> DataShardReadIteratorSysTables::ShouldNotAllowArrow [GOOD] >> ReadIteratorExternalBlobs::ExtBlobs >> DataShardReadIterator::ShouldLimitReadRangeChunk1Limit100 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit98 >> KqpDataIntegrityTrails::Select [GOOD] >> DataShardReadIterator::ShouldReverseReadMultipleRangesOneByOneWithAcks [GOOD] >> DataShardReadIterator::ShouldReturnMvccSnapshotFromFuture >> DataShardReadIterator::ShouldReadRangeInclusiveEndsMissingLeftRight [GOOD] >> DataShardReadIterator::ShouldReadRangeNonInclusiveEnds >> DataShardReadIterator::ShouldHandleReadAckWhenExhaustedRangeReadReverse [GOOD] >> DataShardReadIterator::ShouldForbidDuplicatedReadId >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap+useOltpSink [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsKeysRequestArrow [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsRangeRequestCellVec >> DataShardReadIterator::TryCommitLocksPrepared+Volatile-BreakLocks [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared-Volatile+BreakLocks >> KqpScripting::StreamExecuteYqlScriptScanCancelation >> DataShardReadIteratorConsistency::LocalSnapshotReadWithConcurrentWrites [GOOD] >> DataShardReadIteratorConsistency::Bug_7674_IteratorDuplicateRows >> KqpYql::BinaryJsonOffsetBound >> KqpYql::InsertCVList+useSink >> KqpScripting::StreamExecuteYqlScriptOperationTmeoutBruteForce >> KqpYql::TestUuidDefaultColumn >> KqpScripting::ScriptExplainCreatedTable >> DataShardReadIterator::ShouldReadKeyValueColumnAndSomeKeyColumn [GOOD] >> DataShardReadIterator::ShouldReadMultipleKeys >> KqpScripting::ScriptValidate ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Select [GOOD] Test command err: Trying to start YDB, gRPC: 10132, MsgBus: 4396 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00170c/r3tmp/tmpRdhUcH/pdisk_1.dat TServer::EnableGrpc on GrpcPort 10132, node 1 TClient is connected to server localhost:4396 TClient is connected to server localhost:4396 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
>> BackupRestore::ReplicasAreNotBackedUp [FAIL] >> BackupRestore::SkipEmptyDirsOnRestore ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap+useOltpSink [GOOD] Test command err: Trying to start YDB, gRPC: 15296, MsgBus: 11793 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00173e/r3tmp/tmpHNRGfq/pdisk_1.dat TServer::EnableGrpc on GrpcPort 15296, node 1 TClient is connected to server localhost:11793 TClient is connected to server localhost:11793 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips+EvWrite >> DataShardSnapshots::ShardRestartLockBrokenByConflict [GOOD] >> DataShardSnapshots::ShardRestartWholeShardLockBrokenByUpsert >> KqpScripting::UnsafeTimestampCast >> DataShardSnapshots::LockedWriteDistributedCommitSuccess-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitFreeze+UseSink >> KqpScripting::SelectNullType >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap-useOltpSink [GOOD] >> EncryptedBackupParamsValidationTest::NoSourcePrefixEncrypted [GOOD] >> KqpStats::SysViewClientLost [FAIL] >> KqpStats::SysViewCancelled >> DataShardSnapshots::VolatileSnapshotReadTable [GOOD] >> DataShardSnapshots::VolatileSnapshotRefreshDiscard ------- [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap-useOltpSink [GOOD] Test command err: Trying to start YDB, gRPC: 13118, MsgBus: 12928 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016eb/r3tmp/tmpBZ2kvI/pdisk_1.dat TServer::EnableGrpc on GrpcPort 13118, node 1 TClient is connected to server localhost:12928 TClient is connected to server localhost:12928 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPathSpecified >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit98 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit99 |92.6%| [TA] $(B)/ydb/core/kqp/ut/data_integrity/test-results/unittest/{meta.json ... results_accumulator.log} |92.6%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/data_integrity/test-results/unittest/{meta.json ... results_accumulator.log} >> DataShardSnapshots::MvccSnapshotLockedWritesRestart+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesRestart-UseSink >> DataShardReadIterator::ShouldReadRangeNonInclusiveEnds [GOOD] >> DataShardReadIterator::ShouldReadRangeLeftInclusive >> KqpLimits::QueryExecTimeout [GOOD] >> KqpYql::TestUuidDefaultColumn [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions >> DataShardReadIterator::ShouldForbidDuplicatedReadId [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1000 >> DataShardReadIterator::ShouldReadNoColumnsRangeRequestCellVec [GOOD] >> DataShardReadIterator::ShouldReadNoColumnsRangeRequestArrow >> DataShardReadIterator::ShouldReturnMvccSnapshotFromFuture [GOOD] >> DataShardReadIterator::ShouldRollbackLocksWhenWrite >> DataShardReadIteratorConsistency::Bug_7674_IteratorDuplicateRows [GOOD] >> DataShardReadIteratorConsistency::LeaseConfirmationNotOutOfOrder >> KqpScripting::ScriptingCreateAndAlterTableTest >> KqpScripting::ScriptValidate [GOOD] >> KqpScripting::ScriptStats >> KqpYql::BinaryJsonOffsetBound [GOOD] >> KqpYql::AnsiIn >> KqpYql::InsertCVList+useSink [GOOD] >> KqpYql::InsertCVList-useSink >> DataShardReadIterator::TryCommitLocksPrepared-Volatile+BreakLocks [GOOD] >> DataShardReadIterator::TryCommitLocksPrepared+Volatile+BreakLocks >> DataShardReadIterator::ShouldReadMultipleKeys [GOOD] >> DataShardReadIterator::ShouldReadMultipleKeysOneByOne ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::TestUuidDefaultColumn [GOOD] Test command err: Trying to start YDB, gRPC: 18605, MsgBus: 64965 2025-06-25T15:02:22.563924Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902513714917183:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:22.564470Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0008c7/r3tmp/tmpVWa6ig/pdisk_1.dat 2025-06-25T15:02:22.972878Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902513714917064:2080] 1750863742522889 != 1750863742522892 2025-06-25T15:02:22.984931Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:22.997941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:22.998039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:23.002041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18605, node 1 2025-06-25T15:02:23.208581Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:23.208603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:23.208608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:23.208692Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:02:23.564687Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:64965 TClient is connected to server localhost:64965 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:24.044978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:25.632151Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902526599819595:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:25.632260Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.234111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.397090Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902530894786994:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.397158Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.397338Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902530894786999:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.404169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:26.418569Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902530894787001:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T15:02:26.495990Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902530894787052:2394] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::QueryExecTimeout [GOOD] Test command err: Trying to start YDB, gRPC: 30243, MsgBus: 25807 2025-06-25T15:01:07.756007Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902190151000011:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:07.761031Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013e5/r3tmp/tmpn9DdJi/pdisk_1.dat 2025-06-25T15:01:08.137758Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:08.144399Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902190150999804:2080] 1750863667701808 != 1750863667701811 TServer::EnableGrpc on GrpcPort 30243, node 1 2025-06-25T15:01:08.208563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:08.210024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:08.217675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:08.240717Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:08.240735Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:08.240742Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:08.240847Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25807 TClient is connected to server localhost:25807 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T15:01:08.751275Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:08.781944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:08.800794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:08.813612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:08.943784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:01:09.115701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:09.200541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:01:10.937935Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902203035903325:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:10.938016Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:11.197937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:11.237906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:11.272403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:11.304014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:11.337318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:11.414317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:11.459644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:11.548767Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902207330871287:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:11.548845Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:11.549213Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902207330871292:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:11.553312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:11.569299Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902207330871294:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:01:11.640449Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902207330871345:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:12.653268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:12.754184Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902190151000011:2231];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:12.756080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;actio ... e QueryResponse for error on request, msg:
: Error: Request canceled after 100ms
: Error: Cancelling after 100ms during execution Trying to start YDB, gRPC: 23331, MsgBus: 4152 2025-06-25T15:01:58.656229Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519902412839180535:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:58.656331Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013e5/r3tmp/tmpF9sjAz/pdisk_1.dat 2025-06-25T15:01:58.775505Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:58.778540Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519902412839180514:2080] 1750863718655440 != 1750863718655443 2025-06-25T15:01:58.798497Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:58.798593Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:58.805503Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23331, node 4 2025-06-25T15:01:58.854067Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:58.854094Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:58.854107Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:58.854297Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4152 TClient is connected to server localhost:4152 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:59.463700Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:01:59.483803Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:59.553524Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:59.697339Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:59.748180Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:01:59.827437Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.249296Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902430019051343:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:02.249416Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:02.310661Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.345488Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.377645Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.413975Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.448458Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.523035Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.559388Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.644193Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902430019052006:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:02.644278Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902430019052011:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:02.644291Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:02.647687Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:02.657230Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902430019052013:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:02.722412Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902430019052066:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:03.656636Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902412839180535:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:03.656724Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:13.757304Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:02:13.757332Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded
: Error: Query did not complete within specified timeout 500ms, session id ydb://session/3?node_id=4&id=ODk3OGNjY2YtNDU0MjFmMjktNWRmYTA4OGMtYzU1YWU0Nzk= >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix+EvWrite >> DataShardSnapshots::ShardRestartWholeShardLockBrokenByUpsert [GOOD] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedBeforeRead+UseSink >> KqpScripting::UnsafeTimestampCast [GOOD] >> KqpScripting::SystemTables >> KqpScripting::StreamExecuteYqlScriptScanCancelation [GOOD] >> KqpScripting::StreamExecuteYqlScriptScanClientOperationTimeoutBruteForce >> KqpScripting::ScriptExplainCreatedTable [GOOD] >> KqpScripting::ScriptExplain >> KqpScripting::ScanQuery >> KqpScripting::SelectNullType [GOOD] >> KqpScripting::StreamDdlAndDml >> KqpScripting::StreamExecuteYqlScriptClientTimeoutBruteForce >> BackupRestore::SkipEmptyDirsOnRestore [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitFreeze+UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitFreeze-UseSink >> DataShardSnapshots::VolatileSnapshotRefreshDiscard [GOOD] >> DataShardSnapshots::VolatileSnapshotTimeout >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPathSpecified [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit99 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit100 >> DataShardReadIterator::ShouldReadRangeLeftInclusive [GOOD] >> DataShardReadIterator::ShouldReadRangeRightInclusive >> KqpYql::AnsiIn [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesRestart-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts+UseSink >> KqpYql::InsertCVList-useSink [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1000 [GOOD] >> DataShardReadIterator::ShouldFailUknownColumns >> DataShardReadIterator::ShouldReadNoColumnsRangeRequestArrow [GOOD] >> DataShardReadIterator::ShouldReadNonExistingKey >> DataShardReadIterator::ShouldRollbackLocksWhenWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenWriteInSeparateTransactions+EvWrite >> KqpScripting::StreamExecuteYqlScriptOperationTmeoutBruteForce [GOOD] >> KqpScripting::StreamExecuteYqlScriptPg >> DataShardReadIterator::ShouldReadMultipleKeysOneByOne [GOOD] >> DataShardReadIterator::ShouldReadKeyPrefix1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::AnsiIn [GOOD] Test command err: Trying to start YDB, gRPC: 14423, MsgBus: 5557 2025-06-25T15:02:22.519434Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902514304725787:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:22.519493Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0008dd/r3tmp/tmpoBnB4B/pdisk_1.dat 2025-06-25T15:02:22.982524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:22.982617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:22.988465Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
2025-06-25T15:02:23.024617Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902514304725751:2080] 1750863742514584 != 1750863742514587 2025-06-25T15:02:23.042038Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14423, node 1 2025-06-25T15:02:23.207916Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:23.207941Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:23.207952Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:23.208047Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:02:23.540336Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:5557 TClient is connected to server localhost:5557 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:24.095295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:24.131978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:24.291426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:24.455050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:24.541094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:25.893831Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902527189629272:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:25.893920Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.233499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.265298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.297110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.334222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.374688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.443339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.494847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.551869Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902531484597228:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.551957Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.552157Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902531484597233:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.555605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:26.568113Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902531484597235:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:26.629170Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902531484597288:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:27.521222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902514304725787:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:27.521279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:27.817265Z node 1 :KQP_SESSION ERROR: kqp_session_actor.cpp:2865: SessionId: ydb://session/3?node_id=1&id=MWRmODFhZWEtZWM4YmI5ZjgtODIzYjdiMzgtYjUyNTA2NDQ=, ActorId: [1:7519902535779564855:2474], ActorState: ExecuteState, TraceId: 01jykss6hnbwb2ajgj2a8agdyq, Internal error, message: yql/essentials/types/binary_json/read.cpp:161: StringOffset must be inside buffer 2025-06-25T15:02:27.817324Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MWRmODFhZWEtZWM4YmI5ZjgtODIzYjdiMzgtYjUyNTA2NDQ=, ActorId: [1:7519902535779564855:2474], ActorState: ExecuteState, TraceId: 01jykss6hnbwb2ajgj2a8agdyq, Create QueryResponse for error on request, msg: yql/essentials/types/binary_json/read.cpp:161: StringOffset must be inside buffer Trying to start YDB, gRPC: 62483, MsgBus: 6939 2025-06-25T15:02:28.529831Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902538563036132:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:28.529872Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0008dd/r3tmp/tmpeDFvM8/pdisk_1.dat 2025-06-25T15:02:28.711845Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:28.712924Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902538563036109:2080] 1750863748528721 != 1750863748528724 2025-06-25T15:02:28.720898Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:28.720968Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:28.722323Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62483, node 2 2025-06-25T15:02:28.861737Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:28.861757Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:28.861762Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:28.861850Z node 2 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6939 TClient is connected to server localhost:6939 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:29.438462Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:29.444254Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:29.457505Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:29.527271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:29.647801Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:29.689372Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:29.746140Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:31.752814Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902551447939610:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:31.752884Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:31.816598Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.853215Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.890421Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.917966Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.947239Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.980408Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:32.012978Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:32.078579Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902555742907561:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:32.078660Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:32.078797Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902555742907566:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:32.081811Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:32.092386Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902555742907568:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:32.182312Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902555742907619:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::InsertCVList-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 3106, MsgBus: 18855 2025-06-25T15:02:22.524858Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902512372114578:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:22.524932Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0008b8/r3tmp/tmpXZRehF/pdisk_1.dat 2025-06-25T15:02:22.972491Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902512372114553:2080] 1750863742517810 != 1750863742517813 2025-06-25T15:02:22.982639Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:22.984180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:22.984275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:23.018311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3106, node 1 2025-06-25T15:02:23.203256Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:23.203286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:23.203298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:23.203416Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:02:23.579549Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:18855 TClient is connected to server localhost:18855 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:24.107254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:24.124245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:02:24.144523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:24.306648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:24.468727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:24.555040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:25.810952Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902525257018091:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:25.811048Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.235456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.274185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.308818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.364500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.406321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.438364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.477830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.537861Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902529551986041:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.538026Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.538129Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902529551986046:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.541863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:26.551819Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902529551986048:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:26.628448Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902529551986099:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:27.526182Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902512372114578:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:27.526282Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:27.748845Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-25T15:02:27.757516Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037914 ... VE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:28.755881Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:28.792778Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:28.792800Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:28.792805Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:28.792911Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11937 TClient is connected to server localhost:11937 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:29.257488Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:02:29.263675Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:29.272983Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:29.332916Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:29.492050Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:29.565441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:29.731409Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:31.580476Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902552333905251:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:31.580570Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:31.640960Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.688537Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.724405Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.770420Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.833774Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.868471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.904228Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.997828Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902552333905916:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:31.997942Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:31.998240Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902552333905921:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:32.003085Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:32.018235Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902552333905923:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:32.074051Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902556628873270:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:33.454040Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7519902560923840834:2478], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=YjhiOGM5NjAtNDU5ZTQyNjItZDNjMzFlNzMtZjIwNzRmZGQ=. TraceId : 01jykssby14whrkhr2m65zcmts. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Duplicated keys found., code: 2012 }. 2025-06-25T15:02:33.454431Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519902560923840835:2479], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=2&id=YjhiOGM5NjAtNDU5ZTQyNjItZDNjMzFlNzMtZjIwNzRmZGQ=. CustomerSuppliedId : . TraceId : 01jykssby14whrkhr2m65zcmts. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519902560923840831:2469], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-25T15:02:33.454951Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=YjhiOGM5NjAtNDU5ZTQyNjItZDNjMzFlNzMtZjIwNzRmZGQ=, ActorId: [2:7519902560923840805:2469], ActorState: ExecuteState, TraceId: 01jykssby14whrkhr2m65zcmts, Create QueryResponse for error on request, msg:
: Error: Execution, code: 1060
: Error: Duplicated keys found., code: 2012 2025-06-25T15:02:33.583230Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902539449001842:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:33.583307Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> DataShardReadIterator::TryCommitLocksPrepared+Volatile+BreakLocks [GOOD] >> DataShardReadIterator::TryWriteManyRows+Commit >> KqpScripting::ScriptingCreateAndAlterTableTest [GOOD] >> KqpScripting::SecondaryIndexes >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedBeforeRead+UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedBeforeRead-UseSink >> KqpScripting::ScriptStats [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> BackupRestore::SkipEmptyDirsOnRestore [GOOD] Test command err: 2025-06-25T14:59:27.301209Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901762401237696:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:27.301299Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014a6/r3tmp/tmphybMSE/pdisk_1.dat 2025-06-25T14:59:27.640713Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:27.665627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:27.665713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:27.675319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2805, node 1 2025-06-25T14:59:27.860010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:27.860053Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:27.860083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:27.860212Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28391 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:28.260953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:59:28.312328Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:28.663640Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519901762401238028:2201]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:28.663669Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:28.663728Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519901762401238028:2201], Recipient [1:7519901762401238028:2201]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:28.663756Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:29.663902Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519901762401238028:2201]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:29.663930Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:29.663955Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519901762401238028:2201], Recipient [1:7519901762401238028:2201]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:29.663964Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:29.916554Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901770991173305:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.916555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901770991173294:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.916642Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.916986Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519901762401237921:2143] Handle TEvProposeTransaction 2025-06-25T14:59:29.917022Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519901762401237921:2143] TxId# 281474976710658 ProcessProposeTransaction 2025-06-25T14:59:29.917088Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519901762401237921:2143] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519901770991173310:2622] 2025-06-25T14:59:29.989178Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519901770991173310:2622] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-25T14:59:29.989250Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519901770991173310:2622] txid# 281474976710658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:59:29.989280Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519901770991173310:2622] txid# 281474976710658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-25T14:59:29.990844Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519901770991173310:2622] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:59:29.990927Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519901770991173310:2622] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:59:29.991117Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519901770991173310:2622] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:59:29.991281Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519901770991173310:2622] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:59:29.991346Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519901770991173310:2622] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-25T14:59:29.991522Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519901770991173310:2622] txid# 281474976710658 HANDLE EvClientConnected 2025-06-25T14:59:29.991564Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901770991173338:2631], Recipient [1:7519901762401238028:2201]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:59:29.991589Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:59:29.991605Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:59:29.991632Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519901770991173310:2622], Recipient [1:7519901762401238028:2201]: {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-25T14:59:29.991646Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:59:29.993756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: ".metadata/workload_manager/pools/default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } TxId: 281474976710658 TabletId: 72057594046644480 Owner: "metadata@system" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:59:29.994073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:59:29.994168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: ... 5:02:05.820141Z node 52 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(52, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9034, node 52 2025-06-25T15:02:05.958526Z node 52 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:05.958560Z node 52 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:05.958583Z node 52 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:05.958812Z node 52 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:02:06.484539Z node 52 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27448 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:06.712233Z node 52 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:10.456538Z node 52 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[52:7519902442163431762:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:10.456640Z node 52 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:12.359918Z node 52 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyksrj789t0axj76wsbh248t", Request deadline has expired for 0.537549s seconds (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. 
TCreateSessionResult::GetSession 2025-06-25T15:02:14.696065Z node 55 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[55:7519902479071025677:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:14.696190Z node 55 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014a6/r3tmp/tmpWBBsvb/pdisk_1.dat 2025-06-25T15:02:15.056231Z node 55 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:15.091897Z node 55 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(55, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:15.092066Z node 55 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(55, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:15.108570Z node 55 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(55, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28471, node 55 2025-06-25T15:02:15.253742Z node 55 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:15.253776Z node 55 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:15.253801Z node 55 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:15.254021Z node 55 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:02:15.713809Z node 55 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:11186 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:15.915852Z node 55 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:02:19.696084Z node 55 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[55:7519902479071025677:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:19.696211Z node 55 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:21.124357Z node 55 :KQP_PROXY ERROR: kqp_proxy_service.cpp:1482: TraceId: "01jyksrv6e600nsbq91befbgyp", Request deadline has expired for 0.121960s seconds (NYdb::Dev::TContractViolation) Attempt to use result with not successfull status. TCreateSessionResult::GetSession 2025-06-25T15:02:23.067427Z node 58 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[58:7519902517240932313:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:23.067544Z node 58 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014a6/r3tmp/tmpMeOUPr/pdisk_1.dat 2025-06-25T15:02:23.359205Z node 58 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(58, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:23.359359Z node 58 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(58, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:23.360725Z node 58 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:23.399989Z node 58 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(58, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12324, node 58 2025-06-25T15:02:23.520728Z node 58 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:23.520762Z node 58 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:23.520788Z node 58 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:23.521011Z node 58 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:02:24.092587Z node 58 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21160 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:24.214855Z node 58 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:24.241868Z node 58 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 Restore "/home/runner/.ya/build/build_root/yft8/0014a6/r3tmp/tmp04f74G/" to "/Root"Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/yft8/0014a6/r3tmp/tmp04f74G/"},{"type":"Directory","path":"/home/runner/.ya/build/build_root/yft8/0014a6/r3tmp/tmp04f74G/with_one_dir"},{"type":"Directory","path":"/home/runner/.ya/build/build_root/yft8/0014a6/r3tmp/tmp04f74G/with_one_file"}]Process "/home/runner/.ya/build/build_root/yft8/0014a6/r3tmp/tmp04f74G/with_one_dir"Restore empty directory "/home/runner/.ya/build/build_root/yft8/0014a6/r3tmp/tmp04f74G/with_one_dir" to "/Root/with_one_dir"Process "/home/runner/.ya/build/build_root/yft8/0014a6/r3tmp/tmp04f74G/with_one_file"Restore empty directory "/home/runner/.ya/build/build_root/yft8/0014a6/r3tmp/tmp04f74G/with_one_file" to "/Root/with_one_file"Restore completed successfully >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix-EvWrite >> DataShardReadIteratorConsistency::LeaseConfirmationNotOutOfOrder [GOOD] >> DataShardReadIteratorConsistency::BrokenWriteLockBeforeIteration >> KqpScripting::ScriptExplain [GOOD] >> KqpScripting::StreamDdlAndDml [GOOD] >> KqpScripting::ScanQuery [GOOD] >> KqpScripting::ScanQueryDisable >> KqpYql::TableUseBeforeCreate >> KqpScripting::SystemTables [GOOD] >> KqpYql::UpdatePk >> KqpLimits::OutOfSpaceBulkUpsertFail [GOOD] >> KqpLimits::OutOfSpaceYQLUpsertFail+useSink ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPathSpecified [GOOD] Test command err: 2025-06-25T14:59:27.260864Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901761442313401:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:27.260924Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014d4/r3tmp/tmpPDuyI0/pdisk_1.dat 2025-06-25T14:59:27.650378Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:27.675514Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:27.675600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:27.694680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9196, node 1 2025-06-25T14:59:27.857057Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:27.857083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:27.857090Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:27.857205Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29287 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:28.251484Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:59:28.268721Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:30.072439Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901774327216282:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.072572Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.270259Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519901761442313608:2143] Handle TEvProposeTransaction 2025-06-25T14:59:30.270288Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519901761442313608:2143] TxId# 281474976710658 ProcessProposeTransaction 2025-06-25T14:59:30.270328Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519901761442313608:2143] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519901774327216308:2622] 2025-06-25T14:59:30.326145Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519901774327216308:2622] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-06-25T14:59:30.326195Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519901774327216308:2622] txid# 281474976710658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:59:30.326534Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519901774327216308:2622] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:59:30.326611Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519901774327216308:2622] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:59:30.326801Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519901774327216308:2622] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:59:30.326911Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519901774327216308:2622] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:59:30.326951Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519901774327216308:2622] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-25T14:59:30.327085Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519901774327216308:2622] txid# 281474976710658 HANDLE EvClientConnected 2025-06-25T14:59:30.328389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:59:30.330104Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:7519901774327216308:2622] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-06-25T14:59:30.330179Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:7519901774327216308:2622] txid# 281474976710658 SEND to# [1:7519901774327216307:2304] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-06-25T14:59:30.462445Z node 1 
:KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901774327216453:2311], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.462507Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.462650Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901774327216458:2314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:30.462870Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519901761442313608:2143] Handle TEvProposeTransaction 2025-06-25T14:59:30.462886Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519901761442313608:2143] TxId# 281474976710659 ProcessProposeTransaction 2025-06-25T14:59:30.462941Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519901761442313608:2143] Cookie# 0 userReqId# "" txid# 281474976710659 SEND to# [1:7519901774327216461:2742] 2025-06-25T14:59:30.465360Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519901774327216461:2742] txid# 281474976710659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-25T14:59:30.465420Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519901774327216461:2742] txid# 281474976710659 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:59:30.465443Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519901774327216461:2742] txid# 281474976710659 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-25T14:59:30.467178Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519901774327216461:2742] txid# 281474976710659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:59:30.467263Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519901774327216461:2742] txid# 281474976710659 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:59:30.467518Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519901774327216461:2742] txid# 281474976710659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:59:30.467665Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519901774327216461:2742] HANDLE EvNavigateKeySetResult, txid# 281474976710659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:59:30.467750Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519901774327216461:2742] txid# 281474976710659 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710659 TabletId# 72057594046644480} 2025-06-25T14:59:30.467882Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519901774327216461:2742] txid# 281474976710659 HANDLE EvClientConnected 2025-06-25T14:59:30.469398Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo ... emeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T15:02:32.777683Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T15:02:32.777803Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [46:7519902557342413102:2827], Recipient [46:7519902535867575609:2202]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:32.777830Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:32.777850Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T15:02:32.777958Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269551620, Sender [46:7519902557342413015:2313], Recipient [46:7519902535867575609:2202]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 7519902557342413015 RawX2: 4503797195868425 } Origin: 72075186224037888 State: 2 TxId: 281474976710660 Step: 0 Generation: 1 2025-06-25T15:02:32.777978Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4987: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-25T15:02:32.778048Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5596: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 7519902557342413015 RawX2: 4503797195868425 } Origin: 72075186224037888 State: 2 TxId: 281474976710660 Step: 0 Generation: 1 2025-06-25T15:02:32.778069Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 281474976710660, tablet: 72075186224037888, partId: 2 2025-06-25T15:02:32.778192Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 281474976710660:2, at schemeshard: 72057594046644480, message: Source { RawX1: 7519902557342413015 RawX2: 4503797195868425 } Origin: 72075186224037888 State: 2 TxId: 281474976710660 Step: 0 Generation: 1 2025-06-25T15:02:32.778231Z node 46 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1047: NTableState::TProposedWaitParts operationId# 281474976710660:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 2025-06-25T15:02:32.778318Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1051: NTableState::TProposedWaitParts operationId# 281474976710660:2 HandleReply TEvDataShard::TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7519902557342413015 RawX2: 4503797195868425 } Origin: 72075186224037888 State: 2 TxId: 281474976710660 Step: 0 Generation: 1 2025-06-25T15:02:32.778387Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:670: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710660:2, shardIdx: 72057594046644480:1, shard: 72075186224037888, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-25T15:02:32.778418Z node 46 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 281474976710660:2, at schemeshard: 72057594046644480 2025-06-25T15:02:32.778438Z node 46 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 281474976710660:2, datashard: 72075186224037888, at schemeshard: 72057594046644480 2025-06-25T15:02:32.778470Z node 46 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710660:2 129 -> 240 2025-06-25T15:02:32.778685Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T15:02:32.780535Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710660 2025-06-25T15:02:32.780563Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T15:02:32.780745Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710660 2025-06-25T15:02:32.780759Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T15:02:32.780936Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710660 2025-06-25T15:02:32.780946Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T15:02:32.780989Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710660 2025-06-25T15:02:32.780999Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T15:02:32.781065Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710660:2, at schemeshard: 72057594046644480 2025-06-25T15:02:32.781077Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T15:02:32.781811Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 281474976710660:2, at schemeshard: 72057594046644480 2025-06-25T15:02:32.781835Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T15:02:32.781852Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:276: Activate send for 281474976710660:2 2025-06-25T15:02:32.781930Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:632: Send to actor: [46:7519902557342413015:2313] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710660 at schemeshard: 72057594046644480 2025-06-25T15:02:32.782062Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 2146435072, Sender [46:7519902535867575609:2202], Recipient [46:7519902535867575609:2202]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-25T15:02:32.782090Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4972: StateWork, processing event TEvPrivate::TEvProgressOperation 
2025-06-25T15:02:32.782144Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710660:2, at schemeshard: 72057594046644480 2025-06-25T15:02:32.782171Z node 46 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976710660:2 ProgressState 2025-06-25T15:02:32.782322Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:166: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-25T15:02:32.782344Z node 46 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710660:2 progress is 3/3 2025-06-25T15:02:32.782377Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710660 ready parts: 3/3 2025-06-25T15:02:32.782403Z node 46 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710660:2 progress is 3/3 2025-06-25T15:02:32.782416Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710660 ready parts: 3/3 2025-06-25T15:02:32.782436Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710660, ready parts: 3/3, is published: true 2025-06-25T15:02:32.782483Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [46:7519902557342412942:2299] message: TxId: 281474976710660 2025-06-25T15:02:32.782516Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710660 ready parts: 3/3 2025-06-25T15:02:32.782545Z node 46 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710660:0 2025-06-25T15:02:32.782558Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710660:0 2025-06-25T15:02:32.782622Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 6] was 2 2025-06-25T15:02:32.782635Z node 46 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710660:1 2025-06-25T15:02:32.782644Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710660:1 2025-06-25T15:02:32.782662Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 7] was 2 2025-06-25T15:02:32.782671Z node 46 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710660:2 2025-06-25T15:02:32.782679Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710660:2 2025-06-25T15:02:32.782765Z node 46 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 8] was 3 2025-06-25T15:02:32.784782Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:208: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-25T15:02:32.784855Z node 46 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:632: Send to actor: [46:7519902557342412942:2299] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710660 at schemeshard: 72057594046644480 2025-06-25T15:02:32.785878Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [46:7519902557342412971:2721], Recipient [46:7519902535867575609:2202]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:02:32.785914Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:02:32.785930Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-25T15:02:32.789654Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 269877764, Sender [46:7519902557342413102:2827], Recipient [46:7519902535867575609:2202]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:02:32.789682Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5053: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:02:32.789697Z node 46 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5885: Server pipe is reset, at schemeshard: 72057594046644480 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::ScriptStats [GOOD] Test command err: Trying to start YDB, gRPC: 29953, MsgBus: 8547 2025-06-25T15:02:22.545264Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902512582503259:2177];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:22.545324Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0008ae/r3tmp/tmpgnXlnJ/pdisk_1.dat 2025-06-25T15:02:23.015913Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:23.017073Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902512582503105:2080] 1750863742518073 != 1750863742518076 2025-06-25T15:02:23.023201Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:23.023282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:23.024608Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29953, node 1 2025-06-25T15:02:23.204847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:23.204870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:23.204878Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:23.205003Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:02:23.541415Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:8547 TClient is connected to server localhost:8547 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:24.028058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:24.047326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:02:24.070911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:24.262910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:24.426742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:24.509716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:25.643309Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902525467406620:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:25.643430Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.234835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.265069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.301986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.334224Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.375372Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.452523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.488523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.559214Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902529762374575:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.559283Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.559493Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902529762374580:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.564248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:26.574468Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902529762374582:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:26.642270Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902529762374635:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:27.563752Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902512582503259:2177];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:27.563968Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 3665, MsgBus: 17730 2025-06-25T15:02:28.617448Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902537826474239:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:28.617509Z node 2 :METADATA_PROVIDER ERROR: log.cpp:78 ... loaded 2025-06-25T15:02:28.718529Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902537826474069:2080] 1750863748551388 != 1750863748551391 2025-06-25T15:02:28.746315Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:28.746390Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:28.755569Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3665, node 2 2025-06-25T15:02:28.824855Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:28.824884Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:28.824890Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:28.825031Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17730 TClient is connected to server localhost:17730 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:02:29.373611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:29.379999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:29.390117Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:29.480531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:29.591365Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:29.643702Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:29.723883Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:31.732480Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902550711377582:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:31.732565Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:31.787421Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.825011Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.855221Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.887049Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.924029Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.976404Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:32.021343Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:32.077245Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902555006345535:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:32.077360Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:32.077556Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902555006345540:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:32.081281Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:32.091592Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902555006345542:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:32.146648Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902555006345593:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:33.579805Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902537826474239:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:33.579877Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:33.705013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.230904Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863754234, txId: 281474976715675] shutting down 2025-06-25T15:02:34.843213Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863754857, txId: 281474976715679] shutting down 2025-06-25T15:02:35.235894Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863755229, txId: 281474976715683] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::ScriptExplain [GOOD] Test command err: Trying to start YDB, gRPC: 10420, MsgBus: 14424 2025-06-25T15:02:22.533761Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902515727176711:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:22.536636Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0008c5/r3tmp/tmpuGGOq8/pdisk_1.dat 2025-06-25T15:02:22.978356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:22.978438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:23.021486Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:23.038947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10420, node 1 2025-06-25T15:02:23.203653Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:23.203685Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-25T15:02:23.203692Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:23.203825Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:02:23.541100Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14424 TClient is connected to server localhost:14424 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:24.015451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:24.052962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:02:24.064389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:24.255297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T15:02:24.423276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:24.515982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:25.760820Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902528612080140:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:25.760953Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.241816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.284399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.314108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.351937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.404009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.477077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.553184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.651153Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902532907048108:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.651228Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.651448Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902532907048113:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.657490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:26.672863Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902532907048115:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:26.778727Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902532907048170:3427] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:27.528549Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902515727176711:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:27.528653Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:27.763753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) Trying to start YDB, gRPC: 18373, MsgBus: 21743 2025-06-25T15:02:30.608263Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: ... le profiles were not loaded 2025-06-25T15:02:30.740801Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902548886786801:2080] 1750863750533971 != 1750863750533974 2025-06-25T15:02:30.757094Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:30.757183Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:30.760583Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18373, node 2 2025-06-25T15:02:30.836536Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:30.836557Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:30.836563Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:30.836662Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21743 TClient is connected to server localhost:21743 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:31.303181Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:31.312625Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T15:02:31.324683Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.388676Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:31.577830Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:31.590554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T15:02:31.665153Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.726286Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902561771690315:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:33.726365Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:33.787832Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.855690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.886837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.919224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.951978Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.026583Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.094179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.177555Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902566066658281:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.177663Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.177761Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902566066658286:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.181774Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:34.195738Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902566066658288:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:34.277725Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902566066658339:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:35.572195Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902548886786935:2150];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:35.572277Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:35.746701Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519902570361625925:2480], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:168: Error: At function: DataQueryBlocks
:1:185: Error: At function: TKiDataQueryBlock
:1:208: Error: At function: KiEffects
:1:219: Error: At function: KiWriteTable!
:1:219: Error: Cannot find table 'db.[/Root/ScriptingTest]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T15:02:35.748204Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=YThhMTAzNGItY2E5ZWU2MzAtNjNhNTBiOS1lMmIyMmY2MA==, ActorId: [2:7519902570361625923:2479], ActorState: ExecuteState, TraceId: 01jyksseee9bf2mrvq8b7nf6jm, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamDdlAndDml [GOOD] Test command err: Trying to start YDB, gRPC: 4810, MsgBus: 23370 2025-06-25T15:02:24.965287Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902523259518260:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:24.965357Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000879/r3tmp/tmp8k45IV/pdisk_1.dat 2025-06-25T15:02:25.320025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:25.320128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:25.323147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:25.354476Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:25.355130Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902523259518054:2080] 1750863744927487 != 1750863744927490 TServer::EnableGrpc on GrpcPort 4810, node 1 2025-06-25T15:02:25.420832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:25.420857Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:25.420863Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:25.420970Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23370 TClient is connected to server localhost:23370 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T15:02:25.949425Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:25.995306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:26.008731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:02:26.022789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:26.158665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:26.325767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:26.405840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:28.121099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902540439388867:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:28.121192Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:28.381394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:28.454317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:28.485270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:28.521662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:28.550741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:28.594652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:28.664712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:28.756606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902540439389538:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:28.756721Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:28.757118Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902540439389543:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:28.761032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:28.776966Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902540439389545:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:28.878560Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902540439389596:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:29.914950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:29.968476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902523259518260:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:29.969078Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot dete ... 467Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000879/r3tmp/tmpaCdhgx/pdisk_1.dat 2025-06-25T15:02:31.132761Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:31.133951Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902547673496868:2080] 1750863750980731 != 1750863750980734 2025-06-25T15:02:31.147706Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:31.147785Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:31.151736Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27271, node 2 2025-06-25T15:02:31.244867Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:31.244888Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:31.244894Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:31.244999Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23063 TClient is connected to server localhost:23063 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:31.787837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:31.797254Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:31.808365Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:31.888907Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:32.037286Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:32.081986Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:32.164424Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:33.988521Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902560558400405:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:33.988612Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.048691Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.077940Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.105577Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.136065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.165737Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.195766Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.229062Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.294129Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902564853368357:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.294198Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.294260Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902564853368362:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.297296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:34.307092Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902564853368364:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:34.361222Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902564853368415:3417] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:35.374342Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:35.877894Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863755907, txId: 281474976715674] shutting down 2025-06-25T15:02:35.982062Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902547673496890:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:35.982131Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TNodeBrokerTest::TestRandomActions [GOOD] >> KqpYql::DdlDmlMix ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::SystemTables [GOOD] Test command err: Trying to start YDB, gRPC: 25414, MsgBus: 26391 2025-06-25T15:02:24.334113Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902521868801099:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:24.341977Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000890/r3tmp/tmp3ZkDsa/pdisk_1.dat 2025-06-25T15:02:24.745428Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25414, node 1 2025-06-25T15:02:24.777999Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:24.778115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:24.780025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:24.860852Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:24.860874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:24.860881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:24.861009Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26391 TClient is connected to server localhost:26391 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T15:02:25.344594Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:25.407717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:25.427236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:25.606847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:25.755600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:25.813013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:27.330698Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902534753704584:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:27.330803Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:27.631579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:27.663004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:27.695210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:27.729279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:27.757430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:27.831358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:27.878065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:27.977489Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902534753705250:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:27.977570Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:27.978053Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902534753705255:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:27.981866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:27.992997Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902534753705257:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:28.084098Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902539048672604:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:29.317490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:29.333475Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902521868801099:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:29.333546Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 11730, MsgBus: 32370 2025-06-25T15:02:30.363047Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902546441736156:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:30.363249Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000890/r3tmp/tmpCTdpuB/pdisk_1.dat 2025-06-25T15:02:30.537931Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:30.538025Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:30.542652Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:30.552458Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11730, node 2 2025-06-25T15:02:30.628902Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:30.628925Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:30.628932Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:30.629049Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32370 TClient is connected to server localhost:32370 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:31.144650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:31.153106Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:02:31.165462Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:31.235578Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:31.370673Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:31.391689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T15:02:31.485815Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.461886Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902559326639640:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:33.461952Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:33.505611Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.534197Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.563369Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.595981Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.632050Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.722565Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.758561Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:33.816401Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902559326640299:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:33.816504Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:33.816864Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902559326640304:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:33.821051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:33.833070Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902559326640306:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:33.902592Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902559326640357:3415] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:35.033165Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863755013, txId: 281474976710672] shutting down 2025-06-25T15:02:35.225558Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863755176, txId: 281474976710674] shutting down 2025-06-25T15:02:35.364175Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902546441736156:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:35.364250Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:36.094697Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863756110, txId: 281474976710676] shutting down ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::TestRandomActions [GOOD] Test command err: 2025-06-25T15:00:38.002312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:38.002362Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) 2025-06-25T15:00:38.283580Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-25T15:00:38.356498Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:38.412177Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:00:38.412976Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:00:38.413221Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:00:39.476332Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:00:41.290135Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-25T15:00:41.290571Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-25T15:00:41.304370Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-25T15:00:41.341906Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-25T15:00:41.355655Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host1:0: ERROR_TEMP: No free node IDs 2025-06-25T15:00:41.375989Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host9:8: ERROR_TEMP: No free node IDs 2025-06-25T15:00:41.377362Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host12:11: ERROR_TEMP: No free node IDs 2025-06-25T15:00:41.378200Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host1:0: ERROR_TEMP: No free node IDs 2025-06-25T15:00:41.379113Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host3:2: ERROR_TEMP: No free node IDs 2025-06-25T15:00:42.407893Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:42.430596Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:42.430932Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:42.431536Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:42.431784Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:43.173765Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:00:43.174309Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:00:43.187915Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 
2025-06-25T15:00:43.203313Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-25T15:00:43.219299Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Unknown node 2025-06-25T15:00:43.233756Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:43.234217Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:43.236429Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:43.252688Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-25T15:00:43.253513Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-25T15:00:43.670425Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1031: WRONG_REQUEST: Unknown node 2025-06-25T15:00:43.670902Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1031: WRONG_REQUEST: Unknown node 2025-06-25T15:00:43.705875Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1031: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.071272Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1031: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.071719Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1031: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.085038Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1031: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.085534Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1031: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.402553Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.403215Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.515935Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.534339Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.535365Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.535792Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.538018Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.552835Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-25T15:00:44.646694Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Node has expired 2025-06-25T15:00:44.693490Z 
node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Node has expired 2025-06-25T15:00:44.694071Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host4:3: ERROR_TEMP: No free node IDs 2025-06-25T15:00:44.694551Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Node has expired 2025-06-25T15:00:45.874764Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:00:45.875344Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:00:45.911747Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:00:45.912338Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:00:45.912795Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:00:45.913306Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:00:45.913719Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:00:45.928963Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-25T15:00:45.929647Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-25T15:00:45.930623Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-25T15:00:45.931095Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-25T15:00:45.932023Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host2:1: ERROR_TEMP: No free node IDs 2025-06-25T15:00:45.932775Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host13:12: ERROR_TEMP: No free node IDs 2025-06-25T15:00:46.453694Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-25T15:00:46.777883Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:00:46.779726Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:00:47.481154Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Unknown node 2025-06-25T15:00:47.509815Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-25T15:00:47.511204Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-25T15:00:47.985602Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-25T15:00:47.986143Z node 1 :NODE_BROKER 
ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-25T15:00:47.986609Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-25T15:00:47.986939Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-25T15:00:47.987308Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-25T15:00:48.398413Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:00:48.399032Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:00:48.399557Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:00:48.439930Z node 1 :NODE_BROKER ERROR: node_ ... _broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-25T15:02:24.366105Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-25T15:02:24.413157Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host14:13: ERROR_TEMP: No free node IDs 2025-06-25T15:02:24.506108Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-25T15:02:24.508410Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-25T15:02:24.514890Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-25T15:02:24.519807Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-25T15:02:24.590622Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node 2025-06-25T15:02:24.844742Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host12:11: ERROR_TEMP: No free node IDs 2025-06-25T15:02:24.848471Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host3:2: ERROR_TEMP: No free node IDs 2025-06-25T15:02:25.977869Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host13:12: ERROR_TEMP: No free node IDs 2025-06-25T15:02:26.033051Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host12:11: ERROR_TEMP: No free node IDs 2025-06-25T15:02:26.038390Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Node has expired 2025-06-25T15:02:26.119890Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Node has expired 2025-06-25T15:02:27.050339Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-25T15:02:27.072151Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Unknown node 2025-06-25T15:02:27.077176Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend 
lease for node #1027: WRONG_REQUEST: Unknown node 2025-06-25T15:02:27.510617Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:02:27.533839Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-25T15:02:27.938656Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-25T15:02:27.943971Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-25T15:02:27.946409Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-25T15:02:28.067104Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:02:28.069617Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:02:28.126402Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:02:28.128899Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:02:28.134572Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:02:28.152511Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:02:28.745269Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-25T15:02:28.772397Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:02:28.887149Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:02:28.889977Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:02:29.001886Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Node has expired 2025-06-25T15:02:29.012696Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Node has expired 2025-06-25T15:02:29.015437Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Node has expired 2025-06-25T15:02:29.019057Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host6:5: ERROR_TEMP: No free node IDs 2025-06-25T15:02:29.021075Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host4:3: ERROR_TEMP: No free node IDs 2025-06-25T15:02:29.026133Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Node has expired 2025-06-25T15:02:30.036363Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1027: WRONG_REQUEST: Unknown node 2025-06-25T15:02:30.060597Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: 
WRONG_REQUEST: Unknown node 2025-06-25T15:02:30.078716Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host11:10: ERROR_TEMP: No free node IDs 2025-06-25T15:02:30.081515Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-25T15:02:30.084621Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-25T15:02:30.087327Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host10:9: ERROR_TEMP: No free node IDs 2025-06-25T15:02:30.105704Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host7:6: ERROR_TEMP: No free node IDs 2025-06-25T15:02:30.108033Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host7:6: ERROR_TEMP: No free node IDs 2025-06-25T15:02:30.110575Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-25T15:02:30.655566Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:02:30.745719Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:02:30.768243Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-25T15:02:30.820567Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:02:30.823182Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1028: WRONG_REQUEST: Unknown node 2025-06-25T15:02:30.849356Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1030: WRONG_REQUEST: Unknown node 2025-06-25T15:02:31.498637Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node 2025-06-25T15:02:31.702416Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host13:12: ERROR_TEMP: No free node IDs 2025-06-25T15:02:31.808555Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host15:14: ERROR_TEMP: No free node IDs 2025-06-25T15:02:31.960763Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host3:2: ERROR_TEMP: No free node IDs 2025-06-25T15:02:32.821408Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-25T15:02:32.828095Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host3:2: ERROR_TEMP: No free node IDs 2025-06-25T15:02:32.834953Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host10:9: ERROR_TEMP: No free node IDs 2025-06-25T15:02:32.906366Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-25T15:02:32.936980Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Node has expired 2025-06-25T15:02:33.560177Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 
2025-06-25T15:02:34.821522Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1024: WRONG_REQUEST: Unknown node 2025-06-25T15:02:34.845528Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:02:34.847939Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:02:34.849398Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:02:34.852168Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1025: WRONG_REQUEST: Unknown node 2025-06-25T15:02:35.242390Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-25T15:02:35.244581Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-25T15:02:35.246631Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1026: WRONG_REQUEST: Unknown node 2025-06-25T15:02:35.299206Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1029: WRONG_REQUEST: Unknown node 2025-06-25T15:02:35.385307Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node 2025-06-25T15:02:35.387566Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1032: WRONG_REQUEST: Unknown node 2025-06-25T15:02:35.416088Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1033: WRONG_REQUEST: Unknown node 2025-06-25T15:02:35.501690Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1033: WRONG_REQUEST: Unknown node 2025-06-25T15:02:35.503807Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #1033: WRONG_REQUEST: Unknown node 2025-06-25T15:02:35.527072Z node 1 :NODE_BROKER ERROR: node_broker__register_node.cpp:41: Cannot register node host5:4: ERROR_TEMP: No free node IDs 2025-06-25T15:02:36.183433Z node 1 :NODE_BROKER ERROR: node_broker__extend_lease.cpp:31: Cannot extend lease for node #0: WRONG_REQUEST: Unknown node >> DataShardSnapshots::LockedWriteDistributedCommitFreeze-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict-UseSink >> KqpScripting::StreamExecuteYqlScriptWriteCancelAfterBruteForced >> KqpYql::EvaluateExpr2 >> RetryPolicy::TWriteSession_RetryOnTargetCluster [GOOD] >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit100 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit101 >> KqpScripting::StreamExecuteYqlScriptScanClientOperationTimeoutBruteForce [GOOD] >> KqpScripting::StreamExecuteYqlScriptSeveralQueries >> DataShardReadIterator::ShouldReadRangeRightInclusive [GOOD] >> DataShardReadIterator::ShouldReadRangeOneByOne >> KqpScripting::ExecuteYqlScriptScanScalar >> KqpYql::BinaryJsonOffsetNormal >> KqpScripting::StreamExecuteYqlScriptPg [GOOD] >> DataShardReadIterator::ShouldFailUknownColumns [GOOD] >> DataShardReadIterator::ShouldFailWrongSchema >> KqpYql::UuidPrimaryKey ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> 
KqpScripting::StreamExecuteYqlScriptScanClientOperationTimeoutBruteForce [GOOD] Test command err: Trying to start YDB, gRPC: 10855, MsgBus: 1932 2025-06-25T15:02:22.518770Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902515138959897:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:22.521754Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0008ad/r3tmp/tmpBph7C9/pdisk_1.dat 2025-06-25T15:02:22.975642Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:22.990003Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:22.990232Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:23.035692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10855, node 1 2025-06-25T15:02:23.209074Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:23.209101Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:23.209110Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:23.209222Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:02:23.536429Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:1932 TClient is connected to server localhost:1932 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:24.124745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:02:24.161771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:24.310917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:24.464129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:02:24.514457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:25.805371Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902528023863382:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:25.805489Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.233885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.278075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.318928Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.359566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.402714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.478541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.520792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.582509Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902532318831336:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.582618Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.582620Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902532318831341:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.585574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:26.595888Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902532318831343:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:26.666346Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902532318831396:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:27.519985Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902515138959897:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:27.520068Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:28.685531Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [1:7519902536613798961:2473] 2025-06-25T15:02:28.688911Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [1:7519902540908766309:2480] TxId: 281474976710673. Ctx: { TraceId: 01jykss6k589xphw1h25pj6yff, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmQ0ODg4MzgtZDMwZDNjMGYtMTk1MjRjNWQtOGE5ZWI4YjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-25T15:02:28.689199Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=NmQ0ODg4MzgtZDMwZDNjMGYtMTk1MjRjNWQtOGE5ZWI4YjA= ... RN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902568629865968:2582] 2025-06-25T15:02:35.944105Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902568629865989:2591] 2025-06-25T15:02:36.021379Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902568629866004:2597] 2025-06-25T15:02:36.094637Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902572924833321:2606] 2025-06-25T15:02:36.174714Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902572924833335:2612] 2025-06-25T15:02:36.265676Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902572924833360:2624] 2025-06-25T15:02:36.343699Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902572924833371:2629] 2025-06-25T15:02:36.438313Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902572924833397:2639] 2025-06-25T15:02:36.438531Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519902572924833435:2644] TxId: 281474976715673. Ctx: { TraceId: 01jykssf236b8m7n9y9qhndrns, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTZhMmQ0NTUtZTFlZWMzNGMtNjk5MWNkZTgtZGExYTVmMjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-25T15:02:36.438714Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=ZTZhMmQ0NTUtZTFlZWMzNGMtNjk5MWNkZTgtZGExYTVmMjc=, ActorId: [2:7519902572924833407:2644], ActorState: ExecuteState, TraceId: 01jykssf236b8m7n9y9qhndrns, Create QueryResponse for error on request, msg: 2025-06-25T15:02:36.438960Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863756481, txId: 281474976715672] shutting down 2025-06-25T15:02:36.529967Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902572924833465:2645] 2025-06-25T15:02:36.624677Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902572924833488:2654] 2025-06-25T15:02:36.721970Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902572924833537:2662] 2025-06-25T15:02:36.825513Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902572924833578:2672] 2025-06-25T15:02:36.826239Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519902572924833614:2677] TxId: 281474976715677. Ctx: { TraceId: 01jykssfdtfc6h6fhpc8tes671, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NWJkNTY5NGEtMzNjN2M0YjAtZjVkNzgyNGItYjEzZmNiYWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-25T15:02:36.826473Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=NWJkNTY5NGEtMzNjN2M0YjAtZjVkNzgyNGItYjEzZmNiYWY=, ActorId: [2:7519902572924833588:2677], ActorState: ExecuteState, TraceId: 01jykssfdtfc6h6fhpc8tes671, Create QueryResponse for error on request, msg: 2025-06-25T15:02:36.827002Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863756852, txId: 281474976715676] shutting down 2025-06-25T15:02:36.827100Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519902572924833619:2678], TxId: 281474976715677, task: 1. Ctx: { TraceId : 01jykssfdtfc6h6fhpc8tes671. SessionId : ydb://session/3?node_id=2&id=NWJkNTY5NGEtMzNjN2M0YjAtZjVkNzgyNGItYjEzZmNiYWY=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519902572924833614:2677], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T15:02:36.827575Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519902572924833620:2679], TxId: 281474976715677, task: 2. Ctx: { SessionId : ydb://session/3?node_id=2&id=NWJkNTY5NGEtMzNjN2M0YjAtZjVkNzgyNGItYjEzZmNiYWY=. TraceId : 01jykssfdtfc6h6fhpc8tes671. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519902572924833614:2677], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T15:02:36.827674Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519902572924833622:2680], TxId: 281474976715677, task: 3. Ctx: { SessionId : ydb://session/3?node_id=2&id=NWJkNTY5NGEtMzNjN2M0YjAtZjVkNzgyNGItYjEzZmNiYWY=. TraceId : 01jykssfdtfc6h6fhpc8tes671. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519902572924833614:2677], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T15:02:36.827846Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519902572924833624:2682], TxId: 281474976715677, task: 5. Ctx: { CustomerSuppliedId : . TraceId : 01jykssfdtfc6h6fhpc8tes671. SessionId : ydb://session/3?node_id=2&id=NWJkNTY5NGEtMzNjN2M0YjAtZjVkNzgyNGItYjEzZmNiYWY=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519902572924833614:2677], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T15:02:36.828762Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519902572924833623:2681], TxId: 281474976715677, task: 4. Ctx: { TraceId : 01jykssfdtfc6h6fhpc8tes671. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=NWJkNTY5NGEtMzNjN2M0YjAtZjVkNzgyNGItYjEzZmNiYWY=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519902572924833614:2677], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T15:02:36.839636Z node 2 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:601: TxId: 281474976715677. Snapshot is not valid, tabletId: 72075186224037895, step: 1750863756852 2025-06-25T15:02:36.934603Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=MmZlYjM5MmMtOTE5ZTU5N2QtMjM0NzM4NjYtYTk4YjZkY2U=, ActorId: [2:7519902572924833680:2688], ActorState: ExecuteState, TraceId: 01jykssfgz84ys4ya60b1dnrf2, Create QueryResponse for error on request, msg: 2025-06-25T15:02:37.041756Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902572924833700:2696] 2025-06-25T15:02:37.141804Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863757174, txId: 281474976715679] shutting down 2025-06-25T15:02:37.260140Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902577219801120:2720] 2025-06-25T15:02:37.374776Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902577219801141:2729] 2025-06-25T15:02:37.495054Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902577219801162:2738] 2025-06-25T15:02:37.495736Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [2:7519902577219801198:2743] TxId: 281474976715682. Ctx: { TraceId: 01jykssg263pet3gfsy95kg2ge, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OWI5YTI1ODUtNDNlMmYyNDAtM2VmMzExM2EtNWZiZGQzZjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-25T15:02:37.495916Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=2&id=OWI5YTI1ODUtNDNlMmYyNDAtM2VmMzExM2EtNWZiZGQzZjg=, ActorId: [2:7519902577219801172:2743], ActorState: ExecuteState, TraceId: 01jykssg263pet3gfsy95kg2ge, Create QueryResponse for error on request, msg: 2025-06-25T15:02:37.496394Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863757524, txId: 281474976715681] shutting down 2025-06-25T15:02:37.496476Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519902577219801203:2744], TxId: 281474976715682, task: 1. Ctx: { SessionId : ydb://session/3?node_id=2&id=OWI5YTI1ODUtNDNlMmYyNDAtM2VmMzExM2EtNWZiZGQzZjg=. TraceId : 01jykssg263pet3gfsy95kg2ge. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519902577219801198:2743], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T15:02:37.496757Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519902577219801204:2745], TxId: 281474976715682, task: 2. Ctx: { TraceId : 01jykssg263pet3gfsy95kg2ge. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=OWI5YTI1ODUtNDNlMmYyNDAtM2VmMzExM2EtNWZiZGQzZjg=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519902577219801198:2743], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T15:02:37.496987Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519902577219801207:2748], TxId: 281474976715682, task: 5. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=OWI5YTI1ODUtNDNlMmYyNDAtM2VmMzExM2EtNWZiZGQzZjg=. TraceId : 01jykssg263pet3gfsy95kg2ge. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519902577219801198:2743], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T15:02:37.497741Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7519902577219801206:2747], TxId: 281474976715682, task: 4. Ctx: { TraceId : 01jykssg263pet3gfsy95kg2ge. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=OWI5YTI1ODUtNDNlMmYyNDAtM2VmMzExM2EtNWZiZGQzZjg=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7519902577219801198:2743], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-25T15:02:37.616229Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902577219801267:2753] 2025-06-25T15:02:37.744874Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902577219801291:2763] 2025-06-25T15:02:37.826985Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863757853, txId: 281474976715684] shutting down 2025-06-25T15:02:37.968429Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902577219801416:2788] 2025-06-25T15:02:38.106721Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902577219801438:2797] 2025-06-25T15:02:38.245416Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902581514768755:2806] 2025-06-25T15:02:38.387962Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902581514768776:2815] 2025-06-25T15:02:38.528876Z node 2 :RPC_REQUEST WARN: rpc_stream_execute_yql_script.cpp:377: Client lost, ActorId: [2:7519902581514768796:2824] 2025-06-25T15:02:38.639683Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863758644, txId: 281474976715686] shutting down >> DataShardReadIterator::ShouldReturnBrokenLockWhenWriteInSeparateTransactions+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenWriteInSeparateTransactions-EvWrite >> DataShardReadIterator::ShouldReadNonExistingKey [GOOD] >> DataShardReadIterator::ShouldReadNotExistingRange >> DataShardReadIterator::ShouldReadKeyPrefix1 [GOOD] >> DataShardReadIterator::ShouldReadKeyPrefix2 >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts+UseSink ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptPg [GOOD] Test command err: Trying to start YDB, gRPC: 15203, MsgBus: 27489 2025-06-25T15:02:22.518983Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902514577759485:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:22.522629Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00089e/r3tmp/tmpFsUxBb/pdisk_1.dat 2025-06-25T15:02:22.983903Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:22.984561Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:22.984636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:23.007768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15203, node 1 2025-06-25T15:02:23.221564Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:23.221594Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:23.221600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:23.221737Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:02:23.538952Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27489 TClient is connected to server localhost:27489 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:24.022675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T15:02:24.065323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:24.259215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:24.398851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:24.481750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:25.669687Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902527462662977:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:25.669801Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.233474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.264558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.298989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.334662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.375714Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.409878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.480115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:26.582637Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902531757630935:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.582723Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.582831Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902531757630940:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:26.585933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:26.598294Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902531757630942:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:26.664260Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902531757630997:3425] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:27.519089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902514577759485:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:27.519191Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:27.640606Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MTFkZjAyMTgtZTgwY2Y4NTEtMTgzNTBkZGEtZTcxMDYyZmM=, ActorId: [1:7519902536052598560:2474], ActorState: ExecuteState, TraceId: 01jykss6hmecvnsmywkhe7pgd3, Create QueryResponse for error on request, msg: 2025-06-25T15:02:27.711405Z node 1 :KQP_PROXY ERROR: kqp_proxy_service.cpp:957: Unknown sender for proxy response, requestId: 5 2025-06-25T15:02:27.726824Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MWI0NjI5OGMtOTVmOWEzNmMtYmM0MzY5YWQtM2FlYjlkOWQ=, ActorId: [1:7519902536052598573:2480], ActorState: ExecuteState, TraceId: 01jykss6m15d4cvgp7t8350ngs, C ... 1:7519902561822407116:3320], ActorState: ExecuteState, TraceId: 01jykssc837b1b47849nqfx4qg, Create QueryResponse for error on request, msg: 2025-06-25T15:02:33.665279Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863753695, txId: 281474976710766] shutting down 2025-06-25T15:02:33.802138Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863753835, txId: 281474976710768] shutting down 2025-06-25T15:02:33.981153Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863754017, txId: 281474976710770] shutting down Trying to start YDB, gRPC: 24018, MsgBus: 1657 2025-06-25T15:02:34.784870Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902566586245174:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:34.784928Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00089e/r3tmp/tmpBkApKB/pdisk_1.dat 2025-06-25T15:02:34.935824Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:34.936386Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902566586245149:2080] 1750863754783890 != 1750863754783893 TServer::EnableGrpc on GrpcPort 24018, node 2 2025-06-25T15:02:34.955811Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:34.955896Z node 2 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:34.958954Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:35.032838Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:35.032866Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:35.032872Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:35.032979Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1657 TClient is connected to server localhost:1657 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:35.554402Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:35.561496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:35.569488Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:35.627503Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:35.811662Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:35.826141Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:35.893065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:37.861675Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902579471148669:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:37.861754Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:37.926973Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:37.966870Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:37.992548Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.040502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.120376Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.155302Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.187094Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.245702Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902583766116628:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:38.245808Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:38.246430Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902583766116633:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:38.249878Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:38.259763Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902583766116635:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:38.335712Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902583766116686:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedBeforeRead-UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedBeforeRead+UseSink >> KqpYql::TableUseBeforeCreate [GOOD] >> KqpYql::NonStrictDml >> KqpYql::UpdatePk [GOOD] >> KqpScripting::SecondaryIndexes [GOOD] >> KqpScripting::StreamExecuteYqlScriptClientTimeoutBruteForce [GOOD] >> KqpScripting::StreamExecuteYqlScriptClientOperationTimeoutBruteForce >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions [GOOD] >> PersQueueSdkReadSessionTest::SettingsValidation >> KqpScripting::ScanQueryDisable [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder+EvWrite >> KqpYql::DdlDmlMix [GOOD] >> KqpYql::CreateUseTable ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::TableUseBeforeCreate [GOOD] Test command err: Trying to start YDB, gRPC: 16779, MsgBus: 65446 2025-06-25T15:02:37.032463Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902576557393901:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:37.032555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000835/r3tmp/tmpELux39/pdisk_1.dat 2025-06-25T15:02:37.374840Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902576557393883:2080] 1750863757031955 != 1750863757031958 2025-06-25T15:02:37.380857Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16779, node 1 2025-06-25T15:02:37.443619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:37.444205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:37.445178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:37.492948Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:37.492971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:37.492976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:37.493081Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65446 TClient is connected to server localhost:65446 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:37.991261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:38.012399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:38.027305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:38.040943Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:38.164144Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:38.322889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:38.405911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:39.887852Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902585147330113:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:39.887957Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.187529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.221715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.250729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.278953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.308873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.356065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.385723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.447314Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902589442298068:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.447417Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.447688Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902589442298073:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.451318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:40.460166Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902589442298075:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:40.539990Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902589442298126:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Error: Type annotation, code: 1030
:3:13: Error: At function: KiReadTable!
:3:13: Error: Cannot find table 'db.[/Root/NewTable]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::UpdatePk [GOOD] Test command err: Trying to start YDB, gRPC: 22186, MsgBus: 4574 2025-06-25T15:02:37.142631Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902580074620290:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:37.142734Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007f6/r3tmp/tmp5nSPCY/pdisk_1.dat 2025-06-25T15:02:37.466432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:37.466537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:37.468234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:37.484820Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:37.485488Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902580074620264:2080] 1750863757140669 != 1750863757140672 TServer::EnableGrpc on GrpcPort 22186, node 1 2025-06-25T15:02:37.540450Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:37.540481Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:37.540491Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:37.540629Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4574 TClient is connected to server localhost:4574 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:02:38.031322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:38.044481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-25T15:02:38.057129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.160255Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:38.234022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:38.365083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:38.429792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:40.069891Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902592959523793:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.069977Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.337643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.372789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.403468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.436135Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.469346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.498786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.546594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.665612Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902592959524452:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.665682Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.665828Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902592959524457:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.669430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:40.679359Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902592959524459:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:40.734604Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902592959524510:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Error: Type annotation, code: 1030
:3:20: Warning: At lambda, At function: AsStruct, At tuple
:4:31: Warning: At function: +
:4:31: Warning: Integral type implicit bitcast: Optional and Int32, code: 1107
:5:27: Error: At function: KiUpdateTable!
:5:27: Error: Cannot update primary key column: Group >> DataShardReadIteratorConsistency::BrokenWriteLockBeforeIteration [GOOD] >> DataShardReadIteratorConsistency::BrokenWriteLockDuringIteration ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::SecondaryIndexes [GOOD] Test command err: Trying to start YDB, gRPC: 12665, MsgBus: 15970 2025-06-25T15:02:28.537174Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902540488563287:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:28.537207Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000873/r3tmp/tmps5yaB5/pdisk_1.dat 2025-06-25T15:02:29.114871Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902540488563266:2080] 1750863748506147 != 1750863748506150 2025-06-25T15:02:29.123475Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:29.129923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:29.130024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:29.135131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12665, node 1 2025-06-25T15:02:29.209679Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:29.209709Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:29.209722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:29.209840Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15970 2025-06-25T15:02:29.607186Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:15970 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:02:29.841367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:29.857259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:02:29.874538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:30.039344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:30.185507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:30.259962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:31.836765Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902553373466778:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:31.836862Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:32.208013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:32.260013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:32.297054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:32.355369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:32.387429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:32.435694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:32.470659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:32.534100Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902557668434732:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:32.534171Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:32.534383Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902557668434737:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:32.538791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:32.548886Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902557668434739:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:32.626281Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902557668434790:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:33.538330Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902540488563287:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:33.538411Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:33.567130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/ ... rver::EnableGrpc on GrpcPort 4051, node 2 2025-06-25T15:02:35.854402Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:35.854490Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:35.857317Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:35.879693Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:35.879719Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:35.879725Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:35.879845Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27107 TClient is connected to server localhost:27107 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:02:36.293724Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:36.300920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:36.313396Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:36.390088Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:36.555665Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:36.630784Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:36.765788Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:38.533348Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902581254288822:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:38.533421Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:38.610563Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.640924Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.667554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.690533Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.718953Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.748000Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.778771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:38.890178Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902581254289482:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:38.890289Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:38.890498Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902581254289487:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:38.894552Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:38.903385Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902581254289489:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:38.963727Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902581254289540:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:39.998945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.053781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.122624Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.728462Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902568369385307:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:40.735141Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpPragma::ResetPerQuery ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::ScanQueryDisable [GOOD] Test command err: Trying to start YDB, gRPC: 28288, MsgBus: 24081 2025-06-25T15:02:30.830062Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902547487102881:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:30.830109Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00085b/r3tmp/tmpbX8COq/pdisk_1.dat 2025-06-25T15:02:31.199920Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:31.203300Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902547487102860:2080] 1750863750816457 != 1750863750816460 2025-06-25T15:02:31.209046Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:31.209144Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:31.214253Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28288, node 1 2025-06-25T15:02:31.300096Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:31.300114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:31.300121Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:31.300245Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24081 TClient is connected to server localhost:24081 2025-06-25T15:02:31.863564Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:32.059488Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:32.079316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:32.235699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:32.399376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:32.485338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:33.996871Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902560372006385:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:33.996985Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.400046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.468247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.498559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.526266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.558440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.588930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.623454Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.672679Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902564666974336:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.672742Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.672784Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902564666974341:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.675870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:34.684754Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902564666974343:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:34.757138Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902564666974394:3419] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:35.830349Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902547487102881:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:35.830483Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:36.261911Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863756285, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 25309, MsgBus: 23721 2025-06-25T15:02:36.978312Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902574759725596:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:36.978375Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00085b/r3tmp/tmpIcvGGi/pdisk_1.dat 2025-06-25T15:02:37.079684Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:37.081140Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902574759725575:2080] 1750863756977516 != 1750863756977519 TServer::EnableGrpc on GrpcPort 25309, node 2 2025-06-25T15:02:37.126536Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:37.126642Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:37.132534Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:37.154635Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:37.154655Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:37.154660Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:37.154765Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23721 TClient is connected to server localhost:23721 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:37.617977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:37.622159Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:37.631814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:37.703527Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:37.852107Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:37.928178Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:37.995684Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:39.989433Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902587644629114:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:39.989521Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.062383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.132641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.163690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.189336Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.215802Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.245720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.314397Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.406543Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902591939597078:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.406654Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.406902Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902591939597083:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.411654Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:40.424868Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902591939597085:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:40.487331Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902591939597136:3420] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:41.974246Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863762004, txId: 281474976715672] shutting down 2025-06-25T15:02:41.980412Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902574759725596:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:41.980613Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> KqpYql::EvaluateExpr2 [GOOD] >> KqpYql::EvaluateExpr3 >> KqpScripting::StreamExecuteYqlScriptSeveralQueries [GOOD] >> KqpScripting::StreamExecuteYqlScriptSeveralQueriesComplex >> TNodeBrokerTest::Test1000NodesSubscribers [GOOD] >> KqpYql::BinaryJsonOffsetNormal [GOOD] >> KqpYql::Closure >> KqpYql::UuidPrimaryKey [GOOD] >> DataShardReadIterator::ShouldReadRangeOneByOne [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix1 >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit101 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit198 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::Test1000NodesSubscribers [GOOD] Test command err: 2025-06-25T15:00:37.268362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:37.268416Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> KqpScripting::ExecuteYqlScriptScanScalar [GOOD] >> KqpScripting::JoinIndexLookup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::UuidPrimaryKey [GOOD] Test command err: Trying to start YDB, gRPC: 17756, MsgBus: 13164 2025-06-25T15:02:40.502277Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902592370557079:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:40.502377Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00071c/r3tmp/tmptVD8up/pdisk_1.dat 2025-06-25T15:02:40.907400Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:40.908413Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902592370557060:2080] 1750863760501676 != 1750863760501679 2025-06-25T15:02:40.922337Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:40.922443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:40.924044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17756, node 1 2025-06-25T15:02:41.056876Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:41.056902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:41.056913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:41.057040Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13164 TClient is connected to server localhost:13164 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-25T15:02:41.513110Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:02:41.538842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:41.555533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:02:43.186413Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902605255459589:2291], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.186530Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.436951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.565566Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902605255459693:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.565624Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.565659Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902605255459698:2304], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.574317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:43.584288Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902605255459700:2305], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T15:02:43.656553Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902605255459751:2392] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:44.045478Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519902609550427144:2328], status: GENERIC_ERROR, issues:
:3:25: Error: Invalid value "invalid-uuid" for type Uuid 2025-06-25T15:02:44.046920Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YWExNmVlYjUtOGRmZDNmMDItMzg5MjBkZi00NTNiYWJiMw==, ActorId: [1:7519902605255459571:2289], ActorState: ExecuteState, TraceId: 01jyksspj34fagg696s8rtbg3x, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> DataShardReadIterator::ShouldFailWrongSchema [GOOD] >> DataShardReadIterator::ShouldFailReadNextAfterSchemeChange >> DataShardReadIterator::ShouldReadKeyPrefix2 [GOOD] >> DataShardReadIterator::ShouldReadKeyPrefix3 >> DataShardReadIterator::ShouldReadNotExistingRange [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk1_100 >> DataShardReadIterator::ShouldReturnBrokenLockWhenWriteInSeparateTransactions-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips-EvWrite >> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts-UseSink >> KqpLimits::TooBigQuery-useSink [GOOD] >> KqpLimits::WaitCAsStateOnAbort >> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict-UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndex-WithRestart-UseSink >> KqpYql::NonStrictDml [GOOD] >> KqpYql::JsonNumberPrecision >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedBeforeRead+UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedBeforeRead-UseSink >> TOlapNaming::CreateColumnTableOk >> TOlap::CreateStore >> TOlapNaming::AlterColumnStoreOk >> TOlap::CreateTableWithNullableKeysNotAllowed >> TOlap::CreateStoreWithDirs >> TOlapNaming::CreateColumnStoreOk >> DataShardSnapshots::VolatileSnapshotTimeout [GOOD] >> DataShardSnapshots::VolatileSnapshotTimeoutRefresh >> KqpYql::CreateUseTable [GOOD] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadata-default.txt] [FAIL] >> KqpYql::EvaluateExpr3 [GOOD] >> DataShardReadIterator::TryWriteManyRows+Commit [GOOD] >> DataShardReadIterator::TryWriteManyRows-Commit >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataInsideFilter-default.txt] >> KqpPragma::ResetPerQuery [GOOD] >> KqpPragma::Warning >> TOlap::CreateTableWithNullableKeysNotAllowed [GOOD] >> TOlap::CreateTableWithNullableKeys >> test_sql_streaming.py::test[suites-GroupByHop-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopByStringKey-default.txt] >> test_sql_streaming.py::test[suites-GroupByHopTimeExtractorUnusedColumns-default.txt] [FAIL] >> KqpYql::Closure [GOOD] >> test_sql_streaming.py::test[suites-GroupByHopWithDataWatermarks-default.txt] >> test_sql_streaming.py::test[suites-ReadTwoTopics-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadWriteSameTopic-default.txt] >> TOlapNaming::CreateColumnStoreOk [GOOD] >> TOlapNaming::CreateColumnStoreFailed >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder-EvWrite >> TOlap::CreateStore [GOOD] >> TOlap::CreateDropTable >> TOlapNaming::AlterColumnStoreOk [GOOD] >> TOlapNaming::AlterColumnStoreFailed >> TOlap::CreateStoreWithDirs [GOOD] >> TOlap::CreateTable >> TNodeBrokerTest::Test1001NodesSubscribers [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::CreateUseTable [GOOD] Test command err: Trying to start YDB, gRPC: 17020, MsgBus: 9542 2025-06-25T15:02:38.140648Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902581720230091:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:38.140690Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007ce/r3tmp/tmpj4Ee7K/pdisk_1.dat 2025-06-25T15:02:38.489251Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902581720230065:2080] 1750863758126475 != 1750863758126478 2025-06-25T15:02:38.517745Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17020, node 1 2025-06-25T15:02:38.572916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:38.573008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:38.574442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:38.584211Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:38.584251Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:38.584264Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:38.584414Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9542 TClient is connected to server localhost:9542 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:39.065968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:02:39.088967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:39.165628Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:39.238103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:39.386189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:39.464136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:40.993958Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902590310166307:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:40.994063Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:41.298556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:41.325128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:41.390391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:41.429477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:41.497544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:41.527111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:41.556783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:41.610988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902594605134269:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:41.611065Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:41.611150Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902594605134274:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:41.614768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:41.624409Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902594605134276:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:41.719999Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902594605134327:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 }
: Error: Optimization, code: 1070
:4:24: Error: Queries with mixed data and scheme operations are not supported. Use separate queries for different types of operations., code: 2009 Trying to start YDB, gRPC: 2067, MsgBus: 1913 2025-06-25T15:02:43.535498Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902602201559429:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:43.535574Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007ce/r3tmp/tmpkp5GcC/pdisk_1.dat 2025-06-25T15:02:43.697587Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:43.705055Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:43.705115Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:43.710399Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2067, node 2 2025-06-25T15:02:43.762112Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:43.762147Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:43.762161Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:43.762327Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1913 TClient is connected to server localhost:1913 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:44.205463Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:02:44.210408Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:44.220903Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:44.296583Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:44.434280Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:44.496101Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:44.626921Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:46.595883Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902615086462902:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:46.595970Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:46.650263Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:46.693322Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:46.722443Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:46.766715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:46.835803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:46.863563Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:46.896689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:46.978601Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902615086463567:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:46.978665Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:46.978758Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902615086463572:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:46.982097Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:46.992361Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902615086463574:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:47.082040Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902619381430921:3415] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:48.301107Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.513358Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768549, txId: 281474976715674] shutting down 2025-06-25T15:02:48.532584Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902602201559429:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:48.532700Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TOlapNaming::CreateColumnStoreFailed [GOOD] >> KqpScripting::StreamExecuteYqlScriptSeveralQueriesComplex [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::EvaluateExpr3 [GOOD] Test command err: Trying to start YDB, gRPC: 28301, MsgBus: 3677 2025-06-25T15:02:39.403772Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902588777483668:2132];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:39.404870Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00078f/r3tmp/tmpqDSJmQ/pdisk_1.dat 2025-06-25T15:02:39.773350Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:39.773396Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902588777483573:2080] 1750863759359431 != 1750863759359434 TServer::EnableGrpc on GrpcPort 28301, node 1 2025-06-25T15:02:39.835257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:39.835331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:39.839877Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:39.872929Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:39.872957Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:39.872965Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty 
maybe) 2025-06-25T15:02:39.873092Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3677 TClient is connected to server localhost:3677 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:40.369487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:40.384130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:02:40.395752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:40.408848Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:40.537070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:40.682569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:40.759737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:42.136827Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902601662387100:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.136952Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.471094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.506554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.538258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.566302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.602975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.671518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.743836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.830539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902601662387773:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.830592Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.830746Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902601662387778:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.833812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:42.842046Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902601662387780:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:42.924943Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902601662387831:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 16432, MsgBus: 20098 2025-06-25T15:02:44.729514Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902606373904869:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:44.729702Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00078f/r3tmp/tmpFE7tTZ/pdisk_1.dat 2025-06-25T15:02:44.847324Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:44.848653Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902606373904834:2080] 1750863764727760 != 1750863764727763 TServer::EnableGrpc on GrpcPort 16432, node 2 2025-06-25T15:02:44.876751Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:44.876840Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:44.878287Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:44.895717Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:44.895737Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:44.895742Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:44.895857Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20098 TClient is connected to server localhost:20098 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:02:45.346903Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:45.356579Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:45.363132Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:45.419487Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:45.566997Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:45.666742Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:45.785218Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:47.433354Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902619258808331:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:47.433449Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:47.488364Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.522328Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.551477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.576872Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.604449Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.673423Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.713967Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.769347Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902619258808992:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:47.769430Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902619258808997:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:47.769454Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:47.773254Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:47.783999Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902619258808999:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:47.850963Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902619258809052:3412] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TOlapNaming::AlterColumnStoreFailed [GOOD] >> TOlap::CreateTableWithNullableKeys [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::Closure [GOOD] Test command err: Trying to start YDB, gRPC: 27217, MsgBus: 15842 2025-06-25T15:02:39.902770Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902585999469224:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:39.902829Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00073d/r3tmp/tmp6mTbHz/pdisk_1.dat 2025-06-25T15:02:40.236858Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27217, node 1 2025-06-25T15:02:40.301083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:40.301447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:40.309480Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:40.332949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:40.332980Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:40.332987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:40.333121Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15842 TClient is connected to server localhost:15842 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:02:40.865560Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:40.882884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-25T15:02:40.911908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.923793Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:41.043552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:41.204296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:41.290267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:42.764928Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902598884372709:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.765076Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.071290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.106094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.135564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.206122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.242073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.279565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.313806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.406116Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902603179340672:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.406221Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.406296Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902603179340677:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.409608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:43.422412Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902603179340679:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:43.500838Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902603179340730:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Trying to start YDB, gRPC: 18813, MsgBus: 7419 2025-06-25T15:02:45.341474Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902611955713496:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:45.341537Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00073d/r3tmp/tmpcGxqla/pdisk_1.dat 2025-06-25T15:02:45.464661Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:45.465849Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902611955713477:2080] 1750863765341132 != 1750863765341135 TServer::EnableGrpc on GrpcPort 18813, node 2 2025-06-25T15:02:45.474837Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:45.474924Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:45.486470Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:45.546222Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:45.546264Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:45.546276Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:45.546416Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7419 TClient is connected to server localhost:7419 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:02:45.975879Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:45.987545Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:02:46.005556Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:46.089989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:02:46.213004Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:46.293934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:46.452191Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:47.938868Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902620545649695:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:47.938949Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:47.981990Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.010641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.050893Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.078827Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.105796Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.135761Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.165589Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.214988Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902624840617646:2429], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:48.215038Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:48.215195Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902624840617651:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:48.218814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:48.230711Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902624840617653:2433], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:48.330471Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902624840617704:3417] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> DataShardReadIteratorConsistency::BrokenWriteLockDuringIteration [GOOD] >> DataShardReadIteratorConsistency::WriteLockThenUncommittedReadUpgradeRetryAndRestart >> TOlap::CreateTable [GOOD] >> TOlap::CreateTableTtl >> TOlap::CreateDropTable [GOOD] >> TOlap::CreateDropStandaloneTableDefaultSharding ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::Test1001NodesSubscribers [GOOD] Test command err: 2025-06-25T15:00:40.436645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:40.436698Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... waiting for nameservers are connected (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlapNaming::CreateColumnStoreFailed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:02:49.147200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:02:49.147300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:49.147350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:02:49.147387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:02:49.148316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:02:49.148356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:02:49.148429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:49.148607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:02:49.149348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 
2025-06-25T15:02:49.150821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:02:49.238137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:02:49.238207Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:49.255927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:02:49.256272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:02:49.256462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:02:49.265287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:02:49.265593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:02:49.266275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.266451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:02:49.269433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:49.269593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:02:49.278102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:49.278203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:49.278420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:02:49.278473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:02:49.278532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:02:49.278623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.284851Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:02:49.415993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:02:49.416239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.416469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:02:49.416516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:02:49.416798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:02:49.416937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:49.419615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.419816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:02:49.420042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.420104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:02:49.420148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:02:49.420240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:02:49.427431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.427511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:02:49.427561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:02:49.429680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.429729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.429786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.429845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:02:49.438609Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:02:49.440714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:02:49.440965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:02:49.441899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.442033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:49.442080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.442450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:02:49.442504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.442680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:02:49.442755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:02:49.444693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:49.444738Z node 1 :FLAT_TX_SCHEMESHARD ... 
hemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:50.458046Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:02:50.458217Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:02:50.459848Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:02:50.460029Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:02:50.460972Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:50.461103Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 8589936750 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:50.461158Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:50.461438Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:02:50.461504Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:50.461741Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:02:50.461827Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:02:50.463747Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:50.463798Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:02:50.463989Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:50.464038Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: 
TTxPublishToSchemeBoard Send, to populator: [2:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T15:02:50.464505Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:50.464565Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T15:02:50.464677Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T15:02:50.464719Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T15:02:50.464763Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T15:02:50.464801Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T15:02:50.464844Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T15:02:50.464888Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T15:02:50.464927Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T15:02:50.464960Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T15:02:50.465029Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T15:02:50.465073Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T15:02:50.465112Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T15:02:50.465617Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T15:02:50.465715Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T15:02:50.465764Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T15:02:50.465807Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T15:02:50.465854Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:02:50.465945Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 
1, subscribers: 0 2025-06-25T15:02:50.468195Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T15:02:50.468706Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T15:02:50.469698Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:271:2260] Bootstrap 2025-06-25T15:02:50.489733Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:271:2260] Become StateWork (SchemeCache [2:276:2265]) 2025-06-25T15:02:50.492501Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "OlapStore" ColumnShardCount: 1 SchemaPresets { Name: "default" Schema { Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "data" Type: "Utf8" } Columns { Name: "mess age" Type: "Utf8" } KeyColumnNames: "timestamp" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:02:50.492838Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /MyRoot/OlapStore, opId: 101:0, at schemeshard: 72057594046678944 2025-06-25T15:02:50.493011Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Invalid name for column 'mess age', at schemeshard: 72057594046678944 2025-06-25T15:02:50.493943Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T15:02:50.499037Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Invalid name for column \'mess age\'" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:50.499291Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Invalid name for column 'mess age', operation: CREATE COLUMN STORE, path: /MyRoot/OlapStore 2025-06-25T15:02:50.501713Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T15:02:50.501997Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T15:02:50.502056Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T15:02:50.502470Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T15:02:50.502600Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T15:02:50.502645Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:286:2275] TestWaitNotification: OK eventTxId 101 2025-06-25T15:02:50.503060Z node 2 :SCHEMESHARD_DESCRIBE 
DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:02:50.503239Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore" took 213us result status StatusPathDoesNotExist 2025-06-25T15:02:50.503442Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/OlapStore\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/OlapStore" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::CreateTableWithNullableKeys [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:02:49.147217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:02:49.147300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:49.147336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:02:49.147366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:02:49.148321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:02:49.148369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:02:49.148433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:49.148504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:02:49.149205Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:02:49.150782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:02:49.236774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:02:49.236846Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:49.250930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:02:49.251263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:02:49.251498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:02:49.257556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:02:49.257880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:02:49.259813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.260030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:02:49.266884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:49.267847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:02:49.278058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:49.278301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:49.278521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:02:49.278586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:02:49.278648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:02:49.278723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.285134Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:02:49.416741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-25T15:02:49.416990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.417208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:02:49.417255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:02:49.417524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:02:49.417603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:49.419892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.420051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:02:49.420230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.420278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:02:49.420340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:02:49.420390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:02:49.422064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.422170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:02:49.422222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:02:49.426863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.426916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.426976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.427030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: 
TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:02:49.430517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:02:49.434286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:02:49.434580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:02:49.435677Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.435826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:49.435878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.436203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:02:49.436281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.436483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:02:49.436560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:02:49.438484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:49.438534Z node 1 :FLAT_TX_SCHEMESHARD ... 
: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:50.658636Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-25T15:02:50.658668Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-25T15:02:50.658706Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-25T15:02:50.659440Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:50.659492Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:50.659511Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-25T15:02:50.659538Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-25T15:02:50.659562Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-25T15:02:50.660877Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:50.660934Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 7 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:50.660950Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-25T15:02:50.660978Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-25T15:02:50.661003Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T15:02:50.661045Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 0/1, is published: true 2025-06-25T15:02:50.661767Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 106:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 275382275 2025-06-25T15:02:50.663142Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T15:02:50.663383Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T15:02:50.664467Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T15:02:50.676523Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6230: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 106 2025-06-25T15:02:50.676584Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409547, partId: 0 2025-06-25T15:02:50.676702Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 106:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 106 2025-06-25T15:02:50.676748Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 106:0 129 -> 130 FAKE_COORDINATOR: Erasing txId 106 2025-06-25T15:02:50.678639Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-25T15:02:50.678863Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-25T15:02:50.678918Z node 2 :FLAT_TX_SCHEMESHARD INFO: drop_table.cpp:315: TDropColumnTable TProposedDeleteParts operationId# 106:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:02:50.678998Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-25T15:02:50.679104Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T15:02:50.679173Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T15:02:50.679213Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T15:02:50.679243Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T15:02:50.679268Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true 2025-06-25T15:02:50.679326Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:358:2334] message: TxId: 106 2025-06-25T15:02:50.679359Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T15:02:50.679387Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:0 2025-06-25T15:02:50.679413Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 106:0 2025-06-25T15:02:50.679500Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, 
LocalPathId: 5] was 2 2025-06-25T15:02:50.682129Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-25T15:02:50.682319Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T15:02:50.682358Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:662:2618] 2025-06-25T15:02:50.682764Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-25T15:02:50.683428Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186233409547;self_id=[2:507:2473];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:864;event=tablet_die; Forgetting tablet 72075186233409547 2025-06-25T15:02:50.686171Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-25T15:02:50.686739Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-25T15:02:50.687163Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T15:02:50.687216Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-25T15:02:50.687292Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T15:02:50.689641Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T15:02:50.689727Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T15:02:50.690210Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 2025-06-25T15:02:50.690741Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyDir/MyTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:02:50.690902Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyDir/MyTable" took 186us result status StatusPathDoesNotExist 2025-06-25T15:02:50.691078Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyDir/MyTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/MyDir\' (id: [OwnerId: 72057594046678944, LocalPathId: 3])" Path: 
"/MyRoot/MyDir/MyTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/MyDir" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "MyDir" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T15:02:50.691687Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 5 SchemeshardId: 72057594046678944 Options { }, at schemeshard: 72057594046678944 2025-06-25T15:02:50.691768Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 5 took 86us result status StatusPathDoesNotExist 2025-06-25T15:02:50.691848Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'\', error: path is empty" Path: "" PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlapNaming::AlterColumnStoreFailed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:02:49.152080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:02:49.152159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:49.152191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:02:49.152219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:02:49.152252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:02:49.152277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:02:49.152939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:49.153018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:02:49.153694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources# 2025-06-25T15:02:49.153987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:02:49.235037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:02:49.235105Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:49.253267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:02:49.253603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:02:49.253763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:02:49.258793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:02:49.259067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:02:49.259864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.260093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:02:49.267563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:49.267843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:02:49.278031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:49.278122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:49.278318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:02:49.278370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:02:49.278449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:02:49.278525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.285590Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:02:49.435222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:02:49.435462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.435672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:02:49.435719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:02:49.435969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:02:49.436046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:49.440746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.440971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:02:49.441205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.441268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:02:49.441312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:02:49.441366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:02:49.446551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.446625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:02:49.446679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:02:49.448450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.448501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.448556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.448609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:02:49.452208Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:02:49.454483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:02:49.454678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:02:49.455636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.455777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:49.455828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.456109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:02:49.456163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.456351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:02:49.456433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:02:49.461788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:49.461842Z node 1 :FLAT_TX_SCHEMESHARD ... 
advance: minStep5000003 State->FrontStep: 5000003 2025-06-25T15:02:50.783344Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:50.783401Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T15:02:50.783643Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T15:02:50.783795Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:50.783857Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T15:02:50.783913Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:209:2209], at schemeshard: 72057594046678944, txId: 102, path id: 3 2025-06-25T15:02:50.784252Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T15:02:50.784323Z node 2 :FLAT_TX_SCHEMESHARD INFO: create_table.cpp:459: TCreateColumnTable TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-25T15:02:50.784386Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: create_table.cpp:485: TCreateColumnTable TProposedWaitParts operationId# 102:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-06-25T15:02:50.785122Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:02:50.785232Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:02:50.785277Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T15:02:50.785320Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T15:02:50.785376Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T15:02:50.786242Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:02:50.786319Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:02:50.786351Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T15:02:50.786381Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-25T15:02:50.786409Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T15:02:50.786485Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-25T15:02:50.788562Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 102:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-25T15:02:50.788644Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 102:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:3 msg type: 268697639 2025-06-25T15:02:50.788714Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 102, partId: 0, tablet: 72057594037968897 2025-06-25T15:02:50.789059Z node 2 :HIVE INFO: tablet_helpers.cpp:1441: [72057594037968897] TEvUpdateTabletsObject, msg: ObjectId: 7726343884038809171 TabletIds: 72075186233409546 TxId: 102 TxPartId: 0 2025-06-25T15:02:50.789645Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6056: Update tablets object reply, message: Status: OK TxId: 102 TxPartId: 0, at schemeshard: 72057594046678944 2025-06-25T15:02:50.789761Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Status: OK TxId: 102 TxPartId: 0 2025-06-25T15:02:50.790428Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:02:50.790663Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-25T15:02:50.791760Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T15:02:50.804406Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6230: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 102 2025-06-25T15:02:50.804491Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-25T15:02:50.804650Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 102 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T15:02:50.807271Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 102:0, at schemeshard: 
72057594046678944 2025-06-25T15:02:50.807451Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-25T15:02:50.807515Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-25T15:02:50.807661Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T15:02:50.807721Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T15:02:50.807795Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:0 progress is 1/1 2025-06-25T15:02:50.807839Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T15:02:50.807887Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-25T15:02:50.807967Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:341:2317] message: TxId: 102 2025-06-25T15:02:50.808033Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-25T15:02:50.808089Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T15:02:50.808139Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T15:02:50.808292Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T15:02:50.813448Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T15:02:50.813525Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:402:2371] TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 2025-06-25T15:02:50.817701Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnStore AlterColumnStore { Name: "OlapStore" AlterSchemaPresets { Name: "default" AlterSchema { AddColumns { Name: "mess age" Type: "Utf8" } } } } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:02:50.818006Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: alter_store.cpp:465: TAlterOlapStore Propose, path: /MyRoot/OlapStore, opId: 103:0, at schemeshard: 72057594046678944 2025-06-25T15:02:50.818338Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 103:1, propose status:StatusSchemeError, reason: Invalid name for column 'mess age', at schemeshard: 72057594046678944 2025-06-25T15:02:50.821455Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 103, response: Status: StatusSchemeError Reason: "Invalid name for column \'mess age\'" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:50.821694Z node 2 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusSchemeError, reason: Invalid name for column 'mess age', operation: ALTER COLUMN STORE, path: /MyRoot/OlapStore TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-25T15:02:50.822071Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-25T15:02:50.822142Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-25T15:02:50.822623Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-25T15:02:50.822760Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-25T15:02:50.822819Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:440:2409] TestWaitNotification: OK eventTxId 103 >> KqpScripting::JoinIndexLookup [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix1 [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptSeveralQueriesComplex [GOOD] Test command err: Trying to start YDB, gRPC: 13087, MsgBus: 15765 2025-06-25T15:02:39.701048Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902587772781974:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:39.701130Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000774/r3tmp/tmp5AHxl2/pdisk_1.dat 2025-06-25T15:02:40.070981Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13087, node 1 2025-06-25T15:02:40.090574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:40.091520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:40.098160Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:40.144183Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:40.144216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:40.144239Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:40.144439Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15765 TClient is connected to server localhost:15765 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:40.690611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:40.707058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:02:40.709038Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T15:02:40.724050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:40.891941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:41.034275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:41.101970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:42.537558Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902600657685452:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.537683Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.871035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.907006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.938590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.970089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.003137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.076730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.108534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.162482Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902604952653411:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.162587Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.162881Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902604952653416:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.166185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:43.176172Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902604952653418:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:43.268877Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902604952653469:3422] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:44.704434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902587772781974:2068];send_to=[0:7307199536658146131:7762515]; Trying to start YDB, gRPC: 10796, MsgBus: 30846 2025-06-25T15:02:45.271972Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902612518216869:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:45.272047Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000774/r3tmp/tmphHW0Oa/pdisk_1.dat 2025-06-25T15:02:45.413454Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:45.424729Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902612518216852:2080] 1750863765271538 != 1750863765271541 TServer::EnableGrpc on GrpcPort 10796, node 2 2025-06-25T15:02:45.432942Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:45.433025Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:45.433895Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:45.496930Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:45.496956Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:45.496964Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:45.497067Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30846 TClient is connected to server localhost:30846 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:45.959053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:45.969354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:45.987108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:46.057164Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:46.189003Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:46.246742Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:46.286369Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:48.153728Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902625403120370:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:48.153805Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:48.203680Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.252815Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.279831Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.306886Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.335484Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.407500Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.480256Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:48.568500Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902625403121035:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:48.568609Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:48.569035Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902625403121040:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:48.573126Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:48.582493Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902625403121042:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:48.654442Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902625403121093:3414] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } Result: [[[[101u]]];[[[102u]]];[[[103u]]];[[[104u]]];[[[105u]]]] 2025-06-25T15:02:50.273611Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902612518216869:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:50.273708Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> TOlap::CreateDropStandaloneTable >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit198 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit900 >> TOlap::CreateTableTtl [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::JoinIndexLookup [GOOD] Test command err: Trying to start YDB, gRPC: 5562, MsgBus: 6813 2025-06-25T15:02:39.783223Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902587910330403:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:39.785502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000750/r3tmp/tmpnQjPPn/pdisk_1.dat 2025-06-25T15:02:40.187107Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:40.188437Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902587910330368:2080] 1750863759779603 != 1750863759779606 TServer::EnableGrpc on GrpcPort 5562, node 1 2025-06-25T15:02:40.213777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:40.213857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:40.230976Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:40.275813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:40.275835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:40.275846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:40.275945Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6813 TClient is connected to server localhost:6813 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:40.794027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:40.801858Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... 2025-06-25T15:02:40.845799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:41.009571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:41.168379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:41.255302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:42.716265Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902600795233909:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.716407Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.050869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.115070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.148881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.184651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.257465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.323921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.400571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.464545Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902605090201878:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.464640Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.464904Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902605090201883:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:43.468636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:43.479000Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902605090201885:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:43.565910Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902605090201936:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:44.797308Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902587910330403:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:44.797563Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:45.439141Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863765434, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 3757, MsgBus: 14636 2025-06-25T15:02:46.152499Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902618761504089:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:46.152550Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000750/r3tmp/tmpUqtyRE/pdisk_1.dat 2025-06-25T15:02:46.261432Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:46.262999Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902618761504062:2080] 1750863766152007 != 1750863766152010 TServer::EnableGrpc on GrpcPort 3757, node 2 2025-06-25T15:02:46.307532Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:46.307611Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:46.309094Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:46.320863Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:46.320895Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:46.320901Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:46.321016Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14636 TClient is connected to server localhost:14636 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:46.786945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T15:02:46.803151Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:46.866976Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:47.010388Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:47.082131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:47.221805Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:48.936685Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902627351440275:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:48.936769Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:49.006899Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:49.077787Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:49.108890Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:49.137218Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:49.169300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:49.208638Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:49.246431Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:49.298616Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902631646408231:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:49.298705Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:49.298835Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902631646408236:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:49.302830Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:49.315201Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902631646408238:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:49.417092Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902631646408289:3419] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:51.153975Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902618761504089:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:51.154077Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::CreateTableTtl [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:02:49.147234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:02:49.147323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:49.147361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:02:49.147402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:02:49.148652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:02:49.148710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:02:49.148780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:49.148856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:02:49.149662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:02:49.152064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:02:49.237964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:02:49.238032Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-25T15:02:49.254076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:02:49.254419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:02:49.254699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:02:49.260074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:02:49.260355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:02:49.260944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.261143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:02:49.266871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:49.267851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:02:49.278061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:49.278145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:49.278327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:02:49.278377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:02:49.278431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:02:49.278513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.287660Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:02:49.407123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:02:49.408690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.409344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:02:49.409407Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:02:49.411154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:02:49.411262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:49.414946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.415242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:02:49.415477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.415603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:02:49.415646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:02:49.415699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:02:49.417815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.417875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:02:49.417922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:02:49.419550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.419596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.419646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.419700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:02:49.424735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:02:49.426769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:02:49.427902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:02:49.428888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.429028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:49.429077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.430208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:02:49.430271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.430447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:02:49.430517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:02:49.436769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:49.436821Z node 1 :FLAT_TX_SCHEMESHARD ... 
PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 105 CreateStep: 5000006 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 ColumnTableTtlSettingsVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnTableDescription { Name: "Table3" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } } Columns { Id: 2 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } NextColumnFamilyId: 1 } TtlSettings { Enabled { ColumnName: "timestamp" ColumnUnit: UNIT_AUTO Tiers { ApplyAfterSeconds: 360 EvictToExternalStorage { Storage: "/MyRoot/Tier1" } } } Version: 1 } SchemaPresetId: 1 SchemaPresetName: "default" ColumnStorePathId { OwnerId: 72057594046678944 LocalId: 2 } ColumnShardCount: 1 Sharding { ColumnShards: 72075186233409546 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "timestamp" } } } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 106 2025-06-25T15:02:52.346879Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/OlapStore" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "Table4" TtlSettings { Enabled { ColumnName: "timestamp" ColumnUnit: UNIT_AUTO Tiers { ApplyAfterSeconds: 3600000000 EvictToExternalStorage { Storage: "/MyRoot/Tier1" } } } } ColumnShardCount: 1 } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:02:52.347218Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable 
Propose, path: /MyRoot/OlapStore/Table4, opId: 106:0, at schemeshard: 72057594046678944 2025-06-25T15:02:52.347658Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 2], parent name: OlapStore, child name: Table4, child id: [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-25T15:02:52.347723Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 0 2025-06-25T15:02:52.347765Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 106:0 type: TxCreateColumnTable target path: [OwnerId: 72057594046678944, LocalPathId: 7] source path: 2025-06-25T15:02:52.347972Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 1 2025-06-25T15:02:52.348296Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 106:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:02:52.349078Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 106:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:738) 2025-06-25T15:02:52.349223Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-25T15:02:52.349285Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 2 2025-06-25T15:02:52.352124Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 106, response: Status: StatusAccepted TxId: 106 SchemeshardId: 72057594046678944 PathId: 7, at schemeshard: 72057594046678944 2025-06-25T15:02:52.352398Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE COLUMN TABLE, path: /MyRoot/OlapStore/ 2025-06-25T15:02:52.352675Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:52.352720Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T15:02:52.352936Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 7] 2025-06-25T15:02:52.353047Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:52.353102Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:211:2211], at schemeshard: 72057594046678944, txId: 
106, path id: 2 2025-06-25T15:02:52.353150Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:211:2211], at schemeshard: 72057594046678944, txId: 106, path id: 7 2025-06-25T15:02:52.353583Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-25T15:02:52.353642Z node 3 :FLAT_TX_SCHEMESHARD INFO: create_table.cpp:235: TCreateColumnTable TConfigureParts operationId# 106:0 ProgressState at tabletId# 72057594046678944 2025-06-25T15:02:52.353813Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: create_table.cpp:321: TCreateColumnTable TConfigureParts operationId# 106:0 ProgressState Propose modify scheme on shard tabletId: 72075186233409546 2025-06-25T15:02:52.354781Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 10 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:52.354887Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 10 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:52.354933Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-25T15:02:52.354975Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 10 2025-06-25T15:02:52.355035Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-25T15:02:52.355676Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 1 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:52.355761Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 1 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:52.355892Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-25T15:02:52.355926Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 1 2025-06-25T15:02:52.355958Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-25T15:02:52.356026Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 0/1, is published: true 2025-06-25T15:02:52.363961Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 106:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 
275382272 2025-06-25T15:02:52.364122Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 106, partId: 0, tablet: 72075186233409546 2025-06-25T15:02:52.373291Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186233409546;self_id=[3:314:2299];ev=NActors::IEventHandle;tablet_id=72075186233409546;tx_id=106;this=88923004032128;method=TTxController::StartProposeOnExecute;tx_info=106:TX_KIND_SCHEMA;min=5000007;max=18446744073709551615;plan=0;src=[3:129:2153];cookie=12:5;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=7;result=not_found; 2025-06-25T15:02:52.374153Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T15:02:52.374308Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 TestModificationResult got TxId: 106, wait until txId: 106 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/MyRoot/Tier1' stopped at tablet 72075186233409546 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/MyRoot/Tier1' stopped at tablet 72075186233409546 >> KqpYql::JsonNumberPrecision [GOOD] |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromRemoteSource >> DataShardReadIterator::ShouldFailReadNextAfterSchemeChange [GOOD] >> DataShardReadIterator::ShouldFailReadNextAfterSchemeChangeExhausted >> DataShardReadIterator::ShouldReadRangeChunk1_100 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk1 >> DataShardReadIterator::ShouldReadKeyPrefix3 [GOOD] >> DataShardReadIterator::ShouldReadHeadFromFollower >> TOlap::CreateDropStandaloneTable [GOOD] >> TOlap::AlterStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpYql::JsonNumberPrecision [GOOD] Test command err: Trying to start YDB, gRPC: 16445, MsgBus: 26081 2025-06-25T15:02:42.550664Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902600606134112:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:42.553309Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000709/r3tmp/tmpqKswHK/pdisk_1.dat 2025-06-25T15:02:42.889457Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:42.889550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:42.891914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:42.914956Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:42.915450Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902600606134015:2080] 1750863762525892 != 1750863762525895 TServer::EnableGrpc on GrpcPort 16445, node 1 2025-06-25T15:02:42.974392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:42.974422Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:42.974433Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:42.974534Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26081 TClient is connected to server localhost:26081 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:43.505963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:43.539708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:43.554827Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:43.687300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T15:02:43.864173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:43.936768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:45.518558Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902613491037524:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:45.518660Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:45.856821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:45.881791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:45.906489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:45.930767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:45.957079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:45.989761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:46.058523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:46.142973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902617786005484:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:46.143041Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902617786005489:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:46.143061Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:46.145753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:46.156908Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902617786005491:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:46.253378Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902617786005542:3418] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:47.540720Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902600606134112:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:47.540814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 1516, MsgBus: 63341 2025-06-25T15:02:48.105693Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902625471138466:2057];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:48.105747Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000709/r3tmp/tmpNjhoZ0/pdisk_1.dat 2025-06-25T15:02:48.223081Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:48.228474Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902625471138450:2080] 1750863768105357 != 1750863768105360 2025-06-25T15:02:48.246439Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:48.246513Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 1516, node 2 2025-06-25T15:02:48.247875Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:48.286143Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:48.286165Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:48.286172Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:48.286276Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63341 TClient is connected to server localhost:63341 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T15:02:48.702021Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:48.707652Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:48.711159Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:48.783392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:48.945247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:49.018881Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:49.154771Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:51.029489Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902638356041999:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:51.029563Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:51.090658Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:51.136804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:51.168809Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:51.206352Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:51.237358Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:51.298129Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:51.332786Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:51.397520Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902638356042659:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:51.397607Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:51.397840Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902638356042664:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:51.401145Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:51.410146Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902638356042666:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:51.474716Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902638356042717:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> KqpScripting::StreamExecuteYqlScriptClientOperationTimeoutBruteForce [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips2+EvWrite >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedBeforeRead-UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedAfterRead+UseSink |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> KqpPragma::Warning [GOOD] >> TOlap::AlterStore [GOOD] >> TOlap::AlterTtl >> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithReadConflicts ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamExecuteYqlScriptClientOperationTimeoutBruteForce [GOOD] Test command err: Trying to start YDB, gRPC: 9006, MsgBus: 11092 2025-06-25T15:02:31.182922Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902551136615769:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:31.182977Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000838/r3tmp/tmpVbdi7s/pdisk_1.dat 2025-06-25T15:02:31.532793Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9006, node 1 2025-06-25T15:02:31.601063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:31.601154Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:31.618886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:31.676787Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:31.676815Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:31.676822Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:31.676917Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11092 TClient is connected to server localhost:11092 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-25T15:02:32.204613Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:32.261929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T15:02:32.291751Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:32.435313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:32.585678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:32.667515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:34.245829Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902564021519262:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.245919Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.615250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.648628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.688232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.761027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.788803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.824416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.894193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:34.966277Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902564021519926:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.966379Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.966799Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902564021519931:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:34.970356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:34.981421Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902564021519933:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:35.080083Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902568316487280:3424] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:36.227750Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902551136615769:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:36.228576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:36.442382Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863756369, txId: 281474976710672] shutting down 2025-06-25T15:02:36.449608Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863756369, txId: 281474976710673] shutting down 2025-06-25T15:02:36.477961Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863756481, txId: 281474976710677] shutting down 2025-06-25T15:02:36.478861Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotMan ... lechecking } 2025-06-25T15:02:46.278607Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902616095914429:3421] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:47.536482Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863767527, txId: 281474976715673] shutting down 2025-06-25T15:02:47.537235Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863767527, txId: 281474976715675] shutting down 2025-06-25T15:02:47.578899Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863767527, txId: 281474976715672] shutting down 2025-06-25T15:02:47.585091Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863767527, txId: 281474976715674] shutting down 2025-06-25T15:02:47.799830Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902598916042927:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:47.807559Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:47.849427Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863767716, txId: 281474976715680] shutting down 2025-06-25T15:02:47.850587Z node 2 :KQP_RESOURCE_MANAGER WARN: 
kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863767716, txId: 281474976715681] shutting down 2025-06-25T15:02:47.905604Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863767898, txId: 281474976715684] shutting down 2025-06-25T15:02:47.906467Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863767898, txId: 281474976715686] shutting down 2025-06-25T15:02:47.908108Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863767898, txId: 281474976715685] shutting down 2025-06-25T15:02:48.056018Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768066, txId: 281474976715692] shutting down 2025-06-25T15:02:48.063251Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768073, txId: 281474976715691] shutting down 2025-06-25T15:02:48.064157Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768073, txId: 281474976715690] shutting down 2025-06-25T15:02:48.354954Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768255, txId: 281474976715696] shutting down 2025-06-25T15:02:48.452755Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768402, txId: 281474976715698] shutting down 2025-06-25T15:02:48.453448Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768402, txId: 281474976715699] shutting down 2025-06-25T15:02:48.502474Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768521, txId: 281474976715702] shutting down 2025-06-25T15:02:48.502856Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768521, txId: 281474976715703] shutting down 2025-06-25T15:02:48.665770Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768647, txId: 281474976715706] shutting down 2025-06-25T15:02:48.668629Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768647, txId: 281474976715707] shutting down 2025-06-25T15:02:48.860770Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768871, txId: 281474976715710] shutting down 2025-06-25T15:02:48.864094Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863768871, txId: 281474976715711] shutting down 2025-06-25T15:02:49.042986Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863769053, txId: 281474976715714] shutting down 2025-06-25T15:02:49.043702Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: 
KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863769053, txId: 281474976715715] shutting down 2025-06-25T15:02:49.188076Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863769214, txId: 281474976715718] shutting down 2025-06-25T15:02:49.188883Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863769221, txId: 281474976715719] shutting down 2025-06-25T15:02:49.409419Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863769431, txId: 281474976715722] shutting down 2025-06-25T15:02:49.534988Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863769564, txId: 281474976715725] shutting down 2025-06-25T15:02:49.535819Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863769564, txId: 281474976715724] shutting down 2025-06-25T15:02:49.801676Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863769823, txId: 281474976715729] shutting down 2025-06-25T15:02:49.803766Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863769823, txId: 281474976715728] shutting down 2025-06-25T15:02:49.995213Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863770019, txId: 281474976715732] shutting down 2025-06-25T15:02:50.124659Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863770152, txId: 281474976715734] shutting down 2025-06-25T15:02:50.281671Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863770285, txId: 281474976715736] shutting down 2025-06-25T15:02:50.380510Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863770404, txId: 281474976715738] shutting down 2025-06-25T15:02:50.575712Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863770586, txId: 281474976715741] shutting down 2025-06-25T15:02:50.575974Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863770593, txId: 281474976715740] shutting down 2025-06-25T15:02:50.800832Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863770824, txId: 281474976715744] shutting down 2025-06-25T15:02:50.944961Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863770971, txId: 281474976715746] shutting down 2025-06-25T15:02:51.118076Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863771139, txId: 281474976715748] shutting down 2025-06-25T15:02:51.258683Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding 
snapshot; our snapshot: [step: 1750863771286, txId: 281474976715750] shutting down 2025-06-25T15:02:51.493625Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863771475, txId: 281474976715752] shutting down 2025-06-25T15:02:51.589010Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863771615, txId: 281474976715754] shutting down 2025-06-25T15:02:51.710604Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863771741, txId: 281474976715756] shutting down 2025-06-25T15:02:51.891865Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863771923, txId: 281474976715758] shutting down 2025-06-25T15:02:52.084902Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863772084, txId: 281474976715760] shutting down 2025-06-25T15:02:52.190755Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863772224, txId: 281474976715762] shutting down 2025-06-25T15:02:52.399509Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863772413, txId: 281474976715764] shutting down 2025-06-25T15:02:52.581704Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863772602, txId: 281474976715766] shutting down 2025-06-25T15:02:52.753739Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863772784, txId: 281474976715768] shutting down 2025-06-25T15:02:52.934175Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863772959, txId: 281474976715770] shutting down 2025-06-25T15:02:53.098563Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863773134, txId: 281474976715772] shutting down 2025-06-25T15:02:53.358278Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863773386, txId: 281474976715774] shutting down 2025-06-25T15:02:53.491819Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863773519, txId: 281474976715776] shutting down 2025-06-25T15:02:53.660691Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863773694, txId: 281474976715778] shutting down |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TTxDataShardMiniKQL::ReadConstant >> TTxDataShardMiniKQL::CrossShard_5_AllToAll >> TTxDataShardMiniKQL::WriteKeyTooLarge |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TOlapNaming::CreateColumnTableOk [GOOD] >> TOlapNaming::CreateColumnTableFailed ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpPragma::Warning [GOOD] Test command err: Trying to start YDB, gRPC: 14364, MsgBus: 20215 
2025-06-25T15:02:44.282889Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902609592380151:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:44.283546Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0006dd/r3tmp/tmpsImSkA/pdisk_1.dat 2025-06-25T15:02:44.646276Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:44.647957Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902609592380131:2080] 1750863764279896 != 1750863764279899 TServer::EnableGrpc on GrpcPort 14364, node 1 2025-06-25T15:02:44.717682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:44.717705Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:44.717710Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:44.717814Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:02:44.718289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:44.718390Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:44.720515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20215 TClient is connected to server localhost:20215 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:45.196735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:02:45.211992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:02:45.223681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:45.296545Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:45.387340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:45.550401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:45.628887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:47.080715Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902622477283679:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:47.080860Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:47.368535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.437159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.463680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.491494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.527143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.563261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.591313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:47.672423Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902622477284343:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:47.672506Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:47.672781Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902622477284348:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:47.676935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:47.687059Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902622477284350:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:02:47.760106Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902622477284401:3421] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:49.070395Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519902631067219290:2484], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: RemovePrefixMembers, At function: PersistableRepr, At function: SqlProject
:2:50: Error: At function: AssumeColumnOrderPartial, At function: Aggregate, At tuple
:2:20: Error: At tuple /lib/yql/aggregate.yqls:650:12: Error: At function: AggregationTraits /lib/yql/aggregate.yqls:648:18: Error: At lambda /lib/yql/aggregate.yqls:60:31: Error: At function: AggrCountInit
:2:20: Err ... PersistableRepr
:2:26: Error: At function: Member
:2:26: Error: Member not found: _yql_partition_id 2025-06-25T15:02:49.072509Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=YWM1OWJlMS0yNWFiYTNhOC01NTcyNTMxNy00ZGI4ZDNiMg==, ActorId: [1:7519902626772251965:2473], ActorState: ExecuteState, TraceId: 01jykssvej80z9fk0hb0xbhqzs, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-25T15:02:49.283089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902609592380151:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:49.283172Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 18953, MsgBus: 13461 2025-06-25T15:02:49.856793Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902628843322541:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:49.856862Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0006dd/r3tmp/tmpQ48516/pdisk_1.dat 2025-06-25T15:02:49.954834Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:49.956937Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902628843322514:2080] 1750863769856484 != 1750863769856487 TServer::EnableGrpc on GrpcPort 18953, node 2 2025-06-25T15:02:49.987708Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:49.987796Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:49.989879Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:50.040020Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:50.040041Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:50.040049Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:50.040164Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13461 TClient is connected to server localhost:13461 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:50.498209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:50.508890Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:02:50.584599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:50.745080Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:50.820206Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:50.875570Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:52.787158Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902641728226033:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:52.787224Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:52.833040Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:52.903311Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:52.933643Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:52.962853Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:52.991738Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:53.025692Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:53.059269Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:53.146257Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902646023193986:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:53.146334Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:53.146343Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902646023193991:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:53.149530Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:53.160841Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902646023193993:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:53.217945Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902646023194044:3416] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> TOlap::AlterTtl [GOOD] >> KqpService::PatternCache [GOOD] >> KqpService::RangeCache+UseCache >> TNetClassifierTest::TestInitFromRemoteSource [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndex-WithRestart-UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndex+WithRestart-UseSink >> TOlapNaming::CreateColumnTableFailed [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder+EvWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::AlterTtl [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:02:52.529334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:02:52.529428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:52.529464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:02:52.529500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:02:52.529539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:02:52.529566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:02:52.529648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:52.529708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:02:52.530452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:02:52.530884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:02:52.604637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:02:52.604721Z 
node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:52.620364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:02:52.620753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:02:52.620937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:02:52.626597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:02:52.626882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:02:52.627451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:52.627717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:02:52.631084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:52.631288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:02:52.632371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:52.632425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:52.632568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:02:52.632608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:02:52.632645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:02:52.632745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:02:52.639169Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:02:52.753189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:02:52.753402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:52.753601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:02:52.753652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:02:52.753879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:02:52.753946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:52.757444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:52.757632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:02:52.757846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:52.757902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:02:52.757935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:02:52.757981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:02:52.759925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:52.759978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:02:52.760017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:02:52.761360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:52.761389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:52.761423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:52.761461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:02:52.768630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } 
CoordinatorID: 72057594046316545 2025-06-25T15:02:52.770762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:02:52.770948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:02:52.771769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:52.771894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:52.771934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:52.772230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:02:52.772281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:52.772459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:02:52.772547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:02:52.774377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:52.774417Z node 1 :FLAT_TX_SCHEMESHARD ... 
NKikimr::TEvColumnShard::TEvProposeTransactionResult> complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-25T15:02:55.936778Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-25T15:02:55.936832Z node 3 :FLAT_TX_SCHEMESHARD INFO: alter_table.cpp:148: TAlterColumnTable TPropose operationId# 106:0 HandleReply ProgressState at tablet: 72057594046678944 2025-06-25T15:02:55.936920Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 106 ready parts: 1/1 2025-06-25T15:02:55.937080Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409546 Flags: 2 } ExecLevel: 0 TxId: 106 MinStep: 5000006 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:02:55.938766Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 106:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:106 msg type: 269090816 2025-06-25T15:02:55.938921Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 106, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 106 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 106 at step: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 106 at step: 5000007 2025-06-25T15:02:55.939595Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:55.939719Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 106 Coordinator: 72057594046316545 AckTo { RawX1: 138 RawX2: 12884904047 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:55.939781Z node 3 :FLAT_TX_SCHEMESHARD INFO: alter_table.cpp:109: TAlterColumnTable TPropose operationId# 106:0 HandleReply TEvOperationPlan at tablet: 72057594046678944, stepId: 5000007 2025-06-25T15:02:55.940667Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 106:0 128 -> 129 2025-06-25T15:02:55.940926Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T15:02:55.941006Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T15:02:55.958255Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186233409546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=106;fline=tx_controller.cpp:215;event=finished_tx;tx_id=106; FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000007 2025-06-25T15:02:55.962086Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:55.962157Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T15:02:55.962411Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T15:02:55.962624Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:55.962681Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:211:2211], at schemeshard: 72057594046678944, txId: 106, path id: 2 2025-06-25T15:02:55.962738Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:211:2211], at schemeshard: 72057594046678944, txId: 106, path id: 3 2025-06-25T15:02:55.962821Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-25T15:02:55.962885Z node 3 :FLAT_TX_SCHEMESHARD INFO: alter_table.cpp:199: TAlterColumnTable TProposedWaitParts operationId# 106:0 ProgressState at tablet: 72057594046678944 2025-06-25T15:02:55.962957Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: alter_table.cpp:222: TAlterColumnTable TProposedWaitParts operationId# 106:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-06-25T15:02:55.964486Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:55.964607Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:55.964653Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 2025-06-25T15:02:55.964706Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-25T15:02:55.964760Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T15:02:55.965945Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 14 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:55.966041Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 14 PathOwnerId: 72057594046678944, cookie: 106 2025-06-25T15:02:55.966071Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 106 
2025-06-25T15:02:55.966102Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 14 2025-06-25T15:02:55.966133Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T15:02:55.966204Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 0/1, is published: true 2025-06-25T15:02:55.968482Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 106:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-25T15:02:55.969875Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T15:02:55.971268Z node 3 :TX_TIERING ERROR: log.cpp:784: fline=manager.cpp:158;error=cannot_read_secrets;reason=Can't read access key: No such secret: SId:secret; 2025-06-25T15:02:55.971629Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T15:02:55.985514Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6230: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 106 2025-06-25T15:02:55.985589Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 106, tablet: 72075186233409546, partId: 0 2025-06-25T15:02:55.985732Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 106:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 106 FAKE_COORDINATOR: Erasing txId 106 2025-06-25T15:02:55.989476Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-25T15:02:55.989619Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-25T15:02:55.989657Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 106:0 ProgressState 2025-06-25T15:02:55.989761Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T15:02:55.989815Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T15:02:55.989862Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T15:02:55.989897Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T15:02:55.989933Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true 2025-06-25T15:02:55.990004Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:345:2321] message: TxId: 106 
2025-06-25T15:02:55.990061Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T15:02:55.990101Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:0 2025-06-25T15:02:55.990136Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 106:0 2025-06-25T15:02:55.990260Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T15:02:55.992118Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T15:02:55.992174Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [3:552:2520] TestWaitNotification: OK eventTxId 106 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/MyRoot/Tier1' stopped at tablet 72075186233409546 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/MyRoot/Tier1' stopped at tablet 72075186233409546 |92.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> TTxDataShardMiniKQL::ReadConstant [GOOD] >> TTxDataShardMiniKQL::ReadAfterWrite >> TTxDataShardMiniKQL::WriteKeyTooLarge [GOOD] >> TTxDataShardMiniKQL::WriteValueTooLarge ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest >> TNetClassifierTest::TestInitFromRemoteSource [GOOD] Test command err: 2025-06-25T15:02:53.636199Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902647630863853:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:53.636326Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00199c/r3tmp/tmpsN3rb7/pdisk_1.dat 2025-06-25T15:02:53.941589Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:53.953154Z node 1 :HTTP ERROR: http_proxy_outgoing.cpp:122: (#26,[::1]:24156) connection closed with error: Connection refused 2025-06-25T15:02:53.955251Z node 1 :CMS_CONFIGS ERROR: net_classifier_updater.cpp:278: NetClassifierUpdater failed to get subnets: Connection refused 2025-06-25T15:02:53.979770Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:53.979792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:53.979798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:53.979911Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:02:54.007222Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:54.007319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:54.009126Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:54.644118Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types13-all_types13-index13-Date--] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/mind/address_classification/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlapNaming::CreateColumnTableFailed [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:02:49.147198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:02:49.147281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:49.147320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:02:49.147350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:02:49.148333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:02:49.148377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:02:49.148452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:49.148516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:02:49.149208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:02:49.150787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:02:49.234386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:02:49.234471Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:49.252865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:02:49.253183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:02:49.253412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 
2025-06-25T15:02:49.258667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:02:49.258930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:02:49.259808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.260106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:02:49.266991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:49.267841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:02:49.278032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:49.278108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:49.278393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:02:49.278446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:02:49.278498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:02:49.278572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.285123Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:02:49.424759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:02:49.425000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.425206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:02:49.425251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:02:49.425469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:02:49.425539Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:49.427556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.427733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:02:49.427949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.428004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:02:49.428063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:02:49.428113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:02:49.429851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.429906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:02:49.429952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:02:49.431464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.431511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.431566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.431621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:02:49.440777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:02:49.442707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:02:49.442866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at 
step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:02:49.443581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.443707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:49.443759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.444032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:02:49.444097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.444272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:02:49.444362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:02:49.446464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:49.446510Z node 1 :FLAT_TX_SCHEMESHARD ... 
node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:02:56.458943Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:56.459046Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 8589936750 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:56.459085Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:56.459314Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:02:56.459363Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:56.459506Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:02:56.459566Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:02:56.461061Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:56.461103Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:02:56.461267Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:56.461308Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:208:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-25T15:02:56.461631Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:56.461672Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-25T15:02:56.461765Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T15:02:56.461795Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T15:02:56.461835Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#1:0 progress is 1/1 2025-06-25T15:02:56.461869Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T15:02:56.461902Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-25T15:02:56.461939Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-25T15:02:56.461971Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 1:0 2025-06-25T15:02:56.461999Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 1:0 2025-06-25T15:02:56.462065Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T15:02:56.462105Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-25T15:02:56.462136Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-25T15:02:56.462550Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T15:02:56.462630Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-25T15:02:56.462668Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-25T15:02:56.462702Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-25T15:02:56.462736Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:02:56.462817Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-25T15:02:56.464996Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-25T15:02:56.465367Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-25T15:02:56.466050Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:271:2260] Bootstrap 2025-06-25T15:02:56.485248Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:271:2260] Become StateWork (SchemeCache [2:276:2265]) 2025-06-25T15:02:56.487922Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TestTable" Schema { Columns 
{ Name: "Id" Type: "Int32" NotNull: true } Columns { Name: "mess age" Type: "Utf8" } KeyColumnNames: "Id" } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:02:56.488252Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TestTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-25T15:02:56.488480Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Invalid name for column 'mess age', at schemeshard: 72057594046678944 2025-06-25T15:02:56.489344Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:271:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T15:02:56.491460Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Invalid name for column \'mess age\'" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:56.491669Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Invalid name for column 'mess age', operation: CREATE COLUMN TABLE, path: /MyRoot/ 2025-06-25T15:02:56.492619Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-25T15:02:56.492802Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-25T15:02:56.492843Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-25T15:02:56.493164Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-25T15:02:56.493244Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T15:02:56.493282Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:286:2275] TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-25T15:02:56.495906Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TestTable" Schema { Columns { Name: "Id" Type: "Int32" NotNull: true } Columns { Name: "~!@#$%^&*()+=asdfa" Type: "Utf8" } KeyColumnNames: "Id" } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:02:56.496151Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TestTable, opId: 102:0, at schemeshard: 72057594046678944 2025-06-25T15:02:56.496333Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 102:1, propose status:StatusSchemeError, reason: Invalid name for column '~!@#$%^&*()+=asdfa', at schemeshard: 72057594046678944 2025-06-25T15:02:56.498267Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 102, response: Status: 
StatusSchemeError Reason: "Invalid name for column \'~!@#$%^&*()+=asdfa\'" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:56.498428Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusSchemeError, reason: Invalid name for column '~!@#$%^&*()+=asdfa', operation: CREATE COLUMN TABLE, path: /MyRoot/ TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T15:02:56.498656Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T15:02:56.498689Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T15:02:56.498972Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T15:02:56.499048Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T15:02:56.499074Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:293:2282] TestWaitNotification: OK eventTxId 102 >> DataShardReadIteratorConsistency::WriteLockThenUncommittedReadUpgradeRetryAndRestart [GOOD] >> DataShardReadIteratorConsistency::WriteLockThenUncommittedReadUpgradeRestartWithStateMigrationRetryAndRestartWithoutStateMigration >> PersQueueSdkReadSessionTest::SettingsValidation [GOOD] >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TTxDataShardMiniKQL::ReadAfterWrite [GOOD] >> TTxDataShardMiniKQL::ReadNonExisting |92.8%| [TA] $(B)/ydb/core/mind/address_classification/ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.8%| [TA] {RESULT} $(B)/ydb/core/mind/address_classification/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TNodeBrokerTest::Test999NodesSubscribers [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix2 [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix3 >> TTxDataShardMiniKQL::ReadNonExisting [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit900 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit900 >> TTxDataShardMiniKQL::WriteValueTooLarge [GOOD] >> TTxDataShardMiniKQL::WriteLargeExternalBlob |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> KqpAnalyze::AnalyzeTable+ColumnStore [FAIL] >> KqpAnalyze::AnalyzeTable-ColumnStore ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::Test999NodesSubscribers [GOOD] Test command err: 2025-06-25T15:00:46.980930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:00:46.980974Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> TOlap::CreateDropStandaloneTableDefaultSharding [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::ReadNonExisting [GOOD] Test command err: 2025-06-25T15:02:56.432548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:02:56.432596Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:56.436820Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:56.448489Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:56.448923Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:02:56.450867Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:56.462909Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:56.507818Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:56.507998Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:56.509691Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:02:56.509765Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:02:56.509820Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:02:56.510204Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:56.510305Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:56.510369Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:02:56.577025Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:56.617165Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:02:56.617352Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:56.617476Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:02:56.617522Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:02:56.617554Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:02:56.617584Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:56.617796Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:56.617839Z node 1 :TX_DATASHARD 
TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:56.618116Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:02:56.618222Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:02:56.618289Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:02:56.618329Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:56.618373Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:02:56.618407Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:02:56.618452Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:02:56.618493Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:02:56.618531Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:56.618621Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:56.618661Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:56.618708Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:02:56.626031Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:02:56.626101Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:56.626206Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:02:56.626387Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:02:56.626444Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:02:56.626524Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:02:56.626571Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:02:56.626604Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:02:56.626635Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:02:56.626666Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:02:56.626992Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:56.627024Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:02:56.627059Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:02:56.627095Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:02:56.627150Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:02:56.627180Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:02:56.627218Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:02:56.627264Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:02:56.627292Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:02:56.639563Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:02:56.639624Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:02:56.639660Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:02:56.639704Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:02:56.639778Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:02:56.640201Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:56.640257Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:56.640303Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:02:56.640455Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:02:56.640501Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:02:56.640662Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:02:56.640702Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:02:56.640737Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on 
unit WaitForPlan 2025-06-25T15:02:56.640770Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:02:56.644084Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:02:56.644144Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:56.644579Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:56.644628Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:56.644681Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:02:56.644731Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:02:56.644760Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:02:56.644795Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:02:56.644825Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... oordinators count is 1 buckets per mediator 2 2025-06-25T15:02:58.437655Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [3:287:2269] 2025-06-25T15:02:58.437717Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:02:58.437768Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 9437184 2025-06-25T15:02:58.437807Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:58.438104Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:711: TxInitSchemaDefaults.Execute 2025-06-25T15:02:58.438217Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:723: TxInitSchemaDefaults.Complete 2025-06-25T15:02:58.438401Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [3:239:2230], Recipient [3:239:2230]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:58.438453Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:58.438732Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:02:58.438830Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:02:58.438969Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5607: Got TEvDataShard::TEvSchemaChanged for unknown txId 1 message# Source { RawX1: 239 RawX2: 12884904118 } Origin: 9437184 State: 2 TxId: 1 Step: 0 Generation: 3 2025-06-25T15:02:58.439054Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [3:25:2072], Recipient [3:239:2230]: {TEvRegisterTabletResult TabletId# 9437184 Entry# 0} 
2025-06-25T15:02:58.439094Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-25T15:02:58.439131Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 9437184 time 0 2025-06-25T15:02:58.439189Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:58.439285Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [3:25:2072], Recipient [3:239:2230]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 0 ReadStep# 0 } 2025-06-25T15:02:58.439321Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-25T15:02:58.439361Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 9437184 coordinator 72057594046316545 last step 0 next step 0 2025-06-25T15:02:58.439433Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:02:58.439476Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:58.439517Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:02:58.439559Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:02:58.439593Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:02:58.439629Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:02:58.439674Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:58.439803Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [3:285:2267], Recipient [3:239:2230]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 4200 Status: OK ServerId: [3:289:2271] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T15:02:58.439849Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T15:02:58.439941Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [3:127:2151], Recipient [3:239:2230]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 1 2025-06-25T15:02:58.439972Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-25T15:02:58.440011Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 1 datashard 9437184 state Ready 2025-06-25T15:02:58.440071Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 9437184 Got TEvSchemaChangedResult from SS at 9437184 2025-06-25T15:02:58.452421Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [3:285:2267], Recipient [3:239:2230]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 4200 ClientId: [3:285:2267] ServerId: [3:289:2271] } 2025-06-25T15:02:58.452486Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T15:02:58.510912Z node 3 :TX_DATASHARD 
TRACE: datashard_impl.h:3132: StateWork, received event# 269551617, Sender [3:103:2136], Recipient [3:239:2230]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 103 RawX2: 12884904024 } 2025-06-25T15:02:58.510993Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-25T15:02:58.511246Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:295:2275], Recipient [3:239:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:58.511282Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:58.511330Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:293:2274], serverId# [3:295:2275], sessionId# [0:0:0] 2025-06-25T15:02:58.511543Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [3:103:2136], Recipient [3:239:2230]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 103 RawX2: 12884904024 } TxBody: "\032\365\001\037\004\0021\nvalue\005\205\n\205\002\207\205\002\207\203\001H\006\002\205\004\205\002?\006\002\205\000\034MyReads MyWrites\205\004\205\002?\006\002\206\202\024Reply\024Write?\014\205\002\206\203\010\002 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\010)\211\n?\006\203\005\004\200\205\002\203\004\006\213\002\203\004\203\004$SelectRow\000\003?\036 h\020\000\000\000\000\000\000\r\000\000\000\000\000\000\000?\004\005?\"\003? p\001\013?&\003?$T\001\003?(\000\037\002\000\005?\016\005?\n?8\000\005?\014\003\005?\024\005?\020?8\000\006\000?\022\003?>\005?\032\006\000?\030\001\037/ \0018\001" TxId: 2 ExecLevel: 0 Flags: 0 2025-06-25T15:02:58.511579Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:58.511678Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:02:58.512463Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CheckDataTx 2025-06-25T15:02:58.512589Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-25T15:02:58.512639Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CheckDataTx 2025-06-25T15:02:58.512676Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-25T15:02:58.512715Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit BuildAndWaitDependencies 2025-06-25T15:02:58.512757Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-25T15:02:58.512822Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 9437184 2025-06-25T15:02:58.512865Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-25T15:02:58.512889Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-25T15:02:58.512911Z node 3 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit ExecuteDataTx 2025-06-25T15:02:58.512936Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-25T15:02:58.513350Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:2] at tablet 9437184 with status COMPLETE 2025-06-25T15:02:58.513420Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:2] at 9437184: {NSelectRow: 1, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T15:02:58.513481Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-25T15:02:58.513506Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit ExecuteDataTx 2025-06-25T15:02:58.513530Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit FinishPropose 2025-06-25T15:02:58.513556Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit FinishPropose 2025-06-25T15:02:58.513596Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 2 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-25T15:02:58.513668Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is DelayComplete 2025-06-25T15:02:58.513697Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit FinishPropose 2025-06-25T15:02:58.513738Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit CompletedOperations 2025-06-25T15:02:58.513795Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CompletedOperations 2025-06-25T15:02:58.513845Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-25T15:02:58.513869Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CompletedOperations 2025-06-25T15:02:58.513894Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 9437184 has finished 2025-06-25T15:02:58.513954Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:02:58.514001Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 9437184 on unit FinishPropose 2025-06-25T15:02:58.514046Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] >> KqpLimits::WaitCAsStateOnAbort [GOOD] >> KqpLimits::WaitCAsTimeout >> DataShardReadIterator::ShouldReadHeadFromFollower [GOOD] >> DataShardReadIterator::ShouldReadFromHead |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::CreateDropStandaloneTableDefaultSharding [GOOD] Test command err: Leader for TabletID 72057594046678944 is 
[0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:02:49.147836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:02:49.147918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:49.147951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:02:49.147993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:02:49.148340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:02:49.148377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:02:49.148459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:02:49.148523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:02:49.149193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:02:49.150779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:02:49.236978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:02:49.237032Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:49.255938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:02:49.256274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:02:49.256446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:02:49.268927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:02:49.269272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:02:49.269955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.270230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 2025-06-25T15:02:49.274306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:49.274503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:02:49.278065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:49.278166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:02:49.278338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:02:49.278393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:02:49.278441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:02:49.278527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.285123Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:02:49.407116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:02:49.408692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.409783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:02:49.409841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:02:49.411052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:02:49.411155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:49.414250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.415247Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:02:49.415484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.415543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:02:49.415595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:02:49.415631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:02:49.417822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.417886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:02:49.417923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:02:49.419575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.419618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:02:49.419677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.419718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:02:49.424435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:02:49.426772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:02:49.427881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:02:49.428882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:02:49.429008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 
72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:02:49.429071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.430177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:02:49.430237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:02:49.430447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:02:49.430549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:02:49.432463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:02:49.432504Z node 1 :FLAT_TX_SCHEMESHARD ... :180: Close pipe to deleted shardIdx 72057594046678944:59 tabletId 72075186233409604 2025-06-25T15:02:58.816722Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-25T15:02:58.816766Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-25T15:02:58.816862Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-25T15:02:58.816890Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-25T15:02:58.816974Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-25T15:02:58.816999Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-25T15:02:58.817078Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 2025-06-25T15:02:58.817101Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:23 tabletId 72075186233409568 2025-06-25T15:02:58.817202Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:21 2025-06-25T15:02:58.817226Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:21 tabletId 72075186233409566 2025-06-25T15:02:58.817309Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:19 2025-06-25T15:02:58.817333Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:19 tabletId 72075186233409564 2025-06-25T15:02:58.817386Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:17 2025-06-25T15:02:58.817410Z node 3 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:17 tabletId 72075186233409562 2025-06-25T15:02:58.817484Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:15 2025-06-25T15:02:58.817506Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:15 tabletId 72075186233409560 2025-06-25T15:02:58.818409Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:13 2025-06-25T15:02:58.818442Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:13 tabletId 72075186233409558 2025-06-25T15:02:58.818510Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:11 2025-06-25T15:02:58.818531Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:11 tabletId 72075186233409556 2025-06-25T15:02:58.818593Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:9 2025-06-25T15:02:58.818614Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:9 tabletId 72075186233409554 2025-06-25T15:02:58.818682Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:36 2025-06-25T15:02:58.818706Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:36 tabletId 72075186233409581 2025-06-25T15:02:58.818764Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:38 2025-06-25T15:02:58.818785Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:38 tabletId 72075186233409583 2025-06-25T15:02:58.818845Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:40 2025-06-25T15:02:58.818865Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:40 tabletId 72075186233409585 2025-06-25T15:02:58.818927Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:32 2025-06-25T15:02:58.818948Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:32 tabletId 72075186233409577 2025-06-25T15:02:58.822547Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:34 2025-06-25T15:02:58.822591Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:34 tabletId 72075186233409579 2025-06-25T15:02:58.822673Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:28 2025-06-25T15:02:58.822694Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:28 tabletId 72075186233409573 2025-06-25T15:02:58.822741Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:30 
2025-06-25T15:02:58.822759Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:30 tabletId 72075186233409575 2025-06-25T15:02:58.822809Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:26 2025-06-25T15:02:58.822828Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:26 tabletId 72075186233409571 2025-06-25T15:02:58.822878Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-06-25T15:02:58.822895Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:24 tabletId 72075186233409569 2025-06-25T15:02:58.822946Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:57 2025-06-25T15:02:58.822965Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:57 tabletId 72075186233409602 2025-06-25T15:02:58.823014Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:53 2025-06-25T15:02:58.823033Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:53 tabletId 72075186233409598 2025-06-25T15:02:58.823082Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:55 2025-06-25T15:02:58.823101Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:55 tabletId 72075186233409600 2025-06-25T15:02:58.826670Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:49 2025-06-25T15:02:58.826717Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:49 tabletId 72075186233409594 2025-06-25T15:02:58.826810Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:47 2025-06-25T15:02:58.826832Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:47 tabletId 72075186233409592 2025-06-25T15:02:58.826896Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:51 2025-06-25T15:02:58.826916Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:51 tabletId 72075186233409596 2025-06-25T15:02:58.826974Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:45 2025-06-25T15:02:58.826995Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:45 tabletId 72075186233409590 2025-06-25T15:02:58.827055Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:43 2025-06-25T15:02:58.827077Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:43 tabletId 72075186233409588 2025-06-25T15:02:58.827135Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:41 2025-06-25T15:02:58.827185Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:41 tabletId 72075186233409586 2025-06-25T15:02:58.827306Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 109 2025-06-25T15:02:58.828291Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyDir/ColumnTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:02:58.828529Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyDir/ColumnTable" took 278us result status StatusPathDoesNotExist 2025-06-25T15:02:58.828688Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyDir/ColumnTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/MyDir\' (id: [OwnerId: 72057594046678944, LocalPathId: 2])" Path: "/MyRoot/MyDir/ColumnTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/MyDir" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "MyDir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T15:02:58.829455Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 6 SchemeshardId: 72057594046678944 Options { }, at schemeshard: 72057594046678944 2025-06-25T15:02:58.829551Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 6 took 170us result status StatusPathDoesNotExist 2025-06-25T15:02:58.829627Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'\', error: path is empty" Path: "" PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> AsyncIndexChangeCollector::UpsertSingleRow >> AsyncIndexChangeCollector::InsertSingleRow >> CdcStreamChangeCollector::UpsertManyRows >> CdcStreamChangeCollector::InsertSingleRow >> DataShardReadIterator::ShouldFailReadNextAfterSchemeChangeExhausted [GOOD] >> DataShardReadIterator::NoErrorOnFinalACK >> AsyncIndexChangeCollector::DeleteNothing >> DataShardReadIterator::ShouldReadRangeChunk1 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk2 >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips2+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips2-EvWrite >> TTxDataShardMiniKQL::WriteLargeExternalBlob [GOOD] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedAfterRead+UseSink [GOOD] >> 
DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedAfterRead-UseSink |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> DataShardReadIterator::TryWriteManyRows-Commit [GOOD] >> DataShardReadIteratorBatchMode::RangeFull ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::WriteLargeExternalBlob [GOOD] Test command err: 2025-06-25T15:02:56.431747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:02:56.431810Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:56.434440Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:56.448577Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:56.449121Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:02:56.450859Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:56.462922Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:56.503010Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:56.503196Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:56.505940Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:02:56.506028Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:02:56.506095Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:02:56.508105Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:56.508208Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:56.508282Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:02:56.579047Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:56.609172Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:02:56.610716Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:56.610853Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:02:56.610904Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:02:56.610941Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:02:56.610977Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:56.611226Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, 
Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:56.611277Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:56.612483Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:02:56.612594Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:02:56.612650Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:02:56.612688Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:56.612794Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:02:56.612830Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:02:56.612896Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:02:56.612932Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:02:56.612976Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:56.613063Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:56.613131Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:56.613179Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:02:56.617575Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nY\n\006table2\032\n\n\004key1\030\002 \"\032\013\n\004key2\030\200$ #\032\014\n\005value\030\200$ 8(\"(#:\010Z\006\010\000\030\000(\000J\014/Root/table2\222\002\013\th\020\000\000\000\000\000\000\020\016" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:02:56.617666Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:56.617765Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:02:56.618046Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:02:56.618107Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:02:56.618181Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:02:56.618316Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:02:56.618358Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 
9437184 executing on unit CheckSchemeTx 2025-06-25T15:02:56.618405Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:02:56.618439Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:02:56.618740Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:56.618780Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:02:56.618818Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:02:56.618851Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:02:56.618916Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:02:56.618953Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:02:56.618987Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:02:56.619017Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:02:56.619042Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:02:56.631293Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:02:56.631363Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:02:56.631397Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:02:56.631450Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:02:56.633329Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:02:56.636818Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:56.636887Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:56.636939Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:02:56.637090Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:02:56.637130Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:02:56.637284Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:02:56.637332Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:02:56.637371Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:02:56.637422Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:02:56.641049Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:02:56.641111Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:56.641322Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:56.641366Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:56.641428Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:02:56.641468Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:02:56.641503Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:02:56.641552Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:02:56.641614Z node 1 :TX_DATASHARD TRACE: dat ... 
s: 0, InvisibleRowSkips: 0} 2025-06-25T15:02:59.924492Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:02:59.924543Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit ExecuteDataTx 2025-06-25T15:02:59.924604Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit FinishPropose 2025-06-25T15:02:59.924653Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit FinishPropose 2025-06-25T15:02:59.924755Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:59.924797Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit FinishPropose 2025-06-25T15:02:59.924846Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit CompletedOperations 2025-06-25T15:02:59.924888Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CompletedOperations 2025-06-25T15:02:59.924953Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-25T15:02:59.924989Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CompletedOperations 2025-06-25T15:02:59.925032Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 9437184 has finished 2025-06-25T15:02:59.950668Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:02:59.950735Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 9437184 on unit FinishPropose 2025-06-25T15:02:59.950781Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 2 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 5 ms, status: COMPLETE 2025-06-25T15:02:59.950864Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:00.729438Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269551617, Sender [3:103:2136], Recipient [3:239:2230]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 103 RawX2: 12884904024 } 2025-06-25T15:03:00.729540Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-25T15:03:00.730050Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [3:304:2283], Recipient [3:239:2230]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:03:00.730096Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:03:00.730153Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:303:2282], serverId# [3:304:2283], sessionId# [0:0:0] 2025-06-25T15:03:00.898505Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [3:103:2136], Recipient [3:239:2230]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 103 RawX2: 12884904024 } TxBody: 
"\032\332\201\200\010\037\000\005\205\n\205\000\205\004?\000\205\002\202\0041\034MyReads MyWrites\205\004?\000\206\202\024Reply\024Write?\000?\000 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\000\005?\004?\014\005?\002)\211\006\202\203\005\004\213\004\203\004\203\001H\205\002\203\001H\01056$UpdateRow\000\003?\016 h\020\000\000\000\000\000\000\016\000\000\000\000\000\000\000\013?\024\003?\020\251\003\003?\022\006bar\003\005?\030\003?\026\007\000\000\000\001xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 2025-06-25T15:03:00.900833Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:03:00.901011Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:03:00.954028Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit CheckDataTx 2025-06-25T15:03:00.954157Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Executed 2025-06-25T15:03:00.954211Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit CheckDataTx 2025-06-25T15:03:00.954257Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:00.954294Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit BuildAndWaitDependencies 2025-06-25T15:03:00.954344Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-25T15:03:00.954407Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 9437184 2025-06-25T15:03:00.954453Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Executed 2025-06-25T15:03:00.954478Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:00.954500Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 9437184 to execution unit ExecuteDataTx 2025-06-25T15:03:00.954522Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit ExecuteDataTx 2025-06-25T15:03:00.954574Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-25T15:03:00.954624Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:124: Operation [0:3] at 9437184 requested 46269670 more memory 2025-06-25T15:03:00.954664Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Restart 2025-06-25T15:03:00.954808Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: 
TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:03:00.954859Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit ExecuteDataTx 2025-06-25T15:03:00.954909Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-25T15:03:01.001408Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:3] at 9437184 exceeded memory limit 50463974 and requests 403711792 more for the next try 2025-06-25T15:03:01.004942Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 3 released its data 2025-06-25T15:03:01.005030Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Restart 2025-06-25T15:03:01.005393Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:03:01.005440Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit ExecuteDataTx 2025-06-25T15:03:01.061954Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 3 at 9437184 restored its data 2025-06-25T15:03:01.062042Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-25T15:03:01.145880Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:3] at tablet 9437184 with status COMPLETE 2025-06-25T15:03:01.145994Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:3] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 16777223, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T15:03:01.146079Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:03:01.146119Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit ExecuteDataTx 2025-06-25T15:03:01.146160Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 9437184 to execution unit FinishPropose 2025-06-25T15:03:01.146199Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit FinishPropose 2025-06-25T15:03:01.146245Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is DelayComplete 2025-06-25T15:03:01.146275Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit FinishPropose 2025-06-25T15:03:01.146314Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 9437184 to execution unit CompletedOperations 2025-06-25T15:03:01.146347Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit CompletedOperations 2025-06-25T15:03:01.146399Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Executed 2025-06-25T15:03:01.146429Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution 
plan for [0:3] at 9437184 executing on unit CompletedOperations 2025-06-25T15:03:01.146469Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 9437184 has finished 2025-06-25T15:03:01.221674Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:03:01.221747Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:3] at 9437184 on unit FinishPropose 2025-06-25T15:03:01.221802Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 3 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 2 ms, status: COMPLETE 2025-06-25T15:03:01.221902Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:01.276970Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 9437184, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-25T15:03:01.277041Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 9437184, table# 1001, finished edge# 0, front# 0 2025-06-25T15:03:01.283153Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [3:236:2229], Recipient [3:239:2230]: NKikimr::TEvTablet::TEvFollowerGcApplied >> AsyncIndexChangeCollector::UpsertToSameKey >> DataShardSnapshots::MvccSnapshotLockedWritesWithReadConflicts [GOOD] >> DataShardSnapshots::LockedWritesLimitedPerKey+UseSink >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] >> DataShardTxOrder::RandomPoints_DelayRS_Reboot_Dirty [GOOD] >> DataShardTxOrder::RandomPoints_DelayRS_Reboot [GOOD] >> KqpStats::SysViewCancelled [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder-EvWrite >> test_sql_streaming.py::test[suites-ReadWriteSameTopic-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadWriteTopic-default.txt] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataInsideFilter-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataNestedDeep-default.txt] >> KqpAnalyze::AnalyzeTable-ColumnStore [GOOD] >> KqpExplain::AggGroupLimit >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] >> test_sql_streaming.py::test[suites-GroupByHopByStringKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopExprKey-default.txt] >> DataShardReadIteratorConsistency::WriteLockThenUncommittedReadUpgradeRestartWithStateMigrationRetryAndRestartWithoutStateMigration [GOOD] >> DataShardReadIteratorFastCancel::ShouldProcessFastCancel >> test_sql_streaming.py::test[suites-GroupByHopWithDataWatermarks-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHoppingWithDataWatermarks-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPoints_DelayRS_Reboot_Dirty [GOOD] Test command err: 2025-06-25T15:01:44.101062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console 
configs 2025-06-25T15:01:44.101151Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:44.102271Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:44.110852Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:44.111186Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:44.111435Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:44.119649Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:44.164435Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:44.164598Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:44.166137Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:44.166209Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:44.166254Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:44.166580Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:44.166654Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:44.166709Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:44.237417Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:44.275543Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:44.275742Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:44.275854Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:44.275899Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:44.275937Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:44.275967Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:44.276186Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:44.276246Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:44.276536Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:44.276632Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 
2025-06-25T15:01:44.276693Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:44.276726Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:44.276766Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:44.276796Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:44.276840Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:44.276868Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:44.276905Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:44.276994Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:44.277034Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:44.277080Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:44.279754Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\001J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:44.279817Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:44.279886Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:44.280057Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:44.280121Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:44.280169Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:44.280232Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:44.280266Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:44.280296Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:44.280345Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:44.280666Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:44.280705Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] 
at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:44.280745Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:44.280782Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:44.280838Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:44.280874Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:44.280913Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:44.280940Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:44.280961Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:44.297257Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:44.297329Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:44.297388Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:44.297445Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:44.297500Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:44.298019Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:44.298075Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:44.298124Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:44.298258Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:44.298290Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:44.298423Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:44.298461Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:44.298496Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:44.298527Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:44.302877Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 
1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:44.302964Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:44.303202Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:44.303248Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:44.303303Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:44.303338Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:44.303366Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:44.303400Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:44.303430Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... :03.024261Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:26] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.024304Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 26] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.024359Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.024542Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.024578Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:27] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.024628Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 27] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.024664Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.024892Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.024925Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:28] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.024972Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 28] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.025009Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.025253Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.025291Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:29] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.025338Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 29] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.025373Z node 
32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.025616Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.025650Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:30] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.025695Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 30] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.025729Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.025979Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.026019Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:31] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.026065Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 31] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.026100Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.026311Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.026347Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:32] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.026395Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 32] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.026434Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.026641Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.026677Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:33] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.026725Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 33] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.026759Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.026913Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.026943Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:34] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.026985Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 34] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.027020Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.027186Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.027221Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: 
Complete execution for [1000004:35] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.027264Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 35] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.027298Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.027485Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.027518Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:36] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.027560Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 36] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.027594Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.027802Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.027836Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:37] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.027882Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 37] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.027919Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.028212Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:808:2731], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-25T15:03:03.028265Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.028323Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 5 2025-06-25T15:03:03.028468Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:808:2731], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 6 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 3} 2025-06-25T15:03:03.028503Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.028537Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 6 2025-06-25T15:03:03.028631Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:808:2731], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 7 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 4} 2025-06-25T15:03:03.028667Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.028702Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 7 2025-06-25T15:03:03.028792Z node 32 :TX_DATASHARD TRACE: 
datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:808:2731], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 8 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-25T15:03:03.028825Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.028858Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 8 2025-06-25T15:03:03.028946Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:808:2731], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 9 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-25T15:03:03.028978Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.029009Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 9 2025-06-25T15:03:03.029100Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:808:2731], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 12 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-25T15:03:03.029133Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.029164Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 12 2025-06-25T15:03:03.029251Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:808:2731], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 13 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 9} 2025-06-25T15:03:03.029284Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.029314Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 13 expect 27 30 28 31 26 30 31 30 30 30 27 24 25 19 31 21 31 27 25 27 27 27 18 - - 22 22 - 18 - - - actual 27 30 28 31 26 30 31 30 30 30 27 24 25 19 31 21 31 27 25 27 27 27 18 - - 22 22 - 18 - - - interm - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPoints_DelayRS_Reboot [GOOD] Test command err: 2025-06-25T15:01:44.649790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:44.649835Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:44.650715Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:44.658847Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:44.659252Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:44.659479Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:44.668206Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:44.712090Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:44.712266Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:44.713861Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:44.713926Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:44.713975Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:44.714293Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:44.714373Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:44.714430Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:44.785062Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:44.821941Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:44.822133Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:44.822233Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:44.822282Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:44.822315Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:44.822349Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:44.822541Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:44.822591Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:44.822871Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:44.822966Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:44.823020Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:44.823075Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:44.823135Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:44.823167Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:44.823215Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:44.823247Z 
node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:44.823286Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:44.823361Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:44.823391Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:44.823439Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:44.826231Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:44.826301Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:44.826387Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:44.826558Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:44.826617Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:44.826663Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:44.826709Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:44.826737Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:44.826766Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:44.826794Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:44.827090Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:44.827129Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:44.827158Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:44.827208Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:44.827268Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:44.827316Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:44.827351Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:44.827378Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:44.827400Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:44.839448Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:44.839514Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:44.839554Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:44.839604Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:44.839672Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:44.840109Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:44.840152Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:44.840198Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:44.840348Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:44.840376Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:44.840533Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:44.840574Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:44.840603Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:44.840634Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:44.844096Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:44.844155Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:44.844341Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:44.844377Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:44.844426Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 
9437184 2025-06-25T15:01:44.844464Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:44.844492Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:44.844538Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:44.844569Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... xProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.029054Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:27] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.029097Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 27] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.029136Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.029341Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.029375Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:28] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.029422Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 28] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.029462Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.029658Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.029692Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:29] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.029741Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 29] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.029778Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.029967Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.030000Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:30] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.030043Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 30] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.030079Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.030278Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.030311Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:31] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.030356Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 31] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 
2025-06-25T15:03:03.030397Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.030577Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.030618Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:32] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.030670Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 32] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.030712Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.030957Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.031001Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:33] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.031051Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 33] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.031094Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.031298Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.031341Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:34] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.031393Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 34] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.031435Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.031580Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.031612Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:35] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.031656Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 35] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.031692Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.031913Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.031951Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:36] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.031994Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 36] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.032033Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.032203Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:03.032235Z node 32 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:37] at 9437184 on unit CompleteOperation 2025-06-25T15:03:03.032277Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 37] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:03.032331Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:03.032635Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:809:2732], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-25T15:03:03.032683Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.032722Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 5 2025-06-25T15:03:03.032872Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:809:2732], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 6 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 3} 2025-06-25T15:03:03.032910Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.032942Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 6 2025-06-25T15:03:03.033046Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:809:2732], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 7 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 4} 2025-06-25T15:03:03.033084Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.033117Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 7 2025-06-25T15:03:03.033211Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:809:2732], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 8 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-25T15:03:03.033245Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.033274Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 8 2025-06-25T15:03:03.033370Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:809:2732], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 9 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-25T15:03:03.033405Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.033438Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 9 2025-06-25T15:03:03.033536Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender 
[32:809:2732], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 10 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-25T15:03:03.033572Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.033602Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 10 2025-06-25T15:03:03.033702Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:809:2732], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 12 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-25T15:03:03.033742Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.033778Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 12 2025-06-25T15:03:03.033888Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:809:2732], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 13 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 9} 2025-06-25T15:03:03.033940Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:03.033976Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 13 expect 25 26 31 27 28 30 30 30 28 25 27 25 27 30 28 30 31 - 18 10 21 27 30 - 10 9 - 9 0 - - - actual 25 26 31 27 28 30 30 30 28 25 27 25 27 30 28 30 31 - 18 10 21 27 30 - 10 9 - 9 0 - - - interm - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - >> KqpLimits::CancelAfterRwTx+useSink [GOOD] >> KqpLimits::CancelAfterRwTx-useSink >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit900 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1001 >> CdcStreamChangeCollector::InsertSingleRow [GOOD] >> CdcStreamChangeCollector::InsertSingleUuidRow >> AsyncIndexChangeCollector::UpsertSingleRow [GOOD] >> AsyncIndexChangeCollector::UpsertManyRows |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> DataShardReadIterator::ShouldReadRangePrefix3 [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix4 >> AsyncIndexChangeCollector::InsertSingleRow [GOOD] >> AsyncIndexChangeCollector::InsertManyRows >> CdcStreamChangeCollector::UpsertManyRows [GOOD] >> CdcStreamChangeCollector::UpsertToSameKey >> AsyncIndexChangeCollector::DeleteNothing [GOOD] >> AsyncIndexChangeCollector::DeleteSingleRow >> AnalyzeDatashard::AnalyzeTwoTables |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> DataShardSnapshots::VolatileSnapshotTimeoutRefresh [GOOD] >> DataShardSnapshots::VolatileSnapshotCleanupOnReboot >> DataShardSnapshots::LockedWriteWithAsyncIndex+WithRestart-UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndex-WithRestart+UseSink >> DataShardReadIterator::ShouldReadFromHead [GOOD] >> DataShardReadIterator::ShouldReadFromHeadWithConflict+UseSink >> AsyncIndexChangeCollector::UpsertToSameKey [GOOD] >> AsyncIndexChangeCollector::UpsertWithoutIndexedValue >> 
DataShardTxOrder::RandomPoints_DelayRS [GOOD] |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> DataShardReadIterator::NoErrorOnFinalACK [GOOD] >> DataShardReadIterator::ShouldCancelMvccSnapshotFromFuture |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpStats::SysViewCancelled [GOOD] Test command err: Trying to start YDB, gRPC: 64834, MsgBus: 28199 2025-06-25T15:01:11.494684Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902208894277890:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:11.520840Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013d6/r3tmp/tmpVzEkHu/pdisk_1.dat 2025-06-25T15:01:11.988934Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902208894277708:2080] 1750863671483982 != 1750863671483985 2025-06-25T15:01:12.016411Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:12.029332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:12.029454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 64834, node 1 2025-06-25T15:01:12.031153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:12.144848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:12.144872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:12.144879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:12.144997Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28199 2025-06-25T15:01:12.492482Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28199 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:12.899048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:12.915770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:01:12.928571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:13.099907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:13.293487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:13.398095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:15.307617Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902226074148537:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:15.307720Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:15.691313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.747017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.833343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.885977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.921935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:15.960057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.005734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:16.084540Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902230369116494:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:16.084634Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:16.084866Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902230369116499:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:16.088984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:16.116667Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902230369116501:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:01:16.185674Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902230369116552:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:16.493880Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902208894277890:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:16.493945Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":8,"Plans":[{"PlanNodeId":7,"Plans":[{"PlanNodeId":6,"Plans":[{"E-Size":"0","PlanNodeId":5,"LookupKeyColumns":["Key"],"Node Type":"TableLookup","Path":"\/Root\/TwoShard","Columns":["Key","Value1","Value2"],"E-Rows":"0","Table":"TwoShard","Plans":[{"PlanNodeId":4,"Plans":[{"Plan ... urityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T15:02:27.253931Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:27.277551Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:27.383972Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:27.395495Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:27.655373Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T15:02:27.753443Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:30.639658Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902546170339458:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:30.639766Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:30.708146Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:30.784944Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:30.841298Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:30.879958Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:30.958101Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.005382Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.094656Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:31.196396Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902550465307424:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:31.196483Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:31.196844Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902550465307429:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:31.201016Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:31.211887Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902550465307431:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:31.307360Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902550465307482:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:31.380412Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902528990468659:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:31.380527Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:32.891555Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:41.491469Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:02:41.491502Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:01.352352Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863781345, txId: 281474976715673] shutting down 2025-06-25T15:03:02.047885Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519902679314327964:2704] TxId: 281474976715675. Ctx: { TraceId: 01jykst80gdv6qydznt1ax27g6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MzUyODJkLWRiYjk4MWRjLTJlOTRhNmYyLWY1NTk1ZmZl, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 100ms } {
: Error: Cancelling after 143ms during execution } ] 2025-06-25T15:03:02.063518Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519902679314327994:2733], TxId: 281474976715675, task: 9. Ctx: { CustomerSuppliedId : . TraceId : 01jykst80gdv6qydznt1ax27g6. SessionId : ydb://session/3?node_id=4&id=MzUyODJkLWRiYjk4MWRjLTJlOTRhNmYyLWY1NTk1ZmZl. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519902679314327964:2704], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-25T15:03:02.252389Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519902679314327985:2725], TxId: 281474976715675, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=MzUyODJkLWRiYjk4MWRjLTJlOTRhNmYyLWY1NTk1ZmZl. TraceId : 01jykst80gdv6qydznt1ax27g6. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519902679314327964:2704], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-25T15:03:02.252948Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=MzUyODJkLWRiYjk4MWRjLTJlOTRhNmYyLWY1NTk1ZmZl, ActorId: [4:7519902679314327899:2704], ActorState: ExecuteState, TraceId: 01jykst80gdv6qydznt1ax27g6, Create QueryResponse for error on request, msg: 2025-06-25T15:03:02.253944Z node 4 :TX_DATASHARD WARN: datashard__read_iterator.cpp:3439: 72075186224037922 Cancelled read: {[4:7519902679314327995:2725], 0}
: Error: Request canceled after 100ms
: Error: Cancelling after 143ms during execution 2025-06-25T15:03:02.490708Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863782481, txId: 281474976715677] shutting down >> DataShardReadIterator::ShouldReadRangeChunk2 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk3 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPoints_DelayRS [GOOD] Test command err: 2025-06-25T15:01:52.640867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:52.640927Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:52.642167Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:52.653418Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:52.653828Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:52.654056Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:52.664419Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:52.709200Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:52.709343Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:52.710822Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:52.710886Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:52.710999Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:52.711292Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:52.711371Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:52.711429Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:52.771266Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:52.813745Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:52.813926Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:52.814031Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:52.814081Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:52.814113Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:52.814142Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:52.814322Z 
node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:52.814372Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:52.814594Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:52.814719Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:52.814779Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:52.814812Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:52.814852Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:52.814882Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:52.814941Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:52.814971Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:52.815010Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:52.815116Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:52.815144Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:52.815189Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:52.817787Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:52.817849Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:52.817931Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:52.818100Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:52.818160Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:52.818211Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:52.818258Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:52.818289Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:52.818318Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:52.818348Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:52.818632Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:52.818669Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:52.818697Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:52.818732Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:52.818791Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:52.818825Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:52.818860Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:52.818888Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:52.818907Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:52.831729Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:52.831809Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:52.831848Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:52.831902Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:52.831985Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:52.832571Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:52.832629Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:52.832679Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:52.832830Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:52.832860Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:52.832996Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 
2025-06-25T15:01:52.833038Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:52.833090Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:52.833128Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:52.843132Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:52.843224Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:52.843444Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:52.843492Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:52.843554Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:52.843593Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:52.843628Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:52.843668Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:52.843703Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
HARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:06.640407Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:28] at 9437184 on unit CompleteOperation 2025-06-25T15:03:06.640454Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 28] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:06.640489Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:06.640697Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:06.640728Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:29] at 9437184 on unit CompleteOperation 2025-06-25T15:03:06.640773Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 29] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:06.640807Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:06.640990Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:06.641026Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:30] at 9437184 on unit CompleteOperation 2025-06-25T15:03:06.641067Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 30] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:06.641100Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:06.641241Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:06.641273Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:31] at 9437184 on unit CompleteOperation 2025-06-25T15:03:06.641316Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 31] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:06.641348Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:06.641523Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:06.641556Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:32] at 9437184 on unit CompleteOperation 2025-06-25T15:03:06.641599Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 32] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:06.641633Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:06.641796Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:06.641828Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:33] at 9437184 on unit CompleteOperation 2025-06-25T15:03:06.641886Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: 
Complete [1000004 : 33] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:06.641918Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:06.642061Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:06.642094Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:34] at 9437184 on unit CompleteOperation 2025-06-25T15:03:06.642137Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 34] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:06.642169Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:06.642354Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:06.642384Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:35] at 9437184 on unit CompleteOperation 2025-06-25T15:03:06.642424Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 35] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:06.642455Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:06.642587Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:06.642619Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:36] at 9437184 on unit CompleteOperation 2025-06-25T15:03:06.642666Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 36] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:06.642696Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:06.642858Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:06.642890Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:37] at 9437184 on unit CompleteOperation 2025-06-25T15:03:06.642931Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 37] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:03:06.642969Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:06.643233Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:239:2230], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 36 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 32} 2025-06-25T15:03:06.643282Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:06.643322Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 36 2025-06-25T15:03:06.643487Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received 
event# 269287938, Sender [32:239:2230], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-25T15:03:06.643521Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:06.643552Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 5 2025-06-25T15:03:06.643639Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:239:2230], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 6 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 3} 2025-06-25T15:03:06.643672Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:06.643703Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 6 2025-06-25T15:03:06.643785Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:239:2230], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 8 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-25T15:03:06.643816Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:06.643851Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 8 2025-06-25T15:03:06.643934Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:239:2230], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 9 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-25T15:03:06.643965Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:06.643998Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 9 2025-06-25T15:03:06.644084Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:239:2230], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 10 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-25T15:03:06.644119Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:06.644149Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 10 2025-06-25T15:03:06.644280Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:239:2230], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 12 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-25T15:03:06.644327Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:06.644370Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 12 2025-06-25T15:03:06.644486Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received 
event# 269287938, Sender [32:239:2230], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 13 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 9} 2025-06-25T15:03:06.644519Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:06.644548Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 13 2025-06-25T15:03:06.644610Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:239:2230], Recipient [32:351:2316]: {TEvReadSet step# 1000004 txid# 14 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 10} 2025-06-25T15:03:06.644641Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:06.644673Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 14 expect 28 24 31 26 28 27 17 31 31 21 19 29 26 24 5 21 29 23 24 29 21 19 - - 21 0 21 - 29 - - - actual 28 24 31 26 28 27 17 31 31 21 19 29 26 24 5 21 29 23 24 29 21 19 - - 21 0 21 - 29 - - - interm 4 5 6 0 4 6 2 - 2 0 6 0 6 5 5 1 - - - - 0 - - - - 0 - - - - - - >> DataShardReadIteratorBatchMode::RangeFull [GOOD] >> DataShardReadIteratorBatchMode::RangeFromInclusive >> ReadIteratorExternalBlobs::ExtBlobs [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithSpecificKeys >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedAfterRead-UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead+UseSink |92.8%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips2-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeLeftBorder+EvWrite |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> AsyncIndexChangeCollector::InsertManyRows [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableInsertSingleRow >> AsyncIndexChangeCollector::UpsertManyRows [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableUpdateOneIndexedColumn >> AsyncIndexChangeCollector::DeleteSingleRow [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyDeleteSingleRow >> DataShardSnapshots::LockedWritesLimitedPerKey+UseSink [GOOD] >> DataShardSnapshots::LockedWritesLimitedPerKey-UseSink >> CdcStreamChangeCollector::UpsertToSameKey [GOOD] >> CdcStreamChangeCollector::UpsertToSameKeyWithImages >> KqpExplain::AggGroupLimit [GOOD] >> KqpExplain::ComplexJoin >> CdcStreamChangeCollector::InsertSingleUuidRow [GOOD] >> CdcStreamChangeCollector::IndexAndStreamUpsert |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> AnalyzeColumnshard::AnalyzeTwoColumnTables >> TraverseColumnShard::TraverseServerlessColumnTable >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue+EvWrite >> AsyncIndexChangeCollector::UpsertWithoutIndexedValue [GOOD] >> CdcStreamChangeCollector::DeleteNothing >> 
test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] >> DataShardReadIteratorFastCancel::ShouldProcessFastCancel [GOOD] >> DataShardReadIteratorLatency::ReadSplitLatency >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1001 [GOOD] >> DataShardReadIterator::ShouldReadFromFollower >> DataShardReadIterator::ShouldReadRangePrefix4 [GOOD] >> DataShardReadIterator::ShouldReadRangePrefix5 ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] Test command err: 2025-06-25T15:01:27.207369Z :TestReorderedExecutor INFO: Random seed for debugging is 1750863687207346 2025-06-25T15:01:27.666348Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902275545941900:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:27.667310Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T15:01:27.746782Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902279083815713:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:27.749853Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00179d/r3tmp/tmpCmCXqd/pdisk_1.dat 2025-06-25T15:01:27.988780Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T15:01:27.989467Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T15:01:28.487476Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:28.503292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:28.503409Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:28.504634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:28.504705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:28.521738Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:01:28.521962Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:28.523957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29198, node 1 2025-06-25T15:01:28.685664Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:28.761276Z node 2 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:01:28.900974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/00179d/r3tmp/yandexvBenII.tmp 2025-06-25T15:01:28.901004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/00179d/r3tmp/yandexvBenII.tmp 2025-06-25T15:01:28.902686Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/00179d/r3tmp/yandexvBenII.tmp 2025-06-25T15:01:28.902816Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:01:29.105467Z INFO: TTestServer started on Port 2184 GrpcPort 29198 TClient is connected to server localhost:2184 PQClient connected to localhost:29198 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:29.588888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T15:01:32.109855Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902297020779301:2302], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.109950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902297020779289:2299], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.110114Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:32.113833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:32.143920Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902297020779304:2303], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-25T15:01:32.227446Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902297020779393:2684] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:32.476687Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519902300558652360:2276], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T15:01:32.478800Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=NGM3ZmU5ZTAtOTZmNDVhMDQtOTU3YWEzOTUtMmU4YWNiMzc=, ActorId: [2:7519902300558652326:2269], ActorState: ExecuteState, TraceId: 01jyksqgdj7s8vj5rabazgwmrw, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T15:01:32.480759Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519902297020779403:2310], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T15:01:32.481707Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T15:01:32.482224Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NjJiMTdjMjctYTFiNWIzNWEtYjAxYWU1MDItNWYxYjEyODY=, ActorId: [1:7519902297020779280:2298], ActorState: ExecuteState, TraceId: 01jyksqgac5aj00qgqg5q0nc8z, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T15:01:32.482597Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T15:01:32.557382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:01:32.663262Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902275545941900:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:32.663337Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:32.737154Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902279083815713:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:32.737214Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:01:32.814122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/sch ... 
06-25T15:03:09.675254Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-25T15:03:09.675266Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-25T15:03:09.675285Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [13:7519902713995800113:2451] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-25T15:03:09.678618Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [13:7519902713995800113:2451] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-25T15:03:09.890770Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [13:7519902713995800113:2451] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-25T15:03:09.891180Z node 13 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [13:7519902713995800150:2451] connected; active server actors: 1 2025-06-25T15:03:09.891229Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [13:7519902713995800113:2451] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-25T15:03:09.891250Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [13:7519902713995800113:2451] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-25T15:03:09.891469Z node 13 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [13:7519902713995800150:2451] disconnected; active server actors: 1 2025-06-25T15:03:09.891491Z node 13 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [13:7519902713995800150:2451] disconnected no session 2025-06-25T15:03:10.037556Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [13:7519902713995800113:2451] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-25T15:03:10.037602Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [13:7519902713995800113:2451] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-25T15:03:10.037624Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [13:7519902713995800113:2451] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-25T15:03:10.037664Z node 13 :PQ_WRITE_PROXY DEBUG: 
write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-25T15:03:10.039951Z node 13 :PQ_WRITE_PROXY DEBUG: writer.cpp:819: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 14, Generation: 1 2025-06-25T15:03:10.039776Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72075186224037892] server connected, pipe [13:7519902718290767469:2451], now have 1 active actors on pipe 2025-06-25T15:03:10.040912Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T15:03:10.040957Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T15:03:10.041056Z node 14 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|9bef79fc-1ae84fb1-82fa56ad-ff6b87e_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-25T15:03:10.041174Z node 14 :PERSQUEUE DEBUG: partition_write.cpp:34: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-25T15:03:10.041227Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T15:03:10.042117Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:348: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-25T15:03:10.042139Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2812: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-25T15:03:10.042224Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-25T15:03:10.042570Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|9bef79fc-1ae84fb1-82fa56ad-ff6b87e_0 2025-06-25T15:03:10.045635Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750863790045 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T15:03:10.045771Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|9bef79fc-1ae84fb1-82fa56ad-ff6b87e_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-25T15:03:10.048438Z :INFO: [] MessageGroupId [src] SessionId [src|9bef79fc-1ae84fb1-82fa56ad-ff6b87e_0] Write session: close. 
Timeout = 0 ms 2025-06-25T15:03:10.048509Z :INFO: [] MessageGroupId [src] SessionId [src|9bef79fc-1ae84fb1-82fa56ad-ff6b87e_0] Write session will now close 2025-06-25T15:03:10.048557Z :DEBUG: [] MessageGroupId [src] SessionId [src|9bef79fc-1ae84fb1-82fa56ad-ff6b87e_0] Write session: aborting 2025-06-25T15:03:10.049051Z :INFO: [] MessageGroupId [src] SessionId [src|9bef79fc-1ae84fb1-82fa56ad-ff6b87e_0] Write session: gracefully shut down, all writes complete 2025-06-25T15:03:10.049097Z :DEBUG: [] MessageGroupId [src] SessionId [src|9bef79fc-1ae84fb1-82fa56ad-ff6b87e_0] Write session: destroy 2025-06-25T15:03:10.053595Z node 13 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|9bef79fc-1ae84fb1-82fa56ad-ff6b87e_0 grpc read done: success: 0 data: 2025-06-25T15:03:10.053624Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|9bef79fc-1ae84fb1-82fa56ad-ff6b87e_0 grpc read failed 2025-06-25T15:03:10.053663Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|9bef79fc-1ae84fb1-82fa56ad-ff6b87e_0 grpc closed 2025-06-25T15:03:10.053681Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|9bef79fc-1ae84fb1-82fa56ad-ff6b87e_0 is DEAD 2025-06-25T15:03:10.054413Z node 13 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T15:03:10.060455Z node 14 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [13:7519902718290767469:2451] destroyed 2025-06-25T15:03:10.060543Z node 14 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-25T15:03:10.104618Z :INFO: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Starting read session 2025-06-25T15:03:10.104667Z :DEBUG: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Starting cluster discovery 2025-06-25T15:03:10.104938Z :INFO: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:12341: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:12341
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:12341. " 2025-06-25T15:03:10.104984Z :DEBUG: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Restart cluster discovery in 0.006473s 2025-06-25T15:03:10.112800Z :DEBUG: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Starting cluster discovery 2025-06-25T15:03:10.113147Z :INFO: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:12341: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:12341
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:12341. " 2025-06-25T15:03:10.113202Z :DEBUG: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Restart cluster discovery in 0.012525s 2025-06-25T15:03:10.126502Z :DEBUG: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Starting cluster discovery 2025-06-25T15:03:10.126715Z :INFO: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:12341: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:12341
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:12341. " 2025-06-25T15:03:10.126757Z :DEBUG: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Restart cluster discovery in 0.039157s 2025-06-25T15:03:10.166706Z :DEBUG: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Starting cluster discovery 2025-06-25T15:03:10.167030Z :NOTICE: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Aborting read session. Description: SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:12341: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:12341
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:12341. " } 2025-06-25T15:03:10.167224Z :NOTICE: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:12341: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:12341
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:12341. " } 2025-06-25T15:03:10.167374Z :INFO: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Closing read session. Close timeout: 0.000000s 2025-06-25T15:03:10.167482Z :NOTICE: [/Root] [/Root] [52dabea3-7cb0b646-41d05e98-99eca95a] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } >> KqpLimits::WaitCAsTimeout [GOOD] >> KqpParams::BadParameterType >> DataShardSnapshots::VolatileSnapshotCleanupOnReboot [GOOD] >> DataShardSnapshots::VolatileSnapshotCleanupOnFinish |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |92.9%| [TA] $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/test-results/unittest/{meta.json ... results_accumulator.log} |92.9%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> AsyncIndexChangeCollector::MultiIndexedTableInsertSingleRow [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow >> AsyncIndexChangeCollector::IndexedPrimaryKeyDeleteSingleRow [GOOD] >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn >> DataShardReadIterator::ShouldReadFromHeadWithConflict+UseSink [GOOD] >> DataShardReadIterator::ShouldReadFromHeadWithConflict-UseSink >> AsyncIndexChangeCollector::MultiIndexedTableUpdateOneIndexedColumn [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow >> DataShardReadIteratorBatchMode::RangeFromInclusive [GOOD] >> DataShardReadIteratorBatchMode::RangeFromNonInclusive >> AnalyzeColumnshard::AnalyzeRebootSaInAggregate >> DataShardReadIterator::ShouldReadRangeChunk3 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk5 |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> CdcStreamChangeCollector::UpsertToSameKeyWithImages [GOOD] >> CdcStreamChangeCollector::UpsertModifyDelete >> CdcStreamChangeCollector::IndexAndStreamUpsert [GOOD] >> CdcStreamChangeCollector::NewImage >> AnalyzeColumnshard::AnalyzeTable >> DataShardReadIterator::ShouldCancelMvccSnapshotFromFuture [GOOD] >> DataShardReadIterator::ShouldCommitLocksWhenReadWriteInOneTransaction >> DataShardSnapshots::LockedWriteWithAsyncIndex-WithRestart+UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndex+WithRestart+UseSink >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead+UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead-UseSink >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeLeftBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeLeftBorder-EvWrite >> KqpService::RangeCache+UseCache [GOOD] >> CdcStreamChangeCollector::DeleteNothing [GOOD] >> CdcStreamChangeCollector::DeleteSingleRow >> KqpExplain::ComplexJoin [GOOD] >> KqpExplain::CompoundKeyRange >> test_sql_streaming.py::test[suites-ReadWriteTopic-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadWriteTopicWithSchema-default.txt] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue-EvWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/service/unittest >> KqpService::RangeCache+UseCache [GOOD] Test command err: Trying to start YDB, gRPC: 28337, MsgBus: 2140 2025-06-25T14:55:53.788776Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519900841945776273:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:55:53.788844Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/0017af/r3tmp/tmpFBIzcQ/pdisk_1.dat 2025-06-25T14:55:54.175896Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28337, node 1 2025-06-25T14:55:54.213083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:55:54.213191Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:55:54.214828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:55:54.226674Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:55:54.226702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:55:54.226711Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:55:54.226845Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2140 TClient is connected to server localhost:2140 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:55:54.795323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T14:55:54.797197Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; waiting... waiting... 
2025-06-25T14:55:54.815111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:54.944032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:55.121690Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:55.199968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T14:55:56.691923Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900854830679768:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:56.692009Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:57.028600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:57.054605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:57.085554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:57.119641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:57.148370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:57.225881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:57.275218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T14:55:57.337553Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900859125647732:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:57.337652Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:57.337804Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519900859125647737:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:55:57.341289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:55:57.353420Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519900859125647739:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T14:55:57.418325Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519900859125647790:3420] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:55:58.390236Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MjIxMmNiZWYtN2U3YTE0N2EtMzUwNWJjMTItNzk1ODNhOTg=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MjIxMmNiZWYtN2U3YTE0N2EtMzUwNWJjMTItNzk1ODNhOTg= 2025-06-25T14:55:58.390784Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=MjIxMmNiZWYtN2U3YTE0N2EtMzUwNWJjMTItNzk1ODNhOTg=, ActorId: [1:7519900863420615354:2473], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:55:58.399485Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MzhkNWNiZWMtZjdjNmZlODYtZGRlMzUzZmYtYWE1NzI2MzM=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MzhkNWNiZWMtZjdjNmZlODYtZGRlMzUzZmYtYWE1NzI2MzM= 2025-06-25T14:55:58.399694Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=MzhkNWNiZWMtZjdjNmZlODYtZGRlMzUzZmYtYWE1NzI2MzM=, ActorId: [1:7519900863420615356:2475], ActorState: unknown state, session actor bootstrapped 2025-06-25T14:55:58.417358Z node 1 ... 4046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:17.898139Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:01:17.898174Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded Trying to start YDB, gRPC: 20185, MsgBus: 2195 2025-06-25T15:02:56.626097Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7519902658148035300:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:56.626187Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0017af/r3tmp/tmpBCPxOP/pdisk_1.dat 2025-06-25T15:02:56.774373Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:56.793278Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:56.793417Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:56.796551Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20185, node 8 2025-06-25T15:02:56.846635Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:56.846674Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:56.846686Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T15:02:56.846868Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2195 TClient is connected to server localhost:2195 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:57.472908Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:57.492023Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:57.575570Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:57.719944Z node 8 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:57.779979Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:57.867449Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:03:01.516606Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519902679622873395:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:01.516741Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:01.556225Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:01.595610Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:01.632964Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7519902658148035300:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:03:01.633893Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:03:01.637558Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:01.688118Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:01.730313Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:01.773061Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:01.848187Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:01.918703Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: 
[8:7519902679622874061:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:01.918806Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:01.918879Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7519902679622874066:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:01.922604Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:01.933893Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7519902679622874068:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:03:02.013980Z node 8 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [8:7519902683917841415:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:11.764690Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:03:11.764720Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded took: 12.028860s took: 12.034186s took: 12.036933s took: 12.038422s took: 12.040740s took: 12.040859s took: 12.041198s took: 12.042309s took: 12.045630s took: 12.047370s >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataNestedDeep-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataWithFilter-default.txt] >> test_sql_streaming.py::test[suites-GroupByHopExprKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopListKey-default.txt] >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster [GOOD] >> RetryPolicy::TWriteSession_SeqNoShift >> DataShardSnapshots::LockedWritesLimitedPerKey-UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit+UseSink >> DataShardReadIteratorLatency::ReadSplitLatency [GOOD] >> DataShardReadIteratorPageFaults::CancelPageFaultedReadThenDropTable >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow [GOOD] >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn [GOOD] |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_sql_streaming.py::test[suites-GroupByHoppingWithDataWatermarks-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopic-default.txt] >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow [GOOD] >> CdcStreamChangeCollector::UpsertModifyDelete [GOOD] >> CdcStreamChangeCollector::NewImage [GOOD] >> DataShardReadIterator::ShouldReadFromFollower [GOOD] >> DataShardReadIterator::ShouldNotReadFutureMvccFromFollower ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow [GOOD] Test command err: 2025-06-25T15:03:03.330753Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:03.330907Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:03.330962Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013ae/r3tmp/tmpbIljjS/pdisk_1.dat 2025-06-25T15:03:03.650663Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:03:03.656331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:03.696646Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:03.701522Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863780658672 != 1750863780658676 2025-06-25T15:03:03.746717Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:03.746820Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:03.758099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:03.842294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:03.887631Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:638:2539] 2025-06-25T15:03:03.887860Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:03:03.929653Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:03:03.929802Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:03:03.931349Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:03:03.931430Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:03:03.931482Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:03:03.931789Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:03:03.932773Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:03:03.932839Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:665:2539] in generation 1 2025-06-25T15:03:03.933377Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:642:2541] 2025-06-25T15:03:03.933566Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:03:03.941280Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:03:03.941405Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:03:03.942658Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T15:03:03.942713Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T15:03:03.942758Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T15:03:03.943001Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:03:03.943087Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:03:03.943132Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:672:2541] in generation 1 2025-06-25T15:03:03.953802Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:03:03.986205Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:03:03.986398Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:03:03.986476Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:675:2560] 2025-06-25T15:03:03.986516Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:03:03.986545Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:03:03.986567Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:03.986817Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:03:03.986843Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T15:03:03.986871Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:03:03.986896Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:676:2561] 2025-06-25T15:03:03.986907Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T15:03:03.986919Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T15:03:03.986934Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:03.987195Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 
2025-06-25T15:03:03.987254Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:03:03.987362Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:03.987386Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:03.987418Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:03:03.987441Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:03.987466Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-25T15:03:03.987505Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-25T15:03:03.987784Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:631:2535], serverId# [1:649:2545], sessionId# [0:0:0] 2025-06-25T15:03:03.987823Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T15:03:03.987848Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:03.987872Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-25T15:03:03.987894Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:03:03.987976Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:03:03.988137Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:03:03.988196Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:03:03.988586Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:632:2536], serverId# [1:657:2551], sessionId# [0:0:0] 2025-06-25T15:03:03.988733Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T15:03:03.988850Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-25T15:03:03.988893Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-25T15:03:03.990472Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:03.990545Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T15:03:04.001342Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:03:04.001441Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T15:03:04.001994Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-25T15:03:04.002046Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-25T15:03:04.156162Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:694:2573], serverId# [1:697:2576], sessionId# [0:0:0] 2025-06-25T15:03:04.156360Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:695:2574], serverId# [1:698:2577], sessionId# [0:0:0] 2025-06-25T15:03:04.159731Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... HARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T15:03:18.880633Z node 4 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037889 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T15:03:18.881183Z node 4 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037889 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T15:03:18.881753Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:18.883217Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:18.883265Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:03:18.883306Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T15:03:18.883519Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T15:03:18.883656Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:03:18.883986Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:18.884044Z node 4 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 4] schema version# 1 2025-06-25T15:03:18.884424Z node 4 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T15:03:18.884803Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:18.885982Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got 
TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037889 time 0 2025-06-25T15:03:18.886047Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:18.886459Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1000} 2025-06-25T15:03:18.886545Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:03:18.889539Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:03:18.889591Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T15:03:18.889647Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-06-25T15:03:18.889728Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:18.889789Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T15:03:18.889899Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:18.891098Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T15:03:18.891139Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:18.891368Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T15:03:18.891424Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:18.893027Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:18.893067Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:03:18.893126Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T15:03:18.893181Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:18.893222Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T15:03:18.893304Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:18.894300Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:18.894391Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T15:03:18.897789Z node 4 
:TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:03:18.898186Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-25T15:03:18.898259Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-25T15:03:18.898768Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:03:18.898956Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T15:03:18.898997Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:03:18.941208Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:742:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:18.941310Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:752:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:18.941699Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:18.947175Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:18.954369Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:18.954512Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T15:03:19.002506Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:19.121202Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:19.121323Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T15:03:19.124811Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:756:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T15:03:19.160034Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:827:2661] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:19.233790Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykstrmv8rgvj4bbqsab62j9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YWIzZGFlYjYtM2I3YmVkOGEtMThlODk5NGQtYTJjN2EzOTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:03:19.236459Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:896:2692], serverId# [4:897:2693], sessionId# [0:0:0] 2025-06-25T15:03:19.236835Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037889 2025-06-25T15:03:19.237100Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750863799237011 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-25T15:03:19.237294Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-25T15:03:19.248362Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-25T15:03:19.248498Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:19.275575Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:903:2698], serverId# [4:904:2699], sessionId# [0:0:0] 2025-06-25T15:03:19.314964Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:905:2700], serverId# [4:906:2701], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn [GOOD] Test command err: 2025-06-25T15:03:03.205828Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:03.205966Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:03.206065Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001391/r3tmp/tmpGEJlA8/pdisk_1.dat 2025-06-25T15:03:03.526753Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:03:03.535146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:03.587772Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:03.594309Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863780658539 != 1750863780658543 2025-06-25T15:03:03.641181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:03.641323Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:03.653656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:03.746703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:03.806614Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:638:2539] 2025-06-25T15:03:03.806898Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:03:03.856919Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:03:03.857114Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:03:03.859077Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:03:03.859190Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:03:03.859329Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:03:03.859745Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:03:03.861038Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:03:03.861141Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:665:2539] in generation 1 2025-06-25T15:03:03.861859Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:642:2541] 2025-06-25T15:03:03.862076Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:03:03.871411Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:03:03.871586Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:03:03.873197Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T15:03:03.873267Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T15:03:03.873323Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T15:03:03.873673Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:03:03.873784Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:03:03.873847Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:672:2541] in generation 1 2025-06-25T15:03:03.884806Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:03:03.916657Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:03:03.916902Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:03:03.917022Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:675:2560] 2025-06-25T15:03:03.917069Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:03:03.917102Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:03:03.917144Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:03.917474Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:03:03.917518Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T15:03:03.917572Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:03:03.917624Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:676:2561] 2025-06-25T15:03:03.917647Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T15:03:03.917670Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T15:03:03.917696Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:03.918150Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 
2025-06-25T15:03:03.918262Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:03:03.918433Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:03.918474Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:03.918533Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:03:03.918575Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:03.918619Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-25T15:03:03.918675Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-25T15:03:03.919116Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:631:2535], serverId# [1:649:2545], sessionId# [0:0:0] 2025-06-25T15:03:03.919183Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T15:03:03.919224Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:03.919274Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-25T15:03:03.919311Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:03:03.919443Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:03:03.919702Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:03:03.919791Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:03:03.920278Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:632:2536], serverId# [1:657:2551], sessionId# [0:0:0] 2025-06-25T15:03:03.920462Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T15:03:03.920611Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-25T15:03:03.920666Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-25T15:03:03.922494Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:03.922575Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T15:03:03.933441Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:03:03.933542Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T15:03:03.934099Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-25T15:03:03.934157Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-25T15:03:04.085784Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:694:2573], serverId# [1:697:2576], sessionId# [0:0:0] 2025-06-25T15:03:04.085948Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:695:2574], serverId# [1:698:2577], sessionId# [0:0:0] 2025-06-25T15:03:04.090388Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... uckets per mediator 2 2025-06-25T15:03:18.881344Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:18.882875Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037889 time 0 2025-06-25T15:03:18.882940Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:18.883434Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1000} 2025-06-25T15:03:18.883514Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:03:18.886148Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:03:18.886209Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T15:03:18.886260Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-06-25T15:03:18.886358Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:18.886430Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T15:03:18.886552Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:18.887957Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T15:03:18.887997Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:18.888177Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T15:03:18.888227Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: 
TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:18.889892Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:18.889929Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:03:18.889967Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T15:03:18.890019Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:18.890057Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T15:03:18.890133Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:18.891167Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:18.891252Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T15:03:18.895108Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:03:18.895588Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-25T15:03:18.895667Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-25T15:03:18.896160Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:03:18.896348Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T15:03:18.896425Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:03:18.933640Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:742:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:18.933727Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:752:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:18.934018Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:18.938933Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:18.944438Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:18.944523Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T15:03:18.991263Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:19.112589Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:19.112701Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T15:03:19.115880Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:756:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T15:03:19.151474Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:827:2661] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:19.230535Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykstrmm8p29qc3exte7g7w3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZDkyY2NjYjQtZjFkMDk5ODktNTIwNGZjNzMtMTBhYTQzYTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:03:19.233017Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:896:2692], serverId# [4:897:2693], sessionId# [0:0:0] 2025-06-25T15:03:19.233415Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037889 2025-06-25T15:03:19.233710Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750863799233596 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-25T15:03:19.233893Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-25T15:03:19.244992Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-25T15:03:19.245086Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:19.343987Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykstrzr29xwh6aty3shdq7g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MmY0OWQzMTYtZDRhODMyODQtNGExY2Y1NzMtM2M5NmU5OTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:03:19.345880Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:3] at 72075186224037889 2025-06-25T15:03:19.346167Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1750863799346062 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-25T15:03:19.346341Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1750863799346062 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-25T15:03:19.346429Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:3] at 72075186224037889, row count=1 2025-06-25T15:03:19.360908Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-25T15:03:19.360971Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:19.388268Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:937:2724], serverId# [4:938:2725], sessionId# [0:0:0] 2025-06-25T15:03:19.393990Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:939:2726], serverId# [4:940:2727], sessionId# [0:0:0] >> DataShardReadIterator::ShouldReadRangePrefix5 [GOOD] >> DataShardReadIterator::ShouldReceiveErrorAfterSplit >> DataShardSnapshots::VolatileSnapshotCleanupOnFinish [GOOD] >> DataShardSnapshots::VolatileSnapshotRenameTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow [GOOD] Test command err: 2025-06-25T15:03:03.135824Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:03.135965Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:03.136014Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001394/r3tmp/tmpfv6Rky/pdisk_1.dat 2025-06-25T15:03:03.526663Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:03:03.534931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:03.586041Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:03.594319Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863780658396 != 1750863780658400 2025-06-25T15:03:03.641174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:03.641299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:03.653521Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:03.746514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:03.801618Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:638:2539] 2025-06-25T15:03:03.801930Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:03:03.836946Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:03:03.837077Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:03:03.839717Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:03:03.839796Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:03:03.839841Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:03:03.841516Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:03:03.842335Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:03:03.842405Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:665:2539] in generation 1 2025-06-25T15:03:03.842819Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:642:2541] 2025-06-25T15:03:03.843019Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:03:03.849083Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:03:03.849188Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:03:03.850237Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T15:03:03.850286Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T15:03:03.850323Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T15:03:03.850523Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:03:03.850604Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:03:03.850645Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:672:2541] in generation 1 2025-06-25T15:03:03.861341Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:03:03.895235Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:03:03.896496Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:03:03.896631Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:675:2560] 2025-06-25T15:03:03.896684Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:03:03.896716Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:03:03.896765Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:03.897697Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:03:03.897745Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T15:03:03.897802Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:03:03.897873Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:676:2561] 2025-06-25T15:03:03.897897Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T15:03:03.897933Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T15:03:03.897956Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:03.898381Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 
2025-06-25T15:03:03.898472Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:03:03.898653Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:03.898696Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:03.898787Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:03:03.898824Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:03.898867Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-25T15:03:03.898921Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-25T15:03:03.899975Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:631:2535], serverId# [1:649:2545], sessionId# [0:0:0] 2025-06-25T15:03:03.900054Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T15:03:03.900094Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:03.900132Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-25T15:03:03.900167Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:03:03.900305Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:03:03.900656Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:03:03.900743Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:03:03.901189Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:632:2536], serverId# [1:657:2551], sessionId# [0:0:0] 2025-06-25T15:03:03.901329Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T15:03:03.901452Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-25T15:03:03.901499Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-25T15:03:03.903267Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:03.903356Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T15:03:03.916986Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:03:03.917083Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T15:03:03.917623Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-25T15:03:03.917676Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-25T15:03:04.073606Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:694:2573], serverId# [1:697:2576], sessionId# [0:0:0] 2025-06-25T15:03:04.073772Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:695:2574], serverId# [1:698:2577], sessionId# [0:0:0] 2025-06-25T15:03:04.078505Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-25T15:03:19.348041Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T15:03:19.348076Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-25T15:03:19.348114Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037890 2025-06-25T15:03:19.348168Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037890 at tablet 72075186224037890 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:19.348238Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T15:03:19.348736Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T15:03:19.353317Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:03:19.354010Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-25T15:03:19.354088Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-25T15:03:19.355065Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:03:19.355431Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037890 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:03:19.355650Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T15:03:19.355698Z node 4 :TX_DATASHARD DEBUG: 
datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:03:19.356581Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037890 state Ready 2025-06-25T15:03:19.356630Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2025-06-25T15:03:19.392944Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:791:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.393073Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:802:2655], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.393181Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.399060Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:19.406707Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:19.406840Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T15:03:19.406900Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-25T15:03:19.496121Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:19.618923Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:19.619067Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T15:03:19.619133Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-25T15:03:19.623043Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:805:2658], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T15:03:19.664136Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:877:2699] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:19.748506Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyksts2y8ysh2tgastwgfkkp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YjdlMWVlYjctZGEyZTYzYmEtMjAzZDkwMzQtMzk1Mjg1MGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:03:19.751238Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:982:2742], serverId# [4:983:2743], sessionId# [0:0:0] 2025-06-25T15:03:19.751690Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:2] at 72075186224037889 2025-06-25T15:03:19.751984Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750863799751877 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-25T15:03:19.752184Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1750863799751877 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-25T15:03:19.752285Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-25T15:03:19.763512Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-25T15:03:19.763607Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:19.841158Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykstseq52j16jjf4ywn9egh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Nzk5YjIzZTUtYjA3OGEzYTUtZDZhMDY3NzgtZjVmNjQ4MjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:03:19.843341Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:3] at 72075186224037889 2025-06-25T15:03:19.843658Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1750863799843555 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-25T15:03:19.843853Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 4 Group: 1750863799843555 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-25T15:03:19.843956Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 5 Group: 1750863799843555 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-25T15:03:19.844085Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 6 Group: 1750863799843555 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 24b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-25T15:03:19.844163Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:3] at 72075186224037889, row count=1 2025-06-25T15:03:19.856288Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 5 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 24 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-25T15:03:19.856416Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:19.889843Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1031:2782], serverId# [4:1032:2783], sessionId# [0:0:0] 2025-06-25T15:03:19.896609Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1033:2784], serverId# [4:1034:2785], sessionId# [0:0:0] >> CdcStreamChangeCollector::DeleteSingleRow [GOOD] >> AnalyzeColumnshard::AnalyzeDeadline ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::NewImage [GOOD] Test command err: 2025-06-25T15:03:03.433583Z node 1 :KQP_WORKLOAD_SERVICE WARN: 
helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:03.433812Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:03.433868Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00139f/r3tmp/tmpIKdw02/pdisk_1.dat 2025-06-25T15:03:03.749037Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:03:03.751441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:03.784409Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:03.791422Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-25T15:03:03.791920Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863780658524 != 1750863780658528 2025-06-25T15:03:03.837915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:03.838053Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:03.849455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:03.931580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:03.973807Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:03:03.974099Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:03:04.025601Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:03:04.025771Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:03:04.027551Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:03:04.027631Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:03:04.027686Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:03:04.028035Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:03:04.028171Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:03:04.028251Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:03:04.039082Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:03:04.070664Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:03:04.070871Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:03:04.070969Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:03:04.071099Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:03:04.071137Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:03:04.071170Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:04.071684Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:03:04.071806Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:03:04.071865Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:04.071900Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:04.071937Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:03:04.071980Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:04.072435Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:03:04.072612Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:03:04.072893Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:03:04.072988Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:03:04.074761Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:04.085416Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:03:04.085528Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T15:03:04.237653Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], 
sessionId# [0:0:0] 2025-06-25T15:03:04.247665Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T15:03:04.247737Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:04.248453Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:04.248496Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:03:04.248538Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T15:03:04.248737Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T15:03:04.248855Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:03:04.249211Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:04.249264Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T15:03:04.250559Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T15:03:04.250844Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:04.252026Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T15:03:04.252066Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:04.252485Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T15:03:04.252551Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:04.253067Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:04.253109Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:03:04.253140Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T15:03:04.253183Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:04.253221Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T15:03:04.253284Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:04.256400Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:04.257768Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T15:03:04.257820Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:03:04.258521Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:03:04.290335Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:03:04.290493Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-25T15:03:0 ... pp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:19.516570Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:03:19.516634Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T15:03:19.516704Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:19.516778Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T15:03:19.516886Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:19.517978Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:19.520087Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T15:03:19.520165Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:03:19.521112Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:03:19.562016Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:03:19.562190Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-25T15:03:19.562245Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-25T15:03:19.562284Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at 
tablet 72075186224037888 2025-06-25T15:03:19.563572Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:19.588592Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:03:19.681764Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:19.810152Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T15:03:19.810228Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:19.810508Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:19.810559Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:03:19.810615Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-06-25T15:03:19.810841Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-25T15:03:19.810982Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:03:19.811263Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:19.812013Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:19.871409Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-25T15:03:19.871538Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:19.871581Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:19.871630Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:19.871706Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:19.871784Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-25T15:03:19.871891Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:19.874355Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-25T15:03:19.874458Z node 4 :TX_DATASHARD DEBUG: 
datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:03:19.951378Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:839:2677], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.951490Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:848:2682], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.951572Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.957909Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:19.965197Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:20.145923Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:20.152021Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:853:2685], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:03:20.179810Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:909:2722] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:20.250280Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykstsmd0x5nz876hw40qncz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OGZlZmQyNmMtZTk5NTg5OTQtYmU4NjY3YmEtODMzMGIxYjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:03:20.252925Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:940:2739], serverId# [4:941:2740], sessionId# [0:0:0] 2025-06-25T15:03:20.253340Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:3] at 72075186224037888 2025-06-25T15:03:20.253628Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750863800253526 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 40b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-25T15:03:20.253836Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-25T15:03:20.267476Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 40 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-25T15:03:20.267567Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:20.348009Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykstsyf903knvh7h435bk74, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZGJlYjFkMGMtNmJjYzI1ZjEtYmE2YzE1M2MtMTRmOTllNWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:03:20.350147Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:4] at 72075186224037888 2025-06-25T15:03:20.350440Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1750863800350333 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-25T15:03:20.350590Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-25T15:03:20.361557Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 18 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-25T15:03:20.361685Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:20.363404Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:968:2758], serverId# [4:969:2759], sessionId# [0:0:0] 2025-06-25T15:03:20.369382Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:970:2760], serverId# [4:971:2761], sessionId# [0:0:0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::UpsertModifyDelete [GOOD] Test command err: 2025-06-25T15:03:03.357731Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:03.357926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:03.357997Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013a7/r3tmp/tmpasZdYp/pdisk_1.dat 2025-06-25T15:03:03.671738Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:03:03.674868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:03.711072Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:03.715740Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-25T15:03:03.716365Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863780658584 != 1750863780658588 2025-06-25T15:03:03.763088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:03.763213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:03.774575Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:03.854982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:03.893917Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:03:03.894207Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:03:03.938366Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:03:03.938507Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:03:03.940293Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:03:03.940483Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:03:03.940533Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:03:03.940907Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:03:03.941053Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:03:03.941144Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:03:03.952162Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:03:03.991815Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:03:03.992011Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:03:03.992175Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:03:03.992236Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:03:03.992272Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:03:03.992302Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:03.992765Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:03:03.992851Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:03:03.992914Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:03.992949Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:03.992999Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:03:03.993047Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:03.993406Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:03:03.993549Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:03:03.993790Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:03:03.993869Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:03:03.995368Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:04.006029Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:03:04.006130Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T15:03:04.155274Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], 
sessionId# [0:0:0] 2025-06-25T15:03:04.170086Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T15:03:04.170158Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:04.170994Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:04.171051Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:03:04.171101Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T15:03:04.171335Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T15:03:04.171472Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:03:04.171885Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:04.171965Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T15:03:04.173785Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T15:03:04.174156Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:04.175720Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T15:03:04.175764Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:04.176208Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T15:03:04.176279Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:04.176865Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:04.176916Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:03:04.176961Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T15:03:04.177013Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:04.177071Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T15:03:04.177142Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:04.181117Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:04.182632Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T15:03:04.182688Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:03:04.183432Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:03:04.218007Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:03:04.218158Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-25T15:03:0 ... UG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:03:19.541781Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:03:19.541929Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-25T15:03:19.541971Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-25T15:03:19.542006Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-06-25T15:03:19.542995Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:19.567342Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:03:19.669579Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:19.795181Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T15:03:19.795239Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:19.795478Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:19.795520Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:03:19.795569Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 
2025-06-25T15:03:19.795751Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-25T15:03:19.795883Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:03:19.796115Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:19.796783Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:19.842932Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-25T15:03:19.843021Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:19.843059Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:19.843101Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:19.843171Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:19.843226Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-25T15:03:19.843322Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:19.844996Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-25T15:03:19.845084Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:03:19.879680Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:839:2677], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.879761Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:848:2682], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.879823Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.883696Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:19.890381Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:20.067008Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:20.070789Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:853:2685], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:03:20.097837Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:909:2722] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:20.196227Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykstsj676ryzwz7hwp5xv7t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MTRmNTNhNzItNmU4NmYyNWItZTk0NTM0OWQtY2E4Y2Q1NDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:03:20.198754Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:940:2739], serverId# [4:941:2740], sessionId# [0:0:0] 2025-06-25T15:03:20.199179Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:3] at 72075186224037888 2025-06-25T15:03:20.199459Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750863800199355 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-25T15:03:20.199658Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-25T15:03:20.210795Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-25T15:03:20.210905Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:20.272143Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykstswq4ektz5xb4c9ns2a2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ODM4YjcwNjEtNWQ0M2VhZDctNmI2NzE0NTgtMzE5YWU4N2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:03:20.274437Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:4] at 72075186224037888 2025-06-25T15:03:20.274715Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1750863800274624 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 50b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-25T15:03:20.274850Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-25T15:03:20.285969Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 50 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-25T15:03:20.286046Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:20.371301Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykstsz180cfrza9x39vge1v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YTgwNDI4ZTgtZTQ5MWQ0ZTItNDg5OWY2OWYtNGI0YWQyMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:03:20.373572Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:5] at 72075186224037888 2025-06-25T15:03:20.373907Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1750863800373789 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-25T15:03:20.374058Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:5] at 72075186224037888, row count=1 2025-06-25T15:03:20.386781Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-25T15:03:20.386852Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:20.388604Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:987:2769], serverId# [4:988:2770], sessionId# [0:0:0] 2025-06-25T15:03:20.394812Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:989:2771], serverId# [4:990:2772], sessionId# [0:0:0] >> KqpParams::BadParameterType [GOOD] >> DataShardReadIteratorBatchMode::RangeFromNonInclusive [GOOD] >> DataShardReadIteratorBatchMode::RangeToInclusive |92.9%| [TA] $(B)/ydb/core/kqp/ut/service/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeReqDistribution >> test_auditlog.py::test_single_dml_query_logged[upsert] >> TraverseDatashard::TraverseTwoTablesTwoServerlessDbs >> DataShardReadIterator::ShouldReadRangeChunk5 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk7 >> AnalyzeColumnshard::AnalyzeRebootSaBeforeReqDistribution >> DataShardReadIterator::ShouldReadFromHeadWithConflict-UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::DeleteSingleRow [GOOD] Test command err: 2025-06-25T15:03:05.154870Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:05.155045Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:05.155101Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00137e/r3tmp/tmptF7jvE/pdisk_1.dat 2025-06-25T15:03:05.480788Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:03:05.489153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:05.538410Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:05.549215Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863782758274 != 1750863782758278 2025-06-25T15:03:05.596595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:05.596748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:05.608116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:05.693163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:05.740012Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:638:2539] 2025-06-25T15:03:05.740254Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:03:05.782688Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:03:05.782858Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:03:05.784506Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:03:05.784694Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:03:05.784748Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:03:05.785119Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:03:05.786119Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:03:05.786197Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:665:2539] in generation 1 2025-06-25T15:03:05.786737Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:642:2541] 2025-06-25T15:03:05.786924Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:03:05.794725Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:03:05.794867Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:03:05.796191Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T15:03:05.796249Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T15:03:05.796291Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T15:03:05.796569Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:03:05.796654Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:03:05.796702Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:672:2541] in generation 1 2025-06-25T15:03:05.808901Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:03:05.831394Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:03:05.831601Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:03:05.831694Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:675:2560] 2025-06-25T15:03:05.831735Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:03:05.831767Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:03:05.831796Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:05.832089Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:03:05.832128Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T15:03:05.832175Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:03:05.832221Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:676:2561] 2025-06-25T15:03:05.832240Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T15:03:05.832261Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T15:03:05.832280Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:05.832662Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 
2025-06-25T15:03:05.832760Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:03:05.832905Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:05.832940Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:05.832975Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:03:05.833008Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:05.833044Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-25T15:03:05.833091Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-25T15:03:05.833499Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:631:2535], serverId# [1:649:2545], sessionId# [0:0:0] 2025-06-25T15:03:05.833559Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-25T15:03:05.833592Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:05.833626Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-25T15:03:05.833659Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:03:05.833773Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:03:05.833998Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:03:05.834082Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:03:05.834524Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:632:2536], serverId# [1:657:2551], sessionId# [0:0:0] 2025-06-25T15:03:05.834657Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-25T15:03:05.834779Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-25T15:03:05.834824Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-25T15:03:05.836378Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:05.836449Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-25T15:03:05.847791Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:03:05.847888Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T15:03:05.848372Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-25T15:03:05.848422Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-25T15:03:06.014569Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:694:2573], serverId# [1:697:2576], sessionId# [0:0:0] 2025-06-25T15:03:06.014712Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:695:2574], serverId# [1:698:2577], sessionId# [0:0:0] 2025-06-25T15:03:06.018864Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 10 ... pp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:20.582009Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:03:20.582085Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T15:03:20.582161Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:20.582224Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T15:03:20.582320Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:20.583296Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:20.585170Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T15:03:20.585256Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:03:20.586161Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:03:20.625689Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:03:20.625873Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-25T15:03:20.625933Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-25T15:03:20.625971Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 
72075186224037888 2025-06-25T15:03:20.627228Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:20.652267Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:03:20.752977Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:20.887304Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T15:03:20.887386Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:20.887674Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:20.887728Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:03:20.887782Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-06-25T15:03:20.888016Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-25T15:03:20.888159Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:03:20.888780Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:20.889623Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:20.949344Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-25T15:03:20.949471Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:20.949521Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:20.949573Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:20.949672Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:20.949751Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-25T15:03:20.949865Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:20.952093Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-25T15:03:20.952197Z node 4 :TX_DATASHARD DEBUG: 
datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:03:20.995940Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:839:2677], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:20.996061Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:848:2682], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:20.996157Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:21.001891Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:21.008937Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:21.217398Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:03:21.220877Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:853:2685], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:03:21.247763Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:909:2722] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:21.319553Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jyksttn28jw188k4r7s82hny, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NTQ5OTYwNWQtODBiNjFkYzEtYTQ1YjYwMzEtNDMwMmFlYjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:03:21.322419Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:940:2739], serverId# [4:941:2740], sessionId# [0:0:0] 2025-06-25T15:03:21.322880Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:3] at 72075186224037888 2025-06-25T15:03:21.323244Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1750863801323113 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-25T15:03:21.323486Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-25T15:03:21.347560Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-25T15:03:21.347659Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:21.471313Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykstv1n23bpvee9kc28cjrs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Yjg0ZWU0MTUtMzIxMTNlOGMtZGQ4YTA5YjEtY2M4MDhlOWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:03:21.473391Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:4] at 72075186224037888 2025-06-25T15:03:21.473731Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1750863801473587 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-25T15:03:21.473902Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-25T15:03:21.485185Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-25T15:03:21.485286Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:21.487279Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:968:2758], serverId# [4:969:2759], sessionId# [0:0:0] 2025-06-25T15:03:21.493856Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:970:2760], serverId# [4:971:2761], sessionId# [0:0:0] >> DataShardReadIterator::ShouldCommitLocksWhenReadWriteInOneTransaction [GOOD] >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict+UseSink >> DataShardReadIterator::ShouldCommitLocksWhenReadWriteInSeparateTransactions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpParams::BadParameterType [GOOD] Test command err: Trying to start YDB, gRPC: 10164, MsgBus: 21178 2025-06-25T15:00:51.203683Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902124969069374:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.203801Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00145d/r3tmp/tmplZgoeD/pdisk_1.dat 2025-06-25T15:00:51.695434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:51.695541Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:51.704698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:51.731821Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:51.732588Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902124969069212:2080] 1750863651176993 != 1750863651176996 TServer::EnableGrpc on GrpcPort 10164, node 1 2025-06-25T15:00:51.822466Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:51.822496Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:51.822511Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:51.822648Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:00:52.199142Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:21178 TClient is connected to server localhost:21178 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:52.615086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:00:52.652052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:00:52.828293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:53.010817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:00:53.091932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.607697Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902137853972732:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.607798Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.889881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.959623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.991747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.021877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.057469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.122030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.192407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.256436Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902142148940691:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.256520Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.256579Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902142148940696:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.259890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.269924Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902142148940698:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:00:55.374760Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902142148940751:3426] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.189758Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902124969069374:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.189813Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:00:56.487109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:06.678659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot ... ZDA2NjYtZDQwMDY2MGEtY2ExOWRhOTY=, ActorId: [3:2661:4003], ActorState: ExecuteState, TraceId: 01jykstj6w1t2sy6g1smjfxz0k, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 64661, MsgBus: 4051 2025-06-25T15:03:13.905399Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519902733408831730:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:03:13.905485Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00145d/r3tmp/tmpvKhjlu/pdisk_1.dat 2025-06-25T15:03:14.051488Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:14.066238Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:14.066341Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:14.070723Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64661, node 4 2025-06-25T15:03:14.137004Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:14.137036Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:14.137049Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:14.137207Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4051 TClient is connected to server localhost:4051 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:03:14.834745Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:03:14.855044Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:14.913650Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:14.951145Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:15.193444Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T15:03:15.280991Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:18.908784Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519902733408831730:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:03:18.908876Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:03:18.980498Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902754883669818:2369], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:18.980604Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.029836Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:19.071000Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:19.140756Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:19.187701Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:19.225013Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:19.301101Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:19.348392Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:19.431407Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902759178637775:2432], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.431510Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.431783Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519902759178637780:2435], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:19.437222Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:19.456181Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519902759178637782:2436], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:03:19.529903Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519902759178637833:3427] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:21.275415Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=ZDk5OWIyNTgtNGI5NGE4NzAtODhhMmE3YWEtMTI1NzcxMTI=, ActorId: [4:7519902767768572724:2479], ActorState: ExecuteState, TraceId: 01jyksttr90batq3b2qkzvk309, Create QueryResponse for error on request, msg: ydb/core/kqp/session_actor/kqp_session_actor.cpp:1374: ydb/core/kqp/query_data/kqp_query_data.cpp:271: Parameter $group type mismatch, expected: { Kind: Data Data { Scheme: 2 } }, actual: Type (Data), schemeType: Int32, schemeTypeId: 1 |92.9%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/service/test-results/unittest/{meta.json ... results_accumulator.log} |92.9%| [TA] $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... results_accumulator.log} |92.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... results_accumulator.log} >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeLeftBorder-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder+EvWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead-UseSink [GOOD] Test command err: 2025-06-25T15:02:04.994019Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:04.994222Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:04.994287Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b51/r3tmp/tmpLWg53r/pdisk_1.dat 2025-06-25T15:02:05.376378Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:05.386743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:05.435970Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:05.444554Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863722361691 != 1750863722361695 2025-06-25T15:02:05.530091Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NTFmMjJhY2UtMjcyM2M2NGUtOTVmZTY4ODItYjJjNGRmYjY=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NTFmMjJhY2UtMjcyM2M2NGUtOTVmZTY4ODItYjJjNGRmYjY= 2025-06-25T15:02:05.530895Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NTFmMjJhY2UtMjcyM2M2NGUtOTVmZTY4ODItYjJjNGRmYjY=, ActorId: [1:580:2502], ActorState: unknown state, session actor bootstrapped 2025-06-25T15:02:05.536001Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=NTFmMjJhY2UtMjcyM2M2NGUtOTVmZTY4ODItYjJjNGRmYjY=, ActorId: [1:580:2502], ActorState: ReadyState, TraceId: 01jyksrgyz3d1dx16vfavrvkew, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: CREATE TABLE `/Root/table1` (key int, value int, PRIMARY KEY (key)); rpcActor: [0:0:0] database: databaseId: /Root pool id: default 2025-06-25T15:02:05.954430Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:584:2505], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:05.954611Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:06.104704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:06.104839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:06.107702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:06.129253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:06.157225Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:647:2539], Recipient [1:652:2542]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:06.158479Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:647:2539], Recipient [1:652:2542]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:06.158968Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:652:2542] 2025-06-25T15:02:06.159223Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:06.201403Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:647:2539], Recipient [1:652:2542]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:06.202157Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:06.202283Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:06.203985Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:02:06.204073Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:06.204128Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:06.204541Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:06.204694Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:06.204786Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:666:2542] in generation 1 2025-06-25T15:02:06.205217Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:06.266396Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:06.266641Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:06.266768Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:668:2551] 2025-06-25T15:02:06.266809Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:06.266842Z node 1 
:TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:06.266877Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:06.267092Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:652:2542], Recipient [1:652:2542]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:06.267138Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:06.267372Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:06.267469Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:06.267555Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:06.267591Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:06.267639Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:06.267719Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:06.267773Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:06.267805Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:06.267848Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:06.300579Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:671:2553], Recipient [1:652:2542]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:06.300658Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:06.300729Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:643:2537], serverId# [1:671:2553], sessionId# [0:0:0] 2025-06-25T15:02:06.300852Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:671:2553] 2025-06-25T15:02:06.300889Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:06.301021Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:06.301230Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:06.301337Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:06.301452Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:06.301500Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: 
Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:02:06.301542Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:02:06.301582Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:02:06.301619Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:06.301918Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:06.301964Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:02:06.302020Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:02:06.302053Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:06.302103Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:02:06.302152Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] ... d_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:03:22.858118Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [13:865:2693], Recipient [13:865:2693]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T15:03:22.858150Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T15:03:22.858217Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:03:22.858406Z node 13 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-25T15:03:22.858509Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit CheckDataTx 2025-06-25T15:03:22.858553Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is Executed 2025-06-25T15:03:22.858580Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit CheckDataTx 2025-06-25T15:03:22.858608Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715666] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:22.858635Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:03:22.858676Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v501/0 IncompleteEdge# v{min} UnprotectedReadEdge# v500/18446744073709551615 ImmediateWriteEdge# v501/18446744073709551615 
ImmediateWriteEdgeReplied# v501/18446744073709551615 2025-06-25T15:03:22.858725Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:281474976715666] at 72075186224037888 2025-06-25T15:03:22.858756Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is Executed 2025-06-25T15:03:22.858782Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:22.858805Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715666] at 72075186224037888 to execution unit ExecuteKqpDataTx 2025-06-25T15:03:22.858843Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit ExecuteKqpDataTx 2025-06-25T15:03:22.858940Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:236: Operation [0:281474976715666] (execute_kqp_data_tx) at 72075186224037888 set memory limit 4193448 2025-06-25T15:03:22.859074Z node 13 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: false 2025-06-25T15:03:22.859166Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:482: add locks to result: 0 2025-06-25T15:03:22.859252Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is Executed 2025-06-25T15:03:22.859280Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit ExecuteKqpDataTx 2025-06-25T15:03:22.859306Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715666] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:03:22.859332Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit FinishPropose 2025-06-25T15:03:22.859391Z node 13 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715666 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-25T15:03:22.859501Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is DelayComplete 2025-06-25T15:03:22.859557Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:03:22.859644Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715666] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:22.859710Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:22.859757Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is Executed 2025-06-25T15:03:22.859800Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:22.859838Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715666] at 72075186224037888 has finished 
2025-06-25T15:03:22.859919Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:03:22.859990Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715666] at 72075186224037888 on unit FinishPropose 2025-06-25T15:03:22.860068Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:22.865946Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [13:63:2110], Recipient [13:865:2693]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715661 LockNode: 13 Status: STATUS_NOT_FOUND 2025-06-25T15:03:23.080619Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykstwfr7y6pbkrr871ktmjt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YzdmNDVjNDMtMjFlNDI5MzktZjM5ZDgxOC02NTllNzVlMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:03:23.082831Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [13:932:2738], Recipient [13:865:2693]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-25T15:03:23.083045Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:03:23.083138Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v501/0 IncompleteEdge# v{min} UnprotectedReadEdge# v500/18446744073709551615 ImmediateWriteEdge# v501/18446744073709551615 ImmediateWriteEdgeReplied# v501/18446744073709551615 2025-06-25T15:03:23.083205Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v501/18446744073709551615 2025-06-25T15:03:23.083302Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CheckRead 2025-06-25T15:03:23.083442Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-25T15:03:23.083515Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:03:23.083579Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:23.083636Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:03:23.083687Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037888 2025-06-25T15:03:23.083748Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-25T15:03:23.083778Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:23.083803Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit ExecuteRead 
2025-06-25T15:03:23.083832Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:03:23.083980Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-25T15:03:23.084277Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[13:932:2738], 0} after executionsCount# 1 2025-06-25T15:03:23.084385Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[13:932:2738], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:23.084503Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[13:932:2738], 0} finished in read 2025-06-25T15:03:23.084602Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-25T15:03:23.084629Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:03:23.084656Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:23.084683Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:23.084730Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-25T15:03:23.084753Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:23.084800Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037888 has finished 2025-06-25T15:03:23.084870Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:03:23.085021Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T15:03:23.086024Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [13:932:2738], Recipient [13:865:2693]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:03:23.086124Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 11 } }, { items { uint32_value: 2 } items { uint32_value: 22 } } >> KqpExplain::CompoundKeyRange [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndex+WithRestart+UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit+UseSink >> TestKinesisHttpProxy::CreateStreamInIncorrectDb >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips+EvWrite >> TTxDataShardMiniKQL::CrossShard_5_AllToAll [GOOD] >> TTxDataShardMiniKQL::CrossShard_6_Local >> 
DataShardReadIteratorPageFaults::CancelPageFaultedReadThenDropTable [GOOD] >> DataShardReadIteratorPageFaults::LocksNotLostOnPageFault |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit+UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit-UseSink >> DataShardReadIterator::ShouldNotReadFutureMvccFromFollower [GOOD] >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc+UseSink >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> DataShardReadIterator::ShouldReceiveErrorAfterSplit [GOOD] >> DataShardReadIterator::ShouldReceiveErrorAfterSplitWhenExhausted |92.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> DataShardReadIteratorBatchMode::RangeToInclusive [GOOD] >> DataShardReadIteratorBatchMode::RangeToNonInclusive >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v1] >> DataShardReadIterator::ShouldReadRangeChunk7 [GOOD] >> DataShardReadIterator::ShouldReadRangeChunk100 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpExplain::CompoundKeyRange [GOOD] Test command err: 2025-06-25T15:00:51.416509Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902123073242628:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.416619Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001459/r3tmp/tmpn1aMOq/pdisk_1.dat 2025-06-25T15:00:52.093113Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:52.113595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:52.113713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:52.121109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18504, node 1 2025-06-25T15:00:52.324867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:52.324895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:52.324902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:52.325014Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:00:52.435133Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:52.442314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) TClient is connected to server localhost:5234 2025-06-25T15:00:52.796354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:00:52.907323Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902127607822475:2159];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:52.907667Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Database/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T15:00:52.973970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:52.974038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:52.986463Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:00:52.988028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:00:53.130093Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:00:53.193056Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:00:53.196608Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:00:53.196740Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:00:53.196874Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:00:53.196947Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:00:53.197147Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:00:53.197227Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:00:53.197296Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:00:53.197364Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:00:53.320573Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:53.320681Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:53.350494Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> 
Connected 2025-06-25T15:00:53.523747Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:00:53.523825Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:00:53.540831Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:00:53.540896Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:00:53.541116Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:00:53.541166Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:00:53.541199Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:00:53.541234Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:00:53.541267Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:00:53.541287Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:00:53.544813Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:53.549540Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:00:53.577560Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:00:53.577612Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:7519902131902790258:2276], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:00:53.588445Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7519902131902790274:2350] 2025-06-25T15:00:53.589426Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7519902131902790274:2350], schemeshard id = 72075186224037897 2025-06-25T15:00:53.644067Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7519902131902790301:2360] 2025-06-25T15:00:53.645290Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:00:53.651897Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:00:53.651926Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:00:53.652215Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. 
Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:00:53.659654Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:00:53.667124Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:53.691347Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:00:53.691409Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:00:53.877550Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:00:53.910939Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:00:53.971984Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:00:55.266172Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902140253113024:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.266255Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.539206Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:00:55.840097Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037906;self_id=[2:7519902140492725285:2319];tablet_id=72075186224037906;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:00:55.840360Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037906;self_id=[2:7519902140492725285:2319];tablet_id=72075186224037906;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:00:55.840681Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037906;self_id=[2:7519902140492725285:2319];tablet_id=72075186224037906;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:00:55.840820Z node 2 :TX_COLUMNS ... Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:18.774494Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19185, node 7 2025-06-25T15:03:18.862476Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:18.862504Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:18.862515Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:18.862670Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29587 TClient is connected to server localhost:29587 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T15:03:19.453617Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:19.471234Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:03:19.554073Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:19.736270Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:19.793456Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:19.878652Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:22.785970Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519902773434973451:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:22.786097Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:22.857621Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:22.935385Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:23.035365Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:23.072139Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:23.107353Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:23.146943Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:23.193598Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:23.270228Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519902777729941405:2431], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:23.270338Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:23.270556Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519902777729941410:2434], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:23.274961Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:23.287455Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7519902777729941412:2435], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:03:23.352249Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519902777729941463:3424] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:23.629326Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519902756255102880:2245];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:03:23.629403Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Logs"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Limit","Limit":"10"},{"Scan":"Parallel","ReadRange":["App (new_app_1)","Ts (49)","Host (null, xyz)"],"E-Size":"0","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/Logs","E-Rows":"1","Table":"Logs","ReadColumns":["App","Host","Message","Ts"],"E-Cost":"0"}],"Node Type":"Limit-TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":2}],"Name":"Limit","Limit":"10"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/Logs","reads":[{"lookup_by":["App (new_app_1)","Ts (49)"],"columns":["App","Host","Message","Ts"],"scan_by":["Host (null, xyz)"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Operators":[{"Scan":"Parallel","ReadRange":["App (new_app_1)","Ts (49)","Host (null, xyz)"],"E-Size":"0","Name":"TableRangeScan","Path":"\/Root\/Logs","E-Rows":"1","Table":"Logs","ReadColumns":["App","Host","Message","Ts"],"E-Cost":"0"}],"Node Type":"TableRangeScan"}],"Operators":[{"Name":"Limit","Limit":"10"}],"Node Type":"Limit"}],"Operators":[{"Name":"Limit","Limit":"10"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} >> DataShardReadIterator::ShouldCommitLocksWhenReadWriteInSeparateTransactions [GOOD] >> DataShardReadIterator::HandlePersistentSnapshotGoneInContinue [GOOD] >> DataShardReadIterator::HandleMvccGoneInContinue [GOOD] >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict+UseSink [GOOD] >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict-UseSink >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] >> AnalyzeColumnshard::AnalyzeTable [GOOD] >> TestKinesisHttpProxy::CreateStreamInIncorrectDb [GOOD] >> TraverseColumnShard::TraverseColumnTableAggrStatUnavailableNode >> TestKinesisHttpProxy::CreateStreamWithInvalidName >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder-EvWrite >> test_sql_streaming.py::test[suites-ReadWriteTopicWithSchema-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] ------- 
[TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::HandleMvccGoneInContinue [GOOD] Test command err: 2025-06-25T15:02:01.927449Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:01.927603Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:01.927658Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dd0/r3tmp/tmp0ibZr6/pdisk_1.dat 2025-06-25T15:02:02.229797Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:02.236517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:02.271737Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:02.276603Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863719121084 != 1750863719121088 2025-06-25T15:02:02.324038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:02.324186Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:02.335439Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:02.419610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.462763Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:02.463810Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:02.464322Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:02:02.464587Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:02.505283Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:02.505821Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:02.505915Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:02.507968Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:02:02.508046Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:02.508102Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:02.510490Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:02.510683Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:02.510769Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:02:02.521612Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:02.552816Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:02.552995Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:02.553087Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:02:02.553122Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:02.553159Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:02.553198Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:02.553391Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.553439Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.553810Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:02.553897Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:02.553968Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:02.554029Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:02.554068Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:02.554103Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:02.554135Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:02.554167Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:02.554210Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:02.554584Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.554645Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.554696Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:02:02.554781Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:02:02.554830Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:02.554946Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:02.555133Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:02.555188Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:02.555270Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:02.555313Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:02:02.555370Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:02:02.555412Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:02:02.555444Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.555699Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:02.555735Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:02:02.555767Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:02:02.555801Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.555862Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:02:02.555895Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:02:02.555930Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:02:02.555960Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:02:02.555995Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:02:02.557340Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:02:02.557396Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:02:02.568376Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:02:02.568450Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.568482Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.568512Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... cution unit LoadTxDetails 2025-06-25T15:03:31.052864Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit LoadTxDetails 2025-06-25T15:03:31.052977Z node 14 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 3500:281474976715665 keys extracted: 0 2025-06-25T15:03:31.053042Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is Executed 2025-06-25T15:03:31.053075Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit LoadTxDetails 2025-06-25T15:03:31.053105Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:31.053134Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-25T15:03:31.053175Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:281474976715665] is the new logically complete end at 72075186224037889 2025-06-25T15:03:31.053214Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:281474976715665] is the new logically incomplete end at 72075186224037889 2025-06-25T15:03:31.053251Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:281474976715665] at 72075186224037889 2025-06-25T15:03:31.053296Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is Executed 2025-06-25T15:03:31.053323Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:31.053349Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit CreateVolatileSnapshot 2025-06-25T15:03:31.053377Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit CreateVolatileSnapshot 2025-06-25T15:03:31.053477Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: 
Execution status for [3500:281474976715665] at 72075186224037889 is ExecutedNoMoreRestarts 2025-06-25T15:03:31.053504Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit CreateVolatileSnapshot 2025-06-25T15:03:31.053554Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit DropVolatileSnapshot 2025-06-25T15:03:31.053591Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit DropVolatileSnapshot 2025-06-25T15:03:31.053646Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is Executed 2025-06-25T15:03:31.053677Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit DropVolatileSnapshot 2025-06-25T15:03:31.053705Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit CompleteOperation 2025-06-25T15:03:31.053735Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit CompleteOperation 2025-06-25T15:03:31.053880Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is DelayComplete 2025-06-25T15:03:31.053912Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit CompleteOperation 2025-06-25T15:03:31.053946Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715665] at 72075186224037889 to execution unit CompletedOperations 2025-06-25T15:03:31.053984Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715665] at 72075186224037889 on unit CompletedOperations 2025-06-25T15:03:31.054023Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715665] at 72075186224037889 is Executed 2025-06-25T15:03:31.054047Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715665] at 72075186224037889 executing on unit CompletedOperations 2025-06-25T15:03:31.054074Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715665] at 72075186224037889 has finished 2025-06-25T15:03:31.054111Z node 14 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:31.054145Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-25T15:03:31.054180Z node 14 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-25T15:03:31.054232Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-25T15:03:31.065345Z node 14 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-25T15:03:31.065544Z node 14 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:31.065639Z node 14 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715665] at 72075186224037888 on unit CompleteOperation 2025-06-25T15:03:31.065753Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715665] from 72075186224037888 at tablet 72075186224037888 send result to client [14:1022:2808], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:31.065856Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:31.066316Z node 14 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-06-25T15:03:31.066370Z node 14 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:03:31.066400Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715665] at 72075186224037889 on unit CompleteOperation 2025-06-25T15:03:31.066454Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715665] from 72075186224037889 at tablet 72075186224037889 send result to client [14:1022:2808], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:31.066508Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:31.068218Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [14:555:2481], Recipient [14:628:2532]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715665 } ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-25T15:03:31.068437Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:03:31.068557Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CheckRead 2025-06-25T15:03:31.068698Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-25T15:03:31.068772Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:03:31.068843Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:31.068902Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:03:31.068955Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:8] at 72075186224037888 2025-06-25T15:03:31.069015Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-25T15:03:31.069047Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:31.069094Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:03:31.069124Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:03:31.069290Z node 14 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715665 } ResultFormat: FORMAT_ARROW } 2025-06-25T15:03:31.069721Z node 14 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/281474976715665 2025-06-25T15:03:31.069814Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[14:555:2481], 1} after executionsCount# 1 2025-06-25T15:03:31.069901Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[14:555:2481], 1} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:31.070133Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[14:555:2481], 1} finished in read 2025-06-25T15:03:31.070254Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-25T15:03:31.070287Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:03:31.070313Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:31.070342Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:31.070391Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-25T15:03:31.070415Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:31.070453Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 72075186224037888 has finished 2025-06-25T15:03:31.070519Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:03:31.070709Z node 14 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 >> TTxDataShardMiniKQL::CrossShard_6_Local [GOOD] >> TTxDataShardMiniKQL::MemoryUsageImmediateHugeTx >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataWithFilter-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicWithSchema-default.txt] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeTable [GOOD] Test command err: 2025-06-25T15:03:18.700901Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:18.701272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:18.701380Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001feb/r3tmp/tmpdUe2kY/pdisk_1.dat 2025-06-25T15:03:19.039748Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14894, node 1 2025-06-25T15:03:19.277830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:19.277882Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:19.277911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:19.278399Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:19.284284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:19.389063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:19.389175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:19.406909Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13703 2025-06-25T15:03:20.071019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:23.082493Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:23.140086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:23.140213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:23.182037Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:23.184662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:23.394536Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:23.430659Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.431364Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.431944Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.432115Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.432596Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.432727Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.432844Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.432917Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.432984Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.659405Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:23.659548Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:23.678174Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:23.833008Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:23.887708Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:23.887820Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:23.933375Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:23.933557Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:23.933777Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:23.933868Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:23.933915Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:23.933968Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:23.934034Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:23.934089Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:23.934587Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:23.958430Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:23.958535Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:23.967582Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:23.974271Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:23.974606Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:23.983260Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:24.000518Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:24.000578Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:24.000642Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:24.012477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:24.024478Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:24.024622Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:24.244366Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:24.388610Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:24.450935Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:25.009194Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:25.260900Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:25.261060Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:25.281399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:25.413070Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:25.413346Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:25.413669Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:25.413803Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:25.413924Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:03:25.414091Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:03:25.414221Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:03:25.414364Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:03:25.414485Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:03:25.414639Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:03:25.414761Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:03:25.452299Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:03:25.452431Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:03:25.452571Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:03:25.452623Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:03:25.452886Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:03:25.452944Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:03:25.453082Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:03:25.453134Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:03:25.453192Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:03:25.453230Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:03:25.453475Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:03:25.453520Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:03:25.453752Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:03:25.453814Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:03:25.453948Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:03:25.453993Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:03:25.454093Z node 2 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:03:25.454170Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:03:25.454227Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:03:25.454776Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:03:25.454851Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:03:25.527742Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];ev=NActors::IEventHandle;tablet_id=72075186224037899;tx_id=281474976715659;this=88923096991680;method=TTxController::StartProposeOnExecute;tx_info=281474976715659:TX_KIND_SCHEMA;min=1970;max=18446744073709551615;plan=0;src=[2:1559:2421];cookie=121:2;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=4;result=not_found; 2025-06-25T15:03:25.575378Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=4;result=not_found; 2025-06-25T15:03:25.589268Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715659; 2025-06-25T15:03:26.404778Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2504:3078], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:26.404938Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:26.407613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-25T15:03:26.541428Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715660; 2025-06-25T15:03:27.293233Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2602:3123], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:27.293386Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:27.296699Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-25T15:03:27.344451Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715661; 2025-06-25T15:03:28.119832Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2717:3164], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:28.120378Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:28.124238Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715662:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/alter_table.cpp:323) 2025-06-25T15:03:28.177702Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:215;event=finished_tx;tx_id=281474976715662; waiting actualization: 0/0.000015s FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; >> TTxDataShardMiniKQL::MemoryUsageImmediateHugeTx [GOOD] >> test_sql_streaming.py::test[suites-ReadTopic-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-ReadTopicGroupWriteToSolomon-default.txt] >> test_sql_streaming.py::test[suites-GroupByHopListKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopNoKey-default.txt] |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips-EvWrite ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::MemoryUsageImmediateHugeTx [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:118:2057] recipient: [1:112:2142] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:118:2057] recipient: [1:112:2142] Leader for TabletID 9437184 is [1:135:2156] sender: [1:138:2057] recipient: [1:112:2142] 2025-06-25T15:02:56.432402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:02:56.432462Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:56.434442Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:56.450804Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:56.451334Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:02:56.451597Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:56.461582Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:56.505501Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:56.506048Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:56.507712Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:02:56.507777Z 
node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:02:56.507902Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:02:56.508226Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:56.508414Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:56.508478Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:208:2156] in generation 2 Leader for TabletID 9437184 is [1:135:2156] sender: [1:216:2057] recipient: [1:14:2061] 2025-06-25T15:02:56.576958Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:56.609721Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:02:56.610710Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:56.610875Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:221:2217] 2025-06-25T15:02:56.610936Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:02:56.610975Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:02:56.611008Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:56.611225Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:56.611275Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:56.612482Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:02:56.612590Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:02:56.612660Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:02:56.612699Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:56.612753Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:02:56.612801Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:02:56.612861Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:02:56.612895Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:02:56.612938Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:02:56.613046Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:217:2214], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:56.613096Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing 
event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:56.613155Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:215:2213], serverId# [1:217:2214], sessionId# [0:0:0] 2025-06-25T15:02:56.617810Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:02:56.617874Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:56.617964Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:02:56.618129Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:02:56.618188Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:02:56.618242Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:02:56.618307Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:02:56.618353Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:02:56.618389Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:02:56.618422Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:02:56.618706Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:56.618741Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:02:56.618775Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:02:56.618808Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:02:56.618865Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:02:56.618916Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:02:56.618952Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:02:56.618989Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:02:56.619020Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:02:56.631282Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:02:56.631355Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:02:56.631390Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:02:56.631452Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:02:56.633319Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:02:56.636875Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:227:2223], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:56.636968Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:56.637017Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:226:2222], serverId# [1:227:2223], sessionId# [0:0:0] 2025-06-25T15:02:56.637103Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-25T15:02:56.637144Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:02:56.637281Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-25T15:02:56.637326Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-25T15:02:56.637384Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:02:56.637432Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:02:56.646145Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:02:56.646227Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:02:56.646449Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:56.646501Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:56.646563Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:02:56.646613Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:02:56.646660Z node 1 :TX_DATASHARD TRACE: datashard_pipelin ... 
ard_impl.h:3132: StateWork, received event# 268830214, Sender [24:292:2273], Recipient [24:238:2229]: NKikimrTabletBase.TEvGetCounters 2025-06-25T15:03:34.077013Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269551617, Sender [24:103:2136], Recipient [24:238:2229]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 103 RawX2: 103079217240 } 2025-06-25T15:03:34.077095Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-25T15:03:34.077456Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [24:297:2277], Recipient [24:238:2229]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:03:34.077514Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:03:34.077569Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [24:296:2276], serverId# [24:297:2277], sessionId# [0:0:0] 2025-06-25T15:03:34.077798Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [24:103:2136], Recipient [24:238:2229]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 103 RawX2: 103079217240 } TxBody: "\032\324\002\037\002\006Arg\005\205\n\205\000\205\004?\000\205\002\202\0047\034MyReads MyWrites\205\004?\000\206\202\024Reply\024Write?\000?\000 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\000\005?\004?\014\005?\002)\211\006\202\203\005\004\213\002\203\004\205\002\203\004\01057$UpdateRow\000\003?\016 h\020\000\000\000\000\000\000\r\000\000\000\000\000\000\000\013?\022\003?\020T\001\005?\026)\211\n?\024\206\203\004?\024? ?\024\203\004\020Fold\000)\211\002?\"\206? \034Collect\000)\211\006?(? \203\004\203\0024ListFromRange\000\003? \000\003?,\003\022z\003?.\004\007\010\000\n\003?\024\000)\251\000? \002\000\004)\251\000?\024\002\000\002)\211\006?$\203\005@? 
?\024\030Invoke\000\003?F\006Add?@?D\001\006\002\014\000\007\016\000\003\005?\010?\014\006\002?\006?R\000\003?\014?\014\037/ \0018\000" TxId: 2 ExecLevel: 0 Flags: 0 2025-06-25T15:03:34.077839Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:03:34.077950Z node 24 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:03:34.085279Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CheckDataTx 2025-06-25T15:03:34.085424Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-25T15:03:34.085496Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CheckDataTx 2025-06-25T15:03:34.085552Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:34.085602Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit BuildAndWaitDependencies 2025-06-25T15:03:34.085676Z node 24 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-25T15:03:34.085775Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 9437184 2025-06-25T15:03:34.085842Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-25T15:03:34.085874Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:34.085908Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit ExecuteDataTx 2025-06-25T15:03:34.085943Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-25T15:03:34.086001Z node 24 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-25T15:03:34.086068Z node 24 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:124: Operation [0:2] at 9437184 requested 132374 more memory 2025-06-25T15:03:34.086124Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-25T15:03:34.086544Z node 24 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:03:34.086606Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-25T15:03:34.086664Z node 24 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-25T15:03:34.096886Z node 24 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:2] at 9437184 exceeded memory limit 132502 and requests 1060016 more for the next try 2025-06-25T15:03:34.097158Z node 24 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 2 released its data 2025-06-25T15:03:34.097228Z node 24 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-25T15:03:34.097567Z node 24 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:03:34.097609Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-25T15:03:34.098574Z node 24 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 2 at 9437184 restored its data 2025-06-25T15:03:34.098638Z node 24 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-25T15:03:34.099201Z node 24 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:2] at 9437184 exceeded memory limit 1192518 and requests 9540144 more for the next try 2025-06-25T15:03:34.099296Z node 24 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 2 released its data 2025-06-25T15:03:34.099336Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-25T15:03:34.099530Z node 24 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:03:34.099568Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-25T15:03:34.100129Z node 24 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 2 at 9437184 restored its data 2025-06-25T15:03:34.100174Z node 24 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-25T15:03:34.100989Z node 24 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:2] at 9437184 exceeded memory limit 10732662 and requests 85861296 more for the next try 2025-06-25T15:03:34.101093Z node 24 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 2 released its data 2025-06-25T15:03:34.101130Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-25T15:03:34.101290Z node 24 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:03:34.101337Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-25T15:03:34.101877Z node 24 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 2 at 9437184 restored its data 2025-06-25T15:03:34.101923Z node 24 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-25T15:03:34.397797Z node 24 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:2] at tablet 9437184 with status COMPLETE 2025-06-25T15:03:34.397908Z node 24 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:2] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 8, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T15:03:34.397991Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is 
ExecutedNoMoreRestarts 2025-06-25T15:03:34.398031Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit ExecuteDataTx 2025-06-25T15:03:34.398075Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit FinishPropose 2025-06-25T15:03:34.398119Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit FinishPropose 2025-06-25T15:03:34.398223Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:03:34.398253Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit FinishPropose 2025-06-25T15:03:34.398292Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit CompletedOperations 2025-06-25T15:03:34.398331Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CompletedOperations 2025-06-25T15:03:34.398383Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-25T15:03:34.398414Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CompletedOperations 2025-06-25T15:03:34.398451Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 9437184 has finished 2025-06-25T15:03:34.413101Z node 24 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:03:34.413197Z node 24 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 9437184 on unit FinishPropose 2025-06-25T15:03:34.413252Z node 24 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 2 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: COMPLETE 2025-06-25T15:03:34.413341Z node 24 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:34.414449Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [24:302:2282], Recipient [24:238:2229]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:03:34.414504Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:03:34.414548Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [24:301:2281], serverId# [24:302:2282], sessionId# [0:0:0] 2025-06-25T15:03:34.414652Z node 24 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268830214, Sender [24:300:2280], Recipient [24:238:2229]: NKikimrTabletBase.TEvGetCounters >> test_auditlog.py::test_single_dml_query_logged[upsert] [GOOD] >> DataShardReadIteratorPageFaults::LocksNotLostOnPageFault [GOOD] >> DataShardReadIteratorState::ShouldCalculateQuota [GOOD] |93.0%| [TA] $(B)/ydb/core/tx/datashard/ut_minikql/test-results/unittest/{meta.json ... results_accumulator.log} >> TraverseColumnShard::TraverseColumnTableRebootColumnshard >> DataShardTxOrder::RandomPointsAndRanges [GOOD] |93.0%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_minikql/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit-UseSink [GOOD] >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc+UseSink [GOOD] >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc-UseSink >> DataShardReadIterator::ShouldReceiveErrorAfterSplitWhenExhausted [GOOD] >> AnalyzeDatashard::DropTableNavigateError >> DataShardReadIteratorBatchMode::RangeToNonInclusive [GOOD] >> DataShardReadIteratorBatchMode::MultipleRanges ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIteratorState::ShouldCalculateQuota [GOOD] Test command err: 2025-06-25T15:02:01.918232Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:01.918392Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:01.918469Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ddd/r3tmp/tmpoiy7zW/pdisk_1.dat 2025-06-25T15:02:02.203137Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:02.211576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:02.261566Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:02.265322Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863719121103 != 1750863719121107 2025-06-25T15:02:02.314771Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:02.314917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:02.326374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:02.421676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.463596Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:02.464447Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:02.464841Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:02:02.465018Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:02.509761Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:02.510414Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:02.510560Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:02.512200Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:02:02.512290Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:02.512370Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:02.512778Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:02.512933Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:02.513019Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:02:02.513480Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:02.534827Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:02.540419Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:02.540652Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:02:02.540711Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:02.540765Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:02.540835Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:02.541121Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.541194Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.542549Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:02.542688Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:02.542782Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:02.542875Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:02.542926Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:02.542966Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:02.543020Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:02.543091Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:02.543144Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:02.544756Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.545459Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.545533Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:02:02.545641Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:02:02.545694Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:02.545841Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:02.546213Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:02.546286Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:02.546418Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:02.546478Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:02:02.546515Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:02:02.546546Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:02:02.546586Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.546928Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:02.546971Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:02:02.547007Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:02:02.547056Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.547118Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:02:02.547159Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:02:02.547209Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:02:02.547243Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:02:02.547273Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:02:02.548089Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:02:02.548135Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.548179Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.548222Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-25T15:02:02.548286Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T15:02:02.552142Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:02:02.552197Z ... tx_id=5; 2025-06-25T15:03:35.585043Z node 14 :TX_DATASHARD INFO: datashard_write_operation.cpp:707: Write transaction 5 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-06-25T15:03:35.585166Z node 14 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715663 DataShard: 72075186224037888 Generation: 2 Counter: 5 SchemeShard: 72057594046644480 PathId: 2 2025-06-25T15:03:35.585268Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-25T15:03:35.585295Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit ExecuteWrite 2025-06-25T15:03:35.585321Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-25T15:03:35.585347Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:03:35.585509Z node 14 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. 
txid 5 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-25T15:03:35.585573Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is DelayComplete 2025-06-25T15:03:35.585608Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-25T15:03:35.585674Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:35.585736Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:35.585791Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-25T15:03:35.585813Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:35.585851Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-06-25T15:03:35.585954Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-25T15:03:35.586021Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:03:35.586085Z node 14 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 5 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_LOCKS_BROKEN 2025-06-25T15:03:35.586242Z node 14 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-25T15:03:35.586347Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:35.586717Z node 14 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:802: SelfId: [14:963:2713], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [14:909:2713]Got LOCKS BROKEN for table `/Root/table-1`. ShardID=72075186224037888, Sink=[14:963:2713].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-25T15:03:35.586914Z node 14 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [14:956:2713], SessionActorId: [14:909:2713], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[14:909:2713]. isRollback=0 2025-06-25T15:03:35.587421Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=14&id=NDA4MGY1MTktMmQ1NDE2LTM4ZmVhYTUwLWFhMzQ0ZmI1, ActorId: [14:909:2713], ActorState: ExecuteState, TraceId: 01jyksv8rp6cc03jr09yn1b1kx, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [14:957:2713] from: [14:956:2713] 2025-06-25T15:03:35.587652Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [14:956:2713], Recipient [14:832:2660]: NKikimrDataEvents.TEvWrite TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715663 DataShard: 72075186224037888 Generation: 2 Counter: 5 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-25T15:03:35.587685Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-25T15:03:35.587859Z node 14 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [14:957:2713] TxId: 281474976715665. Ctx: { TraceId: 01jyksv8rp6cc03jr09yn1b1kx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=NDA4MGY1MTktMmQ1NDE2LTM4ZmVhYTUwLWFhMzQ0ZmI1, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T15:03:35.588127Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [14:832:2660], Recipient [14:832:2660]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T15:03:35.588166Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T15:03:35.588223Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-25T15:03:35.588354Z node 14 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715663 DataShard: 72075186224037888 Generation: 2 Counter: 5 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-25T15:03:35.588422Z node 14 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715663, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-25T15:03:35.588485Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite 2025-06-25T15:03:35.588520Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T15:03:35.588547Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite 2025-06-25T15:03:35.588576Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:35.588602Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:03:35.588635Z node 14 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v3500/18446744073709551615 ImmediateWriteEdge# v3501/0 ImmediateWriteEdgeReplied# v3501/0 2025-06-25T15:03:35.588680Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-25T15:03:35.588710Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T15:03:35.588734Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:35.588755Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteWrite 2025-06-25T15:03:35.588779Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteWrite 2025-06-25T15:03:35.588805Z node 14 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:6] at 72075186224037888 2025-06-25T15:03:35.588905Z node 14 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715663 DataShard: 72075186224037888 Generation: 2 Counter: 5 SchemeShard: 72057594046644480 PathId: 2 2025-06-25T15:03:35.588978Z node 14 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip empty write operation for [0:6] at 72075186224037888 2025-06-25T15:03:35.589043Z node 14 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 
0 2025-06-25T15:03:35.589134Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:03:35.589171Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite 2025-06-25T15:03:35.589232Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-25T15:03:35.589300Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:03:35.589332Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is DelayComplete 2025-06-25T15:03:35.589356Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-25T15:03:35.589381Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:35.589407Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:35.589460Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T15:03:35.589483Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:35.589507Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-25T15:03:35.589557Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-25T15:03:35.589584Z node 14 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:03:35.589616Z node 14 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-25T15:03:35.589692Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:35.589891Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=14&id=NDA4MGY1MTktMmQ1NDE2LTM4ZmVhYTUwLWFhMzQ0ZmI1, ActorId: [14:909:2713], ActorState: ExecuteState, TraceId: 01jyksv8rp6cc03jr09yn1b1kx, Create QueryResponse for error on request, msg: 2025-06-25T15:03:35.591315Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [14:63:2110], Recipient [14:832:2660]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715663 LockNode: 14 Status: STATUS_NOT_FOUND |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit+UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit-UseSink >> TestKinesisHttpProxy::CreateStreamWithInvalidName [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit-UseSink [GOOD] Test command err: 2025-06-25T15:02:05.026788Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: 
[WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:05.026930Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:05.026982Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001baf/r3tmp/tmpMht5h8/pdisk_1.dat 2025-06-25T15:02:05.376541Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:05.385266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:05.431745Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:05.443887Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863722361808 != 1750863722361812 2025-06-25T15:02:05.489146Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T15:02:05.490035Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T15:02:05.492367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:05.492476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:05.504998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:05.594204Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T15:02:05.594261Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T15:02:05.596578Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T15:02:05.700545Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T15:02:05.700697Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T15:02:05.708601Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T15:02:05.708721Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T15:02:05.709022Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T15:02:05.709290Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T15:02:05.709423Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T15:02:05.709676Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T15:02:05.719384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:05.720767Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T15:02:05.720863Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T15:02:05.768692Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:05.769864Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:05.770348Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:02:05.770621Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:05.824287Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:05.825112Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:05.825283Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:05.827225Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:02:05.827337Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:05.827395Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:05.829536Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T15:02:05.829746Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:05.829834Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:02:05.830233Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:05.853365Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:05.855078Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:05.855284Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:02:05.855326Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:05.855364Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:05.855422Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:05.855680Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:05.855725Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:05.857887Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:05.858014Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:05.858085Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:05.858126Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:05.858168Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:05.858201Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:05.858246Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:05.858280Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:05.858325Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:05.858782Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:05.860374Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:05.860461Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], 
sessionId# [0:0:0] 2025-06-25T15:02:05.860585Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:02:05.860624Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:05.860811Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:05.861144Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:05.861226Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:05.861331Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:05.861378Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15: ... 888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-25T15:03:36.071021Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-25T15:03:36.071105Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [13:714:2592], Recipient [13:627:2531]: {TEvReadSet step# 3049 txid# 281474976715663 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-25T15:03:36.071136Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:03:36.071163Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037888 source 72075186224037889 dest 72075186224037888 producer 72075186224037889 txId 281474976715663 2025-06-25T15:03:36.071205Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037888 got read set: {TEvReadSet step# 3049 txid# 281474976715663 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-25T15:03:36.071386Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:705: Complete [3049 : 281474976715663] from 72075186224037888 at tablet 72075186224037888 send result to client [13:931:2729], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:36.071701Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [13:627:2531], Recipient [13:714:2592]: {TEvReadSet step# 3049 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-25T15:03:36.071735Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:03:36.071762Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715663 2025-06-25T15:03:36.071811Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 3049 txid# 281474976715663 TabletSource# 
72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-25T15:03:36.071914Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:705: Complete [3049 : 281474976715663] from 72075186224037889 at tablet 72075186224037889 send result to client [13:931:2729], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:36.072399Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:36.072771Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 TEvProposeTransactionResult: TxKind: TX_KIND_DATA Origin: 72075186224037888 Status: COMPLETE TxId: 281474976715663 TxResult: "" ExecLatency: 0 ProposeLatency: 0 TxStats { PerShardStats { ShardId: 72075186224037888 CpuTimeUsec: 1783 } } ComputeActorStats { Tasks { Tables { TablePath: "/Root/table-1" WriteRows: 1 WriteBytes: 8 } } } CommitVersion { Step: 3049 TxId: 281474976715663 } 2025-06-25T15:03:36.073650Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 TEvProposeTransactionResult: TxKind: TX_KIND_DATA Origin: 72075186224037889 Status: COMPLETE TxId: 281474976715663 TxResult: "" ExecLatency: 0 ProposeLatency: 0 TxStats { PerShardStats { ShardId: 72075186224037889 CpuTimeUsec: 935 } } ComputeActorStats { Tasks { Tables { TablePath: "/Root/table-2" WriteRows: 1 WriteBytes: 8 } } } CommitVersion { Step: 3049 TxId: 281474976715663 } 2025-06-25T15:03:36.076085Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-25T15:03:36.076272Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [13:627:2531], Recipient [13:714:2592]: {TEvReadSet step# 3049 txid# 281474976715663 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-25T15:03:36.076337Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:36.076418Z node 13 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715663 2025-06-25T15:03:36.083271Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-25T15:03:36.083564Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [13:714:2592], Recipient [13:627:2531]: {TEvReadSet step# 3049 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 1} 2025-06-25T15:03:36.083658Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:36.083741Z node 13 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715663 2025-06-25T15:03:36.316199Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [13:61:2108] Handle TEvExecuteKqpTransaction 2025-06-25T15:03:36.316298Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [13:61:2108] TxId# 281474976715667 ProcessProposeKqpTransaction 2025-06-25T15:03:36.317524Z node 13 :KQP_EXECUTER ERROR: 
kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jyksv9ckbbv72za5c4f74e59, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDBhZGMzN2EtOGYxNjIxZWQtZjY3NmI1MTYtMmU5OTM5M2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TEvRead: ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 2025-06-25T15:03:36.320986Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [13:1041:2835], Recipient [13:627:2531]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-25T15:03:36.321258Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:03:36.321358Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v3049/281474976715663 IncompleteEdge# v{min} UnprotectedReadEdge# v4000/18446744073709551615 ImmediateWriteEdge# v4001/0 ImmediateWriteEdgeReplied# v4001/0 2025-06-25T15:03:36.321451Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v4001/18446744073709551615 2025-06-25T15:03:36.321569Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-25T15:03:36.321724Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:03:36.321803Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:03:36.321866Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:36.321926Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:03:36.321989Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-25T15:03:36.322053Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:03:36.322084Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:36.322106Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:03:36.322130Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:03:36.322295Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-25T15:03:36.322703Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 
Complete read# {[13:1041:2835], 0} after executionsCount# 1 2025-06-25T15:03:36.322790Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[13:1041:2835], 0} sends rowCount# 2, bytes# 96, quota rows left# 999, quota bytes left# 5242784, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:36.322921Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[13:1041:2835], 0} finished in read 2025-06-25T15:03:36.323014Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:03:36.323043Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:03:36.323073Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:36.323100Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:36.323148Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:03:36.323169Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:36.323210Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-25T15:03:36.323276Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:03:36.323437Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T15:03:36.324556Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [13:1041:2835], Recipient [13:627:2531]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:03:36.324637Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } items { uint32_value: 11 } }, { items { uint32_value: 2 } items { uint32_value: 2 } items { uint32_value: 22 } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReceiveErrorAfterSplitWhenExhausted [GOOD] Test command err: 2025-06-25T15:02:01.844666Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:01.844791Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:01.844838Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e21/r3tmp/tmp0wRcIb/pdisk_1.dat 2025-06-25T15:02:02.203037Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:02.211564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:02.256155Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:02.264789Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863719121082 != 1750863719121086 2025-06-25T15:02:02.312442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:02.312571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:02.325360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:02.419615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.469356Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:02.470297Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:02.470733Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:02:02.471003Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:02.510514Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:02.511336Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:02.511489Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:02.512971Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:02:02.513057Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:02.513116Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:02.513463Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:02.513568Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:02.513644Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:02:02.524468Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:02.563127Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:02.563317Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:02.563420Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:02:02.563459Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:02.563507Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:02.563566Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:02.563763Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.563812Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.564141Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:02.564251Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:02.564343Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:02.564407Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:02.564457Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:02.564502Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:02.564569Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:02.564613Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:02.564663Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:02.565042Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.565090Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.565141Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:02:02.565221Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:02:02.565261Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:02.565377Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:02.565548Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:02.565632Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:02.565705Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:02.565754Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:02:02.565791Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:02:02.565825Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:02:02.565861Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.566133Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:02.566170Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:02:02.566210Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:02:02.566248Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.566316Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:02:02.566355Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:02:02.566393Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:02:02.566425Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:02:02.566448Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:02:02.567773Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:02:02.567822Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:02:02.578497Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:02:02.578578Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.578613Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.578662Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... .cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [15:1105:2864] 2025-06-25T15:03:36.841081Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-06-25T15:03:36.841161Z node 15 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037891 2025-06-25T15:03:36.841236Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-25T15:03:36.841579Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553157, Sender [15:1031:2812], Recipient [15:625:2530]: NKikimrTxDataShard.TEvSplitTransferSnapshotAck TabletId: 72075186224037891 OperationCookie: 281474976715665 2025-06-25T15:03:36.841687Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037888 Received snapshot Ack from dst 72075186224037891 for split OpId 281474976715665 2025-06-25T15:03:36.842148Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [15:1031:2812], Recipient [15:1031:2812]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:03:36.842184Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:03:36.842488Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [15:1100:2859], Recipient [15:625:2530]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037891 ClientId: [15:1100:2859] ServerId: [15:1101:2860] } 2025-06-25T15:03:36.842535Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T15:03:36.843165Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [15:25:2072], Recipient [15:1031:2812]: {TEvRegisterTabletResult TabletId# 72075186224037891 Entry# 3000} 2025-06-25T15:03:36.843221Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-25T15:03:36.843296Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037891 time 3000 2025-06-25T15:03:36.843362Z node 15 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-25T15:03:36.843675Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2025-06-25T15:03:36.843712Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:36.843741Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037891 2025-06-25T15:03:36.843772Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037891 has no attached operations 2025-06-25T15:03:36.843799Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037891 2025-06-25T15:03:36.843827Z node 15 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-25T15:03:36.843864Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-06-25T15:03:36.843930Z node 15 :TX_DATASHARD DEBUG: datashard_split_dst.cpp:304: 72075186224037892 ack snapshot OpId 281474976715665 2025-06-25T15:03:36.844032Z node 15 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037892 2025-06-25T15:03:36.844118Z node 15 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037892 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T15:03:36.844187Z node 15 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-25T15:03:36.844237Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037892, actorId: [15:1107:2866] 2025-06-25T15:03:36.844283Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037892 2025-06-25T15:03:36.844340Z node 15 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037892 2025-06-25T15:03:36.844368Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-25T15:03:36.844538Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553157, Sender [15:1034:2814], Recipient [15:625:2530]: NKikimrTxDataShard.TEvSplitTransferSnapshotAck TabletId: 72075186224037892 OperationCookie: 281474976715665 2025-06-25T15:03:36.844587Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037888 Received snapshot Ack from dst 72075186224037892 for split OpId 281474976715665 2025-06-25T15:03:36.844868Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [15:1101:2860], Recipient [15:1031:2812]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:03:36.844919Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:03:36.845000Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [15:1100:2859], serverId# [15:1101:2860], sessionId# [0:0:0] 2025-06-25T15:03:36.845086Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [15:1034:2814], Recipient [15:1034:2814]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 
2025-06-25T15:03:36.845114Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:03:36.845352Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [15:1099:2858], Recipient [15:625:2530]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037892 ClientId: [15:1099:2858] ServerId: [15:1102:2861] } 2025-06-25T15:03:36.845386Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T15:03:36.845732Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037892 2025-06-25T15:03:36.845768Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:36.845796Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037892 2025-06-25T15:03:36.845824Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037892 has no attached operations 2025-06-25T15:03:36.845853Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037892 2025-06-25T15:03:36.845877Z node 15 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037892 TxInFly 0 2025-06-25T15:03:36.845909Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-25T15:03:36.846028Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [15:25:2072], Recipient [15:1034:2814]: {TEvRegisterTabletResult TabletId# 72075186224037892 Entry# 3000} 2025-06-25T15:03:36.846061Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-25T15:03:36.846087Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037892 time 3000 2025-06-25T15:03:36.846136Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-25T15:03:36.846419Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [15:25:2072], Recipient [15:1031:2812]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 3000 ReadStep# 3000 } 2025-06-25T15:03:36.846468Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-25T15:03:36.846543Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037891 coordinator 72057594046316545 last step 0 next step 3000 2025-06-25T15:03:36.846630Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037891: waitStep# 3000 readStep# 3000 observedStep# 3000 2025-06-25T15:03:36.846731Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037891 promoting UnprotectedReadEdge to v3000/18446744073709551615 2025-06-25T15:03:36.846869Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877764, Sender [15:1102:2861], Recipient [15:1034:2814]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:03:36.846905Z node 15 
:TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-25T15:03:36.846939Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037892, clientId# [15:1099:2858], serverId# [15:1102:2861], sessionId# [0:0:0] 2025-06-25T15:03:36.847392Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [15:25:2072], Recipient [15:1034:2814]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 3000 ReadStep# 3000 } 2025-06-25T15:03:36.847427Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-25T15:03:36.847454Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037892 coordinator 72057594046316545 last step 0 next step 3000 2025-06-25T15:03:36.847495Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037892: waitStep# 3000 readStep# 3000 observedStep# 3000 2025-06-25T15:03:36.847551Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037892 promoting UnprotectedReadEdge to v3000/18446744073709551615 2025-06-25T15:03:36.869631Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037888 ack split to schemeshard 281474976715665 2025-06-25T15:03:36.874585Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553158, Sender [15:373:2367], Recipient [15:633:2535] 2025-06-25T15:03:36.874680Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715665, at datashard: 72075186224037888, state: SplitSrcWaitForPartitioningChanged 2025-06-25T15:03:36.877513Z node 15 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037888 ack split partitioning changed to schemeshard 281474976715665 2025-06-25T15:03:36.877646Z node 15 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037888 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-25T15:03:36.877785Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [15:618:2525], Recipient [15:625:2530]: NKikimr::TEvTablet::TEvFollowerGcApplied >> DataShardReadIterator::ShouldReadRangeChunk100 [GOOD] >> DataShardSnapshots::VolatileSnapshotRenameTimeout [GOOD] >> DataShardSnapshots::UncommittedWriteRestartDuringCommit >> TraverseDatashard::TraverseTwoTablesTwoServerlessDbs [GOOD] >> TestKinesisHttpProxy::CreateStreamWithDifferentRetentions ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPointsAndRanges [GOOD] Test command err: 2025-06-25T15:01:51.606288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:51.606341Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:51.609435Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:51.625455Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient 
[1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:51.625950Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:51.626206Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:51.636106Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:51.679115Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:51.679306Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:51.680921Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:51.680994Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:51.681069Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:51.681476Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:51.681569Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:51.681624Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:51.735034Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:51.775450Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:51.775689Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:51.775805Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:51.775856Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:51.775897Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:51.775937Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:51.776187Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:51.776246Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:51.776982Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:51.777105Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:51.777166Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:51.777202Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:51.777247Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:51.777282Z node 1 :TX_DATASHARD 
TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:51.777330Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:51.777368Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:51.777410Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:51.777506Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:51.777544Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:51.777595Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:51.780725Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nK\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\n \000Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:51.780808Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:51.780885Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:51.781073Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:51.781151Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:51.781206Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:51.781262Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:51.781298Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:51.781331Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:51.781366Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:51.781690Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:51.781723Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:51.781770Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:01:51.781809Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:51.781879Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is 
DelayComplete 2025-06-25T15:01:51.781915Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:51.781957Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:51.781989Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:51.782016Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:51.794539Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:51.794620Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:51.794672Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:51.794721Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:51.794788Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:51.795316Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:51.795372Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:51.795430Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:51.795575Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:51.795609Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:51.795763Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:51.795805Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:51.795841Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:51.795879Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:51.799893Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:51.799962Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:51.800173Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:51.800217Z node 1 
:TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:51.800275Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:51.800331Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:51.800365Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:51.800407Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:51.800442Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100 ... tion plan for [1000004:403] at 9437186 executing on unit LoadAndWaitInRS 2025-06-25T15:03:33.873869Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:403] at 9437186 to execution unit ExecuteDataTx 2025-06-25T15:03:33.873895Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:403] at 9437186 on unit ExecuteDataTx 2025-06-25T15:03:33.875045Z node 4 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000004:403] at tablet 9437186 with status COMPLETE 2025-06-25T15:03:33.875102Z node 4 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000004:403] at 9437186: {NSelectRow: 1, NSelectRange: 1, NUpdateRow: 3, NEraseRow: 0, SelectRowRows: 1, SelectRowBytes: 8, SelectRangeRows: 26, SelectRangeBytes: 208, UpdateRowBytes: 21, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T15:03:33.875152Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:403] at 9437186 is ExecutedNoMoreRestarts 2025-06-25T15:03:33.875180Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:403] at 9437186 executing on unit ExecuteDataTx 2025-06-25T15:03:33.875208Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:403] at 9437186 to execution unit CompleteOperation 2025-06-25T15:03:33.875234Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:403] at 9437186 on unit CompleteOperation 2025-06-25T15:03:33.875486Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:403] at 9437186 is DelayComplete 2025-06-25T15:03:33.875515Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:403] at 9437186 executing on unit CompleteOperation 2025-06-25T15:03:33.875542Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:403] at 9437186 to execution unit CompletedOperations 2025-06-25T15:03:33.875568Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:403] at 9437186 on unit CompletedOperations 2025-06-25T15:03:33.875601Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:403] at 9437186 is Executed 2025-06-25T15:03:33.875640Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:403] at 9437186 executing on unit CompletedOperations 2025-06-25T15:03:33.875666Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000004:403] at 9437186 has finished 2025-06-25T15:03:33.875694Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 (dry run) active 0 active planned 0 
immediate 0 planned 0 2025-06-25T15:03:33.875717Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-25T15:03:33.875753Z node 4 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437186 has no attached operations 2025-06-25T15:03:33.875777Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-06-25T15:03:33.876566Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [4:237:2228], Recipient [4:237:2228]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:03:33.876606Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:03:33.876653Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:03:33.876683Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:03:33.876713Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000004:403] at 9437184 for LoadAndWaitInRS 2025-06-25T15:03:33.876751Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:403] at 9437184 on unit LoadAndWaitInRS 2025-06-25T15:03:33.876777Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:403] at 9437184 is Executed 2025-06-25T15:03:33.876803Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:403] at 9437184 executing on unit LoadAndWaitInRS 2025-06-25T15:03:33.876829Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:403] at 9437184 to execution unit ExecuteDataTx 2025-06-25T15:03:33.876859Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:403] at 9437184 on unit ExecuteDataTx 2025-06-25T15:03:33.878416Z node 4 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000004:403] at tablet 9437184 with status COMPLETE 2025-06-25T15:03:33.878470Z node 4 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000004:403] at 9437184: {NSelectRow: 1, NSelectRange: 2, NUpdateRow: 2, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 82, SelectRangeBytes: 656, UpdateRowBytes: 13, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T15:03:33.878523Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:403] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:03:33.878550Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:403] at 9437184 executing on unit ExecuteDataTx 2025-06-25T15:03:33.878576Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000004:403] at 9437184 to execution unit CompleteOperation 2025-06-25T15:03:33.878603Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:403] at 9437184 on unit CompleteOperation 2025-06-25T15:03:33.878830Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:403] at 9437184 is DelayComplete 2025-06-25T15:03:33.878905Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:403] at 9437184 executing on unit CompleteOperation 2025-06-25T15:03:33.878933Z node 4 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [1000004:403] at 9437184 to execution unit CompletedOperations 2025-06-25T15:03:33.878958Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000004:403] at 9437184 on unit CompletedOperations 2025-06-25T15:03:33.878992Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000004:403] at 9437184 is Executed 2025-06-25T15:03:33.879015Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000004:403] at 9437184 executing on unit CompletedOperations 2025-06-25T15:03:33.879039Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000004:403] at 9437184 has finished 2025-06-25T15:03:33.879066Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:33.879089Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:03:33.879115Z node 4 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:03:33.879140Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:03:33.902841Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:03:33.902894Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:403] at 9437184 on unit CompleteOperation 2025-06-25T15:03:33.902941Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 403] from 9437184 at tablet 9437184 send result to client [4:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T15:03:33.902998Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437186 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 400} 2025-06-25T15:03:33.903051Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:03:33.903332Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-25T15:03:33.903359Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:403] at 9437185 on unit CompleteOperation 2025-06-25T15:03:33.903396Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 403] from 9437185 at tablet 9437185 send result to client [4:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T15:03:33.903433Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437185 {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437184 TabletDest# 9437185 SetTabletConsumer# 9437185 Flags# 0 Seqno# 400} 2025-06-25T15:03:33.903453Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-25T15:03:33.903622Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [4:237:2228], Recipient [4:459:2398]: {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437186 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 400} 2025-06-25T15:03:33.903653Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:33.903681Z node 4 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS 
Ack at 9437186 source 9437186 dest 9437184 consumer 9437184 txId 403 2025-06-25T15:03:33.903953Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-25T15:03:33.903979Z node 4 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:403] at 9437186 on unit CompleteOperation 2025-06-25T15:03:33.904012Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 403] from 9437186 at tablet 9437186 send result to client [4:103:2136], exec latency: 1 ms, propose latency: 3 ms 2025-06-25T15:03:33.904049Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437186 {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437185 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 400} 2025-06-25T15:03:33.904103Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-25T15:03:33.904406Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [4:349:2314], Recipient [4:237:2228]: {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437184 TabletDest# 9437185 SetTabletConsumer# 9437185 Flags# 0 Seqno# 400} 2025-06-25T15:03:33.904434Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:33.904456Z node 4 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437184 source 9437184 dest 9437185 consumer 9437185 txId 403 2025-06-25T15:03:33.904569Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [4:459:2398], Recipient [4:349:2314]: {TEvReadSet step# 1000004 txid# 403 TabletSource# 9437185 TabletDest# 9437186 SetTabletConsumer# 9437186 Flags# 0 Seqno# 400} 2025-06-25T15:03:33.904619Z node 4 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:03:33.904651Z node 4 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437186 consumer 9437186 txId 403 >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> TColumnShardTestSchema::ColdCompactionSmoke [GOOD] |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReadRangeChunk100 [GOOD] Test command err: 2025-06-25T15:02:01.876624Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:01.876778Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:01.876825Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000deb/r3tmp/tmpCttudV/pdisk_1.dat 2025-06-25T15:02:02.203081Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:02.211711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:02.253648Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:02.267200Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863719121086 != 1750863719121090 2025-06-25T15:02:02.316339Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:02.316492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:02.327904Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:02.419590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.465760Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:02.466836Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:02.467270Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:02:02.467495Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:02.513925Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:02.514636Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:02.514761Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:02.516413Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:02:02.516469Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:02.516527Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:02.516842Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:02.516948Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:02.517029Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:02:02.527743Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:02.562191Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:02.562424Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:02.562544Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:02:02.562613Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:02.562650Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:02.562693Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:02.562920Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.562972Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.563344Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:02.563445Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:02.563520Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:02.563588Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:02.563639Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:02.563676Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:02.563710Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:02.563742Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:02.563792Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:02.564249Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.564548Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.564613Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:02:02.564731Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:02:02.564781Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:02.564932Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:02.565151Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:02.565224Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:02.565310Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:02.565388Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:02:02.565429Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:02:02.565462Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:02:02.565497Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.565804Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:02.565851Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:02:02.565890Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:02:02.565928Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.565989Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:02:02.566023Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:02:02.566059Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:02:02.566108Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:02:02.566138Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:02:02.567596Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:02:02.567647Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:02:02.578433Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:02:02.578513Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.578549Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.578606Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542804, quota bytes left# 18446744073708987711, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.419162Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.419195Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.419231Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.419454Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542705, quota bytes left# 18446744073708981375, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.419540Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.419579Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.419616Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.419887Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542606, quota bytes left# 18446744073708975039, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.420031Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.420071Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 
ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.420100Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.420360Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542507, quota bytes left# 18446744073708968703, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.420483Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.420524Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.420554Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.420849Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542408, quota bytes left# 18446744073708962367, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.420977Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.421023Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.421062Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.421305Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542309, quota bytes left# 18446744073708956031, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.421441Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.421486Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.421518Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.421763Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542210, quota bytes left# 18446744073708949695, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.421863Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.421897Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 
ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.421925Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.422149Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542111, quota bytes left# 18446744073708943359, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.422267Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.422312Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.422352Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.422616Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709542012, quota bytes left# 18446744073708937023, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.422723Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.422755Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.422797Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.423046Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709541913, quota bytes left# 18446744073708930687, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.423141Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.423174Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.423201Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.423469Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709541814, quota bytes left# 18446744073708924351, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.423604Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.423652Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 
ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.423687Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.423994Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709541715, quota bytes left# 18446744073708918015, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.424164Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.424210Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.424238Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.424524Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 99, bytes# 6336, quota rows left# 18446744073709541616, quota bytes left# 18446744073708911679, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.424650Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:936:2738], Recipient [15:936:2738]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:38.424684Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 0 2025-06-25T15:03:38.424711Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 0 2025-06-25T15:03:38.424820Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[15:554:2480], 1} sends rowCount# 1, bytes# 64, quota rows left# 18446744073709541615, quota bytes left# 18446744073708911615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:38.424890Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:3103: 72075186224037890 read iterator# {[15:554:2480], 1} finished in ReadContinue ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseTwoTablesTwoServerlessDbs [GOOD] Test command err: 2025-06-25T15:03:26.105443Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:26.105858Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:26.105975Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fc6/r3tmp/tmpakVjuO/pdisk_1.dat 2025-06-25T15:03:26.443842Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7257, node 1 2025-06-25T15:03:26.672365Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:26.672422Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:26.672449Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:26.672897Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:26.679010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:26.774502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:26.774631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:26.789838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5573 2025-06-25T15:03:27.346078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:30.369318Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:30.409030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:30.409153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:30.449707Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:30.451962Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:30.655054Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:30.692630Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.693252Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.693803Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.693947Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.694188Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.694302Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.694409Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.694477Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.694539Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.890704Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:30.890820Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:30.905794Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:31.084242Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:31.140741Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:31.140849Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:31.194776Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:31.194987Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:31.195198Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:31.195279Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:31.195341Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:31.195394Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:31.195467Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:31.195528Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:31.196075Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:31.221079Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:31.221181Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:31.240740Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:31.249825Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:31.250157Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:31.259739Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-25T15:03:31.281604Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:31.281674Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:31.281753Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-25T15:03:31.293323Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:31.300219Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:31.300357Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:31.501811Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:31.654284Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:31.760561Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T15:03:32.300400Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:32.331835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:32.962064Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:33.155674Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7894: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-25T15:03:33.155737Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7910: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-25T15:03:33.155857Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:2504:2905], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-25T15:03:33.157355Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2506:2907] 2025-06-25T15:03:33.157670Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2506:2907], schemeshard id = 72075186224037899 2025-06-25T15:03:34.063539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:34.649497Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:34.929415Z node 2 :STATISTICS DEBUG: schemeshard_impl.cp ... ode 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:2981:3103], at schemeshard: 72075186224037905, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037905 2025-06-25T15:03:34.930609Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2983:3105] 2025-06-25T15:03:34.930822Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2983:3105], schemeshard id = 72075186224037905 2025-06-25T15:03:36.032215Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3104:3360], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:36.032417Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:36.051417Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72075186224037899, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:36.570256Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3413:3408], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:36.570707Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:36.572747Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:3418:3412]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:03:36.573077Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:03:36.573277Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 18446744073709551615 ] 2025-06-25T15:03:36.573350Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:3421:3415] 2025-06-25T15:03:36.573413Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:3421:3415] 2025-06-25T15:03:36.574233Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:3422:3340] 2025-06-25T15:03:36.574555Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:3421:3415], server id = [2:3422:3340], tablet id = 72075186224037894, status = OK 2025-06-25T15:03:36.574812Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:3422:3340], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:03:36.574906Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-25T15:03:36.575177Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:03:36.575244Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:3418:3412], StatRequests.size() = 1 2025-06-25T15:03:36.593962Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3426:3419], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:36.594141Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:36.594560Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3431:3424], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:36.600458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715664:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:36.880893Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:03:36.880978Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:03:36.953796Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:3421:3415], schemeshard count = 1 2025-06-25T15:03:37.269481Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:3433:3426], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715664 completed, doublechecking } 2025-06-25T15:03:37.439539Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:3555:3500] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:37.451368Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:3578:3516]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:03:37.451601Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:03:37.451650Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:3578:3516], StatRequests.size() = 1 2025-06-25T15:03:37.507082Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyksv9ta5cqcevtdd6aja6br, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTUwZjhjMWMtY2Y2MWU0NDEtZGI0ZjdmMjQtYzQ2ZWE4Yjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:03:37.592247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72075186224037905, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:38.069603Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:3940:3581]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:03:38.069859Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:03:38.070347Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224037894] EvRequestStats, node id = 1, schemeshard count = 1, urgent = 0 2025-06-25T15:03:38.070407Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-25T15:03:38.070686Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:03:38.070770Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:3940:3581], StatRequests.size() = 1 2025-06-25T15:03:38.097924Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 4 ], ReplyToActorId[ [1:3949:3590]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:03:38.098110Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 4 ] 2025-06-25T15:03:38.098146Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 4, ReplyToActorId = [1:3949:3590], StatRequests.size() = 1 2025-06-25T15:03:38.144134Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. Ctx: { TraceId: 01jyksvba51asppygv1h6kzxtq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWQzNWRiZjMtYzQwM2I1MjMtN2E4MWRmZTgtN2MxNmMzZWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:03:38.191388Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:3993:3607]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:03:38.194447Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:03:38.194511Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:03:38.194861Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:03:38.194916Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-25T15:03:38.194965Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:03:38.216723Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-25T15:03:38.217791Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 2025-06-25T15:03:38.218234Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:4017:3619]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:03:38.221252Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:03:38.221309Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:03:38.221569Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:03:38.221617Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-25T15:03:38.221663Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 72075186224037905, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:03:38.224106Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 2 ], RowsCount[ 0 ] 2025-06-25T15:03:38.224554Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 >> TraverseDatashard::TraverseOneTableServerless >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict-UseSink [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder-EvWrite [GOOD] |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableAggrStatNonLocalTablet |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict-UseSink [GOOD] Test command err: 2025-06-25T15:02:02.137078Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:02.137203Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:02.137244Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dfb/r3tmp/tmplLpGdR/pdisk_1.dat 2025-06-25T15:02:02.453113Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:02.456419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:02.499177Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:02.507965Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863719150607 != 1750863719150611 2025-06-25T15:02:02.559126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:02.559250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:02.570604Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:02.653079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.685555Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:02.686470Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:02.686858Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:02:02.687054Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:02.725741Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:02.726421Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:02.726543Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:02.728240Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:02:02.728411Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:02.728478Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:02.728819Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:02.728955Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:02.729025Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:02:02.739776Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:02.767560Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:02.767764Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:02.767889Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:02:02.767928Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:02.767975Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:02.768013Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:02.768232Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.768278Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.768671Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:02.768765Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:02.768816Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:02.768877Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:02.768921Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:02.768957Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:02.768988Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:02.769020Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:02.769064Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:02.769506Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.769552Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.769592Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:02:02.769672Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:02:02.769710Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:02.769843Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:02.770080Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:02.770140Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:02.770245Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:02.770297Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:02:02.770336Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:02:02.770370Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:02:02.770401Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.770701Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:02.770745Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:02:02.770803Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:02:02.770840Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.770928Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:02:02.770964Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:02:02.770995Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:02:02.771027Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:02:02.771054Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:02:02.772328Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:02:02.772380Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:02:02.783076Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:02:02.783156Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.783188Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.783229Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-25T15:03:40.455955Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v3001/18446744073709551615 2025-06-25T15:03:40.456070Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CheckRead 2025-06-25T15:03:40.456237Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-25T15:03:40.456323Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:03:40.456404Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:40.456504Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:03:40.456570Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037888 2025-06-25T15:03:40.456645Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-25T15:03:40.456676Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:40.456703Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:03:40.456728Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:03:40.456873Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 ResultFormat: FORMAT_ARROW MaxRowsInResult: 2 } 2025-06-25T15:03:40.457407Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Continue 2025-06-25T15:03:40.457463Z node 15 :TX_DATASHARD TRACE: 
datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Continue at tablet# 72075186224037888 2025-06-25T15:03:40.457570Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T15:03:40.481125Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [15:1008:2801], Recipient [15:625:2530]: {TEvReadSet step# 3001 txid# 281474976715667 TabletSource# 72075186224037891 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037891 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-25T15:03:40.481286Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:03:40.481401Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037888 source 72075186224037891 dest 72075186224037888 producer 72075186224037891 txId 281474976715667 2025-06-25T15:03:40.481574Z node 15 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037888 got read set: {TEvReadSet step# 3001 txid# 281474976715667 TabletSource# 72075186224037891 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037891 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-25T15:03:40.481855Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:705: Complete [3001 : 281474976715667] from 72075186224037888 at tablet 72075186224037888 send result to client [15:1096:2850], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:40.481970Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-25T15:03:40.482061Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:4] at 72075186224037888 for ExecuteRead 2025-06-25T15:03:40.482523Z node 15 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1365: ActorId: [15:1096:2850] TxId: 281474976715667. Ctx: { TraceId: 01jyksvdde0hk0xyba6xr3te2w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=MjE0NTJhZC1mM2ZjODJjMS05ZmUwMGNjMS01YjZlNzJkZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got propose result, shard: 72075186224037888, status: COMPLETE, error: 2025-06-25T15:03:40.482803Z node 15 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [15:1096:2850] TxId: 281474976715667. Ctx: { TraceId: 01jyksvdde0hk0xyba6xr3te2w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=MjE0NTJhZC1mM2ZjODJjMS05ZmUwMGNjMS01YjZlNzJkZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:03:40.482921Z node 15 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [15:1096:2850] TxId: 281474976715667. Ctx: { TraceId: 01jyksvdde0hk0xyba6xr3te2w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=MjE0NTJhZC1mM2ZjODJjMS05ZmUwMGNjMS01YjZlNzJkZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-25T15:03:40.483111Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [15:625:2530], Recipient [15:625:2530]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:03:40.483208Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:03:40.483526Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:40.485142Z node 15 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 5, sender: [15:554:2480], selfId: [15:59:2106], source: [15:1074:2850] 2025-06-25T15:03:40.485253Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:03:40.485337Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-25T15:03:40.485425Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:282: Return cached ready operation [0:4] at 72075186224037888 2025-06-25T15:03:40.485499Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:03:40.485713Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 2, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 ResultFormat: FORMAT_ARROW MaxRowsInResult: 2 } 2025-06-25T15:03:40.486313Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2025-06-25T15:03:40.486402Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[15:554:2480], 1} after executionsCount# 2 2025-06-25T15:03:40.486494Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[15:554:2480], 1} sends rowCount# 2, bytes# 32, quota rows left# 18446744073709551613, quota bytes left# 18446744073709551583, hasUnreadQueries# 1, total queries# 6, firstUnprocessed# 0 2025-06-25T15:03:40.486787Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-25T15:03:40.486888Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:03:40.486951Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:40.487018Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:40.487077Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-25T15:03:40.487102Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:40.487137Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037888 has finished 
2025-06-25T15:03:40.487204Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:40.487274Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T15:03:40.487342Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:03:40.487406Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:03:40.487793Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:625:2530], Recipient [15:625:2530]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:40.487893Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037888 ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 2 2025-06-25T15:03:40.488135Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037888 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 2 2025-06-25T15:03:40.488406Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037888 readContinue iterator# {[15:554:2480], 1} sends rowCount# 2, bytes# 32, quota rows left# 18446744073709551611, quota bytes left# 18446744073709551551, hasUnreadQueries# 1, total queries# 6, firstUnprocessed# 2 2025-06-25T15:03:40.489543Z node 15 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=15&id=MjE0NTJhZC1mM2ZjODJjMS05ZmUwMGNjMS01YjZlNzJkZQ==, workerId: [15:1074:2850], local sessions count: 0 2025-06-25T15:03:40.489688Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553217, Sender [15:625:2530], Recipient [15:625:2530]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:03:40.489749Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037888 ReadContinue for iterator# {[15:554:2480], 1}, firstUnprocessedQuery# 4 2025-06-25T15:03:40.489923Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037888 ReadContinue: iterator# {[15:554:2480], 1}, FirstUnprocessedQuery# 4 2025-06-25T15:03:40.490036Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037888 readContinue iterator# {[15:554:2480], 1} sends rowCount# 2, bytes# 32, quota rows left# 18446744073709551609, quota bytes left# 18446744073709551519, hasUnreadQueries# 0, total queries# 6, firstUnprocessed# 4 2025-06-25T15:03:40.490160Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:3103: 72075186224037888 read iterator# {[15:554:2480], 1} finished in ReadContinue 2025-06-25T15:03:40.490434Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [15:63:2110], Recipient [15:1008:2801]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715666 LockNode: 15 Status: STATUS_NOT_FOUND ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::ColdCompactionSmoke [GOOD] Test command err: 2025-06-25T14:59:42.226024Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T14:59:42.253781Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T14:59:42.254014Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T14:59:42.260372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T14:59:42.260552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T14:59:42.260759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T14:59:42.260905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T14:59:42.261012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T14:59:42.261130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T14:59:42.261224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T14:59:42.261319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T14:59:42.261409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T14:59:42.261517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.261607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T14:59:42.286149Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T14:59:42.286277Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T14:59:42.286320Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T14:59:42.286475Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T14:59:42.287191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T14:59:42.287238Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T14:59:42.287331Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T14:59:42.287431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T14:59:42.287461Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T14:59:42.287633Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T14:59:42.287726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T14:59:42.287752Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T14:59:42.287842Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T14:59:42.287890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T14:59:42.287950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T14:59:42.287997Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T14:59:42.288049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T14:59:42.288082Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T14:59:42.288111Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T14:59:42.288325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T14:59:42.288372Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T14:59:42.288408Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T14:59:42.288620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T14:59:42.288664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T14:59:42.288693Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T14:59:42.288796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T14:59:42.288846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.288871Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T14:59:42.288941Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T14:59:42.288995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T14:59:42.289029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T14:59:42.289067Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T14:59:42.289316Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=45; 2025-06-25T14:59:42.289388Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=32; 2025-06-25T14:59:42.289466Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=37; 2025-06-25T14:59:42.289548Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=39; 2025-06-25T14:59:42.290422Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T14:59:42.290524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T14:59:42.290564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T14:59:42.290610Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... B:0:9160];;column_id:8;chunk_idx:44;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:45;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:46;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:47;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:48;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:49;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:50;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:51;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:52;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:53;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:54;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:55;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:56;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:57;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:58;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:59;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:60;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:61;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:62;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:63;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:64;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:65;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:66;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:67;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:68;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:69;blob_range:[NO_BLOB:0:9160];;column_id:8;chunk_idx:70;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:0;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:1;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:2;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:3;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:4;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:5;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:6;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:7;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:8;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:9;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:10;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:11;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:12;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:13;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:14;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:15;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:16;blob_range:[NO_BLOB:0:9152];;column_id:9;chunk_idx:17;blob_range
:[NO_BLOB:0:9160];;column_id:9;chunk_idx:18;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:19;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:20;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:21;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:22;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:23;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:24;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:25;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:26;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:27;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:28;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:29;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:30;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:31;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:32;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:33;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:34;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:35;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:36;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:37;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:38;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:39;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:40;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:41;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:42;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:43;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:44;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:45;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:66;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:67;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:68;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:69;blob_range:[NO_BLOB:0:9160];;column_id:9;chunk_idx:70;blob_range:[NO_BLOB:0:9160];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:1
2;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9056];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:73;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:74;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:75;blob_range:[NO_BLOB:0:9064];;column_id:10;chunk_idx:76;blob_range:[NO_BLOB:0:9064];;colu
mn_id:10;chunk_idx:77;blob_range:[NO_BLOB:0:8840];;column_id:10;chunk_idx:78;blob_range:[NO_BLOB:0:8064];;column_id:10;chunk_idx:79;blob_range:[NO_BLOB:0:8064];;;;switched=(portion_id:64;path_id:9438184000001;records_count:80000;schema_version:1;level:0;cs:plan_step=1750863591879;tx_id=128;;wi:27;;column_size:6817016;index_size:0;meta:(()););; 2025-06-25T15:03:39.868002Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=92542e04-51d511f0-93407530-1b8949b2;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=92542e04-51d511f0-93407530-1b8949b2;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:128:2158];task_id=92542e04-51d511f0-93407530-1b8949b2;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=1; 2025-06-25T15:03:39.871687Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-25T15:03:39.876825Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=write_controller.h:66;event=IWriteController aborted;reason=TTxWriteDraft aborted before complete; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=compacted_blob_constructor.cpp:47;event=TCompactedWriteController::DoAbort;reason=TTxWriteDraft aborted before complete; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TCompactedWriteController destructed with WriteIndexEv and WriteIndexEv->IndexChanges;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/hot' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/hot' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeRightBorder-EvWrite [GOOD] Test command err: 2025-06-25T15:02:02.196723Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:02.196863Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:02.196911Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000dc6/r3tmp/tmpQzw4dX/pdisk_1.dat 2025-06-25T15:02:02.504369Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:02.507699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:02.547902Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:02.553072Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863719392126 != 1750863719392130 2025-06-25T15:02:02.598972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:02.599175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:02.610706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:02.691146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.727507Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:02.728603Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:02.729097Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:02:02.729335Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:02.771045Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:02.771750Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:02.771856Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:02.773536Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:02:02.773604Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:02.773659Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:02.774007Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:02.774136Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:02.774203Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:02:02.784954Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:02.820333Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:02.820556Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:02.820663Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:02:02.820702Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:02.820737Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:02.820783Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:02.821038Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.821088Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.821476Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:02.821572Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:02.821631Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:02.821692Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:02.821734Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:02.821770Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:02.821804Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:02.821834Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:02.821896Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:02.822338Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.822380Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.822425Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:02:02.822510Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:02:02.822547Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:02.822681Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:02.823084Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:02.823151Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:02.823254Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:02.823311Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:02:02.823350Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:02:02.823379Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:02:02.823410Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.823704Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:02.823740Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:02:02.823768Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:02:02.823801Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.823869Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:02:02.823901Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:02:02.823941Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:02:02.823976Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:02:02.824000Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:02:02.825380Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:02:02.825426Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:02:02.836165Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:02:02.836250Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.836285Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.836342Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... hard_pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 3500:281474976715666 keys extracted: 0 2025-06-25T15:03:41.097248Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-25T15:03:41.097271Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit LoadTxDetails 2025-06-25T15:03:41.097292Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:41.097313Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-25T15:03:41.097344Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:281474976715666] is the new logically complete end at 72075186224037889 2025-06-25T15:03:41.097388Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:281474976715666] is the new logically incomplete end at 72075186224037889 2025-06-25T15:03:41.097424Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:281474976715666] at 72075186224037889 2025-06-25T15:03:41.097463Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-25T15:03:41.097488Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:41.097507Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CreateVolatileSnapshot 2025-06-25T15:03:41.097526Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CreateVolatileSnapshot 2025-06-25T15:03:41.097599Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is ExecutedNoMoreRestarts 2025-06-25T15:03:41.097627Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on 
unit CreateVolatileSnapshot 2025-06-25T15:03:41.097659Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit DropVolatileSnapshot 2025-06-25T15:03:41.097692Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit DropVolatileSnapshot 2025-06-25T15:03:41.097710Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-25T15:03:41.097730Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit DropVolatileSnapshot 2025-06-25T15:03:41.097747Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CompleteOperation 2025-06-25T15:03:41.097770Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-25T15:03:41.097865Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is DelayComplete 2025-06-25T15:03:41.097885Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit CompleteOperation 2025-06-25T15:03:41.097917Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CompletedOperations 2025-06-25T15:03:41.097951Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CompletedOperations 2025-06-25T15:03:41.097976Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-25T15:03:41.097995Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit CompletedOperations 2025-06-25T15:03:41.098014Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715666] at 72075186224037889 has finished 2025-06-25T15:03:41.098041Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:41.098067Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-25T15:03:41.098094Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-25T15:03:41.098123Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-25T15:03:41.109253Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-25T15:03:41.109440Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:41.109535Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037888 on unit CompleteOperation 2025-06-25T15:03:41.109654Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037888 at tablet 72075186224037888 
send result to client [15:1029:2808], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:41.109764Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:41.110043Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-06-25T15:03:41.110090Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:03:41.110121Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-25T15:03:41.110166Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037889 at tablet 72075186224037889 send result to client [15:1029:2808], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:41.110213Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:41.112104Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [15:554:2480], Recipient [15:625:2530]: NKikimrTxDataShard.TEvRead ReadId: 3 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 1011121314 ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-25T15:03:41.112734Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:03:41.112912Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-25T15:03:41.113092Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:03:41.113201Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:03:41.113299Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:41.113375Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:03:41.113441Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-25T15:03:41.113521Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:03:41.113555Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:41.113584Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:03:41.113614Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:03:41.113812Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 3 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 
1011121314 ResultFormat: FORMAT_ARROW } 2025-06-25T15:03:41.114335Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 1011121314, counter# 18446744073709551615 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T15:03:41.114448Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/281474976715666 2025-06-25T15:03:41.114550Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[15:554:2480], 3} after executionsCount# 1 2025-06-25T15:03:41.114664Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[15:554:2480], 3} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:41.114980Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[15:554:2480], 3} finished in read 2025-06-25T15:03:41.115131Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:03:41.115168Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:03:41.115197Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:41.115225Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:41.115280Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:03:41.115304Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:41.115345Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-25T15:03:41.115412Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:03:41.115619Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips-EvWrite [GOOD] >> AnalyzeColumnshard::AnalyzeMultiOperationId |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> AnalyzeColumnshard::AnalyzeServerless >> test_select.py::TestDML::test_as_table |93.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> DataShardReadIteratorBatchMode::MultipleRanges [GOOD] >> DataShardReadIteratorBatchMode::SelectingColumns ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips-EvWrite [GOOD] Test command err: 2025-06-25T15:02:01.815422Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:01.815575Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:01.815627Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000df6/r3tmp/tmplWDwvE/pdisk_1.dat 2025-06-25T15:02:02.203049Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:02.215197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:02.259027Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:02.267982Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863719121131 != 1750863719121135 2025-06-25T15:02:02.313416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:02.313523Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:02.325340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:02.419605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.465123Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:02.466232Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:02.466659Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:02:02.466907Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:02.512506Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:02.513254Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:02.513385Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:02.515076Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:02:02.515160Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:02.515220Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:02.515533Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:02.515669Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:02.515751Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:02:02.526486Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:02.551236Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:02.551454Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:02.551564Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:02:02.551615Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:02.551661Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:02.551712Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:02.551939Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.551988Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.552894Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:02.553014Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:02.553086Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:02.553128Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:02.553186Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:02.553242Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:02.553283Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:02.553315Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:02.553378Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:02.553837Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.553894Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.553939Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:02:02.554032Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:02:02.554071Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:02.554188Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:02.554424Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:02.554485Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:02.554572Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:02.554628Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:02:02.554684Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:02:02.554738Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:02:02.554776Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.555077Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:02.555119Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:02:02.555151Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:02:02.555190Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.555251Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:02:02.555280Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:02:02.555310Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:02:02.555338Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:02:02.555365Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:02:02.556667Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:02:02.556736Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:02:02.567466Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:02:02.567531Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.567568Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.567627Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 3500:281474976715666 keys extracted: 0 2025-06-25T15:03:43.160610Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037888 is Executed 2025-06-25T15:03:43.160635Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037888 executing on unit LoadTxDetails 2025-06-25T15:03:43.160665Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:43.160691Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:03:43.160730Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:281474976715666] is the new logically complete end at 72075186224037888 2025-06-25T15:03:43.160770Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:281474976715666] is the new logically incomplete end at 72075186224037888 2025-06-25T15:03:43.160815Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:281474976715666] at 72075186224037888 2025-06-25T15:03:43.160871Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037888 is Executed 2025-06-25T15:03:43.160895Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:43.160920Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037888 to execution unit CreateVolatileSnapshot 2025-06-25T15:03:43.160946Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037888 on unit CreateVolatileSnapshot 2025-06-25T15:03:43.161054Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:03:43.161082Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037888 executing on unit 
CreateVolatileSnapshot 2025-06-25T15:03:43.161127Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037888 to execution unit DropVolatileSnapshot 2025-06-25T15:03:43.161168Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037888 on unit DropVolatileSnapshot 2025-06-25T15:03:43.161193Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037888 is Executed 2025-06-25T15:03:43.161220Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037888 executing on unit DropVolatileSnapshot 2025-06-25T15:03:43.161244Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037888 to execution unit CompleteOperation 2025-06-25T15:03:43.161270Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037888 on unit CompleteOperation 2025-06-25T15:03:43.161701Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037888 is DelayComplete 2025-06-25T15:03:43.161764Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037888 executing on unit CompleteOperation 2025-06-25T15:03:43.161810Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:43.161853Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:43.161892Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037888 is Executed 2025-06-25T15:03:43.161918Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:43.161948Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715666] at 72075186224037888 has finished 2025-06-25T15:03:43.161995Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:43.162038Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T15:03:43.162074Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:03:43.162115Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:03:43.177154Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-06-25T15:03:43.177313Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:03:43.177411Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-25T15:03:43.177535Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037889 at tablet 72075186224037889 send 
result to client [15:1029:2808], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:43.177640Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:03:43.177971Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-25T15:03:43.178019Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:03:43.178047Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037888 on unit CompleteOperation 2025-06-25T15:03:43.178090Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037888 at tablet 72075186224037888 send result to client [15:1029:2808], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:03:43.178127Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:03:43.179902Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [15:554:2480], Recipient [15:625:2530]: NKikimrTxDataShard.TEvRead ReadId: 10 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 1011121314 ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-25T15:03:43.180102Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:03:43.180233Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-25T15:03:43.180408Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:03:43.180500Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:03:43.180569Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:43.180630Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:03:43.180675Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-25T15:03:43.180741Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:03:43.180773Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:43.180797Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:03:43.180823Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:03:43.180984Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 10 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 
1011121314 ResultFormat: FORMAT_ARROW } 2025-06-25T15:03:43.181515Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 1011121314, counter# 18446744073709551615 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T15:03:43.181613Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/281474976715666 2025-06-25T15:03:43.181701Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[15:554:2480], 10} after executionsCount# 1 2025-06-25T15:03:43.181790Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[15:554:2480], 10} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:43.182059Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[15:554:2480], 10} finished in read 2025-06-25T15:03:43.182182Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:03:43.182214Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:03:43.182242Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:43.182288Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:43.182344Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-25T15:03:43.182368Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:43.182408Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-25T15:03:43.182474Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:03:43.182688Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 >> TestKinesisHttpProxy::CreateStreamWithDifferentRetentions [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v1] [GOOD] >> TestKinesisHttpProxy::CreateDeleteStream >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] >> AnalyzeColumnshard::AnalyzeRebootSaBeforeSave >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc-UseSink [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> AnalyzeColumnshard::AnalyzeAnalyzeOneColumnTableSpecificColumns >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeAggregate |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[upsert] [GOOD] Test command err: AAA 
/home/runner/.ya/build/build_root/yft8/0011c3/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk22/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.upsert/audit.txt 2025-06-25T15:03:35.807933Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:03:35.807882Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-06-25T15:03:35.737402Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc-UseSink [GOOD] Test command err: 2025-06-25T15:02:01.815396Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:01.815562Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:01.815617Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e09/r3tmp/tmpMeQLit/pdisk_1.dat 2025-06-25T15:02:02.203194Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:02.215959Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:02.276360Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:02.281905Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863719121081 != 1750863719121085 2025-06-25T15:02:02.327656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:02.327829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:02.339399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:02.422070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.465099Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:02.466144Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:02.466675Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:02:02.467008Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:02.505180Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:02.505974Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:02.506078Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:02.507967Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:02:02.508072Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:02.508138Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:02.510490Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:02.510684Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:02.510800Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:02:02.521611Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:02.552999Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:02.553177Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:02.553278Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:02:02.553316Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:02.553352Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:02.553392Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:02.553611Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.553685Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.554037Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:02.554112Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:02.554183Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:02.554249Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:02.554298Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:02.554333Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:02.554381Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:02.554412Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:02.554462Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:02.554841Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.554898Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.554942Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:02:02.555030Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:02:02.555066Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:02.555178Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:02.555353Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:02.555414Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:02.555486Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:02.555537Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:02:02.555601Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:02:02.555641Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:02:02.555672Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.555954Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:02.555992Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:02:02.556040Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:02:02.556083Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.556148Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:02:02.556185Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:02:02.556223Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:02:02.556263Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:02:02.556295Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:02:02.557694Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:02:02.557744Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:02:02.568623Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:02:02.568687Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.568715Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.568776Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... Columns: 4 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW } 2025-06-25T15:03:46.111140Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2025-06-25T15:03:46.111176Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[16:554:2480], 3} after executionsCount# 2 2025-06-25T15:03:46.111210Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[16:554:2480], 3} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:46.111350Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[16:554:2480], 3} finished in read 2025-06-25T15:03:46.111409Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-25T15:03:46.111436Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:03:46.111461Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:46.111486Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:46.111526Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-25T15:03:46.111546Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:46.111567Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 72075186224037888 has finished 2025-06-25T15:03:46.111593Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:03:46.111629Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T15:03:46.111698Z node 16 
:TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:03:46.111766Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:03:46.112502Z node 16 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=16&id=NWM2MDk3MGYtNDkzNTdhOWUtM2M1ZjI0Mi04ZGZmNDY1YQ==, workerId: [16:1102:2869], local sessions count: 0 2025-06-25T15:03:46.113941Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [16:554:2480], Recipient [16:627:2531]: NKikimrTxDataShard.TEvRead ReadId: 4 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-25T15:03:46.114117Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:03:46.114228Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit CheckRead 2025-06-25T15:03:46.114365Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-25T15:03:46.114453Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:03:46.114524Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:9] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:46.114579Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:03:46.114646Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:9] at 72075186224037888 2025-06-25T15:03:46.114712Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-25T15:03:46.114743Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:46.114764Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:9] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:03:46.114790Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:03:46.114933Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 4 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW } 2025-06-25T15:03:46.115280Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2025-06-25T15:03:46.115355Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[16:554:2480], 4} after executionsCount# 1 2025-06-25T15:03:46.115436Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[16:554:2480], 4} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 
18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:46.115668Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[16:554:2480], 4} finished in read 2025-06-25T15:03:46.115762Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-25T15:03:46.115789Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:03:46.115814Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:9] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:46.115839Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:46.115882Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-25T15:03:46.115906Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:46.115934Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:9] at 72075186224037888 has finished 2025-06-25T15:03:46.116002Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:03:46.116999Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [16:554:2480], Recipient [16:627:2531]: NKikimrTxDataShard.TEvRead ReadId: 5 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-25T15:03:46.117183Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:03:46.117315Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit CheckRead 2025-06-25T15:03:46.117460Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-25T15:03:46.117532Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:03:46.117595Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:10] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:46.117653Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:03:46.117709Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:10] at 72075186224037888 2025-06-25T15:03:46.117771Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-25T15:03:46.117804Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:46.117828Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:10] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:03:46.117852Z node 16 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:03:46.117995Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 5 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW } 2025-06-25T15:03:46.118347Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3000/18446744073709551615 2025-06-25T15:03:46.118424Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[16:554:2480], 5} after executionsCount# 1 2025-06-25T15:03:46.118506Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[16:554:2480], 5} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:03:46.118738Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[16:554:2480], 5} finished in read 2025-06-25T15:03:46.118825Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-25T15:03:46.118851Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:03:46.118877Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:10] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:03:46.118901Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:03:46.118943Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-25T15:03:46.118965Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:03:46.119003Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:10] at 72075186224037888 has finished 2025-06-25T15:03:46.119072Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 >> DataShardSnapshots::UncommittedWriteRestartDuringCommit [GOOD] >> DataShardSnapshots::UncommittedWriteRestartDuringCommitThenBulkErase |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] [GOOD] >> test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] [FAIL] >> KqpScripting::StreamExecuteYqlScriptWriteCancelAfterBruteForced [GOOD] >> KqpScripting::StreamOperationTimeout |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> test_sql_streaming.py::test[suites-ReadTopicWithSchema-default.txt] [FAIL] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseTwoTablesServerless |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] 
[FAIL] >> TraverseColumnShard::TraverseColumnTableHiveDistributionAbsentNodes >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAnalyzeTableResponse |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-ReadTopicWithSchema-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopNoKey-default.txt] [FAIL] >> test_sql_streaming.py::test[suites-GroupByHopPercentile-default.txt] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> test_sql_streaming.py::test[suites-ReadTopicGroupWriteToSolomon-default.txt] [FAIL] >> AnalyzeColumnshard::Analyze >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit-UseSink [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-ReadTopicGroupWriteToSolomon-default.txt] [FAIL] >> test_select.py::TestDML::test_as_table [GOOD] >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeResolve >> TestKinesisHttpProxy::CreateDeleteStream [GOOD] >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAggregate >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumer |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit-UseSink [GOOD] Test command err: 2025-06-25T15:02:05.029466Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:05.029636Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:05.029691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001bc7/r3tmp/tmpMknZGN/pdisk_1.dat 2025-06-25T15:02:05.376382Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:05.387086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:05.435365Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:05.447723Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863722361978 != 1750863722361982 2025-06-25T15:02:05.494540Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T15:02:05.495573Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T15:02:05.496103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:05.496220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:05.507858Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:05.594175Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T15:02:05.594248Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T15:02:05.596830Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T15:02:05.741099Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T15:02:05.741235Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T15:02:05.741876Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T15:02:05.741963Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T15:02:05.742333Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T15:02:05.742551Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T15:02:05.742710Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T15:02:05.743015Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T15:02:05.744589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:05.745702Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T15:02:05.745778Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T15:02:05.778143Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:05.779400Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:05.779886Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:02:05.780157Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:05.828848Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:05.829595Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:05.829741Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:05.831423Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:02:05.831508Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:05.831578Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:05.831933Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T15:02:05.832073Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:05.832158Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:02:05.832610Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:05.866677Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:05.866876Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:05.866986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:02:05.867026Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:05.867056Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:05.867100Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:05.867335Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:05.867389Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:05.867741Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:05.867848Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:05.867917Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:05.867954Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:05.867994Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:05.868030Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:05.868063Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:05.868096Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:05.868134Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:05.868571Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:05.868623Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:05.868660Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], 
sessionId# [0:0:0] 2025-06-25T15:02:05.868744Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:02:05.868793Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:05.868893Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:05.869113Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:05.869182Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:05.869265Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:05.869330Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15: ... r.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jyksvpk02nv7xe4bdmadwh0n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDZhZjM5OTgtNTAxOWU1NWEtOTAwOGMzNTMtZDEyMGM2ODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TEvRead: ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 4 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false 2025-06-25T15:03:50.165346Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [13:1598:3307], Recipient [13:750:2618]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 4 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false RangesSize: 3 2025-06-25T15:03:50.165591Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-06-25T15:03:50.165690Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037889 CompleteEdge# v8027/281474976715670 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-25T15:03:50.165777Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037889 changed HEAD read to non-repeatable v9000/18446744073709551615 2025-06-25T15:03:50.165896Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CheckRead 2025-06-25T15:03:50.166093Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-25T15:03:50.166171Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CheckRead 2025-06-25T15:03:50.166242Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:50.166311Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-25T15:03:50.166380Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 
72075186224037889 2025-06-25T15:03:50.166452Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-25T15:03:50.166484Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:50.166509Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit ExecuteRead 2025-06-25T15:03:50.166536Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit ExecuteRead 2025-06-25T15:03:50.166687Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 4 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false } 2025-06-25T15:03:50.167134Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[13:1598:3307], 0} after executionsCount# 1 2025-06-25T15:03:50.167250Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[13:1598:3307], 0} sends rowCount# 2, bytes# 64, quota rows left# 32765, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 3, firstUnprocessed# 0 2025-06-25T15:03:50.167384Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[13:1598:3307], 0} finished in read 2025-06-25T15:03:50.167500Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-25T15:03:50.167532Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit ExecuteRead 2025-06-25T15:03:50.167560Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit CompletedOperations 2025-06-25T15:03:50.167589Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CompletedOperations 2025-06-25T15:03:50.167667Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-25T15:03:50.167698Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CompletedOperations 2025-06-25T15:03:50.167750Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037889 has finished 2025-06-25T15:03:50.167827Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-25T15:03:50.168021Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-25T15:03:50.169448Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [13:1598:3307], Recipient [13:750:2618]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:03:50.169531Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 11 } }, { items { uint32_value: 2 } items { uint32_value: 21 } } 2025-06-25T15:03:50.704522Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# 
[13:61:2108] Handle TEvExecuteKqpTransaction 2025-06-25T15:03:50.704636Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [13:61:2108] TxId# 281474976715672 ProcessProposeKqpTransaction 2025-06-25T15:03:50.706013Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jyksvq5426x9fsrv0p5dkp9e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=N2Y3NDk3MWYtYzNjNzQyYTctYWRhMjI5YzAtYWY5ODg0M2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TEvRead: ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 7 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false 2025-06-25T15:03:50.708664Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [13:1629:3332], Recipient [13:1022:2836]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 7 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false RangesSize: 3 2025-06-25T15:03:50.708887Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037891, FollowerId 0 2025-06-25T15:03:50.708997Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037891 CompleteEdge# v8027/281474976715670 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-25T15:03:50.709077Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037891 changed HEAD read to non-repeatable v9000/18446744073709551615 2025-06-25T15:03:50.709190Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037891 on unit CheckRead 2025-06-25T15:03:50.709381Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037891 is Executed 2025-06-25T15:03:50.709451Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037891 executing on unit CheckRead 2025-06-25T15:03:50.709523Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037891 to execution unit BuildAndWaitDependencies 2025-06-25T15:03:50.709589Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037891 on unit BuildAndWaitDependencies 2025-06-25T15:03:50.709656Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037891 2025-06-25T15:03:50.709717Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037891 is Executed 2025-06-25T15:03:50.709745Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037891 executing on unit BuildAndWaitDependencies 2025-06-25T15:03:50.709768Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037891 to execution unit ExecuteRead 2025-06-25T15:03:50.709791Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037891 on unit ExecuteRead 2025-06-25T15:03:50.709943Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037891 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 7 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 
5242880 Reverse: false } 2025-06-25T15:03:50.710361Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037891 Complete read# {[13:1629:3332], 0} after executionsCount# 1 2025-06-25T15:03:50.710460Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037891 read iterator# {[13:1629:3332], 0} sends rowCount# 2, bytes# 64, quota rows left# 32765, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 3, firstUnprocessed# 0 2025-06-25T15:03:50.710587Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037891 read iterator# {[13:1629:3332], 0} finished in read 2025-06-25T15:03:50.710712Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037891 is Executed 2025-06-25T15:03:50.710741Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037891 executing on unit ExecuteRead 2025-06-25T15:03:50.710765Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037891 to execution unit CompletedOperations 2025-06-25T15:03:50.710790Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037891 on unit CompletedOperations 2025-06-25T15:03:50.710846Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037891 is Executed 2025-06-25T15:03:50.710870Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037891 executing on unit CompletedOperations 2025-06-25T15:03:50.710902Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037891 has finished 2025-06-25T15:03:50.710962Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037891 2025-06-25T15:03:50.711140Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037891 2025-06-25T15:03:50.712265Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [13:1629:3332], Recipient [13:1022:2836]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:03:50.712374Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037891 ReadCancel: { ReadId: 0 } { items { uint32_value: 10 } items { uint32_value: 110 } }, { items { uint32_value: 20 } items { uint32_value: 210 } } >> AnalyzeColumnshard::AnalyzeRebootColumnShard >> DataShardReadIteratorBatchMode::SelectingColumns [GOOD] >> DataShardReadIteratorBatchMode::ShouldHandleReadAck |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseOneTableServerless [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> KqpScripting::StreamOperationTimeout [GOOD] |93.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseOneTableServerless [GOOD] Test command err: 2025-06-25T15:03:43.984260Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] 
[TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:43.984717Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:43.984840Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f97/r3tmp/tmpt2q3Wj/pdisk_1.dat 2025-06-25T15:03:44.332058Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12671, node 1 2025-06-25T15:03:44.571070Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:44.571127Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:44.571156Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:44.571756Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:44.574193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:44.679063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:44.679200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:44.700642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15574 2025-06-25T15:03:45.270032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:48.308245Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:48.349169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:48.349287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:48.391049Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:48.393326Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:48.615031Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:48.654356Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:48.655194Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:48.655839Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:48.655986Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:48.656182Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:48.656277Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:48.656413Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:48.656479Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:48.656569Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:48.895929Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:48.896065Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:48.934094Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:49.087220Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:49.138224Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:49.138330Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:49.178850Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:49.179091Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:49.179298Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:49.179372Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:49.179428Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:49.179478Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:49.179556Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:49.179629Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:49.180165Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:49.205711Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:49.205829Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:49.221329Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:49.228862Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:49.229207Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:49.240048Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-25T15:03:49.259271Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:49.259332Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:49.259399Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-25T15:03:49.271322Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:49.279481Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:49.279616Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:49.481314Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:49.635518Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:49.694173Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T15:03:50.248426Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:50.276350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:50.844091Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:50.994709Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7894: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-25T15:03:50.994780Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7910: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-25T15:03:50.994869Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:2505:2905], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-25T15:03:50.995921Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2507:2907] 2025-06-25T15:03:50.996157Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2507:2907], schemeshard id = 72075186224037899 2025-06-25T15:03:52.095303Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2629:3196], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:52.095417Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:52.109507Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72075186224037899, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:52.416548Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2935:3245], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:52.460535Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:52.461859Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2940:3249]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:03:52.462039Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:03:52.462200Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 18446744073709551615 ] 2025-06-25T15:03:52.462272Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2943:3252] 2025-06-25T15:03:52.462339Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2943:3252] 2025-06-25T15:03:52.462876Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2944:3140] 2025-06-25T15:03:52.463225Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2943:3252], server id = [2:2944:3140], tablet id = 72075186224037894, status = OK 2025-06-25T15:03:52.463418Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2944:3140], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:03:52.463487Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-25T15:03:52.463681Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:03:52.463747Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2940:3249], StatRequests.size() = 1 2025-06-25T15:03:52.481661Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2948:3256], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:52.481797Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:52.482088Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2953:3261], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:52.487938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:52.620479Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:03:52.620585Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:03:52.665048Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2943:3252], schemeshard count = 1 2025-06-25T15:03:52.937267Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2955:3263], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T15:03:53.144544Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:3074:3332] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:53.158171Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:3097:3348]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:03:53.158388Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:03:53.158429Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:3097:3348], StatRequests.size() = 1 2025-06-25T15:03:53.255300Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyksvsay9eyssz92mzqpdg3e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDcwN2NiYWItZjRjNzNlMzItMWUzYzQ5ODctZjk1MGRmMjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:03:53.300838Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:3139:3190]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:03:53.303084Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:03:53.303130Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:03:53.303348Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:03:53.303389Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-25T15:03:53.303426Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:03:53.315085Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-25T15:03:53.315529Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/yql/unittest >> KqpScripting::StreamOperationTimeout [GOOD] Test command err: Trying to start YDB, gRPC: 23090, MsgBus: 25584 2025-06-25T15:02:39.258570Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902586741095933:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:39.258654Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007b2/r3tmp/tmpgYf6lp/pdisk_1.dat 2025-06-25T15:02:39.625505Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:39.628478Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: 
Notification cookie mismatch for subscription [1:7519902586741095911:2080] 1750863759247668 != 1750863759247671 TServer::EnableGrpc on GrpcPort 23090, node 1 2025-06-25T15:02:39.660688Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:39.660763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:39.662358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:39.673129Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:02:39.673156Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:02:39.673164Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:02:39.673292Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25584 TClient is connected to server localhost:25584 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:02:40.227695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:02:40.239011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:02:40.251514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:02:40.269186Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:02:40.374864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:02:40.524767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:02:40.583511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.195039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902599625999454:2366], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.195164Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.476059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.509327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.536375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.565307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.633293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.677810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.707565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:42.770294Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902599626000115:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.770386Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.773200Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902599626000120:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:02:42.776825Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:02:42.787963Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902599626000122:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:02:42.874193Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902599626000173:3423] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:02:44.160249Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863764181, txId: 281474976715673] shutting down 2025-06-25T15:02:44.258458Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902586741095933:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:02:44.258536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:02:44.459643Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding sna ... otManager: discarding snapshot; our snapshot: [step: 1750863826719, txId: 281474976716261] shutting down 2025-06-25T15:03:47.048692Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863827069, txId: 281474976716264] shutting down 2025-06-25T15:03:47.424420Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863827433, txId: 281474976716267] shutting down 2025-06-25T15:03:47.845082Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750863827860, txId: 281474976716270] shutting down Trying to start YDB, gRPC: 15933, MsgBus: 27377 2025-06-25T15:03:48.944555Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902882540863584:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:03:48.944607Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007b2/r3tmp/tmpWaWpMe/pdisk_1.dat 2025-06-25T15:03:49.100665Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15933, node 2 2025-06-25T15:03:49.120590Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:49.120686Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:49.122948Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:49.159341Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:49.159365Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:49.159372Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-25T15:03:49.159532Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27377 TClient is connected to server localhost:27377 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:03:49.639454Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:03:49.660282Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:49.751198Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:49.929176Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:49.975831Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:50.007998Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:03:52.305651Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902899720734396:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:52.305724Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:52.358639Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:52.384191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:52.408925Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:52.454810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:52.480111Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:52.546704Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:52.579419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:52.659579Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902899720735057:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:52.659651Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:52.659716Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519902899720735062:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:52.662718Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:52.671110Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7519902899720735064:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:03:52.760287Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7519902899720735115:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:53.944782Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7519902882540863584:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:03:53.944904Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> AnalyzeColumnshard::AnalyzeStatus >> AnalyzeColumnshard::AnalyzeRebootSaBeforeResolve >> AnalyzeDatashard::AnalyzeOneTable >> DataShardSnapshots::UncommittedWriteRestartDuringCommitThenBulkErase [GOOD] >> DataShardSnapshots::UncommittedChangesRenameTable-UseSink |93.1%| [TA] $(B)/ydb/core/kqp/ut/yql/test-results/unittest/{meta.json ... results_accumulator.log} |93.2%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/yql/test-results/unittest/{meta.json ... results_accumulator.log} |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseOneTable >> TraverseColumnShard::TraverseColumnTableHiveDistributionZeroNodes ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/0011bf/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk12/testing_out_stuff/test_auditlog.py.test_dml_requests_arent_logged_when_anonymous/audit.txt >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumer [GOOD] >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumerWithFlag |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_as_table [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> RetryPolicy::TWriteSession_SeqNoShift [GOOD] >> RetryPolicy::RetryWithBatching |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> DataShardReadIteratorBatchMode::ShouldHandleReadAck [GOOD] >> TraverseColumnShard::TraverseColumnTable >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIteratorBatchMode::ShouldHandleReadAck [GOOD] Test command err: 2025-06-25T15:02:04.523290Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:04.523875Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:04.524244Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T15:02:04.524613Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:04.524812Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:04.524943Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000e14/r3tmp/tmpV0wqT4/pdisk_1.dat 2025-06-25T15:02:04.893509Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:05.041194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:05.160927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:05.161103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:05.167015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:05.167145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:05.182723Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:02:05.183541Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:05.184058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:05.493613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:05.585314Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [2:1179:2341], Recipient [2:1205:2353]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:05.590037Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [2:1179:2341], Recipient [2:1205:2353]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:05.590541Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:1205:2353] 2025-06-25T15:02:05.590807Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:05.599984Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [2:1179:2341], Recipient [2:1205:2353]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:05.642453Z node 2 :TX_DATASHARD DEBUG: 
datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:05.642638Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:05.644296Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:02:05.644396Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:05.644449Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:05.644801Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:05.645144Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:05.645232Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [2:1229:2353] in generation 1 2025-06-25T15:02:05.669200Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:05.705362Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:05.705572Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:05.705683Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:1232:2370] 2025-06-25T15:02:05.705715Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:05.705747Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:05.705779Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:05.706033Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:1205:2353], Recipient [2:1205:2353]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:05.706085Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:05.706337Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:05.706421Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:05.706516Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:05.706552Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:05.706589Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:05.706626Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:05.706654Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:05.706684Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:05.706738Z node 
2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:05.754135Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [2:1236:2371], Recipient [2:1205:2353]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:05.754219Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:05.754276Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:1191:2734], serverId# [2:1236:2371], sessionId# [0:0:0] 2025-06-25T15:02:05.754651Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:768:2431], Recipient [2:1236:2371] 2025-06-25T15:02:05.754703Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:05.754854Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:05.755101Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:05.755170Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:05.755284Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:05.755350Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:02:05.755412Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:02:05.755449Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:02:05.755484Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:05.755786Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:05.755825Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:02:05.755861Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:02:05.755894Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:05.755939Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:02:05.755969Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:02:05.756002Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:02:05.756035Z node 2 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:02:05.756070Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-25T15:02:05.758146Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, ... 72075186224037889 to execution unit CreateVolatileSnapshot 2025-06-25T15:04:00.415016Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3000:281474976715664] at 72075186224037889 on unit CreateVolatileSnapshot 2025-06-25T15:04:00.415110Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3000:281474976715664] at 72075186224037889 is ExecutedNoMoreRestarts 2025-06-25T15:04:00.415137Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3000:281474976715664] at 72075186224037889 executing on unit CreateVolatileSnapshot 2025-06-25T15:04:00.415174Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3000:281474976715664] at 72075186224037889 to execution unit DropVolatileSnapshot 2025-06-25T15:04:00.415208Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3000:281474976715664] at 72075186224037889 on unit DropVolatileSnapshot 2025-06-25T15:04:00.415234Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3000:281474976715664] at 72075186224037889 is Executed 2025-06-25T15:04:00.415260Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3000:281474976715664] at 72075186224037889 executing on unit DropVolatileSnapshot 2025-06-25T15:04:00.415284Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3000:281474976715664] at 72075186224037889 to execution unit CompleteOperation 2025-06-25T15:04:00.415310Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3000:281474976715664] at 72075186224037889 on unit CompleteOperation 2025-06-25T15:04:00.415444Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3000:281474976715664] at 72075186224037889 is DelayComplete 2025-06-25T15:04:00.415476Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3000:281474976715664] at 72075186224037889 executing on unit CompleteOperation 2025-06-25T15:04:00.415511Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3000:281474976715664] at 72075186224037889 to execution unit CompletedOperations 2025-06-25T15:04:00.415548Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3000:281474976715664] at 72075186224037889 on unit CompletedOperations 2025-06-25T15:04:00.415597Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3000:281474976715664] at 72075186224037889 is Executed 2025-06-25T15:04:00.415625Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3000:281474976715664] at 72075186224037889 executing on unit CompletedOperations 2025-06-25T15:04:00.415653Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3000:281474976715664] at 72075186224037889 has finished 2025-06-25T15:04:00.415693Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:04:00.415728Z node 16 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-25T15:04:00.415784Z node 16 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-25T15:04:00.415823Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-25T15:04:00.437368Z node 16 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3000} 2025-06-25T15:04:00.437553Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:04:00.437645Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3000:281474976715664] at 72075186224037888 on unit CompleteOperation 2025-06-25T15:04:00.437769Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3000 : 281474976715664] from 72075186224037888 at tablet 72075186224037888 send result to client [16:996:2782], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:04:00.437873Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:04:00.438364Z node 16 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3000} 2025-06-25T15:04:00.438420Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:04:00.438449Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3000:281474976715664] at 72075186224037889 on unit CompleteOperation 2025-06-25T15:04:00.438489Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3000 : 281474976715664] from 72075186224037889 at tablet 72075186224037889 send result to client [16:996:2782], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:04:00.438529Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:04:00.440264Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [16:554:2480], Recipient [16:627:2531]: NKikimrTxDataShard.TEvRead ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 281474976715664 } ResultFormat: FORMAT_ARROW MaxRows: 1 Hints: 1 RangesSize: 1 2025-06-25T15:04:00.440536Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:04:00.440652Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CheckRead 2025-06-25T15:04:00.440827Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-25T15:04:00.440907Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:04:00.440980Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:04:00.441048Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:04:00.441111Z node 16 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037888 2025-06-25T15:04:00.441181Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-25T15:04:00.441213Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:04:00.441236Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:04:00.441265Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:04:00.441434Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 281474976715664 } ResultFormat: FORMAT_ARROW MaxRows: 1 Hints: 1 } 2025-06-25T15:04:00.441511Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3000/281474976715664 2025-06-25T15:04:00.441798Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-25T15:04:00.441827Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:04:00.441856Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:04:00.441886Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:04:00.441934Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-25T15:04:00.441958Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:04:00.441994Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037888 has finished 2025-06-25T15:04:00.442061Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:04:00.442241Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T15:04:00.443144Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553236, Sender [16:1012:2796], Recipient [16:627:2531]: NKikimr::TEvDataShard::TEvReadScanStarted 2025-06-25T15:04:00.444097Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553218, Sender [16:554:2480], Recipient [16:627:2531]: NKikimrTxDataShard.TEvReadAck ReadId: 1 SeqNo: 1 MaxRows: 2 MaxBytes: 10000 2025-06-25T15:04:00.444220Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1193: 72075186224037888 forwarding NKikimr::TEvDataShard::TEvReadAck to scan actor [16:1012:2796] 2025-06-25T15:04:00.445418Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553218, Sender [16:554:2480], Recipient [16:627:2531]: NKikimrTxDataShard.TEvReadAck ReadId: 1 SeqNo: 2 MaxRows: 100 MaxBytes: 10000 
2025-06-25T15:04:00.445537Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1193: 72075186224037888 forwarding NKikimr::TEvDataShard::TEvReadAck to scan actor [16:1012:2796] 2025-06-25T15:04:00.446279Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553237, Sender [16:1012:2796], Recipient [16:627:2531]: NKikimr::TEvDataShard::TEvReadScanFinished 2025-06-25T15:04:00.446462Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [16:627:2531], Recipient [16:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:04:00.446520Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:04:00.446649Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:04:00.446721Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:04:00.446786Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T15:04:00.446853Z node 16 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:04:00.446919Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:04:00.446992Z node 16 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:04:00.447083Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 >> TraverseDatashard::TraverseTwoTablesServerless [GOOD] >> DataShardSnapshots::UncommittedChangesRenameTable-UseSink [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseTwoTablesServerless [GOOD] Test command err: 2025-06-25T15:03:51.993218Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:51.993603Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:51.993686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f6f/r3tmp/tmpzpCtXG/pdisk_1.dat 2025-06-25T15:03:52.298525Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23812, node 1 2025-06-25T15:03:52.526836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:52.526892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:52.526922Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:52.527464Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:52.529681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:52.627456Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:52.627609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:52.643149Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12375 2025-06-25T15:03:53.190327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:56.213090Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:56.242109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:56.242208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:56.279729Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:56.281797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:56.485519Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:56.520773Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.521280Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.521994Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.522219Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.522551Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.522676Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.522817Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.522907Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.523010Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.712490Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:56.712572Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:56.725406Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:56.893044Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:56.940080Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:56.940183Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:56.976737Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:56.976891Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:56.977040Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:56.977104Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:56.977158Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:56.977223Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:56.977278Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:56.977329Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:56.977857Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:56.998104Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:56.998201Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:57.005160Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:57.010329Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:57.010607Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:57.017052Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-25T15:03:57.031409Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:57.031470Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:57.031535Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-25T15:03:57.041215Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:57.047632Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:57.047776Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:57.222940Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:57.381823Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:57.438883Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T15:03:57.951862Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:57.980360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:58.536066Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:58.656016Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7894: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-25T15:03:58.656076Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7910: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-25T15:03:58.656142Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:2501:2901], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-25T15:03:58.657230Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2503:2903] 2025-06-25T15:03:58.657825Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2503:2903], schemeshard id = 72075186224037899 2025-06-25T15:03:59.728581Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2625:3195], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:59.728769Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:59.745900Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72075186224037899, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:00.039328Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2927:3244], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:00.039477Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:00.084811Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2932:3248]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:04:00.084968Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:04:00.085140Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 18446744073709551615 ] 2025-06-25T15:04:00.085198Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2935:3251] 2025-06-25T15:04:00.085254Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2935:3251] 2025-06-25T15:04:00.085750Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2936:3133] 2025-06-25T15:04:00.085989Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2935:3251], server id = [2:2936:3133], tablet id = 72075186224037894, status = OK 2025-06-25T15:04:00.086172Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2936:3133], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:04:00.086235Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-25T15:04:00.086437Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:04:00.086505Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2932:3248], StatRequests.size() = 1 2025-06-25T15:04:00.104094Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2940:3255], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:00.104251Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:00.104605Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2945:3260], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:00.108763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:04:00.235965Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:04:00.236051Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:04:00.281003Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2935:3251], schemeshard count = 1 2025-06-25T15:04:00.532734Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2947:3262], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-25T15:04:00.758619Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:3067:3330] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:04:00.772883Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:3090:3346]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:04:00.773064Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:04:00.773118Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:3090:3346], StatRequests.size() = 1 2025-06-25T15:04:00.829287Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jyksw0s5dzbwhffd3fmmkbzh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzQ4OTQ0OWYtOGIyMTU0ZTctMWUyOWJhNWEtODYxODBiMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:04:00.916265Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72075186224037899, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:01.281193Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:3419:3407]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:04:01.281364Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:04:01.281404Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:3419:3407], StatRequests.size() = 1 2025-06-25T15:04:01.302070Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 4 ], ReplyToActorId[ [1:3428:3416]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:04:01.302303Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 4 ] 2025-06-25T15:04:01.302345Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 4, ReplyToActorId = [1:3428:3416], StatRequests.size() = 1 2025-06-25T15:04:01.336039Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jyksw1zmdqaqeaf52d0zep5g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2E2NTBiMTMtNjFlMDliMDgtYjM0MzgwNGMtMmExMmZjMmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:04:01.415203Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:3473:3386]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:04:01.417760Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:04:01.417818Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:04:01.418186Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:04:01.418234Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-25T15:04:01.418278Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:04:01.430851Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-25T15:04:01.431106Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 2025-06-25T15:04:01.431370Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:3497:3398]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:04:01.433543Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:04:01.433599Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:04:01.433889Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:04:01.433928Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-25T15:04:01.433966Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 3] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:04:01.436036Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 2 ], RowsCount[ 0 ] 2025-06-25T15:04:01.436284Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHopPercentile-default.txt] [FAIL] >> KqpErrors::ProposeError >> KqpErrors::ResolveTableError ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::UncommittedChangesRenameTable-UseSink [GOOD] Test command err: 2025-06-25T15:02:04.996863Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:04.997300Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:04.997360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b6c/r3tmp/tmpAdayJ6/pdisk_1.dat 2025-06-25T15:02:05.377444Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:05.385030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:05.438505Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:05.443852Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863722361795 != 1750863722361799 2025-06-25T15:02:05.489031Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T15:02:05.489889Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T15:02:05.491760Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:05.491868Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:05.504986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:05.593798Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T15:02:05.593919Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T15:02:05.596522Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T15:02:05.731478Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T15:02:05.731613Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T15:02:05.732253Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T15:02:05.732391Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T15:02:05.732753Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T15:02:05.732968Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T15:02:05.733145Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T15:02:05.733488Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T15:02:05.735158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:05.736329Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T15:02:05.736410Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T15:02:05.777940Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:05.779315Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:05.779838Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:02:05.780125Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:05.827363Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:05.828057Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:05.828220Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:05.829876Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:02:05.829948Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:05.830003Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:05.830335Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T15:02:05.830466Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:05.830536Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:02:05.841631Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:05.884532Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:05.884737Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:05.884819Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:02:05.884850Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:05.884875Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:05.884904Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:05.885085Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:05.885122Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:05.885381Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:05.885447Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:05.885483Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:05.885509Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:05.885536Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:05.885574Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:05.885597Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:05.885623Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:05.885660Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:05.885956Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:05.885998Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:05.886029Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], 
sessionId# [0:0:0] 2025-06-25T15:02:05.886081Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:02:05.886109Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:05.886223Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:05.886392Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:05.886450Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:05.886519Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:05.886559Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15: ... eId: 01jyksw39zaxn2kqmn8bt3sqhn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDQ1MzY4MDktZjhmZWYzYzItM2NmYjFmMmUtYzRjMzYyZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. State: WaitResolveState, Executing KQP transaction on shard: 72075186224037888, tasks: [], lockTxId: (empty maybe), locks: Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true } Op: Rollback, immediate: 1 2025-06-25T15:04:02.712406Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1832: ActorId: [13:944:2645] TxId: 281474976715665. Ctx: { TraceId: 01jyksw39zaxn2kqmn8bt3sqhn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDQ1MzY4MDktZjhmZWYzYzItM2NmYjFmMmUtYzRjMzYyZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ExecuteDatashardTransaction traceId.verbosity: 0 2025-06-25T15:04:02.712482Z node 13 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [13:944:2645] TxId: 281474976715665. Ctx: { TraceId: 01jyksw39zaxn2kqmn8bt3sqhn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDQ1MzY4MDktZjhmZWYzYzItM2NmYjFmMmUtYzRjMzYyZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 1, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-25T15:04:02.712523Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:135: ActorId: [13:944:2645] TxId: 281474976715665. Ctx: { TraceId: 01jyksw39zaxn2kqmn8bt3sqhn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDQ1MzY4MDktZjhmZWYzYzItM2NmYjFmMmUtYzRjMzYyZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, datashard 72075186224037888 not finished yet: Executing 2025-06-25T15:04:02.712570Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [13:944:2645] TxId: 281474976715665. Ctx: { TraceId: 01jyksw39zaxn2kqmn8bt3sqhn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDQ1MzY4MDktZjhmZWYzYzItM2NmYjFmMmUtYzRjMzYyZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: WaitResolveState, waiting for 0 compute actor(s) and 1 datashard(s): DS 72075186224037888 (Executing), 2025-06-25T15:04:02.712601Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [13:944:2645] TxId: 281474976715665. Ctx: { TraceId: 01jyksw39zaxn2kqmn8bt3sqhn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDQ1MzY4MDktZjhmZWYzYzItM2NmYjFmMmUtYzRjMzYyZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-25T15:04:02.712788Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [13:944:2645], Recipient [13:913:2729]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 944 RawX2: 55834577493 } TxBody: " \0018\001j3\010\001\032\'\n#\t\215\023\000\000\000\000\001\000\021\000\000\001\000\000\020\000\001\030\001 \000)\000\001\205\000\000\000\000\0010\0028\001 \003\"\006\020\0020\000@\n\220\001\000" TxId: 281474976715665 ExecLevel: 0 Flags: 8 2025-06-25T15:04:02.712818Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:04:02.712934Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [13:913:2729], Recipient [13:913:2729]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T15:04:02.712962Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T15:04:02.713014Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:04:02.713161Z node 13 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-25T15:04:02.713226Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit CheckDataTx 2025-06-25T15:04:02.713267Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-25T15:04:02.713291Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit CheckDataTx 2025-06-25T15:04:02.713311Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:04:02.713332Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:04:02.713361Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v500/281474976715663 IncompleteEdge# v{min} UnprotectedReadEdge# v400/18446744073709551615 ImmediateWriteEdge# v400/18446744073709551615 ImmediateWriteEdgeReplied# v1000/18446744073709551615 2025-06-25T15:04:02.713395Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:281474976715665] at 72075186224037888 2025-06-25T15:04:02.713420Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-25T15:04:02.713439Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for 
[0:281474976715665] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:04:02.713455Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit ExecuteKqpDataTx 2025-06-25T15:04:02.713477Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit ExecuteKqpDataTx 2025-06-25T15:04:02.713535Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:236: Operation [0:281474976715665] (execute_kqp_data_tx) at 72075186224037888 set memory limit 4193448 2025-06-25T15:04:02.713661Z node 13 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true 2025-06-25T15:04:02.713754Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:482: add locks to result: 0 2025-06-25T15:04:02.713825Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-25T15:04:02.713845Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit ExecuteKqpDataTx 2025-06-25T15:04:02.713863Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:04:02.713882Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-06-25T15:04:02.713924Z node 13 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715665 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-25T15:04:02.714009Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is DelayComplete 2025-06-25T15:04:02.714033Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:04:02.714054Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:04:02.714076Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:04:02.714109Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-25T15:04:02.714130Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:04:02.714150Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715665] at 72075186224037888 has finished 2025-06-25T15:04:02.714194Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:04:02.714217Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-06-25T15:04:02.714245Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 
2025-06-25T15:04:02.714348Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1365: ActorId: [13:944:2645] TxId: 281474976715665. Ctx: { TraceId: 01jyksw39zaxn2kqmn8bt3sqhn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDQ1MzY4MDktZjhmZWYzYzItM2NmYjFmMmUtYzRjMzYyZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got propose result, shard: 72075186224037888, status: COMPLETE, error: 2025-06-25T15:04:02.714452Z node 13 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [13:944:2645] TxId: 281474976715665. Ctx: { TraceId: 01jyksw39zaxn2kqmn8bt3sqhn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDQ1MzY4MDktZjhmZWYzYzItM2NmYjFmMmUtYzRjMzYyZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:04:02.714547Z node 13 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [13:944:2645] TxId: 281474976715665. Ctx: { TraceId: 01jyksw39zaxn2kqmn8bt3sqhn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDQ1MzY4MDktZjhmZWYzYzItM2NmYjFmMmUtYzRjMzYyZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-25T15:04:02.714672Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2589: SessionId: ydb://session/3?node_id=13&id=ZDQ1MzY4MDktZjhmZWYzYzItM2NmYjFmMmUtYzRjMzYyZDA=, ActorId: [13:800:2645], ActorState: CleanupState, TraceId: 01jyksw39zaxn2kqmn8bt3sqhn, EndCleanup, isFinal: 0 2025-06-25T15:04:02.714834Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2325: SessionId: ydb://session/3?node_id=13&id=ZDQ1MzY4MDktZjhmZWYzYzItM2NmYjFmMmUtYzRjMzYyZDA=, ActorId: [13:800:2645], ActorState: CleanupState, TraceId: 01jyksw39zaxn2kqmn8bt3sqhn, Sent query response back to proxy, proxyRequestId: 8, proxyId: [13:59:2106] 2025-06-25T15:04:02.969720Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [13:953:2755], Recipient [13:913:2729]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:04:02.969854Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:04:02.969960Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [13:952:2754], serverId# [13:953:2755], sessionId# [0:0:0] 2025-06-25T15:04:02.970221Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553224, Sender [13:554:2480], Recipient [13:913:2729]: NKikimr::TEvDataShard::TEvGetOpenTxs >> KqpErrors::ProposeResultLost_RwTx+UseSink >> TestKinesisHttpProxy::CreateDeleteStreamWithConsumerWithFlag [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-GroupByHopPercentile-default.txt] [FAIL] |93.2%| [TA] $(B)/ydb/core/tx/datashard/ut_snapshot/test-results/unittest/{meta.json ... results_accumulator.log} |93.2%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_snapshot/test-results/unittest/{meta.json ... results_accumulator.log} >> TestKinesisHttpProxy::BadRequestUnknownMethod |93.2%| [TA] $(B)/ydb/tests/fq/streaming_optimize/test-results/py3test/{meta.json ... results_accumulator.log} |93.2%| [TA] {RESULT} $(B)/ydb/tests/fq/streaming_optimize/test-results/py3test/{meta.json ... 
results_accumulator.log} |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] [GOOD] >> TFlatMetrics::TimeSeriesAvg16 [GOOD] >> TFlatMetrics::TimeSeriesAVG [GOOD] >> BootstrapperTest::KeepExistingTablet >> TTabletPipeTest::TestSendAfterOpen >> TTabletPipeTest::TestPipeConnectToHint |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::TimeSeriesAVG [GOOD] >> TTabletPipeTest::TestSendAfterOpen [GOOD] >> TTabletPipeTest::TestPipeConnectToHint [GOOD] >> TraverseDatashard::TraverseOneTable [GOOD] >> TResourceBroker::TestRealUsage |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendAfterOpen [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestPipeConnectToHint [GOOD] >> TResourceBroker::TestRealUsage [GOOD] >> TResourceBroker::TestRandomQueue >> BootstrapperTest::KeepExistingTablet [GOOD] >> BootstrapperTest::DuplicateNodes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseOneTable [GOOD] Test command err: 2025-06-25T15:03:59.875514Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:59.875752Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:59.875822Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ee3/r3tmp/tmpeAbSnk/pdisk_1.dat 2025-06-25T15:04:00.175984Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19011, node 1 2025-06-25T15:04:00.391618Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:04:00.391655Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:04:00.391675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:04:00.392080Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:04:00.393621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:04:00.482142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:00.482264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:00.496437Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26443 2025-06-25T15:04:01.031437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:04:03.578007Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:04:03.603089Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:03.603175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:03.641158Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:04:03.642948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:03.835782Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:03.870924Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:03.871612Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:03.872129Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:03.872272Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:03.872489Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:03.872608Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:03.872678Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:03.872728Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:03.872773Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:04.056507Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:04.056631Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:04.069714Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:04.212811Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:04:04.250110Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:04:04.250197Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:04:04.280928Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:04:04.281126Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:04:04.281324Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:04:04.281377Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:04:04.281444Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:04:04.281502Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:04:04.281566Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:04:04.281616Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:04:04.282129Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:04:04.304831Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:04.304964Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:04.313270Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:04:04.320000Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:04:04.320289Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:04:04.329330Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:04:04.344652Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:04:04.344698Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:04:04.344749Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:04:04.353270Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:04.358456Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:04:04.358585Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:04:04.532985Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:04:04.642071Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:04:04.709711Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:04:05.220468Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:05.473058Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:05.473199Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:05.490893Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:05.879809Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2452:3074], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:05.879993Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:05.881369Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2457:3078]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:04:05.881566Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:04:05.881666Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2459:3080] 2025-06-25T15:04:05.881759Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2459:3080] 2025-06-25T15:04:05.882305Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2460:2949] 2025-06-25T15:04:05.882594Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2459:3080], server id = [2:2460:2949], tablet id = 72075186224037894, status = OK 2025-06-25T15:04:05.882791Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2460:2949], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:04:05.882862Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-25T15:04:05.883143Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:04:05.883226Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2457:3078], StatRequests.size() = 1 2025-06-25T15:04:05.903278Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2464:3084], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:05.903411Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:05.903911Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2469:3089], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:05.910934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:04:06.100790Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:04:06.100858Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:04:06.164634Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2459:3080], schemeshard count = 1 2025-06-25T15:04:06.493749Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2471:3091], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-25T15:04:06.627464Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:2581:3159] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:04:06.640502Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2604:3175]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:04:06.640721Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:04:06.640764Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2604:3175], StatRequests.size() = 1 2025-06-25T15:04:06.727305Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jyksw6e633a547s0a4y6pwa0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTM4OTg1MDItOGUwY2ZiLWNmNzljYzFhLTY0N2RiMDk0, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:04:06.769958Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:2653:2996]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:04:06.771977Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:04:06.772024Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:04:06.772429Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:04:06.772479Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:04:06.772515Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:04:06.782669Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-25T15:04:06.782908Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 >> TResourceBroker::TestRandomQueue [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestRandomQueue [GOOD] Test command err: 2025-06-25T15:04:08.450436Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-1 (1 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.450495Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-1 (1 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.450569Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-5 (5 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.450605Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-6 (6 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.450702Z node 2 :RESOURCE_BROKER ERROR: 
resource_broker.cpp:675: Assigning waiting task 'task-12 (12 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.450927Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-33 (33 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.450962Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-34 (34 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.451016Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-38 (38 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.451090Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-44 (44 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.451170Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-50 (50 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.451277Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-56 (56 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.451309Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-58 (58 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.451338Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-59 (59 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.451378Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-60 (60 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.451625Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-72 (72 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.451679Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-74 (74 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.451799Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-79 (79 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.451857Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-81 (81 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.451936Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-85 (85 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.452129Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-94 (94 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.452198Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-97 (97 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.452241Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-98 (98 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.452420Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-106 (106 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.452460Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-107 (107 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.452568Z 
node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-111 (111 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.452661Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-114 (114 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.452798Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-120 (120 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.453004Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-130 (130 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.453044Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-131 (131 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.453214Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-141 (141 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.453323Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-146 (146 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.453357Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-147 (147 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.453572Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-159 (159 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.453731Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-168 (168 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.453860Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-173 (173 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.453924Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-176 (176 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.454314Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-196 (196 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.454448Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-203 (203 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.454499Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-205 (205 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.454576Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-208 (208 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.454683Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-213 (213 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.454723Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-214 (214 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.454761Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-215 (215 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.454853Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-220 (220 by [2:103:2136])' of 
unknown type 'wrong' to default queue 2025-06-25T15:04:08.454887Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-221 (221 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.455084Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-232 (232 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.455117Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-233 (233 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.455189Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-237 (237 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.455387Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-248 (248 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.455426Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-249 (249 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.455481Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-251 (251 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.455518Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-252 (252 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.455572Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-254 (254 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.455818Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-265 (265 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.455909Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-267 (267 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.455980Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-268 (268 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.456112Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-274 (274 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.456266Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-283 (283 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.456321Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-284 (284 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.456380Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-286 (286 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.456479Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-291 (291 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.456639Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-298 (298 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.456718Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-300 (300 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.456835Z node 2 :RESOURCE_BROKER ERROR: 
resource_broker.cpp:675: Assigning waiting task 'task-306 (306 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.456885Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-308 (308 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.457007Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-314 (314 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.457054Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task ... R ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-544 (544 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483008Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-593 (593 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483024Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-595 (595 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483050Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-602 (602 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483099Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-637 (637 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483149Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-653 (653 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483172Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-655 (655 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483203Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-691 (691 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483218Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-692 (692 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483255Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-728 (728 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483296Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-734 (734 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483328Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-767 (767 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483348Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-769 (769 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483398Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-775 (775 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483431Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-793 (793 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483456Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-800 (800 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483530Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-860 (860 by 
[2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483585Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-938 (938 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483615Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-964 (964 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483683Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-989 (989 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483741Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-60 (60 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483762Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-74 (74 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483782Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-81 (81 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483829Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-85 (85 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483856Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-97 (97 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483906Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-106 (106 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483944Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-120 (120 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.483989Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-147 (147 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484010Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-159 (159 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484039Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-168 (168 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484055Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-203 (203 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484074Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-213 (213 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484164Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-283 (283 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484196Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-300 (300 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484210Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-332 (332 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484224Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-372 (372 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484274Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: 
Assigning in-fly task 'task-389 (389 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484342Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-411 (411 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484371Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-443 (443 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484391Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-463 (463 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484462Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-510 (510 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484516Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-517 (517 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484539Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-525 (525 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484598Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-570 (570 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484642Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-577 (577 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484672Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-588 (588 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484710Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-617 (617 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484732Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-630 (630 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484799Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-709 (709 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484828Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-723 (723 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484851Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-730 (730 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484879Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-741 (741 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484914Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-764 (764 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484948Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-776 (776 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.484984Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-789 (789 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.485006Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-790 (790 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.485038Z node 2 
:RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-812 (812 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.485074Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-813 (813 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.485126Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-858 (858 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.485157Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-865 (865 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.485241Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-908 (908 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.485285Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-922 (922 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.485305Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-923 (923 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.485331Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-926 (926 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.485365Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-932 (932 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.485387Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-943 (943 by [2:103:2136])' of unknown type 'wrong' to default queue 2025-06-25T15:04:08.485449Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-968 (968 by [2:103:2136])' of unknown type 'wrong' to default queue >> TResourceBrokerConfig::UpdateQueues [GOOD] >> TResourceBrokerConfig::DefaultConfig [GOOD] >> TPipeTrackerTest::TestAddSameTabletTwice [GOOD] >> TPipeTrackerTest::TestAddTwoTablets [GOOD] |93.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TPipeTrackerTest::TestAddTwoTablets [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/metadata/initializer/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBrokerConfig::DefaultConfig [GOOD] Test command err: Queues { Name: "queue_default" Weight: 30 Limit { Cpu: 2 } } Queues { Name: "queue_compaction_gen0" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_compaction_gen1" Weight: 100 Limit { Cpu: 6 } } Queues { Name: "queue_compaction_gen2" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_gen3" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_borrowed" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_cs_indexation" Weight: 100 Limit { Cpu: 3 Memory: 1073741824 } } Queues { Name: "queue_cs_ttl" Weight: 100 Limit { Cpu: 3 Memory: 1073741824 } } Queues { Name: "queue_cs_general" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_scan_read" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_normalizer" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_transaction" Weight: 100 Limit { Cpu: 4 } } Queues { Name: "queue_background_compaction" Weight: 10 Limit { Cpu: 1 } } Queues { Name: "queue_scan" Weight: 100 Limit { Cpu: 10 } } Queues { 
Name: "queue_backup" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_restore" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_kqp_resource_manager" Weight: 30 Limit { Cpu: 4 Memory: 10737418240 } } Queues { Name: "queue_build_index" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_ttl" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_datashard_build_stats" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_cdc_initial_scan" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_statistics_scan" Weight: 100 Limit { Cpu: 1 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 60000000 } Tasks { Name: "compaction_gen0" QueueName: "queue_compaction_gen0" DefaultDuration: 10000000 } Tasks { Name: "compaction_gen1" QueueName: "queue_compaction_gen1" DefaultDuration: 30000000 } Tasks { Name: "compaction_gen2" QueueName: "queue_compaction_gen2" DefaultDuration: 120000000 } Tasks { Name: "compaction_gen3" QueueName: "queue_compaction_gen3" DefaultDuration: 600000000 } Tasks { Name: "compaction_borrowed" QueueName: "queue_compaction_borrowed" DefaultDuration: 600000000 } Tasks { Name: "CS::TTL" QueueName: "queue_cs_ttl" DefaultDuration: 600000000 } Tasks { Name: "CS::INDEXATION" QueueName: "queue_cs_indexation" DefaultDuration: 600000000 } Tasks { Name: "CS::GENERAL" QueueName: "queue_cs_general" DefaultDuration: 600000000 } Tasks { Name: "CS::SCAN_READ" QueueName: "queue_cs_scan_read" DefaultDuration: 600000000 } Tasks { Name: "CS::NORMALIZER" QueueName: "queue_cs_normalizer" DefaultDuration: 600000000 } Tasks { Name: "transaction" QueueName: "queue_transaction" DefaultDuration: 600000000 } Tasks { Name: "background_compaction" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen0" QueueName: "queue_background_compaction" DefaultDuration: 10000000 } Tasks { Name: "background_compaction_gen1" QueueName: "queue_background_compaction" DefaultDuration: 20000000 } Tasks { Name: "background_compaction_gen2" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen3" QueueName: "queue_background_compaction" DefaultDuration: 300000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 300000000 } Tasks { Name: "backup" QueueName: "queue_backup" DefaultDuration: 300000000 } Tasks { Name: "restore" QueueName: "queue_restore" DefaultDuration: 300000000 } Tasks { Name: "kqp_query" QueueName: "queue_kqp_resource_manager" DefaultDuration: 600000000 } Tasks { Name: "build_index" QueueName: "queue_build_index" DefaultDuration: 600000000 } Tasks { Name: "ttl" QueueName: "queue_ttl" DefaultDuration: 300000000 } Tasks { Name: "datashard_build_stats" QueueName: "queue_datashard_build_stats" DefaultDuration: 5000000 } Tasks { Name: "cdc_initial_scan" QueueName: "queue_cdc_initial_scan" DefaultDuration: 600000000 } Tasks { Name: "statistics_scan" QueueName: "queue_statistics_scan" DefaultDuration: 600000000 } ResourceLimit { Cpu: 256 Memory: 17179869184 } Total queues cpu: 89 >> TResourceBroker::TestErrors >> BootstrapperTest::DuplicateNodes [GOOD] |93.3%| [TA] $(B)/ydb/services/metadata/initializer/ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.3%| [TA] {RESULT} $(B)/ydb/services/metadata/initializer/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TResourceBroker::TestErrors [GOOD] >> TResourceBroker::TestExecutionStat ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> BootstrapperTest::DuplicateNodes [GOOD] Test command err: ... waiting for pipe to connect ... sleeping (original instance should be preserved) ... waiting for original instance to stop ... waiting for original instance to stop (done) ... waiting for pipe to connect 2025-06-25T15:04:08.574561Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:08.574623Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:08.574967Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-25T15:04:08.574995Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 2303809724928703835 2025-06-25T15:04:08.575073Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-25T15:04:08.575095Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 13151740404452589043 2025-06-25T15:04:08.575592Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: FREE 2025-06-25T15:04:08.575640Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:499: tablet: 9437184, type: Dummy, lost round, wait for 0.190190s 2025-06-25T15:04:08.575702Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: FREE 2025-06-25T15:04:08.575724Z node 4 :BOOTSTRAPPER NOTICE: bootstrapper.cpp:680: tablet: 9437184, type: Dummy, boot 2025-06-25T15:04:08.778521Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:08.779062Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:222:2097] 2025-06-25T15:04:08.779470Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-25T15:04:08.779511Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedSingleBucket >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedSingleBucket [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationRegular >> TResourceBroker::TestExecutionStat [GOOD] >> TestKinesisHttpProxy::BadRequestUnknownMethod [GOOD] >> TFlatMetrics::MaximumValue1 [GOOD] >> TFlatMetrics::MaximumValue2 [GOOD] >> TTabletCountersPercentile::SingleBucket [GOOD] >> TTabletCountersPercentile::StartFromZero [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationRegular [GOOD] >> TPipeTrackerTest::TestSimpleAdd [GOOD] >> TResourceBroker::TestAutoTaskId |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestExecutionStat [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::MaximumValue2 [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersPercentile::StartFromZero [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> 
TTabletCountersAggregator::IntegralPercentileAggregationRegular [GOOD] >> TResourceBroker::TestAutoTaskId [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamed >> HttpProxyInsideYdb::TestIfEnvVariableSet [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamed [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedNoOverflowCheck |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestAutoTaskId [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedNoOverflowCheck [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::IntegralPercentileAggregationHistNamedNoOverflowCheck [GOOD] >> TResourceBroker::TestResubmitTask >> TTabletPipeTest::TestTwoNodesAndRebootOfConsumer >> TTabletLabeledCountersAggregator::SimpleAggregation >> KqpErrors::ResolveTableError [GOOD] >> TTabletResolver::NodeProblem >> TTabletLabeledCountersAggregator::SimpleAggregation [GOOD] >> TTabletLabeledCountersAggregator::HeavyAggregation ------- [TM] {asan, default-linux-x86_64, release} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> HttpProxyInsideYdb::TestIfEnvVariableSet [GOOD] Test command err: 2025-06-25T15:03:26.606349Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902787088035383:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:03:26.606437Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000c8e/r3tmp/tmpa5Svhm/pdisk_1.dat 2025-06-25T15:03:26.951217Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13719, node 1 2025-06-25T15:03:26.978595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:26.978947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:26.995821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:27.016575Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:27.016603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:27.016619Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:27.016776Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62517 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:03:27.337866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... TClient is connected to server localhost:62517 2025-06-25T15:03:27.520841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T15:03:27.528833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-25T15:03:27.533762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) waiting... 2025-06-25T15:03:27.550151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:27.613462Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:27.695855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 
2025-06-25T15:03:27.737537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:03:27.778039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:27.810771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:27.848651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:27.922053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:27.960155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:27.995214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:03:28.025274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 
2025-06-25T15:03:29.504267Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902799972938622:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:29.504283Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902799972938614:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:29.504401Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:29.508699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715673:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:03:29.518537Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902799972938628:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715673 completed, doublechecking } 2025-06-25T15:03:29.593831Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902799972938679:2862] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:03:29.855589Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715675. Ctx: { TraceId: 01jyksv2yy0pt3fhc1ecyfmw8r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjY1MDM3ZWYtMzA0NDFmMmEtZDkyZjIyYjMtMWEzMTkyMGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root waiting... 2025-06-25T15:03:29.887041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:29.914770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home ... Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T15:04:09.693233Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-06-25T15:04:09.693314Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 15ms 2025-06-25T15:04:09.693484Z node 7 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T15:04:09.694589Z node 7 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type 
{ Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T15:04:09.694614Z node 7 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 17ms 2025-06-25T15:04:09.694971Z node 7 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional 
Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T15:04:09.695016Z node 7 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-06-25T15:04:09.695122Z node 7 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 18ms 2025-06-25T15:04:09.695417Z node 7 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-25T15:04:09.809539Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [7:7519902972646948760:2397]: Pool not found 2025-06-25T15:04:09.809679Z node 7 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-06-25T15:04:09.983487Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [7:7519902972646948752:2396]: Pool not found 2025-06-25T15:04:09.983609Z node 7 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-06-25T15:04:09.986296Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7519902972646948888:2419], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-06-25T15:04:09.986312Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7519902972646948887:2418], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:09.986408Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:10.210342Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [7:7519902972646948885:2417]: Pool not found 2025-06-25T15:04:10.210552Z node 7 :SQS DEBUG: cleanup_queue_data.cpp:138: [cleanup removed queues] there are no queues to delete 2025-06-25T15:04:10.591318Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519902955467077495:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:04:10.591431Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:04:10.652319Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:58340) incoming connection opened 2025-06-25T15:04:10.652395Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:58340) -> (POST /Root, 3 bytes) 2025-06-25T15:04:10.652504Z node 7 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [d8da:db00:6050:0:c0da:db00:6050:0] request [UnknownMethodName] url [/Root] database [/Root] requestId: 46aa46f8-30a1e803-3d8859ec-3554e43b 2025-06-25T15:04:10.652688Z node 7 :HTTP_PROXY INFO: http_req.cpp:1211: http request [UnknownMethodName] requestId [46aa46f8-30a1e803-3d8859ec-3554e43b] reply with status: UNSUPPORTED message: Missing method name UnknownMethodName 2025-06-25T15:04:10.652849Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:280: (#37,[::1]:58340) <- (400 InvalidAction, 76 bytes) 2025-06-25T15:04:10.652928Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:289: (#37,[::1]:58340) Request: POST /Root HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: kinesisApi.UnknownMethodName X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked { } 2025-06-25T15:04:10.652973Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:296: (#37,[::1]:58340) Response: HTTP/1.1 400 InvalidAction Connection: close x-amzn-requestid: 46aa46f8-30a1e803-3d8859ec-3554e43b x-amz-crc32: 139748724 Content-Type: application/x-amz-json-1.1 Content-Length: 76 2025-06-25T15:04:10.653044Z node 7 :HTTP DEBUG: http_proxy_incoming.cpp:335: (#37,[::1]:58340) connection closed Http output full {"__type":"InvalidAction","message":"Missing method name UnknownMethodName"} 400 {"__type":"InvalidAction","message":"Missing method name UnknownMethodName"} >> TTabletPipeTest::TestConsumerSidePipeReset >> TResourceBroker::TestResubmitTask [GOOD] >> TResourceBroker::TestUpdateCookie >> TResourceBroker::TestQueueWithConfigure >> TTabletPipeTest::TestConsumerSidePipeReset [GOOD] >> TTabletPipeTest::TestConnectReject >> KqpErrors::ProposeResultLost_RwTx+UseSink [GOOD] >> KqpErrors::ProposeResultLost_RwTx-UseSink >> TTabletPipeTest::TestTwoNodesAndRebootOfConsumer [GOOD] >> TResourceBroker::TestUpdateCookie [GOOD] >> TTabletResolver::NodeProblem [GOOD] |93.3%| [TA] $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TTabletPipeTest::TestConnectReject [GOOD] >> TResourceBroker::TestQueueWithConfigure [GOOD] >> TResourceBroker::TestOverusageDifferentResources >> TResourceBrokerInstant::Test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestTwoNodesAndRebootOfConsumer [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:166:2058] recipient: [1:164:2139] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:166:2058] recipient: [1:164:2139] Leader for TabletID 9437184 is [1:172:2143] sender: [1:173:2058] recipient: [1:164:2139] Leader for TabletID 9437185 is [0:0:0] sender: [2:176:2049] recipient: [2:167:2097] IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [2:176:2049] recipient: [2:167:2097] Leader for TabletID 9437185 is [2:188:2100] sender: [2:190:2049] recipient: [2:167:2097] Leader for TabletID 9437184 is [1:172:2143] sender: [1:216:2058] recipient: [1:15:2062] Leader for TabletID 9437185 is [2:188:2100] sender: [1:218:2058] recipient: [1:15:2062] Leader for TabletID 9437185 is [2:188:2100] sender: [2:220:2049] recipient: [2:44:2053] Leader for TabletID 9437185 is [2:188:2100] sender: [2:221:2049] recipient: [2:161:2096] Leader for TabletID 9437185 is [2:188:2100] sender: [1:224:2058] recipient: [1:15:2062] Leader for TabletID 9437185 is [2:188:2100] sender: [2:226:2049] recipient: [2:44:2053] Leader for TabletID 9437185 is [2:188:2100] sender: [2:227:2049] recipient: [2:225:2113] Leader for TabletID 9437185 is [2:228:2114] sender: [2:229:2049] recipient: [2:225:2113] Leader for TabletID 9437185 is [2:228:2114] sender: [1:258:2058] recipient: [1:15:2062] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ResolveTableError [GOOD] Test command err: 2025-06-25T15:04:08.964531Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:04:08.965993Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b5b/r3tmp/tmpjMqgVR/pdisk_1.dat 2025-06-25T15:04:09.225327Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:04:09.352464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:04:09.432023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:09.432159Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:09.435047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:09.435104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:09.448168Z node 1 :HIVE 
WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:04:09.448704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:09.448984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:09.736049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:11.242383Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:198: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Bootstrap done, become ReadyState 2025-06-25T15:04:11.243773Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:608: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Executing physical tx, type: 2, stages: 1 2025-06-25T15:04:11.244614Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-25T15:04:11.245673Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:623: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Got request, become WaitResolveState 2025-06-25T15:04:11.246789Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715658. Resolved key sets: 1 2025-06-25T15:04:11.246969Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715658. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-25T15:04:11.248208Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2035: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (Iterator (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3))))) )))) ) 2025-06-25T15:04:11.251250Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:1512: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Stage [0,0] create compute task: 1 2025-06-25T15:04:11.254684Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Database not set, use /Root 2025-06-25T15:04:11.254759Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-25T15:04:11.260918Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Collect channels updates for task: 1 at actor [1:1473:2900] 2025-06-25T15:04:11.261008Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Sending channels info to compute actor: [1:1473:2900], channels: 0 2025-06-25T15:04:11.261079Z node 1 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-25T15:04:11.261162Z node 1 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2809: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Updating channels after the creation of compute actors 2025-06-25T15:04:11.261202Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
Collect channels updates for task: 1 at actor [1:1473:2900] 2025-06-25T15:04:11.261266Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Sending channels info to compute actor: [1:1473:2900], channels: 0 2025-06-25T15:04:11.261353Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Waiting for: CA [1:1473:2900], 2025-06-25T15:04:11.261406Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [1:1473:2900], 2025-06-25T15:04:11.261466Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-25T15:04:11.293247Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: ExecuteState, got execution state from compute actor: [1:1473:2900], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-25T15:04:11.293368Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Waiting for: CA [1:1473:2900], 2025-06-25T15:04:11.293428Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [1:1473:2900], 2025-06-25T15:04:11.296137Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
ActorState: ExecuteState, got execution state from compute actor: [1:1473:2900], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 19830 Tasks { TaskId: 1 CpuTimeUs: 10402 FinishTimeMs: 1750863851295 EgressBytes: 30 EgressRows: 3 ComputeCpuTimeUs: 1944 BuildCpuTimeUs: 8458 HostName: "ghrun-kqfvx6aroe" NodeId: 1 CreateTimeMs: 1750863851267 UpdateTimeMs: 1750863851295 } MaxMemoryUsage: 1048576 } 2025-06-25T15:04:11.296263Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Compute actor has finished execution: [1:1473:2900] 2025-06-25T15:04:11.296374Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:276: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Send Commit to BufferActor=[1:1469:2900] 2025-06-25T15:04:11.296433Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Resource usage for last stat interval: ComputeTime: 0.019830s ReadRows: 0 ReadBytes: 0 ru: 13 rate limiter was not found force flag: 1 2025-06-25T15:04:11.339597Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. terminate execution. 2025-06-25T15:04:11.339674Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2202: ActorId: [1:1470:2900] TxId: 281474976715658. Ctx: { TraceId: 01jykswb47d9ejkdbkwf4sbgsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmYwNzY3NWUtZTY2ZTA4N2MtYjhlMDFhZDktNDBkODg1YWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Terminate, become ZombieState 2025-06-25T15:04:11.407324Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:1489:2918], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[/Root/table-1]
: Error: LookupError, code: 2005 2025-06-25T15:04:11.409208Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=NmI5YmFkZjMtYWRhOWRmZTItMTk2ZjY2ZDAtYjM0MGViYzM=, ActorId: [1:1487:2916], ActorState: ExecuteState, TraceId: 01jykswbtjb1bdw8y1neh3q7mv, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: >> TResourceBroker::TestOverusageDifferentResources [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestUpdateCookie [GOOD] |93.3%| [TA] {RESULT} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletResolver::NodeProblem [GOOD] Test command err: 2025-06-25T15:04:12.671276Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StInit ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:04:12.671446Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 123 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 123 Cookie: 0 CurrentLeader: [1:217:2138] CurrentLeaderTablet: [1:218:2139] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-25T15:04:12.671485Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 123 followers: 0 2025-06-25T15:04:12.671520Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [1:217:2138] 2025-06-25T15:04:12.671648Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StInit ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:04:12.671818Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 234 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 234 Cookie: 0 CurrentLeader: [1:223:2142] CurrentLeaderTablet: [1:224:2143] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-25T15:04:12.671842Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 234 followers: 0 2025-06-25T15:04:12.671870Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [1:223:2142] 2025-06-25T15:04:12.672816Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StNormal ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:04:12.672856Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [1:217:2138] 2025-06-25T15:04:12.672979Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StNormal ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:04:12.673006Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 
disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [1:223:2142] 2025-06-25T15:04:12.673145Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 1 max(problemEpoch): 2 2025-06-25T15:04:12.673176Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 123 leader: [1:217:2138] by NodeId 2025-06-25T15:04:12.673223Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StProblemResolve ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:04:12.673375Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 123 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 123 Cookie: 0 CurrentLeader: [2:233:2096] CurrentLeaderTablet: [2:234:2097] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-25T15:04:12.673406Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 123 followers: 0 2025-06-25T15:04:12.673437Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [2:233:2096] 2025-06-25T15:04:12.673620Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 234 leader: [1:223:2142] by NodeId 2025-06-25T15:04:12.673661Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StProblemResolve ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:04:12.673828Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 234 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 234 Cookie: 0 CurrentLeader: [2:239:2098] CurrentLeaderTablet: [2:240:2099] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-25T15:04:12.673853Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 234 followers: 0 2025-06-25T15:04:12.673896Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [2:239:2098] 2025-06-25T15:04:12.675072Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 2 max(problemEpoch): 2 2025-06-25T15:04:12.675128Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StNormal ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:04:12.675159Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [2:233:2096] 2025-06-25T15:04:12.675325Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StNormal ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:04:12.675355Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [2:239:2098] 
2025-06-25T15:04:12.675514Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 2 max(problemEpoch): 4 2025-06-25T15:04:12.675557Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 123 leader: [2:233:2096] by NodeId 2025-06-25T15:04:12.675601Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StProblemResolve ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:04:12.675767Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 123 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 123 Cookie: 0 CurrentLeader: [3:251:2096] CurrentLeaderTablet: [3:252:2097] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-25T15:04:12.675811Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 123 followers: 0 2025-06-25T15:04:12.675842Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [3:251:2096] 2025-06-25T15:04:12.676066Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StNormal ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:04:12.676115Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [2:239:2098] 2025-06-25T15:04:12.676328Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 2 max(problemEpoch): 5 2025-06-25T15:04:12.676377Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StNormal ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:04:12.676426Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [3:251:2096] 2025-06-25T15:04:12.676619Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 234 leader: [2:239:2098] by NodeId 2025-06-25T15:04:12.676680Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StProblemResolve ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:04:12.676858Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 234 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 234 Cookie: 0 CurrentLeader: [3:257:2098] CurrentLeaderTablet: [3:258:2099] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-25T15:04:12.676881Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 234 followers: 0 2025-06-25T15:04:12.676919Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [3:257:2098] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> 
TTabletPipeTest::TestConnectReject [GOOD] >> KqpErrors::ProposeError [GOOD] >> KqpErrors::ProposeErrorEvWrite >> TResourceBrokerInstant::Test [GOOD] >> TResourceBrokerInstant::TestErrors ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestOverusageDifferentResources [GOOD] Test command err: 2025-06-25T15:04:13.133161Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1240: Configure result: Success: false Message: "task \'compaction1\' uses unknown queue \'queue_default1\'" 2025-06-25T15:04:13.133385Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1240: Configure result: Success: false Message: "task \'unknown\' is required" 2025-06-25T15:04:13.133557Z node 1 :RESOURCE_BROKER ERROR: resource_broker.cpp:1240: Configure result: Success: false Message: "task \'unknown\' uses unknown queue \'queue_default\'" >> TResourceBrokerInstant::TestErrors [GOOD] >> TInterconnectTest::OldFormat >> TTabletPipeTest::TestSendBeforeBootTarget >> TTabletCountersAggregator::IntegralPercentileAggregationRegularCheckSingleTablet >> TInterconnectTest::OldFormat [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheckOnNew ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBrokerInstant::TestErrors [GOOD] Test command err: 2025-06-25T15:04:14.110394Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:1080: FinishTaskInstant failed for task 2: cannot finish unknown task >> TFlatMetrics::TimeSeriesAvg4 [GOOD] >> TFlatMetrics::TimeSeriesKV [GOOD] >> TTabletPipeTest::TestShutdown >> TTabletCountersAggregator::IntegralPercentileAggregationRegularCheckSingleTablet [GOOD] >> TTabletCountersAggregator::IntegralPercentileAggregationRegularNoOverflowCheck >> TTabletPipeTest::TestRewriteSameNode >> TTabletCountersAggregator::IntegralPercentileAggregationRegularNoOverflowCheck [GOOD] |93.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::TimeSeriesKV [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheckOnNew [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheckOnOld >> TTabletPipeTest::TestShutdown [GOOD] >> TPipeCacheTest::TestIdleRefresh >> TTabletPipeTest::TestRewriteSameNode [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::IntegralPercentileAggregationRegularNoOverflowCheck [GOOD] >> TPipeCacheTest::TestIdleRefresh [GOOD] >> TPipeCacheTest::TestTabletNode >> TInterconnectTest::OldFormatSuppressVersionCheckOnOld [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheck |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestShutdown [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestRewriteSameNode [GOOD] >> TTabletCountersPercentile::WithoutZero [GOOD] >> TTabletLabeledCountersAggregator::DbAggregation >> TPipeCacheTest::TestTabletNode [GOOD] >> TInterconnectTest::OldFormatSuppressVersionCheck [GOOD] >> TTabletLabeledCountersAggregator::DbAggregation [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TPipeCacheTest::TestTabletNode [GOOD] >> TFlatMetrics::MaximumValue3 [GOOD] >> TFlatMetrics::MaximumValue4 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::OldFormatSuppressVersionCheck [GOOD] Test command err: 2025-06-25T15:04:14.837946Z node 4 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake 
[4:22:2057] [node 3] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-25T15:04:15.290160Z node 5 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [5:20:2058] [node 6] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-25T15:04:15.766587Z node 8 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [8:22:2057] [node 7] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default 2025-06-25T15:04:15.767852Z node 7 :INTERCONNECT WARN: interconnect_handshake.cpp:501: Handshake [7:20:2058] [node 8] ICH09 Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default >> TTabletPipeTest::TestSendBeforeBootTarget [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletLabeledCountersAggregator::DbAggregation [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::MaximumValue4 [GOOD] |93.4%| [TA] $(B)/ydb/core/actorlib_impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> BootstrapperTest::LoneBootstrapper |93.4%| [TA] {RESULT} $(B)/ydb/core/actorlib_impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendBeforeBootTarget [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:108:2057] recipient: [1:106:2138] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:108:2057] recipient: [1:106:2138] Leader for TabletID 9437184 is [1:112:2142] sender: [1:113:2057] recipient: [1:106:2138] Leader for TabletID 9437184 is [1:112:2142] sender: [1:132:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [0:0:0] sender: [1:161:2057] recipient: [1:159:2164] IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [1:161:2057] recipient: [1:159:2164] Leader for TabletID 9437185 is [1:165:2168] sender: [1:166:2057] recipient: [1:159:2164] Leader for TabletID 9437185 is [1:165:2168] sender: [1:191:2057] recipient: [1:14:2061] >> TTabletPipeTest::TestTwoNodes >> TTabletPipeTest::TestSendAfterOpenUsingTabletWithoutAcceptor >> TTabletLabeledCountersAggregator::HeavyAggregation [GOOD] >> BootstrapperTest::LoneBootstrapper [GOOD] >> BootstrapperTest::MultipleBootstrappers >> TTabletPipeTest::TestSendAfterOpenUsingTabletWithoutAcceptor [GOOD] >> TBlockBlobStorageTest::DelayedErrorsNotIgnored >> TTabletPipeTest::TestTwoNodes [GOOD] >> TFlatMetrics::TimeSeriesAvg16x60 [GOOD] >> TFlatMetrics::TimeSeriesAvg16Signed [GOOD] >> TTabletPipeTest::TestKillClientBeforServerIdKnown |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendAfterOpenUsingTabletWithoutAcceptor [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletLabeledCountersAggregator::HeavyAggregation [GOOD] Test command err: 2025-06-25T15:04:12.558804Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1983: aggregator new request V2 [2:8:2055] 2025-06-25T15:04:12.558967Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:9:2056] worker 0 2025-06-25T15:04:12.559009Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:10:2057] worker 1 2025-06-25T15:04:12.559033Z node 2 
:TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:11:2058] worker 2 2025-06-25T15:04:12.559050Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:12:2059] worker 3 2025-06-25T15:04:12.559067Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:13:2060] worker 4 2025-06-25T15:04:12.559101Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:14:2061] worker 5 2025-06-25T15:04:12.559127Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:15:2062] worker 6 2025-06-25T15:04:12.559142Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:16:2063] worker 7 2025-06-25T15:04:12.559155Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:17:2064] worker 8 2025-06-25T15:04:12.559188Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [2:8:2055] self [2:18:2065] worker 9 Sending message to [2:10:2057] from [2:8:2055] id 1 Sending message to [2:11:2058] from [2:8:2055] id 2 Sending message to [2:12:2059] from [2:8:2055] id 3 Sending message to [2:13:2060] from [2:8:2055] id 4 Sending message to [2:14:2061] from [2:8:2055] id 5 Sending message to [2:15:2062] from [2:8:2055] id 6 Sending message to [2:16:2063] from [2:8:2055] id 7 Sending message to [2:17:2064] from [2:8:2055] id 8 Sending message to [2:18:2065] from [2:8:2055] id 9 Sending message to [2:9:2056] from [2:8:2055] id 10 2025-06-25T15:04:13.065062Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 1 [2:10:2057] 2025-06-25T15:04:13.065121Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 2 [2:11:2058] 2025-06-25T15:04:13.065186Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 3 [2:12:2059] 2025-06-25T15:04:13.065213Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 4 [2:13:2060] 2025-06-25T15:04:13.065271Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 5 [2:14:2061] 2025-06-25T15:04:13.065299Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 6 [2:15:2062] 2025-06-25T15:04:13.065322Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 7 [2:16:2063] 2025-06-25T15:04:13.065380Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 8 [2:17:2064] 2025-06-25T15:04:13.065413Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 9 [2:18:2065] 2025-06-25T15:04:13.065650Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 10 [2:9:2056] 2025-06-25T15:04:13.065685Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [2:13:2060] 2025-06-25T15:04:13.066589Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged 
response node 4 [2:13:2060] 2025-06-25T15:04:13.090333Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:13:2060] Initiator [2:8:2055] 2025-06-25T15:04:13.103557Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [2:14:2061] 2025-06-25T15:04:13.104457Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [2:14:2061] 2025-06-25T15:04:13.124853Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:14:2061] Initiator [2:8:2055] 2025-06-25T15:04:13.138121Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 6 [2:15:2062] 2025-06-25T15:04:13.139017Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 6 [2:15:2062] 2025-06-25T15:04:13.157300Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:15:2062] Initiator [2:8:2055] 2025-06-25T15:04:13.169878Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 7 [2:16:2063] 2025-06-25T15:04:13.170730Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 7 [2:16:2063] 2025-06-25T15:04:13.189419Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:16:2063] Initiator [2:8:2055] 2025-06-25T15:04:13.204456Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 8 [2:17:2064] 2025-06-25T15:04:13.205415Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [2:17:2064] 2025-06-25T15:04:13.224636Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:17:2064] Initiator [2:8:2055] 2025-06-25T15:04:13.237332Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [2:18:2065] 2025-06-25T15:04:13.238285Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [2:18:2065] 2025-06-25T15:04:13.256765Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:18:2065] Initiator [2:8:2055] 2025-06-25T15:04:13.268467Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [2:8:2055] 2025-06-25T15:04:13.268558Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [2:8:2055] 2025-06-25T15:04:13.271218Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 10 [2:9:2056] 2025-06-25T15:04:13.272062Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 10 [2:9:2056] 2025-06-25T15:04:13.290302Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:9:2056] Initiator [2:8:2055] 2025-06-25T15:04:13.302787Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 1 [2:10:2057] 2025-06-25T15:04:13.303723Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 1 [2:10:2057] 2025-06-25T15:04:13.324736Z node 2 :TABLET_AGGREGATOR 
INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:10:2057] Initiator [2:8:2055] 2025-06-25T15:04:13.338796Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 2 [2:11:2058] 2025-06-25T15:04:13.339818Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 2 [2:11:2058] 2025-06-25T15:04:13.358712Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:11:2058] Initiator [2:8:2055] 2025-06-25T15:04:13.370928Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 3 [2:12:2059] 2025-06-25T15:04:13.371808Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [2:12:2059] 2025-06-25T15:04:13.391246Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:12:2059] Initiator [2:8:2055] 2025-06-25T15:04:13.405071Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [2:8:2055] 2025-06-25T15:04:13.405216Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [2:8:2055] 2025-06-25T15:04:13.410218Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 6 [2:8:2055] 2025-06-25T15:04:13.410338Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 6 [2:8:2055] 2025-06-25T15:04:13.414408Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 7 [2:8:2055] 2025-06-25T15:04:13.414507Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 7 [2:8:2055] 2025-06-25T15:04:13.418868Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 8 [2:8:2055] 2025-06-25T15:04:13.418973Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [2:8:2055] 2025-06-25T15:04:13.422425Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [2:8:2055] 2025-06-25T15:04:13.422510Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [2:8:2055] 2025-06-25T15:04:13.427395Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 0 [2:8:2055] 2025-06-25T15:04:13.427500Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 0 [2:8:2055] 2025-06-25T15:04:13.432818Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 1 [2:8:2055] 2025-06-25T15:04:13.432935Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 1 [2:8:2055] 2025-06-25T15:04:13.437336Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 2 [2:8:2055] 2025-06-25T15:04:13.437468Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 2 [2:8:2055] 2025-06-25T15:04:13.442389Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 3 [2:8:2055] 2025-06-25T15:04:13.442522Z node 2 
:TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [2:8:2055] 2025-06-25T15:04:13.447438Z node 2 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [2:8:2055] Initiator [2:7:2054] TEST 2 10 duration 0.983610s 2025-06-25T15:04:13.620088Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1983: aggregator new request V2 [3:8:2055] 2025-06-25T15:04:13.620431Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [3:8:2055] self [3:9:2056] worker 0 2025-06-25T15:04:13.620467Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [3:8:2055] self [3:10:2057] worker 1 2025-06-25T15:04:13.620486Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_ag ... ctor got response node 3 [3:8:2055] 2025-06-25T15:04:14.639140Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [3:8:2055] 2025-06-25T15:04:14.641990Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [3:8:2055] 2025-06-25T15:04:14.642091Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [3:8:2055] 2025-06-25T15:04:14.645134Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [3:8:2055] 2025-06-25T15:04:14.645214Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [3:8:2055] 2025-06-25T15:04:14.648272Z node 3 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [3:8:2055] Initiator [3:7:2054] TEST 2 20 duration 1.121384s 2025-06-25T15:04:14.898124Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1983: aggregator new request V2 [4:8:2055] 2025-06-25T15:04:14.898223Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [4:8:2055] self [4:9:2056] worker 0 Sending message to [4:9:2056] from [4:8:2055] id 1 Sending message to [4:9:2056] from [4:8:2055] id 2 Sending message to [4:9:2056] from [4:8:2055] id 3 Sending message to [4:9:2056] from [4:8:2055] id 4 Sending message to [4:9:2056] from [4:8:2055] id 5 Sending message to [4:9:2056] from [4:8:2055] id 6 Sending message to [4:9:2056] from [4:8:2055] id 7 Sending message to [4:9:2056] from [4:8:2055] id 8 Sending message to [4:9:2056] from [4:8:2055] id 9 Sending message to [4:9:2056] from [4:8:2055] id 10 2025-06-25T15:04:15.398236Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 1 [4:9:2056] 2025-06-25T15:04:15.398278Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 2 [4:9:2056] 2025-06-25T15:04:15.398304Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 3 [4:9:2056] 2025-06-25T15:04:15.398317Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 4 [4:9:2056] 2025-06-25T15:04:15.398364Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 5 [4:9:2056] 2025-06-25T15:04:15.398386Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 6 [4:9:2056] 2025-06-25T15:04:15.398404Z node 4 :TABLET_AGGREGATOR INFO: 
tablet_counters_aggregator.cpp:1964: aggregator actor request to node 7 [4:9:2056] 2025-06-25T15:04:15.398422Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 8 [4:9:2056] 2025-06-25T15:04:15.398439Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 9 [4:9:2056] 2025-06-25T15:04:15.398460Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 10 [4:9:2056] 2025-06-25T15:04:15.398640Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 1 [4:9:2056] 2025-06-25T15:04:15.399456Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 1 [4:9:2056] 2025-06-25T15:04:15.419481Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 2 [4:9:2056] 2025-06-25T15:04:15.420386Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 2 [4:9:2056] 2025-06-25T15:04:15.440174Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 3 [4:9:2056] 2025-06-25T15:04:15.441447Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [4:9:2056] 2025-06-25T15:04:15.462911Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [4:9:2056] 2025-06-25T15:04:15.463965Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [4:9:2056] 2025-06-25T15:04:15.482556Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [4:9:2056] 2025-06-25T15:04:15.483397Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [4:9:2056] 2025-06-25T15:04:15.508521Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 6 [4:9:2056] 2025-06-25T15:04:15.509459Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 6 [4:9:2056] 2025-06-25T15:04:15.530097Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 7 [4:9:2056] 2025-06-25T15:04:15.531071Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 7 [4:9:2056] 2025-06-25T15:04:15.551542Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 8 [4:9:2056] 2025-06-25T15:04:15.552889Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [4:9:2056] 2025-06-25T15:04:15.571617Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [4:9:2056] 2025-06-25T15:04:15.572444Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [4:9:2056] 2025-06-25T15:04:15.591230Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 10 [4:9:2056] 2025-06-25T15:04:15.592095Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 10 [4:9:2056] 2025-06-25T15:04:15.623951Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator 
request processed [4:9:2056] Initiator [4:8:2055] 2025-06-25T15:04:15.790606Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 0 [4:8:2055] 2025-06-25T15:04:15.791151Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 0 [4:8:2055] 2025-06-25T15:04:15.822276Z node 4 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [4:8:2055] Initiator [4:7:2054] TEST 2 1 duration 1.053465s 2025-06-25T15:04:16.155190Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1981: aggregator new request V2 Initiator [5:7:2054] self [5:8:2055] worker 0 Sending message to [5:8:2055] from [5:8:2055] id 1 Sending message to [5:8:2055] from [5:8:2055] id 2 Sending message to [5:8:2055] from [5:8:2055] id 3 Sending message to [5:8:2055] from [5:8:2055] id 4 Sending message to [5:8:2055] from [5:8:2055] id 5 Sending message to [5:8:2055] from [5:8:2055] id 6 Sending message to [5:8:2055] from [5:8:2055] id 7 Sending message to [5:8:2055] from [5:8:2055] id 8 Sending message to [5:8:2055] from [5:8:2055] id 9 Sending message to [5:8:2055] from [5:8:2055] id 10 2025-06-25T15:04:16.559677Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 1 [5:8:2055] 2025-06-25T15:04:16.559743Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 2 [5:8:2055] 2025-06-25T15:04:16.559760Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 3 [5:8:2055] 2025-06-25T15:04:16.559777Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 4 [5:8:2055] 2025-06-25T15:04:16.559791Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 5 [5:8:2055] 2025-06-25T15:04:16.559848Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 6 [5:8:2055] 2025-06-25T15:04:16.559870Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 7 [5:8:2055] 2025-06-25T15:04:16.559891Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 8 [5:8:2055] 2025-06-25T15:04:16.559911Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 9 [5:8:2055] 2025-06-25T15:04:16.559933Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:1964: aggregator actor request to node 10 [5:8:2055] 2025-06-25T15:04:16.560126Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 1 [5:8:2055] 2025-06-25T15:04:16.561138Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 1 [5:8:2055] 2025-06-25T15:04:16.580377Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 2 [5:8:2055] 2025-06-25T15:04:16.581240Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 2 [5:8:2055] 2025-06-25T15:04:16.601101Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 3 [5:8:2055] 2025-06-25T15:04:16.601969Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 3 [5:8:2055] 2025-06-25T15:04:16.622538Z node 5 
:TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 4 [5:8:2055] 2025-06-25T15:04:16.623449Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 4 [5:8:2055] 2025-06-25T15:04:16.643108Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 5 [5:8:2055] 2025-06-25T15:04:16.644326Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 5 [5:8:2055] 2025-06-25T15:04:16.670577Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 6 [5:8:2055] 2025-06-25T15:04:16.671641Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 6 [5:8:2055] 2025-06-25T15:04:16.691198Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 7 [5:8:2055] 2025-06-25T15:04:16.692101Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 7 [5:8:2055] 2025-06-25T15:04:16.711369Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 8 [5:8:2055] 2025-06-25T15:04:16.712258Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 8 [5:8:2055] 2025-06-25T15:04:16.732785Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 9 [5:8:2055] 2025-06-25T15:04:16.734155Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 9 [5:8:2055] 2025-06-25T15:04:16.760329Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2045: aggregator actor got response node 10 [5:8:2055] 2025-06-25T15:04:16.761800Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2050: aggregator actor merged response node 10 [5:8:2055] 2025-06-25T15:04:16.812915Z node 5 :TABLET_AGGREGATOR INFO: tablet_counters_aggregator.cpp:2072: aggregator request processed [5:8:2055] Initiator [5:7:2054] TEST 2 1 duration 0.901338s >> TFlatMetrics::TimeSeriesKV2 [GOOD] >> TPipeCacheTest::TestAutoConnect |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestTwoNodes [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::TimeSeriesAvg16Signed [GOOD] >> BootstrapperTest::RestartUnavailableTablet >> TTabletPipeTest::TestKillClientBeforServerIdKnown [GOOD] >> TTabletPipeTest::TestInterconnectSession >> TTabletPipeTest::TestInterconnectSession [GOOD] >> BootstrapperTest::RestartUnavailableTablet [GOOD] >> BootstrapperTest::UnavailableStateStorage >> KqpLimits::TooBigQuery+useSink [GOOD] >> KqpLimits::TooBigKey+useSink >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> TResourceBrokerInstant::TestMerge >> TTabletLabeledCountersAggregator::Version3Aggregation >> TTabletResolver::TabletResolvePriority [GOOD] >> TPipeTrackerTest::TestShareTablet [GOOD] >> TPipeTrackerTest::TestIdempotentAttachDetach [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestInterconnectSession [GOOD] Test command err: 2025-06-25T15:04:18.036481Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:315: [9437185] Detach 2025-06-25T15:04:18.045971Z node 1 :PIPE_SERVER DEBUG: 
tablet_pipe_server.cpp:338: [9437185] Activate 2025-06-25T15:04:18.050127Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:338: [9437185] Activate 2025-06-25T15:04:18.052088Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[9437185] ::Bootstrap [1:132:2156] 2025-06-25T15:04:18.052120Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[9437185] lookup [1:132:2156] 2025-06-25T15:04:18.052319Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[9437185] forward result local node, try to connect [1:132:2156] 2025-06-25T15:04:18.052353Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[9437185]::SendEvent [1:132:2156] 2025-06-25T15:04:18.052398Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:387: TClient[9437185] poison pill while connecting [1:132:2156] 2025-06-25T15:04:18.052418Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[9437185] connect failed [1:132:2156] 2025-06-25T15:04:18.052469Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [9437185] Accept Connect Originator# [1:132:2156] 2025-06-25T15:04:18.052583Z node 1 :PIPE_SERVER INFO: tablet_pipe_server.cpp:236: [9437185] Undelivered Target# [1:132:2156] Type# 269877249 Reason# ActorUnknown 2025-06-25T15:04:18.052691Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[9437185] ::Bootstrap [1:135:2158] 2025-06-25T15:04:18.052711Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[9437185] lookup [1:135:2158] 2025-06-25T15:04:18.052743Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[9437185] forward result local node, try to connect [1:135:2158] 2025-06-25T15:04:18.052768Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[9437185]::SendEvent [1:135:2158] 2025-06-25T15:04:18.052802Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:387: TClient[9437185] poison pill while connecting [1:135:2158] 2025-06-25T15:04:18.052815Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[9437185] connect failed [1:135:2158] 2025-06-25T15:04:18.052850Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [9437185] Accept Connect Originator# [1:135:2158] 2025-06-25T15:04:18.052936Z node 1 :PIPE_SERVER INFO: tablet_pipe_server.cpp:236: [9437185] Undelivered Target# [1:135:2158] Type# 269877249 Reason# ActorUnknown 2025-06-25T15:04:18.053004Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[9437185] ::Bootstrap [1:137:2160] 2025-06-25T15:04:18.053019Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[9437185] lookup [1:137:2160] 2025-06-25T15:04:18.053045Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[9437185] forward result local node, try to connect [1:137:2160] 2025-06-25T15:04:18.053059Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[9437185]::SendEvent [1:137:2160] 2025-06-25T15:04:18.053076Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:387: TClient[9437185] poison pill while connecting [1:137:2160] 2025-06-25T15:04:18.053087Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[9437185] connect failed [1:137:2160] 2025-06-25T15:04:18.053107Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [9437185] Accept Connect Originator# [1:137:2160] 2025-06-25T15:04:18.053152Z node 1 :PIPE_SERVER INFO: tablet_pipe_server.cpp:236: [9437185] Undelivered Target# [1:137:2160] Type# 269877249 Reason# ActorUnknown >> TTabletLabeledCountersAggregator::Version3Aggregation [GOOD] >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen >> 
TResourceBrokerInstant::TestMerge [GOOD] >> TTabletCountersAggregator::ColumnShardCounters >> TPipeCacheTest::TestAutoConnect [GOOD] >> TTabletCountersAggregator::ColumnShardCounters [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletResolver::TabletResolvePriority [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TPipeTrackerTest::TestIdempotentAttachDetach [GOOD] >> KqpErrors::ProposeResultLost_RwTx-UseSink [GOOD] >> BootstrapperTest::UnavailableStateStorage [GOOD] >> TBlockBlobStorageTest::DelayedErrorsNotIgnored [GOOD] >> TFlatMetrics::DecayingAverageAvg [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TPipeCacheTest::TestAutoConnect [GOOD] >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen [GOOD] |93.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::ColumnShardCounters [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> BootstrapperTest::UnavailableStateStorage [GOOD] Test command err: ... waiting for pipe to connect ... waiting for blocked connect attempt ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 ... waiting for blocked connect attempt (done) ... disconnecting nodes 2 <-> 1 ... waiting for pipe to disconnect ... waiting for pipe to connect ... waiting for pipe to connect ... waiting for multiple state storage lookup attempts 2025-06-25T15:04:19.198927Z node 6 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 0} for [4:3:2050]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 0 ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 1} for [4:6:2053]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 1 ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 2} for [4:9:2056]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 2 2025-06-25T15:04:19.199437Z node 6 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: ERROR, leader: [0:0:0] 2025-06-25T15:04:19.199476Z node 6 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:242: tablet: 9437184, type: Dummy, state storage unavailable, sleeping for 0.182540s 2025-06-25T15:04:19.367544Z node 6 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 0} for [4:3:2050]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 0 ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 1} for [4:6:2053]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 1 ... disconnecting nodes 2 <-> 0 ({EvReplicaLookup TabletID: 9437184 Cookie: 2} for [4:9:2056]) ... blocking NKikimr::TEvStateStorage::TEvReplicaLookup from SS_PROXY_REQUEST to SS_REPLICA cookie 2 ... 
waiting for multiple state storage lookup attempts (done) ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen [GOOD] Test command err: { LabeledCountersByGroup { Group: "aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } Delimiter: "|" } LabeledCountersByGroup { Group: "cons/aaa|1|aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } Delimiter: "|" } CounterNames: "value1" } 2025-06-25T15:04:19.630881Z node 3 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437185] NodeDisconnected NodeId# 2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TFlatMetrics::DecayingAverageAvg [GOOD] Test command err: ... waiting for all block results ... passing block result OK for [1:105:2137] ... blocking block result NO_GROUP for [1:106:2137] ... blocking block result NO_GROUP for [1:107:2137] ... blocking block result NO_GROUP for [1:108:2137] >> TTabletPipeTest::TestSendWithoutWaitOpen ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ProposeResultLost_RwTx-UseSink [GOOD] Test command err: 2025-06-25T15:04:08.757436Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:04:08.757959Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:04:08.758303Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T15:04:08.758640Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:04:08.758823Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:04:08.758937Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b3f/r3tmp/tmpMWyneF/pdisk_1.dat 2025-06-25T15:04:09.153658Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:04:09.301968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:04:09.420980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:09.421108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:09.424706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:09.424798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:09.439540Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:04:09.440002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:09.440330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:09.735930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:10.620553Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1508:2913], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:10.620692Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1519:2918], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:10.620795Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:10.630410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:04:10.797369Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:10.798090Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:11.218874Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1522:2921], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T15:04:11.418312Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1660:3000] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:04:11.674485Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:198: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNkNGJiLWU1MzA4ZTIxLTZiNjcyMDVjLWE0NTYxZTU3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Bootstrap done, become ReadyState 2025-06-25T15:04:11.674702Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:608: ActorId: [1:1686:2911] TxId: 281474976715660. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNkNGJiLWU1MzA4ZTIxLTZiNjcyMDVjLWE0NTYxZTU3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 2, stages: 1 2025-06-25T15:04:11.674800Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-25T15:04:11.674913Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:623: ActorId: [1:1686:2911] TxId: 281474976715660. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNkNGJiLWU1MzA4ZTIxLTZiNjcyMDVjLWE0NTYxZTU3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got request, become WaitResolveState 2025-06-25T15:04:11.675137Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715660. Resolved key sets: 1 2025-06-25T15:04:11.675288Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715660. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-25T15:04:11.675431Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2035: ActorId: [1:1686:2911] TxId: 281474976715660. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNkNGJiLWU1MzA4ZTIxLTZiNjcyMDVjLWE0NTYxZTU3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (Iterator (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3))))) )))) ) 2025-06-25T15:04:11.675553Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:1512: ActorId: [1:1686:2911] TxId: 281474976715660. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNkNGJiLWU1MzA4ZTIxLTZiNjcyMDVjLWE0NTYxZTU3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Stage [0,0] create compute task: 1 2025-06-25T15:04:11.675655Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNkNGJiLWU1MzA4ZTIxLTZiNjcyMDVjLWE0NTYxZTU3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:04:11.675719Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715660. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNkNGJiLWU1MzA4ZTIxLTZiNjcyMDVjLWE0NTYxZTU3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-25T15:04:11.676001Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715660. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNkNGJiLWU1MzA4ZTIxLTZiNjcyMDVjLWE0NTYxZTU3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [1:1689:2911] 2025-06-25T15:04:11.676065Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715660. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNkNGJiLWU1MzA4ZTIxLTZiNjcyMDVjLWE0NTYxZTU3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [1:1689:2911], channels: 0 2025-06-25T15:04:11.676120Z node 1 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [1:1686:2911] TxId: 281474976715660. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNkNGJiLWU1MzA4ZTIxLTZiNjcyMDVjLWE0NTYxZTU3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-25T15:04:11.676156Z node 1 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2809: ActorId: [1:1686:2911] TxId: 281474976715660. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNkNGJiLWU1MzA4ZTIxLTZiNjcyMDVjLWE0NTYxZTU3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Updating channels after the creation of compute actors 2025-06-25T15:04:11.676207Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715660. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTNkNGJiLWU1MzA4ZTIxLTZiNjcyMDVjLWE0NTYxZTU3, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [1:1689:2911] 2025-06-25T15:04:11.676247Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715660. Ctx: { TraceId: 01jykswb3r12d3eg8fcqdjv2zv, Database: , DatabaseId ... ode_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CT 1, CA [3:1741:3043], 2025-06-25T15:04:19.003422Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:1741:3043], 2025-06-25T15:04:19.003709Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:791: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing task: 1 on compute actor: [4:1743:2434] 2025-06-25T15:04:19.003746Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [4:1743:2434] 2025-06-25T15:04:19.003780Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:838: TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Task: 1, output channelId: 1, dst task: 2, at actor [3:1741:3043] 2025-06-25T15:04:19.003817Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:1741:3043], channels: 1 2025-06-25T15:04:19.003866Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [4:1743:2434], channels: 1 2025-06-25T15:04:19.004098Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [4:1743:2434], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-25T15:04:19.004131Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [3:1741:3043], CA [4:1743:2434], 2025-06-25T15:04:19.004157Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [3:1741:3043], CA [4:1743:2434], 2025-06-25T15:04:19.004592Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1733:3043] TxId: 281474976715663. 
Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [4:1743:2434], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 391 Tasks { TaskId: 1 CpuTimeUs: 262 ComputeCpuTimeUs: 8 BuildCpuTimeUs: 254 HostName: "ghrun-kqfvx6aroe" NodeId: 4 CreateTimeMs: 1750863859002 CurrentWaitInputTimeUs: 19 UpdateTimeMs: 1750863859003 } MaxMemoryUsage: 1048576 } 2025-06-25T15:04:19.004668Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [3:1741:3043], CA [4:1743:2434], 2025-06-25T15:04:19.004692Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [3:1741:3043], CA [4:1743:2434], 2025-06-25T15:04:19.008839Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:391: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got result, channelId: 2, shardId: 0, inputIndex: 0, from: [3:1742:3043], finished: 0 2025-06-25T15:04:19.008906Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:394: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send ack to channelId: 2, seqNo: 1, to: [3:1742:3043] 2025-06-25T15:04:19.012163Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:391: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got result, channelId: 2, shardId: 0, inputIndex: 0, from: [3:1742:3043], finished: 1 2025-06-25T15:04:19.012194Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:394: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send ack to channelId: 2, seqNo: 2, to: [3:1742:3043] 2025-06-25T15:04:19.012827Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, got execution state from compute actor: [3:1741:3043], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 914 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 488 FinishTimeMs: 1750863859012 InputRows: 3 InputBytes: 12 OutputRows: 3 OutputBytes: 12 ResultRows: 3 ResultBytes: 12 ComputeCpuTimeUs: 118 BuildCpuTimeUs: 370 HostName: "ghrun-kqfvx6aroe" NodeId: 3 CreateTimeMs: 1750863859001 UpdateTimeMs: 1750863859012 } MaxMemoryUsage: 1048576 } 2025-06-25T15:04:19.012897Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [3:1741:3043] 2025-06-25T15:04:19.012944Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1743:2434], 2025-06-25T15:04:19.012965Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [4:1743:2434], 2025-06-25T15:04:19.013220Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [4:1743:2434], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 980 DurationUs: 5000 Tasks { TaskId: 1 CpuTimeUs: 353 FinishTimeMs: 1750863859012 OutputRows: 3 OutputBytes: 12 Tables { TablePath: "/Root/table-1" ReadRows: 3 ReadBytes: 24 AffectedPartitions: 4 } IngressRows: 3 ComputeCpuTimeUs: 99 BuildCpuTimeUs: 254 WaitInputTimeUs: 3871 HostName: "ghrun-kqfvx6aroe" NodeId: 4 StartTimeMs: 1750863859007 CreateTimeMs: 1750863859002 UpdateTimeMs: 1750863859012 } MaxMemoryUsage: 1048576 } 2025-06-25T15:04:19.013263Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [4:1743:2434] 2025-06-25T15:04:19.013369Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:04:19.013427Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2202: ActorId: [3:1733:3043] TxId: 281474976715663. 
Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState 2025-06-25T15:04:19.013463Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [3:1733:3043] TxId: 281474976715663. Ctx: { TraceId: 01jykswk6h9m8ev12de1ezfe70, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=N2NjM2I4YjctNzY3OWExNS00NzVhZjYzNC01NjhhNThlNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.001894s ReadRows: 3 ReadBytes: 24 ru: 3 rate limiter was not found force flag: 1 { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 3 } } >> TResourceBrokerConfig::UpdateTasks [GOOD] >> TResourceBrokerConfig::UpdateResourceLimit [GOOD] >> TTabletPipeTest::TestSendWithoutWaitOpen [GOOD] >> TTabletPipeTest::TestRebootUsingTabletWithoutAcceptor |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendWithoutWaitOpen [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBrokerConfig::UpdateResourceLimit [GOOD] >> TTabletPipeTest::TestSendWithoutWaitOpenToWrongTablet >> TResourceBroker::TestCounters >> TTabletPipeTest::TestRebootUsingTabletWithoutAcceptor [GOOD] >> TResourceBroker::TestOverusage >> TTabletPipeTest::TestTwoNodesAndRebootOfProducer >> TTabletPipeTest::TestSendWithoutWaitOpenToWrongTablet [GOOD] >> TTabletPipeTest::TestPipeWithVersionInfo >> BootstrapperTest::MultipleBootstrappers [GOOD] >> TResourceBroker::TestCounters [GOOD] >> TResourceBroker::TestChangeTaskType ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestRebootUsingTabletWithoutAcceptor [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:112:2057] recipient: [1:108:2139] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:112:2057] recipient: [1:108:2139] Leader for TabletID 9437185 is [0:0:0] sender: [1:113:2057] recipient: [1:109:2140] IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [1:113:2057] recipient: [1:109:2140] Leader for TabletID 9437184 is [1:120:2147] sender: [1:121:2057] recipient: [1:108:2139] Leader for TabletID 9437185 is [1:123:2149] sender: [1:125:2057] recipient: [1:109:2140] Leader for TabletID 9437184 is [1:120:2147] sender: [1:160:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:123:2149] sender: [1:162:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:123:2149] sender: [1:165:2057] recipient: [1:105:2138] Leader for TabletID 9437185 is [1:123:2149] sender: [1:167:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:123:2149] sender: [1:169:2057] recipient: [1:168:2178] Leader for TabletID 9437185 is [1:170:2179] sender: [1:171:2057] recipient: [1:168:2178] Leader for TabletID 9437185 is [1:170:2179] sender: [1:199:2057] recipient: [1:14:2061] Leader for TabletID 9437184 is [1:120:2147] sender: [1:202:2057] recipient: [1:104:2137] Leader for TabletID 9437184 is [1:120:2147] sender: [1:205:2057] recipient: [1:14:2061] Leader for TabletID 9437184 is [1:120:2147] sender: [1:206:2057] recipient: [1:204:2202] Leader for TabletID 9437184 is [1:207:2203] sender: [1:208:2057] recipient: [1:204:2202] Leader for TabletID 9437184 is 
[1:207:2203] sender: [1:237:2057] recipient: [1:14:2061] >> TResourceBroker::TestOverusage [GOOD] >> TResourceBroker::TestNotifyActorDied >> TTabletPipeTest::TestPipeWithVersionInfo [GOOD] >> KqpErrors::ProposeErrorEvWrite [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendWithoutWaitOpenToWrongTablet [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> BootstrapperTest::MultipleBootstrappers [GOOD] Test command err: ... waiting for pipe to connect ... stopping current instance ... waiting for pipe to disconnect ... waiting for pipe to connect ... sleeping for 2 seconds 2025-06-25T15:04:17.469265Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:17.469318Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:17.469344Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:17.470041Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-25T15:04:17.470075Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 8427358873417017059 2025-06-25T15:04:17.470210Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-25T15:04:17.470226Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 314095936534775797 2025-06-25T15:04:17.470467Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-25T15:04:17.470488Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 4772764162469967008 2025-06-25T15:04:17.471159Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: UNKNOWN 2025-06-25T15:04:17.471294Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: FREE 2025-06-25T15:04:17.471410Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: FREE 2025-06-25T15:04:17.471471Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: FREE 2025-06-25T15:04:17.471491Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:499: tablet: 9437184, type: Dummy, lost round, wait for 0.106295s 2025-06-25T15:04:17.471537Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: FREE 2025-06-25T15:04:17.471580Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: FREE 2025-06-25T15:04:17.471596Z node 3 :BOOTSTRAPPER NOTICE: bootstrapper.cpp:680: tablet: 9437184, type: Dummy, boot 2025-06-25T15:04:17.471733Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: FREE 2025-06-25T15:04:17.471750Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:499: tablet: 9437184, type: Dummy, lost round, wait for 0.101463s 2025-06-25T15:04:17.669315Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:17.669796Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: 
tablet: 9437184, type: Dummy, lookup: OK, leader: [3:287:2098] 2025-06-25T15:04:17.670141Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-25T15:04:17.670169Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting 2025-06-25T15:04:17.680499Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:17.681198Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:287:2098] 2025-06-25T15:04:17.681745Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-25T15:04:17.681785Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting ... waiting for pipe to connect ... tablet initially started on node 3 (idx 1) in gen 2 ... disconnecting other nodes ... sleeping for 2 seconds (tablet expected to survive) 2025-06-25T15:04:18.603636Z node 3 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 5 2025-06-25T15:04:18.603698Z node 3 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 4 2025-06-25T15:04:18.603774Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-06-25T15:04:18.603818Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:18.603965Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-06-25T15:04:18.603992Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:18.604530Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:287:2098] 2025-06-25T15:04:18.604750Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:287:2098] 2025-06-25T15:04:18.605334Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-25T15:04:18.605371Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting 2025-06-25T15:04:18.605416Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-25T15:04:18.605431Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting ... disconnecting other nodes (new tablet connections fail) ... 
sleeping for 2 seconds (tablet expected to survive) 2025-06-25T15:04:19.382767Z node 3 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 4 2025-06-25T15:04:19.382833Z node 3 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 5 2025-06-25T15:04:19.382908Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-06-25T15:04:19.382943Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:19.382984Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-06-25T15:04:19.383001Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:19.383529Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:287:2098] 2025-06-25T15:04:19.383743Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:287:2098] ... disconnecting nodes 1 <-> 2 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 2025-06-25T15:04:19.383970Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-06-25T15:04:19.384000Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 10053858333920509680 ... disconnecting nodes 1 <-> 3 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 2025-06-25T15:04:19.384320Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-06-25T15:04:19.384340Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 11851482555838222794 2025-06-25T15:04:19.384424Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: UNKNOWN 2025-06-25T15:04:19.384489Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: OWNER 2025-06-25T15:04:19.384516Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 3 (owner) 2025-06-25T15:04:19.384677Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: WAITFOR 2025-06-25T15:04:19.384692Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 4 ... disconnect other nodes (new owner expected) ... 
sleeping for 2 seconds (new tablet expected to start once) 2025-06-25T15:04:20.144237Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:643: tablet: 9437184, type: Dummy, disconnected from 3, round 16045690984833335029 2025-06-25T15:04:20.144331Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:643: tablet: 9437184, type: Dummy, disconnected from 3, round 16045690984833335029 2025-06-25T15:04:20.144377Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:20.144595Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:20.145241Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:287:2098] 2025-06-25T15:04:20.145501Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:287:2098] ... disconnecting nodes 1 <-> 2 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 ... disconnecting nodes 1 <-> 3 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 2025-06-25T15:04:20.145974Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-06-25T15:04:20.146026Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 8470239763125230813 2025-06-25T15:04:20.146252Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-06-25T15:04:20.146283Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 6622044195218853944 ... disconnecting nodes 1 <-> 2 (bootstrap watch attempt) ... blocking NKikimr::TEvBootstrapper::TEvWatch from TABLET_BOOTSTRAPPER to TABLET_BOOTSTRAPPER cookie 16045690984833335031 ... disconnecting nodes 1 <-> 3 (bootstrap watch attempt) ... 
blocking NKikimr::TEvBootstrapper::TEvWatch from TABLET_BOOTSTRAPPER to TABLET_BOOTSTRAPPER cookie 16045690984833335031 2025-06-25T15:04:20.146790Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:403: tablet: 9437184, type: Dummy, disconnected from 3, round 16045690984833335031 2025-06-25T15:04:20.146843Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: DISCONNECTED 2025-06-25T15:04:20.146994Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: FREE 2025-06-25T15:04:20.147037Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:499: tablet: 9437184, type: Dummy, lost round, wait for 0.127990s 2025-06-25T15:04:20.147134Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:403: tablet: 9437184, type: Dummy, disconnected from 3, round 16045690984833335031 2025-06-25T15:04:20.147159Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: DISCONNECTED 2025-06-25T15:04:20.147192Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: FREE 2025-06-25T15:04:20.147209Z node 5 :BOOTSTRAPPER NOTICE: bootstrapper.cpp:680: tablet: 9437184, type: Dummy, boot 2025-06-25T15:04:20.148978Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:715: tablet: 9437184, type: Dummy, tablet dead 2025-06-25T15:04:20.149058Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:20.152109Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [5:438:2098] 2025-06-25T15:04:20.164726Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-25T15:04:20.164771Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting 2025-06-25T15:04:20.239637Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-25T15:04:20.240391Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [5:438:2098] 2025-06-25T15:04:20.240871Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-25T15:04:20.240909Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting ... waiting for pipe to connect ... disconnecting nodes 1 <-> 0 (tablet connect attempt) ... 
blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to cookie 1 >> TResourceBroker::TestChangeTaskType [GOOD] >> TTabletPipeTest::TestTwoNodesAndRebootOfProducer [GOOD] >> TResourceBroker::TestNotifyActorDied [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestPipeWithVersionInfo [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestChangeTaskType [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestTwoNodesAndRebootOfProducer [GOOD] |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TResourceBroker::TestNotifyActorDied [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ProposeErrorEvWrite [GOOD] Test command err: 2025-06-25T15:04:08.986593Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:04:08.987000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:04:08.987271Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T15:04:08.987501Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:04:08.987626Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:04:08.987700Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001b4b/r3tmp/tmprvyeYa/pdisk_1.dat 2025-06-25T15:04:09.257156Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:04:09.366861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:04:09.483741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:09.483845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:09.486781Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:09.486854Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:09.500093Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:04:09.500476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:09.500733Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:09.787031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:10.635230Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1508:2913], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:10.635324Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1519:2918], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:10.635375Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:10.639476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:04:10.797032Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:10.798149Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:11.220873Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1522:2921], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T15:04:11.418459Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:1660:3000] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:04:11.675929Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:96: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Begin literal execution. Operation timeout: 0.000000s, cancelAfter: (empty maybe) 2025-06-25T15:04:11.675998Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:125: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Begin literal execution, txs: 1 2025-06-25T15:04:11.676056Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-25T15:04:11.676097Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:135: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (ToStream (Just (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3)))))) )))) ) 2025-06-25T15:04:11.676150Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:241: Create result channelId: 1 from task: 1 with index: 0 2025-06-25T15:04:11.683932Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:275: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: , CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Execution is complete, results: 1 2025-06-25T15:04:11.694952Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:96: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jykswb495jrr1ag6pwskap72, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDQxYmYyOTctM2ZmYjg3NDctNzk1NzY5MTUtOGJhMjNlZjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Begin literal execution. Operation timeout: 299.421713s, cancelAfter: (empty maybe) 2025-06-25T15:04:11.695019Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:125: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jykswb495jrr1ag6pwskap72, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDQxYmYyOTctM2ZmYjg3NDctNzk1NzY5MTUtOGJhMjNlZjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Begin literal execution, txs: 1 2025-06-25T15:04:11.695073Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-25T15:04:11.695117Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:135: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jykswb495jrr1ag6pwskap72, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDQxYmYyOTctM2ZmYjg3NDctNzk1NzY5MTUtOGJhMjNlZjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (ToStream (Just (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3)))))) )))) ) 2025-06-25T15:04:11.695193Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:241: Create result channelId: 1 from task: 1 with index: 0 2025-06-25T15:04:11.695771Z node 1 :KQP_EXECUTER DEBUG: kqp_literal_executer.cpp:275: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jykswb495jrr1ag6pwskap72, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDQxYmYyOTctM2ZmYjg3NDctNzk1NzY5MTUtOGJhMjNlZjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Execution is complete, results: 1 2025-06-25T15:04:11.695953Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:198: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jykswb495jrr1ag6pwskap72, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDQxYmYyOTctM2ZmYjg3NDctNzk1NzY5MTUtOGJhMjNlZjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Bootstrap done, become ReadyState 2025-06-25T15:04:11.696140Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:608: ActorId: [1:1686:2911] TxId: 281474976715660. Ctx: { TraceId: 01jykswb495jrr1ag6pwskap72, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDQxYmYyOTctM2ZmYjg3NDctNzk1NzY5MTUtOGJhMjNlZjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 2, stages: 1 2025-06-25T15:04:11.696179Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-25T15:04:11.696321Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:623: ActorId: [1:1686:2911] TxId: 281474976715660. Ctx: { TraceId: 01jykswb495jrr1ag6pwskap72, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDQxYmYyOTctM2ZmYjg3NDctNzk1NzY5MTUtOGJhMjNlZjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got request, become WaitResolveState 2025-06-25T15:04:11.696604Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715660. Resolved key sets: 1 2025-06-25T15:04:11.696765Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715660. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-25T15:04:11.696901Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2035: ActorId: [1:1686:2911] TxId: 281474976715660. Ctx: { TraceId: 01jykswb495jrr1ag6pwskap72, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDQxYmYyOTctM2ZmYjg3NDctNzk1NzY5MTUtOGJhMjNlZjY=, CurrentExecutionId: , ... 4NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:04:21.179954Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:562: TxId: 281474976715672. 
Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-25T15:04:21.180171Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [3:1948:3146] 2025-06-25T15:04:21.180236Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:1948:3146], channels: 0 2025-06-25T15:04:21.180295Z node 3 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2806: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-25T15:04:21.180351Z node 3 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2809: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Updating channels after the creation of compute actors 2025-06-25T15:04:21.180387Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:802: TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [3:1948:3146] 2025-06-25T15:04:21.180427Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:794: TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:1948:3146], channels: 0 2025-06-25T15:04:21.180469Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [3:1948:3146], 2025-06-25T15:04:21.180513Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: WaitResolveState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:1948:3146], 2025-06-25T15:04:21.180573Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2368: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-25T15:04:21.181283Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [3:1948:3146], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-25T15:04:21.181326Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:668: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [3:1948:3146], 2025-06-25T15:04:21.181372Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:156: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:1948:3146], 2025-06-25T15:04:21.181949Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:442: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [3:1948:3146], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 527 Tasks { TaskId: 1 CpuTimeUs: 108 FinishTimeMs: 1750863861181 EgressBytes: 10 EgressRows: 1 ComputeCpuTimeUs: 22 BuildCpuTimeUs: 86 HostName: "ghrun-kqfvx6aroe" NodeId: 3 CreateTimeMs: 1750863861180 UpdateTimeMs: 1750863861181 } MaxMemoryUsage: 1048576 } 2025-06-25T15:04:21.182052Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:697: TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [3:1948:3146] 2025-06-25T15:04:21.182121Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:276: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send Commit to BufferActor=[3:1944:3146] 2025-06-25T15:04:21.182166Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:862: ActorId: [3:1945:3146] TxId: 281474976715672. 
Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000527s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-25T15:04:21.195800Z node 3 :KQP_COMPUTE WARN: kqp_write_actor.cpp:715: SelfId: [3:1951:3146], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:1935:3146]Got OUT_OF_SPACE for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:1951:3146]. Ignored this error. 2025-06-25T15:04:21.195935Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [3:1944:3146], SessionActorId: [3:1935:3146], statusCode=OVERLOADED. Issue=
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2006 . sessionActorId=[3:1935:3146]. isRollback=0 2025-06-25T15:04:21.196241Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, ActorId: [3:1935:3146], ActorState: ExecuteState, TraceId: 01jykswnce7sepz5kw0c1xn5zs, got TEvKqpBuffer::TEvError in ExecuteState, status: OVERLOADED send to: [3:1945:3146] from: [3:1944:3146] 2025-06-25T15:04:21.196386Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:814: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got EvAbortExecution, status: OVERLOADED, message: {
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2006 } 2025-06-25T15:04:21.196454Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. OVERLOADED: {
: Error: Tablet 72075186224037888 is out of space. Table `/Root/table-1`., code: 2006 } 2025-06-25T15:04:21.196508Z node 3 :KQP_EXECUTER INFO: kqp_executer_impl.h:1951: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. task: 1, does not have the CA id yet or is already complete 2025-06-25T15:04:21.196652Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2062: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ReplyErrorAndDie. Response: Status: OVERLOADED Issues { message: "Tablet 72075186224037888 is out of space. Table `/Root/table-1`." issue_code: 2006 severity: 1 } Result { Stats { CpuTimeUs: 527 } } , to ActorId: [3:1935:3146] 2025-06-25T15:04:21.196696Z node 3 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2877: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shutdown immediately - nothing to wait 2025-06-25T15:04:21.196807Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2188: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-25T15:04:21.196846Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2202: ActorId: [3:1945:3146] TxId: 281474976715672. Ctx: { TraceId: 01jykswnce7sepz5kw0c1xn5zs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState 2025-06-25T15:04:21.196972Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=ZjcwMTllZjEtMmZiY2E0M2EtNDA1ZmQxNy04NzVkMmFkYg==, ActorId: [3:1935:3146], ActorState: ExecuteState, TraceId: 01jykswnce7sepz5kw0c1xn5zs, Create QueryResponse for error on request, msg: >> ReadIteratorExternalBlobs::ExtBlobsWithSpecificKeys [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheBeginning |93.5%| [TA] $(B)/ydb/core/tx/datashard/ut_kqp_errors/test-results/unittest/{meta.json ... results_accumulator.log} |93.5%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_errors/test-results/unittest/{meta.json ... 
results_accumulator.log} |93.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKDatetime >> TColumnShardTestReadWrite::WriteReadDuplicate >> TColumnShardTestReadWrite::WriteReadStandalone >> TColumnShardTestReadWrite::RebootWriteRead >> TColumnShardTestReadWrite::PortionInfoSize [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKUtf8_Reboot >> TColumnShardTestReadWrite::CompactionInGranule_PKString >> EvWrite::WriteWithSplit >> TColumnShardTestReadWrite::ReadWithProgramNoProjection >> TColumnShardTestReadWrite::WriteReadStandaloneExoticTypes >> TColumnShardTestReadWrite::WriteExoticTypes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::PortionInfoSize [GOOD] Test command err: 208 136 28 48 32 24 16 24 56 >> KqpLimits::TooBigKey+useSink [GOOD] >> KqpLimits::TooBigKey-useSink >> Normalizers::CleanUnusedTablesNormalizer >> TColumnShardTestReadWrite::ReadStale >> TColumnShardTestReadWrite::ReadWithProgramNoProjection [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadWithProgramNoProjection [GOOD] Test command err: 2025-06-25T15:04:24.430619Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:24.456707Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:24.456905Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:24.463000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:24.463180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:24.463362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:24.463459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:24.463545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:24.463638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:24.463746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 
2025-06-25T15:04:24.463867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:24.463981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:24.464065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.464154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:24.488448Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:24.488570Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:24.488608Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:24.488771Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.488907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:24.488990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:24.489029Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:24.489121Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:24.489173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:24.489209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:24.489246Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:24.489423Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.489474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:24.489507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:24.489532Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:24.489626Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:24.489677Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:24.489721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:24.489747Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:24.489787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:24.489817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:24.489847Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:24.490016Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:24.490052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:24.490087Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:24.490249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:24.490292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:24.490322Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:24.490441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:24.490477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.490517Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.490589Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:24.490670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:24.490703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:24.490728Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:24.490895Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=32; 2025-06-25T15:04:24.490963Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=30; 2025-06-25T15:04:24.491035Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=39; 2025-06-25T15:04:24.491105Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=34; 2025-06-25T15:04:24.491175Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:24.491238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:24.491274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:24.491324Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
harding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:04:25.288914Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:04:25.289027Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;);columns=2;rows=100; 2025-06-25T15:04:25.289080Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1200;num_rows=100;batch_columns=level,timestamp; 2025-06-25T15:04:25.289339Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:270:2282];bytes=1200;rows=100;faults=0;finished=0;fault=0;schema=level: int32 timestamp: timestamp[us]; 2025-06-25T15:04:25.289475Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:04:25.289573Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:04:25.289693Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 
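The stage=data_format entry just above reports batch_size=1200 for num_rows=100 with the result schema level: int32 timestamp: timestamp[us]; that size matches the raw value widths exactly (4-byte int32 plus 8-byte microsecond timestamp per row). A quick illustrative check:

rows = 100
int32_width = 4         # "level: int32"
timestamp_us_width = 8  # "timestamp: timestamp[us]"
assert rows * (int32_width + timestamp_us_width) == 1200   # matches batch_size=1200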
2025-06-25T15:04:25.289820Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:25.289921Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:04:25.290018Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:04:25.290062Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:278:2290] finished for tablet 9437184 2025-06-25T15:04:25.290420Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:270:2282];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.008},{"events":["f_ack","l_task_result"],"t":0.034},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.036}],"full":{"a":1750863865253840,"name":"_full_task","f":1750863865253840,"d_finished":0,"c":0,"l":1750863865290104,"d":36264},"events":[{"name":"bootstrap","f":1750863865254052,"d_finished":4616,"c":1,"l":1750863865258668,"d":4616},{"a":1750863865289803,"name":"ack","f":1750863865288700,"d_finished":1020,"c":1,"l":1750863865289720,"d":1321},{"a":1750863865289790,"name":"processing","f":1750863865262147,"d_finished":21586,"c":10,"l":1750863865289722,"d":21900},{"name":"ProduceResults","f":1750863865257112,"d_finished":2530,"c":13,"l":1750863865290052,"d":2530},{"a":1750863865290054,"name":"Finish","f":1750863865290054,"d_finished":0,"c":0,"l":1750863865290104,"d":50},{"name":"task_result","f":1750863865262162,"d_finished":20434,"c":9,"l":1750863865288606,"d":20434}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:04:25.290484Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:270:2282];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 
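The scan_finish entry above embeds a per-stage stats blob (stats={"p":[...],"full":{...},"events":[...]}), where each event carries d_finished (accumulated time), c (call count) and f/l (first/last occurrence); the values line up with the relative "t" marks given in seconds, so they appear to be microseconds. A small illustrative extractor, assuming that interpretation:

import json
import re

# The JSON blob sits between "stats=" and ";iterator=" in scan_finish entries.
STATS_RE = re.compile(r"stats=(\{.*?\});iterator=", re.DOTALL)

def stage_durations_ms(entry):
    """Per-stage accumulated time in milliseconds from one scan_finish entry."""
    match = STATS_RE.search(entry)
    if not match:
        return {}
    stats = json.loads(match.group(1))
    return {event["name"]: event["d_finished"] / 1000.0 for event in stats["events"]}

For the entry above this would report roughly 4.6 ms of bootstrap, 21.6 ms of processing, 2.5 ms of ProduceResults and 20.4 ms of task_result within a scan of about 36 ms total.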
2025-06-25T15:04:25.290829Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:270:2282];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.008},{"events":["f_ack","l_task_result"],"t":0.034},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.036}],"full":{"a":1750863865253840,"name":"_full_task","f":1750863865253840,"d_finished":0,"c":0,"l":1750863865290523,"d":36683},"events":[{"name":"bootstrap","f":1750863865254052,"d_finished":4616,"c":1,"l":1750863865258668,"d":4616},{"a":1750863865289803,"name":"ack","f":1750863865288700,"d_finished":1020,"c":1,"l":1750863865289720,"d":1740},{"a":1750863865289790,"name":"processing","f":1750863865262147,"d_finished":21586,"c":10,"l":1750863865289722,"d":22319},{"name":"ProduceResults","f":1750863865257112,"d_finished":2530,"c":13,"l":1750863865290052,"d":2530},{"a":1750863865290054,"name":"Finish","f":1750863865290054,"d_finished":0,"c":0,"l":1750863865290523,"d":469},{"name":"task_result","f":1750863865262162,"d_finished":20434,"c":9,"l":1750863865288606,"d":20434}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:04:25.290891Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:04:25.252561Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=8392;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=8392;selected_rows=0; 2025-06-25T15:04:25.290923Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:04:25.291171Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:278:2290];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;; 2025-06-25T15:04:25.291713Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Finished read cookie: 1 at tablet 9437184 2025-06-25T15:04:25.291899Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 100 scanId: 0 version: {1750863865390:100} readable: {1750863865390:max} at tablet 9437184 2025-06-25T15:04:25.291999Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 100 scanId: 0 at tablet 9437184 2025-06-25T15:04:25.292234Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=100;scan_id=0;gen=0;table=;snapshot={1750863865390:100};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 100 } Function { Id: 1 Arguments { Id: 1 } Arguments { Id: 9 } } } } Command { Filter { Predicate { Id: 100 } } } ; 2025-06-25T15:04:25.292333Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=100;scan_id=0;gen=0;table=;snapshot={1750863865390:100};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 100 } Function { Id: 1 Arguments { Id: 1 } Arguments { Id: 9 } } } } Command { Filter { Predicate { Id: 100 } } } ; 2025-06-25T15:04:25.292436Z node 1 :TX_COLUMNSHARD_SCAN WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=100;scan_id=0;gen=0;table=;snapshot={1750863865390:100};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:14;event=TTxScan failed;problem=cannot parse program;details=Can't parse SsaProgram: program has no projections; >> RetryPolicy::RetryWithBatching [GOOD] >> TColumnShardTestReadWrite::ReadStale [GOOD] >> EvWrite::WriteWithSplit [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadStale [GOOD] Test command err: 2025-06-25T15:04:25.706111Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:25.723843Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:25.724032Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:25.729462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:25.729623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:25.729805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:25.729899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:25.729993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:25.730097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:25.730171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:25.730250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:25.730333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:25.730395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:25.730451Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:25.748291Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:25.748421Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:25.748457Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:25.748572Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:25.748688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:25.748747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:25.748772Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:25.748863Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:25.748911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:25.748951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:25.748982Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:25.749101Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:25.749138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:25.749178Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:25.749199Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:25.749251Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:25.749282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:25.749306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:25.749320Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:25.749344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:25.749364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:25.749383Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:25.749576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:25.749636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:25.749669Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:25.749863Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:25.749926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:25.749969Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:25.750120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:25.750199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:25.750234Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:25.750321Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:25.750371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:25.750399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:25.750420Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:25.750576Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=42; 2025-06-25T15:04:25.750644Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=30; 2025-06-25T15:04:25.750697Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=27; 2025-06-25T15:04:25.750750Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=28; 2025-06-25T15:04:25.750817Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:25.750864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:25.750887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:25.750925Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
6-25T15:04:26.292303Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:04:26.292413Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000001; 2025-06-25T15:04:26.292458Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000001; 2025-06-25T15:04:26.296904Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000001; 2025-06-25T15:04:26.297052Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tx_controller.cpp:215;event=finished_tx;tx_id=10; 2025-06-25T15:04:26.320215Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] complete at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=54320;columns=10; 2025-06-25T15:04:26.331906Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:210;event=register_operation;operation_id=1;last=1; 2025-06-25T15:04:26.331962Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=write_queue.cpp:14;writing_size=54320;operation_id=af885bbc-51d511f0-a260086e-25ba6902;in_flight=1;size_in_flight=54320; 2025-06-25T15:04:26.344435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:128:2158];write_id=1;path_id={internal: 9438184000001, ss: 1};entity_id=3;size=11104;limit=10240;r_count=999;fline=column_info.h:139;sizes=5552,5552;s_splitted=5616,5720;r_splitted=499,500; 2025-06-25T15:04:26.345240Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:128:2158];write_id=1;path_id={internal: 9438184000001, ss: 1};fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=74272;count=11;actions=__DEFAULT,;waiting=1;; 2025-06-25T15:04:26.348979Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=54320;event=data_write_finished;writing_id=af885bbc-51d511f0-a260086e-25ba6902; 2025-06-25T15:04:26.349146Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=86;data_size=62;sum=86;count=1; 
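The TBuildBatchesTask warning above records the write path splitting an oversized column blob: entity_id=3 came to size=11104 against limit=10240 for r_count=999 rows and was cut into halves of 499 and 500 rows (r_splitted=499,500; serialized as s_splitted=5616,5720 from the sizes=5552,5552 estimate). A sketch of that halving step, assuming a simple recursive split-in-half policy with linear size scaling — the actual splitter in column_info.h may well differ:

def split_rows(row_count, blob_size, limit):
    """Illustrative halving: cut a row range in two until the estimated blob fits the limit."""
    if blob_size <= limit:
        return [row_count]
    left = row_count // 2           # 999 -> 499
    right = row_count - left        # 999 -> 500
    half = blob_size / 2            # 11104 -> 5552, cf. sizes=5552,5552
    return split_rows(left, half, limit) + split_rows(right, half, limit)

print(split_rows(999, 11104, 10240))   # [499, 500], matching r_splitted=499,500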
2025-06-25T15:04:26.349202Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=558;data_size=550;sum=558;count=2;size_of_meta=136; 2025-06-25T15:04:26.349275Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=630;data_size=622;sum=630;count=1;size_of_portion=208; 2025-06-25T15:04:26.349933Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 11 2025-06-25T15:04:26.350058Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=2;operation_id=1; 2025-06-25T15:04:26.361484Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 11 2025-06-25T15:04:26.373807Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863866686 at tablet 9437184, mediator 0 2025-06-25T15:04:26.373866Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] execute at tablet 9437184 2025-06-25T15:04:26.374124Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=100;fline=abstract.h:83;progress_tx_id=100;lock_id=1;broken=0; 2025-06-25T15:04:26.374497Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=100;fline=tx_controller.cpp:215;event=finished_tx;tx_id=100; 2025-06-25T15:04:26.386136Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] complete at tablet 9437184 2025-06-25T15:04:26.386232Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=abstract.h:93;progress_tx_id=100;lock_id=1;broken=0; 2025-06-25T15:04:26.386471Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;commit_tx_id=100;commit_lock_id=1;fline=manager.cpp:177;event=remove_by_insert_id;id=2;operation_id=1; 2025-06-25T15:04:26.386515Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;commit_tx_id=100;commit_lock_id=1;fline=manager.cpp:180;event=remove_operation;operation_id=1; 2025-06-25T15:04:26.386859Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:236;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:04:26.386912Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:04:26.386989Z node 1 
:TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-25T15:04:26.397899Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:04:26.397956Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:04:26.397998Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:04:26.398072Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:04:26.398372Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 1 version: {1750863506686:max} readable: {1750863866686:max} at tablet 9437184 2025-06-25T15:04:26.409758Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 1 at tablet 9437184 2025-06-25T15:04:26.409856Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tx_id=18446744073709551615;scan_id=1;gen=0;table=test_olap_table;snapshot={1750863506686:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=constructor.cpp:18;event=overriden_columns;ids=1,2,3,4,5,6,7,8,9,10,4294967040,4294967041,4294967042,4294967043; 2025-06-25T15:04:26.409913Z node 1 :TX_COLUMNSHARD_SCAN WARN: log.cpp:784: tx_id=18446744073709551615;scan_id=1;gen=0;table=test_olap_table;snapshot={1750863506686:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:14;event=TTxScan failed;problem=cannot build metadata withno ranges;details=Snapshot too old: {1750863506686:max}. CS min read snapshot: {1750863566686:max}. 
now: 2025-06-25T15:04:26.409893Z; 2025-06-25T15:04:26.417958Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863506686:max} readable: {1750863866686:max} at tablet 9437184 2025-06-25T15:04:26.429374Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:04:26.430695Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863506686:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } Columns { Id: 6 } } } ; 2025-06-25T15:04:26.430757Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863506686:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } Columns { Id: 6 } } } ; 2025-06-25T15:04:26.431430Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863506686:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":4,"inputs":[{"from":5}]},{"owner_id":5,"inputs":[{"from":6}]},{"owner_id":6,"inputs":[]}],"nodes":{"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"p":{"data":[{"name":"timestamp","id":1},{"name":"message","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":6},"5":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1},{"name":"message","id":6}]},"o":"1,6","t":"FetchOriginalData"},"w":4,"id":5},"4":{"p":{"i":"6","p":{"address":{"name":"message","id":6}},"o":"6","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"1,6","t":"Projection"},"w":18,"id":0}}}; 2025-06-25T15:04:26.431520Z node 1 :TX_COLUMNSHARD_SCAN WARN: log.cpp:784: tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863506686:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:14;event=TTxScan failed;problem=cannot build metadata withno ranges;details=Snapshot too old: {1750863506686:max}. CS min read snapshot: {1750863566686:max}. 
now: 2025-06-25T15:04:26.431486Z; >> Normalizers::CleanUnusedTablesNormalizer [GOOD] >> TColumnShardTestReadWrite::CompactionSplitGranule_PKUInt32 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> EvWrite::WriteWithSplit [GOOD] Test command err: 2025-06-25T15:04:24.430619Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:24.456707Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:24.456898Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:24.462993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:24.463179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:24.463369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:24.463463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:24.463550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:24.463685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:24.463789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:24.463908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:24.464010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:24.464101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.464199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:24.488448Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: 
TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:24.488574Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:24.488611Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:24.488767Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.488918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:24.488997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:24.489031Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:24.489105Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:24.489170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:24.489208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:24.489247Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:24.489395Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.489447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:24.489481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:24.489519Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:24.489598Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:24.489642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:24.489680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:24.489711Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:24.489762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:24.489798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:24.489826Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:24.490012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:24.490049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:24.490074Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:24.490231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:24.490275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:24.490299Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:24.490435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:24.490475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.490519Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.490582Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:24.490646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:24.490685Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:24.490730Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:24.490894Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=31; 2025-06-25T15:04:24.490982Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=39; 2025-06-25T15:04:24.491051Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=30; 2025-06-25T15:04:24.491109Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=26; 2025-06-25T15:04:24.491181Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:24.491238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:24.491278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:24.491325Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... n_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:04:26.617604Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-25T15:04:26.617699Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:219;stage=no data is ready yet;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:04:26.617938Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-25T15:04:26.617969Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-25T15:04:26.618009Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=1; 2025-06-25T15:04:26.618064Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1024;merger=0;interval_id=1; 2025-06-25T15:04:26.618100Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:04:26.618185Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:04:26.618217Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1024;finished=1; 2025-06-25T15:04:26.618303Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;);columns=2;rows=1024; 2025-06-25T15:04:26.618353Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=8400896;num_rows=1024;batch_columns=key,field; 2025-06-25T15:04:26.618516Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:851:2863];bytes=8400896;rows=1024;faults=0;finished=0;fault=0;schema=key: uint64 field: string; 2025-06-25T15:04:26.618609Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:04:26.618677Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:04:26.618751Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:04:26.620640Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:26.620732Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:04:26.620818Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:04:26.620859Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:857:2869] finished for tablet 9437184 2025-06-25T15:04:26.621209Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:851:2863];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.007},{"events":["f_ack"],"t":0.439},{"events":["l_task_result"],"t":0.514},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.517}],"full":{"a":1750863866103803,"name":"_full_task","f":1750863866103803,"d_finished":0,"c":0,"l":1750863866620911,"d":517108},"events":[{"name":"bootstrap","f":1750863866104044,"d_finished":2840,"c":1,"l":1750863866106884,"d":2840},{"a":1750863866620621,"name":"ack","f":1750863866543761,"d_finished":72175,"c":2,"l":1750863866617727,"d":72465},{"a":1750863866620584,"name":"processing","f":1750863866111092,"d_finished":136425,"c":11,"l":1750863866618800,"d":136752},{"name":"ProduceResults","f":1750863866105963,"d_finished":74049,"c":15,"l":1750863866620846,"d":74049},{"a":1750863866620849,"name":"Finish","f":1750863866620849,"d_finished":0,"c":0,"l":1750863866620911,"d":62},{"name":"task_result","f":1750863866111107,"d_finished":64033,"c":9,"l":1750863866618799,"d":64033}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:04:26.621270Z node 1 
:TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:851:2863];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:04:26.621559Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:851:2863];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.007},{"events":["f_ack"],"t":0.439},{"events":["l_task_result"],"t":0.514},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.517}],"full":{"a":1750863866103803,"name":"_full_task","f":1750863866103803,"d_finished":0,"c":0,"l":1750863866621308,"d":517505},"events":[{"name":"bootstrap","f":1750863866104044,"d_finished":2840,"c":1,"l":1750863866106884,"d":2840},{"a":1750863866620621,"name":"ack","f":1750863866543761,"d_finished":72175,"c":2,"l":1750863866617727,"d":72862},{"a":1750863866620584,"name":"processing","f":1750863866111092,"d_finished":136425,"c":11,"l":1750863866618800,"d":137149},{"name":"ProduceResults","f":1750863866105963,"d_finished":74049,"c":15,"l":1750863866620846,"d":74049},{"a":1750863866620849,"name":"Finish","f":1750863866620849,"d_finished":0,"c":0,"l":1750863866621308,"d":459},{"name":"task_result","f":1750863866111107,"d_finished":64033,"c":9,"l":1750863866618799,"d":64033}],"id":"9437184::1"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:04:26.621630Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:04:26.102651Z;index_granules=0;index_portions=1;index_batches=2050;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=17137952;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=17137952;selected_rows=0; 2025-06-25T15:04:26.621673Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:04:26.621856Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:857:2869];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> RetryPolicy::RetryWithBatching [GOOD] Test command err: 2025-06-25T14:59:21.769340Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.769381Z :DEBUG: [db] [sessionid] [cluster] New values: 
ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.769416Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-25T14:59:21.769762Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-25T14:59:21.769795Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.769812Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.770787Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.005721s 2025-06-25T14:59:21.771212Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-25T14:59:21.771238Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.771256Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.771289Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.006091s 2025-06-25T14:59:21.771613Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-25T14:59:21.771658Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.771670Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T14:59:21.771724Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.005570s 2025-06-25T14:59:21.783921Z :TWriteSession_TestPolicy INFO: Random seed for debugging is 1750863561783894 2025-06-25T14:59:22.163948Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901740763045957:2191];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:22.164072Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T14:59:22.214014Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519901739612840023:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:22.214063Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00116b/r3tmp/tmpK3MttL/pdisk_1.dat 2025-06-25T14:59:22.435163Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:59:22.434642Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-25T14:59:22.660507Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:22.682939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:22.683063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:22.683211Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:22.683274Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:22.689361Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T14:59:22.689721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T14:59:22.691197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2843, node 1 2025-06-25T14:59:22.881602Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/00116b/r3tmp/yandexovuQdL.tmp 2025-06-25T14:59:22.881631Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/00116b/r3tmp/yandexovuQdL.tmp 2025-06-25T14:59:22.881758Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/00116b/r3tmp/yandexovuQdL.tmp 2025-06-25T14:59:22.882060Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T14:59:23.108875Z INFO: TTestServer started on Port 9925 GrpcPort 2843 2025-06-25T14:59:23.162258Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:23.230869Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:9925 PQClient connected to localhost:2843 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:23.395453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... waiting... waiting... 2025-06-25T14:59:25.063108Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901753647948663:2298], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.063239Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.063477Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901753647948678:2301], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:25.066466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T14:59:25.086764Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519901753647948680:2302], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-25T14:59:25.178548Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519901753647948766:2666] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T14:59:25.470635Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519901753647948783:2309], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:59:25.470692Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7519901752497742201:2274], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T14:59:25.470990Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=2&id=ZGVmZGRlNi05YjdkODdmZi1lNTI3NzJhZi00OWU5N2Y3Ng==, ActorId: [2:7519901752497742161:2268], ActorState: ExecuteState, TraceId: 01jykskm9c4vcw2fd5qm71rc7m, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:59:25.475195Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T14:59:25.476760Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZjhmNzNmNTMtMTdiNGM2MjctMWUwNGMyODEtNWIwNmVhNmM=, ActorId: [1:7519901753647948648:2296], ActorState: ExecuteState, TraceId: 01jykskm7reqkks0twdkbwk40q, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T14:59:25.477009Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. ... 5186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T15:04:24.801402Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2025-06-25T15:04:24.801426Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T15:04:24.801460Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-25T15:04:24.801484Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T15:04:24.801512Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 3 is stored on disk 2025-06-25T15:04:24.801529Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-25T15:04:24.801551Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 5, partNo: 0, Offset: 4 is stored on disk 2025-06-25T15:04:24.801566Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T15:04:24.801586Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 6, partNo: 0, Offset: 5 is stored on disk 2025-06-25T15:04:24.801599Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T15:04:24.801623Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 6 is stored on disk 2025-06-25T15:04:24.801637Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T15:04:24.801658Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 7 is stored on disk 2025-06-25T15:04:24.801670Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-25T15:04:24.801692Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 8 is stored on disk 2025-06-25T15:04:24.801706Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:57: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-25T15:04:24.801729Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:379: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 9 is stored on disk 2025-06-25T15:04:24.801748Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:383: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-25T15:04:24.801809Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-25T15:04:24.801950Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:882: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-25T15:04:24.801984Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:924: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-25T15:04:24.802029Z node 17 :PERSQUEUE DEBUG: partition_compaction.cpp:175: [PQ: 72075186224037892, Partition: 0, State: StateIdle] need more data for compaction. cumulativeSize=1208, count=1, cumulativeSizeLimit=8388608, bodyKeysCountLimit=300 2025-06-25T15:04:24.802147Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:839: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 Topic 'rt3.dc1--test-topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 10 max time lag 0ms effective offset 0 2025-06-25T15:04:24.802404Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:1043: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 2 added 1 blobs, size 1208 count 10 last offset 0, current partition end offset: 10 2025-06-25T15:04:24.802440Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:1069: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 2. Send blob request. 2025-06-25T15:04:24.802500Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session got write response: sequence_numbers: 1 sequence_numbers: 2 sequence_numbers: 3 sequence_numbers: 4 sequence_numbers: 5 sequence_numbers: 6 sequence_numbers: 7 sequence_numbers: 8 sequence_numbers: 9 sequence_numbers: 10 offsets: 0 offsets: 1 offsets: 2 offsets: 3 offsets: 4 offsets: 5 offsets: 6 offsets: 7 offsets: 8 offsets: 9 already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false write_statistics { persist_duration_ms: 4 queued_in_partition_duration_ms: 1 } 2025-06-25T15:04:24.802487Z node 17 :PERSQUEUE DEBUG: cache_eviction.h:492: Got data from cache. Partition 0 offset 0 partno 0 count 10 parts_count 0 source 1 size 1208 accessed 0 times before, last time 2025-06-25T15:04:24.000000Z 2025-06-25T15:04:24.802517Z node 17 :PERSQUEUE DEBUG: read.h:121: Reading cookie 2. All 1 blobs are from cache. 
2025-06-25T15:04:24.802552Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: acknoledged message 1 2025-06-25T15:04:24.802570Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:551: FormAnswer for 1 blobs 2025-06-25T15:04:24.802590Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: acknoledged message 2 2025-06-25T15:04:24.802591Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:192: PQ Cache (L2). Touched. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 10 parts 0 suffix '63' 2025-06-25T15:04:24.802609Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: acknoledged message 3 2025-06-25T15:04:24.802627Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: acknoledged message 4 2025-06-25T15:04:24.802649Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: acknoledged message 5 2025-06-25T15:04:24.802668Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: acknoledged message 6 2025-06-25T15:04:24.802694Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: acknoledged message 7 2025-06-25T15:04:24.802714Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: acknoledged message 8 2025-06-25T15:04:24.802738Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: acknoledged message 9 2025-06-25T15:04:24.802762Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: acknoledged message 10 2025-06-25T15:04:24.802779Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:476: FormAnswer processing batch offset 0 totakecount 10 count 10 size 1188 from pos 0 cbcount 10 2025-06-25T15:04:24.802856Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:964: Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp done, result 1750863864796 queuesize 0 startOffset 0 2025-06-25T15:04:24.802992Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: close. 
Timeout = 0 ms 2025-06-25T15:04:24.803051Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session will now close 2025-06-25T15:04:24.803088Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: aborting 2025-06-25T15:04:24.803793Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: gracefully shut down, all writes complete 2025-06-25T15:04:24.803824Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session is aborting and will not restart 2025-06-25T15:04:24.803888Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0] Write session: destroy 2025-06-25T15:04:24.803997Z node 17 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0 grpc read done: success: 0 data: 2025-06-25T15:04:24.804024Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0 grpc read failed 2025-06-25T15:04:24.804359Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 7 sessionId: test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0 2025-06-25T15:04:24.804389Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message-group-id|e6e20cae-7cb3a3df-83a88689-e32ba858_0 is DEAD 2025-06-25T15:04:24.804778Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:559: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-25T15:04:24.804906Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72075186224037892] server disconnected, pipe [17:7519903037071348511:2601] destroyed 2025-06-25T15:04:24.804937Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:137: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::CleanUnusedTablesNormalizer [GOOD] Test command err: 2025-06-25T15:04:25.311431Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:25.330088Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:25.330305Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:25.335850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanIndexColumns; 2025-06-25T15:04:25.336106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-06-25T15:04:25.336329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:25.336556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:25.336688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:25.336843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:25.336979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:25.337139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:25.337277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:25.337391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:25.337509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:25.337626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:25.356710Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:25.356849Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=CleanIndexColumns; 2025-06-25T15:04:25.356898Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-25T15:04:25.357083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanIndexColumns;id=NO_VALUE_OPTIONAL; 2025-06-25T15:04:25.357153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-06-25T15:04:25.357207Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-06-25T15:04:25.357319Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:25.357377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:25.357402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:25.357419Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-06-25T15:04:25.357475Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:25.357532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:25.357564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:25.357581Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:25.357697Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:25.357735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 
2025-06-25T15:04:25.357764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:25.357779Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:25.357846Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:25.357887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:25.357929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:25.357963Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:25.357999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:25.358040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:25.358061Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:25.358097Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:25.358120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:25.358141Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:25.358305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:25.358336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:25.358357Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:25.358432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:25.358455Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:25.358473Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:25.358502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:25.358525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:25.358546Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:25.358575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:25.358603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:25.358667Z node 1 :TX_COLUMNS ... ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-25T15:04:27.005611Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-25T15:04:27.005658Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2; 2025-06-25T15:04:27.005705Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=20048;merger=0;interval_id=2; 2025-06-25T15:04:27.005735Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:04:27.005799Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:27.005822Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=20048;finished=1; 
2025-06-25T15:04:27.005845Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:04:27.005990Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:27.006116Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:20048;schema=key1: uint64 key2: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:27.006156Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:04:27.006248Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;);columns=3;rows=20048; 2025-06-25T15:04:27.006295Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=2405760;num_rows=20048;batch_columns=key1,key2,field; 2025-06-25T15:04:27.006456Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:514:2514];bytes=2405760;rows=20048;faults=0;finished=0;fault=0;schema=key1: uint64 key2: uint64 field: string; 2025-06-25T15:04:27.006556Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:27.006639Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 
2025-06-25T15:04:27.006714Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:27.007273Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:27.007394Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:27.007475Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:27.007508Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:519:2518] finished for tablet 9437184 2025-06-25T15:04:27.007959Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:514:2514];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.002},{"events":["f_ack","l_task_result"],"t":0.215},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.217}],"full":{"a":1750863866790269,"name":"_full_task","f":1750863866790269,"d_finished":0,"c":0,"l":1750863867007547,"d":217278},"events":[{"name":"bootstrap","f":1750863866790420,"d_finished":2680,"c":1,"l":1750863866793100,"d":2680},{"a":1750863867007253,"name":"ack","f":1750863867005968,"d_finished":766,"c":1,"l":1750863867006734,"d":1060},{"a":1750863867007243,"name":"processing","f":1750863866793163,"d_finished":143516,"c":16,"l":1750863867006736,"d":143820},{"name":"ProduceResults","f":1750863866791953,"d_finished":2799,"c":19,"l":1750863867007492,"d":2799},{"a":1750863867007500,"name":"Finish","f":1750863867007500,"d_finished":0,"c":0,"l":1750863867007547,"d":47},{"name":"task_result","f":1750863866793176,"d_finished":142562,"c":15,"l":1750863867005887,"d":142562}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:27.008056Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:514:2514];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:04:27.008437Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:514:2514];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.002},{"events":["f_ack","l_task_result"],"t":0.215},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.217}],"full":{"a":1750863866790269,"name":"_full_task","f":1750863866790269,"d_finished":0,"c":0,"l":1750863867008101,"d":217832},"events":[{"name":"bootstrap","f":1750863866790420,"d_finished":2680,"c":1,"l":1750863866793100,"d":2680},{"a":1750863867007253,"name":"ack","f":1750863867005968,"d_finished":766,"c":1,"l":1750863867006734,"d":1614},{"a":1750863867007243,"name":"processing","f":1750863866793163,"d_finished":143516,"c":16,"l":1750863867006736,"d":144374},{"name":"ProduceResults","f":1750863866791953,"d_finished":2799,"c":19,"l":1750863867007492,"d":2799},{"a":1750863867007500,"name":"Finish","f":1750863867007500,"d_finished":0,"c":0,"l":1750863867008101,"d":601},{"name":"task_result","f":1750863866793176,"d_finished":142562,"c":15,"l":1750863867005887,"d":142562}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:27.008525Z node 1 
:TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:04:26.789823Z;index_granules=0;index_portions=2;index_batches=748;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=2776560;inserted_portions_bytes=0;committed_portions_bytes=2488696;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=5265256;selected_rows=0; 2025-06-25T15:04:27.008566Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:04:27.008774Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:519:2518];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; >> TColumnShardTestReadWrite::WriteReadStandalone [GOOD] >> TColumnShardTestReadWrite::WriteReadStandaloneExoticTypes [GOOD] |93.5%| [TA] $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.5%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TColumnShardTestReadWrite::WriteExoticTypes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadStandalone [GOOD] Test command err: 2025-06-25T15:04:24.430613Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:24.448143Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:24.449143Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:24.459953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:24.460132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:24.460376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:24.460485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:24.460569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:24.460633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:24.460681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:24.460765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:24.460837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:24.460908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.460971Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:24.480008Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:24.480099Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:24.480128Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:24.480240Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.481671Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:24.481754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:24.481796Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:24.481906Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:24.481979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:24.482034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:24.482075Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:24.482190Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.482228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:24.482249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:24.482265Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:24.482323Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:24.482352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:24.482375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:24.482392Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:24.482431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:24.482452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:24.482469Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:24.482588Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:24.482633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:24.482662Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:24.482782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:24.482835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:24.482866Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:24.483005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:24.483039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.483059Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.483101Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:24.483147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:24.483174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:24.483193Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:24.483321Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=28; 2025-06-25T15:04:24.483375Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=25; 2025-06-25T15:04:24.483424Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=23; 2025-06-25T15:04:24.483469Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=20; 2025-06-25T15:04:24.484018Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:24.484088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:24.484117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:24.484150Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.561287Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:04:27.561398Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-25T15:04:27.561496Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-25T15:04:27.561856Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:396:2407];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-06-25T15:04:27.562044Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.562196Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.562329Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.562555Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:27.562674Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.562767Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.562797Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:397:2408] finished for tablet 9437184 2025-06-25T15:04:27.563118Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:396:2407];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.015},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.017}],"full":{"a":1750863867545564,"name":"_full_task","f":1750863867545564,"d_finished":0,"c":0,"l":1750863867562836,"d":17272},"events":[{"name":"bootstrap","f":1750863867545687,"d_finished":2259,"c":1,"l":1750863867547946,"d":2259},{"a":1750863867562538,"name":"ack","f":1750863867561049,"d_finished":1309,"c":1,"l":1750863867562358,"d":1607},{"a":1750863867562518,"name":"processing","f":1750863867548776,"d_finished":10460,"c":10,"l":1750863867562361,"d":10778},{"name":"ProduceResults","f":1750863867546975,"d_finished":3001,"c":13,"l":1750863867562786,"d":3001},{"a":1750863867562788,"name":"Finish","f":1750863867562788,"d_finished":0,"c":0,"l":1750863867562836,"d":48},{"name":"task_result","f":1750863867548793,"d_finished":9039,"c":9,"l":1750863867560908,"d":9039}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.563167Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:396:2407];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:04:27.563429Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:396:2407];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.015},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.017}],"full":{"a":1750863867545564,"name":"_full_task","f":1750863867545564,"d_finished":0,"c":0,"l":1750863867563195,"d":17631},"events":[{"name":"bootstrap","f":1750863867545687,"d_finished":2259,"c":1,"l":1750863867547946,"d":2259},{"a":1750863867562538,"name":"ack","f":1750863867561049,"d_finished":1309,"c":1,"l":1750863867562358,"d":1966},{"a":1750863867562518,"name":"processing","f":1750863867548776,"d_finished":10460,"c":10,"l":1750863867562361,"d":11137},{"name":"ProduceResults","f":1750863867546975,"d_finished":3001,"c":13,"l":1750863867562786,"d":3001},{"a":1750863867562788,"name":"Finish","f":1750863867562788,"d_finished":0,"c":0,"l":1750863867563195,"d":407},{"name":"task_result","f":1750863867548793,"d_finished":9039,"c":9,"l":1750863867560908,"d":9039}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.563479Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:04:27.545121Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=7600;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=7600;selected_rows=0; 2025-06-25T15:04:27.563506Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:04:27.563753Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadStandaloneExoticTypes [GOOD] Test command err: 2025-06-25T15:04:24.430643Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:24.457410Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:24.457632Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:24.463888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:24.464059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:24.464254Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:24.464387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:24.464518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:24.464630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:24.464730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:24.464859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:24.464965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:24.465064Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.465172Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:24.489773Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:24.489887Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:24.489934Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:24.490084Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.490220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:24.490290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:24.490350Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:24.490457Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:24.490523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:24.490561Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:24.490599Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:24.490745Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.490795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:24.490829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:24.490855Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:24.490949Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:24.491000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:24.491048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:24.491076Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:24.491117Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:24.491153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:24.491180Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:24.491401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:24.491445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:24.491478Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:24.491646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:24.491689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:24.491713Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:24.491833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:24.491874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.491898Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.491961Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:24.492028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:24.492065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:24.492094Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:24.492271Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=38; 2025-06-25T15:04:24.492367Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=56; 2025-06-25T15:04:24.492436Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-25T15:04:24.492509Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=38; 2025-06-25T15:04:24.492602Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:24.492673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:24.492707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:24.492790Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... ource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.632762Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:04:27.632863Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-25T15:04:27.632925Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=2759;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-25T15:04:27.633138Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:396:2407];bytes=2759;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: binary json_payload: binary 
ingested_at: timestamp[us] saved_at: timestamp[us] request_id: binary; 2025-06-25T15:04:27.633242Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.633322Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.633409Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.633564Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:27.633658Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.633741Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.633770Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:397:2408] finished for tablet 9437184 2025-06-25T15:04:27.634070Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:396:2407];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.024},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.026}],"full":{"a":1750863867607637,"name":"_full_task","f":1750863867607637,"d_finished":0,"c":0,"l":1750863867633809,"d":26172},"events":[{"name":"bootstrap","f":1750863867607773,"d_finished":2987,"c":1,"l":1750863867610760,"d":2987},{"a":1750863867633549,"name":"ack","f":1750863867632476,"d_finished":953,"c":1,"l":1750863867633429,"d":1213},{"a":1750863867633539,"name":"processing","f":1750863867611559,"d_finished":18905,"c":10,"l":1750863867633431,"d":19175},{"name":"ProduceResults","f":1750863867609367,"d_finished":2624,"c":13,"l":1750863867633759,"d":2624},{"a":1750863867633761,"name":"Finish","f":1750863867633761,"d_finished":0,"c":0,"l":1750863867633809,"d":48},{"name":"task_result","f":1750863867611569,"d_finished":17835,"c":9,"l":1750863867632229,"d":17835}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.634116Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:396:2407];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:04:27.634373Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:396:2407];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.024},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.026}],"full":{"a":1750863867607637,"name":"_full_task","f":1750863867607637,"d_finished":0,"c":0,"l":1750863867634143,"d":26506},"events":[{"name":"bootstrap","f":1750863867607773,"d_finished":2987,"c":1,"l":1750863867610760,"d":2987},{"a":1750863867633549,"name":"ack","f":1750863867632476,"d_finished":953,"c":1,"l":1750863867633429,"d":1547},{"a":1750863867633539,"name":"processing","f":1750863867611559,"d_finished":18905,"c":10,"l":1750863867633431,"d":19509},{"name":"ProduceResults","f":1750863867609367,"d_finished":2624,"c":13,"l":1750863867633759,"d":2624},{"a":1750863867633761,"name":"Finish","f":1750863867633761,"d_finished":0,"c":0,"l":1750863867634143,"d":382},{"name":"task_result","f":1750863867611569,"d_finished":17835,"c":9,"l":1750863867632229,"d":17835}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:27.634438Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:04:27.607171Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=7928;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=7928;selected_rows=0; 2025-06-25T15:04:27.634467Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:04:27.634715Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; >> TColumnShardTestReadWrite::CompactionSplitGranule_PKUInt64 >> Normalizers::CleanEmptyPortionsNormalizer >> TColumnShardTestReadWrite::CompactionGCFailingBs >> TColumnShardTestReadWrite::RebootWriteReadStandalone ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteExoticTypes [GOOD] Test command err: 2025-06-25T15:04:24.430649Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:24.457185Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:24.457382Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:24.463582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:24.463762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:24.463964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:24.464061Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:24.464160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:24.464276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:24.464393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:24.464507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:24.464605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:24.464700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.464807Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:24.489278Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:24.489391Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:24.489435Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:24.489597Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.489746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:24.489812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:24.489840Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:24.489903Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:24.489958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:24.489989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:24.490018Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:24.490127Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.490173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:24.490218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:24.490250Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:24.490331Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:24.490380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:24.490416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 
2025-06-25T15:04:24.490440Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:24.490483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:24.490517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:24.490552Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:24.490747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:24.490787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:24.490815Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:24.490980Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:24.491046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:24.491085Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:24.491213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:24.491256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.491282Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.491357Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:24.491425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:24.491462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:24.491507Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:24.491693Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=62; 2025-06-25T15:04:24.491769Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; 2025-06-25T15:04:24.491835Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=31; 2025-06-25T15:04:24.491910Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=37; 2025-06-25T15:04:24.491990Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:24.492048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:24.492084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:24.492129Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... [{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"19,19,19,19,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"20,20,20,20,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"21,21,21,21,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"22,22,22,22,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"23,23,23,23,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"24,24,24,24,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"25,25,25,25,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"26,26,26,26,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"27,27,27,27,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"28,28,28,28,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"29,29,29,29,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"30,30,30,30,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"31,31,31,31,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"
32,32,32,32,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"33,33,33,33,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"34,34,34,34,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"35,35,35,35,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"36,36,36,36,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"37,37,37,37,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"38,38,38,38,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"39,39,39,39,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"40,40,40,40,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"41,41,41,41,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"42,42,42,42,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"43,43,43,43,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"44,44,44,44,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"45,45,45,45,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"46,46,46,46,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"47,47,47,47,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"48,48,48,48,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"49,49,49,49,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"50,50,50,50,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"51,51,51,51,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"52,52,52,52,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"53,53,53,53,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"54,54,54,54,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"55,55,55,55,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p
":{"include":0,"pk":"56,56,56,56,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"57,57,57,57,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"58,58,58,58,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"59,59,59,59,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"60,60,60,60,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"61,61,61,61,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"62,62,62,62,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"63,63,63,63,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"64,64,64,64,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"65,65,65,65,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"66,66,66,66,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"67,67,67,67,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"68,68,68,68,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"69,69,69,69,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"70,70,70,70,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"71,71,71,71,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"72,72,72,72,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"73,73,73,73,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"74,74,74,74,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"75,75,75,75,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"76,76,76,76,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"77,77,77,77,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"78,78,78,78,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"79,79,79,79,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_inc
lude":1},"id":1}]},"p":{"include":0,"pk":"80,80,80,80,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"81,81,81,81,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"82,82,82,82,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"83,83,83,83,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"84,84,84,84,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"85,85,85,85,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"86,86,86,86,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"87,87,87,87,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"88,88,88,88,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"89,89,89,89,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"90,90,90,90,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"91,91,91,91,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"92,92,92,92,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"93,93,93,93,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"94,94,94,94,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"95,95,95,95,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"96,96,96,96,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"97,97,97,97,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"98,98,98,98,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"99,99,99,99,"}}]}; >> TColumnShardTestReadWrite::RebootWriteRead [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKTimestamp >> TColumnShardTestReadWrite::CompactionGC ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::RebootWriteRead [GOOD] Test command err: 2025-06-25T15:04:24.430662Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:24.457502Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:24.457719Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:24.464510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:24.464687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:24.464901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:24.465031Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:24.465138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:24.465245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:24.465334Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:24.465463Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:24.465568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:24.465680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.465777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:24.490493Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:24.490617Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:24.490664Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:24.490824Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.490970Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:24.491041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:24.491081Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:24.491182Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:24.491256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:24.491315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:24.491357Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:24.491511Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.491582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:24.491626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:24.491653Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:24.491781Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:24.491836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:24.491887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:24.491925Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:24.491979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:24.492015Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:24.492046Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:24.492250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:24.492297Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:24.492353Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:24.492537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:24.492584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:24.492612Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:24.492761Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:24.492813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.492850Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.492920Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:24.492999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:24.493039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:24.493072Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:24.493281Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=45; 2025-06-25T15:04:24.493358Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=32; 2025-06-25T15:04:24.493416Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=27; 2025-06-25T15:04:24.493485Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=39; 2025-06-25T15:04:24.493585Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:24.493661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:24.493702Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:24.493759Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:29.044659Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:04:29.044773Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-25T15:04:29.044856Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-25T15:04:29.045075Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:986:2841];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-06-25T15:04:29.045193Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:29.045275Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:29.045369Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:29.045529Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:29.045632Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:29.045724Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:29.045751Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:987:2842] finished for tablet 9437184 2025-06-25T15:04:29.046087Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:986:2841];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.009},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.01}],"full":{"a":1750863869034808,"name":"_full_task","f":1750863869034808,"d_finished":0,"c":0,"l":1750863869045788,"d":10980},"events":[{"name":"bootstrap","f":1750863869034943,"d_finished":2489,"c":1,"l":1750863869037432,"d":2489},{"a":1750863869045513,"name":"ack","f":1750863869044456,"d_finished":942,"c":1,"l":1750863869045398,"d":1217},{"a":1750863869045504,"name":"processing","f":1750863869038226,"d_finished":4501,"c":10,"l":1750863869045400,"d":4785},{"name":"ProduceResults","f":1750863869036361,"d_finished":2370,"c":13,"l":1750863869045743,"d":2370},{"a":1750863869045744,"name":"Finish","f":1750863869045744,"d_finished":0,"c":0,"l":1750863869045788,"d":44},{"name":"task_result","f":1750863869038244,"d_finished":3471,"c":9,"l":1750863869044324,"d":3471}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:29.046140Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:986:2841];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:04:29.046436Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:986:2841];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.009},{"events":["l_ProduceResults","f_Finish"],"t":0.01},{"events":["l_ack","l_processing","l_Finish"],"t":0.011}],"full":{"a":1750863869034808,"name":"_full_task","f":1750863869034808,"d_finished":0,"c":0,"l":1750863869046170,"d":11362},"events":[{"name":"bootstrap","f":1750863869034943,"d_finished":2489,"c":1,"l":1750863869037432,"d":2489},{"a":1750863869045513,"name":"ack","f":1750863869044456,"d_finished":942,"c":1,"l":1750863869045398,"d":1599},{"a":1750863869045504,"name":"processing","f":1750863869038226,"d_finished":4501,"c":10,"l":1750863869045400,"d":5167},{"name":"ProduceResults","f":1750863869036361,"d_finished":2370,"c":13,"l":1750863869045743,"d":2370},{"a":1750863869045744,"name":"Finish","f":1750863869045744,"d_finished":0,"c":0,"l":1750863869046170,"d":426},{"name":"task_result","f":1750863869038244,"d_finished":3471,"c":9,"l":1750863869044324,"d":3471}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:29.046492Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:04:29.034313Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=7600;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=7600;selected_rows=0; 2025-06-25T15:04:29.046535Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:04:29.046791Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32 >> TColumnShardTestReadWrite::CompactionSplitGranule_PKDatetime >> KqpLimits::TooBigKey-useSink [GOOD] >> KqpLimits::TooBigColumn-useSink >> 
KqpLimits::OutOfSpaceYQLUpsertFail+useSink [GOOD] >> KqpLimits::OutOfSpaceYQLUpsertFail-useSink >> TColumnShardTestReadWrite::RebootWriteReadStandalone [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::RebootWriteReadStandalone [GOOD] Test command err: 2025-06-25T15:04:29.099674Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:29.117164Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:29.117343Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:29.122419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:29.122563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:29.122709Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:29.122793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:29.122877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:29.122953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:29.123022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:29.123089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:29.123163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:29.123229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:29.123295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:29.139524Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:29.139628Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:29.139658Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:29.139780Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:29.139922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:29.139972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:29.139996Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:29.140062Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:29.140104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:29.140126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:29.140150Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:29.140272Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:29.140326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:29.140362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:29.140384Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:29.140448Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:29.140483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:29.140515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:29.140539Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:29.140576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:29.140614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:29.140633Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:29.140766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:29.140796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:29.140821Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:29.140938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:29.140974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:29.140992Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:29.141070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:29.141095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:29.141110Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:29.141159Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:29.141207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:29.141238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:29.141264Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:29.141389Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=26; 2025-06-25T15:04:29.141457Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=40; 2025-06-25T15:04:29.141508Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=27; 2025-06-25T15:04:29.141562Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=29; 2025-06-25T15:04:29.141644Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:29.141715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:29.141742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:29.141775Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
;;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:34.189770Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:04:34.189899Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-25T15:04:34.189989Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-25T15:04:34.190238Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:986:2841];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-06-25T15:04:34.190372Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:34.190474Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:34.190573Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:34.190771Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:34.190886Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:34.190999Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:34.191034Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:987:2842] finished for tablet 9437184 2025-06-25T15:04:34.191378Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:986:2841];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.01},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.012}],"full":{"a":1750863874178952,"name":"_full_task","f":1750863874178952,"d_finished":0,"c":0,"l":1750863874191076,"d":12124},"events":[{"name":"bootstrap","f":1750863874179133,"d_finished":2729,"c":1,"l":1750863874181862,"d":2729},{"a":1750863874190752,"name":"ack","f":1750863874189520,"d_finished":1082,"c":1,"l":1750863874190602,"d":1406},{"a":1750863874190741,"name":"processing","f":1750863874182789,"d_finished":4942,"c":10,"l":1750863874190605,"d":5277},{"name":"ProduceResults","f":1750863874180709,"d_finished":2595,"c":13,"l":1750863874191023,"d":2595},{"a":1750863874191025,"name":"Finish","f":1750863874191025,"d_finished":0,"c":0,"l":1750863874191076,"d":51},{"name":"task_result","f":1750863874182805,"d_finished":3762,"c":9,"l":1750863874189383,"d":3762}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:34.191438Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:986:2841];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:04:34.191769Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:986:2841];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.01},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.012}],"full":{"a":1750863874178952,"name":"_full_task","f":1750863874178952,"d_finished":0,"c":0,"l":1750863874191469,"d":12517},"events":[{"name":"bootstrap","f":1750863874179133,"d_finished":2729,"c":1,"l":1750863874181862,"d":2729},{"a":1750863874190752,"name":"ack","f":1750863874189520,"d_finished":1082,"c":1,"l":1750863874190602,"d":1799},{"a":1750863874190741,"name":"processing","f":1750863874182789,"d_finished":4942,"c":10,"l":1750863874190605,"d":5670},{"name":"ProduceResults","f":1750863874180709,"d_finished":2595,"c":13,"l":1750863874191023,"d":2595},{"a":1750863874191025,"name":"Finish","f":1750863874191025,"d_finished":0,"c":0,"l":1750863874191469,"d":444},{"name":"task_result","f":1750863874182805,"d_finished":3762,"c":9,"l":1750863874189383,"d":3762}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:34.191826Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:04:34.178374Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=7600;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=7600;selected_rows=0; 2025-06-25T15:04:34.191860Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:04:34.192139Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; >> Normalizers::CleanEmptyPortionsNormalizer [GOOD] >> Normalizers::PortionsNormalizer ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::CleanEmptyPortionsNormalizer [GOOD] Test command err: 
2025-06-25T15:04:28.926289Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:28.947455Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:28.947637Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:28.952385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=EmptyPortionsCleaner; 2025-06-25T15:04:28.952603Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-06-25T15:04:28.952783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:28.952957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:28.953056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:28.953166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:28.953281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:28.953416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:28.953536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:28.953632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:28.953744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:28.953850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:28.978953Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 
9437184 2025-06-25T15:04:28.979100Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=EmptyPortionsCleaner; 2025-06-25T15:04:28.979149Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-25T15:04:28.979464Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_empty.cpp:323;tasks_for_remove=0;distribution=; 2025-06-25T15:04:28.979639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=EmptyPortionsCleaner;id=NO_VALUE_OPTIONAL; 2025-06-25T15:04:28.979712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-06-25T15:04:28.979749Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-06-25T15:04:28.979904Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:28.979979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:28.980023Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:28.980050Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-06-25T15:04:28.980131Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:28.980240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:28.980286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:28.980334Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:28.980525Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:28.980593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:28.980633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:28.980655Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:28.980726Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:28.980763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:28.980786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:28.980802Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:28.980843Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:28.980867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:28.980883Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:28.980909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:28.980941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:28.980978Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:28.981103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:28.981144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:28.981168Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:28.981314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:28.981349Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:28.981368Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:28.981399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:28.981425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:28.981440Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:28.981487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:28.981525Z n ... line=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-25T15:04:35.495588Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-25T15:04:35.495625Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2; 2025-06-25T15:04:35.495674Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=20048;merger=0;interval_id=2; 2025-06-25T15:04:35.495719Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:04:35.495789Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:35.495821Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=20048;finished=1; 2025-06-25T15:04:35.495845Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:04:35.496003Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:35.496124Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:20048;schema=key1: uint64 key2: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:35.496173Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:04:35.496298Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;);columns=3;rows=20048; 2025-06-25T15:04:35.496373Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=2405760;num_rows=20048;batch_columns=key1,key2,field; 2025-06-25T15:04:35.496533Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:544:2541];bytes=2405760;rows=20048;faults=0;finished=0;fault=0;schema=key1: uint64 key2: uint64 field: string; 2025-06-25T15:04:35.496675Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:35.496759Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:35.496849Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:35.497475Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:35.497576Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:35.497663Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:35.497691Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:549:2545] finished for tablet 9437184 2025-06-25T15:04:35.498042Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:544:2541];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.202},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.204}],"full":{"a":1750863875293530,"name":"_full_task","f":1750863875293530,"d_finished":0,"c":0,"l":1750863875497733,"d":204203},"events":[{"name":"bootstrap","f":1750863875293772,"d_finished":3125,"c":1,"l":1750863875296897,"d":3125},{"a":1750863875497449,"name":"ack","f":1750863875495983,"d_finished":892,"c":1,"l":1750863875496875,"d":1176},{"a":1750863875497436,"name":"processing","f":1750863875298296,"d_finished":143605,"c":16,"l":1750863875496877,"d":143902},{"name":"ProduceResults","f":1750863875295543,"d_finished":3024,"c":19,"l":1750863875497680,"d":3024},{"a":1750863875497681,"name":"Finish","f":1750863875497681,"d_finished":0,"c":0,"l":1750863875497733,"d":52},{"name":"task_result","f":1750863875298312,"d_finished":142517,"c":15,"l":1750863875495895,"d":142517}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:35.498103Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:544:2541];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:04:35.498377Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:544:2541];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.202},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.204}],"full":{"a":1750863875293530,"name":"_full_task","f":1750863875293530,"d_finished":0,"c":0,"l":1750863875498135,"d":204605},"events":[{"name":"bootstrap","f":1750863875293772,"d_finished":3125,"c":1,"l":1750863875296897,"d":3125},{"a":1750863875497449,"name":"ack","f":1750863875495983,"d_finished":892,"c":1,"l":1750863875496875,"d":1578},{"a":1750863875497436,"name":"processing","f":1750863875298296,"d_finished":143605,"c":16,"l":1750863875496877,"d":144304},{"name":"ProduceResults","f":1750863875295543,"d_finished":3024,"c":19,"l":1750863875497680,"d":3024},{"a":1750863875497681,"name":"Finish","f":1750863875497681,"d_finished":0,"c":0,"l":1750863875498135,"d":454},{"name":"task_result","f":1750863875298312,"d_finished":142517,"c":15,"l":1750863875495895,"d":142517}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:35.498443Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:04:35.292978Z;index_granules=0;index_portions=2;index_batches=748;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=2776560;inserted_portions_bytes=0;committed_portions_bytes=2488696;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=5265256;selected_rows=0; 2025-06-25T15:04:35.498489Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:04:35.498700Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:549:2545];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; >> KqpLimits::TooBigColumn-useSink [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt32 >> Normalizers::PortionsNormalizer [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::PortionsNormalizer [GOOD] Test command err: 2025-06-25T15:04:36.467856Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 
2025-06-25T15:04:36.486376Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:36.486586Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:36.493749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=EmptyPortionsCleaner; 2025-06-25T15:04:36.494058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=LeakedBlobsNormalizer; 2025-06-25T15:04:36.494140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-06-25T15:04:36.494265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:36.494443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:36.494545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:36.494624Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:36.494707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:36.494791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:36.494853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:36.494921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:36.494988Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:36.495048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:36.511510Z node 1 
:TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:36.511716Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=12;current_normalizer=CLASS_NAME=EmptyPortionsCleaner; 2025-06-25T15:04:36.511769Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-25T15:04:36.512023Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_empty.cpp:323;tasks_for_remove=0;distribution=; 2025-06-25T15:04:36.512130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=EmptyPortionsCleaner;id=NO_VALUE_OPTIONAL; 2025-06-25T15:04:36.512199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=LeakedBlobsNormalizer;id=NO_VALUE_OPTIONAL; 2025-06-25T15:04:36.512230Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-25T15:04:36.512435Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=41; 2025-06-25T15:04:36.512519Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=46; 2025-06-25T15:04:36.512599Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=49; 2025-06-25T15:04:36.512666Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=28; 2025-06-25T15:04:36.512753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=LeakedBlobsNormalizer;id=NO_VALUE_OPTIONAL; 2025-06-25T15:04:36.512798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-06-25T15:04:36.512843Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-06-25T15:04:36.512963Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:36.513123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:36.513153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:36.513173Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-06-25T15:04:36.513250Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:36.513290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:36.513321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:36.513340Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:36.513457Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:36.513498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:36.513521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:36.513541Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:36.513603Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:36.513645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:36.513675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:36.513693Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:36.513727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:36.513749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:36.513766Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:36.513802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:36.513822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:36.513838Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:36.514000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:36.514030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:36.514048Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_O ... T15:04:37.755744Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:04:37.755768Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:04:37.755809Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:04:37.755890Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-25T15:04:37.755953Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:04:37.756003Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:04:37.756038Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:04:37.756100Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=1.000000s; 2025-06-25T15:04:37.756144Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:04:37.825768Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 111 scanId: 0 version: {1750863877479:111} readable: {1750863877479:max} at tablet 9437184 
2025-06-25T15:04:37.825960Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 111 scanId: 0 at tablet 9437184 2025-06-25T15:04:37.826166Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1750863877479:111};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } Columns { Id: 3 } } } ; 2025-06-25T15:04:37.826233Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1750863877479:111};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } Columns { Id: 3 } } } ; 2025-06-25T15:04:37.826927Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1750863877479:111};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[{"from":8}]},{"owner_id":0,"inputs":[{"from":2},{"from":4},{"from":6}]},{"owner_id":8,"inputs":[]},{"owner_id":2,"inputs":[{"from":7}]},{"owner_id":4,"inputs":[{"from":7}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"8":{"p":{"p":{"data":[{"name":"key1","id":1},{"name":"key2","id":2},{"name":"field","id":3}]},"o":"0","t":"ReserveMemory"},"w":0,"id":8},"2":{"p":{"i":"1","p":{"address":{"name":"key1","id":1}},"o":"1","t":"AssembleOriginalData"},"w":11,"id":2},"6":{"p":{"i":"3","p":{"address":{"name":"field","id":3}},"o":"3","t":"AssembleOriginalData"},"w":11,"id":6},"7":{"p":{"i":"0","p":{"data":[{"name":"key1","id":1},{"name":"key2","id":2},{"name":"field","id":3}]},"o":"1,2,3","t":"FetchOriginalData"},"w":6,"id":7},"4":{"p":{"i":"2","p":{"address":{"name":"key2","id":2}},"o":"2","t":"AssembleOriginalData"},"w":11,"id":4},"0":{"p":{"i":"1,2,3","t":"Projection"},"w":33,"id":0}}}; 2025-06-25T15:04:37.827023Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1750863877479:111};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:04:37.827465Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=111;scan_id=0;gen=0;table=;snapshot={1750863877479:111};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[1:439:2438];trace_detailed=; 2025-06-25T15:04:37.828005Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1,2,3;column_names=field,key1,key2;);; 2025-06-25T15:04:37.828171Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; 2025-06-25T15:04:37.828462Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:439:2438];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:37.828594Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:439:2438];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:37.828692Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:439:2438];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:37.828733Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:439:2438] finished for tablet 9437184 2025-06-25T15:04:37.829040Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:439:2438];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:437:2437];stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ProduceResults"],"t":0},{"events":["f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863877827410,"name":"_full_task","f":1750863877827410,"d_finished":0,"c":0,"l":1750863877828789,"d":1379},"events":[{"name":"bootstrap","f":1750863877827558,"d_finished":741,"c":1,"l":1750863877828299,"d":741},{"a":1750863877828439,"name":"ack","f":1750863877828439,"d_finished":0,"c":0,"l":1750863877828789,"d":350},{"a":1750863877828419,"name":"processing","f":1750863877828419,"d_finished":0,"c":0,"l":1750863877828789,"d":370},{"name":"ProduceResults","f":1750863877828287,"d_finished":230,"c":2,"l":1750863877828722,"d":230},{"a":1750863877828725,"name":"Finish","f":1750863877828725,"d_finished":0,"c":0,"l":1750863877828789,"d":64}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:37.829093Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:439:2438];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:437:2437];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:04:37.829341Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:439:2438];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:437:2437];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ProduceResults"],"t":0},{"events":["f_ack","l_ack","f_processing","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863877827410,"name":"_full_task","f":1750863877827410,"d_finished":0,"c":0,"l":1750863877829133,"d":1723},"events":[{"name":"bootstrap","f":1750863877827558,"d_finished":741,"c":1,"l":1750863877828299,"d":741},{"a":1750863877828439,"name":"ack","f":1750863877828439,"d_finished":0,"c":0,"l":1750863877829133,"d":694},{"a":1750863877828419,"name":"processing","f":1750863877828419,"d_finished":0,"c":0,"l":1750863877829133,"d":714},{"name":"ProduceResults","f":1750863877828287,"d_finished":230,"c":2,"l":1750863877828722,"d":230},{"a":1750863877828725,"name":"Finish","f":1750863877828725,"d_finished":0,"c":0,"l":1750863877829133,"d":408}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:04:37.829401Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:439:2438];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:04:37.826993Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:04:37.829437Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:439:2438];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:04:37.829511Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:439:2438];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:49;event=insert_to_cache;key=string;records=0;size=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=string;records=0;count=0; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::TooBigColumn-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 1174, MsgBus: 3830 2025-06-25T15:00:51.175062Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902121477962695:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:51.175195Z node 1 :METADATA_PROVIDER ERROR: 
log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001439/r3tmp/tmpFx1CbR/pdisk_1.dat 2025-06-25T15:00:51.609081Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:51.620713Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902121477962498:2080] 1750863651164808 != 1750863651164811 2025-06-25T15:00:51.651311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:51.651416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:51.652957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1174, node 1 2025-06-25T15:00:51.820964Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:51.820992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:51.821003Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:51.821147Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:00:52.180413Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:3830 TClient is connected to server localhost:3830 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:52.658071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:00:52.677050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:00:52.692154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:52.856838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.013520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:53.108465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.330224Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902134362866023:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.330332Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:54.866565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.894743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.928257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:54.971752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.003320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.071844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.109694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:55.195991Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902138657833987:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.196086Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.197057Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902138657833992:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:55.201079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:55.211048Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902138657833994:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:00:55.289606Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902138657834047:3425] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:56.176427Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902121477962695:2228];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:56.235007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:00:56.378273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work ... WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:31.486586Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:31.488532Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62077, node 4 2025-06-25T15:04:31.525054Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:04:31.525077Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:04:31.525085Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:04:31.525191Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25062 TClient is connected to server localhost:25062 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:04:32.036815Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:04:32.046434Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:04:32.118246Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:04:32.247877Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:04:32.320898Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:04:32.331329Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:34.837493Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519903080071126466:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:34.837576Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:34.909118Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:34.935963Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:34.963980Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:34.994057Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:35.021670Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:35.051839Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:35.080603Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:35.159839Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519903084366094420:2430], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:35.159891Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519903084366094425:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:35.159925Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:35.162766Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:04:35.171006Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519903084366094427:2434], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-25T15:04:35.233427Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519903084366094478:3418] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:04:36.324948Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519903067186222963:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:04:36.325039Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:04:37.104297Z node 4 :TX_DATASHARD ERROR: check_data_tx_unit.cpp:186: Transaction write column value of 20971522 bytes is larger than the allowed threshold 2025-06-25T15:04:37.104423Z node 4 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715672 at tablet 72075186224037911 status: EXEC_ERROR errors: BAD_ARGUMENT (Transaction write column value of 20971522 bytes is larger than the allowed threshold) | 2025-06-25T15:04:37.104582Z node 4 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [4:7519903088661062024:2465] TxId: 281474976715672. Ctx: { TraceId: 01jyksx4grffnm4pe6ezhha1fh, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MjU5YTY3NzQtZGYyMmY1NDQtNzQ5YjNiNDctYzVjNDE3ZDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [BAD_ARGUMENT] Transaction write column value of 20971522 bytes is larger than the allowed threshold; 2025-06-25T15:04:37.104855Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=MjU5YTY3NzQtZGYyMmY1NDQtNzQ5YjNiNDctYzVjNDE3ZDU=, ActorId: [4:7519903088661062006:2465], ActorState: ExecuteState, TraceId: 01jyksx4grffnm4pe6ezhha1fh, Create QueryResponse for error on request, msg:
: Error: Error executing transaction (ExecError): Execution failed
: Error: [BAD_ARGUMENT] Transaction write column value of 20971522 bytes is larger than the allowed threshold >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt32 >> TColumnShardTestReadWrite::WriteReadExoticTypes >> TColumnShardTestReadWrite::WriteReadExoticTypes [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadExoticTypes [GOOD] Test command err: 2025-06-25T15:04:40.495088Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:40.512291Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:40.512492Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:40.517680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:40.517820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:40.518007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:40.518088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:40.518169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:40.518242Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:40.518293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:40.518382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:40.518476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:40.518547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:40.518611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:40.534436Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:40.534550Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:40.534587Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:40.534718Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:40.534832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:40.534883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:40.534914Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:40.534979Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:40.535025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:40.535049Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:40.535080Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:40.535202Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:40.535249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:40.535272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:40.535288Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:40.535343Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:40.535376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:40.535413Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:40.535435Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:40.535475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:40.535504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:40.535522Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:40.535676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:40.535701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:40.535729Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:40.535848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:40.535878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:40.535895Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:40.535984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:40.536009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:40.536024Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:40.536063Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:40.536111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:40.536138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:40.536154Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:40.536271Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=25; 2025-06-25T15:04:40.536345Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=49; 2025-06-25T15:04:40.536410Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-25T15:04:40.536457Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=21; 2025-06-25T15:04:40.536544Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:40.536605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:40.536644Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:40.536689Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
es=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:43.520671Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:04:43.520797Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-25T15:04:43.520877Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=2759;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-25T15:04:43.521167Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:396:2407];bytes=2759;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: binary json_payload: binary ingested_at: timestamp[us] saved_at: timestamp[us] request_id: binary; 2025-06-25T15:04:43.521305Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:43.521417Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:43.521534Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:43.521751Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:43.521881Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:43.522001Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:43.522064Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:397:2408] finished for tablet 9437184 2025-06-25T15:04:43.522566Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:396:2407];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.033},{"events":["l_ProduceResults","f_Finish"],"t":0.034},{"events":["l_ack","l_processing","l_Finish"],"t":0.035}],"full":{"a":1750863883487086,"name":"_full_task","f":1750863883487086,"d_finished":0,"c":0,"l":1750863883522123,"d":35037},"events":[{"name":"bootstrap","f":1750863883487250,"d_finished":3127,"c":1,"l":1750863883490377,"d":3127},{"a":1750863883521710,"name":"ack","f":1750863883520386,"d_finished":1174,"c":1,"l":1750863883521560,"d":1587},{"a":1750863883521700,"name":"processing","f":1750863883491423,"d_finished":5944,"c":10,"l":1750863883521562,"d":6367},{"name":"ProduceResults","f":1750863883489092,"d_finished":3034,"c":13,"l":1750863883522045,"d":3034},{"a":1750863883522049,"name":"Finish","f":1750863883522049,"d_finished":0,"c":0,"l":1750863883522123,"d":74},{"name":"task_result","f":1750863883491439,"d_finished":4649,"c":9,"l":1750863883520198,"d":4649}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:43.522634Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:396:2407];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:04:43.523045Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:396:2407];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.033},{"events":["l_ProduceResults","f_Finish"],"t":0.034},{"events":["l_ack","l_processing","l_Finish"],"t":0.035}],"full":{"a":1750863883487086,"name":"_full_task","f":1750863883487086,"d_finished":0,"c":0,"l":1750863883522674,"d":35588},"events":[{"name":"bootstrap","f":1750863883487250,"d_finished":3127,"c":1,"l":1750863883490377,"d":3127},{"a":1750863883521710,"name":"ack","f":1750863883520386,"d_finished":1174,"c":1,"l":1750863883521560,"d":2138},{"a":1750863883521700,"name":"processing","f":1750863883491423,"d_finished":5944,"c":10,"l":1750863883521562,"d":6918},{"name":"ProduceResults","f":1750863883489092,"d_finished":3034,"c":13,"l":1750863883522045,"d":3034},{"a":1750863883522049,"name":"Finish","f":1750863883522049,"d_finished":0,"c":0,"l":1750863883522674,"d":625},{"name":"task_result","f":1750863883491439,"d_finished":4649,"c":9,"l":1750863883520198,"d":4649}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:04:43.523112Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:04:43.486466Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=7928;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=7928;selected_rows=0; 2025-06-25T15:04:43.523151Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:04:43.523499Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKTimestamp_Reboot >> KqpLimits::CancelAfterRwTx-useSink [GOOD] >> 
KqpLimits::CancelAfterRoTxWithFollowerStreamLookupDepededRead >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheBeginning [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheEnd |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> TColumnShardTestReadWrite::ReadAggregate >> TColumnShardTestReadWrite::CompactionInGranule_PKUtf8 >> TColumnShardTestReadWrite::ReadAggregate [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadAggregate [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8328;columns=19; 2025-06-25T15:04:54.395845Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:54.419512Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:54.419769Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:54.425943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:54.426083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:54.426241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:54.426315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:54.426376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:54.426457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:54.426515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:54.426572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:54.426639Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:54.426721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:54.426803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:54.441987Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:54.442126Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:54.442163Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:54.442316Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:54.442443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:54.442499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:54.442542Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:54.442605Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:54.442647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:54.442678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:54.442698Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:54.442804Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:54.442844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:54.442866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:54.442881Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:54.442936Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:54.442977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:54.443030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:54.443049Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:54.443077Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:54.443102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:54.443121Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:54.443253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:54.443296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:54.443325Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:54.443460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:54.443491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:54.443510Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:54.443590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:54.443616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:54.443635Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:54.443682Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:54.443727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:54.443752Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:54.443769Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:54.443912Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=33; 2025-06-25T15:04:54.443994Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=38; 2025-06-25T15:04:54.444058Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-25T15:04:54.444107Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=24; 2025-06-25T15:04:54.444178Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:54.444276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:54.444333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract. ... 
:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-25T15:04:57.015263Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-25T15:04:57.015287Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=76; 2025-06-25T15:04:57.015323Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1;merger=0;interval_id=76; 2025-06-25T15:04:57.015347Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:04:57.015389Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-25T15:04:57.015407Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1;finished=1; 2025-06-25T15:04:57.015426Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:04:57.015688Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:57.015762Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1;schema=100: binary 101: binary 102: binary 103: uint64;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-25T15:04:57.015783Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:04:57.015836Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready 
result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;);columns=4;rows=1; 2025-06-25T15:04:57.015867Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=26;num_rows=1;batch_columns=100,101,102,103; 2025-06-25T15:04:57.016006Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[2:419:2431];bytes=26;rows=1;faults=0;finished=0;fault=0;schema=100: binary 101: binary 102: binary 103: uint64; 2025-06-25T15:04:57.016064Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-25T15:04:57.016118Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-25T15:04:57.016163Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-25T15:04:57.016298Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:04:57.016369Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-25T15:04:57.016415Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-25T15:04:57.016435Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [2:420:2432] finished for tablet 9437184 2025-06-25T15:04:57.016648Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[2:419:2431];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap"],"t":0.001},{"events":["f_processing","f_task_result"],"t":0.002},{"events":["f_ack","l_task_result"],"t":0.006},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.007}],"full":{"a":1750863897008815,"name":"_full_task","f":1750863897008815,"d_finished":0,"c":0,"l":1750863897016463,"d":7648},"events":[{"name":"bootstrap","f":1750863897008914,"d_finished":1447,"c":1,"l":1750863897010361,"d":1447},{"a":1750863897016287,"name":"ack","f":1750863897015671,"d_finished":506,"c":1,"l":1750863897016177,"d":682},{"a":1750863897016280,"name":"processing","f":1750863897010901,"d_finished":3753,"c":10,"l":1750863897016179,"d":3936},{"name":"ProduceResults","f":1750863897009725,"d_finished":1378,"c":13,"l":1750863897016428,"d":1378},{"a":1750863897016429,"name":"Finish","f":1750863897016429,"d_finished":0,"c":0,"l":1750863897016463,"d":34},{"name":"task_result","f":1750863897010909,"d_finished":3156,"c":9,"l":1750863897015465,"d":3156}],"id":"9437184::76"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-25T15:04:57.016701Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[2:419:2431];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:04:57.016915Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[2:419:2431];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap"],"t":0.001},{"events":["f_processing","f_task_result"],"t":0.002},{"events":["f_ack","l_task_result"],"t":0.006},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.007}],"full":{"a":1750863897008815,"name":"_full_task","f":1750863897008815,"d_finished":0,"c":0,"l":1750863897016722,"d":7907},"events":[{"name":"bootstrap","f":1750863897008914,"d_finished":1447,"c":1,"l":1750863897010361,"d":1447},{"a":1750863897016287,"name":"ack","f":1750863897015671,"d_finished":506,"c":1,"l":1750863897016177,"d":941},{"a":1750863897016280,"name":"processing","f":1750863897010901,"d_finished":3753,"c":10,"l":1750863897016179,"d":4195},{"name":"ProduceResults","f":1750863897009725,"d_finished":1378,"c":13,"l":1750863897016428,"d":1378},{"a":1750863897016429,"name":"Finish","f":1750863897016429,"d_finished":0,"c":0,"l":1750863897016722,"d":293},{"name":"task_result","f":1750863897010909,"d_finished":3156,"c":9,"l":1750863897015465,"d":3156}],"id":"9437184::76"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;;); 2025-06-25T15:04:57.016965Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:04:57.008544Z;index_granules=0;index_portions=1;index_batches=2;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=14056;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=14056;selected_rows=0; 2025-06-25T15:04:57.016986Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:04:57.017121Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:420:2432];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,19;column_names=i32,jsondoc;);;program_input=(column_ids=4,19;column_names=i32,jsondoc;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete 2025-06-25 15:04:42,362 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 15:04:42,679 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination: pid rss ref pdirt 559974 636M 617M 549M ydb-tests-olap-ttl_tiering --basetemp /home/runner/.ya/build/build_root/yft8/000cef/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modu 561274 5.2G 5.0G 4.6G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/yft8/000cef/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4 563697 391M 367M 359M └─ moto_server s3 --port 9992 Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol reports.append(call_and_report(item, "call", log)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in 
_multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call item.runtest() File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call pyfuncitem.retval = testfunction(**testargs) File "ydb/tests/olap/ttl_tiering/ttl_delete_s3.py", line 367, in test_ttl_delete self.ydb_client.query(""" File "ydb/tests/olap/common/ydb_client.py", line 24, in query return self.session_pool.execute_with_retries(statement) File "contrib/python/ydb/py3/ydb/query/pool.py", line 204, in execute_with_retries return retry_operation_sync(wrapped_callee, retry_settings) File "contrib/python/ydb/py3/ydb/retries.py", line 133, in retry_operation_sync for next_opt in opt_generator: File "contrib/python/ydb/py3/ydb/retries.py", line 94, in retry_operation_impl result = YdbRetryOperationFinalResult(callee(*args, **kwargs)) File "contrib/python/ydb/py3/ydb/query/pool.py", line 202, in wrapped_callee return [result_set for result_set in it] File "contrib/python/ydb/py3/ydb/_utilities.py", line 173, in __next__ return self._next() File "contrib/python/ydb/py3/ydb/_utilities.py", line 164, in _next res = self.wrapper(next(self.it)) File "contrib/python/grpcio/py3/grpc/_channel.py", line 475, in __next__ return self._next() File "contrib/python/grpcio/py3/grpc/_channel.py", line 872, in _next _common.wait(self._state.condition.wait, _response_ready) File "contrib/python/grpcio/py3/grpc/_common.py", line 150, in wait _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb) File "contrib/python/grpcio/py3/grpc/_common.py", line 112, in _wait_once wait_fn(timeout=timeout) File "contrib/tools/python3/Lib/threading.py", line 359, in wait gotit = waiter.acquire(True, timeout) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...d_root/yft8/000cef/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/000cef/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/000cef', '--source-root', '/home/runner/.ya/build/build_root/yft8/000cef/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/000cef/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', 
'--modulo', '10', '--modulo-index', '4', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...d_root/yft8/000cef/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/000cef/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/000cef', '--source-root', '/home/runner/.ya/build/build_root/yft8/000cef/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/000cef/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/ttl_tiering', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '4', '--partition-mode', 'SEQUENTIAL', '--dep-root', 'ydb/tests/olap/ttl_tiering', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {}) >> EvWrite::WriteInTransaction >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64_Reboot >> EvWrite::WriteInTransaction [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> EvWrite::WriteInTransaction [GOOD] Test command err: 2025-06-25T15:04:59.113706Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:59.140767Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:59.140977Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:59.147492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:59.147696Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:59.147896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:59.148001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:59.148090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:59.148199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:59.148347Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:59.148466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:59.148581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:59.148684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:59.148788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:59.174474Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:59.174621Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:59.174665Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:59.174836Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:59.174981Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:59.175056Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:59.175097Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:59.175188Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:59.175248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:59.175295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:59.175337Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:59.175484Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:59.175534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:59.175572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:59.175617Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:59.175713Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:59.175766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:59.175805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:59.175830Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:59.175872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:59.175923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-25T15:04:59.175972Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:59.176167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:59.176204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:59.176247Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:59.176433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:59.176476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:59.176502Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:59.176651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:59.176699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:59.176738Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:59.176808Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:59.176865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:59.176916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:59.176951Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:59.177125Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=43; 2025-06-25T15:04:59.177231Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=49; 2025-06-25T15:04:59.177301Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-25T15:04:59.177366Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=32; 2025-06-25T15:04:59.177445Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:59.177511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:59.177551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:59.177621Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... :287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-25T15:05:00.183227Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=1; 2025-06-25T15:05:00.183262Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=2048;merger=0;interval_id=1; 2025-06-25T15:05:00.183311Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:05:00.183374Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:00.183392Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=2048;finished=1; 2025-06-25T15:05:00.183409Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:05:00.183548Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:00.183643Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:2048;schema=key: uint64 field: 
string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:00.183673Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:05:00.183732Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;);columns=2;rows=2048; 2025-06-25T15:05:00.183769Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=229376;num_rows=2048;batch_columns=key,field; 2025-06-25T15:05:00.183898Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:286:2298];bytes=229376;rows=2048;faults=0;finished=0;fault=0;schema=key: uint64 field: string; 2025-06-25T15:05:00.183978Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:00.184046Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:00.184120Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:00.184324Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:00.184403Z node 1 :TX_COLUMNSHARD_SCAN 
DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:00.184480Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:00.184508Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:287:2299] finished for tablet 9437184 2025-06-25T15:05:00.184770Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:286:2298];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["l_task_result"],"t":0.016},{"events":["f_ack"],"t":0.017},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.018}],"full":{"a":1750863900166456,"name":"_full_task","f":1750863900166456,"d_finished":0,"c":0,"l":1750863900184539,"d":18083},"events":[{"name":"bootstrap","f":1750863900166567,"d_finished":2048,"c":1,"l":1750863900168615,"d":2048},{"a":1750863900184297,"name":"ack","f":1750863900183531,"d_finished":617,"c":1,"l":1750863900184148,"d":859},{"a":1750863900184289,"name":"processing","f":1750863900170320,"d_finished":7519,"c":9,"l":1750863900184150,"d":7769},{"name":"ProduceResults","f":1750863900167668,"d_finished":1514,"c":12,"l":1750863900184495,"d":1514},{"a":1750863900184497,"name":"Finish","f":1750863900184497,"d_finished":0,"c":0,"l":1750863900184539,"d":42},{"name":"task_result","f":1750863900170330,"d_finished":6808,"c":8,"l":1750863900183438,"d":6808}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:00.184829Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:286:2298];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:05:00.185064Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:286:2298];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["l_task_result"],"t":0.016},{"events":["f_ack"],"t":0.017},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.018}],"full":{"a":1750863900166456,"name":"_full_task","f":1750863900166456,"d_finished":0,"c":0,"l":1750863900184856,"d":18400},"events":[{"name":"bootstrap","f":1750863900166567,"d_finished":2048,"c":1,"l":1750863900168615,"d":2048},{"a":1750863900184297,"name":"ack","f":1750863900183531,"d_finished":617,"c":1,"l":1750863900184148,"d":1176},{"a":1750863900184289,"name":"processing","f":1750863900170320,"d_finished":7519,"c":9,"l":1750863900184150,"d":8086},{"name":"ProduceResults","f":1750863900167668,"d_finished":1514,"c":12,"l":1750863900184495,"d":1514},{"a":1750863900184497,"name":"Finish","f":1750863900184497,"d_finished":0,"c":0,"l":1750863900184856,"d":359},{"name":"task_result","f":1750863900170330,"d_finished":6808,"c":8,"l":1750863900183438,"d":6808}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:00.185128Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:05:00.166066Z;index_granules=0;index_portions=1;index_batches=44;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=237240;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=237240;selected_rows=0; 2025-06-25T15:05:00.185167Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:05:00.185324Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:287:2299];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt32_Reboot >> TColumnShardTestReadWrite::WriteReadDuplicate [GOOD] |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadDuplicate [GOOD] Test command err: 2025-06-25T15:04:24.430603Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:24.452772Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:24.452968Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:24.459975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:24.460198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:24.460473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:24.460637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:24.460793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:24.460877Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:24.460941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:24.461021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:24.461085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:24.461168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.461248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:24.479918Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:24.480061Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:24.480117Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:24.480291Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.481676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:24.481779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:24.481836Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:24.481957Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:24.482054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:24.482149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:24.482213Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:24.482417Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.482498Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:24.482555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:24.482598Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:24.482700Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:24.482745Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:24.482772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:24.482790Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:24.482819Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:24.482841Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:24.482860Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:24.483041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:24.483070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:24.483088Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:24.483213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:24.483244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:24.483264Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:24.483353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:24.483385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.483403Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.483447Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:24.483500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:24.483524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:24.483544Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:24.483687Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=32; 2025-06-25T15:04:24.483746Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=29; 2025-06-25T15:04:24.483806Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=30; 2025-06-25T15:04:24.483866Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=25; 2025-06-25T15:04:24.483926Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:24.483979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:24.484010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:24.484051Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=10;merger=0;interval_id=49; 2025-06-25T15:05:04.314510Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:05:04.314576Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:04.314610Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=10;finished=1; 2025-06-25T15:05:04.314641Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:05:04.315480Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:04.315580Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:10;schema=timestamp: timestamp[us];);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:04.315630Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:05:04.315699Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=10; 2025-06-25T15:05:04.315748Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=80;num_rows=10;batch_columns=timestamp; 2025-06-25T15:05:04.315940Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:10324:12330];bytes=80;rows=10;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us]; 2025-06-25T15:05:04.316010Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:04.316093Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:04.316175Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:04.316457Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:04.316523Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:04.316583Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:04.316607Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:10328:12334] finished for tablet 9437184 2025-06-25T15:05:04.316923Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:10324:12330];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.129},{"events":["l_bootstrap"],"t":0.241},{"events":["f_processing","f_task_result"],"t":0.242},{"events":["l_task_result"],"t":1.129},{"events":["f_ack"],"t":1.13},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":1.131}],"full":{"a":1750863903185073,"name":"_full_task","f":1750863903185073,"d_finished":0,"c":0,"l":1750863904316644,"d":1131571},"events":[{"name":"bootstrap","f":1750863903185427,"d_finished":241225,"c":1,"l":1750863903426652,"d":241225},{"a":1750863904316443,"name":"ack","f":1750863904315459,"d_finished":731,"c":1,"l":1750863904316190,"d":932},{"a":1750863904316430,"name":"processing","f":1750863903427962,"d_finished":382206,"c":1766,"l":1750863904316192,"d":382420},{"name":"ProduceResults","f":1750863903314786,"d_finished":182486,"c":1769,"l":1750863904316596,"d":182486},{"a":1750863904316597,"name":"Finish","f":1750863904316597,"d_finished":0,"c":0,"l":1750863904316644,"d":47},{"name":"task_result","f":1750863903427981,"d_finished":364738,"c":1765,"l":1750863904314717,"d":364738}],"id":"9437184::49"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:04.316974Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:10324:12330];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:05:04.317262Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:10324:12330];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.129},{"events":["l_bootstrap"],"t":0.241},{"events":["f_processing","f_task_result"],"t":0.242},{"events":["l_task_result"],"t":1.129},{"events":["f_ack"],"t":1.13},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":1.131}],"full":{"a":1750863903185073,"name":"_full_task","f":1750863903185073,"d_finished":0,"c":0,"l":1750863904317003,"d":1131930},"events":[{"name":"bootstrap","f":1750863903185427,"d_finished":241225,"c":1,"l":1750863903426652,"d":241225},{"a":1750863904316443,"name":"ack","f":1750863904315459,"d_finished":731,"c":1,"l":1750863904316190,"d":1291},{"a":1750863904316430,"name":"processing","f":1750863903427962,"d_finished":382206,"c":1766,"l":1750863904316192,"d":382779},{"name":"ProduceResults","f":1750863903314786,"d_finished":182486,"c":1769,"l":1750863904316596,"d":182486},{"a":1750863904316597,"name":"Finish","f":1750863904316597,"d_finished":0,"c":0,"l":1750863904317003,"d":406},{"name":"task_result","f":1750863903427981,"d_finished":364738,"c":1765,"l":1750863904314717,"d":364738}],"id":"9437184::49"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:04.317307Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:05:03.184376Z;index_granules=0;index_portions=294;index_batches=294;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=686784;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=686784;selected_rows=0; 2025-06-25T15:05:04.317330Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:05:04.317498Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:10328:12334];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> TColumnShardTestReadWrite::WriteReadZSTD >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32_Reboot |93.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKDatetime_Reboot >> TColumnShardTestReadWrite::WriteReadZSTD [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKDatetime [GOOD] ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadZSTD [GOOD] Test command err: 2025-06-25T15:05:07.468061Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:07.484058Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:07.484239Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:07.489030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:07.489164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:07.489327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:07.489403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:07.489483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:07.489557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:07.489612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:07.489681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:07.489758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:07.489813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:07.489869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:07.505130Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:07.505231Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:07.505261Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:07.505383Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:07.505510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:07.505570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:07.505599Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:07.505679Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:07.505717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:07.505748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:07.505775Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:07.505899Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:07.505942Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:07.505973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:07.505989Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:07.506040Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:07.506069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:07.506091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 
2025-06-25T15:05:07.506105Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:07.506132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:07.506150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:07.506166Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:07.506283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:07.506318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:07.506339Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:07.506457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:07.506486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:07.506501Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:07.506585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:07.506614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:07.506628Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:07.506671Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:07.506720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:07.506743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:07.506762Z node 1 
:TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:07.506898Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=25; 2025-06-25T15:05:07.506967Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=35; 2025-06-25T15:05:07.507030Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=35; 2025-06-25T15:05:07.507083Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=21; 2025-06-25T15:05:07.507146Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:07.507194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:07.507225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:07.507264Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... urce_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:11.732907Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:05:11.733002Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-25T15:05:11.733057Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-25T15:05:11.733243Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:986:2841];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-06-25T15:05:11.733342Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:11.733422Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:11.733542Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:11.733766Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:11.733902Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:11.734032Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:11.734087Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:987:2842] finished for tablet 9437184 2025-06-25T15:05:11.734424Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:986:2841];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.002},{"events":["f_ack","l_task_result"],"t":0.008},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.009}],"full":{"a":1750863911724469,"name":"_full_task","f":1750863911724469,"d_finished":0,"c":0,"l":1750863911734141,"d":9672},"events":[{"name":"bootstrap","f":1750863911724579,"d_finished":2165,"c":1,"l":1750863911726744,"d":2165},{"a":1750863911733737,"name":"ack","f":1750863911732708,"d_finished":866,"c":1,"l":1750863911733574,"d":1270},{"a":1750863911733722,"name":"processing","f":1750863911727452,"d_finished":3830,"c":10,"l":1750863911733576,"d":4249},{"name":"ProduceResults","f":1750863911725779,"d_finished":2119,"c":13,"l":1750863911734066,"d":2119},{"a":1750863911734069,"name":"Finish","f":1750863911734069,"d_finished":0,"c":0,"l":1750863911734141,"d":72},{"name":"task_result","f":1750863911727462,"d_finished":2891,"c":9,"l":1750863911732604,"d":2891}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:11.734476Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:986:2841];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:05:11.734737Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:986:2841];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.002},{"events":["f_ack","l_task_result"],"t":0.008},{"events":["l_ProduceResults","f_Finish"],"t":0.009},{"events":["l_ack","l_processing","l_Finish"],"t":0.01}],"full":{"a":1750863911724469,"name":"_full_task","f":1750863911724469,"d_finished":0,"c":0,"l":1750863911734504,"d":10035},"events":[{"name":"bootstrap","f":1750863911724579,"d_finished":2165,"c":1,"l":1750863911726744,"d":2165},{"a":1750863911733737,"name":"ack","f":1750863911732708,"d_finished":866,"c":1,"l":1750863911733574,"d":1633},{"a":1750863911733722,"name":"processing","f":1750863911727452,"d_finished":3830,"c":10,"l":1750863911733576,"d":4612},{"name":"ProduceResults","f":1750863911725779,"d_finished":2119,"c":13,"l":1750863911734066,"d":2119},{"a":1750863911734069,"name":"Finish","f":1750863911734069,"d_finished":0,"c":0,"l":1750863911734504,"d":435},{"name":"task_result","f":1750863911727462,"d_finished":2891,"c":9,"l":1750863911732604,"d":2891}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:11.734794Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:05:11.724039Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=4512;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=4512;selected_rows=0; 2025-06-25T15:05:11.734829Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:05:11.735058Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKDatetime [GOOD] Test command err: 2025-06-25T15:04:24.430633Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:24.449369Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:24.449561Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:24.459939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:24.460128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:24.460341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:24.460432Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:24.460490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:24.460547Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:24.460614Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:24.460696Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:24.460784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:24.460853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.460935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:24.479883Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:24.480024Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:24.480072Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:24.480224Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.481664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:24.481738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:24.481781Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:24.481857Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:24.481914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:24.481941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:24.481970Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:24.482086Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.482128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:24.482162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:24.482186Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:24.482258Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:24.482296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:24.482319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:24.482340Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:24.482376Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:24.482397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:24.482422Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:24.482569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:24.482595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:24.482615Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:24.482789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:24.482829Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:24.482849Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:24.482964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:24.482995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.483015Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.483073Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:24.483116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:24.483142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:24.483169Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:24.483318Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=29; 2025-06-25T15:04:24.483387Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=34; 2025-06-25T15:04:24.483445Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=27; 2025-06-25T15:04:24.483514Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=38; 2025-06-25T15:04:24.483827Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:24.483880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:24.483907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:24.483947Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
LOB:0:9240];;column_id:9;chunk_idx:44;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:45;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:7272];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:8656];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9408];
;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9424];;;;switched=(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7200040;index_size:0;meta:(()););(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7198464;index_size:0;meta:(()););; 2025-06-25T15:05:10.915462Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:5585:7572];task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=2; 2025-06-25T15:05:10.916877Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-25T15:05:10.919938Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-25T15:05:11.051497Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-25T15:05:11.051573Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7198464;count=779;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-25T15:05:11.428076Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-25T15:05:11.428201Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=74999,74999,74999,74999,; 2025-06-25T15:05:11.428245Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7069450;count=1;packed=7200040; 2025-06-25T15:05:11.428302Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=86;data_size=60;sum=88752;count=1743; 2025-06-25T15:05:11.428390Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=25134;data_size=25124;sum=2348976;count=1744;size_of_meta=136; 2025-06-25T15:05:11.428441Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=25206;data_size=25196;sum=2411760;count=872;size_of_portion=208; 2025-06-25T15:05:11.428895Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-25T15:05:11.491312Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 633 2025-06-25T15:05:11.494639Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=28824008;raw_bytes=28296800;count=4;records=300200} inactive {blob_bytes=112525736;raw_bytes=109396450;count=217;records=1275200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:05:11.707575Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-25T15:05:11.707627Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;fline=with_appended.cpp:65;portions=222,;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a; 2025-06-25T15:05:11.708096Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::c968d5b6-51d511f0-a36c2293-f5cffa8a; 2025-06-25T15:05:11.708153Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:21623968;portions_count:222;); 2025-06-25T15:05:11.708188Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:11.708241Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:05:11.708291Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863567811;tx_id=18446744073709551615;;current_snapshot_ts=1750863865992; 2025-06-25T15:05:11.708337Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:11.708373Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:11.708399Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:11.708454Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.895000s; 2025-06-25T15:05:11.708493Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c968d5b6-51d511f0-a36c2293-f5cffa8a;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:11.708642Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 633 >> TColumnShardTestReadWrite::WriteRead >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt64 >> TColumnShardTestReadWrite::WriteRead [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKString [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteRead [GOOD] Test command err: 2025-06-25T15:05:13.728475Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:13.744392Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:13.744563Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:13.749323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:13.749447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:13.749608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:13.749675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:13.749753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:13.749827Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:13.749885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:13.749968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:13.750029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:13.750102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:13.750162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:13.765138Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:13.765246Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:13.765275Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:13.765399Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:13.765516Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:13.765564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:13.765589Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:13.765635Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:13.765679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:13.765706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:13.765756Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:13.765869Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:13.765905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:13.765925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:13.765940Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:13.765993Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:13.766021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:13.766040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:13.766053Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:13.766089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:13.766116Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:13.766136Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:13.766268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:13.766294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:13.766311Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:13.766420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:13.766457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:13.766482Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:13.766567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:13.766592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:13.766607Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:13.766649Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:13.766695Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:13.766717Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:13.766734Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:13.766866Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=35; 2025-06-25T15:05:13.766925Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=30; 2025-06-25T15:05:13.766972Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=25; 2025-06-25T15:05:13.767017Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=22; 2025-06-25T15:05:13.767082Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:13.767138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:13.767165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:13.767203Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:16.589090Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:05:16.589186Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-25T15:05:16.589243Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-25T15:05:16.589488Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:396:2407];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-06-25T15:05:16.589598Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:16.589683Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:16.589769Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:16.589916Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:16.590014Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:16.590105Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:16.590136Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:397:2408] finished for tablet 9437184 2025-06-25T15:05:16.590456Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:396:2407];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.013},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.014}],"full":{"a":1750863916575208,"name":"_full_task","f":1750863916575208,"d_finished":0,"c":0,"l":1750863916590173,"d":14965},"events":[{"name":"bootstrap","f":1750863916575388,"d_finished":3076,"c":1,"l":1750863916578464,"d":3076},{"a":1750863916589898,"name":"ack","f":1750863916588885,"d_finished":903,"c":1,"l":1750863916589788,"d":1178},{"a":1750863916589890,"name":"processing","f":1750863916579453,"d_finished":7917,"c":10,"l":1750863916589790,"d":8200},{"name":"ProduceResults","f":1750863916577172,"d_finished":2258,"c":13,"l":1750863916590124,"d":2258},{"a":1750863916590126,"name":"Finish","f":1750863916590126,"d_finished":0,"c":0,"l":1750863916590173,"d":47},{"name":"task_result","f":1750863916579465,"d_finished":6928,"c":9,"l":1750863916588765,"d":6928}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:16.590532Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:396:2407];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:05:16.590813Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:396:2407];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.003},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.013},{"events":["l_ProduceResults","f_Finish"],"t":0.014},{"events":["l_ack","l_processing","l_Finish"],"t":0.015}],"full":{"a":1750863916575208,"name":"_full_task","f":1750863916575208,"d_finished":0,"c":0,"l":1750863916590559,"d":15351},"events":[{"name":"bootstrap","f":1750863916575388,"d_finished":3076,"c":1,"l":1750863916578464,"d":3076},{"a":1750863916589898,"name":"ack","f":1750863916588885,"d_finished":903,"c":1,"l":1750863916589788,"d":1564},{"a":1750863916589890,"name":"processing","f":1750863916579453,"d_finished":7917,"c":10,"l":1750863916589790,"d":8586},{"name":"ProduceResults","f":1750863916577172,"d_finished":2258,"c":13,"l":1750863916590124,"d":2258},{"a":1750863916590126,"name":"Finish","f":1750863916590126,"d_finished":0,"c":0,"l":1750863916590559,"d":433},{"name":"task_result","f":1750863916579465,"d_finished":6928,"c":9,"l":1750863916588765,"d":6928}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:16.590864Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:05:16.574621Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=7600;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=7600;selected_rows=0; 2025-06-25T15:05:16.590896Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:05:16.591148Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:397:2408];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKString [GOOD] Test command err: 2025-06-25T15:04:24.430649Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:24.458006Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:24.458232Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:24.465060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:24.465264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:24.465481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:24.465626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:24.465734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:24.465828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:24.465958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:24.466063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:24.466166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:24.466279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.466387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:24.490874Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:24.491017Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:24.491063Z node 1 :TX_COLUMNSHARD NOTICE: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:24.491223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.491365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:24.491456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:24.491515Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:24.491613Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:24.491678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:24.491720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:24.491761Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:24.491925Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.491993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:24.492034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:24.492065Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:24.492167Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:24.492247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:24.492302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:24.492351Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:24.492406Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:24.492444Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:24.492475Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:24.492688Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:24.492721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:24.492758Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:24.492906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:24.492961Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:24.493015Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:24.493155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:24.493194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.493214Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.493263Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:24.493323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:24.493360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:24.493391Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:24.493596Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=45; 2025-06-25T15:04:24.493686Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=41; 2025-06-25T15:04:24.493777Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-25T15:04:24.493878Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=47; 2025-06-25T15:04:24.493963Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:24.494036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:24.494093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:24.494148Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... mn_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:9288];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:
21;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:8592];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:8280];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:8288];;;;switched=(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7574640;index_size:0;meta:(()););(portion_id:218;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7570008;index_size:0;meta:(()););(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7574640;index_size:0;meta:(()););; 2025-06-25T15:05:16.038544Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:6028:8014];task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=3; 2025-06-25T15:05:16.040718Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-25T15:05:16.043332Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-25T15:05:16.128364Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-25T15:05:16.128444Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7570008;count=819;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-25T15:05:16.556362Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-25T15:05:16.556453Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=9999,9999,9999,9999,; 2025-06-25T15:05:16.556495Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7433340;count=1;packed=7574640; 2025-06-25T15:05:16.556545Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=86;data_size=62;sum=95362;count=1749; 2025-06-25T15:05:16.556593Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=26414;data_size=26406;sum=2613226;count=1750;size_of_meta=136; 2025-06-25T15:05:16.556641Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=26486;data_size=26478;sum=2676226;count=875;size_of_portion=208; 2025-06-25T15:05:16.557047Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-25T15:05:16.632789Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 673 2025-06-25T15:05:16.636138Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=37893352;raw_bytes=37186700;count=5;records=375200} inactive {blob_bytes=111591656;raw_bytes=108150240;count=216;records=1200200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:05:16.962754Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-25T15:05:16.962811Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;fline=with_appended.cpp:65;portions=222,;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1; 2025-06-25T15:05:16.963563Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::cb6f69b0-51d511f0-b3c78d0f-6f523ba1; 2025-06-25T15:05:16.963629Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:22744072;portions_count:222;); 2025-06-25T15:05:16.963669Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:16.963734Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:05:16.963797Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863567937;tx_id=18446744073709551615;;current_snapshot_ts=1750863865998; 2025-06-25T15:05:16.963832Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:16.963867Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:16.963896Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:16.963959Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.886000s; 2025-06-25T15:05:16.964018Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cb6f69b0-51d511f0-b3c78d0f-6f523ba1;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:16.964171Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 673 >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32 [GOOD] >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKString >> TColumnShardTestReadWrite::CompactionInGranule_PKTimestamp [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32 [GOOD] Test command err: 2025-06-25T15:04:30.417866Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 
2025-06-25T15:04:30.443403Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:30.443656Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:30.450518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:30.450734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:30.450949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:30.451047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:30.451106Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:30.451162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:30.451233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:30.451290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:30.451362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:30.451422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:30.451480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:30.469775Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:30.469931Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:30.469977Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:30.470103Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:30.470233Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:30.470296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:30.470339Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:30.470433Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:30.470476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:30.470503Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:30.470530Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:30.470661Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:30.470700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:30.470726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:30.470745Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:30.470813Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:30.470858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:30.470886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:30.470902Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:30.470929Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:30.470950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:30.470980Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:30.471125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:30.471151Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:30.471175Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:30.471312Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:30.471355Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:30.471378Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:30.471482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:30.471511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:30.471529Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:30.471572Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:30.471608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:30.471634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:30.471663Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:30.471808Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=32; 2025-06-25T15:04:30.471884Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; 2025-06-25T15:04:30.471974Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=43; 2025-06-25T15:04:30.472036Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=26; 2025-06-25T15:04:30.472104Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:30.472171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:30.472196Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:30.472233Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... ;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:44;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:45;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:7272];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:8656];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:
0:9408];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9424];;;;switched=(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7200040;index_size:0;meta:(()););(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7198464;index_size:0;meta:(()););; 2025-06-25T15:05:17.147219Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=cd205f76-51d511f0-902fbf87-74e3829;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=cd205f76-51d511f0-902fbf87-74e3829;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:5585:7572];task_id=cd205f76-51d511f0-902fbf87-74e3829;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=2; 2025-06-25T15:05:17.149274Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-25T15:05:17.151872Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-25T15:05:17.282927Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-25T15:05:17.283007Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7198464;count=779;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-25T15:05:17.658558Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-25T15:05:17.658693Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=74999,74999,74999,74999,; 2025-06-25T15:05:17.658752Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7069450;count=1;packed=7200040; 2025-06-25T15:05:17.658836Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=86;data_size=60;sum=88752;count=1743; 2025-06-25T15:05:17.658990Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=25134;data_size=25124;sum=2348976;count=1744;size_of_meta=136; 2025-06-25T15:05:17.659060Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=25206;data_size=25196;sum=2411760;count=872;size_of_portion=208; 2025-06-25T15:05:17.659796Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-25T15:05:17.751365Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 633 2025-06-25T15:05:17.754157Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=28824008;raw_bytes=28296800;count=4;records=300200} inactive {blob_bytes=112525736;raw_bytes=109396450;count=217;records=1275200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:05:17.969725Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=cd205f76-51d511f0-902fbf87-74e3829;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-25T15:05:17.969780Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cd205f76-51d511f0-902fbf87-74e3829;fline=with_appended.cpp:65;portions=222,;task_id=cd205f76-51d511f0-902fbf87-74e3829; 2025-06-25T15:05:17.970257Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cd205f76-51d511f0-902fbf87-74e3829;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::cd205f76-51d511f0-902fbf87-74e3829; 2025-06-25T15:05:17.970309Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cd205f76-51d511f0-902fbf87-74e3829;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:21623968;portions_count:222;); 2025-06-25T15:05:17.970341Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cd205f76-51d511f0-902fbf87-74e3829;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:17.970391Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cd205f76-51d511f0-902fbf87-74e3829;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:05:17.970435Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cd205f76-51d511f0-902fbf87-74e3829;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863573815;tx_id=18446744073709551615;;current_snapshot_ts=1750863871996; 2025-06-25T15:05:17.970466Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cd205f76-51d511f0-902fbf87-74e3829;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:17.970501Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cd205f76-51d511f0-902fbf87-74e3829;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:17.970527Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cd205f76-51d511f0-902fbf87-74e3829;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:17.970579Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cd205f76-51d511f0-902fbf87-74e3829;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.895000s; 2025-06-25T15:05:17.970615Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cd205f76-51d511f0-902fbf87-74e3829;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:17.970737Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 633 >> TColumnShardTestReadWrite::ReadGroupBy ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKTimestamp [GOOD] Test command err: 2025-06-25T15:04:29.717996Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:29.739529Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:29.739705Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:29.745369Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:29.745517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:29.745683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:29.745800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:29.745862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:29.745930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:29.746007Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:29.746072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:29.746145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:29.746205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:29.746268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:29.762750Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:29.762936Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:29.762986Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:29.763198Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:29.763406Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:29.763470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:29.763505Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:29.763594Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:29.763636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:29.763661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:29.763685Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:29.763791Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:29.763835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:29.763881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:29.763903Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:29.763961Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:29.763992Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:29.764020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:29.764034Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:29.764059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:29.764078Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:29.764100Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:29.764231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:29.764255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:29.764272Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:29.764397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:29.764429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:29.764447Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:29.764550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:29.764576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:29.764591Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:29.764650Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:29.764704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:29.764729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:29.764747Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:29.764882Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=30; 2025-06-25T15:04:29.764947Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=35; 2025-06-25T15:04:29.765014Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=30; 2025-06-25T15:04:29.765075Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=33; 2025-06-25T15:04:29.765128Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:29.765186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:29.765212Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:29.765245Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... d:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:7272];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:8656];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:24;blob
_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9424];;;;switched=(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7504840;index_size:0;meta:(()););(portion_id:218;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7503120;index_size:0;meta:(()););(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7504840;index_size:0;meta:(()););; 2025-06-25T15:05:17.828092Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=cc985680-51d511f0-beb239be-1570a732;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=cc985680-51d511f0-beb239be-1570a732;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:5973:7960];task_id=cc985680-51d511f0-beb239be-1570a732;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=3; 2025-06-25T15:05:17.829742Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-25T15:05:17.832569Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-25T15:05:17.914497Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-25T15:05:17.914569Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7503120;count=812;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-25T15:05:18.332029Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-25T15:05:18.332134Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=74999,74999,74999,74999,; 2025-06-25T15:05:18.332181Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7369450;count=1;packed=7504840; 2025-06-25T15:05:18.332240Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=95658;count=1749; 2025-06-25T15:05:18.332297Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=26198;data_size=26188;sum=2562418;count=1750;size_of_meta=136; 2025-06-25T15:05:18.332360Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=26270;data_size=26260;sum=2625418;count=875;size_of_portion=208; 2025-06-25T15:05:18.332824Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-25T15:05:18.408790Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 666 2025-06-25T15:05:18.412569Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=37548672;raw_bytes=36867050;count=5;records=375200} inactive {blob_bytes=110272840;raw_bytes=107127800;count=216;records=1200200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:05:18.693797Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=cc985680-51d511f0-beb239be-1570a732;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-25T15:05:18.693852Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cc985680-51d511f0-beb239be-1570a732;fline=with_appended.cpp:65;portions=222,;task_id=cc985680-51d511f0-beb239be-1570a732; 2025-06-25T15:05:18.694332Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cc985680-51d511f0-beb239be-1570a732;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::cc985680-51d511f0-beb239be-1570a732; 2025-06-25T15:05:18.694388Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cc985680-51d511f0-beb239be-1570a732;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:22538992;portions_count:222;); 2025-06-25T15:05:18.694419Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cc985680-51d511f0-beb239be-1570a732;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:18.694473Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cc985680-51d511f0-beb239be-1570a732;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:05:18.694522Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cc985680-51d511f0-beb239be-1570a732;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863573235;tx_id=18446744073709551615;;current_snapshot_ts=1750863871305; 2025-06-25T15:05:18.694556Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cc985680-51d511f0-beb239be-1570a732;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:18.694592Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cc985680-51d511f0-beb239be-1570a732;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:18.694621Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cc985680-51d511f0-beb239be-1570a732;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:18.694677Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cc985680-51d511f0-beb239be-1570a732;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.873000s; 2025-06-25T15:05:18.694712Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=cc985680-51d511f0-beb239be-1570a732;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:18.694834Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 666 >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt64 >> TColumnShardTestReadWrite::WriteOverload+InStore >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheEnd [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheMiddle >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt32 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt32 [GOOD] Test command err: 2025-06-25T15:04:38.264434Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:38.283067Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:38.283294Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:38.288742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:38.288924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:38.289121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:38.289199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:38.289269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:38.289326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:38.289405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:38.289482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:38.289581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:38.289651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:38.289716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:38.306673Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:38.306808Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:38.306843Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:38.306992Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:38.307153Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:38.307210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:38.307240Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:38.307313Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:38.307353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:38.307379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:38.307415Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:38.307529Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:38.307579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:38.307605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:38.307625Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:38.307700Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:38.307741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:38.307774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:38.307794Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:38.307824Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:38.307850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:38.307876Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:38.308018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:38.308043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:38.308061Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:38.308188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:38.308217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:38.308243Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:38.308364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:38.308415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:38.308443Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:38.308517Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:38.308570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:38.308594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:38.308628Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:38.308807Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=34; 2025-06-25T15:04:38.308886Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; 2025-06-25T15:04:38.308967Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=51; 2025-06-25T15:04:38.309034Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=35; 2025-06-25T15:04:38.309108Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:38.309160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:38.309194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:38.309230Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... LOB:0:9240];;column_id:9;chunk_idx:44;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:45;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:7272];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:8656];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9408];;column_i
d:10;chunk_idx:19;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9424];;;;switched=(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7200040;index_size:0;meta:(()););(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7198464;index_size:0;meta:(()););; 2025-06-25T15:05:24.054621Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:5585:7572];task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=2; 2025-06-25T15:05:24.055902Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-25T15:05:24.058521Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-25T15:05:24.151992Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-25T15:05:24.152076Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7198464;count=779;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-25T15:05:24.529430Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-25T15:05:24.529529Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=74999,74999,74999,74999,; 2025-06-25T15:05:24.529575Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7069450;count=1;packed=7200040; 2025-06-25T15:05:24.529628Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=86;data_size=60;sum=88752;count=1743; 2025-06-25T15:05:24.529707Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=25134;data_size=25124;sum=2348976;count=1744;size_of_meta=136; 2025-06-25T15:05:24.529759Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5585:7572];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=25206;data_size=25196;sum=2411760;count=872;size_of_portion=208; 2025-06-25T15:05:24.530214Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-25T15:05:24.585732Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 633 2025-06-25T15:05:24.588895Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=28824008;raw_bytes=28296800;count=4;records=300200} inactive {blob_bytes=112525736;raw_bytes=109396450;count=217;records=1275200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:05:24.837452Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-25T15:05:24.837511Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;fline=with_appended.cpp:65;portions=222,;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521; 2025-06-25T15:05:24.837963Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::d139fbc6-51d511f0-9837fa7b-57dc7521; 2025-06-25T15:05:24.838021Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:21623968;portions_count:222;); 2025-06-25T15:05:24.838055Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:24.838109Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:05:24.838161Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863581668;tx_id=18446744073709551615;;current_snapshot_ts=1750863879849; 2025-06-25T15:05:24.838194Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:24.838229Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:24.838257Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:24.838313Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.895000s; 2025-06-25T15:05:24.838351Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=d139fbc6-51d511f0-9837fa7b-57dc7521;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:24.838484Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 633 >> TColumnShardTestReadWrite::CompactionGC [GOOD] >> TColumnShardTestReadWrite::WriteStandalone ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionGC [GOOD] Test command err: 2025-06-25T15:04:29.756628Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:29.775519Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:29.775679Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:29.780949Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:29.781080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:29.781245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:29.781323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:29.781383Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:29.781438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:29.781510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:29.781580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:29.781673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:29.781799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:29.781927Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:29.800151Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:29.800297Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:29.800360Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:29.800560Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:29.800721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:29.800776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:29.800803Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:29.800864Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:29.800902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:29.800936Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:29.800964Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:29.801070Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:29.801113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:29.801134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:29.801149Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:29.801210Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:29.801243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:29.801263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:29.801277Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:29.801301Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:29.801319Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:29.801337Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:29.801454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:29.801477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:29.801497Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:29.801590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:29.801622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:29.801652Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:29.801755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:29.801790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:29.801810Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:29.801852Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:29.801885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:29.801907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:29.801924Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:29.802042Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=32; 2025-06-25T15:04:29.802110Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=31; 2025-06-25T15:04:29.802163Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=26; 2025-06-25T15:04:29.802220Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=26; 2025-06-25T15:04:29.802275Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:29.802317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:29.802351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:29.802392Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... -83d30f2a-9150c1fa;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::CLEANUP::PORTIONS;success=1; 2025-06-25T15:05:06.115489Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c73b46e8-51d511f0-83d30f2a-9150c1fa;fline=manager.cpp:15;event=unlock;process_id=CS::CLEANUP::PORTIONS::PORTIONS_DROP::c73b46e8-51d511f0-83d30f2a-9150c1fa; 2025-06-25T15:05:06.115588Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c73b46e8-51d511f0-83d30f2a-9150c1fa;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:06.115657Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;task_id=c73b46e8-51d511f0-83d30f2a-9150c1fa;tablet_id=9437184;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-25T15:05:06.115707Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c73b46e8-51d511f0-83d30f2a-9150c1fa;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-25T15:05:06.115772Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c73b46e8-51d511f0-83d30f2a-9150c1fa;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:06.115827Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c73b46e8-51d511f0-83d30f2a-9150c1fa;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:06.115868Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c73b46e8-51d511f0-83d30f2a-9150c1fa;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:06.115950Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c73b46e8-51d511f0-83d30f2a-9150c1fa;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.500000s; 2025-06-25T15:05:06.116008Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c73b46e8-51d511f0-83d30f2a-9150c1fa;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:06.116226Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob 
DS:0:[9437184:2:18:2:0:6043488:0] 2025-06-25T15:05:06.116296Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:45:2:0:6171112:0] 2025-06-25T15:05:06.116351Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:46:3:0:6043488:0] 2025-06-25T15:05:06.116383Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:38:4:0:6043488:0] 2025-06-25T15:05:06.116413Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:24:2:0:6171112:0] 2025-06-25T15:05:06.116442Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:21:2:0:6171112:0] 2025-06-25T15:05:06.116463Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:16:3:0:6171112:0] 2025-06-25T15:05:06.116483Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:8:4:0:6171112:0] 2025-06-25T15:05:06.116515Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:43:3:0:6171112:0] 2025-06-25T15:05:06.116549Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:13:3:0:6043488:0] 2025-06-25T15:05:06.116571Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:55:3:0:6171112:0] 2025-06-25T15:05:06.116597Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:36:2:0:6171112:0] 2025-06-25T15:05:06.116623Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:22:3:0:6043488:0] 2025-06-25T15:05:06.116644Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:27:2:0:6043488:0] 2025-06-25T15:05:06.116664Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:23:4:0:6043488:0] 2025-06-25T15:05:06.116683Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:51:2:0:6043488:0] 2025-06-25T15:05:06.116717Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:32:4:0:6043488:0] 2025-06-25T15:05:06.116744Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:30:2:0:6043488:0] 2025-06-25T15:05:06.116768Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:28:3:0:6043488:0] 2025-06-25T15:05:06.116788Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:34:3:0:1792:0] 2025-06-25T15:05:06.116809Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:2:4:0:6171112:0] 2025-06-25T15:05:06.116829Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:44:4:0:6043488:0] 2025-06-25T15:05:06.116848Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:49:3:0:6043488:0] 2025-06-25T15:05:06.116877Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:48:2:0:6171112:0] 2025-06-25T15:05:06.116899Z 
node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:52:3:0:6043488:0] 2025-06-25T15:05:06.116929Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:10:3:0:6043488:0] 2025-06-25T15:05:06.116951Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:40:3:0:6171112:0] 2025-06-25T15:05:06.116970Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:41:4:0:6043488:0] 2025-06-25T15:05:06.116989Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:12:2:0:6043488:0] 2025-06-25T15:05:06.117010Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:1:3:0:6171112:0] 2025-06-25T15:05:06.117040Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:7:3:0:6043488:0] 2025-06-25T15:05:06.117072Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:9:2:0:6043488:0] 2025-06-25T15:05:06.117108Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:35:4:0:6043488:0] 2025-06-25T15:05:06.117140Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:42:2:0:6043488:0] 2025-06-25T15:05:06.117184Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:19:3:0:6171112:0] 2025-06-25T15:05:06.117216Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:54:2:0:6043488:0] 2025-06-25T15:05:06.117245Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:15:2:0:6043488:0] 2025-06-25T15:05:06.117276Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:57:2:0:6043488:0] 2025-06-25T15:05:06.117299Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:20:4:0:6043488:0] 2025-06-25T15:05:06.117322Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:29:4:0:6171112:0] 2025-06-25T15:05:06.117344Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:26:4:0:6171112:0] 2025-06-25T15:05:06.117366Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:11:4:0:6171112:0] 2025-06-25T15:05:06.117387Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:31:3:0:6171112:0] 2025-06-25T15:05:06.117409Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:14:4:0:6171112:0] 2025-06-25T15:05:06.117429Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:53:4:0:6171112:0] 2025-06-25T15:05:06.117449Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:50:4:0:6171112:0] 2025-06-25T15:05:06.117483Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:33:2:0:6043488:0] 2025-06-25T15:05:06.117507Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager 
at tablet 9437184 Delete Blob DS:0:[9437184:2:56:4:0:6043488:0] 2025-06-25T15:05:06.117528Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:39:2:0:6043488:0] 2025-06-25T15:05:06.117547Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:6:2:0:6171112:0] 2025-06-25T15:05:06.117582Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:47:4:0:6043488:0] 2025-06-25T15:05:06.117615Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:37:3:0:6171112:0] 2025-06-25T15:05:06.117636Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:25:3:0:6043488:0] 2025-06-25T15:05:06.117657Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:17:4:0:6043488:0] 2025-06-25T15:05:06.117681Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:4:3:0:6171112:0] GC for channel 3 deletes blobs: WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 Compactions happened: 34 Cleanups happened: 1 Old portions: 1 2 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 58 Cleaned up portions: 1 2 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 58 >> TColumnShardTestReadWrite::WriteOverload+InStore [GOOD] >> TColumnShardTestReadWrite::CompactionGCFailingBs [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteOverload+InStore [GOOD] Test command err: 2025-06-25T15:05:21.143967Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:21.160197Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:21.160371Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:21.165088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:21.165228Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:21.165378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:21.165439Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:21.165523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:21.165606Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:21.165675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:21.165744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:21.165803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:21.165875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:21.165935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:21.181417Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:21.181533Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:21.181564Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:21.181713Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:21.181849Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:21.181908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:21.181940Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:21.182001Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:21.182039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:21.182063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:21.182105Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:21.182202Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:21.182235Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:21.182266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:21.182285Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:21.182349Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:21.182390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:21.182424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:21.182441Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:21.182467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:21.182486Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:21.182504Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-25T15:05:21.182637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:21.182665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:21.182680Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:21.182789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:21.182839Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:21.182858Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:21.182955Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:21.182979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:21.182998Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:21.183050Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:21.183099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:21.183126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:21.183145Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:21.183268Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=34; 2025-06-25T15:05:21.183335Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=34; 2025-06-25T15:05:21.183388Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=26; 2025-06-25T15:05:21.183442Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=25; 
2025-06-25T15:05:21.183516Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:21.183566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:21.183598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:21.183641Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... e::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=4428;count=36;size_of_meta=136; 2025-06-25T15:05:27.270688Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=5724;count=18;size_of_portion=208; 2025-06-25T15:05:27.271530Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:18 Blob count: 1 2025-06-25T15:05:27.271624Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=21;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=19;operation_id=18; 2025-06-25T15:05:27.283177Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:18 Blob count: 1 2025-06-25T15:05:27.284778Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=d2f6b9f4-51d511f0-8d4f5ea7-f979dfee; 2025-06-25T15:05:27.284939Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=1786;count=37; 2025-06-25T15:05:27.285014Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=4674;count=38;size_of_meta=136; 2025-06-25T15:05:27.285072Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6042;count=19;size_of_portion=208; 2025-06-25T15:05:27.285885Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:19 Blob count: 1 2025-06-25T15:05:27.285984Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=22;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=20;operation_id=19; 2025-06-25T15:05:27.297862Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:19 Blob count: 1 2025-06-25T15:05:27.307028Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=d3148a06-51d511f0-93d9a432-59dc1082; 2025-06-25T15:05:27.307233Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=1880;count=39; 2025-06-25T15:05:27.307301Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=4920;count=40;size_of_meta=136; 2025-06-25T15:05:27.307359Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6360;count=20;size_of_portion=208; 2025-06-25T15:05:27.308286Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:20 Blob count: 1 2025-06-25T15:05:27.308402Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=23;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=21;operation_id=20; 2025-06-25T15:05:27.320172Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:20 Blob count: 1 2025-06-25T15:05:27.321851Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=d3354250-51d511f0-a9324fa0-7dce2473; 2025-06-25T15:05:27.322014Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=1974;count=41; 2025-06-25T15:05:27.322089Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=5166;count=42;size_of_meta=136; 2025-06-25T15:05:27.322148Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6678;count=21;size_of_portion=208; 2025-06-25T15:05:27.322992Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:21 Blob count: 1 2025-06-25T15:05:27.323090Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=24;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=22;operation_id=21; 2025-06-25T15:05:27.334409Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:21 Blob count: 1 2025-06-25T15:05:27.339953Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:210;event=register_operation;operation_id=22;last=22; 2025-06-25T15:05:27.340011Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=write_queue.cpp:14;writing_size=6330728;operation_id=d3e573d2-51d511f0-9961783a-bbefc65d;in_flight=1;size_in_flight=6330728; 2025-06-25T15:05:27.533686Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:128:2158];write_id=22;path_id={internal: 9438184000001, ss: 1};fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=8246112;count=1;actions=__DEFAULT,;waiting=1;; 2025-06-25T15:05:27.570053Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=6330728;event=data_write_finished;writing_id=d3e573d2-51d511f0-9961783a-bbefc65d; 2025-06-25T15:05:27.570240Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=2068;count=43; 2025-06-25T15:05:27.570294Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=246;data_size=236;sum=5412;count=44;size_of_meta=136; 2025-06-25T15:05:27.570347Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=318;data_size=308;sum=6996;count=22;size_of_portion=208; 2025-06-25T15:05:27.571140Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:22 Blob count: 1 2025-06-25T15:05:27.571225Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=25;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=23;operation_id=22; 2025-06-25T15:05:27.582647Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:22 Blob count: 1 >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionGCFailingBs [GOOD] Test command err: 2025-06-25T15:04:29.062322Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:29.081837Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:29.082019Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:29.087071Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:29.087212Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:29.087379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:29.087452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:29.087517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:29.087586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:29.087665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:29.087726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:29.087786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:29.087873Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:29.087935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:29.104244Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:29.104360Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:29.104391Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:29.104529Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:29.104645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:29.104724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:29.104753Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 
2025-06-25T15:04:29.104806Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:29.104846Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:29.104870Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:29.104903Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:29.105029Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:29.105079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:29.105107Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:29.105123Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:29.105180Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:29.105226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:29.105251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:29.105274Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:29.105313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:29.105342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:29.105364Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:29.105514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:29.105552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:29.105576Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:29.105718Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:29.105769Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:29.105800Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:29.105885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:29.105925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:29.105944Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:29.105991Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:29.106027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:29.106050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:29.106066Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:29.106211Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=37; 2025-06-25T15:04:29.106300Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=41; 2025-06-25T15:04:29.106358Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=30; 2025-06-25T15:04:29.106416Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=29; 2025-06-25T15:04:29.106484Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:29.106549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:29.106573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:29.106618Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 388e-51d511f0-9bfa4de4-309aba9;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::CLEANUP::PORTIONS;success=1; 2025-06-25T15:05:06.861340Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c7ad388e-51d511f0-9bfa4de4-309aba9;fline=manager.cpp:15;event=unlock;process_id=CS::CLEANUP::PORTIONS::PORTIONS_DROP::c7ad388e-51d511f0-9bfa4de4-309aba9; 2025-06-25T15:05:06.861399Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c7ad388e-51d511f0-9bfa4de4-309aba9;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:06.861457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;task_id=c7ad388e-51d511f0-9bfa4de4-309aba9;tablet_id=9437184;fline=columnshard_impl.cpp:481;event=skip_compaction;reason=disabled; 2025-06-25T15:05:06.861499Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c7ad388e-51d511f0-9bfa4de4-309aba9;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-25T15:05:06.861551Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c7ad388e-51d511f0-9bfa4de4-309aba9;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:06.861594Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c7ad388e-51d511f0-9bfa4de4-309aba9;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:06.861629Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c7ad388e-51d511f0-9bfa4de4-309aba9;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:06.861684Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c7ad388e-51d511f0-9bfa4de4-309aba9;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.500000s; 2025-06-25T15:05:06.861735Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=c7ad388e-51d511f0-9bfa4de4-309aba9;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:06.861894Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:8:4:0:6043488:0] 2025-06-25T15:05:06.861950Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:35:4:0:6171112:0] 2025-06-25T15:05:06.861978Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:55:3:0:6043488:0] 2025-06-25T15:05:06.862014Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:39:2:0:6043488:0] 2025-06-25T15:05:06.862034Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:50:4:0:6043488:0] 2025-06-25T15:05:06.862054Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob 
DS:0:[9437184:3:27:2:0:6171112:0] 2025-06-25T15:05:06.862077Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:18:2:0:6043488:0] 2025-06-25T15:05:06.862100Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:36:2:0:6043488:0] 2025-06-25T15:05:06.862124Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:45:2:0:6043488:0] 2025-06-25T15:05:06.862145Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:44:4:0:6043488:0] 2025-06-25T15:05:06.862166Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:23:4:0:6043488:0] 2025-06-25T15:05:06.862185Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:11:4:0:6043488:0] 2025-06-25T15:05:06.862204Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:51:2:0:6171112:0] 2025-06-25T15:05:06.862224Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:34:3:0:6171112:0] 2025-06-25T15:05:06.862244Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:41:4:0:6171112:0] 2025-06-25T15:05:06.862265Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:53:4:0:6171112:0] 2025-06-25T15:05:06.862285Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:5:4:0:6043488:0] 2025-06-25T15:05:06.862306Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:54:2:0:6043488:0] 2025-06-25T15:05:06.862330Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:7:3:0:6043488:0] 2025-06-25T15:05:06.862352Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:20:4:0:6043488:0] 2025-06-25T15:05:06.862374Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:2:4:0:6171112:0] 2025-06-25T15:05:06.862403Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:46:3:0:6171112:0] 2025-06-25T15:05:06.862428Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:26:4:0:6043488:0] 2025-06-25T15:05:06.862451Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:24:2:0:6171112:0] 2025-06-25T15:05:06.862471Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:2:1:3:0:6171112:0] 2025-06-25T15:05:06.862501Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:43:3:0:6171112:0] 2025-06-25T15:05:06.862534Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:22:3:0:6171112:0] 2025-06-25T15:05:06.862555Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:30:2:0:6043488:0] 2025-06-25T15:05:06.862575Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:40:3:0:6043488:0] 2025-06-25T15:05:06.862605Z 
node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:16:3:0:6043488:0] 2025-06-25T15:05:06.862629Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:31:3:0:6043488:0] 2025-06-25T15:05:06.862649Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:49:3:0:6043488:0] 2025-06-25T15:05:06.862666Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:48:2:0:6171112:0] 2025-06-25T15:05:06.862685Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:33:2:0:6043488:0] 2025-06-25T15:05:06.862704Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:29:4:0:6171112:0] 2025-06-25T15:05:06.862724Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:2:4:0:6171112:0] 2025-06-25T15:05:06.862754Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:13:3:0:6043488:0] 2025-06-25T15:05:06.862777Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:47:4:0:6043488:0] 2025-06-25T15:05:06.862798Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:38:4:0:6171112:0] 2025-06-25T15:05:06.862819Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:28:3:0:6043488:0] 2025-06-25T15:05:06.862838Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:32:4:0:1792:0] 2025-06-25T15:05:06.862858Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:6:2:0:6171112:0] 2025-06-25T15:05:06.862880Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:15:2:0:6043488:0] 2025-06-25T15:05:06.862903Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:4:3:0:6171112:0] 2025-06-25T15:05:06.862935Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:17:4:0:6171112:0] 2025-06-25T15:05:06.862956Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:21:2:0:6043488:0] 2025-06-25T15:05:06.862976Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:37:3:0:6043488:0] 2025-06-25T15:05:06.862997Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:14:4:0:6171112:0] 2025-06-25T15:05:06.863016Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:10:3:0:6043488:0] 2025-06-25T15:05:06.863039Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:12:2:0:6171112:0] 2025-06-25T15:05:06.863066Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:19:3:0:6171112:0] 2025-06-25T15:05:06.863094Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:52:3:0:6043488:0] 2025-06-25T15:05:06.863114Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at 
tablet 9437184 Delete Blob DS:0:[9437184:3:25:3:0:6043488:0] 2025-06-25T15:05:06.863133Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:42:2:0:6043488:0] 2025-06-25T15:05:06.863156Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Delete Blob DS:0:[9437184:3:9:2:0:6171112:0] GC for channel 4 deletes blobs: WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 WAIT_CLEANING: 1 Compactions happened: 35 Cleanups happened: 1 Old portions: 1 2 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 58 Cleaned up portions: 1 2 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 58 >> TColumnShardTestReadWrite::CompactionInGranule_PKString_Reboot >> TColumnShardTestReadWrite::WriteReadNoCompression >> TColumnShardTestReadWrite::CompactionSplitGranule_PKUInt32 [GOOD] >> TColumnShardTestReadWrite::WriteStandalone [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteStandalone [GOOD] Test command err: 2025-06-25T15:05:27.383552Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:27.401240Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:27.401421Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:27.406658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:27.406813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:27.406974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:27.407054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:27.407125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:27.407208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:27.407311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:27.407399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:27.407469Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:27.407551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:27.407658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:27.424819Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:27.424930Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:27.424962Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:27.425079Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:27.425205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:27.425249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:27.425273Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:27.425331Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:27.425367Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:27.425397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:27.425424Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:27.425542Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:27.425582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:27.425602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:27.425617Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:27.425667Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:27.425707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:27.425738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:27.425757Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:27.425788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:27.425809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:27.425827Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:27.425968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:27.425996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:27.426012Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:27.426139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:27.426168Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:27.426201Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:27.426295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:27.426323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:27.426346Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:27.426388Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:27.426434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:27.426459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:27.426477Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:27.426605Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=27; 2025-06-25T15:05:27.426666Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=29; 2025-06-25T15:05:27.426717Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=27; 2025-06-25T15:05:27.426764Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=22; 2025-06-25T15:05:27.426824Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:27.426865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:27.426889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:27.426924Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... [{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"19,19,19,19,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"20,20,20,20,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"21,21,21,21,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"22,22,22,22,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"23,23,23,23,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"24,24,24,24,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"25,25,25,25,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"26,26,26,26,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"27,27,27,27,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"28,28,28,28,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"29,29,29,29,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"30,30,30,30,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"31,31,31,31,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"32,32,32,32,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"33,33,33,33,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"34,34,34,34,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"35,35,35,35,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"36,36,36,36,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"37,37,37,37,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"38,38,38,38,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"39,39,39,39,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"40,40,40,40,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"41,41,41,41,"}},{"i":{"txs":[],"s
tarts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"42,42,42,42,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"43,43,43,43,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"44,44,44,44,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"45,45,45,45,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"46,46,46,46,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"47,47,47,47,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"48,48,48,48,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"49,49,49,49,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"50,50,50,50,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"51,51,51,51,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"52,52,52,52,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"53,53,53,53,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"54,54,54,54,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"55,55,55,55,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"56,56,56,56,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"57,57,57,57,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"58,58,58,58,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"59,59,59,59,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"60,60,60,60,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"61,61,61,61,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"62,62,62,62,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"63,63,63,63,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"64,64,64,64,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"65,65,65,65,
"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"66,66,66,66,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"67,67,67,67,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"68,68,68,68,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"69,69,69,69,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"70,70,70,70,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"71,71,71,71,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"72,72,72,72,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"73,73,73,73,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"74,74,74,74,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"75,75,75,75,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"76,76,76,76,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"77,77,77,77,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"78,78,78,78,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"79,79,79,79,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"80,80,80,80,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"81,81,81,81,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"82,82,82,82,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"83,83,83,83,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"84,84,84,84,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"85,85,85,85,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"86,86,86,86,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"87,87,87,87,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"88,88,88,88,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include"
:0,"pk":"89,89,89,89,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"90,90,90,90,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"91,91,91,91,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"92,92,92,92,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"93,93,93,93,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"94,94,94,94,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"95,95,95,95,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"96,96,96,96,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"97,97,97,97,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"98,98,98,98,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"99,99,99,99,"}}]}; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKUInt32 [GOOD] Test command err: 2025-06-25T15:04:27.505680Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:27.532327Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:27.532506Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:27.537564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:27.537768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:27.537926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:27.537990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:27.538045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:27.538114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:27.538197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:27.538264Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:27.538326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:27.538400Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:27.538485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:27.555454Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:27.555569Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:27.555600Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:27.555737Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:27.555870Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:27.555947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:27.555990Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:27.556071Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:27.556126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:27.556161Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:27.556195Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:27.556365Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:27.556425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:27.556485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:27.556510Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:27.556569Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:27.556601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:27.556630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:27.556649Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:27.556682Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:27.556719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:27.556737Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:27.556869Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:27.556894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:27.556914Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:27.557102Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:27.557147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:27.557173Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:27.557294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:27.557329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:27.557351Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:27.557404Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:27.557456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:27.557491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:27.557513Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:27.557631Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=26; 2025-06-25T15:04:27.557699Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; 2025-06-25T15:04:27.557761Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-25T15:04:27.557808Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=23; 2025-06-25T15:04:27.557882Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:27.557924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:27.557950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:27.557985Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
ule;load_stage_name=EXECUTE:granule/portions;fline=constructor_meta.cpp:71;memory_size=2054;data_size=2030;sum=506740;count=432;size_of_meta=136; 2025-06-25T15:05:30.225622Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2126;data_size=2102;sum=522292;count=216;size_of_portion=208; 2025-06-25T15:05:30.225764Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=17943; 2025-06-25T15:05:30.225833Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=8; 2025-06-25T15:05:30.226532Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=655; 2025-06-25T15:05:30.226578Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=18896; 2025-06-25T15:05:30.226634Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=19037; 2025-06-25T15:05:30.226693Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=9; 2025-06-25T15:05:30.226898Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=163; 2025-06-25T15:05:30.226938Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=19763; 2025-06-25T15:05:30.227123Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=120; 2025-06-25T15:05:30.227225Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=62; 2025-06-25T15:05:30.227360Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=103; 2025-06-25T15:05:30.227499Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=107; 2025-06-25T15:05:30.230225Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2678; 2025-06-25T15:05:30.233337Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=3037; 2025-06-25T15:05:30.233402Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=8; 2025-06-25T15:05:30.233452Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=9; 2025-06-25T15:05:30.233486Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=8; 2025-06-25T15:05:30.233555Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=32; 2025-06-25T15:05:30.233589Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=4; 2025-06-25T15:05:30.233669Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=53; 2025-06-25T15:05:30.233714Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=8; 2025-06-25T15:05:30.233780Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=27; 2025-06-25T15:05:30.233841Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=35; 2025-06-25T15:05:30.233930Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=61; 2025-06-25T15:05:30.233973Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=32673; 2025-06-25T15:05:30.234138Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108238352;raw_bytes=183045560;count=15;records=1915000} inactive {blob_bytes=205426288;raw_bytes=316809958;count=39;records=3635000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:05:30.234238Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 
2025-06-25T15:05:30.234291Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:05:30.234367Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:05:30.234412Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:05:30.234529Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:30.234592Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:05:30.234651Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863571615;tx_id=18446744073709551615;;current_snapshot_ts=1750863868546; 2025-06-25T15:05:30.234690Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:30.234732Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:30.234776Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:30.234884Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:30.238734Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:05:30.239043Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:05:30.239088Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:05:30.239121Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:05:30.239170Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:30.239260Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:05:30.239343Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863571615;tx_id=18446744073709551615;;current_snapshot_ts=1750863868546; 2025-06-25T15:05:30.239392Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:30.239440Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:30.239489Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:30.239570Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.999000s; 2025-06-25T15:05:30.239621Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> TColumnShardTestReadWrite::ReadSomePrograms >> Normalizers::SchemaVersionsNormalizer >> TColumnShardTestReadWrite::ReadSomePrograms [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadSomePrograms [GOOD] Test command err: 2025-06-25T15:05:32.559295Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:32.576180Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:32.576379Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:32.581468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:32.581612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:32.581772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:32.581832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:32.581881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:32.581948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:32.582008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:32.582122Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:32.582217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:32.582288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:32.582344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:32.597918Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:32.598054Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:32.598086Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:32.598244Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:32.598368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:32.598415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:32.598450Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:32.598500Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:32.598537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:32.598570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:32.598599Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:32.598705Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:32.598759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:32.598787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:32.598805Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:32.598858Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:32.598901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:32.598934Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:32.598952Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:32.598977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:32.599001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:32.599024Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-25T15:05:32.599143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:32.599169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:32.599184Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:32.599315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:32.599358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:32.599377Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:32.599468Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:32.599494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:32.599512Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:32.599554Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:32.599607Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:32.599634Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:32.599663Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:32.599830Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=27; 2025-06-25T15:05:32.599886Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=29; 2025-06-25T15:05:32.599951Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=34; 2025-06-25T15:05:32.600008Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=28; 
2025-06-25T15:05:32.600061Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:32.600108Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:32.600133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:32.600170Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... lem=Background activities cannot be started: no index at tablet; 2025-06-25T15:05:33.118800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923003846432;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750863933539;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T15:05:33.118873Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923003846432;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750863933539;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;fline=schema.h:38;event=sync_schema; 2025-06-25T15:05:33.130609Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750863933539;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;this=88923003846432;op_tx=10:TX_KIND_SCHEMA;min=1750863933539;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750863933539;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;int_this=89129165449216;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T15:05:33.130727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750863933539;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;this=88923003846432;op_tx=10:TX_KIND_SCHEMA;min=1750863933539;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750863933539;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;int_this=89129165449216;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:103:2136]; 2025-06-25T15:05:33.130786Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750863933539;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;this=88923003846432;op_tx=10:TX_KIND_SCHEMA;min=1750863933539;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750863933539;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:0;;int_this=89129165449216;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=10; 2025-06-25T15:05:33.131063Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T15:05:33.131208Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863933539 at tablet 9437184, mediator 0 
2025-06-25T15:05:33.131258Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] execute at tablet 9437184 2025-06-25T15:05:33.131517Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T15:05:33.131600Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000001, ss: 1} ttl settings: { Version: 1 } at tablet 9437184 2025-06-25T15:05:33.136738Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:05:33.136840Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000001; 2025-06-25T15:05:33.136900Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000001; 2025-06-25T15:05:33.142550Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000001; 2025-06-25T15:05:33.142671Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tx_controller.cpp:215;event=finished_tx;tx_id=10; 2025-06-25T15:05:33.165601Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] complete at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=3200;columns=5; 2025-06-25T15:05:33.168119Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:210;event=register_operation;operation_id=1;last=1; 2025-06-25T15:05:33.168218Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=write_queue.cpp:14;writing_size=3200;operation_id=d75ec25c-51d511f0-aa4e4f4c-af21dbee;in_flight=1;size_in_flight=3200; 2025-06-25T15:05:33.178199Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;scope=TBuildBatchesTask::DoExecute;tablet_id=9437184;parent_id=[1:128:2158];write_id=1;path_id={internal: 9438184000001, ss: 1};fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=3768;count=1;actions=__DEFAULT,;waiting=1;; 2025-06-25T15:05:33.179555Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;fline=columnshard__write.cpp:105;writing_size=3200;event=data_write_finished;writing_id=d75ec25c-51d511f0-aa4e4f4c-af21dbee; 2025-06-25T15:05:33.179761Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:51;memory_size=60;data_size=20;sum=60;count=1; 2025-06-25T15:05:33.179834Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=212;data_size=188;sum=212;count=2;size_of_meta=136; 2025-06-25T15:05:33.179910Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=284;data_size=260;sum=284;count=1;size_of_portion=208; 2025-06-25T15:05:33.180502Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-06-25T15:05:33.180665Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=2;operation_id=1; 2025-06-25T15:05:33.192234Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 1 2025-06-25T15:05:33.204869Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863933545 at tablet 9437184, mediator 0 2025-06-25T15:05:33.204937Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] execute at tablet 9437184 2025-06-25T15:05:33.205170Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=100;fline=abstract.h:83;progress_tx_id=100;lock_id=1;broken=0; 2025-06-25T15:05:33.205440Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=100;fline=tx_controller.cpp:215;event=finished_tx;tx_id=100; 2025-06-25T15:05:33.216885Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[6] complete at tablet 9437184 2025-06-25T15:05:33.216976Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;fline=abstract.h:93;progress_tx_id=100;lock_id=1;broken=0; 2025-06-25T15:05:33.217181Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;commit_tx_id=100;commit_lock_id=1;fline=manager.cpp:177;event=remove_by_insert_id;id=2;operation_id=1; 2025-06-25T15:05:33.217221Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Complete;commit_tx_id=100;commit_lock_id=1;fline=manager.cpp:180;event=remove_operation;operation_id=1; 2025-06-25T15:05:33.217512Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:236;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:05:33.217570Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:33.217643Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-25T15:05:33.230968Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:33.231052Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:33.231097Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:33.231206Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:33.231568Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 100 scanId: 0 version: {1750863933545:100} readable: {1750863933545:max} at tablet 9437184 2025-06-25T15:05:33.243153Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 100 scanId: 0 at tablet 9437184 2025-06-25T15:05:33.243279Z node 1 :TX_COLUMNSHARD_SCAN WARN: log.cpp:784: tx_id=100;scan_id=0;gen=0;table=;snapshot={1750863933545:100};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:14;event=TTxScan failed;problem=cannot parse program;details=Can't parse SsaProgram: Can't parse TOlapProgram protobuf; >> TColumnShardTestReadWrite::CompactionSplitGranule_PKUInt64 [GOOD] >> TColumnShardTestReadWrite::WriteReadNoCompression [GOOD] >> Normalizers::SchemaVersionsNormalizer [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKUInt64 [GOOD] Test command err: 2025-06-25T15:04:28.573746Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:28.594129Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:28.594304Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:28.599928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 
2025-06-25T15:04:28.600134Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:28.600304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:28.600402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:28.600484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:28.600558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:28.600704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:28.600785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:28.600882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:28.600943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:28.601017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:28.619149Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:28.619267Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:28.619301Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:28.619445Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:28.619584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:28.619654Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:28.619684Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:28.619747Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:28.619788Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:28.619815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:28.619851Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:28.619961Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:28.620012Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:28.620047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:28.620063Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:28.620116Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:28.620147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:28.620173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:28.620187Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:28.620213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:28.620235Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:28.620253Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-25T15:04:28.620416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:28.620446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:28.620465Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:28.620580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:28.620609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:28.620626Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:28.620739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:28.620784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:28.620803Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:28.620855Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:28.620895Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:28.620916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:28.620946Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:28.621076Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=33; 2025-06-25T15:04:28.621133Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=31; 2025-06-25T15:04:28.621192Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-25T15:04:28.621238Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=23; 
2025-06-25T15:04:28.621307Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:28.621358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:28.621381Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:28.621418Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... ule;load_stage_name=EXECUTE:granule/portions;fline=constructor_meta.cpp:71;memory_size=2086;data_size=2078;sum=513488;count=432;size_of_meta=136; 2025-06-25T15:05:33.439876Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2158;data_size=2150;sum=529040;count=216;size_of_portion=208; 2025-06-25T15:05:33.440004Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=22160; 2025-06-25T15:05:33.440061Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=8; 2025-06-25T15:05:33.440712Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=581; 2025-06-25T15:05:33.440753Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=23021; 2025-06-25T15:05:33.440804Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=23134; 2025-06-25T15:05:33.440856Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=10; 2025-06-25T15:05:33.441018Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=128; 2025-06-25T15:05:33.441058Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=23751; 
2025-06-25T15:05:33.441176Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=73; 2025-06-25T15:05:33.441273Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=63; 2025-06-25T15:05:33.441385Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=81; 2025-06-25T15:05:33.441488Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=72; 2025-06-25T15:05:33.444254Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2722; 2025-06-25T15:05:33.447753Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=3382; 2025-06-25T15:05:33.447826Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=10; 2025-06-25T15:05:33.447869Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=10; 2025-06-25T15:05:33.447911Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=5; 2025-06-25T15:05:33.447970Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=33; 2025-06-25T15:05:33.447998Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=4; 2025-06-25T15:05:33.448078Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=46; 2025-06-25T15:05:33.448107Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=4; 2025-06-25T15:05:33.448163Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=28; 2025-06-25T15:05:33.448228Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=38; 2025-06-25T15:05:33.448299Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=39; 2025-06-25T15:05:33.448347Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=36448; 2025-06-25T15:05:33.448509Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108275536;raw_bytes=198365560;count=15;records=1915000} inactive {blob_bytes=205496480;raw_bytes=345889958;count=39;records=3635000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:05:33.448602Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:05:33.448656Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:05:33.448711Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:05:33.448751Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:05:33.448854Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:33.448925Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:05:33.448991Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863572693;tx_id=18446744073709551615;;current_snapshot_ts=1750863869612; 2025-06-25T15:05:33.449031Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:33.449066Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:33.449097Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:33.449176Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:33.452237Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:05:33.452560Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:05:33.452596Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:05:33.452620Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:05:33.452659Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:33.452723Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:05:33.452786Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863572693;tx_id=18446744073709551615;;current_snapshot_ts=1750863869612; 2025-06-25T15:05:33.452826Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:33.452883Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:33.452923Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:33.452989Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.999000s; 2025-06-25T15:05:33.453033Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> Backup::ProposeBackup ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadNoCompression [GOOD] Test command err: 2025-06-25T15:05:30.045635Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:30.062897Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:30.063058Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:30.067816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:30.067941Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 
2025-06-25T15:05:30.068078Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:30.068160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:30.068237Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:30.068295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:30.068371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:30.068441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:30.068518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:30.068599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:30.068658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:30.085944Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:30.086048Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:30.086077Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:30.086187Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:30.086325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:30.086385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:30.086422Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:30.086505Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:30.086551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:30.086586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:30.086618Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:30.086776Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:30.086822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:30.086844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:30.086859Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:30.086908Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:30.086944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:30.086976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:30.087001Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:30.087030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:30.087054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:30.087069Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:30.087189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:30.087224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:30.087240Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:30.087370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:30.087401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:30.087416Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:30.087499Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:30.087523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:30.087542Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:30.087590Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:30.087647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:30.087672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:30.087692Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:30.087832Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=40; 2025-06-25T15:05:30.087887Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=29; 2025-06-25T15:05:30.087939Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=27; 2025-06-25T15:05:30.087987Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=20; 2025-06-25T15:05:30.088085Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:30.088158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:30.088184Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:30.088223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:34.389455Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:05:34.389571Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;);columns=10;rows=31; 2025-06-25T15:05:34.389634Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=1984;num_rows=31;batch_columns=timestamp,resource_type,resource_id,uid,level,message,json_payload,ingested_at,saved_at,request_id; 2025-06-25T15:05:34.389846Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:986:2841];bytes=1984;rows=31;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us] resource_type: string resource_id: string uid: string level: int32 message: string json_payload: string ingested_at: timestamp[us] saved_at: timestamp[us] request_id: string; 2025-06-25T15:05:34.389974Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:34.390079Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:34.390179Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:34.390346Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:34.390463Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:34.390577Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:34.390610Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:987:2842] finished for tablet 9437184 2025-06-25T15:05:34.390963Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:986:2841];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.009},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.01}],"full":{"a":1750863934379694,"name":"_full_task","f":1750863934379694,"d_finished":0,"c":0,"l":1750863934390664,"d":10970},"events":[{"name":"bootstrap","f":1750863934379853,"d_finished":2449,"c":1,"l":1750863934382302,"d":2449},{"a":1750863934390322,"name":"ack","f":1750863934389250,"d_finished":948,"c":1,"l":1750863934390198,"d":1290},{"a":1750863934390310,"name":"processing","f":1750863934383126,"d_finished":4435,"c":10,"l":1750863934390200,"d":4789},{"name":"ProduceResults","f":1750863934381281,"d_finished":2463,"c":13,"l":1750863934390598,"d":2463},{"a":1750863934390601,"name":"Finish","f":1750863934390601,"d_finished":0,"c":0,"l":1750863934390664,"d":63},{"name":"task_result","f":1750863934383136,"d_finished":3401,"c":9,"l":1750863934389109,"d":3401}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:34.391028Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:986:2841];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:05:34.391398Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:986:2841];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.009},{"events":["l_ProduceResults","f_Finish"],"t":0.01},{"events":["l_ack","l_processing","l_Finish"],"t":0.011}],"full":{"a":1750863934379694,"name":"_full_task","f":1750863934379694,"d_finished":0,"c":0,"l":1750863934391060,"d":11366},"events":[{"name":"bootstrap","f":1750863934379853,"d_finished":2449,"c":1,"l":1750863934382302,"d":2449},{"a":1750863934390322,"name":"ack","f":1750863934389250,"d_finished":948,"c":1,"l":1750863934390198,"d":1686},{"a":1750863934390310,"name":"processing","f":1750863934383126,"d_finished":4435,"c":10,"l":1750863934390200,"d":5185},{"name":"ProduceResults","f":1750863934381281,"d_finished":2463,"c":13,"l":1750863934390598,"d":2463},{"a":1750863934390601,"name":"Finish","f":1750863934390601,"d_finished":0,"c":0,"l":1750863934391060,"d":459},{"name":"task_result","f":1750863934383136,"d_finished":3401,"c":9,"l":1750863934389109,"d":3401}],"id":"9437184::12"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;;); 2025-06-25T15:05:34.391454Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:05:34.379228Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=10;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=8392;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=8392;selected_rows=0; 2025-06-25T15:05:34.391488Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:05:34.391777Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=11;SelfId=[1:987:2842];TabletId=9437184;ScanId=0;TxId=103;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;program_input=(column_ids=1,2,3,4,5,6,7,8,9,10;column_names=ingested_at,json_payload,level,message,request_id,resource_id,resource_type,saved_at,timestamp,uid;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::SchemaVersionsNormalizer [GOOD] Test command err: 2025-06-25T15:05:32.990218Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:33.008138Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:33.008319Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:33.013622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SchemaVersionCleaner; 2025-06-25T15:05:33.013809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-06-25T15:05:33.013960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:33.014079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:33.014157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:33.014228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:33.014289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:33.014359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:33.014435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:33.014494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:33.014568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:33.014637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:33.031394Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:33.031497Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=SchemaVersionCleaner; 2025-06-25T15:05:33.031537Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-25T15:05:33.031771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SchemaVersionCleaner;id=NO_VALUE_OPTIONAL; 2025-06-25T15:05:33.031856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-06-25T15:05:33.031915Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-06-25T15:05:33.032088Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:33.032173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:33.032207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:33.032227Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-06-25T15:05:33.032291Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:33.032352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:33.032402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:33.032432Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:33.032524Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:33.032557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:33.032602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:33.032630Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:33.032700Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:33.032735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:33.032758Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:33.032774Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:33.032803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:33.032835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:33.032856Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:33.032887Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:33.032912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:33.032934Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:33.033053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:33.033089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:33.033104Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:33.033170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:33.033191Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 
2025-06-25T15:05:33.033205Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:33.033230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:33.033265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:33.033288Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:33.033311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:33.033337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:33.033390Z node 1 :T ... 81056Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-25T15:05:34.581092Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2; 2025-06-25T15:05:34.581138Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=20048;merger=0;interval_id=2; 2025-06-25T15:05:34.581177Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:05:34.581244Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:34.581266Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=20048;finished=1; 2025-06-25T15:05:34.581302Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:05:34.581510Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:34.581623Z node 1 
:TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:20048;schema=key1: uint64 key2: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:34.581656Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:05:34.581744Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;);columns=3;rows=20048; 2025-06-25T15:05:34.581810Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=2405760;num_rows=20048;batch_columns=key1,key2,field; 2025-06-25T15:05:34.581955Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:512:2512];bytes=2405760;rows=20048;faults=0;finished=0;fault=0;schema=key1: uint64 key2: uint64 field: string; 2025-06-25T15:05:34.582073Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:34.582156Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:34.582259Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 
2025-06-25T15:05:34.582886Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:34.582974Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:34.583048Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:34.583077Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:517:2516] finished for tablet 9437184 2025-06-25T15:05:34.583447Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:512:2512];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["l_task_result"],"t":0.191},{"events":["f_ack"],"t":0.192},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.193}],"full":{"a":1750863934389398,"name":"_full_task","f":1750863934389398,"d_finished":0,"c":0,"l":1750863934583133,"d":193735},"events":[{"name":"bootstrap","f":1750863934389561,"d_finished":2296,"c":1,"l":1750863934391857,"d":2296},{"a":1750863934582870,"name":"ack","f":1750863934581490,"d_finished":799,"c":1,"l":1750863934582289,"d":1062},{"a":1750863934582855,"name":"processing","f":1750863934393793,"d_finished":130899,"c":16,"l":1750863934582291,"d":131177},{"name":"ProduceResults","f":1750863934390842,"d_finished":2641,"c":19,"l":1750863934583068,"d":2641},{"a":1750863934583070,"name":"Finish","f":1750863934583070,"d_finished":0,"c":0,"l":1750863934583133,"d":63},{"name":"task_result","f":1750863934393808,"d_finished":129924,"c":15,"l":1750863934581349,"d":129924}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:34.583512Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:512:2512];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:05:34.583809Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:512:2512];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["l_task_result"],"t":0.191},{"events":["f_ack"],"t":0.192},{"events":["l_ProduceResults","f_Finish"],"t":0.193},{"events":["l_ack","l_processing","l_Finish"],"t":0.194}],"full":{"a":1750863934389398,"name":"_full_task","f":1750863934389398,"d_finished":0,"c":0,"l":1750863934583540,"d":194142},"events":[{"name":"bootstrap","f":1750863934389561,"d_finished":2296,"c":1,"l":1750863934391857,"d":2296},{"a":1750863934582870,"name":"ack","f":1750863934581490,"d_finished":799,"c":1,"l":1750863934582289,"d":1469},{"a":1750863934582855,"name":"processing","f":1750863934393793,"d_finished":130899,"c":16,"l":1750863934582291,"d":131584},{"name":"ProduceResults","f":1750863934390842,"d_finished":2641,"c":19,"l":1750863934583068,"d":2641},{"a":1750863934583070,"name":"Finish","f":1750863934583070,"d_finished":0,"c":0,"l":1750863934583540,"d":470},{"name":"task_result","f":1750863934393808,"d_finished":129924,"c":15,"l":1750863934581349,"d":129924}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:34.583877Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:05:34.388998Z;index_granules=0;index_portions=2;index_batches=748;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=2776560;inserted_portions_bytes=0;committed_portions_bytes=2488696;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=5265256;selected_rows=0; 2025-06-25T15:05:34.583906Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:05:34.584104Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:517:2516];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; >> Normalizers::EmptyTablesNormalizer >> TColumnShardTestReadWrite::WriteReadModifications >> EvWrite::WriteWithLock >> TColumnShardTestReadWrite::CompactionSplitGranule_PKDatetime [GOOD] >> Backup::ProposeBackup [GOOD] >> EvWrite::AbortInTransaction ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKDatetime [GOOD] Test command err: 2025-06-25T15:04:31.160036Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:31.190986Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:31.191228Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:31.197729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:31.197868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:31.198026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:31.198089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:31.198156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:31.198219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:31.198300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:31.198358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:31.198420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:31.198474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:31.198541Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:31.213979Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:31.214102Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:31.214142Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:31.214278Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:31.214397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:31.214452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:31.214482Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:31.214547Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:31.214585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:31.214610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:31.214634Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:31.214747Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:31.214792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:31.214814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:31.214832Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:31.214883Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:31.214914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:31.214938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:31.214952Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:31.214977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:31.214998Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:31.215015Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:31.215151Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:31.215175Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:31.215191Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:31.215327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:31.215356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:31.215372Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:31.215467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:31.215504Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:31.215521Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:31.215562Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:31.215599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:31.215622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:31.215641Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:31.215771Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=29; 2025-06-25T15:04:31.215833Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=34; 2025-06-25T15:04:31.215888Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=26; 2025-06-25T15:04:31.215935Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=24; 2025-06-25T15:04:31.215994Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:31.216033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:31.216058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:31.216100Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... nule;load_stage_name=EXECUTE:granule/portions;fline=constructor_meta.cpp:71;memory_size=2054;data_size=2030;sum=506740;count=432;size_of_meta=136; 2025-06-25T15:05:36.242104Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2126;data_size=2102;sum=522292;count=216;size_of_portion=208; 2025-06-25T15:05:36.242233Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=36138; 2025-06-25T15:05:36.242289Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=9; 2025-06-25T15:05:36.242937Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=608; 2025-06-25T15:05:36.242983Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=37000; 2025-06-25T15:05:36.243023Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=37110; 2025-06-25T15:05:36.243083Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=14; 
2025-06-25T15:05:36.243278Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=154; 2025-06-25T15:05:36.243315Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=37778; 2025-06-25T15:05:36.243456Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=96; 2025-06-25T15:05:36.243582Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=80; 2025-06-25T15:05:36.243730Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=108; 2025-06-25T15:05:36.243863Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=90; 2025-06-25T15:05:36.246962Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=3053; 2025-06-25T15:05:36.250216Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=3200; 2025-06-25T15:05:36.250281Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=8; 2025-06-25T15:05:36.250318Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=8; 2025-06-25T15:05:36.250346Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=4; 2025-06-25T15:05:36.250402Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=32; 2025-06-25T15:05:36.250460Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=4; 2025-06-25T15:05:36.250539Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=53; 2025-06-25T15:05:36.250568Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=4; 2025-06-25T15:05:36.250611Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=22; 2025-06-25T15:05:36.250668Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=33; 2025-06-25T15:05:36.250751Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=54; 2025-06-25T15:05:36.250785Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=50423; 2025-06-25T15:05:36.250951Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108238352;raw_bytes=183045560;count=15;records=1915000} inactive {blob_bytes=205426288;raw_bytes=316809958;count=39;records=3635000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:05:36.251044Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:05:36.251087Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:05:36.251155Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:05:36.251199Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:05:36.251316Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:36.251376Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:05:36.251443Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863575257;tx_id=18446744073709551615;;current_snapshot_ts=1750863872188; 2025-06-25T15:05:36.251504Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:36.251545Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:36.251583Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:36.251678Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:36.254988Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:05:36.255300Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:05:36.255331Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:05:36.255355Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:05:36.255392Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:36.255465Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:05:36.255536Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863575257;tx_id=18446744073709551615;;current_snapshot_ts=1750863872188; 2025-06-25T15:05:36.255573Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:36.255613Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:36.255648Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:36.255724Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.999000s; 2025-06-25T15:05:36.255783Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> EvWrite::AbortInTransaction [GOOD] >> EvWrite::WriteWithLock [GOOD] >> Normalizers::EmptyTablesNormalizer [GOOD] >> TColumnShardTestReadWrite::WriteReadModifications [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> EvWrite::WriteWithLock [GOOD] Test command err: 2025-06-25T15:05:36.680085Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:36.696539Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:36.696687Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:36.701643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:36.701793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:36.701931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:36.701995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:36.702051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:36.702120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:36.702194Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:36.702280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:36.702362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:36.702425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:36.702493Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:36.718294Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:36.718415Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:36.718447Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:36.718569Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:36.718734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:36.718817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:36.718845Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:36.718913Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:36.718959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:36.718995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:36.719026Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:36.719144Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:36.719187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:36.719223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:36.719262Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:36.719368Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:36.719416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:36.719442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:36.719458Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:36.719496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:36.719525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-25T15:05:36.719544Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:36.719686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:36.719728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:36.719750Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:36.719855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:36.719897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:36.719929Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:36.720094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:36.720148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:36.720193Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:36.720296Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:36.720392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:36.720435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:36.720481Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:36.720638Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=43; 2025-06-25T15:05:36.720731Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=42; 2025-06-25T15:05:36.720806Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=38; 2025-06-25T15:05:36.720858Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=25; 2025-06-25T15:05:36.720917Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:36.720964Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:36.720993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:36.721034Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;);columns=2;rows=2048; 2025-06-25T15:05:37.423020Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=229376;num_rows=2048;batch_columns=key,field; 2025-06-25T15:05:37.423201Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:301:2313];bytes=229376;rows=2048;faults=0;finished=0;fault=0;schema=key: uint64 field: string; 2025-06-25T15:05:37.423296Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:1;records_count:2048;schema=key: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.423381Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:2048;schema=key: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.423408Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:05:37.423436Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:05:37.423631Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 
2025-06-25T15:05:37.423731Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:2048;schema=key: uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.423774Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:05:37.423861Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;);columns=2;rows=2048; 2025-06-25T15:05:37.423899Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=229376;num_rows=2048;batch_columns=key,field; 2025-06-25T15:05:37.424002Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:301:2313];bytes=229376;rows=2048;faults=0;finished=0;fault=0;schema=key: uint64 field: string; 2025-06-25T15:05:37.424092Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.424157Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.424245Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.424455Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:37.424520Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.424563Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.424588Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:302:2314] finished for tablet 9437184 2025-06-25T15:05:37.424863Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:301:2313];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.034},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.036}],"full":{"a":1750863937388025,"name":"_full_task","f":1750863937388025,"d_finished":0,"c":0,"l":1750863937424632,"d":36607},"events":[{"name":"bootstrap","f":1750863937388181,"d_finished":3725,"c":1,"l":1750863937391906,"d":3725},{"a":1750863937424439,"name":"ack","f":1750863937422758,"d_finished":1343,"c":2,"l":1750863937424265,"d":1536},{"a":1750863937424422,"name":"processing","f":1750863937391934,"d_finished":16615,"c":18,"l":1750863937424267,"d":16825},{"name":"ProduceResults","f":1750863937390101,"d_finished":3614,"c":22,"l":1750863937424573,"d":3614},{"a":1750863937424574,"name":"Finish","f":1750863937424574,"d_finished":0,"c":0,"l":1750863937424632,"d":58},{"name":"task_result","f":1750863937391948,"d_finished":15030,"c":16,"l":1750863937422684,"d":15030}],"id":"9437184::3"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.424927Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:301:2313];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:05:37.425170Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:301:2313];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.034},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.036}],"full":{"a":1750863937388025,"name":"_full_task","f":1750863937388025,"d_finished":0,"c":0,"l":1750863937424954,"d":36929},"events":[{"name":"bootstrap","f":1750863937388181,"d_finished":3725,"c":1,"l":1750863937391906,"d":3725},{"a":1750863937424439,"name":"ack","f":1750863937422758,"d_finished":1343,"c":2,"l":1750863937424265,"d":1858},{"a":1750863937424422,"name":"processing","f":1750863937391934,"d_finished":16615,"c":18,"l":1750863937424267,"d":17147},{"name":"ProduceResults","f":1750863937390101,"d_finished":3614,"c":22,"l":1750863937424573,"d":3614},{"a":1750863937424574,"name":"Finish","f":1750863937424574,"d_finished":0,"c":0,"l":1750863937424954,"d":380},{"name":"task_result","f":1750863937391948,"d_finished":15030,"c":16,"l":1750863937422684,"d":15030}],"id":"9437184::3"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.425245Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:05:37.387619Z;index_granules=0;index_portions=2;index_batches=88;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=474480;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=474480;selected_rows=0; 2025-06-25T15:05:37.425268Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:05:37.425442Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:302:2314];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> EvWrite::AbortInTransaction [GOOD] Test command err: 2025-06-25T15:05:35.187938Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:35.205120Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:35.205281Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:35.210210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:35.210363Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:35.210512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:35.210575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:35.210630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:35.210707Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:35.210791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:35.210867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:35.210943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:35.210996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:35.211070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:35.227435Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:35.227552Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:35.227586Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:35.227733Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:35.227834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:35.227885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:35.227912Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:35.227962Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:35.227995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:35.228028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:35.228056Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:35.228164Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:35.228199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:35.228223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:35.228238Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:35.228363Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:35.228398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:35.228423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:35.228442Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:35.228477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:35.228506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-25T15:05:35.228524Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:35.228647Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:35.228681Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:35.228697Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:35.228814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:35.228842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:35.228857Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:35.228940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:35.228968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:35.228992Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:35.229046Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:35.229095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:35.229118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:35.229133Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:35.229262Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=28; 2025-06-25T15:05:35.229314Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=27; 2025-06-25T15:05:35.229366Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=27; 2025-06-25T15:05:35.229426Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=35; 2025-06-25T15:05:35.229488Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:35.229536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:35.229560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:35.229595Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... l_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_meta.cpp:71;memory_size=1588;data_size=1564;sum=2224;count=8;size_of_meta=136; 2025-06-25T15:05:37.402604Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=constructor_portion.cpp:40;memory_size=1660;data_size=1636;sum=2512;count=4;size_of_portion=208; 2025-06-25T15:05:37.404826Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 44 2025-06-25T15:05:37.405366Z node 2 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::NColumnShard::NPrivateEvents::NWrite::TEvWritePortionResult;tablet_id=9437184;event=TEvWritePortionResult;tablet_id=9437184;local_tx_no=4;method=execute;tx_info=TTxBlobsWritingFinished;tablet_id=9437184;tx_state=execute;fline=manager.h:156;event=add_by_insert_id;id=2;operation_id=1; 2025-06-25T15:05:37.418237Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 2:1 Blob count: 44 2025-06-25T15:05:37.418969Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=222;problem=finished; 2025-06-25T15:05:37.419065Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=9437184;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=222;problem=finished; 2025-06-25T15:05:37.419301Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750863937778 at tablet 9437184, mediator 0 2025-06-25T15:05:37.419363Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[5] execute at tablet 9437184 2025-06-25T15:05:37.419419Z node 2 :TX_COLUMNSHARD ERROR: ctor_logger.h:56: TxPlanStep[5] Ignore old txIds [112] for step 1750863937778 last planned step 1750863937778 at tablet 9437184 2025-06-25T15:05:37.419478Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[5] complete at tablet 9437184 2025-06-25T15:05:37.419815Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1750863937778:max} readable: {1750863937778:max} at tablet 
9437184 2025-06-25T15:05:37.419922Z node 2 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-25T15:05:37.422309Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863937778:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } } } ; 2025-06-25T15:05:37.422417Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863937778:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } Columns { Id: 2 } } } ; 2025-06-25T15:05:37.423327Z node 2 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863937778:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":4,"inputs":[{"from":5}]},{"owner_id":5,"inputs":[{"from":6}]},{"owner_id":6,"inputs":[]}],"nodes":{"2":{"p":{"i":"1","p":{"address":{"name":"key","id":1}},"o":"1","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"p":{"data":[{"name":"key","id":1},{"name":"field","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":6},"5":{"p":{"i":"0","p":{"data":[{"name":"key","id":1},{"name":"field","id":2}]},"o":"1,2","t":"FetchOriginalData"},"w":4,"id":5},"4":{"p":{"i":"2","p":{"address":{"name":"field","id":2}},"o":"2","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"1,2","t":"Projection"},"w":18,"id":0}}}; 2025-06-25T15:05:37.423476Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863937778:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:142;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 2025-06-25T15:05:37.424120Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: tablet_id=9437184;self_id=[2:111:2141];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1750863937778:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:172;event=TTxScan started;actor_id=[2:182:2194];trace_detailed=; 2025-06-25T15:05:37.424804Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:84;ff_first=(column_ids=1,2;column_names=field,key;);; 2025-06-25T15:05:37.425019Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; 2025-06-25T15:05:37.425329Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:182:2194];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:37.425478Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[2:182:2194];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.425592Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:182:2194];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.425641Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [2:182:2194] finished for tablet 9437184 2025-06-25T15:05:37.426052Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:182:2194];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[2:181:2193];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","l_ack","f_processing","l_processing","f_ProduceResults","l_ProduceResults","f_Finish","l_Finish"],"t":0.001}],"full":{"a":1750863937424035,"name":"_full_task","f":1750863937424035,"d_finished":0,"c":0,"l":1750863937425707,"d":1672},"events":[{"name":"bootstrap","f":1750863937424233,"d_finished":945,"c":1,"l":1750863937425178,"d":945},{"a":1750863937425295,"name":"ack","f":1750863937425295,"d_finished":0,"c":0,"l":1750863937425707,"d":412},{"a":1750863937425273,"name":"processing","f":1750863937425273,"d_finished":0,"c":0,"l":1750863937425707,"d":434},{"name":"ProduceResults","f":1750863937425164,"d_finished":268,"c":2,"l":1750863937425620,"d":268},{"a":1750863937425625,"name":"Finish","f":1750863937425625,"d_finished":0,"c":0,"l":1750863937425707,"d":82}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.426130Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:182:2194];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[2:181:2193];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:05:37.426499Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[2:182:2194];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[2:181:2193];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ack","f_processing","f_ProduceResults","l_ProduceResults","f_Finish"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1750863937424035,"name":"_full_task","f":1750863937424035,"d_finished":0,"c":0,"l":1750863937426176,"d":2141},"events":[{"name":"bootstrap","f":1750863937424233,"d_finished":945,"c":1,"l":1750863937425178,"d":945},{"a":1750863937425295,"name":"ack","f":1750863937425295,"d_finished":0,"c":0,"l":1750863937426176,"d":881},{"a":1750863937425273,"name":"processing","f":1750863937425273,"d_finished":0,"c":0,"l":1750863937426176,"d":903},{"name":"ProduceResults","f":1750863937425164,"d_finished":268,"c":2,"l":1750863937425620,"d":268},{"a":1750863937425625,"name":"Finish","f":1750863937425625,"d_finished":0,"c":0,"l":1750863937426176,"d":551}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;;); 2025-06-25T15:05:37.426582Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:182:2194];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:05:37.423431Z;index_granules=0;index_portions=0;index_batches=0;schema_columns=2;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-25T15:05:37.426628Z node 2 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[2:182:2194];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:05:37.426723Z node 2 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[2:182:2194];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1;column_names=key;);;ff=(column_ids=1,2;column_names=field,key;);;program_input=(column_ids=1,2;column_names=field,key;);;; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:49;event=insert_to_cache;key=string;records=0;size=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=string;records=0;count=0; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::EmptyTablesNormalizer [GOOD] Test command err: 2025-06-25T15:05:36.304398Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:36.322355Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:36.322518Z node 1 
:TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:36.327229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=PortionsCleaner; 2025-06-25T15:05:36.327377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-06-25T15:05:36.327502Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:36.327611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:36.327670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:36.327750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:36.327822Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:36.327890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:36.327967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:36.328030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:36.328094Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:36.328154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:36.343990Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:36.344087Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=PortionsCleaner; 2025-06-25T15:05:36.344116Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-25T15:05:36.344277Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=32; 2025-06-25T15:05:36.344388Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=63; 2025-06-25T15:05:36.344464Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-25T15:05:36.344544Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=46; 2025-06-25T15:05:36.344672Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=PortionsCleaner;id=NO_VALUE_OPTIONAL; 2025-06-25T15:05:36.344729Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-06-25T15:05:36.344758Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-06-25T15:05:36.344858Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:36.344907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:36.344938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:36.344967Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-06-25T15:05:36.345018Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:36.345047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:36.345070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:36.345084Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:36.345183Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:36.345216Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:36.345252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:36.345273Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:36.345334Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:36.345368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:36.345396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:36.345410Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:36.345436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:36.345456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:36.345481Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:36.345510Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:36.345540Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:36.345556Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:36.345678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:36.345700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:36.345718Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:36.345795Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:36.345818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:36.345839Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:36.345867Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:36.345891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0Chunks ... composite_init/tables_manager;fline=tables_manager.cpp:195;event=load_preset;preset_id=1;snapshot={1750863937285:10};version=1; 2025-06-25T15:05:37.396918Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:composite_init/tables_manager;fline=tables_manager.cpp:199;event=index_schema;preset_id=1;snapshot={1750863937285:10};version=1; 2025-06-25T15:05:37.401579Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:composite_init/tables_manager;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=4787; 2025-06-25T15:05:37.401673Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tables_managerLoadingTime=5361; 2025-06-25T15:05:37.402743Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:column_enginesLoadingTime=14; 2025-06-25T15:05:37.402938Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:countersLoadingTime=97; 2025-06-25T15:05:37.403078Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:countersLoadingTime=71; 2025-06-25T15:05:37.403158Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:sharding_infoLoadingTime=27; 2025-06-25T15:05:37.403214Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:sharding_infoLoadingTime=23; 2025-06-25T15:05:37.403272Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=9; 2025-06-25T15:05:37.403312Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=5; 2025-06-25T15:05:37.403361Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=547; 2025-06-25T15:05:37.403465Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=53; 2025-06-25T15:05:37.403586Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=81; 2025-06-25T15:05:37.403709Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=86; 2025-06-25T15:05:37.403814Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=72; 2025-06-25T15:05:37.403991Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=142; 2025-06-25T15:05:37.413689Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=9645; 2025-06-25T15:05:37.413788Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=12; 2025-06-25T15:05:37.413846Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=9; 2025-06-25T15:05:37.413885Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T15:05:37.413985Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=64; 2025-06-25T15:05:37.414028Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=5; 2025-06-25T15:05:37.414121Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=62; 2025-06-25T15:05:37.414159Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=5; 2025-06-25T15:05:37.414219Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=34; 2025-06-25T15:05:37.414315Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=56; 2025-06-25T15:05:37.414393Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=44; 2025-06-25T15:05:37.414434Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=18961; 2025-06-25T15:05:37.414549Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 0 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=0;raw_bytes=0;count=0;records=0} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:05:37.414652Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:05:37.414705Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:05:37.414792Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:05:37.414843Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:05:37.414891Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:37.415029Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-25T15:05:37.439758Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:37.439832Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:37.439870Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:37.439947Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:37.443360Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:05:37.443425Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:05:37.443445Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:05:37.443471Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:05:37.443516Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:37.443590Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=0; 2025-06-25T15:05:37.443648Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:37.443707Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:37.443749Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:37.443825Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:37.509344Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 111 scanId: 0 version: {1750863937330:111} readable: {1750863937330:max} at tablet 9437184 2025-06-25T15:05:37.509420Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:366:2374];ev=NKikimr::TEvDataShard::TEvKqpScan;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64;records=0;count=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:49;event=insert_to_cache;key=string;records=0;size=0; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=string;records=0;count=0; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadModifications [GOOD] Test command err: 2025-06-25T15:05:36.488279Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:36.506057Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:36.506213Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:36.511211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:36.511353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:36.511506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:36.511564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:36.511628Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:36.511726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:36.511795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:36.511851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:36.511940Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:36.512009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:36.512072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:36.527529Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:36.527631Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:36.527664Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:36.527799Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:36.527909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:36.527968Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:36.527998Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:36.528049Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:36.528081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:36.528109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:36.528139Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:36.528242Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:36.528287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:36.528332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:36.528357Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:36.528434Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:36.528473Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:36.528496Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:36.528512Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:36.528536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:36.528556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-25T15:05:36.528574Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:36.528698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:36.528723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:36.528741Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:36.528872Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:36.528913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:36.528930Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:36.529005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:36.529029Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:36.529046Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:36.529097Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:36.529136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:36.529158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:36.529176Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:36.529310Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=26; 2025-06-25T15:05:36.529385Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=34; 2025-06-25T15:05:36.529436Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=26; 2025-06-25T15:05:36.529492Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=31; 2025-06-25T15:05:36.529553Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:36.529598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:36.529631Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:36.529669Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... D_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=interval.cpp:28;event=fetched;interval_idx=0; 2025-06-25T15:05:37.638874Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=interval.cpp:17;event=start_construct_result;interval_idx=0;interval_id=6;memory=8394164;count=4; 2025-06-25T15:05:37.639335Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=merge.cpp:152;event=DoExecute;interval_idx=0; 2025-06-25T15:05:37.639849Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=source.cpp:52;event=source_ready;intervals_count=1;source_idx=3; 2025-06-25T15:05:37.639962Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:37.640006Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-25T15:05:37.640036Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:05:37.640133Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-25T15:05:37.640220Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:37.640255Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-25T15:05:37.640298Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:05:37.640562Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-25T15:05:37.640611Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-25T15:05:37.640673Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=6; 2025-06-25T15:05:37.640712Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=0;merger=0;interval_id=6; 2025-06-25T15:05:37.640745Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:05:37.640803Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:37.640883Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:37.641042Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:37.641127Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:37.641198Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:37.641232Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:392:2404] finished for tablet 9437184 2025-06-25T15:05:37.641628Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:388:2400];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.018}],"full":{"a":1750863937622860,"name":"_full_task","f":1750863937622860,"d_finished":0,"c":0,"l":1750863937641325,"d":18465},"events":[{"name":"bootstrap","f":1750863937623064,"d_finished":4345,"c":1,"l":1750863937627409,"d":4345},{"a":1750863937641026,"name":"ack","f":1750863937641026,"d_finished":0,"c":0,"l":1750863937641325,"d":299},{"a":1750863937641014,"name":"processing","f":1750863937628093,"d_finished":5560,"c":26,"l":1750863937640930,"d":5871},{"name":"ProduceResults","f":1750863937625765,"d_finished":3157,"c":28,"l":1750863937641216,"d":3157},{"a":1750863937641218,"name":"Finish","f":1750863937641218,"d_finished":0,"c":0,"l":1750863937641325,"d":107},{"name":"task_result","f":1750863937628105,"d_finished":5322,"c":26,"l":1750863937640929,"d":5322}],"id":"9437184::9"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:37.641675Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:388:2400];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:05:37.641954Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:388:2400];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.002},{"events":["l_bootstrap"],"t":0.004},{"events":["f_processing","f_task_result"],"t":0.005},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.018}],"full":{"a":1750863937622860,"name":"_full_task","f":1750863937622860,"d_finished":0,"c":0,"l":1750863937641712,"d":18852},"events":[{"name":"bootstrap","f":1750863937623064,"d_finished":4345,"c":1,"l":1750863937627409,"d":4345},{"a":1750863937641026,"name":"ack","f":1750863937641026,"d_finished":0,"c":0,"l":1750863937641712,"d":686},{"a":1750863937641014,"name":"processing","f":1750863937628093,"d_finished":5560,"c":26,"l":1750863937640930,"d":6258},{"name":"ProduceResults","f":1750863937625765,"d_finished":3157,"c":28,"l":1750863937641216,"d":3157},{"a":1750863937641218,"name":"Finish","f":1750863937641218,"d_finished":0,"c":0,"l":1750863937641712,"d":494},{"name":"task_result","f":1750863937628105,"d_finished":5322,"c":26,"l":1750863937640929,"d":5322}],"id":"9437184::9"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-25T15:05:37.642014Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:05:37.622338Z;index_granules=0;index_portions=4;index_batches=4;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=9344;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=9344;selected_rows=0; 2025-06-25T15:05:37.642048Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:05:37.642294Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:392:2404];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> TColumnShardTestReadWrite::CompactionSplitGranule_PKTimestamp >> Normalizers::ChunksV0MetaNormalizer >> TColumnShardTestReadWrite::ReadWithProgram >> TColumnShardTestReadWrite::ReadWithProgramLike >> TColumnShardTestReadWrite::WriteStandaloneExoticTypes >> AnalyzeDatashard::AnalyzeTwoTables [GOOD] >> TColumnShardTestReadWrite::ReadWithProgram [GOOD] >> TColumnShardTestReadWrite::ReadWithProgramLike [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeDatashard::AnalyzeTwoTables [GOOD] Test command err: 2025-06-25T15:03:09.752462Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: 
[1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:09.753016Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:09.753151Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/002022/r3tmp/tmpQvOWtr/pdisk_1.dat 2025-06-25T15:03:10.302890Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10964, node 1 2025-06-25T15:03:10.928240Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:10.928297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:10.928344Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:10.928970Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:10.942091Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:11.046768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:11.046916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:11.064944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30484 2025-06-25T15:03:11.682515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:14.718619Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:14.756960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:14.757078Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:14.799249Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:14.801519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:15.072465Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:15.109770Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:15.112391Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:15.112922Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:15.113075Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:15.113282Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:15.113356Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:15.113428Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:15.113495Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:15.113562Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:15.311407Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:15.311491Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:15.324598Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:15.475407Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:15.532638Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:15.532747Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:15.571311Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:15.571508Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:15.571712Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:15.571803Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:15.571859Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:15.571926Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:15.571975Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:15.572028Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:15.572609Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:15.597101Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:15.597219Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:15.606733Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:15.616789Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:15.617066Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:15.626953Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:15.652175Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:15.652232Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:15.652298Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:15.667000Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:15.674359Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:15.674499Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:15.905347Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:16.041288Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:16.110724Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:16.721216Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:17.207218Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:17.207446Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:17.302985Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:17.826384Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2452:3074], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:17.826556Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:17.827863Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2457:3078]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:03:17.828054Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:03:17.828139Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2459:3080] 2025-06-25T15:03:17.829075Z no ... gateStatistics, node id = 2 2025-06-25T15:05:31.167823Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7065:4965], StatRequests.size() = 1 2025-06-25T15:05:31.344853Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NmQ3ZWU1NzktZjVlMmIwNjUtZGVhZGQ2ZTMtYmY2YTkzNWE=, TxId: 2025-06-25T15:05:31.344930Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NmQ3ZWU1NzktZjVlMmIwNjUtZGVhZGQ2ZTMtYmY2YTkzNWE=, TxId: 2025-06-25T15:05:31.347018Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:31.360371Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-25T15:05:31.360449Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T15:05:31.402743Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:05:31.402811Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:05:31.497400Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7067:4967], schemeshard count = 1 2025-06-25T15:05:32.621105Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:05:32.621205Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-25T15:05:32.621266Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:645: [72075186224037894] ScheduleNextAnalyze. Skip analyze for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:33.797260Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T15:05:33.818481Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:05:33.818611Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-25T15:05:33.818646Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:33.819002Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:05:33.821364Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T15:05:33.829372Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDc2YmJmZDctZDM3NDI4MDQtY2M0YTgyZjAtYzVmOWY3Zg==, TxId: 2025-06-25T15:05:33.829415Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDc2YmJmZDctZDM3NDI4MDQtY2M0YTgyZjAtYzVmOWY3Zg==, TxId: 2025-06-25T15:05:33.829710Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:33.842377Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:33.842434Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:47: [72075186224037894] TTxFinishTraversal::Complete. Don't send TEvAnalyzeResponse. There are pending operations, OperationId operationId , ActorId=[1:3011:3264] 2025-06-25T15:05:35.123953Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:05:35.124015Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is data table. 2025-06-25T15:05:35.124045Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:645: [72075186224037894] ScheduleNextAnalyze. Skip analyze for datashard table [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-25T15:05:36.312449Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-25T15:05:36.312784Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:05:36.312974Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:05:36.334510Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:05:36.334586Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-25T15:05:36.334621Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:05:36.334964Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:05:36.337654Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T15:05:36.346683Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OGM5Yjk4MjUtZTlkYmYyMjYtM2I4MDI4M2ItYjViZWZkMmQ=, TxId: 2025-06-25T15:05:36.346743Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OGM5Yjk4MjUtZTlkYmYyMjYtM2I4MDI4M2ItYjViZWZkMmQ=, TxId: 2025-06-25T15:05:36.347477Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:36.360509Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:05:36.360560Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T15:05:37.604988Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:05:37.605047Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:05:37.605086Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:05:38.844590Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:05:38.844715Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is data table. 2025-06-25T15:05:38.844755Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-25T15:05:38.845115Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:05:38.847632Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T15:05:38.855857Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzI5ZDBmLWJiNzE2YjY3LTVjY2M0NmIxLWRjNzQ1MzQz, TxId: 2025-06-25T15:05:38.855919Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzI5ZDBmLWJiNzE2YjY3LTVjY2M0NmIxLWRjNzQ1MzQz, TxId: 2025-06-25T15:05:38.856287Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:38.869700Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-25T15:05:38.869767Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3011:3264] 2025-06-25T15:05:38.870242Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7362:5127]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:05:38.873129Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:05:38.873188Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:05:38.877279Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:05:38.877362Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:05:38.877421Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:05:38.879624Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-25T15:05:38.879887Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 2025-06-25T15:05:38.880285Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:7392:5139]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:05:38.882965Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:05:38.883034Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:05:38.883562Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:05:38.883623Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:05:38.883695Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 5] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:05:38.885883Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 2 ], RowsCount[ 0 ] 2025-06-25T15:05:38.886093Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadWithProgram [GOOD] Test command err: 2025-06-25T15:05:39.470799Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:39.487959Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:39.488151Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:39.493909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:39.494066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:39.494247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:39.494322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:39.494377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:39.494449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:39.494519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:39.494601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:39.494676Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:39.494746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:39.494804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:39.510543Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:39.510682Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:39.510721Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:39.510867Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:39.510985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:39.511042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:39.511074Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:39.511125Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:39.511163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:39.511192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:39.511225Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:39.511350Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:39.511392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:39.511419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:39.511441Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:39.511508Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:39.511557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:39.511582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:39.511598Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:39.511627Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:39.511664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-25T15:05:39.511695Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:39.511842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:39.511874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:39.511891Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:39.512009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:39.512038Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:39.512057Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:39.512174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:39.512205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:39.512224Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:39.512282Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:39.512364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:39.512391Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:39.512410Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:39.512552Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=38; 2025-06-25T15:05:39.512617Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=31; 2025-06-25T15:05:39.512675Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=27; 2025-06-25T15:05:39.512744Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=35; 2025-06-25T15:05:39.512821Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:39.512885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:39.512910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:39.512950Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... =saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:05:40.198563Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-25T15:05:40.198579Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:05:40.198614Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-25T15:05:40.198631Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=fetching.cpp:17;event=apply; 2025-06-25T15:05:40.198653Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=interval.cpp:28;event=fetched;interval_idx=0; 2025-06-25T15:05:40.198678Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=interval.cpp:17;event=start_construct_result;interval_idx=0;interval_id=2;memory=8398003;count=1; 2025-06-25T15:05:40.198951Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=merge.cpp:152;event=DoExecute;interval_idx=0; 2025-06-25T15:05:40.200847Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=source.cpp:52;event=source_ready;intervals_count=1;source_idx=0; 2025-06-25T15:05:40.200937Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:05:40.200971Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=0; 2025-06-25T15:05:40.200991Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:05:40.201100Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-25T15:05:40.201134Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-25T15:05:40.201171Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2; 2025-06-25T15:05:40.201218Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=0;merger=0;interval_id=2; 2025-06-25T15:05:40.201281Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:05:40.201396Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:05:40.201520Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:05:40.201725Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:40.201830Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:05:40.201901Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:05:40.201935Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:292:2304] finished for tablet 9437184 2025-06-25T15:05:40.202284Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:291:2303];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ProduceResults"],"t":0.001},{"events":["f_processing","f_task_result"],"t":0.002},{"events":["l_task_result"],"t":0.007},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.008}],"full":{"a":1750863940193698,"name":"_full_task","f":1750863940193698,"d_finished":0,"c":0,"l":1750863940201983,"d":8285},"events":[{"name":"bootstrap","f":1750863940193817,"d_finished":1645,"c":1,"l":1750863940195462,"d":1645},{"a":1750863940201699,"name":"ack","f":1750863940201699,"d_finished":0,"c":0,"l":1750863940201983,"d":284},{"a":1750863940201686,"name":"processing","f":1750863940196163,"d_finished":3956,"c":9,"l":1750863940201596,"d":4253},{"name":"ProduceResults","f":1750863940194763,"d_finished":1315,"c":11,"l":1750863940201921,"d":1315},{"a":1750863940201923,"name":"Finish","f":1750863940201923,"d_finished":0,"c":0,"l":1750863940201983,"d":60},{"name":"task_result","f":1750863940196173,"d_finished":3881,"c":9,"l":1750863940201593,"d":3881}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:05:40.202346Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:291:2303];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:05:40.202638Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:291:2303];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["l_bootstrap","f_ProduceResults"],"t":0.001},{"events":["f_processing","f_task_result"],"t":0.002},{"events":["l_task_result"],"t":0.007},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.008}],"full":{"a":1750863940193698,"name":"_full_task","f":1750863940193698,"d_finished":0,"c":0,"l":1750863940202377,"d":8679},"events":[{"name":"bootstrap","f":1750863940193817,"d_finished":1645,"c":1,"l":1750863940195462,"d":1645},{"a":1750863940201699,"name":"ack","f":1750863940201699,"d_finished":0,"c":0,"l":1750863940202377,"d":678},{"a":1750863940201686,"name":"processing","f":1750863940196163,"d_finished":3956,"c":9,"l":1750863940201596,"d":4647},{"name":"ProduceResults","f":1750863940194763,"d_finished":1315,"c":11,"l":1750863940201921,"d":1315},{"a":1750863940201923,"name":"Finish","f":1750863940201923,"d_finished":0,"c":0,"l":1750863940202377,"d":454},{"name":"task_result","f":1750863940196173,"d_finished":3881,"c":9,"l":1750863940201593,"d":3881}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;;); 2025-06-25T15:05:40.202697Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:05:40.193302Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=8392;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=8392;selected_rows=0; 2025-06-25T15:05:40.202733Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:05:40.202970Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:292:2304];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=1,9;column_names=saved_at,timestamp;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;program_input=(column_ids=1,5,9;column_names=level,saved_at,timestamp;);;; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadWithProgramLike [GOOD] Test command err: 2025-06-25T15:05:39.566849Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:39.597787Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:39.598045Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 
2025-06-25T15:05:39.605609Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:39.605866Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:39.606115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:39.606232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:39.606326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:39.606450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:39.606567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:39.606704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:39.606830Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:39.606953Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:39.607058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:39.630625Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:39.630760Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:39.630802Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:39.630944Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:39.631085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:39.631150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:39.631179Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:39.631243Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:39.631286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:39.631324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:39.631364Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:39.631512Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:39.631558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:39.631589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:39.631608Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:39.631681Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:39.631724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:39.631754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:39.631771Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:39.631798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:39.631820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-25T15:05:39.631845Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:39.631984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:39.632013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:39.632031Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:39.632150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:39.632179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:39.632214Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:39.632361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:39.632401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:39.632420Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:39.632473Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:39.632551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:39.632582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:39.632601Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:39.632754Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=39; 2025-06-25T15:05:39.632821Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=34; 2025-06-25T15:05:39.632906Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=42; 2025-06-25T15:05:39.632960Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=25; 2025-06-25T15:05:39.633025Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:39.633076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:39.633104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:39.633143Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... actor.cpp:85;event=TEvTaskProcessedResult; 2025-06-25T15:05:40.363007Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-25T15:05:40.363037Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=6; 2025-06-25T15:05:40.363092Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=10;merger=0;interval_id=6; 2025-06-25T15:05:40.363123Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:05:40.363184Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-25T15:05:40.363214Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=10;finished=1; 2025-06-25T15:05:40.363238Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:05:40.363394Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:40.363460Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:10;schema=message: 
string;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-25T15:05:40.363488Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:05:40.363535Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;);columns=1;rows=10; 2025-06-25T15:05:40.363559Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=61;num_rows=10;batch_columns=message; 2025-06-25T15:05:40.363714Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:307:2319];bytes=61;rows=10;faults=0;finished=0;fault=0;schema=message: string; 2025-06-25T15:05:40.363794Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-25T15:05:40.363854Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-25T15:05:40.363901Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-25T15:05:40.363967Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 
2025-06-25T15:05:40.364022Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-25T15:05:40.364077Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-25T15:05:40.364102Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:308:2320] finished for tablet 9437184 2025-06-25T15:05:40.364330Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:307:2319];stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.001},{"events":["l_task_result"],"t":0.005},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.006}],"full":{"a":1750863940357304,"name":"_full_task","f":1750863940357304,"d_finished":0,"c":0,"l":1750863940364130,"d":6826},"events":[{"name":"bootstrap","f":1750863940357397,"d_finished":1416,"c":1,"l":1750863940358813,"d":1416},{"a":1750863940363958,"name":"ack","f":1750863940363382,"d_finished":530,"c":1,"l":1750863940363912,"d":702},{"a":1750863940363952,"name":"processing","f":1750863940358837,"d_finished":3334,"c":9,"l":1750863940363913,"d":3512},{"name":"ProduceResults","f":1750863940358178,"d_finished":1458,"c":12,"l":1750863940364090,"d":1458},{"a":1750863940364092,"name":"Finish","f":1750863940364092,"d_finished":0,"c":0,"l":1750863940364130,"d":38},{"name":"task_result","f":1750863940358843,"d_finished":2706,"c":8,"l":1750863940363291,"d":2706}],"id":"9437184::6"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-25T15:05:40.364371Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:307:2319];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:05:40.364592Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:307:2319];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","f_ProduceResults"],"t":0},{"events":["l_bootstrap","f_processing","f_task_result"],"t":0.001},{"events":["l_task_result"],"t":0.005},{"events":["f_ack","l_ProduceResults","f_Finish"],"t":0.006},{"events":["l_ack","l_processing","l_Finish"],"t":0.007}],"full":{"a":1750863940357304,"name":"_full_task","f":1750863940357304,"d_finished":0,"c":0,"l":1750863940364390,"d":7086},"events":[{"name":"bootstrap","f":1750863940357397,"d_finished":1416,"c":1,"l":1750863940358813,"d":1416},{"a":1750863940363958,"name":"ack","f":1750863940363382,"d_finished":530,"c":1,"l":1750863940363912,"d":962},{"a":1750863940363952,"name":"processing","f":1750863940358837,"d_finished":3334,"c":9,"l":1750863940363913,"d":3772},{"name":"ProduceResults","f":1750863940358178,"d_finished":1458,"c":12,"l":1750863940364090,"d":1458},{"a":1750863940364092,"name":"Finish","f":1750863940364092,"d_finished":0,"c":0,"l":1750863940364390,"d":298},{"name":"task_result","f":1750863940358843,"d_finished":2706,"c":8,"l":1750863940363291,"d":2706}],"id":"9437184::6"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;;); 2025-06-25T15:05:40.364656Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:05:40.356985Z;index_granules=0;index_portions=1;index_batches=1;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=8392;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=8392;selected_rows=0; 2025-06-25T15:05:40.364675Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:05:40.364817Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:308:2320];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=6;column_names=message;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=6;column_names=message;);;program_input=(column_ids=6;column_names=message;);;; >> Normalizers::ChunksV0MetaNormalizer [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> Normalizers::ChunksV0MetaNormalizer [GOOD] Test command err: 2025-06-25T15:05:39.414346Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:39.431382Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:39.431545Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:39.436425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:39.436602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=NO_VALUE_OPTIONAL; 2025-06-25T15:05:39.436712Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:39.436824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:39.436883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:39.436948Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:39.437017Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:39.437083Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:39.437181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:39.437240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:39.437322Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:39.437385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:127:2157];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:39.453701Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:39.453808Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:39.453845Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=NO_VALUE_OPTIONAL;type=NO_VALUE_OPTIONAL; 2025-06-25T15:05:39.453956Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=NO_VALUE_OPTIONAL; 2025-06-25T15:05:39.454013Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Granules;id=Granules; 2025-06-25T15:05:39.454041Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=1;type=Granules; 2025-06-25T15:05:39.454179Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:39.454256Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:39.454287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:39.454306Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=2;type=Chunks; 2025-06-25T15:05:39.454368Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:39.454418Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:39.454449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:39.454476Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:39.454578Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:39.454611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:39.454637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:39.454651Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:39.454708Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:39.454740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:39.454764Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:39.454788Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:39.454814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:39.454838Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:39.454854Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:39.454890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:39.454923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:39.454940Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:39.455052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:39.455088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:39.455104Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:39.455202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:39.455228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:39.455252Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:39.455283Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:39.455317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:39.455338Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=NO_VALUE_OPTIONAL;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:39.455363Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:39.455387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:39.455428Z node 1 :TX_C ... =TEvTaskProcessedResult; 2025-06-25T15:05:40.967825Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=merge.cpp:75;event=DoApply;interval_idx=0; 2025-06-25T15:05:40.967856Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2; 2025-06-25T15:05:40.967903Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=20048;merger=0;interval_id=2; 2025-06-25T15:05:40.967935Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:05:40.967988Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:40.968009Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=20048;finished=1; 2025-06-25T15:05:40.968027Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:05:40.968159Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:40.968264Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:20048;schema=key1: uint64 key2: 
uint64 field: string;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:40.968296Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:05:40.968399Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;);columns=3;rows=20048; 2025-06-25T15:05:40.968469Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=2405760;num_rows=20048;batch_columns=key1,key2,field; 2025-06-25T15:05:40.968608Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:509:2509];bytes=2405760;rows=20048;faults=0;finished=0;fault=0;schema=key1: uint64 key2: uint64 field: string; 2025-06-25T15:05:40.968698Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:40.968764Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:40.968837Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:40.969390Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:05:40.969492Z node 1 
:TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:40.969595Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:40.969624Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [1:511:2510] finished for tablet 9437184 2025-06-25T15:05:40.969907Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[1:509:2509];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.192},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.193}],"full":{"a":1750863940775991,"name":"_full_task","f":1750863940775991,"d_finished":0,"c":0,"l":1750863940969658,"d":193667},"events":[{"name":"bootstrap","f":1750863940776161,"d_finished":2437,"c":1,"l":1750863940778598,"d":2437},{"a":1750863940969367,"name":"ack","f":1750863940968141,"d_finished":718,"c":1,"l":1750863940968859,"d":1009},{"a":1750863940969356,"name":"processing","f":1750863940779881,"d_finished":127309,"c":16,"l":1750863940968861,"d":127611},{"name":"ProduceResults","f":1750863940777568,"d_finished":2628,"c":19,"l":1750863940969612,"d":2628},{"a":1750863940969615,"name":"Finish","f":1750863940969615,"d_finished":0,"c":0,"l":1750863940969658,"d":43},{"name":"task_result","f":1750863940779903,"d_finished":126398,"c":15,"l":1750863940968066,"d":126398}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:40.969959Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[1:509:2509];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:05:40.970318Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[1:509:2509];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.003},{"events":["f_ack","l_task_result"],"t":0.192},{"events":["l_ProduceResults","f_Finish"],"t":0.193},{"events":["l_ack","l_processing","l_Finish"],"t":0.194}],"full":{"a":1750863940775991,"name":"_full_task","f":1750863940775991,"d_finished":0,"c":0,"l":1750863940970013,"d":194022},"events":[{"name":"bootstrap","f":1750863940776161,"d_finished":2437,"c":1,"l":1750863940778598,"d":2437},{"a":1750863940969367,"name":"ack","f":1750863940968141,"d_finished":718,"c":1,"l":1750863940968859,"d":1364},{"a":1750863940969356,"name":"processing","f":1750863940779881,"d_finished":127309,"c":16,"l":1750863940968861,"d":127966},{"name":"ProduceResults","f":1750863940777568,"d_finished":2628,"c":19,"l":1750863940969612,"d":2628},{"a":1750863940969615,"name":"Finish","f":1750863940969615,"d_finished":0,"c":0,"l":1750863940970013,"d":398},{"name":"task_result","f":1750863940779903,"d_finished":126398,"c":15,"l":1750863940968066,"d":126398}],"id":"9437184::2"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;;); 2025-06-25T15:05:40.970383Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:05:40.775523Z;index_granules=0;index_portions=2;index_batches=748;schema_columns=3;filter_columns=0;additional_columns=0;compacted_portions_bytes=2776560;inserted_portions_bytes=0;committed_portions_bytes=2488696;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=5265256;selected_rows=0; 2025-06-25T15:05:40.970411Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:05:40.970595Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:511:2510];TabletId=9437184;ScanId=0;TxId=111;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2;column_names=key1,key2;);;ff=(column_ids=1,2,3;column_names=field,key1,key2;);;program_input=(column_ids=1,2,3;column_names=field,key1,key2;);;; >> TColumnShardTestReadWrite::Write >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt32 [GOOD] >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKUtf8 >> TColumnShardTestReadWrite::WriteStandaloneExoticTypes [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteStandaloneExoticTypes [GOOD] Test command err: 2025-06-25T15:05:39.694111Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:39.714334Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:39.714540Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:39.719750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:39.719884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:39.720026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:39.720085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:39.720148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:39.720208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:39.720317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:39.720386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:39.720448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:39.720512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:39.720568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:39.735604Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:39.735739Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:39.735772Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:39.735898Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:39.736028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:39.736088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:39.736122Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:39.736183Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:39.736223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:39.736247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:39.736275Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:39.736422Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:39.736483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:39.736519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:39.736536Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:39.736602Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:39.736649Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:39.736683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:39.736705Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:39.736760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:39.736790Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:39.736820Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:39.736967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:39.736994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:39.737021Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:39.737180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:39.737241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:39.737279Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:39.737399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:39.737438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:39.737459Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:39.737503Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:39.737564Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:39.737598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:39.737625Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:39.737787Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=28; 2025-06-25T15:05:39.737846Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=31; 2025-06-25T15:05:39.737903Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=28; 2025-06-25T15:05:39.737952Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=23; 2025-06-25T15:05:39.738005Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:39.738053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:39.738115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:39.738157Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... [{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"19,19,19,19,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"20,20,20,20,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"21,21,21,21,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"22,22,22,22,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"23,23,23,23,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"24,24,24,24,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"25,25,25,25,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"26,26,26,26,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"27,27,27,27,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"28,28,28,28,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"29,29,29,29,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"30,30,30,30,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"31,31,31,31,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"32,32,32,32,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"33,33,33,33,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"34,34,34,34,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},
"id":1}]},"p":{"include":0,"pk":"35,35,35,35,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"36,36,36,36,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"37,37,37,37,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"38,38,38,38,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"39,39,39,39,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"40,40,40,40,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"41,41,41,41,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"42,42,42,42,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"43,43,43,43,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"44,44,44,44,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"45,45,45,45,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"46,46,46,46,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"47,47,47,47,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"48,48,48,48,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"49,49,49,49,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"50,50,50,50,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"51,51,51,51,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"52,52,52,52,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"53,53,53,53,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"54,54,54,54,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"55,55,55,55,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"56,56,56,56,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"57,57,57,57,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"58,58,58,58,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc"
:{"count_include":1},"id":1}]},"p":{"include":0,"pk":"59,59,59,59,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"60,60,60,60,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"61,61,61,61,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"62,62,62,62,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"63,63,63,63,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"64,64,64,64,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"65,65,65,65,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"66,66,66,66,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"67,67,67,67,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"68,68,68,68,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"69,69,69,69,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"70,70,70,70,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"71,71,71,71,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"72,72,72,72,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"73,73,73,73,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"74,74,74,74,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"75,75,75,75,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"76,76,76,76,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"77,77,77,77,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"78,78,78,78,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"79,79,79,79,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"80,80,80,80,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"81,81,81,81,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"82,82,82,82,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1
}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"83,83,83,83,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"84,84,84,84,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"85,85,85,85,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"86,86,86,86,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"87,87,87,87,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"88,88,88,88,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"89,89,89,89,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"90,90,90,90,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"91,91,91,91,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"92,92,92,92,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"93,93,93,93,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"94,94,94,94,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"95,95,95,95,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"96,96,96,96,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"97,97,97,97,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"98,98,98,98,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"99,99,99,99,"}}]}; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt32 [GOOD] Test command err: 2025-06-25T15:04:39.912391Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:39.931164Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:39.931329Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:39.936398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:39.936575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:39.936730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:39.936798Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:39.936855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:39.936925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:39.937010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:39.937081Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:39.937149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:39.937203Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:39.937278Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:39.955379Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:39.955563Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:39.955623Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:39.955811Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:39.956002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:39.956091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:39.956141Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:39.956222Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:39.956280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:39.956333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:39.956379Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:39.956555Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:39.956625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:39.956662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:39.956707Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:39.956790Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:39.956840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:39.956874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:39.956898Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:39.956926Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:39.956950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:39.956983Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-25T15:04:39.957124Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:39.957151Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:39.957167Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:39.957289Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:39.957323Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:39.957339Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:39.957438Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:39.957466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:39.957492Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:39.957536Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:39.957586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:39.957616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:39.957637Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:39.957762Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=31; 2025-06-25T15:04:39.957829Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=39; 2025-06-25T15:04:39.957892Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=32; 2025-06-25T15:04:39.957941Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=25; 
2025-06-25T15:04:39.958003Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:39.958046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:39.958072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:39.958107Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... anule;load_stage_name=EXECUTE:granule/portions;fline=constructor_meta.cpp:71;memory_size=2054;data_size=2030;sum=506740;count=432;size_of_meta=136; 2025-06-25T15:05:42.337348Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2126;data_size=2102;sum=522292;count=216;size_of_portion=208; 2025-06-25T15:05:42.337471Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=14544; 2025-06-25T15:05:42.337533Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=11; 2025-06-25T15:05:42.338155Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=586; 2025-06-25T15:05:42.338189Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=15387; 2025-06-25T15:05:42.338225Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=15486; 2025-06-25T15:05:42.338265Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=8; 2025-06-25T15:05:42.338421Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=127; 2025-06-25T15:05:42.338447Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=16018; 
2025-06-25T15:05:42.338584Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=76; 2025-06-25T15:05:42.338673Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=56; 2025-06-25T15:05:42.338797Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=93; 2025-06-25T15:05:42.338901Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=68; 2025-06-25T15:05:42.341861Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2921; 2025-06-25T15:05:42.344775Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=2850; 2025-06-25T15:05:42.344838Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=7; 2025-06-25T15:05:42.344879Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=5; 2025-06-25T15:05:42.344908Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=4; 2025-06-25T15:05:42.344957Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=27; 2025-06-25T15:05:42.344994Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=4; 2025-06-25T15:05:42.345060Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=43; 2025-06-25T15:05:42.345087Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=4; 2025-06-25T15:05:42.345132Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=24; 2025-06-25T15:05:42.345185Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=28; 2025-06-25T15:05:42.345269Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=50; 2025-06-25T15:05:42.345305Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=27482; 2025-06-25T15:05:42.345445Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108238352;raw_bytes=183045560;count=15;records=1915000} inactive {blob_bytes=205426288;raw_bytes=316809958;count=39;records=3635000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:05:42.345523Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:05:42.345561Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:05:42.345615Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:05:42.345656Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:05:42.345743Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:42.345806Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:05:42.345876Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863584019;tx_id=18446744073709551615;;current_snapshot_ts=1750863880950; 2025-06-25T15:05:42.345914Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:42.345947Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:42.345977Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:42.346060Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:42.349210Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:05:42.349482Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:05:42.349513Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:05:42.349554Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:05:42.349606Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:42.349671Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:05:42.349728Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863584019;tx_id=18446744073709551615;;current_snapshot_ts=1750863880950; 2025-06-25T15:05:42.349765Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:42.349807Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:42.349838Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:42.349904Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.999000s; 2025-06-25T15:05:42.349951Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4074:6043];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> TColumnShardTestReadWrite::CompactionInGranule_PKUtf8 [GOOD] |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false >> TSchemeShardServerLess::Fake [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKUtf8 [GOOD] Test command err: 2025-06-25T15:04:55.392862Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:55.409767Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:55.409944Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:55.415118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 
2025-06-25T15:04:55.415244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:55.415397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:55.415521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:55.415587Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:55.415639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:55.415721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:55.415779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:55.415844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:55.415928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:55.415991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:55.432099Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:55.432217Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:55.432252Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:55.432395Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:55.432530Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:55.432585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:55.432624Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:55.432678Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:55.432720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:55.432743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:55.432767Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:55.432885Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:55.432920Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:55.432951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:55.432988Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:55.433042Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:55.433073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:55.433095Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:55.433116Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:55.433141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:55.433163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:55.433184Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-25T15:04:55.433316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:55.433338Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:55.433362Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:55.433470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:55.433506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:55.433526Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:55.433604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:55.433625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:55.433645Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:55.433682Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:55.433728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:55.433748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:55.433763Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:55.433890Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=32; 2025-06-25T15:04:55.433949Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=28; 2025-06-25T15:04:55.434015Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=31; 2025-06-25T15:04:55.434092Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=43; 
2025-06-25T15:04:55.434158Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:55.434223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:55.434258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:55.434299Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... mn_id:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:9288];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:
9296];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9296];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9312];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9304];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:8592];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:8280];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:8288];;;;switched=(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7574640;index_size:0;meta:(()););(portion_id:218;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7570008;index_size:0;meta:(()););(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7574640;index_size:0;meta:(()););; 2025-06-25T15:05:43.053864Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:6028:8014];task_id=db9eaf58-51d511f0-aba1b1c9-85182984;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=3; 2025-06-25T15:05:43.056033Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-25T15:05:43.060142Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-25T15:05:43.144949Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-25T15:05:43.145038Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7570008;count=819;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-25T15:05:43.558885Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-25T15:05:43.558991Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=9999,9999,9999,9999,; 2025-06-25T15:05:43.559029Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7433340;count=1;packed=7574640; 2025-06-25T15:05:43.559092Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=86;data_size=62;sum=95362;count=1749; 2025-06-25T15:05:43.559146Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=26414;data_size=26406;sum=2613226;count=1750;size_of_meta=136; 2025-06-25T15:05:43.559192Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:6028:8014];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=26486;data_size=26478;sum=2676226;count=875;size_of_portion=208; 2025-06-25T15:05:43.559607Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-25T15:05:43.627304Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 673 2025-06-25T15:05:43.630330Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=37893352;raw_bytes=37186700;count=5;records=375200} inactive {blob_bytes=111591656;raw_bytes=108150240;count=216;records=1200200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:05:43.919499Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-25T15:05:43.919550Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;fline=with_appended.cpp:65;portions=222,;task_id=db9eaf58-51d511f0-aba1b1c9-85182984; 2025-06-25T15:05:43.920090Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::db9eaf58-51d511f0-aba1b1c9-85182984; 2025-06-25T15:05:43.920150Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:22744072;portions_count:222;); 2025-06-25T15:05:43.920187Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:43.920239Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:05:43.920284Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863598927;tx_id=18446744073709551615;;current_snapshot_ts=1750863896988; 2025-06-25T15:05:43.920327Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:43.920366Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:43.920396Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:43.920445Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.886000s; 2025-06-25T15:05:43.920478Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=db9eaf58-51d511f0-aba1b1c9-85182984;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:43.920579Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 673 >> TSchemeShardServerLess::TestServerlessComputeResourcesMode |93.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::Fake [GOOD] >> TColumnShardTestReadWrite::Write [GOOD] >> AnalyzeDatashard::DropTableNavigateError [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::Write [GOOD] Test command err: 2025-06-25T15:05:42.042497Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:42.069367Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:42.069608Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:42.076536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:42.076749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:42.076971Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:42.077076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:42.077189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:42.077315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:42.077417Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:42.077555Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:42.077662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:42.077754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:42.077871Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:42.102458Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:42.102594Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:42.102636Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:42.102830Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:42.103015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:42.103096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:42.103133Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 
2025-06-25T15:05:42.103206Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:42.103257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:42.103293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:42.103343Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:42.103505Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:42.103562Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:42.103630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:42.103659Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:42.103755Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:42.103824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:42.103865Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:42.103890Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:42.103933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:42.103965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:42.103994Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:42.104198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:42.104241Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:42.104269Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:42.104466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:42.104525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:42.104558Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:42.104708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:42.104771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:42.104801Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:42.104882Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:42.104962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:42.105000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:42.105030Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:42.105202Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=38; 2025-06-25T15:05:42.105289Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=44; 2025-06-25T15:05:42.105359Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-25T15:05:42.105442Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=41; 2025-06-25T15:05:42.105532Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:42.105611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:42.105645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:42.105704Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... [{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"19,19,19,19,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"20,20,20,20,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"21,21,21,21,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"22,22,22,22,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"23,23,23,23,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"24,24,24,24,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"25,25,25,25,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"26,26,26,26,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"27,27,27,27,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"28,28,28,28,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"29,29,29,29,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"30,30,30,30,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"31,31,31,31,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"32,32,32,32,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"33,33,33,33,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"34,34,34,34,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"35,35,35,35,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"36,36,36,36,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"37,37,37,37,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"38,38,38,38,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"39,39,39,39,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include"
:1},"id":1}]},"p":{"include":0,"pk":"40,40,40,40,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"41,41,41,41,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"42,42,42,42,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"43,43,43,43,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"44,44,44,44,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"45,45,45,45,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"46,46,46,46,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"47,47,47,47,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"48,48,48,48,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"49,49,49,49,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"50,50,50,50,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"51,51,51,51,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"52,52,52,52,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"53,53,53,53,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"54,54,54,54,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"55,55,55,55,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"56,56,56,56,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"57,57,57,57,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"58,58,58,58,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"59,59,59,59,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"60,60,60,60,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"61,61,61,61,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"62,62,62,62,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"63,63,63,63,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"
inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"64,64,64,64,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"65,65,65,65,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"66,66,66,66,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"67,67,67,67,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"68,68,68,68,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"69,69,69,69,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"70,70,70,70,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"71,71,71,71,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"72,72,72,72,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"73,73,73,73,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"74,74,74,74,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"75,75,75,75,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"76,76,76,76,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"77,77,77,77,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"78,78,78,78,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"79,79,79,79,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"80,80,80,80,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"81,81,81,81,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"82,82,82,82,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"83,83,83,83,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"84,84,84,84,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"85,85,85,85,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"86,86,86,86,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"87,87,87,87,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"i
d":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"88,88,88,88,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"89,89,89,89,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"90,90,90,90,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"91,91,91,91,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"92,92,92,92,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"93,93,93,93,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"94,94,94,94,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"95,95,95,95,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"96,96,96,96,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"97,97,97,97,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"98,98,98,98,"}},{"i":{"txs":[],"starts":[{"inc":{"count_include":1},"id":1}],"finishes":[{"inc":{"count_include":1},"id":1}]},"p":{"include":0,"pk":"99,99,99,99,"}}]}; >> TSchemeShardServerLess::TestServerlessComputeResourcesModeValidation >> TSchemeShardServerLess::TestServerlessComputeResourcesMode [GOOD] >> TSchemeShardServerLess::StorageBillingLabels ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeDatashard::DropTableNavigateError [GOOD] Test command err: 2025-06-25T15:03:40.765655Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:40.766129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:40.766198Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f9c/r3tmp/tmpH01nVT/pdisk_1.dat 2025-06-25T15:03:41.138155Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12573, node 1 2025-06-25T15:03:41.347246Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:41.347305Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:41.347335Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:41.347713Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:41.356071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:41.448231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:41.448360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:41.463470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16557 2025-06-25T15:03:42.025439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:45.150799Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:45.185425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:45.185553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:45.224145Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:45.226584Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:45.420503Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:45.445906Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:45.446478Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:45.446984Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:45.447144Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:45.447256Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:45.447489Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:45.447592Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:45.447669Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:45.447730Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:45.613227Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:45.613371Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:45.636942Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:45.803223Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:45.863435Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:45.863527Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:45.902024Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:45.902161Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:45.902441Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:45.902510Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:45.902562Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:45.902614Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:45.902673Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:45.902724Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:45.903145Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:45.924973Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:45.925115Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1789:2560], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:45.930813Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2569] 2025-06-25T15:03:45.935431Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1820:2582] 2025-06-25T15:03:45.937067Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1820:2582], schemeshard id = 72075186224037897 2025-06-25T15:03:45.944031Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:45.967122Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:45.967171Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:45.967223Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:45.978265Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:45.983991Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:45.984102Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:46.176372Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:46.348347Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:46.425813Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:47.098697Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:47.350999Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2142:3020], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:47.351167Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:47.368717Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:47.815202Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2445:3071], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:47.815379Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:47.817108Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2450:3075]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:03:47.817226Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:03:47.817296Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2452:3077] 2025-06-25T15:03:47.817344Z no ... 25T15:05:16.873088Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:05:19.349569Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T15:05:21.446818Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:05:21.447210Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:05:24.426103Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T15:05:26.643624Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:05:26.643906Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:05:29.670335Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T15:05:31.801927Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:05:31.802219Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:05:34.921629Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T15:05:36.945706Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:05:36.946005Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:05:39.989749Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T15:05:41.097776Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-25T15:05:41.097851Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7961: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T15:05:41.097885Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7992: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T15:05:41.097917Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-25T15:05:42.390215Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:05:42.390465Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:05:42.432725Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037897 2025-06-25T15:05:42.432785Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 184.000000s, at 
schemeshard: 72075186224037897 2025-06-25T15:05:42.433021Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 25 2025-06-25T15:05:42.445320Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T15:05:43.460399Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:05:43.460488Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:05:43.460533Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-25T15:05:43.460587Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-25T15:05:43.460630Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:05:43.461009Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:05:43.464597Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T15:05:43.467818Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6618:4679], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:05:43.467925Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6628:4684], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:05:43.468078Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:05:43.479480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:05:43.540630Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6632:4687], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T15:05:43.724336Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:6728:4733] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:05:43.769137Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:6757:4748]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:05:43.769366Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:05:43.769460Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:6759:4750] 2025-06-25T15:05:43.769515Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:6759:4750] 2025-06-25T15:05:43.769861Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:6760:4751] 2025-06-25T15:05:43.769983Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:6759:4750], server id = [2:6760:4751], tablet id = 72075186224037894, status = OK 2025-06-25T15:05:43.770047Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:6760:4751], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:05:43.770092Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T15:05:43.770233Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:05:43.770306Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:6757:4748], StatRequests.size() = 1 2025-06-25T15:05:43.860607Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YTM5MGM3OGEtYWJmYjA4YjYtNDJjZGM1ZDItN2FjZjVjYzA=, TxId: 2025-06-25T15:05:43.860659Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YTM5MGM3OGEtYWJmYjA4YjYtNDJjZGM1ZDItN2FjZjVjYzA=, TxId: 2025-06-25T15:05:43.861022Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:43.873589Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:05:43.873637Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T15:05:43.926290Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:05:43.926378Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:05:43.978838Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:6759:4750], schemeshard count = 1 2025-06-25T15:05:44.813187Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:05:44.813284Z node 2 :STATISTICS ERROR: aggregator_impl.cpp:805: [72075186224037894] IsColumnTable. traversal path [OwnerId: 72075186224037897, LocalPathId: 4] is not known to schemeshard 2025-06-25T15:05:44.813576Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:05:44.815988Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T15:05:44.824178Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=Y2ExM2ZlNzctYjhmMGFiZmQtNmMwNmJhODEtNTkyOWVkNWE=, TxId: 2025-06-25T15:05:44.824231Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=Y2ExM2ZlNzctYjhmMGFiZmQtNmMwNmJhODEtNTkyOWVkNWE=, TxId: 2025-06-25T15:05:44.824547Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:44.837142Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:44.837188Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:2748:3215] 2025-06-25T15:05:44.837523Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:6832:4798]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:05:44.839339Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:05:44.839374Z node 2 :STATISTICS ERROR: service_impl.cpp:796: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] Navigate failed 2025-06-25T15:05:44.839399Z node 2 :STATISTICS DEBUG: service_impl.cpp:1304: ReplyFailed(), request id = 2 >> TSchemeShardServerLess::StorageBilling >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:05:45.445197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:05:45.445262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:45.445290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:05:45.445314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:05:45.446543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:05:45.446568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:05:45.446644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:45.446691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:05:45.447154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:05:45.448216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:05:45.503376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:05:45.503418Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:45.515812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:05:45.516071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:05:45.516271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:05:45.521217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:05:45.521553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:05:45.523481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:45.523772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:05:45.528843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:45.530254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:05:45.537822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:45.537883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:45.538023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:05:45.538068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:05:45.538145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:05:45.538233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.543896Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:05:45.641425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:05:45.642416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.643904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:05:45.643956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: 
TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:05:45.644961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:05:45.645038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:05:45.647784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:45.648878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:05:45.649069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.649186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:05:45.649228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:05:45.649271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:05:45.651085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.651132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:05:45.651179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:05:45.652681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.652723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.652777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:45.652818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:05:45.657038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:05:45.658723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg 
operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:05:45.659849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:05:45.660743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:45.660856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:45.660899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:45.662221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:05:45.662279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:45.662474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:05:45.662568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:05:45.664498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:45.664540Z node 1 :FLAT_TX_SCHEMESHARD ... 
TxId: 106, tablet: 72075186233409546, partId: 0 2025-06-25T15:05:45.967408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 106:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 106 2025-06-25T15:05:45.967439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:796: [72057594046678944] TSyncHive, operationId 106:0, HandleReply TEvUpdateDomainReply, from hive: 72075186233409546 2025-06-25T15:05:45.967468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 106:0 138 -> 240 2025-06-25T15:05:45.967752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186234409549, msg: Owner: 72075186234409549 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186234409549, cookie: 0 2025-06-25T15:05:45.968387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T15:05:45.968433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T15:05:45.969458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.969551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.969635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 106:0 ProgressState 2025-06-25T15:05:45.969727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T15:05:45.969757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T15:05:45.969789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#106:0 progress is 1/1 2025-06-25T15:05:45.969814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T15:05:45.969873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true 2025-06-25T15:05:45.969910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-25T15:05:45.969940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 106:0 2025-06-25T15:05:45.969976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 106:0 2025-06-25T15:05:45.970045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-25T15:05:45.971396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-25T15:05:45.971428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests 
-- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-25T15:05:45.971759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-25T15:05:45.971831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T15:05:45.971856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:854:2734] TestWaitNotification: OK eventTxId 106 2025-06-25T15:05:45.972272Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:05:45.972417Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 150us result status StatusSuccess 2025-06-25T15:05:45.972721Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLess0" PathDescription { Self { Name: "ServerLess0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409551 SchemeShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SharedHive: 72075186233409546 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:45.973126Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186234409549 2025-06-25T15:05:45.973239Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72075186234409549 describe path "/MyRoot/ServerLess0" took 120us result status StatusSuccess 2025-06-25T15:05:45.973450Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLess0" PathDescription { Self { Name: "MyRoot/ServerLess0" PathId: 1 SchemeshardId: 72075186234409549 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409551 SchemeShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/ServerLess0" } SharedHive: 72075186233409546 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72075186234409549, at schemeshard: 72075186234409549 2025-06-25T15:05:45.973874Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:05:45.974006Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 111us result status StatusSuccess 2025-06-25T15:05:45.974220Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLess0" PathDescription { Self { Name: "ServerLess0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409550 TimeCastBucketsPerMediator: 2 
Mediators: 72075186234409551 SchemeShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SharedHive: 72075186233409546 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:45.974557Z node 1 :HIVE INFO: tablet_helpers.cpp:1470: [72075186233409546] TEvRequestDomainInfo, 72057594046678944:3 >> TSchemeShardServerLess::TestServerlessComputeResourcesModeValidation [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:05:45.445269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:05:45.445358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:45.445402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:05:45.445434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:05:45.446600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:05:45.446650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:05:45.446728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:45.446798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# 
false 2025-06-25T15:05:45.447515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:05:45.448326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:05:45.526800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:05:45.526860Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:45.543847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:05:45.544248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:05:45.544459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:05:45.550397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:05:45.550752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:05:45.551346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:45.551661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:05:45.555057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:45.555235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:05:45.556401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:45.556456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:45.556581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:05:45.556625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:05:45.556678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:05:45.556806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.563286Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:05:45.658782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 
1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:05:45.658940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.659077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:05:45.659110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:05:45.659290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:05:45.659355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:05:45.661182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:45.661302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:05:45.661425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.661485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:05:45.661518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:05:45.661548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:05:45.662751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.662789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:05:45.662815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:05:45.663934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.663968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:45.664013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:45.664043Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:05:45.666285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:05:45.667406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:05:45.667553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:05:45.668195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:45.668277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:45.668305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:45.668528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:05:45.668561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:45.668683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:05:45.668740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:05:45.670067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:45.670098Z node 1 :FLAT_TX_SCHEMESHARD ... 
:05:46.273585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-25T15:05:46.273886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-25T15:05:46.276344Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 7 TxId_Deprecated: 7 TabletID: 72075186234409548 2025-06-25T15:05:46.277128Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186234409547 2025-06-25T15:05:46.277550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 7 ShardOwnerId: 72057594046678944 ShardLocalIdx: 7, at schemeshard: 72057594046678944 2025-06-25T15:05:46.277816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 Forgetting tablet 72075186234409548 2025-06-25T15:05:46.278969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 Forgetting tablet 72075186234409547 2025-06-25T15:05:46.279142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-25T15:05:46.279297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-25T15:05:46.280564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T15:05:46.280611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T15:05:46.280707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T15:05:46.281261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T15:05:46.281324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T15:05:46.281390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T15:05:46.284160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-25T15:05:46.284212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 
72057594046678944:5 tabletId 72075186234409546 2025-06-25T15:05:46.284982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-25T15:05:46.285019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186234409548 2025-06-25T15:05:46.285110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-25T15:05:46.285162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186234409547 2025-06-25T15:05:46.285722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T15:05:46.285804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-25T15:05:46.286090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-25T15:05:46.286124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-25T15:05:46.286505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-25T15:05:46.286660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T15:05:46.286696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:935:2792] TestWaitNotification: OK eventTxId 106 2025-06-25T15:05:46.287265Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0/dir/table0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:05:46.287447Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0/dir/table0" took 207us result status StatusPathDoesNotExist 2025-06-25T15:05:46.287628Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0/dir/table0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ServerLess0/dir/table0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 
2025-06-25T15:05:46.288094Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:05:46.288271Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 157us result status StatusPathDoesNotExist 2025-06-25T15:05:46.288436Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ServerLess0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T15:05:46.288924Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:05:46.289123Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 208us result status StatusSuccess 2025-06-25T15:05:46.289582Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SharedDB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 
MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 wait until 72075186233409550 is deleted wait until 72075186233409551 is deleted wait until 72075186233409552 is deleted wait until 72075186233409553 is deleted 2025-06-25T15:05:46.291468Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409550 2025-06-25T15:05:46.291585Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409551 2025-06-25T15:05:46.291623Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409552 2025-06-25T15:05:46.291681Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409553 Deleted tabletId 72075186233409550 Deleted tabletId 72075186233409551 Deleted tabletId 72075186233409552 Deleted tabletId 72075186233409553 >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-true ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesModeValidation [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:05:46.273787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:05:46.273845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:46.273871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:05:46.273896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:05:46.273926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:05:46.273962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:05:46.274004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:46.274051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:05:46.274535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:05:46.274772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:05:46.329533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:05:46.329598Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:46.341881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:05:46.342187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:05:46.342360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:05:46.346446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:05:46.346680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:05:46.347112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:46.347287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:05:46.349696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:46.349832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:05:46.350580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:46.350624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:46.350717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:05:46.350749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:05:46.350787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:05:46.350856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.355051Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:05:46.427165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: 
"pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:05:46.427340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.427478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:05:46.427508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:05:46.427694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:05:46.427749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:05:46.429504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:46.429634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:05:46.429780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.429863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:05:46.429906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:05:46.429949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:05:46.431279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.431322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:05:46.431352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:05:46.432423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.432466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.432506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 
1:0, at tablet# 72057594046678944 2025-06-25T15:05:46.432543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:05:46.439181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:05:46.440828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:05:46.440982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:05:46.441617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:46.441700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:46.441731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:46.441976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:05:46.442013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:46.442147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:05:46.442223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:05:46.443796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:46.443833Z node 1 :FLAT_TX_SCHEMESHARD ... 
104 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:46.751373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-06-25T15:05:46.751630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 128 -> 240 2025-06-25T15:05:46.751678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-06-25T15:05:46.751771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T15:05:46.751851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:619:2545], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 104 2025-06-25T15:05:46.753234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:46.753275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T15:05:46.753404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:46.753431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-25T15:05:46.753665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.753696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 104:0, ProgressState, NeedSyncHive: 0 2025-06-25T15:05:46.753719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 240 -> 240 2025-06-25T15:05:46.754072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T15:05:46.754135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 
LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T15:05:46.754160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-25T15:05:46.754193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-25T15:05:46.754218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-25T15:05:46.754291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-25T15:05:46.756103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.756136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-25T15:05:46.756211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T15:05:46.756235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T15:05:46.756270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T15:05:46.756293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T15:05:46.756337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-25T15:05:46.756381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:309:2298] message: TxId: 104 2025-06-25T15:05:46.756413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T15:05:46.756454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T15:05:46.756477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T15:05:46.756626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T15:05:46.757134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T15:05:46.758359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T15:05:46.758406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:752:2631] TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-25T15:05:46.761161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" 
OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "SharedDB" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:05:46.761303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "SharedDB" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } 2025-06-25T15:05:46.761364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, path /MyRoot/SharedDB 2025-06-25T15:05:46.761496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless, at schemeshard: 72057594046678944 2025-06-25T15:05:46.761531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 105:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless, at schemeshard: 72057594046678944 2025-06-25T15:05:46.764709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 105, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:46.764924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ServerlessComputeResourcesMode can be changed only for serverless, operation: ALTER DATABASE, path: /MyRoot/SharedDB TestModificationResult got TxId: 105, wait until txId: 105 TestModificationResults wait txId: 106 2025-06-25T15:05:46.766754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeUnspecified } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:05:46.766851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 106:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeUnspecified } 2025-06-25T15:05:46.766876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 106:0, path /MyRoot/ServerLess0 2025-06-25T15:05:46.766990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 106:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: 
can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified, at schemeshard: 72057594046678944 2025-06-25T15:05:46.767049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified, at schemeshard: 72057594046678944 2025-06-25T15:05:46.769056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:46.769237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: can not set ServerlessComputeResourcesMode to EServerlessComputeResourcesModeUnspecified, operation: ALTER DATABASE, path: /MyRoot/ServerLess0 TestModificationResult got TxId: 106, wait until txId: 106 >> TColumnShardTestReadWrite::CompactionInGranule_PKUtf8_Reboot [GOOD] >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-true [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:05:47.387547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:05:47.387606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:47.387635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:05:47.387655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:05:47.387682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:05:47.387698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:05:47.387728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:47.387770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:05:47.388212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:05:47.388452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:05:47.433021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:05:47.433067Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:47.442749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:05:47.442977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:05:47.443078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:05:47.446905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:05:47.447107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:05:47.447465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:47.447655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:05:47.449704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:47.449812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:05:47.450497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:47.450537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:47.450615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:05:47.450645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:05:47.450672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:05:47.450730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:05:47.454668Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:05:47.523878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:05:47.524018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:47.524133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:05:47.524160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:05:47.524328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:05:47.524378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:05:47.525886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:47.526007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:05:47.526115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:47.526157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:05:47.526181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:05:47.526215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:05:47.527366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:47.527402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:05:47.527430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:05:47.528451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:47.528486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-25T15:05:47.528521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:47.528549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:05:47.534653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:05:47.536007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:05:47.536152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:05:47.536683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:47.536752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:47.536781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:47.536989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:05:47.537026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:47.537144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:05:47.537191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:05:47.538422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:47.538479Z node 1 :FLAT_TX_SCHEMESHARD ... 
2075186234409551 2025-06-25T15:05:47.967607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-25T15:05:47.967880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 Forgetting tablet 72075186234409549 2025-06-25T15:05:47.968227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 7 ShardOwnerId: 72057594046678944 ShardLocalIdx: 7, at schemeshard: 72057594046678944 2025-06-25T15:05:47.968345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-25T15:05:47.968788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:05:47.970844Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186234409550 Forgetting tablet 72075186234409551 2025-06-25T15:05:47.971411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-25T15:05:47.971510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 Forgetting tablet 72075186234409550 2025-06-25T15:05:47.973011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T15:05:47.973052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T15:05:47.973119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-25T15:05:47.973344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-25T15:05:47.973417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-25T15:05:47.973455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T15:05:47.973504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T15:05:47.975461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-25T15:05:47.975505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 
72057594046678944:5 tabletId 72075186234409549 2025-06-25T15:05:47.975608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-25T15:05:47.975640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186234409551 2025-06-25T15:05:47.975932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-25T15:05:47.975989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186234409550 2025-06-25T15:05:47.976887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-25T15:05:47.976946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-25T15:05:47.977173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-25T15:05:47.977202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-25T15:05:47.977532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-25T15:05:47.977583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-25T15:05:47.977607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:940:2800] TestWaitNotification: OK eventTxId 106 2025-06-25T15:05:47.978015Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0/dir/table0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:05:47.978173Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0/dir/table0" took 152us result status StatusPathDoesNotExist 2025-06-25T15:05:47.978279Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0/dir/table0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ServerLess0/dir/table0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 
2025-06-25T15:05:47.978574Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:05:47.978665Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 84us result status StatusPathDoesNotExist 2025-06-25T15:05:47.978735Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1])" Path: "/MyRoot/ServerLess0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T15:05:47.979025Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:05:47.979120Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 99us result status StatusSuccess 2025-06-25T15:05:47.979352Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SharedDB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } SchemeLimits { MaxDepth: 32 
MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 wait until 72075186234409549 is deleted wait until 72075186234409550 is deleted wait until 72075186234409551 is deleted wait until 72075186234409552 is deleted 2025-06-25T15:05:47.979769Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409549 2025-06-25T15:05:47.979823Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409550 2025-06-25T15:05:47.979844Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409551 2025-06-25T15:05:47.979865Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409552 Deleted tabletId 72075186234409549 Deleted tabletId 72075186234409550 Deleted tabletId 72075186234409551 Deleted tabletId 72075186234409552 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKUtf8_Reboot [GOOD] Test command err: 2025-06-25T15:04:24.430635Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:24.450999Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:24.451268Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:24.459928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:24.460090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:24.460304Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:24.460394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:24.460470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:24.460524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:24.460593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:24.460697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:24.460790Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:24.460858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.460919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:24.480095Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:24.480226Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:24.480263Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:24.480403Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.481640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:24.481697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:24.481728Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:24.481795Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:24.481831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:24.481856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:24.481882Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:24.481989Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:24.482034Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:24.482062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:24.482077Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:24.482139Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:24.482167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:24.482200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:24.482221Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:24.482258Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:24.482284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:24.482305Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:24.482459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:24.482484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:24.482500Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:24.482615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:24.482642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:24.482660Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:24.482747Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:24.482781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.482808Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:24.482854Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:24.482891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:24.482916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:24.482940Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:24.483076Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=29; 2025-06-25T15:04:24.483137Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=27; 2025-06-25T15:04:24.483203Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=29; 2025-06-25T15:04:24.483267Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=26; 2025-06-25T15:04:24.483844Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:24.483939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:24.483977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:24.484040Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
/portions;fline=constructor_meta.cpp:71;memory_size=26438;data_size=26406;sum=13447440;count=14328;size_of_meta=136; 2025-06-25T15:05:46.720229Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=26510;data_size=26478;sum=13963248;count=7164;size_of_portion=208; 2025-06-25T15:05:46.720954Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=71846; 2025-06-25T15:05:46.721014Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=8; 2025-06-25T15:05:46.722385Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1329; 2025-06-25T15:05:46.722423Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=73408; 2025-06-25T15:05:46.722456Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=73489; 2025-06-25T15:05:46.722503Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=8; 2025-06-25T15:05:46.723078Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=539; 2025-06-25T15:05:46.723109Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=74424; 2025-06-25T15:05:46.723211Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=64; 2025-06-25T15:05:46.723295Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=44; 2025-06-25T15:05:46.723524Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=196; 2025-06-25T15:05:46.723699Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=133; 2025-06-25T15:05:46.736839Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=13091; 2025-06-25T15:05:46.754875Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=17959; 2025-06-25T15:05:46.754948Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=8; 2025-06-25T15:05:46.754988Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=7; 2025-06-25T15:05:46.755021Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=4; 2025-06-25T15:05:46.755082Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=33; 2025-06-25T15:05:46.755117Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=4; 2025-06-25T15:05:46.755181Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=39; 2025-06-25T15:05:46.755221Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=4; 2025-06-25T15:05:46.755268Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=22; 2025-06-25T15:05:46.755331Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=34; 2025-06-25T15:05:46.755394Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=34; 2025-06-25T15:05:46.755420Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=110804; 2025-06-25T15:05:46.755525Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=22744072;raw_bytes=22320020;count=3;records=225200} inactive {blob_bytes=149450960;raw_bytes=145316940;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:05:46.755625Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:05:46.755666Z 
node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:05:46.755723Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:05:46.755756Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:05:46.755899Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:46.755950Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:05:46.755998Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863570023;tx_id=18446744073709551615;;current_snapshot_ts=1750863866215; 2025-06-25T15:05:46.756026Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:46.756057Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:46.756082Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:46.756144Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:05:46.760556Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:05:46.761080Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:05:46.761103Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:05:46.761120Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:05:46.761150Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:05:46.761201Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:05:46.761245Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863570023;tx_id=18446744073709551615;;current_snapshot_ts=1750863866215; 2025-06-25T15:05:46.761275Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:05:46.761306Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:46.761332Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:05:46.761396Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=1.000000s; 2025-06-25T15:05:46.761429Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:05:48.455093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:05:48.455153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:48.455178Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:05:48.455199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:05:48.455236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:05:48.455257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:05:48.455297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:48.455342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:05:48.455869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:05:48.456093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:05:48.506174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:05:48.506231Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:48.518197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:05:48.518502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:05:48.518635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:05:48.522495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:05:48.522697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:05:48.523106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:48.523291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:05:48.525457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:48.525571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:05:48.526480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:48.526521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:48.526625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:05:48.526669Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:05:48.526707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:05:48.526785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:05:48.531308Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:05:48.615696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:05:48.615859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:48.616017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:05:48.616052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:05:48.616221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:05:48.616274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:05:48.618303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:48.618436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:05:48.618583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:48.618620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:05:48.618652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:05:48.618684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:05:48.620026Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:48.620060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:05:48.620087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:05:48.621212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:48.621247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:48.621286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:48.621322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:05:48.623622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:05:48.624976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:05:48.625137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:05:48.625750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:48.625835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:48.625865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:48.626103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:05:48.626137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:48.626256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:05:48.626340Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:05:48.627972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:48.628013Z node 1 :FLAT_TX_SCHEMESHARD ... rd__operation_common_subdomain.cpp:120: NSubDomainState::TConfigureParts operationId# 104:0 Got OK TEvConfigureStatus from tablet# 72075186234409551 shardIdx# 72057594046678944:7 at schemeshard# 72057594046678944 2025-06-25T15:05:48.923688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 3 -> 128 2025-06-25T15:05:48.926163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T15:05:48.926340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T15:05:48.926382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T15:05:48.926428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 104:0, at tablet# 72057594046678944 2025-06-25T15:05:48.926474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 104 ready parts: 1/1 2025-06-25T15:05:48.926617Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 104 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:05:48.928205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 msg type: 269090816 2025-06-25T15:05:48.928374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-06-25T15:05:48.928683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:48.928795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:48.928835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-06-25T15:05:48.929098Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2563: Change state for txid 104:0 128 -> 240 2025-06-25T15:05:48.929154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-06-25T15:05:48.929260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T15:05:48.929352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:619:2545], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 104 2025-06-25T15:05:48.931049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:48.931110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-25T15:05:48.931303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:48.931348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-25T15:05:48.931717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T15:05:48.931765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 104:0, ProgressState, NeedSyncHive: 0 2025-06-25T15:05:48.931799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 104:0 240 -> 240 2025-06-25T15:05:48.932345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T15:05:48.932445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-25T15:05:48.932480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-25T15:05:48.932537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, 
LocalPathId: 3], version: 4 2025-06-25T15:05:48.932585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-25T15:05:48.932663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-25T15:05:48.934952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-25T15:05:48.935005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-25T15:05:48.935107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T15:05:48.935140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T15:05:48.935194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#104:0 progress is 1/1 2025-06-25T15:05:48.935226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T15:05:48.935259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-25T15:05:48.935319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:309:2298] message: TxId: 104 2025-06-25T15:05:48.935371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-25T15:05:48.935448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 104:0 2025-06-25T15:05:48.935480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 104:0 2025-06-25T15:05:48.935639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T15:05:48.936113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-25T15:05:48.937433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-25T15:05:48.937485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:752:2631] TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-25T15:05:48.940344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeExclusive } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:05:48.940478Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1079: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, feature flag EnableAlterDatabaseCreateHiveFirst 
1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeExclusive } 2025-06-25T15:05:48.940541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1085: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, path /MyRoot/ServerLess0 2025-06-25T15:05:48.940674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, at schemeshard: 72057594046678944 2025-06-25T15:05:48.940725Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 105:1, propose status:StatusPreconditionFailed, reason: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, at schemeshard: 72057594046678944 2025-06-25T15:05:48.942619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 105, response: Status: StatusPreconditionFailed Reason: "Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:48.942801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, operation: ALTER DATABASE, path: /MyRoot/ServerLess0 TestModificationResult got TxId: 105, wait until txId: 105 >> DataStreams::TestControlPlaneAndMeteringData >> DataStreams::TestReservedResourcesMetering >> DataStreams::TestDeleteStream >> DataStreams::TestUpdateStorage >> DataStreams::TestPutRecordsOfAnauthorizedUser >> DataStreams::TestGetRecordsStreamWithSingleShard >> AnalyzeColumnshard::AnalyzeTwoColumnTables [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheMiddle [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloaded ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeTwoColumnTables [GOOD] Test command err: 2025-06-25T15:03:14.106501Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:14.106901Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:14.107022Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ffc/r3tmp/tmpaCXkDN/pdisk_1.dat 2025-06-25T15:03:14.481727Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8646, node 1 2025-06-25T15:03:14.755579Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:14.755639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:14.755673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:14.756302Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:14.758805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:14.856444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:14.857805Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:14.872981Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25205 2025-06-25T15:03:15.473634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:18.442484Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:18.477810Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:18.477918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:18.525941Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:18.529047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:18.739073Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:18.777929Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.778551Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.779063Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.779219Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.779432Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.779540Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.779636Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.779714Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.779777Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.969577Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:18.969709Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:19.000971Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:19.125129Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:19.160350Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:19.160435Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:19.218824Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:19.218996Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:19.219234Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:19.219299Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:19.219373Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:19.219452Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:19.219507Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:19.219561Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:19.220385Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:19.257333Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:19.257477Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1794:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:19.265757Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:19.271741Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1830:2584] 2025-06-25T15:03:19.272510Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1830:2584], schemeshard id = 72075186224037897 2025-06-25T15:03:19.284018Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:19.306424Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:19.306472Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:19.306534Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:19.318364Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:19.323908Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:19.324029Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:19.490576Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:19.672458Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:19.773052Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:20.321056Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:20.599620Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2157:3028], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:20.599756Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:20.630013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:20.765436Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2227:2795];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:20.765810Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2227:2795];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:20.766164Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2227:2795];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:20.766321Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2227:2795];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:20.766447Z node 2 :TX_COLUMNSHARD WARN: ... 6-25T15:05:46.345124Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:46.358298Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:46.358357Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:47: [72075186224037894] TTxFinishTraversal::Complete. Don't send TEvAnalyzeResponse. There are pending operations, OperationId operationId , ActorId=[1:3809:3567] 2025-06-25T15:05:46.897786Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-25T15:05:46.897851Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T15:05:47.555328Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:05:47.555390Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is column table. 2025-06-25T15:05:47.557661Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:05:47.572602Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:05:47.573215Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:05:47.573276Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 5], AnalyzedShards 1 2025-06-25T15:05:47.597916Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:05:47.608748Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 
2025-06-25T15:05:47.609357Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-25T15:05:47.609422Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-25T15:05:47.622154Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-25T15:05:48.800303Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:05:48.800374Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is column table. 2025-06-25T15:05:48.800405Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-25T15:05:48.800874Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:05:48.813644Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:05:48.813921Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:05:48.813996Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:05:48.814290Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:05:48.838292Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:05:48.838425Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T15:05:48.838807Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8158:5887], server id = [2:8159:5888], tablet id = 72075186224037900, status = OK 2025-06-25T15:05:48.838871Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8158:5887], path = { OwnerId: 72075186224037897 LocalId: 5 } 2025-06-25T15:05:48.841385Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:05:48.841504Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:05:48.841814Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:05:48.841995Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:05:48.842224Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:05:48.843872Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8158:5887], server id = [2:8159:5888], tablet id = 72075186224037900 2025-06-25T15:05:48.843913Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:48.844444Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:05:48.864758Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZWM3Zjg0MDctNjdjYzNhNjEtMWRmMTY1NjctNGI4OTRhZQ==, TxId: 2025-06-25T15:05:48.864827Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZWM3Zjg0MDctNjdjYzNhNjEtMWRmMTY1NjctNGI4OTRhZQ==, TxId: 2025-06-25T15:05:48.865389Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:48.889236Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-25T15:05:48.889282Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T15:05:49.417049Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 3 is different from the current 0 2025-06-25T15:05:49.417127Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T15:05:50.025814Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:05:50.025987Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:05:50.047349Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:05:50.047414Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:05:50.047446Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:05:51.198797Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:05:51.198927Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is column table. 
2025-06-25T15:05:51.198967Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-25T15:05:51.199554Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:05:51.212695Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:05:51.213015Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:05:51.213074Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:05:51.213413Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:05:51.226774Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:05:51.226927Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-25T15:05:51.227440Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8259:5945], server id = [2:8260:5946], tablet id = 72075186224037900, status = OK 2025-06-25T15:05:51.227545Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8259:5945], path = { OwnerId: 72075186224037897 LocalId: 5 } 2025-06-25T15:05:51.228759Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:05:51.228852Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:05:51.229036Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:05:51.229175Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:05:51.229403Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:05:51.231598Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8259:5945], server id = [2:8260:5946], tablet id = 72075186224037900 2025-06-25T15:05:51.231636Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:51.232274Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:05:51.252244Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDQ3ZTdmNzQtMWE0ODU0ZTAtZWZhZmQyMzUtOGJmZDMzNzM=, TxId: 2025-06-25T15:05:51.252296Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDQ3ZTdmNzQtMWE0ODU0ZTAtZWZhZmQyMzUtOGJmZDMzNzM=, TxId: 2025-06-25T15:05:51.252885Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:51.265798Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-25T15:05:51.265858Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3809:3567] >> AnalyzeColumnshard::AnalyzeDeadline [GOOD] >> DataStreams::TestDeleteStream [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlag >> DataStreams::TestUpdateStorage [GOOD] >> DataStreams::TestStreamTimeRetention >> AnalyzeColumnshard::AnalyzeRebootSaInAggregate [GOOD] >> DataStreams::TestPutRecordsOfAnauthorizedUser [GOOD] >> DataStreams::TestPutRecordsWithRead ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeDeadline [GOOD] Test command err: 2025-06-25T15:03:25.181851Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:25.182249Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:25.182358Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fea/r3tmp/tmpP4jnIK/pdisk_1.dat 2025-06-25T15:03:25.573361Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63856, node 1 2025-06-25T15:03:25.812949Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:25.812995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:25.813020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:25.813435Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:25.815593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:25.918942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:25.919083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:25.936060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6957 2025-06-25T15:03:26.481612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:29.517456Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:29.552158Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:29.552280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:29.591321Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:29.593807Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:29.813816Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:29.848918Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:29.849489Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:29.850101Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:29.850245Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:29.850437Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:29.850510Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:29.850591Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:29.850647Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:29.850698Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.049833Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:30.049963Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:30.063379Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:30.245985Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:30.301843Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:30.301951Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:30.354228Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:30.354409Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:30.354614Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:30.354678Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:30.354730Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:30.354780Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:30.354834Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:30.354884Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:30.355420Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:30.380104Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:30.380204Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:30.395969Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:30.403453Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:30.403752Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:30.413638Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:30.436571Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:30.436652Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:30.436730Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:30.448736Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:30.456908Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:30.457038Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:30.663704Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:30.800211Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:30.863278Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:31.422384Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:31.670633Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:31.670802Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:31.687169Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:31.810516Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:31.810743Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:31.811038Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:31.811175Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:31.811338Z node 2 :TX_COLUMNSHARD WARN: ... .394513Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7992: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T15:05:44.394570Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-25T15:05:45.925634Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-25T15:05:45.925698Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 201.000000s, at schemeshard: 72075186224037897 2025-06-25T15:05:45.925934Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 51 2025-06-25T15:05:45.938562Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T15:05:45.970604Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:05:45.970672Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:05:45.970704Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-25T15:05:45.970744Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-25T15:05:45.970780Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:05:45.971052Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:05:45.973596Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T15:05:45.976166Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7157:5283], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:05:45.976241Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7167:5288], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:05:45.976294Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:05:45.983019Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:05:46.018800Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7171:5291], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T15:05:46.228128Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7253:5335] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:05:46.274355Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7275:5349]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:05:46.274535Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:05:46.274595Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7277:5351] 2025-06-25T15:05:46.274645Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7277:5351] 2025-06-25T15:05:46.274903Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7278:5352] 2025-06-25T15:05:46.275008Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7277:5351], server id = [2:7278:5352], tablet id = 72075186224037894, status = OK 2025-06-25T15:05:46.275079Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7278:5352], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:05:46.275126Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T15:05:46.275206Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:05:46.275269Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7275:5349], StatRequests.size() = 1 2025-06-25T15:05:46.365228Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODM0YzU0Y2YtMWRjODc0MDEtMjQ5MDIzYTctZjdkYzVmMjI=, TxId: 2025-06-25T15:05:46.365289Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODM0YzU0Y2YtMWRjODc0MDEtMjQ5MDIzYTctZjdkYzVmMjI=, TxId: 2025-06-25T15:05:46.365684Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:46.378604Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:05:46.378655Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T15:05:46.453383Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:05:46.453454Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:05:46.527819Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7277:5351], schemeshard count = 1 2025-06-25T15:05:47.499392Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:05:47.499467Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:05:47.501490Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:05:47.515238Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:05:47.515602Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:05:47.515647Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-25T15:05:47.528134Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:05:47.538922Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. ... blocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR cookie 0 ... waiting for TEvAnalyzeTableResponse (done) 2025-06-25T15:05:47.878649Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:05:47.878705Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:05:47.878736Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-25T15:05:47.878771Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-25T15:05:47.878815Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:47.881068Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:05:47.893907Z node 2 :STATISTICS ERROR: tx_analyze_deadline.cpp:28: [72075186224037894] Delete long analyze operation, OperationId=operationId 2025-06-25T15:05:48.266109Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:05:48.266259Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:05:50.088806Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-25T15:05:50.088874Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7961: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T15:05:50.088916Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7992: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T15:05:50.088955Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-25T15:05:52.189763Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-25T15:05:52.189842Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 204.000000s, at schemeshard: 72075186224037897 2025-06-25T15:05:52.190007Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 51 2025-06-25T15:05:52.321848Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:05:52.322085Z node 2 :STATISTICS DEBUG: tx_analyze_deadline.cpp:46: [72075186224037894] TTxAnalyzeDeadline::Complete. Send TEvAnalyzeResponse for deleted operation, OperationId=operationId, ActorId=[1:3050:3301] 2025-06-25T15:05:52.322140Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T15:05:52.322470Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:05:52.322522Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:05:52.323151Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 >> DataStreams::TestGetRecordsStreamWithSingleShard [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneByTS >> DataStreams::TestUpdateStream >> DataStreams::TestControlPlaneAndMeteringData [GOOD] >> DataStreams::ChangeBetweenRetentionModes ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaInAggregate [GOOD] Test command err: 2025-06-25T15:03:18.335255Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:18.335596Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:18.335717Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ff4/r3tmp/tmp8gE4rS/pdisk_1.dat 2025-06-25T15:03:18.696151Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63662, node 1 2025-06-25T15:03:18.982913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:18.982968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:18.983000Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:18.983587Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:18.985570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:19.081599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:19.081723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:19.105789Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15117 2025-06-25T15:03:19.657565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:22.756456Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:22.793454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:22.793567Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:22.845940Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:22.849398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:23.085948Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:23.125836Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.126503Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.127048Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.127226Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.127488Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.127607Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.127708Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.127790Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.127865Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:23.328008Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:23.328140Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:23.341482Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:23.516086Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:23.567276Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:23.567403Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:23.626628Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:23.626828Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:23.627039Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:23.627098Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:23.627151Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:23.627234Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:23.627306Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:23.627359Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:23.627883Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:23.650513Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:23.650620Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:23.660346Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:23.666745Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:23.666999Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:23.676121Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:23.695181Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:23.695246Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:23.695328Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:23.707932Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:23.716832Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:23.716969Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:23.941253Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:24.069167Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:24.129719Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:24.722405Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:24.978135Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:24.978297Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:25.001008Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:25.295334Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2293:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:25.295560Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2293:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:25.295854Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2293:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:25.296011Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2293:2831];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:25.296154Z node 2 :TX_COLUMNSHARD WARN: ... 5186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:05:52.008759Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:05:53.052478Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:05:53.052593Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:05:53.052635Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:53.053178Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:05:53.066498Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:05:53.066933Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:05:53.067006Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:05:53.067448Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:05:53.080246Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:05:53.080431Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-25T15:05:53.081098Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10438:7798], server id = [2:10443:7803], tablet id = 72075186224037899, status = OK 2025-06-25T15:05:53.081199Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10438:7798], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:53.081352Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10439:7799], server id = [2:10444:7804], tablet id = 72075186224037900, status = OK 2025-06-25T15:05:53.081382Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10439:7799], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:53.082099Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10440:7800], server id = [2:10445:7805], tablet id = 72075186224037901, status = OK 2025-06-25T15:05:53.082144Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10440:7800], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:53.082780Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10441:7801], server id = [2:10446:7806], tablet id = 72075186224037902, status = OK 2025-06-25T15:05:53.082820Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10441:7801], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:53.083487Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10442:7802], server id = [2:10448:7808], tablet id = 72075186224037903, status = OK 2025-06-25T15:05:53.083534Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10442:7802], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:53.083953Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:05:53.085038Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10438:7798], server id = [2:10443:7803], tablet id = 72075186224037899 2025-06-25T15:05:53.085088Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:53.085651Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:05:53.086072Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T15:05:53.086731Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10439:7799], server id = [2:10444:7804], tablet id = 72075186224037900 2025-06-25T15:05:53.086765Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:53.087122Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10440:7800], server id = [2:10445:7805], tablet id = 72075186224037901 2025-06-25T15:05:53.087151Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 
2025-06-25T15:05:53.087531Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10451:7811], server id = [2:10455:7815], tablet id = 72075186224037904, status = OK 2025-06-25T15:05:53.087604Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10451:7811], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:53.087941Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T15:05:53.088225Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037903 2025-06-25T15:05:53.088978Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10454:7814], server id = [2:10457:7817], tablet id = 72075186224037905, status = OK 2025-06-25T15:05:53.089051Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10454:7814], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:53.089819Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10456:7816], server id = [2:10458:7818], tablet id = 72075186224037906, status = OK 2025-06-25T15:05:53.089856Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10456:7816], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:53.090101Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10441:7801], server id = [2:10446:7806], tablet id = 72075186224037902 2025-06-25T15:05:53.090118Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:53.090522Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10442:7802], server id = [2:10448:7808], tablet id = 72075186224037903 2025-06-25T15:05:53.090539Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:53.090582Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10459:7819], server id = [2:10462:7822], tablet id = 72075186224037907, status = OK 2025-06-25T15:05:53.090613Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10459:7819], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:53.090908Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037904 2025-06-25T15:05:53.091309Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10460:7820], server id = [2:10464:7824], tablet id = 72075186224037908, status = OK 2025-06-25T15:05:53.091346Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:10460:7820], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:53.091590Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-25T15:05:53.091980Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10451:7811], server id = [2:10455:7815], tablet id = 72075186224037904 2025-06-25T15:05:53.091998Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:53.092211Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037906 2025-06-25T15:05:53.092359Z node 2 :STATISTICS 
DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10454:7814], server id = [2:10457:7817], tablet id = 72075186224037905 2025-06-25T15:05:53.092376Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:53.092657Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037907 2025-06-25T15:05:53.092800Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10456:7816], server id = [2:10458:7818], tablet id = 72075186224037906 2025-06-25T15:05:53.092838Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:53.092928Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037908 2025-06-25T15:05:53.092967Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:05:53.093208Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:05:53.093380Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:05:53.093595Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:05:53.095935Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10459:7819], server id = [2:10462:7822], tablet id = 72075186224037907 2025-06-25T15:05:53.095959Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:53.096284Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:10460:7820], server id = [2:10464:7824], tablet id = 72075186224037908 2025-06-25T15:05:53.096300Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:53.096551Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:05:53.116596Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZWIzNjg2MzYtMTU2ZTgxNzAtODliZjUyZDctMTQ5OGIzY2E=, TxId: 2025-06-25T15:05:53.116659Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZWIzNjg2MzYtMTU2ZTgxNzAtODliZjUyZDctMTQ5OGIzY2E=, TxId: 2025-06-25T15:05:53.117151Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:53.130177Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:53.130245Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:5739:3821] >> DataStreams::TestStreamStorageRetention >> DataStreams::TestDeleteStreamWithEnforceFlag [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlagFalse >> DataStreams::TestPutRecordsWithRead [GOOD] >> DataStreams::TestPutRecordsCornerCases >> DataStreams::TestNonChargeableUser >> DataStreams::TestUpdateStream [GOOD] >> DataStreams::Test_AutoPartitioning_Describe >> DataStreams::ChangeBetweenRetentionModes [GOOD] >> DataStreams::TestCreateExistingStream >> DataStreams::TestStreamStorageRetention [GOOD] >> DataStreams::TestStreamPagination >> DataStreams::TestReservedResourcesMetering [GOOD] >> DataStreams::TestReservedStorageMetering >> AnalyzeColumnshard::AnalyzeRebootSaBeforeReqDistribution [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlagFalse [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo >> DataStreams::TestNonChargeableUser [GOOD] >> DataStreams::TestPutEmptyMessage ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeReqDistribution [GOOD] Test command err: 2025-06-25T15:03:26.417333Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:26.417696Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:26.417830Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fd0/r3tmp/tmpPFJfLC/pdisk_1.dat 2025-06-25T15:03:26.730713Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28239, node 1 2025-06-25T15:03:26.966845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:26.966894Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:26.966936Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:26.967486Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:26.973277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:27.065328Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:27.065473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:27.079859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23186 2025-06-25T15:03:27.646252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:30.787610Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:30.823853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:30.823986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:30.865817Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:30.873258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:31.092071Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:31.128740Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:31.129419Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:31.130000Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:31.130168Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:31.130409Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:31.130507Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:31.130619Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:31.130699Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:31.130768Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:31.345709Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:31.345848Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:31.383076Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:31.551680Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:31.604467Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:31.604556Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:31.635083Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:31.636518Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:31.636726Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:31.636783Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:31.636833Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:31.636896Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:31.636953Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:31.637004Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:31.637983Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:31.671068Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:31.671157Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:31.678451Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1805:2572] 2025-06-25T15:03:31.683249Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1825:2582] 2025-06-25T15:03:31.683740Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1825:2582], schemeshard id = 72075186224037897 2025-06-25T15:03:31.692797Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:31.712637Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:31.712697Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:31.712769Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:31.725389Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:31.740621Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:31.740743Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:31.930721Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:32.153605Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:32.203523Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:32.910942Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:33.140797Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2148:3024], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:33.140924Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:33.156806Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:33.301156Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2791];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:33.301332Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2791];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:33.301581Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2791];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:33.301672Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2791];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:33.301747Z node 2 :TX_COLUMNSHARD WARN: ... :216: [72075186224037894] Loaded ForceTraversalOperations: table count# 1 2025-06-25T15:05:55.876968Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 1 2025-06-25T15:05:55.877026Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:05:55.877149Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:55.877990Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:05:55.878050Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:05:55.878349Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:05:55.878404Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:05:55.878941Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:05:55.878996Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:05:55.880763Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:05:55.944274Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:05:55.944545Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:05:55.945249Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7491:5466], server id = [2:7492:5467], tablet id = 72075186224037899, status = OK 2025-06-25T15:05:55.945340Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7491:5466], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:55.948373Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:05:55.948507Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:05:55.948732Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:05:55.948969Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:05:55.949199Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:05:55.951679Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7491:5466], server id = [2:7492:5467], tablet id = 72075186224037899 2025-06-25T15:05:55.951713Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:55.952122Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:05:55.982325Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7512:5486]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:05:55.982508Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:05:55.982552Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7512:5486], StatRequests.size() = 1 2025-06-25T15:05:56.093467Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:04.000000Z, event interval end# 2025-06-25T15:05:54.000000Z 2025-06-25T15:05:56.093978Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTI4YjE2YzQtY2M0MGZiNDktMjdkOTg1OTAtY2QwZDhhYjY=, TxId: 2025-06-25T15:05:56.094026Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NTI4YjE2YzQtY2M0MGZiNDktMjdkOTg1OTAtY2QwZDhhYjY=, TxId: 2025-06-25T15:05:56.094465Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:56.107134Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] 
EvServerConnected, pipe server id = [2:7527:5492] 2025-06-25T15:05:56.107373Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7527:5492], schemeshard id = 72075186224037897 2025-06-25T15:05:56.107518Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7444:5435], server id = [2:7528:5493], tablet id = 72075186224037894, status = OK 2025-06-25T15:05:56.107607Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7528:5493] 2025-06-25T15:05:56.107694Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7528:5493], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-25T15:05:56.121440Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:56.121516Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T15:05:56.209749Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7539:5496] 2025-06-25T15:05:56.210387Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:3044:3299] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-25T15:05:56.210445Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3044:3299] 2025-06-25T15:05:56.210508Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-25T15:05:56.715922Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-25T15:05:56.715994Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T15:05:57.474592Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:05:57.474692Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:05:57.474731Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:05:58.754719Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:05:58.754831Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-25T15:05:58.754869Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:58.755427Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:05:58.767903Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:05:58.768199Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:05:58.768255Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:05:58.768609Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:05:58.781220Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:05:58.781390Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T15:05:58.781896Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7611:5539], server id = [2:7612:5540], tablet id = 72075186224037899, status = OK 2025-06-25T15:05:58.781996Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7611:5539], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:58.783108Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:05:58.783215Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:05:58.783353Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:05:58.783527Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:05:58.783753Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:05:58.786185Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7611:5539], server id = [2:7612:5540], tablet id = 72075186224037899 2025-06-25T15:05:58.786226Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:58.786656Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:05:58.806190Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MzA0MTE0MGItYzVmNjUxOTAtOTZlMjlhYjQtZmE4ZTgwMDY=, TxId: 2025-06-25T15:05:58.806251Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MzA0MTE0MGItYzVmNjUxOTAtOTZlMjlhYjQtZmE4ZTgwMDY=, TxId: 2025-06-25T15:05:58.806631Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:58.840705Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:58.840761Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3044:3299] >> TraverseColumnShard::TraverseColumnTableRebootColumnshard [GOOD] >> DataStreams::TestCreateExistingStream [GOOD] >> DataStreams::ListStreamsValidation >> DataStreams::TestStreamTimeRetention [GOOD] >> DataStreams::TestUnsupported >> DataStreams::Test_AutoPartitioning_Describe [GOOD] >> DataStreams::Test_Crreate_AutoPartitioning_Disabled ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootColumnshard [GOOD] Test command err: 2025-06-25T15:03:39.587410Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:39.587695Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:39.587792Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fa2/r3tmp/tmpjxTUO3/pdisk_1.dat 2025-06-25T15:03:39.960775Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61722, node 1 2025-06-25T15:03:40.238436Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:40.238489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:40.238517Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:40.239058Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:40.241328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:40.342713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:40.342869Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:40.359140Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9788 2025-06-25T15:03:40.931229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:44.066845Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:44.107306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:44.107438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:44.147127Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:44.149853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:44.382549Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:44.417982Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:44.418593Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:44.419204Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:44.419400Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:44.419634Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:44.419750Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:44.419852Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:44.419937Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:44.420009Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:44.612932Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:44.613037Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:44.626991Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:44.787964Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:44.844022Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:44.844151Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:44.881640Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:44.881851Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:44.882066Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:44.882125Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:44.882187Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:44.882263Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:44.882319Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:44.882379Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:44.882933Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:44.907697Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:44.907816Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:44.918055Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:44.927467Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:44.927743Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:44.938006Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:44.958267Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:44.958329Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:44.958407Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:44.971330Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:44.979903Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:44.980071Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:45.213593Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:45.362903Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:45.421257Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:46.009800Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:46.260978Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:46.261144Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:46.276677Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:46.428148Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:46.428467Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:46.428806Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:46.428925Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:46.429001Z node 2 :TX_COLUMNSHARD WARN: ... DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8254:6108]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:05:57.462989Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:05:57.463058Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8256:6110] 2025-06-25T15:05:57.463120Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8256:6110] 2025-06-25T15:05:57.463526Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8256:6110], server id = [2:8257:6111], tablet id = 72075186224037894, status = OK 2025-06-25T15:05:57.463591Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8257:6111] 2025-06-25T15:05:57.463672Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8257:6111], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:05:57.463732Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T15:05:57.463867Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:05:57.463953Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8254:6108], StatRequests.size() = 1 2025-06-25T15:05:57.571110Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZDhhMmQ1NTEtMTE2ODI2NDUtODUwNTRjNjEtYjJiNmJhNzQ=, TxId: 2025-06-25T15:05:57.571179Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZDhhMmQ1NTEtMTE2ODI2NDUtODUwNTRjNjEtYjJiNmJhNzQ=, TxId: 
2025-06-25T15:05:57.571618Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:57.585260Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:05:57.585334Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T15:05:57.628412Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:05:57.628479Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:05:57.702369Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8256:6110], schemeshard count = 1 2025-06-25T15:05:59.733479Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:05:59.733536Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T15:05:59.733572Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:05:59.733611Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:59.736843Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:05:59.752602Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:05:59.753159Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:05:59.753236Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:05:59.754092Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:05:59.767084Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:05:59.767264Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:05:59.768060Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8373:6170], server id = [2:8377:6174], tablet id = 72075186224037899, status = OK 2025-06-25T15:05:59.768532Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8373:6170], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:59.768845Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8374:6171], server id = [2:8378:6175], tablet id = 72075186224037900, status = OK 2025-06-25T15:05:59.768899Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8374:6171], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:59.769047Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8375:6172], server id = [2:8379:6176], tablet id = 72075186224037901, status = OK 2025-06-25T15:05:59.769093Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8375:6172], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:59.770924Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8376:6173], server id = [2:8380:6177], tablet id = 72075186224037902, status = OK 2025-06-25T15:05:59.770980Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8376:6173], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:05:59.776410Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:05:59.776880Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:05:59.777256Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8373:6170], server id = [2:8377:6174], tablet id = 72075186224037899 2025-06-25T15:05:59.777296Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:59.777800Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8374:6171], server id = [2:8378:6175], tablet id = 72075186224037900 2025-06-25T15:05:59.777829Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:59.777935Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T15:05:59.778164Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8375:6172], server id = [2:8379:6176], tablet id = 72075186224037901 2025-06-25T15:05:59.778192Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:59.778485Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T15:05:59.778530Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:05:59.778681Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-25T15:05:59.778849Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:05:59.779158Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:05:59.781285Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8376:6173], server id = [2:8380:6177], tablet id = 72075186224037902 2025-06-25T15:05:59.781317Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:05:59.781816Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:05:59.816198Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8409:6202]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:05:59.816485Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:05:59.816531Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8409:6202], StatRequests.size() = 1 2025-06-25T15:05:59.944271Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=Yzk0YjUxM2QtMjgxYmUzMTItOTVmNmY2NGUtZGMzNGNlNzA=, TxId: 2025-06-25T15:05:59.944354Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=Yzk0YjUxM2QtMjgxYmUzMTItOTVmNmY2NGUtZGMzNGNlNzA=, TxId: ... waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-25T15:05:59.945042Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:05:59.946280Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];ev=NActors::IEventHandle;fline=columnshard_impl.cpp:864;event=tablet_die; 2025-06-25T15:05:59.977120Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:05:59.977190Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T15:06:00.062244Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:8429:6212];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=18; 2025-06-25T15:06:00.303161Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8540:6306]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:00.303541Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:00.303597Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:06:00.307041Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:00.307110Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:06:00.307162Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:06:00.311375Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> DataStreams::TestStreamPagination [GOOD] >> DataStreams::TestShardPagination >> DataStreams::TestPutRecordsCornerCases [GOOD] >> DataStreams::TestPutRecords >> DataStreams::TestPutEmptyMessage [GOOD] >> DataStreams::TestListStreamConsumers >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest >> DataStreams::ListStreamsValidation [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnEmptyTopicName >> DataStreams::Test_Crreate_AutoPartitioning_Disabled [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::ListStreamsValidation [GOOD] Test command err: 2025-06-25T15:05:51.085068Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519903410826397821:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:51.085167Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016a0/r3tmp/tmplDf8Ja/pdisk_1.dat 2025-06-25T15:05:51.437234Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26301, 
node 1 2025-06-25T15:05:51.492852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:51.492936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:51.495601Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:05:51.532402Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:51.532436Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:51.532447Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:51.532544Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19991 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:51.913943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:51.982406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:05:52.091180Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19991 2025-06-25T15:05:52.102781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T15:05:53.164892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:05:53.323331Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037890:1][1:7519903419416334002:2332] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:6:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-25T15:05:53.447976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:53.572453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-25T15:05:53.586773Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037900 not found 2025-06-25T15:05:53.586806Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037903 not found 2025-06-25T15:05:53.586820Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037908 not found 2025-06-25T15:05:53.586826Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037902 not found 2025-06-25T15:05:53.586832Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037899 not found 2025-06-25T15:05:53.586838Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found 2025-06-25T15:05:53.586844Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037905 not found 2025-06-25T15:05:53.586865Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found 2025-06-25T15:05:53.586876Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037904 not found 2025-06-25T15:05:53.586882Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037901 not found 2025-06-25T15:05:53.586888Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037896 not 
found 2025-06-25T15:05:53.586894Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037907 not found 2025-06-25T15:05:53.586900Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037906 not found 2025-06-25T15:05:53.586911Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037894 not found 2025-06-25T15:05:53.587938Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found 2025-06-25T15:05:53.587948Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037895 not found 2025-06-25T15:05:54.777703Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519903424234337506:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:54.777753Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016a0/r3tmp/tmp5j4HNp/pdisk_1.dat 2025-06-25T15:05:54.870208Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:54.895804Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:54.895885Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:54.899573Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22496, node 4 2025-06-25T15:05:54.929152Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:54.929174Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:54.929181Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:54.929308Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28862 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:05:55.092597Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:55.129954Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:28862 2025-06-25T15:05:55.257328Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T15:05:55.266172Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-25T15:05:55.423505Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:55.476557Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:55.532371Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:58.041577Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519903441438251972:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:58.041633Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016a0/r3tmp/tmpOGYIum/pdisk_1.dat 2025-06-25T15:05:58.149229Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:58.164214Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-25T15:05:58.164271Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:58.167833Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21422, node 7 2025-06-25T15:05:58.199340Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:58.199357Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:58.199362Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:58.199481Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2698 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:58.398770Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:58.439496Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:2698 2025-06-25T15:05:58.629879Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T15:05:58.814622Z node 7 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [7:7519903441438254076:3474] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/stream_TestCreateExistingStream\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:06:01.548080Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519903453389034668:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:06:01.548186Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016a0/r3tmp/tmptM3L7A/pdisk_1.dat 2025-06-25T15:06:01.669634Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:01.684931Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:06:01.685010Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:06:01.691888Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27130, node 10 2025-06-25T15:06:01.727601Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:06:01.727629Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:06:01.727635Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:06:01.727804Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12192 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:06:01.947108Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:06:01.996436Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:12192 2025-06-25T15:06:02.197575Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... >> DataStreams::TestUnsupported [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnEmptyTopicName [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnEmptyTopicName [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "empty topic in GetTopicMetadata request" ErrorCode: BAD_REQUEST } >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> DataStreams::TestShardPagination [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt64 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestUnsupported [GOOD] Test command err: 2025-06-25T15:05:51.085075Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519903412423898952:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:51.085224Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016a8/r3tmp/tmpKgwKtQ/pdisk_1.dat 2025-06-25T15:05:51.375861Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9722, node 1 2025-06-25T15:05:51.435931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:51.436008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:51.438147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:05:51.532529Z node 
1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:51.532552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:51.532558Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:51.532692Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10281 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:51.956398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:52.001395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:05:52.092629Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:10281 2025-06-25T15:05:52.179210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T15:05:52.326427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:54.146378Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519903424399557050:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:54.146468Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016a8/r3tmp/tmpPnzEFq/pdisk_1.dat 2025-06-25T15:05:54.232646Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:54.249122Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:54.249196Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:54.251197Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12657, node 4 2025-06-25T15:05:54.288715Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:54.288739Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:54.288747Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:54.288891Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4412 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:54.463668Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:05:54.507505Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:4412 2025-06-25T15:05:54.639183Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T15:05:54.782722Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:54.810065Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "16" shard_id: "shard-000000" } records { sequence_number: "17" shard_id: "shard-000000" } records { sequence_number: "18" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000000" } records { sequence_number: "20" shard_id: "shard-000000" } records { sequence_number: "21" shard_id: "shard-000000" } records { sequence_number: "22" shard_id: "shard-000000" } records { sequence_number: "23" shard_id: "shard-000000" } records { sequence_number: "24" shard_id: "shard-000000" } records { sequence_number: "25" shard_id: "shard-000000" } records { sequence_number: "26" shard_id: "shard-000000" } records { sequence_number: "27" shard_id: "shard-000000" } records { sequence_number: "28" shard_id: "shard-000000" } records { sequence_number: "29" shard_id: "shard-000000" } 2025-06-25T15:05:55.151121Z node 4 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; encryption_type: NONE records { sequence_number: "30" shard_id: "shard-000000" } records { sequence_number: "31" shard_id: "shard-000000 ... "81" shard_id: "shard-000000" } records { sequence_number: "82" shard_id: "shard-000000" } records { sequence_number: "83" shard_id: "shard-000000" } records { sequence_number: "84" shard_id: "shard-000000" } records { sequence_number: "85" shard_id: "shard-000000" } records { sequence_number: "86" shard_id: "shard-000000" } records { sequence_number: "87" shard_id: "shard-000000" } records { sequence_number: "88" shard_id: "shard-000000" } records { sequence_number: "89" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "90" shard_id: "shard-000000" } records { sequence_number: "91" shard_id: "shard-000000" } records { sequence_number: "92" shard_id: "shard-000000" } records { sequence_number: "93" shard_id: "shard-000000" } records { sequence_number: "94" shard_id: "shard-000000" } records { sequence_number: "95" shard_id: "shard-000000" } records { sequence_number: "96" shard_id: "shard-000000" } records { sequence_number: "97" shard_id: "shard-000000" } records { sequence_number: "98" shard_id: "shard-000000" } records { sequence_number: "99" shard_id: "shard-000000" } records { sequence_number: "100" shard_id: "shard-000000" } records { sequence_number: "101" shard_id: "shard-000000" } records { sequence_number: "102" shard_id: "shard-000000" } records { sequence_number: "103" shard_id: "shard-000000" } records { sequence_number: "104" shard_id: "shard-000000" } records { sequence_number: "105" shard_id: "shard-000000" } records { sequence_number: "106" shard_id: "shard-000000" } records { sequence_number: "107" shard_id: "shard-000000" } records { sequence_number: "108" shard_id: "shard-000000" } records { sequence_number: "109" shard_id: "shard-000000" } records { sequence_number: "110" shard_id: "shard-000000" } records { sequence_number: "111" shard_id: "shard-000000" } records { sequence_number: "112" shard_id: "shard-000000" } records { sequence_number: "113" shard_id: "shard-000000" } records { sequence_number: "114" shard_id: "shard-000000" } records { sequence_number: "115" shard_id: "shard-000000" } records { sequence_number: "116" shard_id: "shard-000000" } records { sequence_number: "117" shard_id: "shard-000000" } records { sequence_number: "118" shard_id: "shard-000000" } records { sequence_number: "119" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "120" shard_id: "shard-000000" } records { sequence_number: "121" shard_id: "shard-000000" } records { sequence_number: "122" shard_id: "shard-000000" } records { sequence_number: "123" shard_id: "shard-000000" } records { sequence_number: "124" shard_id: "shard-000000" } records { sequence_number: "125" shard_id: "shard-000000" } records { sequence_number: "126" shard_id: "shard-000000" } records { sequence_number: "127" shard_id: "shard-000000" } records { sequence_number: "128" shard_id: "shard-000000" } records { sequence_number: "129" shard_id: "shard-000000" } records { sequence_number: "130" shard_id: "shard-000000" } records { sequence_number: "131" shard_id: "shard-000000" } records { sequence_number: "132" shard_id: "shard-000000" } records { sequence_number: "133" shard_id: "shard-000000" } records { sequence_number: "134" shard_id: "shard-000000" } records { sequence_number: "135" shard_id: 
"shard-000000" } records { sequence_number: "136" shard_id: "shard-000000" } records { sequence_number: "137" shard_id: "shard-000000" } records { sequence_number: "138" shard_id: "shard-000000" } records { sequence_number: "139" shard_id: "shard-000000" } records { sequence_number: "140" shard_id: "shard-000000" } records { sequence_number: "141" shard_id: "shard-000000" } records { sequence_number: "142" shard_id: "shard-000000" } records { sequence_number: "143" shard_id: "shard-000000" } records { sequence_number: "144" shard_id: "shard-000000" } records { sequence_number: "145" shard_id: "shard-000000" } records { sequence_number: "146" shard_id: "shard-000000" } records { sequence_number: "147" shard_id: "shard-000000" } records { sequence_number: "148" shard_id: "shard-000000" } records { sequence_number: "149" shard_id: "shard-000000" } 2025-06-25T15:05:59.146748Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519903424399557050:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:59.146815Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1750863954754-2","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1750863954,"finish":1750863954},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863954}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1750863954796-3","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1750863954,"finish":1750863954},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863954}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1750863954825-4","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750863954,"finish":1750863955},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863955}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1750863955846-5","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750863955,"finish":1750863956},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863956}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1750863956857-6","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750863956,"finish":1750863957},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863957}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1750863957867-7","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750863957,"finish":1750863958},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863958}' 2025-06-25T15:06:02.253591Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519903460545208864:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:06:02.253659Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016a8/r3tmp/tmpe0s7PE/pdisk_1.dat 2025-06-25T15:06:02.390166Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:02.406341Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:06:02.406428Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:06:02.412869Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15116, node 7 2025-06-25T15:06:02.460463Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:06:02.460486Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:06:02.460517Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:06:02.460669Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14633 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:06:02.723992Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:06:02.786109Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:14633 2025-06-25T15:06:02.930234Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::Test_Crreate_AutoPartitioning_Disabled [GOOD] Test command err: 2025-06-25T15:05:54.761923Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519903424742802410:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:54.761994Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00169a/r3tmp/tmpiKzYtH/pdisk_1.dat 2025-06-25T15:05:55.026762Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7400, node 1 2025-06-25T15:05:55.060365Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:55.060394Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:55.060403Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:55.060523Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:05:55.105826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:55.105916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:55.108340Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2755 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:55.302863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:05:55.367662Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:2755 2025-06-25T15:05:55.537078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T15:05:55.747640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:55.768723Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:05:55.896960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:58.007971Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519903439541243934:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:58.008036Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00169a/r3tmp/tmp7dSEhR/pdisk_1.dat 2025-06-25T15:05:58.107065Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:58.115100Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:58.115154Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:58.116737Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29126, node 4 2025-06-25T15:05:58.161444Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:58.161479Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:58.161486Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:58.161633Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:15707 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:58.345802Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:58.383164Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:15707 2025-06-25T15:05:58.526562Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "0" shard_id: "shard-000002" } records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000002" } records { sequence_number: "2" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000002" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000002" } records { sequence_number: "5" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000002" } records { sequence_number: "5" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000002" } records { sequence_number: "8" shard_id: "shard-000002" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000002" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000002" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000002" } records { sequence_number: "7" shard_id: "shard-000001" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } 2025-06-25T15:05:59.014973Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ALTER_SCHEME: { Name: "test-topic" Split { Partition: 1 SplitBoundary: "a" } } 2025-06-25T15:05:59.665248Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 107:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:06:00.794525Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:06:00.853053Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:06:00.944261Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB 
first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:06:01.135285Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:06:02.525619Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519903456568975509:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:06:02.525710Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00169a/r3tmp/tmpR3zrWz/pdisk_1.dat 2025-06-25T15:06:02.621718Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:02.635950Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:06:02.636013Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:06:02.639490Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6507, node 7 2025-06-25T15:06:02.678419Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:06:02.678440Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:06:02.678446Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:06:02.678546Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64340 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:06:02.874344Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:06:02.913683Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:64340 2025-06-25T15:06:03.055707Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] >> DataStreams::TestPutRecords [GOOD] >> DataStreams::TestListStreamConsumers [GOOD] >> DataStreams::TestListShards1Shard >> TColumnShardTestReadWrite::CompactionInGranule_PKTimestamp_Reboot [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> DataStreams::TestReservedStorageMetering [GOOD] >> DataStreams::TestReservedConsumersMetering >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeReqDistribution [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedTopicName ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt64 [GOOD] Test command err: 2025-06-25T15:05:14.234084Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:14.253083Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:14.253258Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:14.258367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:14.258500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:14.258679Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:14.258749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:14.258802Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:14.258853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:14.258921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:14.258978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:14.259048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:14.259105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:14.259163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:14.275516Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:14.275633Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:14.275674Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:14.275790Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:14.275908Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:14.275962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:14.275989Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:14.276073Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:14.276111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:14.276162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:14.276199Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:14.276318Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:14.276365Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:14.276390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:14.276409Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:14.276461Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:14.276489Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:14.276511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:14.276525Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:14.276557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:14.276586Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:14.276604Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:14.276723Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:14.276746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:14.276761Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:14.276859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:14.276889Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:14.276911Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:14.276991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:14.277018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:14.277032Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:14.277077Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:14.277115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:14.277136Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:14.277154Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:14.277334Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=42; 2025-06-25T15:05:14.277417Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=40; 2025-06-25T15:05:14.277523Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=43; 2025-06-25T15:05:14.277602Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=39; 2025-06-25T15:05:14.277709Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:14.277775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:14.277803Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:14.277836Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... d:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:7272];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:8656];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9408];;column_id:10
;chunk_idx:34;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:45;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9424];;;;switched=(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7504840;index_size:0;meta:(()););(portion_id:218;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7503120;index_size:0;meta:(()););(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7504840;index_size:0;meta:(()););; 2025-06-25T15:06:05.099522Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:5973:7960];task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=3; 2025-06-25T15:06:05.101029Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-25T15:06:05.103907Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-25T15:06:05.203036Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-25T15:06:05.203134Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7503120;count=812;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-25T15:06:05.733445Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-25T15:06:05.733548Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=74999,74999,74999,74999,; 2025-06-25T15:06:05.733591Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7369450;count=1;packed=7504840; 2025-06-25T15:06:05.733644Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=95658;count=1749; 2025-06-25T15:06:05.733694Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=26198;data_size=26188;sum=2562418;count=1750;size_of_meta=136; 2025-06-25T15:06:05.733739Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=26270;data_size=26260;sum=2625418;count=875;size_of_portion=208; 2025-06-25T15:06:05.734141Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-25T15:06:05.801652Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 666 2025-06-25T15:06:05.804881Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=37548672;raw_bytes=36867050;count=5;records=375200} inactive {blob_bytes=110272840;raw_bytes=107127800;count=216;records=1200200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:06:06.152042Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-25T15:06:06.152092Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;fline=with_appended.cpp:65;portions=222,;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50; 2025-06-25T15:06:06.152610Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::e8716bf8-51d511f0-aaa56bfd-31f5df50; 2025-06-25T15:06:06.152663Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:22538992;portions_count:222;); 2025-06-25T15:06:06.152700Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:06.152762Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:06:06.152809Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863617756;tx_id=18446744073709551615;;current_snapshot_ts=1750863915827; 2025-06-25T15:06:06.152838Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:06.152869Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:06.152897Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:06.152955Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.873000s; 2025-06-25T15:06:06.152990Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=e8716bf8-51d511f0-aaa56bfd-31f5df50;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:06:06.153106Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 666 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } 2025-06-25T15:06:05.468139Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:05.476595Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:05.476888Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928037] doesn't have tx info 2025-06-25T15:06:05.476933Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:05.476961Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-25T15:06:05.477005Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:05.477046Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:05.477110Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-25T15:06:05.478752Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:266:2256], now have 1 active actors on pipe 2025-06-25T15:06:05.478909Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:05.496383Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:05.499838Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:05.499934Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:05.505102Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928037] Config applied version 1 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:05.505245Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:05.506191Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:05.507323Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:274:2262] 2025-06-25T15:06:05.509657Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-06-25T15:06:05.510497Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:274:2262] 2025-06-25T15:06:05.510545Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:05.510584Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:05.514021Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T15:06:05.514408Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:277:2264], now have 1 active actors on pipe 2025-06-25T15:06:05.551116Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:05.554063Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928137] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:05.554315Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928137] doesn't have tx info 2025-06-25T15:06:05.554357Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:05.554393Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-06-25T15:06:05.554430Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:05.554474Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:05.554525Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928137] doesn't have tx writes info 2025-06-25T15:06:05.555128Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928137] server connected, pipe [2:410:2363], now have 1 active actors on pipe 2025-06-25T15:06:05.555228Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:05.555403Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:05.557391Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:05.557499Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:05.558271Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: 
[PQ: 72057594037928137] Config applied version 2 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:05.558369Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:05.558644Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:05.558831Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [2:418:2369] 2025-06-25T15:06:05.560707Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 2025-06-25T15:06:05.560772Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [2:418:2369] 2025-06-25T15:06:05.560820Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:05.560867Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:05.561065Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928137, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T15:06:05.561526Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928137] server connected, pipe [2:421:2371], now have 1 active actors on pipe 2025-06-25T15:06:05.573852Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:05.577051Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928138] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:05.577310Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928138] doesn't have tx info 2025-06-25T15:06:05.577355Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:05.577396Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-25T15:06:05.577431Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:05.577478Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:05.577527Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-25T15:06:05.578123Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [2:470:2408], now have 1 active actors on pipe 2025-06-25T15:06:05.578214Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:05.578355Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:05.579936Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:05.580037Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:05.580583Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928138] Config applied version 3 actor [2 ... 
me: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:06.635393Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:06.635796Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928138] Config applied version 11 actor [4:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:06.635874Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:06.636122Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:06.636320Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [4:477:2413] 2025-06-25T15:06:06.637599Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 2025-06-25T15:06:06.637650Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [4:477:2413] 2025-06-25T15:06:06.637693Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:06.637728Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:06.637903Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928138, Partition: 1, State: StateIdle] no data for compaction 2025-06-25T15:06:06.638223Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [4:480:2415], now have 1 active actors on pipe 2025-06-25T15:06:06.647578Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:06.649701Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:06.649885Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:06.649921Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:06.649949Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-25T15:06:06.649988Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:06.650024Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:06.650068Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:06.650500Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [4:529:2452], now have 1 active actors on pipe 2025-06-25T15:06:06.650567Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:06.650695Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928139] Config update version 12(current 0) received from actor [4:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:06.652048Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:06.652165Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:06.652634Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928139] Config applied version 12 actor [4:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:06.652718Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:06.652953Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:06.653084Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:537:2458] 2025-06-25T15:06:06.654351Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
2025-06-25T15:06:06.654395Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [4:537:2458] 2025-06-25T15:06:06.654437Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:06.654472Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:06.654627Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:06.654943Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [4:540:2460], now have 1 active actors on pipe 2025-06-25T15:06:06.655683Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [4:546:2463], now have 1 active actors on pipe 2025-06-25T15:06:06.655784Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [4:547:2464], now have 1 active actors on pipe 2025-06-25T15:06:06.655934Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [4:548:2464], now have 1 active actors on pipe 2025-06-25T15:06:06.666754Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [4:556:2471], now have 1 active actors on pipe 2025-06-25T15:06:06.681109Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:06.682515Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:06.682748Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:06.682793Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:06.682925Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:06.683290Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:06.683333Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:06.683409Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:06.683598Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:06.683721Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:613:2516] 2025-06-25T15:06:06.685017Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-25T15:06:06.685735Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-25T15:06:06.685918Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-25T15:06:06.686140Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-25T15:06:06.686286Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-25T15:06:06.686327Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T15:06:06.686370Z node 4 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T15:06:06.686407Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-25T15:06:06.686449Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [4:613:2516] 2025-06-25T15:06:06.686499Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:06.686535Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-25T15:06:06.686675Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:06.687087Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928138] server disconnected, pipe [4:547:2464] destroyed 2025-06-25T15:06:06.687138Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928037] server disconnected, pipe [4:546:2463] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionOffsetsResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 StartOffset: 0 EndOffset: 0 ErrorCode: OK WriteTimestampEstimateMS: 0 } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 1 StartOffset: 0 EndOffset: 0 ErrorCode: OK WriteTimestampEstimateMS: 0 } PartitionResult { Partition: 2 ErrorCode: INITIALIZING ErrorReason: "partition is not ready yet" } ErrorCode: OK } } } >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesTimeout ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestShardPagination [GOOD] Test command err: 2025-06-25T15:05:55.841687Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519903429803498893:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:55.841881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001699/r3tmp/tmpnxGr39/pdisk_1.dat 2025-06-25T15:05:56.157185Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4824, node 1 2025-06-25T15:05:56.177805Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T15:05:56.177836Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T15:05:56.197603Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:56.197672Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:56.200036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:05:56.200663Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:56.200700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:56.200708Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:56.200831Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7623 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:56.454919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:56.503587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:7623 2025-06-25T15:05:56.667123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting...
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 168, storage 40960, code: 500080 2025-06-25T15:05:56.849034Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:05:56.945444Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519903434098468292:3465] txid# 281474976710661, issues: { message: "Check failed: path: \'/Root/stream_TestStreamStorageRetention\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypePersQueueGroup, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:05:59.125195Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519903445604852910:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:59.125238Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001699/r3tmp/tmpsW4WuU/pdisk_1.dat 2025-06-25T15:05:59.208918Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:59.226796Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:59.226850Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 5174, node 4 2025-06-25T15:05:59.244447Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:05:59.257826Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:59.257852Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:59.257860Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:59.257981Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12323 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:05:59.439082Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:59.485282Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:12323 2025-06-25T15:05:59.603520Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T15:06:00.130396Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:06:03.415968Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519903463328072187:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:06:03.416025Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001699/r3tmp/tmpOulPN7/pdisk_1.dat 2025-06-25T15:06:03.517576Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:03.529888Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:06:03.529943Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:06:03.533195Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62217, node 7 2025-06-25T15:06:03.575145Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:06:03.575169Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:06:03.575178Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:06:03.575322Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24559 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:06:03.809213Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:06:03.859421Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:24559 2025-06-25T15:06:04.048270Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestPutRecords [GOOD] Test command err: 2025-06-25T15:05:51.097318Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519903409350574908:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:51.097541Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016c5/r3tmp/tmpvOUM4B/pdisk_1.dat 2025-06-25T15:05:51.370312Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13581, node 1 2025-06-25T15:05:51.437828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:51.437927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:51.440265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:05:51.532612Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:51.532657Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:51.532668Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:51.532787Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28960 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:51.960787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:05:52.024996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:05:52.104542Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:28960 2025-06-25T15:05:52.159450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T15:05:54.354747Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519903424224017342:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:54.354794Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016c5/r3tmp/tmpOLZZgE/pdisk_1.dat 2025-06-25T15:05:54.435378Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:54.455936Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:54.456013Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:54.462845Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21320, node 4 2025-06-25T15:05:54.490421Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:54.490445Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:54.490451Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:54.490592Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16201 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:54.687184Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:54.723990Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:16201 2025-06-25T15:05:54.869054Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T15:05:55.011497Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:55.056980Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) encryption_type: NONE sequence_number: "0" shard_id: "shard-000000" encryption_type: NONE records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000002" } records { sequence_number: "4" shard_id: "shard-000002" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000003" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000003" } records { sequence_number: "5" shard_id: "shard-000002" } records { sequence_number: "6" shard_id: "shard-000002" } records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000003" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000002" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } 2025-06-25T15:05:55.092422Z :INFO: [/Root/] [/Root/] [839200e3-cd916513-18b065f9-8c657e08] Starting read session 2025-06-25T15:05:55.093528Z :DEBUG: [/Root/] [/Root/] [839200e3-cd916513-18b065f9-8c657e08] Starting session to cluster null (localhost:21320) 2025-06-25T15:05:55.097941Z :DEBUG: [/Root/] [/Root/] [839200e3-cd916513-18b065f9-8c657e08] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:05:55.097972Z :DEBUG: [/Root/] [/Root/] [839200e3-cd916513-18b065f9-8c657e08] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:05:55.097999Z :DEBUG: [/Root/] [/Root/] [839200e3-cd916513-18b065f9-8c657e08] [null] Reconnec ... ) 2025-06-25T15:06:02.429849Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (6-6) 2025-06-25T15:06:02.429944Z :DEBUG: [/Root/] Decompression task done. 
Partition/PartitionSessionId: 1 (7-7) 2025-06-25T15:06:02.429961Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (8-8) 2025-06-25T15:06:02.430184Z :DEBUG: [/Root/] Take Data. Partition 4. Read: {0, 0} (0-0) 2025-06-25T15:06:02.430212Z :DEBUG: [/Root/] Take Data. Partition 4. Read: {0, 1} (1-1) 2025-06-25T15:06:02.430227Z :DEBUG: [/Root/] [/Root/] [2719c27e-b91c04e7-db5488c-c11c39c1] [null] The application data is transferred to the client. Number of messages 2, size 0 bytes 2025-06-25T15:06:02.431201Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {2, 0} (2-2) 2025-06-25T15:06:02.434858Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {3, 0} (3-3) 2025-06-25T15:06:02.439734Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {4, 0} (4-4) 2025-06-25T15:06:02.440641Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {5, 0} (5-5) 2025-06-25T15:06:02.445612Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {6, 0} (6-6) 2025-06-25T15:06:02.446614Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {7, 0} (7-7) 2025-06-25T15:06:02.447696Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {8, 0} (8-8) 2025-06-25T15:06:02.447769Z :DEBUG: [/Root/] [/Root/] [2719c27e-b91c04e7-db5488c-c11c39c1] [null] The application data is transferred to the client. Number of messages 7, size 7340032 bytes 2025-06-25T15:06:02.448047Z :DEBUG: [/Root/] [/Root/] [2719c27e-b91c04e7-db5488c-c11c39c1] [null] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:06:02.448209Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 3 (3-3) 2025-06-25T15:06:02.448209Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 3 (0-2) 2025-06-25T15:06:02.452210Z :DEBUG: [/Root/] Take Data. Partition 3. Read: {0, 0} (0-0) 2025-06-25T15:06:02.452241Z :DEBUG: [/Root/] Take Data. Partition 3. Read: {1, 0} (1-1) 2025-06-25T15:06:02.452922Z :DEBUG: [/Root/] Take Data. Partition 3. Read: {2, 0} (2-2) 2025-06-25T15:06:02.452949Z :DEBUG: [/Root/] Take Data. Partition 3. Read: {3, 0} (3-3) 2025-06-25T15:06:02.452976Z :DEBUG: [/Root/] [/Root/] [2719c27e-b91c04e7-db5488c-c11c39c1] [null] The application data is transferred to the client. Number of messages 4, size 1049088 bytes 2025-06-25T15:06:02.453481Z :INFO: [/Root/] [/Root/] [2719c27e-b91c04e7-db5488c-c11c39c1] Closing read session. Close timeout: 0.000000s 2025-06-25T15:06:02.453562Z :INFO: [/Root/] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): null:stream_TestPutRecordsCornerCases:3:5:3:0 null:stream_TestPutRecordsCornerCases:1:4:8:0 null:stream_TestPutRecordsCornerCases:4:3:1:0 null:stream_TestPutRecordsCornerCases:0:2:1:0 null:stream_TestPutRecordsCornerCases:2:1:0:0 2025-06-25T15:06:02.453614Z :INFO: [/Root/] [/Root/] [2719c27e-b91c04e7-db5488c-c11c39c1] Counters: { Errors: 0 CurrentSessionLifetimeMs: 112 BytesRead: 9437699 MessagesRead: 17 BytesReadCompressed: 9437699 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T15:06:02.453594Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7519903439195477192:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:06:02.453719Z :NOTICE: [/Root/] [/Root/] [2719c27e-b91c04e7-db5488c-c11c39c1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T15:06:02.453680Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:06:02.453769Z :DEBUG: [/Root/] [/Root/] [2719c27e-b91c04e7-db5488c-c11c39c1] [null] Abort session to cluster 2025-06-25T15:06:02.454490Z :NOTICE: [/Root/] [/Root/] [2719c27e-b91c04e7-db5488c-c11c39c1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T15:06:02.454573Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer user1 session user1_7_1_3059411088693114775_v1 grpc read failed 2025-06-25T15:06:02.454606Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:1645: session cookie 1 consumer user1 session user1_7_1_3059411088693114775_v1 closed 2025-06-25T15:06:02.454961Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer user1 session user1_7_1_3059411088693114775_v1 is DEAD 2025-06-25T15:06:03.600970Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519903462767746356:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:06:03.601039Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016c5/r3tmp/tmpL3V0sx/pdisk_1.dat 2025-06-25T15:06:03.719382Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:03.735858Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:06:03.735921Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:06:03.740361Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8859, node 10 2025-06-25T15:06:03.790463Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:06:03.790488Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:06:03.790495Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:06:03.790653Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22529 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:06:04.102596Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:06:04.203326Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:22529 2025-06-25T15:06:04.374568Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T15:06:04.607641Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:06:04.615799Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101)
: Error: Access for stream /Root/stream_TestPutRecords is denied for subject user2@builtin, code: 500018 2025-06-25T15:06:04.701296Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) PutRecordsResponse = encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000002" } records { sequence_number: "4" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000003" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000003" } records { sequence_number: "5" shard_id: "shard-000002" } records { sequence_number: "6" shard_id: "shard-000002" } records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000003" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000002" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } PutRecord response = encryption_type: NONE sequence_number: "7" shard_id: "shard-000004" >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedPartition >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::SuccessfullyPassesResponsesFromTablets >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKTimestamp_Reboot [GOOD] Test command err: 2025-06-25T15:04:45.696282Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:45.716160Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:45.716379Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:45.721594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:45.721749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:45.721918Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:45.721984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:45.722044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:45.722098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:45.722162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:45.722246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:45.722309Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:45.722399Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:45.722455Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:45.742242Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:45.742417Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:45.742472Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:45.742653Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:45.742845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:45.742916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:45.742960Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:45.743041Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:45.743096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:45.743145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:45.743187Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:45.743356Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:45.743447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:45.743490Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:45.743520Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:45.743605Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:45.743660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:45.743721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:45.743751Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:45.743801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:45.743834Z node 
1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:45.743864Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:45.744058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:45.744098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:45.744125Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:45.744299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:45.744367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:45.744399Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:45.744550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:45.744593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:45.744632Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:45.744699Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:45.744762Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:45.744816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:45.744849Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:45.745045Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=58; 2025-06-25T15:04:45.745131Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=41; 2025-06-25T15:04:45.745226Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=55; 2025-06-25T15:04:45.745326Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=37; 2025-06-25T15:04:45.745412Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:45.745482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:45.745521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:45.745576Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... tions;fline=constructor_meta.cpp:71;memory_size=26214;data_size=26188;sum=13173016;count=14328;size_of_meta=136; 2025-06-25T15:06:05.972970Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=26286;data_size=26260;sum=13688824;count=7164;size_of_portion=208; 2025-06-25T15:06:05.973632Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=65663; 2025-06-25T15:06:05.973709Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=12; 2025-06-25T15:06:05.975122Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1365; 2025-06-25T15:06:05.975160Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=67298; 2025-06-25T15:06:05.975198Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=67390; 2025-06-25T15:06:05.975254Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=10; 2025-06-25T15:06:05.975876Z node 
1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=569; 2025-06-25T15:06:05.975912Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=68436; 2025-06-25T15:06:05.976032Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=78; 2025-06-25T15:06:05.976114Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=47; 2025-06-25T15:06:05.976353Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=207; 2025-06-25T15:06:05.976521Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=135; 2025-06-25T15:06:05.990891Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=14317; 2025-06-25T15:06:06.011118Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=20123; 2025-06-25T15:06:06.011244Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=15; 2025-06-25T15:06:06.011317Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=13; 2025-06-25T15:06:06.011364Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=8; 2025-06-25T15:06:06.011450Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=49; 2025-06-25T15:06:06.011503Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=8; 2025-06-25T15:06:06.011611Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=62; 2025-06-25T15:06:06.011667Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=7; 2025-06-25T15:06:06.011723Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=27; 2025-06-25T15:06:06.011791Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=40; 2025-06-25T15:06:06.011851Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=33; 2025-06-25T15:06:06.011878Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=109130; 2025-06-25T15:06:06.012028Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=22538992;raw_bytes=22128150;count=3;records=225200} inactive {blob_bytes=147791880;raw_bytes=143975050;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:06:06.012135Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:06:06.012177Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:06:06.012230Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:06:06.012263Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:06:06.012419Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:06.012476Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:06:06.012529Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863591298;tx_id=18446744073709551615;;current_snapshot_ts=1750863887501; 2025-06-25T15:06:06.012561Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:06.012595Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:06.012624Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:06.012692Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:06:06.017535Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:06:06.017878Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:06:06.017910Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:06:06.017930Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:06:06.017963Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:06.018024Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:06:06.018075Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863591298;tx_id=18446744073709551615;;current_snapshot_ts=1750863887501; 2025-06-25T15:06:06.018110Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:06.018144Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:06.018173Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:06.018228Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=1.000000s; 2025-06-25T15:06:06.018267Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailesOnNotATopic >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnEmptyTopicName ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeReqDistribution [GOOD] Test command err: 2025-06-25T15:03:25.658392Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:25.658751Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:25.658851Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fde/r3tmp/tmpEvqegG/pdisk_1.dat 2025-06-25T15:03:26.011516Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26947, node 1 2025-06-25T15:03:26.236715Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:26.236757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:26.236777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:26.237190Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:26.238975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:26.340351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:26.340467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:26.359145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2574 2025-06-25T15:03:26.896806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:30.097919Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:30.132961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:30.133092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:30.175708Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:30.178942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:30.398052Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:30.434269Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.434912Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.435803Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.435943Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.436205Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.436303Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.436435Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.436532Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.436643Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:30.631466Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:30.631558Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:30.652047Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:30.810450Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:30.861615Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:30.861747Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:30.901923Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:30.902120Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:30.902319Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:30.902372Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:30.902419Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:30.902488Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:30.902541Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:30.902588Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:30.903046Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:30.926350Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:30.926462Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:30.936062Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:30.945567Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:30.945861Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:30.956363Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:30.974200Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:30.974263Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:30.974331Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:30.985570Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:30.992755Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:30.992875Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:31.188961Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:31.358412Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:31.467566Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:32.045156Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:32.336189Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:32.336372Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:32.358756Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:32.544497Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:32.544728Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:32.545048Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:32.545222Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:32.545360Z node 2 :TX_COLUMNSHARD WARN: ... 25-06-25T15:06:06.175102Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8233:6090], server id = [2:8234:6091], tablet id = 72075186224037894 2025-06-25T15:06:06.175178Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8348:6148] 2025-06-25T15:06:06.175217Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8348:6148] 2025-06-25T15:06:06.175351Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:8349:6149], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:06:06.203859Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:06:06.203936Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:06:06.204365Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:06:06.204963Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:06:06.205204Z node 2 :STATISTICS DEBUG: tx_init.cpp:55: [72075186224037894] Loaded database: /Root/Database 2025-06-25T15:06:06.205238Z node 2 :STATISTICS DEBUG: tx_init.cpp:59: [72075186224037894] Loaded traversal start key 2025-06-25T15:06:06.205270Z node 2 :STATISTICS DEBUG: tx_init.cpp:64: [72075186224037894] Loaded traversal table owner id: 72075186224037897 2025-06-25T15:06:06.205298Z node 2 :STATISTICS DEBUG: tx_init.cpp:69: [72075186224037894] Loaded traversal table local path id: 4 2025-06-25T15:06:06.205323Z node 2 :STATISTICS DEBUG: tx_init.cpp:74: [72075186224037894] Loaded traversal start time: 1750863966144815 2025-06-25T15:06:06.205346Z node 2 :STATISTICS DEBUG: tx_init.cpp:79: [72075186224037894] Loaded traversal IsColumnTable: 1 2025-06-25T15:06:06.205405Z node 2 :STATISTICS DEBUG: 
tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 1 2025-06-25T15:06:06.205446Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:06:06.205504Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-25T15:06:06.205550Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:06:06.205592Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:06:06.205631Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:06:06.205726Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:06.206220Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:06:06.206881Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:06.206933Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:06.207007Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:06:06.207862Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:06.207914Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:06.209024Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:06:06.271877Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:06.272069Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:06.272703Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8396:6180], server id = [2:8400:6184], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:06.273044Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8396:6180], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:06.274203Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8397:6181], server id = [2:8401:6185], tablet id = 72075186224037900, status = OK 2025-06-25T15:06:06.274267Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8397:6181], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:06.274384Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8398:6182], server id = [2:8402:6186], tablet id = 72075186224037901, status = OK 2025-06-25T15:06:06.274421Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8398:6182], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:06.275148Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8399:6183], server id = [2:8403:6187], tablet id = 72075186224037902, status = OK 2025-06-25T15:06:06.275197Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8399:6183], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:06.279753Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:06.280305Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8396:6180], server id = [2:8400:6184], tablet id = 72075186224037899 2025-06-25T15:06:06.280362Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:06.280596Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:06:06.281040Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8397:6181], server id = [2:8401:6185], tablet id = 72075186224037900 2025-06-25T15:06:06.281060Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:06.281226Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T15:06:06.281480Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8398:6182], server id = [2:8402:6186], tablet id = 72075186224037901 2025-06-25T15:06:06.281496Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:06.281530Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T15:06:06.281552Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:06.281706Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-25T15:06:06.281822Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:06.281980Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:06:06.283828Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8399:6183], server id = [2:8403:6187], tablet id = 72075186224037902 2025-06-25T15:06:06.283849Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:06.284259Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:06.309450Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8432:6212]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:06.309627Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:06.309663Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8432:6212], StatRequests.size() = 1 2025-06-25T15:06:06.394539Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OWM4YmUxYi00M2QyMzYxZi03NGYwNThhZS0zNDhiNDRlMw==, TxId: 2025-06-25T15:06:06.394603Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OWM4YmUxYi00M2QyMzYxZi03NGYwNThhZS0zNDhiNDRlMw==, TxId: 2025-06-25T15:06:06.395030Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:06.417715Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8447:6218] 2025-06-25T15:06:06.417851Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8348:6148], server id = [2:8447:6218], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:06.417952Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8448:6219] 2025-06-25T15:06:06.418014Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8447:6218], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-25T15:06:06.418100Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:8448:6219], schemeshard id = 72075186224037897 2025-06-25T15:06:06.430498Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:06.430567Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T15:06:06.506502Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8451:6222]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:06.506918Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:06.506992Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:06:06.510899Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:06.510962Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:06:06.511013Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:06:06.515456Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailesOnNotATopic [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnFailedGetAllTopicsRequest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnFailedGetAllTopicsRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "multiple TopicRequest for topic \'rt3.dc1--topic1\'" ErrorCode: BAD_REQUEST } Assert failed: Check response: { Status: 128 ErrorReason: "multiple partition 2 in TopicRequest for topic \'rt3.dc1--topic2\'" ErrorCode: BAD_REQUEST } >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnEmptyTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedTopicName >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedPartition >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> 
TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailesOnNotATopic >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedPartition [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesTimeout >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNoBalancerInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnNoBalancerInGetNodeRequest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnFailedGetAllTopicsRequest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailesOnNotATopic [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnDuplicatedPartition [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "TopicRequest must have Topic field." 
ErrorCode: BAD_REQUEST } Assert failed: Check response: { Status: 128 ErrorReason: "multiple TopicRequest for topic \'rt3.dc1--topic1\'" ErrorCode: BAD_REQUEST } Assert failed: Check response: { Status: 128 ErrorReason: "multiple partition 2 in TopicRequest for topic \'rt3.dc1--topic2\'" ErrorCode: BAD_REQUEST } >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesTimeout >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnEmptyTopicName >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::SuccessfullyPassesResponsesFromTablets >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoBalancerInGetNodeRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailesOnNotATopic [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } 2025-06-25T15:06:08.242041Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:08.246807Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:08.247149Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928037] doesn't have tx info 2025-06-25T15:06:08.247205Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:08.247246Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-25T15:06:08.247285Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:08.247322Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:08.247397Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-25T15:06:08.248030Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:265:2255], now have 1 active actors on pipe 2025-06-25T15:06:08.248186Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:08.269915Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:08.273027Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:08.273227Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:08.274151Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928037] Config applied version 1 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:08.274299Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:08.274826Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:08.275176Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:273:2261] 2025-06-25T15:06:08.277636Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-06-25T15:06:08.277727Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:273:2261] 2025-06-25T15:06:08.277795Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:08.277866Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:08.278321Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T15:06:08.278984Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:276:2263], now have 1 active actors on pipe 2025-06-25T15:06:08.320828Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:08.323968Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:08.324187Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:08.324219Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:08.324248Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-25T15:06:08.324285Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:08.324346Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:08.324396Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:08.324889Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [2:409:2362], now have 1 active actors on pipe 2025-06-25T15:06:08.324981Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:08.325115Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928139] Config update version 2(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:08.326647Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:08.326786Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:08.327406Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: 
[PQ: 72057594037928139] Config applied version 2 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:08.327489Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:08.327718Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:08.327922Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:417:2368] 2025-06-25T15:06:08.329318Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-25T15:06:08.329369Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:417:2368] 2025-06-25T15:06:08.329415Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:08.329453Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:08.329630Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:08.330025Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [2:420:2370], now have 1 active actors on pipe 2025-06-25T15:06:08.330872Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:426:2373], now have 1 active actors on pipe 2025-06-25T15:06:08.331054Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [2:428:2374], now have 1 active actors on pipe 2025-06-25T15:06:08.331155Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928037] server disconnected, pipe [2:426:2373] destroyed 2025-06-25T15:06:08.331467Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928139] server disconnected, pipe [2:428:2374] destroyed 2025-06-25T15:06:08.820642Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:08.824550Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:08.824785Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928037] doesn't have tx info 2025-06-25T15:06:08.824819Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:08.824851Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-25T15:06:08.824880Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:08.824914Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:08.824958Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-25T15:06:08.825566Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [3:265:2255], now have 1 active actors on pipe 2025-06-25T15:06:08.825641Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:08.825795Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928037] Config update version 3(current 0) received from actor [3:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 3 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:08.827349Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928037] Apply new con ... titionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:08.890110Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:08.890823Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928138] Config applied version 5 actor [3:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 5 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:08.890964Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:08.891289Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:08.891474Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [3:479:2415] 2025-06-25T15:06:08.892975Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 
2025-06-25T15:06:08.893024Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [3:479:2415] 2025-06-25T15:06:08.893070Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:08.893107Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:08.893285Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928138, Partition: 1, State: StateIdle] no data for compaction 2025-06-25T15:06:08.893729Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [3:482:2417], now have 1 active actors on pipe 2025-06-25T15:06:08.905162Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:08.907473Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:08.907654Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:08.907689Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:08.907722Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-25T15:06:08.907754Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:08.907795Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:08.907835Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:08.908279Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:531:2454], now have 1 active actors on pipe 2025-06-25T15:06:08.908387Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:08.908512Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928139] Config update version 6(current 0) received from actor [3:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 6 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:08.909965Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 6 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:08.910053Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:08.910604Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: 
[PQ: 72057594037928139] Config applied version 6 actor [3:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 6 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:08.910684Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:08.910884Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:08.911036Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:539:2460] 2025-06-25T15:06:08.912427Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-25T15:06:08.912475Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [3:539:2460] 2025-06-25T15:06:08.912515Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:08.912547Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:08.912767Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:08.913143Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:542:2462], now have 1 active actors on pipe 2025-06-25T15:06:08.914046Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [3:548:2465], now have 1 active actors on pipe 2025-06-25T15:06:08.914160Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [3:549:2466], now have 1 active actors on pipe 2025-06-25T15:06:08.914217Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:550:2466], now have 1 active actors on pipe 2025-06-25T15:06:08.924948Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:555:2470], now have 1 active actors on pipe 2025-06-25T15:06:08.941273Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:08.943022Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:08.943251Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:08.943287Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:08.943386Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:08.944012Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:08.944049Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:08.944130Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:08.944388Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:08.944558Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:612:2515] 2025-06-25T15:06:08.945724Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-25T15:06:08.946482Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-25T15:06:08.946645Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-25T15:06:08.946857Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-25T15:06:08.947101Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-25T15:06:08.947133Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T15:06:08.947159Z node 3 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T15:06:08.947188Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-25T15:06:08.947250Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:612:2515] 2025-06-25T15:06:08.947301Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:08.947334Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-25T15:06:08.947566Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:08.948013Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928138] server disconnected, pipe [3:549:2466] destroyed 2025-06-25T15:06:08.948067Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928037] server disconnected, pipe [3:548:2465] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionLocationsResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionLocation { Partition: 0 Host: "::1" HostId: 3 ErrorCode: OK } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionLocation { Partition: 1 Host: "::1" HostId: 3 ErrorCode: OK } PartitionLocation { Partition: 2 Host: "::1" HostId: 3 ErrorCode: OK } ErrorCode: OK } } } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnEmptyTopicName [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::SuccessfullyPassesResponsesFromTablets >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnEmptyTopicName ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] >> TraverseColumnShard::TraverseServerlessColumnTable [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnEmptyTopicName [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "TopicRequest must have Topic field." 
ErrorCode: BAD_REQUEST } >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> DataStreams::TestListShards1Shard [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnEmptyTopicName [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] Test command err: 2025-06-25T15:06:09.045223Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:09.047779Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:09.047999Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928037] doesn't have tx info 2025-06-25T15:06:09.048043Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:09.048071Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-25T15:06:09.048104Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:09.048133Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:09.048168Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-25T15:06:09.048634Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [1:266:2256], now have 1 active actors on pipe 2025-06-25T15:06:09.048729Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:09.058920Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [1:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:09.061040Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:09.061154Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:09.061860Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928037] Config applied version 1 actor [1:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:09.061948Z node 1 :PERSQUEUE DEBUG: 
partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:09.062348Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:09.062640Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [1:274:2262] 2025-06-25T15:06:09.064252Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 2025-06-25T15:06:09.064326Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [1:274:2262] 2025-06-25T15:06:09.064366Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:09.064398Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:09.064579Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T15:06:09.064966Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [1:277:2264], now have 1 active actors on pipe 2025-06-25T15:06:09.100083Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:09.103009Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928137] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:09.103259Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928137] doesn't have tx info 2025-06-25T15:06:09.103290Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:09.103315Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-06-25T15:06:09.103349Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:09.103382Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:09.103426Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928137] doesn't have tx writes info 2025-06-25T15:06:09.103886Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928137] server connected, pipe [1:412:2365], now have 1 active actors on pipe 2025-06-25T15:06:09.103968Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:09.104099Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [1:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:09.105661Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928137] Apply new config CacheSize: 
10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:09.105748Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:09.106341Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928137] Config applied version 2 actor [1:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:09.106411Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:09.106644Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:09.106797Z node 1 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [1:420:2371] 2025-06-25T15:06:09.108035Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 2025-06-25T15:06:09.108074Z node 1 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [1:420:2371] 2025-06-25T15:06:09.108120Z node 1 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:09.108185Z node 1 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:09.108393Z node 1 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928137, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T15:06:09.108872Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928137] server connected, pipe [1:423:2373], now have 1 active actors on pipe 2025-06-25T15:06:09.121722Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:09.124583Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928138] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:09.124876Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928138] doesn't have tx info 2025-06-25T15:06:09.124916Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:09.124949Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-25T15:06:09.124993Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:09.125032Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:09.125081Z node 1 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-25T15:06:09.125581Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [1:472:2410], now have 1 active actors on pipe 2025-06-25T15:06:09.125682Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:09.125805Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [1:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:09.127671Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:09.127782Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:09.128688Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928138] Config applied version 3 actor [1:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 Lifetime ... :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:10.024268Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:10.024430Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [3:480:2416] 2025-06-25T15:06:10.025747Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 
2025-06-25T15:06:10.025791Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [3:480:2416] 2025-06-25T15:06:10.025834Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:10.025866Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:10.026031Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928138, Partition: 1, State: StateIdle] no data for compaction 2025-06-25T15:06:10.026349Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [3:483:2418], now have 1 active actors on pipe 2025-06-25T15:06:10.036722Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:10.039722Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:10.039955Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:10.039999Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:10.040035Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-25T15:06:10.040072Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:10.040112Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.040161Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:10.040690Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:532:2455], now have 1 active actors on pipe 2025-06-25T15:06:10.040775Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:10.040934Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928139] Config update version 12(current 0) received from actor [3:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:10.042523Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:10.042628Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.043084Z node 3 :PERSQUEUE INFO: 
pq_impl.cpp:1497: [PQ: 72057594037928139] Config applied version 12 actor [3:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:10.043179Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:10.043427Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:10.043578Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:540:2461] 2025-06-25T15:06:10.045260Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-25T15:06:10.045315Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [3:540:2461] 2025-06-25T15:06:10.045364Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:10.045405Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:10.045595Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:10.046004Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:543:2463], now have 1 active actors on pipe 2025-06-25T15:06:10.046994Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [3:550:2466], now have 1 active actors on pipe 2025-06-25T15:06:10.047433Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928137] server connected, pipe [3:552:2467], now have 1 active actors on pipe 2025-06-25T15:06:10.047601Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [3:553:2467], now have 1 active actors on pipe 2025-06-25T15:06:10.047681Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:554:2467], now have 1 active actors on pipe 2025-06-25T15:06:10.058645Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:568:2478], now have 1 active actors on pipe 2025-06-25T15:06:10.076503Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:10.078448Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:10.078670Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:10.078711Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:10.078825Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:10.079241Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.079286Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:10.079369Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:10.079588Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:10.079738Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:625:2523] 2025-06-25T15:06:10.081435Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-25T15:06:10.082348Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-25T15:06:10.082562Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-25T15:06:10.082830Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-25T15:06:10.083056Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-25T15:06:10.083092Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T15:06:10.083127Z node 3 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T15:06:10.083167Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-25T15:06:10.083222Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:625:2523] 2025-06-25T15:06:10.083269Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:10.083310Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-25T15:06:10.083471Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:10.084138Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928138] server disconnected, pipe [3:553:2467] destroyed 2025-06-25T15:06:10.084184Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928037] server disconnected, pipe [3:550:2466] destroyed 2025-06-25T15:06:10.084215Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928137] server disconnected, pipe [3:552:2467] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetReadSessionsInfoResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 3 ErrorCode: OK } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 0 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 3 ErrorCode: OK } PartitionResult { Partition: 1 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 3 ErrorCode: OK } PartitionResult { Partition: 2 ErrorCode: INITIALIZING ErrorReason: "tablet for partition is not running" } ErrorCode: OK } } } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } 2025-06-25T15:06:09.455297Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:09.458936Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:09.459231Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928037] doesn't have tx info 2025-06-25T15:06:09.459287Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:09.459329Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-25T15:06:09.459380Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:09.459422Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:09.459497Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-25T15:06:09.460134Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:264:2254], now have 1 active actors on pipe 2025-06-25T15:06:09.460245Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:09.480077Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:09.482753Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:09.482878Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:09.483686Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928037] Config applied version 1 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:09.483807Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:09.484184Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:09.484469Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:272:2260] 2025-06-25T15:06:09.486772Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-06-25T15:06:09.486838Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:272:2260] 2025-06-25T15:06:09.486895Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:09.486946Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:09.487276Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T15:06:09.487751Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:275:2262], now have 1 active actors on pipe 2025-06-25T15:06:09.536962Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:09.541176Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928137] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:09.541502Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928137] doesn't have tx info 2025-06-25T15:06:09.541551Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:09.541599Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-06-25T15:06:09.541638Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:09.541682Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:09.541744Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928137] doesn't have tx writes info 2025-06-25T15:06:09.542550Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928137] server connected, pipe [2:407:2360], now have 1 active actors on pipe 2025-06-25T15:06:09.542654Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:09.542841Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:09.545206Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:09.545326Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:09.546144Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: 
[PQ: 72057594037928137] Config applied version 2 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:09.546263Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:09.546589Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:09.546813Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [2:415:2366] 2025-06-25T15:06:09.549186Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 2025-06-25T15:06:09.549262Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [2:415:2366] 2025-06-25T15:06:09.549320Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:09.549374Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:09.549630Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928137, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T15:06:09.550205Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928137] server connected, pipe [2:418:2368], now have 1 active actors on pipe 2025-06-25T15:06:09.566730Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:09.570840Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928138] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:09.571161Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928138] doesn't have tx info 2025-06-25T15:06:09.571232Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:09.571276Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-25T15:06:09.571320Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:09.571368Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:09.571430Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-25T15:06:09.572095Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [2:467:2405], now have 1 active actors on pipe 2025-06-25T15:06:09.572237Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:09.572454Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:09.574881Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:09.575007Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:09.575787Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928138] Config applied version 3 actor [2 ... 
: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:10.116420Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.116998Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928139] Config applied version 8 actor [3:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:10.117075Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:10.117294Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:10.117429Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:539:2460] 2025-06-25T15:06:10.119086Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-25T15:06:10.119149Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [3:539:2460] 2025-06-25T15:06:10.119204Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:10.119269Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-25T15:06:10.119556Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:10.120168Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:542:2462], now have 1 active actors on pipe 2025-06-25T15:06:10.121207Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [3:548:2465], now have 1 active actors on pipe 2025-06-25T15:06:10.121382Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [3:549:2466], now have 1 active actors on pipe 2025-06-25T15:06:10.121596Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T15:06:10.121902Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T15:06:10.121976Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:550:2466], now have 1 active actors on pipe 2025-06-25T15:06:10.122169Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T15:06:10.133283Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:558:2473], now have 1 active actors on pipe 2025-06-25T15:06:10.155026Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:10.157159Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:10.157535Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:10.157592Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:10.157742Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:10.158597Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.158687Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:10.158840Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:10.159219Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:10.159446Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:615:2518] 2025-06-25T15:06:10.161008Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-25T15:06:10.162359Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-25T15:06:10.162668Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-25T15:06:10.163032Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-25T15:06:10.163402Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-25T15:06:10.163450Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T15:06:10.163490Z node 3 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T15:06:10.163530Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-25T15:06:10.163595Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:615:2518] 2025-06-25T15:06:10.163646Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:10.163700Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-25T15:06:10.164012Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:10.164537Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928138] server disconnected, pipe [3:549:2466] destroyed 2025-06-25T15:06:10.164579Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928037] server disconnected, pipe [3:548:2465] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionStatusResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 39 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 39 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 1 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 79 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 79 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } PartitionResult { Partition: 2 Status: STATUS_UNKNOWN } ErrorCode: OK } } } >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailesOnNotATopic >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnFailedGetAllTopicsRequest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest ------- [TM] 
{asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnEmptyTopicName [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "empty topic in GetReadSessionsInfo request" ErrorCode: BAD_REQUEST } ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } 2025-06-25T15:06:10.062428Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:10.065231Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:10.065418Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928037] doesn't have tx info 2025-06-25T15:06:10.065460Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:10.065482Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-25T15:06:10.065515Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:10.065563Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.065603Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-25T15:06:10.066052Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:266:2256], now have 1 active actors on pipe 2025-06-25T15:06:10.066126Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:10.074791Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:10.076575Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:10.076697Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.077398Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: 
[PQ: 72057594037928037] Config applied version 1 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:10.077501Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:10.077803Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:10.078080Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:274:2262] 2025-06-25T15:06:10.079620Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 2025-06-25T15:06:10.079662Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:274:2262] 2025-06-25T15:06:10.079695Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:10.079724Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:10.079956Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T15:06:10.080274Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:277:2264], now have 1 active actors on pipe 2025-06-25T15:06:10.111319Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:10.113830Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:10.113997Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:10.114030Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:10.114054Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-25T15:06:10.114080Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:10.114111Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.114147Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:10.114569Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [2:410:2363], now have 1 active actors on pipe 2025-06-25T15:06:10.114641Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:10.114794Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928139] Config update version 2(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:10.116287Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:10.116381Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.116960Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928139] Config applied version 2 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:10.117037Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:10.117243Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:10.117373Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:418:2369] 2025-06-25T15:06:10.118604Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
2025-06-25T15:06:10.118646Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:418:2369] 2025-06-25T15:06:10.118682Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:10.118712Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:10.118913Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:10.119275Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [2:421:2371], now have 1 active actors on pipe 2025-06-25T15:06:10.119982Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:427:2374], now have 1 active actors on pipe 2025-06-25T15:06:10.120217Z node 2 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T15:06:10.120393Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [2:429:2375], now have 1 active actors on pipe 2025-06-25T15:06:10.120633Z node 2 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T15:06:10.120763Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928037] server disconnected, pipe [2:427:2374] destroyed 2025-06-25T15:06:10.120950Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928139] server disconnected, pipe [2:429:2375] destroyed 2025-06-25T15:06:10.454965Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:10.458259Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:10.458511Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928037] doesn't have tx info 2025-06-25T15:06:10.458555Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:10.458590Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-25T15:06:10.458629Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:10.458677Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.458724Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-25T15:06:10.459265Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [3:266:2256], now have 1 active actors on ... or topic 'rt3.dc1--topic2' partition 2 generation 2 [3:539:2460] 2025-06-25T15:06:10.544510Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:10.544542Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:10.544724Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:10.545124Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:542:2462], now have 1 active actors on pipe 2025-06-25T15:06:10.545808Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [3:548:2465], now have 1 active actors on pipe 2025-06-25T15:06:10.545923Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [3:549:2466], now have 1 active actors on pipe 2025-06-25T15:06:10.546089Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T15:06:10.546213Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:550:2466], now have 1 active actors on pipe 2025-06-25T15:06:10.546298Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T15:06:10.546422Z node 3 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-25T15:06:10.557115Z node 3 :PERSQUEUE DEBUG: 
pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [3:558:2473], now have 1 active actors on pipe 2025-06-25T15:06:10.574279Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:10.576210Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:10.576531Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:10.576577Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:10.576719Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:10.577477Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.577523Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:10.577617Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:10.577946Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:10.578143Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:615:2518] 2025-06-25T15:06:10.579456Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-25T15:06:10.580164Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-25T15:06:10.580371Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-25T15:06:10.580569Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-25T15:06:10.580738Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-25T15:06:10.580770Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T15:06:10.580797Z node 3 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T15:06:10.580823Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-25T15:06:10.580860Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:615:2518] 2025-06-25T15:06:10.580901Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:10.580934Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-25T15:06:10.581134Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:10.581556Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928138] server disconnected, pipe [3:549:2466] destroyed 2025-06-25T15:06:10.581592Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928037] server disconnected, pipe [3:548:2465] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionStatusResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 39 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 39 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 1 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 79 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 79 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } PartitionResult { Partition: 2 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 93 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 93 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 
Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } ErrorCode: OK } } } >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailesOnNotATopic [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestListShards1Shard [GOOD] Test command err: 2025-06-25T15:05:57.538402Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519903439276828085:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:57.538484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001696/r3tmp/tmpK2z20k/pdisk_1.dat 2025-06-25T15:05:57.810932Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11927, node 1 2025-06-25T15:05:57.854358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:57.854473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:57.862581Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:57.862601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:57.862617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:57.862768Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:05:57.880889Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15977 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:58.089736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:58.178838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:15977 2025-06-25T15:05:58.302596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T15:05:58.544330Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:05:58.544476Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "16" shard_id: "shard-000000" } records { sequence_number: "17" shard_id: "shard-000000" } records { sequence_number: "18" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000000" } records { sequence_number: "20" shard_id: "shard-000000" } records { sequence_number: "21" shard_id: "shard-000000" } records { sequence_number: "22" shard_id: "shard-000000" } records { sequence_number: "23" shard_id: "shard-000000" } records { sequence_number: "24" shard_id: "shard-000000" } records { sequence_number: "25" shard_id: "shard-000000" } records { sequence_number: "26" shard_id: "shard-000000" } records { sequence_number: "27" shard_id: "shard-000000" } records { sequence_number: "28" shard_id: "shard-000000" } records { sequence_number: "29" shard_id: "shard-000000" } 2025-06-25T15:05:58.603869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:58.663701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-25T15:05:58.674707Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-25T15:05:58.674756Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 
2025-06-25T15:05:58.674772Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T15:05:58.678395Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-25T15:05:58.678435Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-25T15:05:58.678462Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1750863958443-1","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1750863958,"finish":1750863958},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863958}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037888-1750863958443-2","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1750863958,"finish":1750863958},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863958}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037890-1750863958635-3","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1750863958,"finish":1750863958},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037890","source_wt":1750863958}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037890-1750863958635-4","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1750863958,"finish":1750863958},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037890","source_wt":1750863958}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1750863958632-5","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1750863958,"finish":1750863958},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863958}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037888-1750863958632-6","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1750863958,"finish":1750863958},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863958}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1750863958443-1","schema":"yds.resources.reserved.v1","tags":{"reserved_thro ... s, AlreadyRead, StreamArn E0000 00:00:1750863964.563228 785916 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750863964.563358 785916 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-25T15:06:04.576963Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:06:04.624353Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) E0000 00:00:1750863964.676718 785916 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750863964.676824 785916 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-25T15:06:04.683946Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) E0000 00:00:1750863964.732435 785916 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750863964.732547 785916 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-25T15:06:04.739825Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) E0000 00:00:1750863964.792711 785916 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750863964.792833 785916 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750863964.802412 785916 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750863964.802539 785916 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-25T15:06:04.838550Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-25T15:06:04.853938Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-06-25T15:06:04.853981Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037893 not found 2025-06-25T15:06:04.853993Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found 2025-06-25T15:06:04.854005Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037892 not found 2025-06-25T15:06:04.854017Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-06-25T15:06:04.854031Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found 2025-06-25T15:06:04.858969Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:06:04.862618Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,5) wasn't found 2025-06-25T15:06:04.862674Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-25T15:06:04.862742Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-25T15:06:04.862783Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,6) wasn't found 2025-06-25T15:06:04.862821Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-25T15:06:04.862846Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found E0000 00:00:1750863964.868914 785916 
message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750863964.869029 785916 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-25T15:06:07.215777Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519903479859315190:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:06:07.215854Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001696/r3tmp/tmpPHCxAX/pdisk_1.dat 2025-06-25T15:06:07.299978Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:07.317546Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:06:07.317613Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:06:07.319391Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21631, node 10 2025-06-25T15:06:07.358840Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:06:07.358871Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:06:07.358879Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:06:07.359034Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26168 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:06:07.599338Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:06:07.642552Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:26168 2025-06-25T15:06:07.792192Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... E0000 00:00:1750863967.972300 786457 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750863967.980111 786457 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750863967.985643 786457 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750863967.991481 786457 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1750863967.997299 786457 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseServerlessColumnTable [GOOD] Test command err: 2025-06-25T15:03:14.097911Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:14.098256Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:14.098321Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ff7/r3tmp/tmptckAQM/pdisk_1.dat 2025-06-25T15:03:14.428858Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25027, node 1 2025-06-25T15:03:14.668862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:14.668915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:14.668945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:14.669310Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:14.671594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:14.773802Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:14.773938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:14.793771Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4414 2025-06-25T15:03:15.364842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:18.341052Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:18.376866Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:18.376980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:18.410201Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:18.411886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:18.628135Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:18.653759Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.654280Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.654873Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.655164Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.655278Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.655398Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.655517Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.655593Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.655650Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:18.841314Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:18.841445Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:18.857198Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:19.025147Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:19.071178Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:19.071261Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:19.098130Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:19.099368Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:19.099577Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:19.099634Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:19.099674Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:19.099767Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:19.099822Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:19.099888Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:19.100500Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:19.153176Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:19.153286Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1791:2559], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:19.165770Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1817:2576] 2025-06-25T15:03:19.170051Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1827:2580] 2025-06-25T15:03:19.170579Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1827:2580], schemeshard id = 72075186224037897 2025-06-25T15:03:19.201220Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-25T15:03:19.220939Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:19.220997Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:19.221086Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-25T15:03:19.232928Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:19.246662Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:19.246814Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:19.433379Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:19.616813Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:19.693002Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T15:03:20.408883Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:20.451295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:21.225513Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:21.441581Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7894: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-25T15:03:21.441674Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7910: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-25T15:03:21.441766Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:2501:2903], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-25T15:03:21.443830Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2511:2909] 2025-06-25T15:03:21.444852Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2511:2909], schemeshard id = 72075186224037899 2025-06-25T15:03:22.809688Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2626:3196], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:22.809838Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06- ... pts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:06:06.574119Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:9352:6891]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:06.574309Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:06:06.574392Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:9354:6893] 2025-06-25T15:06:06.574447Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:9354:6893] 2025-06-25T15:06:06.574692Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:9355:6894] 2025-06-25T15:06:06.574846Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:9355:6894], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:06:06.574898Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T15:06:06.575013Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9354:6893], server id = [2:9355:6894], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:06.575057Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:06:06.575120Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:9352:6891], StatRequests.size() = 1 2025-06-25T15:06:06.669039Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzI3YjE4ODEtOGYyOTQxZDctZjMxZTM2YTAtODAwMTVjN2U=, TxId: 2025-06-25T15:06:06.669097Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzI3YjE4ODEtOGYyOTQxZDctZjMxZTM2YTAtODAwMTVjN2U=, TxId: 2025-06-25T15:06:06.669535Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:06.682867Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:06:06.682921Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T15:06:06.736350Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:06:06.736413Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:06:06.810475Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:9354:6893], schemeshard count = 1 2025-06-25T15:06:07.171802Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-25T15:06:07.171880Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 196.000000s, at schemeshard: 72075186224037899 2025-06-25T15:06:07.172126Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 28 2025-06-25T15:06:07.185786Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T15:06:09.395587Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:09.395659Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T15:06:09.395708Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 2025-06-25T15:06:09.395758Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T15:06:09.398893Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:09.415832Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:09.416294Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:09.416392Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:09.417217Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:06:09.430903Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:09.431182Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:09.432262Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9478:6965], server id = [2:9482:6969], tablet id = 72075186224037905, status = OK 2025-06-25T15:06:09.432707Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9478:6965], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T15:06:09.433865Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9479:6966], server id = [2:9483:6970], tablet id = 72075186224037906, status = OK 2025-06-25T15:06:09.433940Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9479:6966], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T15:06:09.434673Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9480:6967], server id = [2:9485:6972], tablet id = 72075186224037907, status = OK 2025-06-25T15:06:09.434735Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9480:6967], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T15:06:09.435333Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9481:6968], server id = [2:9484:6971], tablet id = 72075186224037908, status = OK 2025-06-25T15:06:09.435380Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9481:6968], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T15:06:09.438676Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-25T15:06:09.439186Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9478:6965], server id = [2:9482:6969], tablet id = 72075186224037905 2025-06-25T15:06:09.439241Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:09.439702Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037906 2025-06-25T15:06:09.439976Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9479:6966], server id = [2:9483:6970], tablet id = 72075186224037906 2025-06-25T15:06:09.439995Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:09.440281Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037907 2025-06-25T15:06:09.440475Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9480:6967], server id = [2:9485:6972], tablet id = 72075186224037907 2025-06-25T15:06:09.440492Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:09.440670Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037908 2025-06-25T15:06:09.440709Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:09.440847Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-25T15:06:09.441000Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:09.441255Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared 2025-06-25T15:06:09.442689Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9481:6968], server id = [2:9484:6971], tablet id = 72075186224037908 2025-06-25T15:06:09.442711Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:09.443118Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:09.468514Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:9514:6997]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:09.468701Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:09.468731Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:9514:6997], StatRequests.size() = 1 2025-06-25T15:06:09.587348Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NGFiNDg1OWEtZTVjNzFkOTEtYmE4ZmExNDctNDhkMDQ5MDI=, TxId: 2025-06-25T15:06:09.587400Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NGFiNDg1OWEtZTVjNzFkOTEtYmE4ZmExNDctNDhkMDQ5MDI=, TxId: ... 
waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-25T15:06:09.587804Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:9527:7003]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:09.588081Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:09.588483Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:09.588528Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:06:09.590859Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:09.590903Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-25T15:06:09.590948Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:06:09.594348Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNoBalancerInGetNodeRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailesOnNotATopic [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } 2025-06-25T15:06:10.415531Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:10.418301Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:10.418480Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928037] doesn't have tx info 2025-06-25T15:06:10.418513Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:10.418536Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-25T15:06:10.418565Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:10.418624Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.418693Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-25T15:06:10.419127Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:266:2256], now have 1 active actors on pipe 2025-06-25T15:06:10.419234Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:10.427852Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:10.429884Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:10.429985Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.430599Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928037] Config applied version 1 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:10.430704Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:10.430968Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:10.431161Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:274:2262] 2025-06-25T15:06:10.432915Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-06-25T15:06:10.432959Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:274:2262] 2025-06-25T15:06:10.432993Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:10.433025Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:10.433276Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T15:06:10.433589Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:277:2264], now have 1 active actors on pipe 2025-06-25T15:06:10.472938Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:10.475669Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:10.475938Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:10.475978Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:10.476013Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-25T15:06:10.476047Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:10.476087Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.476140Z node 2 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:10.476693Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [2:410:2363], now have 1 active actors on pipe 2025-06-25T15:06:10.476785Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:10.476946Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928139] Config update version 2(current 0) received from actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:10.478809Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:10.478911Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:10.479652Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1497: 
[PQ: 72057594037928139] Config applied version 2 actor [2:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:10.479742Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:10.480044Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:10.480233Z node 2 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:418:2369] 2025-06-25T15:06:10.482045Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-25T15:06:10.482100Z node 2 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:418:2369] 2025-06-25T15:06:10.482145Z node 2 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:10.482188Z node 2 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:10.482409Z node 2 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:10.482841Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [2:421:2371], now have 1 active actors on pipe 2025-06-25T15:06:10.483859Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [2:427:2374], now have 1 active actors on pipe 2025-06-25T15:06:10.484208Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [2:429:2375], now have 1 active actors on pipe 2025-06-25T15:06:10.484566Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928037] server disconnected, pipe [2:427:2374] destroyed 2025-06-25T15:06:10.484823Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928139] server disconnected, pipe [2:429:2375] destroyed Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD] >> 
TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC } >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoClientSpecified ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } 2025-06-25T15:06:11.697115Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:11.700408Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:11.700606Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928037] doesn't have tx info 2025-06-25T15:06:11.700648Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:11.700678Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-25T15:06:11.700714Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:11.700747Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:11.700834Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-25T15:06:11.701374Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [3:266:2256], now have 1 active actors on pipe 2025-06-25T15:06:11.701461Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:11.716356Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [3:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:11.718569Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928037] Apply new config 
CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:11.718656Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:11.719318Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928037] Config applied version 1 actor [3:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:11.719430Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:11.719724Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:11.719937Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [3:274:2262] 2025-06-25T15:06:11.721418Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 2025-06-25T15:06:11.721474Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [3:274:2262] 2025-06-25T15:06:11.721514Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:11.721549Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:11.721777Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928037, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T15:06:11.722145Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [3:277:2264], now have 1 active actors on pipe 2025-06-25T15:06:11.758044Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:11.760917Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928137] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:11.761115Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928137] doesn't have tx info 2025-06-25T15:06:11.761147Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:11.761177Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-06-25T15:06:11.761205Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:11.761247Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:11.761294Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928137] doesn't have tx writes info 2025-06-25T15:06:11.761834Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928137] server connected, pipe [3:411:2364], now have 1 active actors on pipe 2025-06-25T15:06:11.761911Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:11.762045Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [3:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:11.763546Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:11.763630Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:11.764216Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928137] Config applied version 2 actor [3:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-25T15:06:11.764298Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:11.764566Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:11.764707Z node 3 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [3:419:2370] 2025-06-25T15:06:11.765959Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 
2025-06-25T15:06:11.766000Z node 3 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [3:419:2370] 2025-06-25T15:06:11.766041Z node 3 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:11.766077Z node 3 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:11.766224Z node 3 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928137, Partition: 0, State: StateIdle] no data for compaction 2025-06-25T15:06:11.766593Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928137] server connected, pipe [3:422:2372], now have 1 active actors on pipe 2025-06-25T15:06:11.777554Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:11.780544Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928138] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:11.780764Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928138] doesn't have tx info 2025-06-25T15:06:11.780796Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:11.780833Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-25T15:06:11.780862Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:11.780899Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:11.780937Z node 3 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-25T15:06:11.781463Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [3:471:2409], now have 1 active actors on pipe 2025-06-25T15:06:11.781550Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:11.781678Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [3:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:11.783246Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:11.783334Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025- ... 
Seconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 7 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:12.319588Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:12.320010Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928138] Config applied version 7 actor [4:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 7 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-25T15:06:12.320087Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:12.320275Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:12.320440Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [4:477:2413] 2025-06-25T15:06:12.321579Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 2025-06-25T15:06:12.321623Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [4:477:2413] 2025-06-25T15:06:12.321659Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:12.321692Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:12.321854Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928138, Partition: 1, State: StateIdle] no data for compaction 2025-06-25T15:06:12.322186Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [4:480:2415], now have 1 active actors on pipe 2025-06-25T15:06:12.331817Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:12.335215Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:12.335517Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:12.335565Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:12.335602Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:985: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-25T15:06:12.335634Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:12.335680Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:12.335732Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:12.336361Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [4:529:2452], now have 1 active actors on pipe 2025-06-25T15:06:12.336467Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1470: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-25T15:06:12.336646Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1656: [PQ: 72057594037928139] Config update version 8(current 0) received from actor [4:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:12.338489Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:591: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:12.338600Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:12.339399Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1497: [PQ: 72057594037928139] Config applied version 8 actor [4:103:2136] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 8 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-25T15:06:12.339537Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:12.339846Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:12.340064Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:537:2458] 2025-06-25T15:06:12.342016Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
2025-06-25T15:06:12.342078Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [4:537:2458] 2025-06-25T15:06:12.342134Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:12.342179Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-25T15:06:12.342408Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:12.342967Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [4:540:2460], now have 1 active actors on pipe 2025-06-25T15:06:12.344057Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928138] server connected, pipe [4:547:2464], now have 1 active actors on pipe 2025-06-25T15:06:12.344165Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928037] server connected, pipe [4:546:2463], now have 1 active actors on pipe 2025-06-25T15:06:12.344225Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [4:548:2464], now have 1 active actors on pipe 2025-06-25T15:06:12.355137Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2898: [PQ: 72057594037928139] server connected, pipe [4:553:2468], now have 1 active actors on pipe 2025-06-25T15:06:12.376465Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3114: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-25T15:06:12.378704Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3146: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-25T15:06:12.378992Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:752: [PQ: 72057594037928139] doesn't have tx info 2025-06-25T15:06:12.379037Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:764: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-25T15:06:12.379163Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4949: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-25T15:06:12.379937Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1109: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-25T15:06:12.379983Z node 4 :PERSQUEUE INFO: pq_impl.cpp:801: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-25T15:06:12.380063Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-25T15:06:12.380367Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-25T15:06:12.380582Z node 4 :PERSQUEUE INFO: partition_init.cpp:1007: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:610:2513] 2025-06-25T15:06:12.382338Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-25T15:06:12.383262Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-25T15:06:12.383473Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-25T15:06:12.383779Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-25T15:06:12.383986Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-25T15:06:12.384028Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-25T15:06:12.384062Z node 4 :PERSQUEUE INFO: partition_init.cpp:895: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-25T15:06:12.384094Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-25T15:06:12.384157Z node 4 :PERSQUEUE INFO: partition.cpp:572: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [4:610:2513] 2025-06-25T15:06:12.384208Z node 4 :PERSQUEUE DEBUG: partition.cpp:586: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-25T15:06:12.384246Z node 4 :PERSQUEUE DEBUG: partition.cpp:3940: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-25T15:06:12.384512Z node 4 :PERSQUEUE DEBUG: partition_compaction.cpp:162: [PQ: 72057594037928139, Partition: 2, State: StateIdle] no data for compaction 2025-06-25T15:06:12.385076Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928138] server disconnected, pipe [4:547:2464] destroyed 2025-06-25T15:06:12.385273Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2923: [PQ: 72057594037928037] server disconnected, pipe [4:546:2463] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionLocationsResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionLocation { Partition: 0 Host: "::1" HostId: 4 ErrorCode: OK } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionLocation { Partition: 1 Host: "::1" HostId: 4 ErrorCode: OK } PartitionLocation { Partition: 2 ErrorCode: INITIALIZING ErrorReason: "Tablet for that partition is not running" } ErrorCode: OK } } } >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoClientSpecified [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo [GOOD] Test command err: 2025-06-25T15:05:51.085148Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519903411768729499:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:51.085235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016ad/r3tmp/tmpQQ6zj4/pdisk_1.dat 2025-06-25T15:05:51.392087Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17698, node 1 2025-06-25T15:05:51.447996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:51.448075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:51.471449Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:05:51.532472Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:51.532525Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:51.532534Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:51.532653Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16051 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:51.953828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:51.999931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:05:52.091140Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:16051 2025-06-25T15:05:52.141813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T15:05:52.308788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-25T15:05:52.321392Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-25T15:05:52.321417Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-25T15:05:52.323391Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-25T15:05:52.323420Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-25T15:05:52.328170Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-25T15:05:52.328230Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-25T15:05:52.328285Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-25T15:05:52.328344Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-25T15:05:54.088702Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519903426409826955:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:54.088759Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016ad/r3tmp/tmpHogotW/pdisk_1.dat 2025-06-25T15:05:54.163659Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:54.189295Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:54.189350Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:54.191678Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 31819, node 4 2025-06-25T15:05:54.214784Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:54.214803Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:54.214809Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:54.214913Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3237 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:54.386742Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:54.422220Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:3237 2025-06-25T15:05:54.556980Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T15:05:54.685082Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:54.722250Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-25T15:05:54.735457Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037891 not found 2025-06-25T15:05:54.735471Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037890 not found 2025-06-25T15:05:54.735479Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037888 not found 2025-06-25T15:05:54.735487Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037889 not found 2025-06-25T15:05:54.740540Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-25T15:05:54.740611Z ... lete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-25T15:05:54.740642Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-25T15:05:54.740672Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-25T15:05:56.990996Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519903432425465847:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:56.991117Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016ad/r3tmp/tmpxId2J2/pdisk_1.dat 2025-06-25T15:05:57.072744Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:57.092053Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:57.092125Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:57.098525Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6324, node 7 2025-06-25T15:05:57.129655Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:57.129677Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to 
initialize from file: (empty maybe) 2025-06-25T15:05:57.129688Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:57.129826Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12613 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:57.326263Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:57.386192Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:12613 2025-06-25T15:05:57.544716Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T15:05:57.682223Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:57.717247Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) 2025-06-25T15:05:57.749410Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_pq.cpp:421) 2025-06-25T15:05:57.760048Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-06-25T15:05:57.760076Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found 2025-06-25T15:05:57.760091Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found 2025-06-25T15:05:57.761274Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-06-25T15:05:57.767057Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-25T15:05:57.767149Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-25T15:05:57.767178Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-25T15:05:57.767210Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-25T15:06:00.483328Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519903449261283358:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:06:00.483407Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016ad/r3tmp/tmpv5PeOf/pdisk_1.dat 2025-06-25T15:06:00.594781Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:00.608468Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:06:00.608538Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-25T15:06:00.613908Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63537, node 10 2025-06-25T15:06:00.652139Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:06:00.652163Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:06:00.652173Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:06:00.652344Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22390 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:06:00.913669Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:06:00.958321Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:22390 2025-06-25T15:06:01.150450Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T15:06:01.489893Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:06:05.483804Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7519903449261283358:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:06:05.483904Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> AnalyzeColumnshard::AnalyzeRebootSaBeforeSave [GOOD] >> TraverseColumnShard::TraverseColumnTableAggrStatUnavailableNode [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnNoClientSpecified [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "No clientId specified in CmdGetReadSessionsInfo" ErrorCode: BAD_REQUEST } |93.9%| [TA] $(B)/ydb/core/client/server/ut/test-results/unittest/{meta.json ... results_accumulator.log} |93.9%| [TA] {RESULT} $(B)/ydb/core/client/server/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeSave [GOOD] Test command err: 2025-06-25T15:03:49.194064Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:49.194380Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:49.194475Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f83/r3tmp/tmpr3AXQ2/pdisk_1.dat 2025-06-25T15:03:49.530700Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29535, node 1 2025-06-25T15:03:49.782380Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:49.782436Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:49.782470Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:49.783076Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:49.785640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:49.882753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:49.882911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:49.900710Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24379 2025-06-25T15:03:50.468769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:53.430551Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:53.465581Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:53.465698Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:53.504379Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:53.507054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:53.726469Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:53.761971Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:53.762617Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:53.763255Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:53.763450Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:53.763702Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:53.763815Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:53.763919Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:53.764009Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:53.764083Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:53.982013Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:53.982137Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:53.994991Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:54.135459Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:54.179291Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:54.179387Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:54.211486Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:54.211674Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:54.211902Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:54.211971Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:54.212043Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:54.212106Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:54.212184Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:54.212248Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:54.212871Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:54.235448Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:54.235545Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:54.244541Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:54.250855Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:54.251110Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:54.260373Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:54.279874Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:54.279928Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:54.280006Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:54.292281Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:54.299709Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:54.299852Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:54.498586Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:54.645755Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:54.714169Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:55.234784Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:55.454927Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:55.455034Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:55.468781Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:55.562584Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:55.562818Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:55.563114Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:55.563280Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:55.563420Z node 2 :TX_COLUMNSHARD WARN: ... ICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 1 2025-06-25T15:06:09.831677Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 1 2025-06-25T15:06:09.831718Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:06:09.831835Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:09.832416Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:06:09.832858Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:09.832917Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:09.833160Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:06:09.834109Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:09.834154Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:09.835259Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:06:09.895854Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:09.896050Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T15:06:09.896611Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7453:5460], server id = [2:7454:5461], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:09.896722Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7453:5460], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:09.898083Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:09.898184Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:09.898343Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:09.898511Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:09.898745Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:06:09.901476Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7453:5460], server id = [2:7454:5461], tablet id = 72075186224037899 2025-06-25T15:06:09.901515Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:09.901960Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:09.935685Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7472:5479]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:09.935847Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:09.935889Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7472:5479], StatRequests.size() = 1 2025-06-25T15:06:10.027973Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OWUyZGMxNjQtN2I4YzI5Yi1jNThmZDdiYS1mYTNlNTk4Ng==, TxId: 2025-06-25T15:06:10.028030Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OWUyZGMxNjQtN2I4YzI5Yi1jNThmZDdiYS1mYTNlNTk4Ng==, TxId: 2025-06-25T15:06:10.028448Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:10.040863Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7487:5485] 2025-06-25T15:06:10.041115Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7487:5485], schemeshard id = 72075186224037897 
2025-06-25T15:06:10.041207Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7406:5429], server id = [2:7488:5486], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:10.041246Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7488:5486] 2025-06-25T15:06:10.041320Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7488:5486], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-25T15:06:10.054438Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:10.054504Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T15:06:10.120987Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7499:5489] 2025-06-25T15:06:10.121685Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:3049:3301] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-25T15:06:10.121762Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3049:3301] 2025-06-25T15:06:10.121825Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-25T15:06:10.560888Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-25T15:06:10.560944Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T15:06:10.571462Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 3 is different from the current 0 2025-06-25T15:06:10.571519Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T15:06:11.247737Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:06:11.247813Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:06:11.247907Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:06:12.435354Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:12.435508Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-25T15:06:12.435568Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:12.436250Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:12.449718Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:12.450132Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:12.450222Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:12.450621Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:12.464035Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:12.464225Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-25T15:06:12.464825Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7573:5530], server id = [2:7574:5531], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:12.464945Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7573:5530], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:12.466411Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:12.466530Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:12.466865Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7573:5530], server id = [2:7574:5531], tablet id = 72075186224037899 2025-06-25T15:06:12.466906Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:12.466983Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:12.467160Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:12.467478Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:06:12.470629Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:12.492356Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDUzZTU3M2MtMTIwZjQwYi02ODFmNzdkZC1jYTgwNDU3YQ==, TxId: 2025-06-25T15:06:12.492421Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDUzZTU3M2MtMTIwZjQwYi02ODFmNzdkZC1jYTgwNDU3YQ==, TxId: 2025-06-25T15:06:12.492960Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:12.506168Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:12.506235Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3049:3301] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableAggrStatUnavailableNode [GOOD] Test command err: 2025-06-25T15:03:35.878688Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:35.879192Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:35.879258Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001fbc/r3tmp/tmprGhnGY/pdisk_1.dat 2025-06-25T15:03:36.263780Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21430, node 1 2025-06-25T15:03:36.501853Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:36.501910Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:36.501940Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:36.502294Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:36.507961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:36.618895Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:36.619026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:36.637817Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25310 2025-06-25T15:03:37.239754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:40.442872Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:40.478640Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:40.478775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:40.517130Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:40.518923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:40.700931Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:40.725688Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:40.726206Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:40.726743Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:40.727002Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:40.727090Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:40.727208Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:40.727286Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:40.727355Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:40.727419Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:40.916604Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:40.916720Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:40.933923Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:41.084252Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:41.143440Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:41.143551Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:41.173826Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:41.175658Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:41.175874Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:41.175944Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:41.176013Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:41.176068Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:41.176122Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:41.176182Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:41.176753Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:41.211082Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:41.211203Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1788:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:41.218165Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1801:2571] 2025-06-25T15:03:41.223713Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1831:2586] 2025-06-25T15:03:41.224566Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1831:2586], schemeshard id = 72075186224037897 2025-06-25T15:03:41.230056Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:41.248744Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:41.248804Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:41.248890Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:41.264266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:41.275757Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:41.275897Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:41.449769Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:41.614458Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:41.694194Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:42.378556Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:42.662936Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2142:3018], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:42.663119Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:42.684512Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:42.889532Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2807];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:42.889773Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2807];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:42.890065Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2807];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:42.890200Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2237:2807];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:42.890322Z node 2 :TX_COLUMNSHARD WARN: ... 025-06-25T15:06:09.961741Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8303:6125], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:06:09.961806Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T15:06:09.961940Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:06:09.962023Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8300:6122], StatRequests.size() = 1 2025-06-25T15:06:10.061555Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZmU1Y2JjYzQtYjNiMDAyYTItOGFhMTY2YTAtMjM4M2Y4NDA=, TxId: 2025-06-25T15:06:10.061610Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZmU1Y2JjYzQtYjNiMDAyYTItOGFhMTY2YTAtMjM4M2Y4NDA=, TxId: 2025-06-25T15:06:10.061927Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:10.085932Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:06:10.085987Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T15:06:10.129511Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:06:10.129588Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:06:10.182247Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8302:6124], schemeshard count = 1 2025-06-25T15:06:12.487886Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:12.487943Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T15:06:12.487977Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:12.488021Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:12.490570Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:12.505347Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:12.505867Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:12.505935Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:12.506614Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:12.519788Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:12.520001Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:12.520799Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8419:6184], server id = [2:8423:6188], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:12.521148Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8419:6184], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:12.521392Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8420:6185], server id = [2:8424:6189], tablet id = 72075186224037900, status = OK 2025-06-25T15:06:12.521451Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8420:6185], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:12.522254Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8421:6186], server id = [2:8425:6190], tablet id = 72075186224037901, status = OK 2025-06-25T15:06:12.522311Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8421:6186], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:12.522933Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8422:6187], server id = [2:8426:6191], tablet id = 72075186224037902, status = OK 2025-06-25T15:06:12.522982Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8422:6187], path = { OwnerId: 72075186224037897 LocalId: 4 } 
2025-06-25T15:06:12.527170Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:12.527728Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8419:6184], server id = [2:8423:6188], tablet id = 72075186224037899 2025-06-25T15:06:12.527769Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:12.528900Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:06:12.529212Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8420:6185], server id = [2:8424:6189], tablet id = 72075186224037900 2025-06-25T15:06:12.529247Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:12.529656Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T15:06:12.529914Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8422:6187], server id = [2:8426:6191], tablet id = 72075186224037902 2025-06-25T15:06:12.529947Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:12.530238Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T15:06:12.530281Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:12.530470Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:12.530570Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:12.530782Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8421:6186], server id = [2:8425:6190], tablet id = 72075186224037901 2025-06-25T15:06:12.530804Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:12.531051Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:06:12.554256Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:12.554375Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T15:06:12.554678Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8441:6202], server id = [2:8442:6203], tablet id = 72075186224037900, status = OK 2025-06-25T15:06:12.554733Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8441:6202], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:12.555540Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:06:12.555596Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:12.555696Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:12.555798Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:12.556018Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:06:12.557322Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8441:6202], server id = [2:8442:6203], tablet id = 72075186224037900 2025-06-25T15:06:12.557345Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:12.557774Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:12.580740Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8460:6221]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:12.580945Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:12.580980Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8460:6221], StatRequests.size() = 1 2025-06-25T15:06:12.670768Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NzBlMmFiZDUtM2QxOGY5Yi1iZjhhNjdmNS1jOWZjNzY5OQ==, TxId: 2025-06-25T15:06:12.670831Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NzBlMmFiZDUtM2QxOGY5Yi1iZjhhNjdmNS1jOWZjNzY5OQ==, TxId: ... 
waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-25T15:06:12.671400Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8474:6227]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:12.671604Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:12.672350Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:12.672400Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:06:12.675463Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:12.675530Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:06:12.675583Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:06:12.680116Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 probe = 4 >> TraverseColumnShard::TraverseColumnTableAggrStatNonLocalTablet [GOOD] |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest |93.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::Simple |94.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> DataStreams::TestReservedConsumersMetering [GOOD] >> GroupWriteTest::ByTableName >> KqpLimits::OutOfSpaceYQLUpsertFail-useSink [GOOD] >> KqpLimits::ManyPartitionsSortingLimit ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableAggrStatNonLocalTablet [GOOD] Test command err: 2025-06-25T15:03:45.273262Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:45.273586Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:45.273685Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f8e/r3tmp/tmpIIYIS8/pdisk_1.dat 2025-06-25T15:03:45.639724Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5881, node 1 2025-06-25T15:03:45.866023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:45.866061Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:45.866083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:45.866509Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:45.868049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:45.961002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:45.961126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:45.975475Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24257 2025-06-25T15:03:46.540975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:49.327707Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:49.361978Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:49.362083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:49.399789Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:49.402010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:49.609796Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:49.647640Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:49.648262Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:49.648894Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:49.649071Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:49.649319Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:49.649433Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:49.649529Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:49.649611Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:49.649676Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:49.848611Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:49.848751Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:49.862513Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:50.033436Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:50.086340Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:50.086434Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:50.124614Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:50.124799Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:50.125003Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:50.125056Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:50.125122Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:50.125180Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:50.125241Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:50.125294Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:50.125844Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:50.148426Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:50.148529Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:50.161074Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:50.168754Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:50.169078Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:50.178990Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:50.197174Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:50.197238Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:50.197310Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:50.209500Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:50.216524Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:50.216635Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:50.409302Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:50.553099Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:50.653207Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:51.135835Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:51.385366Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:51.385476Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:51.404593Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:51.545910Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:51.546147Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:51.546437Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:51.546575Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:51.546700Z node 2 :TX_COLUMNSHARD WARN: ... 72075186224037897, LocalPathId: 3] 2025-06-25T15:06:07.945454Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T15:06:07.988203Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:06:07.988264Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:06:08.072380Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8243:6101], schemeshard count = 1 2025-06-25T15:06:10.150633Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:10.150678Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T15:06:10.150703Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:10.150742Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:10.153336Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:10.167897Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:10.168296Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:10.168376Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:10.168995Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:06:10.181310Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:10.181535Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:10.182304Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8360:6161], server id = [2:8364:6165], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:10.182541Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8360:6161], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:10.183322Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8361:6162], server id = [2:8365:6166], tablet id = 72075186224037900, status = OK 2025-06-25T15:06:10.183367Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8361:6162], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:10.183516Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8362:6163], server id = [2:8366:6167], tablet id = 72075186224037901, status = OK 2025-06-25T15:06:10.183559Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8362:6163], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:10.184510Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8363:6164], server id = [2:8368:6169], tablet id = 72075186224037902, status = OK 2025-06-25T15:06:10.184548Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8363:6164], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:10.187486Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:10.188289Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8360:6161], server id = [2:8364:6165], tablet id = 72075186224037899 2025-06-25T15:06:10.188355Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:10.189047Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:06:10.189505Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8361:6162], server id = [2:8365:6166], tablet id = 72075186224037900 2025-06-25T15:06:10.189528Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:10.189824Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T15:06:10.190107Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8362:6163], server id = [2:8366:6167], tablet id = 72075186224037901 2025-06-25T15:06:10.190144Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:10.190512Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T15:06:10.190548Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:10.190690Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-25T15:06:10.190972Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8363:6164], server id = [2:8368:6169], tablet id = 72075186224037902 2025-06-25T15:06:10.190988Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:10.214643Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:10.214912Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T15:06:10.714374Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 3 2025-06-25T15:06:10.714426Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T15:06:13.596482Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:06:13.596644Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:06:13.792006Z node 2 :STATISTICS INFO: service_impl.cpp:416: Node 3 is unavailable 2025-06-25T15:06:13.792101Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:13.792254Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 3 is different from the current 0 2025-06-25T15:06:13.792296Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T15:06:13.792410Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:13.792483Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:13.792988Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:13.806834Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:13.807054Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-25T15:06:13.807569Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8494:6234], server id = [2:8495:6235], tablet id = 72075186224037900, status = OK 2025-06-25T15:06:13.807662Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8494:6234], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:13.809074Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:06:13.809163Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:13.809346Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:13.809527Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:13.809912Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:06:13.812149Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8494:6234], server id = [2:8495:6235], tablet id = 72075186224037900 2025-06-25T15:06:13.812190Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:13.813092Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:13.841463Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8513:6253]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:13.841612Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:13.841654Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8513:6253], StatRequests.size() = 1 2025-06-25T15:06:13.932756Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NmRiMjAxNDYtMTkwNzEyZDktNTQ5OGJkMDYtY2ZmZDAzNDE=, TxId: 2025-06-25T15:06:13.932809Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NmRiMjAxNDYtMTkwNzEyZDktNTQ5OGJkMDYtY2ZmZDAzNDE=, TxId: ... waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-25T15:06:13.933306Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8527:6259]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:13.933478Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:13.933865Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:13.933907Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:06:13.936186Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:13.936227Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:06:13.936269Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:06:13.939404Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 probe = 4 >> AnalyzeColumnshard::AnalyzeAnalyzeOneColumnTableSpecificColumns [GOOD] >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAnalyzeTableResponse [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestReservedConsumersMetering [GOOD] Test command err: 2025-06-25T15:05:51.085036Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519903410432178877:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:51.085152Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0016b7/r3tmp/tmpBLTXTq/pdisk_1.dat 2025-06-25T15:05:51.427170Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19152, node 1 2025-06-25T15:05:51.433055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:51.433170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:51.457604Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:05:51.532391Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:51.532410Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:51.532415Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:51.532520Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2022 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:51.976710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:05:52.048892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:05:52.091160Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:2022 2025-06-25T15:05:52.217605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T15:05:52.457549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_pq.cpp:268) encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "0" shard_id: "shard-000009" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000005" } records { sequence_number: "0" shard_id: "shard-000008" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "1" shard_id: "shard-000005" } records { sequence_number: "1" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000009" } records { sequence_number: "0" shard_id: "shard-000006" } records { sequence_number: "2" shard_id: "shard-000001" } records { sequence_number: "0" shard_id: "shard-000007" } records { sequence_number: "1" shard_id: "shard-000007" } records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000007" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000005" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000009" } records { sequence_number: "1" shard_id: "shard-000008" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000006" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000009" } records { sequence_number: "3" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000009" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000001" } encryption_type: NONE records { sequence_number: "5" shard_id: "shard-000001" } records { sequence_number: "5" shard_id: "shard-000009" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "3" shard_id: "shard-000005" } records { sequence_number: "2" shard_id: "shard-000008" } 
records { sequence_number: "6" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000005" } records { sequence_number: "6" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000009" } records { sequence_number: "2" shard_id: "shard-000006" } records { sequence_number: "7" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000007" } records { sequence_number: "4" shard_id: "shard-000007" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000007" } records { sequence_number: "8" shard_id: "shard-000004" } records { sequence_number: "5" shard_id: "shard-000005" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "7" shard_id: "shard-000009" } records { sequence_number: "3" shard_id: "shard-000008" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000006" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000009" } records { sequence_number: "8" shard_id: "shard-000001" } records { sequence_number: "9" shard_id: "shard-000009" } records { sequence_number: "9" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000001" } encryption_type: NONE records { sequence_number: "10" shard_id: "shard-000001" } records { sequence_number: "10" shard_id: "shard-000009" } records { sequence_number: "10" shard_id: "shard-000004" } records { sequence_number: "6" shard_id: "shard-000005" } records { sequence_number: "4" shard_id: "shard-000008" } records { sequence_number: "11" shard_id: "shard-000004" } records { sequence_number: "12" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000005" } records { sequence_number: "11" shard_id: "shard-000001" } records { sequence_number: "11" shard_id: "shard-000009" } records { sequence_number: "4" shard_id: "shard-000006" } records { sequence_number: "12" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000007" } records { sequence_number: "7" shard_id: "shard-000007" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000007" } records { sequence_number: "13" shard_id: "shard-000004" } records { sequence_number: "8" shard_id: "shard-000005" } records { sequence_number: "2" shard_id: "shard-000003" } records { sequence_number: "12" shard_id: "shard-000009" } records { sequence_number: "5" shard_id: "shard-000008" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000006" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000009" } records { sequence_number: "13" shard_id: "shard-000001" } records { sequence_number: "14" shard_id: "shard-000009" } records { sequence_number: "14" shard_id: "shard-000004" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000001" } encryption_type: NONE records { sequence_number: "15" shard_id: "shard-000001" } records { sequence_number: "15" shard_id: "shard-000009" } records { sequence_number: "15" shard_id: "shard-000004" } records { sequence_number: "9" shard_id: "shard-000005" } records { sequence_number: "6" shard_id: "shard-000008" } records { 
sequence_number: "16" shard_id: "shard-000004" } records { sequence_number: "17" shard_id: "shard-000004" } records { sequence_number: "10" shard_id: "shard-000005" } records { sequence_number: "16" shard_id: "shard-000001" } records { sequence_number: "16" shard_id: "shard-000009" } records { sequence_number: "6" shard_id: "shard-000006" } records { sequence_number: "17" shard_id: "shard-000001" } records { sequence_number: "9" shard_id: "shard-000007" } records { sequence_number: "10" shard_id: "shard-000007" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000007" } records { sequence_number: "18" shard_id: "shard-000004" } records { sequence_number: "11" shard_id: "shard-000005" } records { sequence_number: "3" shard_id: "shard-000003" } records { sequence_number: "17" shard_id: "shard-000009" } records { sequence_number: "7" shard_id: "shard-000008" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000006" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "18" shard_id ... older_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750863968529-170","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":0,"unit":"second","start":1750863968,"finish":1750863968},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863968}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750863968529-171","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":0,"unit":"mbyte*second","start":1750863968,"finish":1750863968},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863968}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1750863968529-172","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1750863968,"finish":1750863968},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863968}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1750863968557-173","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1750863968,"finish":1750863969},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863969}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750863968557-174","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1750863968,"finish":1750863969},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863969}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750863968557-175","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1750863968,"finish":1750863969},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863969}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1750863968557-176","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750863968,"finish":1750863969},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863969}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1750863969580-177","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1750863969,"finish":1750863970},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863970}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750863969580-178","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1750863969,"finish":1750863970},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863970}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750863969580-179","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1750863969,"finish":1750863970},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863970}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1750863969580-180","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750863969,"finish":1750863970},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863970}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1750863970593-181","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1750863970,"finish":1750863971},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863971}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750863970593-182","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1750863970,"finish":1750863971},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863971}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750863970593-183","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1750863970,"finish":1750863971},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863971}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1750863970593-184","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750863970,"finish":1750863971},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863971}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1750863971605-185","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1750863971,"finish":1750863972},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863972}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750863971605-186","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1750863971,"finish":1750863972},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863972}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750863971605-187","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1750863971,"finish":1750863972},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863972}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1750863971605-188","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750863971,"finish":1750863972},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863972}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1750863972616-189","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1750863972,"finish":1750863973},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863973}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750863972616-190","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1750863972,"finish":1750863973},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863973}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1750863972616-191","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1750863972,"finish":1750863973},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1750863973}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1750863972616-192","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1750863972,"finish":1750863973},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1750863973}' ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeAnalyzeOneColumnTableSpecificColumns [GOOD] Test command err: 2025-06-25T15:03:49.900663Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:49.901007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:49.901107Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f7f/r3tmp/tmpuUiFr6/pdisk_1.dat 2025-06-25T15:03:50.245681Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63813, node 1 2025-06-25T15:03:50.489568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:50.489632Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:50.489672Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:50.490341Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:50.493114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:50.591998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:50.592136Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:50.606873Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1432 2025-06-25T15:03:51.121806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:53.949492Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:53.980303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:53.980430Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:54.019267Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:54.021492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:54.240337Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:54.275526Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.276120Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.276744Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.276950Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.277235Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.277346Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.277446Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.277540Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.277616Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.459329Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:54.459447Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:54.472665Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:54.639859Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:54.687626Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:54.687732Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:54.727684Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:54.727880Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:54.728091Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:54.728149Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:54.728213Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:54.728290Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:54.728366Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:54.728431Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:54.729021Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:54.753526Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:54.753645Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:54.762670Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:54.769819Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:54.770101Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:54.780164Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:54.798706Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:54.798764Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:54.798829Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:54.807331Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:54.813270Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:54.813394Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:55.009681Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:55.138178Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:55.206998Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:55.724183Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:55.987135Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2152:3024], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:55.987254Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:56.003054Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:56.100863Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2221:2793];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:56.101108Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2221:2793];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:56.101415Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2221:2793];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:56.101546Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2221:2793];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:56.101665Z node 2 :TX_COLUMNSHARD WARN: ... tistics(), node count = 0, schemeshard count = 0 2025-06-25T15:06:10.448138Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7276:5351], schemeshard count = 1 2025-06-25T15:06:11.396066Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:06:11.396161Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:11.399169Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:11.415118Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:11.415613Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:11.415682Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-25T15:06:11.438557Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:12.494280Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:12.494331Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:06:12.494357Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-25T15:06:12.494388Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-25T15:06:12.494437Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:12.494954Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:12.507799Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-25T15:06:12.507893Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:12.508227Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:12.508284Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:12.509005Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-25T15:06:12.509082Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-25T15:06:12.509423Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:12.532568Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-25T15:06:12.532626Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:12.532746Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:12.533236Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7389:5419], server id = [2:7390:5420], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:12.533346Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7389:5419], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:12.536165Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:12.536236Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:12.536407Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:12.536541Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:12.536682Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7389:5419], server id = [2:7390:5420], tablet id = 72075186224037899 2025-06-25T15:06:12.536708Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:12.536823Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:06:12.538470Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:12.561318Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7410:5439]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:12.561420Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:12.561447Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7410:5439], StatRequests.size() = 1 2025-06-25T15:06:12.642269Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YjJkYTEwZDMtZGYwOTliZjQtZGIxMDU2YmItYmVjMjYwZDU=, TxId: 2025-06-25T15:06:12.642330Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YjJkYTEwZDMtZGYwOTliZjQtZGIxMDU2YmItYmVjMjYwZDU=, TxId: 2025-06-25T15:06:12.642647Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:12.655162Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:12.655213Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T15:06:13.137898Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-25T15:06:13.137984Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T15:06:13.792974Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:06:13.793057Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:06:13.793105Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:06:15.003965Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:15.004077Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-25T15:06:15.004106Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:15.004517Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:15.017727Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:15.017969Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:15.018008Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:15.018288Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:15.041959Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:15.042086Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T15:06:15.042466Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7506:5487], server id = [2:7507:5488], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:15.042531Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7506:5487], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:15.043652Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:15.043761Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:15.043965Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:15.044122Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:15.044296Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7506:5487], server id = [2:7507:5488], tablet id = 72075186224037899 2025-06-25T15:06:15.044348Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:15.044496Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:06:15.046756Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:15.065460Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzMxN2VmMDMtYTYxZmVlNi0yZTE3Mjg5MS05NjJhOTdiMQ==, TxId: 2025-06-25T15:06:15.065518Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzMxN2VmMDMtYTYxZmVlNi0yZTE3Mjg5MS05NjJhOTdiMQ==, TxId: 2025-06-25T15:06:15.065861Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:15.078536Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:15.078603Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3045:3298] >> GroupWriteTest::WithRead >> AnalyzeColumnshard::Analyze [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAnalyzeTableResponse [GOOD] Test command err: 2025-06-25T15:03:52.657697Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:52.657968Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:52.658059Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f5d/r3tmp/tmpwID70F/pdisk_1.dat 2025-06-25T15:03:52.974324Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24336, node 1 2025-06-25T15:03:53.189909Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:53.189969Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:53.189996Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:53.190491Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:53.192661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:53.283365Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:53.283499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:53.298242Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8364 2025-06-25T15:03:53.811920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:56.372686Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:56.403706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:56.403821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:56.444162Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:56.446299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:56.643911Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:56.678749Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.679354Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.679849Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.679997Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.680225Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.680326Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.680424Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.680501Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.680562Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:56.863009Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:56.863130Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:56.879089Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:57.016998Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:57.051838Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:57.051965Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:57.081071Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:57.081222Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:57.081364Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:57.081409Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:57.081447Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:57.081506Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:57.081585Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:57.081617Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:57.081998Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:57.103284Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:57.103399Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:57.112819Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:57.119608Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:57.119867Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:57.128845Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:57.146392Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:57.146449Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:57.146518Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:57.157804Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:57.168821Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:57.168947Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:57.358800Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:57.477701Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:57.577917Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:58.066231Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:58.309272Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:58.309374Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:58.324793Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:58.412347Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:58.412595Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:58.412932Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:58.413070Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:58.413205Z node 2 :TX_COLUMNSHARD WARN: ... :802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:10.695513Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:10.696214Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:10.709357Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:10.709682Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:10.709757Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:10.710623Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:06:10.723707Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:10.723898Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:10.724424Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7438:5449], server id = [2:7439:5450], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:10.724538Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7438:5449], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:10.727843Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:10.727949Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:10.728134Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:10.728329Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:10.728604Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:06:10.731357Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7438:5449], server id = [2:7439:5450], tablet id = 72075186224037899 2025-06-25T15:06:10.731417Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:10.731896Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:10.768609Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7459:5469]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:10.768838Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:10.768891Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7459:5469], StatRequests.size() = 1 2025-06-25T15:06:10.881681Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzdhNzQ1MDMtM2E1MzdkYWMtMjQxNzc4NTQtMmMxOTg0ZjI=, TxId: 2025-06-25T15:06:10.881757Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzdhNzQ1MDMtM2E1MzdkYWMtMjQxNzc4NTQtMmMxOTg0ZjI=, TxId: 2025-06-25T15:06:10.882270Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:10.896208Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:10.896279Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. 
No ActorId to send reply. 2025-06-25T15:06:11.376260Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-25T15:06:11.376347Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T15:06:12.025573Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:06:12.025650Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:12.026069Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:12.038389Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:12.038591Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:12.038629Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-25T15:06:12.061711Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:13.204663Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:13.204728Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:06:13.204750Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-25T15:06:13.204893Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-25T15:06:13.205194Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-25T15:06:13.205260Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-25T15:06:13.218235Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-25T15:06:14.332893Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:06:14.332973Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:06:14.333056Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:06:15.392010Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:06:15.392192Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:06:15.413860Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:15.413991Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-25T15:06:15.414031Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:15.414652Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:15.427177Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:15.427423Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:15.427461Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:15.427738Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:15.451304Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:15.451464Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T15:06:15.451846Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7631:5559], server id = [2:7632:5560], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:15.451912Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7631:5559], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:15.452880Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:15.452946Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:15.453037Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:15.453169Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:15.453386Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:06:15.455031Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7631:5559], server id = [2:7632:5560], tablet id = 72075186224037899 2025-06-25T15:06:15.455060Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:15.455387Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:15.474555Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODVmZjFiNzQtZjFlYWRmYi1hYjMxOGI0Zi1mOGNkNzRlYw==, TxId: 2025-06-25T15:06:15.474610Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODVmZjFiNzQtZjFlYWRmYi1hYjMxOGI0Zi1mOGNkNzRlYw==, TxId: 2025-06-25T15:06:15.474951Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:15.489100Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:15.489167Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3049:3301] >> AnalyzeDatashard::AnalyzeOneTable [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::Analyze [GOOD] Test command err: 2025-06-25T15:03:53.595894Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:53.596281Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:53.596407Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f5c/r3tmp/tmpDMKzqX/pdisk_1.dat 2025-06-25T15:03:53.922006Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18715, node 1 2025-06-25T15:03:54.140238Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:54.140293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:54.140343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:54.140878Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:54.143001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:54.231627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:54.231804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:54.246263Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10843 2025-06-25T15:03:54.774511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:57.543413Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:57.575933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:57.576016Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:57.614001Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:57.616222Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:57.810200Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:57.844607Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.845040Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.845550Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.845681Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.845880Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.845969Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.846072Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.846162Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.846220Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:58.026341Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:58.026455Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:58.039422Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:58.196881Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:58.239067Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:58.239148Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:58.265832Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:58.266013Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:58.266219Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:58.266282Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:58.266331Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:58.266389Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:58.266454Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:58.266503Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:58.266978Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:58.288802Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:58.288924Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:58.297480Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:58.303867Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:58.304129Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:58.312151Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:58.327632Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:58.327684Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:58.327733Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:58.335300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:58.341103Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:58.341257Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:58.526436Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:58.657996Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:58.715675Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:59.224839Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:59.473501Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2151:3025], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:59.473713Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:59.490814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:59.581922Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2221:2792];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:59.582138Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2221:2792];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:59.582401Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2221:2792];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:59.582484Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2221:2792];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:59.582549Z node 2 :TX_COLUMNSHARD WARN: ... pl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:06:13.347233Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:06:13.350359Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T15:06:13.353293Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7162:5285], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:06:13.353377Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7172:5290], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:06:13.353448Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:06:13.361798Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:06:13.410597Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7176:5293], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T15:06:13.663085Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:7260:5338] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:06:13.728410Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7282:5352]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:13.728658Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:06:13.728725Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7284:5354] 2025-06-25T15:06:13.728777Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7284:5354] 2025-06-25T15:06:13.728985Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7285:5355] 2025-06-25T15:06:13.729075Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7285:5355], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:06:13.729121Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T15:06:13.729216Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7284:5354], server id = [2:7285:5355], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:13.729249Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:06:13.729319Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7282:5352], StatRequests.size() = 1 2025-06-25T15:06:13.832661Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZGVlODk4ZmYtOTdjNDY3OTEtMWY0YTE0NjMtZGNlYTNhYjg=, TxId: 2025-06-25T15:06:13.832726Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZGVlODk4ZmYtOTdjNDY3OTEtMWY0YTE0NjMtZGNlYTNhYjg=, TxId: 2025-06-25T15:06:13.833061Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:13.846690Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:06:13.846748Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T15:06:13.889432Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:06:13.889516Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:06:13.985860Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7284:5354], schemeshard count = 1 2025-06-25T15:06:14.964083Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:06:14.964162Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:14.966428Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:14.980961Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:14.981505Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:14.981571Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-25T15:06:15.004784Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:15.015551Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-25T15:06:15.016384Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-25T15:06:15.016476Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-25T15:06:15.028931Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-25T15:06:16.137800Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:16.137931Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:16.137975Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:16.138446Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:16.151815Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:16.152124Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:16.152193Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:16.153061Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:06:16.177244Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:16.177468Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:16.177964Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7399:5424], server id = [2:7400:5425], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:16.178071Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7399:5424], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:16.181625Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:16.181749Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:16.182112Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:16.182315Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:16.182687Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:06:16.184650Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7399:5424], server id = [2:7400:5425], tablet id = 72075186224037899 2025-06-25T15:06:16.184694Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:16.185190Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:16.213851Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7420:5444]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:16.214036Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:16.214075Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7420:5444], StatRequests.size() = 1 2025-06-25T15:06:16.307921Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZWRiNjA0NTgtYzI4NmYwYWUtMjQ4ZTFiMTEtOTI3MGNhNzM=, TxId: 2025-06-25T15:06:16.307970Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZWRiNjA0NTgtYzI4NmYwYWUtMjQ4ZTFiMTEtOTI3MGNhNzM=, TxId: 2025-06-25T15:06:16.308279Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:16.321625Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:16.321675Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3047:3300] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeDatashard::AnalyzeOneTable [GOOD] Test command err: 2025-06-25T15:03:58.802000Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:58.802233Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:58.802300Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001efb/r3tmp/tmpuFB5eX/pdisk_1.dat 2025-06-25T15:03:59.050662Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11904, node 1 2025-06-25T15:03:59.263157Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:59.263205Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:59.263234Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:59.263812Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:59.266004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:59.356450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:59.356597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:59.371476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31177 2025-06-25T15:03:59.858076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:04:02.343154Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:04:02.369110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:02.369246Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:02.406520Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:04:02.408939Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:02.617790Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:02.652499Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.652995Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.653514Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.653644Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.653843Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.653928Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.654035Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.654091Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.654144Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.837770Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:02.837862Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:02.870096Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:02.999324Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:04:03.035787Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:04:03.035869Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:04:03.057278Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:04:03.058466Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:04:03.058637Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:04:03.058682Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:04:03.058718Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:04:03.058762Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:04:03.058803Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:04:03.058850Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:04:03.059604Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:04:03.083535Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:03.083608Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:03.089493Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1805:2572] 2025-06-25T15:04:03.093376Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1825:2582] 2025-06-25T15:04:03.093816Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1825:2582], schemeshard id = 72075186224037897 2025-06-25T15:04:03.102157Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:04:03.116913Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:04:03.116963Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:04:03.117020Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:04:03.126798Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:03.132226Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:04:03.132337Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:04:03.307940Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:04:03.468292Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:04:03.513187Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:04:04.104577Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:04.307525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2148:3024], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:04.307615Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:04.319275Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:04.603042Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2440:3070], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:04.603131Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:04.603802Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2444:3074]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:04:04.603936Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:04:04.603982Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2446:3076] 2025-06-25T15:04:04.604023Z no ... unt = 1 2025-06-25T15:06:05.847935Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:06:09.390608Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T15:06:10.730038Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-25T15:06:10.730128Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7961: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T15:06:10.730176Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7992: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T15:06:10.730223Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-25T15:06:12.202665Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:06:12.202917Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-25T15:06:12.255731Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-25T15:06:12.255803Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 219.000000s, at schemeshard: 72075186224037897 2025-06-25T15:06:12.256096Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 49 2025-06-25T15:06:12.268516Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T15:06:13.502736Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:13.502840Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:06:13.502885Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-25T15:06:13.502949Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-25T15:06:13.503005Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:06:13.503450Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:06:13.517698Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T15:06:13.521946Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6457:4600], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:06:13.522099Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6467:4605], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:06:13.522195Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:06:13.540249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:06:13.589525Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6471:4608], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T15:06:13.770556Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:6569:4656] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:06:13.804457Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:6598:4671]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:13.804643Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:06:13.804708Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:6600:4673] 2025-06-25T15:06:13.804782Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:6600:4673] 2025-06-25T15:06:13.804979Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:6601:4674] 2025-06-25T15:06:13.805079Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:6601:4674], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:06:13.805122Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T15:06:13.805209Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:6600:4673], server id = [2:6601:4674], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:13.805248Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:06:13.805302Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:6598:4671], StatRequests.size() = 1 2025-06-25T15:06:13.902434Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZTc1MTUzMmQtNjk0OTgwN2QtZTJiODVhMjAtZTAxZTJkODA=, TxId: 2025-06-25T15:06:13.902495Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZTc1MTUzMmQtNjk0OTgwN2QtZTJiODVhMjAtZTAxZTJkODA=, TxId: 2025-06-25T15:06:13.903057Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:13.915693Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:06:13.915752Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T15:06:13.979247Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:06:13.979345Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:06:14.043032Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:6600:4673], schemeshard count = 1 2025-06-25T15:06:15.029518Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:06:15.029610Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-25T15:06:15.029660Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:645: [72075186224037894] ScheduleNextAnalyze. Skip analyze for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:16.146016Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T15:06:16.167499Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:16.167657Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-25T15:06:16.167704Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:16.168156Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:06:16.170358Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-25T15:06:16.180789Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OGVhOTdmN2YtOGQyMDE0OTktYWIzNDkzN2YtZjk0ODJkYjI=, TxId: 2025-06-25T15:06:16.180852Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OGVhOTdmN2YtOGQyMDE0OTktYWIzNDkzN2YtZjk0ODJkYjI=, TxId: 2025-06-25T15:06:16.181207Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:16.194930Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:16.194994Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:2642:3184] 2025-06-25T15:06:16.195617Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:6708:4737]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:16.198880Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:16.198949Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:06:16.203233Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:16.203309Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:06:16.203375Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:06:16.206577Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-25T15:06:16.206907Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 |94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> TCutHistoryRestrictions::SameTabletInBothLists [GOOD] >> THeavyPerfTest::TTestLoadEverything |94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> KqpLimits::ManyPartitionsSortingLimit [GOOD] >> THiveTest::TestDrain >> THiveTest::TestFollowers >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] >> THiveTest::TestNoMigrationToSelf >> THiveTest::TestLocalDisconnect >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] >> THiveTest::TestUpdateChannelValues >> THiveTest::TestCreateTablet |94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt32_Reboot [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneByTS [GOOD] >> DataStreams::TestGetRecordsStreamWithMultipleShards >> THiveTest::TestNoMigrationToSelf [GOOD] >> THiveTest::TestReCreateTablet >> THiveTest::TestLocalDisconnect [GOOD] >> THiveTest::TestLocalReplacement >> TargetTrackingScaleRecommenderPolicy::ScaleOut [GOOD] >> TargetTrackingScaleRecommenderPolicy::ScaleIn [GOOD] >> TargetTrackingScaleRecommenderPolicy::BigNumbersScaleOut [GOOD] >> TargetTrackingScaleRecommenderPolicy::BigNumbersScaleIn [GOOD] >> TargetTrackingScaleRecommenderPolicy::SpikeResistance [GOOD] >> TargetTrackingScaleRecommenderPolicy::NearTarget [GOOD] >> TargetTrackingScaleRecommenderPolicy::AtTarget [GOOD] >> TargetTrackingScaleRecommenderPolicy::Fluctuations [GOOD] >> TargetTrackingScaleRecommenderPolicy::FluctuationsBigNumbers [GOOD] >> 
TargetTrackingScaleRecommenderPolicy::ScaleInToMaxSeen [GOOD] >> TargetTrackingScaleRecommenderPolicy::Idle [GOOD] >> TStorageBalanceTest::TestScenario1 >> THiveTest::TestCreateTablet [GOOD] >> THiveTest::TestCreate100Tablets >> THiveTest::TestUpdateChannelValues [GOOD] >> THiveTest::TestStorageBalancer >> THiveTest::TestFollowers [GOOD] >> THiveTest::TestFollowersReconfiguration >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v1] >> THiveTest::TestReCreateTablet [GOOD] >> THiveTest::TestReCreateTabletError ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKUInt32_Reboot [GOOD] Test command err: 2025-06-25T15:05:02.141166Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:02.160550Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:02.160732Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:02.166127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:02.166302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:02.166472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:02.166605Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:02.166736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:02.166852Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:02.166996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:02.167150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:02.167273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:02.167375Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:02.167449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:02.185196Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:02.185427Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:02.185491Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:02.185704Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:02.185845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:02.185907Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:02.185941Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:02.185995Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:02.186032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:02.186058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:02.186083Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:02.186202Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:02.186239Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:02.186265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:02.186285Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:02.186343Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:02.186386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:02.186414Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:02.186430Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:02.186455Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:02.186475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:02.186494Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:02.186616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:02.186640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:02.186658Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:02.186805Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:02.186834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:02.186851Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:02.186946Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:02.186973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:02.186992Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:02.187038Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:02.187089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:02.187114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:02.187132Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:02.187292Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=37; 2025-06-25T15:05:02.187383Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=56; 2025-06-25T15:05:02.187453Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=36; 2025-06-25T15:05:02.187519Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=29; 2025-06-25T15:05:02.187588Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:02.187642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:02.187670Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:02.187707Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
E:granule/portions;fline=constructor_meta.cpp:71;memory_size=25158;data_size=25124;sum=12489152;count=14328;size_of_meta=136; 2025-06-25T15:06:19.686446Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=25230;data_size=25196;sum=13004960;count=7164;size_of_portion=208; 2025-06-25T15:06:19.687171Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=91365; 2025-06-25T15:06:19.687244Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=10; 2025-06-25T15:06:19.688711Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1410; 2025-06-25T15:06:19.688754Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=93067; 2025-06-25T15:06:19.688795Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=93162; 2025-06-25T15:06:19.688853Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=11; 2025-06-25T15:06:19.689478Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=586; 2025-06-25T15:06:19.689513Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=94192; 2025-06-25T15:06:19.689663Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=102; 2025-06-25T15:06:19.689757Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=51; 2025-06-25T15:06:19.690002Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=209; 2025-06-25T15:06:19.690189Z node 1 :TX_COLUMNSHARD 
INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=159; 2025-06-25T15:06:19.704027Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=13781; 2025-06-25T15:06:19.723619Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=19491; 2025-06-25T15:06:19.723722Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=11; 2025-06-25T15:06:19.723772Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=11; 2025-06-25T15:06:19.723810Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T15:06:19.723885Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=39; 2025-06-25T15:06:19.723925Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-25T15:06:19.724005Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=48; 2025-06-25T15:06:19.724045Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=5; 2025-06-25T15:06:19.724100Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=26; 2025-06-25T15:06:19.724174Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=41; 2025-06-25T15:06:19.724244Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=39; 2025-06-25T15:06:19.724274Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=133300; 2025-06-25T15:06:19.724406Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=21623968;raw_bytes=21227350;count=3;records=225200} inactive {blob_bytes=141321168;raw_bytes=137674250;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:06:19.724500Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 
2025-06-25T15:06:19.724544Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:06:19.724599Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:06:19.724634Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:06:19.724818Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:19.724878Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:06:19.724958Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863607584;tx_id=18446744073709551615;;current_snapshot_ts=1750863903944; 2025-06-25T15:06:19.724991Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:19.725025Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:19.725053Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:19.725122Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:06:19.730178Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:06:19.730983Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:06:19.731015Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:06:19.731037Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:06:19.731092Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:19.731161Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:06:19.731218Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863607584;tx_id=18446744073709551615;;current_snapshot_ts=1750863903944; 2025-06-25T15:06:19.731254Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:19.731295Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:19.731325Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:19.731387Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=1.000000s; 2025-06-25T15:06:19.731422Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; |94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> THiveTest::TestLocalReplacement [GOOD] >> THiveTest::TestHiveRestart >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAggregate [GOOD] >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeAggregate [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v1] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] >> THiveTest::TestReCreateTabletError [GOOD] >> THiveTest::TestNodeDisconnect >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64 [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v0] >> TraverseColumnShard::TraverseColumnTableHiveDistributionAbsentNodes [GOOD] >> THiveTest::TestFollowersReconfiguration [GOOD] >> THiveTest::TestFollowerPromotion >> THiveTest::TestHiveRestart [GOOD] >> THiveTest::TestLimitedNodeList ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> 
AnalyzeColumnshard::AnalyzeRebootSaBeforeAggregate [GOOD] Test command err: 2025-06-25T15:03:54.950105Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:54.950477Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:54.950595Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f3c/r3tmp/tmpPoO8ev/pdisk_1.dat 2025-06-25T15:03:55.287287Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11950, node 1 2025-06-25T15:03:55.528964Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:55.529021Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:55.529052Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:55.529647Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:55.531985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:55.636238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:55.636408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:55.651348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12334 2025-06-25T15:03:56.154660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:58.942549Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:58.974169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:58.974284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:59.012221Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:59.014365Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:59.219778Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:59.255171Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:59.255708Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:59.256254Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:59.256434Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:59.256661Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:59.256744Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:59.256845Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:59.256922Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:59.256983Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:59.439462Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:59.439572Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:59.472282Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:59.603874Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:59.645269Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:59.645365Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:59.670856Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:59.671986Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:59.672150Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:59.672199Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:59.672241Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:59.672291Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:59.672351Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:59.672400Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:59.673324Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:59.699876Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:59.699973Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:59.706264Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1805:2572] 2025-06-25T15:03:59.710121Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1825:2582] 2025-06-25T15:03:59.710554Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1825:2582], schemeshard id = 72075186224037897 2025-06-25T15:03:59.719458Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:59.736088Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:59.736149Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:59.736211Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:59.749013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:59.754044Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:59.754145Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:59.915084Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:04:00.084625Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:04:00.140498Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:04:00.725681Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:00.991314Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2148:3024], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:00.991442Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:01.009200Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:04:01.108836Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2791];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:01.109060Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2791];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:01.109344Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2791];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:01.109457Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2791];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:01.109569Z node 2 :TX_COLUMNSHARD WARN: ... 37894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:06:18.463739Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-25T15:06:18.463835Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 1 2025-06-25T15:06:18.463913Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 1 2025-06-25T15:06:18.463973Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:06:18.464119Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:18.465173Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:06:18.465638Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:06:18.466160Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:18.466249Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:18.467118Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:18.467181Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:18.468917Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:06:18.531333Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:18.531516Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T15:06:18.532071Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7500:5471], server id = [2:7501:5472], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:18.532180Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7500:5471], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:18.535277Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:18.535377Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:18.535556Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:18.535732Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:18.535980Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:06:18.538694Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7500:5471], server id = [2:7501:5472], tablet id = 72075186224037899 2025-06-25T15:06:18.538738Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:18.539312Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:18.578626Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7521:5491]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:18.578904Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:18.578959Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7521:5491], StatRequests.size() = 1 2025-06-25T15:06:18.696151Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NzFjZDEwMjMtMWE4Yjc3NWYtMTcwM2VhYTMtYzQ5MjMzZjI=, TxId: 2025-06-25T15:06:18.696222Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NzFjZDEwMjMtMWE4Yjc3NWYtMTcwM2VhYTMtYzQ5MjMzZjI=, TxId: 2025-06-25T15:06:18.696642Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:18.708496Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7536:5497] 2025-06-25T15:06:18.708642Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7536:5497], schemeshard id = 72075186224037897 
2025-06-25T15:06:18.708712Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7453:5440], server id = [2:7537:5498], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:18.708738Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7537:5498] 2025-06-25T15:06:18.708777Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7537:5498], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-25T15:06:18.721173Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:18.721233Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T15:06:18.818979Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7548:5501] 2025-06-25T15:06:18.819633Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:3045:3299] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-25T15:06:18.819685Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3045:3299] 2025-06-25T15:06:18.819751Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-25T15:06:19.312632Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 3 is different from the current 0 2025-06-25T15:06:19.312709Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T15:06:20.085714Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:06:20.085796Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:06:20.085833Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:06:21.360851Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:21.361000Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:21.361059Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:21.361709Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:21.378421Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:21.378816Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:21.378889Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:21.379307Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:06:21.392587Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:21.392772Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-25T15:06:21.393231Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7620:5544], server id = [2:7621:5545], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:21.393336Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7620:5544], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:21.394452Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:21.394561Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:21.394742Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:21.394917Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:21.395207Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:06:21.398172Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7620:5544], server id = [2:7621:5545], tablet id = 72075186224037899 2025-06-25T15:06:21.398214Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:21.398703Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:21.419149Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzBkOWZjYmMtNmMzYjZmYjUtZTVlZmFmMGMtMWMxOWI3MGM=, TxId: 2025-06-25T15:06:21.419213Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzBkOWZjYmMtNmMzYjZmYjUtZTVlZmFmMGMtMWMxOWI3MGM=, TxId: 2025-06-25T15:06:21.419622Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:21.454136Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:21.454211Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3045:3299] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeAggregate [GOOD] Test command err: 2025-06-25T15:03:50.141566Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:50.141948Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:50.142066Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f79/r3tmp/tmpLysKno/pdisk_1.dat 2025-06-25T15:03:50.516585Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25449, node 1 2025-06-25T15:03:50.736364Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:50.736415Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:50.736443Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:50.737039Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:50.742485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:50.838523Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:50.838656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:50.853571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29614 2025-06-25T15:03:51.387729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:54.233688Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:54.263958Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:54.264058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:54.302709Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:54.304979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:54.511146Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:54.546666Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.547280Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.547837Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.548041Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.548281Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.548406Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.548511Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.548591Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.548661Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:54.768935Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:54.769057Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:54.784347Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:54.934547Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:54.986156Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:54.986268Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:55.028948Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:55.029212Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:55.029430Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:55.029513Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:55.029584Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:55.029654Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:55.029710Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:55.029767Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:55.030326Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:55.053062Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:55.053185Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:55.062010Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:55.069218Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:55.069498Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:55.078725Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:55.098837Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:55.098914Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:55.098991Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:55.112995Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:55.127071Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:55.127225Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:55.331018Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:55.475278Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:55.574868Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:56.062915Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:56.248126Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:56.248240Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:56.264421Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:56.408255Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:56.408495Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:56.408765Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:56.408868Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:56.408968Z node 2 :TX_COLUMNSHARD WARN: ... , at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:06:20.931869Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8265:6114], server id = [2:8266:6115], tablet id = 72075186224037894 2025-06-25T15:06:20.931959Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8383:6175] 2025-06-25T15:06:20.932029Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8383:6175] 2025-06-25T15:06:20.969445Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:06:20.969556Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:06:20.970026Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:06:20.970731Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:06:20.971034Z node 2 :STATISTICS DEBUG: tx_init.cpp:55: [72075186224037894] Loaded database: /Root/Database 2025-06-25T15:06:20.971118Z node 2 :STATISTICS DEBUG: tx_init.cpp:59: [72075186224037894] Loaded traversal start key 2025-06-25T15:06:20.971158Z node 2 :STATISTICS DEBUG: tx_init.cpp:64: [72075186224037894] Loaded traversal table owner id: 72075186224037897 2025-06-25T15:06:20.971191Z node 2 :STATISTICS DEBUG: tx_init.cpp:69: [72075186224037894] Loaded traversal table local path id: 4 2025-06-25T15:06:20.971231Z node 2 :STATISTICS DEBUG: tx_init.cpp:74: [72075186224037894] Loaded traversal start time: 1750863980894754 2025-06-25T15:06:20.971266Z node 2 :STATISTICS DEBUG: tx_init.cpp:79: [72075186224037894] Loaded traversal IsColumnTable: 1 2025-06-25T15:06:20.971303Z node 2 :STATISTICS DEBUG: tx_init.cpp:84: [72075186224037894] Loaded global traversal round: 2 2025-06-25T15:06:20.971374Z node 2 :STATISTICS DEBUG: 
tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 1 2025-06-25T15:06:20.971428Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:06:20.971536Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-25T15:06:20.971604Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:06:20.971655Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:06:20.971720Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:06:20.971856Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:20.972691Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:06:20.973473Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:20.973540Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:20.973665Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:06:20.974980Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:20.975047Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:20.976676Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:06:21.042194Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:21.042391Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T15:06:21.043281Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8430:6206], server id = [2:8434:6210], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:21.043649Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8430:6206], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:21.043956Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8431:6207], server id = [2:8435:6211], tablet id = 72075186224037900, status = OK 2025-06-25T15:06:21.044002Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8431:6207], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:21.044088Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8432:6208], server id = [2:8436:6212], tablet id = 72075186224037901, status = OK 2025-06-25T15:06:21.044121Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8432:6208], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:21.046626Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8433:6209], server id = [2:8437:6213], tablet id = 72075186224037902, status = OK 2025-06-25T15:06:21.046688Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8433:6209], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:21.050916Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:21.051898Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8430:6206], server id = [2:8434:6210], tablet id = 72075186224037899 2025-06-25T15:06:21.051946Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:21.052701Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:06:21.052918Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8431:6207], server id = [2:8435:6211], tablet id = 72075186224037900 2025-06-25T15:06:21.052953Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:21.053809Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T15:06:21.054163Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8432:6208], server id = [2:8436:6212], tablet id = 72075186224037901 2025-06-25T15:06:21.054199Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:21.054596Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T15:06:21.054640Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:21.054861Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-25T15:06:21.055029Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:21.056151Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:06:21.058760Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8433:6209], server id = [2:8437:6213], tablet id = 72075186224037902 2025-06-25T15:06:21.058789Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:21.059397Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:21.095251Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8466:6238]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:21.095551Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:21.095598Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8466:6238], StatRequests.size() = 1 2025-06-25T15:06:21.209358Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YTczYmExZGYtZGYxODkyMjktNzY2ZGRjMmUtM2U0ODNmMGE=, TxId: 2025-06-25T15:06:21.209427Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YTczYmExZGYtZGYxODkyMjktNzY2ZGRjMmUtM2U0ODNmMGE=, TxId: 2025-06-25T15:06:21.209901Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:21.233206Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8481:6244] 2025-06-25T15:06:21.233431Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:8481:6244], schemeshard id = 72075186224037897 2025-06-25T15:06:21.233516Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8383:6175], server id = [2:8482:6245], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:21.233581Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8482:6245] 2025-06-25T15:06:21.233649Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8482:6245], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-25T15:06:21.257685Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:21.257745Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T15:06:21.333219Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8485:6248]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:21.333547Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:21.333606Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:06:21.336324Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:21.336400Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:06:21.336459Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:06:21.342358Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 |94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> THiveTest::TestStorageBalancer [GOOD] >> THiveTest::TestRestartsWithFollower >> GroupWriteTest::WithRead [GOOD] >> THiveTest::TestCreate100Tablets [GOOD] >> THiveTest::TestCreateSubHiveCreateTablet >> THiveTest::TestNodeDisconnect [GOOD] >> THiveTest::TestReassignGroupsWithRecreateTablet ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64 [GOOD] Test command err: 2025-06-25T15:05:28.815017Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:28.832415Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:28.832576Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:28.837141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:28.837268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:28.837452Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:28.837529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:28.837579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:28.837630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:28.837699Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:28.837754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:28.837826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:28.837886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:28.837947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:28.853560Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:28.853677Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:28.853708Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:28.853824Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:28.853943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:28.854001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:28.854028Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:28.854110Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:28.854147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:28.854177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:28.854204Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:28.854304Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:28.854339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:28.854358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:28.854375Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:28.854427Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:28.854459Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:28.854480Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:28.854494Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:28.854519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:28.854550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:28.854570Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:28.854696Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:28.854719Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:28.854743Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:28.854853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:28.854885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:28.854900Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:28.854974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:28.855002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:28.855017Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:28.855056Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:28.855090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:28.855112Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:28.855128Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:28.855253Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=25; 2025-06-25T15:05:28.855317Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=31; 2025-06-25T15:05:28.855375Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=28; 2025-06-25T15:05:28.855425Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=24; 2025-06-25T15:05:28.855490Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:28.855534Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:28.855557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:28.855590Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
d:9;chunk_idx:46;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:47;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:48;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:49;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:50;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:51;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:52;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:53;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:54;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:55;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:56;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:57;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:58;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:59;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:60;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:61;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:62;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:63;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:64;blob_range:[NO_BLOB:0:9240];;column_id:9;chunk_idx:65;blob_range:[NO_BLOB:0:9240];;column_id:10;chunk_idx:0;blob_range:[NO_BLOB:0:7272];;column_id:10;chunk_idx:1;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:2;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:3;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:4;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:5;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:6;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:7;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:8;blob_range:[NO_BLOB:0:8384];;column_id:10;chunk_idx:9;blob_range:[NO_BLOB:0:8656];;column_id:10;chunk_idx:10;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:11;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:12;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:13;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:14;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:15;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:16;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:17;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:18;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:19;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:20;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:21;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:22;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:23;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:24;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:25;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:26;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:27;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:28;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:29;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:30;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:31;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:32;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:33;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:34;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:35;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:36;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:37;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:38;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:39;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:40;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:41;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:42;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:43;blob_range:[NO_BLOB:0:9408];;column_id:10;chunk_idx:44;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk
_idx:45;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:46;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:47;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:48;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:49;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:50;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:51;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:52;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:53;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:54;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:55;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:56;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:57;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:58;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:59;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:60;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:61;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:62;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:63;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:64;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:65;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:66;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:67;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:68;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:69;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:70;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:71;blob_range:[NO_BLOB:0:9424];;column_id:10;chunk_idx:72;blob_range:[NO_BLOB:0:9424];;;;switched=(portion_id:220;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7504840;index_size:0;meta:(()););(portion_id:218;path_id:9438184000001;records_count:75000;schema_version:1;level:2;;column_size:7503120;index_size:0;meta:(()););(portion_id:221;path_id:9438184000001;records_count:75000;schema_version:1;level:1;;column_size:7504840;index_size:0;meta:(()););; 2025-06-25T15:06:21.259218Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;event=on_execution;consumer=GENERAL_COMPACTION;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=9437184;parent_id=[1:5973:7960];task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=3; 2025-06-25T15:06:21.262049Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-25T15:06:21.267443Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:59;event=TTxWriteDraft; 2025-06-25T15:06:21.360171Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=tx_draft.cpp:16;event=draft_completed; 2025-06-25T15:06:21.360287Z node 1 :TX_COLUMNSHARD_WRITE DEBUG: log.cpp:784: fline=write_actor.cpp:24;event=actor_created;tablet_id=9437184;debug=size=7503120;count=812;actions=__MEMORY,__DEFAULT,;waiting=2;; 2025-06-25T15:06:21.974862Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 9437184 2025-06-25T15:06:21.975009Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:121;from=0,0,0,0,;to=74999,74999,74999,74999,; 2025-06-25T15:06:21.975087Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=common_level.h:141;itFrom=1;itTo=1;raw=7369450;count=1;packed=7504840; 2025-06-25T15:06:21.975162Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=94;data_size=68;sum=95658;count=1749; 2025-06-25T15:06:21.975246Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=26198;data_size=26188;sum=2562418;count=1750;size_of_meta=136; 2025-06-25T15:06:21.975323Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:5973:7960];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=26270;data_size=26260;sum=2625418;count=875;size_of_portion=208; 2025-06-25T15:06:21.976001Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[2] (CS::GENERAL) apply at tablet 9437184 2025-06-25T15:06:22.103555Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 666 2025-06-25T15:06:22.109111Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=37548672;raw_bytes=36867050;count=5;records=375200} inactive {blob_bytes=110272840;raw_bytes=107127800;count=216;records=1200200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:06:22.474675Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-25T15:06:22.474759Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;fline=with_appended.cpp:65;portions=222,;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348; 2025-06-25T15:06:22.475617Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::f20f350a-51d511f0-b2e5c8b8-d141a348; 2025-06-25T15:06:22.475706Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:9438184000001;path_id:9438184000001;size:22538992;portions_count:222;); 2025-06-25T15:06:22.475763Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:22.475843Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:06:22.475923Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863632339;tx_id=18446744073709551615;;current_snapshot_ts=1750863930410; 2025-06-25T15:06:22.475973Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:22.476027Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:22.476070Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:22.476152Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.873000s; 2025-06-25T15:06:22.476205Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;task_id=f20f350a-51d511f0-b2e5c8b8-d141a348;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:06:22.476405Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 9437184 Save Batch GenStep: 4:1 Blob count: 666 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableHiveDistributionAbsentNodes [GOOD] Test command err: 2025-06-25T15:03:52.896579Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:52.896868Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:52.896952Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f66/r3tmp/tmp20hRV3/pdisk_1.dat 2025-06-25T15:03:53.245312Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20442, node 1 2025-06-25T15:03:53.499812Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:53.499872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:53.499906Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:53.500644Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:53.502757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:53.599727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:53.599875Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:53.614893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7818 2025-06-25T15:03:54.130604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:56.829096Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:56.863769Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:56.863882Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:56.902561Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:56.904702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:57.107217Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:57.141476Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.141880Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.142314Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.142461Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.142625Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.142685Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.142748Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.142794Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.142834Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:57.324109Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:57.324241Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:57.337027Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:57.468025Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:57.511312Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:57.511412Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:57.542877Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:57.543102Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:57.543346Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:57.543422Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:57.543502Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:57.543560Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:57.543610Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:57.543658Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:57.544058Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:57.564208Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:57.564300Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:57.571260Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:57.576707Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:57.577032Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:57.585024Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:57.599446Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:57.599491Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:57.599546Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:57.610862Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:57.618454Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:57.618593Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:57.818675Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:57.945590Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:58.002956Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:58.524618Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:58.768413Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:58.768573Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:58.789526Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:58.938647Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:58.938847Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:58.939074Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:58.939199Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:58.939301Z node 2 :TX_COLUMNSHARD WARN: ... er/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:06:18.870789Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:8168:6054], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T15:06:19.057377Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:8242:6099] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:06:19.122241Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8264:6113]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:19.122581Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:06:19.122691Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8266:6115] 2025-06-25T15:06:19.122764Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8266:6115] 2025-06-25T15:06:19.123219Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8267:6116] 2025-06-25T15:06:19.123377Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8266:6115], server id = [2:8267:6116], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:19.123443Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8267:6116], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:06:19.123500Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T15:06:19.123659Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:06:19.123748Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8264:6113], StatRequests.size() = 1 2025-06-25T15:06:19.265361Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MTAzNjU1ZjItMTI1MWM2ZWEtZTJlZWQ3Y2ItMTE4M2E3Mzk=, TxId: 2025-06-25T15:06:19.265460Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MTAzNjU1ZjItMTI1MWM2ZWEtZTJlZWQ3Y2ItMTE4M2E3Mzk=, TxId: 2025-06-25T15:06:19.265966Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:19.279904Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:06:19.279969Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T15:06:19.323629Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:06:19.323718Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:06:19.420136Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8266:6115], schemeshard count = 1 2025-06-25T15:06:21.675091Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:21.675160Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T15:06:21.675205Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:21.675251Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:21.677772Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:21.696863Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:21.697495Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:21.697582Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:21.698527Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:21.711722Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:21.711917Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:21.712635Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8383:6175], server id = [2:8387:6179], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:21.713086Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8383:6175], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:21.713292Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8384:6176], server id = [2:8388:6180], tablet id = 72075186224037900, status = OK 2025-06-25T15:06:21.713339Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8384:6176], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:21.714864Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8385:6177], server id = [2:8390:6182], tablet id = 72075186224037901, status = OK 2025-06-25T15:06:21.714919Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8385:6177], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:21.715426Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8386:6178], server id = [2:8389:6181], tablet id = 72075186224037902, status = OK 2025-06-25T15:06:21.715471Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8386:6178], path = { OwnerId: 72075186224037897 LocalId: 4 } 
2025-06-25T15:06:21.719769Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:21.720686Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8383:6175], server id = [2:8387:6179], tablet id = 72075186224037899 2025-06-25T15:06:21.720734Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:21.721299Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:06:21.721846Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8384:6176], server id = [2:8388:6180], tablet id = 72075186224037900 2025-06-25T15:06:21.721874Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:21.722396Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T15:06:21.722716Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8385:6177], server id = [2:8390:6182], tablet id = 72075186224037901 2025-06-25T15:06:21.722740Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:21.722895Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T15:06:21.722934Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:21.723117Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:21.723304Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:21.723634Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:06:21.725768Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8386:6178], server id = [2:8389:6181], tablet id = 72075186224037902 2025-06-25T15:06:21.725797Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:21.726294Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:21.760649Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8419:6207]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:21.760879Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:21.760920Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8419:6207], StatRequests.size() = 1 2025-06-25T15:06:21.877396Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NGE3YjE1Y2ItN2Y3ZmMxNTAtNDgyMzM4Yy0xM2U5ZDlmMg==, TxId: 2025-06-25T15:06:21.877465Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NGE3YjE1Y2ItN2Y3ZmMxNTAtNDgyMzM4Yy0xM2U5ZDlmMg==, TxId: ... waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-25T15:06:21.878174Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8432:6213]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:21.878344Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:21.879371Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:21.879448Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:06:21.882864Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:21.882924Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:06:21.882977Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:06:21.887314Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::WithRead [GOOD] Test command err: RandomSeed# 4109070464164339914 2025-06-25T15:06:17.642010Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 3 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true 
ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-25T15:06:17.656634Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-25T15:06:17.656678Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 1 going to send TEvBlock {TabletId# 3 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-25T15:06:17.658648Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-25T15:06:17.669275Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 2 going to send TEvCollectGarbage {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T15:06:17.671190Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-25T15:06:23.552860Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-25T15:06:23.552941Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 12 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T15:06:23.552973Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-25T15:06:23.552994Z 1 00h01m10.010512s :BS_LOAD_TEST DEBUG: TabletId# 3 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 13 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T15:06:23.634373Z 1 00h01m10.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 12 Channel# 0 Status# OK} 2025-06-25T15:06:23.634453Z 1 00h01m10.010512s :BS_LOAD_TEST INFO: TabletId# 3 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 3 RecordGeneration# 2 PerGenerationCounter# 13 Channel# 0 Status# OK} |94.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> THiveTest::TestLimitedNodeList [GOOD] >> THiveTest::TestHiveFollowersWithChangingDC >> THiveTest::TestReassignGroupsWithRecreateTablet [GOOD] >> THiveTest::TestReassignUseRelativeSpace >> THiveTest::TestFollowerPromotion [GOOD] >> THiveTest::TestFollowerPromotionFollowerDies >> DataStreams::TestGetRecordsStreamWithMultipleShards [GOOD] >> THiveTest::TestCreateSubHiveCreateTablet [GOOD] >> THiveTest::TestCheckSubHiveMigrationManyTablets >> DataStreams::TestGetRecordsWithBigSeqno ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::ManyPartitionsSortingLimit [GOOD] Test command err: Trying to start YDB, gRPC: 64015, MsgBus: 19247 2025-06-25T15:00:52.830564Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902126085717919:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:52.830601Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00141e/r3tmp/tmpeUCOTJ/pdisk_1.dat 2025-06-25T15:00:53.366080Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:00:53.372448Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519902126085717901:2080] 1750863652826163 != 1750863652826166 2025-06-25T15:00:53.380064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:00:53.384664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:00:53.402345Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64015, node 1 2025-06-25T15:00:53.616847Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:00:53.616870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:00:53.616881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:00:53.617006Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19247 2025-06-25T15:00:53.911354Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:19247 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:00:54.260036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:00:54.291696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.479238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.667808Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:54.755880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:00:56.200270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902143265589716:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:56.200401Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:56.538378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:56.593986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:56.639120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:56.688391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:56.756525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:56.853004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:56.905586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:00:57.000680Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902143265590845:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:57.000753Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:57.000806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902143265590850:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:00:57.005087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:00:57.029063Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902147560558148:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-25T15:00:57.123070Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902147560558222:4871] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:00:57.831353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519902126085717919:2058];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:00:57.839660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:00:58.190240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:08.305346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot ... 99536658146131:7762515]; 2025-06-25T15:04:42.329897Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:04:52.447015Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:04:52.447050Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:23.398960Z node 3 :TX_DATASHARD ERROR: datashard__stats.cpp:649: CPU usage 61.6253 is higher than threshold of 60 in-flight Tx: 0 immediate Tx: 0 readIterators: 0 at datashard: 72075186224037888 table: [/Root/LargeTable] 2025-06-25T15:05:58.929204Z node 3 :OPS_COMPACT ERROR: Compact{72075186224037888.1.312, eph 79} end=Term, 34 blobs 0r (max 600), put Spent{time=2.298s,wait=0.217s,interrupts=11} 2025-06-25T15:05:58.929332Z node 3 :TABLET_EXECUTOR ERROR: Leader{72075186224037888:1:334} Compact 201 on TGenCompactionParams{1001: gen 2 epoch 0, 4 parts} step 312, product {0 parts epoch 0} thrown 2025-06-25T15:06:13.882637Z node 3 :TX_DATASHARD ERROR: datashard__stats.cpp:649: CPU usage 70.7347 is higher than threshold of 60 in-flight Tx: 0 immediate Tx: 0 readIterators: 0 at datashard: 72075186224037890 table: [/Root/LargeTable] 2025-06-25T15:06:14.190829Z node 3 :HIVE ERROR: tx__update_tablet_groups.cpp:135: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923009734048}: tablet 72075186224037890 could not find a group for channel 1 pool /Root:test 2025-06-25T15:06:14.190874Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:272: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923009734048}: tablet 72075186224037890 wasn't changed 2025-06-25T15:06:14.190894Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923009734048}: tablet 72075186224037890 skipped channel 1 2025-06-25T15:06:14.234869Z node 3 :HIVE ERROR: tx__update_tablet_groups.cpp:135: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923007627104}: tablet 72075186224037890 could not find a 
group for channel 0 pool /Root:test 2025-06-25T15:06:14.234935Z node 3 :HIVE ERROR: tx__update_tablet_groups.cpp:135: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923007627104}: tablet 72075186224037890 could not find a group for channel 1 pool /Root:test 2025-06-25T15:06:14.234958Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:272: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923007627104}: tablet 72075186224037890 wasn't changed 2025-06-25T15:06:14.234986Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923007627104}: tablet 72075186224037890 skipped channel 0 2025-06-25T15:06:14.235020Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923007627104}: tablet 72075186224037890 skipped channel 1 2025-06-25T15:06:14.896037Z node 3 :HIVE ERROR: tx__update_tablet_groups.cpp:135: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923007517120}: tablet 72075186224037889 could not find a group for channel 0 pool /Root:test 2025-06-25T15:06:14.896091Z node 3 :HIVE ERROR: tx__update_tablet_groups.cpp:135: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923007517120}: tablet 72075186224037889 could not find a group for channel 1 pool /Root:test 2025-06-25T15:06:14.896114Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:272: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923007517120}: tablet 72075186224037889 wasn't changed 2025-06-25T15:06:14.896135Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923007517120}: tablet 72075186224037889 skipped channel 0 2025-06-25T15:06:14.896166Z node 3 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037968897 THive::TTxUpdateTabletGroups::Execute{88923007517120}: tablet 72075186224037889 skipped channel 1 2025-06-25T15:06:14.986642Z node 3 :TX_DATASHARD ERROR: check_data_tx_unit.cpp:83: Cannot perform transaction: out of disk space at tablet 72075186224037890 txId 281474976715756 2025-06-25T15:06:14.986711Z node 3 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. txid 281474976715756 at tablet 72075186224037890 errors: OUT_OF_SPACE (Cannot perform transaction: out of disk space at tablet 72075186224037890 txId 281474976715756) | 2025-06-25T15:06:14.986776Z node 3 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715756 at tablet 72075186224037890 status: ERROR errors: OUT_OF_SPACE (Cannot perform transaction: out of disk space at tablet 72075186224037890 txId 281474976715756) | 2025-06-25T15:06:14.986937Z node 3 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:864: ActorId: [3:7519903512258380450:2299] TxId: 281474976715756. Ctx: { TraceId: 01jykt03yb0qe4804z52sgbdkb, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NThlYzI5Ni1lYWU0ZDU1ZC0yZjE5NGVkYy01ZTY4YjlkYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ERROR: [OUT_OF_SPACE] Cannot perform transaction: out of disk space at tablet 72075186224037890 txId 281474976715756; 2025-06-25T15:06:14.987201Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=3&id=NThlYzI5Ni1lYWU0ZDU1ZC0yZjE5NGVkYy01ZTY4YjlkYw==, ActorId: [3:7519903108531443830:2299], ActorState: ExecuteState, TraceId: 01jykt03yb0qe4804z52sgbdkb, Create QueryResponse for error on request, msg: Got out of space. 
Successfully inserted 30 x 0 lines, each of size 1048576bytes Trying to start YDB, gRPC: 16820, MsgBus: 21170 2025-06-25T15:06:15.942257Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519903516500987423:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:06:15.942317Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00141e/r3tmp/tmp8P0yKJ/pdisk_1.dat 2025-06-25T15:06:16.078374Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:16.079208Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7519903516500987404:2080] 1750863975941878 != 1750863975941881 2025-06-25T15:06:16.095921Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:06:16.095994Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:06:16.099926Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 16820, node 4 2025-06-25T15:06:16.133102Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:06:16.133133Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:06:16.133141Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:06:16.133349Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21170 TClient is connected to server localhost:21170 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:06:16.586825Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:06:16.607111Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:06:17.819847Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:06:18.507369Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519903529385894271:2595], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:06:18.507371Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519903529385894279:2598], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:06:18.507448Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:06:18.510296Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:06:18.521223Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519903529385894285:2599], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-25T15:06:18.604768Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519903529385894338:5238] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } >> THiveTest::TestRestartsWithFollower [GOOD] >> THiveTest::TestStartTabletTwiceInARow >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-fifo] >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeResolve [GOOD] >> THiveTest::TestReassignUseRelativeSpace [GOOD] >> THiveTest::TestManyFollowersOnOneNode >> THiveTest::TestStartTabletTwiceInARow [GOOD] >> THiveTest::TestSpreadNeighboursWithUpdateTabletsObject >> THiveTest::TestFollowerPromotionFollowerDies [GOOD] >> THiveTest::TestHiveBalancer >> AnalyzeColumnshard::AnalyzeStatus [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeResolve [GOOD] Test command err: 2025-06-25T15:03:54.537997Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:54.538391Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:54.538514Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f45/r3tmp/tmpKvrFbX/pdisk_1.dat 2025-06-25T15:03:54.843468Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5360, node 1 2025-06-25T15:03:55.054515Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:55.054561Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:55.054587Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:55.055070Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:55.056836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:55.151926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:55.152062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:55.166383Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21020 2025-06-25T15:03:55.694371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:58.283369Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:58.306660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:58.306742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:58.343138Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:58.344675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:58.541352Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:58.576359Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:58.576900Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:58.577411Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:58.577573Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:58.577793Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:58.577879Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:58.577971Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:58.578040Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:58.578099Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:58.759894Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:58.760004Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:58.772934Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:58.902141Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:58.937095Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:58.937189Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:58.965022Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:58.965228Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:58.965463Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:58.965526Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:58.965582Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:58.965650Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:58.965719Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:58.965776Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:58.966268Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:58.985990Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:58.986092Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:58.992456Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:58.997287Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:58.997497Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:59.004002Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:59.017328Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:59.017372Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:59.017434Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:59.025764Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:59.030738Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:59.030822Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:59.207648Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:59.326642Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:59.383089Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:59.904772Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:00.154687Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:00.154832Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:00.174317Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:04:00.317654Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:00.317886Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:00.318157Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:00.318296Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:00.318405Z node 2 :TX_COLUMNSHARD WARN: ... ics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:24.822114Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8383:6174], server id = [2:8387:6178], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:24.822561Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8383:6174], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:24.823030Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8384:6175], server id = [2:8388:6179], tablet id = 72075186224037900, status = OK 2025-06-25T15:06:24.823103Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8384:6175], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:24.823259Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8385:6176], server id = [2:8389:6180], tablet id = 72075186224037901, status = OK 2025-06-25T15:06:24.823313Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8385:6176], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:24.824169Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8386:6177], server id = [2:8390:6181], tablet id = 72075186224037902, status = OK 2025-06-25T15:06:24.824221Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8386:6177], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:24.830566Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:24.831061Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8383:6174], server id = [2:8387:6178], tablet id = 72075186224037899 2025-06-25T15:06:24.831113Z node 2 :STATISTICS DEBUG: 
service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:24.832071Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:06:24.832466Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8384:6175], server id = [2:8388:6179], tablet id = 72075186224037900 2025-06-25T15:06:24.832497Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:24.832947Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T15:06:24.833133Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8385:6176], server id = [2:8389:6180], tablet id = 72075186224037901 2025-06-25T15:06:24.833154Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:24.833404Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T15:06:24.833448Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:24.833592Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:24.833752Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:24.834234Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:06:24.837444Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8386:6177], server id = [2:8390:6181], tablet id = 72075186224037902 2025-06-25T15:06:24.837484Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:24.838295Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:24.881665Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8419:6206]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:24.881899Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:24.881951Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8419:6206], StatRequests.size() = 1 ... blocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to KQP_TABLE_RESOLVER cookie 0 ... 
waiting for 3rd TEvResolveKeySetResult (done) 2025-06-25T15:06:24.974462Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8264:6113], server id = [2:8265:6114], tablet id = 72075186224037894 2025-06-25T15:06:24.974580Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8425:6209] 2025-06-25T15:06:24.974641Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8425:6209] 2025-06-25T15:06:24.974804Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:8426:6210], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:06:25.032793Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:06:25.032896Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:06:25.033357Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:06:25.033972Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:06:25.034203Z node 2 :STATISTICS DEBUG: tx_init.cpp:55: [72075186224037894] Loaded database: /Root/Database 2025-06-25T15:06:25.034271Z node 2 :STATISTICS DEBUG: tx_init.cpp:59: [72075186224037894] Loaded traversal start key 2025-06-25T15:06:25.034311Z node 2 :STATISTICS DEBUG: tx_init.cpp:64: [72075186224037894] Loaded traversal table owner id: 72075186224037897 2025-06-25T15:06:25.034336Z node 2 :STATISTICS DEBUG: tx_init.cpp:69: [72075186224037894] Loaded traversal table local path id: 4 2025-06-25T15:06:25.034363Z node 2 :STATISTICS DEBUG: tx_init.cpp:74: [72075186224037894] Loaded traversal start time: 1750863984783469 2025-06-25T15:06:25.034384Z node 2 :STATISTICS DEBUG: tx_init.cpp:79: [72075186224037894] Loaded traversal IsColumnTable: 1 2025-06-25T15:06:25.034408Z node 2 :STATISTICS DEBUG: tx_init.cpp:84: [72075186224037894] Loaded global traversal round: 2 2025-06-25T15:06:25.034470Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 1 2025-06-25T15:06:25.034517Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:06:25.034597Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-25T15:06:25.034642Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:06:25.034683Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:06:25.034734Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:06:25.034858Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:25.035957Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:06:25.036665Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:25.036722Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:25.036814Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing ... 
blocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to STATISTICS_AGGREGATOR cookie 0 2025-06-25T15:06:25.110568Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8472:6239] 2025-06-25T15:06:25.110679Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8425:6209], server id = [2:8472:6239], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:25.110794Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8473:6240] 2025-06-25T15:06:25.110869Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8472:6239], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-25T15:06:25.110978Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:8473:6240], schemeshard id = 72075186224037897 ... unblocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to KQP_TABLE_RESOLVER ... unblocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to STATISTICS_AGGREGATOR ... waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse 2025-06-25T15:06:25.198761Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:25.198862Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:25.201945Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:25.220438Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MzlkMDE1ZmYtNjVlY2JiMzctMjcxOThkYjYtZmE3Yzk3M2E=, TxId: 2025-06-25T15:06:25.220516Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MzlkMDE1ZmYtNjVlY2JiMzctMjcxOThkYjYtZmE3Yzk3M2E=, TxId: ... 
waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-25T15:06:25.221206Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8481:6244]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:25.221839Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:25.221887Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:06:25.225039Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:25.225111Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:06:25.225167Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:06:25.229250Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> THiveTest::TestManyFollowersOnOneNode [GOOD] >> THiveTest::TestLockTabletExecutionTimeout >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v0] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeStatus [GOOD] Test command err: 2025-06-25T15:03:58.992934Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:58.993191Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:58.993265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f11/r3tmp/tmpymrMIj/pdisk_1.dat 2025-06-25T15:03:59.278974Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1816, node 1 2025-06-25T15:03:59.490413Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:59.490470Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:59.490498Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:59.491072Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:59.497037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:59.591234Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:59.591342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:59.605461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10282 2025-06-25T15:04:00.116441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:04:02.680629Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:04:02.706053Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:02.706165Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:02.764618Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:04:02.766845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:02.951191Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:02.986788Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.987422Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.987998Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.988141Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.988475Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.988568Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.988674Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.988771Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.988863Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:03.172140Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:03.172265Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:03.185193Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:03.339729Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:04:03.373708Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:04:03.373796Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:04:03.403331Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:04:03.404935Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:04:03.405186Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:04:03.405246Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:04:03.405312Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:04:03.405365Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:04:03.405437Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:04:03.405499Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:04:03.406294Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:04:03.440822Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:03.440917Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1796:2561], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:03.445461Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1806:2569] 2025-06-25T15:04:03.448582Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1833:2584] 2025-06-25T15:04:03.449136Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1833:2584], schemeshard id = 72075186224037897 2025-06-25T15:04:03.455179Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:04:03.468495Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:04:03.468547Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:04:03.468622Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:04:03.485099Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:03.494799Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:04:03.494915Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:04:03.675113Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:04:03.834136Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:04:03.889787Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:04:04.392113Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:04.583860Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2148:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:04.584007Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:04.597441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:04:04.681254Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2789];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:04.681421Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2789];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:04.681620Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2789];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:04.681690Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2220:2789];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:04.681756Z node 2 :TX_COLUMNSHARD WARN: ... 49] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:06:23.238402Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7327:5364]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:23.238632Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:06:23.238706Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7329:5366] 2025-06-25T15:06:23.238778Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7329:5366] 2025-06-25T15:06:23.239119Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7330:5367] 2025-06-25T15:06:23.239251Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7330:5367], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:06:23.239324Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T15:06:23.239513Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7329:5366], server id = [2:7330:5367], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:23.239576Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:06:23.239670Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7327:5364], StatRequests.size() = 1 2025-06-25T15:06:23.381561Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult 
SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTFjMDVjNGEtYjgxNDQxY2UtNzJiMGJiMC05MGNiNDRiOA==, TxId: 2025-06-25T15:06:23.381636Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NTFjMDVjNGEtYjgxNDQxY2UtNzJiMGJiMC05MGNiNDRiOA==, TxId: 2025-06-25T15:06:23.382202Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:23.397507Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:06:23.397577Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T15:06:23.440805Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:06:23.440894Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:06:23.526460Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7329:5366], schemeshard count = 1 2025-06-25T15:06:24.571908Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:06:24.572006Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:24.577540Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:24.595379Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:24.595837Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:24.595894Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-25T15:06:24.609367Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:24.620413Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. ... blocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR cookie 0 ... waiting for TEvAnalyzeTableResponse (done) 2025-06-25T15:06:24.622319Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7401:5409] 2025-06-25T15:06:24.622941Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:478: [72075186224037894] Send TEvStatistics::TEvAnalyzeStatusResponse. Status STATUS_ENQUEUED 2025-06-25T15:06:24.624056Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7403:5410]
---- StatisticsAggregator ----
Database: /Root/Database
BaseStatistics: 1
SchemeShards: 1
    72075186224037897
Nodes: 1
    2
RequestedSchemeShards: 1
    72075186224037897
FastCounter: 3
FastCheckInFlight: 0
FastSchemeShards: 0
FastNodes: 0
PropagationInFlight: 0
PropagationSchemeShards: 0
PropagationNodes: 0
LastSSIndex: 0
PendingRequests: 0
ProcessUrgentInFlight: 0
Columns: 2
DatashardRanges: 0
CountMinSketches: 0
ScheduleTraversalsByTime: 2
  oldest table: [OwnerId: 72075186224037897, LocalPathId: 4], update time: 1970-01-01T00:00:00Z
ScheduleTraversalsBySchemeShard: 1
    72075186224037897
    [OwnerId: 72075186224037897, LocalPathId: 4], [OwnerId: 72075186224037897, LocalPathId: 3]
ForceTraversals: 1
    1970-01-01T00:00:06Z
NavigateType: Analyze
NavigateAnalyzeOperationId: 
NavigatePathId: 
ForceTraversalOperationId: 
TraversalStartTime: 1970-01-01T00:00:00Z
TraversalPathId: 
TraversalIsColumnTable: 0
TraversalStartKey: 
GlobalTraversalRound: 1
TraversalRound: 0
HiveRequestRound: 0
... unblocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR 2025-06-25T15:06:24.625471Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-25T15:06:24.625600Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-25T15:06:24.641618Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-25T15:06:26.019190Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:26.019325Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:26.019385Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:26.020006Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:26.033232Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:26.033574Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:26.033653Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:26.034444Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:26.050953Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:26.051179Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:26.051685Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7452:5438], server id = [2:7453:5439], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:26.051784Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7452:5438], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:26.054676Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:26.054785Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:26.055037Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:26.055217Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:26.055566Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:06:26.058368Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7452:5438], server id = [2:7453:5439], tablet id = 72075186224037899 2025-06-25T15:06:26.058419Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:26.058973Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:26.098810Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7473:5458]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:26.099011Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:26.099059Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7473:5458], StatRequests.size() = 1 2025-06-25T15:06:26.215200Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:04.000000Z, event interval end# 2025-06-25T15:06:24.000000Z 2025-06-25T15:06:26.215793Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZjRiZTIwZDYtYmQ1MTM3NDAtODBkMjhjY2EtZjI4NjgwOTI=, TxId: 2025-06-25T15:06:26.215858Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZjRiZTIwZDYtYmQ1MTM3NDAtODBkMjhjY2EtZjI4NjgwOTI=, TxId: 2025-06-25T15:06:26.216651Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:26.230963Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:26.231063Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:915:2715] 2025-06-25T15:06:26.232400Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7490:5466] 2025-06-25T15:06:26.233018Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:478: [72075186224037894] Send TEvStatistics::TEvAnalyzeStatusResponse. 
Status STATUS_NO_OPERATION >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-std] >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v0] >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloaded [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloadedWithReboot >> TSchemeShardServerLess::StorageBilling [GOOD] >> THiveTest::TestDrain [GOOD] >> THiveTest::TestDrainWithMaxTabletsScheduled >> DataStreams::TestGetRecordsWithBigSeqno [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::StorageBilling [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:05:46.724988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:05:46.725074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:46.725120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:05:46.725154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:05:46.725196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:05:46.725219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:05:46.725269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:46.725331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:05:46.725885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:05:46.726090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:05:46.772495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 
2025-06-25T15:05:46.772533Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:46.782742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:05:46.782995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:05:46.783109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:05:46.786579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:05:46.786756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:05:46.787123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:46.787284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:05:46.789187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:46.789300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:05:46.790371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:46.790428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:46.790574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:05:46.790624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:05:46.790675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:05:46.790767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.794763Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:05:46.866818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:05:46.866959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.867073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId 
[OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:05:46.867104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:05:46.867252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:05:46.867300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:05:46.868940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:46.869051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:05:46.869167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.869203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:05:46.869235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:05:46.869270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:05:46.870354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.870392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:05:46.870421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:05:46.871370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.871398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.871437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:46.871471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:05:46.877469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } 
CoordinatorID: 72057594046316545 2025-06-25T15:05:46.878874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:05:46.879010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:05:46.879599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:46.879689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:46.879722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:46.879933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:05:46.879967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:46.880077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:05:46.880132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:05:46.881471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:46.881504Z node 1 :FLAT_TX_SCHEMESHARD ... 
e 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:671:2581], at schemeshard: 72075186233409549, txId: 107, path id: 2 2025-06-25T15:06:29.121060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72075186233409549 2025-06-25T15:06:29.121121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1086: NTableState::TProposedWaitParts operationId# 107:0 ProgressState at tablet: 72075186233409549 2025-06-25T15:06:29.121220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:707: all shard schema changes has been received, operationId: 107:0, at schemeshard: 72075186233409549 2025-06-25T15:06:29.121262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:719: send schema changes ack message, operation: 107:0, datashard: 72075186233409552, at schemeshard: 72075186233409549 2025-06-25T15:06:29.121321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 107:0 129 -> 240 2025-06-25T15:06:29.122124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409549, cookie: 107 2025-06-25T15:06:29.122222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409549, cookie: 107 2025-06-25T15:06:29.122267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409549, txId: 107 2025-06-25T15:06:29.122312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409549, txId: 107, pathId: [OwnerId: 72075186233409549, LocalPathId: 1], version: 9 2025-06-25T15:06:29.122356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409549, LocalPathId: 1] was 5 2025-06-25T15:06:29.123436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409549, cookie: 107 2025-06-25T15:06:29.123527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409549, cookie: 107 2025-06-25T15:06:29.123559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409549, txId: 107 2025-06-25T15:06:29.123589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409549, txId: 107, pathId: [OwnerId: 72075186233409549, LocalPathId: 2], version: 18446744073709551615 2025-06-25T15:06:29.123622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 4 2025-06-25T15:06:29.123693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 107, ready parts: 0/1, is published: true 2025-06-25T15:06:29.127091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72075186233409549 2025-06-25T15:06:29.127154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 107:0 ProgressState, at schemeshard: 72075186233409549 2025-06-25T15:06:29.127485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-06-25T15:06:29.127634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#107:0 progress is 1/1 2025-06-25T15:06:29.127668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-25T15:06:29.127701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#107:0 progress is 1/1 2025-06-25T15:06:29.127728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-25T15:06:29.127764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 107, ready parts: 1/1, is published: true 2025-06-25T15:06:29.127831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:812:2691] message: TxId: 107 2025-06-25T15:06:29.127873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-25T15:06:29.127913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 107:0 2025-06-25T15:06:29.127951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 107:0 2025-06-25T15:06:29.128036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 2 2025-06-25T15:06:29.128930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409549, cookie: 107 2025-06-25T15:06:29.130276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409549, cookie: 107 2025-06-25T15:06:29.132748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-25T15:06:29.132797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:2208:4051] TestWaitNotification: OK eventTxId 107 2025-06-25T15:06:29.154565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5633: Handle TEvStateChanged, at schemeshard: 72075186233409549, message: Source { RawX1: 784 RawX2: 4294969967 } TabletId: 72075186233409552 State: 4 2025-06-25T15:06:29.154686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409552, state: Offline, at schemeshard: 72075186233409549 
2025-06-25T15:06:29.159098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72075186233409549:4 hive 72057594037968897 at ss 72075186233409549 2025-06-25T15:06:29.159755Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72075186233409549 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409552 2025-06-25T15:06:29.162649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72075186233409549 ShardLocalIdx: 4, at schemeshard: 72075186233409549 2025-06-25T15:06:29.163068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 1 2025-06-25T15:06:29.163808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72075186233409549 2025-06-25T15:06:29.163862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72075186233409549, LocalPathId: 2], at schemeshard: 72075186233409549 2025-06-25T15:06:29.163933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72075186233409549, LocalPathId: 1] was 4 2025-06-25T15:06:29.167443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72075186233409549:4 2025-06-25T15:06:29.167525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72075186233409549:4 tabletId 72075186233409552 2025-06-25T15:06:29.167984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72075186233409549 2025-06-25T15:06:29.310879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-06-25T15:06:29.311006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-06-25T15:06:29.311064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-06-25T15:06:29.311134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-25T15:06:29.311179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-25T15:06:29.311206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-25T15:06:29.311240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-06-25T15:06:29.311267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T15:06:29.311289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-25T15:06:29.379776Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:06:29.380186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:191: TTxServerlessStorageBilling: make a bill, record: '{"usage":{"start":1600452180,"quantity":59,"finish":1600452239,"type":"delta","unit":"byte*second"},"tags":{"ydb_size":0},"id":"72057594046678944-3-1600452180-1600452239-0","cloud_id":"CLOUD_ID_VAL","source_wt":1600452240,"source_id":"sless-docapi-ydb-storage","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.v1","folder_id":"FOLDER_ID_VAL","version":"1.0.0"} ', schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 2020-09-18T18:04:00.028000Z, LastBillTime: 2020-09-18T18:02:00.000000Z, lastBilled: 2020-09-18T18:02:00.000000Z--2020-09-18T18:02:59.000000Z, toBill: 2020-09-18T18:03:00.000000Z--2020-09-18T18:03:59.000000Z, next retry at: 2020-09-18T18:05:00.000000Z 2025-06-25T15:06:29.384412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete grabMeteringMessage has happened 2025-06-25T15:06:29.384636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:338: tests -- TFakeMetering got TEvMetering::TEvWriteMeteringJson quantity: 59, 59 unit: "byte*second", "byte*second" type: "delta", "delta" >> THiveTest::TestSpreadNeighboursWithUpdateTabletsObject [GOOD] >> THiveTest::TestSpreadNeighboursDifferentOwners >> GroupWriteTest::Simple [GOOD] >> THiveTest::TestHiveFollowersWithChangingDC [GOOD] >> THiveTest::TestHiveBalancerWithSystemTablets >> THiveTest::TestHiveBalancer [GOOD] >> THiveTest::TestFollowersCrossDC_Easy ------- [TM] {asan, default-linux-x86_64, release} ydb/services/datastreams/ut/unittest >> DataStreams::TestGetRecordsWithBigSeqno [GOOD] Test command err: 2025-06-25T15:05:51.095441Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519903412297604571:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:51.095852Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00169f/r3tmp/tmpr55eqQ/pdisk_1.dat 2025-06-25T15:05:51.374198Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64880, node 1 2025-06-25T15:05:51.432935Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:51.433031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:51.437155Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:05:51.532382Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:51.532404Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:51.532411Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:51.532528Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is 
connected to server localhost:63701 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:51.963965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:05:52.032215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:05:52.102897Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:63701 2025-06-25T15:05:52.198517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T15:05:52.422466Z node 1 :PERSQUEUE ERROR: partition_read.cpp:780: [PQ: 72075186224037888, Partition: 0, State: StateIdle] reading from too big offset - topic stream_TestGetRecordsStreamWithSingleShard partition 0 client $without_consumer EndOffset 30 offset 100000 2025-06-25T15:05:54.603692Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7519903423378295521:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:54.603749Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00169f/r3tmp/tmpq02Aod/pdisk_1.dat 2025-06-25T15:05:54.703271Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:54.719442Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:05:54.719524Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:05:54.724142Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19920, node 4 2025-06-25T15:05:54.762968Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:05:54.762987Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:05:54.762993Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:05:54.763105Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22078 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:05:54.935353Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:05:54.972261Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:22078 2025-06-25T15:05:55.097550Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T15:05:55.108012Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-25T15:05:55.610359Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:05:59.603944Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519903423378295521:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:05:59.604000Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:06:09.691881Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:06:09.691904Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:21.024144Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7519903541390299177:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:06:21.024240Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00169f/r3tmp/tmpfmjFYi/pdisk_1.dat 2025-06-25T15:06:21.170145Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:21.173474Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:06:21.173589Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:06:21.196256Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 8216, node 7 2025-06-25T15:06:21.263557Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:06:21.263591Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:06:21.263599Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:06:21.263759Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:25314 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:06:21.539500Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:06:21.593545Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:25314 2025-06-25T15:06:21.801232Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 
2025-06-25T15:06:22.028524Z node 7 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:06:25.490665Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7519903558359565267:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:06:25.490725Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00169f/r3tmp/tmpRovPl9/pdisk_1.dat 2025-06-25T15:06:25.686358Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:25.706846Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:06:25.706960Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:06:25.714810Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23402, node 10 2025-06-25T15:06:25.854278Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:06:25.854311Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:06:25.854320Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:06:25.854511Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20398 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:06:26.163143Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 
2025-06-25T15:06:26.221858Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) TClient is connected to server localhost:20398 2025-06-25T15:06:26.482100Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_user_attrs.cpp:72) waiting... 2025-06-25T15:06:26.519338Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::Simple [GOOD] Test command err: RandomSeed# 13263210803518433127 2025-06-25T15:06:15.908069Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 1 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-25T15:06:15.937995Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 1 recieved TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-25T15:06:15.938038Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 1 going to send TEvBlock {TabletId# 1 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-25T15:06:15.939756Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-25T15:06:15.955169Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 2 going to send TEvCollectGarbage {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T15:06:15.957501Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-25T15:06:27.240621Z 2 00h01m23.810512s :BS_LOGCUTTER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) KEEPER: RetryCutLogEvent: limit exceeded; FreeUpToLsn# 1812 2025-06-25T15:06:30.549303Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-25T15:06:30.549393Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T15:06:30.549440Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-25T15:06:30.549475Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 1 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Deadline# 
18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T15:06:30.601660Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Status# OK} 2025-06-25T15:06:30.601755Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 1 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 1 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Status# OK} >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt64 [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] >> THiveTest::TestLockTabletExecutionTimeout [GOOD] >> THiveTest::TestLockTabletExecutionRebootTimeout |94.1%| [TA] $(B)/ydb/services/datastreams/ut/test-results/unittest/{meta.json ... results_accumulator.log} |94.1%| [TA] {RESULT} $(B)/ydb/services/datastreams/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v0] >> AnalyzeColumnshard::AnalyzeRebootSaBeforeResolve [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKInt64 [GOOD] Test command err: 2025-06-25T15:05:20.339228Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:20.357807Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:20.358024Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:20.362894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:20.363040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:20.363204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:20.363282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:20.363343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:20.363404Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:20.363482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:20.363571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:20.363625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:20.363692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:20.363779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:20.379857Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:20.379988Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:20.380020Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:20.380168Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:20.380326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:20.380388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:20.380422Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:20.380475Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:20.380512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:20.380536Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:20.380569Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:20.380690Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:20.380726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:20.380760Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:20.380776Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:20.380828Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:20.380857Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:20.380894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:20.380918Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:20.380944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:20.380967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:20.380984Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:20.381114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:20.381158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:20.381183Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:20.381291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:20.381318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:20.381333Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:20.381443Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:20.381482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:20.381507Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:20.381553Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:20.381601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:20.381623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:20.381640Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:20.381774Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=28; 2025-06-25T15:05:20.381829Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=31; 2025-06-25T15:05:20.381892Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=34; 2025-06-25T15:05:20.381952Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=32; 2025-06-25T15:05:20.382013Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:20.382068Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:20.382100Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:20.382138Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
oad_stage_name=EXECUTE:granule/portions;fline=constructor_meta.cpp:71;memory_size=2086;data_size=2078;sum=513488;count=432;size_of_meta=136; 2025-06-25T15:06:30.756561Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2158;data_size=2150;sum=529040;count=216;size_of_portion=208; 2025-06-25T15:06:30.756738Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=23361; 2025-06-25T15:06:30.756811Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=12; 2025-06-25T15:06:30.757859Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=974; 2025-06-25T15:06:30.757930Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=24753; 2025-06-25T15:06:30.757986Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=24918; 2025-06-25T15:06:30.758056Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=13; 2025-06-25T15:06:30.758348Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=243; 2025-06-25T15:06:30.758393Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=25846; 2025-06-25T15:06:30.758587Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=129; 2025-06-25T15:06:30.758760Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=109; 2025-06-25T15:06:30.758988Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=178; 2025-06-25T15:06:30.759171Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=134; 2025-06-25T15:06:30.765151Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=5901; 2025-06-25T15:06:30.771242Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=5968; 2025-06-25T15:06:30.771361Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=15; 2025-06-25T15:06:30.771417Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=12; 2025-06-25T15:06:30.771466Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T15:06:30.771550Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=45; 2025-06-25T15:06:30.771592Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-25T15:06:30.771689Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=66; 2025-06-25T15:06:30.771728Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-25T15:06:30.771803Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=44; 2025-06-25T15:06:30.771916Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=62; 2025-06-25T15:06:30.772017Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=57; 2025-06-25T15:06:30.772063Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=47042; 2025-06-25T15:06:30.772267Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108275536;raw_bytes=198365560;count=15;records=1915000} inactive {blob_bytes=205496480;raw_bytes=345889958;count=39;records=3635000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:06:30.772416Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 
2025-06-25T15:06:30.772481Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:06:30.772577Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:06:30.772634Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:06:30.772801Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:30.772889Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:06:30.772987Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863624461;tx_id=18446744073709551615;;current_snapshot_ts=1750863921380; 2025-06-25T15:06:30.773045Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:30.773105Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:30.773152Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:30.773271Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:06:30.778073Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:06:30.778451Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:06:30.778503Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:06:30.778541Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:06:30.778605Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:30.778714Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:06:30.778804Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863624461;tx_id=18446744073709551615;;current_snapshot_ts=1750863921380; 2025-06-25T15:06:30.778875Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:30.778958Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:30.779012Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:30.779118Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.999000s; 2025-06-25T15:06:30.779208Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4102:6070];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> THiveTest::TestSpreadNeighboursDifferentOwners [GOOD] >> THiveTest::TestUpdateTabletsObjectUpdatesMetrics >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v1] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeResolve [GOOD] Test command err: 2025-06-25T15:03:58.676150Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:58.676502Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:58.676600Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f02/r3tmp/tmphT93c9/pdisk_1.dat 2025-06-25T15:03:58.966959Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13875, node 1 2025-06-25T15:03:59.167316Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:59.167353Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:59.167375Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:59.167775Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:59.169765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:59.262485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:59.262629Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:59.277348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16379 2025-06-25T15:03:59.792532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:04:02.419982Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:04:02.443317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:02.443395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:02.479963Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:04:02.481664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:02.676648Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:02.711596Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.712095Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.712599Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.712756Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.712929Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.713010Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.713131Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.713195Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.713250Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:02.895824Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:02.895955Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:02.931104Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:03.033334Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:04:03.067671Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:04:03.067763Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:04:03.094179Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:04:03.094329Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:04:03.094507Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:04:03.094552Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:04:03.094603Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:04:03.094654Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:04:03.094710Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:04:03.094749Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:04:03.095168Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:04:03.115349Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:03.115439Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:03.123036Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:04:03.128201Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:04:03.128446Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:04:03.135276Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:04:03.147683Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:04:03.147736Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:04:03.147794Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:04:03.156144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:03.161152Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:04:03.161237Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:04:03.327607Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:04:03.445227Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:04:03.543881Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:04:04.026666Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:04.255789Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:04.255931Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:04.269492Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:04:04.366793Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:04.367082Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:04.367377Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:04.367508Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:04.367626Z node 2 :TX_COLUMNSHARD WARN: ... rsalOperations: table count# 1 2025-06-25T15:06:28.007720Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 1 2025-06-25T15:06:28.007801Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:06:28.007955Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:28.008994Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:06:28.009561Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:28.009695Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:28.009955Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing ... blocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to STATISTICS_AGGREGATOR cookie 0 2025-06-25T15:06:28.086253Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7437:5447] 2025-06-25T15:06:28.086503Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7437:5447], schemeshard id = 72075186224037897 2025-06-25T15:06:28.086602Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7391:5418], server id = [2:7438:5448], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:28.086651Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7438:5448] 2025-06-25T15:06:28.086741Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7438:5448], node id = 2, have schemeshards count = 1, need schemeshards count = 0 ... 
unblocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to ... unblocking NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult from to STATISTICS_AGGREGATOR 2025-06-25T15:06:28.176807Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:28.176916Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:28.177903Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7449:5451] 2025-06-25T15:06:28.178369Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:28.179007Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:3049:3301] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-25T15:06:28.179062Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3049:3301] 2025-06-25T15:06:28.192718Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:28.192834Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-25T15:06:28.193017Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:28.193579Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7452:5454], server id = [2:7453:5455], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:28.193690Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7452:5454], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:28.197019Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:28.197135Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:28.197319Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:28.197484Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:28.197732Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:06:28.198046Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7452:5454], server id = [2:7453:5455], tablet id = 72075186224037899 2025-06-25T15:06:28.198090Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:28.201280Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:28.240524Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7473:5474]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:28.240733Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:28.240785Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7473:5474], StatRequests.size() = 1 2025-06-25T15:06:28.361431Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OTMyZTJmZmItODUwMmI2YjYtM2U2NjdiNTUtZWEzZDNhYTA=, TxId: 2025-06-25T15:06:28.361515Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OTMyZTJmZmItODUwMmI2YjYtM2U2NjdiNTUtZWEzZDNhYTA=, TxId: 2025-06-25T15:06:28.362124Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:28.376302Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:28.376396Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T15:06:28.938963Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-25T15:06:28.939071Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T15:06:29.661351Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:06:29.661441Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:06:29.661488Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:06:30.962293Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:30.962449Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-25T15:06:30.962506Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:30.963249Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:30.985901Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:30.986347Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:30.986414Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:30.986845Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:31.005558Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:31.005781Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-25T15:06:31.006308Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7564:5523], server id = [2:7565:5524], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:31.006405Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7564:5523], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:31.007666Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:31.007767Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:31.007918Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:31.008090Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:31.008491Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:06:31.010954Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7564:5523], server id = [2:7565:5524], tablet id = 72075186224037899 2025-06-25T15:06:31.010993Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:31.011487Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:31.035607Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YTVlOTM5MWUtMTRhNTUzZjYtNTFiYmZiYmQtNjk3ZTg1ZDg=, TxId: 2025-06-25T15:06:31.035677Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YTVlOTM5MWUtMTRhNTUzZjYtNTFiYmZiYmQtNjk3ZTg1ZDg=, TxId: 2025-06-25T15:06:31.036212Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:31.050922Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:31.051004Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3049:3301] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] >> THiveTest::TestUpdateTabletsObjectUpdatesMetrics [GOOD] >> THiveTest::TestRestartTablets >> THiveTest::TestHiveBalancerWithSystemTablets [GOOD] >> THiveTest::TestHiveNoBalancingWithLowResourceUsage >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v1] >> THiveTest::TestFollowersCrossDC_Easy [GOOD] >> THiveTest::TestFollowers_LocalNodeOnly >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-by_deduplication_id] >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-std] >> THiveTest::TestRestartTablets [GOOD] >> THiveTest::TestServerlessComputeResourcesMode >> AnalyzeColumnshard::AnalyzeServerless [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v0] >> TraverseColumnShard::TraverseColumnTableHiveDistributionZeroNodes [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v1] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v1] [GOOD] |94.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v1] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeServerless [GOOD] Test command err: 2025-06-25T15:03:47.557950Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:47.558339Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:47.558457Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f89/r3tmp/tmps7KDMg/pdisk_1.dat 2025-06-25T15:03:47.954315Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16449, node 1 2025-06-25T15:03:48.195159Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:48.195226Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:48.195255Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:48.195809Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:48.198252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:48.294003Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:48.294136Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:48.308637Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13742 2025-06-25T15:03:48.869598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:51.979300Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:52.013615Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:52.013709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:52.051498Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:52.053569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:52.267373Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:52.302395Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:52.302919Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:52.303443Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:52.303554Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:52.303761Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:52.303849Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:52.303917Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:52.303981Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:52.304062Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:52.486526Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:52.486623Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:52.499550Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:52.645511Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:52.687227Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:52.687318Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:52.719319Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:52.719495Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:52.719697Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:52.719816Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:52.719873Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:52.719923Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:52.719970Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:52.720019Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:52.720583Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:52.743487Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:52.743636Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:52.752437Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:52.759088Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:03:52.759348Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:03:52.768015Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-25T15:03:52.794627Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:52.794685Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:52.794738Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-25T15:03:52.806147Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:52.818493Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:52.818643Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:53.025159Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:53.153125Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:53.252611Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-25T15:03:53.755367Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:53.786931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:54.349702Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:54.472264Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7894: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-25T15:03:54.472346Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7910: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-25T15:03:54.472453Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:2503:2903], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-25T15:03:54.475669Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2506:2905] 2025-06-25T15:03:54.475776Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2506:2905], schemeshard id = 72075186224037899 2025-06-25T15:03:55.587495Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2630:3198], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:55.587637Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06 ... 6-25T15:06:31.438297Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:8280:6072], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:06:31.438422Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:8290:6077], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:06:31.439103Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Shared, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:06:31.458320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:06:31.520790Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:8294:6080], DatabaseId: /Root/Shared, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T15:06:31.790162Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:8394:6129] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:06:31.868705Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8423:6144]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:31.869005Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:06:31.869100Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8425:6146] 2025-06-25T15:06:31.869183Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8425:6146] 2025-06-25T15:06:31.869754Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8425:6146], server id = [2:8426:6147], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:31.869836Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8426:6147] 2025-06-25T15:06:31.869967Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8426:6147], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:06:31.870060Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T15:06:31.870251Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:06:31.870352Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8423:6144], StatRequests.size() = 1 2025-06-25T15:06:32.042591Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZmVlMmI1NGMtYjE1MTBlYWUtNzI2MjdkM2QtYTRjMGYxNDI=, TxId: 2025-06-25T15:06:32.042657Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZmVlMmI1NGMtYjE1MTBlYWUtNzI2MjdkM2QtYTRjMGYxNDI=, TxId: 2025-06-25T15:06:32.043157Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:32.061898Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:06:32.061980Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T15:06:32.136111Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:06:32.136226Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:06:32.208585Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8425:6146], schemeshard count = 1 2025-06-25T15:06:32.511341Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:8076: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-25T15:06:32.511429Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 186.000000s, at schemeshard: 72075186224037899 2025-06-25T15:06:32.511684Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 28 2025-06-25T15:06:32.530516Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-25T15:06:33.521993Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:06:33.522122Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 2025-06-25T15:06:33.526654Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:33.546144Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:33.546885Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:33.546956Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037899, LocalPathId: 2], AnalyzedShards 1 2025-06-25T15:06:33.565874Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:33.599200Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-25T15:06:33.600667Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-25T15:06:33.600786Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-25T15:06:33.615000Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-25T15:06:35.065146Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:35.065310Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 
2025-06-25T15:06:35.065361Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T15:06:35.066258Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:35.085840Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:35.086347Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:35.086443Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:35.087532Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:35.109651Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:35.109839Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:35.110273Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8554:6228], server id = [2:8555:6229], tablet id = 72075186224037905, status = OK 2025-06-25T15:06:35.110361Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8554:6228], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-25T15:06:35.116673Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-25T15:06:35.116829Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:35.117077Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:35.117316Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:35.117884Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-25T15:06:35.120669Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8554:6228], server id = [2:8555:6229], tablet id = 72075186224037905 2025-06-25T15:06:35.120733Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:35.121551Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:35.157659Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8575:6248]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:35.157932Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:35.157981Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8575:6248], StatRequests.size() = 1 2025-06-25T15:06:35.274168Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=N2VhNWNlNDEtYzY5YjMxZTktZTkwZmVlYzctMmU2N2E5ZTc=, TxId: 2025-06-25T15:06:35.274240Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=N2VhNWNlNDEtYzY5YjMxZTktZTkwZmVlYzctMmU2N2E5ZTc=, TxId: 2025-06-25T15:06:35.274893Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:35.289471Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-25T15:06:35.289541Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3555:3471] >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64_Reboot [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableHiveDistributionZeroNodes [GOOD] Test command err: 2025-06-25T15:04:00.892052Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:04:00.892382Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:04:00.892482Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ed8/r3tmp/tmpItQClF/pdisk_1.dat 2025-06-25T15:04:01.158608Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30144, node 1 2025-06-25T15:04:01.360003Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:04:01.360056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:04:01.360083Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:04:01.360663Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:04:01.365903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:04:01.456206Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:01.456353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:01.470499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8614 2025-06-25T15:04:01.955182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:04:04.506712Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:04:04.531456Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:04.531581Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:04.568679Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:04:04.570356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:04.756920Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:04.791023Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:04.791648Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:04.792184Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:04.792405Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:04.792638Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:04.792733Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:04.792827Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:04.792895Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:04.792956Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:04.974314Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:04.974419Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:04.987088Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:05.102737Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:04:05.144470Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:04:05.144572Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:04:05.174474Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:04:05.174627Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:04:05.174765Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:04:05.174806Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:04:05.174852Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:04:05.174886Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:04:05.174924Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:04:05.174957Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:04:05.175328Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:04:05.194785Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:05.194864Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:05.201308Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:04:05.206292Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:04:05.206492Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:04:05.213090Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:04:05.229242Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:04:05.229336Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:04:05.229436Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:04:05.241090Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:05.252262Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:04:05.252426Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:04:05.434880Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:04:05.564383Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:04:05.621909Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:04:06.151465Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:06.403968Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:06.404163Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:06.429248Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:04:06.583142Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:06.583325Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:06.583560Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:06.583655Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:06.583747Z node 2 :TX_COLUMNSHARD WARN: ... iled: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:06:31.551664Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8260:6111]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:31.551858Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:06:31.551920Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8262:6113] 2025-06-25T15:06:31.551976Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8262:6113] 2025-06-25T15:06:31.552250Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8263:6114] 2025-06-25T15:06:31.556709Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8262:6113], server id = [2:8263:6114], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:31.556899Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8263:6114], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:06:31.556980Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T15:06:31.557147Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:06:31.557254Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8260:6111], StatRequests.size() = 1 2025-06-25T15:06:31.721097Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: 
ydb://session/3?node_id=2&id=OWZkZjAxYTAtYjFiNDA0Ni02YmJkZDE3MC00ZGM0ZWRkNQ==, TxId: 2025-06-25T15:06:31.721173Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OWZkZjAxYTAtYjFiNDA0Ni02YmJkZDE3MC00ZGM0ZWRkNQ==, TxId: 2025-06-25T15:06:31.721734Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:31.747792Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:06:31.747869Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-25T15:06:31.785017Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:06:31.785108Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:06:31.869947Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8262:6113], schemeshard count = 1 2025-06-25T15:06:34.366035Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:34.366093Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T15:06:34.366128Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:34.366169Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:34.369860Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:34.393306Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:34.393894Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:34.393976Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:34.394825Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 2 2025-06-25T15:06:34.394893Z node 2 :STATISTICS WARN: tx_response_tablet_distribution.cpp:65: [72075186224037894] TTxResponseTabletDistribution::Execute. Some tablets are probably in Hive boot queue 2025-06-25T15:06:34.394942Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:35.729581Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-25T15:06:35.750616Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:35.750863Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:35.751563Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8416:6189], server id = [2:8420:6193], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:35.751924Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8416:6189], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:35.753219Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8417:6190], server id = [2:8421:6194], tablet id = 72075186224037900, status = OK 2025-06-25T15:06:35.753297Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8417:6190], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:35.753670Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8418:6191], server id = [2:8422:6195], tablet id = 72075186224037901, status = OK 2025-06-25T15:06:35.753727Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8418:6191], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:35.753845Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8419:6192], server id = [2:8423:6196], tablet id = 72075186224037902, status = OK 2025-06-25T15:06:35.753886Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8419:6192], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:35.763857Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:35.764377Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8416:6189], server id = [2:8420:6193], tablet id = 72075186224037899 2025-06-25T15:06:35.764433Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:35.764957Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:06:35.765335Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8417:6190], server id = [2:8421:6194], tablet id = 72075186224037900 2025-06-25T15:06:35.765368Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:35.766331Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T15:06:35.766811Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8419:6192], server id = [2:8423:6196], tablet id = 72075186224037902 2025-06-25T15:06:35.766853Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:35.766950Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T15:06:35.766993Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:35.767162Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-25T15:06:35.767378Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:35.767713Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-25T15:06:35.769967Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8418:6191], server id = [2:8422:6195], tablet id = 72075186224037901 2025-06-25T15:06:35.769997Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:35.770774Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:35.803931Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8452:6221]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:35.804141Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:35.804181Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8452:6221], StatRequests.size() = 1 2025-06-25T15:06:35.925306Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZmVhY2I2NmMtZjZkZDNmM2UtOTcyMDE5ZDItOGMxZDgxNGI=, TxId: 2025-06-25T15:06:35.925369Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZmVhY2I2NmMtZjZkZDNmM2UtOTcyMDE5ZDItOGMxZDgxNGI=, TxId: ... 
waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-25T15:06:35.925966Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8466:6227]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:35.926108Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:35.927327Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:35.927377Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:06:35.930038Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:35.930088Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:06:35.930136Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:06:35.934299Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v1] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKInt64_Reboot [GOOD] Test command err: 2025-06-25T15:04:59.829666Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:04:59.846462Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:04:59.846630Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:04:59.851316Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:59.851456Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:59.851612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:59.851675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:59.851730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:04:59.851785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:04:59.851864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:04:59.851932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:04:59.851999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:04:59.852063Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:04:59.852114Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:04:59.869069Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:04:59.869187Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:04:59.869218Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:04:59.869352Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:59.869513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:04:59.869611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:04:59.869658Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:04:59.869758Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:04:59.869831Z node 1 :TX_COLUMNSHARD 
WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:04:59.869874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:04:59.869928Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:04:59.870095Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:04:59.870171Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:04:59.870200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:04:59.870218Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:04:59.870284Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:04:59.870326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:04:59.870360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:04:59.870378Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:04:59.870405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:04:59.870424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:04:59.870441Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:04:59.870567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:04:59.870591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:04:59.870612Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:04:59.870720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:04:59.870759Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:04:59.870776Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:04:59.870875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:04:59.870901Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:04:59.870917Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:04:59.870959Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:04:59.871004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:04:59.871028Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:04:59.871046Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:04:59.871186Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=32; 2025-06-25T15:04:59.871258Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=37; 2025-06-25T15:04:59.871314Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=30; 2025-06-25T15:04:59.871371Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=27; 2025-06-25T15:04:59.871427Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:04:59.871488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:04:59.871532Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:04:59.871577Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... fline=constructor_meta.cpp:71;memory_size=26214;data_size=26188;sum=13173016;count=14328;size_of_meta=136; 2025-06-25T15:06:36.242596Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=26286;data_size=26260;sum=13688824;count=7164;size_of_portion=208; 2025-06-25T15:06:36.243672Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=383686; 2025-06-25T15:06:36.243756Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=13; 2025-06-25T15:06:36.245933Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=2118; 2025-06-25T15:06:36.245986Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=386161; 2025-06-25T15:06:36.246028Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=386289; 2025-06-25T15:06:36.246090Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=11; 2025-06-25T15:06:36.247206Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=1066; 2025-06-25T15:06:36.247254Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=387982; 2025-06-25T15:06:36.247416Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=113; 2025-06-25T15:06:36.247525Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=64; 2025-06-25T15:06:36.247884Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=315; 2025-06-25T15:06:36.248174Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=245; 2025-06-25T15:06:36.279625Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=31383; 2025-06-25T15:06:36.309847Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=30122; 2025-06-25T15:06:36.309953Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=12; 2025-06-25T15:06:36.310006Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=10; 2025-06-25T15:06:36.310046Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T15:06:36.310109Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=34; 2025-06-25T15:06:36.310149Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=5; 2025-06-25T15:06:36.310215Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=40; 2025-06-25T15:06:36.310246Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=4; 2025-06-25T15:06:36.310289Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=22; 2025-06-25T15:06:36.310361Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=42; 2025-06-25T15:06:36.310436Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=43; 2025-06-25T15:06:36.310464Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=458475; 2025-06-25T15:06:36.310583Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=22538992;raw_bytes=22128150;count=3;records=225200} inactive {blob_bytes=147791880;raw_bytes=143975050;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 
2025-06-25T15:06:36.310693Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:06:36.310734Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:06:36.310787Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:06:36.310819Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:06:36.310971Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:36.311027Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:06:36.311077Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863605434;tx_id=18446744073709551615;;current_snapshot_ts=1750863901637; 2025-06-25T15:06:36.311106Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:36.311143Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:36.311176Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:36.311248Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:06:36.316841Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:06:36.317479Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:06:36.317527Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:06:36.317554Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:06:36.317599Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:36.317681Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:06:36.317742Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863605434;tx_id=18446744073709551615;;current_snapshot_ts=1750863901637; 2025-06-25T15:06:36.317787Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:36.317834Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:36.317869Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:36.317932Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=1.000000s; 2025-06-25T15:06:36.317975Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10283:11876];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v0] >> THiveTest::TestServerlessComputeResourcesMode [GOOD] >> THiveTest::TestSkipBadNode >> THiveTest::TestFollowers_LocalNodeOnly [GOOD] >> THiveTest::TestFollowersCrossDC_Tight >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeReserved >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v1] >> THiveTest::TestSkipBadNode [GOOD] >> THiveTest::TestStopTenant |94.1%| [TS] {asan, 
default-linux-x86_64, release} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v1] |94.1%| [TA] $(B)/ydb/core/fq/libs/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} |94.1%| [TA] {RESULT} $(B)/ydb/core/fq/libs/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32_Reboot [GOOD] >> THiveTest::TestStopTenant [GOOD] >> THiveTest::TestTabletAvailability >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v1] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v1] [GOOD] >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeReserved [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v0] >> THiveTest::TestTabletAvailability [GOOD] >> THiveTest::TestTabletsStartingCounter >> GroupWriteTest::ByTableName [GOOD] >> TColumnShardTestReadWrite::CompactionInGranule_PKDatetime_Reboot [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeReserved [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:06:40.903789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:06:40.903893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:06:40.903933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:06:40.903971Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:06:40.904015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:06:40.904043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:06:40.904097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:06:40.904162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:06:40.905290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:06:40.905672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:06:41.004147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:06:41.004221Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:41.033732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:06:41.034208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:06:41.034421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:06:41.041769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:06:41.042158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:06:41.042824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:06:41.043115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:06:41.046987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:06:41.047204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:06:41.048629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:06:41.048692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:06:41.048819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:06:41.048864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:06:41.048904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:06:41.048988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:06:41.056235Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:06:41.216070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:06:41.216355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:06:41.216584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:06:41.216633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:06:41.216872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:06:41.216938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:06:41.219191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:06:41.219336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:06:41.219506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:06:41.219548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:06:41.219577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:06:41.219616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:06:41.225669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-25T15:06:41.225721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:06:41.225788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:06:41.228845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:06:41.228910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:06:41.228968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:06:41.229054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:06:41.243392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:06:41.245721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:06:41.245944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:06:41.246995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:06:41.247140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:06:41.247193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:06:41.247492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:06:41.247548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:06:41.247710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:06:41.247803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 
1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:06:41.251526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:06:41.251613Z node 1 :FLAT_TX_SCHEMESHARD ... tionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-25T15:06:43.277690Z node 1 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186233409546, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-25T15:06:43.277754Z node 1 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186233409546, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 16975298 UsedReserveSize: 16975298 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-25T15:06:43.278071Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186233409547][Topic1] Send TEvPeriodicTopicStats PathId: 2 Generation: 2 StatsReportRound: 2 DataSize: 16975298 UsedReserveSize: 16975298 2025-06-25T15:06:43.278179Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186233409547][Topic1] ProcessPendingStats. PendingUpdates size 0 2025-06-25T15:06:43.278391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 16975298 UsedReserveSize 16975298 2025-06-25T15:06:43.299701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T15:06:43.310244Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:06:43.310421Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 196us result status StatusSuccess 2025-06-25T15:06:43.310859Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 
PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 136598400 AccountSize: 136598400 DataSize: 16975298 UsedReserveSize: 16975298 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:06:43.845177Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186233409547][Topic1] TPersQueueReadBalancer::HandleWakeup 2025-06-25T15:06:43.845277Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186233409547][Topic1] Send TEvPersQueue::TEvStatus TabletId: 72075186233409546 Cookie: 3 2025-06-25T15:06:43.845668Z node 1 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186233409546, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 16975298 UsedReserveSize: 16975298 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-25T15:06:43.845818Z node 1 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186233409546, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-25T15:06:43.845881Z node 1 :PERSQUEUE DEBUG: partition.cpp:873: [PQ: 72075186233409546, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-25T15:06:43.846299Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186233409547][Topic1] Send TEvPeriodicTopicStats PathId: 2 Generation: 2 StatsReportRound: 3 DataSize: 16975298 UsedReserveSize: 16975298 2025-06-25T15:06:43.846402Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186233409547][Topic1] ProcessPendingStats. 
PendingUpdates size 0 2025-06-25T15:06:43.846567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 16975298 UsedReserveSize 16975298 2025-06-25T15:06:43.867628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-25T15:06:43.878896Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:06:43.879106Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 221us result status StatusSuccess 2025-06-25T15:06:43.879566Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 136598400 AccountSize: 136598400 DataSize: 16975298 UsedReserveSize: 16975298 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:06:43.935934Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:06:43.936180Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 305us result status StatusSuccess 2025-06-25T15:06:43.936688Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 136598400 AccountSize: 136598400 DataSize: 16975298 UsedReserveSize: 16975298 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKInt32_Reboot [GOOD] Test command err: 2025-06-25T15:05:07.567421Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:07.583776Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:07.583953Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:07.589033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:07.589204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:07.589377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:07.589450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:07.589508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:07.589563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:07.589637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:07.589715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:07.589776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:07.589860Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:07.589923Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:07.605573Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:07.605708Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:07.605756Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:07.605888Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:07.606021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:07.606076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:07.606103Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:07.606159Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:07.606200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:07.606226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:07.606259Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:07.606367Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:07.606409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:07.606435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:07.606459Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:07.606525Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:07.606558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:07.606601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:07.606620Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:07.606646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:07.606668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:07.606684Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 
2025-06-25T15:05:07.606820Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:07.606844Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:07.606859Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:07.606969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:07.607025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:07.607042Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:07.607130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:07.607158Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:07.607172Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:07.607213Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:07.607245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:07.607291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:07.607310Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:07.607459Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=36; 2025-06-25T15:05:07.607533Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; 2025-06-25T15:05:07.607596Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=31; 2025-06-25T15:05:07.607645Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=24; 
2025-06-25T15:05:07.607702Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:07.607746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:07.607771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:07.607812Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... :granule/portions;fline=constructor_meta.cpp:71;memory_size=25158;data_size=25124;sum=12489152;count=14328;size_of_meta=136; 2025-06-25T15:06:42.287438Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=25230;data_size=25196;sum=13004960;count=7164;size_of_portion=208; 2025-06-25T15:06:42.288166Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=417918; 2025-06-25T15:06:42.288230Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=9; 2025-06-25T15:06:42.289665Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1385; 2025-06-25T15:06:42.289706Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=419603; 2025-06-25T15:06:42.289744Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=419712; 2025-06-25T15:06:42.289798Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=9; 2025-06-25T15:06:42.290440Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=604; 2025-06-25T15:06:42.290476Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=420853; 
2025-06-25T15:06:42.290599Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=81; 2025-06-25T15:06:42.290686Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=47; 2025-06-25T15:06:42.290946Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=219; 2025-06-25T15:06:42.291143Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=158; 2025-06-25T15:06:42.306880Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=15670; 2025-06-25T15:06:42.328643Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=21666; 2025-06-25T15:06:42.328735Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=11; 2025-06-25T15:06:42.328799Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=12; 2025-06-25T15:06:42.328851Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T15:06:42.328916Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=36; 2025-06-25T15:06:42.328956Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-25T15:06:42.329023Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=39; 2025-06-25T15:06:42.329056Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=4; 2025-06-25T15:06:42.329107Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=24; 2025-06-25T15:06:42.329176Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=41; 2025-06-25T15:06:42.329233Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=30; 2025-06-25T15:06:42.329267Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=466189; 2025-06-25T15:06:42.329402Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=21623968;raw_bytes=21227350;count=3;records=225200} inactive {blob_bytes=141321168;raw_bytes=137674250;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:06:42.329509Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:06:42.329553Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:06:42.329606Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:06:42.329641Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:06:42.329795Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:42.329860Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:06:42.329919Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863613013;tx_id=18446744073709551615;;current_snapshot_ts=1750863909373; 2025-06-25T15:06:42.329952Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:42.329989Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:42.330015Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:42.330087Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:06:42.335808Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:06:42.336543Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:06:42.336587Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:06:42.336613Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:06:42.336659Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:42.336743Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:06:42.336814Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863613013;tx_id=18446744073709551615;;current_snapshot_ts=1750863909373; 2025-06-25T15:06:42.336860Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:42.336913Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:42.336953Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:42.337028Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=1.000000s; 2025-06-25T15:06:42.337075Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v1] >> THiveTest::TestFollowersCrossDC_Tight [GOOD] >> THiveTest::TestFollowersCrossDC_MovingLeader ------- [TM] {asan, default-linux-x86_64, release} ydb/core/load_test/ut/unittest >> GroupWriteTest::ByTableName [GOOD] Test command err: RandomSeed# 10428533517867021956 2025-06-25T15:06:16.443872Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 1 is bootstrapped, going to send TEvDiscover {TabletId# 72058428954028033 MinGeneration# 1 ReadBody# false DiscoverBlockedGeneration# true ForceBlockedGeneration# 0 FromLeader# true Deadline# 18446744073709551} 2025-06-25T15:06:16.457611Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 1 recieved 
TEvDiscoverResult {Status# NODATA BlockedGeneration# 0 Id# [0:0:0:0:0:0:0] Size# 0 MinGeneration# 1} 2025-06-25T15:06:16.457661Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 1 going to send TEvBlock {TabletId# 72058428954028033 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} 2025-06-25T15:06:16.459362Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 1 recieved TEvBlockResult {Status# OK} 2025-06-25T15:06:16.470554Z 1 00h01m00.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 2 going to send TEvCollectGarbage {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 0 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T15:06:16.472275Z 1 00h01m00.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-25T15:06:27.003294Z 8 00h01m11.810512s :BS_LOGCUTTER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) KEEPER: RetryCutLogEvent: limit exceeded; FreeUpToLsn# 1098 2025-06-25T15:06:37.924584Z 1 00h01m23.810512s :BS_LOGCUTTER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) KEEPER: RetryCutLogEvent: limit exceeded; FreeUpToLsn# 2703 2025-06-25T15:06:44.394739Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-25T15:06:44.394843Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T15:06:44.394893Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: Load tablet recieved PoisonPill, going to die 2025-06-25T15:06:44.394929Z 1 00h01m30.010512s :BS_LOAD_TEST DEBUG: TabletId# 72058428954028033 Generation# 2 end working, going to send TEvCollectGarbage {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 2 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 0 IsMonitored# 1} 2025-06-25T15:06:44.448175Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 32 Channel# 0 Status# OK} 2025-06-25T15:06:44.448274Z 1 00h01m30.010512s :BS_LOAD_TEST INFO: TabletId# 72058428954028033 Generation# 2 recieved TEvCollectGarbageResult {TabletId# 72058428954028033 RecordGeneration# 2 PerGenerationCounter# 33 Channel# 0 Status# OK} |94.2%| [TA] $(B)/ydb/core/tx/schemeshard/ut_stats/test-results/unittest/{meta.json ... results_accumulator.log} |94.2%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_stats/test-results/unittest/{meta.json ... 
results_accumulator.log} >> THiveTest::TestTabletsStartingCounter [GOOD] >> THiveTest::TestTabletsStartingCounterExternalBoot >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] |94.2%| [TA] $(B)/ydb/core/load_test/ut/test-results/unittest/{meta.json ... results_accumulator.log} |94.2%| [TA] {RESULT} $(B)/ydb/core/load_test/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] >> TStorageBalanceTest::TestScenario1 [GOOD] >> TStorageBalanceTest::TestScenario2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKDatetime_Reboot [GOOD] Test command err: 2025-06-25T15:05:10.526790Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:10.543731Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:10.543902Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:10.548897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:10.549048Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:10.549207Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:10.549282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:10.549335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:10.549395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:10.549472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:10.549556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:10.549623Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:10.549678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:10.549736Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:10.565544Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:10.565661Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:10.565693Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:10.565802Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:10.565922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:10.565967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:10.565992Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:10.566050Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:10.566107Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:10.566138Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:10.566165Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:10.566275Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:10.566321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:10.566345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:10.566367Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:10.566429Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:10.566460Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:10.566481Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:10.566498Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:10.566523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:10.566542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:10.566558Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:10.566680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:10.566706Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:10.566721Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:10.566826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:10.566858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:10.566874Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:10.566962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:10.566990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:10.567010Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:10.567056Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:10.567093Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:10.567127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:10.567148Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:10.567300Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=53; 2025-06-25T15:05:10.567357Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=31; 2025-06-25T15:05:10.567411Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=29; 2025-06-25T15:05:10.567458Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=24; 2025-06-25T15:05:10.567516Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:10.567559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:10.567584Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:10.567615Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
anule/portions;fline=constructor_meta.cpp:71;memory_size=25158;data_size=25124;sum=12489152;count=14328;size_of_meta=136; 2025-06-25T15:06:43.765230Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=25230;data_size=25196;sum=13004960;count=7164;size_of_portion=208; 2025-06-25T15:06:43.766215Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=349691; 2025-06-25T15:06:43.766298Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=12; 2025-06-25T15:06:43.768195Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1841; 2025-06-25T15:06:43.768248Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=351857; 2025-06-25T15:06:43.768293Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=351986; 2025-06-25T15:06:43.768724Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=12; 2025-06-25T15:06:43.769679Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=887; 2025-06-25T15:06:43.769734Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=353822; 2025-06-25T15:06:43.769903Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=112; 2025-06-25T15:06:43.770020Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=68; 2025-06-25T15:06:43.770416Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=343; 2025-06-25T15:06:43.770810Z node 1 :TX_COLUMNSHARD 
INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=327; 2025-06-25T15:06:43.800654Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=29748; 2025-06-25T15:06:43.833768Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=32980; 2025-06-25T15:06:43.833894Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=13; 2025-06-25T15:06:43.833955Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=11; 2025-06-25T15:06:43.833999Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T15:06:43.834080Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=48; 2025-06-25T15:06:43.834124Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-25T15:06:43.834216Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=55; 2025-06-25T15:06:43.834263Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-25T15:06:43.834332Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=33; 2025-06-25T15:06:43.834423Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=51; 2025-06-25T15:06:43.834515Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=47; 2025-06-25T15:06:43.834552Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=423904; 2025-06-25T15:06:43.834693Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=21623968;raw_bytes=21227350;count=3;records=225200} inactive {blob_bytes=141321168;raw_bytes=137674250;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:06:43.834825Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 
2025-06-25T15:06:43.834874Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:06:43.834941Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:06:43.834982Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:06:43.835172Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:43.835242Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:06:43.835307Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863615971;tx_id=18446744073709551615;;current_snapshot_ts=1750863912331; 2025-06-25T15:06:43.835347Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:43.835393Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:43.835430Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:43.835519Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:06:43.841619Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:06:43.842271Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:06:43.842311Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:06:43.842341Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:06:43.842385Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:06:43.842474Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:06:43.842543Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863615971;tx_id=18446744073709551615;;current_snapshot_ts=1750863912331; 2025-06-25T15:06:43.842586Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:06:43.842638Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:43.842679Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:06:43.842771Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=1.000000s; 2025-06-25T15:06:43.842819Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:9976:11569];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> THiveTest::TestTabletsStartingCounterExternalBoot [GOOD] >> TScaleRecommenderTest::BasicTest >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus [GOOD] >> TScaleRecommenderTest::BasicTest [GOOD] >> TraverseColumnShard::TraverseColumnTable [GOOD] ------- [TM] {asan, 
default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> TScaleRecommenderTest::BasicTest [GOOD] Test command err: 2025-06-25T15:06:20.255925Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.281264Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.281521Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.282398Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.282705Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-25T15:06:20.283661Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-25T15:06:20.283709Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.284552Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:30:2076] ControllerId# 72057594037932033 2025-06-25T15:06:20.284587Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:20.284678Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.284785Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.295566Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:20.295623Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.297554Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:38:2081] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.297712Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:39:2082] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.297853Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:40:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.298064Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:41:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.298190Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:42:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.298319Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:43:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.298450Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# 
[1:29:2075] Create Queue# [1:44:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.298474Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T15:06:20.298551Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:30:2076] 2025-06-25T15:06:20.298595Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:30:2076] 2025-06-25T15:06:20.298637Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T15:06:20.298673Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:06:20.299274Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:06:20.299502Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.306874Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:30:2076] 2025-06-25T15:06:20.306936Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.307003Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:20.307091Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.307127Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-25T15:06:20.312941Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-25T15:06:20.314331Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-25T15:06:20.314826Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:06:20.315041Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:30:2076] 2025-06-25T15:06:20.315104Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.315263Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-25T15:06:20.315319Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-25T15:06:20.315363Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-25T15:06:20.315397Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-25T15:06:20.315439Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:20.315545Z node 1 :PIPE_CLIENT DEBUG: 
tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:34:2063] 2025-06-25T15:06:20.315569Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:34:2063] 2025-06-25T15:06:20.315923Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-25T15:06:20.315946Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:20.316077Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\363\365\\\016\336\205\240m2\241c\3010\003\261\342\227\n\267}" } 2025-06-25T15:06:20.316244Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:20.316352Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.316426Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:20.316638Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.316829Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-25T15:06:20.316872Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-25T15:06:20.316980Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:30:2076] 2025-06-25T15:06:20.317011Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:30:2076] 2025-06-25T15:06:20.317049Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-25T15:06:20.317088Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-25T15:06:20.320856Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-25T15:06:20.320910Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 
2025-06-25T15:06:20.320942Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 2146435075 Sender# [1:48:2091] SessionId# [0:0:0] Cookie# 0 2025-06-25T15:06:20.320977Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.013748s 2025-06-25T15:06:20.321316Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:06:20.321964Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-25T15:06:20.322014Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-25T15:06:20.322048Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 2} 2025-06-25T15:06:20.322077Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:20.322151Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-25T15:06:20.322180Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} ... 2075186224037888] connected with status OK role: Leader [24:567:2482] 2025-06-25T15:06:47.937696Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [24:567:2482] 2025-06-25T15:06:47.937755Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [24:567:2482] 2025-06-25T15:06:47.937838Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [24:567:2482] 2025-06-25T15:06:47.937893Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [24:567:2482] 2025-06-25T15:06:47.937992Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72075186224037888] HandleSend Sender# [24:566:2481] EventType# 268697642 2025-06-25T15:06:47.938550Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [24:570:2485] 2025-06-25T15:06:47.938627Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [24:570:2485] 2025-06-25T15:06:47.938757Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StNormal ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:47.938844Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 24 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [24:419:2365] 2025-06-25T15:06:47.938929Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037888] queue send [24:570:2485] 2025-06-25T15:06:47.938993Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72075186224037888] received pending shutdown [24:570:2485] 2025-06-25T15:06:47.939064Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [24:570:2485] 2025-06-25T15:06:47.939152Z node 24 
:PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [24:570:2485] 2025-06-25T15:06:47.939279Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [24:570:2485] 2025-06-25T15:06:47.939452Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [24:570:2485] 2025-06-25T15:06:47.939525Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [24:570:2485] 2025-06-25T15:06:47.939584Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [24:570:2485] 2025-06-25T15:06:47.939664Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [24:570:2485] 2025-06-25T15:06:47.939725Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [24:570:2485] 2025-06-25T15:06:47.939812Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72075186224037888] HandleSend Sender# [24:569:2484] EventType# 268697612 2025-06-25T15:06:47.940064Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:12} Tx{22, NKikimr::NHive::TTxUpdateTabletMetrics} queued, type NKikimr::NHive::TTxUpdateTabletMetrics 2025-06-25T15:06:47.940158Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:12} Tx{22, NKikimr::NHive::TTxUpdateTabletMetrics} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:06:47.940380Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:12} Tx{22, NKikimr::NHive::TTxUpdateTabletMetrics} hope 1 -> done Change{15, redo 82b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-25T15:06:47.940471Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:12} Tx{22, NKikimr::NHive::TTxUpdateTabletMetrics} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:06:47.953706Z node 24 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [8b41b9ac8186ade8] bootstrap ActorId# [24:573:2488] Group# 2147483648 BlobCount# 1 BlobIDs# [[72075186224037888:1:12:0:0:92:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T15:06:47.953891Z node 24 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [8b41b9ac8186ade8] Id# [72075186224037888:1:12:0:0:92:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T15:06:47.954016Z node 24 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [8b41b9ac8186ade8] restore Id# [72075186224037888:1:12:0:0:92:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T15:06:47.954114Z node 24 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [8b41b9ac8186ade8] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224037888:1:12:0:0:92:1] Marker# BPG33 2025-06-25T15:06:47.954201Z node 24 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [8b41b9ac8186ade8] Sending missing VPut part# 0 to# 0 blob Id# [72075186224037888:1:12:0:0:92:1] Marker# BPG32 2025-06-25T15:06:47.954417Z node 24 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [24:425:2368] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72075186224037888:1:12:0:0:92:1] FDS# 92 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T15:06:47.957940Z node 24 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [8b41b9ac8186ade8] received {EvVPutResult Status# OK ID# [72075186224037888:1:12:0:0:92:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 18 } 
Cost# 80724 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 19 }}}} from# [80000000:1:0:0:0] Marker# BPP01 2025-06-25T15:06:47.958118Z node 24 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [8b41b9ac8186ade8] Result# TEvPutResult {Id# [72075186224037888:1:12:0:0:92:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 2147483648 Marker# BPP12 2025-06-25T15:06:47.958252Z node 24 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [8b41b9ac8186ade8] SendReply putResult# TEvPutResult {Id# [72075186224037888:1:12:0:0:92:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T15:06:47.958473Z node 24 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2147483648 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.076 sample PartId# [72075186224037888:1:12:0:0:92:1] QueryCount# 1 VDiskId# [80000000:1:0:0:0] NodeId# 24 } TEvVPutResult{ TimestampMs# 4.603 VDiskId# [80000000:1:0:0:0] NodeId# 24 Status# OK } ] } 2025-06-25T15:06:47.958709Z node 24 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72075186224037888:1:12:0:0:92:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-25T15:06:47.958925Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:13} commited cookie 1 for step 12 2025-06-25T15:06:47.959516Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [24:575:2490] 2025-06-25T15:06:47.959586Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [24:575:2490] 2025-06-25T15:06:47.959696Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StNormal ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:47.959785Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 24 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [24:419:2365] 2025-06-25T15:06:47.959874Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037888] queue send [24:575:2490] 2025-06-25T15:06:47.959943Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72075186224037888] received pending shutdown [24:575:2490] 2025-06-25T15:06:47.960015Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [24:575:2490] 2025-06-25T15:06:47.960084Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [24:575:2490] 2025-06-25T15:06:47.960241Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [24:575:2490] 2025-06-25T15:06:47.960471Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [24:575:2490] 2025-06-25T15:06:47.960556Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [24:575:2490] 2025-06-25T15:06:47.960612Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [24:575:2490] 2025-06-25T15:06:47.960685Z node 24 :PIPE_CLIENT DEBUG: 
tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [24:575:2490] 2025-06-25T15:06:47.960738Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [24:575:2490] 2025-06-25T15:06:47.960862Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72075186224037888] HandleSend Sender# [24:574:2489] EventType# 2146435094 2025-06-25T15:06:47.961469Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [24:578:2493] 2025-06-25T15:06:47.961531Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [24:578:2493] 2025-06-25T15:06:47.965494Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StNormal ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:47.965585Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 24 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [24:419:2365] 2025-06-25T15:06:47.965696Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037888] queue send [24:578:2493] 2025-06-25T15:06:47.965767Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72075186224037888] received pending shutdown [24:578:2493] 2025-06-25T15:06:47.965840Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [24:578:2493] 2025-06-25T15:06:47.965943Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [24:578:2493] 2025-06-25T15:06:47.966112Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [24:578:2493] 2025-06-25T15:06:47.966334Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [24:578:2493] 2025-06-25T15:06:47.966410Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [24:578:2493] 2025-06-25T15:06:47.966473Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [24:578:2493] 2025-06-25T15:06:47.966552Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [24:578:2493] 2025-06-25T15:06:47.966608Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [24:578:2493] 2025-06-25T15:06:47.966715Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72075186224037888] HandleSend Sender# [24:577:2492] EventType# 268697642 >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-fifo] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-by_deduplication_id] [GOOD] >> 
THiveTest::TestHiveNoBalancingWithLowResourceUsage [GOOD] >> THiveTest::TestLockTabletExecution >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-content_based] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTable [GOOD] Test command err: 2025-06-25T15:04:04.358548Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:04:04.358851Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:04:04.358940Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001ea7/r3tmp/tmpJMtCT7/pdisk_1.dat 2025-06-25T15:04:04.641330Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61537, node 1 2025-06-25T15:04:04.854340Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:04:04.854409Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:04:04.854443Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:04:04.855106Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:04:04.862313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:04:04.960033Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:04.960150Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:04.974840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3882 2025-06-25T15:04:05.475929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:04:07.724522Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:04:07.749145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:07.749237Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:07.786264Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:04:07.788174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:07.975395Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:08.010109Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:08.010687Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:08.011190Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:08.011354Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:08.011545Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:08.011645Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:08.011740Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:08.011827Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:08.011891Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:08.195439Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:08.195533Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:08.208004Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:08.336919Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:04:08.371674Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:04:08.371777Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:04:08.398939Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:04:08.399138Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:04:08.399318Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:04:08.399385Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:04:08.399429Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:04:08.399473Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:04:08.399515Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:04:08.399559Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:04:08.399988Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:04:08.420614Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:08.420706Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:08.427675Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:04:08.433072Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:04:08.433296Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:04:08.439942Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:04:08.453349Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:04:08.453414Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:04:08.453469Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:04:08.463618Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:08.469605Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:04:08.469695Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:04:08.644093Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:04:08.777424Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:04:08.834322Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:04:09.357510Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:09.577267Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:09.577398Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:09.592504Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:04:09.711511Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:09.711680Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:09.711908Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:09.712000Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2245:2806];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:09.712091Z node 2 :TX_COLUMNSHARD WARN: ... er/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:06:43.755424Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:8168:6056], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-25T15:06:43.942172Z node 2 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [2:8242:6101] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:06:44.027075Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8264:6115]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:44.027409Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-25T15:06:44.027525Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8266:6117] 2025-06-25T15:06:44.027615Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8266:6117] 2025-06-25T15:06:44.028058Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8267:6118] 2025-06-25T15:06:44.028238Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8266:6117], server id = [2:8267:6118], tablet id = 72075186224037894, status = OK 2025-06-25T15:06:44.028324Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8267:6118], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-25T15:06:44.028390Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-25T15:06:44.028532Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:06:44.028637Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8264:6115], StatRequests.size() = 1 2025-06-25T15:06:44.184907Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OWE4Y2E2ZGUtY2I1OGZkMmItMjhjZjJiOTctMWZiYTEwYzY=, TxId: 2025-06-25T15:06:44.184990Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OWE4Y2E2ZGUtY2I1OGZkMmItMjhjZjJiOTctMWZiYTEwYzY=, TxId: 2025-06-25T15:06:44.185547Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:44.210924Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-25T15:06:44.210998Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-25T15:06:44.249056Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-25T15:06:44.249144Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-25T15:06:44.296375Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8266:6117], schemeshard count = 1 2025-06-25T15:06:46.856100Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:06:46.856159Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-25T15:06:46.856194Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:06:46.856234Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:06:46.860346Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:06:46.883834Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:06:46.884432Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:06:46.884527Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:06:46.885399Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:06:46.902098Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:06:46.902368Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-25T15:06:46.903143Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8383:6177], server id = [2:8387:6181], tablet id = 72075186224037899, status = OK 2025-06-25T15:06:46.903553Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8383:6177], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:46.904071Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8384:6178], server id = [2:8388:6182], tablet id = 72075186224037900, status = OK 2025-06-25T15:06:46.904141Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8384:6178], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:46.909557Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8385:6179], server id = [2:8389:6183], tablet id = 72075186224037901, status = OK 2025-06-25T15:06:46.909674Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8385:6179], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:06:46.910199Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8386:6180], server id = [2:8390:6184], tablet id = 72075186224037902, status = OK 2025-06-25T15:06:46.910268Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8386:6180], path = { OwnerId: 72075186224037897 LocalId: 4 } 
2025-06-25T15:06:46.930814Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:06:46.931847Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8383:6177], server id = [2:8387:6181], tablet id = 72075186224037899 2025-06-25T15:06:46.931911Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:46.937522Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-25T15:06:46.938369Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8384:6178], server id = [2:8388:6182], tablet id = 72075186224037900 2025-06-25T15:06:46.938412Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:46.940033Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-25T15:06:46.945119Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8385:6179], server id = [2:8389:6183], tablet id = 72075186224037901 2025-06-25T15:06:46.945179Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:46.945299Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-25T15:06:46.945363Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:06:46.945601Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:06:46.945830Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:06:46.946269Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:06:46.953272Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8386:6180], server id = [2:8390:6184], tablet id = 72075186224037902 2025-06-25T15:06:46.953334Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:06:46.954325Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:06:47.004085Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8419:6209]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:47.004224Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-25T15:06:47.004261Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8419:6209], StatRequests.size() = 1 2025-06-25T15:06:47.144688Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YWZkYzgzYWYtODIyMmYzZDMtOTA1YWM1M2UtY2ZmZGU5ODk=, TxId: 2025-06-25T15:06:47.144774Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YWZkYzgzYWYtODIyMmYzZDMtOTA1YWM1M2UtY2ZmZGU5ODk=, TxId: ... waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-25T15:06:47.145292Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8432:6215]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-25T15:06:47.145736Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:47.145799Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-25T15:06:47.146015Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:06:47.149525Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-25T15:06:47.149593Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-25T15:06:47.149665Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-25T15:06:47.154087Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] [GOOD] >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookupDepededRead [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v1] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-fifo] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/query/unittest >> KqpLimits::CancelAfterRoTxWithFollowerStreamLookupDepededRead [GOOD] Test command err: Trying to start YDB, gRPC: 17289, MsgBus: 6660 2025-06-25T15:01:06.531431Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519902185950605377:2227];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:01:06.532364Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013f0/r3tmp/tmpUkQEMN/pdisk_1.dat 2025-06-25T15:01:07.045377Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:07.045461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:07.074092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:01:07.111526Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17289, node 1 2025-06-25T15:01:07.209013Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:07.209041Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:07.209053Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:07.209168Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6660 2025-06-25T15:01:07.537043Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6660 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:01:07.741966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:01:07.759688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-25T15:01:07.772469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:01:09.681456Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902198835508046:2317], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:09.681524Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519902198835508057:2320], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:09.681566Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:01:09.685239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:01:09.695511Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519902198835508060:2321], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T15:01:09.794583Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519902198835508111:2561] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:01:10.171397Z node 1 :KQP_COMPUTE WARN: log.cpp:784: fline=kqp_compute_actor_factory.cpp:41;problem=cannot_allocate_memory;tx_id=281474976710661;task_id=1;memory=1048576; 2025-06-25T15:01:10.171431Z node 1 :KQP_COMPUTE WARN: dq_compute_memory_quota.h:152: TxId: 281474976710661, task: 1. [Mem] memory 1048576 NOT granted 2025-06-25T15:01:10.182025Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7519902203130475449:2329], TxId: 281474976710661, task: 1. Ctx: { TraceId : 01jyksptdefpsxmpedskbt4s4r. SessionId : ydb://session/3?node_id=1&id=MjQzMGEwZGEtOTc0YjZkYTgtM2NjOWEwYjEtYzNjZTUxMzA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: OVERLOADED KIKIMR_PRECONDITION_FAILED: {
: Error: Mkql memory limit exceeded, allocated by task 1: 10, host: ghrun-kqfvx6aroe, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976710661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-06-25T15:01:10.169012Z }, code: 2029 }. 2025-06-25T15:01:10.188740Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7519902203130475450:2330], TxId: 281474976710661, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=MjQzMGEwZGEtOTc0YjZkYTgtM2NjOWEwYjEtYzNjZTUxMzA=. TraceId : 01jyksptdefpsxmpedskbt4s4r. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7519902203130475438:2315], status: OVERLOADED, reason: {
: Error: Terminate execution } 2025-06-25T15:01:10.204269Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=1&id=MjQzMGEwZGEtOTc0YjZkYTgtM2NjOWEwYjEtYzNjZTUxMzA=, ActorId: [1:7519902198835508020:2315], ActorState: ExecuteState, TraceId: 01jyksptdefpsxmpedskbt4s4r, Create QueryResponse for error on request, msg:
: Error: Mkql memory limit exceeded, allocated by task 1: 10, host: ghrun-kqfvx6aroe, canAllocateExtraMemory: 1, memory manager details for current node: TxResourcesInfo { TxId: 281474976710661, Database: /Root, PoolId: default, MemoryPoolPercent: 100.00, tx initially granted memory: 20B, tx total memory allocations: 1MiB, tx largest successful memory allocation: 1MiB, tx last failed memory allocation: 1MiB, tx total execution units: 2, started at: 2025-06-25T15:01:10.169012Z } , code: 2029 Trying to start YDB, gRPC: 9466, MsgBus: 12399 2025-06-25T15:01:11.120783Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519902207803771297:2077];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0013f0/r3tmp/tmpqO0HHn/pdisk_1.dat 2025-06-25T15:01:11.238508Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T15:01:11.310474Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:01:11.310559Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:01:11.313067Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:11.314089Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519902207803771249:2080] 1750863671103481 != 1750863671103484 2025-06-25T15:01:11.335294Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9466, node 2 2025-06-25T15:01:11.484853Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:01:11.484874Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:01:11.484881Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:01:11.484959Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12399 TClient is connected to server localhost:12399 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root ... 
te, TraceId: 01jykt0gjx0p467pxnbkdjhkk6, Create QueryResponse for error on request, msg: 2025-06-25T15:06:28.636825Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0hem9p3ba96d5hsvtjq4, Create QueryResponse for error on request, msg: 2025-06-25T15:06:29.555932Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0jb93wnr5fjy7xmp4trq, Create QueryResponse for error on request, msg: 2025-06-25T15:06:30.029698Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0jt16jspf7d5vs0y2mq5, Create QueryResponse for error on request, msg: 2025-06-25T15:06:30.596413Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0kbp4d43k16ka6kedj1j, Create QueryResponse for error on request, msg: 2025-06-25T15:06:31.504561Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0m80cjfms1a9wp7ephfr, Create QueryResponse for error on request, msg: 2025-06-25T15:06:32.420622Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0n4hfqe8ezfngxy37sj8, Create QueryResponse for error on request, msg: 2025-06-25T15:06:33.309874Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0p0a3tz480b4v8jsqw42, Create QueryResponse for error on request, msg: 2025-06-25T15:06:34.207202Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0pw9a7fhdvfm34126qr5, Create QueryResponse for error on request, msg: 2025-06-25T15:06:35.148144Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0qsm1a8vxy8jttbkntry, Create QueryResponse for error on request, msg: 2025-06-25T15:06:35.631253Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0r8p324e338pxvv4r8qq, Create QueryResponse for error on request, msg: 2025-06-25T15:06:36.157605Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 
01jykt0rs3f21gv5enckgznw46, Create QueryResponse for error on request, msg: 2025-06-25T15:06:36.649678Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519903606134566590:2473] TxId: 281474976715950. Ctx: { TraceId: 01jykt0s8ccv75ewg8myk3b7z3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 475ms } {
: Error: Cancelling after 476ms during execution } ] 2025-06-25T15:06:36.649809Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519903606134566597:4458], TxId: 281474976715950, task: 2. Ctx: { TraceId : 01jykt0s8ccv75ewg8myk3b7z3. SessionId : ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7519903606134566590:2473], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-25T15:06:36.650078Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7519903606134566598:4459], TxId: 281474976715950, task: 3. Ctx: { TraceId : 01jykt0s8ccv75ewg8myk3b7z3. SessionId : ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7519903606134566590:2473], status: CANCELLED, reason: {
: Error: Terminate execution } 2025-06-25T15:06:36.650480Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0s8ccv75ewg8myk3b7z3, Create QueryResponse for error on request, msg: 2025-06-25T15:06:37.143165Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0sqp5h370mhzaf838wjb, Create QueryResponse for error on request, msg: 2025-06-25T15:06:38.092394Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0tnd2xnrdzk8q0yzan97, Create QueryResponse for error on request, msg: 2025-06-25T15:06:39.037118Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0vjwbt9gr6fkg0anmbnq, Create QueryResponse for error on request, msg: 2025-06-25T15:06:39.977084Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0wg50hek2rrdtbskrc0g, Create QueryResponse for error on request, msg: 2025-06-25T15:06:40.475827Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0wzk5cy84ejzcxhctdb5, Create QueryResponse for error on request, msg: 2025-06-25T15:06:40.984277Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0xfb45bzva2zqefv8xvw, Create QueryResponse for error on request, msg: 2025-06-25T15:06:41.489021Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519903627609403242:2473] TxId: 0. Ctx: { TraceId: 01jykt0xz9b62vjq6b3j5byrq3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 485ms } {
: Error: Cancelling after 486ms during execution } ] 2025-06-25T15:06:41.489239Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0xz9b62vjq6b3j5byrq3, Create QueryResponse for error on request, msg: 2025-06-25T15:06:41.992593Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0yeycq061fhhknz495hc, Create QueryResponse for error on request, msg: 2025-06-25T15:06:42.496202Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0yys70dgn42176mgy9ca, Create QueryResponse for error on request, msg: 2025-06-25T15:06:43.056465Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt0zg68enbj2xk21wfjagc, Create QueryResponse for error on request, msg: 2025-06-25T15:06:43.564326Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [4:7519903636199337900:2473] TxId: 281474976715963. Ctx: { TraceId: 01jykt1001afeamxn7je7gmqqd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. CANCELLED: [ {
: Error: Request canceled after 490ms } {
: Error: Cancelling after 490ms during execution } ] 2025-06-25T15:06:43.564645Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt1001afeamxn7je7gmqqd, Create QueryResponse for error on request, msg: 2025-06-25T15:06:44.075879Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt10fs9ejj9nvb3zncn48k, Create QueryResponse for error on request, msg: 2025-06-25T15:06:44.984581Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt11c88rav67kqq7bw9qxs, Create QueryResponse for error on request, msg: 2025-06-25T15:06:45.492931Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt11w6d3mr39ahfcqqtr5e, Create QueryResponse for error on request, msg: 2025-06-25T15:06:46.095403Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt12exdvr75psg0svr7wxf, Create QueryResponse for error on request, msg: 2025-06-25T15:06:47.053069Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt13cs5wf02x8mn4sc926a, Create QueryResponse for error on request, msg: 2025-06-25T15:06:48.019080Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=4&id=Y2VhNTExNDYtYmM1NTBjMS1kMDFmMTVlOS0xMzBkODY0Zg==, ActorId: [4:7519903172342863958:2473], ActorState: ExecuteState, TraceId: 01jykt14asae3tgd15kbk90g6c, Create QueryResponse for error on request, msg: >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-std] >> THiveTest::TestLockTabletExecution [GOOD] >> THiveTest::TestLockTabletExecutionBadOwner >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-fifo] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] |94.2%| [TA] $(B)/ydb/core/kqp/ut/query/test-results/unittest/{meta.json ... results_accumulator.log} |94.2%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/query/test-results/unittest/{meta.json ... results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-content_based] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v0] [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v1] >> THiveTest::TestLockTabletExecutionBadOwner [GOOD] >> THiveTest::TestLockTabletExecutionDelete >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v1] >> TColumnShardTestReadWrite::ReadGroupBy [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v0] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v0] [GOOD] >> THiveTest::TestLockTabletExecutionDelete [GOOD] >> THiveTest::TestLockTabletExecutionDeleteReboot >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v0] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v1] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v1] [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v0] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v1] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::ReadGroupBy [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8328;columns=19; -- group by key: 0 2025-06-25T15:05:19.594358Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:19.620236Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:19.620492Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:19.626792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:19.626996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:19.627202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:19.627318Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:19.627409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:19.627519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:19.627616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:19.627701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:19.627791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:19.627929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:19.628047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:19.652179Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:19.652291Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:19.652346Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:19.652468Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:19.652578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:19.652626Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:19.652661Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 
2025-06-25T15:05:19.652716Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:19.652756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:19.652782Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:19.652809Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:19.652927Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:19.652984Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:19.653022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:19.653048Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:19.653147Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:19.653201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:19.653266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:19.653295Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:19.653349Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:19.653381Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:19.653408Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:19.653630Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:19.653675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:19.653705Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:19.653898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:19.653947Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:19.653974Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:19.654088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:19.654133Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:19.654160Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:19.654275Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:19.654331Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:19.654382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:19.654409Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:19.654600Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=39; 2025-06-25T15:05:19.654697Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=47; 2025-06-25T15:05:19.654780Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=43; 2025-06-25T15:05:19.654858Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=41; 2025-06-25T15:05:19.654936Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:19.655004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:19.655040Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Exec ... anGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=2052; 2025-06-25T15:06:54.318149Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1;merger=0;interval_id=2052; 2025-06-25T15:06:54.318188Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-25T15:06:54.318287Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-25T15:06:54.318322Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=1;count=1;finished=1; 2025-06-25T15:06:54.318360Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:06:54.318911Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:06:54.319060Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:1;schema=100: binary 101: binary 102: binary 103: uint64;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-25T15:06:54.319100Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:06:54.319196Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready 
result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;);columns=4;rows=1; 2025-06-25T15:06:54.319259Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:254;stage=data_format;batch_size=26;num_rows=1;batch_columns=100,101,102,103; 2025-06-25T15:06:54.319500Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[54:417:2429];bytes=26;rows=1;faults=0;finished=0;fault=0;schema=100: binary 101: binary 102: binary 103: uint64; 2025-06-25T15:06:54.319618Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-25T15:06:54.319754Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-25T15:06:54.319854Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-25T15:06:54.320114Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:06:54.320215Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-25T15:06:54.320305Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-25T15:06:54.320361Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [54:418:2430] finished for tablet 9437184 2025-06-25T15:06:54.320807Z node 54 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[54:417:2429];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.013},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.015}],"full":{"a":1750864014305256,"name":"_full_task","f":1750864014305256,"d_finished":0,"c":0,"l":1750864014320408,"d":15152},"events":[{"name":"bootstrap","f":1750864014305467,"d_finished":2723,"c":1,"l":1750864014308190,"d":2723},{"a":1750864014320093,"name":"ack","f":1750864014318887,"d_finished":990,"c":1,"l":1750864014319877,"d":1305},{"a":1750864014320080,"name":"processing","f":1750864014309329,"d_finished":7448,"c":10,"l":1750864014319879,"d":7776},{"name":"ProduceResults","f":1750864014307074,"d_finished":2736,"c":13,"l":1750864014320346,"d":2736},{"a":1750864014320350,"name":"Finish","f":1750864014320350,"d_finished":0,"c":0,"l":1750864014320408,"d":58},{"name":"task_result","f":1750864014309346,"d_finished":6311,"c":9,"l":1750864014318446,"d":6311}],"id":"9437184::2052"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-25T15:06:54.320870Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[54:417:2429];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:06:54.321264Z node 54 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[54:417:2429];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.001},{"events":["l_bootstrap"],"t":0.002},{"events":["f_processing","f_task_result"],"t":0.004},{"events":["f_ack","l_task_result"],"t":0.013},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.015}],"full":{"a":1750864014305256,"name":"_full_task","f":1750864014305256,"d_finished":0,"c":0,"l":1750864014320905,"d":15649},"events":[{"name":"bootstrap","f":1750864014305467,"d_finished":2723,"c":1,"l":1750864014308190,"d":2723},{"a":1750864014320093,"name":"ack","f":1750864014318887,"d_finished":990,"c":1,"l":1750864014319877,"d":1802},{"a":1750864014320080,"name":"processing","f":1750864014309329,"d_finished":7448,"c":10,"l":1750864014319879,"d":8273},{"name":"ProduceResults","f":1750864014307074,"d_finished":2736,"c":13,"l":1750864014320346,"d":2736},{"a":1750864014320350,"name":"Finish","f":1750864014320350,"d_finished":0,"c":0,"l":1750864014320905,"d":555},{"name":"task_result","f":1750864014309346,"d_finished":6311,"c":9,"l":1750864014318446,"d":6311}],"id":"9437184::2052"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;;); 2025-06-25T15:06:54.321331Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:06:54.304720Z;index_granules=0;index_portions=1;index_batches=2;schema_columns=4;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=14056;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=14056;selected_rows=0; 2025-06-25T15:06:54.321366Z node 54 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:06:54.321658Z node 54 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[54:418:2430];TabletId=9437184;ScanId=0;TxId=100;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=4;column_names=i32;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=i16,i32,i8,ts;);;ff=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;program_input=(column_ids=4,17,18,19;column_names=i32,json,jsondoc,yson;);;; >> THiveTest::TestDrainWithMaxTabletsScheduled [GOOD] >> THiveTest::TestDownAfterDrain >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v0] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] >> THiveTest::TestLockTabletExecutionDeleteReboot [GOOD] >> THiveTest::TestLockTabletExecutionBadUnlock >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] >> THiveTest::TestFollowersCrossDC_MovingLeader [GOOD] >> THiveTest::TestFollowersCrossDC_KillingHiveAndFollower >> THiveTest::TestLockTabletExecutionBadUnlock [GOOD] >> THiveTest::TestLockTabletExecutionGoodUnlock >> TSchemeShardServerLess::StorageBillingLabels [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-fifo] [GOOD] >> THiveTest::TestDownAfterDrain [GOOD] >> THiveTest::TestCreateTabletsWithRaceForStoragePoolsKIKIMR_9659 >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-fifo] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v1] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::StorageBillingLabels [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:05:46.612118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, 
InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:05:46.612185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:46.612213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:05:46.612235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:05:46.612264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:05:46.612282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:05:46.612329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:05:46.612384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:05:46.612844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:05:46.613142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:05:46.662967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:05:46.663033Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:46.674106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:05:46.674335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:05:46.674478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:05:46.678345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:05:46.678524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:05:46.678914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:46.679079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:05:46.681388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:46.681573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:05:46.682312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-25T15:05:46.682364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:05:46.682463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:05:46.682514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:05:46.682561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:05:46.682668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.687604Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:05:46.779655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:05:46.779851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.779995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:05:46.780023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:05:46.780182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:05:46.780234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:05:46.781905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:46.782027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:05:46.782154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.782195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at 
tablet# 72057594046678944 2025-06-25T15:05:46.782224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:05:46.782259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:05:46.783474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.783511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:05:46.783536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:05:46.784533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.784568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:05:46.784638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:46.784668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:05:46.786905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:05:46.788138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:05:46.788371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:05:46.789163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:05:46.789247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:05:46.789278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:46.789503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:05:46.789543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: 
NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:05:46.789683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:05:46.789750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:05:46.791440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:05:46.791473Z node 1 :FLAT_TX_SCHEMESHARD ... ributesVersion: 2, at schemeshard: 72075186233409549 2025-06-25T15:05:47.053071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:590: Cannot publish paths for unknown operation id#0 2025-06-25T15:05:47.053443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T15:05:47.053497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-25T15:05:47.053521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-25T15:05:47.053554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 5 2025-06-25T15:05:47.053582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-25T15:05:47.053642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-25T15:05:47.055296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409549 2025-06-25T15:05:47.055320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409549, txId: 0, path id: [OwnerId: 72075186233409549, LocalPathId: 1] 2025-06-25T15:05:47.055415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409549 2025-06-25T15:05:47.055441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:674:2584], at schemeshard: 72075186233409549, txId: 0, path id: 1 2025-06-25T15:05:47.055556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5893: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 3 TabletID: 72075186233409549 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 2 UserAttributesVersion: 2 TenantHive: 
18446744073709551615 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-06-25T15:05:47.055630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-25T15:05:47.055687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:574:2511], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 2, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 2, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-25T15:05:47.056284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72075186233409549, cookie: 0 2025-06-25T15:05:47.056399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-25T15:05:47.056442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-25T15:05:47.056594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-25T15:05:47.056619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-25T15:05:47.056902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-25T15:05:47.056950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-25T15:05:47.056975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:757:2647] TestWaitNotification: OK eventTxId 105 ... 
waiting for metering 2025-06-25T15:05:51.440648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:05:51.440749Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:51.494055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:05:51.494128Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:05:51.536357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:05:51.536430Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:06:08.884488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:06:08.884654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:90: TTxServerlessStorageBilling: initiate at first time, schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 1970-01-01T00:01:00.000000Z, set LastBillTime: 1970-01-01T00:01:00.000000Z, next retry at: 1970-01-01T00:02:00.000000Z 2025-06-25T15:06:08.887545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:06:08.981588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-06-25T15:06:08.981724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T15:06:08.981797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-25T15:06:09.055476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-25T15:06:09.055572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-25T15:06:09.055616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-25T15:06:09.097598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-06-25T15:06:09.097680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-06-25T15:06:09.097741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-06-25T15:06:33.535014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:06:33.535178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:121: TTxServerlessStorageBilling: too soon call, wait until current period ends, schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 1970-01-01T00:02:00.000000Z, LastBillTime: 1970-01-01T00:01:00.000000Z, lastBilled: 1970-01-01T00:01:00.000000Z--1970-01-01T00:01:59.000000Z, toBill: 1970-01-01T00:01:00.000000Z--1970-01-01T00:01:59.000000Z, next retry at: 1970-01-01T00:03:00.000000Z 
2025-06-25T15:06:33.535249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:06:33.620737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944 2025-06-25T15:06:33.620832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944 2025-06-25T15:06:33.620883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944 2025-06-25T15:06:33.690383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-25T15:06:33.690502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-25T15:06:33.690567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-25T15:06:33.749634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6716: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-06-25T15:06:33.749750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-06-25T15:06:33.749829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-06-25T15:06:59.003069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:06:59.003543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:191: TTxServerlessStorageBilling: make a bill, record: '{"usage":{"start":120,"quantity":59,"finish":179,"type":"delta","unit":"byte*second"},"tags":{"ydb_size":0},"id":"72057594046678944-3-120-179-0","cloud_id":"CLOUD_ID_VAL","source_wt":180,"source_id":"sless-docapi-ydb-storage","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.v1","labels":{"k":"v"},"folder_id":"FOLDER_ID_VAL","version":"1.0.0"} ', schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 1970-01-01T00:03:00.000000Z, LastBillTime: 1970-01-01T00:01:00.000000Z, lastBilled: 1970-01-01T00:01:00.000000Z--1970-01-01T00:01:59.000000Z, toBill: 1970-01-01T00:02:00.000000Z--1970-01-01T00:02:59.000000Z, next retry at: 1970-01-01T00:04:00.000000Z 2025-06-25T15:06:59.007284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete ... blocking NKikimr::NMetering::TEvMetering::TEvWriteMeteringJson from FLAT_SCHEMESHARD_ACTOR to TFakeMetering cookie 0 ... waiting for metering (done) >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] |94.2%| [TA] $(B)/ydb/core/tx/schemeshard/ut_serverless/test-results/unittest/{meta.json ... results_accumulator.log} |94.2%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v0] >> THiveTest::TestLockTabletExecutionGoodUnlock [GOOD] >> THiveTest::TestLockTabletExecutionLocalGone >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v1] [GOOD] >> THiveTest::TestCreateTabletsWithRaceForStoragePoolsKIKIMR_9659 [GOOD] >> THiveTest::TestDeleteTablet >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-by_deduplication_id] >> THiveTest::TestFollowersCrossDC_KillingHiveAndFollower [GOOD] >> THiveTest::TestFollowerCompatability1 >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] |94.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v1] >> THiveTest::TestDeleteTablet [GOOD] >> THiveTest::TestDeleteOwnerTablets >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v1] >> THiveTest::TestDeleteOwnerTablets [GOOD] >> THiveTest::TestDeleteOwnerTabletsMany >> THiveTest::TestLockTabletExecutionLocalGone [GOOD] >> THiveTest::TestLocalRegistrationInSharedHive >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/services/persqueue_v1/ut/describes_ut/unittest |94.3%| [TA] $(B)/ydb/services/persqueue_v1/ut/describes_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |94.3%| [TA] {RESULT} $(B)/ydb/services/persqueue_v1/ut/describes_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v1] [GOOD] >> THiveTest::TestFollowerCompatability1 [GOOD] >> THiveTest::TestFollowerCompatability2 >> TColumnShardTestReadWrite::CompactionSplitGranule_PKTimestamp [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-std] >> THiveTest::TestLocalRegistrationInSharedHive [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-by_deduplication_id] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-content_based] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranule_PKTimestamp [GOOD] Test command err: 2025-06-25T15:05:38.948488Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:38.965386Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:38.965590Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:38.970724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:38.970885Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:38.971050Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:38.971113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 
2025-06-25T15:05:38.971163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:38.971243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:38.971338Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:38.971405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:38.971472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:38.971521Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:38.971581Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:38.987410Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:38.987540Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:38.987572Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:38.987725Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:38.987848Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:38.987912Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:38.987939Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:38.987988Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:38.988025Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 
2025-06-25T15:05:38.988047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:38.988069Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:38.988184Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:38.988221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:38.988250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:38.988279Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:38.988369Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:38.988407Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:38.988437Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:38.988453Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:38.988494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:38.988527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:38.988556Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:38.988775Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:38.988824Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:38.988855Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:38.989037Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:38.989120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:38.989153Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:38.989276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:38.989308Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:38.989343Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:38.989414Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:38.989477Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:38.989507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:38.989539Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:38.989682Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=36; 2025-06-25T15:05:38.989768Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=45; 2025-06-25T15:05:38.989841Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=35; 2025-06-25T15:05:38.989894Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=22; 2025-06-25T15:05:38.989951Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:38.989993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:38.990020Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:38.990054Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
load_stage_name=EXECUTE:granule/portions;fline=constructor_meta.cpp:71;memory_size=2086;data_size=2078;sum=513488;count=432;size_of_meta=136; 2025-06-25T15:07:04.265883Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2158;data_size=2150;sum=529040;count=216;size_of_portion=208; 2025-06-25T15:07:04.266079Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=23975; 2025-06-25T15:07:04.266161Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=10; 2025-06-25T15:07:04.267065Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=840; 2025-06-25T15:07:04.267139Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=25177; 2025-06-25T15:07:04.267185Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=25331; 2025-06-25T15:07:04.267248Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=12; 2025-06-25T15:07:04.267527Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=236; 2025-06-25T15:07:04.267577Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=26237; 2025-06-25T15:07:04.267754Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=120; 2025-06-25T15:07:04.267882Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=84; 2025-06-25T15:07:04.268081Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=148; 2025-06-25T15:07:04.268274Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=138; 2025-06-25T15:07:04.272672Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=4334; 2025-06-25T15:07:04.277922Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=5148; 2025-06-25T15:07:04.278044Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=13; 2025-06-25T15:07:04.278095Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=11; 2025-06-25T15:07:04.278159Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T15:07:04.278231Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=38; 2025-06-25T15:07:04.278270Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-25T15:07:04.278370Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=68; 2025-06-25T15:07:04.278411Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-25T15:07:04.278508Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=37; 2025-06-25T15:07:04.278600Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=53; 2025-06-25T15:07:04.278699Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=54; 2025-06-25T15:07:04.278740Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=44646; 2025-06-25T15:07:04.278919Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=108275528;raw_bytes=198365560;count=15;records=1915000} inactive {blob_bytes=205496480;raw_bytes=345889958;count=39;records=3635000} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:07:04.279035Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 
2025-06-25T15:07:04.279089Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:07:04.279160Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:07:04.279219Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:07:04.279366Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:07:04.279457Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:07:04.279530Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863643070;tx_id=18446744073709551615;;current_snapshot_ts=1750863939989; 2025-06-25T15:07:04.279600Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:07:04.279647Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:07:04.279686Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:07:04.279784Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:07:04.283935Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:07:04.284374Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:07:04.284422Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:07:04.284458Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:07:04.284509Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:07:04.284609Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:07:04.284711Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863643070;tx_id=18446744073709551615;;current_snapshot_ts=1750863939989; 2025-06-25T15:07:04.284790Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:07:04.284863Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:07:04.284916Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:07:04.285006Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.999000s; 2025-06-25T15:07:04.285083Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:4104:6072];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestLocalRegistrationInSharedHive [GOOD] Test command err: 2025-06-25T15:06:20.083189Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.100164Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.100444Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.101249Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# 
/Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.101489Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-25T15:06:20.102176Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-25T15:06:20.102207Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.102728Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:30:2076] ControllerId# 72057594037932033 2025-06-25T15:06:20.102751Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:20.102826Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.102901Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.111108Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:20.111142Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.112563Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:38:2081] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.112698Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:39:2082] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.112778Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:40:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.112872Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:41:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.112962Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:42:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.113046Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:43:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.113111Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:44:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.113124Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T15:06:20.113167Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:30:2076] 2025-06-25T15:06:20.113196Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:30:2076] 2025-06-25T15:06:20.113225Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T15:06:20.113261Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:06:20.113675Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:06:20.113832Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.122178Z node 1 :PIPE_CLIENT DEBUG: 
tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:30:2076] 2025-06-25T15:06:20.122243Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.122271Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:20.122338Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.122371Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-25T15:06:20.125822Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-25T15:06:20.132430Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-25T15:06:20.132902Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:06:20.135282Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:30:2076] 2025-06-25T15:06:20.135338Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.137685Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-25T15:06:20.137752Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-25T15:06:20.137819Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-25T15:06:20.137839Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-25T15:06:20.137868Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:20.137971Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:34:2063] 2025-06-25T15:06:20.137992Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:34:2063] 2025-06-25T15:06:20.138362Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-25T15:06:20.138385Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:20.138504Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\363\365\\\016\336\205\240m2\241c\3010\003\261\342\227\n\267}" } 2025-06-25T15:06:20.138674Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:20.138746Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 
72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.138824Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:20.139801Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.139928Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-25T15:06:20.139957Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-25T15:06:20.140034Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:30:2076] 2025-06-25T15:06:20.140060Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:30:2076] 2025-06-25T15:06:20.140092Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-25T15:06:20.140122Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-25T15:06:20.151617Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-25T15:06:20.151676Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-25T15:06:20.151708Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 2146435075 Sender# [1:48:2091] SessionId# [0:0:0] Cookie# 0 2025-06-25T15:06:20.151743Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.029270s 2025-06-25T15:06:20.155136Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:06:20.155907Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-25T15:06:20.155963Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-25T15:06:20.156001Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 2} 2025-06-25T15:06:20.156034Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: 
{EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:20.156119Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-25T15:06:20.156152Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} ... DEBUG: tablet_pipe_client.cpp:195: TClient[72057594046678944] forward result remote node 40 [41:554:2160] 2025-06-25T15:07:04.919274Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594046678944] remote node connected [41:554:2160] 2025-06-25T15:07:04.919339Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594046678944]::SendEvent [41:554:2160] 2025-06-25T15:07:04.919506Z node 40 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594046678944] Accept Connect Originator# [41:554:2160] 2025-06-25T15:07:04.919761Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594046678944] connected with status OK role: Leader [41:554:2160] 2025-06-25T15:07:04.919820Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594046678944] send queued [41:554:2160] 2025-06-25T15:07:04.919908Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046678944] send [41:554:2160] 2025-06-25T15:07:04.919934Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046678944] push event to server [41:554:2160] 2025-06-25T15:07:04.919989Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594046678944]::SendEvent [41:554:2160] 2025-06-25T15:07:04.920108Z node 40 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594046678944] Push Sender# [41:553:2160] EventType# 271122945 2025-06-25T15:07:04.920243Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{16, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme 2025-06-25T15:07:04.920333Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{16, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:07:04.920557Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{16, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} hope 1 -> done Change{11, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:07:04.920657Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{16, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:07:04.923017Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [41:560:2161] 2025-06-25T15:07:04.923055Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [41:560:2161] 2025-06-25T15:07:04.923291Z node 41 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:07:04.923349Z node 41 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 41 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [40:332:2201] 2025-06-25T15:07:04.923427Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [41:560:2161] 2025-06-25T15:07:04.923477Z node 41 :PIPE_CLIENT 
DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [41:564:2162] 2025-06-25T15:07:04.923519Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [41:564:2162] 2025-06-25T15:07:04.923680Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037888] queue send [41:564:2162] 2025-06-25T15:07:04.923766Z node 41 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StInit ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:07:04.923851Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72057594037927937] forward result remote node 40 [41:560:2161] 2025-06-25T15:07:04.924216Z node 41 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:07:04.924459Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594037927937] remote node connected [41:560:2161] 2025-06-25T15:07:04.924498Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [41:560:2161] 2025-06-25T15:07:04.925257Z node 40 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-25T15:07:04.925310Z node 40 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-25T15:07:04.925376Z node 40 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-25T15:07:04.925559Z node 40 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [41:560:2161] 2025-06-25T15:07:04.925938Z node 41 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [40:469:2302] CurrentLeaderTablet: [40:485:2313] CurrentGeneration: 1 CurrentStep: 0} 2025-06-25T15:07:04.926131Z node 41 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [40:469:2302] CurrentLeaderTablet: [40:485:2313] CurrentGeneration: 1 CurrentStep: 0} 2025-06-25T15:07:04.926228Z node 41 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [40:469:2302] CurrentLeaderTablet: [40:485:2313] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[40:24343667:0] : 3}, {[40:1099535971443:0] : 6}}}} 2025-06-25T15:07:04.926261Z node 41 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037888 followers: 0 2025-06-25T15:07:04.926303Z node 41 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 41 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [40:469:2302] 2025-06-25T15:07:04.926450Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037888] forward result remote node 40 [41:564:2162] 2025-06-25T15:07:04.926893Z node 41 
:PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037888] remote node connected [41:564:2162] 2025-06-25T15:07:04.926935Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [41:564:2162] 2025-06-25T15:07:04.927061Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [41:560:2161] 2025-06-25T15:07:04.927095Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [41:560:2161] 2025-06-25T15:07:04.927136Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [41:560:2161] 2025-06-25T15:07:04.927235Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [41:560:2161] 2025-06-25T15:07:04.927421Z node 40 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [41:564:2162] 2025-06-25T15:07:04.927510Z node 40 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594037927937] Push Sender# [41:557:2161] EventType# 268959744 2025-06-25T15:07:04.927701Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxRegisterNode} queued, type NKikimr::NHive::TTxRegisterNode 2025-06-25T15:07:04.927772Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:07:04.927983Z node 40 :HIVE WARN: node_info.cpp:25: HIVE#72057594037927937 Node(41, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:07:04.928104Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxRegisterNode} hope 1 -> done Change{14, redo 208b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-25T15:07:04.928184Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:07:04.932828Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{26, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-06-25T15:07:04.932927Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{26, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:07:04.933054Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{26, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{15, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:07:04.933141Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{26, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:07:04.933366Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [41:564:2162] 2025-06-25T15:07:04.933406Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [41:564:2162] 2025-06-25T15:07:04.933433Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [41:564:2162] 2025-06-25T15:07:04.933498Z node 41 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [41:564:2162] 2025-06-25T15:07:04.933626Z node 40 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72075186224037888] Push Sender# [41:558:2162] EventType# 268959744 2025-06-25T15:07:04.933760Z node 40 :TABLET_EXECUTOR 
DEBUG: Leader{72075186224037888:1:5} Tx{6, NKikimr::NHive::TTxRegisterNode} queued, type NKikimr::NHive::TTxRegisterNode 2025-06-25T15:07:04.933802Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:5} Tx{6, NKikimr::NHive::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:07:04.933941Z node 40 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(41, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:07:04.934036Z node 40 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(41, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:07:04.934124Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:5} Tx{6, NKikimr::NHive::TTxRegisterNode} hope 1 -> done Change{6, redo 199b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-25T15:07:04.934177Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:5} Tx{6, NKikimr::NHive::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:07:04.934343Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{7, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-06-25T15:07:04.934384Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{7, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:07:04.934460Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{7, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:07:04.934495Z node 40 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{7, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v1] >> THiveTest::TestFollowerCompatability2 [GOOD] >> THiveTest::TestFollowerCompatability3 >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-fifo] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v1] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] [GOOD] >> 
test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-std] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-fifo] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_fifo_read_delete_single_message >> THiveTest::TestFollowerCompatability3 [GOOD] >> THiveTest::TestGetStorageInfo >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-content_based] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-by_deduplication_id] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-std] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-fifo] >> THiveTest::TestCheckSubHiveMigrationManyTablets [GOOD] >> THiveTest::TestCreateSubHiveCreateManyTablets >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-fifo] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-std] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] >> THiveTest::TestDeleteOwnerTabletsMany [GOOD] >> THiveTest::TestDeleteTabletWithFollowers >> THiveTest::TestGetStorageInfo [GOOD] >> 
THiveTest::TestGetStorageInfoDeleteTabletBeforeAssigned >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-fifo] >> THiveTest::TestGetStorageInfoDeleteTabletBeforeAssigned [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_fifo_read_delete_single_message [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_only_single_read_infly_from_fifo >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-std] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-std] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-fifo] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestGetStorageInfoDeleteTabletBeforeAssigned [GOOD] Test command err: 2025-06-25T15:06:20.063644Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.089811Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.090080Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.090955Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# 
/Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.091232Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.092044Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:76:2077] ControllerId# 72057594037932033 2025-06-25T15:06:20.092084Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:20.092173Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.092326Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.101214Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:20.101273Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.103215Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:83:2081] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.103382Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:84:2082] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.103511Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:85:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.103638Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:86:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.103757Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:87:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.103887Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:88:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.104025Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:89:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.104051Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T15:06:20.104126Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:76:2077] 2025-06-25T15:06:20.104159Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:76:2077] 2025-06-25T15:06:20.104214Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T15:06:20.104255Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:06:20.104766Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:06:20.104845Z node 3 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.107269Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { 
VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.107393Z node 3 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 3 PDiskId# 1 Path# "SectorMap:2:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.107852Z node 3 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.108033Z node 3 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.108807Z node 3 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [3:99:2077] ControllerId# 72057594037932033 2025-06-25T15:06:20.108838Z node 3 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:20.108894Z node 3 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.108993Z node 3 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.109279Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.111589Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.111705Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.112107Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.112343Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-25T15:06:20.113396Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-25T15:06:20.113440Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.114144Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:112:2078] ControllerId# 72057594037932033 2025-06-25T15:06:20.114185Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:20.114242Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.114319Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.125055Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:20.125100Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: 
EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.126677Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:120:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.126814Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:121:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.126938Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:122:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.127060Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:123:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.127217Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:124:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.127346Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:125:2088] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.127468Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:126:2089] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.127490Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T15:06:20.127545Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:112:2078] 2025-06-25T15:06:20.127570Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:112:2078] 2025-06-25T15:06:20.127605Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T15:06:20.127690Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:06:20.128287Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:06:20.128434Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.128877Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [2:76:2077] 2025-06-25T15:06:20.128930Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.128958Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:20.144533Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:20.144596Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.146165Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:98:2076] Create Queue# [3:133:2081] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.146303Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:98:2076] Create Queue# [3:134:2082] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.146425Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:98:2076] Create Queue# [3:135:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.146554Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:98:2076] Create Queue# [3:136:2084] targetNodeId# 1 Marker# 
DSP01 2025-06-25T15:06:20.146689Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:98:2076] Create Queue# [3:137:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.146807Z node 3 :BS_PROXY D ... _strategy_restore.h:65: [210b5ea57fa83861] restore Id# [72057594037927937:2:4:0:0:483:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T15:07:11.717163Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [210b5ea57fa83861] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:4:0:0:483:1] Marker# BPG33 2025-06-25T15:07:11.717195Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [210b5ea57fa83861] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:4:0:0:483:1] Marker# BPG32 2025-06-25T15:07:11.717317Z node 65 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [65:36:2080] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:4:0:0:483:1] FDS# 483 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T15:07:11.718392Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [210b5ea57fa83861] received {EvVPutResult Status# OK ID# [72057594037927937:2:4:0:0:483:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 18 } Cost# 83803 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 19 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-25T15:07:11.718486Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [210b5ea57fa83861] Result# TEvPutResult {Id# [72057594037927937:2:4:0:0:483:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-25T15:07:11.718535Z node 65 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [210b5ea57fa83861] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:4:0:0:483:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T15:07:11.718652Z node 65 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.5 sample PartId# [72057594037927937:2:4:0:0:483:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 65 } TEvVPutResult{ TimestampMs# 1.594 VDiskId# [0:1:0:0:0] NodeId# 65 Status# OK } ] } 2025-06-25T15:07:11.718815Z node 65 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:4:0:0:483:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-25T15:07:11.718921Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} commited cookie 1 for step 4 2025-06-25T15:07:11.719282Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [65:314:2292] 2025-06-25T15:07:11.719345Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [65:314:2292] 2025-06-25T15:07:11.719457Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:07:11.719535Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 65 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [65:272:2262] 
2025-06-25T15:07:11.719612Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [65:314:2292] 2025-06-25T15:07:11.719686Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [65:314:2292] 2025-06-25T15:07:11.719750Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [65:314:2292] 2025-06-25T15:07:11.719859Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [65:314:2292] 2025-06-25T15:07:11.720021Z node 65 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [65:314:2292] 2025-06-25T15:07:11.720188Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [65:314:2292] 2025-06-25T15:07:11.720288Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [65:314:2292] 2025-06-25T15:07:11.720354Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [65:314:2292] 2025-06-25T15:07:11.720432Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [65:314:2292] 2025-06-25T15:07:11.720485Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [65:314:2292] 2025-06-25T15:07:11.720557Z node 65 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [65:313:2291] EventType# 268697621 2025-06-25T15:07:11.721023Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [65:317:2295] 2025-06-25T15:07:11.721082Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [65:317:2295] 2025-06-25T15:07:11.721176Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:07:11.721248Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 65 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [65:272:2262] 2025-06-25T15:07:11.721332Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [65:317:2295] 2025-06-25T15:07:11.721389Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [65:317:2295] 2025-06-25T15:07:11.721440Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [65:317:2295] 2025-06-25T15:07:11.721506Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [65:317:2295] 2025-06-25T15:07:11.721609Z node 65 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [65:317:2295] 2025-06-25T15:07:11.721742Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [65:317:2295] 2025-06-25T15:07:11.721848Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [65:317:2295] 2025-06-25T15:07:11.721897Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push 
event to server [65:317:2295] 2025-06-25T15:07:11.721975Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [65:317:2295] 2025-06-25T15:07:11.722027Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [65:317:2295] 2025-06-25T15:07:11.722093Z node 65 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [65:316:2294] EventType# 268697615 2025-06-25T15:07:11.722283Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxDeleteTablet} queued, type NKikimr::NHive::TTxDeleteTablet 2025-06-25T15:07:11.722364Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxDeleteTablet} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:07:11.722600Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxDeleteTablet} hope 1 -> done Change{5, redo 102b alter 0b annex 0, ~{ 1 } -{ }, 0 gb} 2025-06-25T15:07:11.722735Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxDeleteTablet} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:07:11.722943Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} Tx{6, NKikimr::NHive::TTxDeleteTabletResult} queued, type NKikimr::NHive::TTxDeleteTabletResult 2025-06-25T15:07:11.723018Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} Tx{6, NKikimr::NHive::TTxDeleteTabletResult} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:07:11.723282Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} Tx{6, NKikimr::NHive::TTxDeleteTabletResult} hope 1 -> done Change{6, redo 106b alter 0b annex 0, ~{ 16, 1 } -{ }, 0 gb} 2025-06-25T15:07:11.723359Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} Tx{6, NKikimr::NHive::TTxDeleteTabletResult} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:07:11.734617Z node 65 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [8729fbeaec2f6015] bootstrap ActorId# [65:320:2298] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:5:0:0:157:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T15:07:11.734798Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [8729fbeaec2f6015] Id# [72057594037927937:2:5:0:0:157:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T15:07:11.734883Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [8729fbeaec2f6015] restore Id# [72057594037927937:2:5:0:0:157:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T15:07:11.734976Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [8729fbeaec2f6015] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:5:0:0:157:1] Marker# BPG33 2025-06-25T15:07:11.735043Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [8729fbeaec2f6015] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:5:0:0:157:1] Marker# BPG32 2025-06-25T15:07:11.735263Z node 65 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [65:36:2080] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:5:0:0:157:1] FDS# 157 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T15:07:11.736583Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [8729fbeaec2f6015] received {EvVPutResult Status# OK ID# 
[72057594037927937:2:5:0:0:157:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 19 } Cost# 81236 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 20 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-25T15:07:11.736752Z node 65 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [8729fbeaec2f6015] Result# TEvPutResult {Id# [72057594037927937:2:5:0:0:157:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-25T15:07:11.736843Z node 65 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [8729fbeaec2f6015] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:5:0:0:157:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T15:07:11.737022Z node 65 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.904 sample PartId# [72057594037927937:2:5:0:0:157:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 65 } TEvVPutResult{ TimestampMs# 2.259 VDiskId# [0:1:0:0:0] NodeId# 65 Status# OK } ] } 2025-06-25T15:07:11.737210Z node 65 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:5:0:0:157:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-25T15:07:11.737362Z node 65 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} commited cookie 1 for step 5 >> TColumnShardTestReadWrite::CompactionInGranule_PKString_Reboot [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] >> THiveTest::TestDeleteTabletWithFollowers [GOOD] >> THiveTest::TestCreateTabletBeforeLocal >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-fifo] >> THiveTest::TestCreateTabletBeforeLocal [GOOD] >> THiveTest::TestCreateTabletReboots |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] [GOOD] >> TColumnShardTestSchema::TTL+Reboot-Internal-FirstPkColumn |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionInGranule_PKString_Reboot [GOOD] Test command err: 2025-06-25T15:05:29.528462Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:29.544612Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:29.544788Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:29.549528Z 
node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:29.549690Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:29.549862Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:29.549924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:29.549976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:29.550026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:29.550092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:29.550165Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:29.550226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:29.550281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:29.550340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:29.565362Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:29.565488Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:29.565531Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:29.565691Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:29.565818Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:29.565884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:29.565919Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:29.565977Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:29.566014Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:29.566036Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:29.566062Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:29.566154Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:29.566197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:29.566219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:29.566236Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:29.566296Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:29.566329Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:29.566350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:29.566374Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:29.566401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:29.566425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 
2025-06-25T15:05:29.566442Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:29.566563Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:29.566587Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:29.566608Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:29.566701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:29.566726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:29.566749Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:29.566851Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:29.566879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:29.566896Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:29.566938Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:29.566975Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:29.566996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:29.567041Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:29.567160Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=32; 2025-06-25T15:05:29.567228Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=31; 2025-06-25T15:05:29.567279Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=26; 2025-06-25T15:05:29.567341Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=24; 2025-06-25T15:05:29.567392Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:29.567433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:29.567470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:29.567506Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... ns;fline=constructor_meta.cpp:71;memory_size=26438;data_size=26406;sum=13447440;count=14328;size_of_meta=136; 2025-06-25T15:07:11.614559Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=26510;data_size=26478;sum=13963248;count=7164;size_of_portion=208; 2025-06-25T15:07:11.615477Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=96449; 2025-06-25T15:07:11.615555Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=12; 2025-06-25T15:07:11.617598Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=1974; 2025-06-25T15:07:11.617657Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=98779; 2025-06-25T15:07:11.617707Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=98899; 2025-06-25T15:07:11.617785Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=14; 2025-06-25T15:07:11.618874Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=1004; 2025-06-25T15:07:11.618933Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=100553; 2025-06-25T15:07:11.619131Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=131; 2025-06-25T15:07:11.619272Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=79; 2025-06-25T15:07:11.619699Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=369; 2025-06-25T15:07:11.620044Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=288; 2025-06-25T15:07:11.646734Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=26599; 2025-06-25T15:07:11.676817Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=29939; 2025-06-25T15:07:11.676930Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=15; 2025-06-25T15:07:11.676982Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=10; 2025-06-25T15:07:11.677022Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T15:07:11.677082Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=33; 2025-06-25T15:07:11.677118Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=4; 2025-06-25T15:07:11.677199Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=52; 2025-06-25T15:07:11.677236Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=4; 2025-06-25T15:07:11.677290Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=27; 2025-06-25T15:07:11.677361Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=43; 2025-06-25T15:07:11.677423Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=35; 2025-06-25T15:07:11.677467Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=165570; 2025-06-25T15:07:11.677606Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=22744072;raw_bytes=22320020;count=3;records=225200} inactive {blob_bytes=149450960;raw_bytes=145316940;count=221;records=1575200} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:07:11.677735Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:07:11.677798Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:07:11.677859Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:07:11.677897Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:07:11.678040Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:07:11.678096Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:07:11.678154Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863635149;tx_id=18446744073709551615;;current_snapshot_ts=1750863931341; 2025-06-25T15:07:11.678184Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:07:11.678220Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:07:11.678255Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:07:11.678344Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:07:11.684187Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:07:11.684705Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:07:11.684744Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:07:11.684775Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:07:11.684828Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:07:11.684911Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=9; 2025-06-25T15:07:11.684968Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863635149;tx_id=18446744073709551615;;current_snapshot_ts=1750863931341; 2025-06-25T15:07:11.685007Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=9;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:07:11.685048Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:07:11.685078Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:07:11.685144Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=1.000000s; 2025-06-25T15:07:11.685181Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:10355:11947];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_only_single_read_infly_from_fifo [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v1] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.3%| [TM] 
{asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] [GOOD] |94.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v1] [GOOD] >> TColumnShardTestSchema::RebootForgetAfterFail >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-std] >> THiveTest::TestCreateTabletReboots [GOOD] >> THiveTest::TestCreateTabletWithWrongSPoolsAndReassignGroupsFailButDeletionIsOk |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-std] [GOOD] >> THiveTest::TestCreateTabletWithWrongSPoolsAndReassignGroupsFailButDeletionIsOk [GOOD] >> THiveTest::TestCreateTabletAndReassignGroupsWithReboots >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v0] [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloadedWithReboot [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsMultipleColumns >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v1] [GOOD] >> AnalyzeColumnshard::AnalyzeMultiOperationId [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v0] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v1] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-fifo] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeMultiOperationId [GOOD] Test command err: 2025-06-25T15:03:47.372269Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:47.372685Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:47.372797Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f8d/r3tmp/tmpEhjEmW/pdisk_1.dat 2025-06-25T15:03:47.747584Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5407, node 1 2025-06-25T15:03:48.045768Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:48.045842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:48.045881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:48.046583Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:48.049141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:48.145991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:48.146109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:48.162272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19147 2025-06-25T15:03:48.744661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:51.668042Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:51.700902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:51.701008Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:51.739969Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:51.742669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:51.957602Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:51.992466Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:51.993033Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:51.993625Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:51.993801Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:51.994025Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:51.994130Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:51.994233Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:51.994322Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:51.994395Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:03:52.183137Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:52.183254Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:52.195815Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:52.329262Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:03:52.372929Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:03:52.373030Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:03:52.407008Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:03:52.407308Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:03:52.407505Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:03:52.407571Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:03:52.407636Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:03:52.407682Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:03:52.407731Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:03:52.407780Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:03:52.408300Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:03:52.431867Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:52.431991Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1794:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:03:52.439687Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:03:52.443406Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1830:2584] 2025-06-25T15:03:52.444043Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1830:2584], schemeshard id = 72075186224037897 2025-06-25T15:03:52.453641Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:03:52.472177Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:03:52.472229Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:03:52.472292Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:03:52.484418Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:03:52.491222Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:03:52.491341Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:03:52.656916Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:03:52.813622Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:03:52.870989Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:03:53.409466Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:03:53.602636Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2157:3028], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:53.602762Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:03:53.620438Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:03:53.734974Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2227:2795];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:03:53.735218Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2227:2795];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:03:53.735612Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2227:2795];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:03:53.735791Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2227:2795];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:03:53.735913Z node 2 :TX_COLUMNSHARD WARN: ... p:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId9 2025-06-25T15:07:09.198197Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:07:10.434860Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:07:10.435015Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:07:10.445870Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:07:11.776863Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:07:11.776935Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId8 2025-06-25T15:07:11.776961Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId9 2025-06-25T15:07:11.776982Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:07:13.034497Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:07:13.034645Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-25T15:07:13.034718Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:07:13.035353Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:07:13.048799Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:07:13.049177Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:07:13.049250Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:07:13.049607Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:07:13.074172Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:07:13.074408Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 11, current Round: 0 2025-06-25T15:07:13.075058Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9507:6523], server id = [2:9508:6524], tablet id = 72075186224037899, status = OK 2025-06-25T15:07:13.075162Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9507:6523], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:07:13.076383Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:07:13.076478Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:07:13.076605Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:07:13.076758Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:07:13.077106Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:07:13.079168Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9507:6523], server id = [2:9508:6524], tablet id = 72075186224037899 2025-06-25T15:07:13.079203Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:07:13.079941Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:07:13.119355Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MTUxYTUxOS0xMzgzYzIxMi1mMTQ3Nzk3Yi01OWQ1NTAwMw==, TxId: 2025-06-25T15:07:13.119423Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MTUxYTUxOS0xMzgzYzIxMi1mMTQ3Nzk3Yi01OWQ1NTAwMw==, TxId: 2025-06-25T15:07:13.119876Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:07:13.148901Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:07:13.148969Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId8, ActorId=[1:3053:3303] 2025-06-25T15:07:13.718706Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 11 is different from the current 0 2025-06-25T15:07:13.718780Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-25T15:07:14.392159Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T15:07:14.403080Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 10 is different from the current 0 2025-06-25T15:07:14.403162Z node 2 :STATISTICS DEBUG: service_impl.cpp:1021: Skip TEvStatisticsRequestTimeout 2025-06-25T15:07:14.403280Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:07:14.403326Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId9 2025-06-25T15:07:14.403356Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:07:15.643140Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:07:16.863488Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:07:16.863673Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:07:16.885434Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:07:16.885513Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. 
OperationId=operationId9 2025-06-25T15:07:16.885539Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:07:18.086929Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:07:18.087052Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-25T15:07:18.087088Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:07:18.087705Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-25T15:07:18.103250Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-25T15:07:18.103710Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-25T15:07:18.103783Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-25T15:07:18.104136Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-25T15:07:18.118318Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-25T15:07:18.118542Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 12, current Round: 0 2025-06-25T15:07:18.119137Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9672:6612], server id = [2:9673:6613], tablet id = 72075186224037899, status = OK 2025-06-25T15:07:18.119232Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9672:6612], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-25T15:07:18.120728Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-25T15:07:18.120816Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-25T15:07:18.120980Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-25T15:07:18.121152Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-25T15:07:18.121525Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-25T15:07:18.123635Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9672:6612], server id = [2:9673:6613], tablet id = 72075186224037899 2025-06-25T15:07:18.123673Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-25T15:07:18.124582Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-25T15:07:18.146576Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MzJhNDg5NDQtYWFmMmQ0NDgtZDg1MjRiYTEtMzcxZWZhOTc=, TxId: 2025-06-25T15:07:18.146647Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MzJhNDg5NDQtYWFmMmQ0NDgtZDg1MjRiYTEtMzcxZWZhOTc=, TxId: 2025-06-25T15:07:18.147126Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-25T15:07:18.184263Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-25T15:07:18.184348Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId9, ActorId=[1:3053:3303] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-std] >> MoveTable::RenameToItself_Negative |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-fifo] >> 
THiveTest::TestCreateTabletAndReassignGroupsWithReboots [GOOD] >> THiveTest::TestCreateTabletChangeToExternal >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] >> MoveTable::RenameToItself_Negative [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> MoveTable::RenameToItself_Negative [GOOD] Test command err: 2025-06-25T15:07:21.812659Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:07:21.843532Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:07:21.843849Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:07:21.851042Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:07:21.851249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:07:21.851470Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:07:21.851575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:07:21.851710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:07:21.851813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:07:21.851894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:07:21.852000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:07:21.852098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:07:21.852201Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:07:21.852348Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:07:21.881098Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:07:21.881248Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:07:21.881297Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:07:21.881497Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:07:21.881646Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:07:21.881730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:07:21.881772Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:07:21.881847Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:07:21.881896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:07:21.881933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:07:21.881984Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:07:21.882148Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:07:21.882225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:07:21.882266Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:07:21.882311Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:07:21.882410Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:07:21.882467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:07:21.882508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:07:21.882552Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:07:21.882616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:07:21.882655Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:07:21.882685Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:07:21.882893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:07:21.882932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:07:21.882957Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:07:21.883123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:07:21.883167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:07:21.883196Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:07:21.883348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:07:21.883388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:07:21.883420Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:07:21.883491Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:07:21.883556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:07:21.883589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:07:21.883625Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:07:21.883842Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=54; 2025-06-25T15:07:21.883936Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=41; 2025-06-25T15:07:21.884003Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=32; 2025-06-25T15:07:21.884080Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=40; 2025-06-25T15:07:21.884185Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:07:21.884273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:07:21.884396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:07:21.884448Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
agerLoadingTime=418; 2025-06-25T15:07:22.148665Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=34; 2025-06-25T15:07:22.148753Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=51; 2025-06-25T15:07:22.148852Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=50; 2025-06-25T15:07:22.148933Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=47; 2025-06-25T15:07:22.149081Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=102; 2025-06-25T15:07:22.149424Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=300; 2025-06-25T15:07:22.149470Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=6; 2025-06-25T15:07:22.149504Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=5; 2025-06-25T15:07:22.149538Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=5; 2025-06-25T15:07:22.149618Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=45; 2025-06-25T15:07:22.149655Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=5; 2025-06-25T15:07:22.149791Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=56; 2025-06-25T15:07:22.149849Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-25T15:07:22.149910Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=28; 2025-06-25T15:07:22.149961Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=21; 2025-06-25T15:07:22.150012Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=24; 2025-06-25T15:07:22.150056Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=2623; 2025-06-25T15:07:22.150210Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:07:22.150265Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:07:22.150358Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:07:22.150634Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:07:22.150678Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:448;problem=Background activities cannot be started: no index at tablet; 2025-06-25T15:07:22.150938Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:07:22.151044Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:07:22.151071Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:07:22.151098Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:07:22.151142Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:07:22.151180Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:448;problem=Background activities cannot be started: no index at tablet; 2025-06-25T15:07:22.453620Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923004793280;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750864042780;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T15:07:22.453703Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=10;this=88923004793280;method=TTxController::StartProposeOnExecute;tx_info=10:TX_KIND_SCHEMA;min=1750864042780;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;fline=schema.h:38;event=sync_schema; 2025-06-25T15:07:22.456463Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750864042780;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;this=88923004793280;op_tx=10:TX_KIND_SCHEMA;min=1750864042780;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750864042780;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_this=89129165578496;fline=columnshard__propose_transaction.cpp:105;event=actual tx operator; 2025-06-25T15:07:22.456568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750864042780;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;this=88923004793280;op_tx=10:TX_KIND_SCHEMA;min=1750864042780;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750864042780;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_this=89129165578496;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:156:2178]; 2025-06-25T15:07:22.456622Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;request_tx=10:TX_KIND_SCHEMA;min=1750864042780;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;this=88923004793280;op_tx=10:TX_KIND_SCHEMA;min=1750864042780;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_op_tx=10:TX_KIND_SCHEMA;min=1750864042780;max=18446744073709551615;plan=0;src=[1:156:2178];cookie=00:0;;int_this=89129165578496;method=TTxController::FinishProposeOnComplete;tx_id=10;fline=propose_tx.cpp:32;message=;tablet_id=9437184;tx_id=10; 2025-06-25T15:07:22.456987Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxNotifyTxCompletion.Execute at tablet 9437184 2025-06-25T15:07:22.457155Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: PlanStep 1750864042780 at tablet 9437184, mediator 0 2025-06-25T15:07:22.457211Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxPlanStep[2] execute at tablet 9437184 2025-06-25T15:07:22.457572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:45;method=resolve_internal_path_id;ss_local=1;result=not_found; 2025-06-25T15:07:22.457878Z node 1 :TX_COLUMNSHARD INFO: ctor_logger.h:56: EnsureTable for pathId: {internal: 9438184000001, ss: 1} ttl settings: { Version: 1 } at tablet 9437184 2025-06-25T15:07:22.464163Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:07:22.464328Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tables_manager.cpp:304;method=RegisterTable;path_id=9438184000001; 2025-06-25T15:07:22.464404Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine.h:144;event=RegisterTable;path_id=9438184000001; 2025-06-25T15:07:22.471368Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=column_engine_logs.cpp:463;event=OnTieringModified;path_id=9438184000001; 2025-06-25T15:07:22.471563Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=9437184;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=10;fline=tx_controller.cpp:215;event=finished_tx;tx_id=10; 2025-06-25T15:07:22.496172Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: 
TxPlanStep[2] complete at tablet 9437184 2025-06-25T15:07:22.497082Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923004831808;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750864042783;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=schema.cpp:134;propose_execute=move_table;src=1;dst=1; 2025-06-25T15:07:22.497149Z node 1 :TX_COLUMNSHARD_TX ERROR: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];ev=NKikimr::TEvColumnShard::TEvProposeTransaction;tablet_id=9437184;tx_id=11;this=88923004831808;method=TTxController::StartProposeOnExecute;tx_info=11:TX_KIND_SCHEMA;min=1750864042783;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=tx_controller.cpp:364;error=problem on start;message=Rename to existing table; 2025-06-25T15:07:22.511371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750864042783;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;this=88923004831808;op_tx=11:TX_KIND_SCHEMA;min=1750864042783;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=propose_tx.cpp:11;event=scheme_shard_tablet_not_initialized;source=[1:103:2136]; 2025-06-25T15:07:22.511437Z node 1 :TX_COLUMNSHARD ERROR: log.cpp:784: tablet_id=9437184;request_tx=11:TX_KIND_SCHEMA;min=1750864042783;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;this=88923004831808;op_tx=11:TX_KIND_SCHEMA;min=1750864042783;max=18446744073709551615;plan=0;src=[1:103:2136];cookie=00:1;;fline=propose_tx.cpp:23;message=Rename to existing table;tablet_id=9437184;tx_id=11; >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-std] >> THiveTest::TestCreateTabletChangeToExternal [GOOD] >> THiveTest::TestExternalBoot |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-std] [GOOD] >> THiveTest::TestExternalBoot [GOOD] >> THiveTest::TestExternalBootWhenLocked >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-fifo] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-fifo] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> THiveTest::TestExternalBootWhenLocked [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v1] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestExternalBootWhenLocked [GOOD] Test command err: 2025-06-25T15:06:20.091129Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.108013Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.108217Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.108852Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.109065Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.109594Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:76:2077] ControllerId# 72057594037932033 2025-06-25T15:06:20.109618Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:20.109692Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.109784Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.116120Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:20.116164Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.117631Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:83:2081] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.117739Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:84:2082] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.117816Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:85:2083] 
targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.117893Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:86:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.117965Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:87:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.118047Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:88:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.118129Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:75:2076] Create Queue# [2:89:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.118150Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T15:06:20.118201Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:76:2077] 2025-06-25T15:06:20.118224Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:76:2077] 2025-06-25T15:06:20.118270Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T15:06:20.118300Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:06:20.118670Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:06:20.118728Z node 3 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.120585Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.120688Z node 3 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 3 PDiskId# 1 Path# "SectorMap:2:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.120982Z node 3 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.121111Z node 3 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.121575Z node 3 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [3:99:2077] ControllerId# 72057594037932033 2025-06-25T15:06:20.121599Z node 3 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:20.121649Z node 3 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.121730Z node 3 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.121936Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.123599Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } 
PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } PDisks { NodeID: 3 PDiskID: 1 Path: "SectorMap:2:3200" PDiskGuid: 3 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.123688Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.123956Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.124123Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-25T15:06:20.125879Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-25T15:06:20.125927Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.126388Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:112:2078] ControllerId# 72057594037932033 2025-06-25T15:06:20.126417Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:20.126463Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.126527Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.133834Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:20.133866Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.134937Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:120:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.135019Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:121:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.135101Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:122:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.135172Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:123:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.135255Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:124:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.135326Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:125:2088] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.135396Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:111:2077] Create Queue# [1:126:2089] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.135413Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T15:06:20.135452Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: 
TClient[72057594037932033] ::Bootstrap [1:112:2078] 2025-06-25T15:06:20.135471Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:112:2078] 2025-06-25T15:06:20.135496Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T15:06:20.135566Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:06:20.136011Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:06:20.136110Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.136471Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [2:76:2077] 2025-06-25T15:06:20.136511Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.136533Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:20.147293Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:20.147358Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.148491Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:98:2076] Create Queue# [3:133:2081] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.148582Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:98:2076] Create Queue# [3:134:2082] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.148657Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:98:2076] Create Queue# [3:135:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.148736Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:98:2076] Create Queue# [3:136:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.148819Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:98:2076] Create Queue# [3:137:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.148897Z node 3 :BS_PROXY D ... 
PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [29:128:2094] 2025-06-25T15:07:25.907247Z node 29 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [29:102:2094] EventType# 268960257 2025-06-25T15:07:25.907344Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:373: TClient[72075186224037888] peer closed [29:449:2286] 2025-06-25T15:07:25.907401Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [29:449:2286] 2025-06-25T15:07:25.907633Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{21, NKikimr::NHive::TTxUpdateTabletStatus} queued, type NKikimr::NHive::TTxUpdateTabletStatus 2025-06-25T15:07:25.907710Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{21, NKikimr::NHive::TTxUpdateTabletStatus} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:07:25.907839Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{21, NKikimr::NHive::TTxUpdateTabletStatus} hope 1 -> done Change{13, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:07:25.907930Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{21, NKikimr::NHive::TTxUpdateTabletStatus} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:07:25.908179Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{22, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-06-25T15:07:25.908266Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{22, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:07:25.908631Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{22, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{13, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:07:25.908713Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{22, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:07:25.909243Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [29:462:2293] 2025-06-25T15:07:25.909299Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [29:462:2293] 2025-06-25T15:07:25.909450Z node 29 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StNormal ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:07:25.909538Z node 29 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 29 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [29:380:2236] 2025-06-25T15:07:25.909624Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [29:462:2293] 2025-06-25T15:07:25.909686Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [29:462:2293] 2025-06-25T15:07:25.909804Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:349: TClient[72075186224037888] connect request undelivered [29:462:2293] 2025-06-25T15:07:25.909865Z node 29 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[72075186224037888] connect failed [29:462:2293] 2025-06-25T15:07:25.910021Z node 29 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:536: Handle TEvTabletProblem tabletId: 72075186224037888 entry.State: 
StNormal 2025-06-25T15:07:25.910197Z node 29 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:07:25.910361Z node 29 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-25T15:07:25.910443Z node 29 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-25T15:07:25.910480Z node 29 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-25T15:07:25.910566Z node 29 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [29:380:2236] CurrentLeaderTablet: [29:396:2247] CurrentGeneration: 1 CurrentStep: 0} 2025-06-25T15:07:25.910692Z node 29 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [29:380:2236] CurrentLeaderTablet: [29:396:2247] CurrentGeneration: 1 CurrentStep: 0} 2025-06-25T15:07:25.910844Z node 29 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [29:380:2236] CurrentLeaderTablet: [29:396:2247] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[29:24343667:0] : 3}, {[29:1099535971443:0] : 6}}}} 2025-06-25T15:07:25.911004Z node 29 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72075186224037888 followers: 0 2025-06-25T15:07:25.911407Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [30:464:2162] 2025-06-25T15:07:25.911465Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [30:464:2162] 2025-06-25T15:07:25.911556Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:07:25.911641Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 30 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [29:330:2200] 2025-06-25T15:07:25.911732Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [30:464:2162] 2025-06-25T15:07:25.911799Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [30:464:2162] 2025-06-25T15:07:25.911890Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72057594037927937] forward result remote node 29 [30:464:2162] 2025-06-25T15:07:25.912029Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594037927937] remote node connected [30:464:2162] 2025-06-25T15:07:25.912099Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [30:464:2162] 2025-06-25T15:07:25.912414Z node 29 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# 
[30:464:2162] 2025-06-25T15:07:25.912746Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [30:464:2162] 2025-06-25T15:07:25.912814Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [30:464:2162] 2025-06-25T15:07:25.912878Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [30:464:2162] 2025-06-25T15:07:25.912999Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [30:464:2162] 2025-06-25T15:07:25.913060Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [30:464:2162] 2025-06-25T15:07:25.913110Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [30:464:2162] 2025-06-25T15:07:25.913441Z node 29 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594037927937] Push Sender# [30:452:2157] EventType# 268697624 2025-06-25T15:07:25.913652Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{23, NKikimr::NHive::TTxStartTablet} queued, type NKikimr::NHive::TTxStartTablet 2025-06-25T15:07:25.913734Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{23, NKikimr::NHive::TTxStartTablet} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:07:25.913972Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{23, NKikimr::NHive::TTxStartTablet} hope 1 -> done Change{13, redo 144b alter 0b annex 0, ~{ 1, 16 } -{ }, 0 gb} 2025-06-25T15:07:25.914047Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{23, NKikimr::NHive::TTxStartTablet} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:07:25.925502Z node 29 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [db158bc7997c188e] bootstrap ActorId# [29:467:2296] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:9:0:0:127:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T15:07:25.925716Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [db158bc7997c188e] Id# [72057594037927937:2:9:0:0:127:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T15:07:25.925826Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [db158bc7997c188e] restore Id# [72057594037927937:2:9:0:0:127:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T15:07:25.925934Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [db158bc7997c188e] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:9:0:0:127:1] Marker# BPG33 2025-06-25T15:07:25.926033Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [db158bc7997c188e] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:9:0:0:127:1] Marker# BPG32 2025-06-25T15:07:25.926295Z node 29 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [29:81:2082] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:9:0:0:127:1] FDS# 127 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T15:07:25.929725Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [db158bc7997c188e] received {EvVPutResult Status# OK ID# [72057594037927937:2:9:0:0:127:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 24 } Cost# 81000 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { 
SequenceId: 1 MsgId: 25 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-25T15:07:25.929897Z node 29 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [db158bc7997c188e] Result# TEvPutResult {Id# [72057594037927937:2:9:0:0:127:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-25T15:07:25.929995Z node 29 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [db158bc7997c188e] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:9:0:0:127:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T15:07:25.930272Z node 29 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 1.07 sample PartId# [72057594037927937:2:9:0:0:127:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 29 } TEvVPutResult{ TimestampMs# 4.518 VDiskId# [0:1:0:0:0] NodeId# 29 Status# OK } ] } 2025-06-25T15:07:25.930499Z node 29 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:9:0:0:127:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-25T15:07:25.930654Z node 29 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} commited cookie 1 for step 9 |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v0] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-std] [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v1] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-std] [GOOD] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v1] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-fifo] |94.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v0] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-fifo] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_only_single_read_infly_from_fifo [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-std] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-std] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> THiveTest::TestLockTabletExecutionRebootTimeout [GOOD] >> THiveTest::TestLockTabletExecutionReconnect |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-fifo] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-std] >> TNodeBrokerTest::NodesMigrationReuseRemovedID >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v1] [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-std] [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v1] >> THiveTest::TestLockTabletExecutionReconnect [GOOD] >> THiveTest::TestLockTabletExecutionRebootReconnect >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v0] [GOOD] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v1] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v0] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Date-pk_types13-all_types13-index13-Date--] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v0] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v1] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> THiveTest::TestLockTabletExecutionRebootReconnect [GOOD] >> THiveTest::TestLockTabletExecutionReconnectExpire >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] >> TNodeBrokerTest::NodesMigrationReuseRemovedID [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v0] [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-by_deduplication_id] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/ut/unittest >> TNodeBrokerTest::NodesMigrationReuseRemovedID [GOOD] Test command err: 2025-06-25T15:07:35.141303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:07:35.141367Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for nameservers are connected ... 
waiting for nameservers are connected (done) >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] |94.5%| [TA] $(B)/ydb/core/mind/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_all_types-pk_types7-all_types7-index7---] |94.5%| [TA] {RESULT} $(B)/ydb/core/mind/ut/test-results/unittest/{meta.json ... results_accumulator.log} |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] [GOOD] >> THiveTest::TestLockTabletExecutionReconnectExpire [GOOD] >> THiveTest::TestLockTabletExecutionStealLock >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-std] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] >> 
THiveTest::TestLockTabletExecutionStealLock [GOOD] >> THiveTest::TestProgressWithMaxTabletsScheduled >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v0] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-content_based] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v1] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] |94.5%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] [GOOD] >> THiveTest::TestProgressWithMaxTabletsScheduled [GOOD] >> THiveTest::TestResetServerlessComputeResourcesMode >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] >> THiveTest::TestResetServerlessComputeResourcesMode [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-content_based] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestResetServerlessComputeResourcesMode [GOOD] Test command err: 2025-06-25T15:06:20.137333Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.153664Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.153855Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.154489Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.154702Z node 1 
:BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-25T15:06:20.155434Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-25T15:06:20.155469Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.155959Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:53:2077] ControllerId# 72057594037932033 2025-06-25T15:06:20.155984Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:20.156054Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.156128Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.164053Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:20.164090Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.165439Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:61:2082] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.165531Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:62:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.165616Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:63:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.165690Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:64:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.165768Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:65:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.165844Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:66:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.165923Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:67:2088] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.165940Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T15:06:20.166004Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:53:2077] 2025-06-25T15:06:20.166022Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:53:2077] 2025-06-25T15:06:20.166049Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T15:06:20.166085Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:06:20.166607Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:06:20.166665Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.168391Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: 
"SectorMap:1:3200" PDiskGuid: 2 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.168471Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.168752Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.168876Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.169353Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:78:2076] ControllerId# 72057594037932033 2025-06-25T15:06:20.169370Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:20.169404Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.169477Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.169632Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:53:2077] 2025-06-25T15:06:20.169680Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.169699Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:20.173917Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [2:43:2064] 2025-06-25T15:06:20.173943Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [2:43:2064] 2025-06-25T15:06:20.178744Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:20.178778Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.179772Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:84:2080] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.179846Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:85:2081] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.179906Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:86:2082] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.179977Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:87:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.180051Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:88:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.180152Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:89:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.180266Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:90:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.180284Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 
0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T15:06:20.180331Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:78:2076] 2025-06-25T15:06:20.180347Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:78:2076] 2025-06-25T15:06:20.180368Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T15:06:20.180402Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:06:20.180637Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:06:20.180740Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.189188Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.189488Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [2:78:2076] 2025-06-25T15:06:20.189532Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.189554Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:20.189720Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.189741Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-25T15:06:20.195002Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-25T15:06:20.195163Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.195553Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.195603Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-25T15:06:20.195716Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.195733Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-25T15:06:20.195795Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-25T15:06:20.195970Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-25T15:06:20.196014Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:06:20.196172Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:53:2077] 2025-06-25T15:06:20.196209Z node 1 :STATESTORAGE DEBUG: 
statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-25T15:06:20.196244Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvRepl ... T_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594046678944 entry.State: StNormal ev: {EvForward TabletID: 72057594046678944 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:07:48.227791Z node 25 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 25 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72057594046678944 followers: 0 countLeader 1 allowFollowers 0 winner: [24:333:2202] 2025-06-25T15:07:48.227871Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72057594046678944] forward result remote node 24 [25:696:2221] 2025-06-25T15:07:48.227965Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594046678944] remote node connected [25:696:2221] 2025-06-25T15:07:48.228002Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594046678944]::SendEvent [25:696:2221] 2025-06-25T15:07:48.228113Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:349: TClient[72075186224037888] connect request undelivered [24:692:2409] 2025-06-25T15:07:48.228177Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:559: TClient[72075186224037888] immediate retry [24:692:2409] 2025-06-25T15:07:48.228229Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [24:692:2409] 2025-06-25T15:07:48.228338Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:536: Handle TEvTabletProblem tabletId: 72075186224037888 entry.State: StNormal 2025-06-25T15:07:48.228508Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StProblemResolve ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:07:48.228606Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594046678944] Accept Connect Originator# [25:696:2221] 2025-06-25T15:07:48.228741Z node 24 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:07:48.228929Z node 24 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-25T15:07:48.229010Z node 24 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-25T15:07:48.229047Z node 24 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-25T15:07:48.229125Z node 24 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [24:653:2383] CurrentLeaderTablet: [24:655:2384] CurrentGeneration: 3 CurrentStep: 0} 2025-06-25T15:07:48.229267Z node 24 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [24:653:2383] CurrentLeaderTablet: [24:655:2384] CurrentGeneration: 3 CurrentStep: 0} 2025-06-25T15:07:48.229401Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: 
StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [24:653:2383] CurrentLeaderTablet: [24:655:2384] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[24:24343667:0] : 3}, {[24:1099535971443:0] : 6}}}} 2025-06-25T15:07:48.229473Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037888 followers: 0 2025-06-25T15:07:48.229546Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 24 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [24:653:2383] 2025-06-25T15:07:48.229708Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [24:692:2409] 2025-06-25T15:07:48.229823Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [24:692:2409] 2025-06-25T15:07:48.229927Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594046678944] connected with status OK role: Leader [25:696:2221] 2025-06-25T15:07:48.229990Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594046678944] send queued [25:696:2221] 2025-06-25T15:07:48.230113Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [24:692:2409] 2025-06-25T15:07:48.230246Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [24:692:2409] 2025-06-25T15:07:48.230281Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [24:692:2409] 2025-06-25T15:07:48.230325Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046678944] send [25:696:2221] 2025-06-25T15:07:48.230385Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046678944] push event to server [25:696:2221] 2025-06-25T15:07:48.230489Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594046678944]::SendEvent [25:696:2221] 2025-06-25T15:07:48.231719Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594046678944] Push Sender# [25:695:2221] EventType# 271122945 2025-06-25T15:07:48.231978Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{17, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme 2025-06-25T15:07:48.232057Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{17, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:07:48.232328Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{17, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} hope 1 -> done Change{11, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:07:48.232424Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{17, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:07:48.233840Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [25:702:2222] 2025-06-25T15:07:48.233881Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [25:702:2222] 2025-06-25T15:07:48.234111Z node 25 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 
72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:07:48.234164Z node 25 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 25 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [24:332:2201] 2025-06-25T15:07:48.234224Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [25:702:2222] 2025-06-25T15:07:48.234551Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72057594037927937] forward result remote node 24 [25:702:2222] 2025-06-25T15:07:48.235029Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594037927937] remote node connected [25:702:2222] 2025-06-25T15:07:48.235075Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [25:702:2222] 2025-06-25T15:07:48.235700Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [25:702:2222] 2025-06-25T15:07:48.236704Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [25:702:2222] 2025-06-25T15:07:48.236750Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [25:702:2222] 2025-06-25T15:07:48.236798Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [25:702:2222] 2025-06-25T15:07:48.236908Z node 25 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [25:702:2222] 2025-06-25T15:07:48.237116Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594037927937] Push Sender# [25:700:2222] EventType# 268959744 2025-06-25T15:07:48.237314Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:14} Tx{41, NKikimr::NHive::TTxRegisterNode} queued, type NKikimr::NHive::TTxRegisterNode 2025-06-25T15:07:48.237396Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:14} Tx{41, NKikimr::NHive::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:07:48.237602Z node 24 :HIVE WARN: node_info.cpp:25: HIVE#72057594037927937 Node(25, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:07:48.237721Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:14} Tx{41, NKikimr::NHive::TTxRegisterNode} hope 1 -> done Change{24, redo 152b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-25T15:07:48.237825Z node 24 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:14} Tx{41, NKikimr::NHive::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:07:48.238424Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [24:711:2414] 2025-06-25T15:07:48.238490Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [24:711:2414] 2025-06-25T15:07:48.238601Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:07:48.238684Z node 24 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 24 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [24:332:2201] 2025-06-25T15:07:48.238779Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: 
TClient[72057594037927937] queue send [24:711:2414] 2025-06-25T15:07:48.238853Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [24:711:2414] 2025-06-25T15:07:48.238934Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [24:711:2414] 2025-06-25T15:07:48.239006Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [24:711:2414] 2025-06-25T15:07:48.239155Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [24:711:2414] 2025-06-25T15:07:48.239332Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [24:711:2414] 2025-06-25T15:07:48.239397Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [24:711:2414] 2025-06-25T15:07:48.239451Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [24:711:2414] 2025-06-25T15:07:48.239523Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [24:711:2414] 2025-06-25T15:07:48.239579Z node 24 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [24:711:2414] 2025-06-25T15:07:48.239663Z node 24 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [24:710:2413] EventType# 268697616 >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-fifo] [GOOD] >> 
test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-fifo] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-fifo] [GOOD] >> TColumnShardTestSchema::TTL+Reboot-Internal-FirstPkColumn [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v0] |94.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-std] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-fifo] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL+Reboot-Internal-FirstPkColumn [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-25T15:07:14.305593Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828672, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:07:14.310056Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828673, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:07:14.310456Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:07:14.334293Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:07:14.334540Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:07:14.340735Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:07:14.340910Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:07:14.341105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:07:14.341218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:07:14.341359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:07:14.341467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:07:14.341590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:07:14.341686Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:07:14.341773Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:07:14.341878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:07:14.341994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:07:14.344537Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:381: StateInit, received event# 268828684, Sender [1:106:2138], Recipient [1:128:2158]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:07:14.365245Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:07:14.365368Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:07:14.365418Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:07:14.365567Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:07:14.365701Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:07:14.365770Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:07:14.365809Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:07:14.365890Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:07:14.365959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:07:14.366009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:07:14.366048Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:07:14.366223Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:07:14.366295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:07:14.366343Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:07:14.366373Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:07:14.366462Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:07:14.366523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:07:14.366558Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:07:14.366584Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:07:14.366632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:07:14.366664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:07:14.366696Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:07:14.366886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:07:14.366925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:07:14.366951Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:07:14.367162Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:07:14.367240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:07:14.367274Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:07:14.367378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:07:14.367411Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:07:14.367442Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:07:14.367515Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:07:14.367596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:07:14.367633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:07:14.367658Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:07:14.367884Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=43; 2025-06-25T15:07:14.367959Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=36; 2025-06-25T15:07:14.368036Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=40; 2025-06-25T15:07:14.368123Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=49; 2025-06-25T15:07:14.368249Z node 1 :T ... 
_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:1;records_count:31;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:07:54.179634Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:31;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:07:54.179670Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:07:54.179705Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:203;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-25T15:07:54.179847Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:07:54.179935Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:1;records_count:31;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:07:54.179970Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:49;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-25T15:07:54.180053Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:234;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;);columns=1;rows=31; 2025-06-25T15:07:54.180101Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:254;stage=data_format;batch_size=248;num_rows=31;batch_columns=saved_at; 2025-06-25T15:07:54.180352Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:734:2711];bytes=248;rows=31;faults=0;finished=0;fault=0;schema=saved_at: uint64; 2025-06-25T15:07:54.180450Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:274;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:07:54.180545Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:07:54.180708Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:07:54.180862Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:103;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-25T15:07:54.180951Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:192;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:07:54.181033Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:197;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:07:54.181074Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:414: Scan [5:735:2712] finished for 
tablet 9437184 2025-06-25T15:07:54.181732Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:420;event=scan_finish;compute_actor_id=[5:734:2711];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.007},{"events":["f_processing","f_task_result"],"t":0.01},{"events":["f_ack","l_task_result"],"t":0.049},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.055}],"full":{"a":1750864074125976,"name":"_full_task","f":1750864074125976,"d_finished":0,"c":0,"l":1750864074181155,"d":55179},"events":[{"name":"bootstrap","f":1750864074126309,"d_finished":7623,"c":1,"l":1750864074133932,"d":7623},{"a":1750864074180845,"name":"ack","f":1750864074175782,"d_finished":4561,"c":4,"l":1750864074180749,"d":4871},{"a":1750864074180830,"name":"processing","f":1750864074135994,"d_finished":31995,"c":36,"l":1750864074180752,"d":32320},{"name":"ProduceResults","f":1750864074129481,"d_finished":9860,"c":42,"l":1750864074181054,"d":9860},{"a":1750864074181057,"name":"Finish","f":1750864074181057,"d_finished":0,"c":0,"l":1750864074181155,"d":98},{"name":"task_result","f":1750864074136030,"d_finished":26839,"c":32,"l":1750864074175526,"d":26839}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:07:54.181845Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:370;event=send_data;compute_actor_id=[5:734:2711];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-25T15:07:54.182420Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:375;event=scan_finished;compute_actor_id=[5:734:2711];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.003},{"events":["l_bootstrap"],"t":0.007},{"events":["f_processing","f_task_result"],"t":0.01},{"events":["f_ack","l_task_result"],"t":0.049},{"events":["l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish"],"t":0.055}],"full":{"a":1750864074125976,"name":"_full_task","f":1750864074125976,"d_finished":0,"c":0,"l":1750864074181907,"d":55931},"events":[{"name":"bootstrap","f":1750864074126309,"d_finished":7623,"c":1,"l":1750864074133932,"d":7623},{"a":1750864074180845,"name":"ack","f":1750864074175782,"d_finished":4561,"c":4,"l":1750864074180749,"d":5623},{"a":1750864074180830,"name":"processing","f":1750864074135994,"d_finished":31995,"c":36,"l":1750864074180752,"d":33072},{"name":"ProduceResults","f":1750864074129481,"d_finished":9860,"c":42,"l":1750864074181054,"d":9860},{"a":1750864074181057,"name":"Finish","f":1750864074181057,"d_finished":0,"c":0,"l":1750864074181907,"d":850},{"name":"task_result","f":1750864074136030,"d_finished":26839,"c":32,"l":1750864074175526,"d":26839}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-25T15:07:54.182522Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-25T15:07:54.122528Z;index_granules=0;index_portions=4;index_batches=13;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=71800;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=71800;selected_rows=0; 2025-06-25T15:07:54.182583Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:192;event=scan_aborted;reason=unexpected on destructor; 2025-06-25T15:07:54.183041Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:735:2712];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;; >> THiveTest::TestCreateSubHiveCreateManyTablets [GOOD] >> THiveTest::TestCreateSubHiveCreateManyTabletsWithReboots >> TStorageBalanceTest::TestScenario2 [GOOD] >> TStorageBalanceTest::TestScenario3 |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-std] |94.6%| [TM] {asan, 
default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v1] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-std] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v0] [GOOD] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v1] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v1] [GOOD] >> DataShardTxOrder::RandomDotRanges_DelayRS [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-fifo] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomDotRanges_DelayRS [GOOD] Test command err: 2025-06-25T15:01:47.828824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:01:47.828867Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:01:47.829767Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, 
received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:01:47.839926Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:01:47.840334Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:01:47.840569Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:01:47.849794Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:01:47.888651Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:01:47.888829Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:01:47.890669Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:01:47.890801Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:01:47.890864Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:01:47.891235Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:01:47.891319Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:01:47.891387Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:01:47.960996Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:01:48.003547Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:01:48.003745Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:01:48.003850Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:01:48.003902Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:01:48.003946Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:01:48.003984Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:01:48.004203Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.004263Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.004686Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:01:48.004792Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:01:48.004856Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:48.004895Z node 1 :TX_DATASHARD DEBUG: 
datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:01:48.004940Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:01:48.004976Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:01:48.005023Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:01:48.005059Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:01:48.005102Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:01:48.005206Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:48.005251Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:48.005302Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:01:48.008253Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:01:48.008467Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:01:48.008582Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:01:48.008782Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:01:48.008856Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:01:48.008912Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:01:48.008972Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:01:48.009009Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:01:48.009049Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:01:48.009084Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:48.009386Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:01:48.009422Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:01:48.009465Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 
2025-06-25T15:01:48.009521Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:48.009589Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:01:48.009630Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:01:48.009671Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:01:48.009706Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:48.009734Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:01:48.024723Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:01:48.024794Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:01:48.024838Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:01:48.024894Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:01:48.024982Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:01:48.025513Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:48.025570Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:01:48.025631Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:01:48.025802Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:01:48.025842Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:01:48.025989Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:01:48.026043Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:01:48.026081Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:01:48.074763Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:01:48.078563Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:01:48.078628Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 
2025-06-25T15:01:48.078822Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.078861Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:01:48.078909Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:01:48.078948Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:01:48.078978Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:01:48.079029Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:01:48.079061Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 5Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:26] at 9437184 on unit CompleteOperation 2025-06-25T15:08:01.734720Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 26] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:08:01.831538Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:08:01.831861Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:08:01.831906Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:27] at 9437184 on unit CompleteOperation 2025-06-25T15:08:01.831976Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 27] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:08:01.832022Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:08:01.832264Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:08:01.832301Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:28] at 9437184 on unit CompleteOperation 2025-06-25T15:08:01.832376Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 28] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:08:01.832413Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:08:01.832597Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:08:01.832631Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:29] at 9437184 on unit CompleteOperation 2025-06-25T15:08:01.832689Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 29] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:08:01.832720Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:08:01.832883Z node 32 :TX_DATASHARD DEBUG: 
datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:08:01.832912Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:30] at 9437184 on unit CompleteOperation 2025-06-25T15:08:01.832954Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 30] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:08:01.832984Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:08:01.833196Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:08:01.833227Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:31] at 9437184 on unit CompleteOperation 2025-06-25T15:08:01.833272Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 31] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:08:01.833303Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:08:01.833444Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:08:01.833472Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:32] at 9437184 on unit CompleteOperation 2025-06-25T15:08:01.833511Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 32] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:08:01.833540Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:08:01.833759Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:08:01.833792Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:33] at 9437184 on unit CompleteOperation 2025-06-25T15:08:01.833834Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 33] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:08:01.833867Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:08:01.834035Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:08:01.834064Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:34] at 9437184 on unit CompleteOperation 2025-06-25T15:08:01.834103Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 34] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:08:01.834133Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:08:01.834293Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:08:01.834323Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:35] at 9437184 on unit CompleteOperation 2025-06-25T15:08:01.834362Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 
: 35] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:08:01.834392Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:08:01.834618Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:08:01.834646Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:36] at 9437184 on unit CompleteOperation 2025-06-25T15:08:01.834685Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 36] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:08:01.834718Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:08:01.834900Z node 32 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:08:01.834927Z node 32 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000004:37] at 9437184 on unit CompleteOperation 2025-06-25T15:08:01.834964Z node 32 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000004 : 37] from 9437184 at tablet 9437184 send result to client [32:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:08:01.834991Z node 32 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:08:01.835280Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:238:2229], Recipient [32:350:2315]: {TEvReadSet step# 1000004 txid# 36 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 32} 2025-06-25T15:08:01.835322Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:08:01.835357Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 36 2025-06-25T15:08:01.835490Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:238:2229], Recipient [32:350:2315]: {TEvReadSet step# 1000004 txid# 5 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 2} 2025-06-25T15:08:01.932422Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:08:01.932510Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 5 2025-06-25T15:08:01.932752Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:238:2229], Recipient [32:350:2315]: {TEvReadSet step# 1000004 txid# 6 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 3} 2025-06-25T15:08:01.932789Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:08:01.932825Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 6 2025-06-25T15:08:01.932911Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:238:2229], Recipient [32:350:2315]: {TEvReadSet step# 1000004 txid# 8 TabletSource# 9437185 
TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 5} 2025-06-25T15:08:01.932955Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:08:01.932988Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 8 2025-06-25T15:08:01.933075Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:238:2229], Recipient [32:350:2315]: {TEvReadSet step# 1000004 txid# 9 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 6} 2025-06-25T15:08:01.933108Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:08:01.933136Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 9 2025-06-25T15:08:01.933223Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:238:2229], Recipient [32:350:2315]: {TEvReadSet step# 1000004 txid# 10 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-25T15:08:01.933253Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:08:01.933283Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 10 2025-06-25T15:08:01.933366Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [32:238:2229], Recipient [32:350:2315]: {TEvReadSet step# 1000004 txid# 12 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-25T15:08:01.933396Z node 32 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:08:01.933425Z node 32 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 12 expect 30 31 31 30 31 30 29 25 31 30 22 31 11 28 31 17 28 31 31 31 30 20 22 30 25 20 30 30 30 - 8 - actual 30 31 31 30 31 30 29 25 31 30 22 31 11 28 31 17 28 31 31 31 30 20 22 30 25 20 30 30 30 - 8 - interm 6 6 5 4 6 6 5 4 - 6 - 1 4 4 2 4 4 2 - 1 4 - 4 - 1 4 - - - - - - |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v1] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v0] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v0] [GOOD] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-content_based] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-std] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_8_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 8] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_18_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 18] >> ImportBigEncryptedFileTest::ImportBigEncryptedFile [FAIL] >> test_retry_high_rate.py::TestRetry::test_high_rate[kikimr0] >> test_cp_ic.py::TestCpIc::test_discovery >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] [GOOD] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-fifo] [GOOD] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v0] [GOOD] >> ListObjectsInS3Export::ExportWithSchemaMapping >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v1] [GOOD] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-fifo] >> test_dispatch.py::TestMapping::test_mapping >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-std] >> alter_compression.py::TestAllCompression::test_all_supported_compression[lz4_compression-COMPRESSION = "lz4"] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] >> test_cp_ic.py::TestCpIc::test_discovery [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-std] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_20_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 20] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_2_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 2] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v0] [GOOD] |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] >> ListObjectsInS3Export::ExportWithSchemaMapping [GOOD] >> TColumnShardTestSchema::RebootForgetAfterFail [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::RebootForgetAfterFail [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=150864636.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130864636.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=130863436.000000s;Name=cold;Codec=};};TTL={Column=timestamp;EvictAfter=0.000000s;Name=;Codec=}; 2025-06-25T15:07:18.293259Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:07:18.324996Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:07:18.325280Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:07:18.332453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:07:18.332694Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:07:18.332924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:07:18.333059Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:07:18.333176Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:07:18.333286Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:07:18.333402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:07:18.333514Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:07:18.333629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:07:18.333739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:07:18.333855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:07:18.369270Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:07:18.369434Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:07:18.369490Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:07:18.369662Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:07:18.369836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:07:18.369929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:07:18.369975Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:07:18.370066Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:07:18.370131Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:07:18.370183Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:07:18.370244Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:07:18.370419Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:07:18.370485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:07:18.370529Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:07:18.370560Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:07:18.370645Z node 1 :TX_COLUMNSHARD INFO: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:07:18.370714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:07:18.370771Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:07:18.370805Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:07:18.370856Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:07:18.370893Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:07:18.370923Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:07:18.371146Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:07:18.371192Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:07:18.371229Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:07:18.371441Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:07:18.371495Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:07:18.371533Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:07:18.371660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:07:18.371703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:07:18.371736Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:07:18.371825Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:07:18.371903Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:07:18.371944Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:07:18.371976Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:07:18.372188Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=50; 2025-06-25T15:07:18.372280Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=45; 2025-06-25T15:07:18.372831Z node 1 :TX_COLUMNSHA ... ize=36951;sum=965896;count=36;size_of_portion=208; 2025-06-25T15:08:27.174834Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=10132; 2025-06-25T15:08:27.174899Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=11; 2025-06-25T15:08:27.175147Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=203; 2025-06-25T15:08:27.175186Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=10597; 2025-06-25T15:08:27.175227Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=10708; 2025-06-25T15:08:27.175283Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=10; 2025-06-25T15:08:27.175480Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=158; 2025-06-25T15:08:27.175511Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=11384; 2025-06-25T15:08:27.175638Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=83; 2025-06-25T15:08:27.175733Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=58; 2025-06-25T15:08:27.175843Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=71; 2025-06-25T15:08:27.175932Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=56; 2025-06-25T15:08:27.179070Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=3085; 2025-06-25T15:08:27.182582Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=3427; 2025-06-25T15:08:27.182672Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=11; 2025-06-25T15:08:27.182722Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=11; 2025-06-25T15:08:27.182762Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=6; 2025-06-25T15:08:27.182829Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=36; 2025-06-25T15:08:27.182870Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=6; 2025-06-25T15:08:27.182946Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=44; 2025-06-25T15:08:27.182988Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=6; 2025-06-25T15:08:27.183052Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=30; 2025-06-25T15:08:27.183132Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=44; 
2025-06-25T15:08:27.183361Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=189; 2025-06-25T15:08:27.183403Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=25632; 2025-06-25T15:08:27.183528Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} inactive {blob_bytes=21099992;raw_bytes=29608900;count=3;records=320000} evicted {blob_bytes=10565848;raw_bytes=16084450;count=1;records=160000} at tablet 9437184 2025-06-25T15:08:27.183632Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:08:27.183678Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:08:27.183738Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:08:27.192853Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=1; 2025-06-25T15:08:27.193025Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:08:27.193109Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-25T15:08:27.193174Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863794840;tx_id=18446744073709551615;;current_snapshot_ts=1750864039445; 2025-06-25T15:08:27.193212Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:08:27.193278Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:08:27.193319Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:08:27.193403Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:08:27.194756Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:08:27.198192Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:08:27.198239Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:08:27.198267Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:08:27.198309Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:08:27.198409Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=2; 2025-06-25T15:08:27.198482Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863794840;tx_id=18446744073709551615;;current_snapshot_ts=1750864039445; 2025-06-25T15:08:27.198535Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=2;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:08:27.198584Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:08:27.198620Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:08:27.198713Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=3;tablet_id=9437184;self_id=[1:1701:3529];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/cold' stopped at tablet 9437184 160000/10565848 160000/10565848 0/0 160000/10565848 |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] >> ListObjectsInS3Export::ExportWithoutSchemaMapping |94.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/py3test >> test_cp_ic.py::TestCpIc::test_discovery [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] |94.6%| [TM] {asan, default-linux-x86_64, 
release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-std] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] |94.6%| [TA] $(B)/ydb/core/tx/columnshard/ut_schema/test-results/unittest/{meta.json ... results_accumulator.log} |94.7%| [TA] {RESULT} $(B)/ydb/core/tx/columnshard/ut_schema/test-results/unittest/{meta.json ... results_accumulator.log} >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_4_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 4] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_compression-COMPRESSION = "zstd"] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_12_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 12] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-std] [GOOD] >> ListObjectsInS3Export::ExportWithoutSchemaMapping [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] >> ListObjectsInS3Export::ExportWithEncryption |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v1] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] >> ListObjectsInS3Export::ExportWithEncryption [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_15_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 15] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] >> ListObjectsInS3Export::ExportWithWrongEncryptionKey >> test_retry.py::TestRetry::test_fail_first[kikimr0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_6_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 6] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] >> ListObjectsInS3Export::ExportWithWrongEncryptionKey [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsMultipleColumns [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithCompactingMiddleRows >> ListObjectsInS3Export::PagingParameters >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] [GOOD] >> 
test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKString [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKString [GOOD] Test command err: 2025-06-25T15:05:18.668453Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:18.685485Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:18.685663Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:18.690617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:18.690767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:18.690925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:18.690985Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:18.691054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:18.691120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:18.691210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:18.691284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:18.691348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:18.691414Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:18.691480Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:18.708263Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:18.708401Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:18.708460Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:18.708581Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:18.708721Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:18.708768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:18.708793Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:18.708857Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:18.708899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:18.708922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:18.708953Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:18.709063Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:18.709130Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:18.709163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:18.709182Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:18.709244Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:18.709319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:18.709344Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:18.709363Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:18.709387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:18.709416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:18.709439Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:18.709567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:18.709591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:18.709606Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:18.709715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:18.709744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:18.709779Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:18.709890Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:18.709917Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:18.709937Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:18.709980Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:18.710014Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:18.710035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:18.710053Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:18.710188Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=26; 2025-06-25T15:05:18.710245Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=30; 2025-06-25T15:05:18.710299Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=29; 2025-06-25T15:05:18.710353Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=30; 2025-06-25T15:05:18.710414Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:18.710472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:18.710518Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:18.710559Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... 
oad_stage_name=EXECUTE:granule/portions;fline=constructor_meta.cpp:71;memory_size=2374;data_size=2365;sum=265594;count=510;size_of_meta=136; 2025-06-25T15:09:15.739315Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2446;data_size=2437;sum=283954;count=255;size_of_portion=208; 2025-06-25T15:09:15.739628Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=38256; 2025-06-25T15:09:15.739727Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=14; 2025-06-25T15:09:15.740497Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=704; 2025-06-25T15:09:15.740596Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=39505; 2025-06-25T15:09:15.740646Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=39726; 2025-06-25T15:09:15.740720Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=14; 2025-06-25T15:09:15.740965Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=197; 2025-06-25T15:09:15.741017Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=41049; 2025-06-25T15:09:15.741230Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=146; 2025-06-25T15:09:15.741385Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=99; 2025-06-25T15:09:15.741590Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=156; 2025-06-25T15:09:15.741807Z node 1 
:TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=158; 2025-06-25T15:09:15.744839Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2962; 2025-06-25T15:09:15.748138Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=3206; 2025-06-25T15:09:15.748236Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=13; 2025-06-25T15:09:15.748334Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=14; 2025-06-25T15:09:15.748446Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=19; 2025-06-25T15:09:15.748574Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=62; 2025-06-25T15:09:15.748651Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=17; 2025-06-25T15:09:15.748813Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=90; 2025-06-25T15:09:15.748886Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=8; 2025-06-25T15:09:15.748982Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=43; 2025-06-25T15:09:15.749118Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=73; 2025-06-25T15:09:15.749235Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=58; 2025-06-25T15:09:15.749289Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=63705; 2025-06-25T15:09:15.749501Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=119665944;raw_bytes=192854450;count=5;records=1855000} inactive {blob_bytes=632703072;raw_bytes=989320282;count=54;records=9818750} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:09:15.749642Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 
2025-06-25T15:09:15.749746Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:09:15.749846Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:09:15.749925Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:09:15.750119Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:09:15.750230Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:09:15.750328Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863621753;tx_id=18446744073709551615;;current_snapshot_ts=1750863919673; 2025-06-25T15:09:15.750407Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:09:15.750491Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:09:15.750558Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:09:15.750677Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:09:15.754488Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 2025-06-25T15:09:15.754742Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:09:15.754790Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 
2025-06-25T15:09:15.754827Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:09:15.754904Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:09:15.755048Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:09:15.755156Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863621753;tx_id=18446744073709551615;;current_snapshot_ts=1750863919673; 2025-06-25T15:09:15.755223Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:09:15.755306Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:09:15.755371Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:09:15.755490Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.999000s; 2025-06-25T15:09:15.755567Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3135:5090];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] >> test_retry_high_rate.py::TestRetry::test_high_rate[kikimr0] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> test_retry.py::TestRetry::test_fail_first[kikimr0] [GOOD] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] [GOOD] >> test_dispatch.py::TestMapping::test_mapping [GOOD] >> test_retry.py::TestRetry::test_low_rate[kikimr0] >> 
test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] |94.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/py3test >> test_retry_high_rate.py::TestRetry::test_high_rate[kikimr0] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] >> test_dispatch.py::TestMapping::test_idle ------- [TM] {asan, default-linux-x86_64, release} ydb/services/ydb/backup_ut/unittest >> ListObjectsInS3Export::PagingParameters 2025-06-25 15:09:22,667 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 15:09:23,044 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 672418 49.8M 49.6M 26.6M test_tool run_ut @/home/runner/.ya/build/build_root/yft8/0014ab/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args 674122 2.9G 2.9G 2.5G └─ ydb-services-ydb-backup_ut --trace-path-append /home/runner/.ya/build/build_root/yft8/0014ab/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/chunk9/yt Test command err: 2025-06-25T14:59:27.275716Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519901764165643129:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-25T14:59:27.275779Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0014ab/r3tmp/tmpMYlFCN/pdisk_1.dat 2025-06-25T14:59:27.692765Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T14:59:27.706519Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T14:59:27.706611Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T14:59:27.714325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7258, node 1 2025-06-25T14:59:27.860917Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T14:59:27.860945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T14:59:27.860953Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T14:59:27.861082Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1406 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T14:59:28.255979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T14:59:28.294778Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T14:59:28.664572Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519901764165643427:2200]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:28.664614Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:28.664684Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519901764165643427:2200], Recipient [1:7519901764165643427:2200]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:28.664700Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:29.664823Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7519901764165643427:2200]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:29.664851Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T14:59:29.664882Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [1:7519901764165643427:2200], Recipient [1:7519901764165643427:2200]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:29.664890Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T14:59:29.834789Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901772755578702:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.834789Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519901772755578691:2297], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.834887Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T14:59:29.835049Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7519901764165643317:2143] Handle TEvProposeTransaction 2025-06-25T14:59:29.835067Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7519901764165643317:2143] TxId# 281474976710658 ProcessProposeTransaction 2025-06-25T14:59:29.835099Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7519901764165643317:2143] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7519901772755578706:2620] 2025-06-25T14:59:29.877506Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:7519901772755578706:2620] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-25T14:59:29.877558Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:7519901772755578706:2620] txid# 281474976710658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T14:59:29.877578Z node 1 :TX_PROXY DEBUG: schemereq.cpp:585: Actor# [1:7519901772755578706:2620] txid# 281474976710658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-25T14:59:29.878755Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:7519901772755578706:2620] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T14:59:29.878815Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:7519901772755578706:2620] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-25T14:59:29.879036Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:7519901772755578706:2620] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T14:59:29.879215Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:7519901772755578706:2620] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T14:59:29.879293Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7519901772755578706:2620] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-25T14:59:29.879444Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:7519901772755578706:2620] txid# 281474976710658 HANDLE EvClientConnected 2025-06-25T14:59:29.879469Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4955: StateWork, received event# 269877761, Sender [1:7519901772755578731:2626], Recipient [1:7519901764165643427:2200]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T14:59:29.879487Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5052: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T14:59:29.879510Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5837: Pipe server connected, at tablet: 72057594046644480 2025-06-25T14:59:29.879548Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271122432, Sender [1:7519901772755578706:2620], Recipient [1:7519901764165643427:2200]: {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-25T14:59:29.879568Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-25T14:59:29.881215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: ".metadata/workload_manager/pools/default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } TxId: 281474976710658 TabletId: 72057594046644480 Owner: "metadata@system" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T14:59:29.881505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-25T14:59:29.881620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:347: AttachChild: child attached as only one child to the parent, parent id: [ ... 
11847"},"Folder3/Subfolder1/Object11848":{"exportPrefix":"Prefix11848"},"Folder4/Subfolder2/Object11849":{"exportPrefix":"Prefix11849"},"Folder0/Subfolder0/Object11850":{"exportPrefix":"Prefix11850"},"Folder1/Subfolder1/Object11851":{"exportPrefix":"Prefix11851"},"Folder2/Subfolder2/Object11852":{"exportPrefix":"Prefix11852"},"Folder3/Subfolder0/Object11853":{"exportPrefix":"Prefix11853"},"Folder4/Subfolder1/Object11854":{"exportPrefix":"Prefix11854"},"Folder0/Subfolder2/Object11855":{"exportPrefix":"Prefix11855"},"Folder1/Subfolder0/Object11856":{"exportPrefix":"Prefix11856"},"Folder2/Subfolder1/Object11857":{"exportPrefix":"Prefix11857"},"Folder3/Subfolder2/Object11858":{"exportPrefix":"Prefix11858"},"Folder4/Subfolder0/Object11859":{"exportPrefix":"Prefix11859"},"Folder0/Subfolder1/Object11860":{"exportPrefix":"Prefix11860"},"Folder1/Subfolder2/Object11861":{"exportPrefix":"Prefix11861"},"Folder2/Subfolder0/Object11862":{"exportPrefix":"Prefix11862"},"Folder3/Subfolder1/Object11863":{"exportPrefix":"Prefix11863"},"Folder4/Subfolder2/Object11864":{"exportPrefix":"Prefix11864"},"Folder0/Subfolder0/Object11865":{"exportPrefix":"Prefix11865"},"Folder1/Subfolder1/Object11866":{"exportPrefix":"Prefix11866"},"Folder2/Subfolder2/Object11867":{"exportPrefix":"Prefix11867"},"Folder3/Subfolder0/Object11868":{"exportPrefix":"Prefix11868"},"Folder4/Subfolder1/Object11869":{"exportPrefix":"Prefix11869"},"Folder0/Subfolder2/Object11870":{"exportPrefix":"Prefix11870"},"Folder1/Subfolder0/Object11871":{"exportPrefix":"Prefix11871"},"Folder2/Subfolder1/Object11872":{"exportPrefix":"Prefix11872"},"Folder3/Subfolder2/Object11873":{"exportPrefix":"Prefix11873"},"Folder4/Subfolder0/Object11874":{"exportPrefix":"Prefix11874"},"Folder0/Subfolder1/Object11875":{"exportPrefix":"Prefix11875"},"Folder1/Subfolder2/Object11876":{"exportPrefix":"Prefix11876"},"Folder2/Subfolder0/Object11877":{"exportPrefix":"Prefix11877"},"Folder3/Subfolder1/Object11878":{"exportPrefix":"Prefix11878"},"Folder4/Subfolder2/Object11879":{"exportPrefix":"Prefix11879"},"Folder0/Subfolder0/Object11880":{"exportPrefix":"Prefix11880"},"Folder1/Subfolder1/Object11881":{"exportPrefix":"Prefix11881"},"Folder2/Subfolder2/Object11882":{"exportPrefix":"Prefix11882"},"Folder3/Subfolder0/Object11883":{"exportPrefix":"Prefix11883"},"Folder4/Subfolder1/Object11884":{"exportPrefix":"Prefix11884"},"Folder0/Subfolder2/Object11885":{"exportPrefix":"Prefix11885"},"Folder1/Subfolder0/Object11886":{"exportPrefix":"Prefix11886"},"Folder2/Subfolder1/Object11887":{"exportPrefix":"Prefix11887"},"Folder3/Subfolder2/Object11888":{"exportPrefix":"Prefix11888"},"Folder4/Subfolder0/Object11889":{"exportPrefix":"Prefix11889"},"Folder0/Subfolder1/Object11890":{"exportPrefix":"Prefix11890"},"Folder1/Subfolder2/Object11891":{"exportPrefix":"Prefix11891"},"Folder2/Subfolder0/Object11892":{"exportPrefix":"Prefix11892"},"Folder3/Subfolder1/Object11893":{"exportPrefix":"Prefix11893"},"Folder4/Subfolder2/Object11894":{"exportPrefix":"Prefix11894"},"Folder0/Subfolder0/Object11895":{"exportPrefix":"Prefix11895"},"Folder1/Subfolder1/Object11896":{"exportPrefix":"Prefix11896"},"Folder2/Subfolder2/Object11897":{"exportPrefix":"Prefix11897"},"Folder3/Subfolder0/Object11898":{"exportPrefix":"Prefix11898"},"Folder4/Subfolder1/Object11899":{"exportPrefix":"Prefix11899"},"Folder0/Subfolder2/Object11900":{"exportPrefix":"Prefix11900"},"Folder1/Subfolder0/Object11901":{"exportPrefix":"Prefix11901"},"Folder2/Subfolder1/Object11902":{"exportPrefix":"Prefix11902"},"Folder3/Subfolder2/Object1
1903":{"exportPrefix":"Prefix11903"},"Folder4/Subfolder0/Object11904":{"exportPrefix":"Prefix11904"},"Folder0/Subfolder1/Object11905":{"exportPrefix":"Prefix11905"},"Folder1/Subfolder2/Object11906":{"exportPrefix":"Prefix11906"},"Folder2/Subfolder0/Object11907":{"exportPrefix":"Prefix11907"},"Folder3/Subfolder1/Object11908":{"exportPrefix":"Prefix11908"},"Folder4/Subfolder2/Object11909":{"exportPrefix":"Prefix11909"},"Folder0/Subfolder0/Object11910":{"exportPrefix":"Prefix11910"},"Folder1/Subfolder1/Object11911":{"exportPrefix":"Prefix11911"},"Folder2/Subfolder2/Object11912":{"exportPrefix":"Prefix11912"},"Folder3/Subfolder0/Object11913":{"exportPrefix":"Prefix11913"},"Folder4/Subfolder1/Object11914":{"exportPrefix":"Prefix11914"},"Folder0/Subfolder2/Object11915":{"exportPrefix":"Prefix11915"},"Folder1/Subfolder0/Object11916":{"exportPrefix":"Prefix11916"},"Folder2/Subfolder1/Object11917":{"exportPrefix":"Prefix11917"},"Folder3/Subfolder2/Object11918":{"exportPrefix":"Prefix11918"},"Folder4/Subfolder0/Object11919":{"exportPrefix":"Prefix11919"},"Folder0/Subfolder1/Object11920":{"exportPrefix":"Prefix11920"},"Folder1/Subfolder2/Object11921":{"exportPrefix":"Prefix11921"},"Folder2/Subfolder0/Object11922":{"exportPrefix":"Prefix11922"},"Folder3/Subfolder1/Object11923":{"exportPrefix":"Prefix11923"},"Folder4/Subfolder2/Object11924":{"exportPrefix":"Prefix11924"},"Folder0/Subfolder0/Object11925":{"exportPrefix":"Prefix11925"},"Folder1/Subfolder1/Object11926":{"exportPrefix":"Prefix11926"},"Folder2/Subfolder2/Object11927":{"exportPrefix":"Prefix11927"},"Folder3/Subfolder0/Object11928":{"exportPrefix":"Prefix11928"},"Folder4/Subfolder1/Object11929":{"exportPrefix":"Prefix11929"},"Folder0/Subfolder2/Object11930":{"exportPrefix":"Prefix11930"},"Folder1/Subfolder0/Object11931":{"exportPrefix":"Prefix11931"},"Folder2/Subfolder1/Object11932":{"exportPrefix":"Prefix11932"},"Folder3/Subfolder2/Object11933":{"exportPrefix":"Prefix11933"},"Folder4/Subfolder0/Object11934":{"exportPrefix":"Prefix11934"},"Folder0/Subfolder1/Object11935":{"exportPrefix":"Prefix11935"},"Folder1/Subfolder2/Object11936":{"exportPrefix":"Prefix11936"},"Folder2/Subfolder0/Object11937":{"exportPrefix":"Prefix11937"},"Folder3/Subfolder1/Object11938":{"exportPrefix":"Prefix11938"},"Folder4/Subfolder2/Object11939":{"exportPrefix":"Prefix11939"},"Folder0/Subfolder0/Object11940":{"exportPrefix":"Prefix11940"},"Folder1/Subfolder1/Object11941":{"exportPrefix":"Prefix11941"},"Folder2/Subfolder2/Object11942":{"exportPrefix":"Prefix11942"},"Folder3/Subfolder0/Object11943":{"exportPrefix":"Prefix11943"},"Folder4/Subfolder1/Object11944":{"exportPrefix":"Prefix11944"},"Folder0/Subfolder2/Object11945":{"exportPrefix":"Prefix11945"},"Folder1/Subfolder0/Object11946":{"exportPrefix":"Prefix11946"},"Folder2/Subfolder1/Object11947":{"exportPrefix":"Prefix11947"},"Folder3/Subfolder2/Object11948":{"exportPrefix":"Prefix11948"},"Folder4/Subfolder0/Object11949":{"exportPrefix":"Prefix11949"},"Folder0/Subfolder1/Object11950":{"exportPrefix":"Prefix11950"},"Folder1/Subfolder2/Object11951":{"exportPrefix":"Prefix11951"},"Folder2/Subfolder0/Object11952":{"exportPrefix":"Prefix11952"},"Folder3/Subfolder1/Object11953":{"exportPrefix":"Prefix11953"},"Folder4/Subfolder2/Object11954":{"exportPrefix":"Prefix11954"},"Folder0/Subfolder0/Object11955":{"exportPrefix":"Prefix11955"},"Folder1/Subfolder1/Object11956":{"exportPrefix":"Prefix11956"},"Folder2/Subfolder2/Object11957":{"exportPrefix":"Prefix11957"},"Folder3/Subfolder0/Object11958":{"exportPrefix":"Prefix11958"
},"Folder4/Subfolder1/Object11959":{"exportPrefix":"Prefix11959"},"Folder0/Subfolder2/Object11960":{"exportPrefix":"Prefix11960"},"Folder1/Subfolder0/Object11961":{"exportPrefix":"Prefix11961"},"Folder2/Subfolder1/Object11962":{"exportPrefix":"Prefix11962"},"Folder3/Subfolder2/Object11963":{"exportPrefix":"Prefix11963"},"Folder4/Subfolder0/Object11964":{"exportPrefix":"Prefix11964"},"Folder0/Subfolder1/Object11965":{"exportPrefix":"Prefix11965"},"Folder1/Subfolder2/Object11966":{"exportPrefix":"Prefix11966"},"Folder2/Subfolder0/Object11967":{"exportPrefix":"Prefix11967"},"Folder3/Subfolder1/Object11968":{"exportPrefix":"Prefix11968"},"Folder4/Subfolder2/Object11969":{"exportPrefix":"Prefix11969"},"Folder0/Subfolder0/Object11970":{"exportPrefix":"Prefix11970"},"Folder1/Subfolder1/Object11971":{"exportPrefix":"Prefix11971"},"Folder2/Subfolder2/Object11972":{"exportPrefix":"Prefix11972"},"Folder3/Subfolder0/Object11973":{"exportPrefix":"Prefix11973"},"Folder4/Subfolder1/Object11974":{"exportPrefix":"Prefix11974"},"Folder0/Subfolder2/Object11975":{"exportPrefix":"Prefix11975"},"Folder1/Subfolder0/Object11976":{"exportPrefix":"Prefix11976"},"Folder2/Subfolder1/Object11977":{"exportPrefix":"Prefix11977"},"Folder3/Subfolder2/Object11978":{"exportPrefix":"Prefix11978"},"Folder4/Subfolder0/Object11979":{"exportPrefix":"Prefix11979"},"Folder0/Subfolder1/Object11980":{"exportPrefix":"Prefix11980"},"Folder1/Subfolder2/Object11981":{"exportPrefix":"Prefix11981"},"Folder2/Subfolder0/Object11982":{"exportPrefix":"Prefix11982"},"Folder3/Subfolder1/Object11983":{"exportPrefix":"Prefix11983"},"Folder4/Subfolder2/Object11984":{"exportPrefix":"Prefix11984"},"Folder0/Subfolder0/Object11985":{"exportPrefix":"Prefix11985"},"Folder1/Subfolder1/Object11986":{"exportPrefix":"Prefix11986"},"Folder2/Subfolder2/Object11987":{"exportPrefix":"Prefix11987"},"Folder3/Subfolder0/Object11988":{"exportPrefix":"Prefix11988"},"Folder4/Subfolder1/Object11989":{"exportPrefix":"Prefix11989"},"Folder0/Subfolder2/Object11990":{"exportPrefix":"Prefix11990"},"Folder1/Subfolder0/Object11991":{"exportPrefix":"Prefix11991"},"Folder2/Subfolder1/Object11992":{"exportPrefix":"Prefix11992"},"Folder3/Subfolder2/Object11993":{"exportPrefix":"Prefix11993"},"Folder4/Subfolder0/Object11994":{"exportPrefix":"Prefix11994"},"Folder0/Subfolder1/Object11995":{"exportPrefix":"Prefix11995"},"Folder1/Subfolder2/Object11996":{"exportPrefix":"Prefix11996"},"Folder2/Subfolder0/Object11997":{"exportPrefix":"Prefix11997"},"Folder3/Subfolder1/Object11998":{"exportPrefix":"Prefix11998"},"Folder4/Subfolder2/Object11999":{"exportPrefix":"Prefix11999"}}} 2025-06-25T15:09:22.645113Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271125000, Sender [0:0:0], Recipient [55:7519904228611570869:2197]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T15:09:22.645175Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4963: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-25T15:09:22.645241Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4955: StateWork, received event# 271124999, Sender [55:7519904228611570869:2197], Recipient [55:7519904228611570869:2197]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-25T15:09:22.645265Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4962: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime Traceback (most recent call last): File 
"library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/0014ab/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/0014ab/ydb/services/ydb/backup_ut/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> THeavyPerfTest::TTestLoadEverything [GOOD] >> THiveImplTest::BootQueueSpeed |94.7%| [TA] $(B)/ydb/services/ydb/backup_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TStorageBalanceTest::TestScenario3 [GOOD] |94.7%| [TA] {RESULT} $(B)/ydb/services/ydb/backup_ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> TStorageBalanceTest::TestScenario3 [GOOD] Test command err: c[def1] ---------------------------------------------------------------------------------------------------- (0) 2025-06-25T15:06:21.324677Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:21.349396Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:21.349648Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:21.350533Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:21.350806Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-25T15:06:21.351784Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done 
VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-25T15:06:21.351841Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:21.352642Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:30:2076] ControllerId# 72057594037932033 2025-06-25T15:06:21.352676Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:21.352794Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:21.352894Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:21.364578Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:21.364626Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:21.366719Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:38:2081] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:21.366888Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:39:2082] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:21.367031Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:40:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:21.367207Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:41:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:21.367333Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:42:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:21.367440Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:43:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:21.367582Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:44:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:21.367608Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T15:06:21.367678Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:30:2076] 2025-06-25T15:06:21.367711Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:30:2076] 2025-06-25T15:06:21.367747Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T15:06:21.367781Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:06:21.368378Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:06:21.368597Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:21.383463Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:30:2076] 2025-06-25T15:06:21.383530Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:21.383560Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:21.383626Z node 1 :BS_NODE DEBUG: 
{NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:21.383656Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-25T15:06:21.387964Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-25T15:06:21.389407Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-25T15:06:21.389921Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:06:21.390165Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:30:2076] 2025-06-25T15:06:21.390207Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:21.390399Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-25T15:06:21.390452Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-25T15:06:21.390495Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-25T15:06:21.390521Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-25T15:06:21.390556Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:21.390682Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:34:2063] 2025-06-25T15:06:21.390723Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:34:2063] 2025-06-25T15:06:21.391061Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-25T15:06:21.391095Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:21.391209Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\363\365\\\016\336\205\240m2\241c\3010\003\261\342\227\n\267}" } 2025-06-25T15:06:21.391349Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:21.391443Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:21.391518Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:21.391699Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true 
Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:21.391833Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-25T15:06:21.391871Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-25T15:06:21.391998Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:30:2076] 2025-06-25T15:06:21.392053Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:30:2076] 2025-06-25T15:06:21.392093Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-25T15:06:21.392133Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-25T15:06:21.395998Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-25T15:06:21.396073Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-25T15:06:21.396105Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 2146435075 Sender# [1:48:2091] SessionId# [0:0:0] Cookie# 0 2025-06-25T15:06:21.396140Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.012400s 2025-06-25T15:06:21.396471Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:06:21.397161Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-25T15:06:21.397236Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-25T15:06:21.397269Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 2} 2025-06-25T15:06:21.397300Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:21.397374Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeList ... 
odeId# 12 } TEvVPutResult{ TimestampMs# 4.506 VDiskId# [0:1:0:0:0] NodeId# 12 Status# OK } ] } 2025-06-25T15:09:38.273616Z node 12 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:487:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-25T15:09:38.274135Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:488} commited cookie 1 for step 487 2025-06-25T15:09:38.275423Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:488} Tx{1477, NKikimr::NHive::TTxReassignGroups} queued, type NKikimr::NHive::TTxReassignGroups 2025-06-25T15:09:38.275468Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:488} Tx{1477, NKikimr::NHive::TTxReassignGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:09:38.275648Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:488} Tx{1477, NKikimr::NHive::TTxReassignGroups} hope 1 -> done Change{986, redo 303b alter 0b annex 0, ~{ 1, 2 } -{ }, 0 gb} 2025-06-25T15:09:38.275681Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:488} Tx{1477, NKikimr::NHive::TTxReassignGroups} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:09:38.275762Z node 12 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037932033] send [12:1367:2260] 2025-06-25T15:09:38.275805Z node 12 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037932033] push event to server [12:1367:2260] 2025-06-25T15:09:38.275841Z node 12 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037932033] HandleSend Sender# [12:1301:2222] EventType# 268637702 c[def1] *****----------------------------------------------------------------------------------------------- (0.05) *****----------------------------------------------------------------------------------------------- (0.054) ******---------------------------------------------------------------------------------------------- (0.062) *****----------------------------------------------------------------------------------------------- (0.052) *****----------------------------------------------------------------------------------------------- (0.05) ******---------------------------------------------------------------------------------------------- (0.06) *****----------------------------------------------------------------------------------------------- (0.05) ******---------------------------------------------------------------------------------------------- (0.056) ******---------------------------------------------------------------------------------------------- (0.064) *****----------------------------------------------------------------------------------------------- (0.05) *****----------------------------------------------------------------------------------------------- (0.052) 2025-06-25T15:09:38.377837Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:489} Tx{1478, NKikimr::NHive::TTxUpdateTabletGroups} queued, type NKikimr::NHive::TTxUpdateTabletGroups 2025-06-25T15:09:38.377906Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:489} Tx{1478, NKikimr::NHive::TTxUpdateTabletGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:09:38.378014Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:272: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923008111616}: tablet 72075186224037972 wasn't changed 2025-06-25T15:09:38.378073Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 
THive::TTxUpdateTabletGroups::Execute{88923008111616}: tablet 72075186224037972 skipped channel 0 2025-06-25T15:09:38.378161Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923008111616}: tablet 72075186224037972 skipped channel 1 2025-06-25T15:09:38.378190Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923008111616}: tablet 72075186224037972 skipped channel 2 2025-06-25T15:09:38.378252Z node 12 :HIVE NOTICE: tx__update_tablet_groups.cpp:326: HIVE#72057594037927937 THive::TTxUpdateTabletGroups{88923008111616}(72075186224037972)::Execute - TryToBoot was not successfull 2025-06-25T15:09:38.378345Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:489} Tx{1478, NKikimr::NHive::TTxUpdateTabletGroups} hope 1 -> done Change{987, redo 257b alter 0b annex 0, ~{ 2, 1 } -{ }, 0 gb} 2025-06-25T15:09:38.378388Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:489} Tx{1478, NKikimr::NHive::TTxUpdateTabletGroups} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:09:38.406328Z node 12 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [10b1fde6d2294410] bootstrap ActorId# [12:11699:4468] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:488:0:0:246:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T15:09:38.406481Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [10b1fde6d2294410] Id# [72057594037927937:2:488:0:0:246:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T15:09:38.406525Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [10b1fde6d2294410] restore Id# [72057594037927937:2:488:0:0:246:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T15:09:38.406587Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [10b1fde6d2294410] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:488:0:0:246:1] Marker# BPG33 2025-06-25T15:09:38.406622Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [10b1fde6d2294410] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:488:0:0:246:1] Marker# BPG32 2025-06-25T15:09:38.406735Z node 12 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [12:444:2090] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:488:0:0:246:1] FDS# 246 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T15:09:38.409813Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [10b1fde6d2294410] received {EvVPutResult Status# OK ID# [72057594037927937:2:488:0:0:246:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 504 } Cost# 81937 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 505 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-25T15:09:38.409926Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [10b1fde6d2294410] Result# TEvPutResult {Id# [72057594037927937:2:488:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-25T15:09:38.409992Z node 12 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [10b1fde6d2294410] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:488:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T15:09:38.410111Z 
node 12 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.725 sample PartId# [72057594037927937:2:488:0:0:246:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 12 } TEvVPutResult{ TimestampMs# 3.835 VDiskId# [0:1:0:0:0] NodeId# 12 Status# OK } ] } 2025-06-25T15:09:38.410891Z node 12 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:488:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-25T15:09:38.411534Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:489} commited cookie 1 for step 488 2025-06-25T15:09:38.412983Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:489} Tx{1479, NKikimr::NHive::TTxReassignGroups} queued, type NKikimr::NHive::TTxReassignGroups 2025-06-25T15:09:38.413047Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:489} Tx{1479, NKikimr::NHive::TTxReassignGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:09:38.413293Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:489} Tx{1479, NKikimr::NHive::TTxReassignGroups} hope 1 -> done Change{988, redo 303b alter 0b annex 0, ~{ 1, 2 } -{ }, 0 gb} 2025-06-25T15:09:38.413345Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:489} Tx{1479, NKikimr::NHive::TTxReassignGroups} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:09:38.413449Z node 12 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037932033] send [12:1367:2260] 2025-06-25T15:09:38.413478Z node 12 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037932033] push event to server [12:1367:2260] 2025-06-25T15:09:38.413524Z node 12 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037932033] HandleSend Sender# [12:1301:2222] EventType# 268637702 c[def1] *****----------------------------------------------------------------------------------------------- (0.05) *****----------------------------------------------------------------------------------------------- (0.054) ******---------------------------------------------------------------------------------------------- (0.062) *****----------------------------------------------------------------------------------------------- (0.052) *****----------------------------------------------------------------------------------------------- (0.05) ******---------------------------------------------------------------------------------------------- (0.06) *****----------------------------------------------------------------------------------------------- (0.05) ******---------------------------------------------------------------------------------------------- (0.056) ******---------------------------------------------------------------------------------------------- (0.064) *****----------------------------------------------------------------------------------------------- (0.05) *****----------------------------------------------------------------------------------------------- (0.052) 2025-06-25T15:09:38.516473Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:490} Tx{1480, NKikimr::NHive::TTxUpdateTabletGroups} queued, type NKikimr::NHive::TTxUpdateTabletGroups 2025-06-25T15:09:38.516544Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:490} Tx{1480, NKikimr::NHive::TTxUpdateTabletGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:09:38.516656Z node 12 :HIVE WARN: 
tx__update_tablet_groups.cpp:272: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923008112064}: tablet 72075186224037982 wasn't changed 2025-06-25T15:09:38.516692Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923008112064}: tablet 72075186224037982 skipped channel 0 2025-06-25T15:09:38.516771Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923008112064}: tablet 72075186224037982 skipped channel 1 2025-06-25T15:09:38.516807Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{88923008112064}: tablet 72075186224037982 skipped channel 2 2025-06-25T15:09:38.516868Z node 12 :HIVE NOTICE: tx__update_tablet_groups.cpp:326: HIVE#72057594037927937 THive::TTxUpdateTabletGroups{88923008112064}(72075186224037982)::Execute - TryToBoot was not successfull 2025-06-25T15:09:38.516955Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:490} Tx{1480, NKikimr::NHive::TTxUpdateTabletGroups} hope 1 -> done Change{989, redo 257b alter 0b annex 0, ~{ 2, 1 } -{ }, 0 gb} 2025-06-25T15:09:38.517012Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:490} Tx{1480, NKikimr::NHive::TTxUpdateTabletGroups} release 4194304b of static, Memory{0 dyn 0} >> THiveImplTest::BootQueueSpeed [GOOD] >> THiveImplTest::BalancerSpeedAndDistribution >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> THiveImplTest::BalancerSpeedAndDistribution [GOOD] >> THiveImplTest::TestShortTabletTypes [GOOD] >> THiveImplTest::TestStDev [GOOD] >> THiveImplTest::BootQueueConfigurePriorities [GOOD] >> THiveTest::TestBlockCreateTablet >> THiveTest::TestBlockCreateTablet [GOOD] >> THiveTest::DrainWithHiveRestart >> alter_compression.py::TestAllCompression::test_all_supported_compression[lz4_compression-COMPRESSION = "lz4"] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_10_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 10] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_8_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 8] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_9_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 9] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKUtf8 [GOOD] >> THiveTest::DrainWithHiveRestart [GOOD] >> THiveTest::TestCheckSubHiveForwarding >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_2_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 2] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_3_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 3] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::CompactionSplitGranuleStrKey_PKUtf8 [GOOD] Test command err: 2025-06-25T15:05:43.150417Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:99;event=initialize_shard;step=OnActivateExecutor; 2025-06-25T15:05:43.168245Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];fline=columnshard.cpp:117;event=initialize_shard;step=initialize_tiring_finished; 2025-06-25T15:05:43.168469Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-25T15:05:43.173284Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:05:43.173461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:05:43.173622Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:05:43.173684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:05:43.173750Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-25T15:05:43.173813Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-25T15:05:43.173898Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-25T15:05:43.173966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-25T15:05:43.174018Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-25T15:05:43.174082Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-25T15:05:43.174147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:128:2158];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-25T15:05:43.190099Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-25T15:05:43.190233Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:132;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=10;current_normalizer=CLASS_NAME=Granules; 2025-06-25T15:05:43.190270Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-25T15:05:43.190395Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:43.190545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-25T15:05:43.190615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-25T15:05:43.190647Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-25T15:05:43.190699Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-25T15:05:43.190739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-25T15:05:43.190763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-25T15:05:43.190803Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-25T15:05:43.190940Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-25T15:05:43.190978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-25T15:05:43.191000Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-25T15:05:43.191022Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-25T15:05:43.191086Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-25T15:05:43.191121Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-25T15:05:43.191147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-25T15:05:43.191170Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-25T15:05:43.191223Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-25T15:05:43.191251Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-25T15:05:43.191273Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-25T15:05:43.191398Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-25T15:05:43.191425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-25T15:05:43.191441Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-25T15:05:43.191553Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-25T15:05:43.191589Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-25T15:05:43.191625Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-25T15:05:43.191748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-25T15:05:43.191781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-25T15:05:43.191797Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-25T15:05:43.191839Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-25T15:05:43.191875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-25T15:05:43.191897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-25T15:05:43.191913Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-25T15:05:43.192039Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=28; 2025-06-25T15:05:43.192104Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=38; 2025-06-25T15:05:43.192168Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TableVersionssLoadingTime=33; 2025-06-25T15:05:43.192216Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetVersionsLoadingTime=25; 2025-06-25T15:05:43.192275Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks_v0_meta.cpp:100;normalizer=TChunksV0MetaNormalizer;message=0 chunks found; 2025-06-25T15:05:43.192359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-25T15:05:43.192387Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-25T15:05:43.192436Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: table ... oad_stage_name=EXECUTE:granule/portions;fline=constructor_meta.cpp:71;memory_size=2374;data_size=2365;sum=265594;count=510;size_of_meta=136; 2025-06-25T15:09:54.270799Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;load_stage_name=EXECUTE:granule/portions;fline=constructor_portion.cpp:40;memory_size=2446;data_size=2437;sum=283954;count=255;size_of_portion=208; 2025-06-25T15:09:54.271157Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:portionsLoadingTime=31803; 2025-06-25T15:09:54.271272Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;PRECHARGE:granule_finished_commonLoadingTime=14; 2025-06-25T15:09:54.272041Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;load_stage_name=EXECUTE:granules/granule;fline=common_data.cpp:29;EXECUTE:granule_finished_commonLoadingTime=705; 2025-06-25T15:09:54.272100Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;load_stage_name=EXECUTE:column_engines/granules;fline=common_data.cpp:29;EXECUTE:granuleLoadingTime=33011; 2025-06-25T15:09:54.272164Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:granulesLoadingTime=33212; 2025-06-25T15:09:54.272243Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;PRECHARGE:finishLoadingTime=16; 2025-06-25T15:09:54.272531Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;load_stage_name=EXECUTE:column_engines;fline=common_data.cpp:29;EXECUTE:finishLoadingTime=240; 2025-06-25T15:09:54.272587Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:column_enginesLoadingTime=34442; 2025-06-25T15:09:54.272780Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tx_controllerLoadingTime=124; 2025-06-25T15:09:54.272954Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tx_controllerLoadingTime=119; 2025-06-25T15:09:54.273173Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:operations_managerLoadingTime=162; 2025-06-25T15:09:54.273353Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:operations_managerLoadingTime=138; 2025-06-25T15:09:54.276141Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:storages_managerLoadingTime=2729; 2025-06-25T15:09:54.278955Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:storages_managerLoadingTime=2732; 2025-06-25T15:09:54.279056Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:db_locksLoadingTime=14; 2025-06-25T15:09:54.279114Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:db_locksLoadingTime=12; 2025-06-25T15:09:54.279190Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:bg_sessionsLoadingTime=16; 2025-06-25T15:09:54.279308Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:bg_sessionsLoadingTime=51; 2025-06-25T15:09:54.279379Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:sharing_sessionsLoadingTime=7; 2025-06-25T15:09:54.279522Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:sharing_sessionsLoadingTime=78; 2025-06-25T15:09:54.279601Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:in_flight_readsLoadingTime=9; 2025-06-25T15:09:54.279677Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:in_flight_readsLoadingTime=37; 2025-06-25T15:09:54.279817Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;PRECHARGE:tiers_managerLoadingTime=98; 2025-06-25T15:09:54.279908Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;load_stage_name=EXECUTE:composite_init;fline=common_data.cpp:29;EXECUTE:tiers_managerLoadingTime=53; 2025-06-25T15:09:54.279950Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;event=initialize_shard;fline=common_data.cpp:29;EXECUTE:composite_initLoadingTime=52541; 2025-06-25T15:09:54.280192Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=0;raw_bytes=0;count=0;records=0} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=119665944;raw_bytes=192854450;count=5;records=1855000} inactive {blob_bytes=632703064;raw_bytes=989320282;count=54;records=9818750} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 9437184 2025-06-25T15:09:54.280342Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];process=SwitchToWork;fline=columnshard.cpp:74;event=initialize_shard;step=SwitchToWork; 2025-06-25T15:09:54.280414Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];process=SwitchToWork;fline=columnshard.cpp:77;event=initialize_shard;step=SignalTabletActive; 2025-06-25T15:09:54.280491Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];process=SwitchToWork;fline=columnshard_impl.cpp:1331;event=OnTieringModified;path_id=NO_VALUE_OPTIONAL; 2025-06-25T15:09:54.280561Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];process=SwitchToWork;fline=column_engine_logs.cpp:471;event=OnTieringModified;new_count_tierings=0; 2025-06-25T15:09:54.280725Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:09:54.280815Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:09:54.280900Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863646238;tx_id=18446744073709551615;;current_snapshot_ts=1750863944158; 2025-06-25T15:09:54.280949Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:09:54.281003Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:09:54.281053Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:09:54.281179Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:09:54.285577Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];ev=NActors::TEvents::TEvWakeup;fline=columnshard.cpp:250;event=TEvPrivate::TEvPeriodicWakeup::MANUAL;tablet_id=9437184; 
2025-06-25T15:09:54.286019Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;fline=columnshard.cpp:239;event=TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184; 2025-06-25T15:09:54.286071Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Send periodic stats. 2025-06-25T15:09:54.286110Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Disabled periodic stats at tablet 9437184 2025-06-25T15:09:54.286182Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:09:54.286306Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=23; 2025-06-25T15:09:54.286411Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:266;event=StartCleanupStop;snapshot=plan_step=1750863646238;tx_id=18446744073709551615;;current_snapshot_ts=1750863944158; 2025-06-25T15:09:54.286488Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=23;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:09:54.286561Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:791;background=cleanup;skip_reason=no_changes; 2025-06-25T15:09:54.286625Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:09:54.286748Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=0.999000s; 2025-06-25T15:09:54.286818Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=9437184;self_id=[1:3126:5081];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; >> THiveTest::TestCheckSubHiveForwarding [GOOD] >> THiveTest::TestCheckSubHiveDrain >> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---] >> THiveTest::TestCheckSubHiveDrain [GOOD] >> THiveTest::TestCheckSubHiveMigration >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> THiveTest::TestCheckSubHiveMigration [GOOD] >> THiveTest::PipeAlivenessOfDeadTablet |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> THiveTest::PipeAlivenessOfDeadTablet [GOOD] >> THiveTest::TestBootProgress >> THiveTest::TestBootProgress [GOOD] >> THiveTest::TestBridgeCreateTablet |94.8%| [TA] $(B)/ydb/core/tx/columnshard/ut_rw/test-results/unittest/{meta.json ... 
results_accumulator.log} >> THiveTest::TestBridgeCreateTablet [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] |94.8%| [TA] {RESULT} $(B)/ydb/core/tx/columnshard/ut_rw/test-results/unittest/{meta.json ... results_accumulator.log} >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_4_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 4] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_5_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 5] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_compression-COMPRESSION = "zstd"] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_18_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 18] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_19_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 19] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/mind/hive/ut/unittest >> THiveTest::TestBridgeCreateTablet [GOOD] Test command err: 2025-06-25T15:06:19.999620Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.024521Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.026528Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.033248Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.035700Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-25T15:06:20.038657Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-25T15:06:20.038717Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.040771Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:53:2077] ControllerId# 72057594037932033 2025-06-25T15:06:20.040818Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:20.042720Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.042903Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.057111Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# 
false Marker# DSP02 2025-06-25T15:06:20.057177Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.059533Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:61:2082] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.059709Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:62:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.059863Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:63:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.060014Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:64:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.060153Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:65:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.060341Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:66:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.060494Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:52:2076] Create Queue# [1:67:2088] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.060523Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T15:06:20.060610Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:53:2077] 2025-06-25T15:06:20.060644Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:53:2077] 2025-06-25T15:06:20.060691Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T15:06:20.060736Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:06:20.064244Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:06:20.064458Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.066992Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } PDisks { NodeID: 2 PDiskID: 1 Path: "SectorMap:1:3200" PDiskGuid: 2 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.067147Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1 Path# "SectorMap:1:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.067594Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.067808Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.068595Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:78:2076] ControllerId# 72057594037932033 2025-06-25T15:06:20.068628Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 
2025-06-25T15:06:20.068690Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.068794Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.071098Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:53:2077] 2025-06-25T15:06:20.071181Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.071214Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:20.085693Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [2:43:2064] 2025-06-25T15:06:20.085758Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [2:43:2064] 2025-06-25T15:06:20.092899Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:20.092968Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.094727Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:84:2080] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.094874Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:85:2081] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.095003Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:86:2082] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.095173Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:87:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.095308Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:88:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.095461Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:89:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.095603Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:77:2075] Create Queue# [2:90:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.095638Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T15:06:20.095704Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:78:2076] 2025-06-25T15:06:20.095732Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:78:2076] 2025-06-25T15:06:20.095772Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T15:06:20.095810Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:06:20.096191Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:06:20.097288Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.115730Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [2:78:2076] 2025-06-25T15:06:20.115791Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit 
event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.115837Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:20.117407Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.117465Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-25T15:06:20.124710Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-25T15:06:20.132442Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-25T15:06:20.132550Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.137641Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.137809Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.137843Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-25T15:06:20.137929Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-25T15:06:20.138010Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-25T15:06:20.138081Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:06:20.138295Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:53:2077] 2025-06-25T15:06:20.138330Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.138378Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-25T15:06:20.138447Z node 2 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: Pr ... 
ect.cpp:44: [14773c3bc185cab4] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037893 RecordGeneration# 1 Channel# 0 VDisk# [80000001:1:0:0:0]} Marker# DSPC01 2025-06-25T15:10:04.499910Z node 15 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [14773c3bc185cab4] Result# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 0 Status# OK} Marker# DSPC02 2025-06-25T15:10:04.500218Z node 15 :BS_PROXY_BRIDGE NOTICE: {BPB02@bridge.cpp:295} intermediate response RequestId# bbe2e3e924fcd205 GroupId# 2147483649 Status# OK PileState# SYNCHRONIZED Response# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-25T15:10:04.500301Z node 15 :BS_PROXY_BRIDGE DEBUG: {BPB01@bridge.cpp:318} request finished RequestId# bbe2e3e924fcd205 Response# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 0 Status# OK} 2025-06-25T15:10:04.500566Z node 14 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:19} Tx{52, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-06-25T15:10:04.500623Z node 14 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:19} Tx{52, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:10:04.500681Z node 14 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:19} Tx{52, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{33, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:10:04.500754Z node 14 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:19} Tx{52, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:10:04.500861Z node 15 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [8a5cae695f6b7442] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037893 RecordGeneration# 1 Channel# 1 VDisk# [80000004:1:0:0:0]} Marker# DSPC01 2025-06-25T15:10:04.500913Z node 15 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [8a5cae695f6b7442] Result# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 1 Status# OK} Marker# DSPC02 2025-06-25T15:10:04.501041Z node 15 :BS_PROXY_BRIDGE NOTICE: {BPB02@bridge.cpp:295} intermediate response RequestId# 7f98d2ced968d7ae GroupId# 2147483652 Status# OK PileState# SYNCHRONIZED Response# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 1 Status# OK} 2025-06-25T15:10:04.501092Z node 15 :BS_PROXY_BRIDGE DEBUG: {BPB01@bridge.cpp:318} request finished RequestId# 7f98d2ced968d7ae Response# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 1 Status# OK} 2025-06-25T15:10:04.513145Z node 14 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [83027ed5f9e3eb97] bootstrap ActorId# [14:1003:2573] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:18:0:0:154:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T15:10:04.513318Z node 14 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [83027ed5f9e3eb97] Id# [72057594037927937:2:18:0:0:154:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T15:10:04.513392Z node 14 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [83027ed5f9e3eb97] restore Id# [72057594037927937:2:18:0:0:154:0] optimisticReplicas# 1 
optimisticState# EBS_FULL Marker# BPG55 2025-06-25T15:10:04.513481Z node 14 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [83027ed5f9e3eb97] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:18:0:0:154:1] Marker# BPG33 2025-06-25T15:10:04.513550Z node 14 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [83027ed5f9e3eb97] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:18:0:0:154:1] Marker# BPG32 2025-06-25T15:10:04.513729Z node 14 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [14:83:2082] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:18:0:0:154:1] FDS# 154 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T15:10:04.514967Z node 14 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [83027ed5f9e3eb97] received {EvVPutResult Status# OK ID# [72057594037927937:2:18:0:0:154:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 33 } Cost# 81212 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 34 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-25T15:10:04.515084Z node 14 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [83027ed5f9e3eb97] Result# TEvPutResult {Id# [72057594037927937:2:18:0:0:154:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-25T15:10:04.515172Z node 14 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [83027ed5f9e3eb97] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:18:0:0:154:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T15:10:04.515334Z node 14 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.932 sample PartId# [72057594037927937:2:18:0:0:154:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 14 } TEvVPutResult{ TimestampMs# 2.179 VDiskId# [0:1:0:0:0] NodeId# 14 Status# OK } ] } 2025-06-25T15:10:04.515537Z node 14 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:18:0:0:154:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-25T15:10:04.515692Z node 14 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:19} commited cookie 1 for step 18 2025-06-25T15:10:04.517086Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037893] ::Bootstrap [14:1005:2575] 2025-06-25T15:10:04.517137Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037893] lookup [14:1005:2575] 2025-06-25T15:10:04.517248Z node 14 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037893 entry.State: StInit ev: {EvForward TabletID: 72075186224037893 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:10:04.517444Z node 14 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037893 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:10:04.517627Z node 14 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037893 Cookie: 0} 2025-06-25T15:10:04.517741Z node 14 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037893 Cookie: 1} 2025-06-25T15:10:04.517804Z node 14 :STATESTORAGE DEBUG: 
statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037893 Cookie: 2} 2025-06-25T15:10:04.517893Z node 14 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037893 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [15:954:2283] CurrentLeaderTablet: [15:962:2288] CurrentGeneration: 1 CurrentStep: 0} 2025-06-25T15:10:04.518009Z node 14 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037893 ClusterStateGeneration: 0 ClusterStateGuid: 0 CurrentLeader: [15:954:2283] CurrentLeaderTablet: [15:962:2288] CurrentGeneration: 1 CurrentStep: 0} 2025-06-25T15:10:04.518159Z node 14 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037893 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037893 Cookie: 0 CurrentLeader: [15:954:2283] CurrentLeaderTablet: [15:962:2288] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[14:24343667:0] : 3}, {[14:1099535971443:0] : 6}}}} 2025-06-25T15:10:04.518226Z node 14 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037893 followers: 0 2025-06-25T15:10:04.518304Z node 14 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 14 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037893 followers: 0 countLeader 1 allowFollowers 0 winner: [15:954:2283] 2025-06-25T15:10:04.518448Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037893] forward result remote node 15 [14:1005:2575] 2025-06-25T15:10:04.518644Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037893] remote node connected [14:1005:2575] 2025-06-25T15:10:04.518708Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037893]::SendEvent [14:1005:2575] 2025-06-25T15:10:04.519051Z node 15 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037893] Accept Connect Originator# [14:1005:2575] 2025-06-25T15:10:04.519487Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037893] connected with status OK role: Leader [14:1005:2575] 2025-06-25T15:10:04.519548Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037893] send queued [14:1005:2575] 2025-06-25T15:10:04.520508Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [14:1009:2577] 2025-06-25T15:10:04.520572Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [14:1009:2577] 2025-06-25T15:10:04.520667Z node 14 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:10:04.520733Z node 14 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 14 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [14:450:2264] 2025-06-25T15:10:04.520815Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [14:1009:2577] 2025-06-25T15:10:04.520886Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [14:1009:2577] 
2025-06-25T15:10:04.520946Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [14:1009:2577] 2025-06-25T15:10:04.521001Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [14:1009:2577] 2025-06-25T15:10:04.521105Z node 14 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [14:1009:2577] 2025-06-25T15:10:04.521339Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [14:1009:2577] 2025-06-25T15:10:04.521390Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [14:1009:2577] 2025-06-25T15:10:04.521430Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [14:1009:2577] 2025-06-25T15:10:04.521516Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [14:1009:2577] 2025-06-25T15:10:04.521589Z node 14 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [14:1009:2577] 2025-06-25T15:10:04.521660Z node 14 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [14:1008:2576] EventType# 268697616 |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_20_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 20] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_21_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 21] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_12_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 12] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_13_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 13] >> alter_compression.py::TestAlterCompression::test_availability_data >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_6_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 6] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_7_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 7] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_15_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 15] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_16_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 16] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_10_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 10] [GOOD] >> 
alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_11_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 11] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_3_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 3] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_9_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 9] [GOOD] >> test_dispatch.py::TestMapping::test_idle [GOOD] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_3_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 3] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_5_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 5] [GOOD] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/py3test >> test_dispatch.py::TestMapping::test_idle [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_9_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 9] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_5_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 5] [GOOD] >> test_retry.py::TestRetry::test_low_rate[kikimr0] [GOOD] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/multi_plane/py3test >> test_retry.py::TestRetry::test_low_rate[kikimr0] [GOOD] |94.8%| [TA] $(B)/ydb/tests/fq/multi_plane/test-results/py3test/{meta.json ... results_accumulator.log} |94.8%| [TA] {RESULT} $(B)/ydb/tests/fq/multi_plane/test-results/py3test/{meta.json ... 
results_accumulator.log} >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_7_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 7] [GOOD] >> DataShardTxOrder::RandomPoints_DelayData |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> DataShardTxOrder::ZigZag_oo8_dirty >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_13_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 13] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_14_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 14] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_19_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 19] [GOOD] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_7_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 7] [GOOD] >> DataShardTxOrder::ZigZag_oo8_dirty [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::ZigZag_oo8_dirty [GOOD] Test command err: 2025-06-25T15:11:16.587613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:11:16.587691Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:11:16.589015Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:11:16.600770Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:11:16.601239Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:11:16.601501Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:11:16.611432Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:11:16.656813Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:11:16.657054Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:11:16.658851Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:11:16.658929Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:11:16.658979Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:11:16.659333Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:11:16.659425Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:11:16.659500Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:11:16.724389Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:11:16.760626Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:11:16.760857Z node 1 
:TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:11:16.760985Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:11:16.761049Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:11:16.761089Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:11:16.761124Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:11:16.761370Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:11:16.761434Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:11:16.761730Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:11:16.761870Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:11:16.761943Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:11:16.761983Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:11:16.762031Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:11:16.762064Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:11:16.762117Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:11:16.762151Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:11:16.762193Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:11:16.762295Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:11:16.762340Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:11:16.762394Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:11:16.765306Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\010\030\001(\001J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:11:16.765374Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:11:16.765451Z node 1 :TX_DATASHARD DEBUG: 
datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:11:16.765633Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:11:16.765701Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:11:16.765761Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:11:16.765814Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:11:16.765859Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:11:16.765897Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:11:16.765933Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:11:16.766283Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:11:16.766321Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:11:16.766369Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:11:16.766408Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:11:16.766488Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:11:16.766524Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:11:16.766562Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:11:16.766594Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:11:16.766623Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:11:16.778907Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:11:16.778981Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:11:16.779016Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:11:16.779070Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:11:16.779132Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:11:16.779647Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:11:16.779695Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event 
TEvTabletPipe::TEvServerConnected 2025-06-25T15:11:16.779742Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:11:16.779896Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:11:16.779931Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:11:16.780097Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:11:16.780144Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:11:16.780182Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:11:16.780216Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:11:16.783861Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:11:16.783927Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:11:16.784133Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:11:16.784181Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:11:16.784238Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:11:16.784276Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:11:16.784328Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:11:16.784374Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:11:16.784409Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
1000016:45] at 9437185 is Executed 2025-06-25T15:11:23.230521Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit WaitForPlan 2025-06-25T15:11:23.230580Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit PlanQueue 2025-06-25T15:11:23.230740Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 45 at step 1000016 at tablet 9437185 { Transactions { TxId: 45 AckTo { RawX1: 103 RawX2: 8589936728 } } Step: 1000016 MediatorID: 0 TabletID: 9437185 } 2025-06-25T15:11:23.230789Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-25T15:11:23.231003Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:351:2316], Recipient [2:351:2316]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:11:23.231033Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:11:23.231072Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437185 2025-06-25T15:11:23.231100Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:11:23.231123Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-25T15:11:23.231165Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000016:45] in PlanQueue unit at 9437185 2025-06-25T15:11:23.231198Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit PlanQueue 2025-06-25T15:11:23.231223Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:11:23.231246Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit PlanQueue 2025-06-25T15:11:23.231288Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit LoadTxDetails 2025-06-25T15:11:23.231310Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit LoadTxDetails 2025-06-25T15:11:23.236931Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437185 loaded tx from db 1000016:45 keys extracted: 2 2025-06-25T15:11:23.236988Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:11:23.237018Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit LoadTxDetails 2025-06-25T15:11:23.237045Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit FinalizeDataTxPlan 2025-06-25T15:11:23.237075Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit FinalizeDataTxPlan 2025-06-25T15:11:23.237120Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:11:23.237142Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit FinalizeDataTxPlan 2025-06-25T15:11:23.237174Z node 2 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit BuildAndWaitDependencies 2025-06-25T15:11:23.237195Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit BuildAndWaitDependencies 2025-06-25T15:11:23.237245Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [1000016:45] is the new logically complete end at 9437185 2025-06-25T15:11:23.237270Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [1000016:45] is the new logically incomplete end at 9437185 2025-06-25T15:11:23.237306Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [1000016:45] at 9437185 2025-06-25T15:11:23.237342Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:11:23.237366Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit BuildAndWaitDependencies 2025-06-25T15:11:23.237386Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit BuildDataTxOutRS 2025-06-25T15:11:23.237413Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit BuildDataTxOutRS 2025-06-25T15:11:23.237466Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:11:23.237508Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit BuildDataTxOutRS 2025-06-25T15:11:23.237533Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit StoreAndSendOutRS 2025-06-25T15:11:23.237553Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit StoreAndSendOutRS 2025-06-25T15:11:23.237588Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:11:23.237608Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit StoreAndSendOutRS 2025-06-25T15:11:23.237627Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit PrepareDataTxInRS 2025-06-25T15:11:23.237646Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit PrepareDataTxInRS 2025-06-25T15:11:23.237670Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:11:23.237688Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit PrepareDataTxInRS 2025-06-25T15:11:23.237706Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit LoadAndWaitInRS 2025-06-25T15:11:23.237745Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit LoadAndWaitInRS 2025-06-25T15:11:23.237777Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:11:23.237797Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit LoadAndWaitInRS 2025-06-25T15:11:23.237819Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit ExecuteDataTx 2025-06-25T15:11:23.237839Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit ExecuteDataTx 2025-06-25T15:11:23.238238Z node 2 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000016:45] at tablet 9437185 with status COMPLETE 2025-06-25T15:11:23.238288Z node 2 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000016:45] at 9437185: {NSelectRow: 2, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 2, SelectRowBytes: 16, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T15:11:23.238331Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:11:23.238351Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit ExecuteDataTx 2025-06-25T15:11:23.238371Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit CompleteOperation 2025-06-25T15:11:23.238399Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit CompleteOperation 2025-06-25T15:11:23.238609Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is DelayComplete 2025-06-25T15:11:23.238635Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit CompleteOperation 2025-06-25T15:11:23.238657Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000016:45] at 9437185 to execution unit CompletedOperations 2025-06-25T15:11:23.238695Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000016:45] at 9437185 on unit CompletedOperations 2025-06-25T15:11:23.238722Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000016:45] at 9437185 is Executed 2025-06-25T15:11:23.238756Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000016:45] at 9437185 executing on unit CompletedOperations 2025-06-25T15:11:23.238779Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000016:45] at 9437185 has finished 2025-06-25T15:11:23.238817Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:11:23.238841Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-25T15:11:23.238865Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-25T15:11:23.238900Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-25T15:11:23.256853Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437185 step# 1000016 txid# 45} 2025-06-25T15:11:23.256927Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437185 step# 1000016} 2025-06-25T15:11:23.256985Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-25T15:11:23.257052Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete 
execution for [1000016:45] at 9437185 on unit CompleteOperation 2025-06-25T15:11:23.257128Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000016 : 45] from 9437185 at tablet 9437185 send result to client [2:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:11:23.257184Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-25T15:11:23.258440Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000016 txid# 45} 2025-06-25T15:11:23.258487Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000016} 2025-06-25T15:11:23.258526Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:11:23.258553Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000016:45] at 9437184 on unit CompleteOperation 2025-06-25T15:11:23.258593Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000016 : 45] from 9437184 at tablet 9437184 send result to client [2:103:2136], exec latency: 0 ms, propose latency: 2 ms 2025-06-25T15:11:23.258624Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> THiveTest::TestCreateSubHiveCreateManyTabletsWithReboots [GOOD] >> THiveTest::TestCheckSubHiveMigrationWithReboots >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Utf8-pk_types7-all_types7-index7] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_21_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 21] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int64-pk_types0-all_types0-index0] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_11_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 11] [GOOD] |94.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_21_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 21] [GOOD] |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_19_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 19] [GOOD] >> alter_compression.py::TestAlterCompression::test_availability_data [GOOD] |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint64-pk_types3-all_types3-index3] |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_11_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 11] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_16_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 16] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_17_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 17] |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> 
alter_compression.py::TestAlterCompression::test_availability_data [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int8-pk_types2-all_types2-index2] |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint32-pk_types4-all_types4-index4] >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types0-all_types0-index0] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_read_iterator/unittest >> ReadIteratorExternalBlobs::ExtBlobsWithCompactingMiddleRows 2025-06-25 15:11:55,044 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 15:11:55,279 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 741275 47.5M 44.8M 24.0M test_tool run_ut @/home/runner/.ya/build/build_root/yft8/000ded/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_to 741837 2.0G 2.0G 1.6G └─ ydb-core-tx-datashard-ut_read_iterator --trace-path-append /home/runner/.ya/build/build_root/yft8/000ded/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/tes Test command err: 2025-06-25T15:02:01.877772Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:02:01.877930Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:02:01.878072Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ded/r3tmp/tmpBgIOEr/pdisk_1.dat 2025-06-25T15:02:02.203088Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:02:02.211500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:02:02.259347Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:02:02.264879Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750863719121057 != 1750863719121061 2025-06-25T15:02:02.312472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:02:02.312603Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:02:02.325483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:02:02.419576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:02:02.472162Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:02:02.476671Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:02:02.477105Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:02:02.477336Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:02:02.523966Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:02:02.524679Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:02:02.524798Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:02:02.526398Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at 
tablet: 72075186224037888 2025-06-25T15:02:02.526468Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:02:02.526523Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:02:02.526873Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:02:02.527003Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:02:02.527077Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:02:02.540821Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:02:02.565787Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:02:02.565942Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:02:02.566019Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:02:02.566048Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:02:02.566095Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:02:02.566133Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:02:02.566304Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.566342Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:02:02.566629Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:02:02.566701Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:02:02.566747Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:02:02.566813Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:02:02.566848Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:02:02.566888Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:02:02.566923Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:02:02.566950Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:02:02.566991Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:02:02.567341Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.567417Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:02:02.567455Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:02:02.567533Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:02:02.567567Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:02:02.567668Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:02:02.567849Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:02:02.567903Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:02:02.567977Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:02:02.568017Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:02:02.568052Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:02:02.568081Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:02:02.568111Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.568390Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:02:02.568433Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:02:02.568464Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:02:02.568496Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.568555Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:02:02.568584Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:02:02.568614Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:02:02.568641Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-25T15:02:02.568665Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready 
to execute on unit WaitForPlan 2025-06-25T15:02:02.570000Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:02:02.570053Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:02:02.580742Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:02:02.580803Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:02:02.580834Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:02:02.580870Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose late ... 6 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 13 StartTime: 450 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T15:10:36.922164Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715678. Ctx: { TraceId: 01jykt80162ckg0ycfqn7959ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NTM0NTczMy01MjVhMGE1Yi00ZDRiM2ZkLTg4OGI5ZDQz, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:10:41.730623Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715679. Ctx: { TraceId: 01jykt84ek46sfhskmdd459brd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YzRiMThjYTctMzU3MzA1YzMtMzAyNWRhMzAtM2JlY2U1YTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:10:42.109490Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715680. Ctx: { TraceId: 01jykt8957e0rgpa5sp6q760b0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTVmY2Y2ODQtMmUxNWExMjktMzdjMWI1NDgtOTdlNWMyYjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:10:48.517720Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [14:108:2154], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:10:48.518056Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T15:10:48.518321Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000ded/r3tmp/tmpUgLzwm/pdisk_1.dat 2025-06-25T15:10:48.872042Z node 14 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 14 Type# 268639257 2025-06-25T15:10:48.874186Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:10:48.910253Z node 14 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:10:48.912663Z node 14 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [14:33:2080] 1750864243518698 != 1750864243518702 2025-06-25T15:10:48.960523Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:10:48.960673Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:10:48.972545Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:10:49.056002Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:10:49.401195Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:699:2580], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:10:49.401317Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:710:2585], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:10:49.401398Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:10:49.407544Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:10:49.462612Z node 14 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:10:49.596417Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [14:713:2588], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T15:10:49.632344Z node 14 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [14:784:2628] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:10:54.803222Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jykt8ghq4nrhcahqwnc1zzeg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=OGUyZGJkMjUtMzFjNDM5NmMtMzQyOTk0YS03NzIwMmQ5OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:10:59.549081Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykt8nx9espzmpzx67hhmgg3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=M2M4MTcwOTMtNDU2NjYxYzUtNWM5ZmJiYzYtZTQ5ODdhNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:11:04.550158Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715662. Ctx: { TraceId: 01jykt8thw96xa6t15bdxn9cm9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=NjZlY2U2OGYtMzY2M2QwNDAtOTRmY2UxMDgtYWU0NWZlOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:11:09.313267Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715663. Ctx: { TraceId: 01jykt8ze3fn02273fnj3nvjdp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=YWU3NTllMTMtOGYwYmM3ZDEtNGJkNTllNjktNTc0NmIwNmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:11:14.090550Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715664. Ctx: { TraceId: 01jykt942w2ykqxgxdamhea3k2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=M2FjNDc5NWUtZmE0MmQ3ZjctMWJlMDc4ZjItMTBlNGFjZDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:11:18.811672Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jykt98p49qq3damap0cksjnp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=ZTIxYThkOGEtMTE0NzMxZDQtYzBiNjM0ZC1lZTNlM2UxMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:11:23.677823Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715666. Ctx: { TraceId: 01jykt9dbj7s6m7k0k7jt4wwzm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=OGY2Y2UyNDctODZlMzBiMzUtZjUwMDM1ZGItNDg1MTkzOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:11:28.285493Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715667. Ctx: { TraceId: 01jykt9j3ybxbtv9z88ksekwx2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=ZWUzNGZiZTItOWJhNzlmN2QtODA0NTZjY2ItZDc5ZDEwNDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:11:33.008465Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715668. 
Ctx: { TraceId: 01jykt9pkrdbgab65rgbdetgra, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=YTYxZjYyNjgtODFiYTE2Zi05MDA1ZjUtNjY0NjA4MTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:11:37.503237Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715669. Ctx: { TraceId: 01jykt9v5878wcc4m586d9vs95, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=Yjc3M2U3NDQtNDM4NjcxOTktZDI4YmM3Ni1lMDFjYzVkNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:11:42.070702Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715670. Ctx: { TraceId: 01jykt9zkqapqrx4w47tynb90v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=OTNjZTEyOTgtZTg2MWY3MzktOGU2ZjgwNDMtNDdjMGRiMjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:11:46.636843Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715671. Ctx: { TraceId: 01jykta42b5tj6az9eyjsqp8wg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=OGRmOTEwN2EtZGIzNmVjZTktNzA2OGIzM2YtNzhmM2JmM2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:11:51.178483Z node 14 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715672. Ctx: { TraceId: 01jykta8f4282rm1xjfwesgs20, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=YTQ5Njg0NTUtYzcyMjA3MmYtZTQ2OWNhMDQtZjFmZmU5MTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/000ded/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/000ded/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) |94.9%| [TA] $(B)/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/{meta.json ... results_accumulator.log} |94.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_iterator/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types1-all_types1-index1] |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] [GOOD] |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int32-pk_types1-all_types1-index1] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] [GOOD] |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_14_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 14] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_String-pk_types6-all_types6-index6] |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] [GOOD] >> THiveTest::TestCheckSubHiveMigrationWithReboots [GOOD] >> THiveTest::TestCreateAndDeleteTabletWithStoragePoolsReboots |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint8-pk_types5-all_types5-index5] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] [GOOD] >> THiveTest::TestCreateAndDeleteTabletWithStoragePoolsReboots [GOOD] >> THiveTest::TestCreateAndDeleteTabletWithStoragePools |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] [GOOD] >> THiveTest::TestCreateAndDeleteTabletWithStoragePools [GOOD] >> THiveTest::TestCreateAndReassignTabletWithStoragePools >> THiveTest::TestCreateAndReassignTabletWithStoragePools [GOOD] >> THiveTest::TestCreateAndReassignTabletWhileStarting |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> THiveTest::TestCreateAndReassignTabletWhileStarting [GOOD] >> THiveTest::TestCreateTabletAndReassignGroups |94.9%| [TM] {asan, 
default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] [GOOD] |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> THiveTest::TestCreateTabletAndReassignGroups [GOOD] >> THiveTest::TestCreateTabletAndReassignGroups3 |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> THiveTest::TestCreateTabletAndReassignGroups3 [GOOD] >> THiveTest::TestCreateTabletAndMixedReassignGroups3 |94.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> THiveTest::TestCreateTabletAndMixedReassignGroups3 [GOOD] >> THiveTest::TestCreateExternalTablet >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] [GOOD] >> THiveTest::TestCreateExternalTablet [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_14_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 14] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Utf8-pk_types7-all_types7-index7] [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int64-pk_types0-all_types0-index0] [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test >> test_delete_all_after_inserts.py::TestDeleteAllAfterInserts::test_delete_all_rows_after_several_inserts [SKIPPED] >> test_delete_by_explicit_row_id.py::TestDeleteByExplicitRowId::test_delete_row_by_explicit_row_id |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test >> test_delete_all_after_inserts.py::TestDeleteAllAfterInserts::test_delete_all_rows_after_several_inserts [SKIPPED] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test ------- [TM] {asan, default-linux-x86_64, release} 
ydb/core/mind/hive/ut/unittest >> THiveTest::TestCreateExternalTablet [GOOD] Test command err: 2025-06-25T15:06:20.429896Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:06:20.455707Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.455963Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:0:3200" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:06:20.457799Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:06:20.458129Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-25T15:06:20.459164Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-25T15:06:20.459210Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:06:20.460074Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:30:2076] ControllerId# 72057594037932033 2025-06-25T15:06:20.460109Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:06:20.460203Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:06:20.460327Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:06:20.471832Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:157: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-25T15:06:20.471879Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:305: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-25T15:06:20.473541Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:38:2081] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.473643Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:39:2082] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.473733Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:40:2083] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.473823Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:41:2084] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.473903Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:42:2085] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.474003Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# [1:43:2086] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.474079Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:29:2075] Create Queue# 
[1:44:2087] targetNodeId# 1 Marker# DSP01 2025-06-25T15:06:20.474097Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:31: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-25T15:06:20.474146Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:30:2076] 2025-06-25T15:06:20.474192Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:30:2076] 2025-06-25T15:06:20.474221Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:245: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-25T15:06:20.474254Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:06:20.474645Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:06:20.474789Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.483487Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:30:2076] 2025-06-25T15:06:20.483550Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.483580Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:20.483651Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.483686Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-25T15:06:20.487989Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-25T15:06:20.489452Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-25T15:06:20.489964Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:06:20.490178Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:30:2076] 2025-06-25T15:06:20.490228Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:06:20.490416Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-25T15:06:20.490475Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-25T15:06:20.490516Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-25T15:06:20.490546Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-25T15:06:20.490597Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:20.490719Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: 
TClient[72057594037936129] ::Bootstrap [1:34:2063] 2025-06-25T15:06:20.490757Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:34:2063] 2025-06-25T15:06:20.491092Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-25T15:06:20.491121Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:06:20.491254Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\363\365\\\016\336\205\240m2\241c\3010\003\261\342\227\n\267}" } 2025-06-25T15:06:20.491418Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:20.491516Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:06:20.491593Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:20.491776Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:0:3200" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-25T15:06:20.491925Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-25T15:06:20.491980Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-25T15:06:20.492090Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:30:2076] 2025-06-25T15:06:20.492123Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:30:2076] 2025-06-25T15:06:20.492157Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-25T15:06:20.492193Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-25T15:06:20.496094Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-25T15:06:20.496142Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-25T15:06:20.496173Z node 1 
:BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 2146435075 Sender# [1:48:2091] SessionId# [0:0:0] Cookie# 0 2025-06-25T15:06:20.496218Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.012392s 2025-06-25T15:06:20.496534Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:06:20.497154Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-25T15:06:20.497200Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-25T15:06:20.497228Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 2} 2025-06-25T15:06:20.497258Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037936129 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:06:20.497340Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-25T15:06:20.497369Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} ... PE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [149:268:2260] EventType# 268697601 2025-06-25T15:12:34.901195Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:4} Tx{4, NKikimr::NHive::TTxCreateTablet} queued, type NKikimr::NHive::TTxCreateTablet 2025-06-25T15:12:34.901292Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:4} Tx{4, NKikimr::NHive::TTxCreateTablet} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:12:34.902145Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:4} Tx{4, NKikimr::NHive::TTxCreateTablet} hope 1 -> done Change{4, redo 1157b alter 0b annex 0, ~{ 14, 0, 1, 2 } -{ }, 0 gb} 2025-06-25T15:12:34.902261Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:4} Tx{4, NKikimr::NHive::TTxCreateTablet} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:12:34.902424Z node 149 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [149:311:2289] 2025-06-25T15:12:34.902463Z node 149 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [149:311:2289] 2025-06-25T15:12:34.902522Z node 149 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [149:311:2289] 2025-06-25T15:12:34.902584Z node 149 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StNormal ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:12:34.902651Z node 149 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 149 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037932033 followers: 0 countLeader 1 allowFollowers 0 winner: [149:95:2123] 2025-06-25T15:12:34.902755Z node 149 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037932033] forward result local node, try to connect [149:311:2289] 2025-06-25T15:12:34.902797Z node 149 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037932033]::SendEvent [149:311:2289] 2025-06-25T15:12:34.902880Z node 
149 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037932033] Accept Connect Originator# [149:311:2289] 2025-06-25T15:12:34.903031Z node 149 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037932033] connected with status OK role: Leader [149:311:2289] 2025-06-25T15:12:34.903071Z node 149 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037932033] send queued [149:311:2289] 2025-06-25T15:12:34.903103Z node 149 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037932033] push event to server [149:311:2289] 2025-06-25T15:12:34.903156Z node 149 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037932033] HandleSend Sender# [149:279:2266] EventType# 268637702 2025-06-25T15:12:34.903307Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{28, NKikimr::NBsController::TBlobStorageController::TTxSelectGroups} queued, type NKikimr::NBsController::TBlobStorageController::TTxSelectGroups 2025-06-25T15:12:34.903398Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{28, NKikimr::NBsController::TBlobStorageController::TTxSelectGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:12:34.903619Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{28, NKikimr::NBsController::TBlobStorageController::TTxSelectGroups} hope 1 -> done Change{20, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:12:34.903707Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{28, NKikimr::NBsController::TBlobStorageController::TTxSelectGroups} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:12:34.904053Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxUpdateTabletGroups} queued, type NKikimr::NHive::TTxUpdateTabletGroups 2025-06-25T15:12:34.904141Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxUpdateTabletGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:12:34.904592Z node 149 :HIVE NOTICE: tx__update_tablet_groups.cpp:326: HIVE#72057594037927937 THive::TTxUpdateTabletGroups{88923003593536}(72075186224037888)::Execute - TryToBoot was not successfull 2025-06-25T15:12:34.904721Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxUpdateTabletGroups} hope 1 -> done Change{5, redo 698b alter 0b annex 0, ~{ 2, 1, 3 } -{ }, 0 gb} 2025-06-25T15:12:34.904814Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxUpdateTabletGroups} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:12:34.915929Z node 149 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [49bb8b081a887568] bootstrap ActorId# [149:314:2292] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:4:0:0:701:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-25T15:12:34.916101Z node 149 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [49bb8b081a887568] Id# [72057594037927937:2:4:0:0:701:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-25T15:12:34.916158Z node 149 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [49bb8b081a887568] restore Id# [72057594037927937:2:4:0:0:701:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-25T15:12:34.916218Z node 149 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [49bb8b081a887568] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:4:0:0:701:1] Marker# BPG33 
2025-06-25T15:12:34.916262Z node 149 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [49bb8b081a887568] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:4:0:0:701:1] Marker# BPG32 2025-06-25T15:12:34.916430Z node 149 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [149:36:2080] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:4:0:0:701:1] FDS# 701 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-25T15:12:34.917760Z node 149 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [49bb8b081a887568] received {EvVPutResult Status# OK ID# [72057594037927937:2:4:0:0:701:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 19 } Cost# 85519 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 20 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-25T15:12:34.917871Z node 149 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [49bb8b081a887568] Result# TEvPutResult {Id# [72057594037927937:2:4:0:0:701:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-25T15:12:34.917930Z node 149 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [49bb8b081a887568] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:4:0:0:701:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-25T15:12:34.918077Z node 149 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.688 sample PartId# [72057594037927937:2:4:0:0:701:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 149 } TEvVPutResult{ TimestampMs# 2.034 VDiskId# [0:1:0:0:0] NodeId# 149 Status# OK } ] } 2025-06-25T15:12:34.918211Z node 149 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:4:0:0:701:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-25T15:12:34.918353Z node 149 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} commited cookie 1 for step 4 2025-06-25T15:12:34.918710Z node 149 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:12:34.918834Z node 149 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-25T15:12:34.918893Z node 149 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-25T15:12:34.918935Z node 149 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-25T15:12:34.918983Z node 149 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:12:34.919043Z node 149 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:12:34.919095Z node 149 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888 ClusterStateGeneration: 
0 ClusterStateGuid: 0} 2025-06-25T15:12:34.919508Z node 149 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [149:318:2295] 2025-06-25T15:12:34.919585Z node 149 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [149:318:2295] 2025-06-25T15:12:34.919745Z node 149 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StInit ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-25T15:12:34.919956Z node 149 :STATESTORAGE DEBUG: statestorage_proxy.cpp:281: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-06-25T15:12:34.920132Z node 149 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-25T15:12:34.920213Z node 149 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-25T15:12:34.920254Z node 149 :STATESTORAGE DEBUG: statestorage_replica.cpp:185: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-25T15:12:34.920438Z node 149 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:12:34.920536Z node 149 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:12:34.920585Z node 149 :STATESTORAGE DEBUG: statestorage_proxy.cpp:390: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888 ClusterStateGeneration: 0 ClusterStateGuid: 0} 2025-06-25T15:12:34.920762Z node 149 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[149:1099535971443:0] : 5}, {[149:2199047599219:0] : 8}, {[149:24343667:0] : 2}}}} 2025-06-25T15:12:34.920840Z node 149 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72075186224037888 followers: 0 2025-06-25T15:12:34.921012Z node 149 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72075186224037888] forward result error, check reconnect [149:318:2295] 2025-06-25T15:12:34.921085Z node 149 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[72075186224037888] connect failed [149:318:2295] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_0_UNIQUE_SYNC-pk_types32-all_types32-index32-Date-UNIQUE-SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0_UNIQUE_SYNC-pk_types26-all_types26-index26-Uint64-UNIQUE-SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__ASYNC-pk_types10-all_types10-index10-Timestamp--ASYNC] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__ASYNC-pk_types13-all_types13-index13-DyNumber--ASYNC] |95.0%| [TM] 
{asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Utf8-pk_types7-all_types7-index7] [GOOD] >> TSchemeshardCompactionQueueTest::UpdateBelowThreshold [GOOD] >> TSchemeshardCompactionQueueTest::UpdateWithEmptyShard [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1_UNIQUE_SYNC-pk_types5-all_types5-index5-Datetime-UNIQUE-SYNC] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::UpdateWithEmptyShard [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] [GOOD] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int64-pk_types0-all_types0-index0] [GOOD] |95.0%| [TA] $(B)/ydb/core/tx/schemeshard/ut_compaction/test-results/unittest/{meta.json ... results_accumulator.log} >> test_ttl.py::TestTTL::test_ttl[table_Date_1__SYNC-pk_types33-all_types33-index33-Date--SYNC] |95.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_compaction/test-results/unittest/{meta.json ... results_accumulator.log} >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__SYNC-pk_types0-all_types0-index0-Datetime--SYNC] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0_UNIQUE_SYNC-pk_types14-all_types14-index14-DyNumber-UNIQUE-SYNC] |95.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0_UNIQUE_SYNC-pk_types20-all_types20-index20-Uint32-UNIQUE-SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Date_1_UNIQUE_SYNC-pk_types35-all_types35-index35-Date-UNIQUE-SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0_UNIQUE_SYNC-pk_types8-all_types8-index8-Timestamp-UNIQUE-SYNC] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint64-pk_types3-all_types3-index3] [GOOD] |95.1%| [TA] $(B)/ydb/core/mind/hive/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__ASYNC-pk_types16-all_types16-index16-DyNumber--ASYNC] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_17_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 17] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1_UNIQUE_SYNC-pk_types11-all_types11-index11-Timestamp-UNIQUE-SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] [GOOD] |95.1%| [TA] {RESULT} $(B)/ydb/core/mind/hive/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] [GOOD] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int8-pk_types2-all_types2-index2] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__ASYNC-pk_types1-all_types1-index1-Datetime--ASYNC] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint64-pk_types3-all_types3-index3] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint32-pk_types4-all_types4-index4] [GOOD] >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types0-all_types0-index0] [GOOD] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_17_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 17] [GOOD] |95.1%| [TA] $(B)/ydb/tests/olap/column_family/compression/test-results/py3test/{meta.json ... results_accumulator.log} |95.1%| [TA] {RESULT} $(B)/ydb/tests/olap/column_family/compression/test-results/py3test/{meta.json ... results_accumulator.log} >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__ASYNC-pk_types22-all_types22-index22-Uint32--ASYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] [GOOD] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int8-pk_types2-all_types2-index2] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_1__ASYNC-pk_types34-all_types34-index34-Date--ASYNC] >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types1-all_types1-index1] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] [GOOD] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types0-all_types0-index0] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__SYNC-pk_types15-all_types15-index15-DyNumber--SYNC] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__SYNC-pk_types6-all_types6-index6-Timestamp--SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__SYNC-pk_types3-all_types3-index3-Datetime--SYNC] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint32-pk_types4-all_types4-index4] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> 
test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__SYNC-pk_types21-all_types21-index21-Uint32--SYNC] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int32-pk_types1-all_types1-index1] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__ASYNC-pk_types4-all_types4-index4-Datetime--ASYNC] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types1-all_types1-index1] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__ASYNC-pk_types19-all_types19-index19-Uint32--ASYNC] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__ASYNC-pk_types25-all_types25-index25-Uint64--ASYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] [GOOD] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__ASYNC-pk_types28-all_types28-index28-Uint64--ASYNC] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_String-pk_types6-all_types6-index6] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> AnalyzeColumnshard::AnalyzeRebootColumnShard [FAIL] >> test_ttl.py::TestTTL::test_ttl[table_Date_0__ASYNC-pk_types31-all_types31-index31-Date--ASYNC] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int32-pk_types1-all_types1-index1] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint8-pk_types5-all_types5-index5] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] [GOOD] >> TOlap::CustomDefaultPresets |95.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> TOlap::CustomDefaultPresets [GOOD] >> TOlap::Decimal |95.1%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_String-pk_types6-all_types6-index6] [GOOD] >> TOlap::Decimal [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::Decimal [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:13:37.222111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:13:37.222192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:13:37.222222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:13:37.222249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:13:37.222284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:13:37.222311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:13:37.222360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:13:37.222413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:13:37.223069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:13:37.223343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:13:37.296180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:13:37.296247Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:13:37.312383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:13:37.312771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:13:37.312946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:13:37.320075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:13:37.320404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:13:37.321008Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:13:37.321256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:13:37.324526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:13:37.324734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:13:37.325732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:13:37.325783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:13:37.325922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:13:37.325964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:13:37.325999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:13:37.326082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:13:37.332446Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:13:37.457674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:13:37.457901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:13:37.458091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:13:37.458139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:13:37.458399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:13:37.458472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at 
/home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:13:37.460850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:13:37.461013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:13:37.461223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:13:37.461276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:13:37.461317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:13:37.461359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:13:37.463150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:13:37.463199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:13:37.463245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:13:37.464783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:13:37.464824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:13:37.464880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:13:37.464933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:13:37.473238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:13:37.475196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:13:37.475356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:13:37.476265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:13:37.476408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:13:37.476450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:13:37.476744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:13:37.476791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:13:37.476943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:13:37.477005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:13:37.478943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:13:37.478982Z node 1 :FLAT_TX_SCHEMESHARD ... h path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T15:13:38.690669Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186233409546;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=101;fline=tx_controller.cpp:215;event=finished_tx;tx_id=101; FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000002 2025-06-25T15:13:38.692383Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:13:38.692432Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:13:38.692628Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T15:13:38.692795Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:13:38.692844Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-25T15:13:38.692890Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:208:2208], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-25T15:13:38.693311Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 
2025-06-25T15:13:38.693372Z node 2 :FLAT_TX_SCHEMESHARD INFO: create_store.cpp:245: TCreateOlapStore TProposedWaitParts operationId# 101:0 ProgressState at tablet: 72057594046678944 2025-06-25T15:13:38.693441Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: create_store.cpp:268: TCreateOlapStore TProposedWaitParts operationId# 101:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-06-25T15:13:38.694209Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T15:13:38.694308Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T15:13:38.694353Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-25T15:13:38.694404Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-25T15:13:38.694447Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-25T15:13:38.695328Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T15:13:38.695421Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-25T15:13:38.695450Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-25T15:13:38.695478Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-25T15:13:38.695508Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-25T15:13:38.695575Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-25T15:13:38.697560Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 101:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-25T15:13:38.698757Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-25T15:13:38.698872Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 
2025-06-25T15:13:38.711056Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6230: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-06-25T15:13:38.711138Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1791: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-25T15:13:38.711291Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:632: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 FAKE_COORDINATOR: Erasing txId 101 2025-06-25T15:13:38.713426Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:660: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T15:13:38.713595Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-25T15:13:38.713640Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-25T15:13:38.713765Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T15:13:38.713813Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T15:13:38.713863Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#101:0 progress is 1/1 2025-06-25T15:13:38.713903Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T15:13:38.713947Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-25T15:13:38.714023Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1656: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:340:2316] message: TxId: 101 2025-06-25T15:13:38.714081Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-25T15:13:38.714126Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 101:0 2025-06-25T15:13:38.714164Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 101:0 2025-06-25T15:13:38.714313Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-25T15:13:38.716290Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-25T15:13:38.716366Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:341:2317] TestWaitNotification: OK eventTxId 101 2025-06-25T15:13:38.716884Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:13:38.717141Z node 2 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore" took 267us result status StatusSuccess 2025-06-25T15:13:38.717808Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/OlapStore" PathDescription { Self { Name: "OlapStore" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnStore CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnStoreVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } ColumnStoreDescription { Name: "OlapStore" ColumnShardCount: 1 ColumnShards: 72075186233409546 SchemaPresets { Id: 1 Name: "default" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } } Columns { Id: 2 Name: "data" Type: "Decimal(35,9)" TypeId: 4865 TypeInfo { DecimalPrecision: 35 DecimalScale: 9 } NotNull: false StorageId: "" DefaultValue { } } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } NextColumnFamilyId: 1 } } NextSchemaPresetId: 2 NextTtlSettingsPresetId: 1 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> test_ttl.py::TestTTL::test_ttl[table_Date_0__SYNC-pk_types30-all_types30-index30-Date--SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__SYNC-pk_types27-all_types27-index27-Uint64--SYNC] |95.1%| [TA] $(B)/ydb/core/tx/schemeshard/ut_olap/test-results/unittest/{meta.json ... results_accumulator.log} |95.2%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__ASYNC-pk_types7-all_types7-index7-Timestamp--ASYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint8-pk_types5-all_types5-index5] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1_UNIQUE_SYNC-pk_types29-all_types29-index29-Uint64-UNIQUE-SYNC] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__SYNC-pk_types9-all_types9-index9-Timestamp--SYNC] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__SYNC-pk_types24-all_types24-index24-Uint64--SYNC] >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types7-all_types7-index7---] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootColumnShard [FAIL] Test command err: 2025-06-25T15:03:55.573561Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:419:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:03:55.573881Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:03:55.573975Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/001f3b/r3tmp/tmpjYl4k4/pdisk_1.dat 2025-06-25T15:03:55.886220Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5927, node 1 2025-06-25T15:03:56.104468Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:03:56.104523Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:03:56.104552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:03:56.105167Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:03:56.111902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:03:56.209961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:56.210123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:56.225217Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11651 2025-06-25T15:03:56.771413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_extsubdomain.cpp:194) 2025-06-25T15:03:59.719173Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-25T15:03:59.746231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:03:59.746343Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:03:59.784373Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:03:59.786429Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:03:59.982695Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task 
executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:00.017840Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:00.018520Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:00.019147Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:00.019338Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:00.019607Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:00.019711Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:00.019814Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:00.019904Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:00.019973Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-25T15:04:00.199624Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:04:00.199706Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:04:00.212402Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:04:00.355458Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:04:00.398611Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-25T15:04:00.398684Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-25T15:04:00.429126Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-25T15:04:00.429298Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-25T15:04:00.429487Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-25T15:04:00.429541Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-25T15:04:00.429587Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-25T15:04:00.429650Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-25T15:04:00.429698Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-25T15:04:00.429743Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-25T15:04:00.430226Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-25T15:04:00.451543Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:00.451658Z node 2 :STATISTICS 
DEBUG: schemeshard_impl.cpp:7979: ConnectToSA(), pipe client id: [2:1793:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-25T15:04:00.459835Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1808:2573] 2025-06-25T15:04:00.465740Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1849:2589] 2025-06-25T15:04:00.466001Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1849:2589], schemeshard id = 72075186224037897 2025-06-25T15:04:00.473947Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-25T15:04:00.489490Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-25T15:04:00.489537Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-25T15:04:00.489601Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-25T15:04:00.500122Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:04:00.507034Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-25T15:04:00.507160Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-25T15:04:00.695552Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-25T15:04:00.815665Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-25T15:04:00.914269Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-25T15:04:01.392802Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:04:01.650573Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2153:3026], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:01.650726Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:04:01.667971Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/olap/operations/create_table.cpp:805) 2025-06-25T15:04:01.770069Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-25T15:04:01.770282Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-25T15:04:01.770548Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-25T15:04:01.770666Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2224:2794];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-25T15:04:01.770765Z node 2 :TX_COLUMNSHARD WARN: ... STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:13:09.562998Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:13:09.584789Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:13:09.584871Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:09.584902Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-25T15:13:10.937144Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:13:10.937229Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:10.937262Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:13:12.323777Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:13:12.323869Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:12.323905Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 
2025-06-25T15:13:13.655658Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T15:13:13.681008Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:13:13.681097Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:13.681127Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:13:14.998043Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:13:14.998122Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:14.998155Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-25T15:13:16.357206Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:13:16.357408Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:13:16.385044Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:13:16.385123Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:16.385156Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:13:17.747514Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:13:17.747588Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:17.747617Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-25T15:13:19.091017Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:13:19.091092Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:19.091121Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:13:20.460214Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T15:13:20.482121Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:13:20.482199Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:20.482231Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 
2025-06-25T15:13:21.834820Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7949: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-25T15:13:21.834890Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7961: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T15:13:21.834921Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7992: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-25T15:13:21.834952Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7919: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-25T15:13:22.097111Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:13:22.097173Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:22.097195Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:13:23.519936Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:13:23.520127Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:13:23.542094Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:13:23.542169Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:23.542197Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-25T15:13:24.948264Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:13:24.948359Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:24.948401Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:13:26.273439Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:13:26.273531Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:26.273559Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-25T15:13:27.505102Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-25T15:13:27.526688Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:13:27.526764Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:27.526793Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 
2025-06-25T15:13:28.795173Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:13:28.795260Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:28.795301Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-25T15:13:30.047147Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-25T15:13:30.047364Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-25T15:13:30.084583Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-25T15:13:30.084665Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:30.084697Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-25T15:13:31.403612Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-25T15:13:31.403704Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-25T15:13:31.403738Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. (TWithBackTrace) ydb/library/actors/testlib/test_runtime.h:579: Exception occured while waiting for NKikimr::NStat::TEvStatistics::TEvAnalyzeResponse: (NActors::TSchedulingLimitReachedException) TestActorRuntime Processed over 100000 events.ydb/library/actors/testlib/test_runtime.cpp:716: TBackTrace::Capture()+28 (0x1976D4DC) TWithBackTrace::TWithBackTrace<>()+80 (0x1939B600) NKikimr::NStat::TEvStatistics::TEvAnalyzeResponse::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventRethrow(NActors::TActorId const&, TDuration)+485 (0x1936FC65) NKikimr::NStat::NTestSuiteAnalyzeColumnshard::TTestCaseAnalyzeRebootColumnShard::Execute_(NUnitTest::TTestContext&)+4263 (0x1938C7B7) std::__y1::__function::__func, void ()>::operator()()+280 (0x19397648) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x19C5AE86) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x19C338E9) NKikimr::NStat::NTestSuiteAnalyzeColumnshard::TCurrentTest::Execute()+1204 (0x19396814) NUnitTest::TTestFactory::Execute()+2438 (0x19C351B6) NUnitTest::RunMain(int, char**)+5213 (0x19C553FD) ??+0 (0x7F94D64D6D90) __libc_start_main+128 (0x7F94D64D6E40) _start+41 (0x16C30029) |95.2%| [TA] $(B)/ydb/core/statistics/aggregator/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.2%| [TA] {RESULT} $(B)/ydb/core/statistics/aggregator/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_cant_add_existing_column >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_cant_add_existing_column [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__ASYNC-pk_types13-all_types13-index13-DyNumber--ASYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__ASYNC-pk_types10-all_types10-index10-Timestamp--ASYNC] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] >> test_ttl.py::TestTTL::test_ttl[table_Date_0_UNIQUE_SYNC-pk_types32-all_types32-index32-Date-UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0_UNIQUE_SYNC-pk_types26-all_types26-index26-Uint64-UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1_UNIQUE_SYNC-pk_types5-all_types5-index5-Datetime-UNIQUE-SYNC] [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__SYNC-pk_types0-all_types0-index0-Datetime--SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0_UNIQUE_SYNC-pk_types14-all_types14-index14-DyNumber-UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_1__SYNC-pk_types33-all_types33-index33-Date--SYNC] [GOOD] >> 
test_ttl.py::TestTTL::test_ttl[table_Timestamp_0_UNIQUE_SYNC-pk_types8-all_types8-index8-Timestamp-UNIQUE-SYNC] [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__ASYNC-pk_types13-all_types13-index13-DyNumber--ASYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0_UNIQUE_SYNC-pk_types20-all_types20-index20-Uint32-UNIQUE-SYNC] [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] |95.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_0_UNIQUE_SYNC-pk_types32-all_types32-index32-Date-UNIQUE-SYNC] [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0_UNIQUE_SYNC-pk_types26-all_types26-index26-Uint64-UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_1_UNIQUE_SYNC-pk_types35-all_types35-index35-Date-UNIQUE-SYNC] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__ASYNC-pk_types16-all_types16-index16-DyNumber--ASYNC] [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__ASYNC-pk_types10-all_types10-index10-Timestamp--ASYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1_UNIQUE_SYNC-pk_types11-all_types11-index11-Timestamp-UNIQUE-SYNC] [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_cant_add_existing_column [GOOD] >> test_auditlog.py::test_dml_begin_commit_logged >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__ASYNC-pk_types1-all_types1-index1-Datetime--ASYNC] [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> 
test_ttl.py::TestTTL::test_ttl[table_Datetime_0__SYNC-pk_types0-all_types0-index0-Datetime--SYNC] [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0_UNIQUE_SYNC-pk_types14-all_types14-index14-DyNumber-UNIQUE-SYNC] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_1__SYNC-pk_types33-all_types33-index33-Date--SYNC] [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0_UNIQUE_SYNC-pk_types8-all_types8-index8-Timestamp-UNIQUE-SYNC] [GOOD] >> test_auditlog.py::test_dml_requests_logged_when_unauthorized |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1_UNIQUE_SYNC-pk_types5-all_types5-index5-Datetime-UNIQUE-SYNC] [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0_UNIQUE_SYNC-pk_types20-all_types20-index20-Uint32-UNIQUE-SYNC] [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TTabletPipeTest::TestOpen >> TTabletPipeTest::TestOpen [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestOpen [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_1_UNIQUE_SYNC-pk_types35-all_types35-index35-Date-UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__ASYNC-pk_types22-all_types22-index22-Uint32--ASYNC] [GOOD] |95.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__ASYNC-pk_types16-all_types16-index16-DyNumber--ASYNC] [GOOD] |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/0011aa/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk7/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_root-_good_dynconfig/audit.txt 2025-06-25T15:14:18.887225Z: {"sanitized_token":"**** (B6C6F477)","subject":"root@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} >> test_select.py::TestDML::test_select[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] |95.4%| [TM] {asan, default-linux-x86_64, 
release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1_UNIQUE_SYNC-pk_types11-all_types11-index11-Timestamp-UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_1__ASYNC-pk_types34-all_types34-index34-Date--ASYNC] [GOOD] |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__ASYNC-pk_types1-all_types1-index1-Datetime--ASYNC] [GOOD] |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-std] |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-std] |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> TTabletPipeTest::TestSendAfterReboot >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__ASYNC-pk_types22-all_types22-index22-Uint32--ASYNC] [GOOD] |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> TTabletPipeTest::TestSendAfterReboot [GOOD] |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendAfterReboot [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:112:2057] recipient: [1:108:2139] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:112:2057] recipient: [1:108:2139] Leader for TabletID 9437185 is [0:0:0] sender: [1:113:2057] recipient: [1:109:2140] IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [1:113:2057] recipient: [1:109:2140] Leader for TabletID 9437184 is [1:120:2147] sender: [1:121:2057] recipient: [1:108:2139] Leader for TabletID 9437185 is [1:123:2149] sender: [1:125:2057] recipient: [1:109:2140] Leader for TabletID 9437184 is [1:120:2147] sender: [1:160:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:123:2149] sender: [1:162:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:123:2149] sender: [1:165:2057] recipient: [1:105:2138] Leader for TabletID 9437185 is [1:123:2149] sender: [1:167:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [1:123:2149] sender: [1:169:2057] recipient: [1:168:2178] Leader for TabletID 9437185 is [1:170:2179] sender: [1:171:2057] recipient: [1:168:2178] Leader for TabletID 9437185 is [1:170:2179] sender: [1:199:2057] recipient: [1:14:2061] Leader for TabletID 9437184 is [1:120:2147] 
sender: [1:202:2057] recipient: [1:104:2137] Leader for TabletID 9437184 is [1:120:2147] sender: [1:205:2057] recipient: [1:14:2061] Leader for TabletID 9437184 is [1:120:2147] sender: [1:206:2057] recipient: [1:204:2202] Leader for TabletID 9437184 is [1:207:2203] sender: [1:208:2057] recipient: [1:204:2202] Leader for TabletID 9437184 is [1:207:2203] sender: [1:237:2057] recipient: [1:14:2061] |95.4%| [TA] $(B)/ydb/core/tablet/ut/test-results/unittest/{meta.json ... results_accumulator.log} |95.4%| [TA] {RESULT} $(B)/ydb/core/tablet/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_auditlog.py::test_dml_begin_commit_logged [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__SYNC-pk_types6-all_types6-index6-Timestamp--SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__SYNC-pk_types15-all_types15-index15-DyNumber--SYNC] [GOOD] |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-std] >> test_auditlog.py::test_dml_requests_logged_when_unauthorized [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |95.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-fifo] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__ASYNC-pk_types4-all_types4-index4-Datetime--ASYNC] [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_1__ASYNC-pk_types34-all_types34-index34-Date--ASYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__SYNC-pk_types3-all_types3-index3-Datetime--SYNC] [GOOD] >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__ASYNC-pk_types19-all_types19-index19-Uint32--ASYNC] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v1] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v0] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__SYNC-pk_types21-all_types21-index21-Uint32--SYNC] [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-std] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__ASYNC-pk_types25-all_types25-index25-Uint64--ASYNC] [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> 
test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__ASYNC-pk_types28-all_types28-index28-Uint64--ASYNC] [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__SYNC-pk_types15-all_types15-index15-DyNumber--SYNC] [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_0__ASYNC-pk_types31-all_types31-index31-Date--ASYNC] [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_begin_commit_logged [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/00119b/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk11/testing_out_stuff/test_auditlog.py.test_dml_begin_commit_logged/audit.txt 2025-06-25T15:14:44.005561Z: {"tx_id":"01jyktfnn53kjqpzb0sf1gr7en","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:14:44.005513Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"SUCCESS","start_time":"2025-06-25T15:14:44.004771Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"BeginTransactionRequest","component":"grpc-proxy"} 2025-06-25T15:14:44.245219Z: {"tx_id":"01jyktfnn53kjqpzb0sf1gr7en","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:14:44.245167Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","commit_tx":"0","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-25T15:14:44.022328Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-25T15:14:44.261299Z: {"tx_id":"01jyktfnn53kjqpzb0sf1gr7en","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:14:44.261255Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"SUCCESS","start_time":"2025-06-25T15:14:44.252355Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"CommitTransactionRequest","component":"grpc-proxy"} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_clouds >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__ASYNC-pk_types4-all_types4-index4-Datetime--ASYNC] [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__SYNC-pk_types6-all_types6-index6-Timestamp--SYNC] [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete [GOOD] |95.5%| [TM] 
{asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_unauthorized [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/001199/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk15/testing_out_stuff/test_auditlog.py.test_dml_requests_logged_when_unauthorized/audit.txt 2025-06-25T15:14:46.623900Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:14:46.623851Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-06-25T15:14:46.605564Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-25T15:14:46.794782Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:14:46.794747Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-06-25T15:14:46.743873Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-25T15:14:46.933674Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:14:46.933646Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"select id from `/Root/test_auditlog.py/test-table`","start_time":"2025-06-25T15:14:46.906853Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-25T15:14:47.100922Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:14:47.100882Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-25T15:14:47.050006Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-25T15:14:47.250894Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:14:47.250858Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-06-25T15:14:47.219613Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-25T15:14:47.395972Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:14:47.395928Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-06-25T15:14:47.363087Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v1] >> 
test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] >> test_auditlog.py::test_create_and_remove_tenant |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> DataShardTxOrder::RandomPoints_DelayData [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__ASYNC-pk_types7-all_types7-index7-Timestamp--ASYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/001195/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk8/testing_out_stuff/test_auditlog.py.test_cloud_ids_are_logged.attrs0/audit.txt 2025-06-25T15:14:51.259439Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","cloud_id":"cloud-id-A","end_time":"2025-06-25T15:14:51.259378Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-25T15:14:51.005041Z","subject":"root@builtin","detailed_status":"SUCCESS","resource_id":"database-id-C","operation":"ExecuteDataQueryRequest","folder_id":"folder-id-B","component":"grpc-proxy"} >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] [GOOD] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__SYNC-pk_types3-all_types3-index3-Datetime--SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[update] |95.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-std] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__ASYNC-pk_types25-all_types25-index25-Uint64--ASYNC] [GOOD] >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0] >> test_ttl.py::TestTTL::test_ttl[table_Date_0__SYNC-pk_types30-all_types30-index30-Date--SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_order/unittest >> DataShardTxOrder::RandomPoints_DelayData [GOOD] Test command err: 2025-06-25T15:11:10.841812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:11:10.841854Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 
2025-06-25T15:11:10.846951Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:11:10.859287Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:11:10.859752Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:11:10.860004Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:11:10.869559Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:11:10.912427Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:11:10.912620Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:11:10.914280Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:11:10.914347Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:11:10.914398Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:11:10.914740Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:11:10.914827Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:11:10.915278Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:11:10.976095Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:11:11.010470Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:11:11.010708Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:11:11.010831Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:11:11.010894Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:11:11.010928Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:11:11.010958Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:11:11.011170Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:11:11.011225Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:11:11.011513Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:11:11.011598Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:11:11.011657Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute 
at 9437184 2025-06-25T15:11:11.011692Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:11:11.011736Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:11:11.011771Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:11:11.011817Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:11:11.011847Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:11:11.011882Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:11:11.011974Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:11:11.012011Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:11:11.012069Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:11:11.015036Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nK\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\n \000Z\006\010\010\030\001(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:11:11.015105Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:11:11.015177Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:11:11.015376Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:11:11.015443Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:11:11.015497Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:11:11.015548Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:11:11.015585Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:11:11.015616Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:11:11.015648Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:11:11.016023Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:11:11.016064Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:11:11.016108Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:11:11.016144Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:11:11.016209Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:11:11.016249Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:11:11.016286Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:11:11.016329Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:11:11.016352Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:11:11.028850Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:11:11.028937Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:11:11.028974Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:11:11.029036Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:11:11.029100Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:11:11.029631Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:11:11.029679Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:11:11.029740Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:11:11.029895Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:11:11.029931Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-25T15:11:11.030062Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:11:11.030103Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:11:11.030136Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:11:11.030168Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:11:11.033929Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:11:11.033992Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:11:11.034210Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:11:11.034266Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:11:11.034317Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:11:11.034353Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:11:11.034382Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:11:11.034417Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:11:11.034452Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100 ... node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2313]: {TEvReadSet step# 1000005 txid# 535 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 29} 2025-06-25T15:15:05.369800Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:15:05.369828Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 535 2025-06-25T15:15:05.369969Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2313]: {TEvReadSet step# 1000005 txid# 536 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 30} 2025-06-25T15:15:05.370005Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:15:05.370036Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 536 2025-06-25T15:15:05.370282Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2313]: {TEvReadSet step# 1000005 txid# 512 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 7} 2025-06-25T15:15:05.370320Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:15:05.370350Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 512 2025-06-25T15:15:05.370436Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2313]: {TEvReadSet step# 1000005 txid# 537 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 31} 2025-06-25T15:15:05.370471Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:15:05.370505Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 537 2025-06-25T15:15:05.370664Z node 16 :TX_DATASHARD TRACE: 
datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2313]: {TEvReadSet step# 1000005 txid# 514 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 8} 2025-06-25T15:15:05.370699Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:15:05.370729Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 514 2025-06-25T15:15:05.370900Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-25T15:15:05.370948Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:538] at 9437185 on unit StoreAndSendOutRS 2025-06-25T15:15:05.370991Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 32 at 9437185 from 9437185 to 9437184 txId 538 2025-06-25T15:15:05.371065Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-25T15:15:05.371095Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:538] at 9437185 on unit CompleteOperation 2025-06-25T15:15:05.371160Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 538] from 9437185 at tablet 9437185 send result to client [16:103:2136], exec latency: 0 ms, propose latency: 3 ms 2025-06-25T15:15:05.371209Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-25T15:15:05.371411Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-25T15:15:05.371459Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:539] at 9437185 on unit CompleteOperation 2025-06-25T15:15:05.371510Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 539] from 9437185 at tablet 9437185 send result to client [16:103:2136], exec latency: 0 ms, propose latency: 3 ms 2025-06-25T15:15:05.371549Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-25T15:15:05.372119Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [16:347:2313], Recipient [16:236:2227]: {TEvReadSet step# 1000005 txid# 538 TabletSource# 9437185 TabletDest# 9437184 SetTabletProducer# 9437185 ReadSet.Size()# 7 Seqno# 32 Flags# 0} 2025-06-25T15:15:05.372163Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:15:05.372206Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 9437184 source 9437185 dest 9437184 producer 9437185 txId 538 2025-06-25T15:15:05.372292Z node 16 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 9437184 got read set: {TEvReadSet step# 1000005 txid# 538 TabletSource# 9437185 TabletDest# 9437184 SetTabletProducer# 9437185 ReadSet.Size()# 7 Seqno# 32 Flags# 0} 2025-06-25T15:15:05.372449Z node 16 :TX_DATASHARD TRACE: operation.cpp:67: Filled readset for [1000005:538] from=9437185 to=9437184origin=9437185 2025-06-25T15:15:05.372540Z node 16 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 9437184 2025-06-25T15:15:05.372825Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [16:236:2227], Recipient [16:236:2227]: 
NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:15:05.372868Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:15:05.372922Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:15:05.372970Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:15:05.373010Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [1000005:538] at 9437184 for LoadAndWaitInRS 2025-06-25T15:15:05.373045Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:538] at 9437184 on unit LoadAndWaitInRS 2025-06-25T15:15:05.373081Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:538] at 9437184 is Executed 2025-06-25T15:15:05.373116Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:538] at 9437184 executing on unit LoadAndWaitInRS 2025-06-25T15:15:05.373149Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:538] at 9437184 to execution unit ExecuteDataTx 2025-06-25T15:15:05.373183Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:538] at 9437184 on unit ExecuteDataTx 2025-06-25T15:15:05.374110Z node 16 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [1000005:538] at tablet 9437184 with status COMPLETE 2025-06-25T15:15:05.374175Z node 16 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [1000005:538] at 9437184: {NSelectRow: 2, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 2, SelectRowBytes: 16, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 8, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-25T15:15:05.374234Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:538] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:15:05.374267Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:538] at 9437184 executing on unit ExecuteDataTx 2025-06-25T15:15:05.374301Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:538] at 9437184 to execution unit CompleteOperation 2025-06-25T15:15:05.374335Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:538] at 9437184 on unit CompleteOperation 2025-06-25T15:15:05.374576Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:538] at 9437184 is DelayComplete 2025-06-25T15:15:05.374619Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:538] at 9437184 executing on unit CompleteOperation 2025-06-25T15:15:05.374654Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000005:538] at 9437184 to execution unit CompletedOperations 2025-06-25T15:15:05.374686Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1000005:538] at 9437184 on unit CompletedOperations 2025-06-25T15:15:05.374739Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1000005:538] at 9437184 is Executed 2025-06-25T15:15:05.374767Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000005:538] at 9437184 executing on unit CompletedOperations 
2025-06-25T15:15:05.374798Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1000005:538] at 9437184 has finished 2025-06-25T15:15:05.374831Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:15:05.374860Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:15:05.374892Z node 16 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:15:05.374923Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:15:05.409875Z node 16 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:15:05.409954Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000005:538] at 9437184 on unit CompleteOperation 2025-06-25T15:15:05.410023Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000005 : 538] from 9437184 at tablet 9437184 send result to client [16:103:2136], exec latency: 2 ms, propose latency: 3 ms 2025-06-25T15:15:05.410108Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 1000005 txid# 538 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 32} 2025-06-25T15:15:05.410157Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:15:05.410475Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287938, Sender [16:236:2227], Recipient [16:347:2313]: {TEvReadSet step# 1000005 txid# 538 TabletSource# 9437185 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 32} 2025-06-25T15:15:05.410522Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-25T15:15:05.410566Z node 16 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437185 source 9437185 dest 9437184 consumer 9437184 txId 538 expect 23 30 30 28 29 27 28 29 29 26 25 23 24 22 29 22 29 29 23 23 20 29 4 26 26 4 26 - - - - - actual 23 30 30 28 29 27 28 29 29 26 25 23 24 22 29 22 29 29 23 23 20 29 4 26 26 4 26 - - - - - interm 23 30 30 28 29 27 28 29 29 26 25 23 24 22 29 22 29 29 23 23 20 29 4 26 26 4 26 - - - - - >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__ASYNC-pk_types28-all_types28-index28-Uint64--ASYNC] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__ASYNC-pk_types19-all_types19-index19-Uint32--ASYNC] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__SYNC-pk_types21-all_types21-index21-Uint32--SYNC] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.6%| [TM] {asan, default-linux-x86_64, 
release} ydb/tests/functional/audit/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__SYNC-pk_types27-all_types27-index27-Uint64--SYNC] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__SYNC-pk_types9-all_types9-index9-Timestamp--SYNC] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_0__ASYNC-pk_types31-all_types31-index31-Date--ASYNC] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1_UNIQUE_SYNC-pk_types29-all_types29-index29-Uint64-UNIQUE-SYNC] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__SYNC-pk_types24-all_types24-index24-Uint64--SYNC] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v1] [GOOD] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo] |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.6%| [TA] $(B)/ydb/core/tx/datashard/ut_order/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-std] [GOOD] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo] [GOOD] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] >> test_auditlog.py::test_single_dml_query_logged[delete] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__ASYNC-pk_types7-all_types7-index7-Timestamp--ASYNC] [GOOD] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/00116c/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk1/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_bad_auth-_good_dynconfig/audit.txt 2025-06-25T15:15:04.349028Z: {"sanitized_token":"**** (C877DF61)","subject":"__bad__@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} |95.7%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_order/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__SYNC-pk_types9-all_types9-index9-Timestamp--SYNC] [GOOD] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v0] [GOOD] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_clouds [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1_UNIQUE_SYNC-pk_types29-all_types29-index29-Uint64-UNIQUE-SYNC] [GOOD] >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] >> test_auditlog.py::test_single_dml_query_logged[insert] >> test_auditlog.py::test_single_dml_query_logged[update] [GOOD] |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0] [GOOD] |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] [GOOD] |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected [GOOD] |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v1] [GOOD] |95.8%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_0__SYNC-pk_types30-all_types30-index30-Date--SYNC] [GOOD] |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete [GOOD] |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1] [GOOD] |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/001160/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk2/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_no_auth-_bad_dynconfig/audit.txt 2025-06-25T15:15:12.296548Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"{none}","remote_address":"127.0.0.1","status":"ERROR","subject":"{none}","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_create_and_remove_tenant [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0] [GOOD] |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] |95.9%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__SYNC-pk_types27-all_types27-index27-Uint64--SYNC] [GOOD] |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[replace] |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__SYNC-pk_types24-all_types24-index24-Uint64--SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |95.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[delete] [GOOD] |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo] [GOOD] >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-fifo] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] [GOOD] |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo] [GOOD] |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[insert] [GOOD] |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[update] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/00113d/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk21/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.update/audit.txt 2025-06-25T15:15:22.578253Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:15:22.578211Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-25T15:15:22.417497Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] [GOOD] |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] |96.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-std] [GOOD] |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.1%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/audit/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/00113a/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk14/testing_out_stuff/test_auditlog.py.test_dml_requests_logged_when_sid_is_unexpected/audit.txt 2025-06-25T15:15:24.393279Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:15:24.393234Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-06-25T15:15:24.300067Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-25T15:15:24.669860Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:15:24.669834Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-06-25T15:15:24.506003Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-25T15:15:24.969879Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:15:24.969850Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"select id from `/Root/test_auditlog.py/test-table`","start_time":"2025-06-25T15:15:24.784833Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-25T15:15:25.251602Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:15:25.251579Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-25T15:15:25.097743Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-25T15:15:25.438037Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:15:25.438001Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-06-25T15:15:25.360172Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-25T15:15:25.633729Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:15:25.633693Z","sanitized_token":"othe****ltin 
(27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-06-25T15:15:25.546907Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_create_and_remove_tenant [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/00114e/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk10/testing_out_stuff/test_auditlog.py.test_create_and_remove_tenant/audit.txt 2025-06-25T15:15:13.995278Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"BEGIN INIT DATABASE CONFIG","remote_address":"::1","database":"/Root/users/database"} 2025-06-25T15:15:14.017243Z: {"paths":"[/Root/users/database]","tx_id":"281474976710660","database":"/Root","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DATABASE","component":"schemeshard"} 2025-06-25T15:15:14.056373Z: {"paths":"[/Root/users/database]","tx_id":"281474976710661","database":"/Root","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"ALTER DATABASE","component":"schemeshard"} 2025-06-25T15:15:18.361898Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"END INIT DATABASE CONFIG","remote_address":"::1","database":"/Root/users/database"} 2025-06-25T15:15:19.598110Z: {"paths":"[.metadata/workload_manager/pools/default]","tx_id":"281474976720657","new_owner":"metadata@system","acl_add":"[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin]","database":"/Root/users/database","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"metadata@system","detailed_status":"StatusAccepted","operation":"CREATE RESOURCE POOL","component":"schemeshard"} 2025-06-25T15:15:19.737473Z: {"reason":"Check failed: path: '/Root/users/database/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges)","paths":"[default]","tx_id":"281474976720658","new_owner":"metadata@system","acl_add":"[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin]","database":"/Root/users/database","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"metadata@system","detailed_status":"StatusAlreadyExists","operation":"CREATE RESOURCE POOL","component":"schemeshard"} 2025-06-25T15:15:21.514329Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"BEGIN REMOVE DATABASE","remote_address":"::1","database":"/Root/users/database"} 2025-06-25T15:15:21.523175Z: {"paths":"[/Root/users/database]","tx_id":"281474976710662","database":"/Root","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"DROP DATABASE","component":"schemeshard"} 2025-06-25T15:15:21.551383Z: 
{"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"END REMOVE DATABASE","remote_address":"::1","database":"/Root/users/database"} |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0] [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] [GOOD] |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] [GOOD] |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo] [GOOD] |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_auditlog.py::test_single_dml_query_logged[replace] [GOOD] |96.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[insert] [GOOD] Test command err: AAA 
/home/runner/.ya/build/build_root/yft8/001062/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk18/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.insert/audit.txt 2025-06-25T15:15:37.223709Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:15:37.223647Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-06-25T15:15:37.163962Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} >> test_copy_table.py::TestCopyTable::test_copy_table[table_all_types-pk_types12-all_types12-index12---] |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/001098/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk6/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_root-_bad_dynconfig/audit.txt 2025-06-25T15:15:34.379819Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"ERROR","subject":"root@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[delete] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/00107c/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk17/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.delete/audit.txt 2025-06-25T15:15:35.205964Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:15:35.205921Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-06-25T15:15:35.026593Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0] [GOOD] |96.2%| [TM] {asan, 
default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/partitioning/py3test |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-std] >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected [GOOD] |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |96.2%| [TA] $(B)/ydb/tests/datashard/partitioning/test-results/py3test/{meta.json ... results_accumulator.log} |96.2%| [TA] {RESULT} $(B)/ydb/tests/datashard/partitioning/test-results/py3test/{meta.json ... results_accumulator.log} |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo] [GOOD] |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_scheme_load.py::TestSchemeLoad::test[create_and_drop_tables] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/001069/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk9/testing_out_stuff/test_auditlog.py.test_cloud_ids_are_logged.attrs1/audit.txt 2025-06-25T15:15:38.527229Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:15:38.527187Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-25T15:15:38.381753Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","folder_id":"folder-id-B","component":"grpc-proxy"} |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test[read_update_write_load] >> test_disk.py::TestSafeDiskBreak::test_erase_method |96.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] [GOOD] |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/001040/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk0/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_bad_auth-_bad_dynconfig/audit.txt 2025-06-25T15:15:42.537750Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with 
'%'","sanitized_token":"**** (C877DF61)","remote_address":"127.0.0.1","status":"ERROR","subject":"__bad__@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0_UNIQUE_SYNC-pk_types2-all_types2-index2-Datetime-UNIQUE-SYNC] |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> test_insert.py::TestInsert::test[read_data_during_bulk_upsert] >> test_simple.py::TestSimple::test[alter_table] |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-fifo] |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_alter_tiering.py::TestAlterTiering::test[many_tables] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[replace] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/00102a/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk19/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.replace/audit.txt 2025-06-25T15:15:47.270398Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:15:47.270353Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-06-25T15:15:47.192700Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__SYNC-pk_types18-all_types18-index18-Uint32--SYNC] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |96.3%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/script_execution/py3test |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> tier_delete.py::TestTierDelete::test_delete_s3_ttl >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-std] |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] [GOOD] >> test_select.py::TestDML::test_select[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v1] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-fifo] |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] |96.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/001017/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk13/testing_out_stuff/test_auditlog.py.test_dml_requests_arent_logged_when_sid_is_expected/audit.txt >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1] |96.4%| [TA] $(B)/ydb/tests/functional/sqs/with_quotas/test-results/py3test/{meta.json ... results_accumulator.log} |96.4%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/with_quotas/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_copy_ops.py::TestSchemeShardCopyOps::test_when_copy_table_partition_config >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-std] >> test_ttl.py::TestTTLAlterSettings::test_case >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-std] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/001009/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk3/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_no_auth-_good_dynconfig/audit.txt 2025-06-25T15:15:57.729511Z: {"sanitized_token":"{none}","subject":"{none}","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-std] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] [GOOD] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-std] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/000fff/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk4/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_other-_bad_dynconfig/audit.txt 2025-06-25T15:15:59.621544Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"othe****ltin (27F910A9)","remote_address":"127.0.0.1","status":"ERROR","subject":"other-user@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v1] [GOOD] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_fifo_read_delete_single_message >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options >> test_select.py::TestDML::test_select[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_simple.py::TestSimple::test[alter_table] [GOOD] >> test_simple.py::TestSimple::test[alter_tablestore] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v0] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_fifo_read_delete_single_message [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_only_single_read_infly_from_fifo |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] >> test_ttl.py::TestTTLAlterSettings::test_case [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v1] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_only_single_read_infly_from_fifo [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v0] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v0] [GOOD] >> test_copy_ops.py::TestSchemeShardCopyOps::test_when_copy_table_partition_config [GOOD] >> test_simple.py::TestSimple::test[alter_tablestore] [GOOD] >> test_simple.py::TestSimple::test[table] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_disk.py::TestSafeDiskBreak::test_erase_method [GOOD] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> 
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options [GOOD] >> test_simple.py::TestSimple::test[table] [GOOD] >> test_simple.py::TestSimple::test[tablestores] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLAlterSettings::test_case [GOOD] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-std] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-std] [GOOD] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] [GOOD] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_simple.py::TestSimple::test[tablestores] [GOOD] >> test_simple.py::TestSimple::test_multi[alter_table] [GOOD] >> test_simple.py::TestSimple::test_multi[alter_tablestore] [GOOD] >> test_simple.py::TestSimple::test_multi[table] [GOOD] >> test_simple.py::TestSimple::test_multi[tablestores] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_and_drop_table_many_times_in_range |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> 
test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_disk.py::TestSafeDiskBreak::test_erase_method [GOOD] >> test_scheme_load.py::TestSchemeLoad::test[create_and_drop_tables] [GOOD] >> test_scheme_load.py::TestSchemeLoad::test_multi[create_and_drop_tables] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v0] [GOOD] >> test_select.py::TestDML::test_select[table_all_types-pk_types12-all_types12-index12---] >> test_ttl.py::TestTTLOnIndexedTable::test_case >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-fifo] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] >> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] |96.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_and_drop_table_many_times_in_range [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_many_directories_success >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_many_directories_success [GOOD] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_copy_ops.py::TestSchemeShardCopyOps::test_when_copy_table_partition_config [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Date-pk_types18-all_types18-index18-Date--] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> 
test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] [GOOD] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-fifo] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_simple.py::TestSimple::test_multi[tablestores] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v1] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_single_key_column_failure >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-fifo] [GOOD] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_scheme_load.py::TestSchemeLoad::test_multi[create_and_drop_tables] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] |96.5%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v1] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] [GOOD] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-std] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-fifo] [GOOD] >> test_insert.py::TestInsert::test[read_data_during_bulk_upsert] [GOOD] >> test_insert.py::TestInsert::test_multi[read_data_during_bulk_upsert] |96.5%| [TA] $(B)/ydb/tests/functional/sqs/messaging/test-results/py3test/{meta.json ... results_accumulator.log} >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |96.5%| [TA] $(B)/ydb/tests/datashard/s3/test-results/py3test/{meta.json ... results_accumulator.log} |96.5%| [TA] {RESULT} $(B)/ydb/tests/datashard/s3/test-results/py3test/{meta.json ... results_accumulator.log} |96.5%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/messaging/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok [GOOD] |96.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_single_key_column_failure [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_many_directories_success [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] [GOOD] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1] [FAIL] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] >> 
test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> overlapping_portions.py::TestOverlappingPortions::test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0_UNIQUE_SYNC-pk_types2-all_types2-index2-Datetime-UNIQUE-SYNC] [GOOD] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__SYNC-pk_types18-all_types18-index18-Uint32--SYNC] [GOOD] >> test_select.py::TestDML::test_select[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters [GOOD] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] [GOOD] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/dump_restore/py3test >> test_dump_restore.py::TestDumpRestore::test_dump_restore[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] |96.6%| [TA] $(B)/ydb/tests/datashard/dump_restore/test-results/py3test/{meta.json ... results_accumulator.log} |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] [GOOD] |96.6%| [TA] {RESULT} $(B)/ydb/tests/datashard/dump_restore/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_yandex_audit.py::TestCloudEvents::test_create_update_delete_one_queue >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v1] [GOOD] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_single_key_column_failure [GOOD] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] [GOOD] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__SYNC-pk_types18-all_types18-index18-Uint32--SYNC] [GOOD] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok [GOOD] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] [GOOD] >> test_ttl.py::TestTTLOnIndexedTable::test_case [GOOD] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0_UNIQUE_SYNC-pk_types2-all_types2-index2-Datetime-UNIQUE-SYNC] [GOOD] |96.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] [GOOD] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] |96.7%| [TA] $(B)/ydb/tests/datashard/ttl/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] [GOOD] |96.7%| [TA] {RESULT} $(B)/ydb/tests/datashard/ttl/test-results/py3test/{meta.json ... results_accumulator.log} >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_directory_from_leaf_success |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] [GOOD] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLOnIndexedTable::test_case [GOOD] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_not_single_key_column_failure |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] [GOOD] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] |96.7%| [TA] $(B)/ydb/tests/datashard/split_merge/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] |96.7%| [TA] {RESULT} $(B)/ydb/tests/datashard/split_merge/test-results/py3test/{meta.json ... results_accumulator.log} >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_create_and_remove_directory_success >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_directory_from_leaf_success [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_table_that_doesnt_exist_failure [GOOD] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-fifo] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test[read_update_write_load] [GOOD] >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test_multi[read_update_write_load] >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test_multi[read_update_write_load] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_auditlog.py::test_single_dml_query_logged[select] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_not_single_key_column_failure [GOOD] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_create_and_remove_directory_success [GOOD] >> test_yandex_audit.py::TestCloudEvents::test_create_update_delete_one_queue [GOOD] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> 
test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_path_with_long_name_failed >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v1] [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v0] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v1] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] |96.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_path_with_long_name_failed [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_table_and_path_with_name_clash_unsuccessful >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_table_and_path_with_name_clash_unsuccessful [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[select] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test_multi[read_update_write_load] [GOOD] Test command err: Was written: 0.0 MiB, Speed: 0.0 MiB/s Step 1. 
only write Write: 10% 2405 30% 2405 50% 2405 90% 2405 99% 2405 ms Write: 10% 3907 30% 3907 50% 3907 90% 3907 99% 3907 ms Write: 10% 3194 30% 3194 50% 3194 90% 3194 99% 3194 ms Write: 10% 6838 30% 6838 50% 6838 90% 6838 99% 6838 ms Write: 10% 6673 30% 6673 50% 6673 90% 6673 99% 6673 ms Write: 10% 6143 30% 6143 50% 6143 90% 6143 99% 6143 ms Write: 10% 7824 30% 7824 50% 7824 90% 7824 99% 7824 ms Write: 10% 5651 30% 5651 50% 5651 90% 5651 99% 5651 ms Write: 10% 8045 30% 8045 50% 8045 90% 8045 99% 8045 ms Write: 10% 8121 30% 8121 50% 8121 90% 8121 99% 8121 ms Write: 10% 8034 30% 8034 50% 8034 90% 8034 99% 8034 ms Write: 10% 7170 30% 7170 50% 7170 90% 7170 99% 7170 ms Write: 10% 10060 30% 10060 50% 10060 90% 10060 99% 10060 ms Write: 10% 12488 30% 12488 50% 12488 90% 12488 99% 12488 ms Write: 10% 9739 30% 9739 50% 9739 90% 9739 99% 9739 ms Write: 10% 13708 30% 13708 50% 13708 90% 13708 99% 13708 ms Write: 10% 14100 30% 14100 50% 14100 90% 14100 99% 14100 ms Write: 10% 14841 30% 14841 50% 14841 90% 14841 99% 14841 ms Write: 10% 16574 30% 16574 50% 16574 90% 16574 99% 16574 ms Write: 10% 14164 30% 14164 50% 14164 90% 14164 99% 14164 ms Write: 10% 13264 30% 13264 50% 13264 90% 13264 99% 13264 ms Write: 10% 12882 30% 12882 50% 12882 90% 12882 99% 12882 ms Write: 10% 13146 30% 13146 50% 13146 90% 13146 99% 13146 ms Write: 10% 12022 30% 12022 50% 12022 90% 12022 99% 12022 ms Write: 10% 16295 30% 16295 50% 16295 90% 16295 99% 16295 ms Write: 10% 11848 30% 11848 50% 11848 90% 11848 99% 11848 ms Write: 10% 13606 30% 13606 50% 13606 90% 13606 99% 13606 ms Write: 10% 12495 30% 12495 50% 12495 90% 12495 99% 12495 ms Write: 10% 12590 30% 12590 50% 12590 90% 12590 99% 12590 ms Write: 10% 7196 30% 7196 50% 7196 90% 7196 99% 7196 ms Write: 10% 9038 30% 9038 50% 9038 90% 9038 99% 9038 ms Write: 10% 8569 30% 8569 50% 8569 90% 8569 99% 8569 ms Write: 10% 12090 30% 12090 50% 12090 90% 12090 99% 12090 ms Write: 10% 10792 30% 10792 50% 10792 90% 10792 99% 10792 ms Write: 10% 11692 30% 11692 50% 11692 90% 11692 99% 11692 ms Write: 10% 10637 30% 10637 50% 10637 90% 10637 99% 10637 ms Write: 10% 9361 30% 9361 50% 9361 90% 9361 99% 9361 ms Write: 10% 5851 30% 5851 50% 5851 90% 5851 99% 5851 ms Write: 10% 9703 30% 9703 50% 9703 90% 9703 99% 9703 ms Write: 10% 9108 30% 9108 50% 9108 90% 9108 99% 9108 ms Write: 10% 4095 30% 4095 50% 4095 90% 4095 99% 4095 ms Write: 10% 6850 30% 6850 50% 6850 90% 6850 99% 6850 ms Write: 10% 2522 30% 2522 50% 2522 90% 2522 99% 2522 ms Write: 10% 1966 30% 1966 50% 1966 90% 1966 99% 1966 ms Write: 10% 7736 30% 7736 50% 7736 90% 7736 99% 7736 ms Write: 10% 6260 30% 6260 50% 6260 90% 6260 99% 6260 ms Write: 10% 5214 30% 5214 50% 5214 90% 5214 99% 5214 ms Write: 10% 3663 30% 3663 50% 3663 90% 3663 99% 3663 ms Write: 10% 3589 30% 3589 50% 3589 90% 3589 99% 3589 ms Write: 10% 4273 30% 4273 50% 4273 90% 4273 99% 4273 ms Write: 10% 4854 30% 4854 50% 4854 90% 4854 99% 4854 ms Write: 10% 2222 30% 2222 50% 2222 90% 2222 99% 2222 ms Write: 10% 2754 30% 2754 50% 2754 90% 2754 99% 2754 ms Write: 10% 4050 30% 4050 50% 4050 90% 4050 99% 4050 ms Write: 10% 5510 30% 5510 50% 5510 90% 5510 99% 5510 ms Write: 10% 2365 30% 2365 50% 2365 90% 2365 99% 2365 ms Write: 10% 3583 30% 3583 50% 3583 90% 3583 99% 3583 ms Write: 10% 6130 30% 6130 50% 6130 90% 6130 99% 6130 ms Write: 10% 4193 30% 4193 50% 4193 90% 4193 99% 4193 ms Write: 10% 3361 30% 3361 50% 3361 90% 3361 99% 3361 ms Write: 10% 11106 30% 11106 50% 11106 90% 11106 99% 11106 ms Write: 10% 2347 30% 2347 50% 2347 90% 2347 99% 2347 ms Write: 10% 3424 30% 3424 50% 
3424 90% 3424 99% 3424 ms Write: 10% 2276 30% 2276 50% 2276 90% 2276 99% 2276 ms Step 2. read write Write: 10% 1897 30% 1897 50% 1897 90% 1897 99% 1897 ms Write: 10% 2015 30% 2015 50% 2015 90% 2015 99% 2015 ms Write: 10% 3609 30% 3609 50% 3609 90% 3609 99% 3609 ms Write: 10% 9024 30% 9024 50% 9024 90% 9024 99% 9024 ms Write: 10% 10387 30% 10387 50% 10387 90% 10387 99% 10387 ms Write: 10% 10881 30% 10881 50% 10881 90% 10881 99% 10881 ms Write: 10% 11755 30% 11755 50% 11755 90% 11755 99% 11755 ms Write: 10% 11873 30% 11873 50% 11873 90% 11873 99% 11873 ms Write: 10% 11792 30% 11792 50% 11792 90% 11792 99% 11792 ms Write: 10% 13843 30% 13843 50% 13843 90% 13843 99% 13843 ms Write: 10% 14387 30% 14387 50% 14387 90% 14387 99% 14387 ms Write: 10% 13802 30% 13802 50% 13802 90% 13802 99% 13802 ms Write: 10% 13078 30% 13078 50% 13078 90% 13078 99% 13078 ms Write: 10% 13866 30% 13866 50% 13866 90% 13866 99% 13866 ms Write: 10% 12273 30% 12273 50% 12273 90% 12273 99% 12273 ms Write: 10% 11705 30% 11705 50% 11705 90% 11705 99% 11705 ms Write: 10% 13169 30% 13169 50% 13169 90% 13169 99% 13169 ms Write: 10% 12706 30% 12706 50% 12706 90% 12706 99% 12706 ms Write: 10% 10876 30% 10876 50% 10876 90% 10876 99% 10876 ms Write: 10% 13226 30% 13226 50% 13226 90% 13226 99% 13226 ms Write: 10% 11280 30% 11280 50% 11280 90% 11280 99% 11280 ms Write: 10% 13389 30% 13389 50% 13389 90% 13389 99% 13389 ms Write: 10% 10497 30% 10497 50% 10497 90% 10497 99% 10497 ms Write: 10% 8285 30% 8285 50% 8285 90% 8285 99% 8285 ms Write: 10% 10056 30% 10056 50% 10056 90% 10056 99% 10056 ms Write: 10% 9882 30% 9882 50% 9882 90% 9882 99% 9882 ms Write: 10% 9210 30% 9210 50% 9210 90% 9210 99% 9210 ms Write: 10% 6869 30% 6869 50% 6869 90% 6869 99% 6869 ms Write: 10% 7515 30% 7515 50% 7515 90% 7515 99% 7515 ms Write: 10% 11216 30% 11216 50% 11216 90% 11216 99% 11216 ms Write: 10% 11673 30% 11673 50% 11673 90% 11673 99% 11673 ms Write: 10% 7600 30% 7600 50% 7600 90% 7600 99% 7600 ms Write: 10% 10933 30% 10933 50% 10933 90% 10933 99% 10933 ms Write: 10% 8192 30% 8192 50% 8192 90% 8192 99% 8192 ms Write: 10% 8901 30% 8901 50% 8901 90% 8901 99% 8901 ms Write: 10% 7186 30% 7186 50% 7186 90% 7186 99% 7186 ms Write: 10% 11647 30% 11647 50% 11647 90% 11647 99% 11647 ms Write: 10% 6040 30% 6040 50% 6040 90% 6040 99% 6040 ms Write: 10% 5675 30% 5675 50% 5675 90% 5675 99% 5675 ms Write: 10% 1925 30% 1925 50% 1925 90% 1925 99% 1925 ms Write: 10% 1965 30% 1965 50% 1965 90% 1965 99% 1965 ms Write: 10% 5730 30% 5730 50% 5730 90% 5730 99% 5730 ms Write: 10% 7321 30% 7321 50% 7321 90% 7321 99% 7321 ms Write: 10% 1675 30% 1675 50% 1675 90% 1675 99% 1675 ms Write: 10% 3086 30% 3086 50% 3086 90% 3086 99% 3086 ms Write: 10% 2406 30% 2406 50% 2406 90% 2406 99% 2406 ms Write: 10% 6514 30% 6514 50% 6514 90% 6514 99% 6514 ms Write: 10% 11450 30% 11450 50% 11450 90% 11450 99% 11450 ms Write: 10% 4381 30% 4381 50% 4381 90% 4381 99% 4381 ms Write: 10% 2594 30% 2594 50% 2594 90% 2594 99% 2594 ms Write: 10% 2215 30% 2215 50% 2215 90% 2215 99% 2215 ms Write: 10% 3905 30% 3905 50% 3905 90% 3905 99% 3905 ms Write: 10% 1945 30% 1945 50% 1945 90% 1945 99% 1945 ms Write: 10% 2404 30% 2404 50% 2404 90% 2404 99% 2404 ms Write: 10% 2934 30% 2934 50% 2934 90% 2934 99% 2934 ms Write: 10% 2445 30% 2445 50% 2445 90% 2445 99% 2445 ms Write: 10% 6044 30% 6044 50% 6044 90% 6044 99% 6044 ms Write: 10% 2656 30% 2656 50% 2656 90% 2656 99% 2656 ms Write: 10% 6017 30% 6017 50% 6017 90% 6017 99% 6017 ms Write: 10% 2502 30% 2502 50% 2502 90% 2502 99% 2502 ms Write: 10% 2309 30% 2309 50% 
2309 90% 2309 99% 2309 ms Write: 10% 6398 30% 6398 50% 6398 90% 6398 99% 6398 ms Write: 10% 2830 30% 2830 50% 2830 90% 2830 99% 2830 ms Write: 10% 2480 30% 2480 50% 2480 90% 2480 99% 2480 ms Read: 10% 7056 30% 10084 50% 13113 90% 19169 99% 20532 ms Step 3. write modify Write: 10% 1212 30% 1212 50% 1212 90% 1212 99% 1212 ms Write: 10% 3065 30% 3065 50% 3065 90% 3065 99% 3065 ms Write: 10% 5428 30% 5428 50% 5428 90% 5428 99% 5428 ms Write: 10% 5462 30% 5462 50% 5462 90% 5462 99% 5462 ms Write: 10% 4102 30% 4102 50% 4102 90% 4102 99% 4102 ms Write: 10% 10355 30% 10355 50% 10355 90% 10355 99% 10355 ms Was written: 18.06640625 MiB, Speed: 0.3011067708333333 MiB/s Write: 10% 11234 30% 11234 50% 11234 90% 11234 99% 11234 ms Write: 10% 13851 30% 13851 50% 13851 90% 13851 99% 13851 ms Write: 10% 13006 30% 13006 50% 13006 90% 13006 99% 13006 ms Write: 10% 13778 30% 13778 50% 13778 90% 13778 99% 13778 ms Write: 10% 14690 30% 14690 50% 14690 90% 14690 99% 14690 ms Write: 10% 15431 30% 15431 50% 15431 90% 15431 99% 15431 ms Write: 10% 13633 30% 13633 50% 13633 90% 13633 99% 13633 ms Write: 10% 14796 30% 14796 50% 14796 90% 14796 99% 14796 ms Write: 10% 14854 30% 14854 50% 14854 90% 14854 99% 14854 ms Write: 10% 15267 30% 15267 50% 15267 90% 15267 99% 15267 ms Write: 10% 13326 30% 13326 50% 13326 90% 13326 99% 13326 ms Write: 10% 13156 30% 13156 50% 13156 90% 13156 99% 13156 ms Write: 10% 14769 30% 14769 50% 14769 90% 14769 99% 14769 ms Write: 10% 15139 30% 15139 50% 15139 90% 15139 99% 15139 ms Write: 10% 11616 30% 11616 50% 11616 90% 11616 99% 11616 ms Write: 10% 15150 30% 15150 50% 15150 90% 15150 99% 15150 ms Write: 10% 15808 30% 15808 50% 15808 90% 15808 99% 15808 ms Write: 10% 14856 30% 14856 50% 14856 90% 14856 99% 14856 ms Write: 10% 14887 30% 14887 50% 14887 90% 14887 99% 14887 ms Write: 10% 14206 30% 14206 50% 14206 90% 14206 99% 14206 ms Write: 10% 9929 30% 9929 50% 9929 90% 9929 99% 9929 ms Write: 10% 8321 30% 8321 50% 8321 90% 8321 99% 8321 ms Write: 10% 16437 30% 16437 50% 16437 90% 16437 99% 16437 ms Write: 10% 13057 30% 13057 50% 13057 90% 13057 99% 13057 ms Write: 10% 8021 30% 8021 50% 8021 90% 8021 99% 8021 ms Write: 10% 16144 30% 16144 50% 16144 90% 16144 99% 16144 ms Write: 10% 11950 30% 11950 50% 11950 90% 11950 99% 11950 ms Write: 10% 3610 30% 3610 50% 3610 90% 3610 99% 3610 ms Write: 10% 11065 30% 11065 50% 11065 90% 11065 99% 11065 ms Write: 10% 6743 30% 6743 50% 6743 90% 6743 99% 6743 ms Write: 10% 11273 30% 11273 50% 11273 90% 11273 99% 11273 ms Write: 10% 5577 30% 5577 50% 5577 90% 5577 99% 5577 ms Write: 10% 5080 30% 5080 50% 5080 90% 5080 99% 5080 ms Write: 10% 16622 30% 16622 50% 16622 90% 16622 99% 16622 ms Write: 10% 10208 30% 10208 50% 10208 90% 10208 99% 10208 ms Write: 10% 3710 30% 3710 50% 3710 90% 3710 99% 3710 ms Write: 10% 13860 30% 13860 50% 13860 90% 13860 99% 13860 ms Write: 10% 3525 30% 3525 50% 3525 90% 3525 99% 3525 ms Write: 10% 6523 30% 6523 50% 6523 90% 6523 99% 6523 ms Write: 10% 8156 30% 8156 50% 8156 90% 8156 99% 8156 ms Write: 10% 3112 30% 3112 50% 3112 90% 3112 99% 3112 ms Write: 10% 5003 30% 5003 50% 5003 90% 5003 99% 5003 ms Write: 10% 8904 30% 8904 50% 8904 90% 8904 99% 8904 ms Write: 10% 4730 30% 4730 50% 4730 90% 4730 99% 4730 ms Write: 10% 5521 30% 5521 50% 5521 90% 5521 99% 5521 ms Write: 10% 4610 30% 4610 50% 4610 90% 4610 99% 4610 ms Write: 10% 5913 30% 5913 50% 5913 90% 5913 99% 5913 ms Write: 10% 11073 30% 11073 50% 11073 90% 11073 99% 11073 ms Write: 10% 2601 30% 2601 50% 2601 90% 2601 99% 2601 ms Write: 10% 4383 30% 4383 50% 4383 90% 4383 
99% 4383 ms Write: 10% 2647 30% 2647 50% 2647 90% 2647 99% 2647 ms Write: 10% 9169 30% 9169 50% 9169 90% 9169 99% 9169 ms Write: 10% 5203 30% 5203 50% 5203 90% 5203 99% 5203 ms Write: 10% 7447 30% 7447 50% 7447 90% 7447 99% 7447 ms Write: 10% 3985 30% 3985 50% 3985 90% 3985 99% 3985 ms Write: 10% 3394 30% 3394 50% 3394 90% 3394 99% 3394 ms Write: 10% 5715 30% 5715 50% 5715 90% 5715 99% 5715 ms Write: 10% 4619 30% 4619 50% 4619 90% 4619 99% 4619 ms Update: 10% 1134 30% 1134 50% 1134 90% 1134 99% 1134 ms Step 4. read modify write Write: 10% 1044 30% 1044 50% 1044 90% 1044 99% 1044 ms Write: 10% 1256 30% 1256 50% 1256 90% 1256 99% 1256 ms Write: 10% 1936 30% 1936 50% 1936 90% 1936 99% 1936 ms Write: 10% 2481 30% 2481 50% 2481 90% 2481 99% 2481 ms Write: 10% 11177 30% 11177 50% 11177 90% 11177 99% 11177 ms Write: 10% 10237 30% 10237 50% 10237 90% 10237 99% 10237 ms Write: 10% 10717 30% 10717 50% 10717 90% 10717 99% 10717 ms Write: 10% 12351 30% 12351 50% 12351 90% 12351 99% 12351 ms Write: 10% 13240 30% 13240 50% 13240 90% 13240 99% 13240 ms Write: 10% 15900 30% 15900 50% 15900 90% 15900 99% 15900 ms Write: 10% 15635 30% 15635 50% 15635 90% 15635 99% 15635 ms Write: 10% 16928 30% 16928 50% 16928 90% 16928 99% 16928 ms Write: 10% 13560 30% 13560 50% 13560 90% 13560 99% 13560 ms Write: 10% 15431 30% 15431 50% 15431 90% 15431 99% 15431 ms Write: 10% 14781 30% 14781 50% 14781 90% 14781 99% 14781 ms Write: 10% 11367 30% 11367 50% 11367 90% 11367 99% 11367 ms Write: 10% 15458 30% 15458 50% 15458 90% 15458 99% 15458 ms Write: 10% 11698 30% 11698 50% 11698 90% 11698 99% 11698 ms Write: 10% 5831 30% 5831 50% 5831 90% 5831 99% 5831 ms Write: 10% 15197 30% 15197 50% 15197 90% 15197 99% 15197 ms Write: 10% 6461 30% 6461 50% 6461 90% 6461 99% 6461 ms Write: 10% 14646 30% 14646 50% 14646 90% 14646 99% 14646 ms Write: 10% 10481 30% 10481 50% 10481 90% 10481 99% 10481 ms Write: 10% 9336 30% 9336 50% 9336 90% 9336 99% 9336 ms Write: 10% 14879 30% 14879 50% 14879 90% 14879 99% 14879 ms Write: 10% 3784 30% 3784 50% 3784 90% 3784 99% 3784 ms Write: 10% 13312 30% 13312 50% 13312 90% 13312 99% 13312 ms Write: 10% 9137 30% 9137 50% 9137 90% 9137 99% 9137 ms Write: 10% 8718 30% 8718 50% 8718 90% 8718 99% 8718 ms Write: 10% 6862 30% 6862 50% 6862 90% 6862 99% 6862 ms Write: 10% 7583 30% 7583 50% 7583 90% 7583 99% 7583 ms Write: 10% 12482 30% 12482 50% 12482 90% 12482 99% 12482 ms Write: 10% 15622 30% 15622 50% 15622 90% 15622 99% 15622 ms Write: 10% 13316 30% 13316 50% 13316 90% 13316 99% 13316 ms Write: 10% 12648 30% 12648 50% 12648 90% 12648 99% 12648 ms Write: 10% 15294 30% 15294 50% 15294 90% 15294 99% 15294 ms Write: 10% 10004 30% 10004 50% 10004 90% 10004 99% 10004 ms Write: 10% 10347 30% 10347 50% 10347 90% 10347 99% 10347 ms Write: 10% 4478 30% 4478 50% 4478 90% 4478 99% 4478 ms Write: 10% 14482 30% 14482 50% 14482 90% 14482 99% 14482 ms Write: 10% 3392 30% 3392 50% 3392 90% 3392 99% 3392 ms Write: 10% 3050 30% 3050 50% 3050 90% 3050 99% 3050 ms Write: 10% 8834 30% 8834 50% 8834 90% 8834 99% 8834 ms Write: 10% 8126 30% 8126 50% 8126 90% 8126 99% 8126 ms Write: 10% 6599 30% 6599 50% 6599 90% 6599 99% 6599 ms Write: 10% 2489 30% 2489 50% 2489 90% 2489 99% 2489 ms Write: 10% 5641 30% 5641 50% 5641 90% 5641 99% 5641 ms Write: 10% 6056 30% 6056 50% 6056 90% 6056 99% 6056 ms Write: 10% 5699 30% 5699 50% 5699 90% 5699 99% 5699 ms Write: 10% 10834 30% 10834 50% 10834 90% 10834 99% 10834 ms Write: 10% 4697 30% 4697 50% 4697 90% 4697 99% 4697 ms Write: 10% 10652 30% 10652 50% 10652 90% 10652 99% 10652 ms Write: 10% 
10258 30% 10258 50% 10258 90% 10258 99% 10258 ms Write: 10% 3523 30% 3523 50% 3523 90% 3523 99% 3523 ms Write: 10% 2980 30% 2980 50% 2980 90% 2980 99% 2980 ms Write: 10% 3035 30% 3035 50% 3035 90% 3035 99% 3035 ms Write: 10% 6517 30% 6517 50% 6517 90% 6517 99% 6517 ms Write: 10% 10965 30% 10965 50% 10965 90% 10965 99% 10965 ms Write: 10% 5427 30% 5427 50% 5427 90% 5427 99% 5427 ms Write: 10% 11056 30% 11056 50% 11056 90% 11056 99% 11056 ms Write: 10% 4167 30% 4167 50% 4167 90% 4167 99% 4167 ms Write: 10% 3279 30% 3279 50% 3279 90% 3279 99% 3279 ms Write: 10% 4749 30% 4749 50% 4749 90% 4749 99% 4749 ms Write: 10% 8798 30% 8798 50% 8798 90% 8798 99% 8798 ms Read: 10% 8780 30% 11720 50% 14661 90% 20542 99% 21865 ms Update: 10% 1864 30% 1864 50% 1864 90% 1864 99% 1864 ms >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_not_single_key_column_failure [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/001020/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk5/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_other-_good_dynconfig/audit.txt 2025-06-25T15:18:24.690132Z: {"sanitized_token":"othe****ltin (27F910A9)","subject":"other-user@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_create_and_remove_directory_success [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_decreasing_number_of_generations_it_is_raise_error |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_table_that_doesnt_exist_failure [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] [GOOD] >> 
test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[select] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/yft8/001010/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk20/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.select/audit.txt 2025-06-25T15:18:36.176021Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-25T15:18:36.175980Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"select id from `/Root/test_auditlog.py/test-table`","start_time":"2025-06-25T15:18:36.018789Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |96.8%| [TA] $(B)/ydb/tests/functional/audit/test-results/py3test/{meta.json ... results_accumulator.log} |96.8%| [TA] {RESULT} $(B)/ydb/tests/functional/audit/test-results/py3test/{meta.json ... results_accumulator.log} |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v1] [GOOD] >> test_insert.py::TestInsert::test_multi[read_data_during_bulk_upsert] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_decreasing_number_of_generations_it_is_raise_error [GOOD] >> tier_delete.py::TestTierDelete::test_delete_s3_ttl [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_column_after_table_creation_with_data_and_success >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v1] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test |96.8%| [TM] {asan, default-linux-x86_64, 
release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_alter_tiering.py::TestAlterTiering::test[many_tables] [FAIL] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0] [GOOD] >> test_alter_tiering.py::TestAlterTiering::test_multi[many_tables] >> test_alter_tiering.py::TestAlterTiering::test_multi[many_tables] [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_table_and_path_with_name_clash_unsuccessful [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_other_keys_then_ok |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_decreasing_number_of_generations_it_is_raise_error [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_primary_key_and_other_scheme_then_ok >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_column_after_table_creation_with_data_and_success [GOOD] |96.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-std] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/ttl_tiering/py3test >> tier_delete.py::TestTierDelete::test_delete_s3_ttl [GOOD] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> 
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_partition_config_options >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_other_keys_then_ok [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v1] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_insert.py::TestInsert::test_multi[read_data_during_bulk_upsert] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0] [GOOD] Test command err: run test with cloud_id=CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25 folder_id=folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25 iam_token=usr_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25 cloud_account=acc_985b1ac8-51d7-11f0-a77e-d00d12801a25 2025-06-25T15:18:06.480833Z: {"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25]","tx_id":"281474976720691","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DIRECTORY","component":"schemeshard"} ======================================== 2025-06-25T15:18:06.796614Z: {"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25/000000000000000107ti]","tx_id":"281474976720697","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DIRECTORY","component":"schemeshard"} ======================================== 2025-06-25T15:18:06.869360Z: {"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25/000000000000000107ti/v2]","tx_id":"281474976720698","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DIRECTORY","component":"schemeshard"} ======================================== 2025-06-25T15:18:07.027473Z: {"request_id":"11bf508e-e5ae5a0e-d3383252-50d66092","cloud_id":"CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25","status":"SUCCESS","account":"CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25","subject":"fake_user_sid@as","operation":"create_queue","component":"ymq","folder_id":"folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25"} 
======================================== 2025-06-25T15:18:07.970777Z: {"request_id":"11bf508e-e5ae5a0e-d3383252-50d66092","permission":"ymq.queues.create","id":"11024294127891519796$CreateMessageQueue$1750864686670","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25","masked_token":"CLOU****1a25 (98D9E488)","auth_type":"{none}","created_at":"1750864686670","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000107ti","labels":"{}","operation":"CreateMessageQueue","folder_id":"folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25","component":"ymq"} ======================================== 2025-06-25T15:18:08.936477Z: {"request_id":"6b93a07e-255fb519-daaa5c6e-a523c547","permission":"ymq.queues.setAttributes","id":"9326019861391098199$UpdateMessageQueue$1750864688243","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25","masked_token":"CLOU****1a25 (98D9E488)","auth_type":"{none}","created_at":"1750864688243","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000107ti","labels":"{}","operation":"UpdateMessageQueue","folder_id":"folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25","component":"ymq"} ======================================== 2025-06-25T15:18:10.182264Z: {"request_id":"be919be2-150d9499-e5cf76ef-d28e943d","permission":"ymq.queues.setAttributes","id":"342226294135578002$UpdateMessageQueue$1750864689338","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25","masked_token":"CLOU****1a25 (98D9E488)","auth_type":"{none}","created_at":"1750864689338","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000107ti","labels":"{}","operation":"UpdateMessageQueue","folder_id":"folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25","component":"ymq"} ======================================== 2025-06-25T15:18:10.525596Z: {"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25/000000000000000107ti/v2]","tx_id":"281474976720721","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"DROP DIRECTORY","component":"schemeshard"} ======================================== 2025-06-25T15:18:10.617280Z: {"paths":"[/Root/TenantSQS/CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25/000000000000000107ti]","tx_id":"281474976720722","database":"/Root/TenantSQS","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"DROP DIRECTORY","component":"schemeshard"} ======================================== 2025-06-25T15:18:10.697061Z: {"request_id":"c7719660-4ab0a38f-7a1b766d-ccc2b526","cloud_id":"CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25","status":"SUCCESS","account":"CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25","subject":"fake_user_sid@as","queue":"000000000000000107ti","resource_id":"000000000000000107ti","operation":"delete_queue","component":"ymq","folder_id":"folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25"} ======================================== 2025-06-25T15:18:11.184506Z: {"request_id":"c7719660-4ab0a38f-7a1b766d-ccc2b526","permission":"ymq.queues.delete","id":"11184580416614429400$DeleteMessageQueue$1750864690369","idempotency_id":"{none}","cloud_id":"CLOUD_FOR_folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25","masked_token":"CLOU****1a25 
(98D9E488)","auth_type":"{none}","created_at":"1750864690369","status":"SUCCESS","subject":"fake_user_sid@as","queue":"000000000000000107ti","labels":"{}","operation":"DeleteMessageQueue","folder_id":"folder_acc_985b1ac8-51d7-11f0-a77e-d00d12801a25","component":"ymq"} ======================================== ======================================== |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_primary_key_and_other_scheme_then_ok [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> test_alter_compression.py::TestAlterCompression::test[alter_compression] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-fifo] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_partition_config_options [GOOD] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_select.py::TestDML::test_select[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_column_after_table_creation_with_data_and_success [GOOD] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_alter_tiering.py::TestAlterTiering::test_multi[many_tables] [GOOD] Test command err: Thread 0 failed |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> 
test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v1] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_partition_config_options [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_reading_from_empty_queue >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-std] [GOOD] >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] |96.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_other_keys_then_ok [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_primary_key_and_other_scheme_then_ok [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_delete_path_with_folder_then_get_error_response >> test_select.py::TestDML::test_select[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] |97.0%| [TA] 
$(B)/ydb/tests/olap/ttl_tiering/test-results/py3test/{meta.json ... results_accumulator.log} >> test_select.py::TestDML::test_select[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-fifo] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_delete_path_with_folder_then_get_error_response [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] |97.0%| [TA] {RESULT} $(B)/ydb/tests/olap/ttl_tiering/test-results/py3test/{meta.json ... results_accumulator.log} >> test_select.py::TestDML::test_select[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_reading_from_empty_queue [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest |97.0%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/ttl/py3test |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_all_types-pk_types12-all_types12-index12---] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/nemesis/ut/py3test >> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills [GOOD] |97.0%| [TA] $(B)/ydb/tests/tools/nemesis/ut/test-results/py3test/{meta.json ... results_accumulator.log} |97.0%| [TA] {RESULT} $(B)/ydb/tests/tools/nemesis/ut/test-results/py3test/{meta.json ... results_accumulator.log} >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] |97.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_delete_path_with_folder_then_get_error_response [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_select.py::TestDML::test_select[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> 
test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0] [GOOD] >> ColumnShardTiers::TieringUsage >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_select.py::TestDML::test_select[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> overlapping_portions.py::TestOverlappingPortions::test [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_ttl.py::TestTTLDefaultEnv::test_case >> test_select.py::TestDML::test_select[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> 
test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v0] [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/oom/py3test >> overlapping_portions.py::TestOverlappingPortions::test [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_all_types-pk_types12-all_types12-index12---] [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Date-pk_types18-all_types18-index18-Date--] >> test_select.py::TestDML::test_select[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> test_query_cache.py::TestQueryCache::test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure [GOOD] |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> TTestYqlToMiniKQLCompile::CheckResolve >> TTestYqlToMiniKQLCompile::CheckResolve [GOOD] >> TTestYqlToMiniKQLCompile::OnlyResult >> TTestYqlToMiniKQLCompile::OnlyResult [GOOD] >> TTestYqlToMiniKQLCompile::EraseRow |97.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> 
test_copy_table.py::TestCopyTable::test_copy_table[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> TTestYqlToMiniKQLCompile::EraseRow [GOOD] >> TTestYqlToMiniKQLCompile::UpdateRow >> TTestYqlToMiniKQLCompile::UpdateRow [GOOD] >> TTestYqlToMiniKQLCompile::SelectRow >> TTestYqlToMiniKQLCompile::SelectRow [GOOD] >> TTestYqlToMiniKQLCompile::SelectRange >> TTestYqlToMiniKQLCompile::SelectRange [GOOD] >> TTestYqlToMiniKQLCompile::SimpleCrossShardTx [GOOD] >> TTestYqlToMiniKQLCompile::AcquireLocks >> TTestYqlToMiniKQLCompile::AcquireLocks [GOOD] >> TTestYqlToMiniKQLCompile::StaticMapTypeOf >> TTestYqlToMiniKQLCompile::StaticMapTypeOf [GOOD] >> TTestYqlToMiniKQLCompile::SelectRangeAtomInRange >> TTestYqlToMiniKQLCompile::SelectRangeAtomInRange [GOOD] >> TTestYqlToMiniKQLCompile::Extract >> TTestYqlToMiniKQLCompile::Extract [GOOD] |97.2%| [TS] {asan, default-linux-x86_64, release} ydb/core/client/minikql_compile/ut/unittest >> TTestYqlToMiniKQLCompile::Extract [GOOD] |97.2%| [TS] {RESULT} ydb/core/client/minikql_compile/ut/unittest |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] |97.2%| [TA] $(B)/ydb/tests/datashard/copy_table/test-results/py3test/{meta.json ... results_accumulator.log} |97.2%| [TA] {RESULT} $(B)/ydb/tests/datashard/copy_table/test-results/py3test/{meta.json ... results_accumulator.log} |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> SequenceShardTests::Basics |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure [GOOD] >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] |97.2%| [TA] $(B)/ydb/tests/functional/scheme_shard/test-results/py3test/{meta.json ... results_accumulator.log} |97.2%| [TA] {RESULT} $(B)/ydb/tests/functional/scheme_shard/test-results/py3test/{meta.json ... 
results_accumulator.log} >> GenericProviderLookupActor::Lookup >> SequenceShardTests::Basics [GOOD] >> SequenceShardTests::MarkedPipeRetries |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v0] [GOOD] >> GenericProviderLookupActor::Lookup [GOOD] >> GenericProviderLookupActor::LookupWithErrors >> GenericProviderLookupActor::LookupWithErrors [GOOD] |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_query_cache.py::TestQueryCache::test [GOOD] >> SequenceShardTests::MarkedPipeRetries [GOOD] >> SequenceShardTests::FreezeRestoreRedirect >> KesusProxyTest::ReconnectsWithKesusWhenNotConnected >> KesusProxyTest::ReconnectsWithKesusWhenNotConnected [GOOD] >> KesusProxyTest::ReconnectsWithKesusWhenPipeDestroyed [GOOD] >> KesusProxyTest::RejectsNotCanonizedResourceName [GOOD] >> KesusProxyTest::SubscribesOnResource ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/generic/actors/ut/unittest >> GenericProviderLookupActor::LookupWithErrors [GOOD] Test command err: 2025-06-25 15:21:24.808 INFO ydb-library-yql-providers-generic-actors-ut(pid=1005166, tid=0x00007F01D40A3B80) [generic] yql_generic_lookup_actor.cpp:151: New generic proivider lookup source actor(ActorId=[1:4:2051]) for kind=YDB, endpoint=host: "some_host" port: 2135, database=some_db, use_tls=1, protocol=NATIVE, table=lookup_test 2025-06-25 15:21:24.860 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1005166, tid=0x00007F01D40A3B80) [generic] yql_generic_lookup_actor.cpp:288: ActorId=[1:4:2051] Got LookupRequest for 3 keys Call ListSplits. 
selects { data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } what { items { column { name: "id" type { type_id: UINT64 } } } items { column { name: "optional_id" type { optional_type { item { type_id: UINT64 } } } } } items { column { name: "string_value" type { optional_type { item { type_id: STRING } } } } } } from { table: "lookup_test" } where { filter_typed { disjunction { operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 1 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 101 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 0 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 100 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } } } } } max_split_count: 1 CRAB Expected: selects { data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } what { items { column { name: "id" type { type_id: UINT64 } } } items { column { name: "optional_id" type { optional_type { item { type_id: UINT64 } } } } } items { column { name: "string_value" type { optional_type { item { type_id: STRING } } } } } } from { table: "lookup_test" } where { filter_typed { disjunction { operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 1 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 101 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 0 } } } } } operands { comparison { operation: EQ left_value { column: 
"optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 100 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } } } } } max_split_count: 1 CRAB Actual: selects { data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } what { items { column { name: "id" type { type_id: UINT64 } } } items { column { name: "optional_id" type { optional_type { item { type_id: UINT64 } } } } } i ... left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 1 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 101 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 0 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 100 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } } } } } max_split_count: 1 CRAB Actual: selects { data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } what { items { column { name: "id" type { type_id: UINT64 } } } items { column { name: "optional_id" type { optional_type { item { type_id: UINT64 } } } } } items { column { name: "string_value" type { optional_type { item { type_id: STRING } } } } } } from { table: "lookup_test" } where { filter_typed { disjunction { operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 1 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 101 } } } } } } } operands { conjunction { operands { comparison { operation: EQ 
left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 0 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 100 } } } } } } } operands { conjunction { operands { comparison { operation: EQ left_value { column: "id" } right_value { typed_value { type { type_id: UINT64 } value { uint64_value: 2 } } } } } operands { comparison { operation: EQ left_value { column: "optional_id" } right_value { typed_value { type { optional_type { item { type_id: UINT64 } } } value { uint64_value: 102 } } } } } } } } } } } max_split_count: 1 ListSplits result. GRpcStatusCode: 0 2025-06-25 15:21:24.994 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1005166, tid=0x00007F01CFB1E640) [generic] yql_generic_lookup_actor.cpp:319: ActorId=[2:7519907420765694332:2051] Got TListSplitsStreamIterator 2025-06-25 15:21:24.994 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1005166, tid=0x00007F01CFB1E640) [generic] yql_generic_lookup_actor.cpp:196: ActorId=[2:7519907420765694332:2051] Got TListSplitsResponse from Connector Call ReadSplits. data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } splits { select { from { table: "example_1" } } description: "Actual split info is not important" } format: ARROW_IPC_STREAMING filtering: FILTERING_MANDATORY CRAB Expected: data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } splits { select { from { table: "example_1" } } description: "Actual split info is not important" } format: ARROW_IPC_STREAMING filtering: FILTERING_MANDATORY CRAB Actual: data_source_instance { kind: YDB endpoint { host: "some_host" port: 2135 } database: "some_db" credentials { token { type: "IAM" value: "TEST_TOKEN" } } use_tls: true protocol: NATIVE } splits { select { from { table: "example_1" } } description: "Actual split info is not important" } format: ARROW_IPC_STREAMING filtering: FILTERING_MANDATORY ReadSplits result. 
GRpcStatusCode: 0 2025-06-25 15:21:24.995 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1005166, tid=0x00007F01CFB1E640) [generic] yql_generic_lookup_actor.cpp:229: ActorId=[2:7519907420765694332:2051] Got ReadSplitsStreamIterator from Connector 2025-06-25 15:21:24.995 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1005166, tid=0x00007F01CFB1E640) [generic] yql_generic_lookup_actor.cpp:341: ActorId=[2:7519907420765694332:2051] Got DataChunk 2025-06-25 15:21:24.995 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1005166, tid=0x00007F01CFB1E640) [generic] yql_generic_lookup_actor.cpp:352: ActorId=[2:7519907420765694332:2051] Got EOF 2025-06-25 15:21:24.995 DEBUG ydb-library-yql-providers-generic-actors-ut(pid=1005166, tid=0x00007F01CFB1E640) [generic] yql_generic_lookup_actor.cpp:402: Sending lookup results for 3 keys |97.2%| [TS] {RESULT} ydb/library/yql/providers/generic/actors/ut/unittest >> KesusProxyTest::SubscribesOnResource [GOOD] >> KesusProxyTest::SubscribesOnResourcesWhenReconnected [GOOD] >> KesusProxyTest::ProxyRequestDuringDisconnection [GOOD] >> KesusProxyTest::DeactivateSessionWhenResourceClosed [GOOD] >> KesusProxyTest::SendsProxySessionOnceOnSuccess [GOOD] >> KesusProxyTest::SendsProxySessionOnceOnFailure [GOOD] >> KesusProxyTest::AnswersWithSessionWhenResourceIsAlreadyKnown >> KesusProxyTest::AnswersWithSessionWhenResourceIsAlreadyKnown [GOOD] >> KesusProxyTest::SendsBrokenUpdateWhenKesusPassesError [GOOD] >> KesusProxyTest::AllocatesResourceWithKesus [GOOD] >> KesusProxyTest::DisconnectsDuringActiveSession [GOOD] >> KesusProxyTest::AllocatesResourceOffline [GOOD] >> KesusProxyTest::ConnectsDuringOfflineAllocation [GOOD] >> KesusResourceAllocationStatisticsTest::ReturnsDefaultValues [GOOD] >> KesusResourceAllocationStatisticsTest::CalculatesAverage [GOOD] >> KesusResourceAllocationStatisticsTest::TakesBestStat [GOOD] >> TQuoterServiceTest::StaticRateLimiter >> SequenceShardTests::FreezeRestoreRedirect [GOOD] >> SequenceShardTests::NegativeIncrement >> SequenceShardTests::NegativeIncrement [GOOD] |97.2%| [TA] $(B)/ydb/tests/functional/sqs/cloud/test-results/py3test/{meta.json ... 
results_accumulator.log} >> DataShardFollowers::FollowerKeepsWorkingAfterMvccReadTable ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/sequenceshard/ut/unittest >> SequenceShardTests::NegativeIncrement [GOOD] Test command err: 2025-06-25T15:21:24.218583Z node 1 :SEQUENCESHARD TRACE: sequenceshard_impl.cpp:38: [sequenceshard 72057594037927937] OnActivateExecutor 2025-06-25T15:21:24.218787Z node 1 :SEQUENCESHARD TRACE: tx_init_schema.cpp:14: [sequenceshard 72057594037927937] TTxInitSchema.Execute 2025-06-25T15:21:24.233373Z node 1 :SEQUENCESHARD TRACE: tx_init.cpp:14: [sequenceshard 72057594037927937] TTxInit.Execute 2025-06-25T15:21:24.237873Z node 1 :SEQUENCESHARD TRACE: tx_init_schema.cpp:22: [sequenceshard 72057594037927937] TTxInitSchema.Complete 2025-06-25T15:21:24.237965Z node 1 :SEQUENCESHARD TRACE: tx_init.cpp:112: [sequenceshard 72057594037927937] TTxInit.Complete 2025-06-25T15:21:24.245868Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:21: [sequenceshard 72057594037927937] TTxCreateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } 2025-06-25T15:21:24.246075Z node 1 :SEQUENCESHARD NOTICE: tx_create_sequence.cpp:113: [sequenceshard 72057594037927937] TTxCreateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] MinValue# 1 MaxValue# 9223372036854775807 StartValue# 1 Cache# 1 Increment# 1 Cycle# false State# Active 2025-06-25T15:21:24.272268Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:118: [sequenceshard 72057594037927937] TTxCreateSequence.Complete 2025-06-25T15:21:24.272647Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:21: [sequenceshard 72057594037927937] TTxCreateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } 2025-06-25T15:21:24.272698Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:33: [sequenceshard 72057594037927937] TTxCreateSequence.Execute SEQUENCE_ALREADY_EXISTS PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-25T15:21:24.272760Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:118: [sequenceshard 72057594037927937] TTxCreateSequence.Complete 2025-06-25T15:21:24.273016Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:21: [sequenceshard 72057594037927937] TTxCreateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Record# PathId { OwnerId: 123 LocalId: 51 } StartValue: 100001 Cache: 10 2025-06-25T15:21:24.273128Z node 1 :SEQUENCESHARD NOTICE: tx_create_sequence.cpp:113: [sequenceshard 72057594037927937] TTxCreateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] MinValue# 1 MaxValue# 9223372036854775807 StartValue# 100001 Cache# 10 Increment# 1 Cycle# false State# Active 2025-06-25T15:21:24.286073Z node 1 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:118: [sequenceshard 72057594037927937] TTxCreateSequence.Complete 2025-06-25T15:21:24.286510Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-25T15:21:24.286598Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# 1 AllocationCount# 1 AllocationIncrement# 1 2025-06-25T15:21:24.302015Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:24.302337Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: 
[sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 10 2025-06-25T15:21:24.302435Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# 2 AllocationCount# 10 AllocationIncrement# 1 2025-06-25T15:21:24.318213Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:24.318519Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 0 2025-06-25T15:21:24.318614Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 100001 AllocationCount# 10 AllocationIncrement# 1 2025-06-25T15:21:24.334061Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:24.334442Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 50 2025-06-25T15:21:24.334546Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 100011 AllocationCount# 50 AllocationIncrement# 1 2025-06-25T15:21:24.349996Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:24.350326Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 99] Cache# 0 2025-06-25T15:21:24.350378Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:35: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_NOT_FOUND PathId# [OwnerId: 123, LocalPathId: 99] 2025-06-25T15:21:24.350439Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:24.350623Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 18446744073709551615 2025-06-25T15:21:24.350709Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# 12 AllocationCount# 9223372036854775796 AllocationIncrement# 1 2025-06-25T15:21:24.365668Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:24.366060Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 1 2025-06-25T15:21:24.366105Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:72: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_OVERFLOW PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-25T15:21:24.366155Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:24.366371Z node 1 :SEQUENCESHARD TRACE: 
tx_drop_sequence.cpp:20: [sequenceshard 72057594037927937] TTxDropSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-25T15:21:24.366438Z node 1 :SEQUENCESHARD NOTICE: tx_drop_sequence.cpp:43: [sequenceshard 72057594037927937] TTxDropSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-25T15:21:24.379852Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:48: [sequenceshard 72057594037927937] TTxDropSequence.Complete 2025-06-25T15:21:24.380187Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:20: [sequenceshard 72057594037927937] TTxDropSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-25T15:21:24.380232Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:33: [sequenceshard 72057594037927937] TTxDropSequence.Execute SEQUENCE_NOT_FOUND PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-25T15:21:24.380294Z node 1 :SEQUENCESHARD TRACE: tx_drop_sequence.cpp:48: [sequenceshard 72057594037927937] TTxDropSequence.Complete 2025-06-25T15:21:24.398014Z node 1 :SEQUENCESHARD TRACE: sequenceshard_impl.cpp:38: [sequenceshard 72057594037927937] OnActivateExecutor 2025-06-25T15:21:24.398102Z node 1 :SEQUENCESHARD TRACE: tx_init_schema.cpp:14: [sequenceshard 72057594037927937] TTxInitSchema.Execute 2025-06-25T15:21:24.398515Z node 1 :SEQUENCESHARD TRACE: tx_init_schema.cpp:22: [sequenceshard 72057594037927937] TTxInitSchema.Complete 2025-06-25T15:21:24.399036Z node 1 :SEQUENCESHARD TRACE: tx_init.cpp:14: [sequenceshard 72057594037927937] TTxInit.Execute 2025-06-25T15:21:24.400522Z node 1 :SEQUENCESHARD TRACE: tx_init.cpp:112: [sequenceshard 72057594037927937] TTxInit.Complete 2025-06-25T15:21:24.404364Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-25T15:21:24.404418Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:35: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_NOT_FOUND PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-25T15:21:24.404464Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:24.404775Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 0 2025-06-25T15:21:24.404868Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 100061 AllocationCount# 10 AllocationIncrement# 1 2025-06-25T15:21:24.417112Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:24.417570Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:21: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Record# PathId { OwnerId: 123 LocalId: 51 } NextValue: 200000 NextUsed: true 2025-06-25T15:21:24.417659Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:103: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] 2025-06-25T15:21:24.433302Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:108: [sequenceshard 72057594037927937] TTxUpdateSequence.Complete 2025-06-25T15:21:24.433706Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# 
[OwnerId: 123, LocalPathId: 51] Cache# 0 2025-06-25T15:21:24.433796Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 200001 AllocationCount# 10 AllocationIncrement# 1 2025-06-25T15:21:24.446116Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:24.446547Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:21: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Record# PathId { OwnerId: 123 LocalId: 51 } Cache: 5 2025-06-25T15:21:24.446630Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:103: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] 2025-06-25T15:21:24.472706Z node 1 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:108: [sequenceshard 72057594037927937] TTxUpdateSequence.Complete 2025-06-25T15:21:24.473096Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 51] Cache# 0 2025-06-25T15:21:24.473186Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 51] AllocationStart# 200011 AllocationCount# 5 AllocationIncrement# 1 2025-06-25T15:21:24.485257Z node 1 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard ... Id: 43] Cache# 0 2025-06-25T15:21:25.689624Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 43] AllocationStart# 11 AllocationCount# 100 AllocationIncrement# 1 2025-06-25T15:21:25.701454Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:25.701869Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:21: [sequenceshard 72057594037927937] TTxRestoreSequence.Execute PathId# [OwnerId: 123, LocalPathId: 43] Record# PathId { OwnerId: 123 LocalId: 43 } MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 NextValue: 11 Cache: 100 Increment: 1 2025-06-25T15:21:25.701930Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:66: [sequenceshard 72057594037927937] TTxRestoreSequence.Execute SEQUENCE_ALREADY_ACTIVE PathId# [OwnerId: 123, LocalPathId: 43] 2025-06-25T15:21:25.701984Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:103: [sequenceshard 72057594037927937] TTxRestoreSequence.Complete 2025-06-25T15:21:25.702201Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:22: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2025-06-25T15:21:25.702275Z node 3 :SEQUENCESHARD NOTICE: tx_redirect_sequence.cpp:59: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2025-06-25T15:21:25.720952Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:64: [sequenceshard 72057594037927937] TTxRedirectSequence.Complete 2025-06-25T15:21:25.721327Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:22: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2025-06-25T15:21:25.721407Z node 3 :SEQUENCESHARD NOTICE: 
tx_redirect_sequence.cpp:59: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2025-06-25T15:21:25.738970Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:64: [sequenceshard 72057594037927937] TTxRedirectSequence.Complete 2025-06-25T15:21:25.739278Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:22: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2025-06-25T15:21:25.739350Z node 3 :SEQUENCESHARD NOTICE: tx_redirect_sequence.cpp:59: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] RedirectTo# 12345 2025-06-25T15:21:25.753184Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:64: [sequenceshard 72057594037927937] TTxRedirectSequence.Complete 2025-06-25T15:21:25.753632Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-25T15:21:25.753697Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:54: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_MOVED PathId# [OwnerId: 123, LocalPathId: 42] MovedTo# 12345 2025-06-25T15:21:25.753770Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:25.754074Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:20: [sequenceshard 72057594037927937] TTxFreezeSequence.Execute PathId# [OwnerId: 123, LocalPathId: 43] 2025-06-25T15:21:25.754175Z node 3 :SEQUENCESHARD NOTICE: tx_freeze_sequence.cpp:68: [sequenceshard 72057594037927937] TTxFreezeSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 43] 2025-06-25T15:21:25.769227Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:73: [sequenceshard 72057594037927937] TTxFreezeSequence.Complete 2025-06-25T15:21:25.769828Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:21: [sequenceshard 72057594037927937] TTxRestoreSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 NextValue: 111 Cache: 100 Increment: 1 2025-06-25T15:21:25.770017Z node 3 :SEQUENCESHARD NOTICE: tx_restore_sequence.cpp:98: [sequenceshard 72057594037927937] TTxRestoreSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 NextValue: 111 Cache: 100 Increment: 1 2025-06-25T15:21:25.782401Z node 3 :SEQUENCESHARD TRACE: tx_restore_sequence.cpp:103: [sequenceshard 72057594037927937] TTxRestoreSequence.Complete 2025-06-25T15:21:25.782869Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:22: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute PathId# [OwnerId: 123, LocalPathId: 43] RedirectTo# 54321 2025-06-25T15:21:25.782975Z node 3 :SEQUENCESHARD NOTICE: tx_redirect_sequence.cpp:59: [sequenceshard 72057594037927937] TTxRedirectSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 43] RedirectTo# 54321 2025-06-25T15:21:25.797623Z node 3 :SEQUENCESHARD TRACE: tx_redirect_sequence.cpp:64: [sequenceshard 72057594037927937] TTxRedirectSequence.Complete 2025-06-25T15:21:25.798070Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:20: [sequenceshard 72057594037927937] TTxFreezeSequence.Execute PathId# [OwnerId: 123, LocalPathId: 43] 2025-06-25T15:21:25.798140Z 
node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:48: [sequenceshard 72057594037927937] TTxFreezeSequence.Execute SEQUENCE_MOVED PathId# [OwnerId: 123, LocalPathId: 43] MovedTo# 54321 2025-06-25T15:21:25.798204Z node 3 :SEQUENCESHARD TRACE: tx_freeze_sequence.cpp:73: [sequenceshard 72057594037927937] TTxFreezeSequence.Complete 2025-06-25T15:21:25.798458Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-25T15:21:25.798546Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# 111 AllocationCount# 100 AllocationIncrement# 1 2025-06-25T15:21:25.813168Z node 3 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:26.334453Z node 4 :SEQUENCESHARD TRACE: sequenceshard_impl.cpp:38: [sequenceshard 72057594037927937] OnActivateExecutor 2025-06-25T15:21:26.334542Z node 4 :SEQUENCESHARD TRACE: tx_init_schema.cpp:14: [sequenceshard 72057594037927937] TTxInitSchema.Execute 2025-06-25T15:21:26.343266Z node 4 :SEQUENCESHARD TRACE: tx_init.cpp:14: [sequenceshard 72057594037927937] TTxInit.Execute 2025-06-25T15:21:26.345865Z node 4 :SEQUENCESHARD TRACE: tx_init_schema.cpp:22: [sequenceshard 72057594037927937] TTxInitSchema.Complete 2025-06-25T15:21:26.345949Z node 4 :SEQUENCESHARD TRACE: tx_init.cpp:112: [sequenceshard 72057594037927937] TTxInit.Complete 2025-06-25T15:21:26.347673Z node 4 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:21: [sequenceshard 72057594037927937] TTxCreateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } Cache: 10 Increment: -1 2025-06-25T15:21:26.347779Z node 4 :SEQUENCESHARD NOTICE: tx_create_sequence.cpp:113: [sequenceshard 72057594037927937] TTxCreateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] MinValue# -9223372036854775808 MaxValue# -1 StartValue# -1 Cache# 10 Increment# -1 Cycle# false State# Active 2025-06-25T15:21:26.371656Z node 4 :SEQUENCESHARD TRACE: tx_create_sequence.cpp:118: [sequenceshard 72057594037927937] TTxCreateSequence.Complete 2025-06-25T15:21:26.371929Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-25T15:21:26.372026Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -1 AllocationCount# 10 AllocationIncrement# -1 2025-06-25T15:21:26.395070Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:26.395407Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-25T15:21:26.395498Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -11 AllocationCount# 10 AllocationIncrement# -1 2025-06-25T15:21:26.414789Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:26.415260Z node 4 
:SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 18446744073709551615 2025-06-25T15:21:26.415376Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -21 AllocationCount# 9223372036854775788 AllocationIncrement# -1 2025-06-25T15:21:26.427843Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:26.428248Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 1 2025-06-25T15:21:26.428331Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:72: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SEQUENCE_OVERFLOW PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-25T15:21:26.428411Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:26.428761Z node 4 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:21: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Record# PathId { OwnerId: 123 LocalId: 42 } Cycle: true 2025-06-25T15:21:26.428864Z node 4 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:103: [sequenceshard 72057594037927937] TTxUpdateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] 2025-06-25T15:21:26.443280Z node 4 :SEQUENCESHARD TRACE: tx_update_sequence.cpp:108: [sequenceshard 72057594037927937] TTxUpdateSequence.Complete 2025-06-25T15:21:26.443692Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-25T15:21:26.443806Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -1 AllocationCount# 10 AllocationIncrement# -1 2025-06-25T15:21:26.457284Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete 2025-06-25T15:21:26.457749Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:22: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute PathId# [OwnerId: 123, LocalPathId: 42] Cache# 0 2025-06-25T15:21:26.457876Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:89: [sequenceshard 72057594037927937] TTxAllocateSequence.Execute SUCCESS PathId# [OwnerId: 123, LocalPathId: 42] AllocationStart# -11 AllocationCount# 10 AllocationIncrement# -1 2025-06-25T15:21:26.472811Z node 4 :SEQUENCESHARD TRACE: tx_allocate_sequence.cpp:174: [sequenceshard 72057594037927937] TTxAllocateSequence.Complete |97.2%| [TS] {RESULT} ydb/core/tx/sequenceshard/ut/unittest |97.2%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/cloud/test-results/py3test/{meta.json ... 
results_accumulator.log} >> NodeWardenDsProxyConfigRetrieval::Disconnect >> TCheckpointCoordinatorTests::ShouldTriggerCheckpointWithSource >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> TCheckpointCoordinatorTests::ShouldTriggerCheckpointWithSource [GOOD] >> TCheckpointCoordinatorTests::ShouldTriggerCheckpointWithSourcesAndWithChannel [GOOD] >> TCheckpointCoordinatorTests::ShouldAllSnapshots >> TCheckpointCoordinatorTests::ShouldAllSnapshots [GOOD] >> TCheckpointCoordinatorTests::Should2Increments1Snapshot >> TQuoterServiceTest::StaticRateLimiter [GOOD] >> TQuoterServiceTest::StaticMultipleAndResources >> NodeWardenDsProxyConfigRetrieval::Disconnect [GOOD] |97.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> TCheckpointCoordinatorTests::Should2Increments1Snapshot [GOOD] >> TCheckpointCoordinatorTests::ShouldAbortPreviousCheckpointsIfNodeStateCantBeSaved >> TCheckpointCoordinatorTests::ShouldAbortPreviousCheckpointsIfNodeStateCantBeSaved [GOOD] >> KqpTpch::Query01 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/blobstorage/nodewarden/ut_sequence/unittest >> NodeWardenDsProxyConfigRetrieval::Disconnect [GOOD] Test command err: Caught NodeWarden registration actorId# [1:11:2058] 2025-06-25T15:21:28.542869Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:328} Bootstrap 2025-06-25T15:21:28.587163Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/yft8/0007b4/r3tmp/tmpbePiqu/static.dat" PDiskGuid: 14459762468593695341 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1 PDiskGuid: 14459762468593695341 } VDiskKind: Default } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1 PDiskGuid: 14459762468593695341 } } } } AvailabilityDomains: 0 } 2025-06-25T15:21:28.588967Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "SectorMap:/home/runner/.ya/build/build_root/yft8/0007b4/r3tmp/tmpbePiqu/static.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-25T15:21:28.594897Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-25T15:21:28.599680Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:1 PDiskGuid# 14459762468593695341 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-25T15:21:28.601090Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:1 PDiskGuid# 14459762468593695341 2025-06-25T15:21:28.601181Z node 1 :BS_NODE DEBUG: 
{NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 0 2025-06-25T15:21:28.603041Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:29:2076] ControllerId# 72057594037932033 2025-06-25T15:21:28.603105Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:21:28.605094Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:303} StartInvalidGroupProxy GroupId# 4294967295 2025-06-25T15:21:28.605300Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:315} StartRequestReportingThrottler 2025-06-25T15:21:28.627838Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-25T15:21:28.634433Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-25T15:21:28.654970Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:21:28.655038Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:21:28.668598Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:21:28.668677Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-25T15:21:28.678526Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-25T15:21:28.683020Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-25T15:21:28.699026Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639258 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-25T15:21:28.701556Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-25T15:21:28.702250Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/yft8/0007b4/r3tmp/tmpbePiqu/static.dat" PDiskGuid: 14459762468593695341 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1 PDiskGuid: 14459762468593695341 } VDiskKind: Default } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1 PDiskGuid: 14459762468593695341 } } } } AvailabilityDomains: 0 } 2025-06-25T15:21:28.708096Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-25T15:21:28.722156Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 3 2025-06-25T15:21:28.722219Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-25T15:21:28.723270Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:342} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "u\256\266\021zK\000a\344\260k\354N\025\344_0~\353\370" } 2025-06-25T15:21:28.723656Z node 1 :BS_NODE DEBUG: 
{NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 3 2025-06-25T15:21:28.723731Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0 2025-06-25T15:21:28.729347Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 2 2025-06-25T15:21:28.729497Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 2146435075 Sender# [1:48:2091] SessionId# [0:0:0] Cookie# 0 2025-06-25T15:21:28.729565Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.060731s 2025-06-25T15:21:28.731173Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:280} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-25T15:21:28.731246Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639248 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0 2025-06-25T15:21:28.765816Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-25T15:21:28.774012Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-25T15:21:28.826901Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-25T15:21:28.852057Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-25T15:21:28.853746Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-25T15:21:28.857221Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:21:28.858592Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-25T15:21:28.858817Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2082} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-25T15:21:28.859297Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:531} Handle TEvInterconnect::TEvNodesInfo 2025-06-25T15:21:28.859552Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-25T15:21:28.890663Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-25T15:21:28.893195Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-25T15:21:28.894476Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-25T15:21:28.894950Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T15:21:28.895094Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-25T15:21:28.895235Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-25T15:21:28.925378Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 
2025-06-25T15:21:28.925546Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T15:21:28.941455Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-25T15:21:28.941614Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T15:21:28.941692Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-25T15:21:28.941767Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T15:21:28.941897Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-25T15:21:28.942035Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T15:21:28.942088Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-25T15:21:28.942137Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T15:21:28.957290Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-25T15:21:28.957442Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T15:21:28.970067Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-25T15:21:28.970236Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:21} TTxLoadEverything Execute 2025-06-25T15:21:28.973525Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:587} TTxLoadEverything Complete 2025-06-25T15:21:28.973616Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2213} LoadFinished 2025-06-25T15:21:28.989071Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-25T15:21:28.990004Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:592} TTxLoadEverything InitQueue processed 2025-06-25T15:21:28.990758Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639257 Sender# [1:93:2123] SessionId# ... 
tatic.dat" PDiskConfig { ExpectedSlotCount: 2 } } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 ErasureSpecies: "none" VDiskKind: "Default" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-25T15:21:28.997799Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# SectorMap:/home/runner/.ya/build/build_root/yft8/0007b4/r3tmp/tmpbePiqu/static.dat 2025-06-25T15:21:29.016051Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-25T15:21:29.016211Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } } 2025-06-25T15:21:29.016298Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } 2025-06-25T15:21:29.016642Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1 PDiskGuid: 14459762468593695341 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-25T15:21:29.017276Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1 PDiskGuid: 14459762468593695341 Status: READY OnlyPhantomsRemain: false } } 2025-06-25T15:21:29.018385Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-25T15:21:29.019864Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-25T15:21:29.019999Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-25T15:21:29.020179Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } Success: true } 2025-06-25T15:21:29.020506Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } } 2025-06-25T15:21:29.036182Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-25T15:21:29.036365Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639257 Sender# [1:93:2123] SessionId# [0:0:0] Cookie# 0 === Waiting for pipe to establish === === Breaking pipe === === Sending put === Pipe disconnected clientId# [1:29:2076] 2025-06-25T15:21:29.037066Z node 1 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [1:29:2076] ServerId# [1:125:2147] TabletId# 72057594037932033 PipeClientId# [1:29:2076] 2025-06-25T15:21:29.037146Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:140:2160] ControllerId# 72057594037932033 2025-06-25T15:21:29.037176Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-25T15:21:29.037626Z node 1 :BS_NODE DEBUG: {NW46@node_warden_proxy.cpp:134} HandleForwarded GroupId# 
2147483648 EnableProxyMock# false NoGroup# false 2025-06-25T15:21:29.037681Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:24} StartLocalProxy GroupId# 2147483648 2025-06-25T15:21:29.037713Z node 1 :BS_NODE DEBUG: {NW98@node_warden_group.cpp:266} RequestGroupConfig GroupId# 2147483648 2025-06-25T15:21:29.038228Z node 1 :BS_NODE INFO: {NW79@node_warden_group_resolver.cpp:74} TGroupResolverActor::Bootstrap GroupId# 2147483648 2025-06-25T15:21:29.038342Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:345} StateFunc Type# 268639258 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0 Pipe connected clientId# [1:140:2160] 2025-06-25T15:21:29.041779Z node 1 :BS_NODE DEBUG: {NW05@node_warden_pipe.cpp:52} TEvTabletPipe::TEvClientConnected OK ClientId# [1:140:2160] ServerId# [1:151:2169] TabletId# 72057594037932033 PipeClientId# [1:140:2160] 2025-06-25T15:21:29.042015Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1 PDiskGuid: 14459762468593695341 Status: READY OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-25T15:21:29.042308Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } Success: true } 2025-06-25T15:21:29.042500Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG02@get_group.cpp:58} TEvControllerGetGroup Sender# [1:11:2058] Cookie# 0 Recipient# [1:151:2169] RecipientRewrite# [1:93:2123] Request# {NodeID: 1 GroupIDs: 2147483648 } StopGivingGroups# false 2025-06-25T15:21:29.042576Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG01@get_group.cpp:22} Handle TEvControllerGetGroup Request# {NodeID: 1 GroupIDs: 2147483648 } 2025-06-25T15:21:29.042678Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } } 2025-06-25T15:21:29.042785Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } SatisfactionRank: 0 VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1 } State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 0 } } 2025-06-25T15:21:29.058224Z node 1 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:807} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 1 ServiceSet { PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/yft8/0007b4/r3tmp/tmpbePiqu/static.dat" PDiskGuid: 14459762468593695341 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } EntityStatus: INITIAL ExpectedSerial: "" ManagementStage: DISCOVER_SERIAL SpaceColorBorder: GREEN } VDisks { VDiskID { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 14459762468593695341 } VDiskKind: Default StoragePoolName: "" GroupSizeInUnits: 0 } Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 14459762468593695341 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } InstanceId: "adfe3cbb-886f8ebd-83673ca2-749fb60" Comprehensive: true AvailDomain: 0 } 2025-06-25T15:21:29.058426Z node 1 
:BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# true Origin# controller ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "SectorMap:/home/runner/.ya/build/build_root/yft8/0007b4/r3tmp/tmpbePiqu/static.dat" PDiskGuid: 14459762468593695341 PDiskCategory: 0 PDiskConfig { ExpectedSlotCount: 2 } EntityStatus: INITIAL ExpectedSerial: "" ManagementStage: DISCOVER_SERIAL SpaceColorBorder: GREEN } VDisks { VDiskID { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 14459762468593695341 } VDiskKind: Default StoragePoolName: "" GroupSizeInUnits: 0 } Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 14459762468593695341 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } 2025-06-25T15:21:29.058592Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [80000000:1:0:0:0] VSlotId# 1:1:1000 PDiskGuid# 14459762468593695341 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-25T15:21:29.059442Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:267} StartLocalVDiskActor done VDiskId# [80000000:1:0:0:0] VSlotId# 1:1:1000 PDiskGuid# 14459762468593695341 2025-06-25T15:21:29.062343Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-25T15:21:29.069033Z node 1 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:807} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 1 ServiceSet { Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 14459762468593695341 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } } 2025-06-25T15:21:29.069160Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {Groups { GroupID: 2147483648 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 1000 PDiskGuid: 14459762468593695341 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2147483648 MainKeyVersion: 0 StoragePoolName: "" DeviceType: ROT GroupSizeInUnits: 0 } } 2025-06-25T15:21:29.070496Z node 1 :BS_NODE INFO: {NW81@node_warden_group_resolver.cpp:270} TGroupResolverActor::PassAway GroupId# 2147483648 2025-06-25T15:21:29.070725Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1000 PDiskGuid: 14459762468593695341 Status: INIT_PENDING OnlyPhantomsRemain: false } } 2025-06-25T15:21:29.071586Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-25T15:21:29.076008Z node 1 :BS_CONTROLLER 
DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } } 2025-06-25T15:21:29.079269Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-25T15:21:29.079612Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1000 PDiskGuid: 14459762468593695341 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-25T15:21:29.080558Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1148} Handle(TEvStatusUpdate) 2025-06-25T15:21:29.080731Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2147483648 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 1000 PDiskGuid: 14459762468593695341 Status: READY OnlyPhantomsRemain: false } } 2025-06-25T15:21:29.729384Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1 AvailableSize: 34189869056 TotalSize: 34359738368 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 EnforcedDynamicSlotSize: 17041457152 State: Normal } } |97.2%| [TM] {RESULT} ydb/core/blobstorage/nodewarden/ut_sequence/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/checkpointing/ut/unittest >> TCheckpointCoordinatorTests::ShouldAbortPreviousCheckpointsIfNodeStateCantBeSaved [GOOD] Test command err: 2025-06-25T15:21:29.870064Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:72: [my-graph-id.42] TEvReadyState, streaming disposition { }, state load mode FROM_LAST_CHECKPOINT 2025-06-25T15:21:29.870268Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:113: [my-graph-id.42] Send TEvRegisterCoordinatorRequest Waiting for TEvRegisterCoordinatorRequest (storage) 2025-06-25T15:21:29.870439Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:130: [my-graph-id.42] Got TEvRegisterCoordinatorResponse; issues: 2025-06-25T15:21:29.870472Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:139: [my-graph-id.42] Successfully registered in storage 2025-06-25T15:21:29.870506Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:140: [my-graph-id.42] Send TEvNewCheckpointCoordinator to 3 actor(s) 2025-06-25T15:21:29.871376Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:148: [my-graph-id.42] Send TEvGetCheckpointsMetadataRequest; state load mode: FROM_LAST_CHECKPOINT; load graph: 0 Waiting for TEvGetCheckpointsMetadataRequest (storage) 2025-06-25T15:21:29.881531Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:185: [my-graph-id.42] Got TEvGetCheckpointsMetadataResponse 2025-06-25T15:21:29.881592Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:211: [my-graph-id.42] Found no checkpoints to restore from, creating a 'zero' checkpoint 2025-06-25T15:21:29.881625Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:348: [my-graph-id.42] [42:1] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2025-06-25T15:21:29.896939Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:388: [my-graph-id.42] [42:1] Got TEvCreateCheckpointResponse 
2025-06-25T15:21:29.897014Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:434: [my-graph-id.42] [42:1] Checkpoint successfully created, going to inject barriers to 1 actor(s) 2025-06-25T15:21:29.897084Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:440: [my-graph-id.42] [42:1] Send TEvRun to all actors Waiting for TEvInjectCheckpointBarrier (ingress) 2025-06-25T15:21:29.897223Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:29.897264Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 2 more acks 2025-06-25T15:21:29.897307Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:29.897381Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 1 more acks 2025-06-25T15:21:29.897423Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:29.897454Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 0 more acks 2025-06-25T15:21:29.897497Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:484: [my-graph-id.42] [42:1] Got all acks, changing checkpoint status to 'PendingCommit' Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage) 2025-06-25T15:21:29.897581Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:496: [my-graph-id.42] [42:1] Got TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T15:21:29.897617Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:511: [my-graph-id.42] [42:1] Checkpoint status changed to 'PendingCommit', committing states Waiting for TEvCommitChanges (ingress) Waiting for TEvCommitChanges (egress) 2025-06-25T15:21:29.897795Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:1] Got TEvStateCommitted; task: 1 2025-06-25T15:21:29.897832Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:1] State committed [1:6:2053], need 1 more acks 2025-06-25T15:21:29.897880Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:1] Got TEvStateCommitted; task: 3 2025-06-25T15:21:29.897915Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:1] State committed [1:8:2055], need 0 more acks 2025-06-25T15:21:29.897944Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:538: [my-graph-id.42] [42:1] Got all acks, changing checkpoint status to 'Completed' Waiting for TEvCompleteCheckpointRequest (storage) 2025-06-25T15:21:29.898021Z node 1 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:549: [my-graph-id.42] [42:1] Got TEvCompleteCheckpointResponse 2025-06-25T15:21:29.898056Z node 1 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:564: [my-graph-id.42] [42:1] Checkpoint completed 2025-06-25T15:21:29.978297Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:72: [my-graph-id.42] TEvReadyState, streaming disposition { }, 
state load mode FROM_LAST_CHECKPOINT 2025-06-25T15:21:29.978462Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:113: [my-graph-id.42] Send TEvRegisterCoordinatorRequest Waiting for TEvRegisterCoordinatorRequest (storage) 2025-06-25T15:21:29.978562Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:130: [my-graph-id.42] Got TEvRegisterCoordinatorResponse; issues: 2025-06-25T15:21:29.978595Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:139: [my-graph-id.42] Successfully registered in storage 2025-06-25T15:21:29.978641Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:140: [my-graph-id.42] Send TEvNewCheckpointCoordinator to 3 actor(s) 2025-06-25T15:21:29.978695Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:148: [my-graph-id.42] Send TEvGetCheckpointsMetadataRequest; state load mode: FROM_LAST_CHECKPOINT; load graph: 0 Waiting for TEvGetCheckpointsMetadataRequest (storage) 2025-06-25T15:21:29.978842Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:185: [my-graph-id.42] Got TEvGetCheckpointsMetadataResponse 2025-06-25T15:21:29.978873Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:211: [my-graph-id.42] Found no checkpoints to restore from, creating a 'zero' checkpoint 2025-06-25T15:21:29.978900Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:348: [my-graph-id.42] [42:1] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2025-06-25T15:21:29.979012Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:388: [my-graph-id.42] [42:1] Got TEvCreateCheckpointResponse 2025-06-25T15:21:29.979082Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:434: [my-graph-id.42] [42:1] Checkpoint successfully created, going to inject barriers to 1 actor(s) 2025-06-25T15:21:29.979129Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:440: [my-graph-id.42] [42:1] Send TEvRun to all actors Waiting for TEvInjectCheckpointBarrier (ingress) 2025-06-25T15:21:29.979221Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:29.979256Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 2 more acks 2025-06-25T15:21:29.979291Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:29.979336Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 1 more acks 2025-06-25T15:21:29.979379Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:29.979408Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 0 more acks 2025-06-25T15:21:29.979436Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:484: [my-graph-id.42] [42:1] Got all acks, changing checkpoint status to 'PendingCommit' Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage) 2025-06-25T15:21:29.979505Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: 
checkpoint_coordinator.cpp:496: [my-graph-id.42] [42:1] Got TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T15:21:29.979543Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:511: [my-graph-id.42] [42:1] Checkpoint status changed to 'PendingCommit', committing states Waiting for TEvCommitChanges (ingress) Waiting for TEvCommitChanges (egress) 2025-06-25T15:21:29.979662Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:1] Got TEvStateCommitted; task: 1 2025-06-25T15:21:29.979697Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:1] State committed [2:6:2053], need 1 more acks 2025-06-25T15:21:29.979728Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:1] Got TEvStateCommitted; task: 3 2025-06-25T15:21:29.979769Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:1] State committed [2:8:2055], need 0 more acks 2025-06-25T15:21:29.979801Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:538: [my-graph-id.42] [42:1] Got all acks, changing checkpoint status to 'Completed' Waiting for TEvCompleteCheckpointRequest (storage) 2025-06-25T15:21:29.979857Z node 2 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:549: [my-graph-id.42] [42:1] Got TEvCompleteCheckpointResponse 2025-06-25T15:21:29.979884Z node 2 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:564: [my-graph-id.42] [42:1] Checkpoint completed 2025-06-25T15:21:30.061784Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:72: [my-graph-id.42] TEvReadyState, streaming disposition { }, state load mode FROM_LAST_CHECKPOINT 2025-06-25T15:21:30.061965Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:113: [my-graph-id.42] Send TEvRegisterCoordinatorRequest Waiting for TEvRegisterCoordinatorRequest (storage) 2025-06-25T15:21:30.062072Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:130: [my-graph-id.42] Got TEvRegisterCoordinatorResponse; issues: 2025-06-25T15:21:30.062107Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:139: [my-graph-id.42] Successfully registered in storage 2025-06-25T15:21:30.062138Z node 3 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:140: [my-graph-id.42] Send TEvNewCheckpointCoordinator to 3 actor(s) 2025-06-25T15:21:30.062203Z node 3 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:148: [my-graph-id.42] Send TEvGetCheckpointsMetadataRequest; state load mode: FROM_LAST_CHECKPOINT; load graph: 0 Waiting for TEvGetCheckpointsMetadataRequest (storage) 2025-06-25T15:21:30.062374Z node 3 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:185: [my-graph-id.42] Got TEvGetCheckpointsMetadataResponse 2025-06-25T15:21:30.062408Z node 3 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkp ... 
Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2025-06-25T15:21:30.149102Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:388: [my-graph-id.42] [42:3] Got TEvCreateCheckpointResponse 2025-06-25T15:21:30.149137Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:434: [my-graph-id.42] [42:3] Checkpoint successfully created, going to inject barriers to 1 actor(s) Waiting for TEvInjectCheckpointBarrier (ingress) 2025-06-25T15:21:30.149214Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:3] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:30.149260Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:3] Task state saved, need 2 more acks 2025-06-25T15:21:30.149301Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:3] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:30.149337Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:3] Task state saved, need 1 more acks 2025-06-25T15:21:30.149380Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:3] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:30.149413Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:3] Task state saved, need 0 more acks 2025-06-25T15:21:30.149443Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:484: [my-graph-id.42] [42:3] Got all acks, changing checkpoint status to 'PendingCommit' Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage) 2025-06-25T15:21:30.149491Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:496: [my-graph-id.42] [42:3] Got TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T15:21:30.149533Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:511: [my-graph-id.42] [42:3] Checkpoint status changed to 'PendingCommit', committing states Waiting for TEvCommitChanges (ingress) Waiting for TEvCommitChanges (egress) 2025-06-25T15:21:30.149656Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:3] Got TEvStateCommitted; task: 1 2025-06-25T15:21:30.149694Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:3] State committed [4:6:2053], need 1 more acks 2025-06-25T15:21:30.149751Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:3] Got TEvStateCommitted; task: 3 2025-06-25T15:21:30.149789Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:3] State committed [4:8:2055], need 0 more acks 2025-06-25T15:21:30.149826Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:538: [my-graph-id.42] [42:3] Got all acks, changing checkpoint status to 'Completed' Waiting for TEvCompleteCheckpointRequest (storage) 2025-06-25T15:21:30.149901Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:549: [my-graph-id.42] [42:3] Got TEvCompleteCheckpointResponse 2025-06-25T15:21:30.149932Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:564: [my-graph-id.42] [42:3] Checkpoint completed 2025-06-25T15:21:30.149972Z node 4 :STREAMS_CHECKPOINT_COORDINATOR 
DEBUG: checkpoint_coordinator.cpp:372: [my-graph-id.42] Got TEvScheduleCheckpointing 2025-06-25T15:21:30.150010Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:348: [my-graph-id.42] [42:4] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2025-06-25T15:21:30.150076Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:388: [my-graph-id.42] [42:4] Got TEvCreateCheckpointResponse 2025-06-25T15:21:30.150116Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:434: [my-graph-id.42] [42:4] Checkpoint successfully created, going to inject barriers to 1 actor(s) Waiting for TEvInjectCheckpointBarrier (ingress) 2025-06-25T15:21:30.150195Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:4] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:30.150233Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:4] Task state saved, need 2 more acks 2025-06-25T15:21:30.150284Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:4] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:30.150326Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:4] Task state saved, need 1 more acks 2025-06-25T15:21:30.150381Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:4] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:30.150409Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:4] Task state saved, need 0 more acks 2025-06-25T15:21:30.150439Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:484: [my-graph-id.42] [42:4] Got all acks, changing checkpoint status to 'PendingCommit' Waiting for TEvSetCheckpointPendingCommitStatusRequest (storage) 2025-06-25T15:21:30.150498Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:496: [my-graph-id.42] [42:4] Got TEvSetCheckpointPendingCommitStatusResponse 2025-06-25T15:21:30.150531Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:511: [my-graph-id.42] [42:4] Checkpoint status changed to 'PendingCommit', committing states Waiting for TEvCommitChanges (ingress) Waiting for TEvCommitChanges (egress) 2025-06-25T15:21:30.150648Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:4] Got TEvStateCommitted; task: 1 2025-06-25T15:21:30.150689Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:4] State committed [4:6:2053], need 1 more acks 2025-06-25T15:21:30.150723Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:527: [my-graph-id.42] [42:4] Got TEvStateCommitted; task: 3 2025-06-25T15:21:30.150767Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:536: [my-graph-id.42] [42:4] State committed [4:8:2055], need 0 more acks 2025-06-25T15:21:30.150800Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:538: [my-graph-id.42] [42:4] Got all acks, changing checkpoint status to 'Completed' Waiting for TEvCompleteCheckpointRequest (storage) 2025-06-25T15:21:30.150860Z node 4 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:549: [my-graph-id.42] [42:4] Got TEvCompleteCheckpointResponse 
2025-06-25T15:21:30.150886Z node 4 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:564: [my-graph-id.42] [42:4] Checkpoint completed 2025-06-25T15:21:30.239500Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:72: [my-graph-id.42] TEvReadyState, streaming disposition { }, state load mode FROM_LAST_CHECKPOINT 2025-06-25T15:21:30.239658Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:113: [my-graph-id.42] Send TEvRegisterCoordinatorRequest Waiting for TEvRegisterCoordinatorRequest (storage) 2025-06-25T15:21:30.239756Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:130: [my-graph-id.42] Got TEvRegisterCoordinatorResponse; issues: 2025-06-25T15:21:30.239786Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:139: [my-graph-id.42] Successfully registered in storage 2025-06-25T15:21:30.239817Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:140: [my-graph-id.42] Send TEvNewCheckpointCoordinator to 3 actor(s) 2025-06-25T15:21:30.239894Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:148: [my-graph-id.42] Send TEvGetCheckpointsMetadataRequest; state load mode: FROM_LAST_CHECKPOINT; load graph: 0 Waiting for TEvGetCheckpointsMetadataRequest (storage) 2025-06-25T15:21:30.240073Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:185: [my-graph-id.42] Got TEvGetCheckpointsMetadataResponse 2025-06-25T15:21:30.240110Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:211: [my-graph-id.42] Found no checkpoints to restore from, creating a 'zero' checkpoint 2025-06-25T15:21:30.240139Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:348: [my-graph-id.42] [42:1] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2025-06-25T15:21:30.240266Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:388: [my-graph-id.42] [42:1] Got TEvCreateCheckpointResponse 2025-06-25T15:21:30.240329Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:434: [my-graph-id.42] [42:1] Checkpoint successfully created, going to inject barriers to 1 actor(s) 2025-06-25T15:21:30.240379Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:440: [my-graph-id.42] [42:1] Send TEvRun to all actors Waiting for TEvInjectCheckpointBarrier (ingress) 2025-06-25T15:21:30.240483Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: OK, size: 100 2025-06-25T15:21:30.240523Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:471: [my-graph-id.42] [42:1] Task state saved, need 2 more acks 2025-06-25T15:21:30.240564Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: STORAGE_ERROR, size: 0 2025-06-25T15:21:30.240598Z node 5 :STREAMS_CHECKPOINT_COORDINATOR ERROR: checkpoint_coordinator.cpp:474: [my-graph-id.42] [42:1] StorageError: can't save node state, aborting checkpoint 2025-06-25T15:21:30.240638Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:461: [my-graph-id.42] [42:1] Got TEvSaveTaskStateResult; task 0, status: STORAGE_ERROR, size: 0 2025-06-25T15:21:30.240671Z node 5 :STREAMS_CHECKPOINT_COORDINATOR ERROR: checkpoint_coordinator.cpp:474: [my-graph-id.42] [42:1] StorageError: can't save node 
state, aborting checkpoint 2025-06-25T15:21:30.240697Z node 5 :STREAMS_CHECKPOINT_COORDINATOR ERROR: checkpoint_coordinator.cpp:479: [my-graph-id.42] [42:1] Got all acks for aborted checkpoint, aborting in storage Waiting for TEvAbortCheckpointRequest (storage) 2025-06-25T15:21:30.240758Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:575: [my-graph-id.42] [42:1] Got TEvAbortCheckpointResponse 2025-06-25T15:21:30.240803Z node 5 :STREAMS_CHECKPOINT_COORDINATOR WARN: checkpoint_coordinator.cpp:581: [my-graph-id.42] [42:1] Checkpoint aborted 2025-06-25T15:21:30.240840Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:372: [my-graph-id.42] Got TEvScheduleCheckpointing 2025-06-25T15:21:30.240888Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:348: [my-graph-id.42] [42:2] Registering new checkpoint in storage Waiting for TEvCreateCheckpointRequest (storage) 2025-06-25T15:21:30.240945Z node 5 :STREAMS_CHECKPOINT_COORDINATOR DEBUG: checkpoint_coordinator.cpp:388: [my-graph-id.42] [42:2] Got TEvCreateCheckpointResponse 2025-06-25T15:21:30.240984Z node 5 :STREAMS_CHECKPOINT_COORDINATOR INFO: checkpoint_coordinator.cpp:434: [my-graph-id.42] [42:2] Checkpoint successfully created, going to inject barriers to 1 actor(s) Waiting for TEvInjectCheckpointBarrier (ingress) |97.3%| [TM] {RESULT} ydb/core/fq/libs/checkpointing/ut/unittest >> test_tpch_import.py::TestS3TpchImport::test_import_and_export >> SequenceProxy::Basics >> TSentinelUnstableTests::BSControllerCantChangeStatus |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/parametrized_queries/py3test >> test_parametrized_queries.py::TestParametrizedQueries::test_parametrized_queries[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] |97.3%| [TA] $(B)/ydb/tests/datashard/parametrized_queries/test-results/py3test/{meta.json ... results_accumulator.log} |97.3%| [TA] {RESULT} $(B)/ydb/tests/datashard/parametrized_queries/test-results/py3test/{meta.json ... results_accumulator.log} |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/query_cache/py3test >> test_query_cache.py::TestQueryCache::test [GOOD] |97.3%| [TM] {RESULT} ydb/tests/functional/query_cache/py3test >> TQuoterServiceTest::StaticMultipleAndResources [GOOD] >> TQuoterServiceTest::StaticDeadlines |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] |97.3%| [TA] $(B)/ydb/tests/olap/oom/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> SequenceProxy::Basics [GOOD] >> LongTxService::BasicTransactions >> DataShardFollowers::FollowerKeepsWorkingAfterMvccReadTable [GOOD] >> TxKeys::ComparePointKeys >> TQuoterServiceTest::StaticDeadlines [GOOD] >> MediatorTest::BasicTimecastUpdates >> SequenceProxy::DropRecreate >> DataShardFollowers::FollowerStaleRo >> QuoterWithKesusTest::ForbidsNotCanonizedQuoterPath >> SequenceProxy::DropRecreate [GOOD] >> TxKeys::ComparePointKeys [GOOD] >> TxKeys::ComparePointKeysWithNull |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/sequenceproxy/ut/unittest >> SequenceProxy::DropRecreate [GOOD] Test command err: 2025-06-25T15:21:32.091821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:21:32.091979Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:21:32.222043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:21:33.100170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSequence, opId: 281474976715657:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp:543) 2025-06-25T15:21:33.438517Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T15:21:33.439080Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/00072e/r3tmp/tmpxvBXt7/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T15:21:33.439612Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/00072e/r3tmp/tmpxvBXt7/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00072e/r3tmp/tmpxvBXt7/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 13470223608623844591 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T15:21:34.306149Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:21:34.306218Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:21:34.372580Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:21:34.975097Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSequence, opId: 281474976715657:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp:543) 2025-06-25T15:21:35.238158Z node 4 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T15:21:35.239232Z node 4 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/00072e/r3tmp/tmpggpckM/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T15:21:35.239713Z node 4 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/00072e/r3tmp/tmpggpckM/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/00072e/r3tmp/tmpggpckM/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 14288246779364490810 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T15:21:35.376423Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropSequence, opId: 281474976715658:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_drop_sequence.cpp:343) 2025-06-25T15:21:35.676877Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSequence, opId: 281474976715659:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_sequence.cpp:543) >> DiscoveryIsNotBroken::NoKafkaEndpointInDiscovery >> KqpTpch::Query01 [GOOD] >> TxKeys::ComparePointKeysWithNull [GOOD] >> LongTxService::BasicTransactions [GOOD] >> DataShardFollowers::FollowerStaleRo [GOOD] >> QuoterWithKesusTest::ForbidsNotCanonizedQuoterPath [GOOD] >> MediatorTest::BasicTimecastUpdates [GOOD] >> LongTxService::AcquireSnapshot >> KqpTpch::Query02 >> DataShardFollowers::FollowerRebootAfterSysCompaction >> TxKeys::ComparePointAndRange >> QuoterWithKesusTest::ForbidsNotCanonizedResourcePath >> LongTxService::AcquireSnapshot [GOOD] >> MediatorTest::MultipleTablets >> TxKeys::ComparePointAndRange [GOOD] >> LongTxService::LockSubscribe >> LongTxService::LockSubscribe [GOOD] >> TxKeys::ComparePointAndRangeWithNull >> TxKeys::ComparePointAndRangeWithNull [GOOD] >> TxKeys::ComparePointAndRangeWithInf >> TxKeys::ComparePointAndRangeWithInf [GOOD] |97.3%| [TS] {RESULT} ydb/core/tx/sequenceproxy/ut/unittest |97.3%| [TA] {RESULT} $(B)/ydb/tests/olap/oom/test-results/py3test/{meta.json ... 
results_accumulator.log} ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/long_tx_service/ut/unittest >> LongTxService::LockSubscribe [GOOD] Test command err: 2025-06-25T15:21:37.799194Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T15:21:37.799726Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/0006c6/r3tmp/tmpQuS6rI/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T15:21:37.800363Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/0006c6/r3tmp/tmpQuS6rI/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/0006c6/r3tmp/tmpQuS6rI/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 16188797525885753711 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T15:21:37.855125Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:94: TLongTxService [Node 1] Received TEvBeginTx from [1:430:2319] 2025-06-25T15:21:37.855280Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:123: TLongTxService [Node 1] Created new LongTxId# ydb://long-tx/000000001f3g9c89nek29gxtgj?node_id=1 2025-06-25T15:21:37.867399Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:265: TLongTxService [Node 2] Received TEvAttachColumnShardWrites from [2:431:2101] LongTxId# ydb://long-tx/000000001f3g9c89nek29gxtgj?node_id=1 2025-06-25T15:21:37.867534Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 2] Received TEvNodeConnected for NodeId# 1 from session [2:100:2048] 2025-06-25T15:21:37.867726Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:265: TLongTxService [Node 1] Received TEvAttachColumnShardWrites from [2:154:2090] LongTxId# ydb://long-tx/000000001f3g9c89nek29gxtgj?node_id=1 2025-06-25T15:21:37.867942Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:139: TLongTxService 
[Node 2] Received TEvCommitTx from [2:431:2101] LongTxId# ydb://long-tx/000000001f3g9c89nek29gxtgj?node_id=1 2025-06-25T15:21:37.868081Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:139: TLongTxService [Node 1] Received TEvCommitTx from [2:154:2090] LongTxId# ydb://long-tx/000000001f3g9c89nek29gxtgj?node_id=1 2025-06-25T15:21:37.869027Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:162: TLongTxService [Node 1] Committed LongTxId# ydb://long-tx/000000001f3g9c89nek29gxtgj?node_id=1 without side-effects 2025-06-25T15:21:37.869336Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:212: TLongTxService [Node 2] Received TEvRollbackTx from [2:431:2101] LongTxId# ydb://long-tx/000000001f3g9c89nek29gxtgj?node_id=1 2025-06-25T15:21:37.869487Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:212: TLongTxService [Node 1] Received TEvRollbackTx from [2:154:2090] LongTxId# ydb://long-tx/000000001f3g9c89nek29gxtgj?node_id=1 2025-06-25T15:21:37.870683Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:212: TLongTxService [Node 2] Received TEvRollbackTx from [2:431:2101] LongTxId# ydb://long-tx/000000001f3g9c89nek29gxtgj?node_id=1 2025-06-25T15:21:37.871207Z node 1 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:212: TLongTxService [Node 1] Received TEvRollbackTx from [2:154:2090] LongTxId# ydb://long-tx/000000001f3g9c89nek29gxtgj?node_id=1 2025-06-25T15:21:37.872372Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 2 2025-06-25T15:21:37.872780Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 2 2025-06-25T15:21:37.873018Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 2] Received TEvNodeDisconnected for NodeId# 1 from session [2:100:2048] 2025-06-25T15:21:37.873313Z node 2 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:74:2075] ServerId# [1:354:2271] TabletId# 72057594037932033 PipeClientId# [2:74:2075] 2025-06-25T15:21:37.878136Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:139: TLongTxService [Node 2] Received TEvCommitTx from [2:431:2101] LongTxId# ydb://long-tx/000000001f3g9c89nek29gxtgj?node_id=3 2025-06-25T15:21:37.878354Z node 2 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 2] Received TEvNodeDisconnected for NodeId# 3 from session [2:469:2103] 2025-06-25T15:21:38.615841Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:21:38.615901Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:21:38.692137Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046578944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:21:39.567972Z node 4 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T15:21:39.568534Z node 4 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/0006c6/r3tmp/tmpBAvMuV/pdisk_1.dat": unknown reason, errno# 0. 
PDiskId# 1000 2025-06-25T15:21:39.568787Z node 4 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/0006c6/r3tmp/tmpBAvMuV/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/0006c6/r3tmp/tmpBAvMuV/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 14160248321284481232 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T15:21:39.915928Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:346: TLongTxService [Node 3] Received TEvAcquireReadSnapshot from [3:514:2384] for database /dc-1 2025-06-25T15:21:39.916021Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:381: TLongTxService [Node 3] Scheduling TEvAcquireSnapshotFlush for database /dc-1 2025-06-25T15:21:39.926407Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:388: TLongTxService [Node 3] Received TEvAcquireSnapshotFlush for database /dc-1 2025-06-25T15:21:39.926633Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:48: LongTxService.AcquireSnapshot [3:565:2420] Sending navigate request for /dc-1 2025-06-25T15:21:39.931154Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:75: LongTxService.AcquireSnapshot [3:565:2420] Received navigate response status Ok 2025-06-25T15:21:39.931235Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:129: LongTxService.AcquireSnapshot [3:565:2420] Sending acquire step to coordinator 72057594046316545 2025-06-25T15:21:39.933471Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:165: LongTxService.AcquireSnapshot [3:565:2420] Received read step 1000 2025-06-25T15:21:39.933606Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:400: TLongTxService [Node 3] Received TEvAcquireSnapshotFinished, cookie = 1 2025-06-25T15:21:39.942178Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:94: TLongTxService [Node 3] Received TEvBeginTx from [3:514:2384] 2025-06-25T15:21:39.942239Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:381: TLongTxService [Node 3] Scheduling TEvAcquireSnapshotFlush for database /dc-1 
2025-06-25T15:21:39.954436Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:388: TLongTxService [Node 3] Received TEvAcquireSnapshotFlush for database /dc-1 2025-06-25T15:21:39.954640Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:48: LongTxService.AcquireSnapshot [3:582:2431] Sending navigate request for /dc-1 2025-06-25T15:21:39.954874Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:75: LongTxService.AcquireSnapshot [3:582:2431] Received navigate response status Ok 2025-06-25T15:21:39.954929Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:129: LongTxService.AcquireSnapshot [3:582:2431] Sending acquire step to coordinator 72057594046316545 2025-06-25T15:21:39.955113Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:165: LongTxService.AcquireSnapshot [3:582:2431] Received read step 1500 2025-06-25T15:21:39.955219Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:400: TLongTxService [Node 3] Received TEvAcquireSnapshotFinished, cookie = 2 2025-06-25T15:21:39.955277Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:425: TLongTxService [Node 3] Created new read-only LongTxId# ydb://long-tx/read-only?snapshot=1500%3Amax 2025-06-25T15:21:39.955465Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:94: TLongTxService [Node 3] Received TEvBeginTx from [3:514:2384] 2025-06-25T15:21:39.955513Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:381: TLongTxService [Node 3] Scheduling TEvAcquireSnapshotFlush for database /dc-1 2025-06-25T15:21:39.965876Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:388: TLongTxService [Node 3] Received TEvAcquireSnapshotFlush for database /dc-1 2025-06-25T15:21:39.966097Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:48: LongTxService.AcquireSnapshot [3:584:2433] Sending navigate request for /dc-1 2025-06-25T15:21:39.966379Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:75: LongTxService.AcquireSnapshot [3:584:2433] Received navigate response status Ok 2025-06-25T15:21:39.966432Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:129: LongTxService.AcquireSnapshot [3:584:2433] Sending acquire step to coordinator 72057594046316545 2025-06-25T15:21:39.966631Z node 3 :LONG_TX_SERVICE DEBUG: acquire_snapshot_impl.cpp:165: LongTxService.AcquireSnapshot [3:584:2433] Received read step 1500 2025-06-25T15:21:39.966731Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:400: TLongTxService [Node 3] Received TEvAcquireSnapshotFinished, cookie = 3 2025-06-25T15:21:39.966806Z node 3 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:423: TLongTxService [Node 3] Created new read-write LongTxId# ydb://long-tx/00000001e90d61ec0szb17a763?node_id=3&snapshot=1500%3Amax 2025-06-25T15:21:40.984261Z node 6 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-25T15:21:40.985533Z node 6 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2897} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/yft8/0006c6/r3tmp/tmpC6Lmaf/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-25T15:21:40.985736Z node 6 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:300} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/yft8/0006c6/r3tmp/tmpC6Lmaf/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/yft8/0006c6/r3tmp/tmpC6Lmaf/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 7993700900749272347 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 SlotSizeInUnits# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-25T15:21:41.023142Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:468: TLongTxService [Node 5] Received TEvRegisterLock for LockId# 123 2025-06-25T15:21:41.023253Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 5] Received TEvSubscribeLock from [5:434:2323] for LockId# 987 LockNode# 5 2025-06-25T15:21:41.033700Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 6] Received TEvSubscribeLock from [6:435:2101] for LockId# 987 LockNode# 5 2025-06-25T15:21:41.035010Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 6] Received TEvNodeConnected for NodeId# 5 from session [6:100:2048] 2025-06-25T15:21:41.036597Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 5] Received TEvSubscribeLock from [6:154:2090] for LockId# 987 LockNode# 5 2025-06-25T15:21:41.037813Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:611: TLongTxService [Node 6] Received TEvLockStatus from [5:153:2137] for LockId# 987 LockNode# 5 LockStatus# STATUS_NOT_FOUND 2025-06-25T15:21:41.038006Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 5] Received TEvSubscribeLock from [5:434:2323] for LockId# 123 LockNode# 5 2025-06-25T15:21:41.038150Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 6] Received TEvSubscribeLock from [6:435:2101] for LockId# 123 LockNode# 5 2025-06-25T15:21:41.039567Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 5] Received TEvSubscribeLock from [6:154:2090] for LockId# 123 LockNode# 5 2025-06-25T15:21:41.039785Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:611: TLongTxService [Node 6] Received TEvLockStatus from [5:153:2137] for LockId# 123 LockNode# 5 LockStatus# STATUS_SUBSCRIBED 2025-06-25T15:21:41.039977Z node 5 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:479: 
TLongTxService [Node 5] Received TEvUnregisterLock for LockId# 123 2025-06-25T15:21:41.040114Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:611: TLongTxService [Node 6] Received TEvLockStatus from [5:153:2137] for LockId# 123 LockNode# 5 LockStatus# STATUS_NOT_FOUND 2025-06-25T15:21:41.040268Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:519: TLongTxService [Node 6] Received TEvSubscribeLock from [6:435:2101] for LockId# 234 LockNode# 5 2025-06-25T15:21:41.040473Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 6 2025-06-25T15:21:41.040837Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 6 2025-06-25T15:21:41.041109Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 6] Received TEvNodeDisconnected for NodeId# 5 from session [6:100:2048] 2025-06-25T15:21:41.041293Z node 6 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:74:2075] ServerId# [5:356:2273] TabletId# 72057594037932033 PipeClientId# [6:74:2075] 2025-06-25T15:21:41.273999Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 6] Received TEvNodeConnected for NodeId# 5 from session [6:465:2048] 2025-06-25T15:21:41.274229Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 6 2025-06-25T15:21:41.274276Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 6 2025-06-25T15:21:41.274307Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 6 2025-06-25T15:21:41.274708Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 6] Received TEvNodeDisconnected for NodeId# 5 from session [6:465:2048] 2025-06-25T15:21:41.274957Z node 6 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:466:2102] ServerId# [5:470:2342] TabletId# 72057594037932033 PipeClientId# [6:466:2102] 2025-06-25T15:21:41.275073Z node 6 :TX_PROXY WARN: proxy_impl.cpp:227: actor# [6:152:2089] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2025-06-25T15:21:41.551970Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 6] Received TEvNodeConnected for NodeId# 5 from session [6:497:2048] 2025-06-25T15:21:41.552194Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 6 2025-06-25T15:21:41.552245Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 6 2025-06-25T15:21:41.552811Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 6] Received TEvNodeDisconnected for NodeId# 5 from session [6:497:2048] 2025-06-25T15:21:41.553088Z node 6 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:498:2103] ServerId# [5:502:2363] TabletId# 72057594037932033 PipeClientId# [6:498:2103] 2025-06-25T15:21:41.805989Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:833: TLongTxService [Node 6] Received TEvNodeConnected for NodeId# 5 from session [6:519:2048] 2025-06-25T15:21:41.806248Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 6 2025-06-25T15:21:41.806293Z node 5 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 6 
2025-06-25T15:21:41.806435Z node 6 :LONG_TX_SERVICE DEBUG: long_tx_service_impl.cpp:871: TLongTxService [Node 6] Received TEvNodeDisconnected for NodeId# 5 from session [6:519:2048] 2025-06-25T15:21:41.806845Z node 6 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:520:2105] ServerId# [5:524:2376] TabletId# 72057594037932033 PipeClientId# [6:520:2105] |97.3%| [TS] {RESULT} ydb/core/tx/long_tx_service/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_keys/unittest >> TxKeys::ComparePointAndRangeWithInf [GOOD] Test command err: 2025-06-25T15:21:37.098308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:21:37.098384Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:21:37.101001Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:21:37.115494Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:21:37.116804Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:135:2156] 2025-06-25T15:21:37.117995Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:21:37.131768Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:112:2142], Recipient [1:135:2156]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:21:37.180847Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:21:37.181022Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:21:37.184357Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:21:37.184429Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:21:37.184464Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:21:37.185967Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:21:37.186098Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:21:37.186156Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:204:2156] in generation 2 2025-06-25T15:21:37.267118Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:21:37.304792Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:21:37.311200Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:21:37.311378Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:219:2215] 2025-06-25T15:21:37.311426Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:21:37.311461Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:21:37.311492Z node 1 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:21:37.311710Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:21:37.311758Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:21:37.313344Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:21:37.313509Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:21:37.313569Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:21:37.313611Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:21:37.313707Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:21:37.313758Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:21:37.313809Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:21:37.313842Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:21:37.313881Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:21:37.313985Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:215:2212], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:21:37.314053Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:21:37.314104Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:213:2211], serverId# [1:215:2212], sessionId# [0:0:0] 2025-06-25T15:21:37.329374Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:103:2136], Recipient [1:135:2156]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 103 RawX2: 4294969432 } TxBody: "\nY\n\006table2\032\n\n\004key1\030\002 \"\032\013\n\004key2\030\200$ #\032\014\n\005value\030\200$ 8(\"(#:\010Z\006\010\000\030\000(\000J\014/Root/table2\222\002\013\th\020\000\000\000\000\000\000\020\016" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-25T15:21:37.329456Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:21:37.329553Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-25T15:21:37.329856Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-25T15:21:37.329923Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-25T15:21:37.329978Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-25T15:21:37.330089Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-25T15:21:37.330126Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-25T15:21:37.330156Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-25T15:21:37.330188Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:21:37.330552Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-25T15:21:37.330606Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-25T15:21:37.330647Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-25T15:21:37.330698Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:21:37.330776Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-25T15:21:37.330807Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-25T15:21:37.330837Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-25T15:21:37.330867Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-25T15:21:37.330899Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-25T15:21:37.345311Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-25T15:21:37.345383Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-25T15:21:37.345407Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-25T15:21:37.345447Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-25T15:21:37.346458Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-25T15:21:37.350081Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:225:2221], Recipient [1:135:2156]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:21:37.350147Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:21:37.350216Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:224:2220], serverId# [1:225:2221], sessionId# [0:0:0] 2025-06-25T15:21:37.350408Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287424, Sender [1:103:2136], Recipient [1:135:2156]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-25T15:21:37.350447Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvPlanStep 
2025-06-25T15:21:37.350564Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-25T15:21:37.350605Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-25T15:21:37.350636Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-25T15:21:37.350660Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-25T15:21:37.353900Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 103 RawX2: 4294969432 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-25T15:21:37.353973Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:21:37.354225Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:135:2156], Recipient [1:135:2156]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:21:37.354273Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:21:37.354336Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:21:37.354375Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:21:37.354408Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-25T15:21:37.354455Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-25T15:21:37.354487Z node 1 :TX_DATASHARD TRACE: dat ... 
ode 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:21:41.575819Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [5:25:2072], Recipient [5:136:2157]: {TEvRegisterTabletResult TabletId# 9437184 Entry# 0} 2025-06-25T15:21:41.575857Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-25T15:21:41.575892Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 9437184 time 0 2025-06-25T15:21:41.575926Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:21:41.578444Z node 5 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 1000001 txid# 1} 2025-06-25T15:21:41.578518Z node 5 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 1000001} 2025-06-25T15:21:41.578584Z node 5 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:21:41.578756Z node 5 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:21:41.578790Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000001:1] at 9437184 on unit CreateTable 2025-06-25T15:21:41.578835Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:21:41.578891Z node 5 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 9437184 2025-06-25T15:21:41.578925Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1000001:1] at 9437184 on unit CompleteOperation 2025-06-25T15:21:41.579006Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000001 : 1] from 9437184 at tablet 9437184 send result to client [5:103:2136], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:21:41.579066Z node 5 :TX_DATASHARD INFO: datashard.cpp:1590: 9437184 Sending notify to schemeshard 4200 txId 1 state Ready TxInFly 0 2025-06-25T15:21:41.579168Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:21:41.579837Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [5:230:2226], Recipient [5:136:2157]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 4200 Status: OK ServerId: [5:232:2227] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T15:21:41.579887Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T15:21:41.580011Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5607: Got TEvDataShard::TEvSchemaChanged for unknown txId 1 message# Source { RawX1: 136 RawX2: 21474838637 } Origin: 9437184 State: 2 TxId: 1 Step: 0 Generation: 2 2025-06-25T15:21:41.580089Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [5:127:2151], Recipient [5:136:2157]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 1 2025-06-25T15:21:41.580124Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-25T15:21:41.580169Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 1 datashard 9437184 state 
Ready 2025-06-25T15:21:41.580239Z node 5 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 9437184 Got TEvSchemaChangedResult from SS at 9437184 2025-06-25T15:21:41.580684Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 65543, Sender [5:103:2136], Recipient [5:136:2157]: NActors::TEvents::TEvPoison 2025-06-25T15:21:41.581061Z node 5 :TX_DATASHARD INFO: datashard.cpp:190: OnDetach: 9437184 2025-06-25T15:21:41.581173Z node 5 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 9437184 2025-06-25T15:21:41.591427Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [5:235:2228], Recipient [5:238:2229]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:21:41.595116Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [5:235:2228], Recipient [5:238:2229]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:21:41.595404Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828684, Sender [5:235:2228], Recipient [5:238:2229]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:21:41.602719Z node 5 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [5:238:2229] 2025-06-25T15:21:41.602994Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:21:41.607227Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:673: TxInitSchema.Execute Persist Sys_SubDomainInfo 2025-06-25T15:21:41.648170Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:21:41.648343Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:21:41.650810Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:21:41.650919Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:21:41.650982Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:21:41.651568Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:21:41.651830Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:21:41.651927Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [5:281:2229] in generation 3 2025-06-25T15:21:41.666164Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:21:41.666350Z node 5 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 9437184 2025-06-25T15:21:41.666452Z node 5 :TX_DATASHARD INFO: datashard.cpp:1590: 9437184 Sending notify to schemeshard 4200 txId 1 state Ready TxInFly 0 2025-06-25T15:21:41.666583Z node 5 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 9437184 mediators count is 0 coordinators count is 1 buckets per mediator 2 2025-06-25T15:21:41.666827Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [5:286:2268] 2025-06-25T15:21:41.666871Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:21:41.666926Z node 5 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 9437184 2025-06-25T15:21:41.666989Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] 
Emit heartbeats: at tablet# 9437184 2025-06-25T15:21:41.667172Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:711: TxInitSchemaDefaults.Execute 2025-06-25T15:21:41.667307Z node 5 :TX_DATASHARD DEBUG: datashard__init.cpp:723: TxInitSchemaDefaults.Complete 2025-06-25T15:21:41.667562Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [5:238:2229], Recipient [5:238:2229]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:21:41.667620Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:21:41.668016Z node 5 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:21:41.668128Z node 5 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:21:41.668272Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5607: Got TEvDataShard::TEvSchemaChanged for unknown txId 1 message# Source { RawX1: 238 RawX2: 21474838709 } Origin: 9437184 State: 2 TxId: 1 Step: 0 Generation: 3 2025-06-25T15:21:41.668387Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270976, Sender [5:25:2072], Recipient [5:238:2229]: {TEvRegisterTabletResult TabletId# 9437184 Entry# 0} 2025-06-25T15:21:41.668437Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3170: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-25T15:21:41.668482Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 9437184 time 0 2025-06-25T15:21:41.668532Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:21:41.668614Z node 5 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:21:41.668666Z node 5 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:21:41.668710Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:21:41.668760Z node 5 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:21:41.668802Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:21:41.668842Z node 5 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:21:41.668896Z node 5 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:21:41.669006Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270978, Sender [5:25:2072], Recipient [5:238:2229]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 0 ReadStep# 0 } 2025-06-25T15:21:41.669052Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-25T15:21:41.669097Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 9437184 coordinator 72057594046316545 last step 0 next step 0 2025-06-25T15:21:41.669230Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [5:284:2266], Recipient [5:238:2229]: 
NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 4200 Status: OK ServerId: [5:288:2270] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T15:21:41.669285Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T15:21:41.669416Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [5:127:2151], Recipient [5:238:2229]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 1 2025-06-25T15:21:41.669481Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-25T15:21:41.669529Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 1 datashard 9437184 state Ready 2025-06-25T15:21:41.669594Z node 5 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 9437184 Got TEvSchemaChangedResult from SS at 9437184 2025-06-25T15:21:41.683568Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [5:284:2266], Recipient [5:238:2229]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 4200 ClientId: [5:284:2266] ServerId: [5:288:2270] } 2025-06-25T15:21:41.683640Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed |97.3%| [TM] {RESULT} ydb/core/tx/datashard/ut_keys/unittest >> MediatorTest::MultipleTablets [GOOD] >> MediatorTest::TabletAckBeforePlanComplete >> QuoterWithKesusTest::ForbidsNotCanonizedResourcePath [GOOD] >> QuoterWithKesusTest::HandlesNonExistentResource |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> TRUCalculatorTests::TestReadTable [GOOD] >> TRUCalculatorTests::TestBulkUpsert [GOOD] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] [GOOD] |97.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/schemeshard/ut_ru_calculator/unittest >> TRUCalculatorTests::TestBulkUpsert [GOOD] |97.3%| [TS] {RESULT} ydb/core/tx/schemeshard/ut_ru_calculator/unittest >> DataShardFollowers::FollowerRebootAfterSysCompaction [GOOD] >> DataShardFollowers::FollowerAfterSysCompaction >> KqpTpch::Query02 [GOOD] >> KqpTpch::Query03 >> test.py::TestSqsSplitMergeStdTables::test_std_merge_split >> MediatorTest::TabletAckBeforePlanComplete [GOOD] >> MediatorTest::TabletAckWhenDead >> DiscoveryIsNotBroken::NoKafkaEndpointInDiscovery [GOOD] >> DiscoveryIsNotBroken::NoKafkaSslEndpointInDiscovery |97.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> test_kqprun_recipe.py::TestKqprunRecipe::test_query_execution >> test_select.py::TestDML::test_select[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> KqpTpch::Query03 [GOOD] >> KqpTpch::Query04 >> test_kqprun_recipe.py::TestKqprunRecipe::test_query_execution [GOOD] >> QuoterWithKesusTest::HandlesNonExistentResource [GOOD] >> QuoterWithKesusTest::HandlesAllRequestsForNonExistentResource >> DataShardFollowers::FollowerAfterSysCompaction [GOOD] >> DataShardFollowers::FollowerAfterDataCompaction >> ConfigGRPCService::ReplaceConfig >> 
test_yt_reading.py::TestYtReading::test_partitioned_reading >> KqpTpch::Query04 [GOOD] >> KqpTpch::Query05 >> Graph::CreateGraphShard >> test_liveness_wardens.py::TestLivenessWarden::test_hive_liveness_warden_reports_issues >> MediatorTimeCast::ReadStepSubscribe >> BasicExample::BasicExample >> TCreateAndDropViewTest::CheckCreatedView ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/tools/kqprun/tests/py3test >> test_kqprun_recipe.py::TestKqprunRecipe::test_query_execution [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. |97.3%| [TM] {RESULT} ydb/tests/tools/kqprun/tests/py3test >> Graph::CreateGraphShard [GOOD] >> Graph::UseGraphShard >> test_ttl.py::TestTTLDefaultEnv::test_case [GOOD] >> MediatorTest::TabletAckWhenDead [GOOD] >> QuoterWithKesusTest::HandlesAllRequestsForNonExistentResource [GOOD] >> QuoterWithKesusTest::GetsQuota >> MediatorTest::PlanStepAckToReconnectedMediator >> Graph::UseGraphShard [GOOD] >> Graph::MemoryBackendFullCycle >> ConfigGRPCService::ReplaceConfig [GOOD] >> ConfigGRPCService::FetchConfig >> DataShardFollowers::FollowerAfterDataCompaction [GOOD] >> DataShardFollowers::FollowerDuringSysPartSwitch >> KqpTpch::Query05 [GOOD] >> KqpTpch::Query06 >> test_yt_reading.py::TestYtReading::test_partitioned_reading [GOOD] >> test_yt_reading.py::TestYtReading::test_block_reading >> TCreateAndDropViewTest::CheckCreatedView [GOOD] >> TCreateAndDropViewTest::CreateViewDisabledFeatureFlag >> MediatorTimeCast::ReadStepSubscribe [GOOD] >> MediatorTimeCast::GranularTimecast >> BasicExample::BasicExample [GOOD] >> KqpTpch::Query06 [GOOD] >> KqpTpch::Query07 >> DiscoveryIsNotBroken::NoKafkaSslEndpointInDiscovery [GOOD] >> DiscoveryIsNotBroken::HaveKafkaEndpointInDiscovery >> ExportS3BufferTest::MinBufferSize [GOOD] >> ExportS3BufferTest::MinBufferSizeWithCompression [GOOD] >> ExportS3BufferTest::MinBufferSizeWithCompressionAndEncryption >> ExportS3BufferTest::MinBufferSizeWithCompressionAndEncryption [GOOD] >> MediatorTest::PlanStepAckToReconnectedMediator [GOOD] |97.3%| [TS] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_export/unittest >> ExportS3BufferTest::MinBufferSizeWithCompressionAndEncryption [GOOD] |97.4%| [TS] {RESULT} ydb/core/tx/datashard/ut_export/unittest >> MediatorTest::WatcherReconnect >> ConfigGRPCService::FetchConfig [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/services/config/ut/unittest >> ConfigGRPCService::FetchConfig [GOOD] Test command err: 2025-06-25T15:21:55.733962Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519907552746312424:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:21:55.734120Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/yft8/00097d/r3tmp/tmpBAgHur/pdisk_1.dat 2025-06-25T15:21:56.667032Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:21:56.667129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:21:56.692254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:21:56.743833Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:21:56.802901Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:21:56.818426Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 18874, node 1 2025-06-25T15:21:56.868417Z node 1 :GRPC_SERVER NOTICE: grpc_request_proxy.cpp:367: Grpc request proxy started, nodeid# 1, serve as static node 2025-06-25T15:21:56.868773Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:557: Subscribe to /Root 2025-06-25T15:21:56.869061Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:403: Subscribed for config changes 2025-06-25T15:21:56.869074Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:411: Updated app config 2025-06-25T15:21:56.869163Z node 1 :GRPC_SERVER NOTICE: grpc_request_proxy.cpp:367: Grpc request proxy started, nodeid# 1, serve as static node 2025-06-25T15:21:56.869291Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:557: Subscribe to /Root 2025-06-25T15:21:56.869500Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:403: Subscribed for config changes 2025-06-25T15:21:56.869514Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:411: Updated app config 2025-06-25T15:21:56.876411Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:420: Got proxy service configuration 2025-06-25T15:21:56.876424Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:420: Got proxy service configuration 2025-06-25T15:21:56.878808Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T15:21:56.878826Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-25T15:21:56.894023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046644480 2025-06-25T15:21:56.894952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:21:56.898949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T15:21:56.898991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T15:21:56.899026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:21:56.899043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046644480, domainId: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-25T15:21:56.899089Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:21:56.899166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046644480 2025-06-25T15:21:56.906962Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T15:21:56.907012Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T15:21:56.907021Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T15:21:56.907031Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T15:21:56.934951Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002ac80] created request Name# BlobStorageConfig 2025-06-25T15:21:56.936678Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002b280] created request Name# HiveCreateTablet 2025-06-25T15:21:56.937059Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002b880] created request Name# TabletStateRequest 2025-06-25T15:21:56.937374Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002be80] created request Name# SchemeOperationStatus 2025-06-25T15:21:56.938727Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002c480] created request Name# ChooseProxy 2025-06-25T15:21:56.938988Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002ca80] created request Name# ResolveNode 2025-06-25T15:21:56.940395Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002d080] created request Name# FillNode 2025-06-25T15:21:56.940709Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002d680] created request Name# DrainNode 2025-06-25T15:21:56.941868Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002dc80] created request Name# InterconnectDebug 2025-06-25T15:21:56.942192Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002e280] created request Name# TestShardControl 2025-06-25T15:21:56.943292Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002e880] created request Name# RegisterNode 2025-06-25T15:21:56.943664Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002ee80] created request Name# CmsRequest 2025-06-25T15:21:56.943933Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000df880] created request Name# ConsoleRequest 2025-06-25T15:21:56.944419Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000dfe80] created request Name# SchemeInitRoot 2025-06-25T15:21:56.944712Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000e0480] created request Name# PersQueueRequest 2025-06-25T15:21:56.945140Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000e0a80] created request Name# SchemeOperation 2025-06-25T15:21:56.945420Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000e1080] created request Name# SchemeDescribe 2025-06-25T15:21:57.141362Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:21:57.141392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:21:57.141399Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:21:57.141510Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16407 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:21:58.061536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "hdd2" Kind: "hdd2" } StoragePools { Name: "hdd" Kind: "hdd" } StoragePools { Name: "hdd1" Kind: "hdd1" } StoragePools { Name: "ssd" Kind: "ssd" } StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T15:21:58.064562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T15:21:58.066434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T15:21:58.066466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T15:21:58.069909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T15:21:58.069997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:21:58.074442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T15:21:58.080590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T15:21:58.080833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T15:21:58.080871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 
281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T15:21:58.080954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-25T15:21:58.080976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 2 -> 3 waiting... 2025-06-25T15:21:58.090285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T15:21:58.090330Z node 1 :FLAT_TX_SCHEMESHARD INFO: s ... te::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T15:22:01.213567Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T15:22:01.213591Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-25T15:22:01.213722Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:22:01.219258Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-25T15:22:01.219391Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-25T15:22:01.226330Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 1750864921267, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-25T15:22:01.226441Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1750864921267 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-25T15:22:01.226467Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T15:22:01.226751Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976710657:0 128 -> 240 2025-06-25T15:22:01.226784Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-25T15:22:01.226900Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T15:22:01.226954Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at 
schemeshard: 72057594046644480 2025-06-25T15:22:01.229384Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-25T15:22:01.229416Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-25T15:22:01.229583Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-25T15:22:01.229598Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7519907573721598919:2376], at schemeshard: 72057594046644480, txId: 281474976710657, path id: 1 2025-06-25T15:22:01.229633Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-25T15:22:01.229654Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046644480] TDone opId# 281474976710657:0 ProgressState 2025-06-25T15:22:01.229728Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710657:0 progress is 1/1 2025-06-25T15:22:01.229739Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710657 ready parts: 1/1 2025-06-25T15:22:01.229754Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#281474976710657:0 progress is 1/1 2025-06-25T15:22:01.229763Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710657 ready parts: 1/1 2025-06-25T15:22:01.229792Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 1/1, is published: false 2025-06-25T15:22:01.229810Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 281474976710657 ready parts: 1/1 2025-06-25T15:22:01.229821Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 281474976710657:0 2025-06-25T15:22:01.229828Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 281474976710657:0 2025-06-25T15:22:01.229866Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-25T15:22:01.229880Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:984: Publication still in progress, tx: 281474976710657, publications: 1, subscribers: 1 2025-06-25T15:22:01.229889Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976710657, [OwnerId: 72057594046644480, LocalPathId: 1], 3 2025-06-25T15:22:01.230726Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976710657 2025-06-25T15:22:01.230786Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: 
TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976710657 2025-06-25T15:22:01.230796Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976710657 2025-06-25T15:22:01.230811Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710657, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], version: 3 2025-06-25T15:22:01.230824Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-25T15:22:01.230901Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976710657, subscribers: 1 2025-06-25T15:22:01.230916Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [3:7519907578016566516:2276] 2025-06-25T15:22:01.231517Z node 3 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T15:22:01.231577Z node 3 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T15:22:01.231589Z node 3 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T15:22:01.231611Z node 3 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T15:22:01.233523Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710657 2025-06-25T15:22:01.274108Z node 3 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# FetchConfigRequest, traceId# 01jyktx0ns98hvdqxgdexjajhn, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:49934, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T15:22:01.278034Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000116480] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.278074Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000014480] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.278247Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000013e80] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.278282Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000116a80] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.278384Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000014a80] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.278425Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000015080] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.278538Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d8080] received request Name# ResolveNode ok# false data# peer# current inflight# 0 
2025-06-25T15:22:01.278603Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d7480] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.278703Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000017480] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.278753Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000115280] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.278821Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000115880] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.278892Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000016880] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.278939Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000016280] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.279041Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000015c80] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.279082Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000015680] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.279162Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000016e80] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.279206Z node 3 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000115e80] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 2025-06-25T15:22:01.506528Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; |97.4%| [TM] {RESULT} ydb/services/config/ut/unittest >> QuoterWithKesusTest::GetsQuota [GOOD] >> QuoterWithKesusTest::GetsBigQuota >> TCreateAndDropViewTest::CreateViewDisabledFeatureFlag [GOOD] >> TCreateAndDropViewTest::InvalidQuery |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLDefaultEnv::test_case [GOOD] |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/basic_example/gtest >> BasicExample::BasicExample [GOOD] |97.4%| [TM] {RESULT} ydb/public/sdk/cpp/tests/integration/basic_example/gtest >> MediatorTest::WatcherReconnect [GOOD] |97.4%| [TA] $(B)/ydb/tests/functional/ttl/test-results/py3test/{meta.json ... results_accumulator.log} >> test_insert_restarts.py::TestS3::test_atomic_upload_commit[v1-client0] |97.4%| [TA] {RESULT} $(B)/ydb/tests/functional/ttl/test-results/py3test/{meta.json ... results_accumulator.log} >> MediatorTimeCast::GranularTimecast [GOOD] >> Graph::MemoryBackendFullCycle [GOOD] >> Graph::LocalBackendFullCycle >> MediatorTest::MultipleSteps >> test_yt_reading.py::TestYtReading::test_block_reading [GOOD] >> test_ctas.py::TestYtCtas::test_simple_ctast >> DataShardFollowers::FollowerDuringSysPartSwitch [GOOD] >> DataShardFollowers::FollowerDuringDataPartSwitch ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/time_cast/ut/unittest >> MediatorTimeCast::GranularTimecast [GOOD] Test command err: 2025-06-25T15:21:59.486785Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:21:59.486973Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:21:59.487026Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0006f8/r3tmp/tmpgEouXG/pdisk_1.dat 2025-06-25T15:21:59.965033Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:21:59.965923Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:916: Actor# [1:25:2072] HANDLE NKikimr::TEvMediatorTimecast::TEvSubscribeReadStep{ CoordinatorId# 72057594046316545 } 2025-06-25T15:21:59.966824Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [1:25:2072] HANDLE EvClientConnected 2025-06-25T15:21:59.971382Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:993: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepResult CoordinatorID: 72057594046316545 SeqNo: 1 LastAcquireStep: 0 NextAcquireStep: 0 2025-06-25T15:22:00.026703Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:22:00.039319Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750864916088046 != 1750864916088050 2025-06-25T15:22:00.091838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:22:00.091988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:22:00.104869Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:22:00.190684Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 500 2025-06-25T15:22:00.305790Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 1000 2025-06-25T15:22:00.326883Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:22:00.499397Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 2000 2025-06-25T15:22:00.640470Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 3000 2025-06-25T15:22:00.793597Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 4000 2025-06-25T15:22:00.949081Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 5000 2025-06-25T15:22:01.005849Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:964: 
Actor# [1:25:2072] HANDLE NKikimr::TEvMediatorTimecast::TEvWaitReadStep{ CoordinatorId# 72057594046316545 ReadStep# 7000 } 2025-06-25T15:22:01.161332Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 6000 2025-06-25T15:22:01.293317Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 1 NextAcquireStep: 7000 2025-06-25T15:22:01.303924Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:587: Actor# [1:25:2072] HANDLE EvClientDestroyed 2025-06-25T15:22:01.325310Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [1:25:2072] HANDLE EvClientConnected 2025-06-25T15:22:01.326097Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:993: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepResult CoordinatorID: 72057594046316545 SeqNo: 2 LastAcquireStep: 0 NextAcquireStep: 7000 2025-06-25T15:22:01.340123Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:964: Actor# [1:25:2072] HANDLE NKikimr::TEvMediatorTimecast::TEvWaitReadStep{ CoordinatorId# 72057594046316545 ReadStep# 12000 } 2025-06-25T15:22:01.463474Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 7500 2025-06-25T15:22:01.569359Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 8000 2025-06-25T15:22:01.727502Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 9000 2025-06-25T15:22:01.862231Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 10000 2025-06-25T15:22:02.024069Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 11000 2025-06-25T15:22:02.189452Z node 1 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:1043: Actor# [1:25:2072] HANDLE TEvSubscribeReadStepUpdate CoordinatorID: 72057594046316545 SeqNo: 2 NextAcquireStep: 12000 2025-06-25T15:22:06.051360Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:22:06.051534Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:22:06.051674Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0006f8/r3tmp/tmpbIUgdW/pdisk_1.dat 2025-06-25T15:22:06.327704Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T15:22:06.344583Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:454: Actor# [2:25:2072] HANDLE {TEvRegisterTablet TabletId# 72057594047365120 ProcessingParams { Version: 0 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 1 Mediators: 72057594046382081 }} 2025-06-25T15:22:06.345363Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:270: Actor# [2:25:2072] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 1 Tablets: 72057594047365120 MinStep: 0 2025-06-25T15:22:06.345435Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:372: Actor# [2:25:2072] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2025-06-25T15:22:06.345486Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:502: Actor# [2:25:2072] SEND to Sender# [2:611:2510] {TEvRegisterTabletResult TabletId# 72057594047365120 Entry# 0} 2025-06-25T15:22:06.345884Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [2:25:2072] HANDLE EvClientConnected 2025-06-25T15:22:06.346042Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 1 LatestStep: 0 2025-06-25T15:22:06.346113Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:25:2072] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 0} 2025-06-25T15:22:06.346321Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:454: Actor# [2:25:2072] HANDLE {TEvRegisterTablet TabletId# 72057594047365121 ProcessingParams { Version: 0 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 1 Mediators: 72057594046382081 }} 2025-06-25T15:22:06.346391Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:298: Actor# [2:25:2072] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatchModify Bucket: 0 SubscriptionId: 2 AddTablets: 72057594047365121 2025-06-25T15:22:06.346438Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:502: Actor# [2:25:2072] SEND to Sender# [2:614:2512] {TEvRegisterTabletResult TabletId# 72057594047365121 Entry# 0} 2025-06-25T15:22:06.346603Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 2 LatestStep: 0 2025-06-25T15:22:06.346797Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:454: Actor# [2:25:2072] HANDLE {TEvRegisterTablet TabletId# 72057594047365123 ProcessingParams { Version: 0 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 1 Mediators: 72057594046382081 }} 2025-06-25T15:22:06.346869Z node 2 
:TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:298: Actor# [2:25:2072] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatchModify Bucket: 0 SubscriptionId: 3 AddTablets: 72057594047365123 2025-06-25T15:22:06.346909Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:502: Actor# [2:25:2072] SEND to Sender# [2:615:2513] {TEvRegisterTabletResult TabletId# 72057594047365123 Entry# 0} 2025-06-25T15:22:06.347163Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 3 LatestStep: 0 2025-06-25T15:22:06.374717Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:22:06.376940Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750864922768574 != 1750864922768578 2025-06-25T15:22:06.426473Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:22:06.426605Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:22:06.438735Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:22:06.518369Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 3 LatestStep: 500 2025-06-25T15:22:06.518488Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:25:2072] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 500} 2025-06-25T15:22:06.645552Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 3 LatestStep: 1000 2025-06-25T15:22:06.645645Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:25:2072] HANDLE {TEvUpdate Mediator# 7205759404638208 ... 0 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-25T15:22:07.265422Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 2500 FrozenTablets: 72057594047365120 FrozenTablets: 72057594047365121 FrozenSteps: 2499 FrozenSteps: 2499 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-25T15:22:07.276184Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 2500 FrozenTablets: 72057594047365120 FrozenTablets: 72057594047365121 FrozenSteps: 2499 FrozenSteps: 2499 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3000 ... 
unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-25T15:22:07.293503Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3000 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-25T15:22:07.304088Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 ... unblocking plan for tablet2 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 FrozenTablets: 72057594047365121 FrozenSteps: 2999 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-25T15:22:07.331516Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 FrozenTablets: 72057594047365121 FrozenSteps: 2999 ... unblocking plan for tablet2 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 FrozenTablets: 72057594047365121 FrozenSteps: 3499 ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-25T15:22:07.353090Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 FrozenTablets: 72057594047365121 FrozenSteps: 3499 ... unblocking plan for tablet2 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... unblocking update: Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 UnfrozenTablets: 72057594047365121 ... 
unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-25T15:22:07.376131Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:692: Actor# [2:25:2072] HANDLE NKikimrTxMediatorTimecast.TEvGranularUpdate Mediator: 72057594046382081 Bucket: 0 SubscriptionId: 7 LatestStep: 3500 UnfrozenTablets: 72057594047365121 ... restarting mediator 2025-06-25T15:22:07.390104Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:587: Actor# [2:25:2072] HANDLE EvClientDestroyed 2025-06-25T15:22:07.390276Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:270: Actor# [2:25:2072] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 8 Tablets: 72057594047365123 Tablets: 72057594047365120 Tablets: 72057594047365121 MinStep: 3500 2025-06-25T15:22:07.390322Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:355: Actor# [2:25:2072] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2025-06-25T15:22:07.390860Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [2:25:2072] HANDLE EvClientConnected 2025-06-25T15:22:07.390976Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:270: Actor# [2:25:2072] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 9 Tablets: 72057594047365123 Tablets: 72057594047365120 Tablets: 72057594047365121 MinStep: 3500 2025-06-25T15:22:07.391025Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:355: Actor# [2:25:2072] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2025-06-25T15:22:07.391588Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [2:25:2072] HANDLE EvClientConnected 2025-06-25T15:22:07.391666Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:270: Actor# [2:25:2072] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 10 Tablets: 72057594047365123 Tablets: 72057594047365120 Tablets: 72057594047365121 MinStep: 3500 2025-06-25T15:22:07.391694Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:355: Actor# [2:25:2072] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2025-06-25T15:22:07.393289Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [2:25:2072] HANDLE EvClientConnected 2025-06-25T15:22:07.393413Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:270: Actor# [2:25:2072] SEND to Mediator# 72057594046382081 NKikimrTxMediatorTimecast.TEvGranularWatch Bucket: 0 SubscriptionId: 11 Tablets: 72057594047365123 Tablets: 72057594047365120 Tablets: 72057594047365121 MinStep: 3500 2025-06-25T15:22:07.393445Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:355: Actor# [2:25:2072] SEND to Mediator# 72057594046382081 {TEvWatch Bucket# 0} 2025-06-25T15:22:07.404161Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:578: Actor# [2:25:2072] HANDLE EvClientConnected ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 2025-06-25T15:22:07.408910Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:25:2072] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 0} ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... 
blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet cookie 0 ... fully unblocking tx1 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 2025-06-25T15:22:07.437715Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:25:2072] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 2500} ... tablet1 at 2500 ... tablet2 at 3500 ... tablet3 at 3500 ... fully unblocking tx2 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 ... blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 2025-06-25T15:22:07.448725Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:25:2072] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 3000} ... tablet1 at 3000 ... tablet2 at 3500 ... tablet3 at 3500 ... fully unblocking tx3 ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NMediatorTimeCastTest::NTestSuiteMediatorTimeCast::TTargetTablet ... 
blocking NKikimr::TEvTxProcessing::TEvPlanStepAck from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_COORDINATOR_MEDIATORQ_ACTOR cookie 0 ... blocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR cookie 0 2025-06-25T15:22:07.460791Z node 2 :TX_MEDIATOR_TIMECAST DEBUG: time_cast.cpp:625: Actor# [2:25:2072] HANDLE {TEvUpdate Mediator# 72057594046382081 Bucket# 0 TimeBarrier# 3500} ... tablet1 at 3500 ... tablet2 at 3500 ... tablet3 at 3500 |97.4%| [TM] {RESULT} ydb/core/tx/time_cast/ut/unittest |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] >> TCreateAndDropViewTest::InvalidQuery [GOOD] >> TCreateAndDropViewTest::ParsingSecurityInvoker >> KqpTpch::Query07 [GOOD] >> KqpTpch::Query08 >> MediatorTest::MultipleSteps [GOOD] >> MediatorTest::WatchesBeforeFirstStep >> QuoterWithKesusTest::GetsBigQuota [GOOD] >> QuoterWithKesusTest::GetsBigQuotaWithDeadline >> DiscoveryIsNotBroken::HaveKafkaEndpointInDiscovery [GOOD] >> DiscoveryIsNotBroken::HaveKafkaSslEndpointInDiscovery |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] [GOOD] |97.4%| [TA] $(B)/ydb/tests/functional/script_execution/test-results/py3test/{meta.json ... results_accumulator.log} |97.4%| [TA] {RESULT} $(B)/ydb/tests/functional/script_execution/test-results/py3test/{meta.json ... results_accumulator.log} >> TRangeTreap::Simple [GOOD] >> TRangeTreap::Sequential >> Graph::LocalBackendFullCycle [GOOD] >> Graph::MemoryBordersOnGet >> MediatorTest::WatchesBeforeFirstStep [GOOD] >> TCreateAndDropViewTest::ParsingSecurityInvoker [GOOD] >> TCreateAndDropViewTest::ListCreatedView >> MediatorTest::RebootTargetTablets >> test_ctas.py::TestYtCtas::test_simple_ctast [GOOD] >> test_select.py::TestDML::test_select[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> DataShardFollowers::FollowerDuringDataPartSwitch [GOOD] >> DataShardFollowers::FollowerReadDuringSplit |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/yt/kqp_yt_import/py3test >> test_ctas.py::TestYtCtas::test_simple_ctast [GOOD] |97.4%| [TM] {RESULT} ydb/tests/fq/yt/kqp_yt_import/py3test >> Graph::MemoryBordersOnGet [GOOD] >> Graph::LocalBordersOnGet >> TDataShardRSTest::TestCleanupInRS+UseSink >> Graph::LocalBordersOnGet [GOOD] >> QuoterWithKesusTest::GetsBigQuotaWithDeadline [GOOD] >> QuoterWithKesusTest::FailsToGetBigQuota >> MediatorTest::RebootTargetTablets [GOOD] ------- [TS] {asan, default-linux-x86_64, release} ydb/core/graph/ut/unittest >> Graph::LocalBordersOnGet [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:21:56.566802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:21:56.567012Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:21:56.567056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:21:56.567094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:21:56.567779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:21:56.567824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:21:56.567926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:21:56.568021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:21:56.568804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:21:56.570051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:21:56.687120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:21:56.696760Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:21:56.725571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:21:56.726051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:21:56.726296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:21:56.740946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:21:56.741397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:21:56.744394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:21:56.745068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:21:56.755672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:21:56.756833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:21:56.763997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:21:56.764092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:21:56.764393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:21:56.764469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:21:56.764566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:21:56.764666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:21:56.781654Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:21:56.926572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:21:56.927834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:21:56.929083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:21:56.929156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:21:56.930427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:21:56.930520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:21:56.933699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:21:56.934474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:21:56.934749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:21:56.934888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:21:56.934934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:21:56.934968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:21:56.937077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:21:56.937138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:21:56.937184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:21:56.939000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:21:56.939044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:21:56.939091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:21:56.939145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:21:56.945679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:21:56.950892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:21:56.952176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:21:56.953329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:21:56.953493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:21:56.953555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:21:56.955077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:21:56.955165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-25T15:21:56.955359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:21:56.955460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:21:56.960861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:21:56.960921Z node 1 :FLAT_TX_SCHEMESHARD ... -25T15:22:20.848191Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-25T15:22:20.848222Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-25T15:22:20.848288Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 109 } Time: 109 2025-06-25T15:22:20.848322Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-25T15:22:20.848354Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-25T15:22:20.848386Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-25T15:22:20.848472Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 110 } Time: 110 2025-06-25T15:22:20.848497Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-25T15:22:20.848525Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-25T15:22:20.848565Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-25T15:22:20.848652Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 111 } Time: 111 2025-06-25T15:22:20.848671Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-25T15:22:20.848690Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-25T15:22:20.848711Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-25T15:22:20.848778Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 112 } Time: 112 2025-06-25T15:22:20.848800Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-25T15:22:20.848822Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-25T15:22:20.848844Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-25T15:22:20.848894Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 113 } Time: 113 2025-06-25T15:22:20.848913Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-25T15:22:20.848939Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-25T15:22:20.848970Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-25T15:22:20.849049Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 114 } Time: 114 2025-06-25T15:22:20.849074Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-25T15:22:20.849102Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-25T15:22:20.849149Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-25T15:22:20.849248Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: 
"test.metric0" Value: 115 } Time: 115 2025-06-25T15:22:20.849275Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-25T15:22:20.849303Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-25T15:22:20.849558Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-25T15:22:20.849657Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 116 } Time: 116 2025-06-25T15:22:20.849675Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-25T15:22:20.849695Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-25T15:22:20.849718Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-25T15:22:20.849765Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 117 } Time: 117 2025-06-25T15:22:20.849785Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-25T15:22:20.849813Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-25T15:22:20.849844Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-25T15:22:20.849916Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 118 } Time: 118 2025-06-25T15:22:20.849938Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-25T15:22:20.849964Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-25T15:22:20.849995Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-25T15:22:20.850068Z node 6 :GRAPH TRACE: shard_impl.cpp:192: SHARD Metrics { Name: "test.metric0" Value: 119 } Time: 119 2025-06-25T15:22:20.850086Z node 6 :GRAPH TRACE: shard_impl.cpp:197: SHARD Executing direct TxStoreMetrics 2025-06-25T15:22:20.850111Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:20: SHARD TTxStoreMetrics::Execute 2025-06-25T15:22:20.850143Z node 6 :GRAPH TRACE: backends.cpp:303: DB Stored metrics 2025-06-25T15:22:20.850205Z node 6 :GRAPH TRACE: shard_impl.cpp:226: SHARD Handle TEvGraph::TEvGetMetrics from [6:575:2502] 2025-06-25T15:22:20.850266Z node 6 :GRAPH DEBUG: tx_get_metrics.cpp:20: SHARD TTxGetMetrics::Execute 2025-06-25T15:22:20.850318Z node 6 :GRAPH DEBUG: backends.cpp:326: DB Querying from 0 to 119 2025-06-25T15:22:20.862919Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863005Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863032Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863057Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863082Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863106Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863147Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863174Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863197Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863223Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863252Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863278Z 
node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863302Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863472Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863512Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863536Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863565Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863590Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863615Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863638Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863661Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863686Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863708Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863732Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863756Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863779Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863803Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863828Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863852Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863877Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863901Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863924Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863947Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863970Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.863992Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864010Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864026Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864043Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864059Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864078Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864095Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864114Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864140Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864162Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864185Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD 
TTxStoreMetrics::Complete 2025-06-25T15:22:20.864206Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864228Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864251Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864276Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864297Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864336Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864358Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864379Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864401Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864418Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864433Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864449Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864463Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864489Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864505Z node 6 :GRAPH DEBUG: tx_store_metrics.cpp:25: SHARD TTxStoreMetrics::Complete 2025-06-25T15:22:20.864528Z node 6 :GRAPH DEBUG: tx_get_metrics.cpp:25: SHARD TTxGetMetric::Complete 2025-06-25T15:22:20.864565Z node 6 :GRAPH TRACE: tx_get_metrics.cpp:26: SHARD TxGetMetrics returned 60 points for request 3 2025-06-25T15:22:20.864688Z node 6 :GRAPH TRACE: service_impl.cpp:199: SVC TEvMetricsResult 3 2025-06-25T15:22:20.864740Z node 6 :GRAPH TRACE: service_impl.cpp:202: SVC TEvMetricsResult found request 3 resending to [6:576:2503] |97.4%| [TS] {RESULT} ydb/core/graph/ut/unittest >> DataShardStats::OneChannelStatsCorrect >> MediatorTest::ResendSubset >> TCreateAndDropViewTest::ListCreatedView [GOOD] >> TCreateAndDropViewTest::CreateSameViewTwice >> KeyValueGRPCService::SimpleAcquireLock >> TIndexProcesorTests::TestCreateIndexProcessor >> ReadUpdateWrite::Load >> DiscoveryIsNotBroken::HaveKafkaSslEndpointInDiscovery [GOOD] >> Functions::CreateRequest [GOOD] >> Functions::CreateResponse [GOOD] >> KafkaProtocol::ProduceScenario >> KqpTpch::Query08 [GOOD] >> KqpTpch::Query09 >> DataShardFollowers::FollowerReadDuringSplit [GOOD] >> TIndexProcesorTests::TestCreateIndexProcessor [GOOD] >> TIndexProcesorTests::TestSingleCreateQueueEvent >> MediatorTest::ResendSubset [GOOD] >> MediatorTest::ResendNotSubset ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_followers/unittest >> DataShardFollowers::FollowerReadDuringSplit [GOOD] Test command err: 2025-06-25T15:21:30.850984Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:21:30.851176Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:21:30.851232Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00099c/r3tmp/tmp89bIA2/pdisk_1.dat 2025-06-25T15:21:31.380581Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:21:31.390085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:21:31.445292Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:21:31.463905Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750864887897016 != 1750864887897020 2025-06-25T15:21:31.510933Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T15:21:31.512009Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T15:21:31.513643Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:21:31.513785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:21:31.533387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:21:31.625916Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T15:21:31.625996Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T15:21:31.627011Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T15:21:31.778198Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 PartitionConfig { FollowerGroups { FollowerCount: 1 AllowLeaderPromotion: false } } } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T15:21:31.778319Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T15:21:31.779030Z node 1 :TX_PROXY DEBUG: 
schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T15:21:31.779120Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T15:21:31.779502Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T15:21:31.779687Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T15:21:31.779815Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T15:21:31.780094Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T15:21:31.783814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:21:31.785045Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T15:21:31.785138Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T15:21:31.831392Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:21:31.832856Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:21:31.833366Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:21:31.833721Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:21:31.885274Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:21:31.886052Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:21:31.886155Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:21:31.888420Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:21:31.888511Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:21:31.888560Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 
2025-06-25T15:21:31.896650Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:21:31.896868Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:21:31.897022Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:21:31.907859Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:21:31.930804Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:21:31.932197Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:21:31.932364Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:21:31.932403Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:21:31.932436Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:21:31.932465Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:21:31.932706Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:21:31.933456Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:21:31.934617Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:21:31.934713Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:21:31.934791Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:21:31.934824Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:21:31.935508Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:21:31.935558Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:21:31.935599Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:21:31.935647Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:21:31.935718Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:21:31.937365Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:21:31.937407Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:21:31.937465Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at 
leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:21:31.937548Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:21:31.937582Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:21:31.937701Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:21:31.939173Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:21:31.939242Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:21:31.939355Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:21:31.939514Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status f ... eKqpTransaction 2025-06-25T15:22:25.374902Z node 8 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [8:61:2108] TxId# 281474976715665 ProcessProposeKqpTransaction 2025-06-25T15:22:25.376833Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3269: StateWorkAsFollower, received event# 269877761, Sender [8:1064:2828], Recipient [8:1037:2810]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:22:25.376913Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3277: StateWorkAsFollower, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:22:25.376981Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at follower 1 tablet# 72075186224037890, clientId# [8:1062:2827], serverId# [8:1064:2828], sessionId# [0:0:0] 2025-06-25T15:22:25.377151Z node 8 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715665. Ctx: { TraceId: 01jyktxr0r3dvgvqc08k2kgs45, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=8&id=YTY0ZGU0MzAtZWU5ZGRkNmEtNDVlOWE2MTctNGI4ZWJiYjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:22:25.379099Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3269: StateWorkAsFollower, received event# 269553215, Sender [8:1068:2829], Recipient [8:1037:2810]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 MaxRowsInResult: 1 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-25T15:22:25.379153Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3279: StateWorkAsFollower, processing event TEvDataShard::TEvRead 2025-06-25T15:22:25.379277Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} queued, type NKikimr::NDataShard::TDataShard::TTxReadViaPipeline 2025-06-25T15:22:25.379355Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:22:25.379491Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 1 2025-06-25T15:22:25.379590Z node 8 :TX_DATASHARD DEBUG: datashard__init.cpp:806: Updating sys metadata on follower, tabletId 72075186224037890 prev TChangeCounter{serial=0, epoch=0} current TChangeCounter{serial=6, epoch=1} 2025-06-25T15:22:25.380259Z node 8 :TX_DATASHARD DEBUG: datashard__init.cpp:823: Updating tables metadata on follower, tabletId 72075186224037890 prev TChangeCounter{serial=0, epoch=0} current TChangeCounter{serial=4, epoch=1} 2025-06-25T15:22:25.381312Z node 8 :TX_DATASHARD DEBUG: datashard__init.cpp:894: Updating snapshots metadata on follower, tabletId 72075186224037890 prev TChangeCounter{serial=0, epoch=0} current TChangeCounter{serial=0, epoch=1} 2025-06-25T15:22:25.381425Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037890 changed HEAD read to repeatable v1500/18446744073709551615 2025-06-25T15:22:25.381515Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit CheckRead 2025-06-25T15:22:25.381627Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-25T15:22:25.381697Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit CheckRead 2025-06-25T15:22:25.381746Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit BuildAndWaitDependencies 2025-06-25T15:22:25.381799Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit BuildAndWaitDependencies 2025-06-25T15:22:25.381838Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:1] at 72075186224037890 2025-06-25T15:22:25.381893Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-25T15:22:25.381917Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-25T15:22:25.381938Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit ExecuteRead 2025-06-25T15:22:25.381960Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit 
ExecuteRead 2025-06-25T15:22:25.382113Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037890 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 MaxRowsInResult: 1 Reverse: false TotalRowsLimit: 1001 } 2025-06-25T15:22:25.382371Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Restart 2025-06-25T15:22:25.382406Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Restart at tablet# 72075186224037890 2025-06-25T15:22:25.382490Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 1 -> retry Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:22:25.382575Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} touch new 0b, 65b lo load (65b in total), 0b requested for data (4194304b in total) 2025-06-25T15:22:25.382638Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} took 8388608b of static mem, Memory{8388608 dyn 0} 2025-06-25T15:22:25.382719Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} requests PageCollection [72075186224037888:1:23:1:12288:190:0] 65 bytes, 1 pages: [0 4] 2025-06-25T15:22:25.382803Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} postponed, 65b, pages {1 wait, 1 load}, freshly touched 1 pages 2025-06-25T15:22:25.383089Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} got result TEvResult{1 pages [72075186224037888:1:23:1:12288:190:0] ok OK}, category 1 2025-06-25T15:22:25.383235Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 1 2025-06-25T15:22:25.383274Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit ExecuteRead 2025-06-25T15:22:25.383363Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037890 Execute read# 2, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 MaxRowsInResult: 1 Reverse: false TotalRowsLimit: 1001 } 2025-06-25T15:22:25.383560Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037890 Complete read# {[8:1068:2829], 0} after executionsCount# 2 2025-06-25T15:22:25.383638Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037890 read iterator# {[8:1068:2829], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 1, total queries# 1, firstUnprocessed# 0 2025-06-25T15:22:25.383756Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-25T15:22:25.383784Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit ExecuteRead 2025-06-25T15:22:25.383810Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 72075186224037890 to execution unit CompletedOperations 2025-06-25T15:22:25.383838Z node 8 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1827: Trying to execute [0:1] at 72075186224037890 on unit CompletedOperations 2025-06-25T15:22:25.383878Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 72075186224037890 is Executed 2025-06-25T15:22:25.383900Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 72075186224037890 executing on unit CompletedOperations 2025-06-25T15:22:25.383927Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:1] at 72075186224037890 has finished 2025-06-25T15:22:25.383970Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2025-06-25T15:22:25.384054Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} hope 2 -> done Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:22:25.384135Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{1, NKikimr::NDataShard::TDataShard::TTxReadViaPipeline} release 8388608b of static, Memory{0 dyn 0} 2025-06-25T15:22:25.384196Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037890 2025-06-25T15:22:25.384432Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3269: StateWorkAsFollower, received event# 269553217, Sender [8:1037:2810], Recipient [8:1037:2810]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-25T15:22:25.384481Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3280: StateWorkAsFollower, processing event TEvDataShard::TEvReadContinue 2025-06-25T15:22:25.384577Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{2, NKikimr::NDataShard::TDataShard::TTxReadContinue} queued, type NKikimr::NDataShard::TDataShard::TTxReadContinue 2025-06-25T15:22:25.384645Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{2, NKikimr::NDataShard::TDataShard::TTxReadContinue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:22:25.384722Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037890 ReadContinue for iterator# {[8:1068:2829], 0}, firstUnprocessedQuery# 0 2025-06-25T15:22:25.384805Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037890 ReadContinue: iterator# {[8:1068:2829], 0}, FirstUnprocessedQuery# 0 2025-06-25T15:22:25.384925Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037890 readContinue iterator# {[8:1068:2829], 0} sends rowCount# 0, bytes# 0, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:22:25.385003Z node 8 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:3103: 72075186224037890 read iterator# {[8:1068:2829], 0} finished in ReadContinue 2025-06-25T15:22:25.385138Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{2, NKikimr::NDataShard::TDataShard::TTxReadContinue} hope 1 -> done Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:22:25.385214Z node 8 :TABLET_EXECUTOR DEBUG: Follower{72075186224037890:1:7} Tx{2, NKikimr::NDataShard::TDataShard::TTxReadContinue} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:22:25.386111Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3269: StateWorkAsFollower, received event# 269553219, Sender [8:1068:2829], Recipient [8:1037:2810]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:22:25.386164Z node 8 
:TX_DATASHARD TRACE: datashard_impl.h:3282: StateWorkAsFollower, processing event TEvDataShard::TEvReadCancel 2025-06-25T15:22:25.386238Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037890 ReadCancel: { ReadId: 0 } { items { uint32_value: 3 } items { uint32_value: 33 } } |97.4%| [TM] {RESULT} ydb/core/tx/datashard/ut_followers/unittest >> TCreateAndDropViewTest::CreateSameViewTwice [GOOD] >> TCreateAndDropViewTest::CreateViewOccupiedName >> KeyValueGRPCService::SimpleAcquireLock [GOOD] >> KeyValueGRPCService::SimpleExecuteTransaction >> QuoterWithKesusTest::FailsToGetBigQuota [GOOD] >> QuoterWithKesusTest::PrefetchCoefficient >> KafkaProtocol::ProduceScenario [GOOD] >> KafkaProtocol::IdempotentProducerScenario >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> TRangeTreap::Sequential [GOOD] >> TRangeTreap::Random >> TRangeTreap::Random [GOOD] >> TIndexProcesorTests::TestSingleCreateQueueEvent [GOOD] >> TIndexProcesorTests::TestReindexSingleQueue >> TDqSolomonWriteActorTest::TestWriteFormat |97.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/locks/ut_range_treap/unittest >> TRangeTreap::Random [GOOD] Test command err: NOTE: building treap of size 1000000 got height 50 and needed 1000000 ops (1000000 inserts 0 updates 0 deletes) and 32527827 comparisons (32.527827 per op) NOTE: building treap of size 8781 got height 29 and needed 10975 ops (9756 inserts 244 updates 975 deletes) and 221900 comparisons (20.21867882 per op) Checking point 443 ... found 404 ranges, needed 934 comparisons (2.311881188 per range) Checking point 9568 ... found 2223 ranges, needed 7987 comparisons (3.592892488 per range) Checking point 3651 ... found 2099 ranges, needed 6776 comparisons (3.228203907 per range) Checking point 365 ... found 347 ranges, needed 792 comparisons (2.282420749 per range) Checking point 805 ... found 702 ranges, needed 1660 comparisons (2.364672365 per range) Checking point 5065 ... found 2267 ranges, needed 8393 comparisons (3.702249669 per range) Checking point 6313 ... found 2260 ranges, needed 8281 comparisons (3.664159292 per range) Checking point 8038 ... found 2230 ranges, needed 8306 comparisons (3.724663677 per range) Checking point 707 ... found 634 ranges, needed 1493 comparisons (2.35488959 per range) Checking point 7980 ... 
found 2240 ranges, needed 8352 comparisons (3.728571429 per range) |97.5%| [TM] {RESULT} ydb/core/tx/locks/ut_range_treap/unittest >> MediatorTest::ResendNotSubset [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> TIndexProcesorTests::TestReindexSingleQueue [GOOD] >> TIndexProcesorTests::TestDeletedQueueNotReindexed >> MediatorTest::OneCoordinatorResendTxNotLost >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> KeyValueGRPCService::SimpleExecuteTransaction [GOOD] >> KeyValueGRPCService::SimpleExecuteTransactionWithWrongGeneration >> TCreateAndDropViewTest::CreateViewOccupiedName [GOOD] >> TCreateAndDropViewTest::CreateViewIfNotExists >> KqpTpch::Query09 [GOOD] >> KqpTpch::Query10 >> TDqPqRdReadActorTests::TestReadFromTopic2 >> TDqPqRdReadActorTests::TestReadFromTopic2 [GOOD] >> TDqPqRdReadActorTests::IgnoreUndeliveredWithWrongGeneration >> TDqPqRdReadActorTests::IgnoreUndeliveredWithWrongGeneration [GOOD] >> TIndexProcesorTests::TestDeletedQueueNotReindexed [GOOD] >> TIndexProcesorTests::TestManyMessages >> TDqPqRdReadActorTests::SessionError >> MediatorTest::OneCoordinatorResendTxNotLost [GOOD] >> TDqPqRdReadActorTests::SessionError [GOOD] >> KeyValueGRPCService::SimpleExecuteTransactionWithWrongGeneration [GOOD] >> KeyValueGRPCService::SimpleRenameUnexistedKey >> TDqPqRdReadActorTests::ReadWithFreeSpace >> QuoterWithKesusTest::PrefetchCoefficient [FAIL] >> QuoterWithKesusTest::GetsQuotaAfterPause >> TDqPqRdReadActorTests::ReadWithFreeSpace [GOOD] >> DataShardStats::OneChannelStatsCorrect [GOOD] >> DataShardStats::MultipleChannelsStatsCorrect >> TDqPqRdReadActorTests::TestSaveLoadPqRdRead >> KafkaProtocol::IdempotentProducerScenario [GOOD] >> KafkaProtocol::FetchScenario >> TSentinelBaseTests::PDiskInitialStatus [GOOD] >> TSentinelBaseTests::PDiskErrorState [GOOD] >> TSentinelBaseTests::PDiskInactiveAfterStateChange [GOOD] >> TSentinelBaseTests::PDiskFaultyState [GOOD] >> TSentinelBaseTests::PDiskStateChangeNormalFlow [GOOD] >> TSentinelBaseTests::PDiskStateChangeNodePermanentlyBad [GOOD] >> TSentinelBaseTests::PDiskStateChangeNodeNotExpectedRestart [GOOD] >> TSentinelBaseTests::PDiskStateChangeNodeExpectedRestart [GOOD] >> TSentinelBaseTests::GuardianDataCenterRatio [GOOD] >> TSentinelBaseTests::GuardianFaultyPDisks >> test_unknown_data_source.py::TestUnknownDataSource::test_should_fail_unknown_data_source[v2-client0] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/mediator/ut/unittest >> MediatorTest::OneCoordinatorResendTxNotLost [GOOD] Test command err: 2025-06-25T15:21:41.705858Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:21:41.706004Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:21:41.706059Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000958/r3tmp/tmpcs15zh/pdisk_1.dat 2025-06-25T15:21:42.269197Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:21:42.274382Z node 1 :TX_MEDIATOR INFO: mediator__schema.cpp:23: tablet# 72057594047365120 TTxSchema Complete 2025-06-25T15:21:42.274950Z node 1 :TX_MEDIATOR INFO: mediator__init.cpp:88: tablet# 72057594047365120 CreateTxInit wait TEvMediatorConfiguration for switching to StateWork from external 2025-06-25T15:21:42.275520Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [1:578:2494] connected 2025-06-25T15:21:42.275645Z node 1 :TX_MEDIATOR NOTICE: mediator_impl.cpp:133: tablet# 72057594047365120 actor# [1:561:2484] HANDLE TEvMediatorConfiguration Version# 1 2025-06-25T15:21:42.276063Z node 1 :TX_MEDIATOR DEBUG: mediator__configure.cpp:77: tablet# 72057594047365120 version# 1 TTxConfigure Complete 2025-06-25T15:21:42.276251Z node 1 :TX_MEDIATOR INFO: mediator__init.cpp:64: tablet# 72057594047365120 CreateTxInit Complete ... waiting for watcher to connect 2025-06-25T15:21:42.276758Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [1:584:2499] connected 2025-06-25T15:21:42.276850Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [1:582:2498] to# [1:580:2496] ExecQueue 2025-06-25T15:21:42.276973Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:175: Actor# [1:580:2496] MediatorId# 72057594047365120 HANDLE TEvGranularWatch from# [1:582:2498] bucket# 0 ... 
waiting for watcher to connect (done) 2025-06-25T15:21:42.280054Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [1:582:2498] to# [1:580:2496] ExecQueue 2025-06-25T15:21:42.280115Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:159: Actor# [1:580:2496] MediatorId# 72057594047365120 HANDLE TEvWatch 2025-06-25T15:21:42.280153Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:164: Actor# [1:580:2496] MediatorId# 72057594047365120 SEND TEvWatchBucket to# [1:581:2497] bucket.ActiveActor 2025-06-25T15:21:42.280211Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:380: Actor# [1:581:2497] Mediator# 72057594047365120 HANDLE {TEvWatchBucket Source# [1:582:2498]} 2025-06-25T15:21:42.280296Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:391: Actor# [1:581:2497] Mediator# 72057594047365120 SEND to# [1:582:2498] {TEvUpdate Mediator# 72057594047365120 Bucket# 0 TimeBarrier# 0} 2025-06-25T15:21:42.292979Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [1:588:2503] connected 2025-06-25T15:21:42.293117Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:139: tablet# 72057594047365120 HANDLE EvCoordinatorSync 2025-06-25T15:21:42.293187Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:83: tablet# 72057594047365120 SEND EvCoordinatorSyncResult to# [1:586:2501] Cookie# 1 CompleteStep# 0 LatestKnownStep# 0 SubjectiveTime# 3 Coordinator# 72057594046316545 2025-06-25T15:21:42.293571Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [1:580:2496] MediatorId# 72057594047365120 HANDLE TEvCommitStep {TMediateStep From 0 To# 1000Steps: {{TCoordinatorStep step# 1000 PrevStep# 0}}} marker# M1 2025-06-25T15:21:42.293637Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [1:580:2496] MediatorId# 72057594047365120 SEND TEvStepPlanComplete to# [1:581:2497] bucket.ActiveActor step# 1000 2025-06-25T15:21:42.293714Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [1:581:2497] Mediator# 72057594047365120 HANDLE {TEvStepPlanComplete step# 1000} 2025-06-25T15:21:42.293932Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:171: Actor# [1:581:2497] Mediator# 72057594047365120 SEND to# [1:582:2498] {TEvUpdate Mediator# 72057594047365120 Bucket# 0 TimeBarrier# 1000} ... 
waiting for blocked plan step 2025-06-25T15:21:42.314801Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594047365120 HANDLE EvCoordinatorStep coordinator# 72057594046316545 step# 1010 2025-06-25T15:21:42.314867Z node 1 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594047365120], Coordinator [72057594046316545], step# [1010] transactions [1] 2025-06-25T15:21:42.314967Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:205: tablet# 72057594047365120 SEND EvCommitStep to# [1:580:2496] ExecQueue {TMediateStep From 1000 To# 1010Steps: {{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 1 AckTo# [1:586:2501]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 1}}}}} marker# M0 2025-06-25T15:21:42.315102Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [1:580:2496] MediatorId# 72057594047365120 HANDLE TEvCommitStep {TMediateStep From 1000 To# 1010Steps: {{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 1 AckTo# [1:586:2501]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 1}}}}} marker# M1 2025-06-25T15:21:42.315147Z node 1 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365121]. TxIds: txid# 1 marker# M2 2025-06-25T15:21:42.315194Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [1:580:2496] MediatorId# 72057594047365120 SEND Ev to# [1:581:2497] step# 1010 forTablet# 72057594047365121 txid# 1 marker# M3 2025-06-25T15:21:42.315239Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [1:580:2496] MediatorId# 72057594047365120 SEND TEvStepPlanComplete to# [1:581:2497] bucket.ActiveActor step# 1010 2025-06-25T15:21:42.315316Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [1:581:2497] Mediator# 72057594047365120 HANDLE {TEvCommitTabletStep step# 1010 TabletId# 72057594047365121 Transactions {{TTx Moderator# 0 txid# 1 AckTo# [1:586:2501]}}} marker# M4 2025-06-25T15:21:42.315497Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [1:581:2497] Mediator# 72057594047365120 HANDLE {TEvStepPlanComplete step# 1010} 2025-06-25T15:21:42.316650Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [1:581:2497] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365121 Status: OK ServerId: [1:610:2515] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T15:21:42.316742Z node 1 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 1, marker M5lu 2025-06-25T15:21:42.316789Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [1:581:2497] Mediator# 72057594047365120 SEND to# 72057594047365121 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365121} ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 ... waiting for blocked plan step (done) ... waiting for no pending commands 2025-06-25T15:21:42.317288Z node 1 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [1:582:2498] to# [1:580:2496] ExecQueue 2025-06-25T15:21:42.317357Z node 1 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:189: Actor# [1:580:2496] MediatorId# 72057594047365120 HANDLE TEvGranularWatchModify from# [1:582:2498] bucket# 0 ... 
waiting for no pending commands (done) ... unblocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet ... waiting for watch updates 2025-06-25T15:21:42.317625Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:342: Actor# [1:581:2497] Mediator# 72057594047365120 HANDLE {TEvPlanStepAccepted TabletId# 72057594047365121 step# 1010} 2025-06-25T15:21:42.317702Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:415: Actor# [1:581:2497] Mediator# 72057594047365120 SEND to# [1:586:2501] {TEvPlanStepAck TabletId# 72057594047365121 step# 1010 txid# 1} 2025-06-25T15:21:42.317801Z node 1 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:171: Actor# [1:581:2497] Mediator# 72057594047365120 SEND to# [1:582:2498] {TEvUpdate Mediator# 72057594047365120 Bucket# 0 TimeBarrier# 1010} ... waiting for watch updates (done) 2025-06-25T15:21:45.766083Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:21:45.766244Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:21:45.766371Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000958/r3tmp/tmprma4qq/pdisk_1.dat 2025-06-25T15:21:46.018409Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T15:21:46.022648Z node 2 :TX_MEDIATOR INFO: mediator__schema.cpp:23: tablet# 72057594047365120 TTxSchema Complete 2025-06-25T15:21:46.023271Z node 2 :TX_MEDIATOR INFO: mediator__init.cpp:88: tablet# 72057594047365120 CreateTxInit wait TEvMediatorConfiguration for switching to StateWork from external 2025-06-25T15:21:46.023949Z node 2 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [2:578:2494] connected 2025-06-25T15:21:46.024053Z node 2 :TX_MEDIATOR NOTICE: mediator_impl.cpp:133: tablet# 72057594047365120 actor# [2:561:2484] HANDLE TEvMediatorConfiguration Version# 1 2025-06-25T15:21:46.024499Z node 2 :TX_MEDIATOR DEBUG: mediator__configure.cpp:77: tablet# 72057594047365120 version# 1 TTxConfigure Complete 2025-06-25T15:21:46.024649Z node 2 :TX_MEDIATOR INFO: mediator__init.cpp:64: tablet# 72057594047365120 CreateTxInit Complete ... waiting for watcher to connect 2025-06-25T15:21:46.025106Z node 2 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [2:584:2499] connected 2025-06-25T15:21:46.025202Z node 2 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [2:582:2498] to# [2:580:2496] ExecQueue 2025-06-25T15:21:46.025250Z node 2 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:175: Actor# [2:580:2496] MediatorId# 72057594047365120 HANDLE TEvGranularWatch from# [2:582:2498] bucket# 0 ... waiting for watcher to connect (done) 2025-06-25T15:21:46.025533Z node 2 :TX_MEDIATOR DEBUG: mediator_impl.cpp:308: tablet# 72057594047365120 FORWARD Watch from# [2:582:2498] to# [2:580:2496] ExecQueue 2025-06-25T15:21:46.025586Z node 2 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:159: Actor# [2:580:2496] MediatorId# 72057594047365120 HANDLE TEvWatch 2025-06-25T15:21:46.025644Z node 2 :T ... 
-25T15:22:37.463696Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594047365120 HANDLE EvCoordinatorStep coordinator# 72057594046316546 step# 1010 2025-06-25T15:22:37.463713Z node 12 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594047365120], Coordinator [72057594046316546], step# [1010] transactions [1] 2025-06-25T15:22:37.463786Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:205: tablet# 72057594047365120 SEND EvCommitStep to# [12:580:2496] ExecQueue {TMediateStep From 0 To# 1010Steps: {{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 1 AckTo# [12:623:2520]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 1}{tablet# 72057594047365122 txid# 1}}}{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 2 AckTo# [12:626:2523]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 2}{tablet# 72057594047365122 txid# 2}}}}} marker# M0 2025-06-25T15:22:37.463866Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [12:580:2496] MediatorId# 72057594047365120 HANDLE TEvCommitStep {TMediateStep From 0 To# 1010Steps: {{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 1 AckTo# [12:623:2520]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 1}{tablet# 72057594047365122 txid# 1}}}{TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 2 AckTo# [12:626:2523]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 2}{tablet# 72057594047365122 txid# 2}}}}} marker# M1 2025-06-25T15:22:37.463903Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365121]. TxIds: txid# 1 txid# 2 marker# M2 2025-06-25T15:22:37.463940Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [12:580:2496] MediatorId# 72057594047365120 SEND Ev to# [12:581:2497] step# 1010 forTablet# 72057594047365121 txid# 1 txid# 2 marker# M3 2025-06-25T15:22:37.463979Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365122]. 
TxIds: txid# 1 txid# 2 marker# M2 2025-06-25T15:22:37.464002Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [12:580:2496] MediatorId# 72057594047365120 SEND Ev to# [12:581:2497] step# 1010 forTablet# 72057594047365122 txid# 1 txid# 2 marker# M3 2025-06-25T15:22:37.464032Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [12:580:2496] MediatorId# 72057594047365120 SEND TEvStepPlanComplete to# [12:581:2497] bucket.ActiveActor step# 1010 2025-06-25T15:22:37.464095Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [12:581:2497] Mediator# 72057594047365120 HANDLE {TEvCommitTabletStep step# 1010 TabletId# 72057594047365121 Transactions {{TTx Moderator# 0 txid# 1 AckTo# [12:623:2520]}{TTx Moderator# 0 txid# 2 AckTo# [12:626:2523]}}} marker# M4 2025-06-25T15:22:37.464267Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [12:581:2497] Mediator# 72057594047365120 HANDLE {TEvCommitTabletStep step# 1010 TabletId# 72057594047365122 Transactions {{TTx Moderator# 0 txid# 1 AckTo# [12:623:2520]}{TTx Moderator# 0 txid# 2 AckTo# [12:626:2523]}}} marker# M4 2025-06-25T15:22:37.464415Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [12:581:2497] Mediator# 72057594047365120 HANDLE {TEvStepPlanComplete step# 1010} 2025-06-25T15:22:37.464959Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [12:581:2497] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365121 Status: OK ServerId: [12:634:2529] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T15:22:37.465006Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 1, marker M5lu 2025-06-25T15:22:37.465036Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 2, marker M5lu 2025-06-25T15:22:37.465069Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [12:581:2497] Mediator# 72057594047365120 SEND to# 72057594047365121 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365121} ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 2025-06-25T15:22:37.465254Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [12:581:2497] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365122 Status: OK ServerId: [12:635:2530] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T15:22:37.465278Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365122, step# 1010, txid# 1, marker M5lu 2025-06-25T15:22:37.465299Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365122, step# 1010, txid# 2, marker M5lu 2025-06-25T15:22:37.465322Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [12:581:2497] Mediator# 72057594047365120 SEND to# 72057594047365122 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365122} ... 
blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 2025-06-25T15:22:37.477285Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594047365120 server# [12:638:2533] connected 2025-06-25T15:22:37.477427Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:139: tablet# 72057594047365120 HANDLE EvCoordinatorSync 2025-06-25T15:22:37.477482Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:83: tablet# 72057594047365120 SEND EvCoordinatorSyncResult to# [12:636:2531] Cookie# 2 CompleteStep# 1010 LatestKnownStep# 1010 SubjectiveTime# 3 Coordinator# 72057594046316546 2025-06-25T15:22:37.477777Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594047365120 HANDLE EvCoordinatorStep coordinator# 72057594046316546 step# 1010 2025-06-25T15:22:37.477830Z node 12 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594047365120], Coordinator [72057594046316546], step# [1010] transactions [1] 2025-06-25T15:22:37.477897Z node 12 :TX_MEDIATOR DEBUG: mediator_impl.cpp:223: tablet# 72057594047365120 SEND EvRequestLostAcks to# [12:580:2496] ExecQueue step {TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 2 AckTo# [0:0:0]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 2}{tablet# 72057594047365122 txid# 2}}} 2025-06-25T15:22:37.477982Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:130: Actor# [12:580:2496] MediatorId# 72057594047365120 HANDLE TEvRequestLostAcks {TCoordinatorStep step# 1010 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 2 AckTo# [0:0:0]}}TabletsToTransaction: {{tablet# 72057594047365121 txid# 2}{tablet# 72057594047365122 txid# 2}}} AckTo# [12:636:2531] 2025-06-25T15:22:37.478023Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365121]. TxIds: txid# 2 marker# M2 2025-06-25T15:22:37.478071Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [12:580:2496] MediatorId# 72057594047365120 SEND Ev to# [12:581:2497] step# 1010 forTablet# 72057594047365121 txid# 2 marker# M3 2025-06-25T15:22:37.478113Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: execute_queue.cpp:44: Mediator exec queue [72057594047365120], step# 1010 for tablet [72057594047365122]. 
TxIds: txid# 2 marker# M2 2025-06-25T15:22:37.478149Z node 12 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [12:580:2496] MediatorId# 72057594047365120 SEND Ev to# [12:581:2497] step# 1010 forTablet# 72057594047365122 txid# 2 marker# M3 2025-06-25T15:22:37.478223Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:222: Actor# [12:581:2497] Mediator# 72057594047365120 HANDLE {TEvOoOTabletStep step# 1010 TabletId# 72057594047365121 Transactions {{TTx Moderator# 0 txid# 2 AckTo# [12:636:2531]}}} 2025-06-25T15:22:37.478277Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:222: Actor# [12:581:2497] Mediator# 72057594047365120 HANDLE {TEvOoOTabletStep step# 1010 TabletId# 72057594047365122 Transactions {{TTx Moderator# 0 txid# 2 AckTo# [12:636:2531]}}} 2025-06-25T15:22:37.489969Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:294: Actor# [12:581:2497] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594047365121 ClientId: [12:630:2527] ServerId: [12:634:2529] } 2025-06-25T15:22:37.533837Z node 12 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:22:37.535734Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:33:2080] 1750864952933149 != 1750864952933152 2025-06-25T15:22:37.573422Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [12:581:2497] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365121 Status: OK ServerId: [12:663:2546] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-25T15:22:37.573522Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 1, marker M5lu 2025-06-25T15:22:37.573581Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365121, step# 1010, txid# 2, marker M5lu 2025-06-25T15:22:37.573629Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [12:581:2497] Mediator# 72057594047365120 SEND to# 72057594047365121 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365121} ... 
blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 2025-06-25T15:22:37.585334Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:294: Actor# [12:581:2497] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594047365122 ClientId: [12:631:2528] ServerId: [12:635:2530] } 2025-06-25T15:22:37.602263Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [12:581:2497] Mediator# 72057594047365120 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365122 Status: OK ServerId: [12:684:2556] Leader: 1 Dead: 0 Generation: 3 VersionInfo: } 2025-06-25T15:22:37.602362Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365122, step# 1010, txid# 1, marker M5lu 2025-06-25T15:22:37.602414Z node 12 :TX_MEDIATOR_PRIVATE DEBUG: tablet_queue.cpp:117: Send from 72057594047365120 to tablet 72057594047365122, step# 1010, txid# 2, marker M5lu 2025-06-25T15:22:37.602468Z node 12 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [12:581:2497] Mediator# 72057594047365120 SEND to# 72057594047365122 {TEvPlanStep step# 1010 MediatorId# 72057594047365120 TabletID 72057594047365122} ... blocking NKikimr::TEvTxProcessing::TEvPlanStep from TX_MEDIATOR_TABLET_QUEUE_ACTOR to NKikimr::NTxMediator::NTestSuiteMediatorTest::TTargetTablet cookie 0 2025-06-25T15:22:37.628603Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:22:37.628756Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:22:37.640596Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected |97.5%| [TM] {RESULT} ydb/core/tx/mediator/ut/unittest >> TSentinelBaseTests::GuardianFaultyPDisks [GOOD] >> TSentinelBaseTests::GuardianRackRatio >> TSentinelBaseTests::GuardianRackRatio [GOOD] >> TSentinelTests::Smoke |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> TIndexProcesorTests::TestManyMessages [GOOD] >> TIndexProcesorTests::TestOver1000Queues >> KqpTpch::Query10 [GOOD] >> KqpTpch::Query11 >> TCreateAndDropViewTest::CreateViewIfNotExists [GOOD] >> TCreateAndDropViewTest::DropView >> TDqSolomonWriteActorTest::TestWriteFormat [GOOD] >> TDqSolomonWriteActorTest::TestWriteBigBatchMonitoring >> TDqPqRdReadActorTests::TestSaveLoadPqRdRead [GOOD] >> TDqPqRdReadActorTests::CoordinatorChanged >> StatisticsScan::RunScanOnShard >> KeyValueGRPCService::SimpleRenameUnexistedKey [GOOD] >> KeyValueGRPCService::SimpleConcatUnexistedKey >> TSentinelTests::Smoke [GOOD] >> TSentinelTests::PDiskUnknownState >> test_liveness_wardens.py::TestLivenessWarden::test_hive_liveness_warden_reports_issues [GOOD] >> test_liveness_wardens.py::TestLivenessWarden::test_scheme_shard_has_no_in_flight_transactions >> TDqPqRdReadActorTests::CoordinatorChanged [GOOD] >> TDqPqRdReadActorTests::Backpressure >> TSentinelTests::PDiskUnknownState [GOOD] >> TSentinelTests::PDiskErrorState >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |97.5%| [TM] 
{asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> kikimr_config.py::test_kikimr_config_generator_generic_connector_config >> kikimr_config.py::test_kikimr_config_generator_generic_connector_config [GOOD] >> KafkaProtocol::FetchScenario [GOOD] >> KafkaProtocol::BalanceScenario >> QuoterWithKesusTest::GetsQuotaAfterPause [GOOD] >> QuoterWithKesusTest::GetsSeveralQuotas |97.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/library/ut/py3test >> kikimr_config.py::test_kikimr_config_generator_generic_connector_config [GOOD] |97.5%| [TS] {RESULT} ydb/tests/library/ut/py3test |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_timeout.py::TestTimeout::test_timeout >> test_select.py::TestDML::test_select[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/delete/py3test >> test_delete_by_explicit_row_id.py::TestDeleteByExplicitRowId::test_delete_row_by_explicit_row_id 2025-06-25 15:22:36,357 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 15:22:36,763 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 882438 661M 659M 582M ydb-tests-olap-delete --basetemp /home/runner/.ya/build/build_root/yft8/0016ab/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modules - 882636 2.0G 2.0G 1.4G └─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/yft8/0016ab/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff/chunk1/test Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File 
"contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol reports.append(call_and_report(item, "call", log)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call item.runtest() File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call pyfuncitem.retval = testfunction(**testargs) File "ydb/tests/olap/delete/test_delete_by_explicit_row_id.py", line 117, in test_delete_row_by_explicit_row_id self._test_single_column_pk(rows_to_insert=1000, rows_to_delete=10, iterations=10) File "ydb/tests/olap/delete/test_delete_by_explicit_row_id.py", line 59, in _test_single_column_pk assert self._get_row_count(table_path) == rows_in_table File "ydb/tests/olap/delete/test_delete_by_explicit_row_id.py", line 18, in _get_row_count return self.ydb_client.query(f"SELECT count(*) as Rows from `{table_path}`")[0].rows[0]["Rows"] File "ydb/tests/olap/common/ydb_client.py", line 24, in query return self.session_pool.execute_with_retries(statement) File "contrib/python/ydb/py3/ydb/query/pool.py", line 204, in execute_with_retries return retry_operation_sync(wrapped_callee, retry_settings) File "contrib/python/ydb/py3/ydb/retries.py", line 133, in retry_operation_sync for next_opt in opt_generator: File "contrib/python/ydb/py3/ydb/retries.py", line 94, in retry_operation_impl result = YdbRetryOperationFinalResult(callee(*args, **kwargs)) File "contrib/python/ydb/py3/ydb/query/pool.py", line 202, in 
wrapped_callee return [result_set for result_set in it] File "contrib/python/ydb/py3/ydb/_utilities.py", line 173, in __next__ return self._next() File "contrib/python/ydb/py3/ydb/_utilities.py", line 164, in _next res = self.wrapper(next(self.it)) File "contrib/python/grpcio/py3/grpc/_channel.py", line 475, in __next__ return self._next() File "contrib/python/grpcio/py3/grpc/_channel.py", line 872, in _next _common.wait(self._state.condition.wait, _response_ready) File "contrib/python/grpcio/py3/grpc/_common.py", line 150, in wait _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb) File "contrib/python/grpcio/py3/grpc/_common.py", line 112, in _wait_once wait_fn(timeout=timeout) File "contrib/tools/python3/Lib/threading.py", line 359, in wait gotit = waiter.acquire(True, timeout) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...d_root/yft8/0016ab/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/0016ab/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff/chunk1/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/0016ab', '--source-root', '/home/runner/.ya/build/build_root/yft8/0016ab/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/0016ab/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/delete', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '1', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/olap/delete', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...d_root/yft8/0016ab/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/0016ab/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff/chunk1/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/0016ab', '--source-root', 
'/home/runner/.ya/build/build_root/yft8/0016ab/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/0016ab/ydb/tests/olap/delete/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/delete', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--modulo', '10', '--modulo-index', '1', '--partition-mode', 'SEQUENTIAL', '--split-by-tests', '--dep-root', 'ydb/tests/olap/delete', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {}) >> TCreateAndDropViewTest::DropView [GOOD] >> TCreateAndDropViewTest::DropViewDisabledFeatureFlag >> test_liveness_wardens.py::TestLivenessWarden::test_scheme_shard_has_no_in_flight_transactions [GOOD] |97.5%| [TA] $(B)/ydb/tests/olap/delete/test-results/py3test/{meta.json ... results_accumulator.log} |97.5%| [TA] {RESULT} $(B)/ydb/tests/olap/delete/test-results/py3test/{meta.json ... results_accumulator.log} >> ReadUpdateWrite::Load [GOOD] >> KqpTpch::Query11 [GOOD] >> KqpTpch::Query12 >> KeyValueGRPCService::SimpleConcatUnexistedKey [GOOD] >> KeyValueGRPCService::SimpleCopyUnexistedKey >> StatisticsScan::RunScanOnShard [GOOD] >> DataShardStats::MultipleChannelsStatsCorrect [GOOD] >> DataShardStats::HistogramStatsCorrect ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_column_stats/unittest >> StatisticsScan::RunScanOnShard [GOOD] Test command err: 2025-06-25T15:22:47.010265Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:22:47.010452Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:22:47.010518Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007c0/r3tmp/tmpENVv4y/pdisk_1.dat 2025-06-25T15:22:47.527046Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:22:47.537207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:22:47.599273Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:22:47.625104Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750864963198483 != 1750864963198487 2025-06-25T15:22:47.673432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:22:47.673590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:22:47.686885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:22:47.810483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:22:48.288087Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:694:2576], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:22:48.288212Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:704:2581], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:22:48.288294Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:22:48.297315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:22:48.355442Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:22:48.481191Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:708:2584], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T15:22:48.574276Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:778:2623] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:22:50.005673Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715660. Ctx: { TraceId: 01jyktyejw356zmnx9cg3edvvm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTdjNTBjM2ItZGYzMjU2NDAtNDVmNjg1M2MtYWQwZmE4YWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root |97.5%| [TM] {RESULT} ydb/core/tx/datashard/ut_column_stats/unittest >> Splitter::Simple >> test_timeout.py::TestTimeout::test_timeout [GOOD] >> test_commit.py::TestCommit::test_commit >> test.py::test[solomon-BadDownsamplingAggregation-] >> TDqSolomonWriteActorTest::TestWriteBigBatchMonitoring [GOOD] >> TDqSolomonWriteActorTest::TestWriteBigBatchSolomon [GOOD] >> TDqSolomonWriteActorTest::TestWriteWithTimeseries >> Splitter::Simple [GOOD] >> Splitter::Small [GOOD] >> Splitter::Minimal [GOOD] >> Splitter::Trivial [GOOD] >> Splitter::BigAndSmall >> KqpTpch::Query12 [GOOD] >> KqpTpch::Query13 >> Splitter::BigAndSmall [GOOD] >> Splitter::CritSmallPortions ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/high_load/unittest >> ReadUpdateWrite::Load [GOOD] Test command err: Step 1. only write Was written: 1 MiB, Speed: 1844674407370955159 MiB/s Write: 10% 1.067697s 30% 1.067697s 50% 1.067697s 90% 1.067697s 99% 1.067697s Write: 10% 0.877161s 30% 0.877161s 50% 0.877161s 90% 0.877161s 99% 0.877161sWrite: 10% 0.723868s 30% 0.723868s 50% 0.723868s 90% 0.723868s 99% 0.723868sWrite: 10% 1.176101s 30% 1.176101s 50% 1.176101s 90% 1.176101s 99% 1.176101s Write: 10% 0.993845s 30% 0.993845s 50% 0.993845s 90% 0.993845s 99% 0.993845s Write: 10% 0.795840s 30% 0.795840s 50% 0.795840s 90% 0.795840s 99% 0.795840s Write: 10% 1.106031s 30% 1.106031s 50% 1.106031s 90% 1.106031s 99% 1.106031s Write: 10% 1.202275s 30% 1.202275s 50% 1.202275s 90% 1.202275s 99% 1.202275s Write: 10% 1.200195s 30% 1.200195s 50% 1.200195s 90% 1.200195s 99% 1.200195s Write: 10% 0.841231s 30% 0.841231s 50% 0.841231s 90% 0.841231s 99% 0.841231s Write: 10% 1.199434s 30% 1.199434s 50% 1.199434s 90% 1.199434s 99% 1.199434s Write: 10% 1.124316s 30% 1.124316s 50% 1.124316s 90% 1.124316s 99% 1.124316s Write: 10% 1.163536s 30% 1.163536s 50% 1.163536s 90% 1.163536s 99% 1.163536s Write: 10% 1.445985s 30% 1.445985s 50% 1.445985s 90% 1.445985s 99% 1.445985s Write: 10% 1.440164s 30% 1.440164s 50% 1.440164s 90% 1.440164s 99% 1.440164s Write: 10% 0.995818s 30% 0.995818s 50% 0.995818s 90% 0.995818s 99% 0.995818s Write: 10% 1.586306s 30% 1.586306s 50% 1.586306s 90% 1.586306s 99% 1.586306s Write: 10% 1.449657s 30% 1.449657s 50% 1.449657s 90% 1.449657s 99% 1.449657s Write: 10% 1.662987s 30% 1.662987s 50% 1.662987s 90% 1.662987s 99% 1.662987s Write: 10% 1.376345s 30% 1.376345s 50% 1.376345s 90% 1.376345s 99% 1.376345s Write: 10% 1.410807s 30% 1.410807s 50% 1.410807s 90% 1.410807s 99% 1.410807s Write: 10% 0.961279s 30% 0.961279s 50% 0.961279s 90% 0.961279s 99% 0.961279s Write: 10% 0.873175s 30% 0.873175s 50% 0.873175s 90% 0.873175s 99% 0.873175s Write: 10% 1.127332s 30% 1.127332s 50% 1.127332s 90% 1.127332s 99% 1.127332s Write: 10% 1.315293s 30% 1.315293s 50% 1.315293s 90% 
1.315293s 99% 1.315293s Write: 10% 1.442746s 30% 1.442746s 50% 1.442746s 90% 1.442746s 99% 1.442746s Write: 10% 0.721457s 30% 0.721457s 50% 0.721457s 90% 0.721457s 99% 0.721457s Write: 10% 1.017098s 30% 1.017098s 50% 1.017098s 90% 1.017098s 99% 1.017098s Write: 10% 0.901052s 30% 0.901052s 50% 0.901052s 90% 0.901052s 99% 0.901052s Write: 10% 1.003028s 30% 1.003028s 50% 1.003028s 90% 1.003028s 99% 1.003028s Write: 10% 0.901220s 30% 0.901220s 50% 0.901220s 90% 0.901220s 99% 0.901220s Write: 10% 1.143272s 30% 1.143272s 50% 1.143272s 90% 1.143272s 99% 1.143272s Write: 10% 0.989798s 30% 0.989798s 50% 0.989798s 90% 0.989798s 99% 0.989798s Write: 10% 1.331237s 30% 1.331237s 50% 1.331237s 90% 1.331237s 99% 1.331237s Write: 10% 0.916469s 30% 0.916469s 50% 0.916469s 90% 0.916469s 99% 0.916469s Write: 10% 0.791107s 30% 0.791107s 50% 0.791107s 90% 0.791107s 99% 0.791107s Write: 10% 0.571473s 30% 0.571473s 50% 0.571473s 90% 0.571473s 99% 0.571473s Write: 10% 0.552438s 30% 0.552438s 50% 0.552438s 90% 0.552438s 99% 0.552438s Write: 10% 1.733964s 30% 1.733964s 50% 1.733964s 90% 1.733964s 99% 1.733964s Write: 10% 0.693525s 30% 0.693525s 50% 0.693525s 90% 0.693525s 99% 0.693525s Write: 10% 1.726493s 30% 1.726493s 50% 1.726493s 90% 1.726493s 99% 1.726493s Write: 10% 1.602208s 30% 1.602208s 50% 1.602208s 90% 1.602208s 99% 1.602208s Write: 10% 0.495824s 30% 0.495824s 50% 0.495824s 90% 0.495824s 99% 0.495824s Write: 10% 0.783742s 30% 0.783742s 50% 0.783742s 90% 0.783742s 99% 0.783742s Write: 10% 0.695112s 30% 0.695112s 50% 0.695112s 90% 0.695112s 99% 0.695112s Write: 10% 0.637641s 30% 0.637641s 50% 0.637641s 90% 0.637641s 99% 0.637641s Write: 10% 0.661130s 30% 0.661130s 50% 0.661130s 90% 0.661130s 99% 0.661130s Write: 10% 0.979768s 30% 0.979768s 50% 0.979768s 90% 0.979768s 99% 0.979768s Write: 10% 0.671601s 30% 0.671601s 50% 0.671601s 90% 0.671601s 99% 0.671601s Write: 10% 1.726078s 30% 1.726078s 50% 1.726078s 90% 1.726078s 99% 1.726078s Write: 10% 0.862967s 30% 0.862967s 50% 0.862967s 90% 0.862967s 99% 0.862967s Write: 10% 0.690776s 30% 0.690776s 50% 0.690776s 90% 0.690776s 99% 0.690776s Write: 10% 0.607668s 30% 0.607668s 50% 0.607668s 90% 0.607668s 99% 0.607668s Write: 10% Write: 10% 0.835259s 30% 0.835259s 50% 0.835259s 90% 0.835259s 99% 0.835259s Write: 10% 0.813852s 30% 0.813852s 50% 0.813852s 90% 0.813852s 99% 0.813852s 0.525103s 30% 0.525103s 50% 0.525103s 90% 0.525103s 99% Write: 10% Write: 10% Write: 10% 0.655911s 30% 0.655911s 50% 0.655911s 90% 0.655911s 99% 0.655911s 0.681469s 30% 0.681469s 50% 0.681469s 90% 0.681469s 99% Write: 10% 0.683637s 30% 0.683637s 50% 0.683637s 90% 0.683637s 99% 0.683637s 0.525103s0.610949s 30% 0.610949s 50% 0.610949s 90% 0.610949s 99% 0.681469s 0.610949s Write: 10% 0.608498s 30% 0.608498s 50% 0.608498s 90% 0.608498s 99% 0.608498s Write: 10% 0.681941s 30% 0.681941s 50% 0.681941s 90% 0.681941s 99% 0.681941s Write: 10% 0.682075s 30% 0.682075s 50% 0.682075s 90% 0.682075s 99% 0.682075s Write: 10% 0.636885s 30% 0.636885s 50% 0.636885s 90% 0.636885s 99% 0.636885s Step 2. 
read write Write: 10% 0.560375s 30% 0.560375s 50% 0.560375s 90% 0.560375s 99% 0.560375s Write: 10% 0.420225s 30% 0.420225s 50% 0.420225s 90% 0.420225s 99% 0.420225s Write: 10% 0.626307s 30% 0.626307s 50% 0.626307s 90% 0.626307s 99% 0.626307s Write: 10% 0.643802s 30% 0.643802s 50% 0.643802s 90% 0.643802s 99% 0.643802s Write: 10% 0.754028s 30% 0.754028s 50% 0.754028s 90% 0.754028s 99% 0.754028s Write: 10% 0.800716s 30% 0.800716s 50% 0.800716s 90% Write: 10% 0.837623s 30% 0.837623s 50% 0.837623s 90% Write: 10% 0.800716s 99% 0.800716s0.837623s 99% 0.837623s 0.360054s 30% 0.360054s 50% 0.360054s 90% 0.360054s 99% 0.360054s Write: 10% 0.477813s 30% 0.477813s 50% 0.477813s 90% Write: 10% 0.488507s 30% 0.488507s 50% 0.488507s 90% Write: 10% 0.477813s 99% 0.477813s Write: 10% 0.613828s 30% 0.613828s 50% 0.613828s 90% 0.613828s 99% 0.613828sWrite: 10% 0.658053s 30% 0.658053s 50% 0.658053s 90% 0.454716s 30% 0.454716s 50% 0.454716s 90% 0.454716s 99% 0.454716s 0.658053s 99% 0.658053s0.488507s 99% 0.488507sWrite: 10% Write: 10% 0.527844s 30% 0.527844s 50% 0.527844s 90% 0.471318s 30% 0.471318s 50% 0.471318s 90% 0.471318s 99% 0.527844s 99% Write: 10% 0.527844sWrite: 10% 0.559797s 30% 0.559797s 50% 0.559797s 90% 0.889615s 30% 0.889615s 50% 0.889615s 90% 0.471318s 0.559797s 99% 0.559797s0.889615s 99% 0.889615s Write: 10% 0.823853s 30% 0.823853s 50% 0.823853s 90% 0.823853s 99% 0.823853s Write: 10% 0.670474s 30% 0.670474s 50% 0.670474s 90% 0.670474s 99% 0.670474s Write: 10% 0.575780s 30% 0.575780s 50% 0.575780s 90% 0.575780s 99% 0.575780s Write: 10% Write: 10% 0.953019s 30% 0.953019s 50% 0.978956s 30% 0.953019s 90% 0.978956s 50% 0.978956s 90% 0.953019s 99% 0.953019s0.978956s 99% 0.978956s Write: 10% 1.189798s 30% 1.189798s 50% 1.189798s 90% 1.189798s 99% 1.189798sWrite: 10% 0.852045s 30% 0.852045s 50% 0.852045s 90% 0.852045s 99% 0.852045s Write: 10% 1.022062s 30% 1.022062s 50% 1.022062s 90% 1.022062s 99% 1.022062s Write: 10% 0.683903s 30% 0.683903s 50% 0.683903s 90% 0.683903s 99% 0.683903s Write: 10% 0.656934s 30% 0.656934s 50% 0.656934s 90% 0.656934s 99% 0.656934s Write: 10% 0.492541s 30% 0.492541s 50% 0.492541s 90% 0.492541s 99% 0.492541s Write: 10% 0.692972s 30% 0.692972s 50% 0.692972s 90% 0.692972s 99% 0.692972s Write: 10% 0.701900s 30% 0.701900s 50% 0.701900s 90% 0.701900s 99% 0.701900s Write: 10% 0.712652s 30% 0.712652s 50% 0.712652s 90% 0.712652s 99% 0.712652s Write: 10% 0.373880s 30% 0.373880s 50% 0.373880s 90% 0.373880s 99% 0.373880s Write: 10% 0.417924s 30% 0.417924s 50% 0.417924s 90% 0.417924s 99% 0.417924s Write: 10% 0.465573s 30% 0.465573s 50% 0.465573s 90% 0.465573s 99% 0.465573s Write: 10% 1.290375s 30% 1.290375s 50% 1.290375s 90% 1.290375s 99% 1.290375s Write: 10% 0.819446s 30% 0.819446s 50% 0.819446s 90% 0.819446s 99% 0.819446s Write: 10% 0.407687s 30% 0.407687s 50% 0.407687s 90% 0.407687s 99% 0.407687s Write: 10% 0.803229s 30% 0.803229s 50% 0.803229s 90% 0.803229s 99% 0.803229s Write: 10% 1.192907s 30% 1.192907s 50% 1.192907s 90% 1.192907s 99% 1.192907s Write: 10% 1.230763s 30% 1.230763s 50% 1.230763s 90% 1.230763s 99% 1.230763s Write: 10% 0.431778s 30% 0.431778s 50% 0.431778s 90% 0.431778s 99% 0.431778s Write: 10% 0.972388s 30% 0.972388s 50% 0.972388s 90% 0.972388s 99% 0.972388s Write: 10% 0.964479s 30% 0.964479s 50% 0.964479s 90% 0.964479s 99% 0.964479s Write: 10% 0.584845s 30% 0.584845s 50% 0.584845s 90% 0.584845s 99% 0.584845s Write: 10% 0.569622s 30% 0.569622s 50% 0.569622s 90% 0.569622s 99% 0.569622s Write: 10% 0.488011s 30% 0.488011s 50% 0.488011s 90% 0.488011s 99% 0.488011s Write: 
10% 0.574266s 30% 0.574266s 50% 0.574266s 90% 0.574266s 99% 0.574266s Write: 10% 0.628233s 30% 0.628233s 50% 0.628233s 90% 0.628233s 99% 0.628233s Write: 10% 0.583951s 30% 0.583951s 50% 0.583951s 90% 0.583951s 99% 0.583951s Write: 10% 0.597096s 30% 0.597096s 50% 0.597096s 90% 0.597096s 99% 0.597096s Write: 10% 0.993586s 30% 0.993586s 50% 0.993586s 90% 0.993586s 99% 0.993586s Write: 10% Write0.556988s 30% 0.556988s 50% : 10% 0.589236s 30% 0.589236s 50% 0.589236s 90% 0.589236s 99% 0.589236s 0.556988s 90% 0.556988s 99% 0.556988s Write: 10% 0.562487s 30% 0.562487s 50% 0.562487s 90% 0.562487s 99% 0.562487s Write: 10% 0.526589s 30% 0.526589s 50% 0.526589s 90% 0.526589s 99% 0.526589s Write: 10% 0.595752s 30% 0.595752s 50% 0.595752s 90% 0.595752s 99% 0.595752s Write: 10% 0.377141s 30% 0.377141s 50% 0.377141s 90% 0.377141s 99% 0.377141s Write: 10% 0.462365s 30% 0.462365s 50% 0.462365s 90% 0.462365s 99% 0.462365s Write: 10% 0.598522s 30% 0.598522s 50% 0.598522s 90% 0.598522s 99% 0.598522s Write: 10% 0.354201s 30% 0.354201s 50% 0.354201s 90% 0.354201s 99% 0.354201s Write: 10% 0.734875s 30% 0.734875s 50% 0.734875s 90% 0.734875s 99% 0.734875s Write: 10% 0.489102s 30% 0.489102s 50% 0.489102s 90% 0.489102s 99% Write: 10% 0.466994s 30% 0.466994s 50% 0.466994s 90% 0.466994s 99% 0.466994s 0.489102s Write: 10% 1.315282s 30% 1.315282s 50% 1.315282s 90% 1.315282s 99% 1.315282s Read: 10% 3.186388s 30% 3.186388s 50% 3.186388s 90% 3.186388s 99% 3.186388s Step 3. write modify Write: 10% 0.427627s 30% 0.427627s 50% 0.427627s 90% 0.427627s 99% 0.427627s Write: 10% 0.491557s 30% 0.491557s 50% 0.491557s 90% 0.491557s 99% 0.491557s Write: 10% 0.364258s 30% 0.364258s 50% 0.364258s 90% 0.364258s 99% 0.364258s Write: 10% 0.289773s 30% 0.289773s 50% 0.289773s 90% 0.289773s 99% 0.289773sWrite: 10% 0.454171s 30% 0.454171s 50% 0.454171s 90% 0.454171s 99% 0.454171s Write: 10% 0.414188s 30% 0.414188s 50% 0.414188s 90% 0.414188s 99% 0.414188s Write: 10% 0.540214s 30% 0.540214s 50% 0.540214s 90% 0.540214s 99% 0.540214s Write: 10% 0.587349s 30% 0.587349s 50% 0.587349s 90% 0.587349s 99% 0.587349sWrite: 10% 0.495227s 30% 0.495227s 50% 0.495227s 90% 0.495227s 99% Write: 10% 0.590958s 30% 0.590958s 50% 0.590958s 90% 0.590958s 99% 0.590958s 0.495227s Write: 10% 0.689761s 30% 0.689761s 50% 0.689761s 90% 0.689761s 99% 0.689761s Write: 10% 0.637349s 30% 0.637349s 50% 0.637349s 90% 0.637349s 99% 0.637349s Write: 10% 0.407591s 30% 0.407591s 50% 0.407591s 90% 0.407591s 99% 0.407591s Write: 10% 0.717648s 30% 0.717648s 50% 0.717648s 90% 0.717648s 99% 0.717648sWrite: 10% Write: 10% 0.707843s 30% 0.707843s 50% 0.707843s 90% 0.678407s 30% 0.678407s 50% 0.678407s 90% 0.707843s 99% 0.707843s 0.678407s 99% 0.678407sWrite: 10% Write: 10% 0.364708s 30% 0.364708s 50% 0.364708s 90% 0.364708s 99% 0.364708s 0.477871s 30% 0.477871s 50% 0.477871s 90% 0.477871s 99% 0.477871sWrite: 10% 0.457269s 30% 0.457269s 50% 0.457269s 90% 0.457269s 99% 0.457269s Write: 10% 0.751586s 30% 0.751586s 50% 0.751586s 90% 0.751586s 99% 0.751586s Write: 10% 0.733614s 30% 0.733614s 50% 0.733614s 90% 0.733614s 99% 0.733614s Write: 10% 0.440227s 30% 0.440227s 50% 0.440227s 90% 0.440227s 99% 0.440227sWrite: 10% 0.575061s 30% 0.575061s 50% 0.575061s 90% 0.575061s 99% 0.575061s Write: 10% 0.270382s 30% 0.270382s 50% 0.270382s 90% 0.270382s 99% 0.270382s Write: 10% 0.595279s 30% 0.595279s 50% 0.595279s 90% 0.595279s 99% Write: 10% Write: 10% 0.820955s 30% 0.820955s 50% 0.820955s 90% Write: 10% 0.820955s 99% 0.820955s0.595279s 0.274637s 30% 0.274637s 50% 0.274637s 90% 0.274637s 99% 
0.274637s0.413687s 30% 0.413687s 50% 0.413687s 90% 0.413687s 99% 0.413687s Write: 10% 0.510959s 30% 0.510959s 50% 0.510959s 90% 0.510959s 99% Write: 10% 0.331856s 30% 0.331856s 50% 0.331856s 90% 0.331856s 99% 0.331856s 0.510959s Write: 10% 0.273862s 30% 0.273862s 50% 0.273862s 90% 0.273862s 99% 0.273862s Write: 10% 0.468312s 30% 0.468312s 50% 0.468312s 90% 0.468312s 99% 0.468312s Write: 10% 1.027397s 30% 1.027397s 50% 1.027397s 90% 1.027397s 99% 1.027397s Write: 10% 0.225878s 30% 0.225878s 50% 0.225878s 90% 0.225878s 99% 0.225878s Write: 10% 0.419001s 30% 0.419001s 50% 0.419001s 90% 0.419001s 99% 0.419001s Write: 10% 0.613860s 30% 0.613860s 50% 0.613860s 90% 0.613860s 99% 0.613860s Write: 10% 0.169017s 30% 0.169017s 50% 0.169017s 90% 0.169017s 99% 0.169017s Write: 10% 0.163169s 30% 0.163169s 50% 0.163169s 90% 0.163169s 99% 0.163169s Write: 10% 0.429249s 30% 0.429249s 50% 0.429249s 90% 0.429249s 99% 0.429249s Write: 10% 0.319809s 30% 0.319809s 50% 0.319809s 90% 0.319809s 99% 0.319809s Write: 10% 0.220405s 30% 0.220405s 50% 0.220405s 90% 0.220405s 99% 0.220405s Write: 10% 0.319176s 30% 0.319176s 50% 0.319176s 90% 0.319176s 99% 0.319176s Write: 10% 0.311338s 30% 0.311338s 50% 0.311338s 90% 0.311338s 99% 0.311338sWrite: 10% 0.624293s 30% 0.624293s 50% 0.624293s 90% 0.624293s 99% Write: 10% 0.189454s 30% 0.189454s 50% 0.189454s 90% 0.189454s 99% Write: 10% 0.194973s 30% 0.194973s 50% 0.194973s 90% 0.194973s 99% 0.194973s 0.189454s0.624293sWrite: 10% 0.427536s 30% 0.427536s 50% 0.427536s 90% 0.427536s 99% 0.427536s Write: 10% 0.192063s 30% 0.192063s 50% 0.192063s 90% 0.192063s 99% 0.192063s Write: 10% 0.198474s 30% 0.198474s 50% 0.198474s 90% 0.198474s 99% 0.198474s Write: 10% 0.200582s 30% 0.200582s 50% 0.200582s 90% 0.200582s 99% 0.200582s Write: 10% 0.157255s 30% 0.157255s 50% 0.157255s 90% 0.157255s 99% 0.157255s Write: 10% 0.164983s 30% 0.164983s 50% 0.164983s 90% 0.164983s 99% 0.164983s Write: 10% 0.197025s 30% 0.197025s 50% 0.197025s 90% 0.197025s 99% 0.197025s Write: 10% 0.151604s 30% 0.151604s 50% 0.151604s 90% 0.151604s 99% 0.151604s Write: 10% 0.212867s 30% 0.212867s 50% 0.212867s 90% 0.212867s 99% 0.212867s Write: 10% 0.167737s 30% 0.167737s 50% 0.167737s 90% 0.167737s 99% 0.167737s Write: 10% 0.231696s 30% 0.231696s 50% 0.231696s 90% 0.231696s 99% 0.231696s Write: 10% 0.251645s 30% Write: 10% 0.533573s 30% 0.533573s 50% 0.533573s 90% 0.533573s 99% 0.533573s 0.251645s 50% 0.251645s 90% 0.251645s 99% 0.251645s Write: 10% 0.289814s 30% 0.289814s 50% 0.289814s 90% 0.289814s 99% 0.289814s Write: 10% 0.248142s 30% 0.248142s 50% 0.248142s 90% 0.248142s 99% 0.248142s Write: 10% 0.141601s 30% 0.141601s 50% 0.141601s 90% 0.141601s 99% 0.141601s Write: 10% 0.240866s 30% 0.240866s 50% 0.240866s 90% 0.240866s 99% 0.240866s Write: 10% 0.230319s 30% 0.230319s 50% 0.230319s 90% 0.230319s 99% 0.230319s Update: 10% 0.203652s 30% 0.203652s 50% 0.203652s 90% 0.203652s 99% 0.203652s Step 4. 
read modify write Write: 10% 0.316485s 30% 0.316485s 50% 0.316485s 90% 0.316485s 99% 0.316485s Write: 10% 0.528922s 30% 0.528922s 50% 0.528922s 90% 0.528922s 99% 0.528922s Write: 10% 0.457836s 30% 0.457836s 50% 0.457836s 90% 0.457836s 99% 0.457836s Write: 10% 0.418978s 30% 0.418978s 50% 0.418978s 90% 0.418978s 99% 0.418978s Write: 10% 0.569148s 30% 0.569148s 50% 0.569148s 90% 0.569148s 99% 0.569148s Write: 10% 0.625472s 30% 0.625472s 50% 0.625472s 90% 0.625472s 99% 0.625472s Write: 10% 0.764042s 30% 0.764042s 50% 0.764042s 90% 0.764042s 99% 0.764042s Write: 10% 0.816609s 30% 0.816609s 50% 0.816609s 90% 0.816609s 99% 0.816609s Write: 10% 0.461925s 30% 0.461925s 50% 0.461925s 90% 0.461925s 99% 0.461925s Write: 10% 0.950532s 30% 0.950532s 50% 0.950532s 90% 0.950532s 99% 0.950532s Write: 10% 0.505844s 30% 0.505844s 50% 0.505844s 90% 0.505844s 99% 0.505844s Write: 10% 0.650774s 30% 0.650774s 50% 0.650774s 90% 0.650774s 99% 0.650774s Write: 10% 0.482421s 30% 0.482421s 50% 0.482421s 90% 0.482421s 99% 0.482421s Write: 10% 0.558948s 30% 0.558948s 50% 0.558948s 90% 0.558948s 99% 0.558948s Write: 10% 0.556385s 30% 0.556385s 50% 0.556385s 90% 0.556385s 99% 0.556385s Write: 10% 0.731528s 30% 0.731528s 50% 0.731528s 90% 0.731528s 99% 0.731528s Write: 10% 0.675615s 30% 0.675615s 50% 0.675615s 90% 0.675615s 99% 0.675615s Write: 10% 0.767275s 30% 0.767275s 50% 0.767275s 90% 0.767275s 99% 0.767275s Write: 10% 0.711584s 30% 0.711584s 50% 0.711584s 90% 0.711584s 99% 0.711584s Write: 10% 0.783424s 30% 0.783424s 50% 0.783424s 90% 0.783424s 99% 0.783424s Write: 10% 0.811714s 30% 0.811714s 50% 0.811714s 90% 0.811714s 99% 0.811714s Write: 10% 0.874446s 30% 0.874446s 50% 0.874446s 90% 0.874446s 99% 0.874446s Write: 10% 0.903113s 30% 0.903113s 50% 0.903113s 90% 0.903113s 99% 0.903113s Write: 10% 0.920921s 30% 0.920921s 50% 0.920921s 90% 0.920921s 99% 0.920921s Write: 10% 0.973726s 30% 0.973726s 50% 0.973726s 90% 0.973726s 99% 0.973726s Write: 10% 0.989657s 30% 0.989657s 50% 0.989657s 90% 0.989657s 99% 0.989657s Write: 10% 1.001727s 30% 1.001727s 50% Write: 10% 1.001727s 90% 1.001727s 99% 1.001727s 0.977481s 30% 0.977481s 50% 0.977481s 90% 0.977481s 99% 0.977481s Write: 10% 0.987226s 30% 0.987226s 50% 0.987226s 90% 0.987226s 99% 0.987226s Write: 10% 0.975813s 30% 0.975813s 50% 0.975813s 90% 0.975813s 99% 0.975813s Write: 10% 0.972543s 30% 0.972543s 50% 0.972543s 90% 0.972543s 99% 0.972543s Write: 10% 0.945091s 30% 0.945091s 50% 0.945091s 90% 0.945091s 99% 0.945091s Write: 10% 1.008720s 30% 1.008720s 50% 1.008720s 90% 1.008720s 99% 1.008720s Write: 10% 1.033690s 30% 1.033690s 50% 1.033690s 90% 1.033690s 99% 1.033690s Write: 10% 0.995025s 30% 0.995025s 50% 0.995025s 90% 0.995025s 99% 0.995025s Write: 10% 1.069492s 30% 1.069492s 50% 1.069492s 90% 1.069492s 99% 1.069492s Write: 10% 1.081486sWrite: 10% 30% 1.081486s 50% 1.081486s 90% 1.065300s 30% 1.065300s 50% 1.065300s 90% 1.065300s 99% 1.065300s1.081486s 99% 1.081486s Write: 10% 0.990879s 30% 0.990879s 50% 0.990879s 90% 0.990879s 99% 0.990879s Write: 10% 1.092471s 30% 1.092471s 50% 1.092471s 90% 1.092471s 99% 1.092471s Write: 10% 1.038407s 30% 1.038407s 50% 1.038407s 90% 1.038407s 99% 1.038407s Write: 10% 1.208425s 30% 1.208425s 50% 1.208425s 90% 1.208425s 99% 1.208425s Write: 10% 1.031941s 30% 1.031941s 50% 1.031941s 90% 1.031941s 99% 1.031941s Write: 10% 1.179188s 30% 1.179188s 50% 1.179188s 90% 1.179188s 99% 1.179188s Write: 10% 1.107905s 30% 1.107905s 50% 1.107905s 90% 1.107905s 99% 1.107905s Write: 10% 1.177312s 30% 1.177312s 50% 1.177312s 90% 1.177312s 99% 
1.177312s Write: 10% 1.265792s 30% 1.265792s 50% 1.265792s 90% 1.265792s 99% 1.265792s Write: 10% 1.495314s 30% 1.495314s 50% 1.495314s 90% 1.495314s 99% 1.495314s Write: 10% 1.196789s 30% 1.196789s 50% 1.196789s 90% 1.196789s 99% 1.196789s Write: 10% 1.252611s 30% 1.252611s 50% 1.252611s 90% 1.252611s 99% 1.252611s Write: 10% 1.252279s 30% 1.252279s 50% 1.252279s 90% 1.252279s 99% 1.252279s Write: 10% 1.251444s 30% 1.251444s 50% 1.251444s 90% 1.251444s 99% 1.251444s Write: 10% 1.205479s 30% 1.205479s 50% 1.205479s 90% 1.205479s 99% 1.205479s Write: 10% 1.268052s 30% 1.268052s 50% 1.268052s 90% 1.268052s 99% 1.268052s Write: 10% 1.336493s 30% 1.336493s 50% 1.336493s 90% 1.336493s 99% 1.336493s Write: 10% 1.257571s 30% 1.257571s 50% 1.257571s 90% 1.257571s 99% 1.257571s Write: 10% 1.185185s 30% 1.185185s 50% 1.185185s 90% 1.185185s 99% 1.185185s Write: 10% 1.223557s 30% 1.223557s 50% 1.223557s 90% 1.223557s 99% 1.223557s Write: 10% 1.258166s 30% 1.258166s 50% 1.258166s 90% 1.258166s 99% 1.258166s Write: 10% 1.255181s 30% 1.255181s 50% 1.255181s 90% 1.255181s 99% 1.255181s Write: 10% 1.271754s 30% 1.271754s 50% 1.271754s 90% 1.271754s 99% 1.271754s Write: 10% 1.285198s 30% 1.285198s 50% 1.285198s 90% 1.285198s 99% 1.285198s Write: 10% 1.456803s 30% 1.456803s 50% 1.456803s 90% 1.456803s 99% 1.456803s Write: 10% 1.259776s 30% 1.259776s 50% 1.259776s 90% 1.259776s 99% 1.259776s Update: 10% 0.378461s 30% 0.378461s 50% 1.269489s 90% 1.269489s 99% 1.269489s Was written: 87 MiB, Speed: 8 MiB/s Read: 10% 3.308536s 30% 3.308536s 50% 3.308536s 90% 3.308536s 99% 3.308536s |97.5%| [TM] {RESULT} ydb/tests/olap/high_load/unittest >> test_commit.py::TestCommit::test_commit [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32--2147483648-True] |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> TCreateAndDropViewTest::DropViewDisabledFeatureFlag [GOOD] >> TCreateAndDropViewTest::DropNonexistingView >> KeyValueGRPCService::SimpleCopyUnexistedKey [GOOD] >> KeyValueGRPCService::SimpleWriteRead >> TTxDataShardTestInit::TestGetShardStateAfterInitialization >> SdkCredProvider::PingFromProviderSyncDiscovery |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> TTxDataShardTestInit::TestGetShardStateAfterInitialization [GOOD] >> TTxDataShardTestInit::TestTableHasPath >> KqpTpch::Query13 [GOOD] >> KqpTpch::Query14 >> Splitter::CritSmallPortions [GOOD] >> Splitter::Crit |97.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> QuoterWithKesusTest::GetsSeveralQuotas [GOOD] >> QuoterWithKesusTest::KesusRecreation >> SdkCredProvider::PingFromProviderSyncDiscovery [GOOD] >> SdkCredProvider::PingFromProviderAsyncDiscovery >> test.py::test[solomon-BadDownsamplingAggregation-] [GOOD] >> test.py::test[solomon-BadDownsamplingDisabled-] |97.5%| [TS] {asan, default-linux-x86_64, release} ydb/tests/tools/pq_read/test/py3test >> test_commit.py::TestCommit::test_commit [GOOD] |97.5%| [TS] {RESULT} ydb/tests/tools/pq_read/test/py3test >> KqpTpch::Query14 [GOOD] >> KqpTpch::Query15 >> 
test_select.py::TestDML::test_select[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> SdkCredProvider::PingFromProviderAsyncDiscovery [GOOD] >> KeyValueGRPCService::SimpleWriteRead [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithIncorreectPath >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_bs_controller] >> TTxDataShardTestInit::TestTableHasPath [GOOD] >> TTxDataShardTestInit::TestResolvePathAfterRestart >> Splitter::Crit [GOOD] >> Splitter::CritSimple >> TCreateAndDropViewTest::DropNonexistingView [GOOD] >> TCreateAndDropViewTest::CallDropViewOnTable >> test_unknown_data_source.py::TestUnknownDataSource::test_should_fail_unknown_data_source[v2-client0] [GOOD] >> TTxDataShardBuildIndexScan::BadRequest >> TDqSolomonWriteActorTest::TestWriteWithTimeseries [GOOD] >> TDqSolomonWriteActorTest::TestCheckpoints >> MetadataConversion::MakeAuthTest [GOOD] >> MetadataConversion::ConvertingExternalSourceMetadata [GOOD] >> BulkUpsert::BulkUpsert >> test.py::test_wait_for_cluster_ready [GOOD] >> test.py::test_counter >> KqpTpch::Query15 [GOOD] >> KqpTpch::Query16 |97.5%| [TS] {asan, default-linux-x86_64, release} ydb/core/kqp/gateway/ut/gtest >> MetadataConversion::ConvertingExternalSourceMetadata [GOOD] |97.5%| [TS] {RESULT} ydb/core/kqp/gateway/ut/gtest >> TDqSolomonWriteActorTest::TestCheckpoints [GOOD] >> TDqSolomonWriteActorTest::TestShouldReturnAfterCheckpoint ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sdk/cpp/sdk_credprovider/unittest >> SdkCredProvider::PingFromProviderAsyncDiscovery [GOOD] Test command err: 2 2 |97.6%| [TM] {RESULT} ydb/tests/functional/sdk/cpp/sdk_credprovider/unittest >> test.py::test[solomon-BadDownsamplingDisabled-] [GOOD] >> test.py::test[solomon-BadDownsamplingFill-] >> test.py::test_counter [GOOD] >> test.py::test_viewer_nodes >> test.py::test_viewer_nodes [GOOD] >> test.py::test_viewer_nodes_all >> test.py::test_viewer_nodes_all [GOOD] >> test.py::test_viewer_storage_nodes >> test.py::test_viewer_storage_nodes [GOOD] >> test.py::test_viewer_storage_nodes_all >> DataShardCompaction::CompactBorrowed >> test.py::test_viewer_storage_nodes_all [GOOD] >> test.py::test_storage_groups >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test.py::test_storage_groups [GOOD] >> test.py::test_viewer_sysinfo [GOOD] >> test.py::test_viewer_vdiskinfo [GOOD] >> test.py::test_viewer_pdiskinfo [GOOD] >> test.py::test_viewer_bsgroupinfo >> TTxDataShardTestInit::TestResolvePathAfterRestart [GOOD] >> test.py::test_viewer_bsgroupinfo [GOOD] >> test.py::test_viewer_tabletinfo >> QuoterWithKesusTest::KesusRecreation [GOOD] >> QuoterWithKesusTest::AllocationStatistics >> Splitter::CritSimple [GOOD] >> test.py::test_viewer_tabletinfo [GOOD] >> test.py::test_viewer_describe >> TSentinelTests::PDiskErrorState [GOOD] >> TSentinelTests::PDiskFaultyState >> test.py::test_viewer_describe [GOOD] >> test.py::test_viewer_cluster ------- [TS] {asan, default-linux-x86_64, release} ydb/core/tx/columnshard/splitter/ut/unittest >> Splitter::CritSimple [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=280336;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280384;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=280336;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=2088936;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=2088936;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5184936;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5184936;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=50200;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5163264;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50240;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=50200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7124168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=132168;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=seria ... 82944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8947912;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=71282912;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8905200;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8947912;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=7964800;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964832;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7914944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=7964800;columns=1; |97.6%| [TS] {RESULT} ydb/core/tx/columnshard/splitter/ut/unittest >> test.py::test_viewer_cluster [GOOD] >> test.py::test_viewer_tenantinfo [GOOD] >> test.py::test_viewer_tenantinfo_db >> test.py::test_viewer_tenantinfo_db [GOOD] >> test.py::test_viewer_healthcheck |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/wardens/py3test >> test_liveness_wardens.py::TestLivenessWarden::test_scheme_shard_has_no_in_flight_transactions [GOOD] |97.6%| [TM] {RESULT} ydb/tests/functional/wardens/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test.py::test_viewer_healthcheck [GOOD] >> test.py::test_viewer_acl >> KeyValueGRPCService::SimpleWriteReadWithIncorreectPath [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithoutToken >> test.py::test_viewer_acl [GOOD] >> 
test.py::test_viewer_acl_write >> test.py::test_viewer_acl_write [GOOD] >> test.py::test_viewer_autocomplete >> test.py::test_viewer_autocomplete [GOOD] >> test.py::test_viewer_check_access ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_init/unittest >> TTxDataShardTestInit::TestResolvePathAfterRestart [GOOD] Test command err: 2025-06-25T15:22:55.902065Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:106:2138], Recipient [1:112:2142]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:22:55.934050Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:106:2138], Recipient [1:112:2142]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:22:55.938836Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:112:2142] 2025-06-25T15:22:55.939967Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:22:55.993586Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:106:2138], Recipient [1:112:2142]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:22:56.001979Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:22:56.002092Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:22:56.005098Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-25T15:22:56.005192Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-25T15:22:56.005240Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-25T15:22:56.007160Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:22:56.007273Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:22:56.007361Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:132:2142] in generation 2 2025-06-25T15:22:56.055602Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:22:56.082627Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-25T15:22:56.084164Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:22:56.084349Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:138:2160] 2025-06-25T15:22:56.084395Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-25T15:22:56.084426Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-25T15:22:56.084456Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-25T15:22:56.084686Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:112:2142], Recipient [1:112:2142]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:22:56.085633Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:22:56.086865Z node 1 :TX_DATASHARD 
DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-25T15:22:56.086968Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-25T15:22:56.087020Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-25T15:22:56.087054Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:22:56.087090Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-25T15:22:56.087149Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-25T15:22:56.087204Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-25T15:22:56.087229Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-25T15:22:56.087262Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-25T15:22:56.090367Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269551617, Sender [1:103:2136], Recipient [1:112:2142]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 103 RawX2: 4294969432 } 2025-06-25T15:22:56.090428Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-25T15:22:59.538509Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:22:59.538873Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:22:59.539054Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0006ed/r3tmp/tmprtPQUe/pdisk_1.dat 2025-06-25T15:23:00.157236Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T15:23:00.177897Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:23:00.283109Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:00.297486Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750864976440455 != 1750864976440459 2025-06-25T15:23:00.360869Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:00.361057Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:00.385281Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:23:00.537287Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:23:00.613657Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [2:627:2531] 2025-06-25T15:23:00.613955Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:23:00.691289Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:23:00.691453Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:23:00.694455Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:23:00.694587Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:23:00.694650Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:23:00.695058Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:23:00.695274Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:23:00.695389Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [2:643:2531] in generation 1 2025-06-25T15:23:00.706290Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:23:00.706439Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:23:00.706605Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:23:00.706723Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [2:645:2541] 2025-06-25T15:23:00.706765Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:23:00.706801Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:23:00.706842Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:23:00.707292Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:23:00.707416Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:23:00.707571Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:23:00.707616Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:23:00.707653Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:23:00.707702Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:23:00.708163Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:624:2529], serverId# [2:634:2535], sessionId# [0:0:0] 2025-06-25T15:23:00.708653Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:23:00.713822Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:23:00.713983Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:23:00.716129Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:23:00.727005Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:23:00.727151Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T15:23:00.890866Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [2:661:2551], serverId# [2:662:2552], sessionId# [0:0:0] 2025-06-25T15:23:00.909412Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction ... 
tive planned 0 immediate 0 planned 1 2025-06-25T15:23:05.320977Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T15:23:05.321184Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T15:23:05.321293Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:23:05.322028Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:23:05.322090Z node 3 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T15:23:05.322466Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T15:23:05.322805Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:23:05.325913Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T15:23:05.325957Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:23:05.326315Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T15:23:05.326370Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:23:05.327435Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:23:05.327482Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:23:05.327519Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T15:23:05.327578Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:23:05.327619Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T15:23:05.327692Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:23:05.330214Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:23:05.331843Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T15:23:05.331919Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:23:05.332247Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 
72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:23:05.369092Z node 3 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037888 2025-06-25T15:23:05.370341Z node 3 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-25T15:23:05.455998Z node 3 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [3:699:2579] 2025-06-25T15:23:05.456239Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:23:05.459978Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:23:05.460433Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:23:05.462404Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:23:05.462488Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:23:05.462545Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:23:05.462889Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:23:05.463583Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:23:05.463670Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [3:714:2579] in generation 2 2025-06-25T15:23:05.485220Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:23:05.485357Z node 3 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037888 2025-06-25T15:23:05.485444Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T15:23:05.485591Z node 3 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T15:23:05.485672Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4180: Resolve path at 72075186224037888: reason# empty path 2025-06-25T15:23:05.485799Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [3:718:2589] 2025-06-25T15:23:05.485833Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:23:05.485878Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T15:23:05.485909Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:23:05.486171Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:711: TxInitSchemaDefaults.Execute 2025-06-25T15:23:05.486351Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:723: TxInitSchemaDefaults.Complete 2025-06-25T15:23:05.487149Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:23:05.487240Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:23:05.487527Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got 
TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1000 2025-06-25T15:23:05.487571Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:23:05.487780Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:23:05.489275Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5607: Got TEvDataShard::TEvSchemaChanged for unknown txId 281474976715657 message# Source { RawX1: 699 RawX2: 12884904467 } Origin: 72075186224037888 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-25T15:23:05.490093Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:23:05.490165Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:23:05.490213Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:23:05.490281Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:23:05.490844Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T15:23:05.490903Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:23:05.542297Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4241: Got scheme resolve result at 72075186224037888: Status: StatusSuccess Path: "/Root/table-1" PathDescription { Self { Name: "table-1" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715657 CreateStep: 1000 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 
} DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046644480 2025-06-25T15:23:05.542434Z node 3 :TX_DATASHARD DEBUG: datashard__store_table_path.cpp:20: TTxStoreTablePath::Execute at 72075186224037888 2025-06-25T15:23:05.544007Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:23:05.544940Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:722:2593], serverId# [3:724:2594], sessionId# [0:0:0] 2025-06-25T15:23:05.558547Z node 3 :TX_DATASHARD DEBUG: datashard__store_table_path.cpp:39: TTxStoreTablePath::Complete at 72075186224037888 >> test.py::test_viewer_check_access [GOOD] >> test.py::test_viewer_query |97.6%| [TM] {RESULT} ydb/core/tx/datashard/ut_init/unittest >> test.py::test_viewer_query [GOOD] >> test.py::test_viewer_query_from_table >> test.py::test_viewer_query_from_table [GOOD] >> test.py::test_viewer_query_from_table_different_schemas >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32--2147483648-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32--2147483648-False] >> KqpTpch::Query16 [GOOD] >> KqpTpch::Query17 >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32--2147483648-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32-2147483647-True] >> TCreateAndDropViewTest::CallDropViewOnTable [GOOD] >> TCreateAndDropViewTest::DropSameViewTwice >> test.py::test_viewer_query_from_table_different_schemas [GOOD] >> test.py::test_viewer_query_issue_13757 >> TTxDataShardBuildIndexScan::BadRequest [GOOD] >> TTxDataShardBuildIndexScan::RunScan >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_bs_controller] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_datashard] >> test.py::test_viewer_query_issue_13757 [GOOD] >> test.py::test_viewer_query_issue_13945 >> test.py::test_viewer_query_issue_13945 [GOOD] >> test.py::test_pqrb_tablet >> test.py::test[solomon-BadDownsamplingFill-] [GOOD] >> test.py::test[solomon-BadDownsamplingInterval-] >> test.py::TestSqsSplitMergeStdTables::test_std_merge_split [GOOD] >> test.py::test_pqrb_tablet [GOOD] >> test.py::test_viewer_nodes_issue_14992 [GOOD] >> test.py::test_operations_list [GOOD] >> test.py::test_operations_list_page >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test.py::test_operations_list_page [GOOD] >> test.py::test_operations_list_page_bad [GOOD] >> test.py::test_scheme_directory >> test.py::test_scheme_directory [GOOD] >> test.py::test_topic_data >> HttpRouter::Basic [GOOD] |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> 
test_select.py::TestDML::test_select[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> TSentinelTests::PDiskFaultyState [GOOD] >> TSentinelTests::PDiskRackGuardHalfRack >> TIndexProcesorTests::TestOver1000Queues [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_datashard] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_hive] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32-2147483647-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32-2147483647-False] |97.6%| [TS] {asan, default-linux-x86_64, release} ydb/core/public_http/ut/unittest >> HttpRouter::Basic [GOOD] |97.6%| [TS] {RESULT} ydb/core/public_http/ut/unittest >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_hive] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_schemeshard] >> KeyValueGRPCService::SimpleWriteReadWithoutToken [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration1 >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int32-2147483647-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-0-True] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_schemeshard] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_tx_coordinator] >> DataShardReplication::SimpleApplyChanges >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[flat_tx_coordinator] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[tx_allocator] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[keyvalueflat] >> TMemoryController::Counters >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> KqpTpch::Query17 [GOOD] >> KqpTpch::Query18 >> TDqSolomonWriteActorTest::TestShouldReturnAfterCheckpoint [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/ymq/actor/yc_search_ut/unittest >> TIndexProcesorTests::TestOver1000Queues [GOOD] Test command err: 2025-06-25T15:22:22.011730Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519907665390818294:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:22:22.015857Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0009a1/r3tmp/tmpg3NWOd/pdisk_1.dat 2025-06-25T15:22:22.550593Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:22:22.574927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:22:22.575029Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:22:22.576684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 63683, node 1 2025-06-25T15:22:22.735317Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:22:22.735350Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-06-25T15:22:22.735375Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:22:22.735524Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:22:23.012123Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:6030 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:22:23.208895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T15:22:23.243066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 TClient is connected to server localhost:6030 waiting... 2025-06-25T15:22:23.608704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-25T15:22:25.699202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:22:25.700891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) TClient is connected to server localhost:6030 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1750864943268 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SQS" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1750864943282 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 184467... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:22:26.181973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710662:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:22:26.187138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710662, at schemeshard: 72057594046644480 Error 1: Check failed: path: '/Root/SQS', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges) 2025-06-25T15:22:26.190775Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519907686865655540:2469] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/SQS\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNoChanges)" severity: 1 } TClient is connected to server localhost:6030 waiting... 
2025-06-25T15:22:26.426810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710664, at schemeshard: 72057594046644480 2025-06-25T15:22:26.454883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:22:26.465875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) ===Execute query: UPSERT INTO `/Root/SQS/SingleCreateQueueEvent/.Events` (Account, QueueName, EventType, CustomQueueName, EventTimestamp, FolderId, Labels) VALUES ("cloud1", "queue1", 1, "myQueueCustomName", 1750864946214, "myFolder", "{\"k1\": \"v1\"}"); 2025-06-25T15:22:26.687024Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519907686865655710:2324], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:22:26.687189Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:22:26.687801Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519907686865655722:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:22:26.700438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710667:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:22:26.729134Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519907686865655724:2328], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710667 completed, doublechecking } 2025-06-25T15:22:26.805157Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519907686865655775:2620] txid# 281474976710668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:22:26.995948Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519907665390818294:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:22:26.996048Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:22:28.164454Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710669. Ctx: { TraceId: 01jyktxsff6wdjeaqnfskh05qh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:29.004794Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710670. Ctx: { TraceId: 01jyktxv3t2mk5vxdn6ntfs8bb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTViMWIxN2EtMTk0OTQzMjUtYzYyNWJmN2EtMzQ3NDk5NDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:29.280096Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710671. Ctx: { TraceId: 01jyktxvtq8san6kfnaw6dnxzy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzY3OTg2OWYtMTViZDg2OTgtYjFhODIxZTQtZjA1ZDdmODA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:29.433842Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474 ... p:120: TxId: 281474976710780. Ctx: { TraceId: 01jyktyg048xeftdn51frs3a2e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:51.199983Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710781. Ctx: { TraceId: 01jyktyhde4rkvfyt6z7xjb3da, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:51.316785Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710782. Ctx: { TraceId: 01jyktyhde4rkvfyt6z7xjb3da, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:52.658414Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710783. Ctx: { TraceId: 01jyktyjv164z5swxgsjmhaygx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:52.864263Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710784. 
Ctx: { TraceId: 01jyktyjv164z5swxgsjmhaygx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:54.135765Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710785. Ctx: { TraceId: 01jyktym9de5es3az9s2xedcq1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:54.214326Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710786. Ctx: { TraceId: 01jyktym9de5es3az9s2xedcq1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:55.486439Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710787. Ctx: { TraceId: 01jyktynke9d405r4wpgjeq9mv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:55.585409Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710788. Ctx: { TraceId: 01jyktynke9d405r4wpgjeq9mv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:56.848210Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710789. Ctx: { TraceId: 01jyktypy5405nk3nes3253mxs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:56.965739Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710790. Ctx: { TraceId: 01jyktypy5405nk3nes3253mxs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:58.340666Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710791. Ctx: { TraceId: 01jyktyrcp43xtx6qrafvyc18v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:58.483960Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710792. Ctx: { TraceId: 01jyktyrcp43xtx6qrafvyc18v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:22:59.928261Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710793. Ctx: { TraceId: 01jyktysydarejbwmgpxxqrsns, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:00.024195Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710794. 
Ctx: { TraceId: 01jyktysydarejbwmgpxxqrsns, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:01.390392Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710795. Ctx: { TraceId: 01jyktyvbzcm0r10p4fhzdr4f1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:01.582257Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710796. Ctx: { TraceId: 01jyktyvbzcm0r10p4fhzdr4f1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:02.779973Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710797. Ctx: { TraceId: 01jyktywqde5fgndkca0mffqqq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:02.891378Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710798. Ctx: { TraceId: 01jyktywqde5fgndkca0mffqqq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:04.235068Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710799. Ctx: { TraceId: 01jyktyy4t1td41v24jf5xx6av, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:04.376931Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710800. Ctx: { TraceId: 01jyktyy4t1td41v24jf5xx6av, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:05.890014Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710801. Ctx: { TraceId: 01jyktyzraehc20hmqh3x7zp5n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:06.016058Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710802. Ctx: { TraceId: 01jyktyzraehc20hmqh3x7zp5n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:07.507521Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710803. Ctx: { TraceId: 01jyktz1ah9sdb32kq9b1ewdka, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:07.660701Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710804. 
Ctx: { TraceId: 01jyktz1ah9sdb32kq9b1ewdka, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:09.195163Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710805. Ctx: { TraceId: 01jyktz2zt9wv0rhr74m1kdsd7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:09.322340Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710806. Ctx: { TraceId: 01jyktz2zt9wv0rhr74m1kdsd7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:10.211936Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710807. Ctx: { TraceId: 01jyktz3zq29qvda382pr09f3g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:10.845769Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710808. Ctx: { TraceId: 01jyktz437a3pvvw9n17se8qdz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJiZTU1YzUtMzlkOWQ1MjAtMTEyMTZmY2QtZTU1M2NmNzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:12.131619Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710809. Ctx: { TraceId: 01jyktz5ky3h0j1aytwz9ngrh1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY2NjA1ZjMtZjQ4OTVmZTQtYjhlNDM1MzktNjgzMjE5YjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:12.194285Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710810. Ctx: { TraceId: 01jyktz5xw63s0vnd3ftawqaw7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY2NjA1ZjMtZjQ4OTVmZTQtYjhlNDM1MzktNjgzMjE5YjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:12.275804Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710811. Ctx: { TraceId: 01jyktz60j3hsb6h0w5rqf8188, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY2NjA1ZjMtZjQ4OTVmZTQtYjhlNDM1MzktNjgzMjE5YjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:12.286277Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710812. Ctx: { TraceId: 01jyktz60w4yv1xx95mgbgpzj3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY2NjA1ZjMtZjQ4OTVmZTQtYjhlNDM1MzktNjgzMjE5YjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:12.488434Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710813. Ctx: { TraceId: 01jyktz6157pzqfz3x3vken4v5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmEzZDQzMTEtZGZmMDAwMTItOTQ3NDE1ZDAtODMxYmU0ZmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root |97.6%| [TM] {RESULT} ydb/core/ymq/actor/yc_search_ut/unittest >> QuoterWithKesusTest::AllocationStatistics [GOOD] >> QuoterWithKesusTest::UpdatesCountersForParentResources >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[keyvalueflat] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[tx_mediator] [GOOD] >> DataShardStats::HistogramStatsCorrect [GOOD] >> DataShardStats::BlobsStatsCorrect >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[persqueue] ------- [TS] {asan, default-linux-x86_64, release} ydb/library/yql/providers/solomon/actors/ut/unittest >> TDqSolomonWriteActorTest::TestShouldReturnAfterCheckpoint [GOOD] Test command err: 2025-06-25T15:22:31.422043Z node 1 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2025-06-25T15:22:31.424628Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 1 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2025-06-25T15:22:31.428262Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 121 bytes of data to buffer 2025-06-25T15:22:31.430071Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1 metrics with size of 121 bytes to solomon 2025-06-25T15:22:31.430095Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-25T15:22:31.443595Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Wed, 25 Jun 2025 15:22:31 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1} 2025-06-25T15:22:31.452520Z node 1 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-25T15:22:41.741462Z node 2 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2025-06-25T15:22:41.770725Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 7500 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2025-06-25T15:22:41.787533Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 107903 bytes of data to buffer 2025-06-25T15:22:41.801935Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-25T15:22:41.816053Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-25T15:22:41.830225Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-25T15:22:41.845138Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-25T15:22:41.861083Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-25T15:22:41.876053Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-25T15:22:41.884519Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. 
Push 54513 bytes of data to buffer 2025-06-25T15:22:41.884837Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 107903 bytes to solomon 2025-06-25T15:22:41.885075Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-25T15:22:41.885318Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-25T15:22:41.885337Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2025-06-25T15:22:42.057710Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Wed, 25 Jun 2025 15:22:42 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-25T15:22:42.058094Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-25T15:22:42.058121Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2025-06-25T15:22:42.118151Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[2]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Wed, 25 Jun 2025 15:22:42 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-25T15:22:42.118484Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-25T15:22:42.118501Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2025-06-25T15:22:42.173035Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[1]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Wed, 25 Jun 2025 15:22:42 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-25T15:22:42.173447Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-25T15:22:42.173466Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2025-06-25T15:22:42.276848Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[4]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Wed, 25 Jun 2025 15:22:42 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-25T15:22:42.277172Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-25T15:22:42.277195Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2025-06-25T15:22:42.337565Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. 
Solomon response[3]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Wed, 25 Jun 2025 15:22:42 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-25T15:22:42.337789Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 500 metrics with size of 54513 bytes to solomon 2025-06-25T15:22:42.337804Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer MaxRequestsInflight 2025-06-25T15:22:42.393510Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[5]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Wed, 25 Jun 2025 15:22:42 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-25T15:22:42.393604Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-25T15:22:42.426986Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[7]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 28 Date: Wed, 25 Jun 2025 15:22:42 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 500} 2025-06-25T15:22:42.427080Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-25T15:22:42.567723Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[6]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Wed, 25 Jun 2025 15:22:42 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-25T15:22:42.567858Z node 2 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-25T15:22:53.106298Z node 3 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2025-06-25T15:22:53.107173Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 10 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2025-06-25T15:22:53.107767Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 579 bytes of data to buffer 2025-06-25T15:22:53.107863Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 10 metrics with size of 579 bytes to solomon 2025-06-25T15:22:53.107877Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-25T15:22:53.136100Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 27 Date: Wed, 25 Jun 2025 15:22:53 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 10} 2025-06-25T15:22:53.136249Z node 3 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-25T15:23:03.455919Z node 4 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2025-06-25T15:23:03.458471Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 2400 items to send. Checkpoint: 1. 
Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2025-06-25T15:23:03.503955Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 107903 bytes of data to buffer 2025-06-25T15:23:03.519438Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 109013 bytes of data to buffer 2025-06-25T15:23:03.525406Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 43613 bytes of data to buffer 2025-06-25T15:23:03.525728Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 107903 bytes to solomon 2025-06-25T15:23:03.525980Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1000 metrics with size of 109013 bytes to solomon 2025-06-25T15:23:03.526063Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 400 metrics with size of 43613 bytes to solomon 2025-06-25T15:23:03.526075Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: MaxRequestsInflight 2025-06-25T15:23:03.562100Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[2]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 28 Date: Wed, 25 Jun 2025 15:23:03 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 400} 2025-06-25T15:23:03.562232Z node 4 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:373: TxId: TxId-42, Solomon sink. Process checkpoint. Inflight before checkpoint: 2 2025-06-25T15:23:03.713452Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[1]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Wed, 25 Jun 2025 15:23:03 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-25T15:23:03.713583Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: CheckpointInProgress Empty buffer 2025-06-25T15:23:03.768859Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 29 Date: Wed, 25 Jun 2025 15:23:03 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1000} 2025-06-25T15:23:03.768999Z node 4 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-25T15:23:04.272876Z node 5 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:117: TxId: TxId-42, Solomon sink. Init 2025-06-25T15:23:04.273310Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 1 items to send. Checkpoint: 1. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2025-06-25T15:23:04.273441Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 121 bytes of data to buffer 2025-06-25T15:23:04.273580Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1 metrics with size of 121 bytes to solomon 2025-06-25T15:23:04.273599Z node 5 :KQP_COMPUTE DEBUG: dq_solomon_write_actor.cpp:373: TxId: TxId-42, Solomon sink. Process checkpoint. 
Inflight before checkpoint: 1 2025-06-25T15:23:04.277831Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[0]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Wed, 25 Jun 2025 15:23:04 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1} 2025-06-25T15:23:04.277914Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-25T15:23:04.278124Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:135: TxId: TxId-42, Solomon sink. Got 1 items to send. Checkpoint: 0. Send queue: 0. Inflight: 0. Checkpoint in progress: 0 2025-06-25T15:23:04.278205Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:292: TxId: TxId-42, Solomon sink. Push 121 bytes of data to buffer 2025-06-25T15:23:04.278285Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:363: TxId: TxId-42, Solomon sink. Sent 1 metrics with size of 121 bytes to solomon 2025-06-25T15:23:04.278307Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer 2025-06-25T15:23:04.281700Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:386: TxId: TxId-42, Solomon sink. Solomon response[1]: HTTP/1.1 200 OK Content-Type: application/json; charset=utf-8 Content-Length: 26 Date: Wed, 25 Jun 2025 15:23:04 GMT Server: Python/3.12 aiohttp/3.9.5 {"writtenMetricsCount": 1} 2025-06-25T15:23:04.281827Z node 5 :KQP_COMPUTE TRACE: dq_solomon_write_actor.cpp:345: TxId: TxId-42, Solomon sink. Skip sending to solomon. Reason: Empty buffer |97.6%| [TS] {RESULT} ydb/library/yql/providers/solomon/actors/ut/unittest >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-0-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-0-False] >> test_http_api.py::TestHttpApi::test_simple_analytics_query >> TCreateAndDropViewTest::DropSameViewTwice [GOOD] >> TCreateAndDropViewTest::DropViewIfExists >> test_unknown_data_source.py::TestUnknownDataSource::test_should_fail_unknown_data_source[v1-client0] >> test.py::test_topic_data [GOOD] >> test.py::test_transfer_describe >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-0-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-4294967295-True] >> test.py::test[solomon-BadDownsamplingInterval-] [GOOD] >> test.py::test[solomon-Basic-default.txt] >> test.py::test_transfer_describe [GOOD] >> test.py::test_viewer_query_long >> TTxDataShardBuildIndexScan::RunScan [GOOD] >> TTxDataShardBuildIndexScan::ShadowBorrowCompaction >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[persqueue] [GOOD] >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[kesus] >> TestFilterSet::FilterGroup >> KqpTpch::Query18 [GOOD] >> KqpTpch::Query19 >> KafkaProtocol::BalanceScenario [GOOD] >> KafkaProtocol::BalanceScenarioForFederation |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> DataShardReplication::SimpleApplyChanges [GOOD] >> DataShardReplication::SplitMergeChanges >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[kesus] [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration1 [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration2 |97.6%| [TM] 
{asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> TMemoryController::Counters [GOOD] >> TMemoryController::Counters_HardLimit |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-4294967295-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-4294967295-False] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[select_1] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[UInt32-4294967295-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64--9223372036854775808-True] >> test.py::test_viewer_query_long [GOOD] >> test.py::test_viewer_query_long_multipart >> TestFilterSet::FilterGroup [GOOD] >> DataShardReassign::AutoReassignOnYellowFlag >> TestFilterSet::DuplicationValidation >> test.py::test_viewer_query_long_multipart [GOOD] >> test_cte.py::TestCte::test_toplevel >> test.py::test[solomon-Basic-default.txt] [GOOD] >> test.py::test[solomon-BasicExtractMembers-default.txt] >> TCreateAndDropViewTest::DropViewIfExists [GOOD] >> TCreateAndDropViewTest::DropViewInFolder |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/viewer/tests/py3test >> test.py::test_viewer_query_long_multipart [GOOD] |97.6%| [TM] {RESULT} ydb/core/viewer/tests/py3test >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64--9223372036854775808-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64--9223372036854775808-False] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64--9223372036854775808-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64-9223372036854775807-True] >> TMemoryController::Counters_HardLimit [GOOD] >> TMemoryController::Counters_NoHardLimit >> QuoterWithKesusTest::UpdatesCountersForParentResources [GOOD] >> QuoterWithKesusTest::CanDeleteResourceWhenUsingIt >> TestFilterSet::DuplicationValidation [GOOD] >> DataShardCompaction::CompactBorrowed [GOOD] >> DataShardCompaction::CompactBorrowedTxStatus >> TestFilterSet::CompilationValidation >> KqpTpch::Query19 [GOOD] >> KqpTpch::Query20 >> DataShardReplication::SplitMergeChanges [GOOD] >> DataShardReplication::SplitMergeChangesReboots >> KeyValueGRPCService::SimpleWriteReadWithoutLockGeneration2 [GOOD] >> KeyValueGRPCService::SimpleWriteReadWithGetChannelStatus >> DataShardStats::BlobsStatsCorrect [GOOD] >> DataShardStats::SharedCacheGarbage |97.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/scheme_tests/py3test >> tablet_scheme_tests.py::TestTabletSchemes::test_tablet_schemes[kesus] [GOOD] |97.6%| [TM] {RESULT} ydb/tests/functional/scheme_tests/py3test >> CoordinatorTests::Route >> RangeOps::Intersection [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[select_1] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[text] >> CoordinatorTests::Route [GOOD] >> CoordinatorTests::RouteTwoTopicWichSameName >> CoordinatorTests::RouteTwoTopicWichSameName [GOOD] >> LeaderElectionTests::Test1 >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64-9223372036854775807-True] [GOOD] >> 
test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64-9223372036854775807-False] >> LeaderElectionTests::Test1 [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_range_ops/unittest >> RangeOps::Intersection [GOOD] Test command err: first [(Uint64 : NULL, Uint64 : NULL) ; ()) second [(Uint64 : NULL, Uint64 : 1) ; (Uint64 : 20, Uint64 : 20)] result [(Uint64 : NULL, Uint64 : 1) ; (Uint64 : 20, Uint64 : 20)] correct [(Uint64 : NULL, Uint64 : 1) ; (Uint64 : 20, Uint64 : 20)] first [(Uint64 : NULL) ; ()) second [(Uint64 : NULL, Uint64 : 1) ; (Uint64 : 20, Uint64 : 20)] result [(Uint64 : NULL) ; (Uint64 : 20, Uint64 : 20)] correct [(Uint64 : NULL) ; (Uint64 : 20, Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 5)] result [(Uint64 : 10) ; (Uint64 : 5)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)] result [(Uint64 : 10) ; (Uint64 : 10)] correct [(Uint64 : 10) ; (Uint64 : 10)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 15)] result [(Uint64 : 10) ; (Uint64 : 15)] correct [(Uint64 : 10) ; (Uint64 : 15)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 20)] result [(Uint64 : 10) ; (Uint64 : 20)] correct [(Uint64 : 10) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 30)] result [(Uint64 : 10) ; (Uint64 : 20)] correct [(Uint64 : 10) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 10) ; (Uint64 : 10)] result [(Uint64 : 10) ; (Uint64 : 10)] correct [(Uint64 : 10) ; (Uint64 : 10)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 10) ; (Uint64 : 15)] result [(Uint64 : 10) ; (Uint64 : 15)] correct [(Uint64 : 10) ; (Uint64 : 15)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 10) ; (Uint64 : 20)] result [(Uint64 : 10) ; (Uint64 : 20)] correct [(Uint64 : 10) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 10) ; (Uint64 : 30)] result [(Uint64 : 10) ; (Uint64 : 20)] correct [(Uint64 : 10) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 15) ; (Uint64 : 17)] result [(Uint64 : 15) ; (Uint64 : 17)] correct [(Uint64 : 15) ; (Uint64 : 17)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 15) ; (Uint64 : 20)] result [(Uint64 : 15) ; (Uint64 : 20)] correct [(Uint64 : 15) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 15) ; (Uint64 : 30)] result [(Uint64 : 15) ; (Uint64 : 20)] correct [(Uint64 : 15) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 20) ; (Uint64 : 20)] result [(Uint64 : 20) ; (Uint64 : 20)] correct [(Uint64 : 20) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 20) ; (Uint64 : 30)] result [(Uint64 : 20) ; (Uint64 : 20)] correct [(Uint64 : 20) ; (Uint64 : 20)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 25) ; (Uint64 : 30)] result [(Uint64 : 25) ; (Uint64 : 20)] first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)] result ((Uint64 : 10) ; (Uint64 : 10)] first [(Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)) result [(Uint64 : 10) ; (Uint64 : 10)) first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)) result ((Uint64 : 10) ; (Uint64 : 10)) first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 15)] result ((Uint64 : 10) ; (Uint64 : 15)] correct ((Uint64 : 10) ; (Uint64 : 15)] first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 15)) 
result ((Uint64 : 10) ; (Uint64 : 15)) correct ((Uint64 : 10) ; (Uint64 : 15)) first ((Uint64 : 10) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 20)) result ((Uint64 : 10) ; (Uint64 : 20)) correct ((Uint64 : 10) ; (Uint64 : 20)) first ((Uint64 : 10) ; (Uint64 : 20)) second [(Uint64 : 1) ; (Uint64 : 20)) result ((Uint64 : 10) ; (Uint64 : 20)) correct ((Uint64 : 10) ; (Uint64 : 20)) first [(Uint64 : NULL) ; ()) second [(Uint64 : 1) ; (Uint64 : 20)) result [(Uint64 : 1) ; (Uint64 : 20)) correct [(Uint64 : 1) ; (Uint64 : 20)) first [(Uint64 : 10) ; ()) second [(Uint64 : 1) ; (Uint64 : 20)) result [(Uint64 : 10) ; (Uint64 : 20)) correct [(Uint64 : 10) ; (Uint64 : 20)) first ((Uint64 : 10) ; ()) second [(Uint64 : 1) ; (Uint64 : 10)) result ((Uint64 : 10) ; (Uint64 : 10)) first ((Uint64 : 10) ; ()) second [(Uint64 : 1) ; (Uint64 : 20)) result ((Uint64 : 10) ; (Uint64 : 20)) correct ((Uint64 : 10) ; (Uint64 : 20)) first [(Uint64 : NULL) ; (Uint64 : 10)] second [(Uint64 : 1) ; (Uint64 : 20)) result [(Uint64 : 1) ; (Uint64 : 10)] correct [(Uint64 : 1) ; (Uint64 : 10)] first [(Uint64 : NULL) ; (Uint64 : 20)] second [(Uint64 : 1) ; (Uint64 : 10)) result [(Uint64 : 1) ; (Uint64 : 10)) correct [(Uint64 : 1) ; (Uint64 : 10)) |97.6%| [TM] {RESULT} ydb/core/tx/datashard/ut_range_ops/unittest >> LeaderElectionTests::TestLocalMode [GOOD] >> test_scheduling.py::TestSchedule::test_skip_busy[kikimr0] [SKIPPED] >> test_result_limits.py::TestResultLimits::test_many_rows >> TopicSessionTests::TwoSessionsWithoutOffsets >> test_postgres.py::TestPostgresSuite::test_postgres_suite[text] [GOOD] >> TestFilterSet::CompilationValidation [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[withtable] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Int64-9223372036854775807-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-0-True] >> TestFormatHandler::ManyJsonClients >> test.py::test[solomon-BasicExtractMembers-default.txt] [GOOD] >> test.py::test[solomon-Downsampling-default.txt] >> TSentinelTests::PDiskRackGuardHalfRack [GOOD] >> TSentinelTests::PDiskRackGuardFullRack >> TMemoryController::Counters_NoHardLimit [GOOD] >> TMemoryController::Config_ConsumerLimits >> KqpTpch::Query20 [GOOD] >> KqpTpch::Query21 >> TCloudEventsProcessorTests::TestCreateCloudEventProcessor >> DataShardBackgroundCompaction::ShouldCompact >> test_unknown_data_source.py::TestUnknownDataSource::test_should_fail_unknown_data_source[v1-client0] [GOOD] >> TCreateAndDropViewTest::DropViewInFolder [GOOD] >> TCreateAndDropViewTest::ContextPollution >> test_cte.py::TestCte::test_toplevel [GOOD] >> DataShardReassign::AutoReassignOnYellowFlag [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-0-True] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-0-False] |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/kqp/plan2svg/py3test >> test_cte.py::TestCte::test_toplevel [GOOD] |97.7%| [TM] {RESULT} ydb/tests/functional/kqp/plan2svg/py3test >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-0-False] [GOOD] >> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-18446744073709551615-True] >> TestFormatHandler::ManyJsonClients [GOOD] >> DataShardReplication::SplitMergeChangesReboots [GOOD] >> DataShardReplication::ReplicatedTable+UseSink >> KeyValueGRPCService::SimpleWriteReadWithGetChannelStatus [GOOD] >> KeyValueGRPCService::SimpleWriteReadOverrun >> TestFormatHandler::ManyRawClients ------- 
[TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_reassign/unittest >> DataShardReassign::AutoReassignOnYellowFlag [GOOD] Test command err: 2025-06-25T15:23:26.116022Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:23:26.116185Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:23:26.116246Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000758/r3tmp/tmpydfeXp/pdisk_1.dat 2025-06-25T15:23:26.704571Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:23:26.710445Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:5} Tx{8, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose 2025-06-25T15:23:26.710715Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:5} Tx{8, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:26.739474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:23:26.744516Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:5} Tx{8, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose} hope 1 -> done Change{4, redo 987b alter 0b annex 0, ~{ 1, 33, 35, 42, 4 } -{ }, 0 gb} 2025-06-25T15:23:26.744623Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:5} Tx{8, NKikimr::NSchemeShard::TSchemeShard::TTxOperationPropose} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:26.745667Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:5:1:24576:515:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:26.745800Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:5:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:26.745904Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:6} commited cookie 1 for step 5 2025-06-25T15:23:26.764421Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:6} Tx{9, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress 2025-06-25T15:23:26.764520Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:6} Tx{9, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:26.768570Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:6} Tx{9, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} hope 1 -> done Change{5, redo 174b alter 0b annex 0, ~{ 42, 4 } -{ }, 0 gb} 2025-06-25T15:23:26.768685Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:6} Tx{9, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:26.769281Z node 1 :TABLET_MAIN DEBUG: 
tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:6:1:24576:129:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:26.769357Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:6:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:26.769445Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:7} commited cookie 1 for step 6 2025-06-25T15:23:26.769622Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:7} Tx{10, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress 2025-06-25T15:23:26.769691Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:7} Tx{10, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:26.769855Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:7} Tx{10, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} hope 1 -> done Change{6, redo 174b alter 0b annex 0, ~{ 42, 4 } -{ }, 0 gb} 2025-06-25T15:23:26.769894Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:7} Tx{10, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:26.770148Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:7:1:24576:130:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:26.770220Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:7:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:26.770288Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:8} commited cookie 1 for step 7 2025-06-25T15:23:26.770419Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:8} Tx{11, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress 2025-06-25T15:23:26.770458Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:8} Tx{11, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:26.774894Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:8} Tx{11, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} hope 1 -> done Change{7, redo 120b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-25T15:23:26.774969Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:8} Tx{11, NKikimr::NSchemeShard::TSchemeShard::TTxOperationProgress} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:26.775288Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:8:1:24576:89:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:26.775346Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046644480:2:8:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:26.775419Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:9} commited cookie 1 for step 8 2025-06-25T15:23:26.787685Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:9} Tx{12, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion 2025-06-25T15:23:26.787767Z node 1 
:TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:9} Tx{12, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:26.787861Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:9} Tx{12, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} hope 1 -> done Change{8, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:23:26.787917Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046644480:2:9} Tx{12, NKikimr::NSchemeShard::TSchemeShard::TTxNotifyCompletion} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:26.799294Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:8} Tx{14, NKikimr::NBsController::TBlobStorageController::TTxRegisterNode} queued, type NKikimr::NBsController::TBlobStorageController::TTxRegisterNode 2025-06-25T15:23:26.799380Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:8} Tx{14, NKikimr::NBsController::TBlobStorageController::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:26.799716Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:8} Tx{14, NKikimr::NBsController::TBlobStorageController::TTxRegisterNode} hope 1 -> done Change{7, redo 79b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-25T15:23:26.799785Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:8} Tx{14, NKikimr::NBsController::TBlobStorageController::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:26.840200Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:26.845332Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936130:2:3} Tx{3, NKikimr::NTenantSlotBroker::TTenantSlotBroker::TTxUpdateConfig} queued, type NKikimr::NTenantSlotBroker::TTenantSlotBroker::TTxUpdateConfig 2025-06-25T15:23:26.845409Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936130:2:3} Tx{3, NKikimr::NTenantSlotBroker::TTenantSlotBroker::TTxUpdateConfig} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:26.851592Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936130:2:3} Tx{3, NKikimr::NTenantSlotBroker::TTenantSlotBroker::TTxUpdateConfig} hope 1 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:23:26.851703Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936130:2:3} Tx{3, NKikimr::NTenantSlotBroker::TTenantSlotBroker::TTxUpdateConfig} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:26.851873Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936129:2:4} Tx{4, NKikimr::NNodeBroker::TNodeBroker::TTxUpdateConfig} queued, type NKikimr::NNodeBroker::TNodeBroker::TTxUpdateConfig 2025-06-25T15:23:26.851938Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936129:2:4} Tx{4, NKikimr::NNodeBroker::TNodeBroker::TTxUpdateConfig} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:26.854171Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936129:2:4} Tx{4, NKikimr::NNodeBroker::TNodeBroker::TTxUpdateConfig} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:23:26.854253Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037936129:2:4} Tx{4, NKikimr::NNodeBroker::TNodeBroker::TTxUpdateConfig} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:26.860833Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750865002600425 != 1750865002600429 2025-06-25T15:23:26.881873Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put 
Result: TEvPutResult {Id# [72057594037932033:2:8:0:0:87:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:26.882016Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} commited cookie 1 for step 8 2025-06-25T15:23:26.886030Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{15, NKikimr::NBsController::TBlobStorageController::TTxUpdateNodeDrives} queued, type NKikimr::NBsController::TBlobStorageController::TTxUpdateNodeDrives 2025-06-25T15:23:26.886105Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{15, NKikimr::NBsController::TBlobStorageController::TTxUpdateNodeDrives} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:26.886560Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{15, NKikimr::NBsController::TBlobStorageController::TTxUpdateNodeDrives} hope 1 -> done Change{8, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:23:26.886635Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{15, NKikimr::NBsController::TBlobStorageController::TTxUpdateNodeDrives} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:26.910506Z ... UG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:21:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:30.944261Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:22} commited cookie 1 for step 21 2025-06-25T15:23:30.973059Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:10} Tx{17, NKikimr::NHive::TTxUpdateTabletMetrics} queued, type NKikimr::NHive::TTxUpdateTabletMetrics 2025-06-25T15:23:30.973123Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:10} Tx{17, NKikimr::NHive::TTxUpdateTabletMetrics} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:30.973271Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:10} Tx{17, NKikimr::NHive::TTxUpdateTabletMetrics} hope 1 -> done Change{12, redo 143b alter 0b annex 0, ~{ 16, 4 } -{ }, 0 gb} 2025-06-25T15:23:30.973321Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:10} Tx{17, NKikimr::NHive::TTxUpdateTabletMetrics} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:30.984817Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037968897:2:10:0:0:137:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:30.984932Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037968897:2:11} commited cookie 1 for step 10 2025-06-25T15:23:31.133563Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:22} Tx{25, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-25T15:23:31.133647Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:22} Tx{25, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:31.133811Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:22} Tx{25, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{21, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-25T15:23:31.133896Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:22} Tx{25, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:31.134288Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# 
[72057594046316545:2:22:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:31.134356Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:22:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:31.134440Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:23} commited cookie 1 for step 22 2025-06-25T15:23:31.288614Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:23} Tx{26, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-25T15:23:31.288684Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:23} Tx{26, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:31.288861Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:23} Tx{26, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{22, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-25T15:23:31.288920Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:23} Tx{26, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:31.289222Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:23:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:31.289285Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:23:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:31.289350Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:24} commited cookie 1 for step 23 2025-06-25T15:23:31.429180Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:24} Tx{27, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-25T15:23:31.429276Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:24} Tx{27, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:31.429425Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:24} Tx{27, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{23, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-25T15:23:31.429487Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:24} Tx{27, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:31.429904Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:24:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:31.429957Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:24:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:31.430056Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:25} commited cookie 1 for step 24 2025-06-25T15:23:31.584614Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:25} Tx{28, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-25T15:23:31.584691Z node 1 :TABLET_EXECUTOR DEBUG: 
Leader{72057594046316545:2:25} Tx{28, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:31.584826Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:25} Tx{28, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{24, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-25T15:23:31.584899Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:25} Tx{28, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:31.585265Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:25:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:31.585332Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:25:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:31.585426Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:26} commited cookie 1 for step 25 2025-06-25T15:23:31.597747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:23:31.597812Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:31.627643Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:10} Tx{18, NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics} queued, type NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics 2025-06-25T15:23:31.627739Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:10} Tx{18, NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:31.627843Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:10} Tx{18, NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics} hope 1 -> done Change{9, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:23:31.627908Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:10} Tx{18, NKikimr::NBsController::TBlobStorageController::TTxUpdateDiskMetrics} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:31.731238Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-25T15:23:31.731314Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-25T15:23:31.731410Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} Tx{19, NKikimr::NDataShard::TDataShard::TTxCleanupTransaction} queued, type NKikimr::NDataShard::TDataShard::TTxCleanupTransaction 2025-06-25T15:23:31.731497Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} Tx{19, NKikimr::NDataShard::TDataShard::TTxCleanupTransaction} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:31.731633Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186224037888 outdated step 15000 last cleanup 0 2025-06-25T15:23:31.731710Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:23:31.731747Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-25T15:23:31.731784Z node 1 
:TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:23:31.731820Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:23:31.731900Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} Tx{19, NKikimr::NDataShard::TDataShard::TTxCleanupTransaction} hope 1 -> done Change{11, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-25T15:23:31.732072Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} Tx{19, NKikimr::NDataShard::TDataShard::TTxCleanupTransaction} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:31.732252Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-25T15:23:31.797968Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:26} Tx{29, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} queued, type NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep 2025-06-25T15:23:31.798064Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:26} Tx{29, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-25T15:23:31.798241Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:26} Tx{29, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} hope 1 -> done Change{25, redo 134b alter 0b annex 0, ~{ 2 } -{ }, 0 gb} 2025-06-25T15:23:31.798331Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:26} Tx{29, NKikimr::NFlatTxCoordinator::TTxCoordinator::TTxPlanStep} release 4194304b of static, Memory{0 dyn 0} 2025-06-25T15:23:31.798848Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:26:1:24576:90:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:31.798938Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046316545:2:26:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-25T15:23:31.799067Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72057594046316545:2:27} commited cookie 1 for step 26 --- Captured TEvCheckBlobstorageStatusResult event --- Waiting for TEvReassignTablet event... 
2025-06-25T15:23:31.942030Z node 1 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:10} CheckYellow current light yellow move channels: 0 1 2
--- Captured TEvReassignTablet event
|97.7%| [TM] {RESULT} ydb/core/tx/datashard/ut_reassign/unittest
>> QuoterWithKesusTest::CanDeleteResourceWhenUsingIt [GOOD]
>> QuoterWithKesusTest::CanKillKesusWhenUsingIt
|97.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/sqs/merge_split_common_table/std/py3test >> test.py::TestSqsSplitMergeStdTables::test_std_merge_split [GOOD]
|97.7%| [TM] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/std/py3test
>> TTxDataShardBuildIndexScan::ShadowBorrowCompaction [GOOD]
>> TTxDataShardLocalKMeansScan::BadRequest
>> test_http_api.py::TestHttpApi::test_simple_analytics_query [GOOD]
>> test_http_api.py::TestHttpApi::test_empty_query [GOOD]
>> test_http_api.py::TestHttpApi::test_warning
>> test_postgres.py::TestPostgresSuite::test_postgres_suite[withtable] [GOOD]
>> test_postgres.py::TestPostgresSuite::test_postgres_suite[horology]
>> test.py::test[solomon-Downsampling-default.txt] [GOOD]
>> test.py::test[solomon-DownsamplingValidSettings-default.txt]
>> TMemoryController::Config_ConsumerLimits [GOOD]
>> TMemoryController::SharedCache
>> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-18446744073709551615-True] [GOOD]
>> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-18446744073709551615-False]
>> test_kv.py::TestYdbKvWorkload::test_minimal_maximal_values[Uint64-18446744073709551615-False] [GOOD]
>> test_kv.py::TestYdbKvWorkload::test_dynumber
>> TabletService_ChangeSchema::Basics
>> ColumnShardTiers::TieringUsage [GOOD]
>> TestFormatHandler::ManyRawClients [GOOD]
>> test_select.py::TestDML::test_select[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD]
>> test_kv.py::TestYdbKvWorkload::test_dynumber [GOOD]
>> TestFormatHandler::ClientValidation
>> Coordinator::ReadStepSubscribe
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::TieringUsage [GOOD]
Test command err: 2025-06-25T15:20:48.803059Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:20:48.803336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:20:48.803405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000acb/r3tmp/tmpeBzq7Q/pdisk_1.dat 2025-06-25T15:20:49.468155Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 TServer::EnableGrpc on GrpcPort 20170, node 1 TClient is connected to server localhost:1252 2025-06-25T15:20:50.515583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:20:50.585149Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:20:50.595189Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:20:50.595323Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:20:50.595361Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:20:50.596719Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:20:50.597112Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750864844840260 != 1750864844840264 2025-06-25T15:20:50.657816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:20:50.659106Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:20:50.671994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 2025-06-25T15:21:01.860950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:640:2530], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:21:01.861135Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:21:01.969032Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:21:02.004030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:21:02.382330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:778:2617], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:21:02.382459Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:21:02.382750Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:783:2622], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:21:02.392215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:21:02.560202Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:785:2624], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-25T15:21:03.645474Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:877:2687] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:21:04.666794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:21:05.318948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:21:06.096209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:21:06.884855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:21:07.497043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_modify_acl.cpp:101) 2025-06-25T15:21:09.367522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:21:09.704936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_finalize_build_index.cpp:383) REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=;EXPECTATION=1 
FINISHED_REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-25T15:21:25.580929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_external_data_source.cpp:267) REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;RESULT=;EXPECTATION=1 FINISHED_REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier2` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/fake", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-25T15:21:37.876800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715709:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/scheme ... 
ne=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:23:36.256803Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc306e0-51d811f0-841054e9-a94cac68;tablet_id=72075186224037893;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=1.000000s; 2025-06-25T15:23:36.256849Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc306e0-51d811f0-841054e9-a94cac68;tablet_id=72075186224037893;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:23:36.256979Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 72075186224037893 Delete Blob DS:2181038080:[72075186224037893:1:13:15:0:1136:0] 2025-06-25T15:23:36.257034Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 72075186224037893 Delete Blob DS:2181038080:[72075186224037893:1:12:14:0:1520:0] 2025-06-25T15:23:36.257194Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc3957e-51d811f0-a1e9c070-b5776388;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::GENERAL;success=1; 2025-06-25T15:23:36.257237Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc3957e-51d811f0-a1e9c070-b5776388;fline=with_appended.cpp:65;portions=19,;task_id=5cc3957e-51d811f0-a1e9c070-b5776388; 2025-06-25T15:23:36.257834Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc3957e-51d811f0-a1e9c070-b5776388;fline=manager.cpp:15;event=unlock;process_id=CS::GENERAL::5cc3957e-51d811f0-a1e9c070-b5776388; 2025-06-25T15:23:36.257905Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc3957e-51d811f0-a1e9c070-b5776388;fline=granule.cpp:97;event=OnCompactionFinished;info=(granule:16;path_id:16;size:112864;portions_count:5;); 2025-06-25T15:23:36.257948Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc3957e-51d811f0-a1e9c070-b5776388;tablet_id=72075186224037893;fline=columnshard_impl.cpp:442;event=EnqueueBackgroundActivities;periodic=0; 2025-06-25T15:23:36.258025Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc3957e-51d811f0-a1e9c070-b5776388;tablet_id=72075186224037893;fline=column_engine_logs.cpp:224;event=StartCleanup;portions_count=1; 2025-06-25T15:23:36.258106Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc3957e-51d811f0-a1e9c070-b5776388;tablet_id=72075186224037893;fline=column_engine_logs.cpp:299;event=StartCleanup;portions_count=0;portions_prepared=2;drop=0;skip=0;portions_counter=2;chunks=18;limit=0;max_portions=1000;max_chunks=500000; 2025-06-25T15:23:36.258184Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc3957e-51d811f0-a1e9c070-b5776388;tablet_id=72075186224037893;fline=manager.cpp:10;event=lock;process_id=CS::CLEANUP::PORTIONS::PORTIONS_DROP::5cf12be2-51d811f0-982f7e93-c7459561; 2025-06-25T15:23:36.258306Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc3957e-51d811f0-a1e9c070-b5776388;tablet_id=72075186224037893;fline=columnshard_impl.cpp:820;background=cleanup;skip_reason=no_changes; 2025-06-25T15:23:36.258366Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc3957e-51d811f0-a1e9c070-b5776388;tablet_id=72075186224037893;queue=ttl;external_count=0;fline=granule.cpp:168;event=skip_actualization;waiting=1.000000s; 2025-06-25T15:23:36.258414Z node 1 :TX_COLUMNSHARD 
DEBUG: log.cpp:784: tablet_id=72075186224037893;task_id=5cc3957e-51d811f0-a1e9c070-b5776388;tablet_id=72075186224037893;fline=columnshard_impl.cpp:749;background=ttl;skip_reason=no_changes; 2025-06-25T15:23:36.258496Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 72075186224037893 Save Batch GenStep: 1:16 Blob count: 1 2025-06-25T15:23:36.258718Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager at tablet 72075186224037893 Save Batch GenStep: 1:17 Blob count: 1 2025-06-25T15:23:36.260210Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2766:4074];ev=NKikimr::NColumnShard::TEvPrivate::TEvStartCompaction;path_id=16;fline=storage.cpp:87;event=granule_compaction_weight;priority=(10,19999998864); 2025-06-25T15:23:36.260387Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2766:4074];ev=NKikimr::NColumnShard::TEvPrivate::TEvStartCompaction;path_id=16;fline=optimizer.h:900;stop_instant=NO_VALUE_OPTIONAL;size=2656;next=;count=2;info={bytes=1136;count=1;records=1};event=start_optimization;stop_point=;main_portion=19; 2025-06-25T15:23:36.260662Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2766:4074];ev=NKikimr::NColumnShard::TEvPrivate::TEvStartCompaction;fline=manager.cpp:10;event=lock;process_id=CS::GENERAL::5cf18934-51d811f0-9997933f-4d0f753d; 2025-06-25T15:23:36.262066Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 72075186224037893 2025-06-25T15:23:36.262153Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[47] (CS::CLEANUP::PORTIONS) apply at tablet 72075186224037893 2025-06-25T15:23:36.262659Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=4408;raw_bytes=72284;count=2;records=64} compacted {blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=109592;raw_bytes=3573310;count=2;records=2978} inactive {blob_bytes=2656;raw_bytes=2178;count=2;records=2} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 72075186224037893 2025-06-25T15:23:36.263232Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: external_task_id=5cf18934-51d811f0-9997933f-4d0f753d;fline=actor.cpp:48;task=agents_waiting=1;additional_info=();; 2025-06-25T15:23:36.263442Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: event_type=NKikimr::NBlobCache::TEvBlobCache::TEvReadBlobRangeResult;fline=task.cpp:110;event=OnDataReady;task=agents_waiting=0;additional_info=();;external_task_id=5cf18934-51d811f0-9997933f-4d0f753d; 2025-06-25T15:23:36.269309Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
event=on_execution;consumer=GENERAL_COMPACTION;task_id=5cf18934-51d811f0-9997933f-4d0f753d;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=5cf18934-51d811f0-9997933f-4d0f753d;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=72075186224037893;parent_id=[1:2766:4074];task_id=5cf18934-51d811f0-9997933f-4d0f753d;task_class=CS::GENERAL;fline=general_compaction.cpp:138;event=blobs_created_diff;appended=0;;column_id:1;chunk_idx:0;blob_range:[NO_BLOB:0:192];;column_id:2;chunk_idx:0;blob_range:[NO_BLOB:192:232];;column_id:3;chunk_idx:0;blob_range:[NO_BLOB:424:256];;column_id:4;chunk_idx:0;blob_range:[NO_BLOB:680:192];;column_id:5;chunk_idx:0;blob_range:[NO_BLOB:872:264];;column_id:4294967040;chunk_idx:0;blob_range:[NO_BLOB:1136:192];;column_id:4294967041;chunk_idx:0;blob_range:[NO_BLOB:1328:192];;;;switched=(portion_id:20;path_id:16;records_count:1;schema_version:2;level:0;cs:plan_step=1742227112000;tx_id=18446744073709551615;;wi:11;;column_size:1136;index_size:0;meta:(()););(portion_id:19;path_id:16;records_count:1;schema_version:2;level:0;;column_size:1520;index_size:0;meta:(()););; 2025-06-25T15:23:36.269393Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: event=on_execution;consumer=GENERAL_COMPACTION;task_id=5cf18934-51d811f0-9997933f-4d0f753d;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;event=on_finished;consumer=GENERAL_COMPACTION;task_id=5cf18934-51d811f0-9997933f-4d0f753d;script=FULL_PORTIONS_FETCHING::GENERAL_COMPACTION;tablet_id=72075186224037893;parent_id=[1:2766:4074];task_id=5cf18934-51d811f0-9997933f-4d0f753d;task_class=CS::GENERAL;fline=general_compaction.cpp:140;event=blobs_created;appended=1;switched=2; 2025-06-25T15:23:36.269578Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2766:4074];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:52;event=TEvWriteIndex;count=1; 2025-06-25T15:23:36.269673Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2766:4074];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=columnshard__write_index.cpp:63;event=Limiter; 2025-06-25T15:23:36.269993Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: WriteIndex at tablet 72075186224037893 2025-06-25T15:23:36.270097Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2766:4074];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:51;memory_size=94;data_size=70;sum=2726;count=57; 2025-06-25T15:23:36.270163Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2766:4074];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_meta.cpp:71;memory_size=246;data_size=238;sum=7134;count=58;size_of_meta=136; 2025-06-25T15:23:36.270219Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:2766:4074];ev=NKikimr::NColumnShard::TEvPrivate::TEvWriteIndex;fline=constructor_portion.cpp:40;memory_size=318;data_size=310;sum=9222;count=29;size_of_portion=208; 2025-06-25T15:23:36.270370Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxWriteIndex[49] (CS::GENERAL) apply at tablet 72075186224037893 2025-06-25T15:23:36.272198Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: BlobManager on execute at tablet 72075186224037893 Save Batch GenStep: 1:18 Blob count: 1 2025-06-25T15:23:36.272418Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: Index: tables 1 inserted {blob_bytes=4408;raw_bytes=72284;count=2;records=64} compacted 
{blob_bytes=0;raw_bytes=0;count=0;records=0} s-compacted {blob_bytes=109592;raw_bytes=3573310;count=2;records=2978} inactive {blob_bytes=2656;raw_bytes=2178;count=2;records=2} evicted {blob_bytes=0;raw_bytes=0;count=0;records=0} at tablet 72075186224037893 Cleaning waiting... Fake storage clean FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037893 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037893 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 >> test_http_api.py::TestHttpApi::test_warning [GOOD] >> test_http_api.py::TestHttpApi::test_get_unknown_query [GOOD] >> test_http_api.py::TestHttpApi::test_unauthenticated [GOOD] >> test_http_api.py::TestHttpApi::test_create_idempotency >> DataShardBackgroundCompaction::ShouldCompact [GOOD] >> DataShardBackgroundCompaction::ShouldNotCompactWhenBorrowed >> test.py::test_order_conflict [GOOD] >> test.py::test_missing_value [GOOD] >> test.py::test_unexpected_value [GOOD] >> test.py::test_local >> TTxDataShardLocalKMeansScan::BadRequest [GOOD] >> TTxDataShardLocalKMeansScan::TooManyClusters |97.7%| [TA] $(B)/ydb/core/tx/tiering/ut/test-results/unittest/{meta.json ... results_accumulator.log} |97.7%| [TA] {RESULT} $(B)/ydb/core/tx/tiering/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DataShardReplication::ReplicatedTable+UseSink [GOOD] >> DataShardReplication::ReplicatedTable-UseSink >> TCreateAndDropViewTest::ContextPollution [GOOD] >> TEvaluateExprInViewTest::EvaluateExpr >> KeyValueGRPCService::SimpleWriteReadOverrun [GOOD] >> KeyValueGRPCService::SimpleWriteReadRange >> TDataShardRSTest::TestCleanupInRS+UseSink [GOOD] >> TDataShardRSTest::TestCleanupInRS-UseSink >> TCloudEventsProcessorTests::TestCreateCloudEventProcessor [GOOD] >> TestFormatHandler::ClientValidation [GOOD] >> test.py::test[solomon-DownsamplingValidSettings-default.txt] [GOOD] >> test.py::test[solomon-HistResponse-default.txt] >> TopicSessionTests::TwoSessionsWithoutOffsets [GOOD] >> TestFormatHandler::ClientError >> QueryActorTest::SimpleQuery >> TopicSessionTests::TwoSessionWithoutPredicate >> GraphShard::NormalizeAndDownsample1 [GOOD] >> GraphShard::NormalizeAndDownsample2 [GOOD] >> GraphShard::NormalizeAndDownsample3 [GOOD] >> GraphShard::NormalizeAndDownsample4 [GOOD] >> GraphShard::NormalizeAndDownsample5 [GOOD] >> GraphShard::NormalizeAndDownsample6 [GOOD] >> GraphShard::CheckHistogramToPercentileConversions [GOOD] >> GraphShard::CreateGraphShard >> test_postgres.py::TestPostgresSuite::test_postgres_suite[horology] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[float8] >> TabletService_ChangeSchema::Basics [GOOD] >> TabletService_ChangeSchema::OnlyAdminsAllowed ------- [TS] {asan, default-linux-x86_64, release} ydb/core/ymq/actor/cloud_events/cloud_events_ut/unittest >> TCloudEventsProcessorTests::TestCreateCloudEventProcessor [GOOD] Test command err: 2025-06-25T15:23:29.896218Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519907953651577815:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:23:29.902187Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000775/r3tmp/tmprbErNf/pdisk_1.dat 2025-06-25T15:23:30.323296Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:30.323394Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:30.347522Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7519907953651577609:2080] 1750865009851940 != 1750865009851943 2025-06-25T15:23:30.358737Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:30.365593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 62858, node 1 2025-06-25T15:23:30.568164Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:23:30.568188Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:23:30.568195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:23:30.568325Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:23:30.894539Z node 1 
:TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:30898 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:23:31.175045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... TClient is connected to server localhost:30898 waiting... 2025-06-25T15:23:31.498893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-25T15:23:33.285219Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519907970831447495:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:23:33.285347Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:23:33.286190Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519907970831447507:2295], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:23:33.293033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710660:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:23:33.308680Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519907970831447509:2296], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710660 completed, doublechecking } 2025-06-25T15:23:33.333420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:1, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:23:33.381443Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519907970831447610:2409] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } ===Execute query: UPSERT INTO`/Root/SQS/CreateCloudEventProcessor/.CloudEventsYmq` (CreatedAt,Id,QueueName,Type,CloudId,FolderId,UserSID,MaskedToken,AuthType,PeerName,RequestId,IdempotencyId,Labels)VALUES(4012246954,12736973818550555076,'queue1','CreateMessageQueue','cloud1','folder1','username','maskedToken123','authtype','localhost:8000','req1','idemp1','{"k1" : "v1"}'); 2025-06-25T15:23:34.788519Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710663. Ctx: { TraceId: 01jyktzvsg3z5mnxs4ykkkh09b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTQxZTc5NTItOTdlNzNmZjgtMTA4YjVlNDctMjgwYjI5NjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:34.791256Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710664. Ctx: { TraceId: 01jyktztgq6tw26zsf26p6s92x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTY0MDZhMWItZWUwZWFmNTQtM2NmMjZiN2EtZWYxMDlhNGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:34.893050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519907953651577815:2233];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:23:34.893121Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; End execute query=== ===Execute query: UPSERT INTO`/Root/SQS/CreateCloudEventProcessor/.CloudEventsYmq` (CreatedAt,Id,QueueName,Type,CloudId,FolderId,UserSID,MaskedToken,AuthType,PeerName,RequestId,IdempotencyId,Labels)VALUES(4012602790,8958047732825683008,'queue1','UpdateMessageQueue','cloud1','folder1','username','maskedToken123','authtype','localhost:8000','req1','idemp1','{"k1" : "v1"}'); 2025-06-25T15:23:35.025664Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710665. Ctx: { TraceId: 01jyktzw3tbyr1h2n5x8rnzfn2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTQxZTc5NTItOTdlNzNmZjgtMTA4YjVlNDctMjgwYjI5NjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root End execute query=== ===Execute query: UPSERT INTO`/Root/SQS/CreateCloudEventProcessor/.CloudEventsYmq` (CreatedAt,Id,QueueName,Type,CloudId,FolderId,UserSID,MaskedToken,AuthType,PeerName,RequestId,IdempotencyId,Labels)VALUES(4012737679,9099496875415951477,'queue1','DeleteMessageQueue','cloud1','folder1','username','maskedToken123','authtype','localhost:8000','req1','idemp1','{"k1" : "v1"}'); 2025-06-25T15:23:35.118360Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710666. Ctx: { TraceId: 01jyktzw7z75edcx1ckv1svz39, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTQxZTc5NTItOTdlNzNmZjgtMTA4YjVlNDctMjgwYjI5NjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root End execute query=== 2025-06-25T15:23:36.896137Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jyktzy1yf76dnbzkhvmanqfw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDFkYjE0YWYtODI3YmZjMTUtM2I2Zjc5Y2QtYWIwNzNlYzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:37.141572Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710668. Ctx: { TraceId: 01jyktzy282zb0wrd17vxkw921, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDFkYjE0YWYtODI3YmZjMTUtM2I2Zjc5Y2QtYWIwNzNlYzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:23:39.154881Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710669. Ctx: { TraceId: 01jykv008h8v5pdcpyxvd76qyj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjE2NTJjYWMtOGM0YTE1MGYtMzgyZGRjODYtMWYwMGY5MTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-25T15:23:31.196883Z: component=schemeshard, tx_id=281474976710657, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//Root], status=SUCCESS, detailed_status=StatusAccepted 2025-06-25T15:23:31.213161Z: component=schemeshard, tx_id=281474976710658, remote_address={none}, subject={none}, sanitized_token={none}, database=/Root, operation=CREATE DIRECTORY, paths=[/Root/SQS], status=SUCCESS, detailed_status=StatusAccepted 2025-06-25T15:23:31.479996Z: component=schemeshard, tx_id=281474976710659, remote_address={none}, subject={none}, sanitized_token={none}, database=/Root, operation=CREATE DIRECTORY, paths=[/Root/SQS/Root/SQS/CreateCloudEventProcessor], status=SUCCESS, detailed_status=StatusAccepted 2025-06-25T15:23:33.296352Z: component=schemeshard, tx_id=281474976710660, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE RESOURCE POOL, paths=[.metadata/workload_manager/pools/default], status=SUCCESS, detailed_status=StatusAccepted, new_owner=metadata@system, acl_add=[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin] 2025-06-25T15:23:33.334805Z: component=schemeshard, tx_id=281474976710661, remote_address=::1, subject={none}, sanitized_token={none}, database=/Root, operation=CREATE TABLE, paths=[/Root/SQS/CreateCloudEventProcessor/.CloudEventsYmq], status=SUCCESS, detailed_status=StatusAccepted 2025-06-25T15:23:33.380973Z: component=schemeshard, tx_id=281474976710662, remote_address={none}, subject=metadata@system, sanitized_token={none}, database=/Root, operation=CREATE RESOURCE POOL, paths=[default], status=SUCCESS, detailed_status=StatusAlreadyExists, reason=Check failed: path: '/Root/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), new_owner=metadata@system, acl_add=[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin] 2025-06-25T15:23:36.900637Z: component=ymq, id=12736973818550555076$CreateMessageQueue$4012246954, operation=CreateMessageQueue, status=SUCCESS, remote_address=localhost:8000, subject=username, masked_token=maskedToken123, auth_type=authtype, permission=ymq.queues.create, created_at=4012246954, cloud_id=cloud1, folder_id=folder1, request_id=req1, idempotency_id=idemp1, queue=queue1, labels={"k1" : "v1"} 2025-06-25T15:23:36.900661Z: component=ymq, id=8958047732825683008$UpdateMessageQueue$4012602790, operation=UpdateMessageQueue, status=SUCCESS, remote_address=localhost:8000, subject=username, masked_token=maskedToken123, auth_type=authtype, permission=ymq.queues.setAttributes, created_at=4012602790, cloud_id=cloud1, folder_id=folder1, request_id=req1, idempotency_id=idemp1, queue=queue1, labels={"k1" : "v1"} 2025-06-25T15:23:36.900678Z: component=ymq, id=9099496875415951477$DeleteMessageQueue$4012737679, operation=DeleteMessageQueue, status=SUCCESS, remote_address=localhost:8000, subject=username, masked_token=maskedToken123, auth_type=authtype, permission=ymq.queues.delete, created_at=4012737679, cloud_id=cloud1, folder_id=folder1, request_id=req1, idempotency_id=idemp1, queue=queue1, labels={"k1" : "v1"} |97.7%| [TS] {RESULT} ydb/core/ymq/actor/cloud_events/cloud_events_ut/unittest >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> QuoterWithKesusTest::CanKillKesusWhenUsingIt [GOOD] >> 
test_http_api.py::TestHttpApi::test_create_idempotency [GOOD]
>> test_http_api.py::TestHttpApi::test_stop_idempotency
>> DataShardCompaction::CompactBorrowedTxStatus [GOOD]
|97.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/common/py3test >> test_unknown_data_source.py::TestUnknownDataSource::test_should_fail_unknown_data_source[v1-client0] [GOOD]
|97.7%| [TM] {RESULT} ydb/tests/fq/common/py3test
>> DataShardBackgroundCompaction::ShouldNotCompactWhenBorrowed [GOOD]
>> DataShardBackgroundCompaction::ShouldNotCompactWhenCopyTable
>> TTxDataShardLocalKMeansScan::TooManyClusters [GOOD]
>> TTxDataShardLocalKMeansScan::MainToPosting
>> GraphShard::CreateGraphShard [GOOD]
>> test_example.py::TestExample::test_example
------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_compaction/unittest >> DataShardCompaction::CompactBorrowedTxStatus [GOOD]
Test command err: 2025-06-25T15:23:09.465112Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:23:09.465372Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:23:09.465442Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00085e/r3tmp/tmpVy2veQ/pdisk_1.dat 2025-06-25T15:23:10.221277Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:23:10.232766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:23:10.280466Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:10.287037Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750864985962127 != 1750864985962131 2025-06-25T15:23:10.334591Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T15:23:10.335585Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T15:23:10.337668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:10.337818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:10.350738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:23:10.444638Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T15:23:10.444727Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T15:23:10.446183Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T15:23:10.574780Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T15:23:10.574906Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T15:23:10.575597Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T15:23:10.575716Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T15:23:10.576076Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T15:23:10.576268Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T15:23:10.576453Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T15:23:10.576747Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T15:23:10.579822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:23:10.581666Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T15:23:10.581762Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T15:23:10.643681Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:23:10.644981Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:23:10.650882Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:23:10.651274Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:23:10.714621Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:23:10.715421Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:23:10.715536Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:23:10.717520Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:23:10.717600Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:23:10.717699Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:23:10.720898Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T15:23:10.721095Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:23:10.721195Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:23:10.733902Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:23:10.801616Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:23:10.802983Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:23:10.803159Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:23:10.803204Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:23:10.803243Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:23:10.803278Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:23:10.803535Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:23:10.803592Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:23:10.804883Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:23:10.804988Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:23:10.805043Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:23:10.805084Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:23:10.805215Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:23:10.805271Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:23:10.805328Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:23:10.805366Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:23:10.805411Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:23:10.806912Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:23:10.806959Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:23:10.807020Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], 
sessionId# [0:0:0] 2025-06-25T15:23:10.807120Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:23:10.807158Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:23:10.807282Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:23:10.807654Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:23:10.807732Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:23:10.807843Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:23:10.807957Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15: ... 1474976715661] at 72075186224037892 is DelayComplete 2025-06-25T15:23:43.063284Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [71500:281474976715661] at 72075186224037892 executing on unit CompleteOperation 2025-06-25T15:23:43.063313Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [71500:281474976715661] at 72075186224037892 to execution unit CompletedOperations 2025-06-25T15:23:43.063340Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [71500:281474976715661] at 72075186224037892 on unit CompletedOperations 2025-06-25T15:23:43.063373Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [71500:281474976715661] at 72075186224037892 is Executed 2025-06-25T15:23:43.063401Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [71500:281474976715661] at 72075186224037892 executing on unit CompletedOperations 2025-06-25T15:23:43.063429Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [71500:281474976715661] at 72075186224037892 has finished 2025-06-25T15:23:43.063458Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037892 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:23:43.063487Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037892 2025-06-25T15:23:43.063516Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037892 has no attached operations 2025-06-25T15:23:43.063547Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037892 2025-06-25T15:23:43.074812Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-25T15:23:43.074885Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037892 2025-06-25T15:23:43.074936Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [71500:281474976715661] at 72075186224037892 on unit CompleteOperation 2025-06-25T15:23:43.074993Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [71500 : 281474976715661] from 72075186224037892 at tablet 72075186224037892 send result to client 
[2:1408:3209], exec latency: 0 ms, propose latency: 1 ms 2025-06-25T15:23:43.075039Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037892 2025-06-25T15:23:43.075195Z node 2 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [2:1408:3209] txid# 281474976715661 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037892 marker# P12 2025-06-25T15:23:43.075240Z node 2 :TX_PROXY DEBUG: datareq.cpp:2968: Send stream clearance, shard: 72075186224037890, txid: 281474976715661, cleared: 1 2025-06-25T15:23:43.075385Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287940, Sender [2:1408:3209], Recipient [2:726:2600]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715661 Cleared: true 2025-06-25T15:23:43.075427Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3153: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-25T15:23:43.075507Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:726:2600], Recipient [2:726:2600]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:23:43.075540Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:23:43.075598Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-25T15:23:43.075633Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:23:43.075669Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [71500:281474976715661] at 72075186224037890 for WaitForStreamClearance 2025-06-25T15:23:43.075699Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [71500:281474976715661] at 72075186224037890 on unit WaitForStreamClearance 2025-06-25T15:23:43.075734Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [71500:281474976715661] at 72075186224037890 2025-06-25T15:23:43.075772Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [71500:281474976715661] at 72075186224037890 is Executed 2025-06-25T15:23:43.075804Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [71500:281474976715661] at 72075186224037890 executing on unit WaitForStreamClearance 2025-06-25T15:23:43.075835Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [71500:281474976715661] at 72075186224037890 to execution unit ReadTableScan 2025-06-25T15:23:43.075866Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [71500:281474976715661] at 72075186224037890 on unit ReadTableScan 2025-06-25T15:23:43.076092Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [71500:281474976715661] at 72075186224037890 is Continue 2025-06-25T15:23:43.076125Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:23:43.076154Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037890 2025-06-25T15:23:43.076184Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-25T15:23:43.076215Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-25T15:23:43.076720Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435082, Sender [2:1440:3238], Recipient [2:726:2600]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-25T15:23:43.076762Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-25T15:23:43.076959Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715661, MessageQuota: 1 2025-06-25T15:23:43.077046Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715661, MessageQuota: 1 2025-06-25T15:23:43.127349Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-25T15:23:43.127421Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715661, at: 72075186224037890 2025-06-25T15:23:43.127760Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [2:726:2600], Recipient [2:726:2600]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:23:43.127807Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:23:43.127872Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-25T15:23:43.127924Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:23:43.127963Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [71500:281474976715661] at 72075186224037890 for ReadTableScan 2025-06-25T15:23:43.127994Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [71500:281474976715661] at 72075186224037890 on unit ReadTableScan 2025-06-25T15:23:43.128031Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [71500:281474976715661] at 72075186224037890 error: , IsFatalError: 0 2025-06-25T15:23:43.128073Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [71500:281474976715661] at 72075186224037890 is Executed 2025-06-25T15:23:43.128119Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [71500:281474976715661] at 72075186224037890 executing on unit ReadTableScan 2025-06-25T15:23:43.128164Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [71500:281474976715661] at 72075186224037890 to execution unit CompleteOperation 2025-06-25T15:23:43.128193Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [71500:281474976715661] at 72075186224037890 on unit CompleteOperation 2025-06-25T15:23:43.128544Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [71500:281474976715661] at 72075186224037890 is DelayComplete 2025-06-25T15:23:43.128583Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [71500:281474976715661] at 72075186224037890 executing on unit CompleteOperation 2025-06-25T15:23:43.128612Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [71500:281474976715661] at 72075186224037890 to execution unit CompletedOperations 2025-06-25T15:23:43.128640Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1827: Trying to execute [71500:281474976715661] at 72075186224037890 on unit CompletedOperations 2025-06-25T15:23:43.128674Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [71500:281474976715661] at 72075186224037890 is Executed 2025-06-25T15:23:43.128697Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [71500:281474976715661] at 72075186224037890 executing on unit CompletedOperations 2025-06-25T15:23:43.128720Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [71500:281474976715661] at 72075186224037890 has finished 2025-06-25T15:23:43.128747Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:23:43.128771Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-25T15:23:43.128797Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-25T15:23:43.128823Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-25T15:23:43.139847Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T15:23:43.139936Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-25T15:23:43.139974Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [71500:281474976715661] at 72075186224037890 on unit CompleteOperation 2025-06-25T15:23:43.140032Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [71500 : 281474976715661] from 72075186224037890 at tablet 72075186224037890 send result to client [2:1408:3209], exec latency: 1 ms, propose latency: 1 ms 2025-06-25T15:23:43.140076Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T15:23:43.140260Z node 2 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [2:1408:3209] txid# 281474976715661 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037890 marker# P12 2025-06-25T15:23:43.140358Z node 2 :TX_PROXY INFO: datareq.cpp:834: Actor# [2:1408:3209] txid# 281474976715661 RESPONSE Status# ExecComplete prepare time: 0.000500s execute time: 0.001500s total time: 0.002000s marker# P13 |97.7%| [TM] {RESULT} ydb/core/tx/datashard/ut_compaction/unittest ------- [TS] {asan, default-linux-x86_64, release} ydb/core/graph/shard/ut/unittest >> GraphShard::CreateGraphShard [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:118:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:130:2058] recipient: [1:112:2142] 2025-06-25T15:23:43.480606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7569: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-25T15:23:43.480851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7597: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, 
InflightLimit# 10 2025-06-25T15:23:43.480929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7483: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-25T15:23:43.480969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7499: OperationsProcessing config: using default configuration 2025-06-25T15:23:43.481017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-25T15:23:43.481054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7505: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-25T15:23:43.481179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7629: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-25T15:23:43.481246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-25T15:23:43.481925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7700: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-25T15:23:43.483874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-25T15:23:43.572663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7390: Cannot subscribe to console configs 2025-06-25T15:23:43.572730Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:43.607032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-25T15:23:43.607582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-25T15:23:43.607823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-25T15:23:43.614789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-25T15:23:43.615206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-25T15:23:43.617743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1358: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-25T15:23:43.618871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:33: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-25T15:23:43.627706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:158: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:23:43.629094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-25T15:23:43.640047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:23:43.640169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:23:43.640332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-25T15:23:43.640382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-25T15:23:43.640552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-25T15:23:43.640661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6755: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-25T15:23:43.647235Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2150] sender: [1:244:2058] recipient: [1:15:2062] 2025-06-25T15:23:43.784463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-25T15:23:43.786130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:23:43.787303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-25T15:23:43.787347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 1:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046678944, LocalPathId: 1] source path: 2025-06-25T15:23:43.788635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-25T15:23:43.788755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:23:43.794085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-25T15:23:43.796521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-25T15:23:43.796809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:23:43.796965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-25T15:23:43.797017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-25T15:23:43.797074Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 2 -> 3 2025-06-25T15:23:43.802599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:23:43.802675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-25T15:23:43.802728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 3 -> 128 2025-06-25T15:23:43.806112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:23:43.806190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-25T15:23:43.806231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:23:43.806301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-25T15:23:43.815811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:23:43.820271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-25T15:23:43.822321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-25T15:23:43.823204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:23:43.823323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:23:43.823367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:23:43.827725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 1:0 128 -> 240 2025-06-25T15:23:43.827807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-25T15:23:43.828009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path 
for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-25T15:23:43.828084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:403: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-25T15:23:43.833114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:23:43.833167Z node 1 :FLAT_TX_SCHEMESHARD ... bDomainState::TPropose ProgressState leave, operationId 102:1, at tablet# 72057594046678944 2025-06-25T15:23:44.092030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 102 ready parts: 2/2 2025-06-25T15:23:44.092161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:23:44.094442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:654: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-25T15:23:44.094619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1778: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-25T15:23:44.095023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:689: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-25T15:23:44.095123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:693: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-25T15:23:44.095156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:719: TTxOperationPlanStep Execute operation part is already done, operationId: 102:0 2025-06-25T15:23:44.095190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:1, at tablet# 72057594046678944 2025-06-25T15:23:44.095473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:1 128 -> 240 2025-06-25T15:23:44.095536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:1, at tablet# 72057594046678944 2025-06-25T15:23:44.095643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-25T15:23:44.095738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:569: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 
72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[1:412:2378], EffectiveACLVersion: 0, SubdomainVersion: 3, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 72075186234409549, TenantRootACL: }, subDomain->GetVersion(): 3, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 2025-06-25T15:23:44.097558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-25T15:23:44.097592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-25T15:23:44.097777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-25T15:23:44.097819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:211:2211], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-25T15:23:44.098100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:1, at schemeshard: 72057594046678944 2025-06-25T15:23:44.098147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:761: [72057594046678944] TSyncHive, operationId 102:1, ProgressState, NeedSyncHive: 0 2025-06-25T15:23:44.098178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 102:1 240 -> 240 2025-06-25T15:23:44.098645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:23:44.098757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-25T15:23:44.098788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-25T15:23:44.098822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-25T15:23:44.098861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-25T15:23:44.098919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/2, is published: true 2025-06-25T15:23:44.101280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 102:1, at schemeshard: 72057594046678944 2025-06-25T15:23:44.101319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:494: [72057594046678944] TDone opId# 102:1 ProgressState 
2025-06-25T15:23:44.101392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:1 progress is 2/2 2025-06-25T15:23:44.101416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-25T15:23:44.101442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:908: Part operation is done id#102:1 progress is 2/2 2025-06-25T15:23:44.101460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-25T15:23:44.101481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/2, is published: true 2025-06-25T15:23:44.101522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1668: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-25T15:23:44.101565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:0 2025-06-25T15:23:44.101602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:0 2025-06-25T15:23:44.101717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-25T15:23:44.101755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:975: Operation and all the parts is done, operation id: 102:1 2025-06-25T15:23:44.101775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5258: RemoveTx for txid 102:1 2025-06-25T15:23:44.101835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-25T15:23:44.102277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-25T15:23:44.103561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:213: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-25T15:23:44.103728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:259: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-25T15:23:44.104084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-25T15:23:44.104163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:230: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-25T15:23:44.104192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:239: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:578:2505] TestWaitNotification: OK eventTxId 102 2025-06-25T15:23:44.105495Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/db1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-25T15:23:44.105729Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/db1" took 191us result status StatusSuccess 
2025-06-25T15:23:44.107881Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/db1" PathDescription { Self { Name: "db1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 GraphShard: 72075186234409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 5 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |97.7%| [TS] {RESULT} ydb/core/graph/shard/ut/unittest |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> ServerRestartTest::RestartOnGetSession >> TestFormatHandler::ClientError [GOOD] >> TestFormatHandler::ClientErrorWithEmptyFilter >> KqpTpch::Query21 [GOOD] >> KqpTpch::Query22 >> DataShardReplication::ReplicatedTable-UseSink [GOOD] >> DataShardReplication::ApplyChangesToReplicatedTable >> TabletService_ChangeSchema::OnlyAdminsAllowed [GOOD] >> TabletService_ExecuteMiniKQL::BasicMiniKQLRead >> QueryActorTest::SimpleQuery [GOOD] >> QueryActorTest::Rollback >> KeyValueGRPCService::SimpleWriteReadRange [GOOD] >> KeyValueGRPCService::SimpleWriteListRange >> test.py::test[solomon-HistResponse-default.txt] [GOOD] >> test.py::test[solomon-InvalidProject-] >> TSentinelTests::PDiskRackGuardFullRack [GOOD] >> TSentinelTests::PDiskFaultyGuard >> test_postgres.py::TestPostgresSuite::test_postgres_suite[float8] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[roles] >> TopicSessionTests::TwoSessionWithoutPredicate [GOOD] >> KafkaProtocol::BalanceScenarioForFederation [GOOD] >> KafkaProtocol::BalanceScenarioCdc >> DataShardBackgroundCompaction::ShouldNotCompactWhenCopyTable [GOOD] >> DataShardBackgroundCompaction::ShouldNotCompactEmptyTable >> test_postgres.py::TestPostgresSuite::test_postgres_suite[roles] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[char] >> TopicSessionTests::SessionWithPredicateAndSessionWithoutPredicate >> 
test_postgres.py::TestPostgresSuite::test_postgres_suite[char] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[float4] >> test_workload_simple_queue.py::TestWorkloadSimpleQueue::test_workload_simple_queue[row] >> test_tpch.py::TestTpchS1::test_tpch[1] >> test_workload_oltp.py::TestWorkloadSimpleQueue::test_workload_oltp >> test_restarts.py::TestRestartSingleMirror3DC::test_restart_single_node_is_ok >> TestFormatHandler::ClientErrorWithEmptyFilter [GOOD] >> TestJsonParser::Simple1 >> TTxDataShardLocalKMeansScan::MainToPosting [GOOD] >> TTxDataShardLocalKMeansScan::MainToBuild >> TestJsonParser::Simple1 [GOOD] >> TestJsonParser::Simple2 >> TSentinelTests::PDiskFaultyGuard [GOOD] >> TSentinelTests::PDiskFaultyGuardWithForced >> test_postgres.py::TestPostgresSuite::test_postgres_suite[float4] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[numeric] >> TabletService_ExecuteMiniKQL::BasicMiniKQLRead [GOOD] >> TabletService_ExecuteMiniKQL::ParamsMiniKQLRead >> TestJsonParser::Simple2 [GOOD] >> DataShardReplication::ApplyChangesToReplicatedTable [GOOD] >> DataShardReplication::ApplyChangesToCommonTable >> TestJsonParser::Simple3 >> QueryActorTest::Rollback [GOOD] >> QueryActorTest::Commit >> TestJsonParser::Simple3 [GOOD] >> TestJsonParser::Simple4 >> test_example.py::TestExample::test_example [GOOD] >> TestJsonParser::Simple4 [GOOD] >> TSentinelTests::PDiskFaultyGuardWithForced [GOOD] >> TSentinelTests::BSControllerUnresponsive >> TestJsonParser::LargeStrings >> TEvaluateExprInViewTest::EvaluateExpr [GOOD] >> TEvaluateExprInViewTest::NakedCallToCurrentTimeFunction >> TestJsonParser::LargeStrings [GOOD] >> TestJsonParser::ManyValues >> KqpTpch::Query22 [GOOD] >> TestJsonParser::ManyValues [GOOD] |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> TestJsonParser::MissingFields >> test_crud.py::TestYdbCrudOperations::test_crud_operations >> TestJsonParser::MissingFields [GOOD] >> DataShardBackgroundCompaction::ShouldNotCompactEmptyTable [GOOD] >> DataShardBackgroundCompaction::ShouldNotCompactSecondTime >> TestJsonParser::NestedTypes >> Coordinator::ReadStepSubscribe [GOOD] >> Coordinator::LastStepSubscribe >> TestJsonParser::NestedTypes [GOOD] |97.7%| [TA] $(B)/ydb/tests/datashard/async_replication/test-results/py3test/{meta.json ... results_accumulator.log} >> TestJsonParser::SimpleBooleans |97.7%| [TA] {RESULT} $(B)/ydb/tests/datashard/async_replication/test-results/py3test/{meta.json ... 
results_accumulator.log} >> TestJsonParser::SimpleBooleans [GOOD] >> test.py::test[solomon-InvalidProject-] [GOOD] >> test.py::test[solomon-LabelColumns-default.txt] >> TestJsonParser::ManyBatches >> KeyValueGRPCService::SimpleWriteListRange [GOOD] >> KeyValueGRPCService::SimpleGetStorageChannelStatus |97.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> TestJsonParser::ManyBatches [GOOD] >> TestJsonParser::LittleBatches >> TestJsonParser::LittleBatches [GOOD] >> TestJsonParser::MissingFieldsValidation >> TestJsonParser::MissingFieldsValidation [GOOD] >> TabletService_ExecuteMiniKQL::ParamsMiniKQLRead [GOOD] >> TabletService_ExecuteMiniKQL::MalformedParams >> TestJsonParser::TypeKindsValidation >> test_http_api.py::TestHttpApi::test_stop_idempotency [GOOD] >> test_http_api.py::TestHttpApi::test_restart_idempotency >> TopicSessionTests::SessionWithPredicateAndSessionWithoutPredicate [GOOD] >> ServerRestartTest::RestartOnGetSession [GOOD] >> QueryActorTest::Commit [GOOD] >> QueryActorTest::StreamQuery >> TopicSessionTests::SecondSessionWithoutOffsetsAfterSessionConnected >> test_select.py::TestDML::test_select[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> TestJsonParser::TypeKindsValidation [GOOD] >> DataShardReplication::ApplyChangesToCommonTable [GOOD] >> DataShardReplication::ApplyChangesWithConcurrentTx >> TestJsonParser::NumbersValidation >> TestJsonParser::NumbersValidation [GOOD] >> TestJsonParser::StringsValidation ------- [TM] {asan, default-linux-x86_64, pic, release} ydb/core/kqp/tests/kikimr_tpch/unittest >> KqpTpch::Query22 [GOOD] Test command err: -- result -- rowIndex: 0 rowIndex: 4 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 10 -- result -- rowIndex: 0 rowIndex: 5 -- result -- rowIndex: 0 rowIndex: 2 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 4 -- result -- rowIndex: 0 rowIndex: 4 -- result -- rowIndex: 0 rowIndex: 10 -- result -- rowIndex: 0 rowIndex: 20 -- result -- rowIndex: 0 rowIndex: 10 -- result -- rowIndex: 0 rowIndex: 2 -- result -- rowIndex: 0 rowIndex: 28 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 37 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 4 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 1 -- result -- rowIndex: 0 rowIndex: 5 |97.8%| [TM] {RESULT} ydb/core/kqp/tests/kikimr_tpch/unittest >> TestJsonParser::StringsValidation [GOOD] >> TestJsonParser::NestedJsonValidation >> TTxDataShardLocalKMeansScan::MainToBuild [GOOD] >> TTxDataShardLocalKMeansScan::BuildToPosting >> TestJsonParser::NestedJsonValidation [GOOD] >> TMemoryController::SharedCache [GOOD] >> TMemoryController::SharedCache_ConfigLimit >> TestJsonParser::BoolsValidation >> TestJsonParser::BoolsValidation [GOOD] >> TestJsonParser::JsonStructureValidation >> TestJsonParser::JsonStructureValidation [GOOD] >> TestPurecalcFilter::Simple1 >> DataShardBackgroundCompaction::ShouldNotCompactSecondTime [GOOD] >> test.py::test_local [GOOD] |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/server_restart/gtest >> ServerRestartTest::RestartOnGetSession [GOOD] |97.8%| [TM] {RESULT} ydb/public/sdk/cpp/tests/integration/server_restart/gtest >> 
TabletService_ExecuteMiniKQL::MalformedParams [GOOD] >> TabletService_ExecuteMiniKQL::MalformedProgram >> test_drain.py::TestHive::test_drain_on_stop >> test.py::test[solomon-LabelColumns-default.txt] [GOOD] >> test.py::test[solomon-Subquery-default.txt] [SKIPPED] >> test.py::test[solomon-UnknownSetting-] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_background_compaction/unittest >> DataShardBackgroundCompaction::ShouldNotCompactSecondTime [GOOD] Test command err: 2025-06-25T15:23:35.265095Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:23:35.265336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:23:35.265403Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000719/r3tmp/tmpAdb4V9/pdisk_1.dat 2025-06-25T15:23:35.848662Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:23:35.881869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:23:35.996657Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:36.010044Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750865012028562 != 1750865012028566 2025-06-25T15:23:36.057116Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T15:23:36.058124Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T15:23:36.059773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:36.059930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:36.072390Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:23:36.164512Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T15:23:36.164600Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T15:23:36.168252Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T15:23:36.315485Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T15:23:36.315618Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T15:23:36.316463Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE 
EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T15:23:36.316597Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T15:23:36.317008Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T15:23:36.317229Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T15:23:36.317381Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T15:23:36.317716Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T15:23:36.321206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:23:36.322967Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T15:23:36.323041Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T15:23:36.366850Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:23:36.368103Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:23:36.369888Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:23:36.370186Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:23:36.428022Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:23:36.429506Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:23:36.429653Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:23:36.431607Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:23:36.431689Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:23:36.431750Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:23:36.432815Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T15:23:36.432975Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:23:36.433089Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:23:36.444618Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:23:36.502827Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:23:36.504205Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:23:36.504474Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:23:36.504531Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:23:36.504584Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:23:36.504637Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:23:36.504923Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:23:36.504981Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:23:36.506671Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:23:36.506784Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:23:36.506855Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:23:36.506899Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:23:36.507018Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:23:36.507091Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:23:36.507149Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:23:36.507188Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:23:36.507235Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:23:36.508461Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:23:36.508526Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:23:36.508573Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], 
sessionId# [0:0:0] 2025-06-25T15:23:36.508707Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:23:36.508748Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:23:36.508853Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:23:36.509215Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:23:36.509286Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:23:36.509385Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:23:36.509497Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15: ... node 5 :TX_DATASHARD DEBUG: execute_write_unit.cpp:416: Executed write operation for [0:2] at 72075186224037888, row count=3 2025-06-25T15:24:00.750671Z node 5 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-25T15:24:00.750738Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:24:00.750776Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteWrite 2025-06-25T15:24:00.750816Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-25T15:24:00.750873Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:24:00.750963Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:24:00.750998Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-25T15:24:00.751040Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:24:00.751074Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:24:00.751406Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-25T15:24:00.751439Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:24:00.751467Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037888 has finished 2025-06-25T15:24:00.762252Z node 5 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-25T15:24:00.762334Z node 5 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:24:00.762390Z node 5 :TX_DATASHARD TRACE: 
finish_propose_write_unit.cpp:163: Propose transaction complete txid 2 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-25T15:24:00.762542Z node 5 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:24:00.764639Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [5:61:2108] Handle TEvNavigate describe path /Root/table-1 2025-06-25T15:24:00.787968Z node 5 :TX_PROXY DEBUG: describe.cpp:272: Actor# [5:814:2644] HANDLE EvNavigateScheme /Root/table-1 2025-06-25T15:24:00.788574Z node 5 :TX_PROXY DEBUG: describe.cpp:356: Actor# [5:814:2644] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-25T15:24:00.788715Z node 5 :TX_PROXY DEBUG: describe.cpp:435: Actor# [5:814:2644] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/table-1" Options { ShowPrivateTable: true } 2025-06-25T15:24:00.789884Z node 5 :TX_PROXY DEBUG: describe.cpp:448: Actor# [5:814:2644] Handle TEvDescribeSchemeResult Forward to# [5:555:2481] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/table-1" PathDescription { Self { Name: "table-1" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715657 CreateStep: 1000 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } 
ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } PathId: 2 PathOwnerId: 72057594046644480 2025-06-25T15:24:00.792568Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [5:818:2648], Recipient [5:628:2532]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:24:00.792648Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:24:00.792712Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [5:817:2647], serverId# [5:818:2648], sessionId# [0:0:0] 2025-06-25T15:24:00.792887Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553169, Sender [5:816:2646], Recipient [5:628:2532]: NKikimrTxDataShard.TEvGetInfoRequest 2025-06-25T15:24:00.793826Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [5:821:2651], Recipient [5:628:2532]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:24:00.793891Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:24:00.793940Z node 5 :TX_DATASHARD 
DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [5:820:2650], serverId# [5:821:2651], sessionId# [0:0:0] 2025-06-25T15:24:00.794105Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553210, Sender [5:819:2649], Recipient [5:628:2532]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 2 } CompactBorrowed: false 2025-06-25T15:24:00.794282Z node 5 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 1 of 72075186224037888 tableId# 2 localTid# 1001, requested from [5:819:2649], partsCount# 0, memtableSize# 728, memtableWaste# 3880, memtableRows# 3 2025-06-25T15:24:00.865254Z node 5 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 1, ts 1970-01-01T00:00:01.546365Z 2025-06-25T15:24:00.865367Z node 5 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 1, front# 1 2025-06-25T15:24:00.865437Z node 5 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001 sending TEvCompactTableResult to# [5:819:2649]pathId# [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T15:24:00.866269Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [5:619:2526], Recipient [5:628:2532]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-25T15:24:00.866701Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [5:828:2657], Recipient [5:628:2532]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:24:00.866755Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:24:00.866806Z node 5 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [5:827:2656], serverId# [5:828:2657], sessionId# [0:0:0] 2025-06-25T15:24:00.867002Z node 5 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553210, Sender [5:826:2655], Recipient [5:628:2532]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 2 } CompactBorrowed: false 2025-06-25T15:24:00.867116Z node 5 :TX_DATASHARD DEBUG: datashard__compaction.cpp:118: Background compaction of tablet# 72075186224037888 of path# [OwnerId: 72057594046644480, LocalPathId: 2], requested from# [5:826:2655] is not needed |97.8%| [TM] {RESULT} ydb/core/tx/datashard/ut_background_compaction/unittest >> test_postgres.py::TestPostgresSuite::test_postgres_suite[numeric] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[name] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[name] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[int2] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[int2] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[comments] |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> KeyValueGRPCService::SimpleGetStorageChannelStatus [GOOD] >> KeyValueGRPCService::SimpleCreateAlterDropVolume >> test_example.py::TestExample::test_example2 >> DataShardReplication::ApplyChangesWithConcurrentTx [GOOD] >> TEvaluateExprInViewTest::NakedCallToCurrentTimeFunction [GOOD] >> TSelectFromViewTest::OneTable >> test_postgres.py::TestPostgresSuite::test_postgres_suite[comments] [GOOD] >> 
test_postgres.py::TestPostgresSuite::test_postgres_suite[boolean] >> TopicSessionTests::SecondSessionWithoutOffsetsAfterSessionConnected [GOOD] >> TMemoryController::SharedCache_ConfigLimit [GOOD] >> TMemoryController::MemTable >> test_select.py::TestDML::test_select[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> TopicSessionTests::TwoSessionsWithOffsets ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_replication/unittest >> DataShardReplication::ApplyChangesWithConcurrentTx [GOOD] Test command err: 2025-06-25T15:23:16.486426Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:23:16.486595Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:23:16.486648Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000821/r3tmp/tmppxz6tU/pdisk_1.dat 2025-06-25T15:23:17.028833Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:23:17.054754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:23:17.133856Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:17.142585Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750864993508535 != 1750864993508539 2025-06-25T15:23:17.193575Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:61:2108] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-25T15:23:17.194612Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-25T15:23:17.199194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:17.199330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:17.217194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:23:17.315499Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:61:2108] Handle TEvProposeTransaction 2025-06-25T15:23:17.315607Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:61:2108] TxId# 281474976715657 ProcessProposeTransaction 2025-06-25T15:23:17.316787Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:61:2108] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:602:2510] 2025-06-25T15:23:17.467051Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1595: Actor# [1:602:2510] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_GLOBAL } } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-25T15:23:17.467193Z node 1 :TX_PROXY DEBUG: schemereq.cpp:576: Actor# [1:602:2510] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-25T15:23:17.467891Z node 1 
:TX_PROXY DEBUG: schemereq.cpp:1660: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-25T15:23:17.467994Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1650: Actor# [1:602:2510] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-25T15:23:17.468621Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1483: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-25T15:23:17.468893Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1530: Actor# [1:602:2510] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-25T15:23:17.469048Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:602:2510] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-25T15:23:17.469380Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1385: Actor# [1:602:2510] txid# 281474976715657 HANDLE EvClientConnected 2025-06-25T15:23:17.473006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:23:17.474705Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1407: Actor# [1:602:2510] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-25T15:23:17.474796Z node 1 :TX_PROXY DEBUG: schemereq.cpp:556: Actor# [1:602:2510] txid# 281474976715657 SEND to# [1:554:2480] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-25T15:23:17.531036Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:23:17.532600Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:23:17.536982Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:23:17.537286Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:23:17.600896Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:23:17.601813Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:23:17.602007Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:23:17.607026Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:23:17.607165Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:23:17.607239Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 
2025-06-25T15:23:17.610153Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:23:17.610364Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:23:17.610476Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:23:17.621827Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:23:17.667289Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:23:17.669354Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:23:17.669587Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:23:17.669633Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:23:17.669674Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:23:17.669710Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:23:17.669968Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:23:17.670029Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:23:17.671674Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:23:17.671807Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:23:17.671875Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:23:17.671940Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:23:17.672077Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:23:17.672144Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:23:17.672193Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:23:17.672236Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:23:17.672291Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:23:17.673940Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:23:17.674004Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:23:17.674056Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at 
leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:23:17.674185Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:23:17.674242Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:23:17.674357Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:23:17.674772Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:23:17.674836Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:23:17.674932Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:23:17.675042Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Exec ... 06-25T15:24:03.999332Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:24:03.999511Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1500 TxId: 18446744073709551615 } LockTxId: 281474976715660 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 8 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-25T15:24:03.999883Z node 8 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715660, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T15:24:03.999952Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v1500/18446744073709551615 2025-06-25T15:24:04.000026Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[8:825:2656], 0} after executionsCount# 1 2025-06-25T15:24:04.000092Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[8:825:2656], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:24:04.000189Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[8:825:2656], 0} finished in read 2025-06-25T15:24:04.000288Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 2025-06-25T15:24:04.000577Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:24:04.000612Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:24:04.000642Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:24:04.000699Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037888 is Executed 
2025-06-25T15:24:04.000724Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:24:04.000754Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037888 has finished 2025-06-25T15:24:04.000816Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:24:04.000950Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T15:24:04.001180Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [8:63:2110], Recipient [8:628:2532]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715660 LockNode: 8 Status: STATUS_SUBSCRIBED 2025-06-25T15:24:04.002050Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [8:825:2656], Recipient [8:628:2532]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:24:04.002126Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 11 } } 2025-06-25T15:24:04.005845Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [8:829:2660], Recipient [8:628:2532]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:24:04.005926Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:24:04.006009Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [8:828:2659], serverId# [8:829:2660], sessionId# [0:0:0] 2025-06-25T15:24:04.006278Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549570, Sender [8:827:2658], Recipient [8:628:2532]: NKikimrTxDataShard.TEvApplyReplicationChanges TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Source: "my-source" Changes { SourceOffset: 1 WriteTxId: 0 Key: "\001\000\004\000\000\000\001\000\000\000" Upsert { Tags: 2 Data: "\001\000\004\000\000\000\025\000\000\000" } } 2025-06-25T15:24:04.006487Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1000/281474976715657 IncompleteEdge# v{min} UnprotectedReadEdge# v1500/18446744073709551615 ImmediateWriteEdge# v1000/18446744073709551615 ImmediateWriteEdgeReplied# v1000/18446744073709551615 2025-06-25T15:24:04.006628Z node 8 :TX_DATASHARD TRACE: locks.cpp:194: Lock 281474976715660 marked broken at v{min} 2025-06-25T15:24:04.017964Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-25T15:24:04.018878Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 270270977, Sender [8:25:2072], Recipient [8:628:2532]: {TEvNotifyPlanStep TabletId# 72075186224037888 PlanStep# 1501} 2025-06-25T15:24:04.018937Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3172: StateWork, processing event TEvMediatorTimecast::TEvNotifyPlanStep 2025-06-25T15:24:04.018987Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-25T15:24:04.019056Z node 8 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 
2025-06-25T15:24:04.175411Z node 8 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976715661. Ctx: { TraceId: 01jykv0rhke39swybjgfx4gzca, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=8&id=YjEyNjI2MGYtZGQ1NDU3MGItZjhkODIzNDgtYjU2Y2EzYWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-25T15:24:04.177340Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553215, Sender [8:852:2677], Recipient [8:628:2532]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1500 TxId: 18446744073709551615 } LockTxId: 281474976715660 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 8 TotalRowsLimit: 1001 LockMode: OPTIMISTIC RangesSize: 1 2025-06-25T15:24:04.177511Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-25T15:24:04.177595Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CheckRead 2025-06-25T15:24:04.177722Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T15:24:04.177783Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CheckRead 2025-06-25T15:24:04.177832Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:24:04.251886Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:24:04.251993Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037888 2025-06-25T15:24:04.252055Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T15:24:04.252088Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-25T15:24:04.252114Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit ExecuteRead 2025-06-25T15:24:04.252140Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit ExecuteRead 2025-06-25T15:24:04.252320Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Snapshot { Step: 1500 TxId: 18446744073709551615 } LockTxId: 281474976715660 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false LockNodeId: 8 TotalRowsLimit: 1001 LockMode: OPTIMISTIC } 2025-06-25T15:24:04.252650Z node 8 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715660, counter# 18446744073709551612 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T15:24:04.252747Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v1500/18446744073709551615 2025-06-25T15:24:04.252809Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# 
{[8:852:2677], 0} after executionsCount# 1 2025-06-25T15:24:04.252875Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[8:852:2677], 0} sends rowCount# 1, bytes# 32, quota rows left# 1000, quota bytes left# 5242848, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-25T15:24:04.252987Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[8:852:2677], 0} finished in read 2025-06-25T15:24:04.253070Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T15:24:04.253122Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit ExecuteRead 2025-06-25T15:24:04.253156Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:24:04.253183Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:24:04.253234Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037888 is Executed 2025-06-25T15:24:04.253256Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:24:04.253282Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037888 has finished 2025-06-25T15:24:04.253332Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-25T15:24:04.253469Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-25T15:24:04.254218Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553219, Sender [8:852:2677], Recipient [8:628:2532]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-25T15:24:04.254290Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-25T15:24:04.257922Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [8:63:2110], Recipient [8:628:2532]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715660 LockNode: 8 Status: STATUS_NOT_FOUND { items { uint32_value: 1 } items { uint32_value: 11 } } |97.8%| [TM] {RESULT} ydb/core/tx/datashard/ut_replication/unittest >> TestPurecalcFilter::Simple1 [GOOD] >> TTxDataShardLocalKMeansScan::BuildToPosting [GOOD] >> TTxDataShardLocalKMeansScan::BuildToBuild >> KafkaProtocol::BalanceScenarioCdc [GOOD] >> KafkaProtocol::OffsetCommitAndFetchScenario >> TabletService_ExecuteMiniKQL::MalformedProgram [GOOD] >> TabletService_ExecuteMiniKQL::DryRunEraseRow >> TestPurecalcFilter::Simple2 |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serializable/py3test >> test.py::test_local [GOOD] |97.8%| [TM] {RESULT} ydb/tests/functional/serializable/py3test |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> Coordinator::LastStepSubscribe [GOOD] >> Coordinator::RestoreDomainConfiguration >> test.py::test[solomon-UnknownSetting-] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/quoter/ut/unittest >> QuoterWithKesusTest::CanKillKesusWhenUsingIt [GOOD] Test command err: 
2025-06-25T15:21:25.431667Z node 1 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500 2025-06-25T15:21:25.431837Z node 1 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-25T15:21:25.433875Z node 1 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:806: [/Path/KesusName]: Failed to connect to tablet. Status: ERROR 2025-06-25T15:21:25.433953Z node 1 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1143: [/Path/KesusName]: Reconnecting to kesus 2025-06-25T15:21:25.456235Z node 2 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500 2025-06-25T15:21:25.456377Z node 2 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-25T15:21:25.456655Z node 2 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-25T15:21:25.456739Z node 2 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:819: [/Path/KesusName]: Disconnected from tablet 2025-06-25T15:21:25.456802Z node 2 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1143: [/Path/KesusName]: Reconnecting to kesus 2025-06-25T15:21:25.457129Z node 2 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-25T15:21:25.475961Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500 2025-06-25T15:21:25.476052Z node 3 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-25T15:21:25.476472Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "/resource" 2025-06-25T15:21:25.476515Z node 3 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:491: [/Path/KesusName]: Resource "/resource" has incorrect name. Maybe this was some error on client side. 2025-06-25T15:21:25.476553Z node 3 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:354: [/Path/KesusName]: ProxySession("/resource", Error: GenericError) 2025-06-25T15:21:25.476603Z node 3 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-25T15:21:25.476674Z node 3 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "resource//resource" 2025-06-25T15:21:25.476699Z node 3 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:491: [/Path/KesusName]: Resource "resource//resource" has incorrect name. Maybe this was some error on client side. 2025-06-25T15:21:25.476721Z node 3 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:354: [/Path/KesusName]: ProxySession("resource//resource", Error: GenericError) 2025-06-25T15:21:25.490581Z node 4 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. 
Tablet id: 100500 2025-06-25T15:21:25.490700Z node 4 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-25T15:21:25.490821Z node 4 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "res" 2025-06-25T15:21:25.492504Z node 4 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-25T15:21:25.534509Z node 4 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 } } } }) 2025-06-25T15:21:25.534589Z node 4 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res" 2025-06-25T15:21:25.534647Z node 4 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:380: [/Path/KesusName]: ProxySession("res", 42) 2025-06-25T15:21:25.534715Z node 4 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res", Normal, {0: Front(20, 2)} }]) 2025-06-25T15:21:25.551179Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500 2025-06-25T15:21:25.551302Z node 5 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-25T15:21:25.551583Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "res0" 2025-06-25T15:21:25.556491Z node 5 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-25T15:21:25.556820Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } }) 2025-06-25T15:21:25.556857Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res0" 2025-06-25T15:21:25.556904Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:380: [/Path/KesusName]: ProxySession("res0", 42) 2025-06-25T15:21:25.556965Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res0", Normal, {0: Front(1, 2)} }]) 2025-06-25T15:21:25.557072Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "res1" 2025-06-25T15:21:25.557169Z node 5 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:515: [/Path/KesusName]: Subscribe on resource "res1" 2025-06-25T15:21:25.557355Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 43 Error { Status: SUCCESS } EffectiveProps { ResourceId: 43 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } }) 2025-06-25T15:21:25.557387Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res1" 2025-06-25T15:21:25.557422Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:380: [/Path/KesusName]: ProxySession("res1", 43) 2025-06-25T15:21:25.557465Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res1", Normal, {0: Front(1, 2)} }]) 2025-06-25T15:21:25.557552Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "res2" 2025-06-25T15:21:25.557621Z node 5 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:515: [/Path/KesusName]: 
Subscribe on resource "res2" 2025-06-25T15:21:25.557831Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 44 Error { Status: SUCCESS } EffectiveProps { ResourceId: 44 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } }) 2025-06-25T15:21:25.557857Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res2" 2025-06-25T15:21:25.557886Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:380: [/Path/KesusName]: ProxySession("res2", 44) 2025-06-25T15:21:25.557938Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res2", Normal, {0: Front(1, 2)} }]) 2025-06-25T15:21:25.558138Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:730: [/Path/KesusName]: ProxyStats([{"res1", Consumed: 0, Queue: 5}]) 2025-06-25T15:21:25.558181Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:751: [/Path/KesusName]: Set info for resource "res1": { Available: 1, QueueWeight: 5 } 2025-06-25T15:21:25.558220Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:660: [/Path/KesusName]: Activate session to "res1". Connected: 1 2025-06-25T15:21:25.558890Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:583: [/Path/KesusName]: UpdateConsumptionState({ ResourcesInfo { ResourceId: 43 ConsumeResource: true Amount: inf } ActorID { RawX1: 5 RawX2: 21474838532 } }) 2025-06-25T15:21:25.558955Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res1", Normal, {0: Front(1, 2)} }]) 2025-06-25T15:21:25.559209Z node 5 :QUOTER_PROXY WARN: kesus_quoter_proxy.cpp:819: [/Path/KesusName]: Disconnected from tablet 2025-06-25T15:21:25.559238Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1143: [/Path/KesusName]: Reconnecting to kesus 2025-06-25T15:21:25.559358Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:638: [/Path/KesusName]: Mark "res1" for offline allocation. 
Connected: 0, SessionIsActive: 1, AverageDuration: 0.100000s, AverageAmount: 0.5 2025-06-25T15:21:25.559399Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:612: [/Path/KesusName]: Schedule offline allocation in 0.000000s: [{ "res1", 0.5 }] 2025-06-25T15:21:25.559548Z node 5 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-25T15:21:25.559788Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } Results { ResourceId: 43 Error { Status: SUCCESS } EffectiveProps { ResourceId: 43 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } Results { ResourceId: 44 Error { Status: SUCCESS } EffectiveProps { ResourceId: 44 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 5 } } } }) 2025-06-25T15:21:25.559818Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res0" 2025-06-25T15:21:25.559846Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res1" 2025-06-25T15:21:25.559880Z node 5 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res2" 2025-06-25T15:21:25.559951Z node 5 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res0", Normal, {0: Front(1, 2)} }, { "res1", Normal, {0: Front(1, 2)} }, { "res2", Normal, {0: Front(1, 2)} }]) 2025-06-25T15:21:25.568605Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. Tablet id: 100500 2025-06-25T15:21:25.568754Z node 6 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-25T15:21:25.569127Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "res" 2025-06-25T15:21:25.569233Z node 6 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-25T15:21:25.569470Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 HierarchicalDRRResourceConfig { MaxUnitsPerSecond: 100 } } } }) 2025-06-25T15:21:25.569531Z node 6 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:843: [/Path/KesusName]: Initialized new session with resource "res" 2025-06-25T15:21:25.569576Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:380: [/Path/KesusName]: ProxySession("res", 42) 2025-06-25T15:21:25.569636Z node 6 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/Path/KesusName]: ProxyUpdate(Normal, [{ "res", Normal, {0: Front(20, 2)} }]) 2025-06-25T15:21:25.584450Z node 7 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:1086: [/Path/KesusName]: Created kesus quoter proxy. 
Tablet id: 100500 2025-06-25T15:21:25.584588Z node 7 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:1149: [/Path/KesusName]: Connecting to kesus 2025-06-25T15:21:25.584916Z node 7 :QUOTER_PROXY INFO: kesus_quoter_proxy.cpp:484: [/Path/KesusName]: ProxyRequest "res" 2025-06-25T15:21:25.587976Z node 7 :QUOTER_PROXY DEBUG: kesus_quoter_proxy.cpp:798: [/Path/KesusName]: Successfully connected to tablet 2025-06-25T15:21:25.588289Z node 7 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:834: [/Path/KesusName]: SubscribeOnResourceResult({ Results { ResourceId: 42 Error { Status: SUCCESS } EffectiveProps { ResourceId: 42 Hierarchic ... eNotification { Status: SUCCESS } } } 2025-06-25T15:23:36.808607Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:868: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2025-06-25T15:23:36.808669Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:877: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2025-06-25T15:23:36.808721Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2025-06-25T15:23:36.812404Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-25T15:23:36.812471Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1301: Feed resource "Resource". Balance: 0. FreeBalance: 0 2025-06-25T15:23:36.812500Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:346: Schedule next tick for "Resource". Tick size: 0.100000s. Time: 2025-06-25T15:23:36.908000Z 2025-06-25T15:23:36.812526Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1336: Allocate resource "Resource" 2025-06-25T15:23:36.812761Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:730: [/dc-1/KesusQuoter]: ProxyStats([{"Resource", Consumed: 0, Queue: 5}]) 2025-06-25T15:23:36.812806Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:751: [/dc-1/KesusQuoter]: Set info for resource "Resource": { Available: -1.000100628, QueueWeight: 5 } 2025-06-25T15:23:36.812857Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2025-06-25T15:23:36.812922Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-25T15:23:36.903090Z node 50 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72075186224037888] Send TEvResourcesAllocated to [49:7519907985982202641:2263]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } } 2025-06-25T15:23:36.904087Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:868: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2025-06-25T15:23:36.904142Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:877: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2025-06-25T15:23:36.904200Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2025-06-25T15:23:36.908415Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-25T15:23:36.912422Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1301: Feed resource "Resource". Balance: 0. FreeBalance: 0 2025-06-25T15:23:36.912474Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:346: Schedule next tick for "Resource". 
Tick size: 0.100000s. Time: 2025-06-25T15:23:37.008000Z 2025-06-25T15:23:36.912502Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1336: Allocate resource "Resource" 2025-06-25T15:23:36.912680Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:730: [/dc-1/KesusQuoter]: ProxyStats([{"Resource", Consumed: 0, Queue: 5}]) 2025-06-25T15:23:36.912727Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:751: [/dc-1/KesusQuoter]: Set info for resource "Resource": { Available: -0.0001006282617, QueueWeight: 5 } 2025-06-25T15:23:36.912783Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2025-06-25T15:23:36.912845Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-25T15:23:36.955714Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519907981687235119:2383] 2025-06-25T15:23:36.955763Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519907981687235119:2383] 2025-06-25T15:23:37.003385Z node 50 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72075186224037888] Send TEvResourcesAllocated to [49:7519907985982202641:2263]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } } 2025-06-25T15:23:37.006346Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:868: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2025-06-25T15:23:37.006426Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:877: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2025-06-25T15:23:37.006505Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Front(0.9998993717, 2)} }]) 2025-06-25T15:23:37.008405Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1301: Feed resource "Resource". Balance: 0. FreeBalance: 0 2025-06-25T15:23:37.008440Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:346: Schedule next tick for "Resource". Tick size: 0.100000s. 
Time: 2025-06-25T15:23:37.108000Z 2025-06-25T15:23:37.008465Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1336: Allocate resource "Resource" 2025-06-25T15:23:37.008531Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-25T15:23:37.008803Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:730: [/dc-1/KesusQuoter]: ProxyStats([{"Resource", Consumed: 0, Queue: 5}]) 2025-06-25T15:23:37.008836Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:751: [/dc-1/KesusQuoter]: Set info for resource "Resource": { Available: 0.9998993717, QueueWeight: 5 } 2025-06-25T15:23:37.008879Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Front(0.9998993717, 2)} }]) 2025-06-25T15:23:37.008935Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-25T15:23:37.106015Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:868: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2025-06-25T15:23:37.106069Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:877: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2025-06-25T15:23:37.106123Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Front(1.999899372, 2)} }]) 2025-06-25T15:23:37.106191Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-25T15:23:37.104833Z node 50 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72075186224037888] Send TEvResourcesAllocated to [49:7519907985982202641:2263]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } } 2025-06-25T15:23:37.110687Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1301: Feed resource "Resource". Balance: 1.999899372. FreeBalance: 1.999899372 2025-06-25T15:23:37.110733Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:346: Schedule next tick for "Resource". Tick size: 0.100000s. Time: 2025-06-25T15:23:37.208000Z 2025-06-25T15:23:37.110759Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1336: Allocate resource "Resource" 2025-06-25T15:23:37.110823Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:275: Charge "Resource" for 5. Balance: 1.999899372. FreeBalance: 1.999899372. TicksToFullfill: 2.500125792. DurationToFullfillInUs: 250012.5792. TimeToFullfill: 2025-06-25T15:23:36.355649Z. Now: 2025-06-25T15:23:37.110584Z. 
LastAllocated: 2025-06-25T15:23:36.105636Z 2025-06-25T15:23:37.111234Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:730: [/dc-1/KesusQuoter]: ProxyStats([{"Resource", Consumed: 5, Queue: 0}]) 2025-06-25T15:23:37.111267Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:751: [/dc-1/KesusQuoter]: Set info for resource "Resource": { Available: -3.000100628, QueueWeight: 0 } 2025-06-25T15:23:37.111327Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2025-06-25T15:23:37.111757Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-25T15:23:37.203992Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:868: [/dc-1/KesusQuoter]: ResourcesAllocated({ ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } }) 2025-06-25T15:23:37.204059Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:877: [/dc-1/KesusQuoter]: Kesus allocated {"Resource", 1} 2025-06-25T15:23:37.204128Z node 49 :QUOTER_PROXY TRACE: kesus_quoter_proxy.cpp:1022: [/dc-1/KesusQuoter]: ProxyUpdate(Normal, [{ "Resource", Normal, {0: Sustained(0, 0)} }]) 2025-06-25T15:23:37.203547Z node 50 :KESUS_TABLET TRACE: quoter_runtime.cpp:93: [72075186224037888] Send TEvResourcesAllocated to [49:7519907985982202641:2263]. Cookie: 0. Data: { ResourcesInfo { ResourceId: 1 Amount: 1 StateNotification { Status: SUCCESS } } } 2025-06-25T15:23:37.204513Z node 49 :QUOTER_SERVICE DEBUG: quoter_service.cpp:1085: ProxyUpdate for quoter /dc-1/KesusQuoter 2025-06-25T15:23:37.209410Z node 49 :QUOTER_SERVICE TRACE: quoter_service.cpp:1301: Feed resource "Resource". Balance: 0. FreeBalance: 0 2025-06-25T15:23:37.957505Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519907981687235119:2383] 2025-06-25T15:23:37.957551Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519907981687235119:2383] 2025-06-25T15:23:38.957971Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519907981687235119:2383] 2025-06-25T15:23:38.958011Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519907981687235119:2383] 2025-06-25T15:23:39.683989Z node 49 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[49:7519907977392267226:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:23:39.684095Z node 49 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:23:39.954909Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519907981687235119:2383] 2025-06-25T15:23:39.954944Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519907981687235119:2383] 2025-06-25T15:23:40.022628Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037968897] send [49:7519907981687234730:2113] 2025-06-25T15:23:40.022668Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037968897] push event to server [49:7519907981687234730:2113] 2025-06-25T15:23:40.410362Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037968897] send [49:7519907981687234730:2113] 
2025-06-25T15:23:40.410405Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037968897] push event to server [49:7519907981687234730:2113] 2025-06-25T15:23:40.825973Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037936129] send [49:7519907977392267182:2081] 2025-06-25T15:23:40.826012Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037936129] push event to server [49:7519907977392267182:2081] 2025-06-25T15:23:40.955462Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046382081] send [49:7519907981687235119:2383] 2025-06-25T15:23:40.955503Z node 49 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046382081] push event to server [49:7519907981687235119:2383] |97.8%| [TM] {RESULT} ydb/core/quoter/ut/unittest >> QueryActorTest::StreamQuery [GOOD] |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_postgres.py::TestPostgresSuite::test_postgres_suite[boolean] [GOOD] >> test_postgres.py::TestPostgresSuite::test_postgres_suite[strings] |97.8%| [TM] {asan, default-linux-x86_64, pic, release} ydb/tests/fq/solomon/py3test >> test.py::test[solomon-UnknownSetting-] [GOOD] |97.8%| [TM] {RESULT} ydb/tests/fq/solomon/py3test |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test ------- [TS] {asan, default-linux-x86_64, release} ydb/library/query_actor/ut/unittest >> QueryActorTest::StreamQuery [GOOD] Test command err: 2025-06-25T15:23:42.522556Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519908012381137024:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:23:42.523388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007ba/r3tmp/tmpSRuntW/pdisk_1.dat 2025-06-25T15:23:43.195798Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:43.206418Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:43.206527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:43.215876Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11098 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: 2025-06-25T15:23:43.523564Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T15:23:43.574319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:23:43.649469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:23:43.839219Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: dc-1 2025-06-25T15:23:45.605919Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T15:23:45.608202Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T15:23:45.631712Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993208683.919952s seconds to be completed 2025-06-25T15:23:45.651320Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=1&id=ZjIzZDkwMWYtNTA0ZjRjODEtNjljM2Q0ZGUtNzAxZTUwMmM=, workerId: [1:7519908025266039403:2276], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-25T15:23:45.651386Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T15:23:45.651552Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T15:23:45.651751Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T15:23:45.651852Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T15:23:45.651888Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 
2025-06-25T15:23:45.651912Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 2025-06-25T15:23:45.651943Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T15:23:45.652550Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T15:23:45.652580Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T15:23:45.656835Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: SELECT 42 2025-06-25T15:23:45.661250Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=ZjIzZDkwMWYtNTA0ZjRjODEtNjljM2Q0ZGUtNzAxZTUwMmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [1:7519908025266039403:2276] 2025-06-25T15:23:45.661303Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 3 timeout: 300.000000s actor id: [1:7519908025266039405:2353] 2025-06-25T15:23:45.663938Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519908025266039414:2281], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:23:45.663938Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519908025266039406:2278], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:23:45.664023Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:23:45.673475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:23:45.690806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-25T15:23:45.691823Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519908025266039420:2282], DatabaseId: /dc-1, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T15:23:45.782240Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519908025266039471:2388] txid# 281474976710660, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:23:46.652606Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T15:23:46.829329Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: Forwarded response to sender actor, requestId: 3, sender: [1:7519908025266039404:2277], selfId: [1:7519908012381137056:2238], source: [1:7519908025266039403:2276] 2025-06-25T15:23:46.829659Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=ZjIzZDkwMWYtNTA0ZjRjODEtNjljM2Q0ZGUtNzAxZTUwMmM=, TxId: 2025-06-25T15:23:46.830702Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=ZjIzZDkwMWYtNTA0ZjRjODEtNjljM2Q0ZGUtNzAxZTUwMmM=, TxId: 2025-06-25T15:23:46.831055Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=1&id=ZjIzZDkwMWYtNTA0ZjRjODEtNjljM2Q0ZGUtNzAxZTUwMmM=, workerId: [1:7519908025266039403:2276], local sessions count: 0 2025-06-25T15:23:47.433086Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519908032022968884:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:23:47.433137Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007ba/r3tmp/tmpc10BEo/pdisk_1.dat 2025-06-25T15:23:47.650196Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:47.759818Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:47.759954Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:47.761610Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2311 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T15:23:48.049683Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057 ... Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-25T15:23:58.529559Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:23:58.558404Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... 2025-06-25T15:23:58.632569Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: dc-1 2025-06-25T15:23:59.088412Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:24:01.477179Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T15:24:01.479132Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T15:24:01.496419Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993208668.055224s seconds to be completed 2025-06-25T15:24:01.498327Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=4&id=MWViYjBiOWYtZTE0NzVkYjUtYWYxYjkyY2YtZDcwYjk1NzI=, workerId: [4:7519908091755467219:2276], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-25T15:24:01.498364Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:454: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-25T15:24:01.498523Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T15:24:01.498565Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:524: Subscribed for config changes. 2025-06-25T15:24:01.498644Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:531: Updated table service config. 
2025-06-25T15:24:01.498661Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1692: Updated YQL logs priority to current level: 4 2025-06-25T15:24:01.498723Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T15:24:01.498762Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T15:24:01.499499Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:269: [TQueryBase] RunStreamQuery: DECLARE $value AS Text; DECLARE $table_size AS Uint64; SELECT x FROM AS_TABLE( ()->(Yql::ToStream(ListReplicate(<|x:$value|>, $table_size))) ); 2025-06-25T15:24:01.500627Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T15:24:01.500688Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T15:24:01.502211Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:288: [TQueryBase] Start read next stream part 2025-06-25T15:24:01.504770Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jykv0p2y68ahrw503wy70abd", Created new session, sessionId: ydb://session/3?node_id=4&id=OGI5Y2MzMzEtNGNlM2VmZDMtMmVjZWUzOGYtY2MzNzFjZDY=, workerId: [4:7519908091755467245:2277], database: /dc-1, longSession: 0, local sessions count: 2 2025-06-25T15:24:01.505040Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jykv0p2y68ahrw503wy70abd, Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=OGI5Y2MzMzEtNGNlM2VmZDMtMmVjZWUzOGYtY2MzNzFjZDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [4:7519908091755467245:2277] 2025-06-25T15:24:01.505068Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 3 timeout: 600.000000s actor id: [4:7519908091755467246:2351] 2025-06-25T15:24:01.506821Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519908091755467247:2278], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:24:01.506897Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:24:01.507767Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7519908091755467259:2281], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:24:01.512295Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:24:01.532012Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7519908091755467261:2282], DatabaseId: /dc-1, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T15:24:01.631495Z node 4 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [4:7519908091755467312:2386] txid# 281474976710660, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:24:02.740569Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T15:24:03.080445Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7519908078870564698:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:24:03.080534Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:24:07.334258Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:299: [TQueryBase] TEvStreamQueryResultPart SUCCESS, Issues: 2025-06-25T15:24:07.346351Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:329: [TQueryBase] Cancel stream request 2025-06-25T15:24:07.346448Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=MWViYjBiOWYtZTE0NzVkYjUtYWYxYjkyY2YtZDcwYjk1NzI=, TxId: 2025-06-25T15:24:07.349789Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: dc-1 2025-06-25T15:24:07.683058Z node 4 :RPC_REQUEST WARN: rpc_stream_execute_scan_query.cpp:410: Client lost 2025-06-25T15:24:08.116065Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=4&id=MWViYjBiOWYtZTE0NzVkYjUtYWYxYjkyY2YtZDcwYjk1NzI=, workerId: [4:7519908091755467219:2276], local sessions count: 1 2025-06-25T15:24:08.124729Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:437: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-25T15:24:08.125005Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1489: Request has 18444993208661.426627s seconds to be completed 2025-06-25T15:24:08.127088Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: Created new session, sessionId: ydb://session/3?node_id=4&id=NTMxN2EzNTAtYTI0MTM0MWUtZTMyMDI4NDMtMzZjOWI1ZGE=, workerId: [4:7519908121820238461:2306], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-25T15:24:08.127249Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:663: Received create session request, trace_id: 2025-06-25T15:24:08.128165Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:269: [TQueryBase] RunStreamQuery: DECLARE $value AS Text; DECLARE $table_size AS Uint64; SELECT x FROM AS_TABLE( ()->(Yql::ToStream(ListReplicate(<|x:$value|>, $table_size))) ); 2025-06-25T15:24:08.128247Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:288: [TQueryBase] Start read next stream part 2025-06-25T15:24:08.134318Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1564: TraceId: "01jykv0wj040qnv82rxa91jskh", Created new session, sessionId: ydb://session/3?node_id=4&id=ZjY1OWM1YjctZmNhOGI0YjYtYWVjZDlmOWItMTA2MTNhOWQ=, workerId: [4:7519908121820238464:2307], database: /dc-1, longSession: 0, local sessions count: 3 2025-06-25T15:24:08.134542Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:788: Ctx: { TraceId: 01jykv0wj040qnv82rxa91jskh, Database: /dc-1, DatabaseId: , SessionId: 
ydb://session/3?node_id=4&id=ZjY1OWM1YjctZmNhOGI0YjYtYWVjZDlmOWItMTA2MTNhOWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 5, targetId: [4:7519908121820238464:2307] 2025-06-25T15:24:08.134572Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1315: Scheduled timeout timer for requestId: 5 timeout: 600.000000s actor id: [4:7519908121820238465:2427] 2025-06-25T15:24:08.326464Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:299: [TQueryBase] TEvStreamQueryResultPart SUCCESS, Issues: 2025-06-25T15:24:08.335453Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1750865048301, txId: 281474976710663] shutting down 2025-06-25T15:24:08.338324Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:974: TraceId: "01jykv0wj040qnv82rxa91jskh", Forwarded response to sender actor, requestId: 5, sender: [4:7519908121820238462:2422], selfId: [4:7519908078870564714:2073], source: [4:7519908121820238464:2307] 2025-06-25T15:24:08.338764Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=4&id=ZjY1OWM1YjctZmNhOGI0YjYtYWVjZDlmOWItMTA2MTNhOWQ=, workerId: [4:7519908121820238464:2307], local sessions count: 2 2025-06-25T15:24:08.343643Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:288: [TQueryBase] Start read next stream part 2025-06-25T15:24:08.347726Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:299: [TQueryBase] TEvStreamQueryResultPart SUCCESS, Issues: 2025-06-25T15:24:08.347822Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=NTMxN2EzNTAtYTI0MTM0MWUtZTMyMDI4NDMtMzZjOWI1ZGE=, TxId: 2025-06-25T15:24:08.349437Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1374: Session closed, sessionId: ydb://session/3?node_id=4&id=NTMxN2EzNTAtYTI0MTM0MWUtZTMyMDI4NDMtMzZjOWI1ZGE=, workerId: [4:7519908121820238461:2306], local sessions count: 1 |97.8%| [TS] {RESULT} ydb/library/query_actor/ut/unittest >> test_http_api.py::TestHttpApi::test_restart_idempotency [GOOD] >> test_http_api.py::TestHttpApi::test_simple_streaming_query >> TabletService_ExecuteMiniKQL::DryRunEraseRow [GOOD] >> TabletService_ExecuteMiniKQL::OnlyAdminsAllowed >> TestPurecalcFilter::Simple2 [GOOD] >> Coordinator::RestoreDomainConfiguration [GOOD] >> Coordinator::RestoreTenantConfiguration-AlterDatabaseCreateHiveFirst-false >> KeyValueGRPCService::SimpleCreateAlterDropVolume [GOOD] >> KeyValueGRPCService::SimpleListPartitions [GOOD] >> TestPurecalcFilter::ManyValues |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_example.py::TestExample::test_example2 [GOOD] >> test_crud.py::TestYdbCrudOperations::test_crud_operations [GOOD] >> DataShardStats::SharedCacheGarbage [GOOD] >> DataShardStats::CollectStatsForSeveralParts >> KafkaProtocol::OffsetCommitAndFetchScenario [GOOD] >> KafkaProtocol::CreateTopicsScenarioWithKafkaAuth ------- [TM] {asan, default-linux-x86_64, release} ydb/services/keyvalue/ut/unittest >> KeyValueGRPCService::SimpleListPartitions [GOOD] Test command err: 2025-06-25T15:22:23.870501Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519907670333710747:2238];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:22:23.874127Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot 
detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000737/r3tmp/tmpZAhAdh/pdisk_1.dat 2025-06-25T15:22:24.599272Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:22:24.610445Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:22:24.610539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:22:24.621790Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10189, node 1 2025-06-25T15:22:24.742902Z node 1 :GRPC_SERVER NOTICE: grpc_request_proxy.cpp:367: Grpc request proxy started, nodeid# 1, serve as static node 2025-06-25T15:22:24.744120Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:557: Subscribe to /Root 2025-06-25T15:22:24.744193Z node 1 :GRPC_SERVER NOTICE: grpc_request_proxy.cpp:367: Grpc request proxy started, nodeid# 1, serve as static node 2025-06-25T15:22:24.744324Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:557: Subscribe to /Root 2025-06-25T15:22:24.745198Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:403: Subscribed for config changes 2025-06-25T15:22:24.745213Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:411: Updated app config 2025-06-25T15:22:24.745298Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:403: Subscribed for config changes 2025-06-25T15:22:24.745306Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:411: Updated app config 2025-06-25T15:22:24.760719Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:420: Got proxy service configuration 2025-06-25T15:22:24.764973Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:420: Got proxy service configuration 2025-06-25T15:22:24.791021Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:22:24.805849Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T15:22:24.805902Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T15:22:24.805925Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-25T15:22:24.805935Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-25T15:22:25.065416Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002a080] created request Name# BlobStorageConfig 2025-06-25T15:22:25.072497Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002a680] created request Name# HiveCreateTablet 2025-06-25T15:22:25.073075Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002ac80] created request Name# TabletStateRequest 2025-06-25T15:22:25.076418Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002b280] created request Name# SchemeOperationStatus 2025-06-25T15:22:25.080407Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002b880] created request Name# ChooseProxy 2025-06-25T15:22:25.080807Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002be80] created request Name# ResolveNode 2025-06-25T15:22:25.084405Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002c480] created request Name# FillNode 2025-06-25T15:22:25.084758Z node 1 
:GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002ca80] created request Name# DrainNode 2025-06-25T15:22:25.085043Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002d080] created request Name# InterconnectDebug 2025-06-25T15:22:25.085310Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002d680] created request Name# TestShardControl 2025-06-25T15:22:25.087100Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002dc80] created request Name# RegisterNode 2025-06-25T15:22:25.087421Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002e280] created request Name# CmsRequest 2025-06-25T15:22:25.092418Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002e880] created request Name# ConsoleRequest 2025-06-25T15:22:25.092745Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a00002ee80] created request Name# SchemeInitRoot 2025-06-25T15:22:25.096397Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000cfc80] created request Name# PersQueueRequest 2025-06-25T15:22:25.096749Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000d0280] created request Name# SchemeOperation 2025-06-25T15:22:25.100407Z node 1 :GRPC_SERVER DEBUG: grpc_server.cpp:82: [0x51a0000d0880] created request Name# SchemeDescribe 2025-06-25T15:22:25.349017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:22:25.349056Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:22:25.349063Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:22:25.349181Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19393 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-25T15:22:26.292909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:385: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "hdd2-pool" Kind: "hdd2" } StoragePools { Name: "hdd-pool" Kind: "hdd" } StoragePools { Name: "hdd1-pool" Kind: "hdd1" } StoragePools { Name: "ssd-pool" Kind: "ssd" } StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976715657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-25T15:22:26.295212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T15:22:26.300543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:504: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-25T15:22:26.300585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5234: CreateTx for txid 281474976715657:0 type: TxAlterSubDomain target path: [OwnerId: 72057594046644480, LocalPathId: 1] source path: 2025-06-25T15:22:26.304449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:135: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-25T15:22:26.304567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:22:26.313126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:464: TTxOperationPropose Complete, txId: 281474976715657, response: Status: StatusAccepted TxId: 281474976715657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-25T15:22:26.314732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-25T15:22:26.314953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T15:22:26.315000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:315: TCreateParts opId# 281474976715657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-25T15:22:26.315076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:369: TCreateParts opId# 281474976715657:0 ProgressState no shards to create, do next state 2025-06-25T15:22:26.315089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: Change state for txid 281474976715657:0 2 -> 3 2025-06-25T15:22:26.322719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T15:22:26.322787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976715657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-25T15:22:26.322808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2563: 
Change state for txid 281474976715657:0 3 -> 128 waiting... 2025-06-25T15:22:26.325593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:498: TTxOperationProgress Execute, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T15:22:26.325623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-25T15:22:26.325672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715657:0, at tablet# 72057594046644480 2025-06-25T15:22:26.325697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1677: TOperation IsReadyToPropose , TxId: 281474976715657 ready parts: 1/1 2025-06-25T15:22:26.333538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1746: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-25T15:22:26.333945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:22:26.333971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1633: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 0/1, is published: ... lications: 2, subscribers: 1 2025-06-25T15:24:07.307520Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976715662, [OwnerId: 72057594046644480, LocalPathId: 2], 7 2025-06-25T15:24:07.307531Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:991: Publication details: tx: 281474976715662, [OwnerId: 72057594046644480, LocalPathId: 3], 18446744073709551615 2025-06-25T15:24:07.308248Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046644480, cookie: 281474976715662 2025-06-25T15:24:07.308364Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046644480, cookie: 281474976715662 2025-06-25T15:24:07.308384Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046644480, txId: 281474976715662 2025-06-25T15:24:07.308411Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715662, pathId: [OwnerId: 72057594046644480, LocalPathId: 3], version: 18446744073709551615 2025-06-25T15:24:07.308437Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-25T15:24:07.310511Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5918: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 
72057594046644480, cookie: 281474976715662 2025-06-25T15:24:07.310623Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046644480, cookie: 281474976715662 2025-06-25T15:24:07.310642Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715662 2025-06-25T15:24:07.310671Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715662, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 7 2025-06-25T15:24:07.310694Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-25T15:24:07.310771Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715662, subscribers: 1 2025-06-25T15:24:07.310796Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [33:7519908119205931513:2306] 2025-06-25T15:24:07.315233Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-25T15:24:07.315271Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-25T15:24:07.315282Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-25T15:24:07.315410Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715662 2025-06-25T15:24:07.315444Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715662 2025-06-25T15:24:07.319132Z node 33 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ListDirectoryRequest, traceId# 01jykv0vrp3wh0zdm70ew1gy6p, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:37418, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-25T15:24:07.322444Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-25T15:24:07.322883Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-25T15:24:07.323160Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-25T15:24:07.323336Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: 
DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-25T15:24:07.323479Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6022: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-25T15:24:07.323612Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-25T15:24:07.323769Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-25T15:24:07.323796Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-25T15:24:07.328445Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:515: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-25T15:24:07.332637Z node 33 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 33, TabletId: 72075186224037889 not found 2025-06-25T15:24:07.332684Z node 33 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 33, TabletId: 72075186224037890 not found 2025-06-25T15:24:07.332702Z node 33 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 33, TabletId: 72075186224037888 not found 2025-06-25T15:24:07.333747Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-25T15:24:07.333788Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-25T15:24:07.333848Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-25T15:24:07.333858Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-25T15:24:07.333877Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-25T15:24:07.333896Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-25T15:24:07.333938Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-25T15:24:07.335179Z node 33 :KEYVALUE DEBUG: keyvalue_flat_impl.h:365: KeyValue# 72075186224037889 OnTabletDead NKikimr::TEvTablet::TEvTabletDead 2025-06-25T15:24:07.335882Z node 33 :KEYVALUE DEBUG: keyvalue_flat_impl.h:365: KeyValue# 72075186224037890 OnTabletDead NKikimr::TEvTablet::TEvTabletDead 2025-06-25T15:24:07.336202Z node 33 :KEYVALUE DEBUG: keyvalue_flat_impl.h:365: KeyValue# 72075186224037888 OnTabletDead NKikimr::TEvTablet::TEvTabletDead 2025-06-25T15:24:07.372441Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00001fe80] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 
2025-06-25T15:24:07.372823Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000135680] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.373100Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00001f280] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.373393Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d7a80] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.373682Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000020a80] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.373964Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000021080] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.374270Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000116480] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.374554Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00010c880] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.374759Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000d9280] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.374834Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000032480] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.375080Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f4280] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.375130Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000114c80] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.375350Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000c7880] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.375449Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a00002e880] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.375648Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a0000f9680] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.375764Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000122480] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-25T15:24:07.375917Z node 33 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x51a000132680] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 >> TopicSessionTests::TwoSessionsWithOffsets [GOOD] |97.8%| [TM] {RESULT} ydb/services/keyvalue/ut/unittest >> TTxDataShardLocalKMeansScan::BuildToBuild [GOOD] >> TTxDataShardLocalKMeansScan::BuildToBuild_Ranges >> TopicSessionTests::BadDataSessionError >> TSentinelTests::BSControllerUnresponsive [GOOD] >> test_http_api.py::TestHttpApi::test_simple_streaming_query [GOOD] >> test_http_api.py::TestHttpApi::test_integral_results >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_async_table-True] |97.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> 
test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_table-True] |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> TabletService_ExecuteMiniKQL::OnlyAdminsAllowed [GOOD] >> TabletService_Restart::Basics >> test_result_limits.py::TestResultLimits::test_many_rows [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut_sentinel/unittest >> TSentinelTests::BSControllerUnresponsive [GOOD] Test command err: 2025-06-25T15:22:42.200406Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-25T15:22:42.200499Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-25T15:22:42.200586Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-25T15:22:42.200622Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-25T15:22:42.200681Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-25T15:22:42.200763Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-25T15:22:42.203379Z node 1 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "node-1" State: UNKNOWN Devices { Name: "pdisk-1-4" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-5" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-6" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-7" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 1 InterconnectPort: 10000 Location { Rack: "rack-1" } StartTimeSeconds: 0 } Hosts { Name: "node-2" State: UNKNOWN Devices { Name: "pdisk-2-8" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-9" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-10" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-11" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 2 InterconnectPort: 10000 Location { Rack: "rack-2" } StartTimeSeconds: 0 } Hosts { Name: "node-3" State: UNKNOWN Devices { Name: "pdisk-3-12" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-13" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-14" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-15" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 3 InterconnectPort: 10000 Location { Rack: "rack-3" } StartTimeSeconds: 0 } Hosts { Name: "node-4" State: UNKNOWN Devices { Name: "pdisk-4-16" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-17" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-18" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-19" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 4 InterconnectPort: 10000 Location { Rack: "rack-4" } StartTimeSeconds: 0 } Hosts { Name: "node-5" State: UNKNOWN Devices { Name: "pdisk-5-20" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-21" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-22" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-23" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 5 InterconnectPort: 10000 Location { Rack: "rack-5" } StartTimeSeconds: 0 } Hosts { Name: "node-6" State: UNKNOWN Devices { Name: "pdisk-6-24" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-25" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-26" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-27" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 6 InterconnectPort: 10000 Location { Rack: "rack-6" } StartTimeSeconds: 0 } Hosts { Name: "node-7" State: UNKNOWN Devices { Name: "pdisk-7-28" 
State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-29" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-30" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-31" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 7 InterconnectPort: 10000 Location { Rack: "rack-7" } StartTimeSeconds: 0 } Hosts { Name: "node-8" State: UNKNOWN Devices { Name: "pdisk-8-32" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-33" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-34" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-35" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 8 InterconnectPort: 10000 Location { Rack: "rack-8" } StartTimeSeconds: 0 } } 2025-06-25T15:22:42.209208Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 4 Path: "/1/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 5 Path: "/1/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 6 Path: "/1/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 7 Path: "/1/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 8 Path: "/2/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 9 Path: "/2/pdisk-9.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 10 Path: "/2/pdisk-10.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 11 Path: "/2/pdisk-11.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 12 Path: "/3/pdisk-12.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 13 Path: "/3/pdisk-13.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 14 Path: "/3/pdisk-14.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 15 Path: "/3/pdisk-15.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 16 Path: "/4/pdisk-16.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 17 Path: "/4/pdisk-17.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 18 Path: "/4/pdisk-18.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 19 Path: "/4/pdisk-19.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 20 Path: "/5/pdisk-20.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 21 Path: "/5/pdisk-21.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 22 Path: "/5/pdisk-22.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 23 Path: "/5/pdisk-23.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 24 Path: "/6/pdisk-24.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 25 Path: "/6/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 26 Path: "/6/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 27 Path: "/6/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 28 Path: "/7/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 29 Path: "/7/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 30 Path: "/7/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 31 Path: "/7/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 32 Path: "/8/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 33 Path: "/8/pdisk-33.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 34 Path: "/8/pdisk-34.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 35 Path: "/8/pdisk-35.data" Guid: 1 DriveStatus: ACTIVE } 
VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 
FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1002 ... 
Size: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 301 CreateTime: 0 ChangeTime: 0 Path: "/75/pdisk-301.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 302 CreateTime: 0 ChangeTime: 0 Path: "/75/pdisk-302.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 303 CreateTime: 0 ChangeTime: 0 Path: "/75/pdisk-303.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860028 2025-06-25T15:24:14.284957Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 76, response# PDiskStateInfo { PDiskId: 304 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-304.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 305 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-305.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 306 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-306.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 307 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-307.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860028 2025-06-25T15:24:14.285072Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 77, response# PDiskStateInfo { PDiskId: 308 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-308.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 309 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-309.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 310 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-310.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 311 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-311.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37860028 2025-06-25T15:24:14.285167Z node 71 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-25T15:24:14.285517Z node 71 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 77:311, status# INACTIVE, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 60 StateLimit# 60, dry run# 0 2025-06-25T15:24:14.285580Z node 71 :CMS NOTICE: sentinel.cpp:1039: [Sentinel] [Main] PDisk status changed: pdiskId# 72:288, status# INACTIVE, required status# ACTIVE, reason# PrevState# Normal State# Normal StateCounter# 60 StateLimit# 60, dry run# 0 2025-06-25T15:24:14.285621Z node 71 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 2 2025-06-25T15:24:14.296042Z node 71 :CMS DEBUG: sentinel.cpp:1262: [Sentinel] [Main] Retrying: attempt# 1 2025-06-25T15:24:14.296108Z node 71 :CMS DEBUG: sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 2 2025-06-25T15:24:14.308758Z node 71 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-25T15:24:14.308823Z node 71 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-25T15:24:14.308965Z node 71 :CMS DEBUG: sentinel.cpp:1262: [Sentinel] [Main] Retrying: attempt# 2 2025-06-25T15:24:14.308997Z node 71 :CMS DEBUG: 
sentinel.cpp:1076: [Sentinel] [Main] Change pdisk status: requestsSize# 2 2025-06-25T15:24:14.309152Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 71, wbId# [71:8388350642965737326:1634689637] 2025-06-25T15:24:14.309194Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 72, wbId# [72:8388350642965737326:1634689637] 2025-06-25T15:24:14.309226Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 73, wbId# [73:8388350642965737326:1634689637] 2025-06-25T15:24:14.309258Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 74, wbId# [74:8388350642965737326:1634689637] 2025-06-25T15:24:14.309290Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 75, wbId# [75:8388350642965737326:1634689637] 2025-06-25T15:24:14.309322Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 76, wbId# [76:8388350642965737326:1634689637] 2025-06-25T15:24:14.309380Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 77, wbId# [77:8388350642965737326:1634689637] 2025-06-25T15:24:14.309417Z node 71 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 78, wbId# [78:8388350642965737326:1634689637] 2025-06-25T15:24:14.309750Z node 71 :CMS DEBUG: sentinel.cpp:1202: [Sentinel] [Main] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true } Status { }, cookie# 123 2025-06-25T15:24:14.309796Z node 71 :CMS ERROR: sentinel.cpp:1244: [Sentinel] [Main] Unsuccesful response from BSC: error# 2025-06-25T15:24:14.310298Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 71, response# PDiskStateInfo { PDiskId: 284 CreateTime: 0 ChangeTime: 0 Path: "/71/pdisk-284.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 285 CreateTime: 0 ChangeTime: 0 Path: "/71/pdisk-285.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 286 CreateTime: 0 ChangeTime: 0 Path: "/71/pdisk-286.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 287 CreateTime: 0 ChangeTime: 0 Path: "/71/pdisk-287.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-25T15:24:14.310699Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 77, response# PDiskStateInfo { PDiskId: 308 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-308.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 309 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-309.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 310 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-310.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 311 CreateTime: 0 ChangeTime: 0 Path: "/77/pdisk-311.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-25T15:24:14.311050Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 78, response# 
PDiskStateInfo { PDiskId: 312 CreateTime: 0 ChangeTime: 0 Path: "/78/pdisk-312.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 313 CreateTime: 0 ChangeTime: 0 Path: "/78/pdisk-313.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 314 CreateTime: 0 ChangeTime: 0 Path: "/78/pdisk-314.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 315 CreateTime: 0 ChangeTime: 0 Path: "/78/pdisk-315.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-25T15:24:14.311302Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 74, response# PDiskStateInfo { PDiskId: 296 CreateTime: 0 ChangeTime: 0 Path: "/74/pdisk-296.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 297 CreateTime: 0 ChangeTime: 0 Path: "/74/pdisk-297.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 298 CreateTime: 0 ChangeTime: 0 Path: "/74/pdisk-298.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 299 CreateTime: 0 ChangeTime: 0 Path: "/74/pdisk-299.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-25T15:24:14.311475Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 75, response# PDiskStateInfo { PDiskId: 300 CreateTime: 0 ChangeTime: 0 Path: "/75/pdisk-300.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 301 CreateTime: 0 ChangeTime: 0 Path: "/75/pdisk-301.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 302 CreateTime: 0 ChangeTime: 0 Path: "/75/pdisk-302.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 303 CreateTime: 0 ChangeTime: 0 Path: "/75/pdisk-303.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-25T15:24:14.311653Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 76, response# PDiskStateInfo { PDiskId: 304 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-304.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 305 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-305.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 306 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-306.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 307 CreateTime: 0 ChangeTime: 0 Path: "/76/pdisk-307.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-25T15:24:14.311828Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 72, response# PDiskStateInfo { PDiskId: 288 CreateTime: 0 ChangeTime: 0 Path: "/72/pdisk-288.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 289 CreateTime: 0 ChangeTime: 0 Path: "/72/pdisk-289.data" 
Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 290 CreateTime: 0 ChangeTime: 0 Path: "/72/pdisk-290.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 291 CreateTime: 0 ChangeTime: 0 Path: "/72/pdisk-291.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-25T15:24:14.311954Z node 71 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 73, response# PDiskStateInfo { PDiskId: 292 CreateTime: 0 ChangeTime: 0 Path: "/73/pdisk-292.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 293 CreateTime: 0 ChangeTime: 0 Path: "/73/pdisk-293.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 294 CreateTime: 0 ChangeTime: 0 Path: "/73/pdisk-294.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 295 CreateTime: 0 ChangeTime: 0 Path: "/73/pdisk-295.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 37880028 2025-06-25T15:24:14.312027Z node 71 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s >> TSelectFromViewTest::OneTable [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_simple_table-False] >> TSelectFromViewTest::OneTableUsingRelativeName >> TestPurecalcFilter::ManyValues [GOOD] |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> TestPurecalcFilter::NullValues >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_async_table-True] |97.9%| [TM] {RESULT} ydb/core/cms/ut_sentinel/unittest >> test_select.py::TestDML::test_select[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_example.py::TestExample::test_linked_with_testcase >> test_http_api.py::TestHttpApi::test_integral_results [GOOD] >> test_http_api.py::TestHttpApi::test_optional_results >> test_postgres.py::TestPostgresSuite::test_postgres_suite[strings] [GOOD] >> TopicSessionTests::BadDataSessionError [GOOD] >> DataShardStats::CollectStatsForSeveralParts [GOOD] >> DataShardStats::NoData >> KafkaProtocol::CreateTopicsScenarioWithKafkaAuth [GOOD] >> KafkaProtocol::CreateTopicsScenarioWithoutKafkaAuth >> TopicSessionTests::WrongFieldType >> TabletService_Restart::Basics [GOOD] >> TabletService_Restart::OnlyAdminsAllowed |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_select.py::TestDML::test_select[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_inserts.py::TestYdbInsertsOperations::test_insert_multiple_rows >> TestPurecalcFilter::NullValues [GOOD] >> TestPurecalcFilter::PartialPush >> test_http_api.py::TestHttpApi::test_optional_results [GOOD] >> test_http_api.py::TestHttpApi::test_pg_results >> test_insert_restarts.py::TestS3::test_atomic_upload_commit[v1-client0] [GOOD] |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> 
test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_simple_table-False] >> test_example.py::TestExample::test_linked_with_testcase [GOOD] |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> KafkaProtocol::CreateTopicsScenarioWithoutKafkaAuth [GOOD] >> KafkaProtocol::CreatePartitionsScenario >> TabletService_Restart::OnlyAdminsAllowed [GOOD] >> TMemoryController::MemTable [GOOD] >> TMemoryController::ResourceBroker >> test_http_api.py::TestHttpApi::test_pg_results [GOOD] >> test_http_api.py::TestHttpApi::test_set_result >> TopicSessionTests::WrongFieldType [GOOD] >> TSelectFromViewTest::OneTableUsingRelativeName [GOOD] >> TSelectFromViewTest::DisabledFeatureFlag >> TopicSessionTests::RestartSessionIfNewClientWithOffset ------- [TM] {asan, default-linux-x86_64, release} ydb/core/grpc_services/tablet/ut/unittest >> TabletService_Restart::OnlyAdminsAllowed [GOOD] Test command err: 2025-06-25T15:23:40.456345Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:23:40.456528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:23:40.456592Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007a9/r3tmp/tmpnBTLjk/pdisk_1.dat 2025-06-25T15:23:41.085449Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:23:41.098229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:23:41.154884Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:41.167917Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750865017257513 != 1750865017257517 2025-06-25T15:23:41.220844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:41.221071Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:41.241241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected ... reading schema ... changing schema (dry run) ... reading schema ... changing schema ... reading schema 2025-06-25T15:23:46.157363Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:265:2309], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:23:46.157551Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:23:46.157694Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007a9/r3tmp/tmpMbuCJZ/pdisk_1.dat 2025-06-25T15:23:46.441048Z node 2 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 2 Type# 268639257 2025-06-25T15:23:46.442399Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:23:46.473966Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:46.476124Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:33:2080] 1750865023045332 != 1750865023045336 2025-06-25T15:23:46.527535Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:46.527671Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:46.539997Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected ... reading schema (without token) ... reading schema (non-admin token) ... reading schema (admin token) 2025-06-25T15:23:50.504249Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:275:2318], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:23:50.504465Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:23:50.504615Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007a9/r3tmp/tmp39ypBN/pdisk_1.dat 2025-06-25T15:23:50.828906Z node 3 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 3 Type# 268639257 2025-06-25T15:23:50.830769Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:23:50.880115Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:50.885613Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:33:2080] 1750865027252049 != 1750865027252052 2025-06-25T15:23:50.931440Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:50.931568Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:50.945977Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:23:56.452785Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:23:56.453043Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:23:56.453216Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007a9/r3tmp/tmpjrS9V1/pdisk_1.dat 2025-06-25T15:23:56.755016Z node 4 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 4 Type# 268639257 2025-06-25T15:23:56.756922Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:23:56.796068Z node 4 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:56.799081Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:33:2080] 1750865032154475 != 1750865032154479 2025-06-25T15:23:56.845636Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:56.845781Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:56.860896Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:24:00.821577Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:260:2306], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:24:00.821872Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T15:24:00.822177Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007a9/r3tmp/tmpsu3I3W/pdisk_1.dat 2025-06-25T15:24:01.112339Z node 5 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 5 Type# 268639257 2025-06-25T15:24:01.114203Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:24:01.164840Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:24:01.167169Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:33:2080] 1750865037505624 != 1750865037505628 2025-06-25T15:24:01.215409Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:24:01.215560Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:24:01.227582Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:24:05.931315Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:275:2318], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:24:05.931555Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:24:05.931744Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007a9/r3tmp/tmpVLAlCv/pdisk_1.dat 2025-06-25T15:24:06.361125Z node 6 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 6 Type# 268639257 2025-06-25T15:24:06.362830Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:24:06.394774Z node 6 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:24:06.398850Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:33:2080] 1750865042118341 != 1750865042118344 2025-06-25T15:24:06.450780Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:24:06.450930Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:24:06.462613Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:24:11.617563Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:24:11.617775Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:24:11.617926Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007a9/r3tmp/tmpMXz7G8/pdisk_1.dat 2025-06-25T15:24:11.917213Z node 7 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 7 Type# 268639257 2025-06-25T15:24:11.919248Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:24:11.954928Z node 7 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:24:11.957581Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:33:2080] 1750865047563634 != 1750865047563638 2025-06-25T15:24:12.006266Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:24:12.006410Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:24:12.018011Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:24:16.896927Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:196:2242], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:24:16.897378Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T15:24:16.897445Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007a9/r3tmp/tmpN99sRv/pdisk_1.dat 2025-06-25T15:24:17.290152Z node 8 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 8 Type# 268639257 2025-06-25T15:24:17.291995Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:24:17.331005Z node 8 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:24:17.333902Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:33:2080] 1750865052724259 != 1750865052724263 2025-06-25T15:24:17.386440Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:24:17.386601Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:24:17.401574Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:24:23.300613Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:275:2318], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:24:23.300982Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:24:23.301120Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007a9/r3tmp/tmpHnPzGL/pdisk_1.dat 2025-06-25T15:24:23.707604Z node 9 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 9 Type# 268639257 2025-06-25T15:24:23.709602Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:24:23.747307Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:24:23.750240Z node 9 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [9:33:2080] 1750865058252973 != 1750865058252976 2025-06-25T15:24:23.797681Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:24:23.797849Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:24:23.809777Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected ... restarting tablet 72057594046644480 2025-06-25T15:24:23.993903Z node 9 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:24:24.163734Z node 9 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:24:30.043835Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:24:30.044005Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:24:30.044085Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007a9/r3tmp/tmpj2Bm2w/pdisk_1.dat 2025-06-25T15:24:30.472842Z node 10 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 10 Type# 268639257 2025-06-25T15:24:30.474801Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:24:30.530865Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:24:30.533654Z node 10 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [10:33:2080] 1750865065121152 != 1750865065121156 2025-06-25T15:24:30.580832Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:24:30.581001Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:24:30.594058Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected ... restarting tablet 72057594046644480 (without token) ... restarting tablet 72057594046644480 (non-admin token) 2025-06-25T15:24:30.808506Z node 10 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... 
restarting tablet 72057594046644480 (admin token) 2025-06-25T15:24:30.988277Z node 10 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded |97.9%| [TM] {RESULT} ydb/core/grpc_services/tablet/ut/unittest >> TestPurecalcFilter::PartialPush [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_table-True] >> TestPurecalcFilter::CompilationValidation >> test_insert_restarts.py::TestS3::test_atomic_upload_commit[v2-client0] >> test_example.py::TestExample::test_skipped_with_issue [SKIPPED] >> test_http_api.py::TestHttpApi::test_set_result [GOOD] >> test_http_api.py::TestHttpApi::test_complex_results |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/example/py3test >> test_example.py::TestExample::test_skipped_with_issue [SKIPPED] |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test |97.9%| [TM] {RESULT} ydb/tests/example/py3test >> TMemoryController::ResourceBroker [GOOD] >> TMemoryController::ResourceBroker_ConfigLimit >> TestPurecalcFilter::CompilationValidation [GOOD] >> KafkaProtocol::CreatePartitionsScenario [GOOD] >> KafkaProtocol::TopicsWithCleaunpPolicyScenario >> Coordinator::RestoreTenantConfiguration-AlterDatabaseCreateHiveFirst-false [GOOD] >> Coordinator::RestoreTenantConfiguration-AlterDatabaseCreateHiveFirst-true >> TestRawParser::Simple >> TestRawParser::Simple [GOOD] >> TestRawParser::ManyValues >> TTxDataShardLocalKMeansScan::BuildToBuild_Ranges [GOOD] >> TTxDataShardPrefixKMeansScan::BadRequest >> TestRawParser::ManyValues [GOOD] >> TestRawParser::TypeKindsValidation >> TestRawParser::TypeKindsValidation [GOOD] |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> TSelectFromViewTest::DisabledFeatureFlag [GOOD] >> TSelectFromViewTest::ReadTestCasesFromFiles >> test_http_api.py::TestHttpApi::test_complex_results [GOOD] >> test_http_api.py::TestHttpApi::test_result_offset_limit >> TopicSessionTests::RestartSessionIfNewClientWithOffset [GOOD] |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> TopicSessionTests::ReadNonExistentTopic >> test_drain.py::TestHive::test_drain_on_stop [FAIL] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/row_dispatcher/format_handler/ut/unittest >> TestRawParser::TypeKindsValidation [GOOD] Test command err: 2025-06-25T15:23:18.083538Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [0:0:0] 2025-06-25T15:23:18.083567Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:200: TTopicFilters: Create purecalc filter for predicate 'where col_0 == "str1"' (filter id: [0:0:0]) 2025-06-25T15:23:18.083585Z node 1 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where col_0 == "str1"; 2025-06-25T15:23:18.083610Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:82: TTopicFilters: TFilterHandler [0:0:0] : Send compile request with id 1 2025-06-25T15:23:18.090245Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 1 from [1:7519907908625076254:2051] 2025-06-25T15:23:20.692400Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor 
[1:7519907908625076254:2051] [id 1]: Started compile request 2025-06-25T15:23:21.806776Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [1:7519907908625076254:2051] [id 1]: Compilation completed for request 2025-06-25T15:23:21.808587Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 1 from [1:7519907908625076254:2051] 2025-06-25T15:23:21.812471Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:175: TTopicFilters: Got compile response for request with id 1 2025-06-25T15:23:21.812501Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:120: TTopicFilters: TFilterHandler [0:0:0] : Filter compilation finished 2025-06-25T15:23:21.812551Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [1:0:0] 2025-06-25T15:23:21.812586Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:200: TTopicFilters: Create purecalc filter for predicate 'where col_1 == "str2"' (filter id: [1:0:0]) 2025-06-25T15:23:21.812628Z node 1 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where col_1 == "str2"; 2025-06-25T15:23:21.812672Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:82: TTopicFilters: TFilterHandler [1:0:0] : Send compile request with id 2 2025-06-25T15:23:21.816143Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 2 from [1:7519907908625076254:2051] 2025-06-25T15:23:21.816209Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [1:7519907908625076254:2051] [id 2]: Started compile request 2025-06-25T15:23:21.864379Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [1:7519907908625076254:2051] [id 2]: Compilation completed for request 2025-06-25T15:23:21.864502Z node 1 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 2 from [1:7519907908625076254:2051] 2025-06-25T15:23:21.864756Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:175: TTopicFilters: Got compile response for request with id 2 2025-06-25T15:23:21.864774Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:120: TTopicFilters: TFilterHandler [1:0:0] : Filter compilation finished 2025-06-25T15:23:21.864806Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [2:0:0] 2025-06-25T15:23:21.864869Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:155: TTopicFilters: FilterData for 3 clients, number rows: 3 2025-06-25T15:23:21.864882Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:259: TTopicFilters: Pass 3 rows to purecalc filter (filter id: [1:0:0]) 2025-06-25T15:23:21.864893Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 3 rows 2025-06-25T15:23:21.869709Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:262: TTopicFilters: Add 3 rows to client [2:0:0] without filtering 2025-06-25T15:23:21.869746Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:259: TTopicFilters: Pass 3 rows to purecalc filter (filter id: [0:0:0]) 2025-06-25T15:23:21.869754Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 3 rows 2025-06-25T15:23:21.869826Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:219: TTopicFilters: Remove filter with id [2:0:0] 2025-06-25T15:23:21.869881Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:155: 
TTopicFilters: FilterData for 2 clients, number rows: 1 2025-06-25T15:23:21.869894Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:259: TTopicFilters: Pass 1 rows to purecalc filter (filter id: [1:0:0]) 2025-06-25T15:23:21.869904Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-25T15:23:21.869942Z node 1 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:259: TTopicFilters: Pass 1 rows to purecalc filter (filter id: [0:0:0]) 2025-06-25T15:23:21.869951Z node 1 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-25T15:23:22.234066Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [0:0:0] 2025-06-25T15:23:22.234107Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:200: TTopicFilters: Create purecalc filter for predicate 'where a1 = "str1"' (filter id: [0:0:0]) 2025-06-25T15:23:22.234136Z node 2 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a1 = "str1"; 2025-06-25T15:23:22.234161Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:82: TTopicFilters: TFilterHandler [0:0:0] : Send compile request with id 1 2025-06-25T15:23:22.245346Z node 2 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 1 from [2:7519907925635999088:2051] 2025-06-25T15:23:25.204414Z node 2 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [2:7519907925635999088:2051] [id 1]: Started compile request 2025-06-25T15:23:25.232237Z node 2 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [2:7519907925635999088:2051] [id 1]: Compilation completed for request 2025-06-25T15:23:25.232359Z node 2 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 1 from [2:7519907925635999088:2051] 2025-06-25T15:23:25.232500Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:175: TTopicFilters: Got compile response for request with id 1 2025-06-25T15:23:25.232525Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:120: TTopicFilters: TFilterHandler [0:0:0] : Filter compilation finished 2025-06-25T15:23:25.232545Z node 2 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [0:0:0] 2025-06-25T15:23:25.653071Z node 3 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [0:0:0] 2025-06-25T15:23:25.653088Z node 3 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:200: TTopicFilters: Create purecalc filter for predicate 'where a2 ... 50' (filter id: [0:0:0]) 2025-06-25T15:23:25.653106Z node 3 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 ... 
50; 2025-06-25T15:23:25.653136Z node 3 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:82: TTopicFilters: TFilterHandler [0:0:0] : Send compile request with id 1 2025-06-25T15:23:25.653308Z node 3 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 1 from [3:7519907939050879748:2051] 2025-06-25T15:23:28.852453Z node 3 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [3:7519907939050879748:2051] [id 1]: Started compile request 2025-06-25T15:23:28.886077Z node 3 :FQ_ROW_DISPATCHER ERROR: compile_service.cpp:68: TPurecalcCompileActor [3:7519907939050879748:2051] [id 1]: Compilation failed for request 2025-06-25T15:23:28.886236Z node 3 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 1 from [3:7519907939050879748:2051] 2025-06-25T15:23:28.888545Z node 3 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:175: TTopicFilters: Got compile response for request with id 1 2025-06-25T15:23:28.888727Z node 3 :FQ_ROW_DISPATCHER ERROR: filters_set.cpp:110: TTopicFilters: TFilterHandler [0:0:0] : Filter compilation error: {
: Error: Failed to compile purecalc program subissue: {
: Error: Compile issues: generated.sql:2:36: Error: mismatched input '.' expecting {'$', ABORT, ACTION, ADD, AFTER, ALL, ALTER, ANALYZE, AND, ANSI, ANY, ARRAY, AS, ASC, ASSUME, ASYMMETRIC, ASYNC, AT, ATTACH, ATTRIBUTES, AUTOINCREMENT, BACKUP, BATCH, COLLECTION, BEFORE, BEGIN, BERNOULLI, BETWEEN, BITCAST, BY, CALLABLE, CASCADE, CASE, CAST, CHANGEFEED, CHECK, CLASSIFIER, COLLATE, COLUMN, COLUMNS, COMMIT, COMPACT, CONDITIONAL, CONFLICT, CONNECT, CONSTRAINT, CONSUMER, COVER, CREATE, CROSS, CUBE, CURRENT, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DATA, DATABASE, DECIMAL, DECLARE, DEFAULT, DEFERRABLE, DEFERRED, DEFINE, DELETE, DESC, DESCRIBE, DETACH, DICT, DIRECTORY, DISABLE, DISCARD, DISTINCT, DO, DROP, EACH, ELSE, EMPTY, EMPTY_ACTION, ENCRYPTED, END, ENUM, ERASE, ERROR, ESCAPE, EVALUATE, EXCEPT, EXCLUDE, EXCLUSION, EXCLUSIVE, EXISTS, EXPLAIN, EXPORT, EXTERNAL, FAIL, FAMILY, FILTER, FIRST, FLATTEN, FLOW, FOLLOWING, FOR, FOREIGN, FROM, FULL, FUNCTION, GLOB, GLOBAL, GRANT, GROUP, GROUPING, GROUPS, HASH, HAVING, HOP, IF, IGNORE, ILIKE, IMMEDIATE, IMPORT, IN, INCREMENT, INCREMENTAL, INDEX, INDEXED, INHERITS, INITIAL, INITIALLY, INNER, INSERT, INSTEAD, INTERSECT, INTO, IS, ISNULL, JOIN, JSON_EXISTS, JSON_QUERY, JSON_VALUE, KEY, LAST, LEFT, LEGACY, LIKE, LIMIT, LIST, LOCAL, LOGIN, MANAGE, MATCH, MATCHES, MATCH_RECOGNIZE, MEASURES, MICROSECONDS, MILLISECONDS, MODIFY, NANOSECONDS, NATURAL, NEXT, NO, NOLOGIN, NOT, NOTNULL, NULL, NULLS, OBJECT, OF, OFFSET, OMIT, ON, ONE, ONLY, OPTION, OPTIONAL, OR, ORDER, OTHERS, OUTER, OVER, OWNER, PARALLEL, PARTITION, PASSING, PASSWORD, PAST, PATTERN, PER, PERMUTE, PLAN, POOL, PRAGMA, PRECEDING, PRESORT, PRIMARY, PRIVILEGES, PROCESS, QUERY, QUEUE, RAISE, RANGE, REDUCE, REFERENCES, REGEXP, REINDEX, RELEASE, REMOVE, RENAME, REPLACE, REPLICATION, RESET, RESOURCE, RESPECT, RESTART, RESTORE, RESTRICT, RESULT, RETURN, RETURNING, REVERT, REVOKE, RIGHT, RLIKE, ROLLBACK, ROLLUP, ROW, ROWS, SAMPLE, SAVEPOINT, SCHEMA, SECONDS, SEEK, SELECT, SEMI, SET, SETS, SHOW, TSKIP, SEQUENCE, SOURCE, START, STREAM, STRUCT, SUBQUERY, SUBSET, SYMBOLS, SYMMETRIC, SYNC, SYSTEM, TABLE, TABLES, TABLESAMPLE, TABLESTORE, TAGGED, TEMP, TEMPORARY, THEN, TIES, TO, TOPIC, TRANSACTION, TRANSFER, TRIGGER, TUPLE, TYPE, UNBOUNDED, UNCONDITIONAL, UNION, UNIQUE, UNKNOWN, UNMATCHED, UPDATE, UPSERT, USE, USER, USING, VACUUM, VALUES, VARIANT, VIEW, VIRTUAL, WHEN, WHERE, WINDOW, WITH, WITHOUT, WRAPPER, XOR, STRING_VALUE, ID_PLAIN, ID_QUOTED, DIGITS} } subissue: {
: Error: Final yql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 ... 50; } } 2025-06-25T15:23:32.588353Z node 4 :FQ_ROW_DISPATCHER DEBUG: format_handler.cpp:379: TTopicFormatHandler [json_each_row]: Add client with id [0:0:0] 2025-06-25T15:23:32.590956Z node 4 :FQ_ROW_DISPATCHER DEBUG: format_handler.cpp:497: TTopicFormatHandler [json_each_row]: UpdateParser to new schema with size 2 2025-06-25T15:23:32.680621Z node 4 :FQ_ROW_DISPATCHER INFO: json_parser.cpp:350: TJsonParser: Simdjson active implementation icelake 2025-06-25T15:23:32.680834Z node 4 :FQ_ROW_DISPATCHER DEBUG: format_handler.cpp:506: TTopicFormatHandler [json_each_row]: Parser was updated on new schema with 2 columns 2025-06-25T15:23:32.682435Z node 4 :FQ_ROW_DISPATCHER TRACE: filters_set.cpp:196: TTopicFilters: Create filter with id [0:0:0] 2025-06-25T15:23:32.682486Z node 4 :FQ_ROW_DISPATCHER ... st offset: 45, values: {"a1": "456", "a2": 42, "a3": 1.11.1} 2025-06-25T15:23:59.119439Z node 23 :FQ_ROW_DISPATCHER INFO: json_parser.cpp:350: TJsonParser: Simdjson active implementation icelake 2025-06-25T15:23:59.119701Z node 23 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-25T15:23:59.119733Z node 23 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 42, values: {"a1": "-456"} 2025-06-25T15:23:59.705777Z node 24 :FQ_ROW_DISPATCHER INFO: json_parser.cpp:350: TJsonParser: Simdjson active implementation icelake 2025-06-25T15:23:59.706060Z node 24 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-25T15:23:59.706109Z node 24 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 42, values: {"a1": {"key": "value"}, "a2": {"key2": "value2"}} 2025-06-25T15:23:59.706635Z node 24 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-25T15:23:59.706679Z node 24 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 43, values: {"a1": {"key": "value", "nested": {"a": "b", "c":}}, "a2": "str"} 2025-06-25T15:23:59.706966Z node 24 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-25T15:23:59.706998Z node 24 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 44, values: {"a1": {"key" "value"}, "a2": "str"} 2025-06-25T15:24:00.278308Z node 25 :FQ_ROW_DISPATCHER INFO: json_parser.cpp:350: TJsonParser: Simdjson active implementation icelake 2025-06-25T15:24:00.278631Z node 25 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-25T15:24:00.278673Z node 25 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 42, values: {"a1": true, "a2": false} 2025-06-25T15:24:00.279057Z node 25 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-25T15:24:00.279099Z node 25 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 43, values: {"a1": "true", "a2": falce} 2025-06-25T15:24:00.823012Z node 26 :FQ_ROW_DISPATCHER INFO: json_parser.cpp:350: TJsonParser: Simdjson active implementation icelake 2025-06-25T15:24:00.823267Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-25T15:24:00.823304Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 42, values: {"a1": Yelse} 2025-06-25T15:24:00.823724Z node 26 
:FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-25T15:24:00.823765Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 43, values: {"a1": "st""r"} 2025-06-25T15:24:00.823941Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-25T15:24:00.823975Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 44, values: {"a1": "x"} {"a1": "y"} 2025-06-25T15:24:00.824132Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:369: TJsonParser: Add 1 messages to parse 2025-06-25T15:24:00.824160Z node 26 :FQ_ROW_DISPATCHER TRACE: json_parser.cpp:420: TJsonParser: Do parsing, first offset: 45, values: { 2025-06-25T15:24:01.297857Z node 27 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 > 100; 2025-06-25T15:24:01.301562Z node 27 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 0 from [27:7519908092941293204:2051] 2025-06-25T15:24:06.704570Z node 27 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [27:7519908092941293204:2051] [id 0]: Started compile request 2025-06-25T15:24:06.779815Z node 27 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [27:7519908092941293204:2051] [id 0]: Compilation completed for request 2025-06-25T15:24:06.779959Z node 27 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 0 from [27:7519908092941293204:2051] 2025-06-25T15:24:06.780328Z node 27 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-25T15:24:06.781213Z node 27 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-25T15:24:07.490793Z node 28 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 > 100; 2025-06-25T15:24:07.504637Z node 28 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 0 from [28:7519908118392011007:2051] 2025-06-25T15:24:12.784564Z node 28 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [28:7519908118392011007:2051] [id 0]: Started compile request 2025-06-25T15:24:12.835463Z node 28 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [28:7519908118392011007:2051] [id 0]: Compilation completed for request 2025-06-25T15:24:12.835605Z node 28 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 0 from [28:7519908118392011007:2051] 2025-06-25T15:24:12.835853Z node 28 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-25T15:24:12.835964Z node 28 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-25T15:24:13.529320Z node 29 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 > 100; 2025-06-25T15:24:13.532426Z node 29 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 0 from [29:7519908144016677765:2051] 2025-06-25T15:24:19.316443Z node 29 
:FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [29:7519908144016677765:2051] [id 0]: Started compile request 2025-06-25T15:24:19.407074Z node 29 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [29:7519908144016677765:2051] [id 0]: Compilation completed for request 2025-06-25T15:24:19.407204Z node 29 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 0 from [29:7519908144016677765:2051] 2025-06-25T15:24:19.412759Z node 29 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 2 rows 2025-06-25T15:24:19.413026Z node 29 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 2 rows 2025-06-25T15:24:19.413085Z node 29 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 2 rows 2025-06-25T15:24:19.413126Z node 29 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 2 rows 2025-06-25T15:24:19.413162Z node 29 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 2 rows 2025-06-25T15:24:20.360207Z node 30 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a1 is null; 2025-06-25T15:24:20.372874Z node 30 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 0 from [30:7519908174735156978:2051] 2025-06-25T15:24:26.723589Z node 30 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [30:7519908174735156978:2051] [id 0]: Started compile request 2025-06-25T15:24:26.859925Z node 30 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [30:7519908174735156978:2051] [id 0]: Compilation completed for request 2025-06-25T15:24:26.860067Z node 30 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 0 from [30:7519908174735156978:2051] 2025-06-25T15:24:26.860339Z node 30 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-25T15:24:27.626090Z node 31 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 > 50; 2025-06-25T15:24:27.644384Z node 31 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 0 from [31:7519908206368456699:2051] 2025-06-25T15:24:34.396437Z node 31 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [31:7519908206368456699:2051] [id 0]: Started compile request 2025-06-25T15:24:34.485980Z node 31 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:71: TPurecalcCompileActor [31:7519908206368456699:2051] [id 0]: Compilation completed for request 2025-06-25T15:24:34.486098Z node 31 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 0 from [31:7519908206368456699:2051] 2025-06-25T15:24:34.486389Z node 31 :FQ_ROW_DISPATCHER TRACE: purecalc_filter.cpp:298: TPurecalcFilter: Do filtering for 1 rows 2025-06-25T15:24:35.197017Z node 32 :FQ_ROW_DISPATCHER DEBUG: purecalc_filter.cpp:324: TPurecalcFilter: Generated sql: PRAGMA config.flags("LLVM", "OFF"); SELECT _offset FROM Input where a2 ... 
50; 2025-06-25T15:24:35.197330Z node 32 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:132: TPurecalcCompileService: Add to compile queue request with id 0 from [32:7519908241103667603:2051] 2025-06-25T15:24:41.476420Z node 32 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:53: TPurecalcCompileActor [32:7519908241103667603:2051] [id 0]: Started compile request 2025-06-25T15:24:41.483891Z node 32 :FQ_ROW_DISPATCHER ERROR: compile_service.cpp:68: TPurecalcCompileActor [32:7519908241103667603:2051] [id 0]: Compilation failed for request 2025-06-25T15:24:41.484081Z node 32 :FQ_ROW_DISPATCHER TRACE: compile_service.cpp:152: TPurecalcCompileService: Compile finished for request with id 0 from [32:7519908241103667603:2051] 2025-06-25T15:24:42.549752Z node 33 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:54: TRawParser: Add 1 messages to parse 2025-06-25T15:24:42.549816Z node 33 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:74: TRawParser: Do parsing, first offset: 42, value: {"a1": "hello1__large_str", "a2": 101, "event": "event1"} 2025-06-25T15:24:43.275859Z node 34 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:54: TRawParser: Add 3 messages to parse 2025-06-25T15:24:43.275923Z node 34 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:74: TRawParser: Do parsing, first offset: 42, value: {"a1": "hello1", "a2": "101", "event": "event1"} 2025-06-25T15:24:43.276050Z node 34 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:74: TRawParser: Do parsing, first offset: 43, value: {"a1": "hello1", "a2": "101", "event": "event2"} 2025-06-25T15:24:43.276078Z node 34 :FQ_ROW_DISPATCHER TRACE: raw_parser.cpp:74: TRawParser: Do parsing, first offset: 44, value: {"a2": "101", "a1": "hello1", "event": "event3"} |97.9%| [TM] {RESULT} ydb/core/fq/libs/row_dispatcher/format_handler/ut/unittest >> DataShardStats::NoData [GOOD] >> DataShardStats::Follower >> TMemoryController::ResourceBroker_ConfigLimit [GOOD] >> TMemoryController::ResourceBroker_ConfigCS >> TTxDataShardPrefixKMeansScan::BadRequest [GOOD] >> TTxDataShardPrefixKMeansScan::BuildToPosting >> test_http_api.py::TestHttpApi::test_result_offset_limit [GOOD] >> test_http_api.py::TestHttpApi::test_openapi_spec >> test_http_api.py::TestHttpApi::test_openapi_spec [GOOD] |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> test_schemeshard_limits.py::TestSchemeShardLimitsCase0::test_effective_acls_are_too_large >> KafkaProtocol::TopicsWithCleaunpPolicyScenario [GOOD] >> KafkaProtocol::DescribeConfigsScenario |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> TopicSessionTests::ReadNonExistentTopic [GOOD] >> TopicSessionTests::SlowSession |97.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_drain.py::TestHive::test_drain_on_stop [FAIL] >> test_schemeshard_limits.py::TestSchemeShardLimitsCase1::test_too_large_acls >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> TMemoryController::ResourceBroker_ConfigCS [GOOD] >> TMemTableMemoryConsumersCollection::Empty [GOOD] >> TMemTableMemoryConsumersCollection::Destruction [GOOD] >> TMemTableMemoryConsumersCollection::Register [GOOD] >> TMemTableMemoryConsumersCollection::Unregister [GOOD] >> TMemTableMemoryConsumersCollection::SetConsumption [GOOD] >> TMemTableMemoryConsumersCollection::CompactionComplete [GOOD] >> TMemTableMemoryConsumersCollection::SelectForCompaction [GOOD] |97.9%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/limits/py3test >> DataShardStats::Follower [GOOD] >> DataShardStats::Tli ------- [TM] {asan, default-linux-x86_64, release} ydb/core/memory_controller/ut/unittest >> TMemTableMemoryConsumersCollection::SelectForCompaction [GOOD] Test command err: ResourceBrokerSelfConfig: LimitBytes: 0 QueryExecutionLimitBytes: 0 ColumnTablesCompactionLimitBytes: 0 2025-06-25T15:23:16.780275Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:228: Periodic memory stats: AnonRss: none CGroupLimit: none MemTotal: none MemAvailable: none AllocatedMemory: 0B AllocatorCachesMemory: 0B HardLimit: 200MiB SoftLimit: 150MiB TargetUtilization: 100MiB ActivitiesLimitBytes: 60MiB ConsumersConsumption: 0B OtherConsumption: 0B ExternalConsumption: 0B TargetConsumersConsumption: 100MiB ResultingConsumersConsumption: 6MiB Coefficient: 0.9999990463 2025-06-25T15:23:16.782219Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer MemTable state: Consumption: 0B Limit: 6MiB Min: 2MiB Max: 6MiB 2025-06-25T15:23:16.783120Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:380: Consumer QueryExecution state: Consumption: 0B Limit: 40MiB 2025-06-25T15:23:16.789252Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:149: Bootstrapped with config HardLimitBytes: 209715200 2025-06-25T15:23:16.818330Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-25T15:23:16.821569Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1265: Bootstrap with config MemoryLimit: 33554432 2025-06-25T15:23:16.839465Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:298: Consumer SharedCache [1:20:2067] registered 2025-06-25T15:23:16.839731Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:343: Register memory consumer 2025-06-25T15:23:16.874173Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1189: New config diff: Queues { Name: "queue_kqp_resource_manager" Limit { Memory: 41943040 } } Queues { Name: "queue_cs_indexation" Limit { Memory: 4194304 } } Queues { Name: "queue_cs_ttl" Limit { Memory: 4194304 } } Queues { Name: "queue_cs_general" Limit { Memory: 12582912 } } Queues { Name: "queue_cs_normalizer" Limit { Memory: 12582912 } } ResourceLimit { Memory: 62914560 } 2025-06-25T15:23:16.874943Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1195: New config: Queues { Name: "queue_default" Weight: 30 Limit { Cpu: 2 } } Queues { Name: "queue_compaction_gen0" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_compaction_gen1" Weight: 100 Limit { Cpu: 6 } } Queues { Name: "queue_compaction_gen2" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_gen3" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_borrowed" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_cs_indexation" Weight: 100 Limit { Cpu: 3 Memory: 4194304 } } Queues { Name: "queue_cs_ttl" Weight: 100 Limit { Cpu: 3 Memory: 4194304 } } Queues { Name: "queue_cs_general" Weight: 100 Limit { Cpu: 3 Memory: 12582912 } } Queues { Name: "queue_cs_scan_read" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_normalizer" Weight: 100 Limit { Cpu: 3 Memory: 12582912 } } Queues { Name: "queue_transaction" Weight: 100 Limit { Cpu: 4 } } Queues { Name: "queue_background_compaction" Weight: 10 Limit { Cpu: 1 } } Queues { Name: "queue_scan" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_backup" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_restore" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_kqp_resource_manager" 
Weight: 30 Limit { Cpu: 4 Memory: 41943040 } } Queues { Name: "queue_build_index" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_ttl" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_datashard_build_stats" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_cdc_initial_scan" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_statistics_scan" Weight: 100 Limit { Cpu: 1 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 60000000 } Tasks { Name: "compaction_gen0" QueueName: "queue_compaction_gen0" DefaultDuration: 10000000 } Tasks { Name: "compaction_gen1" QueueName: "queue_compaction_gen1" DefaultDuration: 30000000 } Tasks { Name: "compaction_gen2" QueueName: "queue_compaction_gen2" DefaultDuration: 120000000 } Tasks { Name: "compaction_gen3" QueueName: "queue_compaction_gen3" DefaultDuration: 600000000 } Tasks { Name: "compaction_borrowed" QueueName: "queue_compaction_borrowed" DefaultDuration: 600000000 } Tasks { Name: "CS::TTL" QueueName: "queue_cs_ttl" DefaultDuration: 600000000 } Tasks { Name: "CS::INDEXATION" QueueName: "queue_cs_indexation" DefaultDuration: 600000000 } Tasks { Name: "CS::GENERAL" QueueName: "queue_cs_general" DefaultDuration: 600000000 } Tasks { Name: "CS::SCAN_READ" QueueName: "queue_cs_scan_read" DefaultDuration: 600000000 } Tasks { Name: "CS::NORMALIZER" QueueName: "queue_cs_normalizer" DefaultDuration: 600000000 } Tasks { Name: "transaction" QueueName: "queue_transaction" DefaultDuration: 600000000 } Tasks { Name: "background_compaction" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen0" QueueName: "queue_background_compaction" DefaultDuration: 10000000 } Tasks { Name: "background_compaction_gen1" QueueName: "queue_background_compaction" DefaultDuration: 20000000 } Tasks { Name: "background_compaction_gen2" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen3" QueueName: "queue_background_compaction" DefaultDuration: 300000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 300000000 } Tasks { Name: "backup" QueueName: "queue_backup" DefaultDuration: 300000000 } Tasks { Name: "restore" QueueName: "queue_restore" DefaultDuration: 300000000 } Tasks { Name: "kqp_query" QueueName: "queue_kqp_resource_manager" DefaultDuration: 600000000 } Tasks { Name: "build_index" QueueName: "queue_build_index" DefaultDuration: 600000000 } Tasks { Name: "ttl" QueueName: "queue_ttl" DefaultDuration: 300000000 } Tasks { Name: "datashard_build_stats" QueueName: "queue_datashard_build_stats" DefaultDuration: 5000000 } Tasks { Name: "cdc_initial_scan" QueueName: "queue_cdc_initial_scan" DefaultDuration: 600000000 } Tasks { Name: "statistics_scan" QueueName: "queue_statistics_scan" DefaultDuration: 600000000 } ResourceLimit { Cpu: 256 Memory: 62914560 } 2025-06-25T15:23:16.876809Z node 1 :RESOURCE_BROKER INFO: resource_broker.cpp:1240: Configure result: Success: true 2025-06-25T15:23:16.879549Z node 1 :MEMORY_CONTROLLER INFO: memory_controller.cpp:328: ResourceBroker configure result Success: true 2025-06-25T15:23:16.932360Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:109:2155], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:23:16.932946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T15:23:16.933076Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:23:17.048496Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:403:2363] 1 registered 2025-06-25T15:23:17.068270Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:416:2365] 0 registered 2025-06-25T15:23:17.075254Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:416:2365] 2 registered 2025-06-25T15:23:17.076528Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:416:2365] 4 registered 2025-06-25T15:23:17.076694Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:416:2365] 5 registered 2025-06-25T15:23:17.079241Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:423:2367] 1 registered 2025-06-25T15:23:17.084068Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:423:2367] 2 registered 2025-06-25T15:23:17.137538Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 1 registered 2025-06-25T15:23:17.191583Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 2 registered 2025-06-25T15:23:17.191955Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 3 registered 2025-06-25T15:23:17.192238Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 4 registered 2025-06-25T15:23:17.193648Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 5 registered 2025-06-25T15:23:17.193992Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 6 registered 2025-06-25T15:23:17.194223Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 7 registered 2025-06-25T15:23:17.194544Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 8 registered 2025-06-25T15:23:17.194948Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 9 registered 2025-06-25T15:23:17.208149Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 10 registered 2025-06-25T15:23:17.233785Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 11 registered 2025-06-25T15:23:17.234074Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 12 registered 2025-06-25T15:23:17.237893Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 13 registered 2025-06-25T15:23:17.239192Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 14 registered 2025-06-25T15:23:17.239626Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 15 registered 2025-06-25T15:23:17.239864Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 16 registered 2025-06-25T15:23:17.239929Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 17 registered 
2025-06-25T15:23:17.240084Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 18 registered 2025-06-25T15:23:17.240138Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 19 registered 2025-06-25T15:23:17.241903Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 20 registered 2025-06-25T15:23:17.242799Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 22 registered 2025-06-25T15:23:17.267946Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 23 registered 2025-06-25T15:23:17.268367Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 24 registered 2025-06-25T15:23:17.269502Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 25 registered 2025-06-25T15:23:17.280513Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 26 registered 2025-06-25T15:23:17.283829Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 27 registered 2025-06-25T15:23:17.284106Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 28 registered 2025-06-25T15:23:17.292753Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 29 registered 2025-06-25T15:23:17.293030Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 30 registered 2025-06-25T15:23:17.293325Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 31 registered 2025-06-25T15:23:17.293412Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 32 registered 2025-06-25T15:23:17.293505Z node 1 :MEMORY_CONTROLLER TRACE: memory_controller.cpp:305: MemTable [1:433:2369] 33 registered 2025-06-25T15 ... 
9120 } } Queues { Name: "queue_cs_scan_read" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_normalizer" Weight: 100 Limit { Cpu: 3 Memory: 125829120 } } Queues { Name: "queue_transaction" Weight: 100 Limit { Cpu: 4 } } Queues { Name: "queue_background_compaction" Weight: 10 Limit { Cpu: 1 } } Queues { Name: "queue_scan" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_backup" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_restore" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_kqp_resource_manager" Weight: 30 Limit { Cpu: 4 Memory: 209715200 } } Queues { Name: "queue_build_index" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_ttl" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_datashard_build_stats" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_cdc_initial_scan" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_statistics_scan" Weight: 100 Limit { Cpu: 1 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 60000000 } Tasks { Name: "compaction_gen0" QueueName: "queue_compaction_gen0" DefaultDuration: 10000000 } Tasks { Name: "compaction_gen1" QueueName: "queue_compaction_gen1" DefaultDuration: 30000000 } Tasks { Name: "compaction_gen2" QueueName: "queue_compaction_gen2" DefaultDuration: 120000000 } Tasks { Name: "compaction_gen3" QueueName: "queue_compaction_gen3" DefaultDuration: 600000000 } Tasks { Name: "compaction_borrowed" QueueName: "queue_compaction_borrowed" DefaultDuration: 600000000 } Tasks { Name: "CS::TTL" QueueName: "queue_cs_ttl" DefaultDuration: 600000000 } Tasks { Name: "CS::INDEXATION" QueueName: "queue_cs_indexation" DefaultDuration: 600000000 } Tasks { Name: "CS::GENERAL" QueueName: "queue_cs_general" DefaultDuration: 600000000 } Tasks { Name: "CS::SCAN_READ" QueueName: "queue_cs_scan_read" DefaultDuration: 600000000 } Tasks { Name: "CS::NORMALIZER" QueueName: "queue_cs_normalizer" DefaultDuration: 600000000 } Tasks { Name: "transaction" QueueName: "queue_transaction" DefaultDuration: 600000000 } Tasks { Name: "background_compaction" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen0" QueueName: "queue_background_compaction" DefaultDuration: 10000000 } Tasks { Name: "background_compaction_gen1" QueueName: "queue_background_compaction" DefaultDuration: 20000000 } Tasks { Name: "background_compaction_gen2" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen3" QueueName: "queue_background_compaction" DefaultDuration: 300000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 300000000 } Tasks { Name: "backup" QueueName: "queue_backup" DefaultDuration: 300000000 } Tasks { Name: "restore" QueueName: "queue_restore" DefaultDuration: 300000000 } Tasks { Name: "kqp_query" QueueName: "queue_kqp_resource_manager" DefaultDuration: 600000000 } Tasks { Name: "build_index" QueueName: "queue_build_index" DefaultDuration: 600000000 } Tasks { Name: "ttl" QueueName: "queue_ttl" DefaultDuration: 300000000 } Tasks { Name: "datashard_build_stats" QueueName: "queue_datashard_build_stats" DefaultDuration: 5000000 } Tasks { Name: "cdc_initial_scan" QueueName: "queue_cdc_initial_scan" DefaultDuration: 600000000 } Tasks { Name: "statistics_scan" QueueName: "queue_statistics_scan" DefaultDuration: 600000000 } ResourceLimit { Cpu: 256 Memory: 1048576000 } 2025-06-25T15:24:55.697789Z node 10 :RESOURCE_BROKER INFO: resource_broker.cpp:1240: Configure result: Success: true 
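Aside on the dump above: the queue and task definitions printed by the resource broker are protobuf text format. A minimal sketch of loading such a dump back into a config message is shown below; it only relies on the standard protobuf TextFormat parser, and the message type name "TResourceBrokerConfig" is an assumed stand-in for whatever config message YDB actually uses, not a claim about its real proto schema.

    #include <google/protobuf/text_format.h>
    #include <string>

    // Hypothetical config message type; the real YDB type name is assumed here.
    // google::protobuf::TextFormat::ParseFromString() is the stock text-format parser.
    bool ParseResourceBrokerConfig(const std::string& configText, TResourceBrokerConfig* out) {
        return google::protobuf::TextFormat::ParseFromString(configText, out);
    }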
2025-06-25T15:24:55.698416Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:328: ResourceBroker configure result Success: true 2025-06-25T15:24:55.698501Z node 10 :TABLET_SAUSAGECACHE INFO: shared_sausagecache.cpp:353: Limit memory consumer with 472MiB 2025-06-25T15:24:55.900432Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:228: Periodic memory stats: AnonRss: none CGroupLimit: 1000MiB MemTotal: none MemAvailable: none AllocatedMemory: 0B AllocatorCachesMemory: 0B HardLimit: 1000MiB SoftLimit: 750MiB TargetUtilization: 500MiB ActivitiesLimitBytes: 1000MiB ConsumersConsumption: 33.5KiB OtherConsumption: 0B ExternalConsumption: 0B TargetConsumersConsumption: 500MiB ResultingConsumersConsumption: 500MiB Coefficient: 0.90625 2025-06-25T15:24:55.900961Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer SharedCache state: Consumption: 0B Limit: 472MiB Min: 200MiB Max: 500MiB 2025-06-25T15:24:55.901012Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer MemTable state: Consumption: 33.5KiB Limit: 28.1MiB Min: 10MiB Max: 30MiB 2025-06-25T15:24:55.901044Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:380: Consumer QueryExecution state: Consumption: 1.94MiB Limit: 200MiB 2025-06-25T15:24:55.901131Z node 10 :TABLET_SAUSAGECACHE INFO: shared_sausagecache.cpp:353: Limit memory consumer with 472MiB 2025-06-25T15:24:56.054117Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:228: Periodic memory stats: AnonRss: none CGroupLimit: 50MiB MemTotal: none MemAvailable: none AllocatedMemory: 0B AllocatorCachesMemory: 0B HardLimit: 50MiB SoftLimit: 37.5MiB TargetUtilization: 25MiB ActivitiesLimitBytes: 1000MiB ConsumersConsumption: 33.6KiB OtherConsumption: 0B ExternalConsumption: 0B TargetConsumersConsumption: 25MiB ResultingConsumersConsumption: 25MiB Coefficient: 0.90625 2025-06-25T15:24:56.054774Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer SharedCache state: Consumption: 0B Limit: 23.6MiB Min: 10MiB Max: 25MiB 2025-06-25T15:24:56.054832Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer MemTable state: Consumption: 33.6KiB Limit: 1.41MiB Min: 512KiB Max: 1.5MiB 2025-06-25T15:24:56.054870Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:380: Consumer QueryExecution state: Consumption: 1.94MiB Limit: 10MiB 2025-06-25T15:24:56.055196Z node 10 :RESOURCE_BROKER INFO: resource_broker.cpp:1189: New config diff: Queues { Name: "queue_kqp_resource_manager" Limit { Memory: 10485760 } } Queues { Name: "queue_cs_indexation" Limit { Memory: 2097152 } } Queues { Name: "queue_cs_ttl" Limit { Memory: 2097152 } } Queues { Name: "queue_cs_general" Limit { Memory: 6291456 } } Queues { Name: "queue_cs_normalizer" Limit { Memory: 6291456 } } ResourceLimit { Memory: 1048576000 } 2025-06-25T15:24:56.055933Z node 10 :RESOURCE_BROKER INFO: resource_broker.cpp:1195: New config: Queues { Name: "queue_default" Weight: 30 Limit { Cpu: 2 } } Queues { Name: "queue_compaction_gen0" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_compaction_gen1" Weight: 100 Limit { Cpu: 6 } } Queues { Name: "queue_compaction_gen2" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_gen3" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_compaction_borrowed" Weight: 100 Limit { Cpu: 3 } } Queues { Name: "queue_cs_indexation" Weight: 100 Limit { Cpu: 3 Memory: 2097152 } } Queues { Name: "queue_cs_ttl" Weight: 100 Limit { Cpu: 3 Memory: 2097152 } } Queues { Name: "queue_cs_general" Weight: 100 Limit { Cpu: 3 Memory: 
6291456 } } Queues { Name: "queue_cs_scan_read" Weight: 100 Limit { Cpu: 3 Memory: 3221225472 } } Queues { Name: "queue_cs_normalizer" Weight: 100 Limit { Cpu: 3 Memory: 6291456 } } Queues { Name: "queue_transaction" Weight: 100 Limit { Cpu: 4 } } Queues { Name: "queue_background_compaction" Weight: 10 Limit { Cpu: 1 } } Queues { Name: "queue_scan" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_backup" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_restore" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_kqp_resource_manager" Weight: 30 Limit { Cpu: 4 Memory: 10485760 } } Queues { Name: "queue_build_index" Weight: 100 Limit { Cpu: 10 } } Queues { Name: "queue_ttl" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_datashard_build_stats" Weight: 100 Limit { Cpu: 1 } } Queues { Name: "queue_cdc_initial_scan" Weight: 100 Limit { Cpu: 2 } } Queues { Name: "queue_statistics_scan" Weight: 100 Limit { Cpu: 1 } } Tasks { Name: "unknown" QueueName: "queue_default" DefaultDuration: 60000000 } Tasks { Name: "compaction_gen0" QueueName: "queue_compaction_gen0" DefaultDuration: 10000000 } Tasks { Name: "compaction_gen1" QueueName: "queue_compaction_gen1" DefaultDuration: 30000000 } Tasks { Name: "compaction_gen2" QueueName: "queue_compaction_gen2" DefaultDuration: 120000000 } Tasks { Name: "compaction_gen3" QueueName: "queue_compaction_gen3" DefaultDuration: 600000000 } Tasks { Name: "compaction_borrowed" QueueName: "queue_compaction_borrowed" DefaultDuration: 600000000 } Tasks { Name: "CS::TTL" QueueName: "queue_cs_ttl" DefaultDuration: 600000000 } Tasks { Name: "CS::INDEXATION" QueueName: "queue_cs_indexation" DefaultDuration: 600000000 } Tasks { Name: "CS::GENERAL" QueueName: "queue_cs_general" DefaultDuration: 600000000 } Tasks { Name: "CS::SCAN_READ" QueueName: "queue_cs_scan_read" DefaultDuration: 600000000 } Tasks { Name: "CS::NORMALIZER" QueueName: "queue_cs_normalizer" DefaultDuration: 600000000 } Tasks { Name: "transaction" QueueName: "queue_transaction" DefaultDuration: 600000000 } Tasks { Name: "background_compaction" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen0" QueueName: "queue_background_compaction" DefaultDuration: 10000000 } Tasks { Name: "background_compaction_gen1" QueueName: "queue_background_compaction" DefaultDuration: 20000000 } Tasks { Name: "background_compaction_gen2" QueueName: "queue_background_compaction" DefaultDuration: 60000000 } Tasks { Name: "background_compaction_gen3" QueueName: "queue_background_compaction" DefaultDuration: 300000000 } Tasks { Name: "scan" QueueName: "queue_scan" DefaultDuration: 300000000 } Tasks { Name: "backup" QueueName: "queue_backup" DefaultDuration: 300000000 } Tasks { Name: "restore" QueueName: "queue_restore" DefaultDuration: 300000000 } Tasks { Name: "kqp_query" QueueName: "queue_kqp_resource_manager" DefaultDuration: 600000000 } Tasks { Name: "build_index" QueueName: "queue_build_index" DefaultDuration: 600000000 } Tasks { Name: "ttl" QueueName: "queue_ttl" DefaultDuration: 300000000 } Tasks { Name: "datashard_build_stats" QueueName: "queue_datashard_build_stats" DefaultDuration: 5000000 } Tasks { Name: "cdc_initial_scan" QueueName: "queue_cdc_initial_scan" DefaultDuration: 600000000 } Tasks { Name: "statistics_scan" QueueName: "queue_statistics_scan" DefaultDuration: 600000000 } ResourceLimit { Cpu: 256 Memory: 1048576000 } 2025-06-25T15:24:56.058936Z node 10 :RESOURCE_BROKER INFO: resource_broker.cpp:1240: Configure result: Success: true 
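Aside on the numbers above: the per-consumer limits reported by the memory controller appear consistent with interpolating between each consumer's Min and Max by the reported Coefficient, i.e. limit ≈ min + coefficient * (max - min). The snippet below is not YDB source, just a self-contained check of that assumed formula against the logged values.

    #include <cstdio>

    // Assumed relation inferred from the log: limit = min + coefficient * (max - min).
    // All values in MiB.
    static double ConsumerLimitMiB(double minMiB, double maxMiB, double coefficient) {
        return minMiB + coefficient * (maxMiB - minMiB);
    }

    int main() {
        std::printf("%.1f\n", ConsumerLimitMiB(200, 500, 0.90625)); // ~471.9, logged as 472MiB (SharedCache, 1000MiB cgroup)
        std::printf("%.1f\n", ConsumerLimitMiB(10, 30, 0.90625));   // ~28.1, logged as 28.1MiB (MemTable, 1000MiB cgroup)
        std::printf("%.1f\n", ConsumerLimitMiB(10, 25, 0.90625));   // ~23.6, logged as 23.6MiB (SharedCache, 50MiB cgroup)
        std::printf("%.2f\n", ConsumerLimitMiB(0.5, 1.5, 0.90625)); // ~1.41, logged as 1.41MiB (MemTable, 50MiB cgroup)
        return 0;
    }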
2025-06-25T15:24:56.059494Z node 10 :TABLET_SAUSAGECACHE INFO: shared_sausagecache.cpp:353: Limit memory consumer with 23.6MiB 2025-06-25T15:24:56.059820Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:328: ResourceBroker configure result Success: true 2025-06-25T15:24:56.199762Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:228: Periodic memory stats: AnonRss: none CGroupLimit: 50MiB MemTotal: none MemAvailable: none AllocatedMemory: 0B AllocatorCachesMemory: 0B HardLimit: 50MiB SoftLimit: 37.5MiB TargetUtilization: 25MiB ActivitiesLimitBytes: 1000MiB ConsumersConsumption: 33.7KiB OtherConsumption: 0B ExternalConsumption: 0B TargetConsumersConsumption: 25MiB ResultingConsumersConsumption: 25MiB Coefficient: 0.90625 2025-06-25T15:24:56.200431Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer SharedCache state: Consumption: 0B Limit: 23.6MiB Min: 10MiB Max: 25MiB 2025-06-25T15:24:56.200493Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:273: Consumer MemTable state: Consumption: 33.7KiB Limit: 1.41MiB Min: 512KiB Max: 1.5MiB 2025-06-25T15:24:56.200531Z node 10 :MEMORY_CONTROLLER INFO: memory_controller.cpp:380: Consumer QueryExecution state: Consumption: 1.94MiB Limit: 10MiB 2025-06-25T15:24:56.200638Z node 10 :TABLET_SAUSAGECACHE INFO: shared_sausagecache.cpp:353: Limit memory consumer with 23.6MiB |97.9%| [TM] {RESULT} ydb/core/memory_controller/ut/unittest |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> KafkaProtocol::DescribeConfigsScenario [GOOD] >> KafkaProtocol::AlterConfigsScenario >> TDqPqRdReadActorTests::Backpressure [GOOD] >> TDqPqRdReadActorTests::RowDispatcherIsRestarted2 >> test_inserts.py::TestYdbInsertsOperations::test_insert_multiple_rows [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_concurrent_inserts >> test_workload_oltp.py::TestWorkloadSimpleQueue::test_workload_oltp [FAIL] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_simple_table-False] [GOOD] >> test_select.py::TestDML::test_select[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> TDqPqRdReadActorTests::RowDispatcherIsRestarted2 [GOOD] >> TDqPqRdReadActorTests::TwoPartitionsRowDispatcherIsRestarted >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_simple_table-False] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_async_table-True] [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_workload_oltp.py::TestWorkloadSimpleQueue::test_workload_oltp [FAIL] >> TDqPqRdReadActorTests::TwoPartitionsRowDispatcherIsRestarted [GOOD] >> test_kill_tablets.py::TestKillTablets::test_when_kill_keyvalue_tablet_it_will_be_restarted >> TDqPqRdReadActorTests::IgnoreMessageIfNoSessions >> TopicSessionTests::SlowSession [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/http_api/py3test >> test_http_api.py::TestHttpApi::test_openapi_spec [GOOD] |98.0%| [TM] {RESULT} ydb/tests/fq/http_api/py3test >> TDataShardRSTest::TestCleanupInRS-UseSink [GOOD] >> TDataShardRSTest::TestDelayedRSAckForUnknownTx >> test_postgres.py::TestPostgresSuite::test_postgres_suite[strings] [FAIL] >> TDqPqRdReadActorTests::IgnoreMessageIfNoSessions [GOOD] >> TopicSessionTests::TwoSessionsWithDifferentSchemes >> TDqPqRdReadActorTests::MetadataFields >> 
TDqPqRdReadActorTests::MetadataFields [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/postgresql/py3test >> test_postgres.py::TestPostgresSuite::test_postgres_suite[strings] [FAIL] |98.0%| [TM] {RESULT} ydb/tests/functional/postgresql/py3test >> TDqPqRdReadActorTests::IgnoreCoordinatorResultIfWrongState >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_async_table-True] [GOOD] >> DataShardStats::Tli [GOOD] >> DataShardStats::HasSchemaChanges_BTreeIndex >> TDqPqRdReadActorTests::IgnoreCoordinatorResultIfWrongState [GOOD] >> test_workload_simple_queue.py::TestWorkloadSimpleQueue::test_workload_simple_queue[row] [FAIL] >> test_workload_simple_queue.py::TestWorkloadSimpleQueue::test_workload_simple_queue[column] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> TDqPqReadActorTest::TestReadFromTopic >> KafkaProtocol::AlterConfigsScenario [GOOD] >> KafkaProtocol::LoginWithApiKey |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> Coordinator::RestoreTenantConfiguration-AlterDatabaseCreateHiveFirst-true [GOOD] >> Coordinator::LastEmptyStepResent |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_table-True] [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_schemeshard_limits.py::TestSchemeShardLimitsCase0::test_effective_acls_are_too_large [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_concurrent_inserts [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_transactional_update >> TDataShardRSTest::TestDelayedRSAckForUnknownTx [GOOD] >> TDataShardRSTest::TestDelayedRSAckForOutOfOrderCompletedTx >> test_kill_tablets.py::TestKillTablets::test_when_kill_keyvalue_tablet_it_will_be_restarted [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> TopicSessionTests::TwoSessionsWithDifferentSchemes [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> TopicSessionTests::TwoSessionsWithDifferentColumnTypes >> TTxDataShardPrefixKMeansScan::BuildToPosting [GOOD] >> TTxDataShardPrefixKMeansScan::BuildToBuild |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> Coordinator::LastEmptyStepResent [GOOD] >> CoordinatorVolatile::PlanResentOnReboots >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> TDqPqReadActorTest::TestReadFromTopic [GOOD] >> TDqPqReadActorTest::TestReadFromTopicFromNow >> KafkaProtocol::LoginWithApiKey [GOOD] >> KafkaProtocol::LoginWithApiKeyWithoutAt |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.0%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/hive/py3test |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> test_schemeshard_limits.py::TestSchemeShardLimitsCase0::test_effective_acls_are_too_large [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> TDqPqReadActorTest::TestReadFromTopicFromNow [GOOD] >> TDataShardRSTest::TestDelayedRSAckForOutOfOrderCompletedTx [GOOD] >> TDataShardRSTest::TestGenericReadSetDecisionCommit >> TDqPqReadActorTest::ReadWithFreeSpace >> test_inserts.py::TestYdbInsertsOperations::test_transactional_update [GOOD] >> DataShardStats::HasSchemaChanges_BTreeIndex [GOOD] >> DataShardStats::HasSchemaChanges_ByKeyFilter >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_when_kill_keyvalue_tablet_it_will_be_restarted [GOOD] |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.0%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_schemeshard_limits.py::TestSchemeShardLimitsCase1::test_too_large_acls [GOOD] >> CoordinatorVolatile::PlanResentOnReboots [GOOD] >> CoordinatorVolatile::MediatorReconnectPlanRace >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_table-True] [GOOD] |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> TDqPqReadActorTest::ReadWithFreeSpace [GOOD] >> TopicSessionTests::TwoSessionsWithDifferentColumnTypes [GOOD] >> test_create_tablets.py::TestHive::test_when_create_tablets_after_bs_groups_and_kill_hive_then_tablets_start >> TDqPqReadActorTest::ReadNonExistentTopic >> TDqPqReadActorTest::ReadNonExistentTopic [GOOD] >> TDqPqReadActorTest::TestSaveLoadPqRead >> TopicSessionTests::RestartSessionIfQueryStopped |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_same_values |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_simple_table-False] [GOOD] >> test_create_tablets.py::TestHive::test_when_create_tablets_then_can_lookup_them |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_simple_table-False] [GOOD] >> TDataShardRSTest::TestGenericReadSetDecisionCommit [GOOD] >> TDataShardRSTest::TestGenericReadSetDecisionAbort >> KafkaProtocol::LoginWithApiKeyWithoutAt [GOOD] >> KafkaProtocol::MetadataScenario >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_same_values [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_same_values_simple |98.1%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/tx/conveyor_composite/ut/unittest >> CoordinatorVolatile::MediatorReconnectPlanRace [GOOD] >> CoordinatorVolatile::CoordinatorMigrateUncommittedVolatileTx |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_async_table-True] [GOOD] |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/limits/py3test >> test_schemeshard_limits.py::TestSchemeShardLimitsCase1::test_too_large_acls [GOOD] >> TSelectFromViewTest::ReadTestCasesFromFiles [GOOD] >> TSelectFromViewTest::QueryCacheIsUpdated |98.1%| [TA] $(B)/ydb/tests/functional/limits/test-results/py3test/{meta.json ... results_accumulator.log} |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.1%| [TA] {RESULT} $(B)/ydb/tests/functional/limits/test-results/py3test/{meta.json ... results_accumulator.log} >> DataShardStats::HasSchemaChanges_ByKeyFilter [GOOD] >> DataShardStats::HasSchemaChanges_Columns |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_async_table-True] [GOOD] |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_same_values_simple [GOOD] |98.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_with_valid_and_invalid_data >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_with_valid_and_invalid_data [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_parallel >> CompositeConveyorTests::Test10xDistribution |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TDataShardRSTest::TestGenericReadSetDecisionAbort [GOOD] >> CoordinatorVolatile::CoordinatorMigrateUncommittedVolatileTx [GOOD] >> CoordinatorVolatile::CoordinatorRestartWithEnqueuedVolatileStep |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_table-True] [GOOD] |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_create_tablets.py::TestHive::test_when_create_tablets_after_bs_groups_and_kill_hive_then_tablets_start [GOOD] |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.2%| [TM] {asan, default-linux-x86_64, 
release} ydb/tests/functional/hive/py3test |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_rs/unittest >> TDataShardRSTest::TestGenericReadSetDecisionAbort [GOOD] Test command err: 2025-06-25T15:22:22.905755Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:22:22.905923Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:22:22.905988Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000975/r3tmp/tmpl7M7ZK/pdisk_1.dat 2025-06-25T15:22:23.462279Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:22:23.472524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:22:23.539340Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:22:23.572785Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750864939655282 != 1750864939655286 2025-06-25T15:22:23.632834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:22:23.632991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:22:23.649180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:22:23.797434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:22:23.908990Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:641:2540] 2025-06-25T15:22:23.909360Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:22:23.985583Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:22:23.985810Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:22:23.987360Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:22:23.987433Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:22:23.987495Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:22:23.991796Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:22:23.992248Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:22:23.992358Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:677:2540] in generation 1 2025-06-25T15:22:23.995986Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:647:2542] 2025-06-25T15:22:23.996152Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:22:24.005690Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:22:24.005837Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:22:24.007001Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-25T15:22:24.007066Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-25T15:22:24.007122Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-25T15:22:24.007368Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:22:24.007555Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:22:24.007601Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:685:2542] in generation 1 2025-06-25T15:22:24.010362Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [1:651:2544] 2025-06-25T15:22:24.010528Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:22:24.019177Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:653:2546] 2025-06-25T15:22:24.019360Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:22:24.027668Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:22:24.027854Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:22:24.029554Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037891 2025-06-25T15:22:24.029615Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037891 2025-06-25T15:22:24.029657Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037891 2025-06-25T15:22:24.029909Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:22:24.030052Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:22:24.030113Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037891 persisting started state actor id [1:712:2544] in generation 1 2025-06-25T15:22:24.030387Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:22:24.030489Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:22:24.031600Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-25T15:22:24.031667Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-25T15:22:24.031702Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-25T15:22:24.031928Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: 
TDataShard::TTxInit::Complete 2025-06-25T15:22:24.031980Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:22:24.032022Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:713:2546] in generation 1 2025-06-25T15:22:24.043077Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:22:24.079018Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:22:24.080389Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:22:24.080535Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:718:2581] 2025-06-25T15:22:24.080577Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:22:24.080608Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:22:24.080639Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:22:24.080736Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:22:24.080769Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-25T15:22:24.080824Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:22:24.080866Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:719:2582] 2025-06-25T15:22:24.080884Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-25T15:22:24.080931Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-25T15:22:24.080954Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:22:24.082189Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:22:24.082234Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037891 2025-06-25T15:22:24.082300Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037891 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:22:24.082357Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037891, actorId: [1:720:2583] 2025-06-25T15:22:24.082378Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037891 2025-06-25T15:22:24.082398Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037891, state: WaitScheme 2025-06-25T15:22:24.082417Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-25T15:22:24.082610Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 
2025-06-25T15:22:24.082638Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-25T15:22:24.082697Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:22:24.082737Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:721:2584] 2025-06-25T15:22:24.082766Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-25T15:22:24.082786Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-25T15:22:24.082805Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-25T15:22:24.083030Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:22:24.083119Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets t ... BUG: datashard.cpp:3359: Receive RS at 72075186224037888 source 72075186224037889 dest 72075186224037888 producer 72075186224037889 txId 281474976715664 2025-06-25T15:25:36.183117Z node 6 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037888 got read set: {TEvReadSet step# 2002 txid# 281474976715664 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-25T15:25:36.183261Z node 6 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 2002} 2025-06-25T15:25:36.183494Z node 6 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:25:36.183551Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2002:281474976715664] at 72075186224037888 on unit CompleteWrite 2025-06-25T15:25:36.183628Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2002 : 281474976715664] from 72075186224037888 at tablet 72075186224037888 send result to client [6:947:2702] 2025-06-25T15:25:36.183706Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:25:36.183810Z node 6 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-25T15:25:36.183883Z node 6 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Reply at 72075186224037888 {TEvReadSet step# 2002 txid# 281474976715664 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} ... 
nodata readset 2025-06-25T15:25:36.184000Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269287425, Sender [6:627:2531], Recipient [6:712:2591]: {TEvReadSet step# 2002 txid# 281474976715664 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-25T15:25:36.184030Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3151: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-25T15:25:36.184059Z node 6 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715664 2025-06-25T15:25:36.184109Z node 6 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 2002 txid# 281474976715664 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 3} 2025-06-25T15:25:36.184152Z node 6 :TX_DATASHARD TRACE: volatile_tx.cpp:863: Processed readset without data from 72075186224037888 to 72075186224037889 at tablet 72075186224037889 2025-06-25T15:25:36.184467Z node 6 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2751: SelfId: [6:947:2702], SessionActorId: [6:889:2702], Got LOCKS BROKEN for table. ShardID=72075186224037888, Sink=[6:947:2702].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-25T15:25:36.184647Z node 6 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:3004: SelfId: [6:947:2702], SessionActorId: [6:889:2702], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[6:889:2702]. isRollback=0 2025-06-25T15:25:36.185157Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:1895: SessionId: ydb://session/3?node_id=6&id=NzYzNGJhYjctNGNkY2JhZWItYzY2ODlmYS0zNzVkMmI2Mw==, ActorId: [6:889:2702], ActorState: ExecuteState, TraceId: 01jykv3jct35zybtf8p6pew4bj, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [6:948:2702] from: [6:947:2702] 2025-06-25T15:25:36.185431Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 278003712, Sender [6:947:2702], Recipient [6:627:2531]: NKikimrDataEvents.TEvWrite TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715662 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-25T15:25:36.185465Z node 6 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-25T15:25:36.185905Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435074, Sender [6:627:2531], Recipient [6:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T15:25:36.185953Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-25T15:25:36.186043Z node 6 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-25T15:25:36.186191Z node 6 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715662 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-25T15:25:36.186300Z node 6 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715662, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-25T15:25:36.186403Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite 2025-06-25T15:25:36.186470Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T15:25:36.186526Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite 2025-06-25T15:25:36.186574Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-25T15:25:36.186622Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-25T15:25:36.186668Z node 6 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v2002/281474976715664 IncompleteEdge# v{min} UnprotectedReadEdge# v2000/18446744073709551615 ImmediateWriteEdge# v2001/0 ImmediateWriteEdgeReplied# v2001/0 2025-06-25T15:25:36.186738Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-25T15:25:36.186788Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T15:25:36.186815Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 
2025-06-25T15:25:36.186839Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteWrite 2025-06-25T15:25:36.186862Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteWrite 2025-06-25T15:25:36.186898Z node 6 :TX_DATASHARD DEBUG: execute_write_unit.cpp:251: Executing write operation for [0:6] at 72075186224037888 2025-06-25T15:25:36.187015Z node 6 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715662 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 2025-06-25T15:25:36.187069Z node 6 :TX_DATASHARD DEBUG: execute_write_unit.cpp:420: Skip empty write operation for [0:6] at 72075186224037888 2025-06-25T15:25:36.187124Z node 6 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-25T15:25:36.187234Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:25:36.187266Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite 2025-06-25T15:25:36.187313Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-25T15:25:36.187357Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:25:36.187395Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is DelayComplete 2025-06-25T15:25:36.187426Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-25T15:25:36.187475Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-25T15:25:36.187513Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-25T15:25:36.187565Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-25T15:25:36.187589Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-25T15:25:36.187619Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-25T15:25:36.187699Z node 6 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-25T15:25:36.187751Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-25T15:25:36.187817Z node 6 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-25T15:25:36.187905Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:25:36.188124Z node 6 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-25T15:25:36.188159Z node 6 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for 
[2002:281474976715664] at 72075186224037889 on unit CompleteWrite 2025-06-25T15:25:36.188207Z node 6 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-25T15:25:36.188264Z node 6 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-25T15:25:36.188630Z node 6 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1992: ActorId: [6:948:2702] TxId: 281474976715664. Ctx: { TraceId: 01jykv3jct35zybtf8p6pew4bj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=NzYzNGJhYjctNGNkY2JhZWItYzY2ODlmYS0zNzVkMmI2Mw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-25T15:25:36.189049Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2633: SessionId: ydb://session/3?node_id=6&id=NzYzNGJhYjctNGNkY2JhZWItYzY2ODlmYS0zNzVkMmI2Mw==, ActorId: [6:889:2702], ActorState: ExecuteState, TraceId: 01jykv3jct35zybtf8p6pew4bj, Create QueryResponse for error on request, msg: 2025-06-25T15:25:36.189929Z node 6 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 275709965, Sender [6:63:2110], Recipient [6:627:2531]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715662 LockNode: 6 Status: STATUS_NOT_FOUND |98.2%| [TM] {RESULT} ydb/core/tx/datashard/ut_rs/unittest >> test_create_tablets.py::TestHive::test_when_create_tablets_then_can_lookup_them [GOOD] |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> KafkaProtocol::MetadataScenario [GOOD] >> KafkaProtocol::MetadataInServerlessScenario |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TopicSessionTests::RestartSessionIfQueryStopped [GOOD] >> RowDispatcherTests::OneClientOneSession >> test_drain.py::TestHive::test_drain_tablets >> RowDispatcherTests::OneClientOneSession [GOOD] |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> RowDispatcherTests::TwoClientOneSession >> RowDispatcherTests::TwoClientOneSession [GOOD] >> RowDispatcherTests::SessionError >> RowDispatcherTests::SessionError [GOOD] >> RowDispatcherTests::CoordinatorSubscribe >> RowDispatcherTests::CoordinatorSubscribe [GOOD] |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> RowDispatcherTests::CoordinatorSubscribeBeforeCoordinatorChanged [GOOD] >> RowDispatcherTests::TwoClients4Sessions [GOOD] |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> RowDispatcherTests::ReinitConsumerIfNewGeneration >> CoordinatorVolatile::CoordinatorRestartWithEnqueuedVolatileStep [GOOD] >> RowDispatcherTests::ReinitConsumerIfNewGeneration [GOOD] |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> RowDispatcherTests::HandleTEvUndelivered >> RowDispatcherTests::HandleTEvUndelivered [GOOD] >> TTxDataShardPrefixKMeansScan::BuildToBuild [GOOD] >> RowDispatcherTests::TwoClientTwoConnection >> TTxDataShardRecomputeKMeansScan::BadRequest >> RowDispatcherTests::TwoClientTwoConnection [GOOD] >> RowDispatcherTests::ProcessNoSession >> RowDispatcherTests::ProcessNoSession [GOOD] |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> RowDispatcherTests::IgnoreWrongPartitionId |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> RowDispatcherTests::IgnoreWrongPartitionId [GOOD] >> RowDispatcherTests::SessionFatalError >> RowDispatcherTests::SessionFatalError [GOOD] |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TDqPqReadActorTest::TestSaveLoadPqRead [GOOD] >> TDqPqReadActorTest::LoadCorruptedState ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/coordinator/ut/unittest >> CoordinatorVolatile::CoordinatorRestartWithEnqueuedVolatileStep [GOOD] Test command err: 2025-06-25T15:23:44.393440Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:23:44.393900Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:23:44.394124Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:23:44.394238Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T15:23:44.394537Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:23:44.394572Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0006d4/r3tmp/tmpr3Az4Q/pdisk_1.dat 2025-06-25T15:23:45.122292Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded ... waiting for the first mediator step 2025-06-25T15:23:45.500462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:45.500624Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:45.508353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:45.508455Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:45.533796Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:23:45.534413Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:23:45.534864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected ... found first step to be 500 2025-06-25T15:23:45.843004Z node 1 :TX_COORDINATOR DEBUG: coordinator__acquire_read_step.cpp:97: tablet# 72057594046316545 HANDLE TEvAcquireReadStep ... acquired read step 500 ... waiting for the next mediator step ... found second step to be 1000 ... read step subscribe result: [500, 1000] 2025-06-25T15:23:46.292212Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:23:46.293586Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... read step subscribe update: 2000 2025-06-25T15:23:46.993517Z node 1 :TX_COORDINATOR DEBUG: coordinator__acquire_read_step.cpp:97: tablet# 72057594046316545 HANDLE TEvAcquireReadStep ... acquired read step 2000 ... read step subscribe result: [2000, 2000] ... read step subscribe update: 2500 ... read step subscribe update: 2500 ... read step subscribe update: 3000 ... read step subscribe update: 4000 ... read step subscribe update: 5000 ... read step subscribe update: 6000 ... 
read step subscribe result: [2000, 6000] 2025-06-25T15:23:49.933494Z node 2 :TX_PROXY WARN: proxy_impl.cpp:227: actor# [2:245:2131] HANDLE TEvClientDestroyed from tablet# 72057594046447617 2025-06-25T15:23:49.933961Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 2 2025-06-25T15:23:49.934020Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037936131] NodeDisconnected NodeId# 2 2025-06-25T15:23:49.934067Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 2 2025-06-25T15:23:49.934118Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037968897] NodeDisconnected NodeId# 2 2025-06-25T15:23:49.934172Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037936129] NodeDisconnected NodeId# 2 2025-06-25T15:23:49.934389Z node 2 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:113:2092] ServerId# [1:997:2594] TabletId# 72057594037932033 PipeClientId# [2:113:2092] 2025-06-25T15:23:49.934940Z node 1 :HIVE WARN: hive_impl.cpp:791: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeDisconnected, NodeId 2 2025-06-25T15:23:49.935030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connected -> Disconnecting 2025-06-25T15:23:49.935897Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnecting -> Disconnected 2025-06-25T15:23:49.950963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:49.987073Z node 1 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-25T15:23:49.987739Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected ... read step subscribe update: 7000 ... read step subscribe update: 8000 ... read step subscribe update: 9000 ... read step subscribe update: 10000 ... read step subscribe update: 11000 2025-06-25T15:24:01.775696Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:628:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:24:01.776020Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:24:01.776186Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T15:24:01.778097Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:625:2319], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:24:01.778312Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:24:01.778578Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0006d4/r3tmp/tmp4upoSy/pdisk_1.dat 2025-06-25T15:24:02.139304Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:24:02.270975Z node 3 :TX_COORDINATOR DEBUG: coordinator__last_step_subscriptions.cpp:52: Processing TEvSubscribeLastStep from [4:1061:2338] at coordinator 72057594046316545 with seqNo 123 and cookie 234 2025-06-25T15:24:02.371927Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:24:02.372049Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:24:02.377586Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:24:02.377691Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:24:02.405340Z node 3 :HIVE WARN: hive_impl.cpp:781: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-25T15:24:02.406332Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:24:02.406733Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:24:03.125794Z node 3 :TX_COORDINATOR DEBUG: coordinator__last_step_subscriptions.cpp:52: Processing TEvSubscribeLastStep from [4:1062:2339] at coordinator 72057594046316545 with seqNo 234 and cookie 345 2025-06-25T15:24:03.212193Z node 4 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:24:03.212293Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:24:03.914341Z node 3 :TX_COORDINATOR DEBUG: coordinator__last_step_subscriptions.cpp:52: Processing TEvSubscribeLastStep from [4:1061:2338] at coordinator 72057594046316545 with seqNo 124 and cookie 245 2025-06-25T15:24:03.926191Z node 3 :TX_COORDINATOR DEBUG: coordinator__last_step_subscriptions.cpp:37: Ignored TEvSubscribeLastStep from [4:1061:2338] at coordinator 72057594046316545 with seqNo 123 existing seqNo 124 2025-06-25T15:24:04.630913Z node 3 :TX_COORDINATOR DEBUG: coordinator__last_step_subscriptions.cpp:97: Processing TEvUnsubscribeLastStep from [4:1061:2338] at coordinator 72057594046316545 with seqNo 124 2025-06-25T15:24:11.675994Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:260:2306], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:24:11.676157Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-25T15:24:11.676383Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0006d4/r3tmp/tmpUBrMOc/pdisk_1.dat 2025-06-25T15:24:11.940259Z node 5 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 5 Type# 268639257 2025-06-25T15:24:11.963832Z node 5 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:24:11.967483Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:33:2080] 1750865048758596 != 1750865048758600 2025-06-25T15:24:12.014129Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:24:12.014259Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:24:12.026990Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:24:12.22 ... rker# C1 ... waiting for blocked put responses ... coordinator 72057594046316545 gen 2 is planning step 1050 2025-06-25T15:25:41.869026Z node 20 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 10000000 has been planned 2025-06-25T15:25:41.869142Z node 20 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 10000000 for mediator 72057594046382081 tablet 72057594047365120 2025-06-25T15:25:41.869666Z node 20 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; ... blocking put [72057594046316545:2:7:1:24576:168:0] response ... waiting for planning for the required step ... waiting for planning for the required step ... coordinator 72057594046316545 gen 2 is planning step 1100 ... starting a new coordinator instance ... waiting for migrated state 2025-06-25T15:25:41.929346Z node 20 :TX_COORDINATOR INFO: coordinator_impl.cpp:614: OnTabletStop: 72057594046316545 reason = ReasonDemoted 2025-06-25T15:25:41.929589Z node 20 :TX_COORDINATOR INFO: coordinator_impl.cpp:614: OnTabletStop: 72057594046316545 reason = ReasonDemoted 2025-06-25T15:25:41.949460Z node 20 :TX_COORDINATOR INFO: coordinator__init.cpp:120: tablet# 72057594046316545 CreateTxInit Complete 2025-06-25T15:25:41.949713Z node 20 :TX_COORDINATOR INFO: coordinator_impl.cpp:614: OnTabletStop: 72057594046316545 reason = ReasonDemoted ... blocking state response from [20:512:2363] to [20:648:2521] LastSentStep: 1000 LastAcquiredStep: 0 LastConfirmedStep: 0 ... 
unblocking put responses and requests 2025-06-25T15:25:41.950592Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 10000000 stepId# 1050 Status# 17 SEND EvProposeTransactionStatus to# [20:556:2481] Proxy 2025-06-25T15:25:41.952025Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:322: tablet# 72057594046382081 server# [20:519:2452] disconnnected 2025-06-25T15:25:41.952129Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:201: Actor# [20:533:2460] MediatorId# 72057594046382081 HANDLE TEvServerDisconnected server# [20:519:2452] ... trying to plan tx 10000011 ... waiting for planned another persistent tx 2025-06-25T15:25:41.978018Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:316: tablet# 72057594046382081 server# [20:656:2532] connected 2025-06-25T15:25:41.978239Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:139: tablet# 72057594046382081 HANDLE EvCoordinatorSync 2025-06-25T15:25:41.978305Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:83: tablet# 72057594046382081 SEND EvCoordinatorSyncResult to# [20:651:2529] Cookie# 1 CompleteStep# 1000 LatestKnownStep# 1000 SubjectiveTime# 952 Coordinator# 72057594046316545 2025-06-25T15:25:41.978465Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:183: tablet# 72057594046316545 txid# 10000011 HANDLE EvProposeTransaction marker# C0 2025-06-25T15:25:41.978529Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:29: tablet# 72057594046316545 txid# 10000011 step# 1100 Status# 16 SEND to# [20:556:2481] Proxy marker# C1 2025-06-25T15:25:41.978645Z node 20 :TX_COORDINATOR NOTICE: coordinator_impl.cpp:412: tablet# 72057594046316545 HANDLE EvMediatorQueueRestart MediatorId# 72057594046382081 2025-06-25T15:25:41.978700Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 1050, txid# 10000000 marker# C2 ... 
observed step: Transactions { AffectedSet: 72057594047365120 TxId: 10000000 } Step: 1050 PrevStep: 0 MediatorID: 72057594046382081 CoordinatorID: 72057594046316545 ActiveCoordinatorGeneration: 3 2025-06-25T15:25:41.996286Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594046382081 HANDLE EvCoordinatorStep coordinator# 72057594046316545 step# 1050 2025-06-25T15:25:41.996446Z node 20 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594046382081], Coordinator [72057594046316545], step# [1050] transactions [1] 2025-06-25T15:25:41.996650Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:205: tablet# 72057594046382081 SEND EvCommitStep to# [20:533:2460] ExecQueue {TMediateStep From 1000 To# 1050Steps: {{TCoordinatorStep step# 1050 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 10000000 AckTo# [20:651:2529]}}TabletsToTransaction: {{tablet# 72057594047365120 txid# 10000000}}}}} marker# M0 2025-06-25T15:25:41.996902Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [20:533:2460] MediatorId# 72057594046382081 HANDLE TEvCommitStep {TMediateStep From 1000 To# 1050Steps: {{TCoordinatorStep step# 1050 PrevStep# 0Transactions: {{TTx Moderator# 0 txid# 10000000 AckTo# [20:651:2529]}}TabletsToTransaction: {{tablet# 72057594047365120 txid# 10000000}}}}} marker# M1 2025-06-25T15:25:41.997001Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [20:533:2460] MediatorId# 72057594046382081 SEND Ev to# [20:534:2461] step# 1050 forTablet# 72057594047365120 txid# 10000000 marker# M3 2025-06-25T15:25:41.997103Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:533:2460] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:534:2461] bucket.ActiveActor step# 1050 2025-06-25T15:25:41.997161Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:533:2460] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:535:2462] bucket.ActiveActor step# 1050 2025-06-25T15:25:41.997300Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [20:534:2461] Mediator# 72057594046382081 HANDLE {TEvCommitTabletStep step# 1050 TabletId# 72057594047365120 Transactions {{TTx Moderator# 0 txid# 10000000 AckTo# [20:651:2529]}}} marker# M4 2025-06-25T15:25:41.997499Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:535:2462] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 1050} 2025-06-25T15:25:41.997715Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:534:2461] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 1050} 2025-06-25T15:25:41.998269Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:248: Actor# [20:534:2461] Mediator# 72057594046382081 HANDLE NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594047365120 Status: OK ServerId: [20:659:2534] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T15:25:41.998372Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [20:534:2461] Mediator# 72057594046382081 SEND to# 72057594047365120 {TEvPlanStep step# 1050 MediatorId# 72057594046382081 TabletID 72057594047365120} ... observed tablet step: Transactions { TxId: 10000000 AckTo { RawX1: 0 RawX2: 0 } } Step: 1050 MediatorID: 72057594046382081 TabletID: 72057594047365120 ... blocked accept from 72057594047365120 ... waiting for planned another persistent tx ... 
coordinator 72057594046316545 gen 3 is planning step 1100 2025-06-25T15:25:42.013882Z node 20 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:184: Transaction 10000011 has been planned 2025-06-25T15:25:42.014217Z node 20 :TX_COORDINATOR DEBUG: coordinator__plan_step.cpp:197: Planned transaction 10000011 for mediator 72057594046382081 tablet 72057594047365120 2025-06-25T15:25:42.015214Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:579: Send from# 72057594046316545 to mediator# 72057594046382081, step# 1100, txid# 10000011 marker# C2 2025-06-25T15:25:42.015323Z node 20 :TX_COORDINATOR DEBUG: coordinator_impl.cpp:424: tablet# 72057594046316545 txid# 10000011 stepId# 1100 Status# 17 SEND EvProposeTransactionStatus to# [20:556:2481] Proxy ... observed step: Transactions { AffectedSet: 72057594047365120 TxId: 10000011 } Step: 1100 PrevStep: 1050 MediatorID: 72057594046382081 CoordinatorID: 72057594046316545 ActiveCoordinatorGeneration: 3 2025-06-25T15:25:42.015720Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:280: tablet# 72057594046382081 HANDLE EvCoordinatorStep coordinator# 72057594046316545 step# 1100 2025-06-25T15:25:42.015797Z node 20 :TX_MEDIATOR INFO: mediator_impl.cpp:287: Coordinator step: Mediator [72057594046382081], Coordinator [72057594046316545], step# [1100] transactions [1] 2025-06-25T15:25:42.015956Z node 20 :TX_MEDIATOR DEBUG: mediator_impl.cpp:205: tablet# 72057594046382081 SEND EvCommitStep to# [20:533:2460] ExecQueue {TMediateStep From 1050 To# 1100Steps: {{TCoordinatorStep step# 1100 PrevStep# 1050Transactions: {{TTx Moderator# 0 txid# 10000011 AckTo# [20:651:2529]}}TabletsToTransaction: {{tablet# 72057594047365120 txid# 10000011}}}}} marker# M0 2025-06-25T15:25:42.016143Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [20:533:2460] MediatorId# 72057594046382081 HANDLE TEvCommitStep {TMediateStep From 1050 To# 1100Steps: {{TCoordinatorStep step# 1100 PrevStep# 1050Transactions: {{TTx Moderator# 0 txid# 10000011 AckTo# [20:651:2529]}}TabletsToTransaction: {{tablet# 72057594047365120 txid# 10000011}}}}} marker# M1 2025-06-25T15:25:42.016239Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:54: Actor# [20:533:2460] MediatorId# 72057594046382081 SEND Ev to# [20:534:2461] step# 1100 forTablet# 72057594047365120 txid# 10000011 marker# M3 2025-06-25T15:25:42.016369Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:533:2460] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:534:2461] bucket.ActiveActor step# 1100 2025-06-25T15:25:42.016443Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:533:2460] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:535:2462] bucket.ActiveActor step# 1100 2025-06-25T15:25:42.016577Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:183: Actor# [20:534:2461] Mediator# 72057594046382081 HANDLE {TEvCommitTabletStep step# 1100 TabletId# 72057594047365120 Transactions {{TTx Moderator# 0 txid# 10000011 AckTo# [20:651:2529]}}} marker# M4 2025-06-25T15:25:42.016674Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:120: Actor# [20:534:2461] Mediator# 72057594046382081 SEND to# 72057594047365120 {TEvPlanStep step# 1100 MediatorId# 72057594046382081 TabletID 72057594047365120} 2025-06-25T15:25:42.016775Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:535:2462] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 1100} 2025-06-25T15:25:42.016922Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: 
tablet_queue.cpp:319: Actor# [20:534:2461] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 1100} ... observed tablet step: Transactions { TxId: 10000011 AckTo { RawX1: 0 RawX2: 0 } } Step: 1100 MediatorID: 72057594046382081 TabletID: 72057594047365120 ... blocked accept from 72057594047365120 ... coordinator 72057594046316545 gen 3 is planning step 1150 ... observed step: Step: 1150 PrevStep: 1100 MediatorID: 72057594046382081 CoordinatorID: 72057594046316545 ActiveCoordinatorGeneration: 3 2025-06-25T15:25:42.029350Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:72: Actor# [20:533:2460] MediatorId# 72057594046382081 HANDLE TEvCommitStep {TMediateStep From 1100 To# 1150Steps: {{TCoordinatorStep step# 1150 PrevStep# 1100}}} marker# M1 2025-06-25T15:25:42.029419Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:533:2460] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:534:2461] bucket.ActiveActor step# 1150 2025-06-25T15:25:42.029463Z node 20 :TX_MEDIATOR_EXEC_QUEUE DEBUG: execute_queue.cpp:119: Actor# [20:533:2460] MediatorId# 72057594046382081 SEND TEvStepPlanComplete to# [20:535:2462] bucket.ActiveActor step# 1150 2025-06-25T15:25:42.029516Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:534:2461] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 1150} 2025-06-25T15:25:42.029556Z node 20 :TX_MEDIATOR_TABLETQUEUE DEBUG: tablet_queue.cpp:319: Actor# [20:535:2462] Mediator# 72057594046382081 HANDLE {TEvStepPlanComplete step# 1150} |98.2%| [TM] {RESULT} ydb/core/tx/coordinator/ut/unittest >> TSelectFromViewTest::QueryCacheIsUpdated [GOOD] >> BulkUpsert::BulkUpsert [GOOD] |98.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TDqPqReadActorTest::LoadCorruptedState [GOOD] >> TDqPqReadActorTest::TestLoadFromSeveralStates |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> DataShardStats::HasSchemaChanges_Columns [GOOD] >> DataShardStats::HasSchemaChanges_Families |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_create_tablets.py::TestHive::test_when_create_tablets_after_bs_groups_and_kill_hive_then_tablets_start [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kqp/ut/view/unittest >> TSelectFromViewTest::QueryCacheIsUpdated [GOOD] Test command err: Trying to start YDB, gRPC: 3230, MsgBus: 14531 2025-06-25T15:21:56.861778Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519907555255205427:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:21:56.861905Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00091a/r3tmp/tmplVJ0My/pdisk_1.dat 2025-06-25T15:21:57.353038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:21:57.353153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:21:57.402008Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:21:57.410109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3230, node 1 2025-06-25T15:21:57.740990Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:21:57.741022Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:21:57.741032Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:21:57.741177Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:21:57.872942Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:14531 TClient is connected to server localhost:14531 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:21:59.150923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... 2025-06-25T15:22:00.489693Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519907572435075213:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:22:00.489838Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Trying to start YDB, gRPC: 13403, MsgBus: 4458 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00091a/r3tmp/tmph2uQaj/pdisk_1.dat 2025-06-25T15:22:02.590706Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519907582907855538:2226];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:22:02.601277Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T15:22:02.733463Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:22:02.736740Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519907582907855348:2080] 1750864922481480 != 1750864922481483 2025-06-25T15:22:02.755108Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:22:02.755198Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:22:02.761016Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13403, node 2 2025-06-25T15:22:02.892945Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:22:02.892971Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:22:02.892980Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:22:02.893100Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4458 TClient is connected to server localhost:4458 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-25T15:22:03.357721Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:22:03.368048Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-25T15:22:03.480810Z node 2 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:22:05.700510Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7519907595792757870:2292], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:22:05.700604Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Trying to start YDB, gRPC: 23616, MsgBus: 27884 2025-06-25T15:22:06.562533Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7519907599381771144:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:22:06.562587Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00091a/r3tmp/tmpKCu6Sk/pdisk_1.dat 2025-06-25T15:22:06.828712Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7519907599381771115:2080] 1750864926561804 != 1750864926561807 2025-06-25T15:22:06.849191Z node 3 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:22:06.881120Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:22:06.881203Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:22:06.882882Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23616, node 3 2025-06-25T15:22:06.996457Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:22:06.996483Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:22:06.996491Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:22:06.996605Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27884 2025-06-25T15:22:07.586514Z node 3 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:27884 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRoot ... 
pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpLogical-RewriteEquiJoin 2025-06-25 15:25:29.145 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpLogical-JoinToIndexLookup 2025-06-25 15:25:29.186 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpLogical-JoinToIndexLookup 2025-06-25 15:25:29.255 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildReadTableRangesStage 2025-06-25 15:25:29.277 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-PushAggregateCombineToStage 2025-06-25 15:25:29.295 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-ExpandAggregatePhase 2025-06-25 15:25:29.315 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-ExpandAggregatePhase 2025-06-25 15:25:29.341 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-ExpandAggregatePhase 2025-06-25 15:25:29.364 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-ExpandAggregatePhase 2025-06-25 15:25:29.403 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildShuffleStage 2025-06-25 15:25:29.460 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildPrecompute 2025-06-25 15:25:29.513 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildStreamLookupTableStages 2025-06-25 15:25:29.543 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-PrecomputeToInput 2025-06-25 15:25:29.575 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-PushSkipNullMembersToStage 2025-06-25 15:25:29.608 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildJoin 2025-06-25 15:25:29.648 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-PrecomputeToInput 2025-06-25 15:25:29.674 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildPrecompute 2025-06-25 15:25:29.699 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildStreamLookupTableStages 2025-06-25 15:25:29.723 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-PrecomputeToInput 2025-06-25 15:25:29.751 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-PushSkipNullMembersToStage 2025-06-25 15:25:29.782 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildJoin 2025-06-25 15:25:29.818 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-PrecomputeToInput 2025-06-25 15:25:29.847 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-PushExtractMembersToStage 2025-06-25 15:25:29.887 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-PushFlatmapToStage 2025-06-25 15:25:29.918 INFO ydb-core-kqp-ut-view(pid=1012804, 
tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-BuildSortStage 2025-06-25 15:25:29.959 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPhysical-RewriteKqpReadTable 2025-06-25 15:25:30.189 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPeephole-RewriteMapJoinWithMapCore 2025-06-25 15:25:30.389 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPeephole-RewriteMapJoinWithMapCore 2025-06-25 15:25:30.599 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE2231B640) [KQP] yql_optimize.cpp:135: KqpPeepholeFinal-SetCombinerMemoryLimit 2025-06-25 15:25:31.112 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE22B1C640) [KQP] kqp_host.cpp:1386: Compiled query: ( (return (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/count_episodes_with_titles")) '('typeId (String '"VIEW"))) (Void) '('('mode 'dropObject)))) ) 2025-06-25 15:25:31.146 INFO ydb-core-kqp-ut-view(pid=1012804, tid=0x00007FDE22B1C640) [KQP] kqp_transform.cpp:33: Optimized expr: ( (let $1 (DataSink '"kikimr" '"db")) (let $2 (KiDropObject! world $1 '"/Root/count_episodes_with_titles" '"VIEW" '() '0)) (return (Commit! $2 $1 '('('"mode" '"flush")))) ) 2025-06-25T15:25:31.176482Z node 22 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710740, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 20032, MsgBus: 32237 2025-06-25T15:25:33.376580Z node 23 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[23:7519908490215969049:2131];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/00091a/r3tmp/tmpAlRfsK/pdisk_1.dat 2025-06-25T15:25:33.810626Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-25T15:25:33.976547Z node 23 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:25:33.980771Z node 23 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [23:7519908490215968958:2080] 1750865133318222 != 1750865133318225 2025-06-25T15:25:33.988234Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:25:33.995396Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:25:34.006214Z node 23 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(23, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20032, node 23 2025-06-25T15:25:34.240731Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-25T15:25:34.240766Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-25T15:25:34.240782Z node 23 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-25T15:25:34.241024Z node 23 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:25:34.378434Z node 23 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event 
for task executor;ev_type=NActors::TEvents::TEvWakeup; TClient is connected to server localhost:32237 TClient is connected to server localhost:32237 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-25T15:25:35.791727Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:25:38.336484Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[23:7519908490215969049:2131];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:25:38.336619Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-25T15:25:42.267399Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7519908528870675284:2300], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:25:42.267606Z node 23 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:25:42.428601Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7519908528870675315:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:25:42.428877Z node 23 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:25:42.429837Z node 23 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [23:7519908528870675320:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:25:42.438159Z node 23 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:25:42.460233Z node 23 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [23:7519908528870675322:2307], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-25T15:25:42.535065Z node 23 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [23:7519908528870675373:2369] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } |98.3%| [TM] {RESULT} ydb/core/kqp/ut/view/unittest >> test_select.py::TestDML::test_select[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_kill_tablets.py::TestKillTablets::test_when_kill_hive_it_will_be_restarted_and_can_create_tablets |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/fq/libs/row_dispatcher/ut/unittest >> RowDispatcherTests::SessionFatalError [GOOD] Test command err: 2025-06-25T15:23:28.151479Z node 1 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:247: Coordinator: Successfully bootstrapped coordinator, id [1:30:2057] 2025-06-25T15:23:28.152035Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [1:25:2054] 2025-06-25T15:23:28.152152Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [1:25:2054] 2025-06-25T15:23:28.152179Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [2:26:2054] 2025-06-25T15:23:28.152198Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [2:26:2054] 2025-06-25T15:23:28.152236Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [3:27:2054] 2025-06-25T15:23:28.152254Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [3:27:2054] 2025-06-25T15:23:28.152332Z node 1 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [1:28:2055], topic1, partIds: 0 2025-06-25T15:23:28.152414Z node 1 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [1:28:2055] 2025-06-25T15:23:28.152503Z node 1 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [1:29:2056], topic1, partIds: 0 2025-06-25T15:23:28.152534Z node 1 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [1:29:2056] 2025-06-25T15:23:28.159449Z node 1 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [1:29:2056], topic1, partIds: 1 2025-06-25T15:23:28.159545Z node 1 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [1:29:2056] 2025-06-25T15:23:28.159653Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [2:31:2055] 2025-06-25T15:23:28.159688Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:265: Coordinator: Move all Locations from old actor [2:26:2054] to new [2:31:2055] 2025-06-25T15:23:28.159716Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [2:31:2055] 2025-06-25T15:23:28.159767Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [2:32:2056] 2025-06-25T15:23:28.159801Z node 1 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:265: Coordinator: Move all Locations from old actor [2:31:2055] to new [2:32:2056] 2025-06-25T15:23:28.159825Z node 1 
:FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [2:32:2056] 2025-06-25T15:23:28.159876Z node 1 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [1:28:2055], topic1, partIds: 0 2025-06-25T15:23:28.159914Z node 1 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [1:28:2055] 2025-06-25T15:23:28.159980Z node 1 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [1:29:2056], topic1, partIds: 1 2025-06-25T15:23:28.160007Z node 1 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [1:29:2056] 2025-06-25T15:23:28.242129Z node 5 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:247: Coordinator: Successfully bootstrapped coordinator, id [5:30:2057] 2025-06-25T15:23:28.242544Z node 5 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [5:25:2054] 2025-06-25T15:23:28.242590Z node 5 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [5:25:2054] 2025-06-25T15:23:28.242631Z node 5 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [6:26:2054] 2025-06-25T15:23:28.242830Z node 5 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [6:26:2054] 2025-06-25T15:23:28.242871Z node 5 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:294: Coordinator: TEvPing received, [7:27:2054] 2025-06-25T15:23:28.242908Z node 5 :FQ_ROW_DISPATCHER TRACE: coordinator.cpp:297: Coordinator: Send TEvPong to [7:27:2054] 2025-06-25T15:23:28.243000Z node 5 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [5:28:2055], topic1, partIds: 0, 1, 2 2025-06-25T15:23:28.243315Z node 5 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [5:28:2055] 2025-06-25T15:23:28.243419Z node 5 :FQ_ROW_DISPATCHER INFO: coordinator.cpp:421: Coordinator: TEvCoordinatorRequest from [5:29:2056], topic1, partIds: 3 2025-06-25T15:23:28.243469Z node 5 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:473: Coordinator: Send TEvCoordinatorResult to [5:29:2056] 2025-06-25T15:23:28.369361Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:224: TLeaderElection [9:9:2056] Successfully bootstrapped, local coordinator id [9:6:2053] 2025-06-25T15:23:28.369508Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:224: TLeaderElection [9:10:2057] Successfully bootstrapped, local coordinator id [9:7:2054] 2025-06-25T15:23:28.369602Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:71: Run create coordination node "local/row_dispatcher//tenant" actor 2025-06-25T15:23:28.369639Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.369676Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.376523Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:71: Run create coordination node "local/row_dispatcher//tenant" actor 2025-06-25T15:23:28.376588Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.376621Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.377025Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:224: TLeaderElection [9:8:2055] Successfully bootstrapped, local coordinator id [9:5:2052] 2025-06-25T15:23:28.377132Z node 9 :FQ_ROW_DISPATCHER 
DEBUG: schema.cpp:71: Run create coordination node "local/row_dispatcher//tenant" actor 2025-06-25T15:23:28.377163Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.377192Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.483447Z node 9 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "local/row_dispatcher//tenant" error: OVERLOADED {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateCreate) } 2025-06-25T15:23:28.483578Z node 9 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "local/row_dispatcher//tenant" error: OVERLOADED {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateCreate) } 2025-06-25T15:23:28.483679Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.483716Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.495801Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.495869Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.510422Z node 9 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "local/row_dispatcher//tenant" error: OVERLOADED {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateCreate) } 2025-06-25T15:23:28.510840Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.510880Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.514103Z node 9 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "local/row_dispatcher//tenant" error: OVERLOADED {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateCreate) } 2025-06-25T15:23:28.525142Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.525364Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.529996Z node 9 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "local/row_dispatcher//tenant" error: OVERLOADED {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exists but creating right now (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateCreate) } 2025-06-25T15:23:28.530117Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.530296Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.552516Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:155: Successfully created coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.552605Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:122: Reply for create coordination node "local/row_dispatcher//tenant": 2025-06-25T15:23:28.552686Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:333: TLeaderElection [9:8:2055] Coordination node successfully created 2025-06-25T15:23:28.552716Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:312: TLeaderElection [9:8:2055] Start session 2025-06-25T15:23:28.556197Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:155: Successfully created coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.556450Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:122: Reply for create coordination node "local/row_dispatcher//tenant": {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateNoChanges) } 2025-06-25T15:23:28.556531Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:333: TLeaderElection [9:9:2056] Coordination node successfully created 2025-06-25T15:23:28.556725Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:312: TLeaderElection [9:9:2056] Start session 2025-06-25T15:23:28.560840Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:155: Successfully created coordination node "local/row_dispatcher//tenant" 2025-06-25T15:23:28.560931Z node 9 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:122: Reply for create coordination node "local/row_dispatcher//tenant": {
: Error: Check failed: path: '/local/row_dispatcher/tenant', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeKesus, state: EPathStateNoChanges) } 2025-06-25T15:23:28.561008Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:333: TLeaderElection [9:10:2057] Coordination node successfully created 2025-06-25T15:23:28.561032Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:312: TLeaderElection [9:10:2057] Start session 2025-06-25T15:23:28.593755Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:347: TLeaderElection [9:10:2057] Session successfully created 2025-06-25T15:23:28.593902Z node 9 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:347: TLeaderElection [9:8:2055] Session successfull ... ER DEBUG: leader_election.cpp:224: TLeaderElection [42:19:2060] Successfully bootstrapped, local coordinator id [42:18:2059] 2025-06-25T15:25:43.094491Z node 42 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:71: Run create coordination node "YDB_DATABASE/RowDispatcher/Tenant" actor 2025-06-25T15:25:43.094523Z node 42 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "YDB_DATABASE/RowDispatcher/Tenant" 2025-06-25T15:25:43.094554Z node 42 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "YDB_DATABASE/RowDispatcher/Tenant" 2025-06-25T15:25:43.094913Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:566: RowDispatcher: TEvCoordinatorChangesSubscribe from [42:18:2059] 2025-06-25T15:25:43.095139Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:792: RowDispatcher: Received TEvStartSession from [43:16:2053], read group connection_id1, topicPath topic part id 100 query id QueryId cookie 42 2025-06-25T15:25:43.095367Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:829: RowDispatcher: Create new session: read group connection_id1 topic topic part id 100 2025-06-25T15:25:43.107239Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:987: RowDispatcher: TEvTryConnect to node id 43 2025-06-25T15:25:43.116526Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:516: RowDispatcher: EvNodeConnected, node id 43 2025-06-25T15:25:43.128911Z node 42 :FQ_ROW_DISPATCHER ERROR: schema.cpp:160: Create coordination node "YDB_DATABASE/RowDispatcher/Tenant" error: TRANSPORT_UNAVAILABLE [ {
: Error: GRpc error: (14): DNS resolution failed for YDB_ENDPOINT: C-ares status is not ARES_SUCCESS qtype=A name=YDB_ENDPOINT is_balancer=0: DNS server returned general failure } {
: Error: Grpc error response on endpoint YDB_ENDPOINT } ] 2025-06-25T15:25:43.129604Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [42:22:2063] to [43:16:2053] query id QueryId 2025-06-25T15:25:43.129934Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [43:16:2053] part id 100 query id QueryId 2025-06-25T15:25:43.130036Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [42:22:2063] to [43:16:2053] query id QueryId 2025-06-25T15:25:43.130308Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:902: RowDispatcher: Received TEvNoSession from [43:16:2053], generation 41 2025-06-25T15:25:43.130366Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [42:22:2063] to [43:16:2053] query id QueryId 2025-06-25T15:25:43.130600Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [43:16:2053] part id 100 query id QueryId 2025-06-25T15:25:43.130695Z node 42 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [42:22:2063] to [43:16:2053] query id QueryId 2025-06-25T15:25:43.130905Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:902: RowDispatcher: Received TEvNoSession from [43:16:2053], generation 42 2025-06-25T15:25:43.130959Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:951: RowDispatcher: DeleteConsumer, readActorId [43:16:2053] query id QueryId, partitions size 1 2025-06-25T15:25:43.131037Z node 42 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:971: RowDispatcher: Session is not used, sent TEvPoisonPill to [42:22:2063] 2025-06-25T15:25:43.269343Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:474: RowDispatcher: Successfully bootstrapped row dispatcher, id [44:17:2058], tenant Tenant 2025-06-25T15:25:43.280191Z node 44 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:247: Coordinator: Successfully bootstrapped coordinator, id [44:18:2059] 2025-06-25T15:25:43.280279Z node 44 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:224: TLeaderElection [44:19:2060] Successfully bootstrapped, local coordinator id [44:18:2059] 2025-06-25T15:25:43.280369Z node 44 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:71: Run create coordination node "YDB_DATABASE/RowDispatcher/Tenant" actor 2025-06-25T15:25:43.280410Z node 44 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "YDB_DATABASE/RowDispatcher/Tenant" 2025-06-25T15:25:43.280439Z node 44 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "YDB_DATABASE/RowDispatcher/Tenant" 2025-06-25T15:25:43.280562Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:566: RowDispatcher: TEvCoordinatorChangesSubscribe from [44:18:2059] 2025-06-25T15:25:43.280761Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:792: RowDispatcher: Received TEvStartSession from [44:14:2056], read group connection_id1, topicPath topic part id 100 query id QueryId cookie 1 2025-06-25T15:25:43.280975Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:829: RowDispatcher: Create new session: read group connection_id1 topic topic part id 100 2025-06-25T15:25:43.281348Z node 44 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [44:22:2063] to [44:14:2056] query id QueryId 2025-06-25T15:25:43.281461Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:936: RowDispatcher: Received TEvStopSession 
from [44:14:2056] topic topic query id QueryId 2025-06-25T15:25:43.281524Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:951: RowDispatcher: DeleteConsumer, readActorId [44:14:2056] query id QueryId, partitions size 1 2025-06-25T15:25:43.281612Z node 44 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:971: RowDispatcher: Session is not used, sent TEvPoisonPill to [44:22:2063] 2025-06-25T15:25:43.419220Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:474: RowDispatcher: Successfully bootstrapped row dispatcher, id [46:17:2058], tenant Tenant 2025-06-25T15:25:43.431101Z node 46 :FQ_ROW_DISPATCHER DEBUG: coordinator.cpp:247: Coordinator: Successfully bootstrapped coordinator, id [46:18:2059] 2025-06-25T15:25:43.431191Z node 46 :FQ_ROW_DISPATCHER DEBUG: leader_election.cpp:224: TLeaderElection [46:19:2060] Successfully bootstrapped, local coordinator id [46:18:2059] 2025-06-25T15:25:43.431284Z node 46 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:71: Run create coordination node "YDB_DATABASE/RowDispatcher/Tenant" actor 2025-06-25T15:25:43.431320Z node 46 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:113: Call create coordination node "YDB_DATABASE/RowDispatcher/Tenant" 2025-06-25T15:25:43.431352Z node 46 :FQ_ROW_DISPATCHER DEBUG: schema.cpp:411: Call create coordination node "YDB_DATABASE/RowDispatcher/Tenant" 2025-06-25T15:25:43.431744Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:566: RowDispatcher: TEvCoordinatorChangesSubscribe from [46:18:2059] 2025-06-25T15:25:43.431953Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:792: RowDispatcher: Received TEvStartSession from [46:14:2056], read group connection_id1, topicPath topic part id 100,101 query id QueryId cookie 1 2025-06-25T15:25:43.432147Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:829: RowDispatcher: Create new session: read group connection_id1 topic topic part id 100 2025-06-25T15:25:43.432331Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:829: RowDispatcher: Create new session: read group connection_id1 topic topic part id 101 2025-06-25T15:25:43.432771Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:792: RowDispatcher: Received TEvStartSession from [46:15:2057], read group connection_id1, topicPath topic part id 100,101 query id QueryId cookie 1 2025-06-25T15:25:43.433197Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1055: RowDispatcher: Forward TEvSessionError from [46:22:2063] to [46:14:2056] query id QueryId 2025-06-25T15:25:43.433269Z node 46 :FQ_ROW_DISPATCHER WARN: row_dispatcher.cpp:1075: RowDispatcher: Fatal session error, remove session [46:22:2063] 2025-06-25T15:25:43.433342Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:951: RowDispatcher: DeleteConsumer, readActorId [46:14:2056] query id QueryId, partitions size 2 2025-06-25T15:25:43.433584Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [46:23:2064] to [46:15:2057] query id QueryId 2025-06-25T15:25:43.433694Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [46:15:2057] part id 101 query id QueryId 2025-06-25T15:25:43.433797Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [46:23:2064] to [46:15:2057] query id QueryId 2025-06-25T15:25:43.433933Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:792: RowDispatcher: Received TEvStartSession from [46:14:2056], read group connection_id1, topicPath topic part id 100,101 query id QueryId cookie 1 
2025-06-25T15:25:43.434086Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:829: RowDispatcher: Create new session: read group connection_id1 topic topic part id 100 2025-06-25T15:25:43.434415Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1055: RowDispatcher: Forward TEvSessionError from [46:22:2063] to [46:15:2057] query id QueryId 2025-06-25T15:25:43.434486Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:951: RowDispatcher: DeleteConsumer, readActorId [46:15:2057] query id QueryId, partitions size 2 2025-06-25T15:25:43.434567Z node 46 :FQ_ROW_DISPATCHER ERROR: row_dispatcher.cpp:968: RowDispatcher: Wrong readActorId [46:15:2057], no such consumer 2025-06-25T15:25:43.434622Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:971: RowDispatcher: Session is not used, sent TEvPoisonPill to [46:22:2063] 2025-06-25T15:25:43.434823Z node 46 :FQ_ROW_DISPATCHER DEBUG: row_dispatcher.cpp:792: RowDispatcher: Received TEvStartSession from [46:15:2057], read group connection_id1, topicPath topic part id 100,101 query id QueryId cookie 1 2025-06-25T15:25:43.435209Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [46:24:2065] to [46:14:2056] query id QueryId 2025-06-25T15:25:43.435323Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [46:14:2056] part id 100 query id QueryId 2025-06-25T15:25:43.435413Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [46:24:2065] to [46:14:2056] query id QueryId 2025-06-25T15:25:43.435502Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [46:24:2065] to [46:15:2057] query id QueryId 2025-06-25T15:25:43.435602Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [46:15:2057] part id 100 query id QueryId 2025-06-25T15:25:43.435687Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [46:24:2065] to [46:15:2057] query id QueryId 2025-06-25T15:25:43.435782Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [46:23:2064] to [46:14:2056] query id QueryId 2025-06-25T15:25:43.435903Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [46:14:2056] part id 101 query id QueryId 2025-06-25T15:25:43.436030Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [46:23:2064] to [46:14:2056] query id QueryId 2025-06-25T15:25:43.436155Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1016: RowDispatcher: Forward TEvNewDataArrived from [46:23:2064] to [46:15:2057] query id QueryId 2025-06-25T15:25:43.436922Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:875: RowDispatcher: Received TEvGetNextBatch from [46:15:2057] part id 101 query id QueryId 2025-06-25T15:25:43.437073Z node 46 :FQ_ROW_DISPATCHER TRACE: row_dispatcher.cpp:1035: RowDispatcher: Forward TEvMessageBatch from [46:23:2064] to [46:15:2057] query id QueryId |98.3%| [TM] {RESULT} ydb/core/fq/libs/row_dispatcher/ut/unittest |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TTxDataShardRecomputeKMeansScan::BadRequest [GOOD] 
>> TTxDataShardRecomputeKMeansScan::MainTable >> test_kill_tablets.py::TestKillTablets::test_then_kill_system_tablets_and_it_increases_generation |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/public/sdk/cpp/tests/integration/bulk_upsert/gtest >> BulkUpsert::BulkUpsert [GOOD] |98.3%| [TM] {RESULT} ydb/public/sdk/cpp/tests/integration/bulk_upsert/gtest |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_table-True] [GOOD] >> KafkaProtocol::MetadataInServerlessScenario [GOOD] >> KafkaProtocol::NativeKafkaBalanceScenario |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.3%| [TA] $(B)/ydb/tests/functional/rename/test-results/py3test/{meta.json ... results_accumulator.log} |98.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.3%| [TA] {RESULT} $(B)/ydb/tests/functional/rename/test-results/py3test/{meta.json ... results_accumulator.log} |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_create_tablets.py::TestHive::test_when_create_tablets_then_can_lookup_them [GOOD] |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TDqPqReadActorTest::TestLoadFromSeveralStates [GOOD] |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TDqPqReadActorTest::TestReadFromTopicFirstWatermark |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TTxDataShardRecomputeKMeansScan::MainTable [GOOD] >> TTxDataShardRecomputeKMeansScan::BuildTable |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> TDqPqReadActorTest::TestReadFromTopicFirstWatermark [GOOD] |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_when_kill_hive_it_will_be_restarted_and_can_create_tablets [GOOD] >> TDqPqReadActorTest::TestReadFromTopicWatermarks1 |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test 
|98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> CompositeConveyorTests::Test10xMultiDistribution |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> CompositeConveyorTests::TestUniformProcessDistribution |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> DataShardStats::HasSchemaChanges_Families [GOOD] >> CompositeConveyorTests::TestUniformScopesDistribution |98.5%| [TA] $(B)/ydb/tests/datashard/select/test-results/py3test/{meta.json ... results_accumulator.log} |98.5%| [TA] {RESULT} $(B)/ydb/tests/datashard/select/test-results/py3test/{meta.json ... results_accumulator.log} |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> TTxDataShardRecomputeKMeansScan::BuildTable [GOOD] >> TTxDataShardRecomputeKMeansScan::EmptyCluster |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/ut_stats/unittest >> DataShardStats::HasSchemaChanges_Families [GOOD] Test command err: 2025-06-25T15:22:26.471509Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:22:26.471728Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:22:26.471785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/0007c8/r3tmp/tmpANpEt4/pdisk_1.dat 2025-06-25T15:22:27.136865Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:22:27.163786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:22:27.258482Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:22:27.284230Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-25T15:22:27.293190Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750864942820584 != 1750864942820588 2025-06-25T15:22:27.342463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:22:27.342577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:22:27.361158Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:22:27.489746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:22:27.593199Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828672, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvBoot 2025-06-25T15:22:27.594526Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3100: StateInit, received event# 268828673, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvRestored 2025-06-25T15:22:27.595054Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:22:27.595413Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:22:27.695171Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3113: StateInactive, received event# 268828684, Sender [1:618:2525], Recipient [1:627:2531]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-25T15:22:27.695961Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:22:27.696114Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: 
TDataShard::TTxInit::Execute 2025-06-25T15:22:27.707033Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:22:27.707180Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:22:27.707244Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:22:27.724726Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:22:27.724969Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:22:27.725096Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:22:27.736967Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:22:27.870650Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:22:27.876594Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:22:27.876830Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:22:27.876873Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:22:27.876919Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:22:27.876962Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:22:27.877255Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435072, Sender [1:627:2531], Recipient [1:627:2531]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-25T15:22:27.877331Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-25T15:22:27.879422Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:22:27.879539Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:22:27.879605Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:22:27.879645Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:22:27.879701Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:22:27.879740Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:22:27.879799Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:22:27.879836Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:22:27.879895Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 
2025-06-25T15:22:27.885756Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [1:634:2535], Recipient [1:627:2531]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:22:27.885849Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:22:27.885911Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:22:27.886061Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269549568, Sender [1:373:2367], Recipient [1:634:2535] 2025-06-25T15:22:27.886124Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3138: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-25T15:22:27.886257Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:22:27.886555Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-25T15:22:27.886638Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:22:27.886754Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:22:27.886811Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-25T15:22:27.886856Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-25T15:22:27.886891Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-25T15:22:27.886936Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:22:27.887284Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-25T15:22:27.887320Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-25T15:22:27.887362Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-25T15:22:27.887394Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:22:27.887448Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-25T15:22:27.887476Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-25T15:22:27.887512Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-25T15:22:27.887569Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on 
unit WaitForPlan 2025-06-25T15:22:27.887596Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-25T15:22:27.889319Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269746185, Sender [1:646:2542], Recipient [1:627:2531]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-25T15:22:27.889380Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:22:27.901660Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:22:27.901768Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-25T15:22:27.901827Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-25T15:22:27.901883Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:1 ... 186224037888 2025-06-25T15:25:58.102813Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877760, Sender [13:1112:2921], Recipient [13:888:2714]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046644480 Status: OK ServerId: [13:1117:2926] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-25T15:25:58.102905Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-25T15:25:58.111908Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269552132, Sender [13:373:2367], Recipient [13:888:2714]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715664 2025-06-25T15:25:58.111999Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3136: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-25T15:25:58.112086Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715664 datashard 72075186224037888 state Ready 2025-06-25T15:25:58.112230Z node 13 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 waiting for schema changes 2025-06-25T15:25:58.129225Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877763, Sender [13:1112:2921], Recipient [13:888:2714]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594046644480 ClientId: [13:1112:2921] ServerId: [13:1117:2926] } 2025-06-25T15:25:58.129359Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-25T15:25:59.037717Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [13:888:2714]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-25T15:25:59.037818Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:614: UpdateTableStats at datashard 72075186224037888 2025-06-25T15:25:59.038175Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:509: Skipped at datashard 72075186224037888, for tableId 2: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 PartCount 1, with schema changes 2025-06-25T15:25:59.038419Z node 13 :TX_DATASHARD TRACE: 
datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186224037888, FollowerId 0, tableId 2 Captured TEvDataShard::TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 2 Round: 9 TableStats { DataSize: 130 RowCount: 3 IndexSize: 82 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 3 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 20 HasLoanedParts: false Channels { Channel: 1 DataSize: 65 IndexSize: 82 } Channels { Channel: 2 DataSize: 65 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: true LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 3371 Memory: 124352 Storage: 254 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 13 StartTime: 5451 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-25T15:25:59.039773Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269877761, Sender [13:1150:2959], Recipient [13:888:2714]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-25T15:25:59.039862Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-25T15:25:59.039939Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [13:1149:2958], serverId# [13:1150:2959], sessionId# [0:0:0] 2025-06-25T15:25:59.040168Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 269553210, Sender [13:1148:2957], Recipient [13:888:2714]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046644480 LocalId: 2 } CompactBorrowed: false 2025-06-25T15:25:59.040430Z node 13 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 3 of 72075186224037888 tableId# 2 localTid# 1001, requested from [13:1148:2957], partsCount# 1, memtableSize# 0, memtableWaste# 0, memtableRows# 0 2025-06-25T15:25:59.042309Z node 13 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 2, ts 1970-01-01T00:00:20.452024Z 2025-06-25T15:25:59.042382Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:614: UpdateTableStats at datashard 72075186224037888 2025-06-25T15:25:59.042491Z node 13 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 2, front# 3 2025-06-25T15:25:59.053099Z node 13 :TABLET_STATS_BUILDER TRACE: flat_stat_table.cpp:22: Building stats at datashard 72075186224037888, for tableId 2: starting for mixed index 2025-06-25T15:25:59.055416Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [13:885:2712], Recipient [13:888:2714]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-25T15:25:59.056090Z node 13 :TABLET_STATS_BUILDER TRACE: flat_stat_table.cpp:30: Building stats at datashard 72075186224037888, for tableId 2: finished for mixed index ready: 1 stats: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 2025-06-25T15:25:59.056217Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:177: Stats at datashard 72075186224037888, for tableId 2: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 PartCount: 1, with schema changes, LoadedSize 82, 
Spent{time=0.000s,wait=0.000s,interrupts=2} 2025-06-25T15:25:59.058302Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435080, Sender [13:1155:2963], Recipient [13:888:2714]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvAsyncTableStats 2025-06-25T15:25:59.058414Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:381: Result received at datashard 72075186224037888, for tableId 2: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 2025-06-25T15:25:59.058567Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186224037888, FollowerId 0, tableId 2 2025-06-25T15:25:59.117393Z node 13 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186224037888, table# 1001, finished edge# 3, ts 1970-01-01T00:00:30.452024Z 2025-06-25T15:25:59.117514Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:614: UpdateTableStats at datashard 72075186224037888 2025-06-25T15:25:59.117567Z node 13 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001, finished edge# 3, front# 3 2025-06-25T15:25:59.117637Z node 13 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186224037888, table# 1001 sending TEvCompactTableResult to# [13:1148:2957]pathId# [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-25T15:25:59.119923Z node 13 :TABLET_STATS_BUILDER TRACE: flat_stat_table.cpp:22: Building stats at datashard 72075186224037888, for tableId 2: starting for mixed index 2025-06-25T15:25:59.120565Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 268828683, Sender [13:885:2712], Recipient [13:888:2714]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-25T15:25:59.122405Z node 13 :TABLET_STATS_BUILDER TRACE: flat_stat_table.cpp:30: Building stats at datashard 72075186224037888, for tableId 2: finished for mixed index ready: 1 stats: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 2025-06-25T15:25:59.122525Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:177: Stats at datashard 72075186224037888, for tableId 2: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 PartCount: 1, LoadedSize 82, Spent{time=0.000s,wait=0.000s,interrupts=2} 2025-06-25T15:25:59.122837Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435080, Sender [13:1162:2969], Recipient [13:888:2714]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvAsyncTableStats 2025-06-25T15:25:59.122898Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:381: Result received at datashard 72075186224037888, for tableId 2: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 2025-06-25T15:25:59.122970Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186224037888, FollowerId 0, tableId 2 waiting for no schema changes 2025-06-25T15:25:59.134500Z node 13 :TX_DATASHARD DEBUG: datashard__compaction.cpp:189: Updated last full compaction of tablet# 72075186224037888, tableId# 2, last full compaction# 1970-01-01T00:00:30.452024Z 2025-06-25T15:25:59.979318Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [13:888:2714]: 
NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-25T15:25:59.979474Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3158: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-25T15:25:59.979622Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186224037888 outdated step 35000 last cleanup 0 2025-06-25T15:25:59.979743Z node 13 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:25:59.979816Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-25T15:25:59.979887Z node 13 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-25T15:25:59.979960Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-25T15:25:59.980196Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [13:888:2714]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-25T15:25:59.980248Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:614: UpdateTableStats at datashard 72075186224037888 2025-06-25T15:25:59.980494Z node 13 :TABLET_STATS_BUILDER INFO: datashard__stats.cpp:509: Skipped at datashard 72075186224037888, for tableId 2: RowCount: 3 DataSize: 130 IndexSize: 82 ByKeyFilterSize: 0 RowCountHistogram: 0 DataSizeHistogram: 0 PartCount 1 2025-06-25T15:25:59.980672Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3441: TEvPeriodicTableStats from datashard 72075186224037888, FollowerId 0, tableId 2 Captured TEvDataShard::TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 2 Round: 12 TableStats { DataSize: 130 RowCount: 3 IndexSize: 82 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 3 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 30 HasLoanedParts: false Channels { Channel: 1 DataSize: 80 IndexSize: 82 } Channels { Channel: 2 DataSize: 50 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 3435 Memory: 124352 Storage: 254 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 13 StartTime: 5451 TableOwnerId: 72057594046644480 FollowerId: 0 |98.5%| [TM] {RESULT} ydb/core/tx/datashard/ut_stats/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_when_kill_hive_it_will_be_restarted_and_can_create_tablets [GOOD] >> TDqPqReadActorTest::TestReadFromTopicWatermarks1 [GOOD] >> TDqPqReadActorTest::WatermarkCheckpointWithItemsInReadyBuffer |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> 
test_indexes.py::TestSecondaryIndexes::test_create_table_with_global_index |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_discovery.py::TestDiscoveryExtEndpoint::test_scenario >> test_kill_tablets.py::TestKillTablets::test_then_kill_system_tablets_and_it_increases_generation [GOOD] >> TTxDataShardRecomputeKMeansScan::EmptyCluster [GOOD] >> TTxDataShardReshuffleKMeansScan::BadRequest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.5%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_inserts.py::TestYdbInsertsOperations::test_bulk_upsert_parallel [GOOD] >> test_inserts.py::TestYdbInsertsOperations::test_insert_multiple_empty_rows |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> TDqPqReadActorTest::WatermarkCheckpointWithItemsInReadyBuffer [GOOD] >> TPqWriterTest::TestWriteToTopic |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> TPqWriterTest::TestWriteToTopic [GOOD] >> TPqWriterTest::TestWriteToTopicMultiBatch >> test_indexes.py::TestSecondaryIndexes::test_create_table_with_global_index [GOOD] >> TTxDataShardReshuffleKMeansScan::BadRequest [GOOD] >> TTxDataShardReshuffleKMeansScan::MainToPosting >> TPqWriterTest::TestWriteToTopicMultiBatch [GOOD] >> test_cms_state_storage.py::TestCmsStateStorageSimpleKeep::test_check_shutdown_state_storage_nodes |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_generate_dynamic_config.py::TestGenerateDynamicConfigFromConfigDir::test_generate_dynamic_config_from_config_store >> TPqWriterTest::TestDeferredWriteToTopic |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> TPqWriterTest::TestDeferredWriteToTopic [GOOD] |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_then_kill_system_tablets_and_it_increases_generation [GOOD] >> TPqWriterTest::WriteNonExistentTopic >> TPqWriterTest::WriteNonExistentTopic [GOOD] >> TPqWriterTest::TestCheckpoints |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> TPqWriterTest::TestCheckpoints [GOOD] |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> TPqWriterTest::TestCheckpointWithEmptyBatch >> TPqWriterTest::TestCheckpointWithEmptyBatch [GOOD] >> test_insert.py::TestInsertOperations::test_several_inserts_per_transaction_are_success >> TTxDataShardReshuffleKMeansScan::MainToPosting [GOOD] >> TTxDataShardReshuffleKMeansScan::MainToBuild >> KafkaProtocol::NativeKafkaBalanceScenario [FAIL] >> KafkaProtocol::InitProducerId_withoutTransactionalIdShouldReturnRandomInt |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_discovery.py::TestDiscoveryExtEndpoint::test_scenario 
[GOOD] |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_indexes.py::TestSecondaryIndexes::test_create_table_with_global_index [GOOD] |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/pq_async_io/ut/unittest >> TPqWriterTest::TestCheckpointWithEmptyBatch [GOOD] Test command err: 2025-06-25T15:22:36.127877Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:528: SelfId: [0:0:0], TxId: query_1, task: 0. PQ source. Start read actor, local row dispatcher [1:7519907721748963691:2054], metadatafields: , partitions: 666 2025-06-25T15:22:36.337434Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:729: SelfId: [0:0:0], TxId: query_1, task: 0. PQ source. GetAsyncInputData freeSpace = 12345 2025-06-25T15:22:36.337528Z node 1 :KQP_COMPUTE DEBUG: dq_pq_rd_read_actor.cpp:1387: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. Switch to single-cluster mode 2025-06-25T15:22:36.337547Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:578: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvCoordinatorChangesSubscribe to local RD ([1:7519907721748963691:2054]) 2025-06-25T15:22:36.337588Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:605: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvCoordinatorChangesSubscribe to local row dispatcher, self id [1:7519907726043930993:2048] 2025-06-25T15:22:36.337936Z node 1 :KQP_COMPUTE DEBUG: dq_pq_rd_read_actor.cpp:921: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. TEvCoordinatorChanged, new coordinator [1:7519907721748963692:2055] 2025-06-25T15:22:36.337977Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:617: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvCoordinatorRequest to coordinator [1:7519907721748963692:2055], partIds: 666 cookie 1 2025-06-25T15:22:36.338186Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:965: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvCoordinatorResult from [1:7519907721748963692:2055], cookie 1 2025-06-25T15:22:36.338243Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1224: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. UpdateSessions, Sessions size 0 2025-06-25T15:22:36.338256Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1227: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. Distribution is changed, remove sessions 2025-06-25T15:22:36.338276Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1246: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. Create session to [1:7519907721748963694:2057], generation 1 2025-06-25T15:22:36.338351Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:681: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvStartSession to [1:7519907721748963694:2057], connection id 1 partitions offsets (666 / ), 2025-06-25T15:22:36.338595Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:789: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. 
Received TEvStartSessionAck from [1:7519907721748963694:2057], seqNo 0, ConfirmedSeqNo 0, generation 1 2025-06-25T15:22:36.339396Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:857: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvNewDataArrived from [1:7519907721748963694:2057], partition 666, seqNo 0, ConfirmedSeqNo 0 generation 1 2025-06-25T15:22:36.343361Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1029: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvMessageBatch from [1:7519907721748963694:2057], seqNo 0, ConfirmedSeqNo 0 generation 1 2025-06-25T15:22:36.343394Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1065: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. TEvMessageBatch NextOffset 1 2025-06-25T15:22:36.343399Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1065: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. TEvMessageBatch NextOffset 2 2025-06-25T15:22:36.343440Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:729: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. GetAsyncInputData freeSpace = 1000 2025-06-25T15:22:36.343636Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:750: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. NextOffset 2 2025-06-25T15:22:36.343647Z node 1 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:754: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. Return 2 rows, buffer size 0, free space 948, result size 52 2025-06-25T15:22:36.344774Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:706: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. PassAway 2025-06-25T15:22:36.344838Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1114: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. State: used buffer size 0 ready buffer event size 0 state 5 InFlyAsyncInputData 0 Counters: CoordinatorChanged 1 CoordinatorResult 1 MessageBatch 1 StartSessionAck 1 NewDataArrived 1 SessionError 0 Statistics 0 NodeDisconnected 0 NodeConnected 0 Undelivered 0 Retry 0 PrivateHeartbeat 0 SessionClosed 0 Pong 0 Heartbeat 0 PrintState 0 ProcessState 0 GetAsyncInputData 2 NotifyCA 1 [1:7519907721748963694:2057] status 2 is waiting ack 0 connection id 1 id 1, LocalRecipient partitions 666 offsets 666=2 has pending data 2025-06-25T15:22:36.344850Z node 1 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:698: SelfId: [1:7519907726043930993:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send StopSession to [1:7519907721748963694:2057] generation 1 2025-06-25T15:22:36.907266Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:528: SelfId: [0:0:0], TxId: query_1, task: 0. PQ source. Start read actor, local row dispatcher [3:7519907728731857941:2054], metadatafields: , partitions: 666 2025-06-25T15:22:37.124920Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:729: SelfId: [0:0:0], TxId: query_1, task: 0. PQ source. GetAsyncInputData freeSpace = 12345 2025-06-25T15:22:37.124969Z node 3 :KQP_COMPUTE DEBUG: dq_pq_rd_read_actor.cpp:1387: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. 
Switch to single-cluster mode 2025-06-25T15:22:37.124985Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:578: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvCoordinatorChangesSubscribe to local RD ([3:7519907728731857941:2054]) 2025-06-25T15:22:37.125016Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:605: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvCoordinatorChangesSubscribe to local row dispatcher, self id [3:7519907728731857947:2048] 2025-06-25T15:22:37.125480Z node 3 :KQP_COMPUTE DEBUG: dq_pq_rd_read_actor.cpp:921: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. TEvCoordinatorChanged, new coordinator [3:7519907728731857942:2055] 2025-06-25T15:22:37.125506Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:617: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvCoordinatorRequest to coordinator [3:7519907728731857942:2055], partIds: 666 cookie 1 2025-06-25T15:22:37.125769Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:965: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvCoordinatorResult from [3:7519907728731857942:2055], cookie 1 2025-06-25T15:22:37.125791Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1224: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. UpdateSessions, Sessions size 0 2025-06-25T15:22:37.125800Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1227: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Distribution is changed, remove sessions 2025-06-25T15:22:37.125819Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:1246: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Create session to [3:7519907728731857944:2057], generation 1 2025-06-25T15:22:37.125846Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:681: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Send TEvStartSession to [3:7519907728731857944:2057], connection id 1 partitions offsets (666 / ), 2025-06-25T15:22:37.126242Z node 3 :KQP_COMPUTE INFO: dq_pq_rd_read_actor.cpp:789: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvStartSessionAck from [3:7519907728731857944:2057], seqNo 0, ConfirmedSeqNo 0, generation 1 2025-06-25T15:22:37.126417Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:857: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvNewDataArrived from [3:7519907728731857944:2057], partition 666, seqNo 0, ConfirmedSeqNo 0 generation 1 2025-06-25T15:22:37.127198Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1029: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvMessageBatch from [3:7519907728731857944:2057], seqNo 0, ConfirmedSeqNo 0 generation 1 2025-06-25T15:22:37.127227Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1065: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. TEvMessageBatch NextOffset 1 2025-06-25T15:22:37.127233Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1065: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. 
TEvMessageBatch NextOffset 2 2025-06-25T15:22:37.127440Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:729: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. GetAsyncInputData freeSpace = 1000 2025-06-25T15:22:37.127557Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:750: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. NextOffset 2 2025-06-25T15:22:37.127576Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:754: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Return 2 rows, buffer size 0, free space 948, result size 52 2025-06-25T15:22:37.127863Z node 3 :KQP_COMPUTE DEBUG: dq_pq_rd_read_actor.cpp:1003: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvUndelivered, TSystem::Undelivered from [3:7519907728731857944:2057], reason Disconnected, cookie 999 2025-06-25T15:22:37.127985Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:875: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvRetry, EventQueueId 1 2025-06-25T15:22:37.128008Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:857: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvNewDataArrived from [3:7519907728731857944:2057], partition 666, seqNo 0, ConfirmedSeqNo 0 generation 1 2025-06-25T15:22:37.128555Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1029: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. Received TEvMessageBatch from [3:7519907728731857944:2057], seqNo 0, ConfirmedSeqNo 0 generation 1 2025-06-25T15:22:37.128572Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:1065: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. TEvMessageBatch NextOffset 3 2025-06-25T15:22:37.129219Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:729: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. GetAsyncInputData freeSpace = 1000 2025-06-25T15:22:37.129279Z node 3 :KQP_COMPUTE TRACE: dq_pq_rd_read_actor.cpp:750: SelfId: [3:7519907728731857947:2048], TxId: query_1, task: 0, Cluster: . PQ source. NextOffset 3 2025 ... n Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:26:21.528940Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:26:21.528991Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] Reconnecting session to cluster in 0.000000s 2025-06-25T15:26:21.686710Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] Successfully connected. Initializing session 2025-06-25T15:26:21.691137Z :INFO: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] Got InitResponse. 
ReadSessionId: test_client_1_22_7232457704736620544_v1 2025-06-25T15:26:21.691204Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] In ContinueReadingDataImpl, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-25T15:26:21.691435Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 [] [] Start federated write session to database '' (previous was ) FederationState: { Status: SUCCESS SelfLocation: "" DbInfos: [ { path: "local" endpoint: "localhost:18437" status: AVAILABLE weight: 100 } ] }2025-06-25T15:26:21.692355Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [7aeb4342-c3998e14-33d65426-15763502] Write session: try to update token 2025-06-25T15:26:21.692653Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [7aeb4342-c3998e14-33d65426-15763502] Start write session. Will connect to nodeId: 0 2025-06-25T15:26:21.706056Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [7aeb4342-c3998e14-33d65426-15763502] Write session: write to message_group: 7aeb4342-c3998e14-33d65426-15763502 2025-06-25T15:26:21.706213Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [7aeb4342-c3998e14-33d65426-15763502] Write session: send init request: init_request { path: "Checkpoints" producer_id: "7aeb4342-c3998e14-33d65426-15763502" message_group_id: "7aeb4342-c3998e14-33d65426-15763502" } 2025-06-25T15:26:21.706254Z :TRACE: [local] TRACE_EVENT InitRequest 2025-06-25T15:26:21.706656Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [7aeb4342-c3998e14-33d65426-15763502] Write session: OnWriteDone gRpcStatusCode: 0 2025-06-25T15:26:21.716457Z :INFO: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] Confirm partition stream create. Partition stream id: 1. Cluster: "-". Topic: "Checkpoints". Partition: 0. Read offset: (NULL) 2025-06-25T15:26:21.724641Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] Got ReadResponse, serverBytesSize = 936, now ReadSizeBudget = 0, ReadSizeServerDelta = 52427864 2025-06-25T15:26:21.724830Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52427864 2025-06-25T15:26:21.727835Z :DEBUG: [local] Decompression task done. Partition/PartitionSessionId: 1 (0-4) 2025-06-25T15:26:21.727935Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] Returning serverBytesSize = 936 to budget 2025-06-25T15:26:21.727995Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] In ContinueReadingDataImpl, ReadSizeBudget = 936, ReadSizeServerDelta = 52427864 2025-06-25T15:26:21.728374Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-25T15:26:21.728486Z :DEBUG: [local] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-06-25T15:26:21.728545Z :DEBUG: [local] Take Data. Partition 0. Read: {1, 0} (1-1) 2025-06-25T15:26:21.728596Z :DEBUG: [local] Take Data. Partition 0. Read: {1, 1} (2-2) 2025-06-25T15:26:21.728639Z :DEBUG: [local] Take Data. Partition 0. Read: {2, 0} (3-3) 2025-06-25T15:26:21.728722Z :DEBUG: [local] Take Data. Partition 0. 
Read: {3, 0} (4-4) 2025-06-25T15:26:21.736470Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] The application data is transferred to the client. Number of messages 5, size 5 bytes 2025-06-25T15:26:21.736558Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] Returning serverBytesSize = 0 to budget 2025-06-25T15:26:21.736776Z :INFO: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] Closing read session. Close timeout: 0.000000s 2025-06-25T15:26:21.736843Z :INFO: [local] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:Checkpoints:0:1:4:0 2025-06-25T15:26:21.736932Z :INFO: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] Counters: { Errors: 0 CurrentSessionLifetimeMs: 225 BytesRead: 5 MessagesRead: 5 BytesReadCompressed: 5 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T15:26:21.737113Z :NOTICE: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-25T15:26:21.737188Z :DEBUG: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] [] Abort session to cluster 2025-06-25T15:26:21.737818Z :INFO: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] Closing read session. Close timeout: 0.000000s 2025-06-25T15:26:21.737889Z :INFO: [local] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:Checkpoints:0:1:4:0 2025-06-25T15:26:21.737950Z :INFO: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] Counters: { Errors: 0 CurrentSessionLifetimeMs: 226 BytesRead: 5 MessagesRead: 5 BytesReadCompressed: 5 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T15:26:21.738083Z :NOTICE: [local] [local] [901468f5-ffb27e2-a577f53e-643e7a50] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-25T15:26:21.750217Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [7aeb4342-c3998e14-33d65426-15763502] Write session: OnReadDone gRpcStatusCode: 0 2025-06-25T15:26:21.750312Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [7aeb4342-c3998e14-33d65426-15763502] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750865181750 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T15:26:21.750428Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [] MessageGroupId [7aeb4342-c3998e14-33d65426-15763502] Write session established. Init response: last_seq_no: 5 session_id: "7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0" 2025-06-25T15:26:21.750483Z :TRACE: [local] TRACE_EVENT InitResponse partition_id=0 session_id=7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0 2025-06-25T15:26:21.750524Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0] MessageGroupId [7aeb4342-c3998e14-33d65426-15763502] Write session: set DirectWriteToPartitionId 0 2025-06-25T15:26:21.750719Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0] PartitionId [0] Generation [0] Get partition location async, partition 0, delay 0.000000s 2025-06-25T15:26:21.750777Z :TRACE: [local] TRACE_EVENT DescribePartitionRequest path=local/Checkpoints partition_id=0 2025-06-25T15:26:21.752363Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0] PartitionId [0] Generation [0] Getting partition location, partition 0 2025-06-25T15:26:21.767066Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0] PartitionId [0] Generation [0] Got PartitionLocation response. Status SUCCESS, proto: partition { active: true partition_location { node_id: 1 generation: 1 } } 2025-06-25T15:26:21.767134Z :TRACE: [local] TRACE_EVENT DescribePartitionResponse partition_id=0 active=1 pl_node_id=1 pl_generation=1 2025-06-25T15:26:21.767193Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0] PartitionId [0] Generation [0] GetPreferredEndpoint: partitionId 0, partitionNodeId 1 exists in the endpoint pool. 2025-06-25T15:26:21.767235Z :TRACE: [local] TRACE_EVENT PreferredPartitionLocation Endpoint= NodeId=1 Generation=1 2025-06-25T15:26:21.767274Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0] PartitionId [0] Generation [1] Start write session. Will connect to nodeId: 1 2025-06-25T15:26:21.770959Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. 
] SessionId [7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0] PartitionId [0] Generation [1] Write session: OnReadDone gRpcStatusCode: 1, Msg: CANCELLED, Details: , InternalError: 0 2025-06-25T15:26:21.773416Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0] PartitionId [0] Generation [1] Write session: close. Timeout 0.000000s 2025-06-25T15:26:21.773460Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0] PartitionId [0] Generation [1] Write session will now close 2025-06-25T15:26:21.773536Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-25T15:26:21.773662Z :INFO: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-25T15:26:21.773696Z :DEBUG: [local] TraceId [SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. ] SessionId [7aeb4342-c3998e14-33d65426-15763502|ec55243-cccc4f49-964ffa54-11948db_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-25T15:26:22.845477Z node 54 :KQP_COMPUTE TRACE: dq_pq_write_actor.cpp:179: SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. SendData. Batch: 0. Checkpoint: 1. Finished: 0 2025-06-25T15:26:22.875955Z node 54 :KQP_COMPUTE DEBUG: dq_pq_write_actor.cpp:224: SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. [Checkpoint 0.0] Send checkpoint state immediately 2025-06-25T15:26:22.876158Z node 54 :KQP_COMPUTE TRACE: dq_pq_write_actor.cpp:396: SelfId: [0:0:0], TxId: query_1, TaskId: 0, PQ sink. 
Save checkpoint { Id: 0 Generation: 0 } state: { SourceId: "9b772a2-f258aa25-4c9a3f7c-86bee448" } 2025-06-25T15:26:22.950000Z :INFO: [local] OnFederationDiscovery fall back to single mode, database=local E0625 15:26:23.048905962 1118026 dns_resolver_ares.cc:452] no server name supplied in dns URI E0625 15:26:23.049068605 1118026 channel.cc:120] channel stack builder failed: UNKNOWN: the target uri is not valid: dns:/// |98.6%| [TM] {RESULT} ydb/tests/fq/pq_async_io/ut/unittest |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> TTxDataShardReshuffleKMeansScan::MainToBuild [GOOD] >> TTxDataShardReshuffleKMeansScan::BuildToPosting >> test_public_api.py::TestExplain::test_explain_data_query >> test_inserts.py::TestYdbInsertsOperations::test_insert_multiple_empty_rows [GOOD] |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_insert.py::TestInsertOperations::test_several_inserts_per_transaction_are_success [GOOD] >> test_insert.py::TestInsertOperations::test_insert_plus_update_per_transaction_are_success >> test_insert.py::TestInsertOperations::test_insert_plus_update_per_transaction_are_success [GOOD] >> test_insert.py::TestInsertOperations::test_update_plus_insert_per_transaction_are_success_prepared_case >> test_generate_dynamic_config.py::TestGenerateDynamicConfigFromConfigDir::test_generate_dynamic_config_from_config_store [GOOD] >> KafkaProtocol::InitProducerId_withoutTransactionalIdShouldReturnRandomInt [GOOD] >> KafkaProtocol::InitProducerId_forNewTransactionalIdShouldReturnIncrementingInt |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_insert.py::TestInsertOperations::test_update_plus_insert_per_transaction_are_success_prepared_case [GOOD] >> test_insert.py::TestInsertOperations::test_upsert_plus_insert_per_transaction_are_success_prepared_case >> test_restarts.py::TestRestartSingleMirror3DC::test_restart_single_node_is_ok [GOOD] >> test_insert.py::TestInsertOperations::test_upsert_plus_insert_per_transaction_are_success_prepared_case [GOOD] >> test_insert.py::TestInsertOperations::test_insert_plus_upsert_are_success |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_insert.py::TestInsertOperations::test_insert_plus_upsert_are_success [GOOD] >> test_insert.py::TestInsertOperations::test_insert_revert_basis >> test_insert.py::TestInsertOperations::test_insert_revert_basis [GOOD] >> test_insert.py::TestInsertOperations::test_query_pairs >> test_public_api.py::TestExplain::test_explain_data_query [GOOD] >> TTxDataShardReshuffleKMeansScan::BuildToPosting [GOOD] >> TTxDataShardReshuffleKMeansScan::BuildToBuild >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfigFromConfigDir::test_generate_dynamic_config_from_config_store [GOOD] |98.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartSingleMirror3DC::test_restart_single_node_is_ok [GOOD] |98.7%| [TM] {asan, default-linux-x86_64, release} 
ydb/core/tx/conveyor_composite/ut/unittest |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_configuration_version.py::TestConfigurationVersion::test_configuration_version >> TTxDataShardReshuffleKMeansScan::BuildToBuild [GOOD] >> TTxDataShardSampleKScan::BadRequest |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::TestUniformDistribution |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::Test10xDistribution [GOOD] >> KafkaProtocol::InitProducerId_forNewTransactionalIdShouldReturnIncrementingInt [GOOD] >> KafkaProtocol::InitProducerId_forSqlInjectionShouldReturnWithoutDropingDatabase |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/sql/py3test >> test_inserts.py::TestYdbInsertsOperations::test_insert_multiple_empty_rows [GOOD] |98.7%| [TM] {RESULT} ydb/tests/sql/py3test >> TTxDataShardSampleKScan::BadRequest [GOOD] >> TTxDataShardSampleKScan::RunScan ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::Test10xDistribution [GOOD] Test command err: {I:139711};{S:105999};{N:151997}; {I:151614};{S:122928};{N:170890}; {I:151614};{S:144307};{N:201880}; {I:151614};{S:168288};{N:240527}; {I:151614};{S:220797};{N:256059}; {I:181390};{S:256060};{N:256059}; {I:237143};{S:256060};{N:256059}; {I:256060};{S:256060};{N:256059}; {I:256060};{S:256060};{N:256059}; {I:256060};{S:256060};{N:271454}; {I:256060};{S:256060};{N:294312}; {I:256060};{S:256060};{N:316063}; {I:256060};{S:256060};{N:339043}; {I:256060};{S:256060};{N:361836}; {I:256060};{S:256060};{N:383048}; {I:256060};{S:256060};{N:407339}; {I:256060};{S:256060};{N:438521}; {I:256060};{S:256060};{N:481688}; {I:256060};{S:256060};{N:527783}; {I:256060};{S:266759};{N:565813}; {I:256060};{S:290892};{N:588773}; {I:256060};{S:315376};{N:628691}; {I:263411};{S:339983};{N:676278}; {I:286359};{S:383784};{N:701357}; {I:309096};{S:429596};{N:725026}; {I:331503};{S:471517};{N:745105}; {I:351650};{S:497465};{N:772526}; {I:372018};{S:519353};{N:818494}; {I:396177};{S:543247};{N:864691}; {I:419290};{S:581750};{N:909148}; {I:443470};{S:628058};{N:955455}; {I:462594};{S:680269};{N:983388}; {I:483677};{S:727887};{N:1000000}; {I:525761};{S:770735};{N:1000000}; {I:569669};{S:808237};{N:1000000}; {I:602930};{S:845441};{N:1000000}; {I:639999};{S:884919};{N:1000000}; {I:669039};{S:920608};{N:1000000}; {I:705741};{S:962718};{N:1000000}; {I:747321};{S:999395};{N:1000000}; {I:789844};{S:1000000};{N:1000000}; {I:829996};{S:1000000};{N:1000000}; {I:849763};{S:1000000};{N:1000000}; {I:867798};{S:1000000};{N:1000000}; {I:891519};{S:1000000};{N:1000000}; {I:912667};{S:1000000};{N:1000000}; {I:932812};{S:1000000};{N:1000000}; {I:953704};{S:1000000};{N:1000000}; {I:975578};{S:1000000};{N:1000000}; {I:997684};{S:1000000};{N:1000000}; {I:1000000};{S:1000000};{N:1000000}; 158us per task 50.075673s;40.067138s;32.056194s; |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |98.7%| [TM] {asan, default-linux-x86_64, 
release} ydb/core/tx/conveyor_composite/ut/unittest |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_insert_restarts.py::TestS3::test_atomic_upload_commit[v2-client0] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_drain.py::TestHive::test_drain_tablets [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_simple_acquire >> test_insert.py::TestInsertOperations::test_query_pairs [GOOD] >> TTxDataShardSampleKScan::RunScan [GOOD] |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/datashard/build_index/ut/unittest >> TTxDataShardSampleKScan::RunScan [GOOD] Test command err: 2025-06-25T15:23:06.541256Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:286:2328], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-25T15:23:06.541493Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-25T15:23:06.541572Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000965/r3tmp/tmpD7gXSp/pdisk_1.dat 2025-06-25T15:23:07.165144Z node 1 :BS_CONTROLLER WARN: {BSC17@register_node.cpp:674} SendToWarden dropped event NodeId# 1 Type# 268639257 2025-06-25T15:23:07.183032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) 2025-06-25T15:23:07.256820Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:23:07.280867Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:33:2080] 1750864983293305 != 1750864983293309 2025-06-25T15:23:07.340806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:23:07.340988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:23:07.357231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-25T15:23:07.503750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:23:07.613662Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:627:2531] 2025-06-25T15:23:07.613980Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-25T15:23:07.666770Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-25T15:23:07.666920Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-25T15:23:07.669797Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-25T15:23:07.669913Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-25T15:23:07.669981Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-25T15:23:07.674750Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-25T15:23:07.674963Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-25T15:23:07.675088Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: 
DataShard 72075186224037888 persisting started state actor id [1:643:2531] in generation 1 2025-06-25T15:23:07.686464Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-25T15:23:07.716817Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-25T15:23:07.720944Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-25T15:23:07.721242Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:645:2541] 2025-06-25T15:23:07.721322Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:23:07.721368Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-25T15:23:07.721412Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:23:07.729054Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-25T15:23:07.729251Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-25T15:23:07.729333Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:23:07.729376Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:23:07.729501Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:23:07.729553Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:23:07.736737Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:623:2528], serverId# [1:634:2535], sessionId# [0:0:0] 2025-06-25T15:23:07.737017Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:23:07.737437Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-25T15:23:07.737551Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-25T15:23:07.739626Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:23:07.753698Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-25T15:23:07.754874Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-25T15:23:07.926813Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:660:2550], serverId# [1:662:2552], sessionId# [0:0:0] 2025-06-25T15:23:07.931886Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 
281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-25T15:23:07.931988Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:23:07.937304Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:23:07.937379Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-25T15:23:07.937460Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-25T15:23:07.937763Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-25T15:23:07.937958Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-25T15:23:07.938504Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:23:07.938588Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-25T15:23:07.948686Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-25T15:23:07.953116Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:23:07.960934Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-25T15:23:07.961020Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:23:07.961659Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-25T15:23:07.961752Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:23:07.962557Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:23:07.962601Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-25T15:23:07.962666Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-25T15:23:07.962732Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:373:2367], exec latency: 0 ms, propose latency: 0 ms 2025-06-25T15:23:07.962781Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-25T15:23:07.962931Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-25T15:23:07.982446Z node 1 :TX_DATASHARD DEBUG: 
datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-25T15:23:07.984933Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-25T15:23:07.985013Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-25T15:23:07.986069Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-25T15:23:08.077145Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:700:2581], serverId# [1:701:2582], sessionId# [0:0:0] 2025-06-25T15:23:08.079406Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-25T15:23:08.079629Z node 1 :TX_DATASHARD DEBUG: check_snapshot_tx_unit.cpp:153: Prepared Snapshot transaction txId 281474976715658 at tabl ... .212938Z node 24 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:26:58.212961Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:26:58.244304Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519908851549708273:2436], serverId# [24:7519908851549708275:2438], sessionId# [0:0:0] 2025-06-25T15:26:58.244528Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 9 Seed: 0 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SnapshotTxId: 281474976710661 SnapshotStep: 1750865218186 SeqNoGeneration: 12 SeqNoRound: 1 row version v1750865218186/281474976710661 2025-06-25T15:26:58.244590Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-25T15:26:58.244751Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519908851549708274:2437], serverId# [24:7519908851549708276:2439], sessionId# [0:0:0] 2025-06-25T15:26:58.244851Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 9 Seed: 0 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SnapshotTxId: 281474976710661 SnapshotStep: 1750865218186 SeqNoGeneration: 12 SeqNoRound: 1 row version v1750865218186/281474976710661 2025-06-25T15:26:58.244888Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-25T15:26:58.245017Z node 24 :BUILD_INDEX INFO: sample_k.cpp:108: Prepare TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-25T15:26:58.245057Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:116: Seek 0 TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-25T15:26:58.245197Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:142: Exhausted TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 5 2025-06-25T15:26:58.245321Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:171: Done 
TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 Id: 1 TabletId: 72075186224037888 Status: DONE ReadRows: 5 ReadBytes: 60 RequestSeqNoGeneration: 12 RequestSeqNoRound: 1 Probabilities: 2033423113665717216 Probabilities: 6650325416439211286 Probabilities: 7100737942502591086 Probabilities: 7451216775397157654 Probabilities: 16483534760593850368 Rows: 5 2025-06-25T15:26:58.245608Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:26:58.245639Z node 24 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:26:58.245657Z node 24 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:26:58.245683Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:26:58.273448Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519908851549708283:2444], serverId# [24:7519908851549708285:2446], sessionId# [0:0:0] 2025-06-25T15:26:58.273622Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 1 Seed: 111 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SeqNoGeneration: 13 SeqNoRound: 1 row version v1750865218193/18446744073709551615 2025-06-25T15:26:58.273680Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 1 Sample: 0 2025-06-25T15:26:58.273835Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519908851549708284:2445], serverId# [24:7519908851549708286:2447], sessionId# [0:0:0] 2025-06-25T15:26:58.273928Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 1 Seed: 111 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SeqNoGeneration: 13 SeqNoRound: 1 row version v1750865218193/18446744073709551615 2025-06-25T15:26:58.273963Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 1 Sample: 0 2025-06-25T15:26:58.274095Z node 24 :BUILD_INDEX INFO: sample_k.cpp:108: Prepare TSampleKScan TabletId: 72075186224037888 Id: 1 K: 1 Sample: 0 2025-06-25T15:26:58.274146Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:116: Seek 0 TSampleKScan TabletId: 72075186224037888 Id: 1 K: 1 Sample: 0 2025-06-25T15:26:58.274268Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:142: Exhausted TSampleKScan TabletId: 72075186224037888 Id: 1 K: 1 Sample: 1 2025-06-25T15:26:58.274373Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:171: Done TSampleKScan TabletId: 72075186224037888 Id: 1 K: 1 Sample: 0 Id: 1 TabletId: 72075186224037888 Status: DONE ReadRows: 5 ReadBytes: 60 RequestSeqNoGeneration: 13 RequestSeqNoRound: 1 Probabilities: 1182976742465768660 Rows: 1 2025-06-25T15:26:58.274508Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:26:58.274536Z node 24 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:26:58.274555Z node 24 :TX_DATASHARD INFO: 
datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:26:58.274578Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:26:58.305588Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519908851549708293:2452], serverId# [24:7519908851549708295:2454], sessionId# [0:0:0] 2025-06-25T15:26:58.305640Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519908851549708294:2453], serverId# [24:7519908851549708296:2455], sessionId# [0:0:0] 2025-06-25T15:26:58.305814Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 3 Seed: 111 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SeqNoGeneration: 14 SeqNoRound: 1 row version v1750865218193/18446744073709551615 2025-06-25T15:26:58.305878Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 3 Sample: 0 2025-06-25T15:26:58.306105Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 3 Seed: 111 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SeqNoGeneration: 14 SeqNoRound: 1 row version v1750865218193/18446744073709551615 2025-06-25T15:26:58.306141Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 3 Sample: 0 2025-06-25T15:26:58.306284Z node 24 :BUILD_INDEX INFO: sample_k.cpp:108: Prepare TSampleKScan TabletId: 72075186224037888 Id: 1 K: 3 Sample: 0 2025-06-25T15:26:58.306325Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:116: Seek 0 TSampleKScan TabletId: 72075186224037888 Id: 1 K: 3 Sample: 0 2025-06-25T15:26:58.306462Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:142: Exhausted TSampleKScan TabletId: 72075186224037888 Id: 1 K: 3 Sample: 3 2025-06-25T15:26:58.306576Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:171: Done TSampleKScan TabletId: 72075186224037888 Id: 1 K: 3 Sample: 0 Id: 1 TabletId: 72075186224037888 Status: DONE ReadRows: 5 ReadBytes: 60 RequestSeqNoGeneration: 14 RequestSeqNoRound: 1 Probabilities: 1182976742465768660 Probabilities: 5318693189955351404 Probabilities: 10350790534088709650 Rows: 3 2025-06-25T15:26:58.307483Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:26:58.307513Z node 24 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:26:58.307531Z node 24 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:26:58.307558Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-25T15:26:58.341311Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519908851549708303:2460], serverId# [24:7519908851549708305:2462], sessionId# [0:0:0] 2025-06-25T15:26:58.341350Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [24:7519908851549708304:2461], serverId# 
[24:7519908851549708306:2463], sessionId# [0:0:0] 2025-06-25T15:26:58.341528Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 9 Seed: 111 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SeqNoGeneration: 15 SeqNoRound: 1 row version v1750865218193/18446744073709551615 2025-06-25T15:26:58.341589Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-25T15:26:58.341849Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:263: Starting TSampleKScan TabletId: 72075186224037888 Id: 1 TabletId: 72075186224037888 PathId { OwnerId: 72057594046644480 LocalId: 2 } K: 9 Seed: 111 MaxProbability: 18446744073709551615 Columns: "value" Columns: "key" SeqNoGeneration: 15 SeqNoRound: 1 row version v1750865218193/18446744073709551615 2025-06-25T15:26:58.341894Z node 24 :BUILD_INDEX INFO: sample_k.cpp:89: Create TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-25T15:26:58.342018Z node 24 :BUILD_INDEX INFO: sample_k.cpp:108: Prepare TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-25T15:26:58.342069Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:116: Seek 0 TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 2025-06-25T15:26:58.342221Z node 24 :BUILD_INDEX TRACE: sample_k.cpp:142: Exhausted TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 5 2025-06-25T15:26:58.342354Z node 24 :BUILD_INDEX NOTICE: sample_k.cpp:171: Done TSampleKScan TabletId: 72075186224037888 Id: 1 K: 9 Sample: 0 Id: 1 TabletId: 72075186224037888 Status: DONE ReadRows: 5 ReadBytes: 60 RequestSeqNoGeneration: 15 RequestSeqNoRound: 1 Probabilities: 1182976742465768660 Probabilities: 5318693189955351404 Probabilities: 10350790534088709650 Probabilities: 15054120856910380438 Probabilities: 17710393315211645100 Rows: 5 2025-06-25T15:26:58.342506Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-25T15:26:58.342537Z node 24 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-25T15:26:58.342557Z node 24 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-25T15:26:58.342586Z node 24 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 |98.7%| [TM] {RESULT} ydb/core/tx/datashard/build_index/ut/unittest |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_session_pool.py::TestSessionPool::test_session_pool_simple_acquire [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_1 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_2 [GOOD] |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_keep_alive [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_3 [GOOD] >> 
test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_4 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_release_logic [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_close_basic_logic_case_1 >> test_session_pool.py::TestSessionPool::test_session_pool_close_basic_logic_case_1 [GOOD] >> test_session_pool.py::TestSessionPool::test_no_cluster_endpoints_no_failure >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_public_api.py::TestCRUDOperations::test_prepared_query_pipeline |98.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/hive/py3test >> test_drain.py::TestHive::test_drain_tablets [GOOD] |98.8%| [TA] $(B)/ydb/tests/functional/hive/test-results/py3test/{meta.json ... results_accumulator.log} |98.8%| [TA] {RESULT} $(B)/ydb/tests/functional/hive/test-results/py3test/{meta.json ... results_accumulator.log} |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/restarts/py3test >> test_insert_restarts.py::TestS3::test_atomic_upload_commit[v2-client0] [GOOD] |98.8%| [TM] {RESULT} ydb/tests/fq/restarts/py3test >> KafkaProtocol::InitProducerId_forSqlInjectionShouldReturnWithoutDropingDatabase [GOOD] >> KafkaProtocol::InitProducerId_forPreviouslySeenTransactionalIdShouldReturnSameProducerIdAndIncrementEpoch |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_discovery.py::TestDiscoveryFaultInjectionSlotStop::test_scenario |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_isolation.py::TestTransactionIsolation::test_prevents_write_cycles_g0 |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_public_api.py::TestCRUDOperations::test_prepared_query_pipeline [GOOD] >> test_public_api.py::TestCRUDOperations::test_scheme_client_ops >> test_public_api.py::TestCRUDOperations::test_scheme_client_ops [GOOD] >> test_public_api.py::TestCRUDOperations::test_scheme_operation_errors_handle [GOOD] >> test_public_api.py::TestCRUDOperations::test_none_values >> test_public_api.py::TestCRUDOperations::test_none_values [GOOD] >> test_public_api.py::TestCRUDOperations::test_parse_list_type [GOOD] >> test_public_api.py::TestCRUDOperations::test_parse_tuple [GOOD] >> test_public_api.py::TestCRUDOperations::test_dict_type >> test_public_api.py::TestCRUDOperations::test_dict_type [GOOD] >> test_public_api.py::TestCRUDOperations::test_struct_type >> test_public_api.py::TestCRUDOperations::test_struct_type [GOOD] >> test_public_api.py::TestCRUDOperations::test_data_types >> test_public_api.py::TestCRUDOperations::test_data_types [GOOD] >> test_public_api.py::TestCRUDOperations::test_struct_type_parameter >> test_public_api.py::TestCRUDOperations::test_struct_type_parameter [GOOD] >> test_public_api.py::TestCRUDOperations::test_bulk_prepared_insert_many_values |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_write_cycles_g0 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_aborted_reads_g1a >> 
test_public_api.py::TestCRUDOperations::test_bulk_prepared_insert_many_values [GOOD] >> test_public_api.py::TestCRUDOperations::test_bulk_upsert >> test_workload_simple_queue.py::TestWorkloadSimpleQueue::test_workload_simple_queue[column] [GOOD] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3-dc] >> test_isolation.py::TestTransactionIsolation::test_prevents_aborted_reads_g1a [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_intermediate_reads_g1b >> test_public_api.py::TestCRUDOperations::test_bulk_upsert [GOOD] |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_public_api.py::TestCRUDOperations::test_all_enums_are_presented_as_exceptions [GOOD] >> test_public_api.py::TestCRUDOperations::test_type_builders_str_methods [GOOD] >> test_public_api.py::TestCRUDOperations::test_create_and_delete_session_then_use_it_again [GOOD] >> test_public_api.py::TestCRUDOperations::test_locks_invalidated_error >> test_public_api.py::TestCRUDOperations::test_locks_invalidated_error [GOOD] >> test_public_api.py::TestCRUDOperations::test_tcl [GOOD] >> test_public_api.py::TestCRUDOperations::test_tcl_2 [GOOD] >> test_public_api.py::TestCRUDOperations::test_tcl_3 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_intermediate_reads_g1b [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_circular_information_flow_g1c >> test_public_api.py::TestCRUDOperations::test_reuse_session_to_tx_leak |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_workload_simple_queue.py::TestWorkloadSimpleQueue::test_workload_simple_queue[column] [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_circular_information_flow_g1c [GOOD] >> test_isolation.py::TestTransactionIsolation::test_isolation_mailing_list_example >> test_isolation.py::TestTransactionIsolation::test_isolation_mailing_list_example [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_observed_transaction_vanishes_otv |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_insert.py::TestInsertOperations::test_query_pairs [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_success >> test_isolation.py::TestTransactionIsolation::test_prevents_observed_transaction_vanishes_otv [GOOD] >> test_isolation.py::TestTransactionIsolation::test_does_not_prevent_predicate_many_preceders_pmp >> test_isolation.py::TestTransactionIsolation::test_does_not_prevent_predicate_many_preceders_pmp [GOOD] >> test_isolation.py::TestTransactionIsolation::test_does_not_prevent_predicate_many_preceders_pmp_for_write_predicates >> test_isolation.py::TestTransactionIsolation::test_does_not_prevent_predicate_many_preceders_pmp_for_write_predicates [GOOD] >> test_isolation.py::TestTransactionIsolation::test_lost_update_p4 |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> CompositeConveyorTests::Test10xMultiDistribution [GOOD] >> test_isolation.py::TestTransactionIsolation::test_lost_update_p4 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_lost_update_on_value_p4 >> test_isolation.py::TestTransactionIsolation::test_lost_update_on_value_p4 [GOOD] >> 
test_isolation.py::TestTransactionIsolation::test_lost_update_on_value_with_upsert_p4 |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_crud.py::TestCreateAndUpsertWithRepetitions::test_create_and_select_with_repetitions[10-64] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::Test10xMultiDistribution [GOOD] Test command err: {I:56519};{S:156596};{N:91306}; {I:80171};{S:193260};{N:97040}; {I:102954};{S:222040};{N:127087}; {I:143392};{S:244398};{N:165093}; {I:167053};{S:269041};{N:183723}; {I:188420};{S:272441};{N:205413}; {I:194292};{S:272441};{N:229702}; {I:199588};{S:275818};{N:245372}; {I:222140};{S:275818};{N:245372}; {I:245284};{S:275818};{N:249968}; {I:245373};{S:275818};{N:270237}; {I:245373};{S:275818};{N:289817}; {I:245373};{S:275818};{N:309131}; {I:245373};{S:275818};{N:326778}; {I:245373};{S:275818};{N:343085}; {I:245373};{S:275818};{N:379769}; {I:245373};{S:275818};{N:414410}; {I:245373};{S:275818};{N:454834}; {I:245373};{S:275818};{N:499123}; {I:245373};{S:277778};{N:538865}; {I:245373};{S:299997};{N:560340}; {I:245373};{S:333965};{N:583098}; {I:245373};{S:379943};{N:604518}; {I:245373};{S:462486};{N:608378}; {I:245373};{S:553061};{N:608378}; {I:245373};{S:639703};{N:608378}; {I:250352};{S:717713};{N:608378}; {I:269955};{S:755291};{N:608378}; {I:293291};{S:801908};{N:627341}; {I:316811};{S:825320};{N:646385}; {I:352175};{S:848508};{N:667877}; {I:395315};{S:871634};{N:691207}; {I:441709};{S:895417};{N:715427}; {I:485312};{S:919431};{N:736670}; {I:509123};{S:943704};{N:761169}; {I:533779};{S:968224};{N:785815}; {I:558531};{S:992866};{N:810585}; {I:583112};{S:1000000};{N:835159}; {I:607752};{S:1000000};{N:859724}; {I:632394};{S:1000000};{N:884063}; {I:657045};{S:1000000};{N:908608}; {I:680618};{S:1000000};{N:932633}; {I:704448};{S:1000000};{N:955717}; {I:704448};{S:1000000};{N:979342}; {I:707369};{S:1000000};{N:1000000}; {I:728926};{S:1000000};{N:1000000}; {I:753371};{S:1000000};{N:1000000}; {I:777250};{S:1000000};{N:1000000}; {I:800549};{S:1000000};{N:1000000}; {I:824196};{S:1000000};{N:1000000}; {I:848785};{S:1000000};{N:1000000}; {I:873450};{S:1000000};{N:1000000}; {I:898118};{S:1000000};{N:1000000}; {I:922903};{S:1000000};{N:1000000}; {I:947557};{S:1000000};{N:1000000}; {I:972201};{S:1000000};{N:1000000}; {I:996949};{S:1000000};{N:1000000}; {I:1000000};{S:1000000};{N:1000000}; 180us per task 57.074675s;37.053365s;44.057250s; >> test_isolation.py::TestTransactionIsolation::test_lost_update_on_value_with_upsert_p4 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single [GOOD] >> test_result_limits.py::TestResultLimits::test_large_row >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single_predicate_deps >> test_public_api.py::TestCRUDOperations::test_reuse_session_to_tx_leak [GOOD] >> test_public_api.py::TestCRUDOperations::test_direct_leak_tx_but_no_actual_leak_by_best_efforts >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single_predicate_deps [GOOD] >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single_write_predicate >> test_public_api.py::TestCRUDOperations::test_direct_leak_tx_but_no_actual_leak_by_best_efforts [GOOD] >> test_public_api.py::TestCRUDOperations::test_presented_in_cache [GOOD] >> test_public_api.py::TestCRUDOperations::test_decimal_values_negative_stories >> 
test_public_api.py::TestCRUDOperations::test_decimal_values_negative_stories [GOOD] >> test_public_api.py::TestCRUDOperations::test_decimal_values >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single_write_predicate [GOOD] >> test_isolation.py::TestTransactionIsolation::test_write_skew_g2_item >> test_public_api.py::TestCRUDOperations::test_decimal_values [GOOD] >> test_public_api.py::TestCRUDOperations::test_list_directory_with_children >> test_public_api.py::TestCRUDOperations::test_list_directory_with_children [GOOD] >> test_public_api.py::TestCRUDOperations::test_validate_describe_path_result [GOOD] >> test_public_api.py::TestCRUDOperations::test_acl_modifications_1 [GOOD] >> test_public_api.py::TestCRUDOperations::test_acl_modification_2 [GOOD] >> KafkaProtocol::InitProducerId_forPreviouslySeenTransactionalIdShouldReturnSameProducerIdAndIncrementEpoch [GOOD] >> KafkaProtocol::InitProducerId_forPreviouslySeenTransactionalIdShouldReturnNewProducerIdIfEpochOverflown >> test_public_api.py::TestCRUDOperations::test_can_execute_valid_statement_after_invalid_success >> test_public_api.py::TestCRUDOperations::test_can_execute_valid_statement_after_invalid_success [GOOD] >> test_public_api.py::TestCRUDOperations::test_modify_permissions_3 [GOOD] >> test_public_api.py::TestCRUDOperations::test_directory_that_doesnt_exists |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_public_api.py::TestCRUDOperations::test_directory_that_doesnt_exists [GOOD] >> test_public_api.py::TestCRUDOperations::test_crud_acl_actions [GOOD] >> test_public_api.py::TestCRUDOperations::test_too_many_pending_transactions >> test_public_api.py::TestCRUDOperations::test_too_many_pending_transactions [GOOD] >> test_public_api.py::TestCRUDOperations::test_query_set1 >> CompositeConveyorTests::TestUniformProcessDistribution [GOOD] >> test_isolation.py::TestTransactionIsolation::test_write_skew_g2_item [GOOD] >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2 >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2_two_edges >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_success [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_add_new_column |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_add_new_column [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_change_column_type >> CompositeConveyorTests::TestUniformScopesDistribution [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_change_column_type [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_column >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_column [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_add_to_key |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_add_to_key [GOOD] >> 
test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_from_key >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_from_key [GOOD] >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2_two_edges [GOOD] >> test_public_api.py::TestCRUDOperations::test_query_set1 [GOOD] >> test_public_api.py::TestCRUDOperations::test_queries_set2 ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::TestUniformProcessDistribution [GOOD] Test command err: {1:101469};{2:102900};{3:101235}; {1:135709};{2:136306};{3:136095}; {1:158502};{2:158929};{3:158731}; {1:179618};{2:179889};{3:180361}; {1:194625};{2:194624};{3:194624}; {1:194625};{2:194624};{3:194624}; {1:196396};{2:195952};{3:197278}; {1:203904};{2:203636};{3:205538}; {1:215024};{2:214434};{3:217008}; {1:229328};{2:229070};{3:232741}; {1:243061};{2:243122};{3:247848}; {1:256534};{2:256907};{3:261687}; {1:269813};{2:270496};{3:272192}; {1:284762};{2:282378};{3:284013}; {1:301460};{2:293992};{3:296663}; {1:318599};{2:305995};{3:309734}; {1:335289};{2:317449};{3:322218}; {1:344582};{2:333586};{3:339899}; {1:352073};{2:351463};{3:349451}; {1:359537};{2:359101};{3:357662}; {1:376935};{2:366346};{3:365450}; {1:399694};{2:372039};{3:377798}; {1:417572};{2:378270};{3:395998}; {1:425744};{2:384292};{3:403970}; {1:436042};{2:394327};{3:414031}; {1:450763};{2:408688};{3:428393}; {1:465648};{2:423211};{3:442915}; {1:481182};{2:438367};{3:458071}; {1:494609};{2:451465};{3:471168}; {1:509022};{2:465527};{3:485231}; {1:524409};{2:480539};{3:500243}; {1:545946};{2:501073};{3:520583}; {1:570053};{2:524592};{3:544102}; {1:594796};{2:548731};{3:568242}; {1:619578};{2:572910};{3:592419}; {1:644484};{2:597209};{3:616718}; {1:669291};{2:621412};{3:640921}; {1:694016};{2:645531};{3:665041}; {1:718760};{2:669673};{3:689183}; {1:743554};{2:693862};{3:713372}; {1:767583};{2:717305};{3:736814}; {1:792063};{2:741188};{3:760699}; {1:813237};{2:761846};{3:781356}; {1:829278};{2:777525};{3:797035}; {1:844361};{2:792775};{3:812285}; {1:860215};{2:808771};{3:828282}; {1:876423};{2:824785};{3:844294}; {1:892360};{2:840525};{3:860036}; {1:908294};{2:856264};{3:875936}; {1:924285};{2:872061};{3:891929}; {1:940764};{2:888413};{3:908204}; {1:951163};{2:898957};{3:918546}; {1:959394};{2:907146};{3:926777}; {1:967661};{2:915216};{3:935043}; {1:975932};{2:923290};{3:943314}; {1:984234};{2:931395};{3:951617}; {1:992537};{2:939500};{3:959920}; {1:1000000};{2:948031};{3:968660}; {1:1000000};{2:960249};{3:981175}; {1:1000000};{2:972479};{3:993703}; {1:1000000};{2:990972};{3:1000000}; {1:1000000};{2:1000000};{3:1000000}; 193us per task 57.078261s;61.078872s;60.078720s; ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::TestUniformScopesDistribution [GOOD] Test command err: {1:95025};{2:86054};{3:93809}; {1:102479};{2:93325};{3:101263}; {1:116218};{2:100898};{3:113865}; {1:129325};{2:113088};{3:128188}; {1:142507};{2:132884};{3:141393}; {1:170128};{2:156096};{3:169678}; {1:210051};{2:191608};{3:209343}; {1:240477};{2:225512};{3:240496}; {1:258989};{2:258307};{3:258784}; {1:277889};{2:272517};{3:279168}; {1:297700};{2:284625};{3:300241}; {1:318368};{2:302015};{3:320745}; {1:337158};{2:320660};{3:339390}; {1:357610};{2:341111};{3:359784}; {1:371918};{2:362402};{3:373402}; {1:386782};{2:370715};{3:389297}; {1:402141};{2:385887};{3:404468}; 
{1:416788};{2:400350};{3:418933}; {1:431643};{2:415021};{3:433604}; {1:447184};{2:430372};{3:448954}; {1:462953};{2:445939};{3:464522}; {1:478999};{2:461791};{3:480372}; {1:493533};{2:480947};{3:493232}; {1:507782};{2:498676};{3:506237}; {1:523400};{2:514096};{3:521657}; {1:537519};{2:528039};{3:535600}; {1:552293};{2:542632};{3:550193}; {1:566775};{2:556947};{3:564509}; {1:581624};{2:571613};{3:579176}; {1:595435};{2:588095};{3:594372}; {1:603303};{2:602261};{3:602241}; {1:612801};{2:611551};{3:611373}; {1:628515};{2:631717};{3:627089}; {1:644492};{2:647693};{3:643065}; {1:660744};{2:663946};{3:659318}; {1:677020};{2:680221};{3:675594}; {1:693161};{2:696379};{3:691844}; {1:709275};{2:712307};{3:708153}; {1:725467};{2:728311};{3:724541}; {1:741459};{2:744118};{3:740726}; {1:757487};{2:759959};{3:756951}; {1:773327};{2:775615};{3:772984}; {1:789346};{2:791445};{3:789200}; {1:804758};{2:806674};{3:804806}; {1:820195};{2:821926};{3:820434}; {1:836326};{2:837871};{3:836763}; {1:851928};{2:853289};{3:852557}; {1:867713};{2:868890};{3:868536}; {1:883857};{2:884845};{3:884876}; {1:899992};{2:900792};{3:901209}; {1:915255};{2:915868};{3:916669}; {1:923384};{2:923807};{3:924995}; {1:931553};{2:931787};{3:933365}; {1:939717};{2:939761};{3:941728}; {1:947935};{2:947787};{3:950145}; {1:956160};{2:955821};{3:958572}; {1:964387};{2:963856};{3:966999}; {1:972611};{2:971889};{3:975424}; {1:980778};{2:979866};{3:983790}; {1:989016};{2:987913};{3:992229}; {1:997612};{2:996309};{3:1000000}; {1:1000000};{2:1000000};{3:1000000}; 193us per task 61.072701s;61.072715s;60.072598s; >> test_public_api.py::TestCRUDOperations::test_queries_set2 [GOOD] >> test_public_api.py::TestCRUDOperations::test_when_result_set_is_large_then_issue_occure |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[none] >> test_crud.py::TestCreateAndUpsertWithRepetitions::test_create_and_select_with_repetitions[10-64] [GOOD] >> test_crud.py::TestCreateAndUpsertWithRepetitions::test_create_and_upsert_data_with_repetitions[10-64] |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |98.9%| [TA] $(B)/ydb/tests/datashard/secondary_index/test-results/py3test/{meta.json ... results_accumulator.log} |98.9%| [TA] {RESULT} $(B)/ydb/tests/datashard/secondary_index/test-results/py3test/{meta.json ... 
results_accumulator.log} |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_crud.py::TestCreateAndUpsertWithRepetitions::test_create_and_upsert_data_with_repetitions[10-64] [GOOD] |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_session_pool.py::TestSessionPool::test_no_cluster_endpoints_no_failure [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_close_basic_logic_case_2 |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_session_pool.py::TestSessionPool::test_session_pool_close_basic_logic_case_2 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_min_size_feature >> test_session_pool.py::TestSessionPool::test_session_pool_min_size_feature [GOOD] |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_public_api.py::TestCRUDOperations::test_when_result_set_is_large_then_issue_occure [GOOD] |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> KafkaProtocol::InitProducerId_forPreviouslySeenTransactionalIdShouldReturnNewProducerIdIfEpochOverflown [GOOD] >> KafkaProtocol::CommitTransactionScenario >> test_discovery.py::TestDiscoveryFaultInjectionSlotStop::test_scenario [GOOD] |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[none] [GOOD] |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[none] [GOOD] |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_self_heal.py::TestEnableSelfHeal::test_replication >> test_configuration_version.py::TestConfigurationVersion::test_configuration_version [GOOD] |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_from_key [GOOD] |98.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2_two_edges [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_crud.py::TestCRUDOperations::test_create_table_and_drop_table_success >> 
test_restarts.py::TestRestartMultipleMirror34::test_tablets_are_successfully_started_after_few_killed_nodes |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_restarts.py::TestRestartMultipleBlock42::test_tablets_are_successfully_started_after_few_killed_nodes >> test_session_grace_shutdown.py::Test::test_grace_shutdown_of_session >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorKeep::test_restart_as_much_as_can |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_session_pool.py::TestSessionPool::test_session_pool_min_size_feature [GOOD] >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_and_change_tablet_channel_then_can_read_from_tablet |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_configuration_version.py::TestConfigurationVersion::test_configuration_version [GOOD] >> test_crud.py::TestCRUDOperations::test_create_table_and_drop_table_success [GOOD] >> test_crud.py::TestCRUDOperations::test_create_table_wrong_primary_key_failed1 >> test_crud.py::TestCRUDOperations::test_create_table_wrong_primary_key_failed1 [GOOD] >> test_crud.py::TestCRUDOperations::test_create_table_wrong_primary_key_failed2 >> test_crud.py::TestCRUDOperations::test_create_table_wrong_primary_key_failed2 [GOOD] >> test_discovery.py::TestMirror3DCDiscovery::test_mirror3dc_discovery_logic >> test_read_table.py::TestReadTableSuccessStories::test_read_table_only_specified_ranges >> test_session_grace_shutdown.py::Test::test_grace_shutdown_of_session [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_public_api.py::TestSessionNotFound::test_session_not_found |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_self_heal.py::TestEnableSelfHeal::test_replication [GOOD] >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_and_change_tablet_channel_then_can_read_from_tablet [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_self_heal.py::TestEnableSelfHeal::test_replication [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_session_grace_shutdown.py::TestIdle::test_idle_shutdown_of_session >> test_read_table.py::TestReadTableSuccessStories::test_read_table_only_specified_ranges [GOOD] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_constructed_key_range >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3-dc] [GOOD] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_read_table.py::TestReadTableSuccessStories::test_read_table_constructed_key_range [GOOD] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_reads_only_specified_columns >> test_read_table.py::TestReadTableSuccessStories::test_read_table_reads_only_specified_columns [GOOD] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_without_data_has_snapshot >> 
test_read_table.py::TestReadTableSuccessStories::test_read_table_without_data_has_snapshot [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3-dc] [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_and_change_tablet_channel_then_can_read_from_tablet [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_pdisk_format_info.py::TestPDiskInfo::test_read_disk_state >> test_crud.py::TestSelect::test_advanced_select_failed[select distinct b, a from (select a, b from t1 union all select b, a from t1 order by b) order by B-Column B is not in source column set.*] >> KafkaProtocol::CommitTransactionScenario [GOOD] >> KafkaProtocol::AbortTransactionScenario |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_pdisk_format_info.py::TestPDiskInfo::test_read_disk_state [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_result_limits.py::TestResultLimits::test_large_row [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select distinct b, a from (select a, b from t1 union all select b, a from t1 order by b) order by B-Column B is not in source column set.*] [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select count(a, b) from t1-Aggregation function Count requires exactly 1 argument] >> test_crud.py::TestSelect::test_advanced_select_failed[select count(a, b) from t1-Aggregation function Count requires exactly 1 argument] [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select min(a, b) from t1-Aggregation function Min requires exactly 1 argument] [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select min(*) from t1-.*is not allowed here] >> test_crud.py::TestSelect::test_advanced_select_failed[select min(*) from t1-.*is not allowed here] [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageToTheSameConfig::test_cluster_change_state_storage >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_in_new_channel_then_can_read_from_tablet |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_result_limits.py::TestResultLimits::test_quotas[kikimr0] >> test_discovery.py::TestMirror3DCDiscovery::test_mirror3dc_discovery_logic [GOOD] |99.0%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_pdisk_format_info.py::TestPDiskInfo::test_read_disk_state [GOOD] >> test_public_api.py::TestSessionNotFound::test_session_not_found [GOOD] >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_in_new_channel_then_can_read_from_tablet [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_read_table.py::TestReadTableTruncatedResults::test_truncated_results[async_read_table] |99.1%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3] [GOOD] >> test_session_grace_shutdown.py::TestIdle::test_idle_shutdown_of_session [GOOD] >> test_cms_state_storage.py::TestCmsStateStorageSimpleKeep::test_check_shutdown_state_storage_nodes [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_public_api.py::TestSessionNotFoundOperations::test_session_pool |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_in_new_channel_then_can_read_from_tablet [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorage::test_cluster_change_state_storage >> test_crud.py::TestClientTimeouts::test_can_set_timeouts_on_query |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_state_storage.py::TestCmsStateStorageSimpleKeep::test_check_shutdown_state_storage_nodes [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_public_api.py::TestSessionNotFoundOperations::test_session_pool [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_ok_keep_alive_example [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_can_commit_bad_tx [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_cannot_commit_bad_tx [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_commit_successfully_after_success_commit >> test_public_api.py::TestSessionNotFoundOperations::test_commit_successfully_after_success_commit [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_invalid_keep_alive_example [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_describe_table_with_bounds >> test_public_api.py::TestSessionNotFoundOperations::test_describe_table_with_bounds [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_native_datetime_types >> test_public_api.py::TestSessionNotFoundOperations::test_native_datetime_types [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_native_date_types |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_session_grace_shutdown.py::TestIdle::test_idle_shutdown_of_session [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_native_date_types [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_keep_in_cache_disabled [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_explicit_partitions_case_1 >> test_public_api.py::TestSessionNotFoundOperations::test_explicit_partitions_case_1 [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_explict_partitions_case_2 >> test_public_api.py::TestSessionNotFoundOperations::test_explict_partitions_case_2 [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_simple_table_profile_settings [GOOD] >> test_read_table.py::TestReadTableTruncatedResults::test_truncated_results[async_read_table] [GOOD] >> test_read_table.py::TestReadTableTruncatedResults::test_truncated_results[read_table] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> 
test_discovery.py::TestMirror3DCDiscovery::test_mirror3dc_discovery_logic [GOOD] >> test_alter_compression.py::TestAlterCompression::test[alter_compression] [GOOD] >> test_alter_compression.py::TestAlterCompression::test_multi[alter_compression] [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> KafkaProtocol::AbortTransactionScenario [GOOD] >> KafkaProtocol::TransactionShouldBeAbortedIfPartitionIsAddedToTransactionButNoWritesToItWereReceived >> test_crud.py::TestClientTimeouts::test_can_set_timeouts_on_query [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--false] >> test_read_table.py::TestReadTableTruncatedResults::test_truncated_results[read_table] [GOOD] >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--false] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--false] >> test_result_limits.py::TestResultLimits::test_quotas[kikimr0] [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3] [GOOD] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_dc_local.py::TestAlloc::test_dc_locality[kikimr0] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_public_api.py::TestBadSession::test_simple |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--true] |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.1%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> KafkaProtocol::TransactionShouldBeAbortedIfPartitionIsAddedToTransactionButNoWritesToItWereReceived [GOOD] >> KafkaProtocol::ProducerFencedInTransactionScenario |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/scenario/py3test >> test_alter_compression.py::TestAlterCompression::test_multi[alter_compression] [GOOD] >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[async_read_table] >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--true] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.2%| [TA] $(B)/ydb/tests/olap/scenario/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_crud.py::TestManySelectsInRow::test_selects_in_row_success[500-500-50] >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--false] |99.2%| [TA] {RESULT} $(B)/ydb/tests/olap/scenario/test-results/py3test/{meta.json ... results_accumulator.log} >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[async_read_table] [GOOD] >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[read_table] >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[read_table] [GOOD] >> test_dc_local.py::TestAlloc::test_dc_locality[kikimr0] [GOOD] >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--false] >> test_alloc_default.py::TestAlloc::test_default_limits[kikimr0] >> test_public_api.py::TestBadSession::test_simple [GOOD] >> test_public_api.py::TestDriverCanRecover::test_driver_recovery >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageToTheSameConfig::test_cluster_change_state_storage [GOOD] >> KafkaProtocol::ProducerFencedInTransactionScenario [GOOD] >> KafkaProtocol::ConsumerFencedInTransactionScenario |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[read_table] [GOOD] >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--false] [FAIL] >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] >> test_alloc_default.py::TestAlloc::test_default_limits[kikimr0] [GOOD] >> test_public_api.py::TestDriverCanRecover::test_driver_recovery [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_restarts.py::TestRestartMultipleMirror34::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageToTheSameConfig::test_cluster_change_state_storage [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--false] >> test_alloc_default.py::TestAlloc::test_default_delta[kikimr0] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorage::test_cluster_change_state_storage [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_restarts.py::TestRestartMultipleBlock42::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartMultipleMirror34::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorage::test_cluster_change_state_storage [GOOD] |99.2%| [TM] 
{asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartMultipleBlock42::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--false] >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--true] >> test_public_api.py::TestSelectAfterDropWithRepetitions::test_select_on_dropped_table_unsuccessful[10] >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--false] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> test_cms_restart.py::TestCmsStateStorageRestartsBlockMax::test_restart_as_much_as_can |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_config_with_metadata.py::TestConfigWithoutMetadataMirror::test_cluster_is_operational_without_metadata >> test_public_api.py::TestSelectAfterDropWithRepetitions::test_select_on_dropped_table_unsuccessful[10] [GOOD] >> test_alloc_default.py::TestAlloc::test_default_delta[kikimr0] [GOOD] >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--true] >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--true] >> test_alloc_default.py::TestAlloc::test_node_limit[kikimr0] >> KafkaProtocol::ConsumerFencedInTransactionScenario [GOOD] >> TMetadataActorTests::TopicMetadataGoodAndBad >> test_public_api.py::TestMetaDataInvalidation::test_invalidation_success >> test_config_with_metadata.py::TestConfigWithoutMetadataMirror::test_cluster_is_operational_without_metadata [GOOD] >> test_public_api.py::TestMetaDataInvalidation::test_invalidation_success [GOOD] >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorKeep::test_restart_as_much_as_can [GOOD] >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithoutMetadataMirror::test_cluster_is_operational_without_metadata [GOOD] >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--true] [GOOD] |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.2%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorKeep::test_restart_as_much_as_can [GOOD] >> TMetadataActorTests::TopicMetadataGoodAndBad [GOOD] >> ProduceActor::OnProduceWithTransactionalIdAndNewEpoch_shouldRegisterNewPartitionWriterAndSendPoisonPillToOld >> ProduceActor::OnProduceWithTransactionalIdAndNewEpoch_shouldRegisterNewPartitionWriterAndSendPoisonPillToOld [GOOD] >> ProduceActor::OnProduceWithoutTransactionalId_shouldNotKillOldWriter >> test_config_with_metadata.py::TestKiKiMRWithoutMetadata::test_cluster_is_operational_without_metadata >> 
ProduceActor::OnProduceWithoutTransactionalId_shouldNotKillOldWriter [GOOD] >> PublishKafkaEndpoints::HaveEndpointInLookup >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--false] [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--true] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[block-4-2] >> test_public_api.py::TestJsonExample::test_json_unexpected_failure >> test_public_api.py::TestJsonExample::test_json_unexpected_failure [GOOD] >> test_public_api.py::TestJsonExample::test_json_success >> test_public_api.py::TestJsonExample::test_json_success [GOOD] >> TSentinelUnstableTests::BSControllerCantChangeStatus [FAIL] >> PublishKafkaEndpoints::HaveEndpointInLookup [GOOD] >> PublishKafkaEndpoints::MetadataActorGetsEndpoint >> test_config_with_metadata.py::TestKiKiMRWithoutMetadata::test_cluster_is_operational_without_metadata [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--true] [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterMirror3DC::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRWithoutMetadata::test_cluster_is_operational_without_metadata [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/cms/ut_sentinel_unstable/unittest >> TSentinelUnstableTests::BSControllerCantChangeStatus [FAIL] Test command err: 2025-06-25T15:21:32.903737Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-25T15:21:32.903823Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-25T15:21:32.903889Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-25T15:21:32.903931Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-25T15:21:32.903984Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-25T15:21:32.904072Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-25T15:21:32.908863Z node 1 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "node-1" State: UNKNOWN Devices { Name: "pdisk-1-4" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-5" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-6" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-1-7" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 1 InterconnectPort: 10000 Location { Rack: "rack-1" } StartTimeSeconds: 0 } Hosts { Name: "node-2" State: UNKNOWN Devices { Name: "pdisk-2-8" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-9" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-10" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-2-11" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 2 InterconnectPort: 10000 Location { Rack: "rack-2" } 
StartTimeSeconds: 0 } Hosts { Name: "node-3" State: UNKNOWN Devices { Name: "pdisk-3-12" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-13" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-14" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-3-15" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 3 InterconnectPort: 10000 Location { Rack: "rack-3" } StartTimeSeconds: 0 } Hosts { Name: "node-4" State: UNKNOWN Devices { Name: "pdisk-4-16" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-17" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-18" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-4-19" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 4 InterconnectPort: 10000 Location { Rack: "rack-4" } StartTimeSeconds: 0 } Hosts { Name: "node-5" State: UNKNOWN Devices { Name: "pdisk-5-20" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-21" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-22" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-5-23" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 5 InterconnectPort: 10000 Location { Rack: "rack-5" } StartTimeSeconds: 0 } Hosts { Name: "node-6" State: UNKNOWN Devices { Name: "pdisk-6-24" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-25" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-26" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-6-27" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 6 InterconnectPort: 10000 Location { Rack: "rack-6" } StartTimeSeconds: 0 } Hosts { Name: "node-7" State: UNKNOWN Devices { Name: "pdisk-7-28" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-29" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-30" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-7-31" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 7 InterconnectPort: 10000 Location { Rack: "rack-7" } StartTimeSeconds: 0 } Hosts { Name: "node-8" State: UNKNOWN Devices { Name: "pdisk-8-32" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-33" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-34" State: DOWN Timestamp: 0 } Devices { Name: "pdisk-8-35" State: DOWN Timestamp: 0 } Timestamp: 0 NodeId: 8 InterconnectPort: 10000 Location { Rack: "rack-8" } StartTimeSeconds: 0 } } 2025-06-25T15:21:32.931067Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 4 Path: "/1/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 5 Path: "/1/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 6 Path: "/1/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 7 Path: "/1/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 8 Path: "/2/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 9 Path: "/2/pdisk-9.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 10 Path: "/2/pdisk-10.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 11 Path: "/2/pdisk-11.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 12 Path: "/3/pdisk-12.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 13 Path: "/3/pdisk-13.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 14 Path: "/3/pdisk-14.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 15 Path: "/3/pdisk-15.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 16 Path: "/4/pdisk-16.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 17 Path: "/4/pdisk-17.data" Guid: 1 DriveStatus: ACTIVE } PDisk { 
NodeId: 4 PDiskId: 18 Path: "/4/pdisk-18.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 19 Path: "/4/pdisk-19.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 20 Path: "/5/pdisk-20.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 21 Path: "/5/pdisk-21.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 22 Path: "/5/pdisk-22.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 23 Path: "/5/pdisk-23.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 24 Path: "/6/pdisk-24.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 25 Path: "/6/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 26 Path: "/6/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 27 Path: "/6/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 28 Path: "/7/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 29 Path: "/7/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 30 Path: "/7/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 31 Path: "/7/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 32 Path: "/8/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 33 Path: "/8/pdisk-33.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 34 Path: "/8/pdisk-34.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 35 Path: "/8/pdisk-35.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 6 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 7 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 
PDiskId: 9 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 9 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 10 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 11 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 12 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 13 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1000 } GroupId: 8 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1001 } GroupId: 9 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1002 } GroupId: 10 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 14 VSlotId: 1003 } GroupId: 11 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1000 } GroupId: 12 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1001 } GroupId: 13 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1002 } GroupId: 14 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 15 VSlotId: 1003 } GroupId: 15 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 16 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 17 VSlotId: 1002 ... 
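(For readers scanning dumps like the TEvControllerConfigResponse above: the block is just repeated `PDisk { NodeId: ... DriveStatus: ... }` and `VSlot { ... }` records. The snippet below is a minimal, hypothetical helper — not part of the test output or of any YDB tooling — that tallies DriveStatus per node from such a dump, which makes it easier to spot non-ACTIVE drives in these very long responses.)

```python
# Hypothetical log-reading helper (assumed, not YDB tooling): tally PDisk
# DriveStatus per node from a Sentinel/BSC BaseConfig dump like the one above.
import re
from collections import Counter, defaultdict

PDISK_RE = re.compile(
    r'PDisk\s*{\s*NodeId:\s*(\d+)\s+PDiskId:\s*(\d+)\s+Path:\s*"([^"]+)"\s+'
    r'Guid:\s*\d+\s+DriveStatus:\s*(\w+)'
)

def tally_drive_status(dump: str) -> dict[int, Counter]:
    """Return {node_id: Counter({drive_status: count})} for a BaseConfig dump."""
    per_node: dict[int, Counter] = defaultdict(Counter)
    for node_id, _pdisk_id, _path, status in PDISK_RE.findall(dump):
        per_node[int(node_id)][status] += 1
    return dict(per_node)

if __name__ == "__main__":
    sample = 'PDisk { NodeId: 1 PDiskId: 4 Path: "/1/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE }'
    print(tally_drive_status(sample))  # {1: Counter({'ACTIVE': 1})}
```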
31:06.064686Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 1, wbId# [1:8388350642965737326:1634689637] 2025-06-25T15:31:06.064711Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 2, wbId# [2:8388350642965737326:1634689637] 2025-06-25T15:31:06.064732Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 3, wbId# [3:8388350642965737326:1634689637] 2025-06-25T15:31:06.064758Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 4, wbId# [4:8388350642965737326:1634689637] 2025-06-25T15:31:06.064776Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 5, wbId# [5:8388350642965737326:1634689637] 2025-06-25T15:31:06.064791Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 6, wbId# [6:8388350642965737326:1634689637] 2025-06-25T15:31:06.064811Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 7, wbId# [7:8388350642965737326:1634689637] 2025-06-25T15:31:06.064827Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 8, wbId# [8:8388350642965737326:1634689637] 2025-06-25T15:31:06.065128Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 1, response# PDiskStateInfo { PDiskId: 4 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-4.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 5 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-5.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 6 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-6.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: CommonLoggerInitError } PDiskStateInfo { PDiskId: 7 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-7.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 825320027 2025-06-25T15:31:06.065425Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 4, response# PDiskStateInfo { PDiskId: 16 CreateTime: 0 ChangeTime: 0 Path: "/4/pdisk-16.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 17 CreateTime: 0 ChangeTime: 0 Path: "/4/pdisk-17.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 18 CreateTime: 0 ChangeTime: 0 Path: "/4/pdisk-18.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 19 CreateTime: 0 ChangeTime: 0 Path: "/4/pdisk-19.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 825320027 2025-06-25T15:31:06.065506Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 5, response# PDiskStateInfo { PDiskId: 20 CreateTime: 0 ChangeTime: 0 Path: "/5/pdisk-20.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 21 CreateTime: 0 ChangeTime: 0 Path: "/5/pdisk-21.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 22 CreateTime: 0 ChangeTime: 0 Path: "/5/pdisk-22.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 
State: Normal } PDiskStateInfo { PDiskId: 23 CreateTime: 0 ChangeTime: 0 Path: "/5/pdisk-23.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 825320027 2025-06-25T15:31:06.065589Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 6, response# PDiskStateInfo { PDiskId: 24 CreateTime: 0 ChangeTime: 0 Path: "/6/pdisk-24.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 25 CreateTime: 0 ChangeTime: 0 Path: "/6/pdisk-25.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 26 CreateTime: 0 ChangeTime: 0 Path: "/6/pdisk-26.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 27 CreateTime: 0 ChangeTime: 0 Path: "/6/pdisk-27.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 825320027 2025-06-25T15:31:06.065662Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 7, response# PDiskStateInfo { PDiskId: 28 CreateTime: 0 ChangeTime: 0 Path: "/7/pdisk-28.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 29 CreateTime: 0 ChangeTime: 0 Path: "/7/pdisk-29.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 30 CreateTime: 0 ChangeTime: 0 Path: "/7/pdisk-30.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: CommonLoggerInitError } PDiskStateInfo { PDiskId: 31 CreateTime: 0 ChangeTime: 0 Path: "/7/pdisk-31.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 825320027 2025-06-25T15:31:06.065743Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 8, response# PDiskStateInfo { PDiskId: 32 CreateTime: 0 ChangeTime: 0 Path: "/8/pdisk-32.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 33 CreateTime: 0 ChangeTime: 0 Path: "/8/pdisk-33.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 34 CreateTime: 0 ChangeTime: 0 Path: "/8/pdisk-34.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 35 CreateTime: 0 ChangeTime: 0 Path: "/8/pdisk-35.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 825320027 2025-06-25T15:31:06.065820Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 2, response# PDiskStateInfo { PDiskId: 8 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-8.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: CommonLoggerInitError } PDiskStateInfo { PDiskId: 9 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-9.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 10 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-10.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 11 CreateTime: 0 ChangeTime: 0 Path: "/2/pdisk-11.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 825320027 2025-06-25T15:31:06.065922Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] 
[StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 3, response# PDiskStateInfo { PDiskId: 12 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-12.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 13 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-13.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 14 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-14.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 15 CreateTime: 0 ChangeTime: 0 Path: "/3/pdisk-15.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 825320027 2025-06-25T15:31:06.065956Z node 1 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-25T15:31:06.076371Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-25T15:31:06.076422Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-25T15:31:06.076500Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 1, wbId# [1:8388350642965737326:1634689637] 2025-06-25T15:31:06.076526Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 2, wbId# [2:8388350642965737326:1634689637] 2025-06-25T15:31:06.076554Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 3, wbId# [3:8388350642965737326:1634689637] 2025-06-25T15:31:06.076575Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 4, wbId# [4:8388350642965737326:1634689637] 2025-06-25T15:31:06.076596Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 5, wbId# [5:8388350642965737326:1634689637] 2025-06-25T15:31:06.076614Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 6, wbId# [6:8388350642965737326:1634689637] 2025-06-25T15:31:06.076633Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 7, wbId# [7:8388350642965737326:1634689637] 2025-06-25T15:31:06.076650Z node 1 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 8, wbId# [8:8388350642965737326:1634689637] 2025-06-25T15:31:06.076840Z node 1 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 1, response# PDiskStateInfo { PDiskId: 4 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-4.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 5 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-5.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 6 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-6.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: CommonLoggerInitError } PDiskStateInfo { PDiskId: 7 CreateTime: 0 ChangeTime: 0 Path: "/1/pdisk-7.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 825360027 (TWithBackTrace) ydb/library/actors/testlib/test_runtime.cpp:1262: Dispatched 2500000 events, limit reached. 
TBackTrace::Capture()+28 (0x1062E96C) TWithBackTrace::TWithBackTrace<>()+80 (0x10253850) NActors::TTestActorRuntimeBase::DispatchEventsInternal(NActors::TDispatchOptions const&, TInstant)+26278 (0x302191D6) NActors::TTestActorRuntimeBase::DispatchEvents(NActors::TDispatchOptions const&)+49 (0x30212A11) NKikimr::NCmsTest::TTestEnv::SetPDiskState(TSet, std::__y1::allocator> const&, NKikimrBlobStorage::TPDiskState_E, NKikimrBlobStorage::EDriveStatus)+901 (0x10285635) NKikimr::NCmsTest::NTestSuiteTSentinelUnstableTests::TTestCaseBSControllerCantChangeStatus::Execute_(NUnitTest::TTestContext&)+2440 (0x10283EC8) std::__y1::__function::__func, void ()>::operator()()+280 (0x10287E08) TColoredProcessor::Run(std::__y1::function, TBasicString> const&, char const*, bool)+534 (0x10B1C0B6) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+505 (0x10AF4B19) NKikimr::NCmsTest::NTestSuiteTSentinelUnstableTests::TCurrentTest::Execute()+1204 (0x10286CB4) NUnitTest::TTestFactory::Execute()+2438 (0x10AF63E6) NUnitTest::RunMain(int, char**)+5213 (0x10B1662D) ??+0 (0x7F7E0B094D90) __libc_start_main+128 (0x7F7E0B094E40) _start+41 (0xDAB6029) >> test_public_api.py::TestForPotentialDeadlock::test_deadlocked_threads_on_cleanup |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |99.3%| [TM] {RESULT} ydb/core/cms/ut_sentinel_unstable/unittest |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_public_api.py::TestForPotentialDeadlock::test_deadlocked_threads_on_cleanup [GOOD] >> test_alloc_default.py::TestAlloc::test_node_limit[kikimr0] [GOOD] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageReuseSameNodes::test_cluster_change_state_storage |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_crud.py::TestManySelectsInRow::test_selects_in_row_success[500-500-50] [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_public_api.py::TestRecursiveCreation::test_mkdir >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[block-4-2] [GOOD] >> PublishKafkaEndpoints::MetadataActorGetsEndpoint [GOOD] >> PublishKafkaEndpoints::DiscoveryResponsesWithNoNode >> test_alloc_default.py::TestAlloc::test_alloc_and_free[kikimr0] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_distconf >> test_public_api.py::TestRecursiveCreation::test_mkdir [GOOD] >> test_public_api.py::TestRecursiveCreation::test_create_table >> test_public_api.py::TestRecursiveCreation::test_create_table [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> test_cms_erasure.py::TestDegradedGroupBlock42Keep::test_no_degraded_groups_after_shutdown >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--true] [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/core/kafka_proxy/ut/unittest >> PublishKafkaEndpoints::DiscoveryResponsesWithNoNode 2025-06-25 15:31:31,958 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 15:31:32,286 WARNING 
devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 1006698 47.8M 47.0M 24.3M test_tool run_ut @/home/runner/.ya/build/build_root/yft8/000856/ydb/core/kafka_proxy/ut/test-results/unittest/testing_out_stuff/test_tool.args 1008186 3.5G 3.5G 3.0G └─ ydb-core-kafka_proxy-ut --trace-path-append /home/runner/.ya/build/build_root/yft8/000856/ydb/core/kafka_proxy/ut/test-results/unittest/ytest.report.trace +DiscoveryIsN Test command err: 2025-06-25T15:21:39.226612Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7519907484473143483:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:21:39.226690Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000856/r3tmp/tmp20aJB2/pdisk_1.dat 2025-06-25T15:21:39.778946Z node 1 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:21:39.783412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:21:39.783533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:21:39.791992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11868, node 1 2025-06-25T15:21:40.001827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/yft8/000856/r3tmp/yandex9TWaPe.tmp 2025-06-25T15:21:40.001850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/yft8/000856/r3tmp/yandex9TWaPe.tmp 2025-06-25T15:21:40.004804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/yft8/000856/r3tmp/yandex9TWaPe.tmp 2025-06-25T15:21:40.005582Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-25T15:21:40.236779Z node 1 :TX_CONVEYOR ERROR: log.cpp:784: fline=service.h:53;problem=unexpected event for task executor;ev_type=NActors::TEvents::TEvWakeup; 2025-06-25T15:21:40.327085Z INFO: TTestServer started on Port 4425 GrpcPort 11868 TClient is connected to server localhost:4425 PQClient connected to localhost:11868 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-25T15:21:40.701969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_alter_subdomain.cpp:311) waiting... waiting... 2025-06-25T15:21:40.745133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-25T15:21:40.769397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) waiting... waiting... 2025-06-25T15:21:40.928661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-25T15:21:40.944669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-25T15:21:42.231164Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519907497358046140:2303], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:21:42.231279Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7519907497358046153:2307], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:21:42.231374Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-25T15:21:42.243976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:179) 2025-06-25T15:21:42.258917Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7519907497358046155:2308], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-25T15:21:42.338349Z node 1 :TX_PROXY ERROR: schemereq.cpp:553: Actor# [1:7519907497358046221:2444] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges)" severity: 1 } 2025-06-25T15:21:42.819165Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7519907497358046229:2315], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-25T15:21:42.829108Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2202: SessionId: ydb://session/3?node_id=1&id=ZmFhZDE3My03YTkxZjc4ZC02ZDg3MGU2NS1mMWQ1ZmU4YQ==, ActorId: [1:7519907497358046138:2302], ActorState: ExecuteState, TraceId: 01jyktwe1w5bj4frb078vmr3nc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-25T15:21:42.832974Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-25T15:21:42.860002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:21:42.895010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) 2025-06-25T15:21:43.042459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:191: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480, first GetDB called at: (GetDB first called at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T15:21:43.555354Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jyktwey1fkn5rmxctej950mq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTMzYzM2MWItNjU3YmI1ZDMtY2Y5Y2IyYWEtZjRlNWNkNTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7519907501653013817:2621] 2025-06-25T15:21:44.226128Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7519907484473143483:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:21:44.226235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok Run with port = 11868, kafka port = 13762 2025-06-25T15:21:50.498215Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7519907529096509544:2223];send_to=[0:7307199536658146131:7762515]; 2025-06-25T15:21:50.508598Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/yft8/000856/r3tmp/tmprJwTvO/pdisk_1.dat 2025-06-25T15:21:50.964608Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7519907529096509356:2080] 1750864910473144 != 1750864910473147 2025-06-25T15:21:51.000606Z node 2 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded 2025-06-25T15:21:51.018094Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-25T15:21:51.018174Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-25T15:21:51.024744Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting ... d at /home/runner/actions_runner/_work/ydb/ydb/ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:664) === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-25T15:31:21.401420Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710667. Ctx: { TraceId: 01jykve39hb7x4qpq8yvqjz4y3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=ZDZhMTJlYWQtMzFiZmIwNmUtNjY3OTg5Y2ItZTUxZmRlNjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [37:7519909983927668950:2642] 2025-06-25T15:31:26.660433Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7382: Cannot get console configs 2025-06-25T15:31:26.660494Z node 37 :IMPORT WARN: schemeshard_import.cpp:305: Table profiles were not loaded === CheckClustersList. 
Ok PQ Client: create topic: rt3.dc1--topic1 with 1 partitions CallPersQueueGRPC request to localhost:11048 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--topic1" } } CallPersQueueGRPC response: Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic1, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC CallPersQueueGRPC request to localhost:11048 MetaRequest { CmdCreateTopic { Topic: "rt3.dc1--topic1" NumPartitions: 1 Config { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } LocalDC: true ReadRules: "user" ReadFromTimestampsMs: 0 ConsumerFormatVersions: 0 ConsumerCodecs { } Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } ReadRuleVersions: 0 } } } CallPersQueueGRPC response: Status: 129 ProxyErrorCode: 53 SchemeStatus: 1 FlatTxId { TxId: 281474976710678 SchemeShardTabletId: 72057594046644480 PathId: 13 } ErrorCode: OK AddTopic: rt3.dc1--topic1 ===Run query:``DECLARE $version as Int64; DECLARE $path AS Utf8; DECLARE $cluster as Utf8; UPSERT INTO `/Root/PQ/Config/V2/Topics` (path, dc) VALUES ($path, $cluster); UPSERT INTO `/Root/PQ/Config/V2/Versions` (name, version) VALUES ("Topics", $version);`` with topic = topic1, dc = dc1 2025-06-25T15:31:28.501986Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:120: TxId: 281474976710681. Ctx: { TraceId: 01jykveaazbn5hv323wvhsm95m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=YjZjMmMzNzItMzA5YTBhNmQtZjA1MDJmMzUtNmY3OTQ5NWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ===Query complete TClient::Ls request: /Root/PQ/rt3.dc1--topic1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "rt3.dc1--topic1" PathId: 13 SchemeshardId: 72057594046644480 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976710678 CreateStep: 1750865488239 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186224037893 } PersQueueGroup { Name: "rt3.dc1--topic1" PathId: 13 TotalGroupCount: 1 PartitionPerTablet: 5 PQTabletConfig { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20... 
(TRUNCATED) GetTopicVersionFromPath: record Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "rt3.dc1--topic1" PathId: 13 SchemeshardId: 72057594046644480 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 281474976710678 CreateStep: 1750865488239 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186224037893 } PersQueueGroup { Name: "rt3.dc1--topic1" PathId: 13 TotalGroupCount: 1 PartitionPerTablet: 5 PQTabletConfig { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } LocalDC: true ReadRules: "user" ReadFromTimestampsMs: 0 ConsumerFormatVersions: 0 ConsumerCodecs { } Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } ReadRuleVersions: 0 YdbDatabasePath: "/Root" } Partitions { PartitionId: 0 TabletId: 72075186224037892 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186224037893 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 12 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 SchemeLimits { MaxDepth: 32 MaxPaths: 10000 MaxChildrenInDir: 100000 MaxAclBytesSize: 10240 MaxTableColumns: 200 MaxTableColumnNameLength: 255 MaxTableKeyColumns: 30 MaxTableIndices: 20 MaxShards: 200000 MaxShardsInPath: 35000 MaxConsistentCopyTargets: 10000 MaxPathElementLength: 255 ExtraPathSymbolsAllowed: "!\"#$%&\'()*+,-.:;<=>?@[\\]^_`{|}~" MaxPQPartitions: 1000000 MaxTableCdcStreams: 5 MaxExports: 10 MaxImports: 10 MaxColumnTableColumns: 10000 } } } Path: "/Root/PQ/rt3.dc1--topic1" name rt3.dc1--topic1 version1 CallPersQueueGRPC request to localhost:11048 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--topic1" } } CallPersQueueGRPC response: Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic1, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC CallPersQueueGRPC request to localhost:11048 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--topic1" } } CallPersQueueGRPC response: Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic1, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC CallPersQueueGRPC request to localhost:11048 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--topic1" } } CallPersQueueGRPC response: Status: 1 ErrorCode: OK MetaResponse { CmdGetTopicMetadataResult { TopicInfo { Topic: "rt3.dc1--topic1" NumPartitions: 1 Config { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } Version: 1 LocalDC: true Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } TopicPath: "/Root/PQ/rt3.dc1--topic1" YdbDatabasePath: "/Root" Consumers { Name: "user" 
ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } Version: 0 Important: false } } ErrorCode: OK } } } === Topic created, have version: 1 2025-06-25T15:31:29.658475Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-25T15:31:29.659210Z :INFO: [] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-25T15:31:29.659326Z :INFO: [] MessageGroupId [src] SessionId [] Start write session. Will connect to endpoint: localhost:11048 2025-06-25T15:31:29.673694Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "topic1" message_group_id: "src" } 2025-06-25T15:31:30.414947Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1750865490414 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-25T15:31:30.415088Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|3caa70e2-95d870d-1dc3dbe1-9029d3c9_0" topic: "topic1" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-25T15:31:30.415278Z :INFO: [] MessageGroupId [src] SessionId [src|3caa70e2-95d870d-1dc3dbe1-9029d3c9_0] Write session: close. Timeout = 0 ms 2025-06-25T15:31:30.415399Z :INFO: [] MessageGroupId [src] SessionId [src|3caa70e2-95d870d-1dc3dbe1-9029d3c9_0] Write session will now close 2025-06-25T15:31:30.415518Z :DEBUG: [] MessageGroupId [src] SessionId [src|3caa70e2-95d870d-1dc3dbe1-9029d3c9_0] Write session: aborting 2025-06-25T15:31:30.417068Z :INFO: [] MessageGroupId [src] SessionId [src|3caa70e2-95d870d-1dc3dbe1-9029d3c9_0] Write session: gracefully shut down, all writes complete 2025-06-25T15:31:30.417143Z :DEBUG: [] MessageGroupId [src] SessionId [src|3caa70e2-95d870d-1dc3dbe1-9029d3c9_0] Write session is aborting and will not restart 2025-06-25T15:31:30.417332Z :DEBUG: [] MessageGroupId [src] SessionId [src|3caa70e2-95d870d-1dc3dbe1-9029d3c9_0] Write session: destroy Broker 37 - [::]:10225 Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/000856/ydb/core/kafka_proxy/ut/test-results/unittest/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/9029509511/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/yft8/000856/ydb/core/kafka_proxy/ut/test-results/unittest/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) |99.3%| [TM] {RESULT} ydb/core/kafka_proxy/ut/unittest >> 
CompositeConveyorTests::TestUniformDistribution [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_crud.py::TestManySelectsInRow::test_selects_in_row_success[500-500-50] [GOOD] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[block-4-2] [GOOD] >> test_alloc_default.py::TestAlloc::test_alloc_and_free[kikimr0] [GOOD] >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorMax::test_restart_as_much_as_can >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_is_operational_with_distconf ------- [TM] {asan, default-linux-x86_64, release} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::TestUniformDistribution [GOOD] Test command err: {I_1_1:112930};{I_2_1:153487};{I_3_1:145762};{S_1_1:156906};{S_2_1:156071};{S_3_1:129815};{N_1_1:161816};{N_2_1:155344};{N_3_1:153524};{I_1_2:151366};{I_2_2:153764};{I_3_2:143300};{S_1_2:136151};{S_2_2:148974};{S_3_2:133985};{N_1_2:156609};{N_2_2:142822};{N_3_2:149516}; {I_1_1:114431};{I_2_1:155918};{I_3_1:148877};{S_1_1:163074};{S_2_1:162601};{S_3_1:131019};{N_1_1:167311};{N_2_1:160236};{N_3_1:158515};{I_1_2:154481};{I_2_2:156757};{I_3_2:146312};{S_1_2:141205};{S_2_2:155462};{S_3_2:137056};{N_1_2:161601};{N_2_2:146535};{N_3_2:154422}; {I_1_1:116856};{I_2_1:157593};{I_3_1:153049};{S_1_1:170243};{S_2_1:170254};{S_3_1:132575};{N_1_1:173659};{N_2_1:167522};{N_3_1:164259};{I_1_2:159572};{I_2_2:162366};{I_3_2:150248};{S_1_2:148297};{S_2_2:163478};{S_3_2:141838};{N_1_2:169992};{N_2_2:151015};{N_3_2:159675}; {I_1_1:121973};{I_2_1:158152};{I_3_1:160079};{S_1_1:175203};{S_2_1:175213};{S_3_1:135911};{N_1_1:177352};{N_2_1:171464};{N_3_1:169244};{I_1_2:163034};{I_2_2:165808};{I_3_2:156439};{S_1_2:158988};{S_2_2:170749};{S_3_2:147486};{N_1_2:173688};{N_2_2:161167};{N_3_2:167127}; {I_1_1:143101};{I_2_1:158152};{I_3_1:163945};{S_1_1:178644};{S_2_1:177920};{S_3_1:143778};{N_1_1:179226};{N_2_1:174712};{N_3_1:172493};{I_1_2:167205};{I_2_2:172500};{I_3_2:160806};{S_1_2:170107};{S_2_2:176656};{S_3_2:156695};{N_1_2:176936};{N_2_2:162215};{N_3_2:170342}; {I_1_1:158152};{I_2_1:158152};{I_3_1:168220};{S_1_1:180286};{S_2_1:179563};{S_3_1:158151};{N_1_1:180595};{N_2_1:177685};{N_3_1:176578};{I_1_2:171341};{I_2_2:176776};{I_3_2:164229};{S_1_2:176340};{S_2_2:178692};{S_3_2:165501};{N_1_2:178787};{N_2_2:163217};{N_3_2:174913}; {I_1_1:158152};{I_2_1:158418};{I_3_1:172666};{S_1_1:185351};{S_2_1:185374};{S_3_1:162596};{N_1_1:186536};{N_2_1:180219};{N_3_1:178007};{I_1_2:175652};{I_2_2:179498};{I_3_2:168675};{S_1_2:181586};{S_2_2:182647};{S_3_2:170789};{N_1_2:182735};{N_2_2:171628};{N_3_2:176934}; {I_1_1:160019};{I_2_1:168558};{I_3_1:177003};{S_1_1:190021};{S_2_1:190043};{S_3_1:166644};{N_1_1:190526};{N_2_1:184208};{N_3_1:181996};{I_1_2:178743};{I_2_2:181145};{I_3_2:174993};{S_1_2:186332};{S_2_2:187317};{S_3_2:172436};{N_1_2:186725};{N_2_2:173721};{N_3_2:179898}; {I_1_1:166645};{I_2_1:170189};{I_3_1:178633};{S_1_1:193881};{S_2_1:193904};{S_3_1:166998};{N_1_1:193725};{N_2_1:187406};{N_3_1:185196};{I_1_2:181246};{I_2_2:187353};{I_3_2:176624};{S_1_2:190250};{S_2_2:191178};{S_3_2:173996};{N_1_2:189924};{N_2_2:174716};{N_3_2:183066}; {I_1_1:166645};{I_2_1:171824};{I_3_1:182853};{S_1_1:197246};{S_2_1:197269};{S_3_1:168361};{N_1_1:196689};{N_2_1:190372};{N_3_1:188160};{I_1_2:185407};{I_2_2:191626};{I_3_2:178409};{S_1_2:193664};{S_2_2:194543};{S_3_2:175358};{N_1_2:192889};{N_2_2:175713};{N_3_2:185997}; 
{I_1_1:166645};{I_2_1:173473};{I_3_1:186515};{S_1_1:199661};{S_2_1:200372};{S_3_1:169849};{N_1_1:199541};{N_2_1:193382};{N_3_1:191171};{I_1_2:188957};{I_2_2:195287};{I_3_2:182070};{S_1_2:196809};{S_2_2:197646};{S_3_2:178479};{N_1_2:195900};{N_2_2:176739};{N_3_2:188974}; {I_1_1:166645};{I_2_1:175232};{I_3_1:190290};{S_1_1:203158};{S_2_1:204132};{S_3_1:172297};{N_1_1:201153};{N_2_1:196653};{N_3_1:194442};{I_1_2:192611};{I_2_2:198548};{I_3_2:185844};{S_1_2:202808};{S_2_2:200374};{S_3_2:184153};{N_1_2:199160};{N_2_2:177953};{N_3_2:192205}; {I_1_1:166645};{I_2_1:177319};{I_3_1:194386};{S_1_1:207903};{S_2_1:208961};{S_3_1:177095};{N_1_1:206711};{N_2_1:200298};{N_3_1:198181};{I_1_2:196564};{I_2_2:200557};{I_3_2:189941};{S_1_2:208398};{S_2_2:205118};{S_3_2:192702};{N_1_2:203104};{N_2_2:180144};{N_3_2:196213}; {I_1_1:166645};{I_2_1:182206};{I_3_1:197573};{S_1_1:213642};{S_2_1:215743};{S_3_1:183034};{N_1_1:210756};{N_2_1:204410};{N_3_1:202075};{I_1_2:198609};{I_2_2:202203};{I_3_2:195004};{S_1_2:214241};{S_2_2:209149};{S_3_2:202513};{N_1_2:207148};{N_2_2:187241};{N_3_2:199947}; {I_1_1:175837};{I_2_1:190390};{I_3_1:199338};{S_1_1:219803};{S_2_1:222025};{S_3_1:191088};{N_1_1:217961};{N_2_1:209333};{N_3_1:205790};{I_1_2:201972};{I_2_2:208485};{I_3_2:196754};{S_1_2:220511};{S_2_2:215310};{S_3_2:208783};{N_1_2:212672};{N_2_2:190332};{N_3_2:203096}; {I_1_1:187304};{I_2_1:192011};{I_3_1:203353};{S_1_1:224925};{S_2_1:226501};{S_3_1:193166};{N_1_1:223231};{N_2_1:214886};{N_3_1:211343};{I_1_2:205988};{I_2_2:212561};{I_3_2:199255};{S_1_2:225149};{S_2_2:221352};{S_3_2:214935};{N_1_2:218184};{N_2_2:191733};{N_3_2:208213}; {I_1_1:187304};{I_2_1:193776};{I_3_1:207182};{S_1_1:228282};{S_2_1:229941};{S_3_1:197218};{N_1_1:226630};{N_2_1:219903};{N_3_1:216360};{I_1_2:211375};{I_2_2:220976};{I_3_2:203046};{S_1_2:228549};{S_2_2:224900};{S_3_2:222387};{N_1_2:223160};{N_2_2:193353};{N_3_2:213274}; {I_1_1:187304};{I_2_1:195800};{I_3_1:213715};{S_1_1:231437};{S_2_1:231974};{S_3_1:201521};{N_1_1:229903};{N_2_1:224658};{N_3_1:221815};{I_1_2:217846};{I_2_2:225536};{I_3_2:207888};{S_1_2:231452};{S_2_2:229763};{S_3_2:229712};{N_1_2:226460};{N_2_2:195535};{N_3_2:218768}; {I_1_1:187304};{I_2_1:199048};{I_3_1:219633};{S_1_1:232820};{S_2_1:233391};{S_3_1:216016};{N_1_1:232143};{N_2_1:228167};{N_3_1:225727};{I_1_2:222679};{I_2_2:229625};{I_3_2:214980};{S_1_2:232835};{S_2_2:231146};{S_3_2:232506};{N_1_2:229929};{N_2_2:202606};{N_3_2:223777}; {I_1_1:199202};{I_2_1:208358};{I_3_1:224685};{S_1_1:234165};{S_2_1:234769};{S_3_1:219696};{N_1_1:233490};{N_2_1:230792};{N_3_1:229561};{I_1_2:227647};{I_2_2:232153};{I_3_2:221159};{S_1_2:234180};{S_2_2:232490};{S_3_2:233851};{N_1_2:231875};{N_2_2:211271};{N_3_2:228479}; {I_1_1:206478};{I_2_1:218533};{I_3_1:228986};{S_1_1:235499};{S_2_1:236137};{S_3_1:221031};{N_1_1:234825};{N_2_1:232160};{N_3_1:230929};{I_1_2:230584};{I_2_2:233795};{I_3_2:227323};{S_1_2:235514};{S_2_2:233825};{S_3_2:235186};{N_1_2:233209};{N_2_2:219859};{N_3_2:229847}; {I_1_1:217195};{I_2_1:222279};{I_3_1:230591};{S_1_1:236836};{S_2_1:237508};{S_3_1:222368};{N_1_1:236162};{N_2_1:233530};{N_3_1:232299};{I_1_2:232189};{I_2_2:235439};{I_3_2:228928};{S_1_2:236851};{S_2_2:235162};{S_3_2:236523};{N_1_2:234546};{N_2_2:221196};{N_3_2:231218}; {I_1_1:217195};{I_2_1:223933};{I_3_1:232205};{S_1_1:238181};{S_2_1:238886};{S_3_1:223713};{N_1_1:237507};{N_2_1:234909};{N_3_1:233677};{I_1_2:233802};{I_2_2:237093};{I_3_2:230542};{S_1_2:238196};{S_2_2:236506};{S_3_2:237867};{N_1_2:235891};{N_2_2:222541};{N_3_2:232596}; 
{I_1_1:217195};{I_2_1:225524};{I_3_1:233756};{S_1_1:239474};{S_2_1:240211};{S_3_1:225006};{N_1_1:238800};{N_2_1:236234};{N_3_1:235003};{I_1_2:235354};{I_2_2:238683};{I_3_2:232093};{S_1_2:239489};{S_2_2:237799};{S_3_2:239160};{N_1_2:237184};{N_2_2:223833};{N_3_2:233921}; {I_1_1:217195};{I_2_1:227123};{I_3_1:235317};{S_1_1:240774};{S_2_1:241544};{S_3_1:226306};{N_1_1:240100};{N_2_1:237567};{N_3_1:236335};{I_1_2:236914};{I_2_2:240283};{I_3_2:233654};{S_1_2:240790};{S_2_2:239100};{S_3_2:240461};{N_1_2:238485};{N_2_2:225134};{N_3_2:235254}; {I_1_1:217195};{I_2_1:228698};{I_3_1:236853};{S_1_1:242054};{S_2_1:242856};{S_3_1:227586};{N_1_1:241380};{N_2_1:238879};{N_3_1:237647};{I_1_2:238450};{I_2_2:241857};{I_3_2:235189};{S_1_2:242070};{S_2_2:240380};{S_3_2:241741};{N_1_2:239765};{N_2_2:226414};{N_3_2:236566}; {I_1_1:217195};{I_2_1:230186};{I_3_1:238305};{S_1_1:243264};{S_2_1:244097};{S_3_1:228796};{N_1_1:242590};{N_2_1:240119};{N_3_1:238888};{I_1_2:239902};{I_2_2:243346};{I_3_2:236641};{S_1_2:243279};{S_2_2:241590};{S_3_2:242950};{N_1_2:240974};{N_2_2:227624};{N_3_2:237807}; {I_1_1:217195};{I_2_1:231610};{I_3_1:239694};{S_1_1:244422};{S_2_1:245283};{S_3_1:229953};{N_1_1:243748};{N_2_1:241305};{N_3_1:240074};{I_1_2:241291};{I_2_2:244769};{I_3_2:238030};{S_1_2:244437};{S_2_2:242747};{S_3_2:244108};{N_1_2:242132};{N_2_2:228781};{N_3_2:238993}; {I_1_1:217195};{I_2_1:233062};{I_3_1:241111};{S_1_1:245603};{S_2_1:246493};{S_3_1:231134};{N_1_1:244929};{N_2_1:242516};{N_3_1:241285};{I_1_2:242708};{I_2_2:246222};{I_3_2:239448};{S_1_2:245618};{S_2_2:243928};{S_3_2:245289};{N_1_2:243313};{N_2_2:229962};{N_3_2:240203}; {I_1_1:217195};{I_2_1:234664};{I_3_1:242673};{S_1_1:246905};{S_2_1:247359};{S_3_1:232437};{N_1_1:246155};{N_2_1:243773};{N_3_1:242542};{I_1_2:244271};{I_2_2:247358};{I_3_2:241010};{S_1_2:246921};{S_2_2:245231};{S_3_2:246592};{N_1_2:244539};{N_2_2:231189};{N_3_2:241460}; {I_1_1:217195};{I_2_1:236500};{I_3_1:244465};{S_1_1:248581};{S_2_1:249596};{S_3_1:235818};{N_1_1:247349};{N_2_1:244997};{N_3_1:243766};{I_1_2:246062};{I_2_2:247358};{I_3_2:242801};{S_1_2:249070};{S_2_2:247358};{S_3_2:248415};{N_1_2:245734};{N_2_2:232383};{N_3_2:242684}; {I_1_1:217746};{I_2_1:238363};{I_3_1:246282};{S_1_1:252327};{S_2_1:253343};{S_3_1:243118};{N_1_1:250530};{N_2_1:247176};{N_3_1:245260};{I_1_2:247358};{I_2_2:247514};{I_3_2:244619};{S_1_2:252817};{S_2_2:250514};{S_3_2:252161};{N_1_2:248351};{N_2_2:233841};{N_3_2:244179}; {I_1_1:220112};{I_2_1:240787};{I_3_1:248743};{S_1_1:253845};{S_2_1:254860};{S_3_1:247359};{N_1_1:252387};{N_2_1:249902};{N_3_1:248709};{I_1_2:248669};{I_2_2:252286};{I_3_2:246985};{S_1_2:254334};{S_2_2:251994};{S_3_2:253679};{N_1_2:250422};{N_2_2:236397};{N_3_2:247312}; {I_1_1:224821};{I_2_1:245615};{I_3_1:251196};{S_1_1:255369};{S_2_1:256385};{S_3_1:247359};{N_1_1:253875};{N_2_1:251389};{N_3_1:250233};{I_1_2:251122};{I_2_2:254678};{I_3_2:247623};{S_1_2:255859};{S_2_2:253482};{S_3_2:255203};{N_1_2:251947};{N_2_2:245646};{N_3_2:249357}; {I_1_1:243042};{I_2_1:247359};{I_3_1:253008};{S_1_1:256819};{S_2_1:257834};{S_3_1:247359};{N_1_1:255289};{N_2_1:252803};{N_3_1:251683};{I_1_2:252934};{I_2_2:256446};{I_3_2:249435};{S_1_2:257308};{S_2_2:254896};{S_3_2:256653};{N_1_2:253396};{N_2_2:247358};{N_3_2:250772}; {I_1_1:247359};{I_2_1:247359};{I_3_1:255024};{S_1_1:258432};{S_2_1:259447};{S_3_1:247359};{N_1_1:256863};{N_2_1:254377};{N_3_1:253295};{I_1_2:254950};{I_2_2:258413};{I_3_2:251450};{S_1_2:258921};{S_2_2:256469};{S_3_2:258266};{N_1_2:255009};{N_2_2:247358};{N_3_2:252345}; 
{I_1_1:247359};{I_2_1:247444};{I_3_1:257047};{S_1_1:260067};{S_2_1:261082};{S_3_1:247359};{N_1_1:258458};{N_2_1:255972};{N_3_1:254930};{I_1_2:256972};{I_2_2:260386};{I_3_2:253473};{S_1_2:260556};{S_2_2:258064};{S_3_2:259901};{N_1_2:256644};{N_2_2:247358};{N_3_2:253940}; {I_1_1:247359};{I_2_1:249082};{I_3_1:258685};{S_1_1:261511};{S_2_1:262526};{S_3_1:248330};{N_1_1:260056};{N_2_1:257570};{N_3_1:256568};{I_1_2:258611};{I_2_2:261984};{I_3_2:255111};{S_1_2:262000};{S_2_2:259473};{S_3_2:261345};{N_1_2:258282};{N_2_2:247358};{N_3_2:255539}; {I_1_1:247359};{I_2_1:250723};{I_3_1:260326};{S_1_1:262878};{S_2_1:263894};{S_3_1:249698};{N_1_1:261657};{N_2_1:259172};{N_3_1:258210};{I_1_2:260252};{I_2_2:263586};{I_3_2:256753};{S_1_2:263368};{S_2_2:260808};{S_3_2:262712};{N_1_2:259924};{N_2_2:247358};{N_3_2:257140}; {I_1_1:247859};{I_2_1:252264};{I_3_1:261867};{S_1_1:264246};{S_2_1:265262};{S_3_1:251065};{N_1_1:263152};{N_2_1:260667};{N_3_1:259742};{I_1_2:261793};{I_2_2:265089};{I_3_2:258294};{S_1_2:264735};{S_2_2:262141};{S_3_2:264080};{N_1_2:261456};{N_2_2:247901};{N_3_2:258635}; {I_1_1:249221};{I_2_1:253626};{I_3_1:263229};{S_1_1:265608};{S_2_1:266624};{S_3_1:252427};{N_1_1:264481};{N_2_1:261995};{N_3_1:261104};{I_1_2:263155};{I_2_2:266418};{I_3_2:259656};{S_1_2:266097};{S_2_2:263470};{S_3_2:265442};{N_1_2:262818};{N_2_2:249263};{N_3_2:259964}; {I_1_1:250587};{I_2_1:254992};{I_3_1:264595};{S_1_1:266973};{S_2_1:267989};{S_3_1:253793};{N_1_1:265813};{N_2_1:263327};{N_3_1:262470};{I_1_2:264520};{I_2_2:267750};{I_3_2:261021};{S_1_2:267463};{S_2_2:264803};{S_3_2:266807};{N_1_2:264184};{N_2_2:250628};{N_3_2:261296}; {I_1_1:251874};{I_2_1:256279};{I_3_1:265882};{S_1_1:268261};{S_2_1:269277};{S_3_1:255080};{N_1_1:267069};{N_2_1:264584};{N_3_1:263757};{I_1_2:265808};{I_2_2:269006};{I_3_2:262309};{S_1_2:268750};{S_2_2:266059};{S_3_2:268095};{N_1_2:265471};{N_2_2:251916};{N_3_2:262552}; {I_1_1:253235};{I_2_1:257640};{I_3_1:267243};{S_1_1:269622};{S_2_1:270638};{S_3_1:256441};{N_1_1:268397};{N_2_1:265912};{N_3_1:265118};{I_1_2:267169};{I_2_2:270334};{I_3_2:263670};{S_1_2:270111};{S_2_2:267387};{S_3_2:269456};{N_1_2:266832};{N_2_2:253277};{N_3_2:263880}; {I_1_1:254597};{I_2_1:259002};{I_3_1:268605};{S_1_1:270984};{S_2_1:272000};{ ... 
4959}; [... incremental progress-counter dump elided: the eighteen counters {I|S|N}_{1..3}_{1|2} climb in small steps until each reaches 1000000 ...] {I_1_1:1000000};{I_2_1:1000000};{I_3_1:1000000};{S_1_1:1000000};{S_2_1:1000000};{S_3_1:1000000};{N_1_1:1000000};{N_2_1:1000000};{N_3_1:1000000};{I_1_2:1000000};{I_2_2:1000000};{I_3_2:1000000};{S_1_2:1000000};{S_2_2:1000000};{S_3_2:1000000};{N_1_2:1000000};{N_2_2:1000000};{N_3_2:1000000}; 222us per task 244.090103s;244.090117s;244.090121s;244.090134s;244.090137s;244.090140s;244.090146s;244.090148s;244.090151s;244.090154s;244.090155s;244.090158s;244.090161s;244.090163s;244.090174s;244.090176s;244.090178s;244.090180s; ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/olap/s3_import/py3test >> test_tpch_import.py::TestS3TpchImport::test_import_and_export 2025-06-25 15:31:25,674 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 15:31:26,485 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout.
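The two WARNING lines above come from the test wrapper's watchdog: the wrapped pytest process exceeded its 600-second budget, so the wrapper reports the overrun, prints the surviving process tree (dumped just below), and then kills the whole group. A minimal, stdlib-only sketch of that watchdog pattern follows. It is a generic illustration under stated assumptions, not the devtools run_test.py implementation; the run_with_timeout name, the ps invocation, and the 600-second default are assumptions made for the example.

# watchdog_sketch.py -- generic illustration of a test-wrapper timeout (assumed names, not ya's test_tool).
import os
import signal
import subprocess
import sys

def run_with_timeout(cmd, timeout_s=600):
    # Start the test in its own session/process group so its children
    # (e.g. a database server or a mock S3 service) can be signalled together.
    proc = subprocess.Popen(cmd, start_new_session=True)
    try:
        return proc.wait(timeout=timeout_s)
    except subprocess.TimeoutExpired:
        print("Wrapper has overrun %d secs timeout." % timeout_s, file=sys.stderr)
        print("Process tree before termination:", file=sys.stderr)
        # Linux/procps only: list every process in the child's session as a tree.
        subprocess.run(["ps", "--forest", "-o", "pid,rss,cmd", "-s", str(proc.pid)],
                       stdout=sys.stderr)
        os.killpg(proc.pid, signal.SIGKILL)  # pgid == child pid because of start_new_session
        proc.wait()
        raise

if __name__ == "__main__":
    # Usage example (hypothetical command): python3 watchdog_sketch.py ./my-test-binary --basetemp /tmp/bt
    sys.exit(run_with_timeout(sys.argv[1:] or ["sleep", "3"], timeout_s=600) or 0)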
Process tree before termination: pid rss ref pdirt 1005421 598M 590M 515M ydb-tests-olap-s3_import --basetemp /home/runner/.ya/build/build_root/yft8/000c9f/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modul 1006717 13.2G 13.2G 12.7G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/yft8/000c9f/ydb/tests/olap/s3_import/test-results/py3test/testing_out_stuff/test_tp 1008091 391M 390M 359M └─ moto_server s3 --port 64989 Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol reports.append(call_and_report(item, "call", log)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, 
in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call item.runtest() File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call pyfuncitem.retval = testfunction(**testargs) File "ydb/tests/olap/s3_import/test_tpch_import.py", line 97, in test_import_and_export self.ydb_client.query("INSERT INTO s3_table SELECT * FROM lineitem") File "ydb/tests/olap/common/ydb_client.py", line 24, in query return self.session_pool.execute_with_retries(statement) File "contrib/python/ydb/py3/ydb/query/pool.py", line 204, in execute_with_retries return retry_operation_sync(wrapped_callee, retry_settings) File "contrib/python/ydb/py3/ydb/retries.py", line 133, in retry_operation_sync for next_opt in opt_generator: File "contrib/python/ydb/py3/ydb/retries.py", line 94, in retry_operation_impl result = YdbRetryOperationFinalResult(callee(*args, **kwargs)) File "contrib/python/ydb/py3/ydb/query/pool.py", line 202, in wrapped_callee return [result_set for result_set in it] File "contrib/python/ydb/py3/ydb/_utilities.py", line 173, in __next__ return self._next() File "contrib/python/ydb/py3/ydb/_utilities.py", line 164, in _next res = self.wrapper(next(self.it)) File "contrib/python/grpcio/py3/grpc/_channel.py", line 475, in __next__ return self._next() File "contrib/python/grpcio/py3/grpc/_channel.py", line 872, in _next _common.wait(self._state.condition.wait, _response_ready) File "contrib/python/grpcio/py3/grpc/_common.py", line 150, in wait _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb) File "contrib/python/grpcio/py3/grpc/_common.py", line 112, in _wait_once wait_fn(timeout=timeout) File "contrib/tools/python3/Lib/threading.py", line 359, in wait gotit = waiter.acquire(True, timeout) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...unner/.ya/build/build_root/yft8/000c9f/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import', '--basetemp', '/home/runner/.ya/build/build_root/yft8/000c9f/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/000c9f/ydb/tests/olap/s3_import/test-results/py3test/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/000c9f', '--source-root', '/home/runner/.ya/build/build_root/yft8/000c9f/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/000c9f/ydb/tests/olap/s3_import/test-results/py3test/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/s3_import', '--test-tool-bin', 
'/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/olap/s3_import', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...unner/.ya/build/build_root/yft8/000c9f/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import', '--basetemp', '/home/runner/.ya/build/build_root/yft8/000c9f/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/000c9f/ydb/tests/olap/s3_import/test-results/py3test/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/000c9f', '--source-root', '/home/runner/.ya/build/build_root/yft8/000c9f/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/000c9f/ydb/tests/olap/s3_import/test-results/py3test/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/olap/s3_import', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/olap/s3_import', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {}) |99.3%| [TA] $(B)/ydb/core/tx/conveyor_composite/ut/test-results/unittest/{meta.json ... results_accumulator.log} |99.3%| [TM] {RESULT} ydb/tests/olap/s3_import/py3test |99.3%| [TA] {RESULT} $(B)/ydb/core/tx/conveyor_composite/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_public_api.py::TestAttributes::test_create_table >> test_alloc_default.py::TestAlloc::test_up_down[kikimr0] |99.3%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--true] [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_distconf [GOOD] >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] [FAIL] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--true] [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfig::test_generate_dynamic_config |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_distconf [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_public_api.py::TestAttributes::test_create_table [GOOD] >> test_public_api.py::TestAttributes::test_copy_table >> test_public_api.py::TestAttributes::test_copy_table [GOOD] >> test_public_api.py::TestAttributes::test_create_indexed_table |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_public_api.py::TestAttributes::test_create_indexed_table [GOOD] >> test_public_api.py::TestAttributes::test_alter_table >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--false] [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_public_api.py::TestAttributes::test_alter_table [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes0] [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes1] >> test_public_api.py::TestAttributes::test_limits[attributes1] [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes2] [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes3] [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes4] >> test_public_api.py::TestAttributes::test_limits[attributes4] [GOOD] >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--true] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_is_operational_with_distconf [GOOD] >> test_config_migration.py::TestConfigMigrationToV2::test_migration_to_v2 |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_is_operational_with_distconf [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageNoChanges::test_cluster_change_state_storage |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_alloc_default.py::TestAlloc::test_up_down[kikimr0] [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_alloc_default.py::TestAlloc::test_mkql_not_increased[kikimr0] >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_cluster_works_with_auto_conf_dir >> test_generate_dynamic_config.py::TestGenerateDynamicConfig::test_generate_dynamic_config [GOOD] >> test_public_api.py::TestDocApiTables::test_create_table >> test_config_with_metadata.py::TestConfigWithoutMetadataBlock::test_cluster_is_operational_without_metadata |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfig::test_generate_dynamic_config [GOOD] |99.4%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_cluster_works_with_auto_conf_dir [GOOD] >> test_config_with_metadata.py::TestConfigWithMetadataBlock::test_cluster_is_operational_with_metadata |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_public_api.py::TestDocApiTables::test_create_table [GOOD] >> test_public_api.py::TestDocApiTables::test_alter_table[None-BadRequest] >> test_public_api.py::TestDocApiTables::test_alter_table[None-BadRequest] [GOOD] >> test_public_api.py::TestDocApiTables::test_alter_table[settings1-None] >> test_public_api.py::TestDocApiTables::test_alter_table[settings1-None] [GOOD] >> test_public_api.py::TestDocApiTables::test_drop_table[None-None] >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--false] [GOOD] >> test_public_api.py::TestDocApiTables::test_drop_table[None-None] [GOOD] >> test_public_api.py::TestDocApiTables::test_drop_table[settings1-None] >> test_public_api.py::TestDocApiTables::test_drop_table[settings1-None] [GOOD] >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--true] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_cluster_works_with_auto_conf_dir [GOOD] >> test_alloc_default.py::TestAlloc::test_mkql_not_increased[kikimr0] [GOOD] >> test_cms_restart.py::TestCmsStateStorageRestartsBlockMax::test_restart_as_much_as_can [GOOD] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageReuseSameNodes::test_cluster_change_state_storage [GOOD] >> test_config_with_metadata.py::TestConfigWithoutMetadataBlock::test_cluster_is_operational_without_metadata [GOOD] >> test_config_with_metadata.py::TestConfigWithMetadataMirrorMax::test_cluster_is_operational_with_metadata |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/blobstorage/py3test >> 
test_distconf.py::TestKiKiMRDistConfReassignStateStorageNoChanges::test_cluster_change_state_storage [GOOD] |99.5%| [TA] $(B)/ydb/tests/functional/blobstorage/test-results/py3test/{meta.json ... results_accumulator.log} |99.5%| [TA] {RESULT} $(B)/ydb/tests/functional/blobstorage/test-results/py3test/{meta.json ... results_accumulator.log} >> test_alloc_default.py::TestAlloc::test_hard_limit[kikimr0] >> test_config_with_metadata.py::TestKiKiMRWithMetadata::test_cluster_is_operational_with_metadata |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageReuseSameNodes::test_cluster_change_state_storage [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageNoChanges::test_cluster_change_state_storage [GOOD] >> test_config_with_metadata.py::TestConfigWithMetadataBlock::test_cluster_is_operational_with_metadata [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/api/py3test >> test_public_api.py::TestDocApiTables::test_drop_table[settings1-None] [GOOD] |99.5%| [TA] $(B)/ydb/tests/functional/api/test-results/py3test/{meta.json ... results_accumulator.log} >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageBadCases::test_cluster_change_state_storage |99.5%| [TA] {RESULT} $(B)/ydb/tests/functional/api/test-results/py3test/{meta.json ... results_accumulator.log} |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsBlockMax::test_restart_as_much_as_can [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] [FAIL] >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_seed_nodes |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithoutMetadataBlock::test_cluster_is_operational_without_metadata [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsBlockKeep::test_restart_as_much_as_can |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_config_with_metadata.py::TestConfigWithMetadataMirrorMax::test_cluster_is_operational_with_metadata [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithMetadataBlock::test_cluster_is_operational_with_metadata [GOOD] >> test_config_with_metadata.py::TestKiKiMRWithMetadata::test_cluster_is_operational_with_metadata [GOOD] >> test_cms_erasure.py::TestDegradedGroupBlock42Max::test_no_degraded_groups_after_shutdown |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test |99.5%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithMetadataMirrorMax::test_cluster_is_operational_with_metadata [GOOD] |99.5%| [TM] {asan, default-linux-x86_64, release} 
ydb/tests/functional/cms/py3test >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--false] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRWithMetadata::test_cluster_is_operational_with_metadata [GOOD] >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--false] >> test_restarts.py::TestRestartClusterBlock42::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_alloc_default.py::TestAlloc::test_hard_limit[kikimr0] [GOOD] >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_seed_nodes [GOOD] >> test_tpch.py::TestTpchS1::test_tpch[1] [GOOD] >> test_tpch.py::TestTpchS1::test_tpch[2] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--true] >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--true] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/fq/mem_alloc/py3test >> test_alloc_default.py::TestAlloc::test_hard_limit[kikimr0] [GOOD] 2025-06-25 15:33:22,700 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 15:33:23,008 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 1039058 787M 780M 708M ydb-tests-fq-mem_alloc --basetemp /home/runner/.ya/build/build_root/yft8/001015/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctest-modules 1200313 1.5G 1.5G 1.1G └─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/yft8/001015/ydb/tests/fq/mem_alloc/test-results/py3test/testing_out_stuff/test_allo Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File 
"contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 135, in runtestprotocol reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 183, in pytest_runtest_teardown item.session._setupstate.teardown_exact(nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 543, in teardown_exact fin() File "contrib/python/pytest/py3/_pytest/fixtures.py", line 1042, in finish func() File "contrib/python/pytest/py3/_pytest/fixtures.py", line 926, in _teardown_yield_fixture next(it) File "ydb/tests/fq/mem_alloc/test_alloc_default.py", line 52, in kikimr kikimr.stop() File "ydb/tests/tools/fq_runner/kikimr_runner.py", line 746, in stop tenant.stop() File "ydb/tests/tools/fq_runner/kikimr_runner.py", line 62, in stop self.kikimr_cluster.stop(kill=False) File "ydb/tests/library/harness/kikimr_runner.py", line 599, in stop thread.join() File "contrib/tools/python3/Lib/threading.py", line 1149, in join self._wait_for_tstate_lock() File "contrib/tools/python3/Lib/threading.py", line 1169, in _wait_for_tstate_lock if lock.acquire(block, timeout): File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: ...d '['/home/runner/.ya/build/build_root/yft8/001015/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc', '--basetemp', '/home/runner/.ya/build/build_root/yft8/001015/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', 
'/home/runner/.ya/build/build_root/yft8/001015/ydb/tests/fq/mem_alloc/test-results/py3test/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/001015', '--source-root', '/home/runner/.ya/build/build_root/yft8/001015/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/001015/ydb/tests/fq/mem_alloc/test-results/py3test/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/fq/mem_alloc', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/fq/mem_alloc', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("...d '['/home/runner/.ya/build/build_root/yft8/001015/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc', '--basetemp', '/home/runner/.ya/build/build_root/yft8/001015/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/001015/ydb/tests/fq/mem_alloc/test-results/py3test/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/001015', '--source-root', '/home/runner/.ya/build/build_root/yft8/001015/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/001015/ydb/tests/fq/mem_alloc/test-results/py3test/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/fq/mem_alloc', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/fq/mem_alloc', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address']' stopped by 600 seconds timeout",), {}) |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_seed_nodes [GOOD] |99.6%| [TM] {RESULT} ydb/tests/fq/mem_alloc/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> 
test_cms_state_storage.py::TestCmsStateStorageSimpleMax::test_check_shutdown_state_storage_nodes >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_config_stored_in_config_store |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_clean.py::TestClean::test >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_config_stored_in_config_store [GOOD] >> test_restarts.py::TestRestartClusterMirror3DC::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--true] [GOOD] >> test_tpch.py::TestTpchS1::test_tpch[2] [FAIL] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_tpch.py::TestTpchS1::test_tpch[2] [FAIL] 2025-06-25 15:33:43,696 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-25 15:33:44,795 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. Process tree before termination: pid rss ref pdirt 1048904 567M 548M 485M ydb-tests-functional-tpc-medium --basetemp /home/runner/.ya/build/build_root/yft8/000a2b/tmp --capture no -c pkg:library.python.pytest:pytest.yatest.ini -p no:factor --doctes 1051925 8.7G 8.5G 8.2G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/yft8/000a2b/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/ 1056596 4.7G 4.6G 4.1G ├─ ydbd server --suppress-version-check --yaml-config=/home/runner/.ya/build/build_root/yft8/000a2b/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/ 1212075 172M 162M 93.3M └─ ydb -e grpc://localhost:9669 -d /local/test_db workload tpch --path olap_yatests/tpch/s1 run --json Query02.stats.json --output Query02.result --include Query02 --itera Test command err: File "library/python/pytest/main.py", line 101, in main rc = pytest.main( File "contrib/python/pytest/py3/_pytest/config/__init__.py", line 175, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 320, in pytest_cmdline_main return wrap_session(config, _main) File "contrib/python/pytest/py3/_pytest/main.py", line 273, in wrap_session session.exitstatus = doit(config, session) or 0 File "contrib/python/pytest/py3/_pytest/main.py", line 327, in _main config.hook.pytest_runtestloop(session=session) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/main.py", line 352, in pytest_runtestloop item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return 
self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 115, in pytest_runtest_protocol runtestprotocol(item, nextitem=nextitem) File "contrib/python/pytest/py3/_pytest/runner.py", line 134, in runtestprotocol reports.append(call_and_report(item, "call", log)) File "contrib/python/pytest/py3/_pytest/runner.py", line 223, in call_and_report call = call_runtest_hook(item, when, **kwds) File "contrib/python/pytest/py3/_pytest/runner.py", line 262, in call_runtest_hook return CallInfo.from_call( File "contrib/python/pytest/py3/_pytest/runner.py", line 342, in from_call result: Optional[TResult] = func() File "contrib/python/pytest/py3/_pytest/runner.py", line 263, in lambda: ihook(item=item, **kwds), when=when, reraise=reraise File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "contrib/python/pytest/py3/_pytest/runner.py", line 170, in pytest_runtest_call item.runtest() File "contrib/python/pytest/py3/_pytest/python.py", line 1844, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "contrib/python/pluggy/py3/pluggy/_hooks.py", line 512, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "contrib/python/pluggy/py3/pluggy/_callers.py", line 121, in _multicall res = hook_impl.function(*args) File "library/python/pytest/plugins/ya.py", line 563, in pytest_pyfunc_call pyfuncitem.retval = testfunction(**testargs) File "ydb/tests/olap/load/lib/tpch.py", line 44, in test_tpch self.run_workload_test(self._get_path(), query_num) File "ydb/tests/olap/load/lib/conftest.py", line 437, in run_workload_test result = YdbCliHelper.workload_run( File "ydb/tests/olap/lib/ydb_cli.py", line 353, in workload_run if runner.run(): File "ydb/tests/olap/lib/ydb_cli.py", line 213, in run process = yatest.common.process.execute(self.__get_cmd(), check_exit_code=False, text=True) File "library/python/testing/yatest_common/yatest/common/process.py", line 656, in execute res.wait(check_exit_code, timeout, on_timeout) File "library/python/testing/yatest_common/yatest/common/process.py", line 400, in wait _wait() File "library/python/testing/yatest_common/yatest/common/process.py", line 335, in _wait pid, sts, rusage = os.wait4(self._process.pid, 0) File "library/python/pytest/plugins/ya.py", line 344, in _graceful_shutdown traceback.print_stack(file=sys.stderr) Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 765, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: .../build/build_root/yft8/000a2b/tmp', '--capture', 'no', '-c', 
'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/000a2b/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_tpch/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/000a2b', '--source-root', '/home/runner/.ya/build/build_root/yft8/000a2b/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/000a2b/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_tpch/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/tpc/medium', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/functional/tpc/medium', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_tpch.py']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: ((".../build/build_root/yft8/000a2b/tmp', '--capture', 'no', '-c', 'pkg:library.python.pytest:pytest.yatest.ini', '-p', 'no:factor', '--doctest-modules', '--ya-trace', '/home/runner/.ya/build/build_root/yft8/000a2b/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_tpch/ytest.report.trace', '--build-root', '/home/runner/.ya/build/build_root/yft8/000a2b', '--source-root', '/home/runner/.ya/build/build_root/yft8/000a2b/environment/arcadia', '--output-dir', '/home/runner/.ya/build/build_root/yft8/000a2b/ydb/tests/functional/tpc/medium/test-results/py3test/testing_out_stuff/test_tpch/testing_out_stuff', '--durations', '0', '--project-path', 'ydb/tests/functional/tpc/medium', '--test-tool-bin', '/home/runner/.ya/tools/v4/9029509511/test_tool', '--ya-version', '2', '--collect-cores', '--sanitizer-extra-checks', '--build-type', 'release', '--tb', 'short', '--dep-root', 'ydb/tests/functional/tpc/medium', '--flags', 'APPLE_SDK_LOCAL=yes', '--flags', 'CFLAGS=-fno-omit-frame-pointer -Wno-unknown-argument', '--flags', 'DEBUGINFO_LINES_ONLY=yes', '--flags', 'DISABLE_FLAKE8_MIGRATIONS=yes', '--flags', 'OPENSOURCE=yes', '--flags', 'SANITIZER_TYPE=address', '--flags', 'TESTS_REQUESTED=yes', '--flags', 'USE_AIO=static', '--flags', 'USE_CLANG_CL=yes', '--flags', 'USE_EAT_MY_DATA=yes', '--flags', 'USE_ICONV=static', '--flags', 'USE_IDN=static', '--flags', 'USE_PREBUILT_TOOLS=no', '--sanitize', 'address', '--test-file-filter', 'test_tpch.py']' stopped by 600 seconds timeout",), {}) >> test_upload.py::TestUploadTpchS1::test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> 
test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_config_stored_in_config_store [GOOD] >> test_cms_erasure.py::TestDegradedGroupBlock42Keep::test_no_degraded_groups_after_shutdown [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterMirror3DC::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupBlock42Keep::test_no_degraded_groups_after_shutdown [GOOD] >> test_cms_erasure.py::TestDegradedGroupMirror3dcKeep::test_no_degraded_groups_after_shutdown |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageBadCases::test_cluster_change_state_storage [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--true] [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageBadCases::test_cluster_change_state_storage [GOOD] >> test_config_migration.py::TestConfigMigrationToV2::test_migration_to_v2 [GOOD] >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates0-no_updates] >> test_external.py::TestExternalE1::test[first_query_set.1.sql] >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates3-duplicate_pile_update] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageMultipleRingGroup::test_cluster_change_state_storage >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorMax::test_restart_as_much_as_can [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_config_migration.py::TestConfigMigrationToV2::test_migration_to_v2 [GOOD] |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test |99.6%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorMax::test_restart_as_much_as_can [GOOD] >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates0-no_updates] [GOOD] >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates3-duplicate_pile_update] [GOOD] >> test_bridge.py::TestBridgeBasic::test_update_and_get_cluster_state >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates1-multiple_primary_piles_in_request] >> test_external.py::TestExternalE1::test[first_query_set.1.sql] [GOOD] >> test_external.py::TestExternalE1::test[first_query_set.2.yql] >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--true] [GOOD] >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--false] >> test_external.py::TestExternalE1::test[first_query_set.2.yql] [GOOD] >> 
test_external.py::TestExternalE1::test[second_query_set.join.sql] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates0-no_updates] [GOOD] >> test_external.py::TestExternalE1::test[second_query_set.join.sql] [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates3-duplicate_pile_update] [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_external.py::TestExternalE1::test[second_query_set.join.sql] [GOOD] >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates1-multiple_primary_piles_in_request] [GOOD] >> test_bridge.py::TestBridgeBasic::test_update_and_get_cluster_state [GOOD] >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates2-no_primary_pile_in_result] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates4-invalid_pile_id] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupMirror3dcMax::test_no_degraded_groups_after_shutdown |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates1-multiple_primary_piles_in_request] [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_bridge.py::TestBridgeBasic::test_update_and_get_cluster_state [GOOD] >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates2-no_primary_pile_in_result] [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_diff_processing.py::TestTpchDiffProcessing::test_tpch[CheckCanonicalPolicy.NO] >> test_cms_erasure.py::TestDegradedGroupBlock42Max::test_no_degraded_groups_after_shutdown [GOOD] >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates4-invalid_pile_id] [GOOD] >> test_import_csv.py::TestExternalImportCsv::test >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupBlock42Max::test_no_degraded_groups_after_shutdown [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates2-no_primary_pile_in_result] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query00] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_bridge.py::TestBridgeValidation::test_invalid_updates[updates4-invalid_pile_id] [GOOD] >> test_cms_state_storage.py::TestCmsStateStorageSimpleMax::test_check_shutdown_state_storage_nodes [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/bridge/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsBlockKeep::test_restart_as_much_as_can [GOOD] |99.7%| [TA] $(B)/ydb/tests/functional/bridge/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_restarts.py::TestRestartClusterMirror34::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok |99.7%| [TA] {RESULT} $(B)/ydb/tests/functional/bridge/test-results/py3test/{meta.json ... results_accumulator.log} >> test_restarts.py::TestRestartSingleBlock42::test_restart_single_node_is_ok >> test_restarts.py::TestRestartClusterBlock42::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_state_storage.py::TestCmsStateStorageSimpleMax::test_check_shutdown_state_storage_nodes [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsBlockKeep::test_restart_as_much_as_can [GOOD] >> test_diff_processing.py::TestTpchDiffProcessing::test_tpch[CheckCanonicalPolicy.NO] [GOOD] >> test_diff_processing.py::TestTpchDiffProcessing::test_tpch[CheckCanonicalPolicy.WARNING] >> test_restarts.py::TestRestartMultipleMirror3DC::test_tablets_are_successfully_started_after_few_killed_nodes >> test_diff_processing.py::TestTpchDiffProcessing::test_tpch[CheckCanonicalPolicy.WARNING] [GOOD] >> test_diff_processing.py::TestTpchDiffProcessing::test_tpch[CheckCanonicalPolicy.ERROR] >> test_diff_processing.py::TestTpchDiffProcessing::test_tpch[CheckCanonicalPolicy.ERROR] [GOOD] >> test_diff_processing.py::TestTpcdsDiffProcessing::test_tpcds[CheckCanonicalPolicy.NO] >> test_workload.py::TestYdbKvWorkload::test[row] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_import_csv.py::TestExternalImportCsv::test [GOOD] >> test_import_csv.py::TestExternalImportCsvArrow::test |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterBlock42::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--true] [GOOD] >> KqpQueryService::ReplyPartLimitProxyNode >> Backup::UuidValue >> KqpQueryService::ReplyPartLimitProxyNode [GOOD] >> NodeIdDescribe::HasDistribution >> Backup::UuidValue [GOOD] |99.7%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--true] [GOOD] >> test_cms_erasure.py::TestDegradedGroupMirror3dcKeep::test_no_degraded_groups_after_shutdown [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/backup/unittest >> Backup::UuidValue [GOOD] Test command err: Found S3 object: "ProducerUuidValueBackup/data_00.csv" Found S3 object: "ProducerUuidValueBackup/metadata.json" Found S3 object: "ProducerUuidValueBackup/scheme.pb" |99.7%| [TM] {RESULT} ydb/tests/functional/backup/unittest >> test_workload.py::TestYdbLogWorkload::test[row] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupMirror3dcKeep::test_no_degraded_groups_after_shutdown [GOOD] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageMultipleRingGroup::test_cluster_change_state_storage [GOOD] >> test_import_csv.py::TestExternalImportCsvArrow::test [GOOD] >> KqpQuerySession::NoLocalAttach >> 
test_workload.py::TestYdbWorkload::test[row] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_import_csv.py::TestExternalImportCsvArrow::test [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageMultipleRingGroup::test_cluster_change_state_storage [GOOD] |99.8%| [TA] $(B)/ydb/tests/functional/config/test-results/py3test/{meta.json ... results_accumulator.log} |99.8%| [TA] {RESULT} $(B)/ydb/tests/functional/config/test-results/py3test/{meta.json ... results_accumulator.log} >> test_workload.py::TestYdbTransferWorkload::test[row] >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> test_workload.py::TestYdbWorkload::test >> KqpQuerySession::NoLocalAttach [GOOD] >> NodeIdDescribe::HasDistribution [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/kqp/kqp_query_session/unittest >> KqpQuerySession::NoLocalAttach [GOOD] |99.8%| [TM] {RESULT} ydb/tests/functional/kqp/kqp_query_session/unittest |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/kqp/kqp_query_svc/unittest >> NodeIdDescribe::HasDistribution [GOOD] |99.8%| [TM] {RESULT} ydb/tests/functional/kqp/kqp_query_svc/unittest >> test_workload.py::TestYdbWorkload::test |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> Transfer::BaseScenario_Local >> test_clickbench.py::TestClickbench::test_clickbench[Query00] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query01] >> test_cms_erasure.py::TestDegradedGroupMirror3dcMax::test_no_degraded_groups_after_shutdown [GOOD] |99.8%| [TA] $(B)/ydb/tests/functional/serverless/test-results/py3test/{meta.json ... results_accumulator.log} >> Transfer::BaseScenario_Local [GOOD] >> Transfer::BaseScenario_Remote |99.8%| [TA] {RESULT} $(B)/ydb/tests/functional/serverless/test-results/py3test/{meta.json ... results_accumulator.log} >> test_clickbench.py::TestClickbench::test_clickbench[Query01] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query02] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupMirror3dcMax::test_no_degraded_groups_after_shutdown [GOOD] |99.8%| [TA] $(B)/ydb/tests/functional/cms/test-results/py3test/{meta.json ... results_accumulator.log} |99.8%| [TA] {RESULT} $(B)/ydb/tests/functional/cms/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_workload.py::TestYdbKvWorkload::test[row] [GOOD] >> test_workload.py::TestYdbKvWorkload::test[column] >> test_workload.py::TestYdbWorkload::test >> Transfer::BaseScenario_Remote [GOOD] >> Transfer::CreateTransfer_TargetNotFound >> Transfer::CreateTransfer_TargetNotFound [GOOD] >> Transfer::ConnectionString_BadChar >> test_scheme_board_workload.py::TestReconfigSchemeBoardWorkload::test_scheme_board >> test_diff_processing.py::TestTpcdsDiffProcessing::test_tpcds[CheckCanonicalPolicy.NO] [GOOD] >> test_diff_processing.py::TestTpcdsDiffProcessing::test_tpcds[CheckCanonicalPolicy.WARNING] >> test_clickbench.py::TestClickbench::test_clickbench[Query02] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query03] >> Transfer::ConnectionString_BadChar [GOOD] >> Transfer::ConnectionString_BadDNSName >> test_diff_processing.py::TestTpcdsDiffProcessing::test_tpcds[CheckCanonicalPolicy.WARNING] [GOOD] >> test_diff_processing.py::TestTpcdsDiffProcessing::test_tpcds[CheckCanonicalPolicy.ERROR] >> test_restarts.py::TestRestartSingleBlock42::test_restart_single_node_is_ok [GOOD] >> Transfer::ConnectionString_BadDNSName [GOOD] >> Transfer::Create_WithPermission >> test_clickbench.py::TestClickbench::test_clickbench[Query03] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query04] >> Transfer::Create_WithPermission [GOOD] >> Transfer::Create_WithoutTablePermission >> test_diff_processing.py::TestTpcdsDiffProcessing::test_tpcds[CheckCanonicalPolicy.ERROR] [GOOD] >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.NO] >> test_restarts.py::TestRestartClusterMirror34::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/reconfig_state_storage_workload/tests/py3test >> test_scheme_board_workload.py::TestReconfigSchemeBoardWorkload::test_scheme_board |99.8%| [TM] {RESULT} ydb/tests/stress/reconfig_state_storage_workload/tests/py3test >> Transfer::Create_WithoutTablePermission [GOOD] >> Transfer::LocalTopic_WithPermission >> test_workload.py::TestYdbWorkload::test[row] [GOOD] >> test_workload.py::TestYdbWorkload::test[column] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartSingleBlock42::test_restart_single_node_is_ok [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterMirror34::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query04] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query05] >> test_workload.py::TestYdbTransferWorkload::test[row] [GOOD] >> test_workload.py::TestYdbTransferWorkload::test[column] >> Transfer::LocalTopic_WithPermission [GOOD] >> Transfer::LocalTopic_BigMessage >> test_encryption.py::TestEncryption::test_simple_encryption >> test_workload.py::TestYdbWorkload::test >> test_clickbench.py::TestClickbench::test_clickbench[Query05] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query06] >> ConsistentIndexRead::InteractiveTx >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.NO] [GOOD] >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.WARNING] >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.WARNING] 
[GOOD] >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.ERROR] >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.ERROR] [GOOD] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_diff_processing.py::TestClickbenchDiffProcessing::test_clickbench[CheckCanonicalPolicy.ERROR] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query06] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query07] >> test_workload.py::TestYdbWorkload::test [GOOD] >> test_workload.py::TestShowCreateViewWorkload::test_show_create_view_workload[30-None] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/cdc/tests/py3test >> test_workload.py::TestYdbWorkload::test [GOOD] |99.8%| [TM] {RESULT} ydb/tests/stress/cdc/tests/py3test >> test_restarts.py::TestRestartMultipleMirror3DC::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] >> test_workload.py::TestYdbLogWorkload::test[row] [FAIL] >> test_workload.py::TestYdbLogWorkload::test[column] >> test_workload.py::TestYdbWorkload::test [FAIL] |99.8%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartMultipleMirror3DC::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] >> test_workload.py::TestYdbWorkload::test |99.9%| [TA] $(B)/ydb/tests/functional/restarts/test-results/py3test/{meta.json ... results_accumulator.log} |99.9%| [TA] {RESULT} $(B)/ydb/tests/functional/restarts/test-results/py3test/{meta.json ... results_accumulator.log} >> test_clickbench.py::TestClickbench::test_clickbench[Query07] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query08] >> test_workload.py::TestYdbKvWorkload::test[column] [GOOD] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/kv/tests/py3test >> test_workload.py::TestYdbKvWorkload::test[column] [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/kv/tests/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query08] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query09] >> test_clean.py::TestClean::test [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_clean.py::TestClean::test [GOOD] 2025-06-25 15:39:55,867 WARNING libarchive: File (test_clean.py.TestClean.test/cluster/slot_1/logfile_d693qkoi.log) size has changed. Can't write more data than was declared in the tar header (51355159). 
(probably file was changed during archiving) >> test_workload.py::TestYdbTransferWorkload::test[column] [GOOD] >> S3PathStyleBackup::DisableVirtualAddressing >> test_workload.py::TestShowCreateViewWorkload::test_show_create_view_workload[30-None] [GOOD] >> test_workload.py::TestShowCreateViewWorkload::test_show_create_view_workload[30-test_scv] >> test_workload.py::TestYdbWorkload::test[column] [GOOD] >> S3PathStyleBackup::DisableVirtualAddressing [GOOD] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/transfer/tests/py3test >> test_workload.py::TestYdbTransferWorkload::test[column] [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/transfer/tests/py3test >> test_workload.py::TestYdbLogWorkload::test[column] [FAIL] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/log/tests/py3test >> test_workload.py::TestYdbLogWorkload::test[column] [FAIL] |99.9%| [TM] {RESULT} ydb/tests/stress/log/tests/py3test >> Replication::Types >> test_clickbench.py::TestClickbench::test_clickbench[Query09] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query10] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/backup/s3_path_style/unittest >> S3PathStyleBackup::DisableVirtualAddressing [GOOD] |99.9%| [TM] {RESULT} ydb/tests/functional/backup/s3_path_style/unittest >> Replication::Types [GOOD] >> Replication::PauseAndResumeReplication |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/simple_queue/tests/py3test >> test_workload.py::TestYdbWorkload::test[column] [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/simple_queue/tests/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query10] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query11] >> Replication::PauseAndResumeReplication [GOOD] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/olap_workload/tests/py3test >> test_workload.py::TestYdbWorkload::test [FAIL] |99.9%| [TM] {RESULT} ydb/tests/stress/olap_workload/tests/py3test ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/replication/unittest >> Replication::PauseAndResumeReplication [GOOD] Test command err: DDL: CREATE TABLE `SourceTable_12356396034707066476` ( Key Uint32, Key2 Uuid, v01 Uuid, v02 Uuid NOT NULL, v03 Double, PRIMARY KEY (Key, Key2) ); >>>>> Query: UPSERT INTO `SourceTable_12356396034707066476` (Key,Key2,v01,v02,v03) VALUES ( 1, CAST("00078af5-0000-0000-6c0b-040000000000" as Uuid), CAST("00078af5-0000-0000-6c0b-040000000001" as Uuid), UNWRAP(CAST("00078af5-0000-0000-6c0b-040000000002" as Uuid)), CAST("311111111113.222222223" as Double) ); DDL: CREATE ASYNC REPLICATION `Replication_12356396034707066476` FOR `SourceTable_12356396034707066476` AS `Table_12356396034707066476` WITH ( CONNECTION_STRING = 'grpc://localhost:29353/?database=local' ); >>>>> Query: SELECT `Key2`, `v01`, `v02`, `v03` FROM `Table_12356396034707066476` ORDER BY `Key2`, `v01`, `v02`, `v03` >>>>> Query error:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/local/Table_12356396034707066476]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 Attempt=19 count=-1 >>>>> Query: SELECT `Key2`, `v01`, `v02`, `v03` FROM `Table_12356396034707066476` ORDER BY `Key2`, `v01`, `v02`, `v03` Attempt=18 count=1 DDL: DROP ASYNC REPLICATION `Replication_12356396034707066476`; DDL: DROP TABLE `SourceTable_12356396034707066476` DDL: CREATE TABLE `SourceTable_6975831527893210042` ( Key Uint64 NOT NULL, Message Utf8, PRIMARY KEY (Key) ); DDL: CREATE ASYNC REPLICATION `Replication_6975831527893210042` FOR `SourceTable_6975831527893210042` AS `Table_6975831527893210042` WITH ( CONNECTION_STRING = 'grpc://localhost:29353/?database=local' ); >>>>> Query: INSERT INTO `SourceTable_6975831527893210042` (`Key`, `Message`) VALUES (1, 'Message-1'); >>>>> Query: SELECT `Message` FROM `Table_6975831527893210042` ORDER BY `Message` >>>>> Query error:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiReadTable!
:1:1: Error: Cannot find table 'db.[/local/Table_6975831527893210042]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 Attempt=19 count=-1 >>>>> Query: SELECT `Message` FROM `Table_6975831527893210042` ORDER BY `Message` Attempt=18 count=1 State: Paused DDL: ALTER ASYNC REPLICATION `Replication_6975831527893210042` SET ( STATE = "Paused" ); >>>>> Query: INSERT INTO `SourceTable_6975831527893210042` (`Key`, `Message`) VALUES (2, 'Message-2'); >>>>> Query: SELECT `Message` FROM `Table_6975831527893210042` ORDER BY `Message` Attempt=19 count=1 State: StandBy DDL: ALTER ASYNC REPLICATION `Replication_6975831527893210042` SET ( STATE = "StandBy" ); >>>>> Query: SELECT `Message` FROM `Table_6975831527893210042` ORDER BY `Message` Attempt=19 count=1 >>>>> Query: SELECT `Message` FROM `Table_6975831527893210042` ORDER BY `Message` Attempt=18 count=2 DDL: ALTER ASYNC REPLICATION `Replication_6975831527893210042` SET ( STATE = "Paused" ); DDL: ALTER ASYNC REPLICATION `Replication_6975831527893210042` SET ( STATE = "StandBy" ); DDL: DROP ASYNC REPLICATION `Replication_6975831527893210042`; DDL: DROP TABLE `SourceTable_6975831527893210042` |99.9%| [TM] {RESULT} ydb/tests/functional/replication/unittest >> test_clickbench.py::TestClickbench::test_clickbench[Query11] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query12] >> test_workload.py::TestShowCreateViewWorkload::test_show_create_view_workload[30-test_scv] [GOOD] >> test_workload.py::TestYdbWorkload::test [GOOD] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/show_create/view/tests/py3test >> test_workload.py::TestShowCreateViewWorkload::test_show_create_view_workload[30-test_scv] [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/show_create/view/tests/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query12] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query13] >> test_encryption.py::TestEncryption::test_simple_encryption [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query13] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query14] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/encryption/py3test >> test_encryption.py::TestEncryption::test_simple_encryption [GOOD] |99.9%| [TM] {RESULT} ydb/tests/functional/encryption/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query14] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query15] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/oltp_workload/tests/py3test >> test_workload.py::TestYdbWorkload::test [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/oltp_workload/tests/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query15] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query16] >> test_workload.py::TestYdbWorkload::test [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query16] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query17] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/s3_backups/tests/py3test >> test_workload.py::TestYdbWorkload::test [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/s3_backups/tests/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query17] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query18] >> test_clickbench.py::TestClickbench::test_clickbench[Query18] [GOOD] >> 
test_clickbench.py::TestClickbench::test_clickbench[Query19] >> test_clickbench.py::TestClickbench::test_clickbench[Query19] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query20] >> test_workload.py::TestYdbWorkload::test [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query20] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query21] >> test_clickbench.py::TestClickbench::test_clickbench[Query21] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query22] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/stress/node_broker/tests/py3test >> test_workload.py::TestYdbWorkload::test [GOOD] |99.9%| [TM] {RESULT} ydb/tests/stress/node_broker/tests/py3test >> test_clickbench.py::TestClickbench::test_clickbench[Query22] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query23] >> ConsistentIndexRead::InteractiveTx [GOOD] >> KqpExtTest::SecondaryIndexSelectUsingScripting >> KqpExtTest::SecondaryIndexSelectUsingScripting [GOOD] |99.9%| [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/kqp/kqp_indexes/unittest >> KqpExtTest::SecondaryIndexSelectUsingScripting [GOOD] |99.9%| [TM] {RESULT} ydb/tests/functional/kqp/kqp_indexes/unittest >> test_clickbench.py::TestClickbench::test_clickbench[Query23] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query24] >> test_clickbench.py::TestClickbench::test_clickbench[Query24] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query25] >> test_clickbench.py::TestClickbench::test_clickbench[Query25] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query26] >> test_clickbench.py::TestClickbench::test_clickbench[Query26] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query27] >> test_clickbench.py::TestClickbench::test_clickbench[Query27] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query28] >> test_clickbench.py::TestClickbench::test_clickbench[Query28] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query29] >> test_clickbench.py::TestClickbench::test_clickbench[Query29] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query30] >> test_clickbench.py::TestClickbench::test_clickbench[Query30] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query31] >> test_upload.py::TestUploadTpchS1::test [GOOD] ------- [TM] {asan, default-linux-x86_64, release} ydb/tests/functional/tpc/medium/py3test >> test_upload.py::TestUploadTpchS1::test [GOOD] 2025-06-25 15:43:51,949 WARNING libarchive: File (test_upload.py.TestUploadTpchS1.test/cluster/slot_1/logfile_5u72jgr8.log) size has changed. Can't write more data than was declared in the tar header (215973459). (probably file was changed during archiving) >> test_clickbench.py::TestClickbench::test_clickbench[Query31] [GOOD] >> test_clickbench.py::TestClickbench::test_clickbench[Query32]
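The Replication::PauseAndResumeReplication output above reduces to the DDL sequence below, shown as a minimal readability sketch rather than the test's literal script: the table and replication names are placeholders instead of the suffixed identifiers the test generates, and the connection string reuses the ephemeral local endpoint (grpc://localhost:29353/?database=local) that appears in the log.

-- Source table whose changes will be mirrored into `TargetTable`.
CREATE TABLE `SourceTable` (
    Key Uint64 NOT NULL,
    Message Utf8,
    PRIMARY KEY (Key)
);

-- Start async replication of `SourceTable` into `TargetTable` over the local endpoint.
CREATE ASYNC REPLICATION `PauseResumeReplication`
FOR `SourceTable` AS `TargetTable`
WITH (CONNECTION_STRING = 'grpc://localhost:29353/?database=local');

-- A row written now is delivered once the target table materializes.
INSERT INTO `SourceTable` (`Key`, `Message`) VALUES (1, 'Message-1');

-- Pause delivery; rows written while paused are retained but not yet visible on the target.
ALTER ASYNC REPLICATION `PauseResumeReplication` SET (STATE = "Paused");
INSERT INTO `SourceTable` (`Key`, `Message`) VALUES (2, 'Message-2');

-- Resume delivery; the row written during the pause catches up on `TargetTable`.
ALTER ASYNC REPLICATION `PauseResumeReplication` SET (STATE = "StandBy");

-- Clean up, as the test does at the end of each scenario.
DROP ASYNC REPLICATION `PauseResumeReplication`;
DROP TABLE `SourceTable`;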